Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

CI benchmarking suite #533

Open
wants to merge 13 commits into
base: main
Choose a base branch
from
64 changes: 64 additions & 0 deletions .github/workflows/benchmark_pr.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,64 @@
name: Benchmark a pull request

on:
  # NOTE(review): `pull_request_target` grants this workflow write permissions
  # while the benchmark step below executes code from the PR head
  # (`--rev=...head.sha`). This is a deliberate trade-off to allow commenting
  # on fork PRs, but it means untrusted benchmark code runs with a
  # `pull-requests: write` token — confirm this risk is acceptable.
  pull_request_target:
    branches:
      - main

permissions:
  pull-requests: write

jobs:
  generate_plots:
    runs-on: ubuntu-latest

    steps:
      # v2 of actions/checkout runs on a deprecated Node.js runtime; v4 is the
      # supported release and is backward-compatible here.
      - uses: actions/checkout@v4
      - uses: julia-actions/setup-julia@v1
        with:
          version: "1.9"
      - uses: julia-actions/cache@v1
      - name: Extract Package Name from Project.toml
        id: extract-package-name
        run: |
          # `::set-output` is deprecated and disabled on current runners;
          # step outputs must be appended to the $GITHUB_OUTPUT file instead.
          PACKAGE_NAME=$(grep "^name" Project.toml | sed 's/^name = "\(.*\)"$/\1/')
          echo "package_name=$PACKAGE_NAME" >> "$GITHUB_OUTPUT"
      - name: Build AirspeedVelocity
        env:
          JULIA_NUM_THREADS: 2
        run: |
          # Lightweight build step, as sometimes the runner runs out of memory:
          julia -e 'ENV["JULIA_PKG_PRECOMPILE_AUTO"]=0; import Pkg; Pkg.add(;url="https://github.com/MilesCranmer/AirspeedVelocity.jl.git")'
          julia -e 'ENV["JULIA_PKG_PRECOMPILE_AUTO"]=0; import Pkg; Pkg.build("AirspeedVelocity")'
      - name: Add ~/.julia/bin to PATH
        run: |
          echo "$HOME/.julia/bin" >> $GITHUB_PATH
      - name: Run benchmarks
        run: |
          echo $PATH
          ls -l ~/.julia/bin
          mkdir results
          # Benchmarks compare the default branch against the PR head commit.
          benchpkg ${{ steps.extract-package-name.outputs.package_name }} --rev="${{github.event.repository.default_branch}},${{github.event.pull_request.head.sha}}" --url=${{ github.event.repository.clone_url }} --bench-on="${{github.event.pull_request.head.sha}}" --output-dir=results/ --tune --exeflags="-O3 --threads=auto"
      - name: Create markdown table from benchmarks
        run: |
          benchpkgtable ${{ steps.extract-package-name.outputs.package_name }} --rev="${{github.event.repository.default_branch}},${{github.event.pull_request.head.sha}}" --input-dir=results/ --ratio > table.md
          echo '### Benchmark Results' > body.md
          echo '' >> body.md
          echo '' >> body.md
          cat table.md >> body.md
          echo '' >> body.md
      - name: Find Comment
        uses: peter-evans/find-comment@v2
        id: fcbenchmark
        with:
          issue-number: ${{ github.event.pull_request.number }}
          comment-author: 'github-actions[bot]'
          body-includes: Benchmark Results

      # Creates the results comment on first run; edits it in place afterwards
      # (comment-id is empty when find-comment matched nothing).
      - name: Comment on PR
        uses: peter-evans/create-or-update-comment@v3
        with:
          comment-id: ${{ steps.fcbenchmark.outputs.comment-id }}
          issue-number: ${{ github.event.pull_request.number }}
          body-path: body.md
          edit-mode: replace
2 changes: 2 additions & 0 deletions benchmark/Project.toml
Original file line number Diff line number Diff line change
@@ -0,0 +1,2 @@
# Dependencies for the benchmark suite in benchmark/benchmarks.jl.
# The UUID is BenchmarkTools' registered package UUID in the General registry.
[deps]
BenchmarkTools = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf"
62 changes: 62 additions & 0 deletions benchmark/benchmarks.jl
Original file line number Diff line number Diff line change
@@ -0,0 +1,62 @@
using BenchmarkTools
using PythonCall
using PythonCall: pydel!, pyimport, pydict, pystr, pyrange

# Root benchmark suite; benchmark runners (e.g. AirspeedVelocity/PkgBenchmark
# conventions) look up a module-level constant named `SUITE`.
const SUITE = BenchmarkGroup()

"""
    test_pydict_init()

Fill a Python dict with 1000 entries (string key, `i + random()` value) via
the Julia-side PythonCall API, leaving temporary Python objects to the GC.
Returns the populated `pydict`.
"""
function test_pydict_init()
    rand_fn = pyimport("random").random
    d = pydict()
    for idx in pyrange(1000)
        d[pystr(idx)] = idx + rand_fn()
    end
    return d
end

SUITE["basic"]["julia"]["pydict"]["init"] = @benchmarkable test_pydict_init()

"""
    test_pydict_pydel()

Same workload as `test_pydict_init`, but each temporary Python object is
eagerly released with `pydel!` once stored, to benchmark manual freeing
against relying on the garbage collector. Returns the populated `pydict`.
"""
function test_pydict_pydel()
    rand_fn = pyimport("random").random
    d = pydict()
    for idx in pyrange(1000)
        key = pystr(idx)
        noise = rand_fn()
        val = idx + noise
        d[key] = val
        # The dict now owns its own references, so the temporaries can go.
        pydel!(key)
        pydel!(noise)
        pydel!(val)
        pydel!(idx)
    end
    return d
end

SUITE["basic"]["julia"]["pydict"]["pydel"] = @benchmarkable test_pydict_pydel()

"""
    test_atpy(::Val{use_pydel})

Same dict-filling workload as the functions above, but written in PythonCall's
`@py` DSL. The `Val` type parameter selects at compile time (via `@generated`)
whether the loop variable is freed each iteration with `pydel!`.
"""
@generated function test_atpy(::Val{use_pydel}) where {use_pydel}
    quote
        @py begin
            import random: random
            x = {}
            for i in range(1000)
                x[str(i)] = i + random()
                # Spliced at generation time: either a `pydel!` call (escaping
                # to Julia with `@jl`) or a no-op, depending on `use_pydel`.
                $(use_pydel ? :(@jl PythonCall.pydel!(i)) : :(nothing))
            end
            x
        end
    end
end

SUITE["basic"]["@py"]["pydict"]["init"] = @benchmarkable test_atpy(Val(false))
SUITE["basic"]["@py"]["pydict"]["pydel"] = @benchmarkable test_atpy(Val(true))


# Benchmark a full garbage collection after allocating many Python lists.
# (Stray GitHub review-UI text that had been scraped into the middle of this
# call has been removed — it made the expression syntactically invalid.)
include("gcbench.jl")
using .GCBench: append_lots

SUITE["gc"]["full"] = @benchmarkable(
    GC.gc(true),
    # Start from a clean heap, then allocate garbage for the timed GC to sweep.
    setup=(GC.gc(true); append_lots(size=159)),
    seconds=30,
    evals=1,
)
13 changes: 13 additions & 0 deletions benchmark/gcbench.jl
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
module GCBench

using PythonCall

"""
    append_lots(; iters=100 * 1024, size=1596)

Build a Python list containing `iters` Python lists, each wrapping `size`
random Julia floats, to generate GC pressure. Returns the outer `pylist`.
"""
function append_lots(; iters=100 * 1024, size=1596)
    outer = pylist()
    for _ in 1:iters
        outer.append(pylist(rand(size)))
    end
    return outer
end

end
Loading