Skip to content

Commit

Permalink
Made benchmarks extendable
Browse files Browse the repository at this point in the history
  • Loading branch information
JordiManyer committed Nov 5, 2024
1 parent 45ff3e3 commit 60fab73
Show file tree
Hide file tree
Showing 4 changed files with 120 additions and 45 deletions.
53 changes: 53 additions & 0 deletions benchmark/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,53 @@
# Benchmarking Suite

The following benchmarking suite uses `PkgBenchmark.jl` and `BenchmarkTools.jl` to compare the performance of different branches of Gridap.

## Running the benchmarks

### Running as CI job

The benchmarks are setup as a manual Github Actions workflow in `.github/workflows/benchmark.yml`. To run the workflow, you will need administrator access to the Gridap repository, then follow instructions [here](https://docs.github.com/en/actions/managing-workflow-runs-and-deployments/managing-workflow-runs/manually-running-a-workflow).

The workflow has two inputs: `target` and `base`, which are the branches/tags/commits you want to compare. The workflow will run the benchmarks on the `target` branch and compare them with the `base` branch (`master` by default).

### Running Locally

To run the benchmarks locally, you can have a look at the [documentation for `PkgBenchmark.jl`](https://juliaci.github.io/PkgBenchmark.jl/stable/run_benchmarks/).

Alternatively, you can run the CI script locally from a local copy of the repository. From the Gridap root directory, run the following commands:

```bash
# Instantiate Gridap and Gridap/benchmark
julia -e 'using Pkg; Pkg.develop(PackageSpec(path=pwd())); Pkg.instantiate()'
julia --project=benchmark/ -e 'using Pkg; Pkg.develop(PackageSpec(path=pwd())); Pkg.instantiate()'
# Run the benchmarks
export BM_TARGET="your target branch"
export BM_BASE="your reference branch"
julia --project=benchmark/ --color=yes benchmark/run_benchmarks.jl
```

where `BM_TARGET` and `BM_BASE` are the branches/tags/commits you want to compare.

## Adding a new benchmark

To add a new benchmark suite `xyx`, create a new file `bm/bm_xyx.jl` with the following structure:

```julia
module bm_xyx

using PkgBenchmark, BenchmarkTools

const SUITE = BenchmarkGroup()

[... Add your benchmarks here ...]

end # module
```

Then, add the following line to the `benchmarks.jl` file:

```julia
@include_bm SUITE "bm_xyx"
```

This should automatically include the new benchmarks into the global benchmarking suite.
31 changes: 8 additions & 23 deletions benchmark/benchmarks.jl
Original file line number Diff line number Diff line change
@@ -1,29 +1,14 @@
using BenchmarkTools
using PkgBenchmark
using Gridap

# Register a benchmark sub-suite defined in `bm/<name>.jl`.
# Each such file defines a module `<name>` containing a constant `SUITE`
# (a `BenchmarkGroup`); this macro includes the file and stores that group
# under the key `<name>` in the group passed as the first argument.
# NOTE(review): the old `include("drivers.jl")` was removed here — that file
# is deleted in this commit; suites now live under `bm/`.
macro include_bm(SUITE, name)
    quote
        include("bm/$($name).jl")
        # `esc` makes the macro write into the caller's group. Without it,
        # hygiene resolves the bare `SUITE` in this module's global scope and
        # the macro argument is silently ignored.
        $(esc(SUITE))["$($name)"] = $(Symbol(name)).SUITE
    end
end

# Top-level benchmark suite; each `@include_bm` call below registers one
# sub-suite from `benchmark/bm/`.
# The old hard-coded assembly loop that used to live here relied on
# `bm_matrix_assembly` from the now-deleted `drivers.jl` and is removed;
# the equivalent benchmarks are defined in `bm/bm_assembly.jl`.
const SUITE = BenchmarkGroup()

@include_bm SUITE "bm_assembly"
59 changes: 59 additions & 0 deletions benchmark/bm/bm_assembly.jl
Original file line number Diff line number Diff line change
@@ -0,0 +1,59 @@
module bm_assembly

using PkgBenchmark, BenchmarkTools
using Gridap
using Gridap.Geometry

# Bilinear forms to benchmark. NOTE(review): the Unicode operators (∫, ⋅, ⊙)
# were stripped from the scraped source; they are restored here following
# Gridap's standard weak-form notation — confirm against the upstream commit.
mass(u,v,dΩ) = ∫(u⋅v)dΩ
laplacian(u,v,dΩ) = ∫(∇(u)⊙∇(v))dΩ
graddiv(u,v,dΩ) = ∫((∇⋅u)⋅(∇⋅v))dΩ

"""
    driver(Ω, reffe, qdegree, biform)

Assemble and return the matrix of the bilinear form `biform` on the
triangulation `Ω`, using a test/trial FE space built from the reference
FE tuple `reffe` and a quadrature of degree `qdegree`.
"""
function driver(
  Ω :: Triangulation,
  reffe :: Tuple,
  qdegree :: Integer,
  biform :: Function
)
  model = get_background_model(Ω)
  # NOTE(review): this binding was garbled in the scrape (`= Measure(...)`);
  # `dΩ` is the name the forms above expect.
  dΩ = Measure(Ω, qdegree)
  V = TestFESpace(model, reffe)
  a(u,v) = biform(u, v, dΩ)
  return assemble_matrix(a, V, V)
end

const SUITE = BenchmarkGroup()

# Cartesian meshes: 2D 10×10 and 3D 6×6×6 unit boxes.
for (D, n) in [(2,10), (3,6)]
  domain = Tuple([repeat([0,1], D)...])
  partition = Tuple(fill(n, D))
  model = UnstructuredDiscreteModel(CartesianDiscreteModel(domain, partition))
  trian_cases = [
    ("bulk", Triangulation(model)),
    ("view", Triangulation(model, collect(1:div(n^D, 2)))), # about half of the cells
  ]
  for (trian_name, trian) in trian_cases
    for order in [1,2,3]
      basis_cases = [
        ("lagrangian", lagrangian, Float64, order),
        ("vector_lagragian", lagrangian, VectorValue{D,Float64}, order), # (sic) key kept for benchmark-name continuity
        ("raviart_thomas", raviart_thomas, Float64, order-1),
      ]
      for (basis_name, basis, T, degree) in basis_cases
        biform_cases = [
          ("mass", mass, 2*order),
          ("laplacian", laplacian, 2*(order-1)),
          ("graddiv", graddiv, 2*(order-1)),
        ]
        for (biform_name, biform, qdegree) in biform_cases
          reffe = ReferenceFE(basis, T, degree)
          name = "assembly_$(D)D_$(trian_name)_$(basis_name)_$(biform_name)_$(order)"
          # Interpolate ($) so setup cost is excluded from the timed region.
          SUITE[name] = @benchmarkable driver(
            $(trian), $(reffe), $(qdegree), $(biform)
          )
        end
      end
    end
  end
end

end # module
22 changes: 0 additions & 22 deletions benchmark/drivers.jl

This file was deleted.

0 comments on commit 60fab73

Please sign in to comment.