Add benchmarking as test #146

Merged 13 commits on Mar 3, 2024
.github/workflows/Benchmark.yml (59 additions, 0 deletions)
@@ -0,0 +1,59 @@
name: Benchmarking

on:
push:
branches:
- main
tags: ['*']
pull_request:

jobs:
profile:
name: Benchmarking Analysis Passes
runs-on: ubuntu-latest
continue-on-error: ${{ matrix.version == 'nightly' }}
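    # note: the matrix below only pins version '1', so this condition is
    # currently always false; presumably kept for a future nightly entry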
strategy:
fail-fast: false
matrix:
version:
- '1'
os:
- ubuntu-latest
arch:
- x64
include:
- version: '1'
os: ubuntu-latest
arch: x64
coverage: false
steps:
- uses: actions/checkout@v2
- uses: julia-actions/setup-julia@v1
with:
version: ${{ matrix.version }}
arch: ${{ matrix.arch }}
- uses: actions/cache@v1
env:
cache-name: cache-artifacts
with:
path: ~/.julia/artifacts
key: ${{ runner.os }}-test-${{ env.cache-name }}-${{ hashFiles('**/Project.toml') }}
restore-keys: |
${{ runner.os }}-test-${{ env.cache-name }}-
${{ runner.os }}-test-
${{ runner.os }}-
- uses: julia-actions/julia-buildpkg@v1
- uses: julia-actions/julia-runtest@v1
env:
RUN_MODE: profile
- uses: julia-actions/julia-processcoverage@v1
if: matrix.coverage
- uses: codecov/codecov-action@v1
if: matrix.coverage
with:
file: lcov.info
- uses: coverallsapp/github-action@master
if: matrix.coverage
with:
github-token: ${{ secrets.GITHUB_TOKEN }}
path-to-lcov: lcov.info
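
The job reuses the package's normal test entry point: julia-runtest executes test/runtests.jl with RUN_MODE=profile in the environment. The dispatch on that variable is not part of the files shown here; a minimal sketch of how such gating typically looks (structure assumed, not taken from the PR):

# test/runtests.jl (hypothetical sketch)
using Test

if get(ENV, "RUN_MODE", "") == "profile"
    # benchmark run: execute the suite defined in test/profile.jl
    include("profile.jl")
else
    # default run: the package's regular unit tests
    @testset "JuliaBUGS" begin
        # ...
    end
end
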
Project.toml (1 addition, 0 deletions)
@@ -6,6 +6,7 @@ version = "0.3.0"
AbstractMCMC = "80f14c24-f653-4e6a-9b94-39d6b0f70001"
AbstractPPL = "7a57a42e-76ec-4ea3-a279-07e840d6d9cf"
BangBang = "198e06fe-97b7-11e9-32a5-e1d131e6ad66"
BenchmarkTools = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf"
Bijectors = "76274a88-744f-5084-9051-94815aaf08c4"
Distributions = "31c24e10-a181-5473-b8eb-7969acd0382f"
Documenter = "e30172f5-a6a5-5a46-863b-614d45cd2de4"
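
Not shown in this hunk: a [compat] entry would normally accompany the new dependency. A plausible bound (assumed, not taken from the PR):

[compat]
BenchmarkTools = "1"
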
src/JuliaBUGS.jl (20 additions, 7 deletions)
@@ -202,6 +202,24 @@ function merge_with_coalescence(
    return NamedTuple{Tuple(unioned_keys)}(Tuple(coalesced_values))
end

function compute_data_transformation(scalar, array_sizes, model_def, data)
    transformed_variables = Dict{Symbol,Any}()
    for s in scalar
        transformed_variables[s] = missing
    end
    for (k, v) in array_sizes
        transformed_variables[k] = Array{Union{Missing,Int,Float64}}(missing, v...)
    end

    has_new_val = true
    while has_new_val
        has_new_val, transformed_variables = analyze_program(
            DataTransformation(false, transformed_variables), model_def, data
        )
    end
    return transformed_variables
end

"""
compile(model_def[, data, initializations])
Expand Down Expand Up @@ -231,14 +249,9 @@ function compile(model_def::Expr, data, inits; is_transformed=true)
    scalars, array_sizes = analyze_program(
        CollectVariables(model_def, data), model_def, data
    )
-    has_new_val, transformed_variables = analyze_program(
-        DataTransformation(scalars, array_sizes), model_def, data
-    )
-    while has_new_val
-        has_new_val, transformed_variables = analyze_program(
-            DataTransformation(false, transformed_variables), model_def, data
-        )
-    end
+    transformed_variables = compute_data_transformation(
+        scalars, array_sizes, model_def, data
+    )
    array_bitmap, transformed_variables = analyze_program(
        PostChecking(data, transformed_variables), model_def, data
    )
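
The refactor extracts the fixed-point loop that was previously inlined in compile: the new pass initializes every variable to missing, then re-runs DataTransformation until an iteration derives no new values. A self-contained sketch of the same pattern (names and the toy pass are illustrative, not from the package):

# generic fixed-point driver: apply `pass` until it reports no change
function run_to_fixpoint(pass, state)
    changed = true
    while changed
        changed, state = pass(state)
    end
    return state
end

# toy pass: values saturate at 8, mimicking repeated constant propagation
toy_pass(x) = (x < 8, min(2x, 8))
run_to_fixpoint(toy_pass, 1)  # == 8
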
src/compiler_pass.jl (1 addition, 1 deletion)
@@ -451,7 +451,7 @@ struct PostChecking <: CompilerPass
    logical_or_stochastic::Dict # used to identify logical or stochastic assignment
end

-function PostChecking(data, transformed_variables::Dict)
+function PostChecking(data::NamedTuple, transformed_variables::Dict)
    is_data = Dict()
    definition_bit_map = Dict()
    logical_or_stochastic = Dict()
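
Tightening the untyped data argument to data::NamedTuple documents the expected input shape, e.g. (N = 2, x = [1.0, 2.0]); the example value here is illustrative, not taken from the PR. With the untyped method, other containers such as a Dict would be accepted and could fail later inside the pass.
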
test/profile.jl (172 additions, 0 deletions)
@@ -0,0 +1,172 @@
using BenchmarkTools

using JuliaBUGS
using JuliaBUGS:
    BUGSExamples,
    analyze_program,
    CollectVariables,
    DataTransformation,
    PostChecking,
    NodeFunctions,
    merge_with_coalescence,
    compute_data_transformation

suite = BenchmarkGroup()

for name in keys(BUGSExamples.VOLUME_I)
    @info "Adding benchmark for $name"
    model_def = BUGSExamples.VOLUME_I[name].model_def
    data = BUGSExamples.VOLUME_I[name].data

    scalars, array_sizes = analyze_program(
        CollectVariables(model_def, data), model_def, data
    )

    transformed_variables = compute_data_transformation(
        scalars, array_sizes, model_def, data
    )

    array_bitmap, transformed_variables = analyze_program(
        PostChecking(data, transformed_variables), model_def, data
    )
    merged_data = merge_with_coalescence(deepcopy(data), transformed_variables)

    vars, array_sizes, array_bitmap, node_args, node_functions, dependencies = analyze_program(
        NodeFunctions(array_sizes, array_bitmap), model_def, merged_data
    )

    _suite = BenchmarkGroup()

    _suite["CollectVariables"] = @benchmarkable analyze_program(
        CollectVariables($model_def, $data), $model_def, $data
    )

    _suite["DataTransformation"] = @benchmarkable compute_data_transformation(
        $scalars, $array_sizes, $model_def, $data
    )

    _suite["NodeFunctions"] = @benchmarkable analyze_program(
        NodeFunctions($array_sizes, $array_bitmap), $model_def, $merged_data
    )

    tune!(_suite)
    suite[string(name)] = _suite
end
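
# Note on the `$`-interpolation above: @benchmarkable splices the values of
# model_def, data, etc. into the benchmark body, so the timed expression does
# not pay for (or measure) global-variable access.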

results = run(suite; verbose=true)
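
# Optional (not part of this PR): persist the raw results for later comparison,
# e.g. against a previous run via BenchmarkTools.judge.
# BenchmarkTools.save("benchmark_results.json", results)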

function create_result_dict(results)
    result_dict = Dict{String,Dict{String,Dict{String,String}}}()
    for (name, example_suite) in results
        _d = Dict{String,Dict{String,String}}()
        for k in ("CollectVariables", "DataTransformation", "NodeFunctions")
            __d = Dict{String,String}()
            # median/minimum/maximum of a Trial return TrialEstimates;
            # local names chosen to avoid shadowing Base.min and Base.max
            med = median(example_suite[k])
            lower = minimum(example_suite[k])
            upper = maximum(example_suite[k])
            for (str, val) in zip(["median", "minimum", "maximum"], [med, lower, upper])
                __d[str] = BenchmarkTools.prettytime(val.time)
            end
            __d["memory"] = BenchmarkTools.prettymemory(memory(example_suite[k]))
            _d[k] = __d
        end
        result_dict[name] = _d
    end
    return result_dict
end

function print_pure_text_table(result_dict)
    # Define the table header
    println(
        rpad("Example Name", 25),
        "|",
        lpad("Category", 20),
        "|",
        lpad("Median Time", 15),
        "|",
        lpad("Minimum Time", 15),
        "|",
        lpad("Maximum Time", 15),
        "|",
        lpad("Memory Usage", 15),
    )
    println("-"^105) # Adjust the number based on the total length of the header

    # Iterate through each example and its benchmarks to populate the table rows
    for (name, benchmarks) in result_dict
        first_category = true
        for (category, results) in benchmarks
            # print the example name only on its first row
            prefix = first_category ? rpad(name, 25) : rpad("", 25)
            println(
                prefix,
                "|",
                lpad(category, 20),
                "|",
                lpad(results["median"], 15),
                "|",
                lpad(results["minimum"], 15),
                "|",
                lpad(results["maximum"], 15),
                "|",
                lpad(results["memory"], 15),
            )
            first_category = false
        end
        println("-"^105) # Adjust the number based on the total length of the header
    end
end

function print_markdown_table_to_file(result_dict, filename=nothing)
    output_target = filename !== nothing ? open(filename, "w") : stdout

    try
        println(
            output_target,
            "| Example Name | Category | Median Time | Minimum Time | Maximum Time | Memory Usage |",
        )
        println(
            output_target,
            "|--------------|----------|-------------|--------------|--------------|--------------|",
        )

        for (name, benchmarks) in result_dict
            first_category = true
            for (category, results) in benchmarks
                # repeat the example name only on its first row
                name_cell = first_category ? name : ""
                println(
                    output_target,
                    "| $(name_cell) | $(category) | $(results["median"]) | $(results["minimum"]) | $(results["maximum"]) | $(results["memory"]) |",
                )
                first_category = false
            end
        end
    finally
        if filename !== nothing
            close(output_target)
        end
    end
end

result_dict = create_result_dict(results)
print_pure_text_table(result_dict)
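
# To also emit the Markdown report (output path is arbitrary):
# print_markdown_table_to_file(result_dict, "benchmark_results.md")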