Add data_rep_level for SeparateLevel and MutexLevel #659

Closed

wants to merge 2 commits into from
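This PR adds a data_rep_level method for MutexLevel that simply delegates to the level it wraps (the SeparateLevel counterpart named in the title lives in a file not shown in this excerpt), and reworks the parallel SpMV benchmarks: the thread-unsafe spmv_threaded is commented out in favor of new spmv_atomic_element and spmv_mutex variants. A minimal sketch of what the delegation means in practice, assuming Finch.data_rep_level remains an internal query on level types:

using Finch

# Illustrative sketch, not code from this PR: with the new method, a
# representation query on a Mutex-wrapped level type sees through the
# wrapper and agrees with the query on the inner level it guards.
inner = Element(0.0)
guarded = Mutex(Element(0.0))
Finch.data_rep_level(typeof(guarded)) == Finch.data_rep_level(typeof(inner))  # true, by delegation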
101 changes: 74 additions & 27 deletions benchmark/benchmarks.jl
@@ -91,7 +91,7 @@ eval(let
x = rand(1)
y = rand(1)
@finch_kernel function spmv(y, A, x)
-        for j=_, i=_
+        for j = _, i = _
y[i] += A[i, j] * x[j]
end
end
@@ -120,7 +120,7 @@ C = Tensor(Dense(SparseList(Element(0.0))))

@finch (C .= 0; for i=_, j=_, k=_; C[j, i] += A[k, i] * B[k, i] end)
"""
-cmd = pipeline(`$(Base.julia_cmd()) --project=$(Base.active_project()) --eval $code`, stdout = IOBuffer())
+cmd = pipeline(`$(Base.julia_cmd()) --project=$(Base.active_project()) --eval $code`, stdout=IOBuffer())

SUITE["compile"]["time_to_first_SpGeMM"] = @benchmarkable run(cmd)

@@ -131,7 +131,11 @@ let

SUITE["compile"]["compile_SpGeMM"] = @benchmarkable begin
A, B, C = ($A, $B, $C)
-    Finch.execute_code(:ex, typeof(Finch.@finch_program_instance (C .= 0; for i=_, j=_, k=_; C[j, i] += A[k, i] * B[k, j] end; return C)))
+    Finch.execute_code(:ex, typeof(Finch.@finch_program_instance (C .= 0;
+        for i = _, j = _, k = _
+            C[j, i] += A[k, i] * B[k, j]
+        end;
+        return C)))
end
end

@@ -141,7 +145,11 @@ let

SUITE["compile"]["compile_pretty_triangle"] = @benchmarkable begin
A, c = ($A, $c)
-    @finch_code (c .= 0; for i=_, j=_, k=_; c[] += A[i, j] * A[j, k] * A[i, k] end; return c)
+    @finch_code (c .= 0;
+        for i = _, j = _, k = _
+            c[] += A[i, j] * A[j, k] * A[i, k]
+        end;
+        return c)
end
end

@@ -186,22 +194,28 @@ end
SUITE["indices"] = BenchmarkGroup()

function spmv32(A, x)
-    y = Tensor(Dense{Int32}(Element{0.0, Float64, Int32}()))
-    @finch (y .= 0; for i=_, j=_; y[i] += A[j, i] * x[j] end)
+    y = Tensor(Dense{Int32}(Element{0.0,Float64,Int32}()))
+    @finch (y .= 0;
+        for i = _, j = _
+            y[i] += A[j, i] * x[j]
+        end)
return y
end

SUITE["indices"]["SpMV_32"] = BenchmarkGroup()
for mtx in ["SNAP/soc-Epinions1"]#, "SNAP/soc-LiveJournal1"]
A = SparseMatrixCSC(matrixdepot(mtx))
-    A = Tensor(Dense{Int32}(SparseList{Int32}(Element{0.0, Float64, Int32}())), A)
-    x = Tensor(Dense{Int32}(Element{0.0, Float64, Int32}()), rand(size(A)[2]))
+    A = Tensor(Dense{Int32}(SparseList{Int32}(Element{0.0,Float64,Int32}())), A)
+    x = Tensor(Dense{Int32}(Element{0.0,Float64,Int32}()), rand(size(A)[2]))
SUITE["indices"]["SpMV_32"][mtx] = @benchmarkable spmv32($A, $x)
end

function spmv_p1(A, x)
y = Tensor(Dense(Element(0.0)))
-    @finch (y .= 0; for i=_, j=_; y[i] += A[j, i] * x[j] end)
+    @finch (y .= 0;
+        for i = _, j = _
+            y[i] += A[j, i] * x[j]
+        end)
return y
end

@@ -217,56 +231,89 @@ for mtx in ["SNAP/soc-Epinions1"]#, "SNAP/soc-LiveJournal1"]
end

function spmv64(A, x)
-    y = Tensor(Dense{Int64}(Element{0.0, Float64, Int64}()))
-    @finch (y .= 0; for i=_, j=_; y[i] += A[j, i] * x[j] end)
+    y = Tensor(Dense{Int64}(Element{0.0,Float64,Int64}()))
+    @finch (y .= 0;
+        for i = _, j = _
+            y[i] += A[j, i] * x[j]
+        end)
return y
end

SUITE["indices"]["SpMV_64"] = BenchmarkGroup()
for mtx in ["SNAP/soc-Epinions1"]#, "SNAP/soc-LiveJournal1"]
A = SparseMatrixCSC(matrixdepot(mtx))
-    A = Tensor(Dense{Int64}(SparseList{Int64}(Element{0.0, Float64, Int64}())), A)
-    x = Tensor(Dense{Int64}(Element{0.0, Float64, Int64}()), rand(size(A)[2]))
+    A = Tensor(Dense{Int64}(SparseList{Int64}(Element{0.0,Float64,Int64}())), A)
+    x = Tensor(Dense{Int64}(Element{0.0,Float64,Int64}()), rand(size(A)[2]))
SUITE["indices"]["SpMV_64"][mtx] = @benchmarkable spmv64($A, $x)
end

SUITE["parallel"] = BenchmarkGroup()

function spmv_serial(A, x)
-    y = Tensor(Dense{Int64}(Element{0.0, Float64}()))
+    y = Tensor(Dense{Int64}(Element{0.0,Float64}()))
@finch begin
y .= 0
-        for i=_
-            for j=_
-                y[i] += A[j, i] * x[j]
+        for i = _
+            for j = _
+                y[j] += A[j, i] * x[i]
end
end
return y
end
end

-function spmv_threaded(A, x)
-    y = Tensor(Dense{Int64}(Element{0.0, Float64}()))
+# function spmv_threaded(A, x)
+#     y = Tensor(Dense{Int64}(Element{0.0,Float64}()))
+#     @finch begin
+#         y .= 0
+#         for i = parallel(_)
+#             for j = _
+#                 y[i] += A[j, i] * x[j]
+#             end
+#         end
+#         return y
+#     end
+# end

+function spmv_atomic_element(A, x)
+    y = Tensor(Dense{Int64}(AtomicElement{0.0,Float64}()))
@finch begin
y .= 0
-        for i=parallel(_)
-            for j=_
-                y[i] += A[j, i] * x[j]
+        for i = parallel(_)
+            for j = _
+                y[j] += A[j, i] * x[i]
end
end
return y
end
end

+function spmv_mutex(A, x)
+    y = Tensor(Dense{Int64}(Mutex(Element{0.0,Float64}())))
+    @finch begin
+        y .= 0
+        for i = parallel(_)
+            for j = _
+                y[j] += A[j, i] * x[i]
+            end
+        end
+        return y
+    end
+end

SUITE["parallel"]["SpMV_serial"] = BenchmarkGroup()
SUITE["parallel"]["SpMV_threaded"] = BenchmarkGroup()
# SUITE["parallel"]["SpMV_threaded"] = BenchmarkGroup()
SUITE["parallel"]["SpMV_atomic_element"] = BenchmarkGroup()
SUITE["parallel"]["SpMV_mutex"] = BenchmarkGroup()
for (key, mtx) in [
"SNAP/soc-Epinions1" => SparseMatrixCSC(matrixdepot("SNAP/soc-Epinions1")),
"fsprand(10_000, 10_000, 0.01)" => fsprand(10_000, 10_000, 0.01)]
-    A = Tensor(Dense{Int64}(SparseList{Int64}(Element{0.0, Float64, Int64}())), mtx)
-    x = Tensor(Dense{Int64}(Element{0.0, Float64, Int64}()), rand(size(A)[2]))
+    A = Tensor(Dense{Int64}(SparseList{Int64}(Element{0.0,Float64,Int64}())), mtx)
+    x = Tensor(Dense{Int64}(Element{0.0,Float64,Int64}()), rand(size(A)[2]))
SUITE["parallel"]["SpMV_serial"][key] = @benchmarkable spmv_serial($A, $x)
SUITE["parallel"]["SpMV_threaded"][key] = @benchmarkable spmv_threaded($A, $x)
# SUITE["parallel"]["SpMV_threaded"][key] = @benchmarkable spmv_threaded($A, $x)
SUITE["parallel"]["SpMV_atomic_element"][key] = @benchmarkable spmv_atomic_element($A, $x)
SUITE["parallel"]["SpMV_mutex"][key] = @benchmarkable spmv_mutex($A, $x)
end

SUITE["structure"] = BenchmarkGroup()
@@ -304,4 +351,4 @@ SUITE["structure"]["banded"]["SparseList"] = @benchmarkable spmv_serial($A_ref,
SUITE["structure"]["banded"]["SparseBand"] = @benchmarkable spmv_serial($A, $x)
SUITE["structure"]["banded"]["SparseInterval"] = @benchmarkable spmv_serial($A2, $x)

SUITE = SUITE["structure"]
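The benchmark rework above matters because the new loop order writes y[j] from a loop in which i is parallel, so two threads can update the same output slot concurrently; plain Element storage offers no protection, which is why spmv_threaded is retired while the AtomicElement and Mutex(Element(...)) variants take its place. Below is a hedged sketch of running the new groups locally; the thread count, and capturing SUITE["parallel"] before the file's final SUITE = SUITE["structure"] reassignment, are assumptions about local usage rather than part of the PR:

using BenchmarkTools
# Start Julia with threads, e.g. `julia -t 4 --project=.`, and load the suite
# definitions with the parallel group still intact (as written, the file ends
# by narrowing SUITE to the "structure" group).
results = run(SUITE["parallel"]; verbose=true)
for key in ["SpMV_serial", "SpMV_atomic_element", "SpMV_mutex"]
    println(key, " => ", minimum(results[key]["fsprand(10_000, 10_000, 0.01)"]))
end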
67 changes: 34 additions & 33 deletions src/tensors/levels/mutex_levels.jl
@@ -17,7 +17,7 @@ julia> tensor_tree(Tensor(Dense(Mutex(Element(0.0))), [1, 2, 3]))
└─ 3.0
```
"""
-struct MutexLevel{AVal, Lvl} <: AbstractLevel
+struct MutexLevel{AVal,Lvl} <: AbstractLevel
lvl::Lvl
locks::AVal
end
@@ -27,12 +27,12 @@ const Mutex = MutexLevel
MutexLevel(lvl) = MutexLevel(lvl, Base.Threads.SpinLock[])
#MutexLevel(lvl::Lvl, locks::AVal) where {Lvl, AVal} =
# MutexLevel{AVal, Lvl}(lvl, locks)
-Base.summary(::MutexLevel{AVal, Lvl}) where {Lvl, AVal} = "MutexLevel($(AVal), $(Lvl))"
+Base.summary(::MutexLevel{AVal,Lvl}) where {Lvl,AVal} = "MutexLevel($(AVal), $(Lvl))"

-similar_level(lvl::Mutex{AVal, Lvl}, fill_value, eltype::Type, dims...) where {Lvl, AVal} =
+similar_level(lvl::Mutex{AVal,Lvl}, fill_value, eltype::Type, dims...) where {Lvl,AVal} =
MutexLevel(similar_level(lvl.lvl, fill_value, eltype, dims...))

-postype(::Type{<:MutexLevel{AVal, Lvl}}) where {Lvl, AVal} = postype(Lvl)
+postype(::Type{<:MutexLevel{AVal,Lvl}}) where {Lvl,AVal} = postype(Lvl)

function moveto(lvl::MutexLevel, device)
lvl_2 = moveto(lvl.lvl, device)
@@ -46,14 +46,14 @@ set_fill_value!(lvl::MutexLevel, init) = MutexLevel(set_fill_value!(lvl.lvl, ini
Base.resize!(lvl::MutexLevel, dims...) = MutexLevel(resize!(lvl.lvl, dims...), lvl.locks)


-function Base.show(io::IO, lvl::MutexLevel{AVal, Lvl}) where {AVal, Lvl}
+function Base.show(io::IO, lvl::MutexLevel{AVal,Lvl}) where {AVal,Lvl}
print(io, "Mutex(")
if get(io, :compact, false)
print(io, "…")
else
show(IOContext(io), lvl.lvl)
print(io, ", ")
-        show(IOContext(io, :typeinfo=>AVal), lvl.locks)
+        show(IOContext(io, :typeinfo => AVal), lvl.locks)
end
print(io, ")")
end
@@ -68,11 +68,12 @@ function labelled_children(fbr::SubFiber{<:MutexLevel})
end


-@inline level_ndims(::Type{<:MutexLevel{AVal, Lvl}}) where {AVal, Lvl} = level_ndims(Lvl)
-@inline level_size(lvl::MutexLevel{AVal, Lvl}) where {AVal, Lvl} = level_size(lvl.lvl)
-@inline level_axes(lvl::MutexLevel{AVal, Lvl}) where {AVal, Lvl} = level_axes(lvl.lvl)
-@inline level_eltype(::Type{MutexLevel{AVal, Lvl}}) where {AVal, Lvl} = level_eltype(Lvl)
-@inline level_fill_value(::Type{<:MutexLevel{AVal, Lvl}}) where {AVal, Lvl} = level_fill_value(Lvl)
+@inline level_ndims(::Type{<:MutexLevel{AVal,Lvl}}) where {AVal,Lvl} = level_ndims(Lvl)
+@inline level_size(lvl::MutexLevel{AVal,Lvl}) where {AVal,Lvl} = level_size(lvl.lvl)
+@inline level_axes(lvl::MutexLevel{AVal,Lvl}) where {AVal,Lvl} = level_axes(lvl.lvl)
+@inline level_eltype(::Type{MutexLevel{AVal,Lvl}}) where {AVal,Lvl} = level_eltype(Lvl)
+@inline level_fill_value(::Type{<:MutexLevel{AVal,Lvl}}) where {AVal,Lvl} = level_fill_value(Lvl)
+data_rep_level(::Type{<:MutexLevel{AVal,Lvl}}) where {AVal,Lvl} = data_rep_level(Lvl)

# FIXME: These.
(fbr::Tensor{<:MutexLevel})() = SubFiber(fbr.lvl, 1)()
Expand All @@ -92,9 +93,9 @@ mutable struct VirtualMutexLevel <: AbstractVirtualLevel
AVal
Lvl
end
-postype(lvl:: MutexLevel) = postype(lvl.lvl)
+postype(lvl::MutexLevel) = postype(lvl.lvl)

-postype(lvl:: VirtualMutexLevel) = postype(lvl.lvl)
+postype(lvl::VirtualMutexLevel) = postype(lvl.lvl)

is_level_injective(ctx, lvl::VirtualMutexLevel) = [is_level_injective(ctx, lvl.lvl)...]

@@ -110,17 +111,17 @@ end

function lower(ctx::AbstractCompiler, lvl::VirtualMutexLevel, ::DefaultStyle)
quote
-        $MutexLevel{$(lvl.AVal), $(lvl.Lvl)}($(ctx(lvl.lvl)), $(lvl.locks))
+        $MutexLevel{$(lvl.AVal),$(lvl.Lvl)}($(ctx(lvl.lvl)), $(lvl.locks))
end
end

-function virtualize(ctx, ex, ::Type{MutexLevel{AVal, Lvl}}, tag=:lvl) where {AVal, Lvl}
+function virtualize(ctx, ex, ::Type{MutexLevel{AVal,Lvl}}, tag=:lvl) where {AVal,Lvl}
sym = freshen(ctx, tag)
atomics = freshen(ctx, tag, :_locks)
push_preamble!(ctx, quote
        $sym = $ex
        $atomics = $ex.locks
    end)
lvl_2 = virtualize(ctx, :($sym.lvl), Lvl, sym)
temp = VirtualMutexLevel(lvl_2, sym, atomics, typeof(level_fill_value(Lvl)), Val, AVal, Lvl)
temp
@@ -144,11 +145,11 @@ function assemble_level!(ctx, lvl::VirtualMutexLevel, pos_start, pos_stop)
idx = freshen(ctx, :idx)
lockVal = freshen(ctx, :lock)
push_preamble!(ctx, quote
        Finch.resize_if_smaller!($(lvl.locks), $(ctx(pos_stop)))
        @inbounds for $idx = $(ctx(pos_start)):$(ctx(pos_stop))
            $(lvl.locks)[$idx] = Finch.make_lock(eltype($(lvl.AVal)))
        end
    end)
assemble_level!(ctx, lvl.lvl, pos_start, pos_stop)
end

@@ -159,11 +160,11 @@ function reassemble_level!(ctx, lvl::VirtualMutexLevel, pos_start, pos_stop)
idx = freshen(ctx, :idx)
lockVal = freshen(ctx, :lock)
push_preamble!(ctx, quote
        Finch.resize_if_smaller!($lvl.locks, $(ctx(pos_stop)))
        @inbounds for $idx = $(ctx(pos_start)):$(ctx(pos_stop))
            $lvl.locks[$idx] = Finch.make_lock(eltype($(lvl.AVal)))
        end
    end)
reassemble_level!(ctx, lvl.lvl, pos_start, pos_stop)
lvl
end
@@ -213,7 +214,7 @@ function unfurl(ctx, fbr::VirtualSubFiber{VirtualMutexLevel}, ext, mode::Updater
lockVal = freshen(ctx, lvl.ex, :lockVal)
dev = lower(ctx, virtual_get_device(ctx.code.task), DefaultStyle())
push_preamble!(ctx, quote
        $atomicData = Finch.get_lock($dev, $(lvl.locks), $(ctx(pos)), eltype($(lvl.AVal)))
$lockVal = Finch.aquire_lock!($dev, $atomicData)
end)
res = unfurl(ctx, VirtualSubFiber(lvl.lvl, pos), ext, mode, proto)
@@ -230,7 +231,7 @@ function unfurl(ctx, fbr::VirtualHollowSubFiber{VirtualMutexLevel}, ext, mode::U
lockVal = freshen(ctx, lvl.ex, :lockVal)
dev = lower(ctx, virtual_get_device(ctx.code.task), DefaultStyle())
push_preamble!(ctx, quote
        $atomicData = Finch.get_lock($dev, $(lvl.locks), $(ctx(pos)), eltype($(lvl.AVal)))
$lockVal = Finch.aquire_lock!($dev, $atomicData)
end)
res = unfurl(ctx, VirtualHollowSubFiber(lvl.lvl, pos, fbr.dirty), ext, mode, proto)
@@ -247,7 +248,7 @@ function lower_assign(ctx, fbr::VirtualSubFiber{VirtualMutexLevel}, mode::Update
lockVal = freshen(ctx, lvl.ex, :lockVal)
dev = lower(ctx, virtual_get_device(ctx.code.task), DefaultStyle())
push_preamble!(ctx, quote
        $atomicData = Finch.get_lock($dev, $(lvl.locks), $(ctx(pos)), eltype($(lvl.AVal)))
$lockVal = Finch.aquire_lock!($dev, $atomicData)
end)
res = lower_assign(ctx, VirtualSubFiber(lvl.lvl, pos), mode, op, rhs)
@@ -264,12 +265,12 @@ function lower_assign(ctx, fbr::VirtualHollowSubFiber{VirtualMutexLevel}, mode::
lockVal = freshen(ctx, lvl.ex, :lockVal)
dev = lower(ctx, virtual_get_device(ctx.code.task), DefaultStyle())
push_preamble!(ctx, quote
        $atomicData = Finch.get_lock($dev, $(lvl.locks), $(ctx(pos)), eltype($(lvl.AVal)))
$lockVal = Finch.aquire_lock!($dev, $atomicData)
end)
res = lower_assign(ctx, VirtualHollowSubFiber(lvl.lvl, pos, fbr.dirty), mode, op, rhs)
push_epilogue!(ctx, quote
Finch.release_lock!($dev, $atomicData)
end)
return res
end
end
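All four lowering hooks above (unfurl and lower_assign, each for plain and hollow subfibers) follow one shape: acquire the per-position lock in a preamble, recurse into the wrapped level, release the lock in an epilogue. Note that aquire_lock! is spelled that way in the Finch source. A hand-written approximation of the pattern the generated code follows for one guarded update; the variable names are illustrative, not actual generated output:

# Assumed shape of the emitted code around a single update at position pos:
atomic_data = Finch.get_lock(dev, lvl.locks, pos, eltype(AVal))
lock_val = Finch.aquire_lock!(dev, atomic_data)   # spelling as in the source
# ... update the wrapped level at this position ...
Finch.release_lock!(dev, atomic_data)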