Fix coding style to match with julials #665

Closed · wants to merge 1 commit
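
This PR applies the julials style (the Julia language server's formatter, built on JuliaFormatter.jl) across the touched files. The recurring changes: spaces around = in iterator specifications (for i=_ becomes for i = _), no space after commas inside type-parameter braces (Element{0.0, Float64, Int32} becomes Element{0.0,Float64,Int32}), no spaces around = in keyword arguments (stdout = IOBuffer() becomes stdout=IOBuffer()), and one-line @finch loop nests expanded into multi-line blocks. A minimal before/after sketch of these conventions, assembled from the patterns in the diff rather than quoted verbatim:

# before: compact style
y = Tensor(Dense{Int32}(Element{0.0, Float64, Int32}()))
@finch (y .= 0; for i=_, j=_; y[i] += A[j, i] * x[j] end)

# after: julials style
y = Tensor(Dense{Int32}(Element{0.0,Float64,Int32}()))
@finch (y .= 0;
for i = _, j = _
    y[i] += A[j, i] * x[j]
end)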
61 changes: 39 additions & 22 deletions benchmark/benchmarks.jl
@@ -91,7 +91,7 @@ eval(let
     x = rand(1)
     y = rand(1)
     @finch_kernel function spmv(y, A, x)
-        for j=_, i=_
+        for j = _, i = _
             y[i] += A[i, j] * x[j]
         end
     end
@@ -120,7 +120,7 @@ C = Tensor(Dense(SparseList(Element(0.0))))

 @finch (C .= 0; for i=_, j=_, k=_; C[j, i] += A[k, i] * B[k, i] end)
 """
-cmd = pipeline(`$(Base.julia_cmd()) --project=$(Base.active_project()) --eval $code`, stdout = IOBuffer())
+cmd = pipeline(`$(Base.julia_cmd()) --project=$(Base.active_project()) --eval $code`, stdout=IOBuffer())

 SUITE["compile"]["time_to_first_SpGeMM"] = @benchmarkable run(cmd)

@@ -131,7 +131,11 @@ let

     SUITE["compile"]["compile_SpGeMM"] = @benchmarkable begin
         A, B, C = ($A, $B, $C)
-        Finch.execute_code(:ex, typeof(Finch.@finch_program_instance (C .= 0; for i=_, j=_, k=_; C[j, i] += A[k, i] * B[k, j] end; return C)))
+        Finch.execute_code(:ex, typeof(Finch.@finch_program_instance (C .= 0;
+        for i = _, j = _, k = _
+            C[j, i] += A[k, i] * B[k, j]
+        end;
+        return C)))
     end
 end

@@ -141,7 +145,11 @@ let

     SUITE["compile"]["compile_pretty_triangle"] = @benchmarkable begin
         A, c = ($A, $c)
-        @finch_code (c .= 0; for i=_, j=_, k=_; c[] += A[i, j] * A[j, k] * A[i, k] end; return c)
+        @finch_code (c .= 0;
+        for i = _, j = _, k = _
+            c[] += A[i, j] * A[j, k] * A[i, k]
+        end;
+        return c)
     end
 end

@@ -186,22 +194,28 @@ end
 SUITE["indices"] = BenchmarkGroup()

 function spmv32(A, x)
-    y = Tensor(Dense{Int32}(Element{0.0, Float64, Int32}()))
-    @finch (y .= 0; for i=_, j=_; y[i] += A[j, i] * x[j] end)
+    y = Tensor(Dense{Int32}(Element{0.0,Float64,Int32}()))
+    @finch (y .= 0;
+    for i = _, j = _
+        y[i] += A[j, i] * x[j]
+    end)
     return y
 end

 SUITE["indices"]["SpMV_32"] = BenchmarkGroup()
 for mtx in ["SNAP/soc-Epinions1"]#, "SNAP/soc-LiveJournal1"]
     A = SparseMatrixCSC(matrixdepot(mtx))
-    A = Tensor(Dense{Int32}(SparseList{Int32}(Element{0.0, Float64, Int32}())), A)
-    x = Tensor(Dense{Int32}(Element{0.0, Float64, Int32}()), rand(size(A)[2]))
+    A = Tensor(Dense{Int32}(SparseList{Int32}(Element{0.0,Float64,Int32}())), A)
+    x = Tensor(Dense{Int32}(Element{0.0,Float64,Int32}()), rand(size(A)[2]))
     SUITE["indices"]["SpMV_32"][mtx] = @benchmarkable spmv32($A, $x)
 end

 function spmv_p1(A, x)
     y = Tensor(Dense(Element(0.0)))
-    @finch (y .= 0; for i=_, j=_; y[i] += A[j, i] * x[j] end)
+    @finch (y .= 0;
+    for i = _, j = _
+        y[i] += A[j, i] * x[j]
+    end)
     return y
 end

@@ -217,27 +231,30 @@ for mtx in ["SNAP/soc-Epinions1"]#, "SNAP/soc-LiveJournal1"]
 end

 function spmv64(A, x)
-    y = Tensor(Dense{Int64}(Element{0.0, Float64, Int64}()))
-    @finch (y .= 0; for i=_, j=_; y[i] += A[j, i] * x[j] end)
+    y = Tensor(Dense{Int64}(Element{0.0,Float64,Int64}()))
+    @finch (y .= 0;
+    for i = _, j = _
+        y[i] += A[j, i] * x[j]
+    end)
     return y
 end

 SUITE["indices"]["SpMV_64"] = BenchmarkGroup()
 for mtx in ["SNAP/soc-Epinions1"]#, "SNAP/soc-LiveJournal1"]
     A = SparseMatrixCSC(matrixdepot(mtx))
-    A = Tensor(Dense{Int64}(SparseList{Int64}(Element{0.0, Float64, Int64}())), A)
-    x = Tensor(Dense{Int64}(Element{0.0, Float64, Int64}()), rand(size(A)[2]))
+    A = Tensor(Dense{Int64}(SparseList{Int64}(Element{0.0,Float64,Int64}())), A)
+    x = Tensor(Dense{Int64}(Element{0.0,Float64,Int64}()), rand(size(A)[2]))
     SUITE["indices"]["SpMV_64"][mtx] = @benchmarkable spmv64($A, $x)
 end

 SUITE["parallel"] = BenchmarkGroup()

 function spmv_serial(A, x)
-    y = Tensor(Dense{Int64}(Element{0.0, Float64}()))
+    y = Tensor(Dense{Int64}(Element{0.0,Float64}()))
     @finch begin
         y .= 0
-        for i=_
-            for j=_
+        for i = _
+            for j = _
                 y[i] += A[j, i] * x[j]
             end
         end
@@ -246,11 +263,11 @@ function spmv_serial(A, x)
 end

 function spmv_threaded(A, x)
-    y = Tensor(Dense{Int64}(Element{0.0, Float64}()))
+    y = Tensor(Dense{Int64}(Element{0.0,Float64}()))
     @finch begin
         y .= 0
-        for i=parallel(_)
-            for j=_
+        for i = parallel(_)
+            for j = _
                 y[i] += A[j, i] * x[j]
             end
         end
@@ -263,8 +280,8 @@ SUITE["parallel"]["SpMV_threaded"] = BenchmarkGroup()
 for (key, mtx) in [
     "SNAP/soc-Epinions1" => SparseMatrixCSC(matrixdepot("SNAP/soc-Epinions1")),
     "fsprand(10_000, 10_000, 0.01)" => fsprand(10_000, 10_000, 0.01)]
-    A = Tensor(Dense{Int64}(SparseList{Int64}(Element{0.0, Float64, Int64}())), mtx)
-    x = Tensor(Dense{Int64}(Element{0.0, Float64, Int64}()), rand(size(A)[2]))
+    A = Tensor(Dense{Int64}(SparseList{Int64}(Element{0.0,Float64,Int64}())), mtx)
+    x = Tensor(Dense{Int64}(Element{0.0,Float64,Int64}()), rand(size(A)[2]))
     SUITE["parallel"]["SpMV_serial"][key] = @benchmarkable spmv_serial($A, $x)
     SUITE["parallel"]["SpMV_threaded"][key] = @benchmarkable spmv_threaded($A, $x)
 end
@@ -302,4 +319,4 @@ x = rand(N)

 SUITE["structure"]["banded"]["SparseList"] = @benchmarkable spmv_serial($A_ref, $x)
 SUITE["structure"]["banded"]["SparseBand"] = @benchmarkable spmv_serial($A, $x)
-SUITE["structure"]["banded"]["SparseInterval"] = @benchmarkable spmv_serial($A2, $x)
\ No newline at end of file
+SUITE["structure"]["banded"]["SparseInterval"] = @benchmarkable spmv_serial($A2, $x)
67 changes: 33 additions & 34 deletions src/tensors/levels/mutex_levels.jl
@@ -1,4 +1,3 @@
-
 """
     MutexLevel{Val, Lvl}()

@@ -17,7 +16,7 @@
 └─ 3.0
 ```
 """
-struct MutexLevel{AVal, Lvl} <: AbstractLevel
+struct MutexLevel{AVal,Lvl} <: AbstractLevel
     lvl::Lvl
     locks::AVal
 end
@@ -27,12 +26,12 @@
 MutexLevel(lvl) = MutexLevel(lvl, Base.Threads.SpinLock[])
 #MutexLevel(lvl::Lvl, locks::AVal) where {Lvl, AVal} =
 #    MutexLevel{AVal, Lvl}(lvl, locks)
-Base.summary(::MutexLevel{AVal, Lvl}) where {Lvl, AVal} = "MutexLevel($(AVal), $(Lvl))"
+Base.summary(::MutexLevel{AVal,Lvl}) where {Lvl,AVal} = "MutexLevel($(AVal), $(Lvl))"

-similar_level(lvl::Mutex{AVal, Lvl}, fill_value, eltype::Type, dims...) where {Lvl, AVal} =
+similar_level(lvl::Mutex{AVal,Lvl}, fill_value, eltype::Type, dims...) where {Lvl,AVal} =
     MutexLevel(similar_level(lvl.lvl, fill_value, eltype, dims...))

-postype(::Type{<:MutexLevel{AVal, Lvl}}) where {Lvl, AVal} = postype(Lvl)
+postype(::Type{<:MutexLevel{AVal,Lvl}}) where {Lvl,AVal} = postype(Lvl)

 function moveto(lvl::MutexLevel, device)
     lvl_2 = moveto(lvl.lvl, device)
@@ -46,14 +45,14 @@
 Base.resize!(lvl::MutexLevel, dims...) = MutexLevel(resize!(lvl.lvl, dims...), lvl.locks)


-function Base.show(io::IO, lvl::MutexLevel{AVal, Lvl}) where {AVal, Lvl}
+function Base.show(io::IO, lvl::MutexLevel{AVal,Lvl}) where {AVal,Lvl}
     print(io, "Mutex(")
     if get(io, :compact, false)
         print(io, "…")
     else
         show(IOContext(io), lvl.lvl)
         print(io, ", ")
-        show(IOContext(io, :typeinfo=>AVal), lvl.locks)
+        show(IOContext(io, :typeinfo => AVal), lvl.locks)
     end
     print(io, ")")
 end
@@ -68,11 +67,11 @@
 end


-@inline level_ndims(::Type{<:MutexLevel{AVal, Lvl}}) where {AVal, Lvl} = level_ndims(Lvl)
-@inline level_size(lvl::MutexLevel{AVal, Lvl}) where {AVal, Lvl} = level_size(lvl.lvl)
-@inline level_axes(lvl::MutexLevel{AVal, Lvl}) where {AVal, Lvl} = level_axes(lvl.lvl)
-@inline level_eltype(::Type{MutexLevel{AVal, Lvl}}) where {AVal, Lvl} = level_eltype(Lvl)
-@inline level_fill_value(::Type{<:MutexLevel{AVal, Lvl}}) where {AVal, Lvl} = level_fill_value(Lvl)
+@inline level_ndims(::Type{<:MutexLevel{AVal,Lvl}}) where {AVal,Lvl} = level_ndims(Lvl)
+@inline level_size(lvl::MutexLevel{AVal,Lvl}) where {AVal,Lvl} = level_size(lvl.lvl)
+@inline level_axes(lvl::MutexLevel{AVal,Lvl}) where {AVal,Lvl} = level_axes(lvl.lvl)
+@inline level_eltype(::Type{MutexLevel{AVal,Lvl}}) where {AVal,Lvl} = level_eltype(Lvl)
+@inline level_fill_value(::Type{<:MutexLevel{AVal,Lvl}}) where {AVal,Lvl} = level_fill_value(Lvl)

 # FIXME: These.
 (fbr::Tensor{<:MutexLevel})() = SubFiber(fbr.lvl, 1)()
@@ -92,9 +91,9 @@
     AVal
     Lvl
 end
-postype(lvl:: MutexLevel) = postype(lvl.lvl)
+postype(lvl::MutexLevel) = postype(lvl.lvl)

-postype(lvl:: VirtualMutexLevel) = postype(lvl.lvl)
+postype(lvl::VirtualMutexLevel) = postype(lvl.lvl)

 is_level_injective(ctx, lvl::VirtualMutexLevel) = [is_level_injective(ctx, lvl.lvl)...]

@@ -110,17 +109,17 @@

 function lower(ctx::AbstractCompiler, lvl::VirtualMutexLevel, ::DefaultStyle)
     quote
-        $MutexLevel{$(lvl.AVal), $(lvl.Lvl)}($(ctx(lvl.lvl)), $(lvl.locks))
+        $MutexLevel{$(lvl.AVal),$(lvl.Lvl)}($(ctx(lvl.lvl)), $(lvl.locks))
     end
 end

-function virtualize(ctx, ex, ::Type{MutexLevel{AVal, Lvl}}, tag=:lvl) where {AVal, Lvl}
+function virtualize(ctx, ex, ::Type{MutexLevel{AVal,Lvl}}, tag=:lvl) where {AVal,Lvl}
     sym = freshen(ctx, tag)
     atomics = freshen(ctx, tag, :_locks)
     push_preamble!(ctx, quote
-        $sym = $ex
-        $atomics = $ex.locks
-    end)
+            $sym = $ex
+            $atomics = $ex.locks
+        end)
     lvl_2 = virtualize(ctx, :($sym.lvl), Lvl, sym)
     temp = VirtualMutexLevel(lvl_2, sym, atomics, typeof(level_fill_value(Lvl)), Val, AVal, Lvl)
     temp
@@ -144,11 +143,11 @@
     idx = freshen(ctx, :idx)
     lockVal = freshen(ctx, :lock)
     push_preamble!(ctx, quote
-        Finch.resize_if_smaller!($(lvl.locks), $(ctx(pos_stop)))
-        @inbounds for $idx = $(ctx(pos_start)):$(ctx(pos_stop))
-            $(lvl.locks)[$idx] = Finch.make_lock(eltype($(lvl.AVal)))
-        end
-    end)
+            Finch.resize_if_smaller!($(lvl.locks), $(ctx(pos_stop)))
+            @inbounds for $idx = $(ctx(pos_start)):$(ctx(pos_stop))
+                $(lvl.locks)[$idx] = Finch.make_lock(eltype($(lvl.AVal)))
+            end
+        end)
     assemble_level!(ctx, lvl.lvl, pos_start, pos_stop)
 end

@@ -159,11 +158,11 @@
     idx = freshen(ctx, :idx)
     lockVal = freshen(ctx, :lock)
     push_preamble!(ctx, quote
-        Finch.resize_if_smaller!($lvl.locks, $(ctx(pos_stop)))
-        @inbounds for $idx = $(ctx(pos_start)):$(ctx(pos_stop))
-            $lvl.locks[$idx] = Finch.make_lock(eltype($(lvl.AVal)))
-        end
-    end)
+            Finch.resize_if_smaller!($lvl.locks, $(ctx(pos_stop)))
+            @inbounds for $idx = $(ctx(pos_start)):$(ctx(pos_stop))
+                $lvl.locks[$idx] = Finch.make_lock(eltype($(lvl.AVal)))
+            end
+        end)
     reassemble_level!(ctx, lvl.lvl, pos_start, pos_stop)
     lvl
 end
@@ -213,7 +212,7 @@
     lockVal = freshen(ctx, lvl.ex, :lockVal)
     dev = lower(ctx, virtual_get_device(ctx.code.task), DefaultStyle())
     push_preamble!(ctx, quote
-        $atomicData = Finch.get_lock($dev, $(lvl.locks), $(ctx(pos)), eltype($(lvl.AVal)))
+            $atomicData = Finch.get_lock($dev, $(lvl.locks), $(ctx(pos)), eltype($(lvl.AVal)))
             $lockVal = Finch.aquire_lock!($dev, $atomicData)
         end)
     res = unfurl(ctx, VirtualSubFiber(lvl.lvl, pos), ext, mode, proto)
@@ -230,7 +229,7 @@
     lockVal = freshen(ctx, lvl.ex, :lockVal)
     dev = lower(ctx, virtual_get_device(ctx.code.task), DefaultStyle())
     push_preamble!(ctx, quote
-        $atomicData = Finch.get_lock($dev, $(lvl.locks), $(ctx(pos)), eltype($(lvl.AVal)))
+            $atomicData = Finch.get_lock($dev, $(lvl.locks), $(ctx(pos)), eltype($(lvl.AVal)))
             $lockVal = Finch.aquire_lock!($dev, $atomicData)
         end)
     res = unfurl(ctx, VirtualHollowSubFiber(lvl.lvl, pos, fbr.dirty), ext, mode, proto)
@@ -247,7 +246,7 @@
     lockVal = freshen(ctx, lvl.ex, :lockVal)
     dev = lower(ctx, virtual_get_device(ctx.code.task), DefaultStyle())
     push_preamble!(ctx, quote
-        $atomicData = Finch.get_lock($dev, $(lvl.locks), $(ctx(pos)), eltype($(lvl.AVal)))
+            $atomicData = Finch.get_lock($dev, $(lvl.locks), $(ctx(pos)), eltype($(lvl.AVal)))
             $lockVal = Finch.aquire_lock!($dev, $atomicData)
         end)
     res = lower_assign(ctx, VirtualSubFiber(lvl.lvl, pos), mode, op, rhs)
@@ -264,12 +263,12 @@
     lockVal = freshen(ctx, lvl.ex, :lockVal)
     dev = lower(ctx, virtual_get_device(ctx.code.task), DefaultStyle())
     push_preamble!(ctx, quote
-        $atomicData = Finch.get_lock($dev, $(lvl.locks), $(ctx(pos)), eltype($(lvl.AVal)))
+            $atomicData = Finch.get_lock($dev, $(lvl.locks), $(ctx(pos)), eltype($(lvl.AVal)))
             $lockVal = Finch.aquire_lock!($dev, $atomicData)
         end)
     res = lower_assign(ctx, VirtualHollowSubFiber(lvl.lvl, pos, fbr.dirty), mode, op, rhs)
     push_epilogue!(ctx, quote
             Finch.release_lock!($dev, $atomicData)
         end)
     return res
 end
 end
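
All four lowering methods above follow one lock discipline: a preamble fetches and acquires the lock guarding the target position before the wrapped sublevel is touched, and an epilogue releases it afterward. A rough sketch of the runtime shape of the generated code, using the helper names from the diff; dev, locks, and pos stand in for compiler-generated variables:

# illustrative only: the pattern MutexLevel lowering emits around a sublevel access
atomic_data = Finch.get_lock(dev, locks, pos, eltype(AVal))  # lock for position pos
lock_val = Finch.aquire_lock!(dev, atomic_data)              # acquire (Finch spells it "aquire")
# ... read or update the wrapped sublevel at position pos ...
Finch.release_lock!(dev, atomic_data)                        # epilogue always releases the lock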