Commit

[SparseArrayInterface] [BlockSparseArrays] Sparse and block sparse dot products (#1577)

* [SparseArrayInterface] [BlockSparseArrays] Sparse and block sparse dot products

* [NDTensors] Bump to v0.3.62
mtfishman authored Nov 12, 2024
1 parent a92459e commit 4996dca
Showing 12 changed files with 175 additions and 22 deletions.
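For orientation, a minimal sketch of what this commit enables, adapted from the tests added below (element type and block sizes are illustrative):

using LinearAlgebra: dot
using NDTensors.BlockSparseArrays: BlockSparseArray
using BlockArrays: Block

# Block vectors with blocks of lengths 2, 3, and 4; only some blocks are stored.
a1 = BlockSparseArray{Float64}([2, 3, 4])
a1[Block(1)] = randn(2)
a1[Block(3)] = randn(4)
a2 = BlockSparseArray{Float64}([2, 3, 4])
a2[Block(2)] = randn(3)
a2[Block(3)] = randn(4)

# Both forms now dispatch to a block-sparse dot product that only pairs up
# blocks stored in both arrays (here just `Block(3)`):
dot(a1, a2) ≈ a1' * a2 ≈ dot(Array(a1), Array(a2))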
2 changes: 1 addition & 1 deletion NDTensors/Project.toml
@@ -1,7 +1,7 @@
name = "NDTensors"
uuid = "23ae76d9-e61a-49c4-8f12-3f1a16adf9cf"
authors = ["Matthew Fishman <[email protected]>"]
version = "0.3.61"
version = "0.3.62"

[deps]
Accessors = "7d9f7c33-5ae7-4f3b-8dc6-eff91059b697"
@@ -1,14 +1,27 @@
using ArrayLayouts: ArrayLayouts, MemoryLayout, MulAdd
using ArrayLayouts: ArrayLayouts, DualLayout, MemoryLayout, MulAdd
using BlockArrays: BlockLayout
using ..SparseArrayInterface: SparseLayout
using ..TypeParameterAccessors: similartype
using ..TypeParameterAccessors: parenttype, similartype

function ArrayLayouts.MemoryLayout(arraytype::Type{<:BlockSparseArrayLike})
outer_layout = typeof(MemoryLayout(blockstype(arraytype)))
inner_layout = typeof(MemoryLayout(blocktype(arraytype)))
return BlockLayout{outer_layout,inner_layout}()
end

# TODO: Generalize to `BlockSparseVectorLike`/`AnyBlockSparseVector`.
function ArrayLayouts.MemoryLayout(
arraytype::Type{<:Adjoint{<:Any,<:AbstractBlockSparseVector}}
)
return DualLayout{typeof(MemoryLayout(parenttype(arraytype)))}()
end
# TODO: Generalize to `BlockSparseVectorLike`/`AnyBlockSparseVector`.
function ArrayLayouts.MemoryLayout(
arraytype::Type{<:Transpose{<:Any,<:AbstractBlockSparseVector}}
)
return DualLayout{typeof(MemoryLayout(parenttype(arraytype)))}()
end
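# Note (not part of the diff): wrapping the parent layout in `DualLayout`
# appears to be what lets ArrayLayouts treat `a1' * a2` as a dot product
# (materialized via the `Dot` methods added elsewhere in this commit)
# rather than as a matrix-vector multiplication.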

function Base.similar(
mul::MulAdd{<:BlockLayout{<:SparseLayout},<:BlockLayout{<:SparseLayout},<:Any,<:Any,A,B},
elt::Type,
@@ -158,6 +158,13 @@ function Base.similar(
return similar(arraytype, eltype(arraytype), axes)
end

# Fixes ambiguity error.
function Base.similar(
arraytype::Type{<:BlockSparseArrayLike}, axes::Tuple{Base.OneTo,Vararg{Base.OneTo}}
)
return similar(arraytype, eltype(arraytype), axes)
end

# Needed by `BlockArrays` matrix multiplication interface
# TODO: This fixes an ambiguity error with `OffsetArrays.jl`, but
# it only appears to be needed in older versions of Julia, like v1.6.
@@ -221,7 +228,7 @@ function Base.similar(
end

# Fixes ambiguity error.
function Base.similar(a::BlockSparseArrayLike{<:Any,0}, elt::Type, axes::Tuple{})
function Base.similar(a::BlockSparseArrayLike, elt::Type, axes::Tuple{})
return blocksparse_similar(a, elt, axes)
end

@@ -1,23 +1,48 @@
using ArrayLayouts: ArrayLayouts, MatMulMatAdd
using ArrayLayouts: ArrayLayouts, Dot, MatMulMatAdd, MatMulVecAdd, MulAdd
using BlockArrays: BlockLayout
using ..SparseArrayInterface: SparseLayout
using LinearAlgebra: mul!
using LinearAlgebra: dot, mul!

function blocksparse_muladd!(
α::Number, a1::AbstractMatrix, a2::AbstractMatrix, β::Number, a_dest::AbstractMatrix
α::Number, a1::AbstractArray, a2::AbstractArray, β::Number, a_dest::AbstractArray
)
mul!(blocks(a_dest), blocks(a1), blocks(a2), α, β)
return a_dest
end

function blocksparse_matmul!(m::MulAdd)
α, a1, a2, β, a_dest = m.α, m.A, m.B, m.β, m.C
blocksparse_muladd!(α, a1, a2, β, a_dest)
return a_dest
end

function ArrayLayouts.materialize!(
m::MatMulMatAdd{
<:BlockLayout{<:SparseLayout},
<:BlockLayout{<:SparseLayout},
<:BlockLayout{<:SparseLayout},
},
)
α, a1, a2, β, a_dest = m.α, m.A, m.B, m.β, m.C
blocksparse_muladd!(α, a1, a2, β, a_dest)
return a_dest
blocksparse_matmul!(m)
return m.C
end
function ArrayLayouts.materialize!(
m::MatMulVecAdd{
<:BlockLayout{<:SparseLayout},
<:BlockLayout{<:SparseLayout},
<:BlockLayout{<:SparseLayout},
},
)
blocksparse_matmul!(m)
return m.C
end

function blocksparse_dot(a1::AbstractArray, a2::AbstractArray)
# TODO: Add a check that the blocking of `a1` and `a2` are
# the same, or the same up to a reshape.
return dot(blocks(a1), blocks(a2))
end

function Base.copy(d::Dot{<:BlockLayout{<:SparseLayout},<:BlockLayout{<:SparseLayout}})
return blocksparse_dot(d.A, d.B)
end
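The design is worth spelling out: for a block sparse array, `blocks(a)` is itself a sparse array whose stored elements are the blocks, so `dot(blocks(a1), blocks(a2))` lands on the sparse dot-product path added to SparseArrayInterface in this same commit, which in turn calls `dot` on each pair of co-stored blocks. Roughly:

# dot(a1, a2)
#   -> blocksparse_dot(a1, a2)
#   -> dot(blocks(a1), blocks(a2))   # sparse arrays of blocks
#   -> sum of dot(b1, b2) over block pairs stored in both arrays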
@@ -184,7 +184,7 @@ reverse_index(index::CartesianIndex) = CartesianIndex(reverse(Tuple(index)))

# Represents the array of arrays of a `Transpose`
# wrapping a block sparse array, i.e. `blocks(array)` where `array` is a `Transpose`.
struct SparseTransposeBlocks{T,BlockType<:AbstractMatrix{T},Array<:Transpose{T}} <:
struct SparseTransposeBlocks{T,BlockType<:AbstractArray{T},Array<:Transpose{T}} <:
AbstractSparseMatrix{BlockType}
array::Array
end
@@ -219,7 +219,7 @@ end

# Represents the array of arrays of an `Adjoint`
# wrapping a block sparse array, i.e. `blocks(array)` where `array` is an `Adjoint`.
struct SparseAdjointBlocks{T,BlockType<:AbstractMatrix{T},Array<:Adjoint{T}} <:
struct SparseAdjointBlocks{T,BlockType<:AbstractArray{T},Array<:Adjoint{T}} <:
AbstractSparseMatrix{BlockType}
array::Array
end
16 changes: 15 additions & 1 deletion NDTensors/src/lib/BlockSparseArrays/test/test_basics.jl
@@ -16,7 +16,7 @@ using BlockArrays:
mortar
using Compat: @compat
using GPUArraysCore: @allowscalar
using LinearAlgebra: Adjoint, mul!, norm
using LinearAlgebra: Adjoint, dot, mul!, norm
using NDTensors.BlockSparseArrays:
@view!,
BlockSparseArray,
@@ -575,7 +575,11 @@ using .NDTensorsTestUtils: devices_list, is_supported_eltype
a[b] = randn(elt, size(a[b]))
end
@test isassigned(a, 1, 1)
@test isassigned(a, 1, 1, 1)
@test !isassigned(a, 1, 1, 2)
@test isassigned(a, 5, 7)
@test isassigned(a, 5, 7, 1)
@test !isassigned(a, 5, 7, 2)
@test !isassigned(a, 0, 1)
@test !isassigned(a, 5, 8)
@test isassigned(a, Block(1), Block(1))
@@ -852,6 +856,16 @@ using .NDTensorsTestUtils: devices_list, is_supported_eltype
@allowscalar @test Array(a_dest) ≈ Array(a1′) * Array(a2′)
end
end
@testset "Dot product" begin
a1 = dev(BlockSparseArray{elt}([2, 3, 4]))
a1[Block(1)] = dev(randn(elt, size(@view(a1[Block(1)]))))
a1[Block(3)] = dev(randn(elt, size(@view(a1[Block(3)]))))
a2 = dev(BlockSparseArray{elt}([2, 3, 4]))
a2[Block(2)] = dev(randn(elt, size(@view(a1[Block(2)]))))
a2[Block(3)] = dev(randn(elt, size(@view(a1[Block(3)]))))
@test a1' * a2 ≈ Array(a1)' * Array(a2)
@test dot(a1, a2) ≈ a1' * a2
end
@testset "TensorAlgebra" begin
a1 = dev(BlockSparseArray{elt}([2, 3], [2, 3]))
a1[Block(1, 1)] = dev(randn(elt, size(@view(a1[Block(1, 1)]))))
@@ -1,13 +1,41 @@
using ArrayLayouts: ArrayLayouts, MatMulMatAdd
using ArrayLayouts: ArrayLayouts, Dot, DualLayout, MatMulMatAdd, MatMulVecAdd, MulAdd
using LinearAlgebra: Adjoint, Transpose
using ..TypeParameterAccessors: parenttype

function ArrayLayouts.MemoryLayout(arraytype::Type{<:SparseArrayLike})
return SparseLayout()
end

function ArrayLayouts.materialize!(
m::MatMulMatAdd{<:AbstractSparseLayout,<:AbstractSparseLayout,<:AbstractSparseLayout}
# TODO: Generalize to `SparseVectorLike`/`AnySparseVector`.
function ArrayLayouts.MemoryLayout(arraytype::Type{<:Adjoint{<:Any,<:AbstractSparseVector}})
return DualLayout{typeof(MemoryLayout(parenttype(arraytype)))}()
end
# TODO: Generalize to `SparseVectorLike`/`AnySparseVector`.
function ArrayLayouts.MemoryLayout(
arraytype::Type{<:Transpose{<:Any,<:AbstractSparseVector}}
)
return DualLayout{typeof(MemoryLayout(parenttype(arraytype)))}()
end

function sparse_matmul!(m::MulAdd)
α, a1, a2, β, a_dest = m.α, m.A, m.B, m.β, m.C
sparse_mul!(a_dest, a1, a2, α, β)
return a_dest
end

function ArrayLayouts.materialize!(
m::MatMulMatAdd{<:AbstractSparseLayout,<:AbstractSparseLayout,<:AbstractSparseLayout}
)
sparse_matmul!(m)
return m.C
end
function ArrayLayouts.materialize!(
m::MatMulVecAdd{<:AbstractSparseLayout,<:AbstractSparseLayout,<:AbstractSparseLayout}
)
sparse_matmul!(m)
return m.C
end

function Base.copy(d::Dot{<:SparseLayout,<:SparseLayout})
return sparse_dot(d.A, d.B)
end
@@ -1,4 +1,4 @@
using LinearAlgebra: mul!, norm
using LinearAlgebra: dot, mul!, norm

sparse_norm(a::AbstractArray, p::Real=2) = norm(sparse_storage(a))

@@ -9,6 +9,14 @@ function mul_indices(I1::CartesianIndex{2}, I2::CartesianIndex{2})
return CartesianIndex(I1[1], I2[2])
end

# TODO: Is this needed? Maybe when multiplying vectors?
function mul_indices(I1::CartesianIndex{1}, I2::CartesianIndex{1})
if I1 ≠ I2
return nothing
end
return CartesianIndex(I1)
end
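For illustration, the contract these methods satisfy (a sketch, not part of the diff): matching stored indices combine, mismatched ones signal "no contribution":

# mul_indices(CartesianIndex(2), CartesianIndex(2)) == CartesianIndex(2)
# mul_indices(CartesianIndex(1), CartesianIndex(2)) === nothing
# `sparse_mul!` presumably skips stored-index pairs for which this returns `nothing`.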

function default_mul!!(
a_dest::AbstractMatrix,
a1::AbstractMatrix,
@@ -28,9 +36,9 @@ end

# a1 * a2 * α + a_dest * β
function sparse_mul!(
a_dest::AbstractMatrix,
a1::AbstractMatrix,
a2::AbstractMatrix,
a_dest::AbstractArray,
a1::AbstractArray,
a2::AbstractArray,
α::Number=true,
β::Number=false;
(mul!!)=(default_mul!!),
@@ -45,3 +53,26 @@ function sparse_mul!(
end
return a_dest
end

function sparse_dot(a1::AbstractArray, a2::AbstractArray)
# This requires that `a1` and `a2` have the same shape.
# TODO: Generalize (Base supports dot products of
# arrays with the same length but different sizes).
size(a1) == size(a2) ||
throw(DimensionMismatch("Sizes $(size(a1)) and $(size(a2)) don't match."))
dot_dest = zero(Base.promote_op(dot, eltype(a1), eltype(a2)))
# TODO: First check if the number of stored elements (`nstored`, to be renamed
# `stored_length`) is smaller in `a1` or `a2` and use whichever one is smaller
# as the outer loop.
for I1 in stored_indices(a1)
# TODO: Overload and use `Base.isstored(a, I) = I in stored_indices(a)` instead.
# TODO: This assumes fast lookup of indices, which may not always be the case.
# It could be better to loop over `stored_indices(a2)` and check that
# `I1 == I2` instead (say using `mul_indices(I1, I2)`). We could have a trait
# `HasFastIsStored(a::AbstractArray)` to choose between the two.
if I1 in stored_indices(a2)
dot_dest += dot(a1[I1], a2[I1])
end
end
return dot_dest
end
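As a quick worked example of the loop above (mirroring the tests added below): if `a1` stores indices {1, 3} and `a2` stores indices {2, 3}, only index 3 contributes, so the result is `dot(a1[3], a2[3])`.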
@@ -166,7 +166,10 @@ end
function sparse_isassigned(a::AbstractArray{<:Any,N}, I::CartesianIndex{N}) where {N}
return sparse_isassigned(a, Tuple(I)...)
end
function sparse_isassigned(a::AbstractArray{<:Any,N}, I::Vararg{Integer,N}) where {N}
function sparse_isassigned(a::AbstractArray, I::Integer...)
# Check that the trailing dimensions are one. This is needed by the generic
# `AbstractArray` `show` methods when `a isa AbstractVector`.
all(d -> isone(I[d]), (ndims(a) + 1):length(I)) || return false
return all(dim -> I[dim] ∈ axes(a, dim), 1:ndims(a))
end
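The intended behavior, mirroring the `isassigned` tests added in test_basics.jl above (a sketch for a 5×7 sparse matrix `a`):

# sparse_isassigned(a, 5, 7)    -> true  (in bounds)
# sparse_isassigned(a, 5, 7, 1) -> true  (trailing index is 1)
# sparse_isassigned(a, 5, 7, 2) -> false (trailing index is not 1)
# sparse_isassigned(a, 5, 8)    -> false (out of bounds)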

@@ -32,6 +32,10 @@ function LinearAlgebra.mul!(
return a_dest
end

function LinearAlgebra.dot(a1::SparseArray, a2::SparseArray)
return SparseArrayInterface.sparse_dot(a1, a2)
end

# AbstractArray interface
Base.size(a::SparseArray) = a.dims
function Base.similar(a::SparseArray, elt::Type, dims::Tuple{Vararg{Int}})
@@ -1,5 +1,5 @@
@eval module $(gensym())
using LinearAlgebra: mul!, norm
using LinearAlgebra: dot, mul!, norm
using NDTensors.SparseArrayInterface: SparseArrayInterface
include("SparseArrayInterfaceTestUtils/SparseArrayInterfaceTestUtils.jl")
using .SparseArrayInterfaceTestUtils.AbstractSparseArrays: AbstractSparseArrays
@@ -299,6 +299,18 @@ using Test: @test, @testset
@test a_dest isa SparseArray{elt}
@test SparseArrayInterface.nstored(a_dest) == 2

# Dot product
a1 = SparseArray{elt}(4)
a1[1] = randn()
a1[3] = randn()
a2 = SparseArray{elt}(4)
a2[2] = randn()
a2[3] = randn()
a_dest = a1' * a2
@test a_dest isa elt
@test a_dest ≈ Array(a1)' * Array(a2)
@test a_dest ≈ dot(a1, a2)

# In-place matrix multiplication
a1 = SparseArray{elt}(2, 3)
a1[1, 2] = 12
16 changes: 16 additions & 0 deletions NDTensors/src/lib/TensorAlgebra/src/LinearAlgebraExtensions/qr.jl
@@ -1,3 +1,4 @@
using ArrayLayouts: LayoutMatrix
using LinearAlgebra: LinearAlgebra, qr
using ..TensorAlgebra:
TensorAlgebra,
@@ -8,6 +9,8 @@ using ..TensorAlgebra:
fusedims,
splitdims

# TODO: Define as `tensor_qr`.
# TODO: This looks generic, but it doesn't work for `BlockSparseArrays`.
function _qr(a::AbstractArray, biperm::BlockedPermutation{2})
a_matricized = fusedims(a, biperm)

@@ -38,6 +41,12 @@ function LinearAlgebra.qr(a::AbstractMatrix, biperm::BlockedPermutation{2})
return _qr(a, biperm)
end

# Fix ambiguity error with `ArrayLayouts`.
function LinearAlgebra.qr(a::LayoutMatrix, biperm::BlockedPermutation{2})
return _qr(a, biperm)
end

# TODO: Define in terms of an inner function `_qr` or `tensor_qr`.
function LinearAlgebra.qr(
a::AbstractArray, labels_a::Tuple, labels_q::Tuple, labels_r::Tuple
)
@@ -50,3 +59,10 @@ function LinearAlgebra.qr(
)
return qr(a, blockedperm_indexin(labels_a, labels_q, labels_r))
end

# Fix ambiguity error with `ArrayLayouts`.
function LinearAlgebra.qr(
a::LayoutMatrix, labels_a::Tuple, labels_q::Tuple, labels_r::Tuple
)
return qr(a, blockedperm_indexin(labels_a, labels_q, labels_r))
end
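For orientation, a sketch of how the labeled `qr` is meant to be called (labels and shapes are illustrative; the `LayoutMatrix` methods above only forward to the same logic to avoid ArrayLayouts dispatch ambiguities):

# a = randn(2, 2, 2, 2)
# q, r = qr(a, ("i", "j", "k", "l"), ("i", "k"), ("j", "l"))
# `a` is fused to a matrix over the ("i", "k") and ("j", "l") dimension
# groups, QR-factorized, and the factors are split back out via `splitdims`.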

2 comments on commit 4996dca

@mtfishman (Member Author)


@JuliaRegistrator register subdir=NDTensors

@JuliaRegistrator


Registration pull request created: JuliaRegistries/General/119277

Tip: Release Notes

Did you know you can add release notes too? Just add markdown formatted text underneath the comment after the text
"Release notes:" and it will be added to the registry PR, and if TagBot is installed it will also be added to the
release that TagBot creates. i.e.

@JuliaRegistrator register

Release notes:

## Breaking changes

- blah

To add them here just re-invoke and the PR will be updated.

Tagging

After the above pull request is merged, it is recommended that a tag is created on this repository for the registered package version.

This will be done automatically if the Julia TagBot GitHub Action is installed, or can be done manually through the github interface, or via:

git tag -a NDTensors-v0.3.62 -m "<description of version>" 4996dcad557f780678db621b29b3873ef2b84d7b
git push origin NDTensors-v0.3.62
