diff --git a/NDTensors/Project.toml b/NDTensors/Project.toml index 5910135fb0..b4edf4d5de 100644 --- a/NDTensors/Project.toml +++ b/NDTensors/Project.toml @@ -1,7 +1,7 @@ name = "NDTensors" uuid = "23ae76d9-e61a-49c4-8f12-3f1a16adf9cf" authors = ["Matthew Fishman "] -version = "0.3.61" +version = "0.3.62" [deps] Accessors = "7d9f7c33-5ae7-4f3b-8dc6-eff91059b697" diff --git a/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/arraylayouts.jl b/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/arraylayouts.jl index 7486d443c7..069d367cf6 100644 --- a/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/arraylayouts.jl +++ b/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/arraylayouts.jl @@ -1,7 +1,7 @@ -using ArrayLayouts: ArrayLayouts, MemoryLayout, MulAdd +using ArrayLayouts: ArrayLayouts, DualLayout, MemoryLayout, MulAdd using BlockArrays: BlockLayout using ..SparseArrayInterface: SparseLayout -using ..TypeParameterAccessors: similartype +using ..TypeParameterAccessors: parenttype, similartype function ArrayLayouts.MemoryLayout(arraytype::Type{<:BlockSparseArrayLike}) outer_layout = typeof(MemoryLayout(blockstype(arraytype))) @@ -9,6 +9,19 @@ function ArrayLayouts.MemoryLayout(arraytype::Type{<:BlockSparseArrayLike}) return BlockLayout{outer_layout,inner_layout}() end +# TODO: Generalize to `BlockSparseVectorLike`/`AnyBlockSparseVector`. +function ArrayLayouts.MemoryLayout( + arraytype::Type{<:Adjoint{<:Any,<:AbstractBlockSparseVector}} +) + return DualLayout{typeof(MemoryLayout(parenttype(arraytype)))}() +end +# TODO: Generalize to `BlockSparseVectorLike`/`AnyBlockSparseVector`. 
+function ArrayLayouts.MemoryLayout( + arraytype::Type{<:Transpose{<:Any,<:AbstractBlockSparseVector}} +) + return DualLayout{typeof(MemoryLayout(parenttype(arraytype)))}() +end + function Base.similar( mul::MulAdd{<:BlockLayout{<:SparseLayout},<:BlockLayout{<:SparseLayout},<:Any,<:Any,A,B}, elt::Type, diff --git a/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/wrappedabstractblocksparsearray.jl b/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/wrappedabstractblocksparsearray.jl index dfb797caaa..e8d1e5e15b 100644 --- a/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/wrappedabstractblocksparsearray.jl +++ b/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/wrappedabstractblocksparsearray.jl @@ -158,6 +158,13 @@ function Base.similar( return similar(arraytype, eltype(arraytype), axes) end +# Fixes ambiguity error. +function Base.similar( + arraytype::Type{<:BlockSparseArrayLike}, axes::Tuple{Base.OneTo,Vararg{Base.OneTo}} +) + return similar(arraytype, eltype(arraytype), axes) +end + # Needed by `BlockArrays` matrix multiplication interface # TODO: This fixes an ambiguity error with `OffsetArrays.jl`, but # is only appears to be needed in older versions of Julia like v1.6. @@ -221,7 +228,7 @@ function Base.similar( end # Fixes ambiguity error. 
-function Base.similar(a::BlockSparseArrayLike{<:Any,0}, elt::Type, axes::Tuple{}) +function Base.similar(a::BlockSparseArrayLike, elt::Type, axes::Tuple{}) return blocksparse_similar(a, elt, axes) end diff --git a/NDTensors/src/lib/BlockSparseArrays/src/blocksparsearrayinterface/arraylayouts.jl b/NDTensors/src/lib/BlockSparseArrays/src/blocksparsearrayinterface/arraylayouts.jl index 7543ad434d..483c53f835 100644 --- a/NDTensors/src/lib/BlockSparseArrays/src/blocksparsearrayinterface/arraylayouts.jl +++ b/NDTensors/src/lib/BlockSparseArrays/src/blocksparsearrayinterface/arraylayouts.jl @@ -1,15 +1,21 @@ -using ArrayLayouts: ArrayLayouts, MatMulMatAdd +using ArrayLayouts: ArrayLayouts, Dot, MatMulMatAdd, MatMulVecAdd, MulAdd using BlockArrays: BlockLayout using ..SparseArrayInterface: SparseLayout -using LinearAlgebra: mul! +using LinearAlgebra: dot, mul! function blocksparse_muladd!( - α::Number, a1::AbstractMatrix, a2::AbstractMatrix, β::Number, a_dest::AbstractMatrix + α::Number, a1::AbstractArray, a2::AbstractArray, β::Number, a_dest::AbstractArray ) mul!(blocks(a_dest), blocks(a1), blocks(a2), α, β) return a_dest end +function blocksparse_matmul!(m::MulAdd) + α, a1, a2, β, a_dest = m.α, m.A, m.B, m.β, m.C + blocksparse_muladd!(α, a1, a2, β, a_dest) + return a_dest +end + function ArrayLayouts.materialize!( m::MatMulMatAdd{ <:BlockLayout{<:SparseLayout}, @@ -17,7 +23,26 @@ function ArrayLayouts.materialize!( <:BlockLayout{<:SparseLayout}, }, ) - α, a1, a2, β, a_dest = m.α, m.A, m.B, m.β, m.C - blocksparse_muladd!(α, a1, a2, β, a_dest) - return a_dest + blocksparse_matmul!(m) + return m.C +end +function ArrayLayouts.materialize!( + m::MatMulVecAdd{ + <:BlockLayout{<:SparseLayout}, + <:BlockLayout{<:SparseLayout}, + <:BlockLayout{<:SparseLayout}, + }, +) + blocksparse_matmul!(m) + return m.C +end + +function blocksparse_dot(a1::AbstractArray, a2::AbstractArray) + # TODO: Add a check that the blocking of `a1` and `a2` are + # the same, or the same up to a reshape. 
+ return dot(blocks(a1), blocks(a2)) +end + +function Base.copy(d::Dot{<:BlockLayout{<:SparseLayout},<:BlockLayout{<:SparseLayout}}) + return blocksparse_dot(d.A, d.B) end diff --git a/NDTensors/src/lib/BlockSparseArrays/src/blocksparsearrayinterface/blocksparsearrayinterface.jl b/NDTensors/src/lib/BlockSparseArrays/src/blocksparsearrayinterface/blocksparsearrayinterface.jl index fbb5f220f4..601ead6c53 100644 --- a/NDTensors/src/lib/BlockSparseArrays/src/blocksparsearrayinterface/blocksparsearrayinterface.jl +++ b/NDTensors/src/lib/BlockSparseArrays/src/blocksparsearrayinterface/blocksparsearrayinterface.jl @@ -184,7 +184,7 @@ reverse_index(index::CartesianIndex) = CartesianIndex(reverse(Tuple(index))) # Represents the array of arrays of a `Transpose` # wrapping a block spare array, i.e. `blocks(array)` where `a` is a `Transpose`. -struct SparseTransposeBlocks{T,BlockType<:AbstractMatrix{T},Array<:Transpose{T}} <: +struct SparseTransposeBlocks{T,BlockType<:AbstractArray{T},Array<:Transpose{T}} <: AbstractSparseMatrix{BlockType} array::Array end @@ -219,7 +219,7 @@ end # Represents the array of arrays of a `Adjoint` # wrapping a block spare array, i.e. `blocks(array)` where `a` is a `Adjoint`. 
-struct SparseAdjointBlocks{T,BlockType<:AbstractMatrix{T},Array<:Adjoint{T}} <: +struct SparseAdjointBlocks{T,BlockType<:AbstractArray{T},Array<:Adjoint{T}} <: AbstractSparseMatrix{BlockType} array::Array end diff --git a/NDTensors/src/lib/BlockSparseArrays/test/test_basics.jl b/NDTensors/src/lib/BlockSparseArrays/test/test_basics.jl index 27012d32c9..4374be541c 100644 --- a/NDTensors/src/lib/BlockSparseArrays/test/test_basics.jl +++ b/NDTensors/src/lib/BlockSparseArrays/test/test_basics.jl @@ -16,7 +16,7 @@ using BlockArrays: mortar using Compat: @compat using GPUArraysCore: @allowscalar -using LinearAlgebra: Adjoint, mul!, norm +using LinearAlgebra: Adjoint, dot, mul!, norm using NDTensors.BlockSparseArrays: @view!, BlockSparseArray, @@ -575,7 +575,11 @@ using .NDTensorsTestUtils: devices_list, is_supported_eltype a[b] = randn(elt, size(a[b])) end @test isassigned(a, 1, 1) + @test isassigned(a, 1, 1, 1) + @test !isassigned(a, 1, 1, 2) @test isassigned(a, 5, 7) + @test isassigned(a, 5, 7, 1) + @test !isassigned(a, 5, 7, 2) @test !isassigned(a, 0, 1) @test !isassigned(a, 5, 8) @test isassigned(a, Block(1), Block(1)) @@ -852,6 +856,16 @@ using .NDTensorsTestUtils: devices_list, is_supported_eltype @allowscalar @test Array(a_dest) ≈ Array(a1′) * Array(a2′) end end + @testset "Dot product" begin + a1 = dev(BlockSparseArray{elt}([2, 3, 4])) + a1[Block(1)] = dev(randn(elt, size(@view(a1[Block(1)])))) + a1[Block(3)] = dev(randn(elt, size(@view(a1[Block(3)])))) + a2 = dev(BlockSparseArray{elt}([2, 3, 4])) + a2[Block(2)] = dev(randn(elt, size(@view(a1[Block(2)])))) + a2[Block(3)] = dev(randn(elt, size(@view(a1[Block(3)])))) + @test a1' * a2 ≈ Array(a1)' * Array(a2) + @test dot(a1, a2) ≈ a1' * a2 + end @testset "TensorAlgebra" begin a1 = dev(BlockSparseArray{elt}([2, 3], [2, 3])) a1[Block(1, 1)] = dev(randn(elt, size(@view(a1[Block(1, 1)])))) diff --git a/NDTensors/src/lib/SparseArrayInterface/src/abstractsparsearray/arraylayouts.jl 
b/NDTensors/src/lib/SparseArrayInterface/src/abstractsparsearray/arraylayouts.jl index 038689be8a..58176846ca 100644 --- a/NDTensors/src/lib/SparseArrayInterface/src/abstractsparsearray/arraylayouts.jl +++ b/NDTensors/src/lib/SparseArrayInterface/src/abstractsparsearray/arraylayouts.jl @@ -1,13 +1,41 @@ -using ArrayLayouts: ArrayLayouts, MatMulMatAdd +using ArrayLayouts: ArrayLayouts, Dot, DualLayout, MatMulMatAdd, MatMulVecAdd, MulAdd +using LinearAlgebra: Adjoint, Transpose +using ..TypeParameterAccessors: parenttype function ArrayLayouts.MemoryLayout(arraytype::Type{<:SparseArrayLike}) return SparseLayout() end -function ArrayLayouts.materialize!( - m::MatMulMatAdd{<:AbstractSparseLayout,<:AbstractSparseLayout,<:AbstractSparseLayout} +# TODO: Generalize to `SparseVectorLike`/`AnySparseVector`. +function ArrayLayouts.MemoryLayout(arraytype::Type{<:Adjoint{<:Any,<:AbstractSparseVector}}) + return DualLayout{typeof(MemoryLayout(parenttype(arraytype)))}() +end +# TODO: Generalize to `SparseVectorLike`/`AnySparseVector`. 
+function ArrayLayouts.MemoryLayout( + arraytype::Type{<:Transpose{<:Any,<:AbstractSparseVector}} ) + return DualLayout{typeof(MemoryLayout(parenttype(arraytype)))}() +end + +function sparse_matmul!(m::MulAdd) α, a1, a2, β, a_dest = m.α, m.A, m.B, m.β, m.C sparse_mul!(a_dest, a1, a2, α, β) return a_dest end + +function ArrayLayouts.materialize!( + m::MatMulMatAdd{<:AbstractSparseLayout,<:AbstractSparseLayout,<:AbstractSparseLayout} +) + sparse_matmul!(m) + return m.C +end +function ArrayLayouts.materialize!( + m::MatMulVecAdd{<:AbstractSparseLayout,<:AbstractSparseLayout,<:AbstractSparseLayout} +) + sparse_matmul!(m) + return m.C +end + +function Base.copy(d::Dot{<:SparseLayout,<:SparseLayout}) + return sparse_dot(d.A, d.B) +end diff --git a/NDTensors/src/lib/SparseArrayInterface/src/sparsearrayinterface/SparseArrayInterfaceLinearAlgebraExt.jl b/NDTensors/src/lib/SparseArrayInterface/src/sparsearrayinterface/SparseArrayInterfaceLinearAlgebraExt.jl index 7a444297f7..633506d630 100644 --- a/NDTensors/src/lib/SparseArrayInterface/src/sparsearrayinterface/SparseArrayInterfaceLinearAlgebraExt.jl +++ b/NDTensors/src/lib/SparseArrayInterface/src/sparsearrayinterface/SparseArrayInterfaceLinearAlgebraExt.jl @@ -1,4 +1,4 @@ -using LinearAlgebra: mul!, norm +using LinearAlgebra: dot, mul!, norm sparse_norm(a::AbstractArray, p::Real=2) = norm(sparse_storage(a)) @@ -9,6 +9,14 @@ function mul_indices(I1::CartesianIndex{2}, I2::CartesianIndex{2}) return CartesianIndex(I1[1], I2[2]) end +# TODO: Is this needed? Maybe when multiplying vectors? 
+function mul_indices(I1::CartesianIndex{1}, I2::CartesianIndex{1}) + if I1 ≠ I2 + return nothing + end + return CartesianIndex(I1) +end + function default_mul!!( a_dest::AbstractMatrix, a1::AbstractMatrix, @@ -28,9 +36,9 @@ end # a1 * a2 * α + a_dest * β function sparse_mul!( - a_dest::AbstractMatrix, - a1::AbstractMatrix, - a2::AbstractMatrix, + a_dest::AbstractArray, + a1::AbstractArray, + a2::AbstractArray, α::Number=true, β::Number=false; (mul!!)=(default_mul!!), @@ -45,3 +53,26 @@ function sparse_mul!( end return a_dest end + +function sparse_dot(a1::AbstractArray, a2::AbstractArray) + # This requires that `a1` and `a2` have the same shape. + # TODO: Generalize (Base supports dot products of + # arrays with the same length but different sizes). + size(a1) == size(a2) || + throw(DimensionMismatch("Sizes $(size(a1)) and $(size(a2)) don't match.")) + dot_dest = zero(Base.promote_op(dot, eltype(a1), eltype(a2))) + # TODO: First check if the number of stored elements (`nstored`, to be renamed + # `stored_length`) is smaller in `a1` or `a2` and use whichever one is smaller + # as the outer loop. + for I1 in stored_indices(a1) + # TODO: Overload and use `Base.isstored(a, I) = I in stored_indices(a)` instead. + # TODO: This assumes fast lookup of indices, which may not always be the case. + # It could be better to loop over `stored_indices(a2)` and check that + # `I1 == I2` instead (say using `mul_indices(I1, I2)`). We could have a trait + # `HasFastIsStored(a::AbstractArray)` to choose between the two.
+ if I1 in stored_indices(a2) + dot_dest += dot(a1[I1], a2[I1]) + end + end + return dot_dest +end diff --git a/NDTensors/src/lib/SparseArrayInterface/src/sparsearrayinterface/indexing.jl b/NDTensors/src/lib/SparseArrayInterface/src/sparsearrayinterface/indexing.jl index 216c03e7ac..ae0f6f6d61 100644 --- a/NDTensors/src/lib/SparseArrayInterface/src/sparsearrayinterface/indexing.jl +++ b/NDTensors/src/lib/SparseArrayInterface/src/sparsearrayinterface/indexing.jl @@ -166,7 +166,10 @@ end function sparse_isassigned(a::AbstractArray{<:Any,N}, I::CartesianIndex{N}) where {N} return sparse_isassigned(a, Tuple(I)...) end -function sparse_isassigned(a::AbstractArray{<:Any,N}, I::Vararg{Integer,N}) where {N} +function sparse_isassigned(a::AbstractArray, I::Integer...) + # Check trailing dimensions are one. This is needed in generic + # AbstractArray show when `a isa AbstractVector`. + all(d -> isone(I[d]), (ndims(a) + 1):length(I)) || return false return all(dim -> I[dim] ∈ axes(a, dim), 1:ndims(a)) end diff --git a/NDTensors/src/lib/SparseArrayInterface/test/SparseArrayInterfaceTestUtils/SparseArrays.jl b/NDTensors/src/lib/SparseArrayInterface/test/SparseArrayInterfaceTestUtils/SparseArrays.jl index 05b2cb3072..e1ca4f0661 100644 --- a/NDTensors/src/lib/SparseArrayInterface/test/SparseArrayInterfaceTestUtils/SparseArrays.jl +++ b/NDTensors/src/lib/SparseArrayInterface/test/SparseArrayInterfaceTestUtils/SparseArrays.jl @@ -32,6 +32,10 @@ function LinearAlgebra.mul!( return a_dest end +function LinearAlgebra.dot(a1::SparseArray, a2::SparseArray) + return SparseArrayInterface.sparse_dot(a1, a2) +end + # AbstractArray interface Base.size(a::SparseArray) = a.dims function Base.similar(a::SparseArray, elt::Type, dims::Tuple{Vararg{Int}}) diff --git a/NDTensors/src/lib/SparseArrayInterface/test/test_abstractsparsearray.jl b/NDTensors/src/lib/SparseArrayInterface/test/test_abstractsparsearray.jl index cd233f4ff1..743f457d43 100644 --- 
a/NDTensors/src/lib/SparseArrayInterface/test/test_abstractsparsearray.jl +++ b/NDTensors/src/lib/SparseArrayInterface/test/test_abstractsparsearray.jl @@ -1,5 +1,5 @@ @eval module $(gensym()) -using LinearAlgebra: mul!, norm +using LinearAlgebra: dot, mul!, norm using NDTensors.SparseArrayInterface: SparseArrayInterface include("SparseArrayInterfaceTestUtils/SparseArrayInterfaceTestUtils.jl") using .SparseArrayInterfaceTestUtils.AbstractSparseArrays: AbstractSparseArrays @@ -299,6 +299,18 @@ using Test: @test, @testset @test a_dest isa SparseArray{elt} @test SparseArrayInterface.nstored(a_dest) == 2 + # Dot product + a1 = SparseArray{elt}(4) + a1[1] = randn() + a1[3] = randn() + a2 = SparseArray{elt}(4) + a2[2] = randn() + a2[3] = randn() + a_dest = a1' * a2 + @test a_dest isa elt + @test a_dest ≈ Array(a1)' * Array(a2) + @test a_dest ≈ dot(a1, a2) + # In-place matrix multiplication a1 = SparseArray{elt}(2, 3) a1[1, 2] = 12 diff --git a/NDTensors/src/lib/TensorAlgebra/src/LinearAlgebraExtensions/qr.jl b/NDTensors/src/lib/TensorAlgebra/src/LinearAlgebraExtensions/qr.jl index 7230258e7c..7c0d3f0ebb 100644 --- a/NDTensors/src/lib/TensorAlgebra/src/LinearAlgebraExtensions/qr.jl +++ b/NDTensors/src/lib/TensorAlgebra/src/LinearAlgebraExtensions/qr.jl @@ -1,3 +1,4 @@ +using ArrayLayouts: LayoutMatrix using LinearAlgebra: LinearAlgebra, qr using ..TensorAlgebra: TensorAlgebra, @@ -8,6 +9,8 @@ using ..TensorAlgebra: fusedims, splitdims +# TODO: Define as `tensor_qr`. +# TODO: This looks generic but doesn't work for `BlockSparseArrays`. function _qr(a::AbstractArray, biperm::BlockedPermutation{2}) a_matricized = fusedims(a, biperm) @@ -38,6 +41,12 @@ function LinearAlgebra.qr(a::AbstractMatrix, biperm::BlockedPermutation{2}) return _qr(a, biperm) end +# Fix ambiguity error with `ArrayLayouts`. +function LinearAlgebra.qr(a::LayoutMatrix, biperm::BlockedPermutation{2}) + return _qr(a, biperm) +end + +# TODO: Define in terms of an inner function `_qr` or `tensor_qr`.
function LinearAlgebra.qr( a::AbstractArray, labels_a::Tuple, labels_q::Tuple, labels_r::Tuple ) @@ -50,3 +59,10 @@ function LinearAlgebra.qr( ) return qr(a, blockedperm_indexin(labels_a, labels_q, labels_r)) end + +# Fix ambiguity error with `ArrayLayouts`. +function LinearAlgebra.qr( + a::LayoutMatrix, labels_a::Tuple, labels_q::Tuple, labels_r::Tuple +) + return qr(a, blockedperm_indexin(labels_a, labels_q, labels_r)) +end