diff --git a/NDTensors/src/NDTensors.jl b/NDTensors/src/NDTensors.jl index 96f72ef08b..b901db1668 100644 --- a/NDTensors/src/NDTensors.jl +++ b/NDTensors/src/NDTensors.jl @@ -46,6 +46,7 @@ include("abstractarray/fill.jl") include("array/set_types.jl") include("tupletools.jl") include("tensorstorage/tensorstorage.jl") +include("tensorstorage/set_types.jl") include("tensorstorage/default_storage.jl") include("tensorstorage/similar.jl") include("tensor/tensor.jl") diff --git a/NDTensors/src/abstractarray/set_types.jl b/NDTensors/src/abstractarray/set_types.jl index ae3700d075..d4b267dba9 100644 --- a/NDTensors/src/abstractarray/set_types.jl +++ b/NDTensors/src/abstractarray/set_types.jl @@ -22,6 +22,15 @@ function set_ndims(arraytype::Type{<:AbstractArray}, ndims) ) end +# This is for uniform `Diag` storage which uses +# a Number as the data type. +# TODO: Delete this when we change to using a +# `FillArray` instead. This is a stand-in +# to make things work with the current design. +function set_ndims(numbertype::Type{<:Number}, ndims) + return numbertype +end + """ `set_indstype` should be overloaded for types with structured dimensions, diff --git a/NDTensors/src/abstractarray/similar.jl b/NDTensors/src/abstractarray/similar.jl index 44f0cecb84..7e8313cd1c 100644 --- a/NDTensors/src/abstractarray/similar.jl +++ b/NDTensors/src/abstractarray/similar.jl @@ -177,6 +177,15 @@ end return similartype(parenttype(arraytype), dims) end +# This is for uniform `Diag` storage which uses +# a Number as the data type. +# TODO: Delete this when we change to using a +# `FillArray` instead. This is a stand-in +# to make things work with the current design. +function similartype(numbertype::Type{<:Number}) + return numbertype +end + # Instances function similartype(array::AbstractArray, eltype::Type, dims...) return similartype(typeof(array), eltype, dims...) diff --git a/NDTensors/src/blocksparse/block.jl b/NDTensors/src/blocksparse/block.jl index 60179bc7b7..78ff0dea3f 100644 --- a/NDTensors/src/blocksparse/block.jl +++ b/NDTensors/src/blocksparse/block.jl @@ -20,7 +20,7 @@ end # Constructors # -Block{N}(t::Tuple{Vararg{<:Any,N}}) where {N} = Block{N}(UInt.(t)) +Block{N}(t::Tuple{Vararg{Any,N}}) where {N} = Block{N}(UInt.(t)) Block{N}(I::CartesianIndex{N}) where {N} = Block{N}(I.I) @@ -38,7 +38,7 @@ Block(v::SVector{N}) where {N} = Block{N}(v) Block(t::NTuple{N,UInt}) where {N} = Block{N}(t) -Block(t::Tuple{Vararg{<:Any,N}}) where {N} = Block{N}(t) +Block(t::Tuple{Vararg{Any,N}}) where {N} = Block{N}(t) Block(::Tuple{}) = Block{0}(()) diff --git a/NDTensors/src/blocksparse/blockoffsets.jl b/NDTensors/src/blocksparse/blockoffsets.jl index f3a899d4bb..0385605b9b 100644 --- a/NDTensors/src/blocksparse/blockoffsets.jl +++ b/NDTensors/src/blocksparse/blockoffsets.jl @@ -90,7 +90,7 @@ Assumes the blocks are allong the diagonal. """ function diagblockoffsets( blocks::Vector{BlockT}, inds -) where {BlockT<:Union{Block{N},Tuple{Vararg{<:Any,N}}}} where {N} +) where {BlockT<:Union{Block{N},Tuple{Vararg{Any,N}}}} where {N} blockoffsets = BlockOffsets{N}() nnzdiag = 0 for (i, block) in enumerate(blocks) diff --git a/NDTensors/src/dense/densetensor.jl b/NDTensors/src/dense/densetensor.jl index 51827b1d20..8a019e1f3a 100644 --- a/NDTensors/src/dense/densetensor.jl +++ b/NDTensors/src/dense/densetensor.jl @@ -261,7 +261,7 @@ First T is permuted as `permutedims(3,2,1)`, then reshaped such that the original indices 3 and 2 are combined. """ function permute_reshape( - T::DenseTensor{ElT,NT,IndsT}, pos::Vararg{<:Any,N} + T::DenseTensor{ElT,NT,IndsT}, pos::Vararg{Any,N} ) where {ElT,NT,IndsT,N} perm = flatten(pos...) diff --git a/NDTensors/src/empty/EmptyTensor.jl b/NDTensors/src/empty/EmptyTensor.jl index abb2a9c1f1..87b13d83ea 100644 --- a/NDTensors/src/empty/EmptyTensor.jl +++ b/NDTensors/src/empty/EmptyTensor.jl @@ -25,6 +25,13 @@ fulltype(T::EmptyStorage) = fulltype(typeof(T)) fulltype(T::Tensor) = fulltype(typeof(T)) +# Needed for correct `NDTensors.ndims` definitions, for +# example `EmptyStorage` that wraps a `BlockSparse` which +# can have non-unity dimensions. +function ndims(storagetype::Type{<:EmptyStorage}) + return ndims(fulltype(storagetype)) +end + # From an EmptyTensor, return the closest Tensor type function fulltype(::Type{TensorT}) where {TensorT<:Tensor} return Tensor{ diff --git a/NDTensors/src/tensor/similar.jl b/NDTensors/src/tensor/similar.jl index e1a7252a7b..0eb56d969b 100644 --- a/NDTensors/src/tensor/similar.jl +++ b/NDTensors/src/tensor/similar.jl @@ -62,5 +62,8 @@ end function similartype(tensortype::Type{<:Tensor}, dims::Tuple) tensortype_new_inds = set_indstype(tensortype, dims) - return set_storagetype(tensortype_new_inds, similartype(storagetype(tensortype_new_inds))) + # Need to pass `dims` in case that information is needed to make a storage type, + # for example `BlockSparse` needs the number of dimensions. + storagetype_new_inds = similartype(storagetype(tensortype_new_inds), dims) + return set_storagetype(tensortype_new_inds, storagetype_new_inds) end diff --git a/NDTensors/src/tensorstorage/set_types.jl b/NDTensors/src/tensorstorage/set_types.jl index cdd9b16a33..40484421da 100644 --- a/NDTensors/src/tensorstorage/set_types.jl +++ b/NDTensors/src/tensorstorage/set_types.jl @@ -3,5 +3,8 @@ function set_eltype(arraytype::Type{<:TensorStorage}, eltype::Type) end function set_ndims(arraytype::Type{<:TensorStorage}, ndims) - return set_datatype(arraytype, set_ndims(datatype(arraytype), ndims)) + # TODO: Change to this once `TensorStorage` types support wrapping + # non-AbstractVector types. + # return set_datatype(arraytype, set_ndims(datatype(arraytype), ndims)) + return arraytype end diff --git a/NDTensors/src/tensorstorage/similar.jl b/NDTensors/src/tensorstorage/similar.jl index fbcf2ce057..d309b32727 100644 --- a/NDTensors/src/tensorstorage/similar.jl +++ b/NDTensors/src/tensorstorage/similar.jl @@ -65,7 +65,15 @@ function similartype(storagetype::Type{<:TensorStorage}, eltype::Type) # TODO: Don't convert to an `AbstractVector` with `set_ndims(datatype, 1)`, once we support # more general data types. # return set_datatype(storagetype, NDTensors.similartype(datatype(storagetype), eltype)) - return set_datatype( - storagetype, set_ndims(NDTensors.similartype(datatype(storagetype), eltype), 1) + return set_datatype(storagetype, set_ndims(similartype(datatype(storagetype), eltype), 1)) +end + +function similartype(storagetype::Type{<:TensorStorage}, dims::Tuple) + # TODO: In the future, set the dimensions of the data type based on `dims`, once + # more general data types beyond `AbstractVector` are supported. + # `similartype` unwraps any wrapped data. + return set_ndims( + set_datatype(storagetype, set_ndims(similartype(datatype(storagetype)), 1)), + length(dims), ) end diff --git a/NDTensors/src/tupletools.jl b/NDTensors/src/tupletools.jl index f6433ae3fe..8f2a419dd9 100644 --- a/NDTensors/src/tupletools.jl +++ b/NDTensors/src/tupletools.jl @@ -21,9 +21,9 @@ ValLength(::NTuple{N}) where {N} = Val(N) # is not type stable and therefore not efficient. ValLength(v::Vector) = Val(length(v)) -ValLength(::Tuple{Vararg{<:Any,N}}) where {N} = Val(N) +ValLength(::Tuple{Vararg{Any,N}}) where {N} = Val(N) -ValLength(::Type{<:Tuple{Vararg{<:Any,N}}}) where {N} = Val{N} +ValLength(::Type{<:Tuple{Vararg{Any,N}}}) where {N} = Val{N} ValLength(::CartesianIndex{N}) where {N} = Val(N) ValLength(::Type{CartesianIndex{N}}) where {N} = Val{N} diff --git a/NDTensors/test/blocksparse.jl b/NDTensors/test/blocksparse.jl index 10b1e48e73..2b980e10ff 100644 --- a/NDTensors/test/blocksparse.jl +++ b/NDTensors/test/blocksparse.jl @@ -88,6 +88,18 @@ using Test @test conj(data(store(A))) == data(store(conj(A))) @test typeof(conj(A)) <: BlockSparseTensor + @testset "similartype regression test" begin + # Regression test for issue seen in: + # https://github.com/ITensor/ITensorInfiniteMPS.jl/pull/77 + # Previously, `similartype` wasn't using information about the dimensions + # properly and was returning a `BlockSparse` storage of the dimensions + # of the input tensor. + T = BlockSparseTensor([(1, 1)], ([2], [2])) + @test NDTensors.ndims( + NDTensors.storagetype(NDTensors.similartype(typeof(T), ([2], [2], [2]))) + ) == 3 + end + @testset "Random constructor" begin T = randomBlockSparseTensor([(1, 1), (2, 2)], ([2, 2], [2, 2])) @test nnzblocks(T) == 2 diff --git a/src/broadcast.jl b/src/broadcast.jl index ec0b77f044..af3082ac1a 100644 --- a/src/broadcast.jl +++ b/src/broadcast.jl @@ -280,16 +280,16 @@ end # B .+= A # -function fmap(bc::Broadcasted{ITensorStyle,<:Any,typeof(+),<:Tuple{Vararg{<:ITensor}}}) +function fmap(bc::Broadcasted{ITensorStyle,<:Any,typeof(+),<:Tuple{Vararg{ITensor}}}) return (r, t) -> bc.f(r, t) end -function fmap(bc::Broadcasted{ITensorStyle,<:Any,typeof(-),<:Tuple{Vararg{<:ITensor}}}) +function fmap(bc::Broadcasted{ITensorStyle,<:Any,typeof(-),<:Tuple{Vararg{ITensor}}}) return (r, t) -> bc.f(r, t) end function Base.copyto!( - T::ITensor, bc::Broadcasted{ITensorStyle,<:Any,typeof(+),<:Tuple{Vararg{<:ITensor}}} + T::ITensor, bc::Broadcasted{ITensorStyle,<:Any,typeof(+),<:Tuple{Vararg{ITensor}}} ) if T === bc.args[1] A = bc.args[2] @@ -307,7 +307,7 @@ end # function Base.copyto!( - T::ITensor, bc::Broadcasted{ITensorStyle,<:Any,typeof(-),<:Tuple{Vararg{<:ITensor}}} + T::ITensor, bc::Broadcasted{ITensorStyle,<:Any,typeof(-),<:Tuple{Vararg{ITensor}}} ) if T === bc.args[1] A = bc.args[2] @@ -380,7 +380,7 @@ end function Base.copyto!( T::ITensor, - bc::Broadcasted{ITensorOpScalarStyle,<:Any,typeof(+),<:Tuple{Vararg{<:Broadcasted}}}, + bc::Broadcasted{ITensorOpScalarStyle,<:Any,typeof(+),<:Tuple{Vararg{Broadcasted}}}, ) bc_α = bc.args[1] bc_β = bc.args[2] @@ -417,7 +417,7 @@ end function Base.copyto!( T::ITensor, bc::Broadcasted{ - ITensorOpScalarStyle,<:Any,typeof(+),<:Tuple{Vararg{<:Union{<:ITensor,<:Number}}} + ITensorOpScalarStyle,<:Any,typeof(+),<:Tuple{Vararg{Union{<:ITensor,<:Number}}} }, ) α = find_type(Number, bc.args) @@ -496,7 +496,7 @@ end # function Base.copyto!( - R::ITensor, bc::Broadcasted{ITensorStyle,<:Any,typeof(+),<:Tuple{Vararg{<:Broadcasted}}} + R::ITensor, bc::Broadcasted{ITensorStyle,<:Any,typeof(+),<:Tuple{Vararg{Broadcasted}}} ) bc1 = bc.args[1] bc2 = bc.args[2] diff --git a/src/indexset.jl b/src/indexset.jl index 6a219b6bed..cc1528f125 100644 --- a/src/indexset.jl +++ b/src/indexset.jl @@ -71,9 +71,9 @@ Tuple(is::IndexSet) = _Tuple(is) NTuple{N}(is::IndexSet) where {N} = _NTuple(Val(N), is) """ - not(inds::Union{IndexSet, Tuple{Vararg{<:Index}}}) + not(inds::Union{IndexSet, Tuple{Vararg{Index}}}) not(inds::Index...) - !(inds::Union{IndexSet, Tuple{Vararg{<:Index}}}) + !(inds::Union{IndexSet, Tuple{Vararg{Index}}}) !(inds::Index...) Represents the set of indices not in the specified diff --git a/src/itensor.jl b/src/itensor.jl index f212585cbb..9706c66297 100644 --- a/src/itensor.jl +++ b/src/itensor.jl @@ -667,6 +667,7 @@ randomITensor() = randomITensor(Random.default_rng()) randomITensor(rng::AbstractRNG) = randomITensor(rng, Float64, ()) copy(T::ITensor)::ITensor = itensor(copy(tensor(T))) +zero(T::ITensor)::ITensor = itensor(zero(tensor(T))) # # Construct from Array @@ -1112,7 +1113,7 @@ end # CartesianIndices @propagate_inbounds getindex(T::ITensor, I::CartesianIndex)::Any = T[Tuple(I)...] -@propagate_inbounds @inline function _getindex(T::Tensor, ivs::Vararg{<:Any,N}) where {N} +@propagate_inbounds @inline function _getindex(T::Tensor, ivs::Vararg{Any,N}) where {N} # Tried ind.(ivs), val.(ivs) but it is slower p = NDTensors.getperm(inds(T), ntuple(n -> ind(@inbounds ivs[n]), Val(N))) fac = NDTensors.permfactor(p, ivs...) # possible sign @@ -1133,7 +1134,7 @@ A = ITensor(2.0, i, i') A[i => 1, i' => 2] # 2.0, same as: A[i' => 2, i => 1] ``` """ -@propagate_inbounds (getindex(T::ITensor, ivs::Vararg{<:Any,N})::Any) where {N} = +@propagate_inbounds (getindex(T::ITensor, ivs::Vararg{Any,N})::Any) where {N} = _getindex(tensor(T), ivs...) @propagate_inbounds function getindex(T::ITensor)::Any @@ -1233,7 +1234,7 @@ end end @propagate_inbounds @inline function _setindex!!( - T::Tensor, x::Number, ivs::Vararg{<:Any,N} + T::Tensor, x::Number, ivs::Vararg{Any,N} ) where {N} # Would be nice to split off the functions for extracting the `ind` and `val` as Tuples, # but it was slower. @@ -1245,7 +1246,7 @@ end end @propagate_inbounds @inline function setindex!( - T::ITensor, x::Number, I::Vararg{<:Any,N} + T::ITensor, x::Number, I::Vararg{Any,N} ) where {N} return settensor!(T, _setindex!!(tensor(T), x, I...)) end @@ -1335,7 +1336,7 @@ itensor2inds(A::ITensor)::Any = inds(A) itensor2inds(A::Tensor) = inds(A) itensor2inds(i::Index) = (i,) itensor2inds(A) = A -function map_itensor2inds(A::Tuple{Vararg{<:Any,N}}) where {N} +function map_itensor2inds(A::Tuple{Vararg{Any,N}}) where {N} return ntuple(i -> itensor2inds(A[i]), Val(N)) end @@ -1376,7 +1377,7 @@ hassameinds(A, B) = issetequal(itensor2inds(A), itensor2inds(B)) # Apply the Index set function and then filter the results function filter_inds_set_function( - ffilter::Function, fset::Function, A::Vararg{<:Any,N} + ffilter::Function, fset::Function, A::Vararg{Any,N} ) where {N} return filter(ffilter, fset(map_itensor2inds(A)...)) end diff --git a/src/tensor_operations/tensor_algebra.jl b/src/tensor_operations/tensor_algebra.jl index be47cdca21..eb39886ecd 100644 --- a/src/tensor_operations/tensor_algebra.jl +++ b/src/tensor_operations/tensor_algebra.jl @@ -106,7 +106,7 @@ function contract(A::ITensor, B::ITensor)::ITensor end end -function optimal_contraction_sequence(A::Union{Vector{<:ITensor},Tuple{Vararg{<:ITensor}}}) +function optimal_contraction_sequence(A::Union{Vector{<:ITensor},Tuple{Vararg{ITensor}}}) if length(A) == 1 return optimal_contraction_sequence(A[1]) elseif length(A) == 2 @@ -130,7 +130,7 @@ _optimal_contraction_sequence(As::Tuple{<:ITensor,<:ITensor}) = Any[1, 2] function _optimal_contraction_sequence(As::Tuple{<:ITensor,<:ITensor,<:ITensor}) return optimal_contraction_sequence(inds(As[1]), inds(As[2]), inds(As[3])) end -function _optimal_contraction_sequence(As::Tuple{Vararg{<:ITensor}}) +function _optimal_contraction_sequence(As::Tuple{Vararg{ITensor}}) return __optimal_contraction_sequence(As) end @@ -145,7 +145,7 @@ function default_sequence() return using_contraction_sequence_optimization() ? "automatic" : "left_associative" end -function contraction_cost(As::Union{Vector{<:ITensor},Tuple{Vararg{<:ITensor}}}; kwargs...) +function contraction_cost(As::Union{Vector{<:ITensor},Tuple{Vararg{ITensor}}}; kwargs...) indsAs = [inds(A) for A in As] return contraction_cost(indsAs; kwargs...) end diff --git a/test/ITensorChainRules/test_chainrules_ops.jl b/test/ITensorChainRules/test_chainrules_ops.jl index 84dc1a0aac..297e7e5bd3 100644 --- a/test/ITensorChainRules/test_chainrules_ops.jl +++ b/test/ITensorChainRules/test_chainrules_ops.jl @@ -43,20 +43,29 @@ using Zygote: ZygoteRuleConfig, gradient atol=1.0e-7, ) - f = function (x) - y = Op("Ry", 1; θ=x) + Op("Ry", 1; θ=x) - return y[1].params.θ + function sometimes_broken_test() + f = function (x) + y = Op("Ry", 1; θ=x) + Op("Ry", 1; θ=x) + return y[1].params.θ + end + args = (x,) + test_rrule( + ZygoteRuleConfig(), + f, + args...; + rrule_f=rrule_via_ad, + check_inferred=false, + rtol=1.0e-7, + atol=1.0e-7, + ) + return nothing + end + + @static if VERSION > v"1.8" + @test_skip sometimes_broken_test() + else + sometimes_broken_test() end - args = (x,) - test_rrule( - ZygoteRuleConfig(), - f, - args...; - rrule_f=rrule_via_ad, - check_inferred=false, - rtol=1.0e-7, - atol=1.0e-7, - ) f = function (x) y = ITensor(Op("Ry", 1; θ=x) + Op("Ry", 1; θ=x), s) diff --git a/test/base/test_contract.jl b/test/base/test_contract.jl index 6b91776546..6078489f37 100644 --- a/test/base/test_contract.jl +++ b/test/base/test_contract.jl @@ -45,6 +45,14 @@ digits(::Type{T}, i, j, k) where {T} = T(i * 10^2 + j * 10 + k) CArray = transpose(array(Ai)) * array(Bi) @test CArray ≈ scalar(C) end + @testset "Test Matrix{ITensor} * Matrix{ITensor}" begin + M1 = [Aij Aij; Aij Aij] + M2 = [Ajk Ajk; Ajk Ajk] + M12 = M1 * M2 + for x in 1:2, y in 1:2 + @test M12[x, y] ≈ 2 * Aij * Ajk + end + end @testset "Test contract ITensors (Vector*Vectorᵀ -> Matrix)" begin C = Ai * Aj for ii in 1:dim(i), jj in 1:dim(j) diff --git a/test/base/test_itensor.jl b/test/base/test_itensor.jl index 09c43fab78..19046be1b4 100644 --- a/test/base/test_itensor.jl +++ b/test/base/test_itensor.jl @@ -448,6 +448,13 @@ end @test all(ITensors.data(A) .== 1.0) end + @testset "zero" begin + i = Index(2) + A = randomITensor(i) + B = zero(A) + @test false * A ≈ B + end + @testset "copyto!" begin i = Index(2, "i") j = Index(2, "j") diff --git a/test/base/test_qnitensor.jl b/test/base/test_qnitensor.jl index 42a82c69f6..625c27b07c 100644 --- a/test/base/test_qnitensor.jl +++ b/test/base/test_qnitensor.jl @@ -98,6 +98,20 @@ Random.seed!(1234) @test_throws ErrorException ITensor(A, i', dag(i); tol=1e-8) end + @testset "similartype regression test" begin + # Regression test for issue seen in: + # https://github.com/ITensor/ITensorInfiniteMPS.jl/pull/77 + # Previously, `similartype` wasn't using information about the dimensions + # properly and was returning a `BlockSparse` storage of the dimensions + # of the input tensor. + i = Index([QN() => 2]) + A = ITensor(i, i') + B = ITensor(i'') + C = A * B + @test NDTensors.ndims(NDTensors.storagetype(C)) == 3 + @test C + ITensor(i, i', i'') == ITensor(i, i', i'') + end + @testset "Construct from Array regression test" begin i = Index([QN(0) => 2, QN(1) => 2]) T = itensor([0, 0, 1, 2], i)