diff --git a/src/ITensorNetworks.jl b/src/ITensorNetworks.jl
index 2eca4ad4..28da183a 100644
--- a/src/ITensorNetworks.jl
+++ b/src/ITensorNetworks.jl
@@ -40,7 +40,9 @@ include("solvers/local_solvers/contract.jl")
 include("solvers/local_solvers/linsolve.jl")
 include("treetensornetworks/abstracttreetensornetwork.jl")
 include("treetensornetworks/treetensornetwork.jl")
-include("treetensornetworks/opsum_to_ttn.jl")
+include("treetensornetworks/opsum_to_ttn/matelem.jl")
+include("treetensornetworks/opsum_to_ttn/qnarrelem.jl")
+include("treetensornetworks/opsum_to_ttn/opsum_to_ttn.jl")
 include("treetensornetworks/projttns/abstractprojttn.jl")
 include("treetensornetworks/projttns/projttn.jl")
 include("treetensornetworks/projttns/projttnsum.jl")
diff --git a/src/apply.jl b/src/apply.jl
index 8548acc8..62bc5676 100644
--- a/src/apply.jl
+++ b/src/apply.jl
@@ -266,7 +266,13 @@ function ITensors.apply(
 end
 
 function ITensors.apply(
-  o⃗::Scaled, ψ::AbstractITensorNetwork; normalize=false, ortho=false, apply_kwargs...
+  o⃗::Scaled,
+  ψ::AbstractITensorNetwork;
+  cutoff=nothing,
+  maxdim=nothing,
+  normalize=false,
+  ortho=false,
+  apply_kwargs...,
 )
   return maybe_real(Ops.coefficient(o⃗)) *
          apply(Ops.argument(o⃗), ψ; cutoff, maxdim, normalize, ortho, apply_kwargs...)
diff --git a/src/treetensornetworks/opsum_to_ttn/matelem.jl b/src/treetensornetworks/opsum_to_ttn/matelem.jl
new file mode 100644
index 00000000..d1b98dc0
--- /dev/null
+++ b/src/treetensornetworks/opsum_to_ttn/matelem.jl
@@ -0,0 +1,41 @@
+
+#
+# MatElem
+#
+
+struct MatElem{T}
+  row::Int
+  col::Int
+  val::T
+end
+
+#function Base.show(io::IO,m::MatElem)
+#  print(io,"($(m.row),$(m.col),$(m.val))")
+#end
+
+function toMatrix(els::Vector{MatElem{T}})::Matrix{T} where {T}
+  nr = 0
+  nc = 0
+  for el in els
+    nr = max(nr, el.row)
+    nc = max(nc, el.col)
+  end
+  M = zeros(T, nr, nc)
+  for el in els
+    M[el.row, el.col] = el.val
+  end
+  return M
+end
+
+function Base.:(==)(m1::MatElem{T}, m2::MatElem{T})::Bool where {T}
+  return (m1.row == m2.row && m1.col == m2.col && m1.val == m2.val)
+end
+
+function Base.isless(m1::MatElem{T}, m2::MatElem{T})::Bool where {T}
+  if m1.row != m2.row
+    return m1.row < m2.row
+  elseif m1.col != m2.col
+    return m1.col < m2.col
+  end
+  return m1.val < m2.val
+end
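+
+# A minimal usage sketch (hypothetical values): `toMatrix` densifies a list of
+# sparse entries into a matrix sized by the largest row and column indices:
+#
+#   els = [MatElem(1, 1, 0.5), MatElem(2, 3, -1.0)]
+#   M = toMatrix(els)  # 2×3 Matrix{Float64}; M[1, 1] == 0.5, M[2, 3] == -1.0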
diff --git a/src/treetensornetworks/opsum_to_ttn.jl b/src/treetensornetworks/opsum_to_ttn/opsum_to_ttn.jl
similarity index 50%
rename from src/treetensornetworks/opsum_to_ttn.jl
rename to src/treetensornetworks/opsum_to_ttn/opsum_to_ttn.jl
index c54d3b01..a21eb522 100644
--- a/src/treetensornetworks/opsum_to_ttn.jl
+++ b/src/treetensornetworks/opsum_to_ttn/opsum_to_ttn.jl
@@ -1,15 +1,14 @@
+#using FillArrays: OneElement
+#using DataGraphs: DataGraph
 using Graphs: degree, is_tree
-using ITensors: flux, has_fermion_string, itensor, ops, removeqns, space, val
-using ITensors.ITensorMPS: ITensorMPS, cutoff, linkdims, truncate!
+using ITensors: flux, has_fermion_string, itensor, ops, removeqns, space, truncate!, val
 using ITensors.LazyApply: Prod, Sum, coefficient
 using ITensors.NDTensors: Block, blockdim, maxdim, nblocks, nnzblocks
-using ITensors.Ops: Op, OpSum
+using ITensors.Ops: argument, coefficient, Op, OpSum, name, params, site, terms, which_op
 using NamedGraphs.GraphsExtensions:
   GraphsExtensions, boundary_edges, degrees, is_leaf_vertex, vertex_path
 using StaticArrays: MVector
 
-# convert ITensors.OpSum to TreeTensorNetwork
-
 #
 # Utility methods
 #
@@ -32,6 +31,14 @@ end
 #
 # Tree adaptations of functionalities in ITensors.jl/src/physics/autompo/opsum_to_mpo.jl
 #
 
+function determine_coefficient_type(terms)
+  isempty(terms) && return Float64
+  if all(t -> isreal(coefficient(t)), terms)
+    return real(typeof(coefficient(first(terms))))
+  end
+  return typeof(coefficient(first(terms)))
+end
+
 """
     ttn_svd(os::OpSum, sites::IndsNetwork, root_vertex, kwargs...)
@@ -40,230 +47,243 @@
 Hamiltonian, compressing shared interaction channels.
 """
 function ttn_svd(os::OpSum, sites::IndsNetwork, root_vertex; kwargs...)
   # Function barrier to improve type stability
-  coefficient_type = ITensorMPS.determineValType(terms(os))
+  coefficient_type = determine_coefficient_type(terms(os))
   return ttn_svd(coefficient_type, os, sites, root_vertex; kwargs...)
 end
 
-function ttn_svd(
-  coefficient_type::Type{<:Number},
-  os::OpSum,
-  sites0::IndsNetwork,
-  root_vertex;
-  mindim::Int=1,
-  maxdim::Int=typemax(Int),
-  cutoff=eps(real(coefficient_type)) * 10,
-)
-  linkdir_ref = ITensors.In # safe to always use autofermion default here
-
-  sites = copy(sites0) # copy because of modification to handle internal indices
-  edgetype_sites = edgetype(sites)
-  vertextype_sites = vertextype(sites)
-  thishasqns = any(v -> hasqns(sites[v]), vertices(sites))
-
-  # traverse tree outwards from root vertex
-  vs = _default_vertex_ordering(sites, root_vertex)
-  # TODO: Add check in ttn_svd that the ordering matches that of find_index_in_tree, which is used in sorteachterm #fermion-sign!
-  # store edges in fixed ordering relative to root
-  es = _default_edge_ordering(sites, root_vertex)
-  # some things to keep track of
-  # rank of every TTN tensor in network
-  degrees = Dict(v => degree(sites, v) for v in vs)
-  # link isometries for SVD compression of TTN
-  Vs = Dict(e => Dict{QN,Matrix{coefficient_type}}() for e in es)
-  # map from term in Hamiltonian to incoming channel index for every edge
-  inmaps = Dict{Pair{edgetype_sites,QN},Dict{Vector{Op},Int}}()
-  # map from term in Hamiltonian to outgoing channel index for every edge
-  outmaps = Dict{Pair{edgetype_sites,QN},Dict{Vector{Op},Int}}()
-
-  op_cache = Dict{Pair{String,vertextype_sites},ITensor}()
-
-  function calc_qn(term::Vector{Op})
-    q = QN()
-    for st in term
-      op_tensor = get(op_cache, ITensors.which_op(st) => ITensors.site(st), nothing)
-      if op_tensor === nothing
-        op_tensor = op(
-          sites[ITensors.site(st)], ITensors.which_op(st); ITensors.params(st)...
-        )
-        op_cache[ITensors.which_op(st) => ITensors.site(st)] = op_tensor
-      end
-      if !isnothing(flux(op_tensor))
-        q += flux(op_tensor)
-      end
+# TODO: should be equivalent to `sort!(v); unique!(v)`; however,
+# Base.unique! is giving incorrect results for data involving the
+# Prod type from LazyApply. This could be a combination of Base.unique!
+# not strictly relying on isequal combined with an incorrect
+# implementation of isequal/isless for Prod.
+function _sort_unique!(v::Vector)
+  N = length(v)
+  (N == 0) && return nothing
+  sort!(v)
+  n = 1
+  u = 2
+  while u <= N
+    while u < N && v[u] == v[n]
+      u += 1
+    end
+    if v[u] != v[n]
+      v[n + 1] = v[u]
+      n += 1
     end
-    return q
+    u += 1
   end
+  resize!(v, n)
+  return nothing
+end
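+
+# A quick illustration (hypothetical input): `v = [3, 1, 3, 2]` becomes
+# `[1, 2, 3]` in place, the same result as `sort!(v); unique!(v)` but without
+# relying on `Base.unique!`.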
 
-  Hflux = -calc_qn(terms(first(terms(os))))
-  # insert dummy indices on internal vertices, these will not show up in the final tensor
-  is_internal = Dict{vertextype_sites,Bool}()
-  for v in vs
-    is_internal[v] = isempty(sites[v])
-    if isempty(sites[v])
-      # FIXME: This logic only works for trivial flux, breaks for nonzero flux
-      # TODO: add assert or fix and add test!
-      sites[v] = [Index(Hflux => 1)]
-    end
-  end
-  # bond coefficients for incoming edge channels
+
+function pos_in_link!(linkmap::Dict, k)
+  isempty(k) && return -1
+  pos = get(linkmap, k, -1)
+  if pos == -1
+    pos = length(linkmap) + 1
+    linkmap[k] = pos
   end
+  return pos
+end
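+
+# A small usage sketch (hypothetical keys): starting from an empty `linkmap`,
+# `pos_in_link!(linkmap, [Op("Sz", 1)])` returns channel 1 and
+# `pos_in_link!(linkmap, [Op("S+", 2)])` returns channel 2; repeating a key
+# returns its existing position, and an empty key returns -1.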
+
+function make_symbolic_ttn(
+  coefficient_type,
+  opsum::OpSum,
+  sites::IndsNetwork;
+  ordered_verts,
+  ordered_edges,
+  root_vertex,
+  term_qn_map,
+)
+  inmaps = Dict{Pair{edgetype(sites),QN},Dict{Vector{Op},Int}}()
+  outmaps = Dict{Pair{edgetype(sites),QN},Dict{Vector{Op},Int}}()
+
+  #g = underlying_graph(sites)
+
+  # Bond coefficients for incoming edge channels.
+  # These become the "M" coefficient matrices that get SVD'd.
   inbond_coefs = Dict(
-    e => Dict{QN,Vector{ITensorMPS.MatElem{coefficient_type}}}() for e in es
+    e => Dict{QN,Vector{MatElem{coefficient_type}}}() for e in ordered_edges
   )
-  # list of terms for which the coefficient has been added to a site factor
+
+  # List of terms for which the coefficient has been added to a site factor
   site_coef_done = Prod{Op}[]
-  # temporary symbolic representation of TTN Hamiltonian
-  tempTTN = Dict(v => QNArrElem{Scaled{coefficient_type,Prod{Op}},degrees[v]}[] for v in vs)
-  # build compressed finite state machine representation
-  for v in vs
-    # for every vertex, find all edges that contain this vertex
-    edges = align_and_reorder_edges(incident_edges(sites, v), es)
+
+  # Temporary symbolic representation of TTN Hamiltonian
+  symbolic_ttn = Dict(
+    v => QNArrElem{Scaled{coefficient_type,Prod{Op}},degree(sites, v)}[] for
+    v in ordered_verts
+  )
+
+  # Build compressed finite state machine representation (symbolic_ttn)
+  for v in ordered_verts
+    v_degree = degree(sites, v)
+    # For every vertex, find all edges that contain this vertex
+    # (align_and_reorder_edges makes the direction and ordering of the
+    # incident edges match that of ordered_edges)
+    edges = align_and_reorder_edges(incident_edges(sites, v), ordered_edges)
 
-    # use the corresponding ordering as index order for tensor elements at this site
+    # Use the corresponding ordering as index order for tensor elements at this site
     dim_in = findfirst(e -> dst(e) == v, edges)
-    edge_in = (isnothing(dim_in) ? [] : edges[dim_in])
+    edge_in = (isnothing(dim_in) ? nothing : edges[dim_in])
     dims_out = findall(e -> src(e) == v, edges)
     edges_out = edges[dims_out]
 
-    # for every site w except v, determine the incident edge to v that lies
-    # in the edge_path(w,v)
+    # For every site v' except v, determine the incident edge to v
+    # that lies in the edge_path(v',v)
+    # TODO: better way to make which_incident_edge below?
     subgraphs = split_at_vertex(sites, v)
-    _boundary_edges = align_edges(
-      [only(boundary_edges(underlying_graph(sites), subgraph)) for subgraph in subgraphs],
-      edges,
-    )
+    _boundary_edges = [
+      only(boundary_edges(underlying_graph(sites), subgraph)) for subgraph in subgraphs
+    ]
+    _boundary_edges = align_edges(_boundary_edges, edges)
     which_incident_edge = Dict(
       Iterators.flatten([
         subgraphs[i] .=> ((_boundary_edges[i]),) for i in eachindex(subgraphs)
       ]),
    )
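+    # For instance (hypothetical labels), on a path graph 1 - 2 - 3 with v = 2,
+    # vertex 1 maps to the edge joining 1 and 2 and vertex 3 maps to the edge
+    # joining 2 and 3: each remote vertex maps to the incident edge of v
+    # through which it is reached.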
 
-    # sanity check, leaves only have single incoming or outgoing edge
+    # Sanity check, leaves only have a single incoming or outgoing edge
     @assert !isempty(dims_out) || !isnothing(dim_in)
     (isempty(dims_out) || isnothing(dim_in)) && @assert is_leaf_vertex(sites, v)
 
-    for term in os
-      # loop over OpSum and pick out terms that act on current vertex
-
-      factors = ITensors.terms(term)
-      if v in ITensors.site.(factors)
+    for term in opsum
+      # Loop over OpSum and pick out terms that act on current vertex
+      ops = ITensors.terms(term)
+      if v in ITensors.site.(ops)
         crosses_vertex = true
       else
         crosses_vertex =
-          !isone(
-            length(Set([which_incident_edge[site] for site in ITensors.site.(factors)]))
-          )
+          !isone(length(Set([which_incident_edge[site] for site in site.(ops)])))
       end
-      #if term doesn't cross vertex, skip it
+      # If term doesn't cross vertex, skip it
      crosses_vertex || continue
 
-      # filter out factor that acts on current vertex
-      onsite = filter(t -> (ITensors.site(t) == v), factors)
-      not_onsite_factors = setdiff(factors, onsite)
+      # Filter out ops that act on the current vertex
+      onsite_ops = filter(t -> (site(t) == v), ops)
+      non_onsite_ops = setdiff(ops, onsite_ops)
 
-      # filter out factors that come in from the direction of the incoming edge
-      incoming = filter(
-        t -> which_incident_edge[ITensors.site(t)] == edge_in, not_onsite_factors
-      )
+      # Filter out ops that come in from the direction of the incoming edge
+      incoming_ops = filter(t -> which_incident_edge[site(t)] == edge_in, non_onsite_ops)
 
-      # also store all non-incoming factors in standard order, used for channel merging
-      not_incoming = filter(
-        t -> (ITensors.site(t) == v) || which_incident_edge[ITensors.site(t)] != edge_in,
-        factors,
+      # Also store all non-incoming ops in standard order, used for channel merging
+      non_incoming_ops = filter(
+        t -> (site(t) == v) || which_incident_edge[site(t)] != edge_in, ops
       )
 
-      # for every outgoing edge, filter out factors that go out along that edge
-      outgoing = Dict(
-        e => filter(t -> which_incident_edge[ITensors.site(t)] == e, not_onsite_factors) for
+      # For every outgoing edge, filter out ops that go out along that edge
+      outgoing_ops = Dict(
+        e => filter(t -> which_incident_edge[site(t)] == e, non_onsite_ops) for
         e in edges_out
       )
 
-      # compute QNs
-      incoming_qn = calc_qn(incoming)
-      not_incoming_qn = calc_qn(not_incoming)
-      outgoing_qns = Dict(e => calc_qn(outgoing[e]) for e in edges_out)
-      site_qn = calc_qn(onsite)
+      # Compute QNs
+      incoming_qn = term_qn_map(incoming_ops)
+      non_incoming_qn = term_qn_map(non_incoming_ops)
+      site_qn = term_qn_map(onsite_ops)
 
-      # initialize QNArrayElement indices and quantum numbers
-      T_inds = MVector{degrees[v]}(fill(-1, degrees[v]))
-      T_qns = MVector{degrees[v]}(fill(QN(), degrees[v]))
+      # Initialize QNArrayElement indices and quantum numbers
+      T_inds = MVector{v_degree}(fill(-1, v_degree))
+      T_qns = MVector{v_degree}(fill(QN(), v_degree))
       # initialize ArrayElement indices for inbond_coefs
       bond_row = -1
      bond_col = -1
-      if !isempty(incoming)
-        # get the correct map from edge=>QN to term and channel
-        # this checks if term exists on edge=>QN ( otherwise insert it) and returns it's index
-        coutmap = get!(outmaps, edge_in => not_incoming_qn, Dict{Vector{Op},Int}())
+      if !isempty(incoming_ops)
+        # Get the correct map from edge=>QN to term and channel.
+        # This checks if term exists on edge=>QN (otherwise insert it) and returns its index.
+        coutmap = get!(outmaps, edge_in => non_incoming_qn, Dict{Vector{Op},Int}())
         cinmap = get!(inmaps, edge_in => -incoming_qn, Dict{Vector{Op},Int}())
-        bond_row = ITensorMPS.posInLink!(cinmap, incoming)
-        bond_col = ITensorMPS.posInLink!(coutmap, not_incoming) # get incoming channel
-        bond_coef = convert(coefficient_type, ITensors.coefficient(term))
+        bond_row = pos_in_link!(cinmap, incoming_ops)
+        bond_col = pos_in_link!(coutmap, non_incoming_ops) # get incoming channel
+        bond_coef = convert(coefficient_type, coefficient(term))
         q_inbond_coefs = get!(
-          inbond_coefs[edge_in], incoming_qn, ITensorMPS.MatElem{coefficient_type}[]
+          inbond_coefs[edge_in], incoming_qn, MatElem{coefficient_type}[]
         )
-        push!(q_inbond_coefs, ITensorMPS.MatElem(bond_row, bond_col, bond_coef))
+        push!(q_inbond_coefs, MatElem(bond_row, bond_col, bond_coef))
         T_inds[dim_in] = bond_col
         T_qns[dim_in] = -incoming_qn
       end
       for dout in dims_out
-        coutmap = get!(
-          outmaps, edges[dout] => outgoing_qns[edges[dout]], Dict{Vector{Op},Int}()
-        )
-        # add outgoing channel
-        T_inds[dout] = ITensorMPS.posInLink!(coutmap, outgoing[edges[dout]])
-        T_qns[dout] = outgoing_qns[edges[dout]]
+        out_edge = edges[dout]
+        out_op = outgoing_ops[out_edge]
+        coutmap = get!(outmaps, out_edge => term_qn_map(out_op), Dict{Vector{Op},Int}())
+        # Add outgoing channel
+        T_inds[dout] = pos_in_link!(coutmap, out_op)
+        T_qns[dout] = term_qn_map(out_op)
       end
-      # if term starts at this site, add its coefficient as a site factor
+      # If term starts at this site, add its coefficient as a site factor
       site_coef = one(coefficient_type)
-      if (isnothing(dim_in) || T_inds[dim_in] == -1) &&
-        ITensors.argument(term) ∉ site_coef_done
-        site_coef = ITensors.coefficient(term)
-        # required since ITensors.coefficient seems to return ComplexF64 even if coefficient_type is determined to be real
-        site_coef = convert(coefficient_type, site_coef)
-        push!(site_coef_done, ITensors.argument(term))
+      if (isnothing(dim_in) || T_inds[dim_in] == -1) && argument(term) ∉ site_coef_done
+        site_coef = convert(coefficient_type, coefficient(term))
+        push!(site_coef_done, argument(term))
       end
-      # add onsite identity for interactions passing through vertex
-      if isempty(onsite)
-        if !ITensors.using_auto_fermion() && isfermionic(incoming, sites)
-          error("No verified fermion support for automatic TTN constructor!")
+      # Add onsite identity for interactions passing through vertex
+      if isempty(onsite_ops)
+        if !ITensors.using_auto_fermion() && isfermionic(incoming_ops, sites)
+          push!(onsite_ops, Op("F", v))
         else
-          push!(onsite, Op("Id", v))
+          push!(onsite_ops, Op("Id", v))
        end
      end
-      # save indices and value of symbolic tensor entry
-      el = QNArrElem(T_qns, T_inds, site_coef * Prod(onsite))
-
-      push!(tempTTN[v], el)
+      # Save indices and value of symbolic tensor entry
+      el = QNArrElem(T_qns, T_inds, site_coef * Prod(onsite_ops))
+      push!(symbolic_ttn[v], el)
    end
+    _sort_unique!(symbolic_ttn[v])
+  end
+
+  return symbolic_ttn, inbond_coefs
+end
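+
+# The per-QN-sector compression step performed by `svd_bond_coefs` below,
+# sketched on a hypothetical coefficient matrix `M` built with `toMatrix`:
+#
+#   U, S, V = svd(M)
+#   P = S .^ 2
+#   truncate!(P; cutoff)            # drop numerically negligible channels
+#   V_isometry = V[:, 1:length(P)]  # compressed channel basis for this edge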
 
-    ITensorMPS.remove_dups!(tempTTN[v])
-    # manual truncation: isometry on incoming edge
+function svd_bond_coefs(
+  coefficient_type, sites, inbond_coefs; ordered_verts, ordered_edges, kws...
+)
+  Vs = Dict(e => Dict{QN,Matrix{coefficient_type}}() for e in ordered_edges)
+  for v in ordered_verts
+    edges = align_and_reorder_edges(incident_edges(sites, v), ordered_edges)
+    dim_in = findfirst(e -> dst(e) == v, edges)
     if !isnothing(dim_in) && !isempty(inbond_coefs[edges[dim_in]])
       for (q, mat) in inbond_coefs[edges[dim_in]]
-        M = ITensorMPS.toMatrix(mat)
+        M = toMatrix(mat)
         U, S, V = svd(M)
         P = S .^ 2
-        truncate!(P; maxdim, cutoff, mindim)
+        truncate!(P; kws...)
         tdim = length(P)
         nc = size(M, 2)
         Vs[edges[dim_in]][q] = Matrix{coefficient_type}(V[1:nc, 1:tdim])
       end
     end
   end
+  return Vs
+end
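+
+# Each link index built in `compress_ttn` below has the block layout
+# [QN() => 1, operator channels..., Hflux => 1]: a starting-identity slot,
+# one slot per compressed interaction channel, and an ending-identity slot.
+# For instance (hypothetical sectors), `Index(QN() => 1, QN("Sz", 2) => 3,
+# QN() => 1; tags="l", dir=ITensors.In)` would be a 5-dimensional link index.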
 
-  # compress this tempTTN representation into dense form
-
-  link_space = Dict{edgetype_sites,Index}()
-  for e in es
-    qi = Vector{Pair{QN,Int}}()
-    push!(qi, QN() => 1)
-    for (q, Vq) in Vs[e]
-      cols = size(Vq, 2)
-      push!(qi, q => cols)
+function compress_ttn(
+  coefficient_type, sites0, Hflux, symbolic_ttn, Vs; ordered_verts, ordered_edges
+)
+  # Insert dummy indices on internal vertices, these will not show up in the final tensor
+  # TODO: come up with a better solution for this
+  sites = copy(sites0)
+  is_internal = Dict{vertextype(sites),Bool}()
+  for v in ordered_verts
+    is_internal[v] = isempty(sites[v])
+    if isempty(sites[v])
+      # FIXME: This logic only works for trivial flux, breaks for nonzero flux
+      # TODO: add assert or fix and add test!
+      sites[v] = [Index(Hflux => 1)]
     end
-    push!(qi, Hflux => 1)
-    link_space[e] = Index(qi...; tags=edge_tag(e), dir=linkdir_ref)
+  end
+
+  linkdir_ref = ITensors.In # safe to always use autofermion default here
+  # Compress this symbolic_ttn representation into dense form
+  thishasqns = any(v -> hasqns(sites[v]), vertices(sites))
+
+  link_space = Dict{edgetype(sites),Index}()
+  for e in ordered_edges
+    operator_blocks = [q => size(Vq, 2) for (q, Vq) in Vs[e]]
+    link_space[e] = Index(
+      QN() => 1, operator_blocks..., Hflux => 1; tags=edge_tag(e), dir=linkdir_ref
+    )
   end
 
   H = ttn(sites0) # initialize TTN without the dummy indices added
@@ -275,10 +295,11 @@ end
   qnblockdim(i::Index, q::QN) = blockdim(i, qnblock(i, q))
 
-  for v in vs
-    # redo the whole thing like before
+  for v in ordered_verts
+    v_degree = degree(sites, v)
+    # Redo the whole thing like before
     # TODO: use neighborhood instead of going through all edges, see above
-    edges = align_and_reorder_edges(incident_edges(sites, v), es)
+    edges = align_and_reorder_edges(incident_edges(sites, v), ordered_edges)
     dim_in = findfirst(e -> dst(e) == v, edges)
     dims_out = findall(e -> src(e) == v, edges)
     # slice isometries at this vertex
@@ -286,20 +307,20 @@
     linkinds = [link_space[e] for e in edges]
 
     # construct blocks
-    blocks = Dict{Tuple{Block{degrees[v]},Vector{Op}},Array{coefficient_type,degrees[v]}}()
-    for el in tempTTN[v]
+    blocks = Dict{Tuple{Block{v_degree},Vector{Op}},Array{coefficient_type,v_degree}}()
+    for el in symbolic_ttn[v]
       t = el.val
       (abs(coefficient(t)) > eps(real(coefficient_type))) || continue
-      block_helper_inds = fill(-1, degrees[v]) # we manipulate T_inds later, and loose track of ending/starting information, so keep track of it here
+      block_helper_inds = fill(-1, v_degree) # we manipulate T_inds later, and lose track of ending/starting information, so keep track of it here
      T_inds = el.idxs
      T_qns = el.qn_idxs
      ct = convert(coefficient_type, coefficient(t))
      sublinkdims = [
-        (T_inds[i] == -1 ? 1 : qnblockdim(linkinds[i], T_qns[i])) for i in 1:degrees[v]
+        (T_inds[i] == -1 ? 1 : qnblockdim(linkinds[i], T_qns[i])) for i in 1:v_degree
      ]
      zero_arr() = zeros(coefficient_type, sublinkdims...)
-      terminal_dims = findall(d -> T_inds[d] == -1, 1:degrees[v]) # directions in which term starts or ends
-      normal_dims = findall(d -> T_inds[d] ≠ -1, 1:degrees[v]) # normal dimensions, do truncation thingies
+      terminal_dims = findall(d -> T_inds[d] == -1, 1:v_degree) # directions in which term starts or ends
+      normal_dims = findall(d -> T_inds[d] ≠ -1, 1:v_degree) # normal dimensions, do truncation thingies
      T_inds[terminal_dims] .= 1 # start in channel 1 ###??
      block_helper_inds[terminal_dims] .= 1
      for dout in filter(d -> d ∈ terminal_dims, dims_out)
@@ -338,10 +359,9 @@
       H[v] = ITensor()
 
-      # make the final arrow directions
-      _linkinds = copy(linkinds)
+      # Set the final arrow directions
       if !isnothing(dim_in)
-        _linkinds[dim_in] = dag(_linkinds[dim_in])
+        linkinds[dim_in] = dag(linkinds[dim_in])
      end
 
      for ((b, q_op), m) in blocks
@@ -363,7 +383,7 @@
          end
        end
      end
-      T = ITensors.BlockSparseTensor(coefficient_type, [b], _linkinds)
+      T = ITensors.BlockSparseTensor(coefficient_type, [b], linkinds)
      T[b] .= m
      iT = itensor(T)
      if !thishasqns
@@ -386,7 +406,7 @@
      idT = zeros(coefficient_type, linkdims...)
      if isnothing(dim_in)
        # only one real starting identity
-        idT[ones(Int, degrees[v])...] = 1.0
+        idT[ones(Int, v_degree)...] = 1.0
      end
      # ending identities are a little more involved
      if !isnothing(dim_in)
@@ -403,7 +423,8 @@
          idT_end_inds[dout] = linkdims[dout]
        end
      end
-      T = itensor(idT, _linkinds)
+
+      T = itensor(idT, linkinds)
      if !thishasqns
        T = removeqns(T)
      end
@@ -413,10 +434,63 @@
      H[v] += T * ITensorNetworks.computeSiteProd(sites, Prod([(Op("Id", v))]))
    end
  end
-  return H
 end
 
+#
+# TermQNMap implements a function with an internal cache.
+# Calling it on a term determines that term's flux, avoiding rebuilding
+# the corresponding ITensor when it is already cached.
+# (Previously this logic lived in the function calc_qn.)
+#
+struct TermQNMap{V}
+  sites::IndsNetwork
+  op_cache::Dict{Pair{String,V},ITensor}
+  TermQNMap{V}(s) where {V} = new{V}(s, Dict{Pair{String,V},ITensor}())
+end
+
+function (t::TermQNMap)(term)
+  q = QN()
+  for st in term
+    op_tensor = get(t.op_cache, which_op(st) => site(st), nothing)
+    if op_tensor === nothing
+      op_tensor = op(t.sites[site(st)], which_op(st); params(st)...)
+      t.op_cache[which_op(st) => site(st)] = op_tensor
+    end
+    if !isnothing(flux(op_tensor))
+      q += flux(op_tensor)
+    end
+  end
+  return q
+end
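+
+# A small usage sketch (hypothetical sites network with integer vertices):
+#
+#   term_qn_map = TermQNMap{Int}(sites)
+#   q = term_qn_map([Op("S+", 1)])  # flux of the term; the "S+" ITensor is
+#                                   # built once and reused via `op_cache`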
+
+function ttn_svd(
+  coefficient_type::Type{<:Number}, os::OpSum, sites::IndsNetwork, root_vertex; kws...
+)
+  term_qn_map = TermQNMap{vertextype(sites)}(sites)
+
+  # Traverse tree outwards from root vertex
+  ordered_verts = _default_vertex_ordering(sites, root_vertex)
+  # Store edges in fixed ordering relative to root
+  ordered_edges = _default_edge_ordering(sites, root_vertex)
+
+  symbolic_ttn, inbond_coefs = make_symbolic_ttn(
+    coefficient_type, os, sites; ordered_verts, ordered_edges, root_vertex, term_qn_map
+  )
+
+  Vs = svd_bond_coefs(
+    coefficient_type, sites, inbond_coefs; ordered_verts, ordered_edges, kws...
+  )
+
+  Hflux = -term_qn_map(terms(first(terms(os))))
+
+  T = compress_ttn(
+    coefficient_type, sites, Hflux, symbolic_ttn, Vs; ordered_verts, ordered_edges
+  )
+
+  return T
+end
+
 #
 # Tree adaptations of functionalities in ITensors.jl/src/physics/autompo/opsum_to_mpo_generic.jl
 #
@@ -428,7 +502,7 @@
 function isfermionic(t::Vector{Op}, sites::IndsNetwork{V,<:Index}) where {V}
   p = +1
   for op in t
-    if has_fermion_string(ITensors.name(op), only(sites[ITensors.site(op)]))
+    if has_fermion_string(name(op), only(sites[site(op)]))
      p *= -1
    end
  end
@@ -437,11 +511,11 @@ end
 
 # only(site(ops[1])) in ITensors breaks for Tuple site labels, had to drop the only
 function computeSiteProd(sites::IndsNetwork{V,<:Index}, ops::Prod{Op})::ITensor where {V}
-  v = ITensors.site(ops[1])
-  T = op(sites[v], ITensors.which_op(ops[1]); ITensors.params(ops[1])...)
+  v = site(ops[1])
+  T = op(sites[v], which_op(ops[1]); params(ops[1])...)
   for j in 2:length(ops)
-    (ITensors.site(ops[j]) != v) && error("Mismatch of vertex labels in computeSiteProd")
-    opj = op(sites[v], ITensors.which_op(ops[j]); ITensors.params(ops[j])...)
+    (site(ops[j]) != v) && error("Mismatch of vertex labels in computeSiteProd")
+    opj = op(sites[v], which_op(ops[j]); params(ops[j])...)
    T = product(T, opj)
  end
  return T
@@ -455,66 +529,66 @@ function _default_edge_ordering(g::AbstractGraph, root_vertex)
   return reverse(reverse.(post_order_dfs_edges(g, root_vertex)))
 end
 
-# This is almost an exact copy of ITensors/src/opsum_to_mpo_generic:sorteachterm except for the site ordering being
-# given via find_index_in_tree
-# changed `isless_site` to use tree vertex ordering relative to root
-function sorteachterm(os::OpSum, sites::IndsNetwork{V,<:Index}, root_vertex::V) where {V}
-  os = copy(os)
-
-  # linear ordering of vertices in tree graph relative to chosen root, chosen outward from root
-  ordering = _default_vertex_ordering(sites, root_vertex)
-  site_positions = Dict(zip(ordering, 1:length(ordering)))
-  findpos(op::Op) = site_positions[ITensors.site(op)]
-  isless_site(o1::Op, o2::Op) = findpos(o1) < findpos(o2)
-  N = nv(sites)
-  for n in eachindex(os)
-    t = os[n]
-    Nt = length(t)
-
+function check_terms_support(os::OpSum, sites)
+  for t in os
     if !all(map(v -> has_vertex(sites, v), ITensors.sites(t)))
       error(
         "The OpSum contains a term $t that does not have support on the underlying graph."
       )
     end
+  end
+end
 
-    prevsite = N + 1 #keep track of whether we are switching
-    #to a new site to make sure F string
-    #is only placed at most once for each site
+# This code is very similar to the ITensorMPS sorteachterm in opsum_generic.jl
+function sorteachterm(os::OpSum, sites, root_vertex)
+  os = copy(os)
+
+  # Build the isless_site function to pass to sortperm below:
+  # + ordering = array of vertices ordered relative to chosen root, chosen outward from root
+  # + site_positions = map from vertex to where it is in ordering (inverse map of `ordering`)
+  ordering = _default_vertex_ordering(sites, root_vertex)
+  site_positions = Dict(zip(ordering, 1:length(ordering)))
+  isless_site(o1::Op, o2::Op) = site_positions[site(o1)] < site_positions[site(o2)]
+
+  N = nv(sites)
+  for j in eachindex(os)
+    t = os[j]
     # Sort operators in t by site order,
     # and keep the permutation used, perm, for analysis below
-    perm = Vector{Int}(undef, Nt)
-    sortperm!(perm, ITensors.terms(t); alg=InsertionSort, lt=isless_site)
+    Nt = length(t)
+    perm = sortperm(terms(t); alg=InsertionSort, lt=isless_site)
+    t = coefficient(t) * Prod(terms(t)[perm])
 
-    t = coefficient(t) * Prod(ITensors.terms(t)[perm])
+    # Everything below deals with fermionic operators:
 
     # Identify fermionic operators,
     # zeroing perm for bosonic operators,
     # and inserting string "F" operators
-    parity = +1
-    for n in Nt:-1:1
-      currsite = ITensors.site(t[n])
-      fermionic = has_fermion_string(
-        ITensors.which_op(t[n]), only(sites[ITensors.site(t[n])])
-      )
-      if !ITensors.using_auto_fermion() && (parity == -1) && (currsite < prevsite)
+    prevsite = typemax(Int) #keep track of whether we are switching to a new site
+    t_parity = +1
+    for n in reverse(1:Nt)
+      currsite = site(t[n])
+      fermionic = has_fermion_string(which_op(t[n]), only(sites[site(t[n])]))
+      if !ITensors.using_auto_fermion() && (t_parity == -1) && (currsite < prevsite)
         error("No verified fermion support for automatic TTN constructor!") # no verified support, just throw error
 
         # Put local piece of Jordan-Wigner string emanating
         # from fermionic operators to the right
         # (Remaining F operators will be put in by svdMPO)
-        terms(t)[n] = Op("$(ITensors.which_op(t[n])) * F", only(ITensors.site(t[n])))
+        terms(t)[n] = Op("$(which_op(t[n])) * F", only(site(t[n])))
      end
      prevsite = currsite
 
      if fermionic
-        parity = -parity
+        t_parity = -t_parity
      else
        # Ignore bosonic operators in perm
        # by zeroing corresponding entries
        perm[n] = 0
      end
    end
-    if parity == -1
+    if t_parity == -1
      error("Parity-odd fermionic terms not yet supported by AutoTTN")
    end
@@ -523,11 +597,33 @@
    # and account for anti-commuting, fermionic operators
    # during above sort; put resulting sign into coef
    t *= ITensors.parity_sign(perm)
-    ITensors.terms(os)[n] = t
+    terms(os)[j] = t
  end
  return os
end
+
+function sortmergeterms(os::OpSum{C}) where {C}
+  os_sorted_terms = sort(terms(os))
+  os = Sum(os_sorted_terms)
+  # Merge (add) terms with same operators
+  merge_os_data = Scaled{C,Prod{Op}}[]
+  last_term = copy(os[1])
+  last_term_coef = coefficient(last_term)
+  for n in 2:length(os)
+    if argument(os[n]) == argument(last_term)
+      last_term_coef += coefficient(os[n])
+      last_term = last_term_coef * argument(last_term)
+    else
+      push!(merge_os_data, last_term)
+      last_term = os[n]
+      last_term_coef = coefficient(last_term)
+    end
+  end
+  push!(merge_os_data, last_term)
+  os = Sum(merge_os_data)
+  return os
+end
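+
+# For example (hypothetical terms): an OpSum holding both `1.5 * Op("Sz", 1)`
+# and `0.5 * Op("Sz", 1)` is merged into the single term `2.0 * Op("Sz", 1)`,
+# while products of distinct operators are kept as separate terms.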
+
 """
     ttn(os::OpSum, sites::IndsNetwork{<:Index}; kwargs...)
     ttn(eltype::Type{<:Number}, os::OpSum, sites::IndsNetwork{<:Index}; kwargs...)
 
@@ -540,13 +636,13 @@ function ttn(
   root_vertex=GraphsExtensions.default_root_vertex(sites),
   kwargs...,
 )
-  length(ITensors.terms(os)) == 0 && error("OpSum has no terms")
+  length(terms(os)) == 0 && error("OpSum has no terms")
   is_tree(sites) || error("Site index graph must be a tree.")
   is_leaf_vertex(sites, root_vertex) || error("Tree root must be a leaf vertex.")
-
-  os = deepcopy(os)
+  check_terms_support(os, sites)
+  os = deepcopy(os) # TODO: do we need this? sorteachterm copies `os` again
   os = sorteachterm(os, sites, root_vertex)
-  os = ITensorMPS.sortmergeterms(os)
+  os = sortmergeterms(os)
   return ttn_svd(os, sites, root_vertex; kwargs...)
 end
@@ -584,66 +680,11 @@ function ttn(eltype::Type{<:Number}, os, sites::IndsNetwork; kwargs...)
   return NDTensors.convert_scalartype(eltype, ttn(os, sites; kwargs...))
 end
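+
+# A minimal end-to-end sketch (graph/model names are assumptions, not part of
+# this change):
+#
+#   using NamedGraphs.NamedGraphGenerators: named_comb_tree
+#   using ITensorNetworks: siteinds, ttn
+#   using ITensorNetworks.ModelHamiltonians: heisenberg
+#
+#   g = named_comb_tree((3, 2))
+#   s = siteinds("S=1/2", g)
+#   H = ttn(heisenberg(g), s)  # builds the TTN Hamiltonian via ttn_svd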
 
-#
-# Tree adaptation of functionalities in ITensors.jl/src/physics/autompo/matelem.jl
-#
-
-#################################
-# ArrElem (simple sparse array) #
-#################################
-
-struct ArrElem{T,N}
-  idxs::MVector{N,Int}
-  val::T
-end
-
-function Base.:(==)(a1::ArrElem{T,N}, a2::ArrElem{T,N})::Bool where {T,N}
-  return (a1.idxs == a2.idxs && a1.val == a2.val)
-end
-
-function Base.isless(a1::ArrElem{T,N}, a2::ArrElem{T,N})::Bool where {T,N}
-  for n in 1:N
-    if a1.idxs[n] != a2.idxs[n]
-      return a1.idxs[n] < a2.idxs[n]
-    end
-  end
-  return a1.val < a2.val
-end
-
-#####################################
-# QNArrElem (sparse array with QNs) #
-#####################################
-
-struct QNArrElem{T,N}
-  qn_idxs::MVector{N,QN}
-  idxs::MVector{N,Int}
-  val::T
-end
-
-function Base.:(==)(a1::QNArrElem{T,N}, a2::QNArrElem{T,N})::Bool where {T,N}
-  return (a1.idxs == a2.idxs && a1.val == a2.val && a1.qn_idxs == a2.qn_idxs)
-end
-
-function Base.isless(a1::QNArrElem{T,N}, a2::QNArrElem{T,N})::Bool where {T,N}
-  ###two separate loops s.t. the MPS case reduces to the ITensors Implementation of QNMatElem
-  for n in 1:N
-    if a1.qn_idxs[n] != a2.qn_idxs[n]
-      return a1.qn_idxs[n] < a2.qn_idxs[n]
-    end
-  end
-  for n in 1:N
-    if a1.idxs[n] != a2.idxs[n]
-      return a1.idxs[n] < a2.idxs[n]
-    end
-  end
-  return a1.val < a2.val
-end
-
 #
 # Sparse finite state machine construction
 #
 
-# allow sparse arrays with ITensors.Sum entries
+# Allow sparse arrays with ITensors.Sum entries
 function Base.zero(::Type{S}) where {S<:Sum}
   return S()
 end
diff --git a/src/treetensornetworks/opsum_to_ttn/qnarrelem.jl b/src/treetensornetworks/opsum_to_ttn/qnarrelem.jl
new file mode 100644
index 00000000..4f765f07
--- /dev/null
+++ b/src/treetensornetworks/opsum_to_ttn/qnarrelem.jl
@@ -0,0 +1,30 @@
+using StaticArrays: MVector
+
+#####################################
+# QNArrElem (sparse array with QNs) #
+#####################################
+
+struct QNArrElem{T,N}
+  qn_idxs::MVector{N,QN}
+  idxs::MVector{N,Int}
+  val::T
+end
+
+function Base.:(==)(a1::QNArrElem{T,N}, a2::QNArrElem{T,N})::Bool where {T,N}
+  return (a1.idxs == a2.idxs && a1.val == a2.val && a1.qn_idxs == a2.qn_idxs)
+end
+
+function Base.isless(a1::QNArrElem{T,N}, a2::QNArrElem{T,N})::Bool where {T,N}
+  # Two separate loops so that the MPS case reduces to the ITensors implementation of QNMatElem
+  for n in 1:N
+    if a1.qn_idxs[n] != a2.qn_idxs[n]
+      return a1.qn_idxs[n] < a2.qn_idxs[n]
+    end
+  end
+  for n in 1:N
+    if a1.idxs[n] != a2.idxs[n]
+      return a1.idxs[n] < a2.idxs[n]
+    end
+  end
+  return a1.val < a2.val
+end
diff --git a/test/test_treetensornetworks/test_solvers/test_dmrg.jl b/test/test_treetensornetworks/test_solvers/test_dmrg.jl
index 2e9f3f6d..d4ca53f6 100644
--- a/test/test_treetensornetworks/test_solvers/test_dmrg.jl
+++ b/test/test_treetensornetworks/test_solvers/test_dmrg.jl
@@ -3,19 +3,9 @@ using DataGraphs: edge_data, vertex_data
 using Dictionaries: Dictionary
 using Graphs: nv, vertices
 using ITensors: ITensors
-using ITensors.ITensorMPS: ITensorMPS
+using ITensors.ITensorMPS: ITensorMPS, linkdims
 using ITensorNetworks:
-  ITensorNetworks,
-  OpSum,
-  ttn,
-  apply,
-  dmrg,
-  inner,
-  linkdims,
-  mpo,
-  random_mps,
-  random_ttn,
-  siteinds
+  ITensorNetworks, OpSum, ttn, apply, dmrg, inner, mpo, random_mps, random_ttn, siteinds
 using ITensorNetworks.ITensorsExtensions: replace_vertices
 using ITensorNetworks.ModelHamiltonians: ModelHamiltonians
 using KrylovKit: eigsolve
diff --git a/test/test_treetensornetworks/test_solvers/test_dmrg_x.jl b/test/test_treetensornetworks/test_solvers/test_dmrg_x.jl
index c684fc36..467d116c 100644
--- a/test/test_treetensornetworks/test_solvers/test_dmrg_x.jl
+++ b/test/test_treetensornetworks/test_solvers/test_dmrg_x.jl
@@ -1,8 +1,9 @@
 @eval module $(gensym())
 using Dictionaries: Dictionary
 using Graphs: nv, vertices
+using ITensors.ITensorMPS: linkdims
 using ITensorNetworks:
-  OpSum, ttn, apply, contract, dmrg_x, inner, linkdims, mpo, mps, random_mps, siteinds
+  OpSum, ttn, apply, contract, dmrg_x, inner, mpo, mps, random_mps, siteinds
 using ITensorNetworks.ModelHamiltonians: ModelHamiltonians
 using ITensors: @disable_warn_order, array, dag, onehot, uniqueind
 using LinearAlgebra: eigen, normalize