[ITensors] Optimize directsum and QN Array to ITensor conversion (
mtfishman authored Aug 30, 2023
1 parent b54e8d7 commit 20aae8c
Showing 5 changed files with 86 additions and 33 deletions.
14 changes: 7 additions & 7 deletions src/global_variables.jl
@@ -188,7 +188,9 @@ end
# Turn debug checks on and off
#

using_debug_checks() = false
const _using_debug_checks = Ref{Bool}(false)

using_debug_checks() = _using_debug_checks[]

macro debug_check(ex)
quote
@@ -199,15 +201,13 @@ macro debug_check(ex)
end

function enable_debug_checks()
if !getfield(@__MODULE__, :using_debug_checks)()
Core.eval(@__MODULE__, :(using_debug_checks() = true))
end
_using_debug_checks[] = true
return nothing
end

function disable_debug_checks()
if getfield(@__MODULE__, :using_debug_checks)()
Core.eval(@__MODULE__, :(using_debug_checks() = false))
end
_using_debug_checks[] = false
return nothing
end

#
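For context, a minimal sketch of how the `Ref`-based toggle above behaves (assuming the functions are called as `ITensors.enable_debug_checks()` and friends); flipping the `Ref` avoids the `Core.eval` method redefinition and the recompilation it triggers:

```julia
using ITensors

ITensors.using_debug_checks()   # false by default

ITensors.enable_debug_checks()  # sets the Ref to true; `@debug_check` blocks now run
ITensors.using_debug_checks()   # true

ITensors.disable_debug_checks() # sets the Ref back to false; no methods are redefined
ITensors.using_debug_checks()   # false
```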
28 changes: 24 additions & 4 deletions src/qn/flux.jl
@@ -1,4 +1,4 @@
flux(T::Union{Tensor,ITensor}, args...) = flux(inds(T), args...)
flux(T::ITensor, args...) = flux(tensor(T), args...)

"""
flux(T::ITensor)
@@ -7,14 +7,34 @@ Returns the flux of the ITensor.
If the ITensor is empty or it has no QNs, returns `nothing`.
"""
function flux(T::Union{Tensor,ITensor})
function flux(T::ITensor)
return flux(tensor(T))
end

function checkflux(T::ITensor, flux_check)
return checkflux(tensor(T), flux_check)
end

function checkflux(T::ITensor)
return checkflux(tensor(T))
end

#
# Tensor versions
# TODO: Move to NDTensors when QN functionality
# is moved there.
#

flux(T::Tensor, args...) = flux(inds(T), args...)

function flux(T::Tensor)
(!hasqns(T) || isempty(T)) && return nothing
@debug_check checkflux(T)
block1 = first(eachnzblock(T))
return flux(T, block1)
end

function checkflux(T::Union{Tensor,ITensor}, flux_check)
function checkflux(T::Tensor, flux_check)
for b in nzblocks(T)
fluxTb = flux(T, b)
if fluxTb != flux_check
Expand All @@ -26,7 +46,7 @@ function checkflux(T::Union{Tensor,ITensor}, flux_check)
return nothing
end

function checkflux(T::Union{Tensor,ITensor})
function checkflux(T::Tensor)
b1 = first(nzblocks(T))
fluxTb1 = flux(T, b1)
return checkflux(T, fluxTb1)
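A brief, hedged sketch of `flux` and `checkflux` on a QN-conserving ITensor (the index and QN values are illustrative):

```julia
using ITensors

i = Index([QN(0) => 1, QN(1) => 1], "i")

# Allocate only the blocks compatible with a total flux of QN(1).
A = ITensor(QN(1), i', dag(i))
A[i' => 2, i => 1] = 1.0

flux(A)                        # QN(1)
ITensors.checkflux(A, QN(1))   # returns nothing when every nonzero block has flux QN(1)
ITensors.checkflux(A)          # checks all blocks against the flux of the first nonzero block

flux(ITensor(Index(2)))        # nothing: the index carries no QNs
```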
42 changes: 33 additions & 9 deletions src/qn/qnitensor.jl
@@ -136,7 +136,7 @@ julia> flux(A)
QN(-1)
```
"""
function ITensor(::Type{ElT}, flux::QN, inds::Indices) where {ElT<:Number}
function ITensor(::Type{ElT}, flux::QN, inds::QNIndices) where {ElT<:Number}
is = Tuple(inds)
blocks = nzblocks(flux, is)
if length(blocks) == 0
@@ -146,6 +146,12 @@ function ITensor(::Type{ElT}, flux::QN, inds::Indices) where {ElT<:Number}
return itensor(T)
end

# This helps with making code more generic between block sparse
# and dense.
function ITensor(::Type{ElT}, flux::QN, inds::Indices) where {ElT<:Number}
return itensor(Dense(ElT, dim(inds)), inds)
end

function ITensor(::Type{ElT}, flux::QN, is...) where {ElT<:Number}
return ITensor(ElT, flux, indices(is...))
end
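As a loose illustration of the comment above: with this fallback, generic code can pass a flux unconditionally, and the same call produces dense storage for ordinary indices and block sparse storage for QN indices (a sketch under that assumption; the helper name is made up):

```julia
using ITensors

# Hypothetical generic helper: always passes a flux, regardless of index type.
make_zero_tensor(elt, flux, inds...) = ITensor(elt, flux, inds...)

i_dense = Index(2, "i")
i_qn = Index([QN(0) => 1, QN(1) => 1], "i")

make_zero_tensor(Float64, QN(), i_dense', dag(i_dense))  # dense ITensor
make_zero_tensor(Float64, QN(), i_qn', dag(i_qn))        # block sparse ITensor
```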
@@ -250,7 +256,7 @@ ITensor(eltype::Type{<:Number}, x::Number, is::QNIndices) = ITensor(eltype, x, Q
#ITensor(x::RealOrComplex{Int}, flux::QN, is...) = ITensor(float(x), is...)

"""
ITensor([ElT::Type, ]::AbstractArray, inds; tol = 0)
ITensor([ElT::Type, ]::AbstractArray, inds; tol=0.0, checkflux=true)
Create a block sparse ITensor from the input Array, and collection
of QN indices. Zeros are dropped and nonzero blocks are determined
@@ -259,6 +265,10 @@ from the zero values of the array.
Optionally, you can set a tolerance such that elements
less than or equal to the tolerance are dropped.
By default, this will check that the fluxes of the nonzero blocks
are consistent with each other. You can disable this check by
setting `checkflux=false`.
# Examples
```julia
@@ -285,20 +295,34 @@ Block: (2, 2)
```
"""
function ITensor(
::AliasStyle, ::Type{ElT}, A::AbstractArray{<:Number}, inds::QNIndices; tol=0
) where {ElT<:Number}
::AliasStyle,
elt::Type{<:Number},
A::AbstractArray{<:Number},
inds::QNIndices;
tol=0.0,
checkflux=true,
)
is = Tuple(inds)
length(A) ≠ dim(inds) && throw(
DimensionMismatch(
"In ITensor(::AbstractArray, inds), length of AbstractArray ($(length(A))) must match total dimension of the indices ($(dim(is)))",
),
)
T = emptyITensor(ElT, is)
blocks = Block{length(is)}[]
T = BlockSparseTensor(elt, blocks, inds)
A = reshape(A, dims(is)...)
for vs in eachindex(T)
Avs = A[vs]
if abs(Avs) > tol
T[vs] = A[vs]
_copyto_dropzeros!(T, A; tol)
if checkflux
ITensors.checkflux(T)
end
return itensor(T)
end

function _copyto_dropzeros!(T::Tensor, A::AbstractArray; tol)
for i in eachindex(T)
Aᵢ = A[i]
if abs(Aᵢ) > tol
T[i] = Aᵢ
end
end
return T
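A short, hedged usage sketch of the array-to-block-sparse conversion with the new `checkflux` keyword (the matrix values are made up; see also the test change below):

```julia
using ITensors

i = Index([QN(0) => 1, QN(1) => 2], "i")

# Entries outside the QN-allowed blocks are tiny numerical noise.
A = [1e-9  0.0  0.0
     0.0   2.0  3.0
     0.0   4.0  5.0]

# Noise at or below `tol` is dropped; the remaining nonzero blocks are
# checked for a consistent flux (here they all have flux QN(0)).
T = ITensor(A, i', dag(i); tol=1e-8)

# If the nonzero pattern mixed blocks of different flux, the constructor
# would throw; `checkflux=false` skips that consistency check.
T2 = ITensor(A, i', dag(i); tol=1e-8, checkflux=false)
```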
34 changes: 21 additions & 13 deletions src/tensor_operations/tensor_algebra.jl
@@ -263,18 +263,25 @@ end

⊙(A::ITensor, B::ITensor) = hadamard_product(A, B)

# Helper tensors for performing a partial direct sum
function directsum_itensors(i::Index, j::Index, ij::Index)
S1 = zeros(dim(i), dim(ij))
for ii in 1:dim(i)
S1[ii, ii] = true
function directsum_projectors!(D1::Tensor, D2::Tensor)
d1 = size(D1, 1)
for ii in 1:d1
D1[ii, ii] = one(eltype(D1))
end
S2 = zeros(dim(j), dim(ij))
for jj in 1:dim(j)
S2[jj, dim(i) + jj] = true
d2 = size(D2, 1)
for jj in 1:d2
D2[jj, d1 + jj] = one(eltype(D1))
end
D1 = itensor(S1, dag(i), ij)
D2 = itensor(S2, dag(j), ij)
return D1, D2
end

# Helper tensors for performing a partial direct sum
function directsum_projectors(
elt1::Type{<:Number}, elt2::Type{<:Number}, i::Index, j::Index, ij::Index
)
D1 = ITensor(elt1, QN(), dag(i), ij)
D2 = ITensor(elt2, QN(), dag(j), ij)
directsum_projectors!(tensor(D1), tensor(D2))
return D1, D2
end
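As a rough dense analogue of the projectors built above (mirroring the removed `directsum_itensors` helper; dimensions are illustrative): each original index is embedded into its own slice of the combined index, so `A * D1 + B * D2` stacks `A` and `B` along that index.

```julia
using ITensors

i = Index(2, "i")
j = Index(3, "j")
ij = Index(dim(i) + dim(j), "sum")

# D1 maps i into the first dim(i) slots of ij, D2 maps j into the rest.
S1 = zeros(dim(i), dim(ij)); S1[1, 1] = S1[2, 2] = 1.0
S2 = zeros(dim(j), dim(ij)); S2[1, 3] = S2[2, 4] = S2[3, 5] = 1.0
D1 = itensor(S1, dag(i), ij)
D2 = itensor(S2, dag(j), ij)

A = ITensor(i); A[i => 1] = 1.0; A[i => 2] = 2.0
B = ITensor(j); B[j => 1] = 3.0; B[j => 2] = 4.0; B[j => 3] = 5.0

C = A * D1 + B * D2   # entries along ij: [1, 2, 3, 4, 5]
```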

@@ -337,9 +344,10 @@ function _directsum(IJ, A::ITensor, I, B::ITensor, J; tags=nothing)
I = map(In -> getfirst(==(In), inds(A)), I)
J = map(Jn -> getfirst(==(Jn), inds(B)), J)
for n in 1:N
D1, D2 = directsum_itensors(I[n], J[n], IJ[n])
A *= D1
B *= D2
# TODO: Pass the entire `datatype` instead of just the `eltype`.
D1, D2 = directsum_projectors(eltype(A), eltype(B), I[n], J[n], IJ[n])
A *= adapt(datatype(A), D1)
B *= adapt(datatype(B), D2)
end
C = A + B
return C => IJ
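For orientation, a hedged sketch of the public `directsum` call that exercises this code path (dense indices shown; based on the pattern in the `directsum` docstring):

```julia
using ITensors

x = Index(2, "x")
i = Index(3, "i")
j = Index(4, "j")

A = randomITensor(x, i)
B = randomITensor(x, j)

# Direct-sum A and B along i and j: the result keeps the shared index x
# and gains a new index of dimension dim(i) + dim(j) == 7.
S, s = directsum(A => i, B => j; tags="sum")
dim(s)  # 7
```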
1 change: 1 addition & 0 deletions test/base/test_qnitensor.jl
@@ -96,6 +96,7 @@ Random.seed!(1234)
2e-9 1e-10 4e-10
]
@test_throws ErrorException ITensor(A, i', dag(i); tol=1e-8)
@test ITensor(A, i', dag(i); tol=1e-8, checkflux=false) isa ITensor
end

@testset "similartype regression test" begin