diff --git a/NDTensors/Project.toml b/NDTensors/Project.toml index 7a42286b83..94626cdddb 100644 --- a/NDTensors/Project.toml +++ b/NDTensors/Project.toml @@ -1,7 +1,7 @@ name = "NDTensors" uuid = "23ae76d9-e61a-49c4-8f12-3f1a16adf9cf" authors = ["Matthew Fishman "] -version = "0.3.65" +version = "0.3.67" [deps] Accessors = "7d9f7c33-5ae7-4f3b-8dc6-eff91059b697" diff --git a/NDTensors/src/lib/BlockSparseArrays/examples/README.jl b/NDTensors/src/lib/BlockSparseArrays/examples/README.jl index c2facfac0d..9fff22ae7b 100644 --- a/NDTensors/src/lib/BlockSparseArrays/examples/README.jl +++ b/NDTensors/src/lib/BlockSparseArrays/examples/README.jl @@ -7,7 +7,7 @@ # `BlockArrays` reinterprets the `SparseArray` as a blocked data structure. using BlockArrays: BlockArrays, PseudoBlockVector, blockedrange -using NDTensors.BlockSparseArrays: BlockSparseArray, block_nstored +using NDTensors.BlockSparseArrays: BlockSparseArray, block_stored_length using Test: @test, @test_broken function main() @@ -36,13 +36,13 @@ function main() ] b = BlockSparseArray(nz_blocks, d_blocks, i_axes) - @test block_nstored(b) == 2 + @test block_stored_length(b) == 2 ## Blocks with discontiguous underlying data d_blocks = randn.(nz_block_sizes) b = BlockSparseArray(nz_blocks, d_blocks, i_axes) - @test block_nstored(b) == 2 + @test block_stored_length(b) == 2 ## Access a block @test b[Block(1, 1)] == d_blocks[1] @@ -65,7 +65,7 @@ function main() @test b + b ≈ Array(b) + Array(b) @test b + b isa BlockSparseArray - @test block_nstored(b + b) == 2 + @test block_stored_length(b + b) == 2 scaled_b = 2b @test scaled_b ≈ 2Array(b) diff --git a/NDTensors/src/lib/BlockSparseArrays/ext/BlockSparseArraysGradedAxesExt/test/runtests.jl b/NDTensors/src/lib/BlockSparseArrays/ext/BlockSparseArraysGradedAxesExt/test/runtests.jl index 11c40d10dc..9296f22f2d 100644 --- a/NDTensors/src/lib/BlockSparseArrays/ext/BlockSparseArraysGradedAxesExt/test/runtests.jl +++ b/NDTensors/src/lib/BlockSparseArrays/ext/BlockSparseArraysGradedAxesExt/test/runtests.jl @@ -2,7 +2,7 @@ using Test: @test, @testset using BlockArrays: AbstractBlockArray, Block, BlockedOneTo, blockedrange, blocklengths, blocksize -using NDTensors.BlockSparseArrays: BlockSparseArray, block_nstored +using NDTensors.BlockSparseArrays: BlockSparseArray, block_stored_length using NDTensors.GradedAxes: GradedAxes, GradedOneTo, @@ -13,7 +13,7 @@ using NDTensors.GradedAxes: gradedrange, isdual using NDTensors.LabelledNumbers: label -using NDTensors.SparseArrayInterface: nstored +using NDTensors.SparseArrayInterface: stored_length using NDTensors.SymmetrySectors: U1 using NDTensors.TensorAlgebra: fusedims, splitdims using LinearAlgebra: adjoint @@ -40,8 +40,8 @@ const elts = (Float32, Float64, Complex{Float32}, Complex{Float64}) @test size(b) == (4, 4, 4, 4) @test blocksize(b) == (2, 2, 2, 2) @test blocklengths.(axes(b)) == ([2, 2], [2, 2], [2, 2], [2, 2]) - @test nstored(b) == 32 - @test block_nstored(b) == 2 + @test stored_length(b) == 32 + @test block_stored_length(b) == 2 for i in 1:ndims(a) @test axes(b, i) isa GradedOneTo end @@ -58,8 +58,8 @@ const elts = (Float32, Float64, Complex{Float32}, Complex{Float64}) @test size(b) == (4, 4, 4, 4) @test blocksize(b) == (2, 2, 2, 2) @test blocklengths.(axes(b)) == ([2, 2], [2, 2], [2, 2], [2, 2]) - @test nstored(b) == 256 - @test block_nstored(b) == 16 + @test stored_length(b) == 256 + @test block_stored_length(b) == 16 for i in 1:ndims(a) @test axes(b, i) isa BlockedOneTo{Int} end @@ -71,8 +71,8 @@ const elts = (Float32, Float64, Complex{Float32}, 
Complex{Float64}) b = a[2:3, 2:3, 2:3, 2:3] @test size(b) == (2, 2, 2, 2) @test blocksize(b) == (2, 2, 2, 2) - @test nstored(b) == 2 - @test block_nstored(b) == 2 + @test stored_length(b) == 2 + @test block_stored_length(b) == 2 for i in 1:ndims(a) @test axes(b, i) isa GradedOneTo end @@ -156,7 +156,7 @@ const elts = (Float32, Float64, Complex{Float32}, Complex{Float64}) a[i] = randn(elt, size(a[i])) end b = 2 * a - @test block_nstored(b) == 2 + @test block_stored_length(b) == 2 @test Array(b) == 2 * Array(a) for i in 1:2 @test axes(b, i) isa GradedOneTo @@ -177,7 +177,7 @@ const elts = (Float32, Float64, Complex{Float32}, Complex{Float64}) a[i] = randn(elt, size(a[i])) end b = 2 * a - @test block_nstored(b) == 2 + @test block_stored_length(b) == 2 @test Array(b) == 2 * Array(a) for i in 1:2 @test axes(b, i) isa GradedUnitRange @@ -204,7 +204,7 @@ const elts = (Float32, Float64, Complex{Float32}, Complex{Float64}) a[i] = randn(elt, size(a[i])) end b = 2 * a - @test block_nstored(b) == 2 + @test block_stored_length(b) == 2 @test Array(b) == 2 * Array(a) for i in 1:2 @test axes(b, i) isa GradedUnitRangeDual @@ -229,7 +229,7 @@ const elts = (Float32, Float64, Complex{Float32}, Complex{Float64}) a[i] = randn(elt, size(a[i])) end b = 2 * a - @test block_nstored(b) == 2 + @test block_stored_length(b) == 2 @test Array(b) == 2 * Array(a) for i in 1:2 @test axes(b, i) isa GradedUnitRangeDual @@ -255,7 +255,7 @@ const elts = (Float32, Float64, Complex{Float32}, Complex{Float64}) a[i] = randn(elt, size(a[i])) end b = 2 * a - @test block_nstored(b) == 2 + @test block_stored_length(b) == 2 @test Array(b) == 2 * Array(a) @test a[:, :] isa BlockSparseArray for i in 1:2 @@ -280,7 +280,7 @@ const elts = (Float32, Float64, Complex{Float32}, Complex{Float64}) a[i] = randn(elt, size(a[i])) end b = 2 * a' - @test block_nstored(b) == 2 + @test block_stored_length(b) == 2 @test Array(b) == 2 * Array(a)' for ax in axes(b) @test ax isa typeof(dual(r)) diff --git a/NDTensors/src/lib/BlockSparseArrays/src/BlockArraysExtensions/BlockArraysExtensions.jl b/NDTensors/src/lib/BlockSparseArrays/src/BlockArraysExtensions/BlockArraysExtensions.jl index 39b4e885ac..67d667353e 100644 --- a/NDTensors/src/lib/BlockSparseArrays/src/BlockArraysExtensions/BlockArraysExtensions.jl +++ b/NDTensors/src/lib/BlockSparseArrays/src/BlockArraysExtensions/BlockArraysExtensions.jl @@ -22,7 +22,7 @@ using BlockArrays: using Compat: allequal using Dictionaries: Dictionary, Indices using ..GradedAxes: blockedunitrange_getindices, to_blockindices -using ..SparseArrayInterface: SparseArrayInterface, nstored, stored_indices +using ..SparseArrayInterface: SparseArrayInterface, stored_length, stored_indices # A return type for `blocks(array)` when `array` isn't blocked. # Represents a vector with just that single block. @@ -534,13 +534,13 @@ function Base.setindex!(a::BlockView{<:Any,N}, value, index::Vararg{Int,N}) wher return a end -function SparseArrayInterface.nstored(a::BlockView) +function SparseArrayInterface.stored_length(a::BlockView) # TODO: Store whether or not the block is stored already as # a Bool in `BlockView`. I = CartesianIndex(Int.(a.block)) # TODO: Use `block_stored_indices`. 
if I ∈ stored_indices(blocks(a.array)) - return nstored(blocks(a.array)[I]) + return stored_length(blocks(a.array)[I]) end return 0 end diff --git a/NDTensors/src/lib/BlockSparseArrays/src/BlockArraysSparseArrayInterfaceExt/BlockArraysSparseArrayInterfaceExt.jl b/NDTensors/src/lib/BlockSparseArrays/src/BlockArraysSparseArrayInterfaceExt/BlockArraysSparseArrayInterfaceExt.jl index 658fe4436d..335c37b0fb 100644 --- a/NDTensors/src/lib/BlockSparseArrays/src/BlockArraysSparseArrayInterfaceExt/BlockArraysSparseArrayInterfaceExt.jl +++ b/NDTensors/src/lib/BlockSparseArrays/src/BlockArraysSparseArrayInterfaceExt/BlockArraysSparseArrayInterfaceExt.jl @@ -1,8 +1,8 @@ using BlockArrays: AbstractBlockArray, BlocksView -using ..SparseArrayInterface: SparseArrayInterface, nstored +using ..SparseArrayInterface: SparseArrayInterface, stored_length -function SparseArrayInterface.nstored(a::AbstractBlockArray) - return sum(b -> nstored(b), blocks(a); init=zero(Int)) +function SparseArrayInterface.stored_length(a::AbstractBlockArray) + return sum(b -> stored_length(b), blocks(a); init=zero(Int)) end # TODO: Handle `BlocksView` wrapping a sparse array? diff --git a/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/abstractblocksparsearray.jl b/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/abstractblocksparsearray.jl index 9ecbb252fd..ca21af2136 100644 --- a/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/abstractblocksparsearray.jl +++ b/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/abstractblocksparsearray.jl @@ -3,7 +3,7 @@ using BlockArrays: using ..SparseArrayInterface: sparse_getindex, sparse_setindex! # TODO: Delete this. This function was replaced -# by `nstored` but is still used in `NDTensors`. +# by `stored_length` but is still used in `NDTensors`. 
function nonzero_keys end abstract type AbstractBlockSparseArray{T,N} <: AbstractBlockArray{T,N} end diff --git a/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/arraylayouts.jl b/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/arraylayouts.jl index 069d367cf6..7b8088ff2b 100644 --- a/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/arraylayouts.jl +++ b/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/arraylayouts.jl @@ -3,7 +3,7 @@ using BlockArrays: BlockLayout using ..SparseArrayInterface: SparseLayout using ..TypeParameterAccessors: parenttype, similartype -function ArrayLayouts.MemoryLayout(arraytype::Type{<:BlockSparseArrayLike}) +function ArrayLayouts.MemoryLayout(arraytype::Type{<:AnyAbstractBlockSparseArray}) outer_layout = typeof(MemoryLayout(blockstype(arraytype))) inner_layout = typeof(MemoryLayout(blocktype(arraytype))) return BlockLayout{outer_layout,inner_layout}() end diff --git a/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/broadcast.jl b/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/broadcast.jl index 5887673036..96841be6f1 100644 --- a/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/broadcast.jl +++ b/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/broadcast.jl @@ -1,7 +1,7 @@ using BlockArrays: AbstractBlockedUnitRange, BlockSlice using Base.Broadcast: Broadcast -function Broadcast.BroadcastStyle(arraytype::Type{<:BlockSparseArrayLike}) +function Broadcast.BroadcastStyle(arraytype::Type{<:AnyAbstractBlockSparseArray}) return BlockSparseArrayStyle{ndims(arraytype)}() end diff --git a/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/cat.jl b/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/cat.jl index eac4ea1b02..3023037113 100644 --- a/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/cat.jl +++ b/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/cat.jl @@ -1,5 +1,4 @@ -# TODO: Change to `AnyAbstractBlockSparseArray`. -function Base.cat(as::BlockSparseArrayLike...; dims) +function Base.cat(as::AnyAbstractBlockSparseArray...; dims) # TODO: Use `sparse_cat` instead, currently # that erroneously allocates too many blocks that are # zero and shouldn't be stored. diff --git a/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/map.jl b/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/map.jl index b9ab510566..d7ffe36487 100644 --- a/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/map.jl +++ b/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/map.jl @@ -92,27 +92,27 @@ end # function SparseArrayInterface.sparse_mapreduce(::BlockSparseArrayStyle, f, a_dest::AbstractArray, a_srcs::Vararg{AbstractArray}) # end -function Base.map!(f, a_dest::AbstractArray, a_srcs::Vararg{BlockSparseArrayLike}) +function Base.map!(f, a_dest::AbstractArray, a_srcs::Vararg{AnyAbstractBlockSparseArray}) sparse_map!(f, a_dest, a_srcs...) return a_dest end -function Base.map(f, as::Vararg{BlockSparseArrayLike}) +function Base.map(f, as::Vararg{AnyAbstractBlockSparseArray}) return f.(as...) 
end -function Base.copy!(a_dest::AbstractArray, a_src::BlockSparseArrayLike) +function Base.copy!(a_dest::AbstractArray, a_src::AnyAbstractBlockSparseArray) sparse_copy!(a_dest, a_src) return a_dest end -function Base.copyto!(a_dest::AbstractArray, a_src::BlockSparseArrayLike) +function Base.copyto!(a_dest::AbstractArray, a_src::AnyAbstractBlockSparseArray) sparse_copyto!(a_dest, a_src) return a_dest end # Fix ambiguity error -function Base.copyto!(a_dest::LayoutArray, a_src::BlockSparseArrayLike) +function Base.copyto!(a_dest::LayoutArray, a_src::AnyAbstractBlockSparseArray) sparse_copyto!(a_dest, a_src) return a_dest end @@ -131,21 +131,21 @@ function Base.copyto!( return a_dest end -function Base.permutedims!(a_dest, a_src::BlockSparseArrayLike, perm) +function Base.permutedims!(a_dest, a_src::AnyAbstractBlockSparseArray, perm) sparse_permutedims!(a_dest, a_src, perm) return a_dest end -function Base.mapreduce(f, op, as::Vararg{BlockSparseArrayLike}; kwargs...) +function Base.mapreduce(f, op, as::Vararg{AnyAbstractBlockSparseArray}; kwargs...) return sparse_mapreduce(f, op, as...; kwargs...) end # TODO: Why isn't this calling `mapreduce` already? -function Base.iszero(a::BlockSparseArrayLike) +function Base.iszero(a::AnyAbstractBlockSparseArray) return sparse_iszero(blocks(a)) end # TODO: Why isn't this calling `mapreduce` already? -function Base.isreal(a::BlockSparseArrayLike) +function Base.isreal(a::AnyAbstractBlockSparseArray) return sparse_isreal(blocks(a)) end diff --git a/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/sparsearrayinterface.jl b/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/sparsearrayinterface.jl index ac866afc9a..691aad5f2b 100644 --- a/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/sparsearrayinterface.jl +++ b/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/sparsearrayinterface.jl @@ -33,6 +33,6 @@ function SparseArrayInterface.sparse_storage(a::AbstractBlockSparseArray) return BlockSparseStorage(a) end -function SparseArrayInterface.nstored(a::BlockSparseArrayLike) - return sum(nstored, sparse_storage(blocks(a)); init=zero(Int)) +function SparseArrayInterface.stored_length(a::AnyAbstractBlockSparseArray) + return sum(stored_length, sparse_storage(blocks(a)); init=zero(Int)) end diff --git a/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/views.jl b/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/views.jl index aa3c7711c6..4742f7b781 100644 --- a/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/views.jl +++ b/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/views.jl @@ -20,10 +20,12 @@ end # Override the default definition of `BlockArrays.blocksize`, # which is incorrect for certain slices. -function BlockArrays.blocksize(a::SubArray{<:Any,<:Any,<:BlockSparseArrayLike}) +function BlockArrays.blocksize(a::SubArray{<:Any,<:Any,<:AnyAbstractBlockSparseArray}) return blocklength.(axes(a)) end -function BlockArrays.blocksize(a::SubArray{<:Any,<:Any,<:BlockSparseArrayLike}, i::Int) +function BlockArrays.blocksize( + a::SubArray{<:Any,<:Any,<:AnyAbstractBlockSparseArray}, i::Int +) # TODO: Maybe use `blocklength(axes(a, i))` which would be a bit faster. return blocksize(a)[i] end @@ -33,7 +35,7 @@ end # which don't handle subslices of blocks properly. 
function Base.view( a::SubArray{ - <:Any,N,<:BlockSparseArrayLike,<:Tuple{Vararg{BlockSlice{<:BlockRange{1}},N}} + <:Any,N,<:AnyAbstractBlockSparseArray,<:Tuple{Vararg{BlockSlice{<:BlockRange{1}},N}} }, I::Block{N}, ) where {N} @@ -41,14 +43,14 @@ function Base.view( a::SubArray{ - <:Any,N,<:BlockSparseArrayLike,<:Tuple{Vararg{BlockSlice{<:BlockRange{1}},N}} + <:Any,N,<:AnyAbstractBlockSparseArray,<:Tuple{Vararg{BlockSlice{<:BlockRange{1}},N}} }, I::Vararg{Block{1},N}, ) where {N} return blocksparse_view(a, I...) end function Base.view( - V::SubArray{<:Any,1,<:BlockSparseArrayLike,<:Tuple{BlockSlice{<:BlockRange{1}}}}, + a::SubArray{<:Any,1,<:AnyAbstractBlockSparseArray,<:Tuple{BlockSlice{<:BlockRange{1}}}}, I::Block{1}, ) return blocksparse_view(a, I) @@ -154,7 +156,7 @@ function BlockArrays.viewblock( return viewblock(a, to_tuple(block)...) end -# Fixes ambiguity error with `BlockSparseArrayLike` definition. +# Fixes ambiguity error with `AnyAbstractBlockSparseArray` definition. function Base.view( a::SubArray{ T,N,<:AbstractBlockSparseArray{T,N},<:Tuple{Vararg{BlockSlice{<:BlockRange{1}},N}} @@ -163,7 +165,7 @@ ) where {T,N} return viewblock(a, block) end -# Fixes ambiguity error with `BlockSparseArrayLike` definition. +# Fixes ambiguity error with `AnyAbstractBlockSparseArray` definition. function Base.view( a::SubArray{ T,N,<:AbstractBlockSparseArray{T,N},<:Tuple{Vararg{BlockSlice{<:BlockRange{1}},N}} diff --git a/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/wrappedabstractblocksparsearray.jl b/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/wrappedabstractblocksparsearray.jl index e8d1e5e15b..c961f67e5a 100644 --- a/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/wrappedabstractblocksparsearray.jl +++ b/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/wrappedabstractblocksparsearray.jl @@ -16,20 +16,20 @@ const WrappedAbstractBlockSparseArray{T,N} = WrappedArray{ } # TODO: Rename `AnyBlockSparseArray`. 
-const BlockSparseArrayLike{T,N} = Union{ +const AnyAbstractBlockSparseArray{T,N} = Union{ <:AbstractBlockSparseArray{T,N},<:WrappedAbstractBlockSparseArray{T,N} } # a[1:2, 1:2] function Base.to_indices( - a::BlockSparseArrayLike, inds, I::Tuple{UnitRange{<:Integer},Vararg{Any}} + a::AnyAbstractBlockSparseArray, inds, I::Tuple{UnitRange{<:Integer},Vararg{Any}} ) return blocksparse_to_indices(a, inds, I) end # a[[Block(2), Block(1)], [Block(2), Block(1)]] function Base.to_indices( - a::BlockSparseArrayLike, inds, I::Tuple{Vector{<:Block{1}},Vararg{Any}} + a::AnyAbstractBlockSparseArray, inds, I::Tuple{Vector{<:Block{1}},Vararg{Any}} ) return blocksparse_to_indices(a, inds, I) end @@ -37,14 +37,16 @@ end # a[BlockVector([Block(2), Block(1)], [2]), BlockVector([Block(2), Block(1)], [2])] # a[BlockedVector([Block(2), Block(1)], [2]), BlockedVector([Block(2), Block(1)], [2])] function Base.to_indices( - a::BlockSparseArrayLike, inds, I::Tuple{AbstractBlockVector{<:Block{1}},Vararg{Any}} + a::AnyAbstractBlockSparseArray, + inds, + I::Tuple{AbstractBlockVector{<:Block{1}},Vararg{Any}}, ) return blocksparse_to_indices(a, inds, I) end # a[mortar([Block(1)[1:2], Block(2)[1:3]])] function Base.to_indices( - a::BlockSparseArrayLike, + a::AnyAbstractBlockSparseArray, inds, I::Tuple{BlockVector{<:BlockIndex{1},<:Vector{<:BlockIndexRange{1}}},Vararg{Any}}, ) @@ -53,13 +55,13 @@ end # a[[Block(1)[1:2], Block(2)[1:2]], [Block(1)[1:2], Block(2)[1:2]]] function Base.to_indices( - a::BlockSparseArrayLike, inds, I::Tuple{Vector{<:BlockIndexRange{1}},Vararg{Any}} + a::AnyAbstractBlockSparseArray, inds, I::Tuple{Vector{<:BlockIndexRange{1}},Vararg{Any}} ) return to_indices(a, inds, (mortar(I[1]), Base.tail(I)...)) end # BlockArrays `AbstractBlockArray` interface -BlockArrays.blocks(a::BlockSparseArrayLike) = blocksparse_blocks(a) +BlockArrays.blocks(a::AnyAbstractBlockSparseArray) = blocksparse_blocks(a) # Fix ambiguity error with `BlockArrays` using BlockArrays: BlockSlice @@ -74,59 +76,63 @@ function blockstype(arraytype::Type{<:WrappedAbstractBlockSparseArray}) return blockstype(parenttype(arraytype)) end -blocktype(a::BlockSparseArrayLike) = eltype(blocks(a)) -blocktype(arraytype::Type{<:BlockSparseArrayLike}) = eltype(blockstype(arraytype)) +blocktype(a::AnyAbstractBlockSparseArray) = eltype(blocks(a)) +blocktype(arraytype::Type{<:AnyAbstractBlockSparseArray}) = eltype(blockstype(arraytype)) using ArrayLayouts: ArrayLayouts -function Base.getindex(a::BlockSparseArrayLike{<:Any,N}, I::CartesianIndices{N}) where {N} +function Base.getindex( + a::AnyAbstractBlockSparseArray{<:Any,N}, I::CartesianIndices{N} +) where {N} return ArrayLayouts.layout_getindex(a, I) end function Base.getindex( - a::BlockSparseArrayLike{<:Any,N}, I::Vararg{AbstractUnitRange{<:Integer},N} + a::AnyAbstractBlockSparseArray{<:Any,N}, I::Vararg{AbstractUnitRange{<:Integer},N} ) where {N} return ArrayLayouts.layout_getindex(a, I...) end # TODO: Define `AnyBlockSparseMatrix`. function Base.getindex( - a::BlockSparseArrayLike{<:Any,2}, I::Vararg{AbstractUnitRange{<:Integer},2} + a::AnyAbstractBlockSparseArray{<:Any,2}, I::Vararg{AbstractUnitRange{<:Integer},2} ) return ArrayLayouts.layout_getindex(a, I...) end # Fixes ambiguity error. -function Base.getindex(a::BlockSparseArrayLike{<:Any,0}) +function Base.getindex(a::AnyAbstractBlockSparseArray{<:Any,0}) return ArrayLayouts.layout_getindex(a) end # TODO: Define `blocksparse_isassigned`. 
function Base.isassigned( - a::BlockSparseArrayLike{<:Any,N}, index::Vararg{Block{1},N} + a::AnyAbstractBlockSparseArray{<:Any,N}, index::Vararg{Block{1},N} ) where {N} return isassigned(blocks(a), Int.(index)...) end # Fix ambiguity error. -function Base.isassigned(a::BlockSparseArrayLike{<:Any,0}) +function Base.isassigned(a::AnyAbstractBlockSparseArray{<:Any,0}) return isassigned(blocks(a)) end -function Base.isassigned(a::BlockSparseArrayLike{<:Any,N}, index::Block{N}) where {N} +function Base.isassigned(a::AnyAbstractBlockSparseArray{<:Any,N}, index::Block{N}) where {N} return isassigned(a, Tuple(index)...) end # TODO: Define `blocksparse_isassigned`. function Base.isassigned( - a::BlockSparseArrayLike{<:Any,N}, index::Vararg{BlockIndex{1},N} + a::AnyAbstractBlockSparseArray{<:Any,N}, index::Vararg{BlockIndex{1},N} ) where {N} b = block.(index) return isassigned(a, b...) && isassigned(@view(a[b...]), blockindex.(index)...) end -function Base.setindex!(a::BlockSparseArrayLike{<:Any,N}, value, I::BlockIndex{N}) where {N} +function Base.setindex!( + a::AnyAbstractBlockSparseArray{<:Any,N}, value, I::BlockIndex{N} +) where {N} blocksparse_setindex!(a, value, I) return a end # Fixes ambiguity error with BlockArrays.jl -function Base.setindex!(a::BlockSparseArrayLike{<:Any,1}, value, I::BlockIndex{1}) +function Base.setindex!(a::AnyAbstractBlockSparseArray{<:Any,1}, value, I::BlockIndex{1}) blocksparse_setindex!(a, value, I) return a end @@ -141,7 +147,7 @@ function Base.fill!(a::AbstractBlockSparseArray, value) return a end -function Base.fill!(a::BlockSparseArrayLike, value) +function Base.fill!(a::AnyAbstractBlockSparseArray, value) # TODO: Even if `iszero(value)`, this doesn't drop # blocks from `a`, and additionally allocates # new blocks filled with zeros, unlike @@ -153,14 +159,15 @@ end # Needed by `BlockArrays` matrix multiplication interface function Base.similar( - arraytype::Type{<:BlockSparseArrayLike}, axes::Tuple{Vararg{AbstractUnitRange{<:Integer}}} + arraytype::Type{<:AnyAbstractBlockSparseArray}, + axes::Tuple{Vararg{AbstractUnitRange{<:Integer}}}, ) return similar(arraytype, eltype(arraytype), axes) end # Fixes ambiguity error. function Base.similar( - arraytype::Type{<:BlockSparseArrayLike}, axes::Tuple{Base.OneTo,Vararg{Base.OneTo}} + arraytype::Type{<:AnyAbstractBlockSparseArray}, axes::Tuple{Base.OneTo,Vararg{Base.OneTo}} ) return similar(arraytype, eltype(arraytype), axes) end @@ -170,7 +177,7 @@ end # is only appears to be needed in older versions of Julia like v1.6. # Delete once we drop support for older versions of Julia. function Base.similar( - arraytype::Type{<:BlockSparseArrayLike}, + arraytype::Type{<:AnyAbstractBlockSparseArray}, axes::Tuple{AbstractUnitRange{<:Integer},Vararg{AbstractUnitRange{<:Integer}}}, ) return similar(arraytype, eltype(arraytype), axes) @@ -178,7 +185,7 @@ end # Fixes ambiguity error with `BlockArrays`. function Base.similar( - arraytype::Type{<:BlockSparseArrayLike}, + arraytype::Type{<:AnyAbstractBlockSparseArray}, axes::Tuple{AbstractBlockedUnitRange{<:Integer},Vararg{AbstractUnitRange{<:Integer}}}, ) return similar(arraytype, eltype(arraytype), axes) @@ -186,7 +193,7 @@ end # Fixes ambiguity error with `BlockArrays`. 
function Base.similar( - arraytype::Type{<:BlockSparseArrayLike}, + arraytype::Type{<:AnyAbstractBlockSparseArray}, axes::Tuple{ AbstractUnitRange{<:Integer}, AbstractBlockedUnitRange{<:Integer}, @@ -198,7 +205,7 @@ end # Needed for disambiguation function Base.similar( - arraytype::Type{<:BlockSparseArrayLike}, + arraytype::Type{<:AnyAbstractBlockSparseArray}, axes::Tuple{Vararg{AbstractBlockedUnitRange{<:Integer}}}, ) return similar(arraytype, eltype(arraytype), axes) @@ -213,7 +220,7 @@ end # Needed by `BlockArrays` matrix multiplication interface # TODO: Define a `blocksparse_similar` function. function Base.similar( - arraytype::Type{<:BlockSparseArrayLike}, + arraytype::Type{<:AnyAbstractBlockSparseArray}, elt::Type, axes::Tuple{Vararg{AbstractUnitRange{<:Integer}}}, ) @@ -222,19 +229,21 @@ end # TODO: Define a `blocksparse_similar` function. function Base.similar( - a::BlockSparseArrayLike, elt::Type, axes::Tuple{Vararg{AbstractUnitRange{<:Integer}}} + a::AnyAbstractBlockSparseArray, + elt::Type, + axes::Tuple{Vararg{AbstractUnitRange{<:Integer}}}, ) return blocksparse_similar(a, elt, axes) end # Fixes ambiguity error. -function Base.similar(a::BlockSparseArrayLike, elt::Type, axes::Tuple{}) +function Base.similar(a::AnyAbstractBlockSparseArray, elt::Type, axes::Tuple{}) return blocksparse_similar(a, elt, axes) end # Fixes ambiguity error with `BlockArrays`. function Base.similar( - a::BlockSparseArrayLike, + a::AnyAbstractBlockSparseArray, elt::Type, axes::Tuple{ AbstractBlockedUnitRange{<:Integer},Vararg{AbstractBlockedUnitRange{<:Integer}} @@ -245,7 +254,7 @@ end # Fixes ambiguity error with `OffsetArrays`. function Base.similar( - a::BlockSparseArrayLike, + a::AnyAbstractBlockSparseArray, elt::Type, axes::Tuple{AbstractUnitRange{<:Integer},Vararg{AbstractUnitRange{<:Integer}}}, ) @@ -254,7 +263,7 @@ end # Fixes ambiguity error with `BlockArrays`. function Base.similar( - a::BlockSparseArrayLike, + a::AnyAbstractBlockSparseArray, elt::Type, axes::Tuple{AbstractBlockedUnitRange{<:Integer},Vararg{AbstractUnitRange{<:Integer}}}, ) @@ -263,7 +272,7 @@ end # Fixes ambiguity errors with BlockArrays. function Base.similar( - a::BlockSparseArrayLike, + a::AnyAbstractBlockSparseArray, elt::Type, axes::Tuple{ AbstractUnitRange{<:Integer}, @@ -276,7 +285,7 @@ end # Fixes ambiguity error with `StaticArrays`. function Base.similar( - a::BlockSparseArrayLike, elt::Type, axes::Tuple{Base.OneTo,Vararg{Base.OneTo}} + a::AnyAbstractBlockSparseArray, elt::Type, axes::Tuple{Base.OneTo,Vararg{Base.OneTo}} ) return blocksparse_similar(a, elt, axes) end @@ -285,7 +294,7 @@ end # which is ultimately what `Array{T,N}(::AbstractArray{<:Any,N})` calls. # These are defined for now to avoid scalar indexing issues when there # are blocks on GPU. -function Base.Array{T,N}(a::BlockSparseArrayLike{<:Any,N}) where {T,N} +function Base.Array{T,N}(a::AnyAbstractBlockSparseArray{<:Any,N}) where {T,N} # First make it dense, then move to CPU. # Directly copying to CPU causes some issues with # scalar indexing on GPU which we have to investigate. 
@@ -293,9 +302,9 @@ function Base.Array{T,N}(a::BlockSparseArrayLike{<:Any,N}) where {T,N} a_dest .= a return Array{T,N}(a_dest) end -function Base.Array{T}(a::BlockSparseArrayLike) where {T} +function Base.Array{T}(a::AnyAbstractBlockSparseArray) where {T} return Array{T,ndims(a)}(a) end -function Base.Array(a::BlockSparseArrayLike) +function Base.Array(a::AnyAbstractBlockSparseArray) return Array{eltype(a)}(a) end diff --git a/NDTensors/src/lib/BlockSparseArrays/src/blocksparsearray/blocksparsearray.jl b/NDTensors/src/lib/BlockSparseArrays/src/blocksparsearray/blocksparsearray.jl index 5e63e8db46..3b6c72d6b2 100644 --- a/NDTensors/src/lib/BlockSparseArrays/src/blocksparsearray/blocksparsearray.jl +++ b/NDTensors/src/lib/BlockSparseArrays/src/blocksparsearray/blocksparsearray.jl @@ -175,7 +175,7 @@ end Base.axes(a::BlockSparseArray) = a.axes # BlockArrays `AbstractBlockArray` interface. -# This is used by `blocks(::BlockSparseArrayLike)`. +# This is used by `blocks(::AnyAbstractBlockSparseArray)`. blocksparse_blocks(a::BlockSparseArray) = a.blocks # TODO: Use `TypeParameterAccessors`. diff --git a/NDTensors/src/lib/BlockSparseArrays/src/blocksparsearrayinterface/blocksparsearrayinterface.jl b/NDTensors/src/lib/BlockSparseArrays/src/blocksparsearrayinterface/blocksparsearrayinterface.jl index fa7fcc25ce..98694efe94 100644 --- a/NDTensors/src/lib/BlockSparseArrays/src/blocksparsearrayinterface/blocksparsearrayinterface.jl +++ b/NDTensors/src/lib/BlockSparseArrays/src/blocksparsearrayinterface/blocksparsearrayinterface.jl @@ -13,7 +13,7 @@ using BlockArrays: blocks, findblockindex using LinearAlgebra: Adjoint, Transpose -using ..SparseArrayInterface: perm, iperm, nstored, sparse_zero! +using ..SparseArrayInterface: perm, iperm, stored_length, sparse_zero! blocksparse_blocks(a::AbstractArray) = error("Not implemented") @@ -136,8 +136,8 @@ function blocksparse_fill!(a::AbstractArray, value) return a end -function block_nstored(a::AbstractArray) - return nstored(blocks(a)) +function block_stored_length(a::AbstractArray) + return stored_length(blocks(a)) end # BlockArrays @@ -176,7 +176,9 @@ end # TODO: Either make this the generic interface or define # `SparseArrayInterface.sparse_storage`, which is used # to defined this. -SparseArrayInterface.nstored(a::SparsePermutedDimsArrayBlocks) = length(stored_indices(a)) +function SparseArrayInterface.stored_length(a::SparsePermutedDimsArrayBlocks) + return length(stored_indices(a)) +end function SparseArrayInterface.sparse_storage(a::SparsePermutedDimsArrayBlocks) return error("Not implemented") end @@ -214,7 +216,7 @@ end # TODO: Either make this the generic interface or define # `SparseArrayInterface.sparse_storage`, which is used # to defined this. -SparseArrayInterface.nstored(a::SparseTransposeBlocks) = length(stored_indices(a)) +SparseArrayInterface.stored_length(a::SparseTransposeBlocks) = length(stored_indices(a)) function SparseArrayInterface.sparse_storage(a::SparseTransposeBlocks) return error("Not implemented") end @@ -253,7 +255,7 @@ end # TODO: Either make this the generic interface or define # `SparseArrayInterface.sparse_storage`, which is used # to defined this. 
-SparseArrayInterface.nstored(a::SparseAdjointBlocks) = length(stored_indices(a)) +SparseArrayInterface.stored_length(a::SparseAdjointBlocks) = length(stored_indices(a)) function SparseArrayInterface.sparse_storage(a::SparseAdjointBlocks) return error("Not implemented") end @@ -316,7 +318,7 @@ end # TODO: Either make this the generic interface or define # `SparseArrayInterface.sparse_storage`, which is used # to defined this. -SparseArrayInterface.nstored(a::SparseSubArrayBlocks) = length(stored_indices(a)) +SparseArrayInterface.stored_length(a::SparseSubArrayBlocks) = length(stored_indices(a)) ## struct SparseSubArrayBlocksStorage{Array<:SparseSubArrayBlocks} ## array::Array @@ -341,4 +343,4 @@ function blocksparse_blocks( end using BlockArrays: BlocksView -SparseArrayInterface.nstored(a::BlocksView) = length(a) +SparseArrayInterface.stored_length(a::BlocksView) = length(a) diff --git a/NDTensors/src/lib/BlockSparseArrays/test/test_basics.jl b/NDTensors/src/lib/BlockSparseArrays/test/test_basics.jl index 51435b6eb3..3a5c29ab46 100644 --- a/NDTensors/src/lib/BlockSparseArrays/test/test_basics.jl +++ b/NDTensors/src/lib/BlockSparseArrays/test/test_basics.jl @@ -23,14 +23,14 @@ using NDTensors.BlockSparseArrays: BlockSparseMatrix, BlockSparseVector, BlockView, - block_nstored, + block_stored_length, block_reshape, block_stored_indices, blockstype, blocktype, view! using NDTensors.GPUArraysCoreExtensions: cpu -using NDTensors.SparseArrayInterface: nstored +using NDTensors.SparseArrayInterface: stored_length using NDTensors.SparseArrayDOKs: SparseArrayDOK, SparseMatrixDOK, SparseVectorDOK using NDTensors.TensorAlgebra: contract using Test: @test, @test_broken, @test_throws, @testset @@ -105,10 +105,8 @@ using .NDTensorsTestUtils: devices_list, is_supported_eltype @test blockstype(a) <: SparseMatrixDOK{Matrix{elt}} @test blocklengths.(axes(a)) == ([2, 3], [3, 4]) @test iszero(a) - @test_broken iszero(block_stored_length(a)) - @test iszero(block_nstored(a)) - @test_broken iszero(stored_length(a)) - @test iszero(nstored(a)) + @test iszero(block_stored_length(a)) + @test iszero(stored_length(a)) end end @@ -139,10 +137,8 @@ using .NDTensorsTestUtils: devices_list, is_supported_eltype @test blockstype(a) <: SparseVectorDOK{Vector{elt}} @test blocklengths.(axes(a)) == ([2, 3],) @test iszero(a) - @test_broken iszero(block_stored_length(a)) - @test iszero(block_nstored(a)) - @test_broken iszero(stored_length(a)) - @test iszero(nstored(a)) + @test iszero(block_stored_length(a)) + @test iszero(stored_length(a)) end end end @@ -157,7 +153,7 @@ using .NDTensorsTestUtils: devices_list, is_supported_eltype @test blocklength.(axes(a)) == (2, 2) @test blocksize(a) == (2, 2) @test size(a) == (5, 5) - @test block_nstored(a) == 0 + @test block_stored_length(a) == 0 @test iszero(a) @allowscalar @test all(I -> iszero(a[I]), eachindex(a)) @test_throws DimensionMismatch a[Block(1, 1)] = randn(elt, 2, 3) @@ -170,7 +166,7 @@ using .NDTensorsTestUtils: devices_list, is_supported_eltype @test blocklength.(axes(a)) == (2, 2) @test blocksize(a) == (2, 2) @test size(a) == (5, 5) - @test block_nstored(a) == 1 + @test block_stored_length(a) == 1 @test !iszero(a) @test a[3, 3] == 33 @test all(eachindex(a)) do I @@ -190,7 +186,7 @@ using .NDTensorsTestUtils: devices_list, is_supported_eltype @test isone(length(a)) @test blocksize(a) == () @test blocksizes(a) == fill(()) - @test iszero(block_nstored(a)) + @test iszero(block_stored_length(a)) @test iszero(@allowscalar(a[])) @test iszero(@allowscalar(a[CartesianIndex()])) @test 
a[Block()] == dev(fill(0)) @@ -205,7 +201,7 @@ using .NDTensorsTestUtils: devices_list, is_supported_eltype @test isone(length(b)) @test blocksize(b) == () @test blocksizes(b) == fill(()) - @test isone(block_nstored(b)) + @test isone(block_stored_length(b)) @test @allowscalar(b[]) == 2 @test @allowscalar(b[CartesianIndex()]) == 2 @test b[Block()] == dev(fill(2)) @@ -221,8 +217,8 @@ using .NDTensorsTestUtils: devices_list, is_supported_eltype a[b] = dev(randn(elt, size(a[b]))) end @test eltype(a) == elt - @test block_nstored(a) == 2 - @test nstored(a) == 2 * 4 + 3 * 3 + @test block_stored_length(a) == 2 + @test stored_length(a) == 2 * 4 + 3 * 3 # TODO: Broken on GPU. if dev ≠ cpu @@ -238,8 +234,8 @@ using .NDTensorsTestUtils: devices_list, is_supported_eltype @test iszero(a[Block(1, 1)]) @test iszero(a[Block(2, 1)]) @test iszero(a[Block(2, 2)]) - @test block_nstored(a) == 1 - @test nstored(a) == 2 * 4 + @test block_stored_length(a) == 1 + @test stored_length(a) == 2 * 4 # TODO: Broken on GPU. if dev ≠ cpu @@ -255,8 +251,8 @@ using .NDTensorsTestUtils: devices_list, is_supported_eltype @test iszero(a[Block(2, 1)]) @test iszero(a[Block(1, 2)]) @test iszero(a[Block(2, 2)]) - @test block_nstored(a) == 1 - @test nstored(a) == 2 * 4 + @test block_stored_length(a) == 1 + @test stored_length(a) == 2 * 4 a = dev(BlockSparseArray{elt}(undef, ([2, 3], [3, 4]))) @views for b in [Block(1, 2), Block(2, 1)] @@ -265,8 +261,8 @@ using .NDTensorsTestUtils: devices_list, is_supported_eltype b = similar(a, complex(elt)) @test eltype(b) == complex(eltype(a)) @test iszero(b) - @test block_nstored(b) == 0 - @test nstored(b) == 0 + @test block_stored_length(b) == 0 + @test stored_length(b) == 0 @test size(b) == size(a) @test blocksize(b) == blocksize(a) @@ -274,23 +270,23 @@ using .NDTensorsTestUtils: devices_list, is_supported_eltype b = @view a[[Block(2), Block(1)], [Block(2), Block(1)]] c = @view b[Block(1, 1)] @test iszero(a) - @test iszero(nstored(a)) + @test iszero(stored_length(a)) @test iszero(b) - @test iszero(nstored(b)) + @test iszero(stored_length(b)) # TODO: Broken on GPU. @test iszero(c) broken = dev ≠ cpu - @test iszero(nstored(c)) + @test iszero(stored_length(c)) @allowscalar a[5, 7] = 1 @test !iszero(a) - @test nstored(a) == 3 * 4 + @test stored_length(a) == 3 * 4 @test !iszero(b) - @test nstored(b) == 3 * 4 + @test stored_length(b) == 3 * 4 # TODO: Broken on GPU. 
@test !iszero(c) broken = dev ≠ cpu - @test nstored(c) == 3 * 4 + @test stored_length(c) == 3 * 4 d = @view a[1:4, 1:6] @test iszero(d) - @test nstored(d) == 2 * 3 + @test stored_length(d) == 2 * 3 a = BlockSparseArray{elt}(undef, ([2, 3], [3, 4])) @views for b in [Block(1, 2), Block(2, 1)] @@ -324,8 +320,8 @@ using .NDTensorsTestUtils: devices_list, is_supported_eltype b = 2 * a @allowscalar @test Array(b) ≈ 2 * Array(a) @test eltype(b) == elt - @test block_nstored(b) == 2 - @test nstored(b) == 2 * 4 + 3 * 3 + @test block_stored_length(b) == 2 + @test stored_length(b) == 2 * 4 + 3 * 3 a = BlockSparseArray{elt}(undef, ([2, 3], [3, 4])) @views for b in [Block(1, 2), Block(2, 1)] @@ -334,8 +330,8 @@ using .NDTensorsTestUtils: devices_list, is_supported_eltype b = (2 + 3im) * a @test Array(b) ≈ (2 + 3im) * Array(a) @test eltype(b) == complex(elt) - @test block_nstored(b) == 2 - @test nstored(b) == 2 * 4 + 3 * 3 + @test block_stored_length(b) == 2 + @test stored_length(b) == 2 * 4 + 3 * 3 a = dev(BlockSparseArray{elt}(undef, ([2, 3], [3, 4]))) @views for b in [Block(1, 2), Block(2, 1)] @@ -344,8 +340,8 @@ using .NDTensorsTestUtils: devices_list, is_supported_eltype b = a + a @allowscalar @test Array(b) ≈ 2 * Array(a) @test eltype(b) == elt - @test block_nstored(b) == 2 - @test nstored(b) == 2 * 4 + 3 * 3 + @test block_stored_length(b) == 2 + @test stored_length(b) == 2 * 4 + 3 * 3 a = BlockSparseArray{elt}(undef, ([2, 3], [3, 4])) @views for b in [Block(1, 2), Block(2, 1)] @@ -358,8 +354,8 @@ using .NDTensorsTestUtils: devices_list, is_supported_eltype b = a .+ a .+ 3 .* PermutedDimsArray(x, (2, 1)) @test Array(b) ≈ 2 * Array(a) + 3 * permutedims(Array(x), (2, 1)) @test eltype(b) == elt - @test block_nstored(b) == 2 - @test nstored(b) == 2 * 4 + 3 * 3 + @test block_stored_length(b) == 2 + @test stored_length(b) == 2 * 4 + 3 * 3 a = BlockSparseArray{elt}(undef, ([2, 3], [3, 4])) @views for b in [Block(1, 2), Block(2, 1)] @@ -368,8 +364,8 @@ using .NDTensorsTestUtils: devices_list, is_supported_eltype b = permutedims(a, (2, 1)) @test Array(b) ≈ permutedims(Array(a), (2, 1)) @test eltype(b) == elt - @test block_nstored(b) == 2 - @test nstored(b) == 2 * 4 + 3 * 3 + @test block_stored_length(b) == 2 + @test stored_length(b) == 2 * 4 + 3 * 3 a = dev(BlockSparseArray{elt}([1, 1, 1], [1, 2, 3], [2, 2, 1], [1, 2, 1])) a[Block(3, 2, 2, 3)] = dev(randn(elt, 1, 2, 2, 1)) @@ -389,8 +385,8 @@ using .NDTensorsTestUtils: devices_list, is_supported_eltype @test eltype(b) == elt @test size(b) == size(a) @test blocksize(b) == (2, 2) - @test block_nstored(b) == 2 - @test nstored(b) == 2 * 4 + 3 * 3 + @test block_stored_length(b) == 2 + @test stored_length(b) == 2 * 4 + 3 * 3 a = BlockSparseArray{elt}(undef, ([2, 3], [3, 4])) @views for b in [Block(1, 2), Block(2, 1)] @@ -403,8 +399,8 @@ using .NDTensorsTestUtils: devices_list, is_supported_eltype @test b[Block(2, 2)] == a[Block(1, 1)] @test size(b) == size(a) @test blocksize(b) == (2, 2) - @test nstored(b) == nstored(a) - @test block_nstored(b) == 2 + @test stored_length(b) == stored_length(a) + @test block_stored_length(b) == 2 a = BlockSparseArray{elt}(undef, ([2, 3], [3, 4])) @views for b in [Block(1, 2), Block(2, 1)] @@ -414,8 +410,8 @@ using .NDTensorsTestUtils: devices_list, is_supported_eltype @test b == a @test size(b) == size(a) @test blocksize(b) == (2, 2) - @test nstored(b) == nstored(a) - @test block_nstored(b) == 2 + @test stored_length(b) == stored_length(a) + @test block_stored_length(b) == 2 a = BlockSparseArray{elt}(undef, ([2, 3], [3, 4])) 
@views for b in [Block(1, 2), Block(2, 1)] @@ -427,8 +423,8 @@ using .NDTensorsTestUtils: devices_list, is_supported_eltype @test b[Block(1, 2)] == a[Block(1, 2)] @test size(b) == (2, 7) @test blocksize(b) == (1, 2) - @test nstored(b) == nstored(a[Block(1, 2)]) - @test block_nstored(b) == 1 + @test stored_length(b) == stored_length(a[Block(1, 2)]) + @test block_stored_length(b) == 1 a = dev(BlockSparseArray{elt}(undef, ([2, 3], [3, 4]))) @views for b in [Block(1, 2), Block(2, 1)] @@ -438,8 +434,8 @@ using .NDTensorsTestUtils: devices_list, is_supported_eltype @allowscalar @test b == Array(a)[2:4, 2:4] @test size(b) == (3, 3) @test blocksize(b) == (2, 2) - @test nstored(b) == 1 * 1 + 2 * 2 - @test block_nstored(b) == 2 + @test stored_length(b) == 1 * 1 + 2 * 2 + @test block_stored_length(b) == 2 for f in (getindex, view) # TODO: Broken on GPU. @allowscalar begin @@ -463,18 +459,18 @@ using .NDTensorsTestUtils: devices_list, is_supported_eltype @test b == Array(a)[3:4, 2:3] @test size(b) == (2, 2) @test blocksize(b) == (1, 1) - @test nstored(b) == 2 * 2 - @test block_nstored(b) == 1 + @test stored_length(b) == 2 * 2 + @test block_stored_length(b) == 1 a = BlockSparseArray{elt}(undef, ([2, 3], [3, 4])) @views for b in [Block(1, 2), Block(2, 1)] a[b] = randn(elt, size(a[b])) end b = PermutedDimsArray(a, (2, 1)) - @test block_nstored(b) == 2 + @test block_stored_length(b) == 2 @test Array(b) == permutedims(Array(a), (2, 1)) c = 2 * b - @test block_nstored(c) == 2 + @test block_stored_length(c) == 2 @test Array(c) == 2 * permutedims(Array(a), (2, 1)) a = BlockSparseArray{elt}(undef, ([2, 3], [3, 4])) @@ -482,10 +478,10 @@ using .NDTensorsTestUtils: devices_list, is_supported_eltype a[b] = randn(elt, size(a[b])) end b = a' - @test block_nstored(b) == 2 + @test block_stored_length(b) == 2 @test Array(b) == Array(a)' c = 2 * b - @test block_nstored(c) == 2 + @test block_stored_length(c) == 2 @test Array(c) == 2 * Array(a)' a = BlockSparseArray{elt}(undef, ([2, 3], [3, 4])) @@ -493,10 +489,10 @@ using .NDTensorsTestUtils: devices_list, is_supported_eltype a[b] = randn(elt, size(a[b])) end b = transpose(a) - @test block_nstored(b) == 2 + @test block_stored_length(b) == 2 @test Array(b) == transpose(Array(a)) c = 2 * b - @test block_nstored(c) == 2 + @test block_stored_length(c) == 2 @test Array(c) == 2 * transpose(Array(a)) a = BlockSparseArray{elt}(undef, ([2, 3], [3, 4])) @@ -568,7 +564,7 @@ using .NDTensorsTestUtils: devices_list, is_supported_eltype b .= x @test a[Block(2, 2)[1:2, 2:3]] == x @test a[Block(2, 2)[1:2, 2:3]] == b - @test block_nstored(a) == 1 + @test block_stored_length(a) == 1 a = BlockSparseArray{elt}([2, 3], [2, 3]) @views for b in [Block(1, 1), Block(2, 2)] @@ -608,7 +604,7 @@ using .NDTensorsTestUtils: devices_list, is_supported_eltype a[b] = randn(elt, size(a[b])) end b = a[Block(2):Block(2), Block(1):Block(2)] - @test block_nstored(b) == 1 + @test block_stored_length(b) == 1 @test b == Array(a)[3:5, 1:end] a = BlockSparseArray{elt}(undef, ([2, 3, 4], [2, 3, 4])) @@ -622,7 +618,7 @@ using .NDTensorsTestUtils: devices_list, is_supported_eltype ) for b in (a[I1, I2], @view(a[I1, I2])) # TODO: Rename `block_stored_length`. - @test block_nstored(b) == 2 + @test block_stored_length(b) == 2 @test b[Block(1, 1)] == a[Block(2, 2)[2:3, 2:3]] @test b[Block(2, 2)] == a[Block(3, 3)[1:3, 2:3]] end @@ -642,7 +638,7 @@ using .NDTensorsTestUtils: devices_list, is_supported_eltype @test blocklengths.(axes(b)) == ([3, 3], [2, 2]) # TODO: Rename `block_stored_length`. 
@test blocksize(b) == (2, 2) - @test block_nstored(b) == 2 + @test block_stored_length(b) == 2 a = BlockSparseArray{elt}(undef, ([2, 3], [3, 4])) @views for b in [Block(1, 2), Block(2, 1)] @@ -673,31 +669,31 @@ using .NDTensorsTestUtils: devices_list, is_supported_eltype a = BlockSparseArray{elt}([2, 3], [3, 4]) @test iszero(a) - @test iszero(block_nstored(a)) + @test iszero(block_stored_length(a)) fill!(a, 0) @test iszero(a) - @test iszero(block_nstored(a)) + @test iszero(block_stored_length(a)) fill!(a, 2) @test !iszero(a) @test all(==(2), a) - @test block_nstored(a) == 4 + @test block_stored_length(a) == 4 fill!(a, 0) @test iszero(a) - @test iszero(block_nstored(a)) + @test iszero(block_stored_length(a)) a = BlockSparseArray{elt}([2, 3], [3, 4]) @test iszero(a) - @test iszero(block_nstored(a)) + @test iszero(block_stored_length(a)) a .= 0 @test iszero(a) - @test iszero(block_nstored(a)) + @test iszero(block_stored_length(a)) a .= 2 @test !iszero(a) @test all(==(2), a) - @test block_nstored(a) == 4 + @test block_stored_length(a) == 4 a .= 0 @test iszero(a) - @test iszero(block_nstored(a)) + @test iszero(block_stored_length(a)) # TODO: Broken on GPU. a = BlockSparseArray{elt}([2, 3], [3, 4]) @@ -736,13 +732,13 @@ using .NDTensorsTestUtils: devices_list, is_supported_eltype for abx in (f1(), f2()) @compat (; a, b, x) = abx @test b isa SubArray{<:Any,<:Any,<:BlockSparseArray} - @test block_nstored(b) == 1 + @test block_stored_length(b) == 1 @test b[Block(1, 1)] == x @test @view(b[Block(1, 1)]) isa Matrix{elt} for blck in [Block(2, 1), Block(1, 2), Block(2, 2)] @test iszero(b[blck]) end - @test block_nstored(a) == 1 + @test block_stored_length(a) == 1 @test a[Block(2, 2)] == x for blck in [Block(1, 1), Block(2, 1), Block(1, 2)] @test iszero(a[blck]) @@ -758,7 +754,7 @@ using .NDTensorsTestUtils: devices_list, is_supported_eltype b .= x @test b == x @test a[Block(1, 2)] == x - @test block_nstored(a) == 1 + @test block_stored_length(a) == 1 a = BlockSparseArray{elt}([4, 3, 2], [4, 3, 2]) @views for B in [Block(1, 1), Block(2, 2), Block(3, 3)] @@ -769,7 +765,7 @@ using .NDTensorsTestUtils: devices_list, is_supported_eltype c = @view b[4:8, 4:8] @test c isa SubArray{<:Any,<:Any,<:BlockSparseArray} @test size(c) == (5, 5) - @test block_nstored(c) == 2 + @test block_stored_length(c) == 2 @test blocksize(c) == (2, 2) @test blocklengths.(axes(c)) == ([2, 3], [2, 3]) @test size(c[Block(1, 1)]) == (2, 2) @@ -916,7 +912,7 @@ using .NDTensorsTestUtils: devices_list, is_supported_eltype a_dest = a1 * a2 @allowscalar @test Array(a_dest) ≈ Array(a1) * Array(a2) @test a_dest isa BlockSparseArray{elt} - @test block_nstored(a_dest) == 1 + @test block_stored_length(a_dest) == 1 end @testset "Matrix multiplication" begin a1 = dev(BlockSparseArray{elt}([2, 3], [2, 3])) @@ -947,21 +943,21 @@ using .NDTensorsTestUtils: devices_list, is_supported_eltype a2[Block(1, 2)] = dev(randn(elt, size(@view(a2[Block(1, 2)])))) a_dest = cat(a1, a2; dims=1) - @test block_nstored(a_dest) == 2 + @test block_stored_length(a_dest) == 2 @test blocklengths.(axes(a_dest)) == ([2, 3, 2, 3], [2, 3]) @test issetequal(block_stored_indices(a_dest), [Block(2, 1), Block(3, 2)]) @test a_dest[Block(2, 1)] == a1[Block(2, 1)] @test a_dest[Block(3, 2)] == a2[Block(1, 2)] a_dest = cat(a1, a2; dims=2) - @test block_nstored(a_dest) == 2 + @test block_stored_length(a_dest) == 2 @test blocklengths.(axes(a_dest)) == ([2, 3], [2, 3, 2, 3]) @test issetequal(block_stored_indices(a_dest), [Block(2, 1), Block(1, 4)]) @test a_dest[Block(2, 1)] == a1[Block(2, 
1)] @test a_dest[Block(1, 4)] == a2[Block(1, 2)] a_dest = cat(a1, a2; dims=(1, 2)) - @test block_nstored(a_dest) == 2 + @test block_stored_length(a_dest) == 2 @test blocklengths.(axes(a_dest)) == ([2, 3, 2, 3], [2, 3, 2, 3]) @test issetequal(block_stored_indices(a_dest), [Block(2, 1), Block(3, 4)]) @test a_dest[Block(2, 1)] == a1[Block(2, 1)] @@ -988,8 +984,8 @@ using .NDTensorsTestUtils: devices_list, is_supported_eltype b = block_reshape(a, [6, 8, 9, 12]) @test reshape(a[Block(1, 2)], 9) == b[Block(3)] @test reshape(a[Block(2, 1)], 8) == b[Block(2)] - @test block_nstored(b) == 2 - @test nstored(b) == 17 + @test block_stored_length(b) == 2 + @test stored_length(b) == 17 end end end diff --git a/NDTensors/src/lib/DiagonalArrays/test/runtests.jl b/NDTensors/src/lib/DiagonalArrays/test/runtests.jl index cd605cf72b..6b90974f81 100644 --- a/NDTensors/src/lib/DiagonalArrays/test/runtests.jl +++ b/NDTensors/src/lib/DiagonalArrays/test/runtests.jl @@ -2,7 +2,7 @@ using Test: @test, @testset, @test_broken using NDTensors.DiagonalArrays: DiagonalArrays, DiagonalArray, DiagonalMatrix, diaglength using NDTensors.SparseArrayDOKs: SparseArrayDOK -using NDTensors.SparseArrayInterface: nstored +using NDTensors.SparseArrayInterface: stored_length @testset "Test NDTensors.DiagonalArrays" begin @testset "README" begin @test include( @@ -31,7 +31,7 @@ using NDTensors.SparseArrayInterface: nstored # TODO: Use `densearray` to make generic to GPU. @test Array(a_dest) ≈ Array(a1) * Array(a2) # TODO: Make this work with `ArrayLayouts`. - @test nstored(a_dest) == 2 + @test stored_length(a_dest) == 2 @test a_dest isa DiagonalMatrix{elt} # TODO: Make generic to GPU, use `allocate_randn`? @@ -39,7 +39,7 @@ using NDTensors.SparseArrayInterface: nstored a_dest = a1 * a2 # TODO: Use `densearray` to make generic to GPU. @test Array(a_dest) ≈ Array(a1) * Array(a2) - @test nstored(a_dest) == 8 + @test stored_length(a_dest) == 8 @test a_dest isa Matrix{elt} a2 = SparseArrayDOK{elt}(3, 4) @@ -51,7 +51,7 @@ using NDTensors.SparseArrayInterface: nstored @test Array(a_dest) ≈ Array(a1) * Array(a2) # TODO: Define `SparseMatrixDOK`. # TODO: Make this work with `ArrayLayouts`. 
- @test nstored(a_dest) == 2 + @test stored_length(a_dest) == 2 @test a_dest isa SparseArrayDOK{elt,2} end end diff --git a/NDTensors/src/lib/GradedAxes/src/gradedunitrangedual.jl b/NDTensors/src/lib/GradedAxes/src/gradedunitrangedual.jl index df65ed6155..a16da982f9 100644 --- a/NDTensors/src/lib/GradedAxes/src/gradedunitrangedual.jl +++ b/NDTensors/src/lib/GradedAxes/src/gradedunitrangedual.jl @@ -57,62 +57,41 @@ end function blockedunitrange_getindices( a::GradedUnitRangeDual, indices::Vector{<:BlockIndexRange{1}} ) - a_indices = getindex(nondual(a), indices) - v = mortar(dual.(blocks(a_indices))) - # flip v to stay consistent with other cases where axes(v) are used - return flip_blockvector(v) + # dual v axes to stay consistent with other cases where axes(v) are used + return dual_axes(blockedunitrange_getindices(nondual(a), indices)) end function blockedunitrange_getindices( a::GradedUnitRangeDual, indices::BlockVector{<:BlockIndex{1},<:Vector{<:BlockIndexRange{1}}}, ) - v = mortar(map(b -> a[b], blocks(indices))) - # GradedOneTo appears in mortar - # flip v axis to preserve dual information + # dual v axis to preserve dual information # axes(v) will appear in axes(view(::BlockSparseArray, [Block(1)[1:1]])) - return flip_blockvector(v) + return dual_axes(blockedunitrange_getindices(nondual(a), indices)) end function blockedunitrange_getindices( a::GradedUnitRangeDual, indices::AbstractVector{<:Union{Block{1},BlockIndexRange{1}}} ) - # Without converting `indices` to `Vector`, - # mapping `indices` outputs a `BlockVector` - # which is harder to reason about. - vblocks = map(index -> a[index], Vector(indices)) - # We pass `length.(blocks)` to `mortar` in order - # to pass block labels to the axes of the output, - # if they exist. This makes it so that - # `only(axes(a[indices])) isa `GradedUnitRange` - # if `a isa `GradedUnitRange`, for example. - - v = mortar(vblocks, length.(vblocks)) - # GradedOneTo appears in mortar - # flip v axis to preserve dual information + # dual v axis to preserve dual information # axes(v) will appear in axes(view(::BlockSparseArray, [Block(1)])) - return flip_blockvector(v) + return dual_axes(blockedunitrange_getindices(nondual(a), indices)) end # Fixes ambiguity error. -# TODO: Write this in terms of `blockedunitrange_getindices(dual(a), indices)`. function blockedunitrange_getindices( a::GradedUnitRangeDual, indices::AbstractBlockVector{<:Block{1}} ) - blks = map(bs -> mortar(map(b -> a[b], bs)), blocks(indices)) - # We pass `length.(blks)` to `mortar` in order - # to pass block labels to the axes of the output, - # if they exist. This makes it so that - # `only(axes(a[indices])) isa `GradedUnitRange` - # if `a isa `GradedUnitRange`, for example. - v = mortar(blks, labelled_length.(blks)) - return flip_blockvector(v) -end - -function flip_blockvector(v::BlockVector) - block_axes = flip.(axes(v)) - flipped = mortar(vec.(blocks(v)), block_axes) - return flipped + v = blockedunitrange_getindices(nondual(a), indices) + # v elements are not dualled by dual_axes due to different structure. + # take element dual here. 
+ return dual_axes(dual.(v)) +end + +function dual_axes(v::BlockVector) + # dual both v elements and v axes + block_axes = dual.(axes(v)) + return mortar(dual.(blocks(v)), block_axes) end Base.axes(a::GradedUnitRangeDual) = axes(nondual(a)) diff --git a/NDTensors/src/lib/GradedAxes/test/test_dual.jl b/NDTensors/src/lib/GradedAxes/test/test_dual.jl index 98b8838542..f2b3072dc1 100644 --- a/NDTensors/src/lib/GradedAxes/test/test_dual.jl +++ b/NDTensors/src/lib/GradedAxes/test/test_dual.jl @@ -219,14 +219,35 @@ end @test label(ad[Block(2)]) == U1(-1) @test label(ad[Block(2)[1:1]]) == U1(-1) - I = mortar([Block(2)[1:1]]) - g = ad[I] - @test length(g) == 1 - @test label(first(g)) == U1(-1) - @test isdual(g[Block(1)]) + v = ad[[Block(2)[1:1]]] + @test v isa AbstractVector{LabelledInteger{Int64,U1}} + @test length(v) == 1 + @test label(first(v)) == U1(-1) + @test unlabel(first(v)) == 3 + @test isdual(v[Block(1)]) + @test isdual(axes(v, 1)) + @test blocklabels(axes(v, 1)) == [U1(-1)] - @test isdual(axes(ad[[Block(1)]], 1)) # used in view(::BlockSparseVector, [Block(1)]) - @test isdual(axes(ad[mortar([Block(1)[1:1]])], 1)) # used in view(::BlockSparseVector, [Block(1)[1:1]]) + v = ad[mortar([Block(2)[1:1]])] + @test v isa AbstractVector{LabelledInteger{Int64,U1}} + @test isdual(axes(v, 1)) # used in view(::BlockSparseVector, [Block(1)[1:1]]) + @test label(first(v)) == U1(-1) + @test unlabel(first(v)) == 3 + @test blocklabels(axes(v, 1)) == [U1(-1)] + + v = ad[[Block(2)]] + @test v isa AbstractVector{LabelledInteger{Int64,U1}} + @test isdual(axes(v, 1)) # used in view(::BlockSparseVector, [Block(1)]) + @test label(first(v)) == U1(-1) + @test unlabel(first(v)) == 3 + @test blocklabels(axes(v, 1)) == [U1(-1)] + + v = ad[mortar([[Block(2)], [Block(1)]])] + @test v isa AbstractVector{LabelledInteger{Int64,U1}} + @test isdual(axes(v, 1)) + @test label(first(v)) == U1(-1) + @test unlabel(first(v)) == 3 + @test blocklabels(axes(v, 1)) == [U1(-1), U1(0)] end end diff --git a/NDTensors/src/lib/SparseArrayDOKs/test/runtests.jl b/NDTensors/src/lib/SparseArrayDOKs/test/runtests.jl index ee2262bea0..4bf44f99b8 100644 --- a/NDTensors/src/lib/SparseArrayDOKs/test/runtests.jl +++ b/NDTensors/src/lib/SparseArrayDOKs/test/runtests.jl @@ -9,7 +9,7 @@ using Dictionaries: Dictionary using Test: @test, @testset, @test_broken using NDTensors.SparseArrayDOKs: SparseArrayDOKs, SparseArrayDOK, SparseMatrixDOK, @maybe_grow -using NDTensors.SparseArrayInterface: storage_indices, nstored +using NDTensors.SparseArrayInterface: storage_indices, stored_length using SparseArrays: SparseMatrixCSC, nnz @testset "SparseArrayDOK (eltype=$elt)" for elt in (Float32, ComplexF32, Float64, ComplexF64) @@ -20,7 +20,7 @@ using SparseArrays: SparseMatrixCSC, nnz @test a == SparseArrayDOK{elt}(undef, (3, 4)) @test iszero(a) @test iszero(nnz(a)) - @test nstored(a) == nnz(a) + @test stored_length(a) == nnz(a) @test size(a) == (3, 4) @test eltype(a) == elt for I in eachindex(a) @@ -40,16 +40,16 @@ using SparseArrays: SparseMatrixCSC, nnz @test !iszero(b) @test b[1, 2] == x12 @test b[2, 3] == x23 - @test iszero(nstored(a)) - @test nstored(b) == 2 + @test iszero(stored_length(a)) + @test stored_length(b) == 2 end @testset "map/broadcast" begin a = SparseArrayDOK{elt}(3, 4) a[1, 1] = 11 a[3, 4] = 34 - @test nstored(a) == 2 + @test stored_length(a) == 2 b = 2 * a - @test nstored(b) == 2 + @test stored_length(b) == 2 @test b[1, 1] == 2 * 11 @test b[3, 4] == 2 * 34 end @@ -71,14 +71,14 @@ using SparseArrays: SparseMatrixCSC, nnz # TODO: Use `densearray` 
to make generic to GPU. @test Array(a_dest) ≈ Array(a1) * Array(a2) # TODO: Make this work with `ArrayLayouts`. - @test nstored(a_dest) == 2 + @test stored_length(a_dest) == 2 @test a_dest isa SparseMatrixDOK{elt} a2 = randn(elt, (3, 4)) a_dest = a1 * a2 # TODO: Use `densearray` to make generic to GPU. @test Array(a_dest) ≈ Array(a1) * Array(a2) - @test nstored(a_dest) == 8 + @test stored_length(a_dest) == 8 @test a_dest isa Matrix{elt} end @testset "SparseMatrixCSC" begin diff --git a/NDTensors/src/lib/SparseArrayInterface/src/abstractsparsearray/SparseArrayInterfaceSparseArraysExt.jl b/NDTensors/src/lib/SparseArrayInterface/src/abstractsparsearray/SparseArrayInterfaceSparseArraysExt.jl index 9b4ce3123f..828a681cc0 100644 --- a/NDTensors/src/lib/SparseArrayInterface/src/abstractsparsearray/SparseArrayInterfaceSparseArraysExt.jl +++ b/NDTensors/src/lib/SparseArrayInterface/src/abstractsparsearray/SparseArrayInterfaceSparseArraysExt.jl @@ -1,9 +1,9 @@ using Base: Forward using SparseArrays: SparseArrays, SparseMatrixCSC, findnz, getcolptr, nonzeros, rowvals -using ..SparseArrayInterface: nstored +using ..SparseArrayInterface: stored_length # Julia Base `AbstractSparseArray` interface -SparseArrays.nnz(a::AbstractSparseArray) = nstored(a) +SparseArrays.nnz(a::AbstractSparseArray) = stored_length(a) sparse_storage(a::SparseMatrixCSC) = nonzeros(a) function storage_index_to_index(a::SparseMatrixCSC, I) diff --git a/NDTensors/src/lib/SparseArrayInterface/src/abstractsparsearray/arraylayouts.jl b/NDTensors/src/lib/SparseArrayInterface/src/abstractsparsearray/arraylayouts.jl index 58176846ca..293d58f3ce 100644 --- a/NDTensors/src/lib/SparseArrayInterface/src/abstractsparsearray/arraylayouts.jl +++ b/NDTensors/src/lib/SparseArrayInterface/src/abstractsparsearray/arraylayouts.jl @@ -2,7 +2,7 @@ using ArrayLayouts: ArrayLayouts, Dot, DualLayout, MatMulMatAdd, MatMulVecAdd, M using LinearAlgebra: Adjoint, Transpose using ..TypeParameterAccessors: parenttype -function ArrayLayouts.MemoryLayout(arraytype::Type{<:SparseArrayLike}) +function ArrayLayouts.MemoryLayout(arraytype::Type{<:AnyAbstractSparseArray}) return SparseLayout() end diff --git a/NDTensors/src/lib/SparseArrayInterface/src/abstractsparsearray/base.jl b/NDTensors/src/lib/SparseArrayInterface/src/abstractsparsearray/base.jl index 8480b2fd88..5be8c9c1b3 100644 --- a/NDTensors/src/lib/SparseArrayInterface/src/abstractsparsearray/base.jl +++ b/NDTensors/src/lib/SparseArrayInterface/src/abstractsparsearray/base.jl @@ -1,18 +1,18 @@ using ..SparseArrayInterface: SparseArrayInterface # Base -function Base.:(==)(a1::SparseArrayLike, a2::SparseArrayLike) +function Base.:(==)(a1::AnyAbstractSparseArray, a2::AnyAbstractSparseArray) return SparseArrayInterface.sparse_isequal(a1, a2) end -function Base.reshape(a::SparseArrayLike, dims::Tuple{Vararg{Int}}) +function Base.reshape(a::AnyAbstractSparseArray, dims::Tuple{Vararg{Int}}) return SparseArrayInterface.sparse_reshape(a, dims) end -function Base.zero(a::SparseArrayLike) +function Base.zero(a::AnyAbstractSparseArray) return SparseArrayInterface.sparse_zero(a) end -function Base.one(a::SparseArrayLike) +function Base.one(a::AnyAbstractSparseArray) return SparseArrayInterface.sparse_one(a) end diff --git a/NDTensors/src/lib/SparseArrayInterface/src/abstractsparsearray/broadcast.jl b/NDTensors/src/lib/SparseArrayInterface/src/abstractsparsearray/broadcast.jl index fdade0f775..de67af5495 100644 --- a/NDTensors/src/lib/SparseArrayInterface/src/abstractsparsearray/broadcast.jl +++ 
diff --git a/NDTensors/src/lib/SparseArrayInterface/src/abstractsparsearray/arraylayouts.jl b/NDTensors/src/lib/SparseArrayInterface/src/abstractsparsearray/arraylayouts.jl
index 58176846ca..293d58f3ce 100644
--- a/NDTensors/src/lib/SparseArrayInterface/src/abstractsparsearray/arraylayouts.jl
+++ b/NDTensors/src/lib/SparseArrayInterface/src/abstractsparsearray/arraylayouts.jl
@@ -2,7 +2,7 @@ using ArrayLayouts: ArrayLayouts, Dot, DualLayout, MatMulMatAdd, MatMulVecAdd, M
 using LinearAlgebra: Adjoint, Transpose
 using ..TypeParameterAccessors: parenttype
 
-function ArrayLayouts.MemoryLayout(arraytype::Type{<:SparseArrayLike})
+function ArrayLayouts.MemoryLayout(arraytype::Type{<:AnyAbstractSparseArray})
   return SparseLayout()
 end
diff --git a/NDTensors/src/lib/SparseArrayInterface/src/abstractsparsearray/base.jl b/NDTensors/src/lib/SparseArrayInterface/src/abstractsparsearray/base.jl
index 8480b2fd88..5be8c9c1b3 100644
--- a/NDTensors/src/lib/SparseArrayInterface/src/abstractsparsearray/base.jl
+++ b/NDTensors/src/lib/SparseArrayInterface/src/abstractsparsearray/base.jl
@@ -1,18 +1,18 @@
 using ..SparseArrayInterface: SparseArrayInterface
 
 # Base
-function Base.:(==)(a1::SparseArrayLike, a2::SparseArrayLike)
+function Base.:(==)(a1::AnyAbstractSparseArray, a2::AnyAbstractSparseArray)
   return SparseArrayInterface.sparse_isequal(a1, a2)
 end
 
-function Base.reshape(a::SparseArrayLike, dims::Tuple{Vararg{Int}})
+function Base.reshape(a::AnyAbstractSparseArray, dims::Tuple{Vararg{Int}})
   return SparseArrayInterface.sparse_reshape(a, dims)
 end
 
-function Base.zero(a::SparseArrayLike)
+function Base.zero(a::AnyAbstractSparseArray)
   return SparseArrayInterface.sparse_zero(a)
 end
 
-function Base.one(a::SparseArrayLike)
+function Base.one(a::AnyAbstractSparseArray)
   return SparseArrayInterface.sparse_one(a)
 end
diff --git a/NDTensors/src/lib/SparseArrayInterface/src/abstractsparsearray/broadcast.jl b/NDTensors/src/lib/SparseArrayInterface/src/abstractsparsearray/broadcast.jl
index fdade0f775..de67af5495 100644
--- a/NDTensors/src/lib/SparseArrayInterface/src/abstractsparsearray/broadcast.jl
+++ b/NDTensors/src/lib/SparseArrayInterface/src/abstractsparsearray/broadcast.jl
@@ -1,4 +1,4 @@
 # Broadcasting
-function Broadcast.BroadcastStyle(arraytype::Type{<:SparseArrayLike})
+function Broadcast.BroadcastStyle(arraytype::Type{<:AnyAbstractSparseArray})
   return SparseArrayInterface.SparseArrayStyle{ndims(arraytype)}()
 end
diff --git a/NDTensors/src/lib/SparseArrayInterface/src/abstractsparsearray/cat.jl b/NDTensors/src/lib/SparseArrayInterface/src/abstractsparsearray/cat.jl
index a9db504e38..3d0475159c 100644
--- a/NDTensors/src/lib/SparseArrayInterface/src/abstractsparsearray/cat.jl
+++ b/NDTensors/src/lib/SparseArrayInterface/src/abstractsparsearray/cat.jl
@@ -1,4 +1,3 @@
-# TODO: Change to `AnyAbstractSparseArray`.
-function Base.cat(as::SparseArrayLike...; dims)
+function Base.cat(as::AnyAbstractSparseArray...; dims)
   return sparse_cat(as...; dims)
 end
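The `cat` overload above routes concatenation of any sparse array (wrapped or not) through `sparse_cat`, which only copies stored entries. A rough sketch, with hypothetical values mirroring the tests further down:

    using NDTensors.SparseArrayDOKs: SparseArrayDOK
    using NDTensors.SparseArrayInterface: stored_length

    a1 = SparseArrayDOK{Float64}(2, 3)
    a1[1, 2] = 12.0
    a2 = SparseArrayDOK{Float64}(2, 3)
    a2[2, 1] = 21.0
    a_dest = cat(a1, a2; dims=1)  # dispatches to `sparse_cat`
    size(a_dest)                  # (4, 3)
    stored_length(a_dest)         # 2: concatenation preserves sparsity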
diff --git a/NDTensors/src/lib/SparseArrayInterface/src/abstractsparsearray/map.jl b/NDTensors/src/lib/SparseArrayInterface/src/abstractsparsearray/map.jl
index bc85aa1099..4dd96f907b 100644
--- a/NDTensors/src/lib/SparseArrayInterface/src/abstractsparsearray/map.jl
+++ b/NDTensors/src/lib/SparseArrayInterface/src/abstractsparsearray/map.jl
@@ -1,42 +1,42 @@
 using ArrayLayouts: LayoutArray
 
 # Map
-function Base.map!(f, a_dest::AbstractArray, a_srcs::Vararg{SparseArrayLike})
+function Base.map!(f, a_dest::AbstractArray, a_srcs::Vararg{AnyAbstractSparseArray})
   SparseArrayInterface.sparse_map!(f, a_dest, a_srcs...)
   return a_dest
 end
 
-function Base.copy!(a_dest::AbstractArray, a_src::SparseArrayLike)
+function Base.copy!(a_dest::AbstractArray, a_src::AnyAbstractSparseArray)
   SparseArrayInterface.sparse_copy!(a_dest, a_src)
   return a_dest
 end
 
-function Base.copyto!(a_dest::AbstractArray, a_src::SparseArrayLike)
+function Base.copyto!(a_dest::AbstractArray, a_src::AnyAbstractSparseArray)
   SparseArrayInterface.sparse_copyto!(a_dest, a_src)
   return a_dest
 end
 
 # Fix ambiguity error
-function Base.copyto!(a_dest::LayoutArray, a_src::SparseArrayLike)
+function Base.copyto!(a_dest::LayoutArray, a_src::AnyAbstractSparseArray)
   SparseArrayInterface.sparse_copyto!(a_dest, a_src)
   return a_dest
 end
 
-function Base.permutedims!(a_dest::AbstractArray, a_src::SparseArrayLike, perm)
+function Base.permutedims!(a_dest::AbstractArray, a_src::AnyAbstractSparseArray, perm)
   SparseArrayInterface.sparse_permutedims!(a_dest, a_src, perm)
   return a_dest
 end
 
-function Base.mapreduce(f, op, as::Vararg{SparseArrayLike}; kwargs...)
+function Base.mapreduce(f, op, as::Vararg{AnyAbstractSparseArray}; kwargs...)
   return SparseArrayInterface.sparse_mapreduce(f, op, as...; kwargs...)
 end
 
 # TODO: Why isn't this calling `mapreduce` already?
-function Base.iszero(a::SparseArrayLike)
+function Base.iszero(a::AnyAbstractSparseArray)
   return SparseArrayInterface.sparse_iszero(a)
 end
 
 # TODO: Why isn't this calling `mapreduce` already?
-function Base.isreal(a::SparseArrayLike)
+function Base.isreal(a::AnyAbstractSparseArray)
   return SparseArrayInterface.sparse_isreal(a)
 end
diff --git a/NDTensors/src/lib/SparseArrayInterface/src/abstractsparsearray/wrappedabstractsparsearray.jl b/NDTensors/src/lib/SparseArrayInterface/src/abstractsparsearray/wrappedabstractsparsearray.jl
index e42ac91e8b..f9ec08acb7 100644
--- a/NDTensors/src/lib/SparseArrayInterface/src/abstractsparsearray/wrappedabstractsparsearray.jl
+++ b/NDTensors/src/lib/SparseArrayInterface/src/abstractsparsearray/wrappedabstractsparsearray.jl
@@ -4,6 +4,6 @@ const WrappedAbstractSparseArray{T,N,A} = WrappedArray{
   T,N,<:AbstractSparseArray,<:AbstractSparseArray{T,N}
 }
 
-const SparseArrayLike{T,N} = Union{
+const AnyAbstractSparseArray{T,N} = Union{
   <:AbstractSparseArray{T,N},<:WrappedAbstractSparseArray{T,N}
 }
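To make the rename concrete: the union matches both a sparse array itself and standard wrappers around one, so the `map!`/`copyto!`/`mapreduce` overloads above also fire for, say, a transpose. A sketch (assumes `SparseArrayDOK` as a stand-in for any `AbstractSparseArray` subtype):

    using LinearAlgebra: transpose
    using NDTensors.SparseArrayDOKs: SparseArrayDOK

    a = SparseArrayDOK{Float64}(2, 3)
    a[1, 2] = 12.0
    at = transpose(a)  # a `WrappedArray` around an `AbstractSparseArray`
    # Both `a` and `at` match `AnyAbstractSparseArray`, so equality,
    # `iszero`, broadcasting, etc. take the sparse code paths:
    iszero(at)  # false, determined without densifying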
diff --git a/NDTensors/src/lib/SparseArrayInterface/src/sparsearrayinterface/SparseArrayInterfaceLinearAlgebraExt.jl b/NDTensors/src/lib/SparseArrayInterface/src/sparsearrayinterface/SparseArrayInterfaceLinearAlgebraExt.jl
index 633506d630..4e4ed259d0 100644
--- a/NDTensors/src/lib/SparseArrayInterface/src/sparsearrayinterface/SparseArrayInterfaceLinearAlgebraExt.jl
+++ b/NDTensors/src/lib/SparseArrayInterface/src/sparsearrayinterface/SparseArrayInterfaceLinearAlgebraExt.jl
@@ -61,7 +61,6 @@ function sparse_dot(a1::AbstractArray, a2::AbstractArray)
   size(a1) == size(a2) ||
     throw(DimensionMismatch("Sizes $(size(a1)) and $(size(a2)) don't match."))
   dot_dest = zero(Base.promote_op(dot, eltype(a1), eltype(a2)))
-  # TODO: First check if the number of stored elements (`nstored`, to be renamed
-  # `stored_length`) is smaller in `a1` or `a2` and use whicheven one is smallar
-  # as the outer loop.
+  # TODO: First check whether the number of stored elements (`stored_length`)
+  # is smaller in `a1` or `a2` and use whichever one is smaller as the outer loop.
   for I1 in stored_indices(a1)
diff --git a/NDTensors/src/lib/SparseArrayInterface/src/sparsearrayinterface/base.jl b/NDTensors/src/lib/SparseArrayInterface/src/sparsearrayinterface/base.jl
index 16144e66bc..9a6fd24941 100644
--- a/NDTensors/src/lib/SparseArrayInterface/src/sparsearrayinterface/base.jl
+++ b/NDTensors/src/lib/SparseArrayInterface/src/sparsearrayinterface/base.jl
@@ -99,7 +99,7 @@ end
 function sparse_isequal(a1::AbstractArray, a2::AbstractArray)
   Is = collect(stored_indices(a1))
   intersect!(Is, stored_indices(a2))
-  if !(length(Is) == nstored(a1) == nstored(a2))
+  if !(length(Is) == stored_length(a1) == stored_length(a2))
     return false
   end
   for I in Is
diff --git a/NDTensors/src/lib/SparseArrayInterface/src/sparsearrayinterface/indexing.jl b/NDTensors/src/lib/SparseArrayInterface/src/sparsearrayinterface/indexing.jl
index f416ca421e..5f8c1cad7e 100644
--- a/NDTensors/src/lib/SparseArrayInterface/src/sparsearrayinterface/indexing.jl
+++ b/NDTensors/src/lib/SparseArrayInterface/src/sparsearrayinterface/indexing.jl
@@ -20,7 +20,7 @@ end
 index(i::StoredIndex) = i.iouter
 StorageIndex(i::StoredIndex) = i.istorage
 
-nstored(a::AbstractArray) = length(sparse_storage(a))
+stored_length(a::AbstractArray) = length(sparse_storage(a))
 
 struct NotStoredIndex{Iouter} <: MaybeStoredIndex{Iouter}
   iouter::Iouter
diff --git a/NDTensors/src/lib/SparseArrayInterface/src/sparsearrayinterface/map.jl b/NDTensors/src/lib/SparseArrayInterface/src/sparsearrayinterface/map.jl
index 8a5f32d8a6..0f9d9aad5f 100644
--- a/NDTensors/src/lib/SparseArrayInterface/src/sparsearrayinterface/map.jl
+++ b/NDTensors/src/lib/SparseArrayInterface/src/sparsearrayinterface/map.jl
@@ -8,7 +8,7 @@ struct NotStoredValue{Value}
   value::Value
 end
 value(v::NotStoredValue) = v.value
-nstored(::NotStoredValue) = false
+stored_length(::NotStoredValue) = false
 Base.:*(x::Number, y::NotStoredValue) = false
 Base.:*(x::NotStoredValue, y::Number) = false
 Base.:/(x::NotStoredValue, y::Number) = false
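Note the generic definition above: for any array implementing `sparse_storage`, the stored length is just the length of that storage. Dense arrays store every element, which is exactly what the tests below rely on when a sparse-dense sum produces a dense `Matrix`:

    using NDTensors.SparseArrayInterface: stored_length

    a = zeros(2, 3)   # a dense array stores all of its elements
    stored_length(a)  # 6 == length(a)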
diff --git a/NDTensors/src/lib/SparseArrayInterface/test/test_abstractsparsearray.jl b/NDTensors/src/lib/SparseArrayInterface/test/test_abstractsparsearray.jl
index 47cf6668c6..8ec5174463 100644
--- a/NDTensors/src/lib/SparseArrayInterface/test/test_abstractsparsearray.jl
+++ b/NDTensors/src/lib/SparseArrayInterface/test/test_abstractsparsearray.jl
@@ -14,7 +14,7 @@ using Test: @test, @testset
     @test size(a) == (2, 3)
     @test axes(a) == (1:2, 1:3)
     @test SparseArrayInterface.sparse_storage(a) == elt[]
-    @test iszero(SparseArrayInterface.nstored(a))
+    @test iszero(SparseArrayInterface.stored_length(a))
     @test collect(SparseArrayInterface.stored_indices(a)) == CartesianIndex{2}[]
     @test iszero(a)
     @test iszero(norm(a))
@@ -34,7 +34,7 @@ using Test: @test, @testset
     fill!(a, 0)
     @test size(a) == (2, 3)
     @test iszero(a)
-    @test iszero(SparseArrayInterface.nstored(a))
+    @test iszero(SparseArrayInterface.stored_length(a))
 
     a_dense = SparseArrayInterface.densearray(a)
     @test a_dense == a
@@ -44,7 +44,7 @@ using Test: @test, @testset
     fill!(a, 2)
     @test size(a) == (2, 3)
     @test !iszero(a)
-    @test SparseArrayInterface.nstored(a) == length(a)
+    @test SparseArrayInterface.stored_length(a) == length(a)
     for I in eachindex(a)
       @test a[I] == 2
     end
@@ -57,7 +57,7 @@ using Test: @test, @testset
     @test axes(a) == (1:2, 1:3)
     @test a[SparseArrayInterface.StorageIndex(1)] == 12
     @test SparseArrayInterface.sparse_storage(a) == elt[12]
-    @test isone(SparseArrayInterface.nstored(a))
+    @test isone(SparseArrayInterface.stored_length(a))
     @test collect(SparseArrayInterface.stored_indices(a)) == [CartesianIndex(1, 2)]
     @test !iszero(a)
     @test !iszero(norm(a))
@@ -97,7 +97,7 @@ using Test: @test, @testset
     a[1, 2] = 12
     a = zero(a)
     @test size(a) == (2, 3)
-    @test iszero(SparseArrayInterface.nstored(a))
+    @test iszero(SparseArrayInterface.stored_length(a))
 
     a = SparseArray{elt}(2, 3)
     a[1, 2] = 12
@@ -144,7 +144,7 @@ using Test: @test, @testset
     a[1, 2] = 12
     a = zero(a)
     @test size(a) == (2, 3)
-    @test iszero(SparseArrayInterface.nstored(a))
+    @test iszero(SparseArrayInterface.stored_length(a))
 
     a = SparseArray{elt}(2, 3)
     a[1, 2] = 12
@@ -152,7 +152,7 @@ using Test: @test, @testset
     @test size(a) == (2, 3)
     @test axes(a) == (1:2, 1:3)
     @test SparseArrayInterface.sparse_storage(a) == elt[12]
-    @test isone(SparseArrayInterface.nstored(a))
+    @test isone(SparseArrayInterface.stored_length(a))
     @test SparseArrayInterface.storage_indices(a) == 1:1
     @test collect(SparseArrayInterface.stored_indices(a)) == [CartesianIndex(1, 2)]
     @test !iszero(a)
@@ -171,7 +171,7 @@ using Test: @test, @testset
     @test size(a) == (2, 3)
     @test axes(a) == (1:2, 1:3)
     @test SparseArrayInterface.sparse_storage(a) == elt[24]
-    @test isone(SparseArrayInterface.nstored(a))
+    @test isone(SparseArrayInterface.stored_length(a))
     @test collect(SparseArrayInterface.stored_indices(a)) == [CartesianIndex(1, 2)]
     @test !iszero(a)
     @test !iszero(norm(a))
@@ -191,7 +191,7 @@ using Test: @test, @testset
     @test size(c) == (2, 3)
     @test axes(c) == (1:2, 1:3)
     @test SparseArrayInterface.sparse_storage(c) == elt[12, 21]
-    @test SparseArrayInterface.nstored(c) == 2
+    @test SparseArrayInterface.stored_length(c) == 2
     @test collect(SparseArrayInterface.stored_indices(c)) ==
       [CartesianIndex(1, 2), CartesianIndex(2, 1)]
     @test !iszero(c)
@@ -212,7 +212,7 @@ using Test: @test, @testset
     @test size(b) == (3, 2)
     @test axes(b) == (1:3, 1:2)
     @test SparseArrayInterface.sparse_storage(b) == elt[12]
-    @test SparseArrayInterface.nstored(b) == 1
+    @test SparseArrayInterface.stored_length(b) == 1
     @test collect(SparseArrayInterface.stored_indices(b)) == [CartesianIndex(2, 1)]
     @test !iszero(b)
     @test !iszero(norm(b))
@@ -271,7 +271,7 @@ using Test: @test, @testset
     @test a .+ 2 .* b == Array(a) + 2b
     @test a + b isa Matrix{elt}
     @test b + a isa Matrix{elt}
-    @test SparseArrayInterface.nstored(a + b) == length(a)
+    @test SparseArrayInterface.stored_length(a + b) == length(a)
 
     a = SparseArray{elt}(2, 3)
     a[1, 2] = 12
@@ -282,10 +282,10 @@ using Test: @test, @testset
     @test a′ == a + b
     # TODO: Should this be:
     # ```julia
-    # @test SparseArrayInterface.nstored(a′) == 2
+    # @test SparseArrayInterface.stored_length(a′) == 2
     # ```
     # ? I.e. should it only store the nonzero values?
-    @test SparseArrayInterface.nstored(a′) == 6
+    @test SparseArrayInterface.stored_length(a′) == 6
 
     # Matrix multiplication
     a1 = SparseArray{elt}(2, 3)
@@ -297,7 +297,7 @@ using Test: @test, @testset
     a_dest = a1 * a2
     @test Array(a_dest) ≈ Array(a1) * Array(a2)
     @test a_dest isa SparseArray{elt}
-    @test SparseArrayInterface.nstored(a_dest) == 2
+    @test SparseArrayInterface.stored_length(a_dest) == 2
 
     # Dot product
     a1 = SparseArray{elt}(4)
@@ -322,7 +322,7 @@ using Test: @test, @testset
     mul!(a_dest, a1, a2)
     @test Array(a_dest) ≈ Array(a1) * Array(a2)
     @test a_dest isa SparseArray{elt}
-    @test SparseArrayInterface.nstored(a_dest) == 2
+    @test SparseArrayInterface.stored_length(a_dest) == 2
 
     # In-place matrix multiplication
     a1 = SparseArray{elt}(2, 3)
@@ -340,7 +340,7 @@ using Test: @test, @testset
     mul!(a_dest, a1, a2, α, β)
     @test Array(a_dest) ≈ Array(a1) * Array(a2) * α + Array(a_dest′) * β
     @test a_dest isa SparseArray{elt}
-    @test SparseArrayInterface.nstored(a_dest) == 2
+    @test SparseArrayInterface.stored_length(a_dest) == 2
 
     # cat
     a1 = SparseArray{elt}(2, 3)
@@ -352,7 +352,7 @@ using Test: @test, @testset
 
     a_dest = cat(a1, a2; dims=1)
     @test size(a_dest) == (4, 3)
-    @test SparseArrayInterface.nstored(a_dest) == 4
+    @test SparseArrayInterface.stored_length(a_dest) == 4
     @test a_dest[1, 2] == a1[1, 2]
     @test a_dest[2, 1] == a1[2, 1]
     @test a_dest[3, 1] == a2[1, 1]
@@ -360,7 +360,7 @@ using Test: @test, @testset
 
     a_dest = cat(a1, a2; dims=2)
     @test size(a_dest) == (2, 6)
-    @test SparseArrayInterface.nstored(a_dest) == 4
+    @test SparseArrayInterface.stored_length(a_dest) == 4
     @test a_dest[1, 2] == a1[1, 2]
     @test a_dest[2, 1] == a1[2, 1]
     @test a_dest[1, 4] == a2[1, 1]
@@ -368,7 +368,7 @@ using Test: @test, @testset
 
     a_dest = cat(a1, a2; dims=(1, 2))
     @test size(a_dest) == (4, 6)
-    @test SparseArrayInterface.nstored(a_dest) == 4
+    @test SparseArrayInterface.stored_length(a_dest) == 4
     @test a_dest[1, 2] == a1[1, 2]
     @test a_dest[2, 1] == a1[2, 1]
     @test a_dest[3, 4] == a2[1, 1]
@@ -395,6 +395,6 @@ using Test: @test, @testset
     ## mul!(a_dest, a1, a2)
     ## @test Array(a_dest) ≈ Array(a1) * Array(a2)
     ## @test a_dest isa SparseArray{Matrix{elt}}
-    ## @test SparseArrayInterface.nstored(a_dest) == 2
+    ## @test SparseArrayInterface.stored_length(a_dest) == 2
   end
 end
diff --git a/src/lib/ITensorsNamedDimsArraysExt/test/test_basics.jl b/src/lib/ITensorsNamedDimsArraysExt/test/test_basics.jl
index e2c3ae73dd..6a39e7a64d 100644
--- a/src/lib/ITensorsNamedDimsArraysExt/test/test_basics.jl
+++ b/src/lib/ITensorsNamedDimsArraysExt/test/test_basics.jl
@@ -3,7 +3,7 @@ using BlockArrays: blocklengths
 using ITensors: ITensor, Index, QN, dag, inds, plev, random_itensor
 using ITensors.ITensorsNamedDimsArraysExt: to_nameddimsarray
 using NDTensors: tensor
-using NDTensors.BlockSparseArrays: BlockSparseArray, block_nstored
+using NDTensors.BlockSparseArrays: BlockSparseArray, block_stored_length
 using NDTensors.GradedAxes: isdual
 using NDTensors.LabelledNumbers: label
 using NDTensors.NamedDimsArrays: NamedDimsArray, unname
@@ -27,7 +27,7 @@ using Test: @test, @testset
   @test blocklengths(axes(bb, 2)) == [2, 3]
   @test label.(blocklengths(axes(bb, 1))) == [QN(0), QN(1)]
   @test label.(blocklengths(axes(bb, 2))) == [QN(0), QN(-1)]
-  @test block_nstored(bb) == 2
+  @test block_stored_length(bb) == 2
   @test b' * b ≈ to_nameddimsarray(a' * a)
 end
 end
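Finally, the distinction between the two renamed counters on the block sparse side, as a sketch with hypothetical block sizes (the constructor mirrors the ones used in the BlockSparseArrays tests):

    using BlockArrays: Block
    using NDTensors.BlockSparseArrays: BlockSparseArray, block_stored_length
    using NDTensors.SparseArrayInterface: stored_length

    a = BlockSparseArray{Float64}([2, 3], [2, 3])  # a 2x2 grid of blocks
    a[Block(1, 1)] = randn(2, 2)
    a[Block(2, 2)] = randn(3, 3)
    block_stored_length(a)  # 2: number of stored blocks
    stored_length(a)        # 13: elements in stored blocks (2*2 + 3*3)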