This repository has been archived by the owner on Feb 5, 2024. It is now read-only.

MPI Sparse Hamiltonian for Adjoint Jacobian #128

Merged: 51 commits, merged on Aug 24, 2023
Changes from 33 commits
Commits
c3c50aa
add cpp layer
multiphaseCFD Jul 7, 2023
d398161
add docstring
multiphaseCFD Jul 7, 2023
087a85e
add python layer
multiphaseCFD Jul 8, 2023
2e29ce9
add mpi support for spmv adjoint
multiphaseCFD Jul 8, 2023
608c8e1
tidy up code
multiphaseCFD Jul 9, 2023
482f760
memory optimization
multiphaseCFD Jul 9, 2023
51a7139
tidy up the code
multiphaseCFD Jul 9, 2023
d7604bb
quick update
multiphaseCFD Jul 9, 2023
55e505f
make format
multiphaseCFD Jul 9, 2023
2f21e75
Add changelog
multiphaseCFD Jul 10, 2023
1a451d2
make format
multiphaseCFD Jul 10, 2023
2fb4c55
Trigger MPI CI
multiphaseCFD Jul 10, 2023
93011e9
update base on comments
multiphaseCFD Jul 11, 2023
50a3d81
add changelog
multiphaseCFD Jul 11, 2023
bebbe2b
make format
multiphaseCFD Jul 11, 2023
147afbe
update based on comments
multiphaseCFD Jul 19, 2023
5a1705a
make format & add to-do work
multiphaseCFD Jul 19, 2023
d68281c
make format
multiphaseCFD Jul 21, 2023
521f84a
sparse matrix struct to class
multiphaseCFD Jul 24, 2023
9f9777c
update unit test cases
multiphaseCFD Jul 24, 2023
914aac6
update docstring
multiphaseCFD Jul 27, 2023
fb78ed8
add more docstring
multiphaseCFD Jul 27, 2023
77abc0e
make format
multiphaseCFD Jul 27, 2023
7845449
Merge branch 'mpi_sparse_ham' into mpi_adjoint_spmv
multiphaseCFD Jul 27, 2023
6c94981
update based on new csrmatrix
multiphaseCFD Jul 27, 2023
e3b6a8a
bug fix
multiphaseCFD Jul 27, 2023
97b8926
make format
multiphaseCFD Jul 27, 2023
6c94020
quick fix
multiphaseCFD Jul 27, 2023
ebac2a0
update mpimanager
multiphaseCFD Jul 27, 2023
13d6da5
quick update
multiphaseCFD Jul 27, 2023
014ed67
Merge branch 'mpi_sparse_ham' into mpi_adjoint_spmv
multiphaseCFD Jul 28, 2023
8938ee3
Merge branch 'mpi_adjoint_spmv' of https://github.com/PennyLaneAI/pen…
multiphaseCFD Jul 28, 2023
5b0712c
merge sparse_ham
multiphaseCFD Jul 28, 2023
500d8c5
add more docstring
multiphaseCFD Jul 29, 2023
2ae1e96
Merge branch 'mpi_sparse_ham' into mpi_adjoint_spmv
multiphaseCFD Aug 1, 2023
a86e9db
Merge branch 'main' into mpi_adjoint_spmv
multiphaseCFD Aug 2, 2023
a09d10b
update typo
multiphaseCFD Aug 8, 2023
c9131f0
update indices order
multiphaseCFD Aug 9, 2023
0cbde2c
Merge branch 'main' into mpi_adjoint_spmv
multiphaseCFD Aug 18, 2023
618bdf2
add matrix sort v0
multiphaseCFD Aug 19, 2023
de7db96
refactor csrmatrix
multiphaseCFD Aug 22, 2023
134cfc3
add crsmatrix
multiphaseCFD Aug 22, 2023
45633b8
refactor binding
multiphaseCFD Aug 23, 2023
0448cc9
Merge branch 'refactor_csrmat'
multiphaseCFD Aug 23, 2023
77ecd93
init zeros for tmp for empty spmat
multiphaseCFD Aug 23, 2023
39a016d
Merge branch 'main' into mpi_adjoint_spmv
multiphaseCFD Aug 23, 2023
c2c8e5d
update changelog and merge main
multiphaseCFD Aug 23, 2023
877760a
add unit tests for CSRMatrix related methods
multiphaseCFD Aug 23, 2023
6a8cce0
add docstring
multiphaseCFD Aug 24, 2023
d1f1455
add license header
multiphaseCFD Aug 24, 2023
d75c268
tidy up code
multiphaseCFD Aug 24, 2023
18 changes: 18 additions & 0 deletions .github/CHANGELOG.md
@@ -2,6 +2,22 @@

### New features since last release

* Add sparse Hamiltonian support to multi-node/multi-GPU adjoint methods.
[(#128)](https://github.com/PennyLaneAI/pennylane-lightning-gpu/pull/128)

Note that each MPI process returns the overall result of the adjoint method. To keep the wire indices aligned with the cuQuantum backend, `wires` should be reordered in descending order when building the sparse matrix, as in the following Python snippet:
```python
qml.SparseHamiltonian(
    qml.Hamiltonian(
        [0.1], [qml.PauliX(0) @ qml.PauliY(1)]
    ).sparse_matrix(wires[::-1]),
    wires=wires,
)
```

* Add Sparse Hamiltonian support for expectation value calculation.
[(#127)](https://github.com/PennyLaneAI/pennylane-lightning-gpu/pull/127)


### Breaking changes

### Improvements
@@ -14,6 +30,8 @@

This release contains contributions from (in alphabetical order):

Shuli Shu

---

# Release 0.31.0
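To make the wire-ordering note in the changelog above concrete, here is a minimal, hypothetical end-to-end sketch. It assumes an MPI-enabled `lightning.gpu` build with `mpi4py` installed and would be launched with something like `mpirun -np 2 python script.py`; the circuit and parameter names are illustrative only.

```python
# Hypothetical sketch only: assumes an MPI-enabled lightning.gpu build.
from pennylane import numpy as np
import pennylane as qml

n_wires = 4
wires = list(range(n_wires))

# Build the sparse matrix with the wire order reversed (descending), so the
# indices align with the cuQuantum backend as described in the changelog note.
sparse_obs = qml.SparseHamiltonian(
    qml.Hamiltonian([0.1], [qml.PauliX(0) @ qml.PauliY(1)]).sparse_matrix(wires[::-1]),
    wires=wires,
)

dev = qml.device("lightning.gpu", wires=wires, mpi=True)

@qml.qnode(dev, diff_method="adjoint")
def circuit(params):
    for w in wires:
        qml.RX(params[w], wires=w)
    return qml.expval(sparse_obs)

params = np.random.rand(n_wires, requires_grad=True)
# Each MPI process returns the overall (full) Jacobian.
jac = qml.jacobian(circuit)(params)
```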
138 changes: 119 additions & 19 deletions mpitests/test_adjoint_jacobian.py
@@ -31,6 +31,8 @@
TensorProdObsGPUMPI_C128,
HamiltonianGPUMPI_C64,
HamiltonianGPUMPI_C128,
SparseHamiltonianGPUMPI_C64,
SparseHamiltonianGPUMPI_C128,
)
from pennylane_lightning_gpu._serialize import _serialize_ob

@@ -1044,49 +1046,142 @@ def circuit(params):
j_gpu = qml.jacobian(qnode_gpu)(params)


custom_wires0 = ["alice", 3.14, -1, 0, "bob", "luc"]
""" Important Note: Ensure wires index arranged from high to low
qml.SparseHamiltonian(
qml.Hamiltonian(
[0.1], [qml.PauliX(wires=custom_wires0[1]) @ qml.PauliY(wires=custom_wires0[0])]
).sparse_matrix(custom_wires0[::-1]),
)
"""


@pytest.mark.parametrize(
"returns",
[
qml.SparseHamiltonian(
qml.Hamiltonian(
[0.1], [qml.PauliX(wires=custom_wires[0]) @ qml.PauliZ(wires=custom_wires[1])]
).sparse_matrix(custom_wires),
wires=custom_wires,
[0.1], [qml.PauliX(wires=custom_wires0[0]) @ qml.PauliY(wires=custom_wires0[1])]
).sparse_matrix(custom_wires0[::-1]),
wires=custom_wires0,
),
qml.SparseHamiltonian(
qml.Hamiltonian(
[2.0], [qml.PauliX(wires=custom_wires[2]) @ qml.PauliZ(wires=custom_wires[0])]
).sparse_matrix(custom_wires),
wires=custom_wires,
[2.0], [qml.PauliX(wires=custom_wires0[2]) @ qml.PauliZ(wires=custom_wires0[0])]
).sparse_matrix(custom_wires0[::-1]),
wires=custom_wires0,
),
qml.SparseHamiltonian(
qml.Hamiltonian(
[1.1], [qml.PauliX(wires=custom_wires[0]) @ qml.PauliZ(wires=custom_wires[2])]
).sparse_matrix(custom_wires),
wires=custom_wires,
[1.1], [qml.PauliX(wires=custom_wires0[0]) @ qml.PauliZ(wires=custom_wires0[2])]
).sparse_matrix(custom_wires0[::-1]),
wires=custom_wires0,
),
],
)
def test_failed_adjoint_SparseHamiltonian(returns):
def test_adjoint_SparseHamiltonian_custom_wires(returns):
"""Integration tests that compare to default.qubit for a large circuit containing parametrized
operations and when using custom wire labels"""

dev_gpu = qml.device("lightning.gpu", wires=custom_wires, mpi=True)
comm = MPI.COMM_WORLD
dev_gpu = qml.device("lightning.gpu", wires=custom_wires0, mpi=True)
dev_cpu = qml.device("default.qubit", wires=custom_wires0)

def circuit(params):
circuit_ansatz(params, wires=custom_wires)
circuit_ansatz(params, wires=custom_wires0)
return qml.expval(returns)

n_params = 30
np.random.seed(1337)
params = np.random.rand(n_params)
if comm.Get_rank() == 0:
n_params = 30
np.random.seed(1337)
params = np.random.rand(n_params)
else:
params = None

params = comm.bcast(params, root=0)

qnode_gpu = qml.QNode(circuit, dev_gpu, diff_method="adjoint")
qnode_cpu = qml.QNode(circuit, dev_cpu, diff_method="parameter-shift")

with pytest.raises(
RuntimeError, match="LightningGPU-MPI does not currently support SparseHamiltonian."
):
j_gpu = qml.jacobian(qnode_gpu)(params)
j_gpu = qml.jacobian(qnode_gpu)(params)
j_cpu = qml.jacobian(qnode_cpu)(params)

assert np.allclose(j_cpu, j_gpu)


""" Important Note: Ensure wires index arranged from high to low
qml.SparseHamiltonian(
qml.Hamiltonian(
[0.1],
[qml.PauliX(1) @ qml.PauliZ(0)],
).sparse_matrix(range(5, -1, -1)),
wires=range(6),
)
"""


@pytest.mark.parametrize(
"returns",
[
qml.SparseHamiltonian(
qml.Hamiltonian(
[0.1],
[qml.PauliZ(1) @ qml.PauliX(0) @ qml.Identity(2) @ qml.PauliX(4) @ qml.Identity(5)],
).sparse_matrix(range(5, -1, -1)),
wires=range(6),
),
qml.SparseHamiltonian(
qml.Hamiltonian(
[0.1],
[qml.PauliX(1) @ qml.PauliZ(0)],
).sparse_matrix(range(5, -1, -1)),
wires=range(6),
),
qml.SparseHamiltonian(
qml.Hamiltonian(
[0.1],
[qml.PauliX(0) @ qml.PauliZ(1)],
).sparse_matrix(range(5, -1, -1)),
wires=range(6),
),
qml.SparseHamiltonian(
qml.Hamiltonian([2.0], [qml.PauliX(1) @ qml.PauliZ(2)]).sparse_matrix(range(5, -1, -1)),
wires=range(6),
),
qml.SparseHamiltonian(
qml.Hamiltonian([1.1], [qml.PauliX(2) @ qml.PauliZ(0)]).sparse_matrix(range(5, -1, -1)),
wires=range(6),
),
],
)
def test_adjoint_SparseHamiltonian(returns):
"""Integration tests that compare to default.qubit for a large circuit containing parametrized
operations and when using custom wire labels"""

comm = MPI.COMM_WORLD
dev_gpu = qml.device("lightning.gpu", wires=6, mpi=True)
dev_cpu = qml.device("default.qubit", wires=6)

def circuit(params):
circuit_ansatz(params, wires=range(6))
return qml.expval(returns)

if comm.Get_rank() == 0:
n_params = 30
np.random.seed(1337)
params = np.random.rand(n_params)
else:
params = None

params = comm.bcast(params, root=0)

qnode_gpu = qml.QNode(circuit, dev_gpu, diff_method="adjoint")
qnode_cpu = qml.QNode(circuit, dev_cpu, diff_method="parameter-shift")

j_gpu = qml.jacobian(qnode_gpu)(params)
j_cpu = qml.jacobian(qnode_cpu)(params)

assert np.allclose(j_cpu, j_gpu)


@pytest.mark.parametrize(
@@ -1101,6 +1196,11 @@ def circuit(params):
HamiltonianGPUMPI_C64,
HamiltonianGPUMPI_C128,
),
(
qml.SparseHamiltonian(qml.Hamiltonian([1], [qml.PauliZ(0)]).sparse_matrix(), wires=[0]),
SparseHamiltonianGPUMPI_C64,
SparseHamiltonianGPUMPI_C128,
),
],
)
@pytest.mark.parametrize("use_csingle", [True, False])
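Both "Important note" comments in the test file above hinge on the same fact: `sparse_matrix` produces a different CSR matrix depending on the wire order it is given. A minimal sketch, assuming only PennyLane and SciPy, illustrates this:

```python
# Illustration of why the tests pass a descending wire order to sparse_matrix.
import numpy as np
import pennylane as qml

ham = qml.Hamiltonian([0.1], [qml.PauliX(1) @ qml.PauliZ(0)])

# Ascending wire order (PennyLane's default convention).
mat_ascending = ham.sparse_matrix(range(3))
# Descending wire order, matching the convention expected by the
# cuQuantum-backed lightning.gpu device in the tests above.
mat_descending = ham.sparse_matrix(range(2, -1, -1))

# The same observable yields different matrices under the two orderings,
# which is why the wire list must be reversed before serialization.
print(np.allclose(mat_ascending.toarray(), mat_descending.toarray()))  # False here
```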
53 changes: 50 additions & 3 deletions mpitests/test_apply.py
@@ -553,6 +553,52 @@ def circuit():
assert np.allclose(local_state_vector, local_expected_output_cpu, atol=tol, rtol=0)


class TestSparseHamExpval:
"""Tests sparse hamiltonian expectation values."""

def test_sparse_hamiltonian_expectation(self, tol):
comm = MPI.COMM_WORLD
commSize = comm.Get_size()
num_global_wires = commSize.bit_length() - 1
num_local_wires = 3 - num_global_wires

obs = qml.Identity(0) @ qml.PauliX(1) @ qml.PauliY(2)
obs1 = qml.Identity(1)
Hmat = qml.Hamiltonian([1.0, 1.0], [obs1, obs]).sparse_matrix()

state_vector = np.array(
[
0.0 + 0.0j,
0.0 + 0.1j,
0.1 + 0.1j,
0.1 + 0.2j,
0.2 + 0.2j,
0.2 + 0.3j,
0.3 + 0.3j,
0.3 + 0.5j,
],
dtype=np.complex128,
)

local_state_vector = np.zeros(1 << num_local_wires).astype(np.complex128)
comm.Scatter(state_vector, local_state_vector, root=0)

dev_gpumpi = qml.device("lightning.gpu", wires=3, mpi=True, c_dtype=np.complex128)
dev_gpu = qml.device("lightning.gpu", wires=3, mpi=False, c_dtype=np.complex128)

dev_gpumpi.syncH2D(local_state_vector)
dev_gpu.syncH2D(state_vector)

H_sparse = qml.SparseHamiltonian(Hmat, wires=range(3))

comm.Barrier()

res = dev_gpumpi.expval(H_sparse)
expected = dev_gpu.expval(H_sparse)

assert np.allclose(res, expected)


class TestExpval:
"""Tests that expectation values are properly calculated or that the proper errors are raised."""

@@ -573,6 +619,7 @@ def test_expval_single_wire_no_parameters(self, tol, operation, wires):
obs = operation(wires)
expval_single_wire_no_param(tol, obs)

@pytest.mark.parametrize("c_dtype", [np.complex64, np.complex128])
@pytest.mark.parametrize(
"obs",
[
@@ -584,13 +631,13 @@ def test_expval_single_wire_no_parameters(self, tol, operation, wires):
qml.PauliZ(numQubits - 2) @ qml.PauliZ(numQubits - 1),
],
)
def test_expval_multiple_obs(self, obs, tol):
def test_expval_multiple_obs(self, obs, c_dtype, tol):
"""Test expval with Hamiltonian"""
num_wires = numQubits
comm = MPI.COMM_WORLD

dev_cpu = qml.device("default.qubit", wires=num_wires, c_dtype=np.complex128)
dev_gpumpi = qml.device("lightning.gpu", wires=num_wires, mpi=True, c_dtype=np.complex128)
dev_cpu = qml.device("default.qubit", wires=num_wires, c_dtype=request.params)
dev_gpumpi = qml.device("lightning.gpu", wires=num_wires, mpi=True, c_dtype=request.params)

def circuit():
qml.RX(0.4, wires=[0])
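The partitioning arithmetic used in `TestSparseHamExpval` above can be sketched on its own. This is a minimal illustration, assuming `mpi4py` and a power-of-two number of ranks; variable names are illustrative:

```python
# Sketch of how a 3-wire state vector is split across MPI ranks,
# mirroring the arithmetic in TestSparseHamExpval above.
import numpy as np
from mpi4py import MPI

comm = MPI.COMM_WORLD
comm_size = comm.Get_size()  # assumed to be a power of two

num_total_wires = 3
# With 2**k ranks, k wires become "global" (distributed across ranks)...
num_global_wires = comm_size.bit_length() - 1
# ...and the rest stay "local" to each rank's slice of the state vector.
num_local_wires = num_total_wires - num_global_wires

# Rank 0 holds the full state; every rank receives a contiguous slice of
# length 2**num_local_wires via Scatter.
full_state = None
if comm.Get_rank() == 0:
    full_state = np.arange(2**num_total_wires).astype(np.complex128)
local_state = np.zeros(2**num_local_wires, dtype=np.complex128)
comm.Scatter(full_state, local_state, root=0)
```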
72 changes: 52 additions & 20 deletions pennylane_lightning_gpu/_serialize.py
@@ -64,6 +64,8 @@
TensorProdObsGPUMPI_C128,
HamiltonianGPUMPI_C64,
HamiltonianGPUMPI_C128,
SparseHamiltonianGPUMPI_C64,
SparseHamiltonianGPUMPI_C128,
HermitianObsGPUMPI_C64,
HermitianObsGPUMPI_C128,
)
@@ -117,6 +119,20 @@ def _hamiltonian_ob_dtype(use_csingle, use_mpi: bool):
)


def _sparsehamiltonian_ob_dtype(use_csingle, use_mpi: bool):
if not use_mpi:
return (
[SparseHamiltonianGPU_C64, np.float32, np.int32]
if use_csingle
else [SparseHamiltonianGPU_C128, np.float64, np.int64]
)
return (
[SparseHamiltonianGPUMPI_C64, np.float32, np.int32]
if use_csingle
else [SparseHamiltonianGPUMPI_C128, np.float64, np.int64]
)


def _sv_py_dtype(use_csingle, use_mpi: bool):
if not use_mpi:
return LightningGPU_C64 if use_csingle else LightningGPU_C128
@@ -151,24 +167,42 @@ def _serialize_hamiltonian(
return hamiltonian_obs(coeffs, terms)


def _serialize_sparsehamiltonian(ob, wires_map: dict, use_csingle: bool):
if use_csingle:
ctype = np.complex64
rtype = np.int32
sparsehamiltonian_obs = SparseHamiltonianGPU_C64
def _serialize_sparsehamiltonian(ob, wires_map: dict, use_csingle: bool, use_mpi: bool):
sparsehamiltonian_obs, ctype, rtype = _sparsehamiltonian_ob_dtype(use_csingle, use_mpi)

if use_mpi:
mpi_manager_local = MPIManager()
# Only rank 0 needs the full sparse matrix data
if mpi_manager_local.getRank() == 0:
spm = ob.sparse_matrix()
data = np.array(spm.data).astype(ctype)
indices = np.array(spm.indices).astype(rtype)
offsets = np.array(spm.indptr).astype(rtype)
wires = []
wires_list = ob.wires.tolist()
wires.extend([wires_map[w] for w in wires_list])
else:
# Other ranks only need a small non-null placeholder sparse matrix to pass
obs = qml.Identity(0)
Hmat = qml.Hamiltonian([1.0], [obs]).sparse_matrix()
H_sparse = qml.SparseHamiltonian(Hmat, wires=range(1))
spm = H_sparse.sparse_matrix()

data = np.array(spm.data).astype(ctype)
indices = np.array(spm.indices).astype(rtype)
offsets = np.array(spm.indptr).astype(rtype)

wires = []
wires_list = ob.wires.tolist()
wires.extend([wires_map[w] for w in wires_list])
else:
ctype = np.complex128
rtype = np.int64
sparsehamiltonian_obs = SparseHamiltonianGPU_C128

spm = ob.sparse_matrix()
data = np.array(spm.data).astype(ctype)
indices = np.array(spm.indices).astype(rtype)
offsets = np.array(spm.indptr).astype(rtype)

wires = []
wires_list = ob.wires.tolist()
wires.extend([wires_map[w] for w in wires_list])
spm = ob.sparse_matrix()
data = np.array(spm.data).astype(ctype)
indices = np.array(spm.indices).astype(rtype)
offsets = np.array(spm.indptr).astype(rtype)
wires = []
wires_list = ob.wires.tolist()
wires.extend([wires_map[w] for w in wires_list])

return sparsehamiltonian_obs(data, indices, offsets, wires)

@@ -214,9 +248,7 @@ def _serialize_ob(ob, wires_map, use_csingle, use_mpi: bool = False, use_splitti
elif ob.name == "Hamiltonian":
return _serialize_hamiltonian(ob, wires_map, use_csingle, use_mpi, use_splitting)
elif ob.name == "SparseHamiltonian":
if use_mpi:
raise TypeError("SparseHamiltonian is not supported for MPI backend.")
return _serialize_sparsehamiltonian(ob, wires_map, use_csingle)
return _serialize_sparsehamiltonian(ob, wires_map, use_csingle, use_mpi)
elif isinstance(ob, (PauliX, PauliY, PauliZ, Identity, Hadamard)):
return _serialize_named_ob(ob, wires_map, use_csingle, use_mpi)
elif ob._pauli_rep is not None:
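For reference, the CSR fields that `_serialize_sparsehamiltonian` forwards to the bindings (`data`, `indices`, `indptr`, and the wire list) can be previewed in plain Python. A minimal sketch, assuming the double-precision path; the 1x1 placeholder mentioned in the comment mirrors the non-root branch in the diff above:

```python
# Sketch of the CSR pieces extracted from a SciPy sparse matrix before they
# are handed to the SparseHamiltonianGPU(MPI) bindings.
import numpy as np
import pennylane as qml

spm = qml.Hamiltonian([0.5], [qml.PauliX(0) @ qml.PauliZ(1)]).sparse_matrix()

data = np.array(spm.data).astype(np.complex128)   # nonzero values
indices = np.array(spm.indices).astype(np.int64)  # column index of each value
offsets = np.array(spm.indptr).astype(np.int64)   # row offsets (length = nrows + 1)
wires = [0, 1]

# Under MPI, only rank 0 serializes the real matrix; the other ranks build a
# trivial 1x1 identity SparseHamiltonian as a non-null placeholder.
```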