
Commit

Analytic VQT test (#84)
* format

* switch to explicit rtol

* update loss number

* added derivation note

* Add docstring and remove unused import

* Change names od

* format

* Change index to squeeze

* format

* typo fix

* typo

* remove squeeze

* change name

* format
zaqqwerty authored Aug 31, 2021
1 parent 2781d68 commit e13b6dd
Showing 2 changed files with 91 additions and 11 deletions.
26 changes: 20 additions & 6 deletions qhbmlib/vqt.py
@@ -15,19 +15,33 @@
"""Impementations of the VQT loss and its derivatives."""

import tensorflow as tf
-import tensorflow_quantum as tfq


@tf.function
def vqt(qhbm, num_samples, hamiltonian, beta):
"""Computes the VQT loss of a given QHBM against given thermal state params.
This function is differentiable within a `tf.GradientTape` scope.
Args:
qhbm: A `qhbm.QHBM` which is the model whose loss is to be calculated.
num_samples: A scalar `tf.Tensor` specifying the number of samples to draw
from the EBM of `qhbm` when estimating the loss and its gradients.
hamiltonian: 1D tensor of strings with one entry, the result of calling
`tfq.convert_to_tensor` on a list containing one cirq.PauliSum, `[op]`.
Here, `op` is the Hamiltonian against which the loss is calculated.
beta: A scalar `tf.Tensor` which is the inverse temperature at which the
loss is calculated.
Returns:
The VQT loss.
"""

  @tf.custom_gradient
  def loss(variables):
    bitstrings, counts = qhbm.ebm.sample(num_samples)
-    probs = tf.cast(counts, tf.float32) / tf.cast(
-        tf.reduce_sum(counts), tf.float32)
+    probs = tf.cast(counts, tf.float32) / tf.cast(num_samples, tf.float32)
    expectation = tf.squeeze(
-        qhbm.qnn.expectation(bitstrings, counts, hamiltonian))
+        qhbm.qnn.expectation(bitstrings, counts, hamiltonian), -1)
    if qhbm.is_analytic:
      entropy = qhbm.entropy()
    else:
@@ -57,7 +71,7 @@ def grad(grad_y, variables=None):
          for grad in energy_gradients
      ]
      grad_vars = grad_ebm + grad_qnn
-      return grad_vars, grad_vars
+      return grad_vars

    return beta * expectation - entropy, grad
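Taken together, the two hunks above implement the sampled VQT objective, beta * <H> - S, with a custom gradient so the loss can be trained directly under a `tf.GradientTape`. A minimal usage sketch, patterned on the tests in this commit (the circuit, initializer ranges, and sample count below are illustrative assumptions, not part of the change):

import cirq
import sympy
import tensorflow as tf
import tensorflow_quantum as tfq

from qhbmlib import ebm
from qhbmlib import qhbm
from qhbmlib import qnn
from qhbmlib import vqt

# Model: a Bernoulli EBM over bitstrings plus a parameterized X-rotation QNN.
num_qubits = 2
qubits = cirq.GridQubit.rect(1, num_qubits)
symbols = [sympy.Symbol(f"phi_{n}") for n in range(num_qubits)]
circuit = cirq.Circuit(cirq.rx(s)(q) for s, q in zip(symbols, qubits))
init = tf.keras.initializers.RandomUniform(minval=-1.0, maxval=1.0)  # illustrative range
model = qhbm.QHBM(
    ebm.Bernoulli(num_qubits, init, True),
    qnn.QNN(circuit, init, is_analytic=True))

# Target: a Hamiltonian converted from one cirq.PauliSum, and an inverse temperature.
hamiltonian = tfq.convert_to_tensor(
    [cirq.PauliSum.from_pauli_strings(cirq.Y(q) for q in qubits)])
beta = tf.constant(1.0)
num_samples = tf.constant(int(1e5))  # illustrative sample count

# The custom gradient makes the loss differentiable inside a GradientTape.
with tf.GradientTape() as tape:
  loss = vqt.vqt(model, num_samples, hamiltonian, beta)
grads = tape.gradient(loss, model.trainable_variables)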
76 changes: 71 additions & 5 deletions tests/vqt_test.py
@@ -12,17 +12,21 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# """Tests for the VQT loss and gradients."""
"""Tests for the VQT loss and gradients."""

import cirq
import sympy
import tensorflow as tf
import tensorflow_quantum as tfq

-from qhbmlib import ebm, qnn, qhbm
+from qhbmlib import ebm
+from qhbmlib import qhbm
+from qhbmlib import qnn
from qhbmlib import vqt
from tests import test_util

+RTOL = 3e-2


class VQTTest(tf.test.TestCase):
  """Tests for the sample-based VQT."""
@@ -53,7 +57,7 @@ def test_loss_consistency(self):
    tf_ham = tfq.convert_to_tensor([cirq_ham])
    loss = vqt.vqt(test_qhbm, num_samples, tf_ham, beta)
    loss_copy = vqt.vqt(test_qhbm_copy, num_samples, tf_ham, beta)
-    self.assertAllClose(loss_copy, loss, rtol=1e-2)
+    self.assertAllClose(loss_copy, loss, rtol=RTOL)

  def test_zero_grad(self):
    """Confirm correct gradients and loss at the optimal settings."""
@@ -71,8 +75,70 @@ def test_zero_grad(self):
      loss = vqt.vqt(test_qhbm, tf.constant(int(5e6)), tf_ham, tf.constant(1.0))
    gradient = tape.gradient(loss, test_qhbm.trainable_variables)
    for grad in gradient:
-      self.assertAllClose(grad, tf.zeros_like(grad), atol=1e-2)
-    self.assertAllClose(loss, -test_qhbm.log_partition_function(), atol=1e-2)
+      self.assertAllClose(grad, tf.zeros_like(grad), rtol=RTOL)
+    self.assertAllClose(loss, -test_qhbm.log_partition_function(), rtol=RTOL)

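The two assertions in `test_zero_grad` above are instances of the variational free-energy bound. Writing $\sigma_\beta = e^{-\beta H}/Z_\beta$ with $Z_\beta = \operatorname{Tr} e^{-\beta H}$, any model state $\rho$ satisfies

$$\beta \langle H \rangle_\rho - S(\rho) \;=\; D(\rho \,\|\, \sigma_\beta) - \log Z_\beta \;\ge\; -\log Z_\beta,$$

with equality exactly when $\rho = \sigma_\beta$. At the optimal settings the relative-entropy term vanishes, so the gradients are zero and the loss equals $-\log Z_\beta$, which is precisely what the two `assertAllClose` checks verify.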
+  def test_loss_value_x_rot(self):
+    """Confirms correct values for a single qubit X rotation with H=Y.
+
+    See the colab notebook at the following link for derivations:
+    https://colab.research.google.com/drive/14987JCMju_8AVvvVoojwe6hA7Nlw-Dhe?usp=sharing
+
+    Since each qubit is independent, the loss is the sum over the individual
+    qubit losses, and the gradients are the per-qubit gradients.
+    """
+    seed = None
+    for num_qubits in [1, 2, 3, 4, 5]:
+      # EBM
+      ebm_init = tf.keras.initializers.RandomUniform(
+          minval=-2.0, maxval=2.0, seed=seed)
+      test_ebm = ebm.Bernoulli(num_qubits, ebm_init, True)
+
+      # QNN
+      qubits = cirq.GridQubit.rect(1, num_qubits)
+      r_symbols = [sympy.Symbol(f"phi_{n}") for n in range(num_qubits)]
+      r_circuit = cirq.Circuit(
+          cirq.rx(r_s)(q) for r_s, q in zip(r_symbols, qubits))
+      qnn_init = tf.keras.initializers.RandomUniform(
+          minval=-6.2, maxval=6.2, seed=seed)
+      test_qnn = qnn.QNN(r_circuit, qnn_init, is_analytic=True)
+
+      # VQT arguments
+      test_qhbm = qhbm.QHBM(test_ebm, test_qnn)
+      test_num_samples = tf.constant(1e6)
+      test_h = tfq.convert_to_tensor(
+          [cirq.PauliSum.from_pauli_strings(cirq.Y(q) for q in qubits)])
+      test_beta = tf.random.uniform([], minval=0.01, maxval=100.0, seed=seed)
+
+      # Compute losses
+      # Bernoulli has only one tf.Variable
+      test_thetas = test_qhbm.thetas[0]
+      # QNN has only one tf.Variable
+      test_phis = test_qhbm.phis[0]
+      actual_expectation = test_qhbm.expectation(test_h, test_num_samples)[0]
+      expected_expectation = tf.reduce_sum(
+          tf.math.tanh(test_thetas) * tf.math.sin(test_phis))
+      self.assertAllClose(actual_expectation, expected_expectation, rtol=RTOL)
+
+      actual_entropy = test_qhbm.entropy()
+      expected_entropy = tf.reduce_sum(
+          -test_thetas * tf.math.tanh(test_thetas) +
+          tf.math.log(2 * tf.math.cosh(test_thetas)))
+      self.assertAllClose(actual_entropy, expected_entropy, rtol=RTOL)
+
+      with tf.GradientTape() as tape:
+        actual_loss = vqt.vqt(test_qhbm, test_num_samples, test_h, test_beta)
+      expected_loss = test_beta * expected_expectation - expected_entropy
+      self.assertAllClose(actual_loss, expected_loss, rtol=RTOL)
+
+      actual_thetas_grads, actual_phis_grads = tape.gradient(
+          actual_loss, (test_thetas, test_phis))
+      expected_thetas_grads = (1 - tf.math.tanh(test_thetas)**2) * (
+          test_beta * tf.math.sin(test_phis) + test_thetas)
+      expected_phis_grads = test_beta * tf.math.tanh(test_thetas) * tf.math.cos(
+          test_phis)
+      self.assertAllClose(actual_thetas_grads, expected_thetas_grads, rtol=RTOL)
+      self.assertAllClose(actual_phis_grads, expected_phis_grads, rtol=RTOL)

if __name__ == "__main__":
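For reference, the values asserted in `test_loss_value_x_rot` follow from a short per-qubit computation, a condensed version of the derivation the linked colab works through. The Bernoulli sign convention $p(b) \propto e^{(2b-1)\theta}$ is an assumption here, chosen to match the asserted signs; $\theta$ is the qubit's Bernoulli parameter and $\phi$ its X-rotation angle:

$$S = -\sum_b p(b)\log p(b) = -\theta\tanh\theta + \log(2\cosh\theta),$$
$$\langle Y \rangle = \sum_b p(b)\,\langle b|R_x(\phi)^\dagger\, Y\, R_x(\phi)|b\rangle = \tanh\theta\,\sin\phi,$$

so the per-qubit loss is $L = \beta\tanh\theta\sin\phi + \theta\tanh\theta - \log(2\cosh\theta)$, and

$$\frac{\partial L}{\partial \theta} = (1-\tanh^2\theta)\,(\beta\sin\phi + \theta), \qquad \frac{\partial L}{\partial \phi} = \beta\tanh\theta\cos\phi,$$

matching `expected_thetas_grads` and `expected_phis_grads` in the test.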
