rename gradient_fn to diff_method in qml.execute
albi3ro committed Nov 7, 2024
1 parent 147fb99 commit 3293973
Showing 27 changed files with 302 additions and 268 deletions.
5 changes: 5 additions & 0 deletions doc/development/deprecations.rst
@@ -9,6 +9,11 @@ deprecations are listed below.
Pending deprecations
--------------------

* The ``gradient_fn`` keyword argument to ``qml.execute`` has been renamed to ``diff_method``.

- Deprecated in v0.40
- Will be removed in v0.41

* The ``max_expansion`` argument for :func:`~pennylane.transforms.decompositions.clifford_t_decomposition`
has been deprecated.

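For reference, a minimal sketch of the renamed keyword in use. The device, tape, and gradient transform below are illustrative choices, not part of this commit; the old `gradient_fn` spelling still executes but emits a `PennyLaneDeprecationWarning`:

```python
import pennylane as qml
from pennylane import numpy as pnp

dev = qml.device("default.qubit", wires=1)

# One-parameter circuit: RX(x) followed by an expectation value of PauliZ.
x = pnp.array(0.1, requires_grad=True)
tape = qml.tape.QuantumScript([qml.RX(x, wires=0)], [qml.expval(qml.PauliZ(0))])

# New spelling: the gradient transform is passed as ``diff_method``.
(res,) = qml.execute([tape], dev, diff_method=qml.gradients.param_shift)

# Deprecated spelling: still accepted, but warns.
(res_old,) = qml.execute([tape], dev, gradient_fn=qml.gradients.param_shift)
```
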
3 changes: 3 additions & 0 deletions doc/releases/changelog-dev.md
@@ -41,6 +41,9 @@

<h3>Deprecations 👋</h3>

* The `gradient_fn` keyword argument to `qml.execute` has been renamed to `diff_method` to better align with the terminology
  used by the `QNode`. `gradient_fn` will be removed in v0.41.

<h3>Documentation 📝</h3>

<h3>Bug fixes 🐛</h3>
53 changes: 32 additions & 21 deletions pennylane/workflow/execution.py
@@ -27,6 +27,7 @@
from collections.abc import Callable, MutableMapping
from functools import partial
from typing import Literal, Optional, Union, get_args
from warnings import warn

from cachetools import Cache, LRUCache

@@ -310,7 +311,7 @@ def _update_mcm_config(mcm_config: "qml.devices.MCMConfig", interface: str, fini
def execute(
tapes: QuantumScriptBatch,
device: SupportedDeviceAPIs,
gradient_fn: Optional[Union[Callable, str]] = None,
diff_method: Optional[Union[Callable, str, qml.transforms.core.TransformDispatcher]] = None,
interface: Optional[str] = "auto",
transform_program=None,
inner_transform=None,
@@ -322,6 +323,7 @@ def execute(
max_diff=1,
device_vjp=False,
mcm_config=None,
gradient_fn="unset",
) -> ResultBatch:
"""New function to execute a batch of tapes on a device in an autodifferentiable-compatible manner. More cases will be added,
during the project. The current version is supporting forward execution for NumPy and does not support shot vectors.
@@ -331,7 +333,7 @@ def execute(
device (pennylane.Device): Device to use to execute the batch of tapes.
If the device does not provide a ``batch_execute`` method,
by default the tapes will be executed in serial.
gradient_fn (None or callable): The gradient transform function to use
diff_method (None, str, TransformDispatcher): The gradient transform function to use
for backward passes. If "device", the device will be queried directly
for the gradient (if supported).
interface (str): The interface that will be used for classical autodifferentiation.
@@ -356,6 +358,8 @@ def execute(
device_vjp=False (Optional[bool]): whether or not to use the device provided jacobian
product if it is available.
mcm_config (dict): Dictionary containing configuration options for handling mid-circuit measurements.
gradient_fn="unset": **DEPRECATED**. This keyword argument has been renamed ``diff_method`` and will
be removed in v0.41.
Returns:
list[tensor_like[float]]: A nested list of tape results. Each element in
@@ -385,7 +389,7 @@ def cost_fn(params, x):
tapes = [tape1, tape2]
# execute both tapes in a batch on the given device
res = qml.execute(tapes, dev, gradient_fn=qml.gradients.param_shift, max_diff=2)
res = qml.execute(tapes, dev, diff_method=qml.gradients.param_shift, max_diff=2)
return res[0] + res[1][0] - res[1][1]
@@ -419,15 +423,22 @@ def cost_fn(params, x):
if not isinstance(device, qml.devices.Device):
device = qml.devices.LegacyDeviceFacade(device)

if gradient_fn != "unset":
warn(
"gradient_fn has been renamed to diff_method in qml.execute",
qml.PennyLaneDeprecationWarning,
)
diff_method = gradient_fn

if logger.isEnabledFor(logging.DEBUG):
logger.debug(
"""Entry with args=(tapes=%s, device=%s, gradient_fn=%s, interface=%s, grad_on_execution=%s, gradient_kwargs=%s, cache=%s, cachesize=%s, max_diff=%s) called by=%s""",
"""Entry with args=(tapes=%s, device=%s, diff_method=%s, interface=%s, grad_on_execution=%s, gradient_kwargs=%s, cache=%s, cachesize=%s, max_diff=%s) called by=%s""",
tapes,
repr(device),
(
gradient_fn
if not (logger.isEnabledFor(qml.logging.TRACE) and inspect.isfunction(gradient_fn))
else "\n" + inspect.getsource(gradient_fn) + "\n"
diff_method
if not (logger.isEnabledFor(qml.logging.TRACE) and inspect.isfunction(diff_method))
else "\n" + inspect.getsource(diff_method) + "\n"
),
interface,
grad_on_execution,
@@ -444,7 +455,7 @@ def cost_fn(params, x):
interface = _get_interface_name(tapes, interface)
# Only need to calculate derivatives with jax when we know it will be executed later.
if interface in {"jax", "jax-jit"}:
grad_on_execution = grad_on_execution if isinstance(gradient_fn, Callable) else False
grad_on_execution = grad_on_execution if isinstance(diff_method, Callable) else False

if (
device_vjp
@@ -458,7 +469,7 @@ def cost_fn(params, x):
gradient_kwargs = gradient_kwargs or {}
mcm_config = mcm_config or {}
config = config or _get_execution_config(
gradient_fn, grad_on_execution, interface, device, device_vjp, mcm_config, gradient_kwargs
diff_method, grad_on_execution, interface, device, device_vjp, mcm_config, gradient_kwargs
)

# Mid-circuit measurement configuration validation
Expand All @@ -470,7 +481,7 @@ def cost_fn(params, x):
finite_shots = any(tape.shots for tape in tapes)
_update_mcm_config(config.mcm_config, mcm_interface, finite_shots)

is_gradient_transform = isinstance(gradient_fn, qml.transforms.core.TransformDispatcher)
is_gradient_transform = isinstance(diff_method, qml.transforms.core.TransformDispatcher)
transform_program, inner_transform = _make_transform_programs(
device, config, inner_transform, transform_program, is_gradient_transform
)
Expand Down Expand Up @@ -547,7 +558,7 @@ def execute_fn(internal_tapes):

return device.execute_and_compute_derivatives(numpy_tapes, config)

gradient_fn = None
diff_method = None

else:

@@ -561,7 +572,7 @@ def execute_fn(internal_tapes) -> tuple[ResultBatch, tuple]:
numpy_tapes, _ = qml.transforms.convert_to_numpy_parameters(internal_tapes)
return device.execute(numpy_tapes, config), tuple()

def gradient_fn(internal_tapes):
def diff_method(internal_tapes):
"""A partial function that wraps compute_derivatives method of the device.
Closure Variables:
@@ -573,7 +584,7 @@ def gradient_fn(internal_tapes):

elif grad_on_execution is True:
# In "forward" mode, gradients are automatically handled
# within execute_and_gradients, so providing a gradient_fn
# within execute_and_gradients, so providing a diff_method
# in this case would have ambiguous behaviour.
raise ValueError("Gradient transforms cannot be used with grad_on_execution=True")
elif interface in jpc_interfaces:
@@ -586,7 +597,7 @@ def gradient_fn(internal_tapes):
# its own jacobian product class
# this mechanism unpacks the currently existing recursion
jpc = TransformJacobianProducts(
execute_fn, gradient_fn, gradient_kwargs, cache_full_jacobian
execute_fn, diff_method, gradient_kwargs, cache_full_jacobian
)
for i in range(1, max_diff):
differentiable = i > 1
@@ -599,7 +610,7 @@ def gradient_fn(internal_tapes):
jpc=jpc,
device=device,
)
jpc = TransformJacobianProducts(execute_fn, gradient_fn, gradient_kwargs)
jpc = TransformJacobianProducts(execute_fn, diff_method, gradient_kwargs)

if interface == "jax-jit":
# no need to use pure callbacks around execute_fn or the jpc when taking
@@ -624,7 +635,7 @@ def gradient_fn(internal_tapes):
results = ml_boundary_execute(tapes, execute_fn, jpc, device=device)
else:
results = ml_boundary_execute(
tapes, device, execute_fn, gradient_fn, gradient_kwargs, _n=1, max_diff=max_diff
tapes, device, execute_fn, diff_method, gradient_kwargs, _n=1, max_diff=max_diff
)

return post_processing(results)
Expand All @@ -635,7 +646,7 @@ def _make_transform_programs(
):
"""helper function to make the transform programs."""

# If gradient_fn is a gradient transform, device preprocessing should happen in
# If diff_method is a gradient transform, device preprocessing should happen in
# inner execute (inside the ml boundary).
if is_gradient_transform:
if inner_transform is None:
@@ -652,13 +663,13 @@


def _get_execution_config(
gradient_fn, grad_on_execution, interface, device, device_vjp, mcm_config, gradient_kwargs
diff_method, grad_on_execution, interface, device, device_vjp, mcm_config, gradient_kwargs
):
"""Helper function to get the execution config."""
if gradient_fn is None:
if diff_method is None:
_gradient_method = None
elif isinstance(gradient_fn, str):
_gradient_method = gradient_fn
elif isinstance(diff_method, str):
_gradient_method = diff_method
else:
_gradient_method = "gradient-transform"
config = qml.devices.ExecutionConfig(
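The sentinel-based shim above only warns when a caller actually passes `gradient_fn` (the `"unset"` default marks an omitted argument). A hedged sketch of how that behaviour could be exercised; the test name, device, and tape are illustrative and not taken from this commit:

```python
import pytest
import pennylane as qml


def test_gradient_fn_kwarg_is_deprecated():
    """Passing the old ``gradient_fn`` keyword still executes, but warns."""
    dev = qml.device("default.qubit", wires=1)
    tape = qml.tape.QuantumScript([qml.RX(0.5, wires=0)], [qml.expval(qml.PauliZ(0))])

    # The warning class and message come from the shim added in execution.py.
    with pytest.warns(qml.PennyLaneDeprecationWarning, match="gradient_fn has been renamed"):
        (res,) = qml.execute([tape], dev, gradient_fn=qml.gradients.param_shift)

    # The deprecated spelling should give the same result as the new one.
    (expected,) = qml.execute([tape], dev, diff_method=qml.gradients.param_shift)
    assert qml.math.allclose(res, expected)
```
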
2 changes: 1 addition & 1 deletion pennylane/workflow/qnode.py
@@ -935,7 +935,7 @@ def _execution_component(self, args: tuple, kwargs: dict) -> qml.typing.Result:
res = qml.execute(
(self._tape,),
device=self.device,
gradient_fn=gradient_fn,
diff_method=gradient_fn,
interface=interface,
transform_program=full_transform_program,
inner_transform=inner_transform_program,
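The QNode change is purely a pass-through: the gradient function the QNode resolves from its own `diff_method` option is now forwarded to `qml.execute` under the same name. A minimal sketch of the user-facing spelling that the terminology now matches (standard QNode usage, not part of this diff):

```python
import pennylane as qml

dev = qml.device("default.qubit", wires=1)

# A QNode has always taken ``diff_method``; qml.execute now uses the same name internally.
@qml.qnode(dev, diff_method="parameter-shift")
def circuit(x):
    qml.RX(x, wires=0)
    return qml.expval(qml.PauliZ(0))

circuit(0.5)
```
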
2 changes: 1 addition & 1 deletion tests/devices/test_default_mixed_jax.py
@@ -154,7 +154,7 @@ def wrapper(x):
qml.RX(x, wires=0)
qml.expval(qml.PauliZ(0))
tape = qml.tape.QuantumScript.from_queue(q)
return qml.execute([tape], dev, gradient_fn=gradient_func)
return qml.execute([tape], dev, diff_method=gradient_func)

assert jnp.allclose(wrapper(jnp.array(0.0))[0], 1.0)

2 changes: 1 addition & 1 deletion tests/gradients/parameter_shift/test_parameter_shift.py
@@ -1797,7 +1797,7 @@ def test_integer_parameters(self, tol, par):
# gradients
exact = np.cos(par)
gtapes, fn = qml.gradients.param_shift(tape)
grad_PS = fn(qml.execute(gtapes, dev, gradient_fn=None))
grad_PS = fn(qml.execute(gtapes, dev, diff_method=None))

# different methods must agree
assert np.allclose(grad_PS, exact, atol=tol, rtol=0)
26 changes: 13 additions & 13 deletions tests/gradients/parameter_shift/test_parameter_shift_hessian.py
@@ -263,7 +263,7 @@ def test_single_expval(self):
expected = -np.cos(x)

tapes, fn = qml.gradients.param_shift_hessian(tape)
hessian = fn(qml.execute(tapes, dev, gradient_fn=None))
hessian = fn(qml.execute(tapes, dev, diff_method=None))

assert isinstance(hessian, np.ndarray)
assert hessian.shape == ()
@@ -285,7 +285,7 @@ def test_single_probs(self):
expected = 0.5 * np.cos(x) * np.array([-1, 0, 0, 1])

tapes, fn = qml.gradients.param_shift_hessian(tape)
hessian = fn(qml.execute(tapes, dev, gradient_fn=None))
hessian = fn(qml.execute(tapes, dev, diff_method=None))

assert isinstance(hessian, np.ndarray)
assert hessian.shape == (4,)
@@ -308,7 +308,7 @@ def test_multi_expval(self):
expected = (-np.cos(x), -np.cos(x) / np.sqrt(2))

tapes, fn = qml.gradients.param_shift_hessian(tape)
hessian = fn(qml.execute(tapes, dev, gradient_fn=None))
hessian = fn(qml.execute(tapes, dev, diff_method=None))

assert isinstance(hessian, tuple)
assert len(hessian) == 2
@@ -335,7 +335,7 @@ def test_multi_expval_probs(self):
expected = (-np.cos(x), 0.5 * np.cos(x) * np.array([-1, 0, 0, 1]))

tapes, fn = qml.gradients.param_shift_hessian(tape)
hessian = fn(qml.execute(tapes, dev, gradient_fn=None))
hessian = fn(qml.execute(tapes, dev, diff_method=None))

assert isinstance(hessian, tuple)
assert len(hessian) == 2
@@ -362,7 +362,7 @@ def test_multi_probs(self):
expected = (0.5 * np.cos(x) * np.array([-1, 1]), 0.5 * np.cos(x) * np.array([-1, 0, 0, 1]))

tapes, fn = qml.gradients.param_shift_hessian(tape)
hessian = fn(qml.execute(tapes, dev, gradient_fn=None))
hessian = fn(qml.execute(tapes, dev, diff_method=None))

assert isinstance(hessian, tuple)
assert len(hessian) == 2
@@ -389,7 +389,7 @@ def test_single_expval_multi_params(self):
expected = ((-np.cos(x[0]), 0), (0, 0))

tapes, fn = qml.gradients.param_shift_hessian(tape)
hessian = fn(qml.execute(tapes, dev, gradient_fn=None))
hessian = fn(qml.execute(tapes, dev, diff_method=None))

assert isinstance(hessian, tuple)
assert len(hessian) == 2
@@ -433,7 +433,7 @@ def test_single_probs_multi_params(self):
)

tapes, fn = qml.gradients.param_shift_hessian(tape)
hessian = fn(qml.execute(tapes, dev, gradient_fn=None))
hessian = fn(qml.execute(tapes, dev, diff_method=None))

assert isinstance(hessian, tuple)
assert len(hessian) == 2
@@ -475,7 +475,7 @@ def test_multi_expval_multi_params(self):
)

tapes, fn = qml.gradients.param_shift_hessian(tape)
hessian = fn(qml.execute(tapes, dev, gradient_fn=None))
hessian = fn(qml.execute(tapes, dev, diff_method=None))

assert isinstance(hessian, tuple)
assert len(hessian) == 2
@@ -524,7 +524,7 @@ def test_multi_expval_probs_multi_params(self):
)

tapes, fn = qml.gradients.param_shift_hessian(tape)
hessian = fn(qml.execute(tapes, dev, gradient_fn=None))
hessian = fn(qml.execute(tapes, dev, diff_method=None))

assert isinstance(hessian, tuple)
assert len(hessian) == 2
@@ -585,7 +585,7 @@ def test_multi_probs_multi_params(self):
)

tapes, fn = qml.gradients.param_shift_hessian(tape)
hessian = fn(qml.execute(tapes, dev, gradient_fn=None))
hessian = fn(qml.execute(tapes, dev, diff_method=None))

assert isinstance(hessian, tuple)
assert len(hessian) == 2
@@ -619,7 +619,7 @@ def test_multi_params_argnum(self):
expected = ((0, 0, 0), (0, 0, 0), (0, 0, -np.cos(x[2] + x[0])))

tapes, fn = qml.gradients.param_shift_hessian(tape, argnum=(1, 2))
hessian = fn(qml.execute(tapes, dev, gradient_fn=None))
hessian = fn(qml.execute(tapes, dev, diff_method=None))

assert isinstance(hessian, tuple)
assert len(hessian) == 3
@@ -1613,7 +1613,7 @@ def circuit(x):
for _tape, exp_shift in zip(tapes[3:-1], expected_shifts):
assert np.allclose(_tape.get_parameters(), x + exp_shift)

hessian = fn(qml.execute(tapes, dev, gradient_fn=qml.gradients.param_shift))
hessian = fn(qml.execute(tapes, dev, diff_method=qml.gradients.param_shift))

assert np.allclose(expected, hessian)

@@ -1666,7 +1666,7 @@ def circuit(x):
for mult, _tape in zip(shift_order, tapes[10:]):
assert np.allclose(_tape.get_parameters(), x + np.array([0.0, np.pi * mult]))

hessian = fn(qml.execute(tapes, dev, gradient_fn=qml.gradients.param_shift))
hessian = fn(qml.execute(tapes, dev, diff_method=qml.gradients.param_shift))
assert np.allclose(expected, hessian)

@pytest.mark.parametrize("argnum", [(0,), (1,), (0, 1)])