diff --git a/doc/development/deprecations.rst b/doc/development/deprecations.rst
index 31b25abd9a8..0520e11f555 100644
--- a/doc/development/deprecations.rst
+++ b/doc/development/deprecations.rst
@@ -9,6 +9,11 @@ deprecations are listed below.
Pending deprecations
--------------------
+* The ``gradient_fn`` keyword argument to ``qml.execute`` has been renamed to ``diff_method``.
+
+ - Deprecated in v0.40
+ - Will be removed in v0.41
+
* The ``max_expansion`` argument for :func:`~pennylane.transforms.decompositions.clifford_t_decomposition`
has been deprecated.
diff --git a/doc/releases/changelog-dev.md b/doc/releases/changelog-dev.md
index 1bddb979d80..402124e5c82 100644
--- a/doc/releases/changelog-dev.md
+++ b/doc/releases/changelog-dev.md
@@ -41,6 +41,9 @@
Deprecations 👋
+* The `gradient_fn` keyword argument to `qml.execute` has been renamed to `diff_method`, to better align with the terminology
+  used by the `QNode`. `gradient_fn` will be removed in v0.41.
+
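+  For example, a minimal sketch of the migration (using the `default.qubit` device and the
+  parameter-shift gradient transform, both of which are exercised in the updated tests):
+
+  ```python
+  tape = qml.tape.QuantumScript([qml.RX(0.5, wires=0)], [qml.expval(qml.Z(0))])
+  dev = qml.device("default.qubit")
+
+  # previously: qml.execute([tape], dev, gradient_fn=qml.gradients.param_shift)
+  res = qml.execute([tape], dev, diff_method=qml.gradients.param_shift)
+  ```
+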
Documentation 📝
Bug fixes 🐛
diff --git a/pennylane/workflow/execution.py b/pennylane/workflow/execution.py
index 867fa38d8d8..0c6806e4221 100644
--- a/pennylane/workflow/execution.py
+++ b/pennylane/workflow/execution.py
@@ -27,6 +27,7 @@
from collections.abc import Callable, MutableMapping
from functools import partial
from typing import Literal, Optional, Union, get_args
+from warnings import warn
from cachetools import Cache, LRUCache
@@ -310,7 +311,7 @@ def _update_mcm_config(mcm_config: "qml.devices.MCMConfig", interface: str, fini
def execute(
tapes: QuantumScriptBatch,
device: SupportedDeviceAPIs,
- gradient_fn: Optional[Union[Callable, str]] = None,
+ diff_method: Optional[Union[Callable, str, qml.transforms.core.TransformDispatcher]] = None,
interface: Optional[str] = "auto",
transform_program=None,
inner_transform=None,
@@ -322,6 +323,7 @@ def execute(
max_diff=1,
device_vjp=False,
mcm_config=None,
+ gradient_fn="unset",
) -> ResultBatch:
"""New function to execute a batch of tapes on a device in an autodifferentiable-compatible manner. More cases will be added,
during the project. The current version is supporting forward execution for NumPy and does not support shot vectors.
@@ -331,7 +333,7 @@ def execute(
device (pennylane.Device): Device to use to execute the batch of tapes.
If the device does not provide a ``batch_execute`` method,
by default the tapes will be executed in serial.
- gradient_fn (None or callable): The gradient transform function to use
+        diff_method (None, str, or TransformDispatcher): The differentiation method to use
for backward passes. If "device", the device will be queried directly
for the gradient (if supported).
interface (str): The interface that will be used for classical autodifferentiation.
@@ -356,6 +358,8 @@ def execute(
device_vjp=False (Optional[bool]): whether or not to use the device provided jacobian
product if it is available.
mcm_config (dict): Dictionary containing configuration options for handling mid-circuit measurements.
+        gradient_fn="unset": **DEPRECATED**. This keyword argument has been renamed to ``diff_method`` and will
+            be removed in v0.41.
Returns:
list[tensor_like[float]]: A nested list of tape results. Each element in
@@ -385,7 +389,7 @@ def cost_fn(params, x):
tapes = [tape1, tape2]
# execute both tapes in a batch on the given device
- res = qml.execute(tapes, dev, gradient_fn=qml.gradients.param_shift, max_diff=2)
+ res = qml.execute(tapes, dev, diff_method=qml.gradients.param_shift, max_diff=2)
return res[0] + res[1][0] - res[1][1]
@@ -419,15 +423,22 @@ def cost_fn(params, x):
if not isinstance(device, qml.devices.Device):
device = qml.devices.LegacyDeviceFacade(device)
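+    # "unset" is used as the default sentinel rather than None, because None is itself a valid diff_method value.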
+ if gradient_fn != "unset":
+ warn(
+ "gradient_fn has been renamed to diff_method in qml.execute",
+ qml.PennyLaneDeprecationWarning,
+ )
+ diff_method = gradient_fn
+
if logger.isEnabledFor(logging.DEBUG):
logger.debug(
- """Entry with args=(tapes=%s, device=%s, gradient_fn=%s, interface=%s, grad_on_execution=%s, gradient_kwargs=%s, cache=%s, cachesize=%s, max_diff=%s) called by=%s""",
+ """Entry with args=(tapes=%s, device=%s, diff_method=%s, interface=%s, grad_on_execution=%s, gradient_kwargs=%s, cache=%s, cachesize=%s, max_diff=%s) called by=%s""",
tapes,
repr(device),
(
- gradient_fn
- if not (logger.isEnabledFor(qml.logging.TRACE) and inspect.isfunction(gradient_fn))
- else "\n" + inspect.getsource(gradient_fn) + "\n"
+ diff_method
+ if not (logger.isEnabledFor(qml.logging.TRACE) and inspect.isfunction(diff_method))
+ else "\n" + inspect.getsource(diff_method) + "\n"
),
interface,
grad_on_execution,
@@ -444,7 +455,7 @@ def cost_fn(params, x):
interface = _get_interface_name(tapes, interface)
# Only need to calculate derivatives with jax when we know it will be executed later.
if interface in {"jax", "jax-jit"}:
- grad_on_execution = grad_on_execution if isinstance(gradient_fn, Callable) else False
+ grad_on_execution = grad_on_execution if isinstance(diff_method, Callable) else False
if (
device_vjp
@@ -458,7 +469,7 @@ def cost_fn(params, x):
gradient_kwargs = gradient_kwargs or {}
mcm_config = mcm_config or {}
config = config or _get_execution_config(
- gradient_fn, grad_on_execution, interface, device, device_vjp, mcm_config, gradient_kwargs
+ diff_method, grad_on_execution, interface, device, device_vjp, mcm_config, gradient_kwargs
)
# Mid-circuit measurement configuration validation
@@ -470,7 +481,7 @@ def cost_fn(params, x):
finite_shots = any(tape.shots for tape in tapes)
_update_mcm_config(config.mcm_config, mcm_interface, finite_shots)
- is_gradient_transform = isinstance(gradient_fn, qml.transforms.core.TransformDispatcher)
+ is_gradient_transform = isinstance(diff_method, qml.transforms.core.TransformDispatcher)
transform_program, inner_transform = _make_transform_programs(
device, config, inner_transform, transform_program, is_gradient_transform
)
@@ -547,7 +558,7 @@ def execute_fn(internal_tapes):
return device.execute_and_compute_derivatives(numpy_tapes, config)
- gradient_fn = None
+ diff_method = None
else:
@@ -561,7 +572,7 @@ def execute_fn(internal_tapes) -> tuple[ResultBatch, tuple]:
numpy_tapes, _ = qml.transforms.convert_to_numpy_parameters(internal_tapes)
return device.execute(numpy_tapes, config), tuple()
- def gradient_fn(internal_tapes):
+ def diff_method(internal_tapes):
"""A partial function that wraps compute_derivatives method of the device.
Closure Variables:
@@ -573,7 +584,7 @@ def gradient_fn(internal_tapes):
elif grad_on_execution is True:
# In "forward" mode, gradients are automatically handled
- # within execute_and_gradients, so providing a gradient_fn
+ # within execute_and_gradients, so providing a diff_method
# in this case would have ambiguous behaviour.
raise ValueError("Gradient transforms cannot be used with grad_on_execution=True")
elif interface in jpc_interfaces:
@@ -586,7 +597,7 @@ def gradient_fn(internal_tapes):
# its own jacobian product class
# this mechanism unpacks the currently existing recursion
jpc = TransformJacobianProducts(
- execute_fn, gradient_fn, gradient_kwargs, cache_full_jacobian
+ execute_fn, diff_method, gradient_kwargs, cache_full_jacobian
)
for i in range(1, max_diff):
differentiable = i > 1
@@ -599,7 +610,7 @@ def gradient_fn(internal_tapes):
jpc=jpc,
device=device,
)
- jpc = TransformJacobianProducts(execute_fn, gradient_fn, gradient_kwargs)
+ jpc = TransformJacobianProducts(execute_fn, diff_method, gradient_kwargs)
if interface == "jax-jit":
# no need to use pure callbacks around execute_fn or the jpc when taking
@@ -624,7 +635,7 @@ def gradient_fn(internal_tapes):
results = ml_boundary_execute(tapes, execute_fn, jpc, device=device)
else:
results = ml_boundary_execute(
- tapes, device, execute_fn, gradient_fn, gradient_kwargs, _n=1, max_diff=max_diff
+ tapes, device, execute_fn, diff_method, gradient_kwargs, _n=1, max_diff=max_diff
)
return post_processing(results)
@@ -635,7 +646,7 @@ def _make_transform_programs(
):
"""helper function to make the transform programs."""
- # If gradient_fn is a gradient transform, device preprocessing should happen in
+ # If diff_method is a gradient transform, device preprocessing should happen in
# inner execute (inside the ml boundary).
if is_gradient_transform:
if inner_transform is None:
@@ -652,13 +663,13 @@ def _make_transform_programs(
def _get_execution_config(
- gradient_fn, grad_on_execution, interface, device, device_vjp, mcm_config, gradient_kwargs
+ diff_method, grad_on_execution, interface, device, device_vjp, mcm_config, gradient_kwargs
):
"""Helper function to get the execution config."""
- if gradient_fn is None:
+ if diff_method is None:
_gradient_method = None
- elif isinstance(gradient_fn, str):
- _gradient_method = gradient_fn
+ elif isinstance(diff_method, str):
+ _gradient_method = diff_method
else:
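+        # any other value (a callable such as qml.gradients.param_shift) is treated as a gradient transform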
_gradient_method = "gradient-transform"
config = qml.devices.ExecutionConfig(
diff --git a/pennylane/workflow/qnode.py b/pennylane/workflow/qnode.py
index 17ac1241ec0..c8a85c36981 100644
--- a/pennylane/workflow/qnode.py
+++ b/pennylane/workflow/qnode.py
@@ -935,7 +935,7 @@ def _execution_component(self, args: tuple, kwargs: dict) -> qml.typing.Result:
res = qml.execute(
(self._tape,),
device=self.device,
- gradient_fn=gradient_fn,
+ diff_method=gradient_fn,
interface=interface,
transform_program=full_transform_program,
inner_transform=inner_transform_program,
diff --git a/tests/devices/test_default_mixed_jax.py b/tests/devices/test_default_mixed_jax.py
index c8ae1ce8ac1..3b4f63ffc25 100644
--- a/tests/devices/test_default_mixed_jax.py
+++ b/tests/devices/test_default_mixed_jax.py
@@ -154,7 +154,7 @@ def wrapper(x):
qml.RX(x, wires=0)
qml.expval(qml.PauliZ(0))
tape = qml.tape.QuantumScript.from_queue(q)
- return qml.execute([tape], dev, gradient_fn=gradient_func)
+ return qml.execute([tape], dev, diff_method=gradient_func)
assert jnp.allclose(wrapper(jnp.array(0.0))[0], 1.0)
diff --git a/tests/gradients/parameter_shift/test_parameter_shift.py b/tests/gradients/parameter_shift/test_parameter_shift.py
index 5f8d6308a51..57ac1741bfa 100644
--- a/tests/gradients/parameter_shift/test_parameter_shift.py
+++ b/tests/gradients/parameter_shift/test_parameter_shift.py
@@ -1797,7 +1797,7 @@ def test_integer_parameters(self, tol, par):
# gradients
exact = np.cos(par)
gtapes, fn = qml.gradients.param_shift(tape)
- grad_PS = fn(qml.execute(gtapes, dev, gradient_fn=None))
+ grad_PS = fn(qml.execute(gtapes, dev, diff_method=None))
# different methods must agree
assert np.allclose(grad_PS, exact, atol=tol, rtol=0)
diff --git a/tests/gradients/parameter_shift/test_parameter_shift_hessian.py b/tests/gradients/parameter_shift/test_parameter_shift_hessian.py
index 030d670da8d..08f39601036 100644
--- a/tests/gradients/parameter_shift/test_parameter_shift_hessian.py
+++ b/tests/gradients/parameter_shift/test_parameter_shift_hessian.py
@@ -263,7 +263,7 @@ def test_single_expval(self):
expected = -np.cos(x)
tapes, fn = qml.gradients.param_shift_hessian(tape)
- hessian = fn(qml.execute(tapes, dev, gradient_fn=None))
+ hessian = fn(qml.execute(tapes, dev, diff_method=None))
assert isinstance(hessian, np.ndarray)
assert hessian.shape == ()
@@ -285,7 +285,7 @@ def test_single_probs(self):
expected = 0.5 * np.cos(x) * np.array([-1, 0, 0, 1])
tapes, fn = qml.gradients.param_shift_hessian(tape)
- hessian = fn(qml.execute(tapes, dev, gradient_fn=None))
+ hessian = fn(qml.execute(tapes, dev, diff_method=None))
assert isinstance(hessian, np.ndarray)
assert hessian.shape == (4,)
@@ -308,7 +308,7 @@ def test_multi_expval(self):
expected = (-np.cos(x), -np.cos(x) / np.sqrt(2))
tapes, fn = qml.gradients.param_shift_hessian(tape)
- hessian = fn(qml.execute(tapes, dev, gradient_fn=None))
+ hessian = fn(qml.execute(tapes, dev, diff_method=None))
assert isinstance(hessian, tuple)
assert len(hessian) == 2
@@ -335,7 +335,7 @@ def test_multi_expval_probs(self):
expected = (-np.cos(x), 0.5 * np.cos(x) * np.array([-1, 0, 0, 1]))
tapes, fn = qml.gradients.param_shift_hessian(tape)
- hessian = fn(qml.execute(tapes, dev, gradient_fn=None))
+ hessian = fn(qml.execute(tapes, dev, diff_method=None))
assert isinstance(hessian, tuple)
assert len(hessian) == 2
@@ -362,7 +362,7 @@ def test_multi_probs(self):
expected = (0.5 * np.cos(x) * np.array([-1, 1]), 0.5 * np.cos(x) * np.array([-1, 0, 0, 1]))
tapes, fn = qml.gradients.param_shift_hessian(tape)
- hessian = fn(qml.execute(tapes, dev, gradient_fn=None))
+ hessian = fn(qml.execute(tapes, dev, diff_method=None))
assert isinstance(hessian, tuple)
assert len(hessian) == 2
@@ -389,7 +389,7 @@ def test_single_expval_multi_params(self):
expected = ((-np.cos(x[0]), 0), (0, 0))
tapes, fn = qml.gradients.param_shift_hessian(tape)
- hessian = fn(qml.execute(tapes, dev, gradient_fn=None))
+ hessian = fn(qml.execute(tapes, dev, diff_method=None))
assert isinstance(hessian, tuple)
assert len(hessian) == 2
@@ -433,7 +433,7 @@ def test_single_probs_multi_params(self):
)
tapes, fn = qml.gradients.param_shift_hessian(tape)
- hessian = fn(qml.execute(tapes, dev, gradient_fn=None))
+ hessian = fn(qml.execute(tapes, dev, diff_method=None))
assert isinstance(hessian, tuple)
assert len(hessian) == 2
@@ -475,7 +475,7 @@ def test_multi_expval_multi_params(self):
)
tapes, fn = qml.gradients.param_shift_hessian(tape)
- hessian = fn(qml.execute(tapes, dev, gradient_fn=None))
+ hessian = fn(qml.execute(tapes, dev, diff_method=None))
assert isinstance(hessian, tuple)
assert len(hessian) == 2
@@ -524,7 +524,7 @@ def test_multi_expval_probs_multi_params(self):
)
tapes, fn = qml.gradients.param_shift_hessian(tape)
- hessian = fn(qml.execute(tapes, dev, gradient_fn=None))
+ hessian = fn(qml.execute(tapes, dev, diff_method=None))
assert isinstance(hessian, tuple)
assert len(hessian) == 2
@@ -585,7 +585,7 @@ def test_multi_probs_multi_params(self):
)
tapes, fn = qml.gradients.param_shift_hessian(tape)
- hessian = fn(qml.execute(tapes, dev, gradient_fn=None))
+ hessian = fn(qml.execute(tapes, dev, diff_method=None))
assert isinstance(hessian, tuple)
assert len(hessian) == 2
@@ -619,7 +619,7 @@ def test_multi_params_argnum(self):
expected = ((0, 0, 0), (0, 0, 0), (0, 0, -np.cos(x[2] + x[0])))
tapes, fn = qml.gradients.param_shift_hessian(tape, argnum=(1, 2))
- hessian = fn(qml.execute(tapes, dev, gradient_fn=None))
+ hessian = fn(qml.execute(tapes, dev, diff_method=None))
assert isinstance(hessian, tuple)
assert len(hessian) == 3
@@ -1613,7 +1613,7 @@ def circuit(x):
for _tape, exp_shift in zip(tapes[3:-1], expected_shifts):
assert np.allclose(_tape.get_parameters(), x + exp_shift)
- hessian = fn(qml.execute(tapes, dev, gradient_fn=qml.gradients.param_shift))
+ hessian = fn(qml.execute(tapes, dev, diff_method=qml.gradients.param_shift))
assert np.allclose(expected, hessian)
@@ -1666,7 +1666,7 @@ def circuit(x):
for mult, _tape in zip(shift_order, tapes[10:]):
assert np.allclose(_tape.get_parameters(), x + np.array([0.0, np.pi * mult]))
- hessian = fn(qml.execute(tapes, dev, gradient_fn=qml.gradients.param_shift))
+ hessian = fn(qml.execute(tapes, dev, diff_method=qml.gradients.param_shift))
assert np.allclose(expected, hessian)
@pytest.mark.parametrize("argnum", [(0,), (1,), (0, 1)])
diff --git a/tests/interfaces/test_autograd.py b/tests/interfaces/test_autograd.py
index 589ebed952b..0e0bb8f3f09 100644
--- a/tests/interfaces/test_autograd.py
+++ b/tests/interfaces/test_autograd.py
@@ -60,7 +60,7 @@ def cost(x, cache):
tape = qml.tape.QuantumScript.from_queue(q)
return qml.execute(
- [tape], dev, gradient_fn=qml.gradients.param_shift, cache=cache, max_diff=2
+ [tape], dev, diff_method=qml.gradients.param_shift, cache=cache, max_diff=2
)[0]
# No caching: number of executions is not ideal
@@ -116,7 +116,7 @@ def f(x):
tape1 = qml.tape.QuantumScript([qml.RX(x, 0)], [qml.probs(wires=0)])
tape2 = qml.tape.QuantumScript([qml.RY(x, 0)], [qml.probs(wires=0)])
- results = qml.execute([tape1, tape2], dev, gradient_fn=qml.gradients.param_shift)
+ results = qml.execute([tape1, tape2], dev, diff_method=qml.gradients.param_shift)
return results[0] + results[1]
x = qml.numpy.array(0.1)
@@ -132,47 +132,47 @@ def f(x):
# add tests for lightning 2 when possible
# set rng for device when possible
test_matrix = [
- ({"gradient_fn": param_shift}, Shots(50000), "default.qubit"),
- ({"gradient_fn": param_shift}, Shots((50000, 50000)), "default.qubit"),
- ({"gradient_fn": param_shift}, Shots(None), "default.qubit"),
- ({"gradient_fn": "backprop"}, Shots(None), "default.qubit"),
+ ({"diff_method": param_shift}, Shots(50000), "default.qubit"),
+ ({"diff_method": param_shift}, Shots((50000, 50000)), "default.qubit"),
+ ({"diff_method": param_shift}, Shots(None), "default.qubit"),
+ ({"diff_method": "backprop"}, Shots(None), "default.qubit"),
(
- {"gradient_fn": "adjoint", "grad_on_execution": True, "device_vjp": False},
+ {"diff_method": "adjoint", "grad_on_execution": True, "device_vjp": False},
Shots(None),
"default.qubit",
),
(
{
- "gradient_fn": "adjoint",
+ "diff_method": "adjoint",
"grad_on_execution": False,
"device_vjp": False,
},
Shots(None),
"default.qubit",
),
- ({"gradient_fn": "adjoint", "device_vjp": True}, Shots(None), "default.qubit"),
+ ({"diff_method": "adjoint", "device_vjp": True}, Shots(None), "default.qubit"),
(
- {"gradient_fn": "device", "device_vjp": False},
+ {"diff_method": "device", "device_vjp": False},
Shots((50000, 50000)),
"param_shift.qubit",
),
(
- {"gradient_fn": "device", "device_vjp": True},
+ {"diff_method": "device", "device_vjp": True},
Shots((100000, 100000)),
"param_shift.qubit",
),
(
- {"gradient_fn": param_shift},
+ {"diff_method": param_shift},
Shots(None),
"reference.qubit",
),
(
- {"gradient_fn": param_shift},
+ {"diff_method": param_shift},
Shots(50000),
"reference.qubit",
),
(
- {"gradient_fn": param_shift},
+ {"diff_method": param_shift},
Shots((50000, 50000)),
"reference.qubit",
),
@@ -340,7 +340,7 @@ def cost(params):
def test_tapes_with_different_return_size(self, execute_kwargs, shots, device_name, seed):
"""Test that tapes wit different can be executed and differentiated."""
- if execute_kwargs["gradient_fn"] == "backprop":
+ if execute_kwargs["diff_method"] == "backprop":
pytest.xfail("backprop is not compatible with something about this situation.")
device = get_device(device_name, seed=seed)
@@ -561,12 +561,12 @@ def cost_fn(a, p):
tape = qml.tape.QuantumScript(
[qml.RX(a, wires=0), U3(*p, wires=0)], [qml.expval(qml.PauliX(0))]
)
- gradient_fn = execute_kwargs["gradient_fn"]
+ diff_method = execute_kwargs["diff_method"]
- if gradient_fn is None:
+ if diff_method is None:
_gradient_method = None
- elif isinstance(gradient_fn, str):
- _gradient_method = gradient_fn
+ elif isinstance(diff_method, str):
+ _gradient_method = diff_method
else:
_gradient_method = "gradient-transform"
config = qml.devices.ExecutionConfig(
@@ -712,7 +712,7 @@ def cost_fn(x):
ops2 = [qml.RX(x[0], 0), qml.RY(x[0], 1), qml.CNOT((0, 1))]
tape2 = qml.tape.QuantumScript(ops2, [qml.probs(wires=1)])
- result = execute([tape1, tape2], dev, gradient_fn=param_shift, max_diff=2)
+ result = execute([tape1, tape2], dev, diff_method=param_shift, max_diff=2)
return result[0] + result[1][0]
res = cost_fn(params)
@@ -748,7 +748,7 @@ def cost_fn(x):
ops2 = [qml.RX(x[0], 0), qml.RY(x[0], 1), qml.CNOT((0, 1))]
tape2 = qml.tape.QuantumScript(ops2, [qml.probs(wires=1)])
- result = execute([tape1, tape2], dev, gradient_fn=param_shift, max_diff=1)
+ result = execute([tape1, tape2], dev, diff_method=param_shift, max_diff=1)
return result[0] + result[1][0]
res = cost_fn(params)
@@ -835,7 +835,7 @@ def cost_fn_jacobian(weights, coeffs1, coeffs2):
def test_multiple_hamiltonians_not_trainable(self, execute_kwargs, cost_fn, shots):
"""Test hamiltonian with no trainable parameters."""
- if execute_kwargs["gradient_fn"] == "adjoint" and not qml.operation.active_new_opmath():
+ if execute_kwargs["diff_method"] == "adjoint" and not qml.operation.active_new_opmath():
pytest.skip("adjoint differentiation does not support hamiltonians.")
coeffs1 = pnp.array([0.1, 0.2, 0.3], requires_grad=False)
@@ -860,7 +860,7 @@ def test_multiple_hamiltonians_not_trainable(self, execute_kwargs, cost_fn, shot
def test_multiple_hamiltonians_trainable(self, execute_kwargs, cost_fn, shots):
"""Test hamiltonian with trainable parameters."""
- if execute_kwargs["gradient_fn"] == "adjoint":
+ if execute_kwargs["diff_method"] == "adjoint":
pytest.skip("trainable hamiltonians not supported with adjoint")
if qml.operation.active_new_opmath():
pytest.skip("parameter shift derivatives do not yet support sums.")
diff --git a/tests/interfaces/test_autograd_qnode.py b/tests/interfaces/test_autograd_qnode.py
index f203041279f..6c0975cd490 100644
--- a/tests/interfaces/test_autograd_qnode.py
+++ b/tests/interfaces/test_autograd_qnode.py
@@ -548,7 +548,7 @@ def cost_fn(a, b):
cost_fn(a, b, shots=100)
# since we are using finite shots, parameter-shift will
# be chosen
- assert spy.call_args[1]["gradient_fn"] is qml.gradients.param_shift
+ assert spy.call_args[1]["diff_method"] is qml.gradients.param_shift
with pytest.warns(
qml.PennyLaneDeprecationWarning, match=r"QNode.gradient_fn is deprecated"
):
@@ -560,7 +560,7 @@ def cost_fn(a, b):
qml.PennyLaneDeprecationWarning, match=r"QNode.gradient_fn is deprecated"
):
assert cost_fn.gradient_fn == "backprop"
- assert spy.call_args[1]["gradient_fn"] == "backprop"
+ assert spy.call_args[1]["diff_method"] == "backprop"
@pytest.mark.parametrize(
diff --git a/tests/interfaces/test_execute.py b/tests/interfaces/test_execute.py
index 1732f741d8a..9f0a7ff693d 100644
--- a/tests/interfaces/test_execute.py
+++ b/tests/interfaces/test_execute.py
@@ -20,8 +20,8 @@
from pennylane.devices import DefaultQubit
-@pytest.mark.parametrize("gradient_fn", (None, "backprop", qml.gradients.param_shift))
-def test_caching(gradient_fn):
+@pytest.mark.parametrize("diff_method", (None, "backprop", qml.gradients.param_shift))
+def test_caching(diff_method):
"""Test that cache execute returns the cached result if the same script is executed
multiple times, both in multiple times in a batch and in separate batches."""
dev = DefaultQubit()
@@ -31,8 +31,8 @@ def test_caching(gradient_fn):
cache = {}
with qml.Tracker(dev) as tracker:
- results = qml.execute([qs, qs], dev, cache=cache, gradient_fn=gradient_fn)
- results2 = qml.execute([qs, qs], dev, cache=cache, gradient_fn=gradient_fn)
+ results = qml.execute([qs, qs], dev, cache=cache, diff_method=diff_method)
+ results2 = qml.execute([qs, qs], dev, cache=cache, diff_method=diff_method)
assert len(cache) == 1
assert cache[qs.hash] == -1.0
@@ -55,3 +55,18 @@ def test_execute_legacy_device():
res = qml.execute((tape,), dev)
assert qml.math.allclose(res[0], np.cos(0.1))
+
+
+def test_gradient_fn_deprecation():
+ """Test that gradient_fn has been renamed to diff_method."""
+
+ tape = qml.tape.QuantumScript([qml.RX(qml.numpy.array(1.0), 0)], [qml.expval(qml.Z(0))])
+ dev = qml.device("default.qubit")
+
+ with dev.tracker:
+ with pytest.warns(
+ qml.PennyLaneDeprecationWarning, match=r"gradient_fn has been renamed to diff_method"
+ ):
+ qml.execute((tape,), dev, gradient_fn="adjoint")
+
+ assert dev.tracker.totals["execute_and_derivative_batches"] == 1 # uses adjoint diff
diff --git a/tests/interfaces/test_jax.py b/tests/interfaces/test_jax.py
index 97f8abaa79c..12a354eccb3 100644
--- a/tests/interfaces/test_jax.py
+++ b/tests/interfaces/test_jax.py
@@ -43,8 +43,8 @@ def test_jit_execution():
[qml.RX(jax.numpy.array(0.1), 0)], [qml.expval(qml.s_prod(2.0, qml.PauliZ(0)))]
)
- out = jax.jit(qml.execute, static_argnames=("device", "gradient_fn"))(
- (tape,), device=dev, gradient_fn=qml.gradients.param_shift
+ out = jax.jit(qml.execute, static_argnames=("device", "diff_method"))(
+ (tape,), device=dev, diff_method=qml.gradients.param_shift
)
expected = 2.0 * jax.numpy.cos(jax.numpy.array(0.1))
assert qml.math.allclose(out[0], expected)
@@ -78,7 +78,7 @@ def cost(x, cache):
tape = qml.tape.QuantumScript.from_queue(q)
return qml.execute(
- [tape], device, gradient_fn=qml.gradients.param_shift, cache=cache, max_diff=2
+ [tape], device, diff_method=qml.gradients.param_shift, cache=cache, max_diff=2
)[0]
# No caching: number of executions is not ideal
@@ -131,16 +131,16 @@ def cost(x, cache):
shots_10k = Shots(10000)
shots_2_10k = Shots((10000, 10000))
test_matrix = [
- ({"gradient_fn": param_shift}, shots_10k, "default.qubit"), # 0
- ({"gradient_fn": param_shift}, shots_2_10k, "default.qubit"), # 1
- ({"gradient_fn": param_shift}, no_shots, "default.qubit"), # 2
- ({"gradient_fn": "backprop"}, no_shots, "default.qubit"), # 3
- ({"gradient_fn": "adjoint"}, no_shots, "default.qubit"), # 4
- ({"gradient_fn": "adjoint", "device_vjp": True}, no_shots, "default.qubit"), # 5
- ({"gradient_fn": "device"}, shots_2_10k, "param_shift.qubit"), # 6
- ({"gradient_fn": param_shift}, no_shots, "reference.qubit"), # 7
- ({"gradient_fn": param_shift}, shots_10k, "reference.qubit"), # 8
- ({"gradient_fn": param_shift}, shots_2_10k, "reference.qubit"), # 9
+ ({"diff_method": param_shift}, shots_10k, "default.qubit"), # 0
+ ({"diff_method": param_shift}, shots_2_10k, "default.qubit"), # 1
+ ({"diff_method": param_shift}, no_shots, "default.qubit"), # 2
+ ({"diff_method": "backprop"}, no_shots, "default.qubit"), # 3
+ ({"diff_method": "adjoint"}, no_shots, "default.qubit"), # 4
+ ({"diff_method": "adjoint", "device_vjp": True}, no_shots, "default.qubit"), # 5
+ ({"diff_method": "device"}, shots_2_10k, "param_shift.qubit"), # 6
+ ({"diff_method": param_shift}, no_shots, "reference.qubit"), # 7
+ ({"diff_method": param_shift}, shots_10k, "reference.qubit"), # 8
+ ({"diff_method": param_shift}, shots_2_10k, "reference.qubit"), # 9
]
@@ -173,7 +173,7 @@ def cost(a, b):
with device.tracker:
res = cost(a, b)
- if execute_kwargs.get("gradient_fn", None) == "adjoint":
+ if execute_kwargs.get("diff_method", None) == "adjoint":
assert device.tracker.totals.get("execute_and_derivative_batches", 0) == 0
else:
assert device.tracker.totals["batches"] == 1
@@ -377,7 +377,7 @@ def cost(params):
def test_reusing_quantum_tape(self, execute_kwargs, shots, device_name, seed):
"""Test re-using a quantum tape by passing new parameters"""
- if execute_kwargs["gradient_fn"] == param_shift:
+ if execute_kwargs["diff_method"] == param_shift:
pytest.skip("Basic QNode execution wipes out trainable params with param-shift")
device = get_device(device_name, seed)
@@ -502,11 +502,11 @@ def cost_fn(a, p):
[qml.expval(qml.PauliX(0))],
shots=shots,
)
- gradient_fn = execute_kwargs["gradient_fn"]
- if gradient_fn is None:
+ diff_method = execute_kwargs["diff_method"]
+ if diff_method is None:
_gradient_method = None
- elif isinstance(gradient_fn, str):
- _gradient_method = gradient_fn
+ elif isinstance(diff_method, str):
+ _gradient_method = diff_method
else:
_gradient_method = "gradient-transform"
conf = qml.devices.ExecutionConfig(
@@ -674,7 +674,7 @@ def cost_fn(x):
ops2 = [qml.RX(x[0], 0), qml.RY(x[0], 1), qml.CNOT((0, 1))]
tape2 = qml.tape.QuantumScript(ops2, [qml.probs(wires=1)])
- result = execute([tape1, tape2], dev, gradient_fn=param_shift, max_diff=2)
+ result = execute([tape1, tape2], dev, diff_method=param_shift, max_diff=2)
return result[0] + result[1][0]
res = cost_fn(params)
@@ -710,7 +710,7 @@ def cost_fn(x):
ops2 = [qml.RX(x[0], 0), qml.RY(x[0], 1), qml.CNOT((0, 1))]
tape2 = qml.tape.QuantumScript(ops2, [qml.probs(wires=1)])
- result = execute([tape1, tape2], dev, gradient_fn=param_shift, max_diff=1)
+ result = execute([tape1, tape2], dev, diff_method=param_shift, max_diff=1)
return result[0] + result[1][0]
res = cost_fn(params)
@@ -798,7 +798,7 @@ def cost_fn_jacobian(weights, coeffs1, coeffs2):
def test_multiple_hamiltonians_not_trainable(self, execute_kwargs, cost_fn, shots):
"""Test hamiltonian with no trainable parameters."""
- if execute_kwargs["gradient_fn"] == "adjoint" and not qml.operation.active_new_opmath():
+ if execute_kwargs["diff_method"] == "adjoint" and not qml.operation.active_new_opmath():
pytest.skip("adjoint differentiation does not suppport hamiltonians.")
coeffs1 = jnp.array([0.1, 0.2, 0.3])
@@ -823,7 +823,7 @@ def test_multiple_hamiltonians_not_trainable(self, execute_kwargs, cost_fn, shot
def test_multiple_hamiltonians_trainable(self, execute_kwargs, cost_fn, shots):
"""Test hamiltonian with trainable parameters."""
- if execute_kwargs["gradient_fn"] == "adjoint":
+ if execute_kwargs["diff_method"] == "adjoint":
pytest.skip("trainable hamiltonians not supported with adjoint")
if qml.operation.active_new_opmath():
pytest.skip("parameter shift derivatives do not yet support sums.")
diff --git a/tests/interfaces/test_jax_jit.py b/tests/interfaces/test_jax_jit.py
index eea7b6be52a..c6b2ce19de9 100644
--- a/tests/interfaces/test_jax_jit.py
+++ b/tests/interfaces/test_jax_jit.py
@@ -50,7 +50,7 @@ def cost(a, device):
return execute(
[tape],
device,
- gradient_fn=param_shift,
+ diff_method=param_shift,
gradient_kwargs={"shifts": [(np.pi / 4,)] * 2},
)[0]
@@ -77,7 +77,7 @@ def cost(a, device):
return execute(
[tape],
device,
- gradient_fn=param_shift,
+ diff_method=param_shift,
grad_on_execution=True,
)[0]
@@ -103,7 +103,7 @@ def cost(a, device):
return execute(
[tape],
device,
- gradient_fn=param_shift,
+ diff_method=param_shift,
interface="None",
)[0]
@@ -126,7 +126,7 @@ def cost(a):
return execute(
[tape],
dev,
- gradient_fn="adjoint",
+ diff_method="adjoint",
)[0]
a = jax.numpy.array([0.1, 0.2])
@@ -158,7 +158,7 @@ def cost(a):
return execute(
[tape],
dev,
- gradient_fn="adjoint",
+ diff_method="adjoint",
grad_on_execution=False,
)[0]
@@ -194,7 +194,7 @@ def cost(a, cachesize):
return execute(
[tape],
dev,
- gradient_fn=param_shift,
+ diff_method=param_shift,
cachesize=cachesize,
)[0]
@@ -222,7 +222,7 @@ def cost(a, cache):
return execute(
[tape],
dev,
- gradient_fn=param_shift,
+ diff_method=param_shift,
cache=cache,
)[0]
@@ -259,7 +259,7 @@ def cost(a, b, cache):
res = execute(
[tape1, tape2],
dev,
- gradient_fn=param_shift,
+ diff_method=param_shift,
cache=cache,
)
return res[0]
@@ -286,7 +286,7 @@ def cost(a, cache):
return execute(
[tape],
dev,
- gradient_fn=param_shift,
+ diff_method=param_shift,
cache=cache,
)[0]
@@ -338,7 +338,7 @@ def cost(a, cache):
return execute(
[tape],
dev,
- gradient_fn="adjoint",
+ diff_method="adjoint",
cache=cache,
grad_on_execution=False,
)[0]
@@ -361,13 +361,13 @@ def cost(a, cache):
execute_kwargs_integration = [
- {"gradient_fn": param_shift},
+ {"diff_method": param_shift},
{
- "gradient_fn": "adjoint",
+ "diff_method": "adjoint",
"grad_on_execution": True,
},
{
- "gradient_fn": "adjoint",
+ "diff_method": "adjoint",
"grad_on_execution": False,
},
]
@@ -799,7 +799,7 @@ def test_qnode_sample(self, execute_kwargs):
dev = qml.device("default.qubit", wires=2, shots=10)
params = jax.numpy.array([0.1, 0.2, 0.3])
- grad_meth = execute_kwargs.get("gradient_fn", "")
+ grad_meth = execute_kwargs.get("diff_method", "")
if grad_meth in ("adjoint", "backprop"):
pytest.skip("Adjoint does not support probs")
@@ -929,7 +929,7 @@ def test_jit_allcounts_broadcasting(self):
@pytest.mark.xfail(reason="Need to figure out how to handle this case in a less ambiguous manner")
def test_diff_method_None_jit():
- """Test that jitted execution works when `gradient_fn=None`."""
+ """Test that jitted execution works when `diff_method=None`."""
dev = qml.device("default.qubit", wires=1, shots=10)
@@ -941,6 +941,6 @@ def wrapper(x):
tape = qml.tape.QuantumScript.from_queue(q)
- return qml.execute([tape], dev, gradient_fn=None)
+ return qml.execute([tape], dev, diff_method=None)
assert jax.numpy.allclose(wrapper(jax.numpy.array(0.0))[0], 1.0)
diff --git a/tests/interfaces/test_jax_jit_qnode.py b/tests/interfaces/test_jax_jit_qnode.py
index fdb4094160d..82d548ab4fe 100644
--- a/tests/interfaces/test_jax_jit_qnode.py
+++ b/tests/interfaces/test_jax_jit_qnode.py
@@ -893,7 +893,7 @@ def cost_fn(a, b):
qml.PennyLaneDeprecationWarning, match=r"QNode.gradient_fn is deprecated"
):
assert cost_fn.gradient_fn == qml.gradients.param_shift
- assert spy.call_args[1]["gradient_fn"] is qml.gradients.param_shift
+ assert spy.call_args[1]["diff_method"] is qml.gradients.param_shift
cost_fn(a, b)
with pytest.warns(
@@ -901,7 +901,7 @@ def cost_fn(a, b):
):
assert cost_fn.gradient_fn == "backprop"
# if we set the shots to None, backprop can now be used
- assert spy.call_args[1]["gradient_fn"] == "backprop"
+ assert spy.call_args[1]["diff_method"] == "backprop"
@pytest.mark.parametrize("shots", [(10000, 10000), (10000, 10005)])
def test_shot_vectors_single_measurements(self, interface, shots, seed):
diff --git a/tests/interfaces/test_jax_qnode.py b/tests/interfaces/test_jax_qnode.py
index 261a93a3e18..336a3a87fcd 100644
--- a/tests/interfaces/test_jax_qnode.py
+++ b/tests/interfaces/test_jax_qnode.py
@@ -800,7 +800,7 @@ def cost_fn(a, b):
qml.PennyLaneDeprecationWarning, match=r"QNode.gradient_fn is deprecated"
):
assert cost_fn.gradient_fn == qml.gradients.param_shift
- assert spy.call_args[1]["gradient_fn"] is qml.gradients.param_shift
+ assert spy.call_args[1]["diff_method"] is qml.gradients.param_shift
# if we use the default shots value of None, backprop can now be used
cost_fn(a, b)
@@ -808,7 +808,7 @@ def cost_fn(a, b):
qml.PennyLaneDeprecationWarning, match=r"QNode.gradient_fn is deprecated"
):
assert cost_fn.gradient_fn == "backprop"
- assert spy.call_args[1]["gradient_fn"] == "backprop"
+ assert spy.call_args[1]["diff_method"] == "backprop"
@pytest.mark.parametrize(
diff --git a/tests/interfaces/test_tensorflow.py b/tests/interfaces/test_tensorflow.py
index 57a38669445..edd43c75c3c 100644
--- a/tests/interfaces/test_tensorflow.py
+++ b/tests/interfaces/test_tensorflow.py
@@ -51,7 +51,7 @@ def cost(x, cache):
tape = qml.tape.QuantumScript.from_queue(q)
return qml.execute(
- [tape], dev, gradient_fn=qml.gradients.param_shift, cache=cache, max_diff=2
+ [tape], dev, diff_method=qml.gradients.param_shift, cache=cache, max_diff=2
)[0]
# No caching: number of executions is not ideal
@@ -109,18 +109,18 @@ def cost(x, cache):
# add tests for lightning 2 when possible
# set rng for device when possible
test_matrix = [
- ({"gradient_fn": param_shift, "interface": "tensorflow"}, 100000, "default.qubit"), # 0
- ({"gradient_fn": param_shift, "interface": "tensorflow"}, None, "default.qubit"), # 1
- ({"gradient_fn": "backprop", "interface": "tensorflow"}, None, "default.qubit"), # 2
- ({"gradient_fn": "adjoint", "interface": "tensorflow"}, None, "default.qubit"), # 3
- ({"gradient_fn": param_shift, "interface": "tf-autograph"}, 100000, "default.qubit"), # 4
- ({"gradient_fn": param_shift, "interface": "tf-autograph"}, None, "default.qubit"), # 5
- ({"gradient_fn": "backprop", "interface": "tf-autograph"}, None, "default.qubit"), # 6
- ({"gradient_fn": "adjoint", "interface": "tf-autograph"}, None, "default.qubit"), # 7
- ({"gradient_fn": "adjoint", "interface": "tf", "device_vjp": True}, None, "default.qubit"), # 8
- ({"gradient_fn": param_shift, "interface": "tensorflow"}, None, "reference.qubit"), # 9
+ ({"diff_method": param_shift, "interface": "tensorflow"}, 100000, "default.qubit"), # 0
+ ({"diff_method": param_shift, "interface": "tensorflow"}, None, "default.qubit"), # 1
+ ({"diff_method": "backprop", "interface": "tensorflow"}, None, "default.qubit"), # 2
+ ({"diff_method": "adjoint", "interface": "tensorflow"}, None, "default.qubit"), # 3
+ ({"diff_method": param_shift, "interface": "tf-autograph"}, 100000, "default.qubit"), # 4
+ ({"diff_method": param_shift, "interface": "tf-autograph"}, None, "default.qubit"), # 5
+ ({"diff_method": "backprop", "interface": "tf-autograph"}, None, "default.qubit"), # 6
+ ({"diff_method": "adjoint", "interface": "tf-autograph"}, None, "default.qubit"), # 7
+ ({"diff_method": "adjoint", "interface": "tf", "device_vjp": True}, None, "default.qubit"), # 8
+ ({"diff_method": param_shift, "interface": "tensorflow"}, None, "reference.qubit"), # 9
(
- {"gradient_fn": param_shift, "interface": "tensorflow"},
+ {"diff_method": param_shift, "interface": "tensorflow"},
100000,
"reference.qubit",
), # 10
@@ -156,7 +156,7 @@ def cost(a, b):
with device.tracker:
res = cost(a, b)
- if execute_kwargs.get("gradient_fn", None) == "adjoint" and not execute_kwargs.get(
+ if execute_kwargs.get("diff_method", None) == "adjoint" and not execute_kwargs.get(
"device_vjp", False
):
assert device.tracker.totals["execute_and_derivative_batches"] == 1
@@ -271,7 +271,7 @@ def cost(params):
if (
execute_kwargs.get("interface", "") == "tf-autograph"
- and execute_kwargs.get("gradient_fn", "") == "adjoint"
+ and execute_kwargs.get("diff_method", "") == "adjoint"
):
with pytest.raises(NotImplementedError):
tape.gradient(res, params)
@@ -287,7 +287,7 @@ def test_tapes_with_different_return_size(self, execute_kwargs, shots, device_na
device = qml.device(device_name, seed=seed)
if (
- execute_kwargs["gradient_fn"] == "adjoint"
+ execute_kwargs["diff_method"] == "adjoint"
and execute_kwargs["interface"] == "tf-autograph"
):
pytest.skip("Cannot compute the jacobian with adjoint-differentation and tf-autograph")
@@ -497,11 +497,11 @@ def cost_fn(a, p):
tape = qml.tape.QuantumScript(
[qml.RX(a, wires=0), U3(*p, wires=0)], [qml.expval(qml.PauliX(0))]
)
- gradient_fn = execute_kwargs["gradient_fn"]
- if gradient_fn is None:
+ diff_method = execute_kwargs["diff_method"]
+ if diff_method is None:
_gradient_method = None
- elif isinstance(gradient_fn, str):
- _gradient_method = gradient_fn
+ elif isinstance(diff_method, str):
+ _gradient_method = diff_method
else:
_gradient_method = "gradient-transform"
config = qml.devices.ExecutionConfig(
@@ -570,7 +570,7 @@ def cost(x, y):
if (
execute_kwargs.get("interface", "") == "tf-autograph"
- and execute_kwargs.get("gradient_fn", "") == "adjoint"
+ and execute_kwargs.get("diff_method", "") == "adjoint"
):
with pytest.raises(tf.errors.UnimplementedError):
tape.jacobian(cost_res, [x, y])
@@ -627,7 +627,7 @@ def cost(x, y):
if (
execute_kwargs.get("interface", "") == "tf-autograph"
- and execute_kwargs.get("gradient_fn", "") == "adjoint"
+ and execute_kwargs.get("diff_method", "") == "adjoint"
):
with pytest.raises(tf.errors.UnimplementedError):
tape.jacobian(cost_res, [x, y])
@@ -669,7 +669,7 @@ def cost_fn(x):
ops2 = [qml.RX(x[0], 0), qml.RY(x[0], 1), qml.CNOT((0, 1))]
tape2 = qml.tape.QuantumScript(ops2, [qml.probs(wires=1)])
- result = execute([tape1, tape2], dev, gradient_fn=param_shift, max_diff=2)
+ result = execute([tape1, tape2], dev, diff_method=param_shift, max_diff=2)
return result[0] + result[1][0]
with tf.GradientTape() as jac_tape:
@@ -708,7 +708,7 @@ def cost_fn(x):
ops2 = [qml.RX(x[0], 0), qml.RY(x[0], 1), qml.CNOT((0, 1))]
tape2 = qml.tape.QuantumScript(ops2, [qml.probs(wires=1)])
- result = execute([tape1, tape2], dev, gradient_fn=param_shift, max_diff=1)
+ result = execute([tape1, tape2], dev, diff_method=param_shift, max_diff=1)
return result[0] + result[1][0]
with tf.GradientTape() as jac_tape:
@@ -794,7 +794,7 @@ def cost_fn_jacobian(weights, coeffs1, coeffs2):
def test_multiple_hamiltonians_not_trainable(self, execute_kwargs, cost_fn, shots):
"""Test hamiltonian with no trainable parameters."""
- if execute_kwargs["gradient_fn"] == "adjoint" and not qml.operation.active_new_opmath():
+ if execute_kwargs["diff_method"] == "adjoint" and not qml.operation.active_new_opmath():
pytest.skip("adjoint differentiation does not suppport hamiltonians.")
device_vjp = execute_kwargs.get("device_vjp", False)
@@ -815,7 +815,7 @@ def test_multiple_hamiltonians_not_trainable(self, execute_kwargs, cost_fn, shot
def test_multiple_hamiltonians_trainable(self, cost_fn, execute_kwargs, shots):
"""Test hamiltonian with trainable parameters."""
- if execute_kwargs["gradient_fn"] == "adjoint":
+ if execute_kwargs["diff_method"] == "adjoint":
pytest.skip("trainable hamiltonians not supported with adjoint")
if qml.operation.active_new_opmath():
pytest.skip("parameter shift derivatives do not yet support sums.")
diff --git a/tests/interfaces/test_tensorflow_qnode.py b/tests/interfaces/test_tensorflow_qnode.py
index 109f5d5cf4e..41a2f22206d 100644
--- a/tests/interfaces/test_tensorflow_qnode.py
+++ b/tests/interfaces/test_tensorflow_qnode.py
@@ -544,7 +544,7 @@ def circuit(weights):
qml.PennyLaneDeprecationWarning, match=r"QNode.gradient_fn is deprecated"
):
assert circuit.gradient_fn == qml.gradients.param_shift
- assert spy.call_args[1]["gradient_fn"] is qml.gradients.param_shift
+ assert spy.call_args[1]["diff_method"] is qml.gradients.param_shift
# if we use the default shots value of None, backprop can now be used
circuit(weights)
@@ -552,7 +552,7 @@ def circuit(weights):
qml.PennyLaneDeprecationWarning, match=r"QNode.gradient_fn is deprecated"
):
assert circuit.gradient_fn == "backprop"
- assert spy.call_args[1]["gradient_fn"] == "backprop"
+ assert spy.call_args[1]["diff_method"] == "backprop"
@pytest.mark.parametrize(
diff --git a/tests/interfaces/test_torch.py b/tests/interfaces/test_torch.py
index 8b1d5ddb978..82814821222 100644
--- a/tests/interfaces/test_torch.py
+++ b/tests/interfaces/test_torch.py
@@ -66,7 +66,7 @@ def cost_no_cache(x):
return qml.execute(
[get_cost_tape(x)],
dev,
- gradient_fn=qml.gradients.param_shift,
+ diff_method=qml.gradients.param_shift,
cache=False,
max_diff=2,
)[0]
@@ -75,7 +75,7 @@ def cost_cache(x):
return qml.execute(
[get_cost_tape(x)],
dev,
- gradient_fn=qml.gradients.param_shift,
+ diff_method=qml.gradients.param_shift,
cache=True,
max_diff=2,
)[0]
@@ -136,39 +136,39 @@ def get_device(dev_name, seed):
# add tests for lightning 2 when possible
# set rng for device when possible
test_matrix = [
- ({"gradient_fn": param_shift}, Shots(100000), "default.qubit"),
- ({"gradient_fn": param_shift}, Shots((100000, 100000)), "default.qubit"),
- ({"gradient_fn": param_shift}, Shots(None), "default.qubit"),
- ({"gradient_fn": "backprop"}, Shots(None), "default.qubit"),
+ ({"diff_method": param_shift}, Shots(100000), "default.qubit"),
+ ({"diff_method": param_shift}, Shots((100000, 100000)), "default.qubit"),
+ ({"diff_method": param_shift}, Shots(None), "default.qubit"),
+ ({"diff_method": "backprop"}, Shots(None), "default.qubit"),
(
- {"gradient_fn": "adjoint", "grad_on_execution": True, "device_vjp": False},
+ {"diff_method": "adjoint", "grad_on_execution": True, "device_vjp": False},
Shots(None),
"default.qubit",
),
(
{
- "gradient_fn": "adjoint",
+ "diff_method": "adjoint",
"grad_on_execution": False,
"device_vjp": False,
},
Shots(None),
"default.qubit",
),
- ({"gradient_fn": "adjoint", "device_vjp": True}, Shots(None), "default.qubit"),
- ({"gradient_fn": "device", "device_vjp": False}, Shots((100000, 100000)), "param_shift.qubit"),
- ({"gradient_fn": "device", "device_vjp": True}, Shots((100000, 100000)), "param_shift.qubit"),
+ ({"diff_method": "adjoint", "device_vjp": True}, Shots(None), "default.qubit"),
+ ({"diff_method": "device", "device_vjp": False}, Shots((100000, 100000)), "param_shift.qubit"),
+ ({"diff_method": "device", "device_vjp": True}, Shots((100000, 100000)), "param_shift.qubit"),
(
- {"gradient_fn": param_shift},
+ {"diff_method": param_shift},
Shots(None),
"reference.qubit",
),
(
- {"gradient_fn": param_shift},
+ {"diff_method": param_shift},
Shots(100000),
"reference.qubit",
),
(
- {"gradient_fn": param_shift},
+ {"diff_method": param_shift},
Shots((100000, 100000)),
"reference.qubit",
),
@@ -356,7 +356,7 @@ def cost(params):
def test_tapes_with_different_return_size(self, execute_kwargs, shots, device_name, seed):
"""Test that tapes wit different can be executed and differentiated."""
- if execute_kwargs["gradient_fn"] == "backprop":
+ if execute_kwargs["diff_method"] == "backprop":
pytest.xfail("backprop is not compatible with something about this situation.")
device = get_device(device_name, seed)
@@ -541,11 +541,11 @@ def cost_fn(a, p):
tape = qml.tape.QuantumScript(
[qml.RX(a, wires=0), U3(*p, wires=0)], [qml.expval(qml.PauliX(0))]
)
- gradient_fn = execute_kwargs["gradient_fn"]
- if gradient_fn is None:
+ diff_method = execute_kwargs["diff_method"]
+ if diff_method is None:
_gradient_method = None
- elif isinstance(gradient_fn, str):
- _gradient_method = gradient_fn
+ elif isinstance(diff_method, str):
+ _gradient_method = diff_method
else:
_gradient_method = "gradient-transform"
config = qml.devices.ExecutionConfig(
@@ -702,7 +702,7 @@ def cost_fn(x):
ops2 = [qml.RX(x[0], 0), qml.RY(x[0], 1), qml.CNOT((0, 1))]
tape2 = qml.tape.QuantumScript(ops2, [qml.probs(wires=1)])
- result = execute([tape1, tape2], dev, gradient_fn=param_shift, max_diff=2)
+ result = execute([tape1, tape2], dev, diff_method=param_shift, max_diff=2)
return result[0] + result[1][0]
res = cost_fn(params)
@@ -738,7 +738,7 @@ def cost_fn(x):
ops2 = [qml.RX(x[0], 0), qml.RY(x[0], 1), qml.CNOT((0, 1))]
tape2 = qml.tape.QuantumScript(ops2, [qml.probs(wires=1)])
- result = execute([tape1, tape2], dev, gradient_fn=param_shift, max_diff=1)
+ result = execute([tape1, tape2], dev, diff_method=param_shift, max_diff=1)
return result[0] + result[1][0]
res = cost_fn(params)
@@ -830,7 +830,7 @@ def cost_fn_jacobian(weights, coeffs1, coeffs2):
def test_multiple_hamiltonians_not_trainable(self, execute_kwargs, cost_fn, shots):
"""Test hamiltonian with no trainable parameters."""
- if execute_kwargs["gradient_fn"] == "adjoint" and not qml.operation.active_new_opmath():
+ if execute_kwargs["diff_method"] == "adjoint" and not qml.operation.active_new_opmath():
pytest.skip("adjoint differentiation does not suppport hamiltonians.")
coeffs1 = torch.tensor([0.1, 0.2, 0.3], requires_grad=False)
@@ -855,7 +855,7 @@ def test_multiple_hamiltonians_not_trainable(self, execute_kwargs, cost_fn, shot
def test_multiple_hamiltonians_trainable(self, execute_kwargs, cost_fn, shots):
"""Test hamiltonian with trainable parameters."""
- if execute_kwargs["gradient_fn"] == "adjoint":
+ if execute_kwargs["diff_method"] == "adjoint":
pytest.skip("trainable hamiltonians not supported with adjoint")
if qml.operation.active_new_opmath():
pytest.skip("parameter shift derivatives do not yet support sums.")
diff --git a/tests/interfaces/test_torch_qnode.py b/tests/interfaces/test_torch_qnode.py
index c71b74108be..460f0fcfa7e 100644
--- a/tests/interfaces/test_torch_qnode.py
+++ b/tests/interfaces/test_torch_qnode.py
@@ -646,11 +646,11 @@ def cost_fn(a, b):
qml.PennyLaneDeprecationWarning, match=r"QNode.gradient_fn is deprecated"
):
assert cost_fn.gradient_fn == qml.gradients.param_shift
- assert spy.call_args[1]["gradient_fn"] is qml.gradients.param_shift
+ assert spy.call_args[1]["diff_method"] is qml.gradients.param_shift
# if we use the default shots value of None, backprop can now be used
cost_fn(a, b)
- assert spy.call_args[1]["gradient_fn"] == "backprop"
+ assert spy.call_args[1]["diff_method"] == "backprop"
@pytest.mark.parametrize(
diff --git a/tests/logging/test_logging_autograd.py b/tests/logging/test_logging_autograd.py
index f1468358935..f86c958187e 100644
--- a/tests/logging/test_logging_autograd.py
+++ b/tests/logging/test_logging_autograd.py
@@ -20,9 +20,9 @@
import pennylane as qml
_grad_log_map = {
- "adjoint": "gradient_fn=adjoint, interface=autograd, grad_on_execution=best, gradient_kwargs={}",
- "backprop": "gradient_fn=backprop, interface=autograd, grad_on_execution=best, gradient_kwargs={}",
- "parameter-shift": "gradient_fn=",
+ "adjoint": "diff_method=adjoint, interface=autograd, grad_on_execution=best, gradient_kwargs={}",
+ "backprop": "diff_method=backprop, interface=autograd, grad_on_execution=best, gradient_kwargs={}",
+ "parameter-shift": "diff_method=",
}
@@ -91,7 +91,7 @@ def circuit(params):
"pennylane.workflow.execution",
[
"device=