Clean up how interface is handled in QNode and qml.execute #6225

Merged: 38 commits, Sep 16, 2024

Commits (38)
c62ce82  Add _to_autograd to autograd execute (astralcai, Sep 5, 2024)
26111be  stop treating numpy as autograd internally (astralcai, Sep 5, 2024)
5e8740b  Merge branch 'master' into autograd-bug (astralcai, Sep 5, 2024)
26d16bc  bug fixes (astralcai, Sep 6, 2024)
c45b750  uncomment line (astralcai, Sep 6, 2024)
dfacd7f  Merge branch 'master' into autograd-bug (astralcai, Sep 6, 2024)
0633dc0  fix tiny bug (astralcai, Sep 6, 2024)
f1213cb  Merge branch 'master' into autograd-bug (astralcai, Sep 6, 2024)
ffb9d3c  more fix (astralcai, Sep 9, 2024)
5d6e8eb  fix bug (astralcai, Sep 9, 2024)
6776c01  fix tests (astralcai, Sep 9, 2024)
eca0f14  fix black (astralcai, Sep 9, 2024)
33f4f63  Merge branch 'master' of https://github.com/PennyLaneAI/pennylane int… (astralcai, Sep 9, 2024)
2e4f55c  revert change (astralcai, Sep 9, 2024)
8c4a72a  add sparse matrix to Hermitian (astralcai, Sep 9, 2024)
3c8a691  bug fix (astralcai, Sep 9, 2024)
71561f9  bug fix (astralcai, Sep 9, 2024)
676d85b  bug fix (astralcai, Sep 9, 2024)
63525a2  Merge branch 'master' into autograd-bug (astralcai, Sep 9, 2024)
b34b48c  clean up handling of interface (astralcai, Sep 10, 2024)
9810b68  Merge branch 'master' into autograd-bug (astralcai, Sep 10, 2024)
c21eee3  fix isort (astralcai, Sep 10, 2024)
ffd41a9  update (astralcai, Sep 10, 2024)
cee3976  fix some tests (astralcai, Sep 10, 2024)
087dc22  fix tests (astralcai, Sep 10, 2024)
535e66b  make pylint happy (astralcai, Sep 10, 2024)
18c5fa6  update name (astralcai, Sep 10, 2024)
7fde693  fix isort (astralcai, Sep 10, 2024)
9e86111  fix tests (astralcai, Sep 10, 2024)
c4415a8  Update pennylane/workflow/qnode.py (astralcai, Sep 11, 2024)
6ac5315  Merge branch 'master' into autograd-bug (astralcai, Sep 11, 2024)
b6ad427  changelog (astralcai, Sep 12, 2024)
c41572e  Merge branch 'master' of https://github.com/PennyLaneAI/pennylane int… (astralcai, Sep 12, 2024)
fee0af9  Merge branch 'master' into autograd-bug (astralcai, Sep 12, 2024)
61f0dc1  Merge branch 'master' of https://github.com/PennyLaneAI/pennylane int… (astralcai, Sep 13, 2024)
a8b11cc  fix bug (astralcai, Sep 13, 2024)
a44ef16  add test (astralcai, Sep 13, 2024)
99d393c  Merge branch 'master' of https://github.com/PennyLaneAI/pennylane int… (astralcai, Sep 16, 2024)
4 changes: 2 additions & 2 deletions pennylane/devices/legacy_facade.py

@@ -408,13 +408,13 @@ def _validate_backprop_method(self, tape):

         if backprop_interface is not None:
             # device supports backpropagation natively
-            return mapped_interface in [backprop_interface, "Numpy"]
+            return mapped_interface in [backprop_interface, "numpy"]
         # determine if the device has any child devices that support backpropagation
         backprop_devices = self._device.capabilities().get("passthru_devices", None)

         if backprop_devices is None:
             return False
-        return mapped_interface in backprop_devices or mapped_interface == "Numpy"
+        return mapped_interface in backprop_devices or mapped_interface == "numpy"

     def _validate_adjoint_method(self, tape):
         # The conditions below provide a minimal set of requirements that we can likely improve upon in
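The substance of both edits is the casing of the sentinel: the mapped interface names are lowercase, so a comparison against "Numpy" could never succeed. A minimal sketch of the failure mode, with assumed values rather than anything taken from the PR:

# Hypothetical values; "numpy" is the lowercase name the interface mapping emits.
mapped_interface = "numpy"
print(mapped_interface in ["tf", "Numpy"])   # False: the old check never matched
print(mapped_interface in ["tf", "numpy"])   # True with the corrected casing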
2 changes: 1 addition & 1 deletion pennylane/devices/qubit/simulate.py

@@ -922,7 +922,7 @@ def _(original_measurement: ExpectationMP, measures):  # pylint: disable=unused-
     for v in measures.values():
         if not v[0] or v[1] is tuple():
             continue
-        cum_value += v[0] * v[1]
+        cum_value += qml.math.multiply(v[0], v[1])
         total_counts += v[0]
     return cum_value / total_counts
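qml.math.multiply dispatches on the interface of its arguments (qml.math wraps autoray), so the accumulation keeps working when v[1] is an interface tensor rather than a plain float. A small sketch of that dispatch, using illustrative values:

# Illustrative values only; qml.math picks the backend from the argument types.
import pennylane as qml
from pennylane import numpy as pnp

counts = 7
value = pnp.array(0.5, requires_grad=True)   # autograd-backed array
prod = qml.math.multiply(counts, value)      # product stays in the autograd interface
print(qml.math.get_interface(prod))          # "autograd"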
2 changes: 1 addition & 1 deletion pennylane/workflow/execution.py

@@ -78,7 +78,7 @@
 ]

 _mapping_output = (
-    "Numpy",
+    "numpy",
     "auto",
     "autograd",
     "autograd",
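_mapping_output supplies the canonical names for the interface map, so a plain-NumPy workload now canonicalizes to "numpy" instead of the old "Numpy" sentinel. A sketch of the lookup, assuming the map is built by zipping the supported interface names with this tuple (the actual key list lives in execution.py and is not shown in this diff):

# Assumed construction for illustration; only the output tuple is from the diff.
supported_names = ("numpy", "auto", "autograd", "pennylane")  # hypothetical subset
_mapping_output = ("numpy", "auto", "autograd", "autograd")

INTERFACE_MAP = dict(zip(supported_names, _mapping_output))
print(INTERFACE_MAP["numpy"])       # "numpy": no longer promoted to autograd
print(INTERFACE_MAP["pennylane"])   # "autograd": aliases still canonicalize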
17 changes: 16 additions & 1 deletion pennylane/workflow/interfaces/autograd.py

@@ -147,6 +147,21 @@ def autograd_execute(
     return _execute(parameters, tuple(tapes), execute_fn, jpc)


+def _to_autograd(result: qml.typing.ResultBatch) -> qml.typing.ResultBatch:
+    """Converts an arbitrary result batch to one with autograd arrays.
+
+    Args:
+        result (ResultBatch): a nested structure of lists, tuples, dicts, and numpy arrays
+
+    Returns:
+        ResultBatch: a nested structure of tuples, dicts, and autograd arrays
+    """
+    if isinstance(result, dict):
+        return result
+    # pylint: disable=no-member
+    if isinstance(result, (list, tuple, autograd.builtins.tuple, autograd.builtins.list)):
+        return tuple(_to_autograd(r) for r in result)
+    return autograd.numpy.array(result)
+
+
 @autograd.extend.primitive
 def _execute(
     parameters,

@@ -165,7 +180,7 @@ def _execute(
     for the input tapes.

     """
-    return execute_fn(tapes)
+    return _to_autograd(execute_fn(tapes))


 # pylint: disable=unused-argument
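The recursion is easy to check in isolation. Below is a standalone re-sketch of the helper (the autograd.builtins branch is dropped so it runs without the rest of the workflow module), applied to a fake result batch:

# Standalone sketch of the conversion; the real helper also unwraps
# autograd.builtins containers, omitted here for brevity.
import autograd.numpy as anp
import numpy as np

def to_autograd(result):
    if isinstance(result, dict):
        return result                        # counts dictionaries pass through
    if isinstance(result, (list, tuple)):
        return tuple(to_autograd(r) for r in result)
    return anp.array(result)                 # leaves become autograd arrays

batch = [(np.float64(0.1), np.array([0.2, 0.3])), {"00": 7, "11": 3}]
converted = to_autograd(batch)
print(type(converted).__name__)   # tuple: the outer list is normalized
print(converted[1])               # {'00': 7, '11': 3}: dicts are untouched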
6 changes: 4 additions & 2 deletions pennylane/workflow/qnode.py

@@ -58,7 +58,7 @@ def _convert_to_interface(res, interface):
     """
     interface = INTERFACE_MAP[interface]

-    if interface in ["Numpy"]:
+    if interface == "numpy":
         return res

     if isinstance(res, (list, tuple)):

@@ -931,8 +931,10 @@ def _impl_call(self, *args, **kwargs) -> qml.typing.Result:
             if qml.capture.enabled()
             else qml.math.get_interface(*args, *list(kwargs.values()))
         )
+        if interface == "numpy":
+            # Internally stop treating numpy as autograd
+            interface = None
         self._interface = INTERFACE_MAP[interface]

         if self._qfunc_uses_shots_arg:
             override_shots = False
         else:
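The user-visible consequence: with interface="auto", a QNode that receives plain NumPy inputs is no longer routed through autograd. A hedged sketch of how the detection behaves; the device and circuit here are illustrative, only get_interface's output is the point:

# Illustrative circuit; plain NumPy inputs now take the forward-only path.
import numpy as np
import pennylane as qml
from pennylane import numpy as pnp

dev = qml.device("default.qubit", wires=1)

@qml.qnode(dev)
def circuit(x):
    qml.RX(x, wires=0)
    return qml.expval(qml.PauliZ(0))

print(qml.math.get_interface(np.array(0.3)))   # "numpy": non-differentiable input
print(qml.math.get_interface(pnp.array(0.3)))  # "autograd": differentiable path
print(circuit(np.array(0.3)))                  # executes without autograd bookkeeping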
76 changes: 39 additions & 37 deletions tests/gradients/finite_diff/test_spsa_gradient.py

@@ -14,11 +14,11 @@
 """
 Tests for the gradients.spsa_gradient module.
 """
-import numpy
+import numpy as np
 import pytest

 import pennylane as qml
-from pennylane import numpy as np
+from pennylane import numpy as pnp
 from pennylane.devices import DefaultQubitLegacy
 from pennylane.gradients import spsa_grad
 from pennylane.gradients.spsa_gradient import _rademacher_sampler

@@ -168,7 +168,7 @@ def circuit(param):

         expected_message = "The argument sampler_rng is expected to be a NumPy PRNG"
         with pytest.raises(ValueError, match=expected_message):
-            qml.grad(circuit)(np.array(1.0))
+            qml.grad(circuit)(pnp.array(1.0))

     def test_trainable_batched_tape_raises(self):
         """Test that an error is raised for a broadcasted/batched tape if the broadcasted

@@ -202,7 +202,7 @@ def test_nontrainable_batched_tape(self):
     def test_non_differentiable_error(self):
         """Test error raised if attempting to differentiate with
         respect to a non-differentiable argument"""
-        psi = np.array([1, 0, 1, 0], requires_grad=False) / np.sqrt(2)
+        psi = pnp.array([1, 0, 1, 0], requires_grad=False) / np.sqrt(2)

         with qml.queuing.AnnotatedQueue() as q:
             qml.StatePrep(psi, wires=[0, 1])

@@ -227,10 +227,10 @@ def test_non_differentiable_error(self):
         assert isinstance(res, tuple)
         assert len(res) == 2

-        assert isinstance(res[0], numpy.ndarray)
+        assert isinstance(res[0], np.ndarray)
         assert res[0].shape == (4,)

-        assert isinstance(res[1], numpy.ndarray)
+        assert isinstance(res[1], np.ndarray)
         assert res[1].shape == (4,)

     @pytest.mark.parametrize("num_directions", [1, 10])

@@ -252,8 +252,8 @@ def test_independent_parameter(self, num_directions, mocker):
         assert isinstance(res, tuple)
         assert len(res) == 2

-        assert isinstance(res[0], numpy.ndarray)
-        assert isinstance(res[1], numpy.ndarray)
+        assert isinstance(res[0], np.ndarray)
+        assert isinstance(res[1], np.ndarray)

         # 2 tapes per direction because the default strategy for SPSA is "center"
         assert len(spy.call_args_list) == num_directions

@@ -282,7 +282,7 @@ def test_no_trainable_params_tape(self):
         res = post_processing(qml.execute(g_tapes, dev, None))

         assert g_tapes == []
-        assert isinstance(res, numpy.ndarray)
+        assert isinstance(res, np.ndarray)
         assert res.shape == (0,)

     def test_no_trainable_params_multiple_return_tape(self):

@@ -383,7 +383,7 @@ def circuit(params):
             qml.Rot(*params, wires=0)
             return qml.probs([2, 3])

-        params = np.array([0.5, 0.5, 0.5], requires_grad=True)
+        params = pnp.array([0.5, 0.5, 0.5], requires_grad=True)

         result = spsa_grad(circuit)(params)

@@ -402,7 +402,7 @@ def circuit(params):
             qml.Rot(*params, wires=0)
             return qml.expval(qml.PauliZ(wires=2)), qml.probs([2, 3])

-        params = np.array([0.5, 0.5, 0.5], requires_grad=True)
+        params = pnp.array([0.5, 0.5, 0.5], requires_grad=True)

         result = spsa_grad(circuit)(params)

@@ -514,7 +514,7 @@ def cost6(x):
             qml.Rot(*x, wires=0)
             return qml.probs([0, 1]), qml.probs([2, 3])

-        x = np.random.rand(3)
+        x = pnp.random.rand(3)
         circuits = [qml.QNode(cost, dev) for cost in (cost1, cost2, cost3, cost4, cost5, cost6)]

         transform = [qml.math.shape(spsa_grad(c)(x)) for c in circuits]

@@ -576,7 +576,7 @@ class DeviceSupportingSpecialObservable(DefaultQubitLegacy):

     @staticmethod
     def _asarray(arr, dtype=None):
-        return np.array(arr, dtype=dtype)
+        return pnp.array(arr, dtype=dtype)

     def __init__(self, *args, **kwargs):
         super().__init__(*args, **kwargs)

@@ -603,9 +603,11 @@ def reference_qnode(x):
             qml.RY(x, wires=0)
             return qml.expval(qml.PauliZ(wires=0))

-        par = np.array(0.2, requires_grad=True)
-        assert np.isclose(qnode(par).item().val, reference_qnode(par))
-        assert np.isclose(qml.jacobian(qnode)(par).item().val, qml.jacobian(reference_qnode)(par))
+        par = pnp.array(0.2, requires_grad=True)
+        assert np.isclose(qnode(par).item().val, reference_qnode(par).item())
+        assert np.isclose(
+            qml.jacobian(qnode)(par).item().val, qml.jacobian(reference_qnode)(par).item()
+        )


 @pytest.mark.parametrize("approx_order", [2, 4])

@@ -684,10 +686,10 @@ def test_single_expectation_value(self, approx_order, strategy, validate, tol):
         # 1 / num_params here.
         res = tuple(qml.math.convert_like(r * 2, r) for r in res)

-        assert isinstance(res[0], numpy.ndarray)
+        assert isinstance(res[0], np.ndarray)
         assert res[0].shape == ()

-        assert isinstance(res[1], numpy.ndarray)
+        assert isinstance(res[1], np.ndarray)
         assert res[1].shape == ()

         expected = np.array([[-np.sin(y) * np.sin(x), np.cos(y) * np.cos(x)]])

@@ -728,10 +730,10 @@ def test_single_expectation_value_with_argnum_all(self, approx_order, strategy,
         # 1 / num_params here.
         res = tuple(qml.math.convert_like(r * 2, r) for r in res)

-        assert isinstance(res[0], numpy.ndarray)
+        assert isinstance(res[0], np.ndarray)
         assert res[0].shape == ()

-        assert isinstance(res[1], numpy.ndarray)
+        assert isinstance(res[1], np.ndarray)
         assert res[1].shape == ()

         expected = np.array([[-np.sin(y) * np.sin(x), np.cos(y) * np.cos(x)]])

@@ -772,10 +774,10 @@ def test_single_expectation_value_with_argnum_one(self, approx_order, strategy,
         assert isinstance(res, tuple)
         assert len(res) == 2

-        assert isinstance(res[0], numpy.ndarray)
+        assert isinstance(res[0], np.ndarray)
         assert res[0].shape == ()

-        assert isinstance(res[1], numpy.ndarray)
+        assert isinstance(res[1], np.ndarray)
         assert res[1].shape == ()

         expected = [0, np.cos(y) * np.cos(x)]

@@ -856,14 +858,14 @@ def test_multiple_expectation_values(self, approx_order, strategy, validate, tol
         assert isinstance(res[0], tuple)
         assert len(res[0]) == 2
         assert np.allclose(res[0], [-np.sin(x), 0], atol=tol, rtol=0)
-        assert isinstance(res[0][0], numpy.ndarray)
-        assert isinstance(res[0][1], numpy.ndarray)
+        assert isinstance(res[0][0], np.ndarray)
+        assert isinstance(res[0][1], np.ndarray)

         assert isinstance(res[1], tuple)
         assert len(res[1]) == 2
         assert np.allclose(res[1], [0, np.cos(y)], atol=tol, rtol=0)
-        assert isinstance(res[1][0], numpy.ndarray)
-        assert isinstance(res[1][1], numpy.ndarray)
+        assert isinstance(res[1][0], np.ndarray)
+        assert isinstance(res[1][1], np.ndarray)

     def test_var_expectation_values(self, approx_order, strategy, validate, tol):
         """Tests correct output shape and evaluation for a tape

@@ -901,14 +903,14 @@ def test_var_expectation_values(self, approx_order, strategy, validate, tol):
         assert isinstance(res[0], tuple)
         assert len(res[0]) == 2
         assert np.allclose(res[0], [-np.sin(x), 0], atol=tol, rtol=0)
-        assert isinstance(res[0][0], numpy.ndarray)
-        assert isinstance(res[0][1], numpy.ndarray)
+        assert isinstance(res[0][0], np.ndarray)
+        assert isinstance(res[0][1], np.ndarray)

         assert isinstance(res[1], tuple)
         assert len(res[1]) == 2
         assert np.allclose(res[1], [0, -2 * np.cos(y) * np.sin(y)], atol=tol, rtol=0)
-        assert isinstance(res[1][0], numpy.ndarray)
-        assert isinstance(res[1][1], numpy.ndarray)
+        assert isinstance(res[1][0], np.ndarray)
+        assert isinstance(res[1][1], np.ndarray)

     def test_prob_expectation_values(self, approx_order, strategy, validate, tol):
         """Tests correct output shape and evaluation for a tape

@@ -946,9 +948,9 @@ def test_prob_expectation_values(self, approx_order, strategy, validate, tol):
         assert isinstance(res[0], tuple)
         assert len(res[0]) == 2
         assert np.allclose(res[0][0], -np.sin(x), atol=tol, rtol=0)
-        assert isinstance(res[0][0], numpy.ndarray)
+        assert isinstance(res[0][0], np.ndarray)
         assert np.allclose(res[0][1], 0, atol=tol, rtol=0)
-        assert isinstance(res[0][1], numpy.ndarray)
+        assert isinstance(res[0][1], np.ndarray)

         assert isinstance(res[1], tuple)
         assert len(res[1]) == 2

@@ -963,7 +965,7 @@ def test_prob_expectation_values(self, approx_order, strategy, validate, tol):
             atol=tol,
             rtol=0,
         )
-        assert isinstance(res[1][0], numpy.ndarray)
+        assert isinstance(res[1][0], np.ndarray)
         assert np.allclose(
             res[1][1],
             [

@@ -975,7 +977,7 @@ def test_prob_expectation_values(self, approx_order, strategy, validate, tol):
             atol=tol,
             rtol=0,
         )
-        assert isinstance(res[1][1], numpy.ndarray)
+        assert isinstance(res[1][1], np.ndarray)


 @pytest.mark.parametrize(

@@ -991,7 +993,7 @@ def test_autograd(self, dev_name, sampler, num_directions, atol):
         can be differentiated using autograd, yielding second derivatives."""
         dev = qml.device(dev_name, wires=2)
         execute_fn = dev.execute if dev_name == "default.qubit" else dev.batch_execute
-        params = np.array([0.543, -0.654], requires_grad=True)
+        params = pnp.array([0.543, -0.654], requires_grad=True)
         rng = np.random.default_rng(42)

         def cost_fn(x):

@@ -1006,7 +1008,7 @@ def cost_fn(x):
             tapes, fn = spsa_grad(
                 tape, n=1, num_directions=num_directions, sampler=sampler, sampler_rng=rng
             )
-            jac = np.array(fn(execute_fn(tapes)))
+            jac = pnp.array(fn(execute_fn(tapes)))
             if sampler is coordinate_sampler:
                 jac *= 2
             return jac

@@ -1029,7 +1031,7 @@ def test_autograd_ragged(self, dev_name, sampler, num_directions, atol):
         of a ragged tape can be differentiated using autograd, yielding second derivatives."""
         dev = qml.device(dev_name, wires=2)
         execute_fn = dev.execute if dev_name == "default.qubit" else dev.batch_execute
-        params = np.array([0.543, -0.654], requires_grad=True)
+        params = pnp.array([0.543, -0.654], requires_grad=True)
         rng = np.random.default_rng(42)

         def cost_fn(x):
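The test rename mirrors the conceptual split this PR makes: np is now vanilla NumPy (plain, non-trainable results) and pnp is PennyLane's autograd-backed NumPy (trainable parameters). A short sketch of the distinction the renamed imports now rely on:

# Vanilla NumPy vs. PennyLane's autograd-backed NumPy, as the renamed
# imports in the test module distinguish them.
import numpy as np
from pennylane import numpy as pnp

x = np.array(0.5)                           # plain ndarray, never trainable
theta = pnp.array(0.5, requires_grad=True)  # autograd tensor, trainable
print(type(x).__name__)                     # ndarray
print(theta.requires_grad)                  # True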