diff --git a/doc/releases/changelog-dev.md b/doc/releases/changelog-dev.md
index 506504f631b..559f1f20b6e 100644
--- a/doc/releases/changelog-dev.md
+++ b/doc/releases/changelog-dev.md
@@ -10,18 +10,14 @@
[(#6061)](https://github.com/PennyLaneAI/pennylane/pull/6061)
* `qml.qchem.excitations` now optionally returns fermionic operators.
- [(#6171)](https://github.com/PennyLaneAI/pennylane/pull/6171)
+ [(#6171)](https://github.com/PennyLaneAI/pennylane/pull/6171)
* The `diagonalize_measurements` transform now uses a more efficient method of diagonalization
when possible, based on the `pauli_rep` of the relevant observables.
[#6113](https://github.com/PennyLaneAI/pennylane/pull/6113/)
-
Capturing and representing hybrid programs
-
-* Differentiation of hybrid programs via `qml.grad` can now be captured into plxpr.
- When evaluating a captured `qml.grad` instruction, it will dispatch to `jax.grad`,
- which differs from the Autograd implementation of `qml.grad` itself.
- [(#6120)](https://github.com/PennyLaneAI/pennylane/pull/6120)
+* The `Hermitian` operator now has a `compute_sparse_matrix` implementation.
+ [(#6225)](https://github.com/PennyLaneAI/pennylane/pull/6225)
Capturing and representing hybrid programs
@@ -120,12 +116,19 @@
* The ``qml.FABLE`` template now returns the correct value when JIT is enabled.
[(#6263)](https://github.com/PennyLaneAI/pennylane/pull/6263)
-* Contributors ✍️
+* Fixes a bug where a circuit using the `autograd` interface sometimes returns nested values that are not `autograd` arrays.
+ [(#6225)](https://github.com/PennyLaneAI/pennylane/pull/6225)
+
+* Fixes a bug where a circuit with no parameters, or with only builtin/NumPy arrays as parameters, returns autograd tensors rather than plain NumPy arrays.
+ [(#6225)](https://github.com/PennyLaneAI/pennylane/pull/6225)
+
+Contributors ✍️
This release contains contributions from (in alphabetical order):
Guillermo Alonso,
Utkarsh Azad,
+Astral Cai,
Lillian M. A. Frederiksen,
Pietropaolo Frisoni,
Emiliano Godinez,
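The two bug-fix entries above change which interface a QNode's results take. A minimal sketch of the intended behaviour, mirroring the `test_qnode_preserves_inferred_numpy_interface` test added later in this diff:

    import numpy as np
    import pennylane as qml
    from pennylane import numpy as pnp

    dev = qml.device("default.qubit", wires=1)

    @qml.qnode(dev)
    def circuit(x):
        qml.RX(x, wires=0)
        return qml.expval(qml.PauliZ(0))

    # Plain builtin/NumPy parameters should yield plain NumPy results ...
    print(qml.math.get_interface(circuit(np.array(0.8))))  # "numpy"
    # ... while trainable PennyLane-NumPy parameters yield autograd results.
    print(qml.math.get_interface(circuit(pnp.array(0.8, requires_grad=True))))  # "autograd"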
diff --git a/pennylane/devices/execution_config.py b/pennylane/devices/execution_config.py
index 5b7af096d81..7f3866d9e86 100644
--- a/pennylane/devices/execution_config.py
+++ b/pennylane/devices/execution_config.py
@@ -17,7 +17,7 @@
from dataclasses import dataclass, field
from typing import Optional, Union
-from pennylane.workflow import SUPPORTED_INTERFACES
+from pennylane.workflow import SUPPORTED_INTERFACE_NAMES
@dataclass
@@ -110,9 +110,9 @@ def __post_init__(self):
Note that this hook is automatically called after init via the dataclass integration.
"""
- if self.interface not in SUPPORTED_INTERFACES:
+ if self.interface not in SUPPORTED_INTERFACE_NAMES:
raise ValueError(
- f"Unknown interface. interface must be in {SUPPORTED_INTERFACES}, got {self.interface} instead."
+ f"Unknown interface. interface must be in {SUPPORTED_INTERFACE_NAMES}, got {self.interface} instead."
)
if self.grad_on_execution not in {True, False, None}:
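For reference, a small sketch of how the renamed constant surfaces to users; the membership of `SUPPORTED_INTERFACE_NAMES` comes from `execution.py` below, and the error text matches the `__post_init__` check in this hunk:

    import pennylane as qml

    # A canonical interface name passes validation in __post_init__.
    config = qml.devices.ExecutionConfig(interface="torch")

    # An unknown name raises the ValueError defined above.
    try:
        qml.devices.ExecutionConfig(interface="not-an-interface")
    except ValueError as exc:
        print(exc)  # Unknown interface. interface must be in [...], got not-an-interface instead.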
diff --git a/pennylane/devices/legacy_facade.py b/pennylane/devices/legacy_facade.py
index 41c1e0dea2c..bd2190f0fe1 100644
--- a/pennylane/devices/legacy_facade.py
+++ b/pennylane/devices/legacy_facade.py
@@ -24,6 +24,7 @@
import pennylane as qml
from pennylane.measurements import MidMeasureMP, Shots
from pennylane.transforms.core.transform_program import TransformProgram
+from pennylane.workflow.execution import INTERFACE_MAP
from .device_api import Device
from .execution_config import DefaultExecutionConfig
@@ -322,25 +323,24 @@ def _validate_backprop_method(self, tape):
return False
params = tape.get_parameters(trainable_only=False)
interface = qml.math.get_interface(*params)
+ if interface != "numpy":
+ interface = INTERFACE_MAP.get(interface, interface)
if tape and any(isinstance(m.obs, qml.SparseHamiltonian) for m in tape.measurements):
return False
- if interface == "numpy":
- interface = None
- mapped_interface = qml.workflow.execution.INTERFACE_MAP.get(interface, interface)
# determine if the device supports backpropagation
backprop_interface = self._device.capabilities().get("passthru_interface", None)
if backprop_interface is not None:
# device supports backpropagation natively
- return mapped_interface in [backprop_interface, "Numpy"]
+ return interface in [backprop_interface, "numpy"]
# determine if the device has any child devices that support backpropagation
backprop_devices = self._device.capabilities().get("passthru_devices", None)
if backprop_devices is None:
return False
- return mapped_interface in backprop_devices or mapped_interface == "Numpy"
+ return interface in backprop_devices or interface == "numpy"
def _validate_adjoint_method(self, tape):
# The conditions below provide a minimal set of requirements that we can likely improve upon in
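The inlined mapping above only canonicalises non-numpy names, presumably because `INTERFACE_MAP["numpy"]` resolves to `"autograd"` (see the comment added in `qnode.py` later in this diff). A sketch of the guard, assuming only that `INTERFACE_MAP` is importable as in this patch:

    from pennylane.workflow.execution import INTERFACE_MAP

    for name in ("tensorflow", "autograd", "numpy"):
        # Mirror of the new logic in _validate_backprop_method.
        canonical = name if name == "numpy" else INTERFACE_MAP.get(name, name)
        print(name, "->", canonical)  # tensorflow -> tf, autograd -> autograd, numpy -> numpy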
diff --git a/pennylane/devices/qubit/simulate.py b/pennylane/devices/qubit/simulate.py
index 56e4a8f1a48..89c041b8f3e 100644
--- a/pennylane/devices/qubit/simulate.py
+++ b/pennylane/devices/qubit/simulate.py
@@ -922,7 +922,7 @@ def _(original_measurement: ExpectationMP, measures): # pylint: disable=unused-
for v in measures.values():
if not v[0] or v[1] is tuple():
continue
- cum_value += v[0] * v[1]
+ cum_value += qml.math.multiply(v[0], v[1])
total_counts += v[0]
return cum_value / total_counts
@@ -935,7 +935,7 @@ def _(original_measurement: ProbabilityMP, measures): # pylint: disable=unused-
for v in measures.values():
if not v[0] or v[1] is tuple():
continue
- cum_value += v[0] * v[1]
+ cum_value += qml.math.multiply(v[0], v[1])
total_counts += v[0]
return cum_value / total_counts
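The switch to `qml.math.multiply` makes the product dispatch through `qml.math`'s interface-aware layer instead of relying on the left operand's `*` overloading, so a count stored as a plain Python number scales an interface tensor correctly. A sketch with NumPy inputs (other interfaces behave analogously):

    import numpy as np
    import pennylane as qml

    counts = 7                    # v[0]: number of shots in this branch
    value = np.array([0.2, 0.8])  # v[1]: the measurement value
    print(qml.math.multiply(counts, value))  # [1.4 5.6]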
diff --git a/pennylane/ops/qubit/observables.py b/pennylane/ops/qubit/observables.py
index 8f992c81bc2..4fc4a98c092 100644
--- a/pennylane/ops/qubit/observables.py
+++ b/pennylane/ops/qubit/observables.py
@@ -137,6 +137,10 @@ def compute_matrix(A: TensorLike) -> TensorLike: # pylint: disable=arguments-di
Hermitian._validate_input(A)
return A
+ @staticmethod
+ def compute_sparse_matrix(A) -> csr_matrix: # pylint: disable=arguments-differ
+ return csr_matrix(Hermitian.compute_matrix(A))
+
@property
def eigendecomposition(self) -> dict[str, TensorLike]:
"""Return the eigendecomposition of the matrix specified by the Hermitian observable.
diff --git a/pennylane/workflow/__init__.py b/pennylane/workflow/__init__.py
index 55068804b68..b41c031e8a4 100644
--- a/pennylane/workflow/__init__.py
+++ b/pennylane/workflow/__init__.py
@@ -56,6 +56,6 @@
"""
from .construct_batch import construct_batch, get_transform_program
-from .execution import INTERFACE_MAP, SUPPORTED_INTERFACES, execute
+from .execution import INTERFACE_MAP, SUPPORTED_INTERFACE_NAMES, execute
from .qnode import QNode, qnode
from .set_shots import set_shots
diff --git a/pennylane/workflow/execution.py b/pennylane/workflow/execution.py
index 7445bcea2b7..8d8f0adb9ef 100644
--- a/pennylane/workflow/execution.py
+++ b/pennylane/workflow/execution.py
@@ -51,12 +51,9 @@
"autograd",
"numpy",
"torch",
- "pytorch",
"jax",
- "jax-python",
"jax-jit",
"tf",
- "tensorflow",
}
SupportedInterfaceUserInput = Literal[
@@ -78,30 +75,29 @@
]
_mapping_output = (
- "Numpy",
+ "numpy",
"auto",
"autograd",
"autograd",
"numpy",
"jax",
- "jax",
+ "jax-jit",
"jax",
"jax",
"torch",
"torch",
"tf",
"tf",
- "tf",
- "tf",
+ "tf-autograph",
+ "tf-autograph",
)
+
INTERFACE_MAP = dict(zip(get_args(SupportedInterfaceUserInput), _mapping_output))
"""dict[str, str]: maps an allowed interface specification to its canonical name."""
-#: list[str]: allowed interface strings
-SUPPORTED_INTERFACES = list(INTERFACE_MAP)
+SUPPORTED_INTERFACE_NAMES = list(INTERFACE_MAP)
"""list[str]: allowed interface strings"""
-
_CACHED_EXECUTION_WITH_FINITE_SHOTS_WARNINGS = (
"Cached execution with finite shots detected!\n"
"Note that samples as well as all noisy quantities computed via sampling "
@@ -135,23 +131,21 @@ def _get_ml_boundary_execute(
pennylane.QuantumFunctionError if the required package is not installed.
"""
- mapped_interface = INTERFACE_MAP[interface]
try:
- if mapped_interface == "autograd":
+ if interface == "autograd":
from .interfaces.autograd import autograd_execute as ml_boundary
- elif mapped_interface == "tf":
- if "autograph" in interface:
- from .interfaces.tensorflow_autograph import execute as ml_boundary
+ elif interface == "tf-autograph":
+ from .interfaces.tensorflow_autograph import execute as ml_boundary
- ml_boundary = partial(ml_boundary, grad_on_execution=grad_on_execution)
+ ml_boundary = partial(ml_boundary, grad_on_execution=grad_on_execution)
- else:
- from .interfaces.tensorflow import tf_execute as full_ml_boundary
+ elif interface == "tf":
+ from .interfaces.tensorflow import tf_execute as full_ml_boundary
- ml_boundary = partial(full_ml_boundary, differentiable=differentiable)
+ ml_boundary = partial(full_ml_boundary, differentiable=differentiable)
- elif mapped_interface == "torch":
+ elif interface == "torch":
from .interfaces.torch import execute as ml_boundary
elif interface == "jax-jit":
@@ -159,7 +153,8 @@ def _get_ml_boundary_execute(
from .interfaces.jax_jit import jax_jit_vjp_execute as ml_boundary
else:
from .interfaces.jax_jit import jax_jit_jvp_execute as ml_boundary
- else: # interface in {"jax", "jax-python", "JAX"}:
+
+ else: # interface is jax
if device_vjp:
from .interfaces.jax_jit import jax_jit_vjp_execute as ml_boundary
else:
@@ -167,9 +162,10 @@ def _get_ml_boundary_execute(
except ImportError as e: # pragma: no cover
raise qml.QuantumFunctionError(
- f"{mapped_interface} not found. Please install the latest "
- f"version of {mapped_interface} to enable the '{mapped_interface}' interface."
+ f"{interface} not found. Please install the latest "
+ f"version of {interface} to enable the '{interface}' interface."
) from e
+
return ml_boundary
@@ -263,12 +259,22 @@ def _get_interface_name(tapes, interface):
Returns:
str: Interface name"""
+
+ if interface not in SUPPORTED_INTERFACE_NAMES:
+ raise qml.QuantumFunctionError(
+ f"Unknown interface {interface}. Interface must be one of {SUPPORTED_INTERFACE_NAMES}."
+ )
+
+ interface = INTERFACE_MAP[interface]
+
if interface == "auto":
params = []
for tape in tapes:
params.extend(tape.get_parameters(trainable_only=False))
interface = qml.math.get_interface(*params)
- if INTERFACE_MAP.get(interface, "") == "tf" and _use_tensorflow_autograph():
+ if interface != "numpy":
+ interface = INTERFACE_MAP[interface]
+ if interface == "tf" and _use_tensorflow_autograph():
interface = "tf-autograph"
if interface == "jax":
try: # pragma: no cover
@@ -439,6 +445,7 @@ def cost_fn(params, x):
### Specifying and preprocessing variables ####
+ _interface_user_input = interface
interface = _get_interface_name(tapes, interface)
# Only need to calculate derivatives with jax when we know it will be executed later.
if interface in {"jax", "jax-jit"}:
@@ -460,7 +467,11 @@ def cost_fn(params, x):
)
# Mid-circuit measurement configuration validation
- mcm_interface = interface or _get_interface_name(tapes, "auto")
+ # If the user specifies `interface=None`, regular execution treats it as numpy, but the mcm
+ # workflow still needs to know whether jax-jit is used.
+ mcm_interface = (
+ _get_interface_name(tapes, "auto") if _interface_user_input is None else interface
+ )
finite_shots = any(tape.shots for tape in tapes)
_update_mcm_config(config.mcm_config, mcm_interface, finite_shots)
@@ -479,12 +490,12 @@ def cost_fn(params, x):
cache = None
# changing this set of conditions causes a bunch of tests to break.
- no_interface_boundary_required = interface is None or config.gradient_method in {
+ no_interface_boundary_required = interface == "numpy" or config.gradient_method in {
None,
"backprop",
}
device_supports_interface_data = no_interface_boundary_required and (
- interface is None
+ interface == "numpy"
or config.gradient_method == "backprop"
or getattr(device, "short_name", "") == "default.mixed"
)
@@ -497,9 +508,9 @@ def cost_fn(params, x):
numpy_only=not device_supports_interface_data,
)
- # moved to its own explicit step so it will be easier to remove
+ # moved to its own explicit step so that it will be easier to remove
def inner_execute_with_empty_jac(tapes, **_):
- return (inner_execute(tapes), [])
+ return inner_execute(tapes), []
if interface in jpc_interfaces:
execute_fn = inner_execute
@@ -522,7 +533,7 @@ def inner_execute_with_empty_jac(tapes, **_):
and getattr(device, "short_name", "") in ("lightning.gpu", "lightning.kokkos")
and interface in jpc_interfaces
): # pragma: no cover
- if INTERFACE_MAP[interface] == "jax" and "use_device_state" in gradient_kwargs:
+ if "jax" in interface and "use_device_state" in gradient_kwargs:
gradient_kwargs["use_device_state"] = False
jpc = LightningVJPs(device, gradient_kwargs=gradient_kwargs)
@@ -563,7 +574,7 @@ def execute_fn(internal_tapes) -> tuple[ResultBatch, tuple]:
config: the ExecutionConfig that specifies how to perform the simulations.
"""
numpy_tapes, _ = qml.transforms.convert_to_numpy_parameters(internal_tapes)
- return (device.execute(numpy_tapes, config), tuple())
+ return device.execute(numpy_tapes, config), tuple()
def gradient_fn(internal_tapes):
"""A partial function that wraps compute_derivatives method of the device.
@@ -612,7 +623,7 @@ def gradient_fn(internal_tapes):
# trainable parameters can only be set on the first pass for jax
# not higher order passes for higher order derivatives
- if interface in {"jax", "jax-python", "jax-jit"}:
+ if "jax" in interface:
for tape in tapes:
params = tape.get_parameters(trainable_only=False)
tape.trainable_params = qml.math.get_trainable_indices(params)
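Taken together, these hunks make `_get_interface_name` the single place where user-facing interface strings are validated and canonicalised, so the rest of `qml.execute` only sees canonical names. A sketch of the resulting mapping, using entries visible in `_mapping_output` above:

    from pennylane.workflow.execution import INTERFACE_MAP, SUPPORTED_INTERFACE_NAMES

    assert "tensorflow" in SUPPORTED_INTERFACE_NAMES  # user aliases remain accepted
    print(INTERFACE_MAP["tensorflow"])  # "tf"
    print(INTERFACE_MAP["pytorch"])     # "torch"
    print(INTERFACE_MAP[None])          # "numpy" (previously the capitalised "Numpy")
    print(INTERFACE_MAP["numpy"])       # "autograd" (long-standing behaviour, see qnode.py)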
diff --git a/pennylane/workflow/interfaces/autograd.py b/pennylane/workflow/interfaces/autograd.py
index 9452af31854..cb5731ddc8b 100644
--- a/pennylane/workflow/interfaces/autograd.py
+++ b/pennylane/workflow/interfaces/autograd.py
@@ -147,6 +147,21 @@ def autograd_execute(
return _execute(parameters, tuple(tapes), execute_fn, jpc)
+def _to_autograd(result: qml.typing.ResultBatch) -> qml.typing.ResultBatch:
+ """Converts an arbitrary result batch to one with autograd arrays.
+ Args:
+ result (ResultBatch): a nested structure of lists, tuples, dicts, and numpy arrays
+ Returns:
+ ResultBatch: a nested structure of tuples, dicts, and autograd arrays
+ """
+ if isinstance(result, dict):
+ return result
+ # pylint: disable=no-member
+ if isinstance(result, (list, tuple, autograd.builtins.tuple, autograd.builtins.list)):
+ return tuple(_to_autograd(r) for r in result)
+ return autograd.numpy.array(result)
+
+
@autograd.extend.primitive
def _execute(
parameters,
@@ -165,7 +180,7 @@ def _execute(
for the input tapes.
"""
- return execute_fn(tapes)
+ return _to_autograd(execute_fn(tapes))
# pylint: disable=unused-argument
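A quick illustration of the private `_to_autograd` helper added above: dict leaves (e.g. counts results) pass through untouched, sequences are rebuilt as tuples, and everything else is wrapped by `autograd.numpy.array` (a plain ndarray outside an autograd trace, an ArrayBox inside one):

    import numpy as np
    from pennylane.workflow.interfaces.autograd import _to_autograd

    batch = ([np.array(0.1), np.array([0.2, 0.8])], {"00": 5, "11": 5})
    converted = _to_autograd(batch)
    print(type(converted[0]))  # <class 'tuple'> -- the inner list was normalised
    print(converted[1])        # {'00': 5, '11': 5} -- dicts are returned as-is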
diff --git a/pennylane/workflow/qnode.py b/pennylane/workflow/qnode.py
index 408a0794674..ab68a9ad147 100644
--- a/pennylane/workflow/qnode.py
+++ b/pennylane/workflow/qnode.py
@@ -32,7 +32,7 @@
from pennylane.tape import QuantumScript, QuantumTape
from pennylane.transforms.core import TransformContainer, TransformDispatcher, TransformProgram
-from .execution import INTERFACE_MAP, SUPPORTED_INTERFACES, SupportedInterfaceUserInput
+from .execution import INTERFACE_MAP, SUPPORTED_INTERFACE_NAMES, SupportedInterfaceUserInput
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
@@ -56,9 +56,8 @@ def _convert_to_interface(res, interface):
"""
Recursively convert res to the given interface.
"""
- interface = INTERFACE_MAP[interface]
- if interface in ["Numpy"]:
+ if interface == "numpy":
return res
if isinstance(res, (list, tuple)):
@@ -67,7 +66,18 @@ def _convert_to_interface(res, interface):
if isinstance(res, dict):
return {k: _convert_to_interface(v, interface) for k, v in res.items()}
- return qml.math.asarray(res, like=interface if interface != "tf" else "tensorflow")
+ interface_conversion_map = {
+ "autograd": "autograd",
+ "jax": "jax",
+ "jax-jit": "jax",
+ "torch": "torch",
+ "tf": "tensorflow",
+ "tf-autograph": "tensorflow",
+ }
+
+ interface_name = interface_conversion_map[interface]
+
+ return qml.math.asarray(res, like=interface_name)
def _make_execution_config(
@@ -495,10 +505,10 @@ def __init__(
gradient_kwargs,
)
- if interface not in SUPPORTED_INTERFACES:
+ if interface not in SUPPORTED_INTERFACE_NAMES:
raise qml.QuantumFunctionError(
f"Unknown interface {interface}. Interface must be "
- f"one of {SUPPORTED_INTERFACES}."
+ f"one of {SUPPORTED_INTERFACE_NAMES}."
)
if not isinstance(device, (qml.devices.LegacyDevice, qml.devices.Device)):
@@ -524,7 +534,7 @@ def __init__(
# input arguments
self.func = func
self.device = device
- self._interface = None if diff_method is None else interface
+ self._interface = "numpy" if diff_method is None else INTERFACE_MAP[interface]
self.diff_method = diff_method
mcm_config = qml.devices.MCMConfig(mcm_method=mcm_method, postselect_mode=postselect_mode)
cache = (max_diff > 1) if cache == "auto" else cache
@@ -617,10 +627,10 @@ def interface(self) -> str:
@interface.setter
def interface(self, value: SupportedInterfaceUserInput):
- if value not in SUPPORTED_INTERFACES:
+ if value not in SUPPORTED_INTERFACE_NAMES:
raise qml.QuantumFunctionError(
- f"Unknown interface {value}. Interface must be one of {SUPPORTED_INTERFACES}."
+ f"Unknown interface {value}. Interface must be one of {SUPPORTED_INTERFACE_NAMES}."
)
self._interface = INTERFACE_MAP[value]
@@ -923,12 +933,18 @@ def _execution_component(self, args: tuple, kwargs: dict) -> qml.typing.Result:
execute_kwargs["mcm_config"] = mcm_config
+ # Map numpy to None here because `qml.execute` will map None back into
+ # numpy. If we did not do this, numpy would become autograd in `qml.execute`.
+ # If the user specified interface="numpy", it has already been converted to
+ # "autograd" and is therefore unaffected.
+ interface = None if self.interface == "numpy" else self.interface
+
# pylint: disable=unexpected-keyword-arg
res = qml.execute(
(self._tape,),
device=self.device,
gradient_fn=gradient_fn,
- interface=self.interface,
+ interface=interface,
transform_program=full_transform_program,
inner_transform=inner_transform_program,
config=config,
@@ -961,7 +977,9 @@ def _impl_call(self, *args, **kwargs) -> qml.typing.Result:
if qml.capture.enabled()
else qml.math.get_interface(*args, *list(kwargs.values()))
)
- self._interface = INTERFACE_MAP[interface]
+ if interface != "numpy":
+ interface = INTERFACE_MAP[interface]
+ self._interface = interface
try:
res = self._execution_component(args, kwargs)
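The net effect of the `qnode.py` changes is that a QNode's `interface` attribute now always holds a canonical name, with `diff_method=None` stored as `"numpy"` instead of `None`. A minimal sketch, matching the updated assertion in `tests/test_qnode.py` below:

    import pennylane as qml

    dev = qml.device("default.qubit", wires=1)

    @qml.qnode(dev, diff_method=None)
    def circuit():
        qml.Hadamard(wires=0)
        return qml.probs(wires=0)

    print(circuit.interface)  # "numpy"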
diff --git a/tests/devices/default_qubit/test_default_qubit.py b/tests/devices/default_qubit/test_default_qubit.py
index 8b3a1e257dd..d3049d90eae 100644
--- a/tests/devices/default_qubit/test_default_qubit.py
+++ b/tests/devices/default_qubit/test_default_qubit.py
@@ -1960,7 +1960,7 @@ def test_postselection_invalid_analytic(
dev = qml.device("default.qubit")
@qml.defer_measurements
- @qml.qnode(dev, interface=interface)
+ @qml.qnode(dev, interface=None if interface == "numpy" else interface)
def circ():
qml.RX(np.pi, 0)
qml.CNOT([0, 1])
diff --git a/tests/devices/qubit/test_simulate.py b/tests/devices/qubit/test_simulate.py
index dbe9573b8df..4dce5afd4c5 100644
--- a/tests/devices/qubit/test_simulate.py
+++ b/tests/devices/qubit/test_simulate.py
@@ -205,7 +205,7 @@ def test_result_has_correct_interface(self, op):
def test_expand_state_keeps_autograd_interface(self):
"""Test that expand_state doesn't convert autograd to numpy."""
- @qml.qnode(qml.device("default.qubit", wires=2))
+ @qml.qnode(qml.device("default.qubit", wires=2), interface="autograd")
def circuit(x):
qml.RX(x, 0)
return qml.probs(wires=[0, 1])
diff --git a/tests/gradients/finite_diff/test_spsa_gradient.py b/tests/gradients/finite_diff/test_spsa_gradient.py
index d8f19dcf826..2730cd53d00 100644
--- a/tests/gradients/finite_diff/test_spsa_gradient.py
+++ b/tests/gradients/finite_diff/test_spsa_gradient.py
@@ -14,11 +14,11 @@
"""
Tests for the gradients.spsa_gradient module.
"""
-import numpy
+import numpy as np
import pytest
import pennylane as qml
-from pennylane import numpy as np
+from pennylane import numpy as pnp
from pennylane.devices import DefaultQubitLegacy
from pennylane.gradients import spsa_grad
from pennylane.gradients.spsa_gradient import _rademacher_sampler
@@ -168,7 +168,7 @@ def circuit(param):
expected_message = "The argument sampler_rng is expected to be a NumPy PRNG"
with pytest.raises(ValueError, match=expected_message):
- qml.grad(circuit)(np.array(1.0))
+ qml.grad(circuit)(pnp.array(1.0))
def test_trainable_batched_tape_raises(self):
"""Test that an error is raised for a broadcasted/batched tape if the broadcasted
@@ -202,7 +202,7 @@ def test_nontrainable_batched_tape(self):
def test_non_differentiable_error(self):
"""Test error raised if attempting to differentiate with
respect to a non-differentiable argument"""
- psi = np.array([1, 0, 1, 0], requires_grad=False) / np.sqrt(2)
+ psi = pnp.array([1, 0, 1, 0], requires_grad=False) / np.sqrt(2)
with qml.queuing.AnnotatedQueue() as q:
qml.StatePrep(psi, wires=[0, 1])
@@ -227,10 +227,10 @@ def test_non_differentiable_error(self):
assert isinstance(res, tuple)
assert len(res) == 2
- assert isinstance(res[0], numpy.ndarray)
+ assert isinstance(res[0], np.ndarray)
assert res[0].shape == (4,)
- assert isinstance(res[1], numpy.ndarray)
+ assert isinstance(res[1], np.ndarray)
assert res[1].shape == (4,)
@pytest.mark.parametrize("num_directions", [1, 10])
@@ -252,8 +252,8 @@ def test_independent_parameter(self, num_directions, mocker):
assert isinstance(res, tuple)
assert len(res) == 2
- assert isinstance(res[0], numpy.ndarray)
- assert isinstance(res[1], numpy.ndarray)
+ assert isinstance(res[0], np.ndarray)
+ assert isinstance(res[1], np.ndarray)
# 2 tapes per direction because the default strategy for SPSA is "center"
assert len(spy.call_args_list) == num_directions
@@ -282,7 +282,7 @@ def test_no_trainable_params_tape(self):
res = post_processing(qml.execute(g_tapes, dev, None))
assert g_tapes == []
- assert isinstance(res, numpy.ndarray)
+ assert isinstance(res, np.ndarray)
assert res.shape == (0,)
def test_no_trainable_params_multiple_return_tape(self):
@@ -383,7 +383,7 @@ def circuit(params):
qml.Rot(*params, wires=0)
return qml.probs([2, 3])
- params = np.array([0.5, 0.5, 0.5], requires_grad=True)
+ params = pnp.array([0.5, 0.5, 0.5], requires_grad=True)
result = spsa_grad(circuit)(params)
@@ -402,7 +402,7 @@ def circuit(params):
qml.Rot(*params, wires=0)
return qml.expval(qml.PauliZ(wires=2)), qml.probs([2, 3])
- params = np.array([0.5, 0.5, 0.5], requires_grad=True)
+ params = pnp.array([0.5, 0.5, 0.5], requires_grad=True)
result = spsa_grad(circuit)(params)
@@ -514,7 +514,7 @@ def cost6(x):
qml.Rot(*x, wires=0)
return qml.probs([0, 1]), qml.probs([2, 3])
- x = np.random.rand(3)
+ x = pnp.random.rand(3)
circuits = [qml.QNode(cost, dev) for cost in (cost1, cost2, cost3, cost4, cost5, cost6)]
transform = [qml.math.shape(spsa_grad(c)(x)) for c in circuits]
@@ -576,7 +576,7 @@ class DeviceSupportingSpecialObservable(DefaultQubitLegacy):
@staticmethod
def _asarray(arr, dtype=None):
- return np.array(arr, dtype=dtype)
+ return pnp.array(arr, dtype=dtype)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@@ -603,9 +603,11 @@ def reference_qnode(x):
qml.RY(x, wires=0)
return qml.expval(qml.PauliZ(wires=0))
- par = np.array(0.2, requires_grad=True)
- assert np.isclose(qnode(par).item().val, reference_qnode(par))
- assert np.isclose(qml.jacobian(qnode)(par).item().val, qml.jacobian(reference_qnode)(par))
+ par = pnp.array(0.2, requires_grad=True)
+ assert np.isclose(qnode(par).item().val, reference_qnode(par).item())
+ assert np.isclose(
+ qml.jacobian(qnode)(par).item().val, qml.jacobian(reference_qnode)(par).item()
+ )
@pytest.mark.parametrize("approx_order", [2, 4])
@@ -684,10 +686,10 @@ def test_single_expectation_value(self, approx_order, strategy, validate, tol):
# 1 / num_params here.
res = tuple(qml.math.convert_like(r * 2, r) for r in res)
- assert isinstance(res[0], numpy.ndarray)
+ assert isinstance(res[0], np.ndarray)
assert res[0].shape == ()
- assert isinstance(res[1], numpy.ndarray)
+ assert isinstance(res[1], np.ndarray)
assert res[1].shape == ()
expected = np.array([[-np.sin(y) * np.sin(x), np.cos(y) * np.cos(x)]])
@@ -728,10 +730,10 @@ def test_single_expectation_value_with_argnum_all(self, approx_order, strategy,
# 1 / num_params here.
res = tuple(qml.math.convert_like(r * 2, r) for r in res)
- assert isinstance(res[0], numpy.ndarray)
+ assert isinstance(res[0], np.ndarray)
assert res[0].shape == ()
- assert isinstance(res[1], numpy.ndarray)
+ assert isinstance(res[1], np.ndarray)
assert res[1].shape == ()
expected = np.array([[-np.sin(y) * np.sin(x), np.cos(y) * np.cos(x)]])
@@ -772,10 +774,10 @@ def test_single_expectation_value_with_argnum_one(self, approx_order, strategy,
assert isinstance(res, tuple)
assert len(res) == 2
- assert isinstance(res[0], numpy.ndarray)
+ assert isinstance(res[0], np.ndarray)
assert res[0].shape == ()
- assert isinstance(res[1], numpy.ndarray)
+ assert isinstance(res[1], np.ndarray)
assert res[1].shape == ()
expected = [0, np.cos(y) * np.cos(x)]
@@ -856,14 +858,14 @@ def test_multiple_expectation_values(self, approx_order, strategy, validate, tol
assert isinstance(res[0], tuple)
assert len(res[0]) == 2
assert np.allclose(res[0], [-np.sin(x), 0], atol=tol, rtol=0)
- assert isinstance(res[0][0], numpy.ndarray)
- assert isinstance(res[0][1], numpy.ndarray)
+ assert isinstance(res[0][0], np.ndarray)
+ assert isinstance(res[0][1], np.ndarray)
assert isinstance(res[1], tuple)
assert len(res[1]) == 2
assert np.allclose(res[1], [0, np.cos(y)], atol=tol, rtol=0)
- assert isinstance(res[1][0], numpy.ndarray)
- assert isinstance(res[1][1], numpy.ndarray)
+ assert isinstance(res[1][0], np.ndarray)
+ assert isinstance(res[1][1], np.ndarray)
def test_var_expectation_values(self, approx_order, strategy, validate, tol):
"""Tests correct output shape and evaluation for a tape
@@ -901,14 +903,14 @@ def test_var_expectation_values(self, approx_order, strategy, validate, tol):
assert isinstance(res[0], tuple)
assert len(res[0]) == 2
assert np.allclose(res[0], [-np.sin(x), 0], atol=tol, rtol=0)
- assert isinstance(res[0][0], numpy.ndarray)
- assert isinstance(res[0][1], numpy.ndarray)
+ assert isinstance(res[0][0], np.ndarray)
+ assert isinstance(res[0][1], np.ndarray)
assert isinstance(res[1], tuple)
assert len(res[1]) == 2
assert np.allclose(res[1], [0, -2 * np.cos(y) * np.sin(y)], atol=tol, rtol=0)
- assert isinstance(res[1][0], numpy.ndarray)
- assert isinstance(res[1][1], numpy.ndarray)
+ assert isinstance(res[1][0], np.ndarray)
+ assert isinstance(res[1][1], np.ndarray)
def test_prob_expectation_values(self, approx_order, strategy, validate, tol):
"""Tests correct output shape and evaluation for a tape
@@ -946,9 +948,9 @@ def test_prob_expectation_values(self, approx_order, strategy, validate, tol):
assert isinstance(res[0], tuple)
assert len(res[0]) == 2
assert np.allclose(res[0][0], -np.sin(x), atol=tol, rtol=0)
- assert isinstance(res[0][0], numpy.ndarray)
+ assert isinstance(res[0][0], np.ndarray)
assert np.allclose(res[0][1], 0, atol=tol, rtol=0)
- assert isinstance(res[0][1], numpy.ndarray)
+ assert isinstance(res[0][1], np.ndarray)
assert isinstance(res[1], tuple)
assert len(res[1]) == 2
@@ -963,7 +965,7 @@ def test_prob_expectation_values(self, approx_order, strategy, validate, tol):
atol=tol,
rtol=0,
)
- assert isinstance(res[1][0], numpy.ndarray)
+ assert isinstance(res[1][0], np.ndarray)
assert np.allclose(
res[1][1],
[
@@ -975,7 +977,7 @@ def test_prob_expectation_values(self, approx_order, strategy, validate, tol):
atol=tol,
rtol=0,
)
- assert isinstance(res[1][1], numpy.ndarray)
+ assert isinstance(res[1][1], np.ndarray)
@pytest.mark.parametrize(
@@ -989,7 +991,7 @@ def test_autograd(self, sampler, num_directions, atol):
"""Tests that the output of the SPSA gradient transform
can be differentiated using autograd, yielding second derivatives."""
dev = qml.device("default.qubit", wires=2)
- params = np.array([0.543, -0.654], requires_grad=True)
+ params = pnp.array([0.543, -0.654], requires_grad=True)
rng = np.random.default_rng(42)
def cost_fn(x):
@@ -1004,7 +1006,7 @@ def cost_fn(x):
tapes, fn = spsa_grad(
tape, n=1, num_directions=num_directions, sampler=sampler, sampler_rng=rng
)
- jac = np.array(fn(dev.execute(tapes)))
+ jac = pnp.array(fn(dev.execute(tapes)))
if sampler is coordinate_sampler:
jac *= 2
return jac
@@ -1025,7 +1027,7 @@ def test_autograd_ragged(self, sampler, num_directions, atol):
"""Tests that the output of the SPSA gradient transform
of a ragged tape can be differentiated using autograd, yielding second derivatives."""
dev = qml.device("default.qubit", wires=2)
- params = np.array([0.543, -0.654], requires_grad=True)
+ params = pnp.array([0.543, -0.654], requires_grad=True)
rng = np.random.default_rng(42)
def cost_fn(x):
diff --git a/tests/gradients/finite_diff/test_spsa_gradient_shot_vec.py b/tests/gradients/finite_diff/test_spsa_gradient_shot_vec.py
index 46f8aa1288e..2c771dc2832 100644
--- a/tests/gradients/finite_diff/test_spsa_gradient_shot_vec.py
+++ b/tests/gradients/finite_diff/test_spsa_gradient_shot_vec.py
@@ -14,11 +14,11 @@
"""
Tests for the gradients.spsa_gradient module using shot vectors.
"""
-import numpy
+import numpy as np
import pytest
import pennylane as qml
-from pennylane import numpy as np
+from pennylane import numpy as pnp
from pennylane.devices import DefaultQubitLegacy
from pennylane.gradients import spsa_grad
from pennylane.measurements import Shots
@@ -49,7 +49,7 @@ class TestSpsaGradient:
def test_non_differentiable_error(self):
"""Test error raised if attempting to differentiate with
respect to a non-differentiable argument"""
- psi = np.array([1, 0, 1, 0], requires_grad=False) / np.sqrt(2)
+ psi = pnp.array([1, 0, 1, 0], requires_grad=False) / np.sqrt(2)
with qml.queuing.AnnotatedQueue() as q:
qml.StatePrep(psi, wires=[0, 1])
@@ -78,10 +78,10 @@ def test_non_differentiable_error(self):
for res in all_res:
assert isinstance(res, tuple)
- assert isinstance(res[0], numpy.ndarray)
+ assert isinstance(res[0], np.ndarray)
assert res[0].shape == (4,)
- assert isinstance(res[1], numpy.ndarray)
+ assert isinstance(res[1], np.ndarray)
assert res[1].shape == (4,)
@pytest.mark.parametrize("num_directions", [1, 6])
@@ -107,8 +107,8 @@ def test_independent_parameter(self, num_directions, mocker):
assert isinstance(res, tuple)
assert len(res) == 2
- assert isinstance(res[0], numpy.ndarray)
- assert isinstance(res[1], numpy.ndarray)
+ assert isinstance(res[0], np.ndarray)
+ assert isinstance(res[1], np.ndarray)
# 2 tapes per direction because the default strategy for SPSA is "center"
assert len(spy.call_args_list) == num_directions
@@ -139,7 +139,7 @@ def test_no_trainable_params_tape(self):
for res in all_res:
assert g_tapes == []
- assert isinstance(res, numpy.ndarray)
+ assert isinstance(res, np.ndarray)
assert res.shape == (0,)
def test_no_trainable_params_multiple_return_tape(self):
@@ -244,7 +244,7 @@ def circuit(params):
qml.Rot(*params, wires=0)
return qml.probs([2, 3])
- params = np.array([0.5, 0.5, 0.5], requires_grad=True)
+ params = pnp.array([0.5, 0.5, 0.5], requires_grad=True)
grad_fn = spsa_grad(circuit, h=h_val, sampler_rng=rng)
all_result = grad_fn(params)
@@ -269,7 +269,7 @@ def circuit(params):
qml.Rot(*params, wires=0)
return qml.expval(qml.PauliZ(wires=2)), qml.probs([2, 3])
- params = np.array([0.5, 0.5, 0.5], requires_grad=True)
+ params = pnp.array([0.5, 0.5, 0.5], requires_grad=True)
grad_fn = spsa_grad(circuit, h=h_val, sampler_rng=rng)
all_result = grad_fn(params)
@@ -416,7 +416,7 @@ def cost6(x):
qml.Rot(*x, wires=0)
return qml.probs([0, 1]), qml.probs([2, 3])
- x = np.random.rand(3)
+ x = pnp.random.rand(3)
circuits = [qml.QNode(cost, dev) for cost in (cost1, cost2, cost3, cost4, cost5, cost6)]
transform = [qml.math.shape(spsa_grad(c, h=h_val)(x)) for c in circuits]
@@ -498,9 +498,11 @@ def reference_qnode(x):
qml.RY(x, wires=0)
return qml.expval(qml.PauliZ(wires=0))
- par = np.array(0.2, requires_grad=True)
- assert np.isclose(qnode(par).item().val, reference_qnode(par))
- assert np.isclose(qml.jacobian(qnode)(par).item().val, qml.jacobian(reference_qnode)(par))
+ par = pnp.array(0.2, requires_grad=True)
+ assert np.isclose(qnode(par).item().val, reference_qnode(par).item())
+ assert np.isclose(
+ qml.jacobian(qnode)(par).item().val, qml.jacobian(reference_qnode)(par).item()
+ )
@pytest.mark.parametrize("approx_order", [2, 4])
@@ -586,10 +588,10 @@ def test_single_expectation_value(self, approx_order, strategy, validate):
assert isinstance(res, tuple)
assert len(res) == 2
- assert isinstance(res[0], numpy.ndarray)
+ assert isinstance(res[0], np.ndarray)
assert res[0].shape == ()
- assert isinstance(res[1], numpy.ndarray)
+ assert isinstance(res[1], np.ndarray)
assert res[1].shape == ()
# The coordinate_sampler produces the right evaluation points, but the tape execution
@@ -635,10 +637,10 @@ def test_single_expectation_value_with_argnum_all(self, approx_order, strategy,
assert isinstance(res, tuple)
assert len(res) == 2
- assert isinstance(res[0], numpy.ndarray)
+ assert isinstance(res[0], np.ndarray)
assert res[0].shape == ()
- assert isinstance(res[1], numpy.ndarray)
+ assert isinstance(res[1], np.ndarray)
assert res[1].shape == ()
# The coordinate_sampler produces the right evaluation points, but the tape execution
@@ -689,10 +691,10 @@ def test_single_expectation_value_with_argnum_one(self, approx_order, strategy,
assert isinstance(res, tuple)
assert len(res) == 2
- assert isinstance(res[0], numpy.ndarray)
+ assert isinstance(res[0], np.ndarray)
assert res[0].shape == ()
- assert isinstance(res[1], numpy.ndarray)
+ assert isinstance(res[1], np.ndarray)
assert res[1].shape == ()
# The coordinate_sampler produces the right evaluation points and there is just one
@@ -783,13 +785,13 @@ def test_multiple_expectation_values(self, approx_order, strategy, validate):
assert isinstance(res[0], tuple)
assert len(res[0]) == 2
- assert isinstance(res[0][0], numpy.ndarray)
- assert isinstance(res[0][1], numpy.ndarray)
+ assert isinstance(res[0][0], np.ndarray)
+ assert isinstance(res[0][1], np.ndarray)
assert isinstance(res[1], tuple)
assert len(res[1]) == 2
- assert isinstance(res[1][0], numpy.ndarray)
- assert isinstance(res[1][1], numpy.ndarray)
+ assert isinstance(res[1][0], np.ndarray)
+ assert isinstance(res[1][1], np.ndarray)
# The coordinate_sampler produces the right evaluation points, but the tape execution
# results are averaged instead of added, so that we need to revert the prefactor
@@ -837,13 +839,13 @@ def test_var_expectation_values(self, approx_order, strategy, validate):
assert isinstance(res[0], tuple)
assert len(res[0]) == 2
- assert isinstance(res[0][0], numpy.ndarray)
- assert isinstance(res[0][1], numpy.ndarray)
+ assert isinstance(res[0][0], np.ndarray)
+ assert isinstance(res[0][1], np.ndarray)
assert isinstance(res[1], tuple)
assert len(res[1]) == 2
- assert isinstance(res[1][0], numpy.ndarray)
- assert isinstance(res[1][1], numpy.ndarray)
+ assert isinstance(res[1][0], np.ndarray)
+ assert isinstance(res[1][1], np.ndarray)
# The coordinate_sampler produces the right evaluation points, but the tape execution
# results are averaged instead of added, so that we need to revert the prefactor
@@ -892,13 +894,13 @@ def test_prob_expectation_values(self, approx_order, strategy, validate):
assert isinstance(res[0], tuple)
assert len(res[0]) == 2
- assert isinstance(res[0][0], numpy.ndarray)
- assert isinstance(res[0][1], numpy.ndarray)
+ assert isinstance(res[0][0], np.ndarray)
+ assert isinstance(res[0][1], np.ndarray)
assert isinstance(res[1], tuple)
assert len(res[1]) == 2
- assert isinstance(res[1][0], numpy.ndarray)
- assert isinstance(res[1][1], numpy.ndarray)
+ assert isinstance(res[1][0], np.ndarray)
+ assert isinstance(res[1][1], np.ndarray)
# The coordinate_sampler produces the right evaluation points, but the tape execution
# results are averaged instead of added, so that we need to revert the prefactor
@@ -943,7 +945,7 @@ def test_autograd(self, approx_order, strategy):
"""Tests that the output of the SPSA gradient transform
can be differentiated using autograd, yielding second derivatives."""
dev = qml.device("default.qubit", wires=2, shots=many_shots_shot_vector)
- params = np.array([0.543, -0.654], requires_grad=True)
+ params = pnp.array([0.543, -0.654], requires_grad=True)
rng = np.random.default_rng(42)
def cost_fn(x):
@@ -986,7 +988,7 @@ def test_autograd_ragged(self, approx_order, strategy):
"""Tests that the output of the SPSA gradient transform
of a ragged tape can be differentiated using autograd, yielding second derivatives."""
dev = qml.device("default.qubit", wires=2, shots=many_shots_shot_vector)
- params = np.array([0.543, -0.654], requires_grad=True)
+ params = pnp.array([0.543, -0.654], requires_grad=True)
rng = np.random.default_rng(42)
def cost_fn(x):
diff --git a/tests/interfaces/test_jax_jit.py b/tests/interfaces/test_jax_jit.py
index a9927dad7fb..eea7b6be52a 100644
--- a/tests/interfaces/test_jax_jit.py
+++ b/tests/interfaces/test_jax_jit.py
@@ -107,7 +107,7 @@ def cost(a, device):
interface="None",
)[0]
- with pytest.raises(ValueError, match="Unknown interface"):
+ with pytest.raises(qml.QuantumFunctionError, match="Unknown interface"):
cost(a, device=dev)
def test_grad_on_execution(self, mocker):
diff --git a/tests/measurements/test_sample.py b/tests/measurements/test_sample.py
index e0d4ec25724..d31ce97d4a5 100644
--- a/tests/measurements/test_sample.py
+++ b/tests/measurements/test_sample.py
@@ -121,8 +121,8 @@ def circuit():
# If all the dimensions are equal the result will end up to be a proper rectangular array
assert len(result) == 3
- assert isinstance(result[0], np.ndarray)
- assert isinstance(result[1], np.ndarray)
+ assert isinstance(result[0], float)
+ assert isinstance(result[1], float)
assert result[2].dtype == np.dtype("float")
assert np.array_equal(result[2].shape, (n_sample,))
diff --git a/tests/qnn/test_keras.py b/tests/qnn/test_keras.py
index f4f9769edc2..1115460922d 100644
--- a/tests/qnn/test_keras.py
+++ b/tests/qnn/test_keras.py
@@ -588,7 +588,11 @@ def circuit(inputs, w1):
return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliZ(1))
qlayer = KerasLayer(circuit, weight_shapes, output_dim=2)
- assert qlayer.qnode.interface == circuit.interface == interface
+ assert (
+ qlayer.qnode.interface
+ == circuit.interface
+ == qml.workflow.execution.INTERFACE_MAP[interface]
+ )
@pytest.mark.tf
diff --git a/tests/qnn/test_qnn_torch.py b/tests/qnn/test_qnn_torch.py
index 64aeb9b1a9c..e2642df0e4b 100644
--- a/tests/qnn/test_qnn_torch.py
+++ b/tests/qnn/test_qnn_torch.py
@@ -632,7 +632,11 @@ def circuit(inputs, w1):
return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliZ(1))
qlayer = TorchLayer(circuit, weight_shapes)
- assert qlayer.qnode.interface == circuit.interface == interface
+ assert (
+ qlayer.qnode.interface
+ == circuit.interface
+ == qml.workflow.execution.INTERFACE_MAP[interface]
+ )
@pytest.mark.torch
diff --git a/tests/test_qnode.py b/tests/test_qnode.py
index 38b8847106d..1322ca62c16 100644
--- a/tests/test_qnode.py
+++ b/tests/test_qnode.py
@@ -434,7 +434,7 @@ def circuit(x):
qml.RX(x, wires=0)
return qml.expval(qml.PauliZ(0))
- assert circuit.interface is None
+ assert circuit.interface == "numpy"
with pytest.warns(
qml.PennyLaneDeprecationWarning, match=r"QNode.gradient_fn is deprecated"
):
@@ -1139,6 +1139,20 @@ def circuit():
assert q.queue == [] # pylint: disable=use-implicit-booleaness-not-comparison
assert len(circuit.tape.operations) == 1
+ def test_qnode_preserves_inferred_numpy_interface(self):
+ """Tests that the QNode respects the inferred numpy interface."""
+
+ dev = qml.device("default.qubit", wires=1)
+
+ @qml.qnode(dev)
+ def circuit(x):
+ qml.RX(x, wires=0)
+ return qml.expval(qml.PauliZ(0))
+
+ x = np.array(0.8)
+ res = circuit(x)
+ assert qml.math.get_interface(res) == "numpy"
+
class TestShots:
"""Unit tests for specifying shots per call."""
@@ -1899,7 +1913,7 @@ def circuit(x):
else:
spy = mocker.spy(circuit.device, "execute")
- x = np.array(0.5)
+ x = pnp.array(0.5)
circuit(x)
tape = spy.call_args[0][0][0]
diff --git a/tests/test_qnode_legacy.py b/tests/test_qnode_legacy.py
index 3ee36d99bdb..73eaf29b302 100644
--- a/tests/test_qnode_legacy.py
+++ b/tests/test_qnode_legacy.py
@@ -1488,7 +1488,7 @@ def circuit(x):
else:
spy = mocker.spy(circuit.device, "execute")
- x = np.array(0.5)
+ x = pnp.array(0.5)
circuit(x)
tape = spy.call_args[0][0][0]