Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Fix hadamard_grad with wires-broadcasted measurements #5860

Merged
merged 9 commits into from
Jun 20, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 3 additions & 0 deletions doc/releases/changelog-dev.md
Original file line number Diff line number Diff line change
Expand Up @@ -415,6 +415,9 @@

<h3>Bug fixes 🐛</h3>

* Fixes a bug where `hadamard_grad` returned the wrong shape for `qml.probs()` without wires.
[(#5860)](https://github.com/PennyLaneAI/pennylane/pull/5860)

* An error is now raised on processing an `AnnotatedQueue` into a `QuantumScript` if the queue
contains something other than an `Operator`, `MeasurementProcess`, or `QuantumScript`.
[(#5866)](https://github.com/PennyLaneAI/pennylane/pull/5866)
Expand Down
30 changes: 17 additions & 13 deletions pennylane/gradients/hadamard_gradient.py
Original file line number Diff line number Diff line change
Expand Up @@ -274,11 +274,6 @@ def _expval_hadamard_grad(tape, argnum, aux_wire):
coeffs = []

gradient_data = []
measurements_probs = [
idx
for idx, m in enumerate(tape.measurements)
if isinstance(m, qml.measurements.ProbabilityMP)
]
EmilianoG-byte marked this conversation as resolved.
Show resolved Hide resolved
for trainable_param_idx, _ in enumerate(tape.trainable_params):
if trainable_param_idx not in argnums:
# parameter has zero gradient
Expand Down Expand Up @@ -330,7 +325,8 @@ def _expval_hadamard_grad(tape, argnum, aux_wire):
elif m.obs:
obs_new = [m.obs]
else:
obs_new = [qml.Z(i) for i in m.wires]
m_wires = m.wires if len(m.wires) > 0 else tape.wires
obs_new = [qml.Z(i) for i in m_wires]

obs_new.append(qml.Y(aux_wire))
obs_type = qml.prod if qml.operation.active_new_opmath() else qml.operation.Tensor
Expand Down Expand Up @@ -359,6 +355,18 @@ def _expval_hadamard_grad(tape, argnum, aux_wire):

multi_measurements = len(tape.measurements) > 1
multi_params = len(tape.trainable_params) > 1
measurements_probs = [
idx
for idx, m in enumerate(tape.measurements)
if isinstance(m, qml.measurements.ProbabilityMP)
]

def _postprocess_probs(res, measurement, projector):
dwierichs marked this conversation as resolved.
Show resolved Hide resolved
num_wires_probs = len(measurement.wires)
if num_wires_probs == 0:
num_wires_probs = tape.num_wires
res = qml.math.reshape(res, (2**num_wires_probs, 2))
return qml.math.tensordot(res, projector, axes=[[1], [0]])

def processing_fn(results): # pylint: disable=too-many-branches
"""Post processing function for computing a hadamard gradient."""
Expand All @@ -378,16 +386,12 @@ def processing_fn(results): # pylint: disable=too-many-branches
for idx, res in enumerate(final_res):
if multi_measurements:
for prob_idx in measurements_probs:
num_wires_probs = len(tape.measurements[prob_idx].wires)
res_reshaped = qml.math.reshape(res[prob_idx], (2**num_wires_probs, 2))
final_res[idx][prob_idx] = qml.math.tensordot(
res_reshaped, projector, axes=[[1], [0]]
final_res[idx][prob_idx] = _postprocess_probs(
res[prob_idx], tape.measurements[prob_idx], projector
)
else:
prob_idx = measurements_probs[0]
num_wires_probs = len(tape.measurements[prob_idx].wires)
res = qml.math.reshape(res, (2**num_wires_probs, 2))
final_res[idx] = qml.math.tensordot(res, projector, axes=[[1], [0]])
final_res[idx] = _postprocess_probs(res, tape.measurements[prob_idx], projector)
grads = []
idx = 0
for num_tape in gradient_data:
Expand Down
80 changes: 58 additions & 22 deletions tests/gradients/core/test_hadamard_gradient.py
Original file line number Diff line number Diff line change
Expand Up @@ -81,6 +81,30 @@ def cost9(x):
return (qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliZ(1)))


def cost10(x):
astralcai marked this conversation as resolved.
Show resolved Hide resolved
"""Cost function."""
qml.Rot(*x, wires=0)
qml.Hadamard(1)
qml.X(2)
return qml.probs()


def cost11(x):
"""Cost function."""
qml.Rot(*x, wires=0)
qml.Hadamard(1)
qml.X(2)
return qml.probs(), qml.probs([0, 2, 1, 4, 3])


def cost12(x):
"""Cost function."""
qml.Rot(*x, wires=0)
qml.Hadamard(1)
qml.X(2)
return qml.probs(op=qml.Hadamard(0) @ qml.Y(1) @ qml.Y(2))


class TestHadamardGrad:
"""Unit tests for the hadamard_grad function"""

Expand Down Expand Up @@ -440,6 +464,7 @@ def test_prob_expectation_values(self, tol):
qml.CNOT(wires=[0, 1])
qml.expval(qml.PauliZ(0))
qml.probs(wires=[0, 1])
qml.probs()
dwierichs marked this conversation as resolved.
Show resolved Hide resolved

tape = qml.tape.QuantumScript.from_queue(q)

Expand All @@ -448,7 +473,7 @@ def test_prob_expectation_values(self, tol):
assert len(tapes) == 2

assert isinstance(res_hadamard, tuple)
assert len(res_hadamard) == 2
assert len(res_hadamard) == 3

assert isinstance(res_hadamard[0], tuple)
assert len(res_hadamard[0]) == 2
Expand All @@ -458,13 +483,14 @@ def test_prob_expectation_values(self, tol):
assert isinstance(res_hadamard[0][1], np.ndarray)
assert res_hadamard[0][1].shape == ()

assert isinstance(res_hadamard[1], tuple)
assert len(res_hadamard[1]) == 2
for res in res_hadamard[1:]:
assert isinstance(res, tuple)
assert len(res) == 2

assert isinstance(res_hadamard[1][0], np.ndarray)
assert res_hadamard[1][0].shape == (4,)
assert isinstance(res_hadamard[1][1], np.ndarray)
assert res_hadamard[1][1].shape == (4,)
assert isinstance(res[0], np.ndarray)
assert res[0].shape == (4,)
assert isinstance(res[1], np.ndarray)
assert res[1].shape == (4,)

expval_expected = [-2 * np.sin(x) / 2, 0]
probs_expected = (
Expand Down Expand Up @@ -495,8 +521,9 @@ def test_prob_expectation_values(self, tol):
assert np.allclose(res_hadamard[0][1], expval_expected[1], tol)

# Probs
assert np.allclose(res_hadamard[1][0], probs_expected[:, 0], tol)
assert np.allclose(res_hadamard[1][1], probs_expected[:, 1], tol)
for res in res_hadamard[1:]:
assert np.allclose(res[0], probs_expected[:, 0], tol)
assert np.allclose(res[1], probs_expected[:, 1], tol)

costs_and_expected_expval_scalar = [
(cost7, (), np.ndarray),
Expand Down Expand Up @@ -549,6 +576,9 @@ def test_output_shape_matches_qnode_expval_array(self, cost, exp_shape, exp_type
(cost4, [4, 3], np.ndarray),
(cost5, [4, 3], list),
(cost6, [2, 4, 3], tuple),
(cost10, [32, 3], np.ndarray), # Note that the shape here depends on the device
(cost11, [2, 32, 3], tuple),
(cost12, [8, 3], np.ndarray),
]

@pytest.mark.parametrize("cost, exp_shape, exp_type", costs_and_expected_probs)
Expand All @@ -565,19 +595,25 @@ def test_output_shape_matches_qnode_probs(self, cost, exp_shape, exp_type):
res_hadamard = res_hadamard[0]
assert len(res_hadamard) == exp_shape[0]

if len(exp_shape) > 2:
for r in res_hadamard:
assert isinstance(r, np.ndarray)
assert len(r) == exp_shape[1]

for r_ in r:
assert isinstance(r_, np.ndarray)
assert len(r_) == exp_shape[2]

elif len(exp_shape) > 1:
for r in res_hadamard:
assert isinstance(r, np.ndarray)
assert len(r) == exp_shape[1]
# Also check on the tape level
circuit(x)
tapes, fn = qml.gradients.hadamard_grad(circuit.tape)
res_hadamard_tape = qml.math.moveaxis(qml.math.stack(fn(dev.execute(tapes))), -2, -1)

for res in [res_hadamard, res_hadamard_tape]:
if len(exp_shape) > 2:
for r in res:
assert isinstance(r, np.ndarray)
assert len(r) == exp_shape[1]

for r_ in r:
assert isinstance(r_, np.ndarray)
assert len(r_) == exp_shape[2]

elif len(exp_shape) > 1:
for r in res:
assert isinstance(r, np.ndarray)
assert len(r) == exp_shape[1]

@pytest.mark.parametrize("shots", [None, 100])
def test_shots_attribute(self, shots):
Expand Down
Loading