use TF 2.13 in CI (#4472)
* use TF 2.13 in CI

* fix requires_grad; stop using obs hash

* fix import; consider ndim=None to be an error

* changelog

* pull wires out of evil test

* make sure 2.12 still works

* Revert "make sure 2.12 still works"

This reverts commit 51d5e80.

* Update pennylane/math/single_dispatch.py

Co-authored-by: Christina Lee <christina@xanadu.ai>

* fix jax seed in test

---------

Co-authored-by: Christina Lee <christina@xanadu.ai>
timmysilv and albi3ro committed Aug 16, 2023
1 parent 892e092 commit f3a4e42
Showing 9 changed files with 42 additions and 23 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/interface-unit-tests.yml
@@ -15,7 +15,7 @@ on:
         description: The version of TensorFlow to install for any job that requires TensorFlow
         required: false
         type: string
-        default: 2.12.0
+        default: 2.13.0
       pytorch_version:
         description: The version of PyTorch to install for any job that requires PyTorch
         required: false
3 changes: 3 additions & 0 deletions doc/releases/changelog-dev.md
@@ -227,6 +227,9 @@ array([False, False])
   [(#4465)](https://github.com/PennyLaneAI/pennylane/pull/4465/)
   [(#4478)](https://github.com/PennyLaneAI/pennylane/pull/4478/)
 
+* CI now runs tests with TensorFlow 2.13.0
+  [(#4472)](https://github.com/PennyLaneAI/pennylane/pull/4472)
+
 <h3>Breaking changes 💔</h3>
 
 * Gradient transforms no longer implicitly cast `float32` parameters to `float64`. Finite diff
4 changes: 2 additions & 2 deletions pennylane/gradients/hamiltonian_grad.py
@@ -25,11 +25,11 @@ def hamiltonian_grad(tape, idx):
         idx (int): index of parameter that we differentiate with respect to
     """
 
-    op, _, p_idx = tape.get_operation(idx)
+    op, m_pos, p_idx = tape.get_operation(idx)
     new_tape = tape.copy(copy_operations=True)
 
     # get position in queue
-    queue_position = tape.observables.index(op)
+    queue_position = m_pos - len(tape.operations)
     new_tape._measurements[queue_position] = qml.expval(op.ops[p_idx])
 
     new_tape._par_info = {}
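The second change above replaces an equality-based `list.index` lookup with positional arithmetic (`m_pos - len(tape.operations)`). A minimal pure-Python sketch of the failure mode being avoided (`Obs` is an illustrative stand-in, not PennyLane code):

```python
# Illustrative only: a stand-in observable with value-based equality.
class Obs:
    def __init__(self, name, wires):
        self.name = name
        self.wires = wires

    def __eq__(self, other):
        return (self.name, self.wires) == (other.name, other.wires)


measurements = [Obs("PauliZ", 0), Obs("PauliZ", 0)]  # equal but distinct objects
assert measurements.index(measurements[1]) == 0  # .index returns the *first* equal match

# Positional bookkeeping, as in the fix above, avoids the lookup entirely:
num_operations = 3  # e.g. len(tape.operations)
m_pos = 4           # global queue position returned by get_operation
assert m_pos - num_operations == 1  # index of the second measurement
```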
5 changes: 4 additions & 1 deletion pennylane/math/single_dispatch.py
@@ -295,7 +295,10 @@ def _round_tf(tensor, decimals=0):
 
 def _ndim_tf(tensor):
     try:
-        return _i("tf").experimental.numpy.ndim(tensor)
+        ndim = _i("tf").experimental.numpy.ndim(tensor)
+        if ndim is None:
+            return len(tensor.shape)
+        return ndim
     except AttributeError:
         return len(tensor.shape)
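A standalone sketch of the new guard, assuming only that `tf.experimental.numpy.ndim` can return `None` for some inputs, which is the case this fix defends against:

```python
import tensorflow as tf

def ndim_tf(tensor):
    """Rank of a TF tensor; falls back to the static shape when
    tf.experimental.numpy.ndim returns None or is unavailable."""
    try:
        ndim = tf.experimental.numpy.ndim(tensor)
        return len(tensor.shape) if ndim is None else ndim
    except AttributeError:  # objects without the numpy API
        return len(tensor.shape)

print(ndim_tf(tf.zeros((2, 3))))  # 2
```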
10 changes: 8 additions & 2 deletions pennylane/math/utils.py
@@ -455,7 +455,10 @@ def requires_grad(tensor, interface=None):
         import tensorflow as tf
 
         try:
-            from tensorflow.python.eager.tape import should_record_backprop
+            try:
+                from tensorflow.python.eager.record import should_record_backprop
+            except ImportError:  # pragma: no cover
+                from tensorflow.python.eager.tape import should_record_backprop
         except ImportError:  # pragma: no cover
             from tensorflow.python.eager.tape import should_record as should_record_backprop
 
@@ -508,7 +511,10 @@ def in_backprop(tensor, interface=None):
         import tensorflow as tf
 
         try:
-            from tensorflow.python.eager.tape import should_record_backprop
+            try:
+                from tensorflow.python.eager.record import should_record_backprop
+            except ImportError:  # pragma: no cover
+                from tensorflow.python.eager.tape import should_record_backprop
         except ImportError:  # pragma: no cover
             from tensorflow.python.eager.tape import should_record as should_record_backprop
 
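TF 2.13 moved `should_record_backprop` from `tensorflow.python.eager.tape` to `tensorflow.python.eager.record`, hence the extra fallback level. A hedged usage sketch of the private helper (TF internals; the call signature shown is an assumption and may change between releases):

```python
import tensorflow as tf

try:  # TF >= 2.13
    from tensorflow.python.eager.record import should_record_backprop
except ImportError:  # older TF
    from tensorflow.python.eager.tape import should_record_backprop

x = tf.Variable(0.2)
print(should_record_backprop([tf.convert_to_tensor(x)]))  # False: no active tape
with tf.GradientTape() as tape:
    tape.watch(x)
    print(should_record_backprop([tf.convert_to_tensor(x)]))  # True inside the tape
```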
15 changes: 9 additions & 6 deletions pennylane/ops/qubit/hamiltonian.py
@@ -44,17 +44,20 @@ def _compute_grouping_indices(observables, grouping_type="qwc", method="rlf"):
 
     indices = []
     available_indices = list(range(len(observables)))
-    for partition in observable_groups:
+    for partition in observable_groups:  # pylint:disable=too-many-nested-blocks
         indices_this_group = []
         for pauli_word in partition:
             # find index of this pauli word in remaining original observables,
             for observable in observables:
                 if qml.pauli.are_identical_pauli_words(pauli_word, observable):
-                    ind = observables.index(observable)
-                    indices_this_group.append(available_indices[ind])
-                    # delete this observable and its index, so it cannot be found again
-                    observables.pop(ind)
-                    available_indices.pop(ind)
+                    for ind, obs in enumerate(observables):
+                        if obs is not observable:
+                            continue
+                        indices_this_group.append(available_indices[ind])
+                        # delete this observable and its index, so it cannot be found again
+                        observables.pop(ind)
+                        available_indices.pop(ind)
+                        break
                     break
         indices.append(tuple(indices_this_group))
 
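The replacement scans by object identity (`is`) rather than `observables.index`, which matches the first `__eq__`-equal element and can therefore pick a duplicate. A minimal sketch of the pattern in isolation; `index_by_identity` is a hypothetical helper, not part of PennyLane:

```python
def index_by_identity(items, target):
    """Position of *this exact object*, ignoring __eq__."""
    for ind, item in enumerate(items):
        if item is target:
            return ind
    raise ValueError("target is not in the list")

a, b = [1], [1]  # equal but distinct objects
assert [a, b].index(b) == 0               # equality-based: first match wins
assert index_by_identity([a, b], b) == 1  # identity-based: the object we hold
```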
13 changes: 8 additions & 5 deletions pennylane/pauli/grouping/group_observables.py
@@ -242,16 +242,19 @@ def group_observables(observables, coefficients=None, grouping_type="qwc", metho
     # we cannot delete elements from the coefficients tensor, so we
     # use a proxy list memorising the indices for this logic
     coeff_indices = list(range(qml.math.shape(coefficients)[0]))
-    for i, partition in enumerate(partitioned_paulis):
+    for i, partition in enumerate(partitioned_paulis):  # pylint:disable=too-many-nested-blocks
         indices = []
         for pauli_word in partition:
             # find index of this pauli word in remaining original observables,
             for observable in observables:
                 if are_identical_pauli_words(pauli_word, observable):
-                    ind = observables.index(observable)
-                    indices.append(coeff_indices[ind])
-                    observables.pop(ind)
-                    coeff_indices.pop(ind)
+                    for ind, obs in enumerate(observables):
+                        if obs is not observable:
+                            continue
+                        indices.append(coeff_indices[ind])
+                        observables.pop(ind)
+                        coeff_indices.pop(ind)
+                        break
                     break
 
         # add a tensor of coefficients to the grouped coefficients
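A hedged usage sketch of the public entry point this logic serves, with a repeated observable, which is the case the identity-based bookkeeping keeps consistent (values are illustrative):

```python
import pennylane as qml
from pennylane.pauli import group_observables

obs = [qml.PauliZ(0), qml.PauliZ(0), qml.PauliX(1)]
coeffs = [0.5, -0.5, 1.0]
groups, grouped_coeffs = group_observables(obs, coeffs, grouping_type="qwc")
print(groups)          # qubit-wise-commuting partitions of obs
print(grouped_coeffs)  # coefficients permuted to match the grouping
```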
9 changes: 5 additions & 4 deletions tests/ops/qubit/test_parametric_ops.py
@@ -3766,12 +3766,13 @@ def test_simplify_rotations_grad_tf_function(self):
         import tensorflow as tf
 
         op = qml.U2
+        wires = list(range(op.num_wires))
 
         dev = qml.device("default.qubit", wires=2)
 
         @tf.function
         @qml.qnode(dev)
-        def circuit(simplify, wires, *params, **hyperparams):
+        def circuit(simplify, *params, **hyperparams):
             if simplify:
                 qml.simplify(op(*params, wires=wires, **hyperparams))
             else:
@@ -3780,19 +3781,19 @@ def circuit(simplify, wires, *params, **hyperparams):
             return qml.expval(qml.PauliZ(0))
 
         unsimplified_op = self.get_unsimplified_op(op)
-        params, wires = self._get_params_wires(unsimplified_op)
+        params, _ = self._get_params_wires(unsimplified_op)
         hyperparams = {"dim": 2} if unsimplified_op.name == "PCPhase" else {}
 
         for i in range(params[0].shape[0]):
             parameters = [tf.Variable(p[i]) for p in params]
 
             with tf.GradientTape() as unsimplified_tape:
-                unsimplified_res = circuit(False, wires, *parameters, **hyperparams)
+                unsimplified_res = circuit(False, *parameters, **hyperparams)
 
             unsimplified_grad = unsimplified_tape.gradient(unsimplified_res, parameters)
 
             with tf.GradientTape() as simplified_tape:
-                simplified_res = circuit(True, wires, *parameters, **hyperparams)
+                simplified_res = circuit(True, *parameters, **hyperparams)
 
             simplified_grad = simplified_tape.gradient(simplified_res, parameters)
 
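Hoisting `wires` out of the `tf.function` signature keeps non-tensor Python data out of the traced argument list; the assumption here is that TF 2.13's stricter tracing of Python-object arguments is what broke the original test. A minimal sketch of the closure pattern:

```python
import tensorflow as tf

wires = [0, 1]  # plain Python data, captured by closure instead of passed in

@tf.function
def scaled(x):
    # `wires` is a trace-time constant here; only `x` is a traced tensor.
    return x * float(len(wires))

print(scaled(tf.constant(2.0)))  # tf.Tensor(4.0, shape=(), dtype=float32)
```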
4 changes: 2 additions & 2 deletions tests/test_operation.py
@@ -1989,8 +1989,8 @@ def test_map_wires(self):
     qml.Identity(1),
     cv.NumberOperator(wires=[1]),
     cv.TensorN(wires=[1]),
-    cv.X(wires=[1]),
-    cv.P(wires=[1]),
+    cv.QuadX(wires=[1]),
+    cv.QuadP(wires=[1]),
     # cv.QuadOperator(1.234, wires=0),
     # cv.FockStateProjector([1,2,3], wires=[0, 1, 2]),
     cv.PolyXP(np.array([1.0, 2.0, 3.0]), wires=[0]),
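`cv.X` and `cv.P` are the pre-rename names of the CV quadrature observables; the test list now uses `cv.QuadX`/`cv.QuadP`. An illustrative usage sketch (the wire label is arbitrary):

```python
from pennylane.ops import cv

x_quad = cv.QuadX(wires=0)  # position quadrature observable
p_quad = cv.QuadP(wires=0)  # momentum quadrature observable
print(x_quad.name, p_quad.name)  # QuadX QuadP
```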
