Refactoring interfaces in preparation for Jacobian Product work (#4415)

* preparation cleaning to interface files

* move conversion to numpy inside execute_fn

* fixing some failures

* hopefully fixing all the tests

* fixing more tests

* fixing legacy mistake

* Update pennylane/devices/qubit/sampling.py

* Update pennylane/optimize/qnspsa.py

* Update tests/transforms/test_batch_transform.py

* Update pennylane/interfaces/torch.py

* changelog and black

* fix failures

* Update pennylane/interfaces/execution.py

Co-authored-by: Matthew Silverman <matthews@xanadu.ai>

* respond to feedback

* oops

* add recommendation to warning

* Update pennylane/interfaces/execution.py

* black

* Update pennylane/gradients/finite_difference.py

* Update pennylane/interfaces/execution.py

* black and more pragma: no-cover

---------

Co-authored-by: Matthew Silverman <matthews@xanadu.ai>
Co-authored-by: Romain Moyard <rmoyard@gmail.com>
3 people committed Aug 14, 2023
1 parent a4c31d6 commit 6b149a1
Showing 29 changed files with 293 additions and 246 deletions.
11 changes: 11 additions & 0 deletions doc/releases/changelog-dev.md
@@ -165,6 +165,13 @@ array([False, False])
* Provide users access to the logging configuration file path and improve the logging configuration structure.
[(#4377)](https://github.com/PennyLaneAI/pennylane/pull/4377)

* Refactored `pennylane/interfaces`. The `execute_fn` passed to the machine learning framework boundaries
is now responsible for converting parameters to NumPy. The gradients module can now handle TensorFlow
parameters, but gradient tapes now retain the original dtype instead of converting to `float64`; this may
cause instability with finite differences and `float32` parameters. The ML boundary functions are now
uncoupled from their legacy counterparts, as sketched below.
[(#4415)](https://github.com/PennyLaneAI/pennylane/pull/4415)

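A minimal sketch of the new contract (the wrapper name is hypothetical and the import path of `convert_to_numpy_parameters` is assumed, not taken from this commit): the `execute_fn` handed to an ML boundary now unwraps tape parameters itself, so the boundary can pass framework tapes through unchanged.

from pennylane.interfaces import convert_to_numpy_parameters  # import path assumed for this sketch

def device_execute_fn(device):
    """Hypothetical wrapper: unwrap parameters to numpy, then execute on the device."""
    def execute_fn(tapes, **kwargs):
        numpy_tapes = tuple(convert_to_numpy_parameters(t) for t in tapes)
        return device.batch_execute(numpy_tapes), []  # (results, jacobians)
    return execute_fn
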
* `qml.transforms.adjoint_metric_tensor` now uses the simulation tools in `pennylane.devices.qubit` instead of
private methods of `pennylane.devices.DefaultQubit`.
[(#4456)](https://github.com/PennyLaneAI/pennylane/pull/4456)
@@ -181,6 +188,10 @@ array([False, False])

<h3>Breaking changes 💔</h3>

* Gradient transforms no longer implicitly cast `float32` parameters to `float64`. Finite differences
with `float32` parameters may no longer give accurate results; see the example below.
[(#4415)](https://github.com/PennyLaneAI/pennylane/pull/4415)

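A hedged illustration of the user-facing impact (the circuit and device here are arbitrary examples): without the implicit upcast, finite differences on `float32` inputs may warn and lose precision, so `float64` inputs are preferred.

import numpy as np
import pennylane as qml
from pennylane import numpy as pnp

dev = qml.device("default.qubit", wires=1)

@qml.qnode(dev, diff_method="finite-diff")
def circuit(x):
    qml.RX(x, wires=0)
    return qml.expval(qml.PauliZ(0))

x32 = pnp.array(0.5, dtype=np.float32, requires_grad=True)
qml.grad(circuit)(x32)  # stays float32; may warn and lose precision

x64 = pnp.array(0.5, requires_grad=True)  # float64 by default: recommended
qml.grad(circuit)(x64)
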
* Support for Python 3.8 is dropped.
[(#4453)](https://github.com/PennyLaneAI/pennylane/pull/4453)

8 changes: 8 additions & 0 deletions pennylane/gradients/finite_difference.py
@@ -18,6 +18,7 @@
# pylint: disable=protected-access,too-many-arguments,too-many-branches,too-many-statements
import functools
from collections.abc import Sequence
from warnings import warn

import numpy as np
from scipy.special import factorial
@@ -329,6 +330,13 @@ def finite_diff(
f0=f0,
validate_params=validate_params,
)

if any(qml.math.get_dtype_name(p) == "float32" for p in tape.get_parameters()):
warn(
"Finite differences with float32 detected. Answers may be inaccurate. float64 is recommended.",
UserWarning,
)

if argnum is None and not tape.trainable_params:
return _no_trainable_grad(tape)

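A minimal sketch of what now triggers this warning (the tape contents are arbitrary): any tape parameter whose dtype name is `float32`.

import numpy as np
import pennylane as qml

tape = qml.tape.QuantumScript(
    [qml.RX(np.float32(0.3), wires=0)], [qml.expval(qml.PauliZ(0))]
)
grad_tapes, post_fn = qml.gradients.finite_diff(tape)
# UserWarning: Finite differences with float32 detected. Answers may be
# inaccurate. float64 is recommended.
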
8 changes: 6 additions & 2 deletions pennylane/gradients/general_shift_rules.py
@@ -396,8 +396,12 @@ def _copy_and_shift_params(tape, indices, shifts, multipliers, cast=False):

# Shift copied parameter
new_params = list(op.data)
new_params[p_idx] = new_params[p_idx] * qml.math.convert_like(multiplier, new_params[p_idx])
new_params[p_idx] = new_params[p_idx] + qml.math.convert_like(shift, new_params[p_idx])
multiplier = qml.math.convert_like(multiplier, new_params[p_idx])
multiplier = qml.math.cast_like(multiplier, new_params[p_idx])
shift = qml.math.convert_like(shift, new_params[p_idx])
shift = qml.math.cast_like(shift, new_params[p_idx])
new_params[p_idx] = new_params[p_idx] * multiplier
new_params[p_idx] = new_params[p_idx] + shift
if cast:
dtype = getattr(new_params[p_idx], "dtype", float)
new_params[p_idx] = qml.math.cast(new_params[p_idx], dtype)
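
A hedged sketch of why the added `cast_like` calls matter (assumes TensorFlow is installed): TensorFlow does not auto-promote dtypes, so a `float64` shift combined with a `float32` parameter would otherwise raise a dtype-mismatch error.

import numpy as np
import tensorflow as tf
import pennylane as qml

param = tf.Variable(0.3, dtype=tf.float32)
shift = np.pi / 2                            # Python float, i.e. float64

shift = qml.math.convert_like(shift, param)  # tf.Tensor, still float64
shift = qml.math.cast_like(shift, param)     # cast to the parameter's float32
shifted = param + shift                      # no dtype-mismatch error
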
23 changes: 4 additions & 19 deletions pennylane/interfaces/autograd.py
@@ -303,16 +303,6 @@ def execute(tapes, device, execute_fn, gradient_fn, gradient_kwargs, _n=1, max_d
list[list[float]]: A nested list of tape results. Each element in
the returned list corresponds in order to the provided tapes.
"""
if not qml.active_return():
return _execute_legacy(
tapes,
device,
execute_fn,
gradient_fn,
gradient_kwargs,
_n=_n,
max_diff=max_diff,
)
# pylint: disable=unused-argument
for tape in tapes:
# set the trainable parameters
@@ -385,8 +375,7 @@ def _execute(
"::L".join(str(i) for i in inspect.getouterframes(inspect.currentframe(), 2)[1][1:3]),
)

unwrapped_tapes = tuple(convert_to_numpy_parameters(t) for t in tapes)
res, jacs = execute_fn(unwrapped_tapes, **gradient_kwargs)
res, jacs = execute_fn(tapes, **gradient_kwargs)

return res, jacs

@@ -461,9 +450,7 @@ def partial_gradient_fn(tape):
return gradient_fn(tape, **gradient_kwargs)

g_tapes, fn = qml.transforms.map_batch_transform(partial_gradient_fn, tapes)
unwrapped_tapes = tuple(convert_to_numpy_parameters(g_t) for g_t in g_tapes)

res, _ = execute_fn(unwrapped_tapes, **gradient_kwargs)
res, _ = execute_fn(g_tapes, **gradient_kwargs)

jacs = fn(res)
cached_jac["jacobian"] = jacs
@@ -507,9 +494,8 @@ def grad_fn(dy):

# Generate and execute the required gradient tapes
if _n == max_diff:
unwrapped_tapes = tuple(convert_to_numpy_parameters(t) for t in tapes)
vjp_tapes, processing_fn = qml.gradients.batch_vjp(
unwrapped_tapes,
tapes,
dy,
gradient_fn,
reduction="append",
@@ -551,8 +537,7 @@ def grad_fn(dy):
# - gradient_fn is not differentiable
#
# so we cannot support higher-order derivatives.
unwrapped_tapes = tuple(convert_to_numpy_parameters(t) for t in tapes)
jacs = gradient_fn(unwrapped_tapes, **gradient_kwargs)
jacs = gradient_fn(tapes, **gradient_kwargs)

vjps = _compute_vjps_autograd(jacs, dy, multi_measurements, has_partitioned_shots)

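With unwrapping moved into `execute_fn`, gradient transforms receive tapes that still carry framework parameters. A hedged sketch of what they now accept (assumes TensorFlow is installed; the tape contents are arbitrary):

import tensorflow as tf
import pennylane as qml

tape = qml.tape.QuantumScript(
    [qml.RX(tf.Variable(0.3), wires=0)], [qml.expval(qml.PauliZ(0))]
)
grad_tapes, post_fn = qml.gradients.param_shift(tape)
# The shifted tapes retain TensorFlow parameters (and their original dtype)
# instead of being converted to float64 numpy arrays first.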
