diff --git a/doc/code/qml_fermi.rst b/doc/code/qml_fermi.rst
index b1d4a2e43a2..2c528c1f2fe 100644
--- a/doc/code/qml_fermi.rst
+++ b/doc/code/qml_fermi.rst
@@ -51,18 +51,18 @@ The fermionic operators can be mapped to the qubit basis by using the
 Fermi sentences.
 
 >>> qml.jordan_wigner(qml.FermiA(1))
-(0.5*(PauliZ(wires=[0]) @ PauliX(wires=[1])))
-+ (0.5j*(PauliZ(wires=[0]) @ PauliY(wires=[1])))
+0.5 * (Z(0) @ X(1)) + 0.5j * (Z(0) @ Y(1))
 
 >>> qml.jordan_wigner(qml.FermiC(1) * qml.FermiA(1))
-((0.5+0j)*(Identity(wires=[1])))
-+ ((-0.5+0j)*(PauliZ(wires=[1])))
+(0.5+0j) * I(1) + (-0.5+0j) * Z(1)
 
 >>> f = 0.5 * qml.FermiC(1) * qml.FermiA(1) + 0.75 * qml.FermiC(2) * qml.FermiA(2)
 >>> qml.jordan_wigner(f)
-((0.625+0j)*(Identity(wires=[1])))
-+ ((-0.25+0j)*(PauliZ(wires=[1])))
-+ ((-0.375+0j)*(PauliZ(wires=[2])))
+(
+    (0.625+0j) * I(1)
+    + (-0.25+0j) * Z(1)
+    + (-0.375+0j) * Z(2)
+)
 
 FermiWord and FermiSentence
 ---------------------------
diff --git a/pennylane/fermi/conversion.py b/pennylane/fermi/conversion.py
index 8f27a39d2aa..10fa7ffbc68 100644
--- a/pennylane/fermi/conversion.py
+++ b/pennylane/fermi/conversion.py
@@ -211,9 +211,9 @@ def parity_transform(
     >>> parity_transform(w, n=6)
     (
         -0.25j * Y(0)
-    + (-0.25+0j) * (X(0) @ Z(1))
-    + (0.25+0j) * X(0)
-    + 0.25j * (Y(0) @ Z(1))
+        + (-0.25+0j) * (X(0) @ Z(1))
+        + (0.25+0j) * X(0)
+        + 0.25j * (Y(0) @ Z(1))
     )
 
     >>> parity_transform(w, n=6, ps=True)
@@ -374,10 +374,10 @@ def bravyi_kitaev(
     >>> w = qml.fermi.from_string('0+ 1-')
     >>> bravyi_kitaev(w, n=6)
     (
-    -0.25j * Y(0)
-    + (-0.25+0j) * (X(0) @ Z(1))
-    + (0.25+0j) * X(0)
-    + 0.25j * (Y(0) @ Z(1))
+        -0.25j * Y(0)
+        + (-0.25+0j) * (X(0) @ Z(1))
+        + (0.25+0j) * X(0)
+        + 0.25j * (Y(0) @ Z(1))
     )
 
     >>> bravyi_kitaev(w, n=6, ps=True)
diff --git a/pennylane/kernels/cost_functions.py b/pennylane/kernels/cost_functions.py
index dc1912249b8..c0a823eede4 100644
--- a/pennylane/kernels/cost_functions.py
+++ b/pennylane/kernels/cost_functions.py
@@ -68,7 +68,7 @@ def polarity(
 
     .. code-block :: python
 
-        dev = qml.device('default.qubit')
+        dev = qml.device('default.qubit', wires=2)
         @qml.qnode(dev)
         def circuit(x1, x2):
             qml.templates.AngleEmbedding(x1, wires=dev.wires)
@@ -144,7 +144,7 @@ def target_alignment(
 
     .. code-block :: python
 
-        dev = qml.device('default.qubit')
+        dev = qml.device('default.qubit', wires=2)
         @qml.qnode(dev)
         def circuit(x1, x2):
             qml.templates.AngleEmbedding(x1, wires=dev.wires)
diff --git a/pennylane/kernels/utils.py b/pennylane/kernels/utils.py
index 9ccedc21781..498675702f2 100644
--- a/pennylane/kernels/utils.py
+++ b/pennylane/kernels/utils.py
@@ -39,7 +39,7 @@ def square_kernel_matrix(X, kernel, assume_normalized_kernel=False):
 
     .. code-block :: python
 
-        dev = qml.device('default.qubit')
+        dev = qml.device('default.qubit', wires=2)
         @qml.qnode(dev)
         def circuit(x1, x2):
             qml.templates.AngleEmbedding(x1, wires=dev.wires)
@@ -103,7 +103,7 @@ def kernel_matrix(X1, X2, kernel):
 
     .. code-block :: python
 
-        dev = qml.device('default.qubit')
+        dev = qml.device('default.qubit', wires=2)
         @qml.qnode(dev)
         def circuit(x1, x2):
             qml.templates.AngleEmbedding(x1, wires=dev.wires)
diff --git a/pennylane/math/multi_dispatch.py b/pennylane/math/multi_dispatch.py
index 6510bc99509..011cd67bb97 100644
--- a/pennylane/math/multi_dispatch.py
+++ b/pennylane/math/multi_dispatch.py
@@ -187,7 +187,7 @@ def block_diag(values, like=None):
     >>> t = [
     ...     np.array([[1, 2], [3, 4]]),
     ...     torch.tensor([[1, 2, 3], [-1, -6, -3]]),
-    ...     torch.tensor(5)
+    ...     torch.tensor([[5]])
     ... ]
     >>> qml.math.block_diag(t)
     tensor([[ 1,  2,  0,  0,  0,  0],
@@ -225,8 +225,9 @@ def concatenate(values, axis=0, like=None):
     >>> y = tf.Variable([0.1, 0.2, 0.3])
     >>> z = np.array([5., 8., 101.])
     >>> concatenate([x, y, z])
-    <tf.Tensor: shape=(9,), dtype=float32, numpy=
-    array([6.00e-01, 1.00e-01, 6.00e-01, 1.00e-01, 2.00e-01, 3.00e-01, 5.00e+00, 8.00e+00, 1.01e+02], dtype=float32)>
+    <tf.Tensor: shape=(9,), dtype=float32, numpy=
+    array([6.00e-01, 1.00e-01, 6.00e-01, 1.00e-01, 2.00e-01, 3.00e-01,
+           5.00e+00, 8.00e+00, 1.01e+02], dtype=float32)>
     """
 
     if like == "torch":
@@ -415,11 +416,12 @@ def get_trainable_indices(values, like=None):
 
     **Example**
 
+    >>> from pennylane import numpy as pnp
     >>> def cost_fn(params):
     ...     print("Trainable:", qml.math.get_trainable_indices(params))
     ...     return np.sum(np.sin(params[0] * params[1]))
-    >>> values = [np.array([0.1, 0.2], requires_grad=True),
-    ...     np.array([0.5, 0.2], requires_grad=False)]
+    >>> values = [pnp.array([0.1, 0.2], requires_grad=True),
+    ...     pnp.array([0.5, 0.2], requires_grad=False)]
     >>> cost_fn(values)
     Trainable: {0}
     tensor(0.0899685, requires_grad=True)
@@ -453,7 +455,7 @@ def ones_like(tensor, dtype=None):
 
     >>> x = torch.tensor([1., 2.])
     >>> ones_like(x)
-    tensor([1, 1])
+    tensor([1., 1.])
     >>> y = tf.Variable([[0], [5]])
     >>> ones_like(y, dtype=np.complex128)
     <tf.Tensor: shape=(2, 1), dtype=complex128, numpy=
     array([[1.+0.j],
            [1.+0.j]])>
@@ -520,12 +522,7 @@ def where(condition, x=None, y=None):
 
     >>> math.where(tf.constant(a) < 1)
-    tf.Tensor(
-    [[0 0]
-     [0 1]
-     [1 1]
-     [1 2]], shape=(4, 2), dtype=int64)
+    <tf.Tensor: shape=(4, 2), dtype=int64, numpy=array([[0, 0], [0, 1], [1, 1], [1, 2]])>
 
-
     As we can see, the dimensions are swapped and the output is a single Tensor. Note
     that the number of dimensions of the output does *not* depend on the input shape,
     it is always two-dimensional.
@@ -767,7 +764,7 @@ def unwrap(values, max_depth=None):
     ...     return np.sum(np.sin(params))
     >>> params = np.array([0.1, 0.2, 0.3])
     >>> grad = autograd.grad(cost_fn)(params)
-    Unwrapped: [(0.1, <class 'float'>), (0.2, <class 'float'>), (0.3, <class 'float'>)]
+    Unwrapped: [(0.1, <class 'numpy.float64'>), (0.2, <class 'numpy.float64'>), (0.3, <class 'numpy.float64'>)]
     >>> print(grad)
     [0.99500417 0.98006658 0.95533649]
     """
diff --git a/pennylane/math/quantum.py b/pennylane/math/quantum.py
index e35dd5b6b10..a1fdeabaf2d 100644
--- a/pennylane/math/quantum.py
+++ b/pennylane/math/quantum.py
@@ -77,10 +77,11 @@ def circuit(weights):
 
     We can now compute the covariance matrix:
 
     >>> shape = qml.templates.StronglyEntanglingLayers.shape(n_layers=2, n_wires=3)
-    >>> weights = np.random.random(shape, requires_grad=True)
+    >>> weights = pnp.random.random(shape, requires_grad=True)
     >>> cov = qml.math.cov_matrix(circuit(weights), obs_list)
     >>> cov
-    tensor([[0.9275379 , 0.05233832], [0.05233832, 0.99335545]], requires_grad=True)
+    tensor([[0.98125435, 0.4905541 ],
+            [0.4905541 , 0.99920878]], requires_grad=True)
 
     Autodifferentiation is fully supported using all interfaces. Here we use autograd:
@@ -204,7 +205,7 @@ def reduce_dm(density_matrix, indices, check_state=False, c_dtype="complex128"):
     [0.+0.j 0.+0.j]]
 
     >>> z = tf.Variable([[1, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], dtype=tf.complex128)
-    >>> reduce_dm(x, indices=[1])
+    >>> reduce_dm(z, indices=[1])
     tf.Tensor(
     [[1.+0.j 0.+0.j]
      [0.+0.j 0.+0.j]], shape=(2, 2), dtype=complex128)
@@ -268,16 +269,20 @@ def partial_trace(matrix, indices, c_dtype="complex128"):
 
     >>> x = np.array([[1, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]])
     >>> partial_trace(x, indices=[0])
-    array([[1, 0], [0, 0]])
+    array([[1.+0.j, 0.+0.j],
+           [0.+0.j, 0.+0.j]])
 
     We can also pass a batch of matrices ``x`` to the function and return the partial trace of each
     matrix with respect to each matrix's 0th index.
 
     >>> x = np.array([
-        [[1, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
-        [[0, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]
-    ])
+    ...     [[1, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
+    ...     [[0, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]
+    ...     ])
     >>> partial_trace(x, indices=[0])
-    array([[[1, 0], [0, 0]], [[0, 0], [0, 1]]])
+    array([[[1.+0.j, 0.+0.j],
+            [0.+0.j, 0.+0.j]],
+           [[0.+0.j, 0.+0.j],
+            [0.+0.j, 1.+0.j]]])
 
     The partial trace can also be computed with respect to multiple indices within different frameworks such as TensorFlow.
@@ -285,7 +290,7 @@
     >>> x = tf.Variable([[[1, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
     ...                  [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 1, 0], [0, 0, 0, 0]]], dtype=tf.complex128)
     >>> partial_trace(x, indices=[1])
-    <tf.Tensor: shape=(2, 2, 2), dtype=complex128, numpy=array([[[1, 0], [0, 0]], [[0, 0], [0, 1]]])>
+    <tf.Tensor: shape=(2, 2, 2), dtype=complex128, numpy=
+    array([[[1.+0.j, 0.+0.j],
+            [0.+0.j, 0.+0.j]],
+           [[0.+0.j, 0.+0.j],
+            [0.+0.j, 1.+0.j]]])>
     """
 
     # Autograd does not support same indices sum in backprop, and tensorflow
@@ -771,6 +779,7 @@ def vn_entanglement_entropy(
     The entanglement entropy between subsystems for a state vector can be returned as follows:
 
     >>> x = np.array([0, -1, 1, 0]) / np.sqrt(2)
+    >>> x = qml.math.dm_from_state_vector(x)
     >>> qml.math.vn_entanglement_entropy(x, indices0=[0], indices1=[1])
     0.6931471805599453
 
@@ -934,12 +943,12 @@ def relative_entropy(state0, state1, base=None, check_state=False, c_dtype="comp
     >>> rho = np.array([[0.3, 0], [0, 0.7]])
     >>> sigma = np.array([[0.5, 0], [0, 0.5]])
     >>> qml.math.relative_entropy(rho, sigma)
-    tensor(0.08228288, requires_grad=True)
+    0.08228288
 
     It is also possible to change the log base:
 
     >>> qml.math.relative_entropy(rho, sigma, base=2)
-    tensor(0.1187091, requires_grad=True)
+    0.1187091
 
     .. seealso:: :func:`pennylane.qinfo.transforms.relative_entropy`
     """
diff --git a/pennylane/math/utils.py b/pennylane/math/utils.py
index b2b9a3a8a0c..e80b11d969a 100644
--- a/pennylane/math/utils.py
+++ b/pennylane/math/utils.py
@@ -301,7 +301,7 @@ def get_deep_interface(value):
 
     >>> qml.math.asarray(x, like=qml.math.get_deep_interface(x))
     Array([[1, 2],
-       [3, 4]], dtype=int64)
+           [3, 4]], dtype=int64)
     """
 
     itr = value
diff --git a/pennylane/qnn/torch.py b/pennylane/qnn/torch.py
index b0860bed9ec..1b37999f09d 100644
--- a/pennylane/qnn/torch.py
+++ b/pennylane/qnn/torch.py
@@ -186,7 +186,7 @@ def qnode(inputs, weights_0, weights_1, weights_2, weight_3, weight_4):
 
         init_method = {
             "weights_0": torch.nn.init.normal_,
-            "weights_1": torch.nn.init.uniform,
+            "weights_1": torch.nn.init.uniform_,
             "weights_2": torch.tensor([1., 2., 3.]),
             "weight_3": torch.tensor(1.),  # scalar when shape is not an iterable and is <= 1
             "weight_4": torch.tensor([1.]),
@@ -261,7 +261,7 @@ def qnode(inputs, weights_0, weights_1, weights_2, weight_3, weight_4):
         def qnode(inputs, weights):
            qml.templates.AngleEmbedding(inputs, wires=range(n_qubits))
             qml.templates.StronglyEntanglingLayers(weights, wires=range(n_qubits))
-            return qml.expval(qml.Z(0)), qml.expval(qml.Z(1))
+            return [qml.expval(qml.Z(0)), qml.expval(qml.Z(1))]
 
         weight_shapes = {"weights": (3, n_qubits, 3)}
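
A quick way to sanity-check the corrected fermionic doctest outputs locally
(a minimal sketch; it assumes a PennyLane version that prints operators in
the compact Z(0) @ X(1) style used by the updated doctests):

    import pennylane as qml

    # Annihilation operator on mode 1 under the Jordan-Wigner mapping;
    # expected repr: 0.5 * (Z(0) @ X(1)) + 0.5j * (Z(0) @ Y(1))
    print(qml.jordan_wigner(qml.FermiA(1)))

    # Number operator on mode 1;
    # expected repr: (0.5+0j) * I(1) + (-0.5+0j) * Z(1)
    print(qml.jordan_wigner(qml.FermiC(1) * qml.FermiA(1)))

    # A weighted sum of number operators maps to identity and Pauli-Z terms
    f = 0.5 * qml.FermiC(1) * qml.FermiA(1) + 0.75 * qml.FermiC(2) * qml.FermiA(2)
    print(qml.jordan_wigner(f))

If the printed reprs match the new doctest outputs above, the documentation
examples should pass under the doctest runner.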