diff --git a/tests/conftest.py b/tests/conftest.py
index 7c89c5d68a0..3a9986ad4c6 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -39,6 +39,12 @@ class DummyDevice(DefaultGaussian):
     _operation_map["Kerr"] = lambda *x, **y: np.identity(2)
 
 
+@pytest.fixture(autouse=True)
+def set_numpy_seed():
+    np.random.seed(9872653)
+    yield
+
+
 @pytest.fixture(scope="session")
 def tol():
     """Numerical tolerance for equality tests."""
diff --git a/tests/gradients/finite_diff/test_finite_difference_shot_vec.py b/tests/gradients/finite_diff/test_finite_difference_shot_vec.py
index 641d9138b7c..0f1b19609fb 100644
--- a/tests/gradients/finite_diff/test_finite_difference_shot_vec.py
+++ b/tests/gradients/finite_diff/test_finite_difference_shot_vec.py
@@ -718,7 +718,7 @@ def test_multiple_expectation_values(self, approx_order, strategy, validate):
 
         assert isinstance(res[0], tuple)
         assert len(res[0]) == 2
-        assert np.allclose(res[0], [-np.sin(x), 0], atol=0.1, rtol=0)
+        assert np.allclose(res[0], [-np.sin(x), 0], atol=0.15, rtol=0)
         assert isinstance(res[0][0], numpy.ndarray)
         assert isinstance(res[0][1], numpy.ndarray)
 
diff --git a/tests/gradients/parameter_shift/test_parameter_shift_shot_vec.py b/tests/gradients/parameter_shift/test_parameter_shift_shot_vec.py
index 85463888985..0d2d97c596b 100644
--- a/tests/gradients/parameter_shift/test_parameter_shift_shot_vec.py
+++ b/tests/gradients/parameter_shift/test_parameter_shift_shot_vec.py
@@ -1305,7 +1305,7 @@ def test_non_involutory_variance_single_param(self):
         for gradF in all_gradF:
             assert isinstance(gradF, np.ndarray)
             assert gradF.shape == ()
-            assert gradF == pytest.approx(expected, abs=1)
+            assert qml.math.allclose(gradF, expected, atol=2 * _herm_shot_vec_tol)
 
     @flaky(max_runs=5)
     def test_non_involutory_variance_multi_param(self):
@@ -1359,7 +1359,7 @@ def test_non_involutory_variance_multi_param(self):
         # Note: the tolerances here are significantly higher than in usual tests
         # due to the stochasticity of the test case
         assert gradF[0] == pytest.approx(expected, abs=2)
-        assert gradF[1] == pytest.approx(expected, abs=1)
+        assert qml.math.allclose(gradF[1], expected, atol=1.5)
 
     @flaky(max_runs=8)
     def test_involutory_and_noninvolutory_variance_single_param(self):
@@ -1486,8 +1486,8 @@ def test_involutory_and_noninvolutory_variance_multi_param(self):
 
         assert np.allclose(shot_vec_result[0][0], expected[0], atol=1)
         assert np.allclose(shot_vec_result[0][1], expected[1], atol=1)
-        assert np.allclose(shot_vec_result[1][0], expected[2], atol=1)
-        assert np.allclose(shot_vec_result[1][1], expected[3], atol=1)
+        assert np.allclose(shot_vec_result[1][0], expected[2], atol=1.5)
+        assert np.allclose(shot_vec_result[1][1], expected[3], atol=1.5)
 
     @pytest.mark.parametrize("ind", [0, 1])
     def test_var_and_probs_single_param(self, ind):
diff --git a/tests/transforms/test_hamiltonian_expand.py b/tests/transforms/test_hamiltonian_expand.py
index 6adad868778..578430998d6 100644
--- a/tests/transforms/test_hamiltonian_expand.py
+++ b/tests/transforms/test_hamiltonian_expand.py
@@ -345,9 +345,7 @@ def test_processing_function_shot_vectors_broadcasting(self, H, expected, groupi
         """Tests that the processing function works with shot vectors,
         parameter broadcasting, and grouping with different number of
         coefficients in each group"""
-        np.random.seed(824)
-        dev_with_shot_vector = qml.device("default.qubit", shots=[(8000, 4)])
-
+        dev_with_shot_vector = qml.device("default.qubit", shots=[(10000, 4)])
         if grouping:
             H.compute_grouping()
 
diff --git a/tests/transforms/test_qcut.py b/tests/transforms/test_qcut.py
index 7225ea4bada..0ab9d64d978 100644
--- a/tests/transforms/test_qcut.py
+++ b/tests/transforms/test_qcut.py
@@ -2506,7 +2506,7 @@ def target_circuit(v):
         qml.RX(2.3, wires=2)
         return qml.expval(qml.PauliZ(wires=0) @ qml.PauliZ(wires=2))
 
-    dev = dev_fn(wires=2, shots=10000)
+    dev = dev_fn(wires=2, shots=20000)
 
     @partial(qml.cut_circuit_mc, classical_processing_fn=fn)
     @qml.qnode(dev)