diff --git a/tests/test_calibration.py b/tests/test_calibration.py
index e3aa2b6c..0e0335b1 100644
--- a/tests/test_calibration.py
+++ b/tests/test_calibration.py
@@ -89,82 +89,6 @@ def test_spotpy_calibration():
     assert len(best_parameters_transform) == len(bounds_high)


-def test_calibration_failures():
-    """Test for the calibration algorithm failure modes"""
-    bounds_low = np.array([0, 0, 0])
-    bounds_high = np.array([10, 10, 10])
-    model_config = {
-        "precip": np.array([10, 11, 12, 13, 14, 15]),
-        "temperature": np.array([10, 3, -5, 1, 15, 0]),
-        "Qobs": np.array([120, 130, 140, 150, 160, 170]),
-        "drainage_area": np.array([10]),
-        "model_name": "Dummy",
-    }
-
-    # Test Qobs different length than Qsim
-    with pytest.raises(SystemExit) as pytest_wrapped_e:
-        best_parameters_negative, best_simulation, best_objfun = perform_calibration(
-            model_config.update(Qobs=np.array([100, 100, 100])),
-            "nse",
-            bounds_low=bounds_low,
-            bounds_high=bounds_high,
-            evaluations=1000,
-            algorithm="OTHER",
-        )
-    assert pytest_wrapped_e.type == SystemExit
-
-    # Test mask not 1 or 0
-    mask = np.array([0, 0, 0, 0.5, 1, 1])
-    best_parameters_negative, best_simulation, best_objfun = perform_calibration(
-        model_config,
-        "nse",
-        bounds_low=bounds_low,
-        bounds_high=bounds_high,
-        evaluations=1000,
-        algorithm="DDS",
-        mask=mask,
-    )
-    assert pytest_wrapped_e.type == SystemExit
-
-    # test not same length in mask
-    mask = np.array([0, 0, 0, 1, 1])
-    best_parameters_negative, best_simulation, best_objfun = perform_calibration(
-        model_config,
-        "nse",
-        bounds_low=bounds_low,
-        bounds_high=bounds_high,
-        evaluations=1000,
-        algorithm="DDS",
-        mask=mask,
-    )
-    assert pytest_wrapped_e.type == SystemExit
-
-    # Test objective function fail is caught
-    mask = np.array([0, 0, 0, 0, 1, 1])
-    best_parameters_negative, best_simulation, best_objfun = perform_calibration(
-        model_config,
-        "nse_fake",
-        bounds_low=bounds_low,
-        bounds_high=bounds_high,
-        evaluations=1000,
-        algorithm="DDS",
-        mask=mask,
-    )
-    assert pytest_wrapped_e.type == SystemExit
-
-    # Test objective function that cannot be minimized
-    best_parameters_negative, best_simulation, best_objfun = perform_calibration(
-        model_config,
-        "bias",
-        bounds_low=bounds_low,
-        bounds_high=bounds_high,
-        evaluations=1000,
-        algorithm="DDS",
-        mask=mask,
-    )
-    assert pytest_wrapped_e.type == SystemExit
-
-
 def test_transform():
     """Test the flow transformer"""
     Qsim = np.array([10, 10, 10])
diff --git a/tests/test_hydrological_modelling.py b/tests/test_hydrological_modelling.py
index f4928937..9f81751a 100644
--- a/tests/test_hydrological_modelling.py
+++ b/tests/test_hydrological_modelling.py
@@ -25,9 +25,9 @@ def test_hydrological_modelling():
     assert Qsim == 0


-def import_unknown_model():
+def test_import_unknown_model():
     """Test for unknown model"""
-    with pytest.raises(NotImplementedError):
+    with pytest.raises(NotImplementedError) as pytest_wrapped_e:
         model_config = {"model_name": "fake_model"}
-        Qsim = hydrological_model_selector(model_config)
-        assert Qsim is None
+        _ = hydrological_model_selector(model_config)
+    assert pytest_wrapped_e.type == NotImplementedError
diff --git a/tests/test_objective_functions.py b/tests/test_objective_functions.py
index 86252f02..f1d470f0 100644
--- a/tests/test_objective_functions.py
+++ b/tests/test_objective_functions.py
@@ -69,70 +69,91 @@ def test_obj_funcs():
     np.testing.assert_array_almost_equal(objfun, -0.022988505747126436, 8)


-def test_objective_function_failure_modes():
-    """Test for the objective function calculation failure modes"""
-    Qobs = np.array([100, 100, 100])
-    Qsim = np.array([110, 110, 90])
-
-    mask = np.array([0, 1, 1])
-
-    # Test Qobs different length than Qsim
+def test_objective_function_failure_data_length():
+    """Test for the objective function calculation failure mode:
+    Qobs and Qsim length are different
+    """
     with pytest.raises(SystemExit) as pytest_wrapped_e:
-        objfun = get_objective_function(
-            np.array([100, 100]),
-            Qsim,
+        _ = get_objective_function(
+            np.array([100, 110]),
+            np.array([100, 110, 120]),
             obj_func="mae",
-            take_negative=True,
-            mask=None,
-            transform=None,
-            epsilon=None,
         )
     assert pytest_wrapped_e.type == SystemExit

-    # Test for mask length
-    objfun = get_objective_function(
-        Qobs,
-        Qsim,
+
+def test_objective_function_failure_mask_length():
+    """Test for the objective function calculation failure mode:
+    Qobs and mask length are different
+    """
+    with pytest.raises(SystemExit) as pytest_wrapped_e:
+        _ = get_objective_function(
+            np.array([100, 100, 100]),
+            np.array([100, 110, 120]),
             obj_func="mae",
-        take_negative=True,
             mask=np.array([0, 1, 0, 0]),
-        transform=None,
-        epsilon=None,
-    )
+        )
     assert pytest_wrapped_e.type == SystemExit

-    # Test for obj_func does not exist
-    objfun = get_objective_function(
-        Qobs,
-        Qsim,
-        obj_func="fake_mae",
-        take_negative=True,
-        mask=mask,
+
+def test_objective_function_failure_unknown_objfun():
+    """Test for the objective function calculation failure mode:
+    Objective function is unknown
+    """
+    with pytest.raises(SystemExit) as pytest_wrapped_e:
+        _ = get_objective_function(
+            np.array([100, 100, 100]),
+            np.array([100, 110, 120]),
+            obj_func="fake",
         )
     assert pytest_wrapped_e.type == SystemExit

-    # Test for mask is not 0 and 1
-    objfun = get_objective_function(
-        Qobs,
-        Qsim,
+
+def test_objective_function_failure_mask_contents():
+    """Test for the objective function calculation failure mode:
+    Mask contains other than 0 and 1
+    """
+    with pytest.raises(SystemExit) as pytest_wrapped_e:
+        _ = get_objective_function(
+            np.array([100, 100, 100]),
+            np.array([100, 110, 120]),
             obj_func="mae",
-        take_negative=True,
             mask=np.array([0, 0.5, 1]),
         )
     assert pytest_wrapped_e.type == SystemExit
-    assert objfun is None

-    # Test for maximize_minimize objective func for unbounded metrics.
-    maximize = get_objfun_minimize_or_maximize(obj_func="bias")
-    assert pytest_wrapped_e.type == SystemExit
-    maximize = get_objfun_minimize_or_maximize(obj_func="pbias")
+
+def test_maximizer_objfun_failure_modes_bias():
+    """Test for maximize-minimize failure mode:
+    Use of bias objfun which is unbounded
+    """
+    with pytest.raises(SystemExit) as pytest_wrapped_e:
+        _ = get_objfun_minimize_or_maximize(obj_func="bias")
     assert pytest_wrapped_e.type == SystemExit
-    maximize = get_objfun_minimize_or_maximize(obj_func="volume_error")
+
+
+def test_maximizer_objfun_failure_modes_pbias():
+    """Test for maximize-minimize failure mode:
+    Use of pbias objfun which is unbounded
+    """
+    with pytest.raises(SystemExit) as pytest_wrapped_e:
+        _ = get_objfun_minimize_or_maximize(obj_func="pbias")
     assert pytest_wrapped_e.type == SystemExit

-    # Test for unknown objective func
-    maximize = get_objective_function(obj_func="bias_fake")
+
+def test_maximizer_objfun_failure_modes_volume_error():
+    """Test for maximize-minimize failure mode:
+    Use of volume_error objfun which is unbounded
+    """
+    with pytest.raises(SystemExit) as pytest_wrapped_e:
+        _ = get_objfun_minimize_or_maximize(obj_func="volume_error")
     assert pytest_wrapped_e.type == SystemExit
-    assert objfun is None
-    assert maximize is None

+
+def test_maximizer_objfun_failure_modes_unknown_metric():
+    """Test for maximize-minimize failure mode:
+    Use of unknown objfun
+    """
+    with pytest.raises(SystemExit) as pytest_wrapped_e:
+        _ = get_objfun_minimize_or_maximize(obj_func="unknown_of")
+    assert pytest_wrapped_e.type == SystemExit
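All of the rewritten tests lean on the same pattern: the call under test runs inside pytest.raises(...) used as a context manager, and the captured ExceptionInfo is inspected only after the block exits. A minimal, self-contained sketch of that pattern follows; the exit_if_negative helper is hypothetical and not part of this test suite.

    import sys

    import pytest


    def exit_if_negative(value):
        # Hypothetical helper: mirrors the behaviour the tests above expect
        # from the library, i.e. invalid input ends in sys.exit(), which
        # raises SystemExit.
        if value < 0:
            sys.exit("negative values are not allowed")
        return value


    def test_exit_if_negative_fails():
        # Same structure as the refactored tests: trigger the error inside
        # the context manager, then check the captured exception afterwards.
        with pytest.raises(SystemExit) as pytest_wrapped_e:
            _ = exit_if_negative(-1)
        assert pytest_wrapped_e.type == SystemExit

Keeping one failure mode per test function, as in the diff above, also means a single failing check can no longer mask the assertions that follow it.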