Skip to content

Commit

Permalink
added tests for coveralls
Browse files Browse the repository at this point in the history
  • Loading branch information
richardarsenault committed Dec 29, 2023
1 parent 2229e2d commit 86119ff
Show file tree
Hide file tree
Showing 2 changed files with 175 additions and 3 deletions.
143 changes: 140 additions & 3 deletions tests/test_calibration.py
Original file line number Diff line number Diff line change
@@ -1,16 +1,17 @@
"""Test suite for the calibration algorithm in calibration.py."""

# Also tests the dummy model implementation.

import numpy as np
import pytest

from xhydro.modelling.calibration import perform_calibration
from xhydro.modelling.hydrological_modelling import dummy_model
from xhydro.modelling.obj_funcs import get_objective_function
from xhydro.modelling.obj_funcs import get_objective_function, transform_flows


def test_spotpy_calibration():
"""Make sure the calibration works for a few test cases."""
"""Make sure the calibration works under possible test cases."""

bounds_low = np.array([0, 0, 0])
bounds_high = np.array([10, 10, 10])

Expand Down Expand Up @@ -51,3 +52,139 @@ def test_spotpy_calibration():
model_config["parameters"] = [5, 5, 5]
Qsim = dummy_model(model_config)
assert Qsim[3] == 3500.00

# Also test to ensure SCEUA and take_minimize is required.
best_parameters_sceua, best_simulation, best_objfun = perform_calibration(
model_config,
"mae",
bounds_low=bounds_low,
bounds_high=bounds_high,
evaluations=10,
algorithm="SCEUA",
)

assert len(best_parameters_sceua) == len(bounds_high)

# Also test to ensure SCEUA and take_minimize is required.
best_parameters_negative, best_simulation, best_objfun = perform_calibration(
model_config,
"nse",
bounds_low=bounds_low,
bounds_high=bounds_high,
evaluations=10,
algorithm="SCEUA",
)
assert len(best_parameters_negative) == len(bounds_high)

# Test to see if transform works
best_parameters_transform, best_simulation, best_objfun = perform_calibration(
model_config,
"nse",
bounds_low=bounds_low,
bounds_high=bounds_high,
evaluations=10,
algorithm="SCEUA",
transform="inv",
epsilon=0.01,
)
assert len(best_parameters_transform) == len(bounds_high)


def test_calibration_failures():
    """Test the calibration algorithm failure modes.

    Each scenario feeds invalid inputs to ``perform_calibration`` and
    expects it to abort with ``SystemExit``.
    """
    bounds_low = np.array([0, 0, 0])
    bounds_high = np.array([10, 10, 10])
    model_config = {
        "precip": np.array([10, 11, 12, 13, 14, 15]),
        "temperature": np.array([10, 3, -5, 1, 15, 0]),
        "Qobs": np.array([120, 130, 140, 150, 160, 170]),
        "drainage_area": np.array([10]),
        "model_name": "Dummy",
    }

    # Test Qobs different length than Qsim.
    # NOTE: ``dict.update`` returns None, so it must not be passed inline as
    # the config; build a modified copy instead, which also keeps the
    # original 6-element Qobs intact for the scenarios below.
    with pytest.raises(SystemExit):
        perform_calibration(
            {**model_config, "Qobs": np.array([100, 100, 100])},
            "nse",
            bounds_low=bounds_low,
            bounds_high=bounds_high,
            evaluations=1000,
            algorithm="OTHER",
        )

    # Test mask not 1 or 0
    mask = np.array([0, 0, 0, 0.5, 1, 1])
    with pytest.raises(SystemExit):
        perform_calibration(
            model_config,
            "nse",
            bounds_low=bounds_low,
            bounds_high=bounds_high,
            evaluations=1000,
            algorithm="DDS",
            mask=mask,
        )

    # Test not same length in mask as in the flow series
    mask = np.array([0, 0, 0, 1, 1])
    with pytest.raises(SystemExit):
        perform_calibration(
            model_config,
            "nse",
            bounds_low=bounds_low,
            bounds_high=bounds_high,
            evaluations=1000,
            algorithm="DDS",
            mask=mask,
        )

    # Test that an unknown objective function name is caught
    mask = np.array([0, 0, 0, 0, 1, 1])
    with pytest.raises(SystemExit):
        perform_calibration(
            model_config,
            "nse_fake",
            bounds_low=bounds_low,
            bounds_high=bounds_high,
            evaluations=1000,
            algorithm="DDS",
            mask=mask,
        )

    # Test objective function that cannot be minimized (bias can be made
    # arbitrarily negative, so minimizing it is rejected)
    with pytest.raises(SystemExit):
        perform_calibration(
            model_config,
            "bias",
            bounds_low=bounds_low,
            bounds_high=bounds_high,
            evaluations=1000,
            algorithm="DDS",
            mask=mask,
        )


def test_transform():
    """Test the flow transformer used by the objective functions.

    Expected values assume the ``epsilon`` argument is scaled by the mean
    of Qobs before being added (e.g. inv: 1 / (10 + 0.01 * 5) = 0.0995024).
    """
    Qsim = np.array([10, 10, 10])
    Qobs = np.array([5, 5, 5])

    # Inverse transform, with epsilon to avoid division by zero
    Qsim_r, Qobs_r = transform_flows(Qsim, Qobs, transform="inv", epsilon=0.01)
    np.testing.assert_array_almost_equal(Qsim_r[1], 0.0995024, 6)
    np.testing.assert_array_almost_equal(Qobs_r[1], 0.1980198, 6)

    # Square-root transform (no epsilon needed)
    Qsim_r, Qobs_r = transform_flows(Qsim, Qobs, transform="sqrt")
    np.testing.assert_array_almost_equal(Qsim_r[1], 3.1622776, 6)
    np.testing.assert_array_almost_equal(Qobs_r[1], 2.2360679, 6)

    # Log transform, with epsilon to avoid log(0)
    Qsim_r, Qobs_r = transform_flows(Qsim, Qobs, transform="log", epsilon=0.01)
    np.testing.assert_array_almost_equal(Qsim_r[1], 2.3075726, 6)
    np.testing.assert_array_almost_equal(Qobs_r[1], 1.6193882, 6)

    # Test that an unknown transform name exits.
    # (Fixed: the original unpacked into ``Qobs_r, Qobs_r``, silently
    # discarding Qsim_r, and its comment was copy-pasted from another test.)
    with pytest.raises(SystemExit) as pytest_wrapped_e:
        Qsim_r, Qobs_r = transform_flows(Qsim, Qobs, transform="a", epsilon=0.01)
    assert pytest_wrapped_e.type == SystemExit
35 changes: 35 additions & 0 deletions tests/test_hydrological_modelling.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,35 @@
"""Test suite for hydrological modelling in hydrological_modelling.py"""
import unittest

import numpy as np
import pytest

from xhydro.modelling.hydrological_modelling import hydrological_model_selector


def test_hydrological_modelling():
    """Test the hydrological models as they become online."""
    # Configuration for the dummy model; its fourth simulated value is the
    # known constant checked below.
    config = dict(
        precip=np.array([10, 11, 12, 13, 14, 15]),
        temperature=np.array([10, 3, -5, 1, 15, 0]),
        Qobs=np.array([120, 130, 140, 150, 160, 170]),
        drainage_area=np.array([10]),
        model_name="Dummy",
        parameters=np.array([5, 5, 5]),
    )

    # Dummy model should reproduce the expected simulated flow.
    simulated = hydrological_model_selector(config)
    assert simulated[3] == 3500.00

    # Placeholder model names return a sentinel value of 0 until the
    # corresponding model is implemented.
    config["model_name"] = "ADD_OTHER_HERE"
    simulated = hydrological_model_selector(config)
    assert simulated == 0


@pytest.mark.xfail(raises=NotImplementedError)
def test_import_unknown_model():
    """An unknown model name must raise NotImplementedError.

    Fixed: the original function was named ``import_unknown_model`` without
    the ``test_`` prefix, so pytest never collected or ran it.
    """
    model_config = {"model_name": "fake_model"}
    Qsim = hydrological_model_selector(model_config)
    assert Qsim is None  # unreachable: the call above is expected to raise

0 comments on commit 86119ff

Please sign in to comment.