diff --git a/ads/aqua/config/config.py b/ads/aqua/config/config.py
index c56d0c3f0..1cabc203c 100644
--- a/ads/aqua/config/config.py
+++ b/ads/aqua/config/config.py
@@ -12,7 +12,7 @@
 DEFAULT_EVALUATION_CONTAINER = "odsc-llm-evaluate"
 
 
-def evaluation_service_config(
+def get_evaluation_service_config(
     container: Optional[str] = DEFAULT_EVALUATION_CONTAINER,
 ) -> EvaluationServiceConfig:
     """
diff --git a/ads/aqua/evaluation/evaluation.py b/ads/aqua/evaluation/evaluation.py
index 3bdaf49dd..7f7349beb 100644
--- a/ads/aqua/evaluation/evaluation.py
+++ b/ads/aqua/evaluation/evaluation.py
@@ -45,7 +45,7 @@
     is_valid_ocid,
     upload_local_to_os,
 )
-from ads.aqua.config.config import evaluation_service_config
+from ads.aqua.config.config import get_evaluation_service_config
 from ads.aqua.config.evaluation.evaluation_service_config import EvaluationServiceConfig
 from ads.aqua.constants import (
     CONSOLE_LINK_RESOURCE_TYPE_MAPPING,
@@ -176,7 +176,7 @@ def create(
         # The model to evaluate
         evaluation_source = None
         # The evaluation service config
-        evaluation_config: EvaluationServiceConfig = evaluation_service_config()
+        evaluation_config: EvaluationServiceConfig = get_evaluation_service_config()
         # The evaluation inference configuration. The inference configuration will be extracted
         # based on the inferencing container family.
         eval_inference_configuration: Dict = {}
@@ -931,7 +931,7 @@ def get_status(self, eval_id: str) -> dict:
     def get_supported_metrics(self) -> dict:
         """Gets a list of supported metrics for evaluation."""
         return [
-            item.to_dict() for item in evaluation_service_config().ui_config.metrics
+            item.to_dict() for item in get_evaluation_service_config().ui_config.metrics
         ]
 
     @telemetry(entry_point="plugin=evaluation&action=load_metrics", name="aqua")
@@ -1218,7 +1218,7 @@ def load_evaluation_config(self, container: Optional[str] = None) -> Dict:
         """Loads evaluation config."""
 
         # retrieve the evaluation config by container family name
-        evaluation_config = evaluation_service_config(container)
+        evaluation_config = get_evaluation_service_config(container)
 
         # convert the new config representation to the old one
         return {
diff --git a/tests/unitary/with_extras/aqua/test_config.py b/tests/unitary/with_extras/aqua/test_config.py
index 381b03116..4994fabe7 100644
--- a/tests/unitary/with_extras/aqua/test_config.py
+++ b/tests/unitary/with_extras/aqua/test_config.py
@@ -7,7 +7,7 @@
 from unittest.mock import patch
 
 from ads.aqua.common.entities import ContainerSpec
-from ads.aqua.config.config import evaluation_service_config
+from ads.aqua.config.config import get_evaluation_service_config
 
 
 class TestConfig:
@@ -32,7 +32,7 @@ def test_evaluation_service_config(self, mock_get_container_config):
 
         mock_get_container_config.return_value = expected_result
 
-        test_result = evaluation_service_config(container="test_container")
+        test_result = get_evaluation_service_config(container="test_container")
         assert (
             test_result.to_dict()
             == expected_result[ContainerSpec.CONTAINER_SPEC]["test_container"]
diff --git a/tests/unitary/with_extras/aqua/test_evaluation.py b/tests/unitary/with_extras/aqua/test_evaluation.py
index 1d39e21b2..0a64732f7 100644
--- a/tests/unitary/with_extras/aqua/test_evaluation.py
+++ b/tests/unitary/with_extras/aqua/test_evaluation.py
@@ -426,6 +426,7 @@ def assert_payload(self, response, response_type):
                 continue
             assert rdict.get(attr), f"{attr} is empty"
 
+    @patch("ads.aqua.evaluation.evaluation.get_evaluation_service_config")
     @patch.object(Job, "run")
     @patch("ads.jobs.ads_job.Job.name", new_callable=PropertyMock)
     @patch("ads.jobs.ads_job.Job.id", new_callable=PropertyMock)
@@ -444,6 +445,7 @@ def test_create_evaluation(
         mock_job_id,
         mock_job_name,
         mock_job_run,
+        mock_get_evaluation_service_config,
     ):
         foundation_model = MagicMock()
         foundation_model.display_name = "test_foundation_model"
@@ -473,6 +475,8 @@
         evaluation_job_run.lifecycle_state = "IN_PROGRESS"
         mock_job_run.return_value = evaluation_job_run
 
+        mock_get_evaluation_service_config.return_value = EvaluationServiceConfig()
+
         self.app.ds_client.update_model = MagicMock()
         self.app.ds_client.update_model_provenance = MagicMock()
 
@@ -883,8 +887,8 @@ def test_extract_job_lifecycle_details(self, input, expect_output):
         msg = self.app._extract_job_lifecycle_details(input)
         assert msg == expect_output, msg
 
-    @patch("ads.aqua.evaluation.evaluation.evaluation_service_config")
-    def test_get_supported_metrics(self, mock_evaluation_service_config):
+    @patch("ads.aqua.evaluation.evaluation.get_evaluation_service_config")
+    def test_get_supported_metrics(self, mock_get_evaluation_service_config):
         """
         Tests getting a list of supported metrics for evaluation.
         """
@@ -905,7 +909,7 @@ def test_get_supported_metrics(self, mock_evaluation_service_config):
                 ]
             )
         )
-        mock_evaluation_service_config.return_value = test_evaluation_service_config
+        mock_get_evaluation_service_config.return_value = test_evaluation_service_config
         response = self.app.get_supported_metrics()
         assert isinstance(response, list)
         assert len(response) == len(test_evaluation_service_config.ui_config.metrics)
@@ -913,8 +917,8 @@ def test_get_supported_metrics(self, mock_evaluation_service_config):
             item.to_dict() for item in test_evaluation_service_config.ui_config.metrics
         ]
 
-    @patch("ads.aqua.evaluation.evaluation.evaluation_service_config")
-    def test_load_evaluation_config(self, mock_evaluation_service_config):
+    @patch("ads.aqua.evaluation.evaluation.get_evaluation_service_config")
+    def test_load_evaluation_config(self, mock_get_evaluation_service_config):
         """
         Tests loading default config for evaluation.
         This method currently hardcoded the return value.
@@ -952,7 +956,7 @@ def test_load_evaluation_config(self, mock_evaluation_service_config):
                 ],
             )
         )
-        mock_evaluation_service_config.return_value = test_evaluation_service_config
+        mock_get_evaluation_service_config.return_value = test_evaluation_service_config
 
         expected_result = {
             "model_params": {