Skip to content

Commit

Permalink
support input tags for evaluation
Browse files — browse the repository at this point in the history
  • Loading branch information
VipulMascarenhas committed Dec 9, 2024
1 parent 13053b1 commit 6c7a72f
Show file tree
Hide file tree
Showing 3 changed files with 53 additions and 11 deletions.
6 changes: 6 additions & 0 deletions ads/aqua/evaluation/entities.py
Original file line number Diff line number Diff line change
Expand Up @@ -64,6 +64,10 @@ class CreateAquaEvaluationDetails(Serializable):
The metrics for the evaluation.
force_overwrite: (bool, optional). Defaults to `False`.
Whether to force overwrite the existing file in object storage.
freeform_tags: (dict, optional)
Freeform tags for the evaluation model
defined_tags: (dict, optional)
Defined tags for the evaluation model
"""

evaluation_source_id: str
Expand All @@ -85,6 +89,8 @@ class CreateAquaEvaluationDetails(Serializable):
log_id: Optional[str] = None
metrics: Optional[List[Dict[str, Any]]] = None
force_overwrite: Optional[bool] = False
freeform_tags: Optional[dict] = None
defined_tags: Optional[dict] = None

class Config:
extra = "ignore"
Expand Down
41 changes: 34 additions & 7 deletions ads/aqua/evaluation/evaluation.py
Original file line number Diff line number Diff line change
Expand Up @@ -297,6 +297,10 @@ def create(
evaluation_mvs_freeform_tags = {
Tags.AQUA_EVALUATION: Tags.AQUA_EVALUATION,
}
evaluation_mvs_freeform_tags = {
**evaluation_mvs_freeform_tags,
**(create_aqua_evaluation_details.freeform_tags or {}),
}

model_version_set = (
ModelVersionSet()
Expand All @@ -307,6 +311,9 @@ def create(
create_aqua_evaluation_details.experiment_description
)
.with_freeform_tags(**evaluation_mvs_freeform_tags)
.with_defined_tags(
**(create_aqua_evaluation_details.defined_tags or {})
)
# TODO: decide what parameters will be needed
.create(**kwargs)
)
Expand Down Expand Up @@ -369,6 +376,10 @@ def create(
Tags.AQUA_EVALUATION: Tags.AQUA_EVALUATION,
Tags.AQUA_EVALUATION_MODEL_ID: evaluation_model.id,
}
evaluation_job_freeform_tags = {
**evaluation_job_freeform_tags,
**(create_aqua_evaluation_details.freeform_tags or {}),
}

evaluation_job = Job(name=evaluation_model.display_name).with_infrastructure(
DataScienceJob()
Expand All @@ -379,6 +390,7 @@ def create(
.with_shape_name(create_aqua_evaluation_details.shape_name)
.with_block_storage_size(create_aqua_evaluation_details.block_storage_size)
.with_freeform_tag(**evaluation_job_freeform_tags)
.with_defined_tag(**(create_aqua_evaluation_details.defined_tags or {}))
)
if (
create_aqua_evaluation_details.memory_in_gbs
Expand Down Expand Up @@ -425,6 +437,7 @@ def create(
evaluation_job_run = evaluation_job.run(
name=evaluation_model.display_name,
freeform_tags=evaluation_job_freeform_tags,
defined_tags=(create_aqua_evaluation_details.defined_tags or {}),
wait=False,
)
logger.debug(
Expand All @@ -444,13 +457,23 @@ def create(
for metadata in evaluation_model_custom_metadata.to_dict()["data"]
]

evaluation_model_freeform_tags = {
Tags.AQUA_EVALUATION: Tags.AQUA_EVALUATION,
}
evaluation_model_freeform_tags = {
**evaluation_model_freeform_tags,
**(create_aqua_evaluation_details.freeform_tags or {}),
}
evaluation_model_defined_tags = (
create_aqua_evaluation_details.defined_tags or {}
)

self.ds_client.update_model(
model_id=evaluation_model.id,
update_model_details=UpdateModelDetails(
custom_metadata_list=updated_custom_metadata_list,
freeform_tags={
Tags.AQUA_EVALUATION: Tags.AQUA_EVALUATION,
},
freeform_tags=evaluation_model_freeform_tags,
defined_tags=evaluation_model_defined_tags,
),
)

Expand Down Expand Up @@ -520,10 +543,14 @@ def create(
),
),
tags={
"aqua_evaluation": Tags.AQUA_EVALUATION,
"evaluation_job_id": evaluation_job.id,
"evaluation_source": create_aqua_evaluation_details.evaluation_source_id,
"evaluation_experiment_id": experiment_model_version_set_id,
**{
"aqua_evaluation": Tags.AQUA_EVALUATION,
"evaluation_job_id": evaluation_job.id,
"evaluation_source": create_aqua_evaluation_details.evaluation_source_id,
"evaluation_experiment_id": experiment_model_version_set_id,
},
**evaluation_model_freeform_tags,
**evaluation_model_defined_tags,
},
parameters=AquaEvalParams(),
)
Expand Down
17 changes: 13 additions & 4 deletions tests/unitary/with_extras/aqua/test_evaluation.py
Original file line number Diff line number Diff line change
Expand Up @@ -475,6 +475,9 @@ def test_create_evaluation(
self.app.ds_client.update_model = MagicMock()
self.app.ds_client.update_model_provenance = MagicMock()

eval_model_freeform_tags = {"ftag1": "fvalue1", "ftag2": "fvalue2"}
eval_model_defined_tags = {"dtag1": "dvalue1", "dtag2": "dvalue2"}

create_aqua_evaluation_details = dict(
evaluation_source_id="ocid1.datasciencemodel.oc1.iad.<OCID>",
evaluation_name="test_evaluation_name",
Expand All @@ -486,6 +489,8 @@ def test_create_evaluation(
experiment_name="test_experiment_name",
memory_in_gbs=1,
ocpus=1,
freeform_tags=eval_model_freeform_tags,
defined_tags=eval_model_defined_tags,
)
aqua_evaluation_summary = self.app.create(**create_aqua_evaluation_details)

Expand Down Expand Up @@ -516,10 +521,14 @@ def test_create_evaluation(
"url": f"https://cloud.oracle.com/data-science/models/ocid1.datasciencemodel.oc1.iad.<OCID>?region={self.app.region}",
},
"tags": {
"aqua_evaluation": "aqua_evaluation",
"evaluation_experiment_id": f"{experiment.id}",
"evaluation_job_id": f"{mock_job_id.return_value}",
"evaluation_source": "ocid1.datasciencemodel.oc1.iad.<OCID>",
**{
"aqua_evaluation": "aqua_evaluation",
"evaluation_experiment_id": f"{experiment.id}",
"evaluation_job_id": f"{mock_job_id.return_value}",
"evaluation_source": "ocid1.datasciencemodel.oc1.iad.<OCID>",
},
**eval_model_freeform_tags,
**eval_model_defined_tags,
},
"time_created": f"{oci_dsc_model.time_created}",
}
Expand Down

0 comments on commit 6c7a72f

Please sign in to comment.