From dcf3dd272eac121fe72f718c0068c664d734d267 Mon Sep 17 00:00:00 2001 From: John Chilton Date: Mon, 18 Nov 2024 12:08:15 -0500 Subject: [PATCH 01/11] Typing of tool request expansion stuff. --- .../dataset_collections/subcollections.py | 20 ++++++++++++++-- lib/galaxy/tools/parameters/meta.py | 24 +++++++++++++++---- 2 files changed, 37 insertions(+), 7 deletions(-) diff --git a/lib/galaxy/model/dataset_collections/subcollections.py b/lib/galaxy/model/dataset_collections/subcollections.py index af6c2a397326..fc799b1711ad 100644 --- a/lib/galaxy/model/dataset_collections/subcollections.py +++ b/lib/galaxy/model/dataset_collections/subcollections.py @@ -1,12 +1,28 @@ +from typing import ( + List, + TYPE_CHECKING, +) + from galaxy import exceptions +if TYPE_CHECKING: + from galaxy.model import ( + DatasetCollection, + DatasetCollectionElement, + HistoryDatasetCollectionAssociation, + ) + -def split_dataset_collection_instance(dataset_collection_instance, collection_type): +def split_dataset_collection_instance( + dataset_collection_instance: "HistoryDatasetCollectionAssociation", collection_type: str +) -> List["DatasetCollectionElement"]: """Split up collection into collection.""" return _split_dataset_collection(dataset_collection_instance.collection, collection_type) -def _split_dataset_collection(dataset_collection, collection_type): +def _split_dataset_collection( + dataset_collection: "DatasetCollection", collection_type: str +) -> List["DatasetCollectionElement"]: this_collection_type = dataset_collection.collection_type if not this_collection_type.endswith(collection_type) or this_collection_type == collection_type: raise exceptions.MessageException("Cannot split collection in desired fashion.") diff --git a/lib/galaxy/tools/parameters/meta.py b/lib/galaxy/tools/parameters/meta.py index b74df54fa269..5865a6698110 100644 --- a/lib/galaxy/tools/parameters/meta.py +++ b/lib/galaxy/tools/parameters/meta.py @@ -8,13 +8,20 @@ List, Optional, Tuple, + Union, ) from galaxy import ( exceptions, util, ) -from galaxy.model import HistoryDatasetCollectionAssociation +from galaxy.model import ( + DatasetCollectionElement, + DatasetInstance, + HistoryDatasetAssociation, + HistoryDatasetCollectionAssociation, + LibraryDatasetDatasetAssociation, +) from galaxy.model.dataset_collections import ( matching, subcollections, @@ -327,7 +334,12 @@ def visitor(input, value, prefix, prefixed_name, prefixed_label, error, **kwargs return (single_inputs_nested, matched_multi_inputs, multiplied_multi_inputs) -def __expand_collection_parameter(trans, input_key, incoming_val, collections_to_match, linked=False): +CollectionExpansionListT = Union[List[DatasetCollectionElement], List[DatasetInstance]] + + +def __expand_collection_parameter( + trans, input_key: str, incoming_val, collections_to_match: "matching.CollectionsToMatch", linked=False +) -> CollectionExpansionListT: # If subcollectin multirun of data_collection param - value will # be "hdca_id|subcollection_type" else it will just be hdca_id if "|" in incoming_val: @@ -348,10 +360,12 @@ def __expand_collection_parameter(trans, input_key, incoming_val, collections_to raise exceptions.ToolInputsNotReadyException("An input collection is not populated.") collections_to_match.add(input_key, hdc, subcollection_type=subcollection_type, linked=linked) if subcollection_type is not None: - subcollection_elements = subcollections.split_dataset_collection_instance(hdc, subcollection_type) + subcollection_elements: List[DatasetCollectionElement] = 
subcollections.split_dataset_collection_instance( + hdc, subcollection_type + ) return subcollection_elements else: - hdas = [] + hdas: List[DatasetInstance] = [] for element in hdc.collection.dataset_elements: hda = element.dataset_instance hda.element_identifier = element.element_identifier @@ -359,7 +373,7 @@ def __expand_collection_parameter(trans, input_key, incoming_val, collections_to return hdas -def __collection_multirun_parameter(value): +def __collection_multirun_parameter(value: Dict[str, Any]) -> bool: is_batch = value.get("batch", False) if not is_batch: return False From 9051a757d24efe89a870eb07aa856fb929200ca9 Mon Sep 17 00:00:00 2001 From: John Chilton Date: Thu, 14 Nov 2024 10:42:57 -0500 Subject: [PATCH 02/11] Fix unit test names. --- test/unit/tool_util/test_parameter_convert.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/test/unit/tool_util/test_parameter_convert.py b/test/unit/tool_util/test_parameter_convert.py index 7f5deff41e85..38efa9105aed 100644 --- a/test/unit/tool_util/test_parameter_convert.py +++ b/test/unit/tool_util/test_parameter_convert.py @@ -33,7 +33,7 @@ } -def test_encode_data(): +def test_decode_data(): tool_source = tool_source_for("parameters/gx_data") bundle = input_models_for_tool_source(tool_source) request_state = RequestToolState({"parameter": {"src": "hda", "id": EXAMPLE_ID_1_ENCODED}}) @@ -53,7 +53,7 @@ def test_encode_collection(): assert decoded_state.input_state["parameter"]["id"] == EXAMPLE_ID_1 -def test_encode_repeat(): +def test_decode_repeat(): tool_source = tool_source_for("parameters/gx_repeat_data") bundle = input_models_for_tool_source(tool_source) request_state = RequestToolState({"parameter": [{"data_parameter": {"src": "hda", "id": EXAMPLE_ID_1_ENCODED}}]}) @@ -63,7 +63,7 @@ def test_encode_repeat(): assert decoded_state.input_state["parameter"][0]["data_parameter"]["id"] == EXAMPLE_ID_1 -def test_encode_section(): +def test_decode_section(): tool_source = tool_source_for("parameters/gx_section_data") bundle = input_models_for_tool_source(tool_source) request_state = RequestToolState({"parameter": {"data_parameter": {"src": "hda", "id": EXAMPLE_ID_1_ENCODED}}}) @@ -73,7 +73,7 @@ def test_encode_section(): assert decoded_state.input_state["parameter"]["data_parameter"]["id"] == EXAMPLE_ID_1 -def test_encode_conditional(): +def test_decode_conditional(): tool_source = tool_source_for("identifier_in_conditional") bundle = input_models_for_tool_source(tool_source) request_state = RequestToolState( From e6fe4ae251632c788780413b693dc978ab30b32d Mon Sep 17 00:00:00 2001 From: John Chilton Date: Mon, 18 Nov 2024 15:49:11 -0500 Subject: [PATCH 03/11] Migration for tool request implicit collections. --- ...d7bf6ac02_tool_request_implicit_outputs.py | 60 +++++++++++++++++++ 1 file changed, 60 insertions(+) create mode 100644 lib/galaxy/model/migrations/alembic/versions_gxy/1d1d7bf6ac02_tool_request_implicit_outputs.py diff --git a/lib/galaxy/model/migrations/alembic/versions_gxy/1d1d7bf6ac02_tool_request_implicit_outputs.py b/lib/galaxy/model/migrations/alembic/versions_gxy/1d1d7bf6ac02_tool_request_implicit_outputs.py new file mode 100644 index 000000000000..0709c14ee653 --- /dev/null +++ b/lib/galaxy/model/migrations/alembic/versions_gxy/1d1d7bf6ac02_tool_request_implicit_outputs.py @@ -0,0 +1,60 @@ +"""Track tool request implicit output collections. 
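+
+This migration adds an association table linking a tool request to the
+implicit output collections created when the request maps a tool over
+collection inputs.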
+ +Revision ID: 1d1d7bf6ac02 +Revises: 75348cfb3715 +Create Date: 2024-11-18 15:39:42.900327 + +""" + +from sqlalchemy import ( + Column, + Integer, + String, +) + +from galaxy.model.migrations.util import ( + create_foreign_key, + create_table, + drop_table, + transaction, +) + +# revision identifiers, used by Alembic. +revision = "1d1d7bf6ac02" +down_revision = "75348cfb3715" +branch_labels = None +depends_on = None + +association_table_name = "tool_request_implicit_collection_association" + + +def upgrade(): + with transaction(): + create_table( + association_table_name, + Column("id", Integer, primary_key=True), + Column("tool_request_id", Integer, index=True), + Column("dataset_collection_id", Integer, index=True), + Column("output_name", String(255), nullable=False), + ) + + create_foreign_key( + "fk_trica_tri", + association_table_name, + "tool_request", + ["tool_request_id"], + ["id"], + ) + + create_foreign_key( + "fk_trica_dci", + association_table_name, + "history_dataset_collection_association", + ["dataset_collection_id"], + ["id"], + ) + + +def downgrade(): + with transaction(): + drop_table(association_table_name) From ea82a383e54269a3f18fe56b596d054cfd74fa2d Mon Sep 17 00:00:00 2001 From: John Chilton Date: Mon, 18 Nov 2024 12:09:41 -0500 Subject: [PATCH 04/11] Tool Request API... --- .github/workflows/framework_tools.yaml | 3 +- lib/galaxy/app.py | 9 +- lib/galaxy/celery/tasks.py | 19 +- lib/galaxy/managers/jobs.py | 134 ++++++++++- lib/galaxy/model/__init__.py | 24 ++ lib/galaxy/schema/jobs.py | 13 + lib/galaxy/schema/schema.py | 15 ++ lib/galaxy/schema/tasks.py | 13 + lib/galaxy/tool_util/parameters/convert.py | 28 ++- lib/galaxy/tool_util/parameters/models.py | 1 + lib/galaxy/tool_util/verify/_types.py | 11 +- lib/galaxy/tool_util/verify/interactor.py | 222 +++++++++++++++--- lib/galaxy/tool_util/verify/parse.py | 43 +++- lib/galaxy/tools/__init__.py | 155 +++++++++++- lib/galaxy/tools/_types.py | 6 + lib/galaxy/tools/execute.py | 129 +++++++++- lib/galaxy/tools/parameters/__init__.py | 184 +++++++++++++++ lib/galaxy/tools/parameters/basic.py | 3 +- lib/galaxy/tools/parameters/meta.py | 108 ++++++++- lib/galaxy/webapps/galaxy/api/histories.py | 12 + lib/galaxy/webapps/galaxy/api/jobs.py | 25 +- lib/galaxy/webapps/galaxy/api/tools.py | 85 ++++++- lib/galaxy/webapps/galaxy/services/base.py | 35 ++- .../webapps/galaxy/services/histories.py | 9 + lib/galaxy/webapps/galaxy/services/jobs.py | 134 ++++++++++- lib/galaxy/webapps/galaxy/services/tools.py | 79 ++++--- lib/galaxy_test/api/conftest.py | 2 +- lib/galaxy_test/api/test_tool_execute.py | 161 +++++++++---- lib/galaxy_test/api/test_tool_execution.py | 135 +++++++++++ lib/galaxy_test/base/populators.py | 130 ++++++++-- test/functional/test_toolbox_pytest.py | 10 +- test/unit/tool_util/test_parameter_convert.py | 30 ++- 32 files changed, 1767 insertions(+), 200 deletions(-) create mode 100644 lib/galaxy_test/api/test_tool_execution.py diff --git a/.github/workflows/framework_tools.yaml b/.github/workflows/framework_tools.yaml index a55dfa316488..3ae88c7a7eda 100644 --- a/.github/workflows/framework_tools.yaml +++ b/.github/workflows/framework_tools.yaml @@ -26,6 +26,7 @@ jobs: strategy: matrix: python-version: ['3.8'] + use-legacy-api: ['if_needed', 'always'] services: postgres: image: postgres:13 @@ -63,7 +64,7 @@ jobs: path: 'galaxy root/.venv' key: gxy-venv-${{ runner.os }}-${{ steps.full-python-version.outputs.version }}-${{ hashFiles('galaxy root/requirements.txt') }}-framework-tools - name: Run tests - run: 
./run_tests.sh --coverage --framework-tools + run: GALAXY_TEST_USE_LEGACY_TOOL_API="${{ matrix.use-legacy-api }}" ./run_tests.sh --coverage --framework-tools working-directory: 'galaxy root' - uses: codecov/codecov-action@v3 with: diff --git a/lib/galaxy/app.py b/lib/galaxy/app.py index 2076ef2599e8..e4baca865f02 100644 --- a/lib/galaxy/app.py +++ b/lib/galaxy/app.py @@ -674,6 +674,10 @@ def __init__(self, configure_logging=True, use_converters=True, use_display_appl self._register_singleton(Registry, self.datatypes_registry) galaxy.model.set_datatypes_registry(self.datatypes_registry) self.configure_sentry_client() + # Load dbkey / genome build manager + self._configure_genome_builds(data_table_name="__dbkeys__", load_old_style=True) + # Tool Data Tables + self._configure_tool_data_tables(from_shed_config=False) self._configure_tool_shed_registry() self._register_singleton(tool_shed_registry.Registry, self.tool_shed_registry) @@ -752,11 +756,6 @@ def __init__(self, **kwargs) -> None: ) self.api_keys_manager = self._register_singleton(ApiKeyManager) - # Tool Data Tables - self._configure_tool_data_tables(from_shed_config=False) - # Load dbkey / genome build manager - self._configure_genome_builds(data_table_name="__dbkeys__", load_old_style=True) - # Genomes self.genomes = self._register_singleton(Genomes) # Data providers registry. diff --git a/lib/galaxy/celery/tasks.py b/lib/galaxy/celery/tasks.py index 35b133fd5845..1dbc71408b6b 100644 --- a/lib/galaxy/celery/tasks.py +++ b/lib/galaxy/celery/tasks.py @@ -31,6 +31,7 @@ DatasetManager, ) from galaxy.managers.hdas import HDAManager +from galaxy.managers.jobs import JobSubmitter from galaxy.managers.lddas import LDDAManager from galaxy.managers.markdown_util import generate_branded_pdf from galaxy.managers.model_stores import ModelStoreManager @@ -57,6 +58,7 @@ MaterializeDatasetInstanceTaskRequest, PrepareDatasetCollectionDownload, PurgeDatasetsTaskRequest, + QueueJobs, SetupHistoryExportJob, WriteHistoryContentTo, WriteHistoryTo, @@ -78,8 +80,10 @@ def setup_data_table_manager(app): @lru_cache -def cached_create_tool_from_representation(app: MinimalManagerApp, raw_tool_source: str): - return create_tool_from_representation(app=app, raw_tool_source=raw_tool_source, tool_source_class="XmlToolSource") +def cached_create_tool_from_representation(app: MinimalManagerApp, raw_tool_source: str, tool_dir: str = ""): + return create_tool_from_representation( + app=app, raw_tool_source=raw_tool_source, tool_dir=tool_dir, tool_source_class="XmlToolSource" + ) @galaxy_task(action="recalculate a user's disk usage") @@ -336,6 +340,17 @@ def fetch_data( return abort_when_job_stops(_fetch_data, session=sa_session, job_id=job_id, setup_return=setup_return) +@galaxy_task(action="queuing up submitted jobs") +def queue_jobs(request: QueueJobs, app: MinimalManagerApp, job_submitter: JobSubmitter): + tool = cached_create_tool_from_representation( + app, request.tool_source.raw_tool_source, tool_dir=request.tool_source.tool_dir + ) + job_submitter.queue_jobs( + tool, + request, + ) + + @galaxy_task(ignore_result=True, action="setting up export history job") def export_history( model_store_manager: ModelStoreManager, diff --git a/lib/galaxy/managers/jobs.py b/lib/galaxy/managers/jobs.py index 1ef47ff34f46..bde5bfcdf735 100644 --- a/lib/galaxy/managers/jobs.py +++ b/lib/galaxy/managers/jobs.py @@ -1,5 +1,6 @@ import json import logging +from dataclasses import dataclass from datetime import ( date, datetime, @@ -11,6 +12,7 @@ Dict, List, Optional, + Tuple, Union, 
) @@ -50,14 +52,20 @@ ProvidesUserContext, ) from galaxy.managers.datasets import DatasetManager -from galaxy.managers.hdas import HDAManager +from galaxy.managers.hdas import ( + dereference_input, + HDAManager, +) +from galaxy.managers.histories import HistoryManager from galaxy.managers.lddas import LDDAManager +from galaxy.managers.users import UserManager from galaxy.model import ( ImplicitCollectionJobs, ImplicitCollectionJobsJobAssociation, Job, JobMetricNumeric, JobParameter, + ToolRequest, User, Workflow, WorkflowInvocation, @@ -75,8 +83,23 @@ JobIndexQueryPayload, JobIndexSortByEnum, ) +from galaxy.schema.tasks import ( + MaterializeDatasetInstanceTaskRequest, + QueueJobs, +) from galaxy.security.idencoding import IdEncodingHelper -from galaxy.structured_app import StructuredApp +from galaxy.structured_app import ( + MinimalManagerApp, + StructuredApp, +) +from galaxy.tool_util.parameters import ( + DataRequestInternalHda, + DataRequestUri, + dereference, + RequestInternalDereferencedToolState, + RequestInternalToolState, +) +from galaxy.tools import Tool from galaxy.tools._types import ( ToolStateDumpedToJsonInternalT, ToolStateJobInstancePopulatedT, @@ -92,6 +115,7 @@ parse_filters_structured, RawTextTerm, ) +from galaxy.work.context import WorkRequestContext log = logging.getLogger(__name__) @@ -144,6 +168,8 @@ def index_query(self, trans: ProvidesUserContext, payload: JobIndexQueryPayload) workflow_id = payload.workflow_id invocation_id = payload.invocation_id implicit_collection_jobs_id = payload.implicit_collection_jobs_id + tool_request_id = payload.tool_request_id + search = payload.search order_by = payload.order_by @@ -160,6 +186,7 @@ def build_and_apply_filters(stmt, objects, filter_func): def add_workflow_jobs(): wfi_step = select(WorkflowInvocationStep) + if workflow_id is not None: wfi_step = ( wfi_step.join(WorkflowInvocation).join(Workflow).where(Workflow.stored_workflow_id == workflow_id) @@ -174,6 +201,7 @@ def add_workflow_jobs(): ImplicitCollectionJobsJobAssociation.implicit_collection_jobs_id == wfi_step_sq.c.implicit_collection_jobs_id, ) + # Ensure the result is models, not tuples sq = stmt1.union(stmt2).subquery() # SQLite won't recognize Job.foo as a valid column for the ORDER BY clause due to the UNION clause, so we'll use the subquery `columns` collection (`sq.c`). 
@@ -251,6 +279,9 @@ def add_search_criteria(stmt): if history_id is not None: stmt = stmt.where(Job.history_id == history_id) + if tool_request_id is not None: + stmt = stmt.filter(model.Job.tool_request_id == tool_request_id) + order_by_columns = Job if workflow_id or invocation_id: stmt, order_by_columns = add_workflow_jobs() @@ -1250,3 +1281,102 @@ def get_jobs_to_check_at_startup(session: galaxy_scoped_session, track_jobs_in_d def get_job(session, *where_clauses): stmt = select(Job).where(*where_clauses).limit(1) return session.scalars(stmt).first() + + +@dataclass +class DereferencedDatasetPair: + hda: model.HistoryDatasetAssociation + request: DataRequestUri + + +class JobSubmitter: + def __init__( + self, + history_manager: HistoryManager, + user_manager: UserManager, + hda_manager: HDAManager, + app: MinimalManagerApp, + ): + self.history_manager = history_manager + self.user_manager = user_manager + self.hda_manager = hda_manager + self.app = app + + def materialize_request_for( + self, trans: WorkRequestContext, hda: model.HistoryDatasetAssociation + ) -> MaterializeDatasetInstanceTaskRequest: + return MaterializeDatasetInstanceTaskRequest( + user=trans.async_request_user, + history_id=trans.history.id, + source="hda", + content=hda.id, + ) + + def dereference( + self, trans: WorkRequestContext, tool: Tool, request: QueueJobs, tool_request: ToolRequest + ) -> Tuple[RequestInternalDereferencedToolState, List[DereferencedDatasetPair]]: + new_hdas: List[DereferencedDatasetPair] = [] + + def dereference_callback(data_request: DataRequestUri) -> DataRequestInternalHda: + # a deferred dataset corresponding to request + hda = dereference_input(trans, data_request) + new_hdas.append(DereferencedDatasetPair(hda, data_request)) + return DataRequestInternalHda(id=hda.id) + + tool_state = RequestInternalToolState(tool_request.request) + return dereference(tool_state, tool, dereference_callback), new_hdas + + def queue_jobs(self, tool: Tool, request: QueueJobs) -> None: + tool_request: ToolRequest = self._tool_request(request.tool_request_id) + sa_session = self.app.model.context + try: + request_context = self._context(tool_request, request) + target_history = request_context.history + use_cached_jobs = request.use_cached_jobs + rerun_remap_job_id = request.rerun_remap_job_id + tool_state: RequestInternalDereferencedToolState + new_hdas: List[DereferencedDatasetPair] + tool_state, new_hdas = self.dereference(request_context, tool, request, tool_request) + to_materialize_list: List[DereferencedDatasetPair] = [p for p in new_hdas if not p.request.deferred] + for to_materialize in to_materialize_list: + materialize_request = self.materialize_request_for(request_context, to_materialize.hda) + # API dataset materialization is immutable and produces new datasets + # here we just created the datasets - lets just materialize them in place + # and avoid extra and confusing input copies + self.hda_manager.materialize(materialize_request, in_place=True) + tool.handle_input_async( + request_context, + tool_request, + tool_state, + history=target_history, + use_cached_job=use_cached_jobs, + rerun_remap_job_id=rerun_remap_job_id, + ) + tool_request.state = ToolRequest.states.SUBMITTED + sa_session.add(tool_request) + with transaction(sa_session): + sa_session.commit() + except Exception as e: + log.exception("Problem here....") + tool_request.state = ToolRequest.states.FAILED + tool_request.state_message = str(e) + sa_session.add(tool_request) + with transaction(sa_session): + sa_session.commit() + + def 
_context(self, tool_request: ToolRequest, request: QueueJobs) -> WorkRequestContext: + user = self.user_manager.by_id(request.user.user_id) + target_history = tool_request.history + trans = WorkRequestContext( + self.app, + user, + history=target_history, + ) + return trans + + def _tool_request(self, tool_request_id: int) -> ToolRequest: + sa_session = self.app.model.context + tool_request: ToolRequest = cast(ToolRequest, sa_session.query(ToolRequest).get(tool_request_id)) + if tool_request is None: + raise Exception(f"Problem fetching request with ID {tool_request_id}") + return tool_request diff --git a/lib/galaxy/model/__init__.py b/lib/galaxy/model/__init__.py index bd5d59384efd..37eaa53f12e8 100644 --- a/lib/galaxy/model/__init__.py +++ b/lib/galaxy/model/__init__.py @@ -1356,6 +1356,27 @@ class ToolRequest(Base, Dictifiable, RepresentById): tool_source: Mapped["ToolSource"] = relationship() history: Mapped[Optional["History"]] = relationship(back_populates="tool_requests") + implicit_collections: Mapped[List["ToolRequestImplicitCollectionAssociation"]] = relationship( + back_populates="tool_request" + ) + + +class ToolRequestImplicitCollectionAssociation(Base, Dictifiable, RepresentById): + __tablename__ = "tool_request_implicit_collection_association" + + id: Mapped[int] = mapped_column(primary_key=True) + tool_request_id: Mapped[int] = mapped_column(ForeignKey("tool_request.id", name="fk_trica_tri"), index=True) + dataset_collection_id: Mapped[int] = mapped_column( + ForeignKey("history_dataset_collection_association.id", name="fk_trica_dci"), index=True + ) + output_name: Mapped[str] = mapped_column(String(255)) + + tool_request: Mapped["ToolRequest"] = relationship(back_populates="implicit_collections") + dataset_collection: Mapped["HistoryDatasetCollectionAssociation"] = relationship( + back_populates="tool_request_association", uselist=False + ) + + dict_collection_visible_keys = ["id", "tool_request_id", "dataset_collection_id", "output_name"] class DynamicTool(Base, Dictifiable, RepresentById): @@ -7046,6 +7067,9 @@ class HistoryDatasetCollectionAssociation( back_populates="dataset_collection", ) creating_job_associations: Mapped[List["JobToOutputDatasetCollectionAssociation"]] = relationship(viewonly=True) + tool_request_association: Mapped[Optional["ToolRequestImplicitCollectionAssociation"]] = relationship( + back_populates="dataset_collection" + ) dict_dbkeysandextensions_visible_keys = ["dbkeys", "extensions"] editable_keys = ("name", "deleted", "visible") diff --git a/lib/galaxy/schema/jobs.py b/lib/galaxy/schema/jobs.py index fbca9e281a2d..339f86ae22bf 100644 --- a/lib/galaxy/schema/jobs.py +++ b/lib/galaxy/schema/jobs.py @@ -82,6 +82,19 @@ class JobOutputAssociation(JobAssociation): ) +class JobOutputCollectionAssociation(Model): + name: str = Field( + default=..., + title="name", + description="Name of the job parameter.", + ) + dataset_collection_instance: EncodedDataItemSourceId = Field( + default=..., + title="dataset_collection_instance", + description="Reference to the associated item.", + ) + + class ReportJobErrorPayload(Model): dataset_id: DecodedDatabaseIdField = Field( default=..., diff --git a/lib/galaxy/schema/schema.py b/lib/galaxy/schema/schema.py index 3febee546896..3eeb82d9e360 100644 --- a/lib/galaxy/schema/schema.py +++ b/lib/galaxy/schema/schema.py @@ -1531,6 +1531,7 @@ class JobIndexQueryPayload(Model): workflow_id: Optional[DecodedDatabaseIdField] = None invocation_id: Optional[DecodedDatabaseIdField] = None implicit_collection_jobs_id: 
Optional[DecodedDatabaseIdField] = None + tool_request_id: Optional[DecodedDatabaseIdField] = None order_by: JobIndexSortByEnum = JobIndexSortByEnum.update_time search: Optional[str] = None limit: int = 500 @@ -3758,11 +3759,25 @@ class ToolRequestState(str, Enum): FAILED = "failed" +class ToolRequestOutputCollectionAssociation(Model): + name: str = Field( + default=..., + title="name", + description="Name of the job output.", + ) + dataset_collection_instance: EncodedDataItemSourceId = Field( + default=..., + title="dataset_collection_instance", + description="Reference to the associated item.", + ) + + class ToolRequestModel(Model): id: EncodedDatabaseIdField = ToolRequestIdField request: Dict[str, Any] state: ToolRequestState state_message: Optional[str] + implicit_collection_outputs: List[ToolRequestOutputCollectionAssociation] class AsyncFile(Model): diff --git a/lib/galaxy/schema/tasks.py b/lib/galaxy/schema/tasks.py index 313ec9f1dad0..31255d968268 100644 --- a/lib/galaxy/schema/tasks.py +++ b/lib/galaxy/schema/tasks.py @@ -119,3 +119,16 @@ class ComputeDatasetHashTaskRequest(Model): class PurgeDatasetsTaskRequest(Model): dataset_ids: List[int] + + +class ToolSource(Model): + raw_tool_source: str + tool_dir: str + + +class QueueJobs(Model): + tool_source: ToolSource + tool_request_id: int # links to request ("incoming") and history + user: RequestUser # TODO: test anonymous users through this submission path + use_cached_jobs: bool + rerun_remap_job_id: Optional[int] # link to a job to rerun & remap diff --git a/lib/galaxy/tool_util/parameters/convert.py b/lib/galaxy/tool_util/parameters/convert.py index 63dbb9ab58b9..981d2439390d 100644 --- a/lib/galaxy/tool_util/parameters/convert.py +++ b/lib/galaxy/tool_util/parameters/convert.py @@ -357,18 +357,27 @@ def encode_src_dict(src_dict: dict): else: return src_dict + def encode_element(element: dict): + if element.get("__class__") == "Batch": + encoded = element.copy() + values = encoded.pop("values") + encoded["values"] = list(map(encode_src_dict, values)) + return encoded + else: + return encode_src_dict(element) + def encode_callback(parameter: ToolParameterT, value: Any): if parameter.parameter_type == "gx_data": data_parameter = cast(DataParameterModel, parameter) if data_parameter.multiple: assert isinstance(value, list), str(value) - return list(map(encode_src_dict, value)) + return list(map(encode_element, value)) else: assert isinstance(value, dict), str(value) - return encode_src_dict(value) + return encode_element(value) elif parameter.parameter_type == "gx_data_collection": assert isinstance(value, dict), str(value) - return encode_src_dict(value) + return encode_element(value) else: return VISITOR_NO_REPLACEMENT @@ -385,6 +394,15 @@ def decode_src_dict(src_dict: dict): else: return src_dict + def decode_element(element: dict): + if element.get("__class__") == "Batch": + decoded = element.copy() + values = decoded.pop("values") + decoded["values"] = list(map(decode_src_dict, values)) + return decoded + else: + return decode_src_dict(element) + def decode_callback(parameter: ToolParameterT, value: Any): if parameter.parameter_type == "gx_data": if value is None: @@ -392,10 +410,10 @@ def decode_callback(parameter: ToolParameterT, value: Any): data_parameter = cast(DataParameterModel, parameter) if data_parameter.multiple: assert isinstance(value, list), str(value) - return list(map(decode_src_dict, value)) + return list(map(decode_element, value)) else: assert isinstance(value, dict), str(value) - return 
decode_src_dict(value) + return decode_element(value) elif parameter.parameter_type == "gx_data_collection": if value is None: return VISITOR_NO_REPLACEMENT diff --git a/lib/galaxy/tool_util/parameters/models.py b/lib/galaxy/tool_util/parameters/models.py index d0e7e6bbcb8f..0fc92d244dbb 100644 --- a/lib/galaxy/tool_util/parameters/models.py +++ b/lib/galaxy/tool_util/parameters/models.py @@ -118,6 +118,7 @@ def allow_batching(job_template: DynamicModelInformation, batch_type: Optional[T class BatchRequest(StrictModel): meta_class: Literal["Batch"] = Field(..., alias="__class__") values: List[batch_type] # type: ignore[valid-type] + linked: Optional[bool] = None # maybe True instead? request_type = union_type([job_py_type, BatchRequest]) diff --git a/lib/galaxy/tool_util/verify/_types.py b/lib/galaxy/tool_util/verify/_types.py index e5aa85f1ddb7..c532dab9aa69 100644 --- a/lib/galaxy/tool_util/verify/_types.py +++ b/lib/galaxy/tool_util/verify/_types.py @@ -19,10 +19,15 @@ ToolSourceTestOutputs, ) -# inputs that have been processed with parse.py and expanded out +# legacy inputs for working with POST /api/tools +# + inputs that have been processed with parse.py and expanded out ExpandedToolInputs = Dict[str, Any] -# ExpandedToolInputs where any model objects have been json-ified with to_dict() +# + ExpandedToolInputs where any model objects have been json-ified with to_dict() ExpandedToolInputsJsonified = Dict[str, Any] + +# modern inputs for working with POST /api/jobs* +RawTestToolRequest = Dict[str, Any] + ExtraFileInfoDictT = Dict[str, Any] RequiredFileTuple = Tuple[str, ExtraFileInfoDictT] RequiredFilesT = List[RequiredFileTuple] @@ -36,6 +41,8 @@ class ToolTestDescriptionDict(TypedDict): name: str test_index: int inputs: ExpandedToolInputsJsonified + request: NotRequired[Optional[Dict[str, Any]]] + request_schema: NotRequired[Optional[Dict[str, Any]]] outputs: ToolSourceTestOutputs output_collections: List[TestSourceTestOutputColllection] stdout: Optional[AssertionList] diff --git a/lib/galaxy/tool_util/verify/interactor.py b/lib/galaxy/tool_util/verify/interactor.py index ca186d823ebb..e177d32a6f64 100644 --- a/lib/galaxy/tool_util/verify/interactor.py +++ b/lib/galaxy/tool_util/verify/interactor.py @@ -35,8 +35,18 @@ ) from galaxy import util +from galaxy.tool_util.parameters import ( + DataCollectionRequest, + DataRequestHda, + encode_test, + input_models_from_json, + TestCaseToolState, + ToolParameterBundle, +) from galaxy.tool_util.parser.interface import ( AssertionList, + JsonTestCollectionDefDict, + JsonTestDatasetDefDict, TestCollectionDef, TestCollectionOutputDef, TestSourceTestOutputColllection, @@ -53,6 +63,7 @@ from ._types import ( ExpandedToolInputs, ExpandedToolInputsJsonified, + RawTestToolRequest, RequiredDataTablesT, RequiredFilesT, RequiredLocFileT, @@ -63,6 +74,9 @@ log = getLogger(__name__) +UseLegacyApiT = Literal["always", "never", "if_needed"] +DEFAULT_USE_LEGACY_API: UseLegacyApiT = "always" + # Off by default because it can pound the database pretty heavily # and result in sqlite errors on larger tests or larger numbers of # tests. 
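The CI matrix added earlier in this series exports GALAXY_TEST_USE_LEGACY_TOOL_API with the values "if_needed" and "always". A minimal sketch of how a test driver could map that environment variable onto this tri-state default; the helper name and the exact plumbing are assumptions for illustration, not part of this diff:

    import os
    from typing import cast

    def use_legacy_api_from_env(default: UseLegacyApiT = DEFAULT_USE_LEGACY_API) -> UseLegacyApiT:
        # expected values match UseLegacyApiT above: "always", "never", or "if_needed"
        value = os.environ.get("GALAXY_TEST_USE_LEGACY_TOOL_API", default)
        if value not in ("always", "never", "if_needed"):
            raise ValueError(f"Unrecognized GALAXY_TEST_USE_LEGACY_TOOL_API value: {value!r}")
        return cast("UseLegacyApiT", value)
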
@@ -102,6 +116,8 @@ def __getitem__(self, item): class ValidToolTestDict(TypedDict): inputs: ExpandedToolInputs + request: NotRequired[Optional[RawTestToolRequest]] + request_schema: NotRequired[Optional[Dict[str, Any]]] outputs: ToolSourceTestOutputs output_collections: List[TestSourceTestOutputColllection] stdout: NotRequired[AssertionList] @@ -148,7 +164,7 @@ def stage_data_in_history( # Upload any needed files upload_waits = [] - assert tool_id + assert tool_id, "Tool id not set" if UPLOAD_ASYNC: for test_data in all_test_data: @@ -236,6 +252,15 @@ def get_tests_summary(self): assert response.status_code == 200, f"Non 200 response from tool tests available API. [{response.content}]" return response.json() + def get_tool_inputs(self, tool_id: str, tool_version: Optional[str] = None) -> ToolParameterBundle: + url = f"tools/{tool_id}/inputs" + params = {"tool_version": tool_version} if tool_version else None + response = self._get(url, data=params) + assert response.status_code == 200, f"Non 200 response from tool inputs API. [{response.content}]" + raw_inputs_array = response.json() + tool_parameter_bundle = input_models_from_json(raw_inputs_array) + return tool_parameter_bundle + def get_tool_tests(self, tool_id: str, tool_version: Optional[str] = None) -> List[ToolTestDescriptionDict]: url = f"tools/{tool_id}/test_data" params = {"tool_version": tool_version} if tool_version else None @@ -366,9 +391,27 @@ def wait_for_content(): def wait_for_job(self, job_id: str, history_id: Optional[str] = None, maxseconds=DEFAULT_TOOL_TEST_WAIT) -> None: self.wait_for(lambda: self.__job_ready(job_id, history_id), maxseconds=maxseconds) + def wait_on_tool_request(self, tool_request_id: str): + def state(): + state_response = self._get(f"tool_requests/{tool_request_id}/state") + state_response.raise_for_status() + return state_response.json() + + def is_ready(): + is_complete = state() in ["submitted", "failed"] + return True if is_complete else None + + self.wait_for(is_ready, "waiting for tool request to submit") + return state() == "submitted" + + def get_tool_request(self, tool_request_id: str): + response_raw = self._get(f"tool_requests/{tool_request_id}") + response_raw.raise_for_status() + return response_raw.json() + def wait_for(self, func: Callable, what: str = "tool test run", **kwd) -> None: walltime_exceeded = int(kwd.get("maxseconds", DEFAULT_TOOL_TEST_WAIT)) - wait_on(func, what, walltime_exceeded) + return wait_on(func, what, walltime_exceeded) def get_job_stdio(self, job_id: str) -> Dict[str, Any]: return self.__get_job_stdio(job_id).json() @@ -564,8 +607,9 @@ def stage_data_async( files["files_0|file_data"] = file_content name = os.path.basename(name) tool_input["files_0|NAME"] = name + # upload1 will always be the legacy API... 
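+        # (data staging predates the tool request API and posts file payloads,
+        # which the /api/jobs submission path below does not accept)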
submit_response_object = self.__submit_tool( - history_id, "upload1", tool_input, extra_data={"type": "upload_dataset"}, files=files + history_id, "upload1", tool_input, extra_data={"type": "upload_dataset"}, files=files, use_legacy_api=True ) submit_response = ensure_tool_run_response_okay(submit_response_object, f"upload dataset {name}") assert ( @@ -590,39 +634,71 @@ def _ensure_valid_location_in(self, test_data: dict) -> Optional[str]: return location def run_tool( - self, testdef: "ToolTestDescription", history_id: str, resource_parameters: Optional[Dict[str, Any]] = None + self, + testdef: "ToolTestDescription", + history_id: str, + resource_parameters: Optional[Dict[str, Any]] = None, + use_legacy_api: UseLegacyApiT = DEFAULT_USE_LEGACY_API, ) -> RunToolResponse: # We need to handle the case where we've uploaded a valid compressed file since the upload # tool will have uncompressed it on the fly. resource_parameters = resource_parameters or {} - inputs_tree = testdef.inputs.copy() - for key, value in inputs_tree.items(): - values = [value] if not isinstance(value, list) else value - new_values = [] - for value in values: - if isinstance(value, TestCollectionDef): - hdca_id = self._create_collection(history_id, value) - new_values = [dict(src="hdca", id=hdca_id)] - elif value in self.uploads: - new_values.append(self.uploads[value]) - else: - new_values.append(value) - inputs_tree[key] = new_values + request = testdef.request + request_schema = testdef.request_schema + submit_with_legacy_api = use_legacy_api == "always" or (use_legacy_api == "if_needed" and request is None) + if submit_with_legacy_api: + inputs_tree = testdef.inputs.copy() + for key, value in inputs_tree.items(): + values = [value] if not isinstance(value, list) else value + new_values = [] + for value in values: + if isinstance(value, TestCollectionDef): + hdca_id = self._create_collection(history_id, value) + new_values = [dict(src="hdca", id=hdca_id)] + elif value in self.uploads: + new_values.append(self.uploads[value]) + else: + new_values.append(value) + inputs_tree[key] = new_values + + # HACK: Flatten single-value lists. Required when using expand_grouping + for key, value in inputs_tree.items(): + if isinstance(value, list) and len(value) == 1: + inputs_tree[key] = value[0] + else: + assert request is not None, "Request not set" + assert request_schema is not None, "Request schema not set" + parameters = request_schema["parameters"] + + def adapt_datasets(test_input: JsonTestDatasetDefDict) -> DataRequestHda: + # if path is not set it might be a composite file with a path, + # e.g. composite_shapefile + test_input_path = test_input.get("path", "") + return DataRequestHda(**self.uploads[test_input_path]) + + def adapt_collections(test_input: JsonTestCollectionDefDict) -> DataCollectionRequest: + test_collection_def = TestCollectionDef.from_dict(test_input) + hdca_id = self._create_collection(history_id, test_collection_def) + return DataCollectionRequest(src="hdca", id=hdca_id) + + test_case_state = TestCaseToolState(input_state=request) + inputs_tree = encode_test( + test_case_state, input_models_from_json(parameters), adapt_datasets, adapt_collections + ).input_state if resource_parameters: inputs_tree["__job_resource|__job_resource__select"] = "yes" for key, value in resource_parameters.items(): inputs_tree[f"__job_resource|{key}"] = value - # HACK: Flatten single-value lists. 
Required when using expand_grouping - for key, value in inputs_tree.items(): - if isinstance(value, list) and len(value) == 1: - inputs_tree[key] = value[0] - submit_response = None for _ in range(DEFAULT_TOOL_TEST_WAIT): submit_response = self.__submit_tool( - history_id, tool_id=testdef.tool_id, tool_input=inputs_tree, tool_version=testdef.tool_version + history_id, + tool_id=testdef.tool_id, + tool_input=inputs_tree, + tool_version=testdef.tool_version, + use_legacy_api=submit_with_legacy_api, ) if _are_tool_inputs_not_ready(submit_response): print("Tool inputs not ready yet") @@ -631,12 +707,38 @@ def run_tool( else: break submit_response_object = ensure_tool_run_response_okay(submit_response, "execute tool", inputs_tree) + if not submit_with_legacy_api: + tool_request_id = submit_response_object["tool_request_id"] + successful = self.wait_on_tool_request(tool_request_id) + if not successful: + request = self.get_tool_request(tool_request_id) or {} + raise RunToolException( + f"Tool request failure - state {request.get('state')}, message: {request.get('state_message')}", + inputs_tree, + ) + jobs = self.jobs_for_tool_request(tool_request_id) + outputs = OutputsDict() + output_collections = {} + if len(jobs) != 1: + raise Exception(f"Found incorrect number of jobs for tool request - was expecting a single job {jobs}") + assert len(jobs) == 1, jobs + job_id = jobs[0]["id"] + job_outputs = self.job_outputs(job_id) + for job_output in job_outputs: + if "dataset" in job_output: + outputs[job_output["name"]] = job_output["dataset"] + else: + output_collections[job_output["name"]] = job_output["dataset_collection_instance"] + else: + outputs = self.__dictify_outputs(submit_response_object) + output_collections = self.__dictify_output_collections(submit_response_object) + jobs = submit_response_object["jobs"] try: return RunToolResponse( inputs=inputs_tree, - outputs=self.__dictify_outputs(submit_response_object), - output_collections=self.__dictify_output_collections(submit_response_object), - jobs=submit_response_object["jobs"], + outputs=outputs, + output_collections=output_collections, + jobs=jobs, ) except KeyError: message = ( @@ -774,14 +876,24 @@ def format_for_summary(self, blob, empty_message, prefix="| "): contents = "\n".join(f"{prefix}{line.strip()}" for line in io.StringIO(blob).readlines() if line.rstrip("\n\r")) return contents or f"{prefix}*{empty_message}*" - def _dataset_provenance(self, history_id, id): + def _dataset_provenance(self, history_id: str, id: str): provenance = self._get(f"histories/{history_id}/contents/{id}/provenance").json() return provenance - def _dataset_info(self, history_id, id): + def _dataset_info(self, history_id: str, id: str): dataset_json = self._get(f"histories/{history_id}/contents/{id}").json() return dataset_json + def jobs_for_tool_request(self, tool_request_id: str) -> List[Dict[str, Any]]: + job_list_response = self._get("jobs", data={"tool_request_id": tool_request_id, "order_by": "create_time"}) + job_list_response.raise_for_status() + return job_list_response.json() + + def job_outputs(self, job_id: str) -> List[Dict[str, Any]]: + outputs = self._get(f"jobs/{job_id}/outputs") + outputs.raise_for_status() + return outputs.json() + def __contents(self, history_id): history_contents_response = self._get(f"histories/{history_id}/contents") history_contents_response.raise_for_status() @@ -798,12 +910,33 @@ def _state_ready(self, job_id: str, error_msg: str): ) return None - def __submit_tool(self, history_id, tool_id, tool_input, 
extra_data=None, files=None, tool_version=None): + def __submit_tool( + self, + history_id, + tool_id, + tool_input, + extra_data=None, + files=None, + tool_version=None, + use_legacy_api: bool = True, + ): extra_data = extra_data or {} - data = dict( - history_id=history_id, tool_id=tool_id, inputs=dumps(tool_input), tool_version=tool_version, **extra_data - ) - return self._post("tools", files=files, data=data) + if use_legacy_api: + data = dict( + history_id=history_id, + tool_id=tool_id, + inputs=dumps(tool_input), + tool_version=tool_version, + **extra_data, + ) + return self._post("tools", files=files, data=data) + else: + assert files is None + data = dict( + history_id=history_id, tool_id=tool_id, inputs=tool_input, tool_version=tool_version, **extra_data + ) + submit_tool_request_response = self._post("jobs", data=data, json=True) + return submit_tool_request_response def ensure_user_with_email(self, email, password=None): admin_key = self.master_api_key @@ -1315,6 +1448,7 @@ def verify_tool( register_job_data: Optional[JobDataCallbackT] = None, test_index: int = 0, tool_version: Optional[str] = None, + use_legacy_api: UseLegacyApiT = DEFAULT_USE_LEGACY_API, quiet: bool = False, test_history: Optional[str] = None, no_history_cleanup: bool = False, @@ -1331,11 +1465,7 @@ def verify_tool( if client_test_config is None: client_test_config = NullClientTestConfig() tool_test_dicts = _tool_test_dicts or galaxy_interactor.get_tool_tests(tool_id, tool_version=tool_version) - tool_test_dict = tool_test_dicts[test_index] - if "test_index" not in tool_test_dict: - tool_test_dict["test_index"] = test_index - if "tool_id" not in tool_test_dict: - tool_test_dict["tool_id"] = tool_id + tool_test_dict: ToolTestDescriptionDict = tool_test_dicts[test_index] if tool_version is None and "tool_version" in tool_test_dict: tool_version = tool_test_dict.get("tool_version") @@ -1400,7 +1530,9 @@ def verify_tool( input_staging_exception = e raise try: - tool_response = galaxy_interactor.run_tool(testdef, test_history, resource_parameters=resource_parameters) + tool_response = galaxy_interactor.run_tool( + testdef, test_history, resource_parameters=resource_parameters, use_legacy_api=use_legacy_api + ) data_list, jobs, tool_inputs = tool_response.outputs, tool_response.jobs, tool_response.inputs data_collection_list = tool_response.output_collections except RunToolException as e: @@ -1685,6 +1817,8 @@ def adapt_tool_source_dict(processed_dict: ToolTestDict) -> ToolTestDescriptionD expect_test_failure: bool = DEFAULT_EXPECT_TEST_FAILURE inputs: ExpandedToolInputsJsonified = {} maxseconds: Optional[int] = None + request: Optional[Dict[str, Any]] = None + request_schema: Optional[Dict[str, Any]] = None if not error_in_test_definition: processed_test_dict = cast(ValidToolTestDict, processed_dict) @@ -1710,6 +1844,8 @@ def adapt_tool_source_dict(processed_dict: ToolTestDict) -> ToolTestDescriptionD expect_failure = processed_test_dict.get("expect_failure", DEFAULT_EXPECT_FAILURE) expect_test_failure = processed_test_dict.get("expect_test_failure", DEFAULT_EXPECT_TEST_FAILURE) inputs = processed_test_dict.get("inputs", {}) + request = processed_test_dict.get("request", None) + request_schema = processed_test_dict.get("request_schema", None) else: invalid_test_dict = cast(InvalidToolTestDict, processed_dict) maxseconds = DEFAULT_TOOL_TEST_WAIT @@ -1737,6 +1873,8 @@ def adapt_tool_source_dict(processed_dict: ToolTestDict) -> ToolTestDescriptionD expect_failure=expect_failure, 
expect_test_failure=expect_test_failure, inputs=inputs, + request=request, + request_schema=request_schema, ) @@ -1799,6 +1937,8 @@ class ToolTestDescription: expect_test_failure: bool exception: Optional[str] inputs: ExpandedToolInputs + request: Optional[Dict[str, Any]] + request_schema: Optional[Dict[str, Any]] outputs: ToolSourceTestOutputs output_collections: List[TestCollectionOutputDef] maxseconds: Optional[int] @@ -1827,6 +1967,8 @@ def __init__(self, json_dict: ToolTestDescriptionDict): self.expect_failure = json_dict.get("expect_failure", DEFAULT_EXPECT_FAILURE) self.expect_test_failure = json_dict.get("expect_test_failure", DEFAULT_EXPECT_TEST_FAILURE) self.inputs = expanded_inputs_from_json(json_dict.get("inputs", {})) + self.request = json_dict.get("request", None) + self.request_schema = json_dict.get("request_schema", None) self.tool_id = json_dict["tool_id"] self.tool_version = json_dict.get("tool_version") self.maxseconds = _get_maxseconds(json_dict) @@ -1859,6 +2001,8 @@ def to_dict(self) -> ToolTestDescriptionDict: "required_files": self.required_files, "required_data_tables": self.required_data_tables, "required_loc_files": self.required_loc_files, + "request": self.request, + "request_schema": self.request_schema, "error": self.error, "exception": self.exception, "maxseconds": self.maxseconds, diff --git a/lib/galaxy/tool_util/verify/parse.py b/lib/galaxy/tool_util/verify/parse.py index 8dd51f525944..110dc9a93628 100644 --- a/lib/galaxy/tool_util/verify/parse.py +++ b/lib/galaxy/tool_util/verify/parse.py @@ -1,8 +1,10 @@ import logging import os import traceback +from dataclasses import dataclass from typing import ( Any, + Dict, Iterable, List, Optional, @@ -15,6 +17,8 @@ from galaxy.tool_util.parameters import ( input_models_for_tool_source, test_case_state as case_state, + TestCaseToolState, + ToolParameterBundleModel, ) from galaxy.tool_util.parser.interface import ( InputSource, @@ -65,15 +69,18 @@ def parse_tool_test_descriptions( profile = tool_source.parse_profile() for i, raw_test_dict in enumerate(raw_tests_dict.get("tests", [])): validation_exception: Optional[Exception] = None - if validate_on_load: + request_and_schema: Optional[TestRequestAndSchema] = None + try: tool_parameter_bundle = input_models_for_tool_source(tool_source) - try: - case_state(raw_test_dict, tool_parameter_bundle.parameters, profile, validate=True) - except Exception as e: - # TOOD: restrict types of validation exceptions a bit probably? 
- validation_exception = e + validated_test_case = case_state(raw_test_dict, tool_parameter_bundle.parameters, profile, validate=True) + request_and_schema = TestRequestAndSchema( + validated_test_case.tool_state, + tool_parameter_bundle, + ) + except Exception as e: + validation_exception = e - if validation_exception: + if validation_exception and validate_on_load: tool_id, tool_version = _tool_id_and_version(tool_source, tool_guid) test = ToolTestDescription.from_tool_source_dict( InvalidToolTestDict( @@ -89,13 +96,23 @@ def parse_tool_test_descriptions( ) ) else: - test = _description_from_tool_source(tool_source, raw_test_dict, i, tool_guid) + test = _description_from_tool_source(tool_source, raw_test_dict, i, tool_guid, request_and_schema) tests.append(test) return tests +@dataclass +class TestRequestAndSchema: + request: TestCaseToolState + request_schema: ToolParameterBundleModel + + def _description_from_tool_source( - tool_source: ToolSource, raw_test_dict: ToolSourceTest, test_index: int, tool_guid: Optional[str] + tool_source: ToolSource, + raw_test_dict: ToolSourceTest, + test_index: int, + tool_guid: Optional[str], + request_and_schema: Optional[TestRequestAndSchema], ) -> ToolTestDescription: required_files: RequiredFilesT = [] required_data_tables: RequiredDataTablesT = [] @@ -108,6 +125,12 @@ def _description_from_tool_source( if maxseconds is not None: maxseconds = int(maxseconds) + request: Optional[Dict[str, Any]] = None + request_schema: Optional[Dict[str, Any]] = None + if request_and_schema: + request = request_and_schema.request.input_state + request_schema = request_and_schema.request_schema.dict() + tool_id, tool_version = _tool_id_and_version(tool_source, tool_guid) processed_test_dict: Union[ValidToolTestDict, InvalidToolTestDict] try: @@ -122,6 +145,8 @@ def _description_from_tool_source( processed_test_dict = ValidToolTestDict( { "inputs": processed_inputs, + "request": request, + "request_schema": request_schema, "outputs": raw_test_dict["outputs"], "output_collections": raw_test_dict["output_collections"], "num_outputs": num_outputs, diff --git a/lib/galaxy/tools/__init__.py b/lib/galaxy/tools/__init__.py index 2e352e7c0f39..7b3e667fe8ea 100644 --- a/lib/galaxy/tools/__init__.py +++ b/lib/galaxy/tools/__init__.py @@ -49,6 +49,7 @@ from galaxy.model import ( Job, StoredWorkflow, + ToolRequest, ) from galaxy.model.base import transaction from galaxy.model.dataset_collections.matching import MatchingCollections @@ -71,6 +72,13 @@ expand_ontology_data, ) from galaxy.tool_util.output_checker import DETECTED_JOB_STATE +from galaxy.tool_util.parameters import ( + fill_static_defaults, + input_models_for_pages, + JobInternalToolState, + RequestInternalDereferencedToolState, + ToolParameterBundle, +) from galaxy.tool_util.parser import ( get_tool_source, get_tool_source_from_representation, @@ -118,7 +126,7 @@ from galaxy.tools.evaluation import global_tool_errors from galaxy.tools.execution_helpers import ToolExecutionCache from galaxy.tools.imp_exp import JobImportHistoryArchiveWrapper -from galaxy.tools.parameters import ( +from galaxy.tools.parameters import ( # fill_dynamic_defaults, check_param, params_from_strings, params_to_incoming, @@ -126,6 +134,7 @@ params_to_json_internal, params_to_strings, populate_state, + populate_state_async, visit_input_values, ) from galaxy.tools.parameters.basic import ( @@ -152,7 +161,10 @@ UploadDataset, ) from galaxy.tools.parameters.input_translation import ToolInputTranslator -from galaxy.tools.parameters.meta import 
expand_meta_parameters +from galaxy.tools.parameters.meta import ( + expand_meta_parameters, + expand_meta_parameters_async, +) from galaxy.tools.parameters.populate_model import populate_model from galaxy.tools.parameters.workflow_utils import workflow_building_modes from galaxy.tools.parameters.wrapped_json import json_wrap @@ -198,6 +210,7 @@ ToolRequestT, ToolStateDumpedToJsonInternalT, ToolStateDumpedToJsonT, + ToolStateJobInstanceExpansionT, ToolStateJobInstancePopulatedT, ToolStateJobInstanceT, ) @@ -208,7 +221,8 @@ DEFAULT_RERUN_REMAP_JOB_ID, DEFAULT_SET_OUTPUT_HID, DEFAULT_USE_CACHED_JOB, - execute as execute_job, + execute as execute_sync, + execute_async, ExecutionSlice, JobCallbackT, MappingParameters, @@ -762,7 +776,7 @@ class _Options(Bunch): refresh: str -class Tool(UsesDictVisibleKeys): +class Tool(UsesDictVisibleKeys, ToolParameterBundle): """ Represents a computational tool that can be executed through Galaxy. """ @@ -1431,6 +1445,11 @@ def parse_inputs(self, tool_source: ToolSource): self.inputs: Dict[str, Union[Group, ToolParameter]] = {} pages = tool_source.parse_input_pages() enctypes: Set[str] = set() + try: + parameters = input_models_for_pages(pages, self.profile) + self.parameters = parameters + except Exception: + pass if pages.inputs_defined: if hasattr(pages, "input_elem"): input_elem = pages.input_elem @@ -1823,6 +1842,64 @@ def visit_inputs(self, values, callback): if self.check_values: visit_input_values(self.inputs, values, callback) + def expand_incoming_async( + self, + request_context: WorkRequestContext, + tool_request_internal_state: RequestInternalDereferencedToolState, + rerun_remap_job_id: Optional[int], + ) -> Tuple[ + List[ToolStateJobInstancePopulatedT], + List[ToolStateJobInstancePopulatedT], + Optional[MatchingCollections], + List[JobInternalToolState], + ]: + """The tool request API+tasks version of expand_incoming. + + This is responsible for breaking the map over job requests into individual jobs for execution. 
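+
+        The incoming request state has already been dereferenced (URI inputs
+        realized as HDAs), so this step only expands batch parameters into
+        per-job states, fills static defaults, and validates one internal
+        tool state per job before execution.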
+ """ + if self.input_translator: + raise exceptions.RequestParameterInvalidException( + "Failure executing tool request with id '%s' (cannot validate inputs from this type of data source tool - please POST to /api/tools).", + self.id, + ) + + set_dataset_matcher_factory(request_context, self) + + expanded_incomings: List[ToolStateJobInstanceExpansionT] + job_tool_states: List[ToolStateJobInstanceT] + collection_info: Optional[MatchingCollections] + expanded_incomings, job_tool_states, collection_info = expand_meta_parameters_async( + request_context.app, self, tool_request_internal_state + ) + + self._ensure_expansion_is_valid(job_tool_states, rerun_remap_job_id) + + # Process incoming data + validation_timer = self.app.execution_timer_factory.get_timer( + "internals.galaxy.tools.validation", + "Validated and populated state for tool request", + ) + all_errors = [] + all_params: List[ToolStateJobInstancePopulatedT] = [] + internal_states: List[JobInternalToolState] = [] + for expanded_incoming, job_tool_state in zip(expanded_incomings, job_tool_states): + log.info(f"expanded_incoming before fill static defaults: {expanded_incoming}") + expanded_incoming = fill_static_defaults(expanded_incoming, self, self.profile) + job_tool_state = fill_static_defaults(job_tool_state, self, self.profile) + log.info(f"expanded_incoming before populate: {expanded_incoming}") + params, errors = self._populate_async(request_context, expanded_incoming) + log.info(f"expanded_incoming after: {expanded_incoming}") + internal_tool_state = JobInternalToolState(job_tool_state) + internal_tool_state.validate(self) + + internal_states.append(internal_tool_state) + all_errors.append(errors) + all_params.append(params) + unset_dataset_matcher_factory(request_context) + + log.info(validation_timer) + return all_params, all_errors, collection_info, internal_states + def expand_incoming( self, request_context: WorkRequestContext, incoming: ToolRequestT, input_format: InputFormatT = "legacy" ) -> Tuple[ @@ -1836,7 +1913,7 @@ def expand_incoming( # Fixed set of input parameters may correspond to any number of jobs. # Expand these out to individual parameters for given jobs (tool executions). - expanded_incomings: List[ToolStateJobInstanceT] + expanded_incomings: List[ToolStateJobInstanceExpansionT] collection_info: Optional[MatchingCollections] expanded_incomings, collection_info = expand_meta_parameters( request_context, self, incoming, input_format=input_format @@ -1862,7 +1939,9 @@ def expand_incoming( return all_params, all_errors, rerun_remap_job_id, collection_info def _ensure_expansion_is_valid( - self, expanded_incomings: List[ToolStateJobInstanceT], rerun_remap_job_id: Optional[int] + self, + expanded_incomings: Union[List[JobInternalToolState], List[ToolStateJobInstanceT]], + rerun_remap_job_id: Optional[int], ) -> None: """If the request corresponds to multiple jobs but this doesn't work with request configuration - raise an error. @@ -1911,6 +1990,33 @@ def _populate( self._handle_validate_input_hook(request_context, params, errors) return params, errors + def _populate_async( + self, request_context, expanded_incoming: ToolStateJobInstanceT + ) -> Tuple[ToolStateJobInstancePopulatedT, ParameterValidationErrorsT]: + """Validate expanded parameters for a job to replace references with model objects. + + So convert a ToolStateJobInstanceT to a ToolStateJobInstancePopulatedT. 
+ """ + params: ToolStateJobInstancePopulatedT = {} + errors: ParameterValidationErrorsT = {} + if self.input_translator: + self.input_translator.translate(expanded_incoming) + if not self.check_values: + # If `self.check_values` is false we don't do any checking or + # processing on input This is used to pass raw values + # through to/from external sites. + params = cast(ToolStateJobInstancePopulatedT, expanded_incoming) + else: + populate_state_async( + request_context, + self.inputs, + expanded_incoming, + params, + errors, + ) + self._handle_validate_input_hook(request_context, params, errors) + return params, errors + def _handle_validate_input_hook( self, request_context, params: ToolStateJobInstancePopulatedT, errors: ParameterValidationErrorsT ): @@ -1944,6 +2050,39 @@ def completed_jobs( completed_jobs[i] = None return completed_jobs + def handle_input_async( + self, + request_context: WorkRequestContext, + tool_request: ToolRequest, + tool_state: RequestInternalDereferencedToolState, + history: Optional[model.History] = None, + use_cached_job: bool = DEFAULT_USE_CACHED_JOB, + preferred_object_store_id: Optional[str] = DEFAULT_PREFERRED_OBJECT_STORE_ID, + rerun_remap_job_id: Optional[int] = None, + input_format: str = "legacy", + ): + """The tool request API+tasks version of handle_input.""" + all_params, all_errors, collection_info, job_tool_states = self.expand_incoming_async( + request_context, tool_state, rerun_remap_job_id + ) + self.handle_incoming_errors(all_errors) + + mapping_params = MappingParameters(tool_request.request, all_params, tool_state, job_tool_states) + completed_jobs: Dict[int, Optional[model.Job]] = self.completed_jobs( + request_context, use_cached_job, all_params + ) + execute_async( + request_context, + self, + mapping_params, + request_context.history, + tool_request, + completed_jobs, + rerun_remap_job_id=rerun_remap_job_id, + preferred_object_store_id=preferred_object_store_id, + collection_info=collection_info, + ) + def handle_input( self, trans, @@ -1969,9 +2108,9 @@ def handle_input( # If there were errors, we stay on the same page and display them self.handle_incoming_errors(all_errors) - mapping_params = MappingParameters(incoming, all_params) + mapping_params = MappingParameters(incoming, all_params, None, None) completed_jobs: Dict[int, Optional[model.Job]] = self.completed_jobs(trans, use_cached_job, all_params) - execution_tracker = execute_job( + execution_tracker = execute_sync( trans, self, mapping_params, diff --git a/lib/galaxy/tools/_types.py b/lib/galaxy/tools/_types.py index 635a86cf459d..f55566fd443f 100644 --- a/lib/galaxy/tools/_types.py +++ b/lib/galaxy/tools/_types.py @@ -8,6 +8,8 @@ +================================+============+=================================+============+===========+ | ToolRequestT | request | src dicts of encoded ids | nope | | | ToolStateJobInstanceT | a job | src dicts of encoded ids | nope | | +| ToolStateJobInstanceExpansionT | a job | a mix I think, things that were | nope | | +| | | expanded are objects | nope | | | ToolStateJobInstancePopulatedT | a job | model objs loaded from db | check_param | | | ToolStateDumpedToJsonT | a job | src dicts of encoded ids | " | | | | | (normalized into values attr) | " | | @@ -35,6 +37,10 @@ # been "checked" (check_param has not been called). ToolStateJobInstanceT = Dict[str, Any] +# After meta.expand_incoming stuff I think expanded parameters are in model object form but the other stuff is likely +# still encoded IDs? None of this is verified though. 
+ToolStateJobInstanceExpansionT = Dict[str, Any] + # Input dictionary for an individual job where objects are their model objects and parameters have been # "checked" (check_param has been called). ToolStateJobInstancePopulatedT = Dict[str, Any] diff --git a/lib/galaxy/tools/execute.py b/lib/galaxy/tools/execute.py index 31369c948052..97a6406daf68 100644 --- a/lib/galaxy/tools/execute.py +++ b/lib/galaxy/tools/execute.py @@ -24,12 +24,20 @@ from galaxy import model from galaxy.exceptions import ToolInputsNotOKException +from galaxy.model import ( + ToolRequest, + ToolRequestImplicitCollectionAssociation, +) from galaxy.model.base import transaction from galaxy.model.dataset_collections.matching import MatchingCollections from galaxy.model.dataset_collections.structure import ( get_structure, tool_output_to_structure, ) +from galaxy.tool_util.parameters.state import ( + JobInternalToolState, + RequestInternalDereferencedToolState, +) from galaxy.tool_util.parser import ToolOutputCollectionPart from galaxy.tools.execution_helpers import ( filter_output, @@ -69,8 +77,58 @@ def __init__(self, execution_tracker: "ExecutionTracker"): class MappingParameters(NamedTuple): + # the raw request - might correspond to multiple jobs param_template: ToolRequestT + # parameters corresponding to individual job param_combinations: List[ToolStateJobInstancePopulatedT] + # schema driven parameters + # model validated tool request - might correspond to multiple jobs + validated_param_template: Optional[RequestInternalDereferencedToolState] = None + # validated job parameters for individual jobs + validated_param_combinations: Optional[List[JobInternalToolState]] = None + + def ensure_validated(self): + assert self.validated_param_template is not None + assert self.validated_param_combinations is not None + + +def execute_async( + trans, + tool: "Tool", + mapping_params: MappingParameters, + history: model.History, + tool_request: ToolRequest, + completed_jobs: Optional[CompletedJobsT] = None, + rerun_remap_job_id: Optional[int] = None, + preferred_object_store_id: Optional[str] = None, + collection_info: Optional[MatchingCollections] = None, + workflow_invocation_uuid: Optional[str] = None, + invocation_step: Optional[model.WorkflowInvocationStep] = None, + max_num_jobs: Optional[int] = None, + job_callback: Optional[Callable] = None, + workflow_resource_parameters: Optional[Dict[str, Any]] = None, + validate_outputs: bool = False, +) -> "ExecutionTracker": + """The tool request/async version of execute.""" + completed_jobs = completed_jobs or {} + mapping_params.ensure_validated() + return _execute( + trans, + tool, + mapping_params, + history, + tool_request, + rerun_remap_job_id, + preferred_object_store_id, + collection_info, + workflow_invocation_uuid, + invocation_step, + max_num_jobs, + job_callback, + completed_jobs, + workflow_resource_parameters, + validate_outputs, + ) def execute( @@ -88,12 +146,48 @@ def execute( completed_jobs: Optional[CompletedJobsT] = None, workflow_resource_parameters: Optional[WorkflowResourceParametersT] = None, validate_outputs: bool = False, -): +) -> "ExecutionTracker": """ Execute a tool and return object containing summary (output data, number of failures, etc...). 
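+
+    Both this synchronous entry point and ``execute_async`` above delegate to
+    ``_execute``; only the async path carries a ToolRequest and pre-validated
+    tool state objects.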
""" completed_jobs = completed_jobs or {} + return _execute( + trans, + tool, + mapping_params, + history, + None, + rerun_remap_job_id, + preferred_object_store_id, + collection_info, + workflow_invocation_uuid, + invocation_step, + max_num_jobs, + job_callback, + completed_jobs, + workflow_resource_parameters, + validate_outputs, + ) + + +def _execute( + trans, + tool: "Tool", + mapping_params: MappingParameters, + history: model.History, + tool_request: Optional[ToolRequest], + rerun_remap_job_id: Optional[int], + preferred_object_store_id: Optional[str], + collection_info: Optional[MatchingCollections], + workflow_invocation_uuid: Optional[str], + invocation_step: Optional[model.WorkflowInvocationStep], + max_num_jobs: Optional[int], + job_callback: Optional[Callable], + completed_jobs: Dict[int, Optional[model.Job]], + workflow_resource_parameters: Optional[Dict[str, Any]], + validate_outputs: bool, +) -> "ExecutionTracker": if max_num_jobs is not None: assert invocation_step is not None if rerun_remap_job_id: @@ -118,8 +212,9 @@ def execute_single_job(execution_slice: "ExecutionSlice", completed_job: Optiona "internals.galaxy.tools.execute.job_single", SINGLE_EXECUTION_SUCCESS_MESSAGE ) params = execution_slice.param_combination - if "__data_manager_mode" in mapping_params.param_template: - params["__data_manager_mode"] = mapping_params.param_template["__data_manager_mode"] + request_state = mapping_params.param_template + if "__data_manager_mode" in request_state: + params["__data_manager_mode"] = request_state["__data_manager_mode"] if workflow_invocation_uuid: params["__workflow_invocation_uuid__"] = workflow_invocation_uuid elif "__workflow_invocation_uuid__" in params: @@ -148,6 +243,8 @@ def execute_single_job(execution_slice: "ExecutionSlice", completed_job: Optiona skip=skip, ) if job: + if tool_request: + job.tool_request = tool_request log.debug(job_timer.to_str(tool_id=tool.id, job_id=job.id)) execution_tracker.record_success(execution_slice, job, result) # associate dataset instances with the job that creates them @@ -175,7 +272,7 @@ def execute_single_job(execution_slice: "ExecutionSlice", completed_job: Optiona except ToolInputsNotOKException as e: execution_tracker.record_error(e) - execution_tracker.ensure_implicit_collections_populated(history, mapping_params.param_template) + execution_tracker.ensure_implicit_collections_populated(history, mapping_params.param_template, tool_request) job_count = len(execution_tracker.param_combinations) jobs_executed = 0 @@ -188,7 +285,11 @@ def execute_single_job(execution_slice: "ExecutionSlice", completed_job: Optiona has_remaining_jobs = True break else: - skip = execution_slice.param_combination.pop("__when_value__", None) is False + slice_params = execution_slice.param_combination + if isinstance(slice_params, JobInternalToolState): + slice_params = slice_params.input_state + + skip = slice_params.pop("__when_value__", None) is False execute_single_job(execution_slice, completed_jobs[i], skip=skip) history = execution_slice.history or history jobs_executed += 1 @@ -426,15 +527,15 @@ def _mapped_output_structure(self, trans, tool_output): mapped_output_structure = mapping_structure.multiply(output_structure) return mapped_output_structure - def ensure_implicit_collections_populated(self, history, params): + def ensure_implicit_collections_populated(self, history, params, tool_request: Optional[ToolRequest]): if not self.collection_info: return history = history or self.tool.get_default_history_by_trans(self.trans) # params = 
param_combinations[0] if param_combinations else mapping_params.param_template - self.precreate_output_collections(history, params) + self.precreate_output_collections(history, params, tool_request) - def precreate_output_collections(self, history, params): + def precreate_output_collections(self, history, params, tool_request: Optional[ToolRequest]): # params is just one sample tool param execution with parallelized # collection replaced with a specific dataset. Need to replace this # with the collection and wrap everything up so can evaluate output @@ -480,6 +581,14 @@ def replace_optional_runtime_values(path, key, value): collection_instance.implicit_collection_jobs = implicit_collection_jobs collection_instances[output_name] = collection_instance trans.sa_session.add(collection_instance) + if tool_request: + for output_name, collection_instance in collection_instances.items(): + trica = ToolRequestImplicitCollectionAssociation() + trica.output_name = output_name + trica.dataset_collection = collection_instance + trica.tool_request = tool_request + trans.sa_session.add(trica) + # Needed to flush the association created just above with # job.add_output_dataset_collection. with transaction(trans.sa_session): @@ -683,13 +792,13 @@ def new_collection_execution_slices(self): yield ExecutionSlice(job_index, param_combination, dataset_collection_elements) - def ensure_implicit_collections_populated(self, history, params): + def ensure_implicit_collections_populated(self, history, params, tool_request: Optional[ToolRequest]): if not self.collection_info: return history = history or self.tool.get_default_history_by_trans(self.trans) if self.invocation_step.is_new: - self.precreate_output_collections(history, params) + self.precreate_output_collections(history, params, tool_request) for output_name, implicit_collection in self.implicit_collections.items(): self.invocation_step.add_output(output_name, implicit_collection) else: diff --git a/lib/galaxy/tools/parameters/__init__.py b/lib/galaxy/tools/parameters/__init__.py index ee3a8709817d..c61fd49ba5f1 100644 --- a/lib/galaxy/tools/parameters/__init__.py +++ b/lib/galaxy/tools/parameters/__init__.py @@ -12,6 +12,10 @@ from boltons.iterutils import remap +from galaxy.model import ( + HistoryDatasetAssociation, + HistoryDatasetCollectionAssociation, +) from galaxy.util import unicodify from galaxy.util.expressions import ExpressionContext from galaxy.util.json import safe_loads @@ -20,6 +24,7 @@ DataToolParameter, ParameterValueError, SelectToolParameter, + TextToolParameter, ToolParameter, ) from .grouping import ( @@ -656,6 +661,185 @@ def _populate_state_legacy( state[input.name] = value +def populate_state_async( + request_context, + inputs: ToolInputsT, + incoming: ToolStateJobInstanceT, + state: ToolStateJobInstancePopulatedT, + errors: ParameterValidationErrorsT, + context=None, +): + context = ExpressionContext(state, context) + for input in inputs.values(): + initial_value = input.get_initial_value(request_context, context) + input_name = input.name + state[input_name] = initial_value + group_state = state[input_name] + if input.type == "repeat": + repeat_input = cast(Repeat, input) + if ( + len(incoming[repeat_input.name]) > repeat_input.max + or len(incoming[repeat_input.name]) < repeat_input.min + ): + errors[repeat_input.name] = "The number of repeat elements is outside the range specified by the tool." 
+ else: + del group_state[:] + for rep in incoming[repeat_input.name]: + new_state: ToolStateJobInstancePopulatedT = {} + group_state.append(new_state) + repeat_errors: ParameterValidationErrorsT = {} + populate_state_async( + request_context, + repeat_input.inputs, + rep, + new_state, + repeat_errors, + context=context, + ) + if repeat_errors: + errors[repeat_input.name] = repeat_errors + + elif input.type == "conditional": + conditional_input = cast(Conditional, input) + test_param = cast(ToolParameter, conditional_input.test_param) + test_param_value = incoming.get(conditional_input.name, {}).get(test_param.name) + value, error = check_param(request_context, test_param, test_param_value, context) + if error: + errors[test_param.name] = error + else: + try: + current_case = conditional_input.get_current_case(value) + group_state = state[conditional_input.name] = {} + cast_errors: ParameterValidationErrorsT = {} + populate_state_async( + request_context, + conditional_input.cases[current_case].inputs, + cast(ToolStateJobInstanceT, incoming.get(conditional_input.name)), + group_state, + cast_errors, + context=context, + ) + if cast_errors: + errors[conditional_input.name] = cast_errors + group_state["__current_case__"] = current_case + except Exception: + errors[test_param.name] = "The selected case is unavailable/invalid." + group_state[test_param.name] = value + + elif input.type == "section": + section_input = cast(Section, input) + section_errors: ParameterValidationErrorsT = {} + populate_state_async( + request_context, + section_input.inputs, + cast(ToolStateJobInstanceT, incoming.get(section_input.name)), + group_state, + section_errors, + context=context, + ) + if section_errors: + errors[section_input.name] = section_errors + + elif input.type == "upload_dataset": + raise NotImplementedError + + else: + param_value = _get_incoming_value(incoming, input.name, state.get(input.name)) + value, error = check_param(request_context, input, param_value, context, simple_errors=False) + if error: + errors[input.name] = error + state[input.name] = value + + def to_internal_single(value): + if isinstance(value, HistoryDatasetCollectionAssociation): + return {"src": "hdca", "id": value.id} + elif isinstance(value, HistoryDatasetAssociation): + return {"src": "hda", "id": value.id} + else: + # tests and such to confirm we need DCE, LDDA, etc... + return value + + def to_internal(value): + if isinstance(value, list): + return [to_internal_single(v) for v in value] + else: + return to_internal_single(value) + + if input_name not in incoming: + if input.type == "data_column": + if isinstance(value, str): + incoming[input_name] = int(value) + elif isinstance(value, list): + incoming[input_name] = [int(v) for v in value] + else: + incoming[input_name] = value + elif input.type == "text": + text_input = cast(TextToolParameter, input) + # see behavior of tools in test_tools.py::test_null_to_text_tools + # these parameters act as empty string in this context + if value is None and not text_input.optional: + incoming[input_name] = "" + else: + incoming[input_name] = value + else: + incoming[input_name] = to_internal(value) + + +def fill_dynamic_defaults( + request_context, + inputs: ToolInputsT, + incoming: ToolStateJobInstanceT, + context=None, +): + """ + Expands incoming parameters with default values. 
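+
+    A rough sketch of the intended behavior (hypothetical tool with a single
+    parameter "n" omitted from the request):
+
+        incoming = {}
+        fill_dynamic_defaults(request_context, tool.inputs, incoming)
+        # incoming now carries the dynamic default, e.g. {"n": 1}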
+ """ + if context is None: + context = flat_to_nested_state(incoming) + for input in inputs.values(): + if input.type == "repeat": + repeat_input = cast(Repeat, input) + for rep in incoming[repeat_input.name]: + fill_dynamic_defaults( + request_context, + repeat_input.inputs, + rep, + context=context, + ) + + elif input.type == "conditional": + conditional_input = cast(Conditional, input) + test_param = cast(ToolParameter, conditional_input.test_param) + test_param_value = incoming.get(conditional_input.name, {}).get(test_param.name) + try: + current_case = conditional_input.get_current_case(test_param_value) + fill_dynamic_defaults( + request_context, + conditional_input.cases[current_case].inputs, + cast(ToolStateJobInstanceT, incoming.get(conditional_input.name)), + context=context, + ) + except Exception: + raise Exception("The selected case is unavailable/invalid.") + + elif input.type == "section": + section_input = cast(Section, input) + fill_dynamic_defaults( + request_context, + section_input.inputs, + cast(ToolStateJobInstanceT, incoming.get(section_input.name)), + context=context, + ) + + elif input.type == "upload_dataset": + raise NotImplementedError + + else: + if input.name not in incoming: + param_value = input.get_initial_value(request_context, context) + incoming[input.name] = param_value + + def _get_incoming_value(incoming, key, default): """ Fetch value from incoming dict directly or check special nginx upload diff --git a/lib/galaxy/tools/parameters/basic.py b/lib/galaxy/tools/parameters/basic.py index 6b35473959e4..bbd0927c3e60 100644 --- a/lib/galaxy/tools/parameters/basic.py +++ b/lib/galaxy/tools/parameters/basic.py @@ -13,6 +13,7 @@ from collections.abc import MutableMapping from typing import ( Any, + cast, Dict, List, Optional, @@ -2484,7 +2485,7 @@ def from_json(self, value, trans, other_values=None): rval = value elif isinstance(value, MutableMapping) and "src" in value and "id" in value: if value["src"] == "hdca": - rval = session.get(HistoryDatasetCollectionAssociation, trans.security.decode_id(value["id"])) + rval = cast(HistoryDatasetCollectionAssociation, src_id_to_item(sa_session=trans.sa_session, value=value, security=trans.security)) elif isinstance(value, list): if len(value) > 0: value = value[0] diff --git a/lib/galaxy/tools/parameters/meta.py b/lib/galaxy/tools/parameters/meta.py index 5865a6698110..e152bae27cc1 100644 --- a/lib/galaxy/tools/parameters/meta.py +++ b/lib/galaxy/tools/parameters/meta.py @@ -26,6 +26,7 @@ matching, subcollections, ) +from galaxy.tool_util.parameters import RequestInternalDereferencedToolState from galaxy.util.permutations import ( build_combos, input_classification, @@ -40,6 +41,7 @@ from .._types import ( InputFormatT, ToolRequestT, + ToolStateDumpedToJsonInternalT, ToolStateJobInstanceT, ) @@ -334,6 +336,84 @@ def visitor(input, value, prefix, prefixed_name, prefixed_label, error, **kwargs return (single_inputs_nested, matched_multi_inputs, multiplied_multi_inputs) +ExpandedAsyncT = Tuple[ + List[ToolStateJobInstanceT], List[ToolStateDumpedToJsonInternalT], Optional[matching.MatchingCollections] +] + + +def expand_meta_parameters_async(app, tool, incoming: RequestInternalDereferencedToolState) -> ExpandedAsyncT: + # TODO: Tool State 2.0 Follow Up: rework this to only test permutation at actual input value roots. + + collections_to_match = matching.CollectionsToMatch() + + def classifier_from_value(value, input_key): + if isinstance(value, dict) and "values" in value: + # Explicit meta wrapper for inputs... 
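+            # e.g. {"__class__": "Batch", "linked": False, "values": [{"src": "hdca", "id": 5}]}
+            # (ids illustrative; "linked" defaults to True when omitted)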
+ is_batch = value.get("__class__", "Batch") == "Batch" + is_linked = value.get("linked", True) + if is_batch and is_linked: + classification = input_classification.MATCHED + elif is_batch: + classification = input_classification.MULTIPLIED + else: + classification = input_classification.SINGLE + if __collection_multirun_parameter(value): + log.info("IN HERE WITH A COLLECTION MULTIRUN PARAMETER") + collection_value = value["values"][0] + values = __expand_collection_parameter_async( + app, input_key, collection_value, collections_to_match, linked=is_linked + ) + else: + log.info("NOT IN HERE WITH A COLLECTION MULTIRUN PARAMETER") + values = value["values"] + else: + classification = input_classification.SINGLE + values = value + return classification, values + + # is there a way to make Pydantic ensure reordering isn't needed - model and serialize out the parameters maybe? + reordered_incoming = reorder_parameters(tool, incoming.input_state, incoming.input_state, True) + incoming_template = reordered_incoming + + single_inputs, matched_multi_inputs, multiplied_multi_inputs = split_inputs_nested( + tool.inputs, incoming_template, classifier_from_value + ) + expanded_incomings = build_combos(single_inputs, matched_multi_inputs, multiplied_multi_inputs, nested=True) + # those all have sa model objects from expansion to be used within for additional logic (maybe?) + # but we want to record just src and IDS in the job state object - so undo that + expanded_job_states = build_combos( + to_decoded_json(single_inputs), + to_decoded_json(matched_multi_inputs), + to_decoded_json(multiplied_multi_inputs), + nested=True, + ) + if collections_to_match.has_collections(): + collection_info = app.dataset_collection_manager.match_collections(collections_to_match) + else: + collection_info = None + return expanded_incomings, expanded_job_states, collection_info + + +def to_decoded_json(has_objects): + if isinstance(has_objects, dict): + decoded_json = {} + for key, value in has_objects.items(): + decoded_json[key] = to_decoded_json(value) + return decoded_json + elif isinstance(has_objects, list): + return [to_decoded_json(o) for o in has_objects] + elif isinstance(has_objects, DatasetCollectionElement): + return {"src": "dce", "id": has_objects.id} + elif isinstance(has_objects, HistoryDatasetAssociation): + return {"src": "hda", "id": has_objects.id} + elif isinstance(has_objects, HistoryDatasetCollectionAssociation): + return {"src": "hdca", "id": has_objects.id} + elif isinstance(has_objects, LibraryDatasetDatasetAssociation): + return {"src": "ldda", "id": has_objects.id} + else: + return has_objects + + CollectionExpansionListT = Union[List[DatasetCollectionElement], List[DatasetInstance]] @@ -373,8 +453,34 @@ def __expand_collection_parameter( return hdas +def __expand_collection_parameter_async(app, input_key, incoming_val, collections_to_match, linked=False): + # If subcollection multirun of data_collection param - value will + # be "hdca_id|subcollection_type" else it will just be hdca_id + try: + src = incoming_val["src"] + if src != "hdca": + raise exceptions.ToolMetaParameterException(f"Invalid dataset collection source type {src}") + hdc_id = incoming_val["id"] + subcollection_type = incoming_val.get("map_over_type", None) + except TypeError: + hdc_id = incoming_val + subcollection_type = None + hdc = app.model.context.get(HistoryDatasetCollectionAssociation, hdc_id) + collections_to_match.add(input_key, hdc, subcollection_type=subcollection_type, linked=linked) + if subcollection_type is not 
None:
+        subcollection_elements = subcollections.split_dataset_collection_instance(hdc, subcollection_type)
+        return subcollection_elements
+    else:
+        hdas = []
+        for element in hdc.collection.dataset_elements:
+            hda = element.dataset_instance
+            hda.element_identifier = element.element_identifier
+            hdas.append(hda)
+        return hdas
+
+
 def __collection_multirun_parameter(value: Dict[str, Any]) -> bool:
-    is_batch = value.get("batch", False)
+    is_batch = value.get("batch", False) or value.get("__class__", None) == "Batch"
     if not is_batch:
         return False
diff --git a/lib/galaxy/webapps/galaxy/api/histories.py b/lib/galaxy/webapps/galaxy/api/histories.py
index 57b18c1f1c17..e1cbdee66d7e 100644
--- a/lib/galaxy/webapps/galaxy/api/histories.py
+++ b/lib/galaxy/webapps/galaxy/api/histories.py
@@ -61,6 +61,7 @@
     ShareWithPayload,
     SharingStatus,
     StoreExportPayload,
+    ToolRequestModel,
     UpdateHistoryPayload,
     WriteStoreToPayload,
 )
@@ -374,6 +375,17 @@ def citations(
     ) -> List[Any]:
         return self.service.citations(trans, history_id)
 
+    @router.get(
+        "/api/histories/{history_id}/tool_requests",
+        summary="Return all the tool requests for the tools submitted to this history.",
+    )
+    def tool_requests(
+        self,
+        history_id: HistoryIDPathParam,
+        trans: ProvidesHistoryContext = DependsOnTrans,
+    ) -> List[ToolRequestModel]:
+        return self.service.tool_requests(trans, history_id)
+
     @router.post(
         "/api/histories",
         summary="Creates a new history.",
diff --git a/lib/galaxy/webapps/galaxy/api/jobs.py b/lib/galaxy/webapps/galaxy/api/jobs.py
index 4c0c27169459..6e225ab17ba8 100644
--- a/lib/galaxy/webapps/galaxy/api/jobs.py
+++ b/lib/galaxy/webapps/galaxy/api/jobs.py
@@ -45,6 +45,7 @@
     JobInputAssociation,
     JobInputSummary,
     JobOutputAssociation,
+    JobOutputCollectionAssociation,
     ReportJobErrorPayload,
     SearchJobsPayload,
     ShowFullJobResponse,
@@ -68,11 +69,14 @@
 )
 from galaxy.webapps.galaxy.api.common import query_parameter_as_list
 from galaxy.webapps.galaxy.services.jobs import (
+    JobCreateResponse,
     JobIndexPayload,
     JobIndexViewEnum,
+    JobRequest,
     JobsService,
 )
 from galaxy.work.context import proxy_work_context_for_history
+from .tools import validate_not_protected
 
 log = logging.getLogger(__name__)
 
@@ -156,6 +160,12 @@
     description="Limit listing of jobs to those that match the specified implicit collection job ID. If none, jobs from any implicit collection execution (or from no implicit collection execution) may be returned.",
 )
 
+ToolRequestIdQueryParam: Optional[DecodedDatabaseIdField] = Query(
+    default=None,
+    title="Tool Request ID",
+    description="Limit listing of jobs to those that were created from the supplied tool request ID. If none, jobs from any tool request (or from no tool request) may be returned.",
+)
+
 SortByQueryParam: JobIndexSortByEnum = Query(
     default=JobIndexSortByEnum.update_time,
     title="Sort By",
@@ -208,6 +218,13 @@ class FastAPIJobs:
     service: JobsService = depends(JobsService)
 
+    @router.post("/api/jobs")
+    def create(
+        self, trans: ProvidesHistoryContext = DependsOnTrans, job_request: JobRequest = Body(...)
+ ) -> JobCreateResponse: + validate_not_protected(job_request.tool_id) + return self.service.create(trans, job_request) + @router.get("/api/jobs") def index( self, @@ -224,6 +241,7 @@ def index( workflow_id: Optional[DecodedDatabaseIdField] = WorkflowIdQueryParam, invocation_id: Optional[DecodedDatabaseIdField] = InvocationIdQueryParam, implicit_collection_jobs_id: Optional[DecodedDatabaseIdField] = ImplicitCollectionJobsIdQueryParam, + tool_request_id: Optional[DecodedDatabaseIdField] = ToolRequestIdQueryParam, order_by: JobIndexSortByEnum = SortByQueryParam, search: Optional[str] = SearchQueryParam, limit: int = LimitQueryParam, @@ -242,6 +260,7 @@ def index( workflow_id=workflow_id, invocation_id=invocation_id, implicit_collection_jobs_id=implicit_collection_jobs_id, + tool_request_id=tool_request_id, order_by=order_by, search=search, limit=limit, @@ -362,12 +381,14 @@ def outputs( self, job_id: JobIdPathParam, trans: ProvidesUserContext = DependsOnTrans, - ) -> List[JobOutputAssociation]: + ) -> List[Union[JobOutputAssociation, JobOutputCollectionAssociation]]: job = self.service.get_job(trans=trans, job_id=job_id) associations = self.service.dictify_associations(trans, job.output_datasets, job.output_library_datasets) - output_associations = [] + output_associations: List[Union[JobOutputAssociation, JobOutputCollectionAssociation]] = [] for association in associations: output_associations.append(JobOutputAssociation(name=association.name, dataset=association.dataset)) + + output_associations.extend(self.service.dictify_output_collection_associations(trans, job)) return output_associations @router.get( diff --git a/lib/galaxy/webapps/galaxy/api/tools.py b/lib/galaxy/webapps/galaxy/api/tools.py index dc27f7c3408e..6914f9ab7bf3 100644 --- a/lib/galaxy/webapps/galaxy/api/tools.py +++ b/lib/galaxy/webapps/galaxy/api/tools.py @@ -12,6 +12,8 @@ from fastapi import ( Body, Depends, + Path, + Query, Request, UploadFile, ) @@ -27,10 +29,14 @@ from galaxy.managers.context import ProvidesHistoryContext from galaxy.managers.hdas import HDAManager from galaxy.managers.histories import HistoryManager +from galaxy.model import ToolRequest from galaxy.schema.fetch_data import ( FetchDataFormPayload, FetchDataPayload, ) +from galaxy.schema.fields import DecodedDatabaseIdField +from galaxy.schema.schema import ToolRequestModel +from galaxy.tool_util.parameters import ToolParameterT from galaxy.tool_util.verify import ToolTestDescriptionDict from galaxy.tools.evaluation import global_tool_errors from galaxy.util.zipstream import ZipstreamWrapper @@ -42,7 +48,11 @@ ) from galaxy.webapps.base.controller import UsesVisualizationMixin from galaxy.webapps.base.webapp import GalaxyWebTransaction -from galaxy.webapps.galaxy.services.tools import ToolsService +from galaxy.webapps.galaxy.services.base import tool_request_to_model +from galaxy.webapps.galaxy.services.tools import ( + ToolRunReference, + ToolsService, +) from . 
import (
    APIContentTypeRoute,
    as_form,
@@ -73,6 +83,13 @@ class JsonApiRoute(APIContentTypeRoute):
 
 FetchDataForm = as_form(FetchDataFormPayload)
 
+ToolIDPathParam: str = Path(
+    ...,
+    title="Tool ID",
+    description="The tool ID for the tool lineage stored in Galaxy's toolbox.",
+)
+ToolVersionQueryParam: Optional[str] = Query(default=None, title="Tool Version", description="")
+
 
 async def get_files(request: Request, files: Optional[List[UploadFile]] = None):
     # FastAPI's UploadFile is a very light wrapper around starlette's UploadFile
@@ -106,6 +123,57 @@ def fetch_form(
     ):
         return self.service.create_fetch(trans, payload, files)
 
+    @router.get(
+        "/api/tool_requests/{id}",
+        summary="Get tool request.",
+    )
+    def get_tool_request(
+        self,
+        id: DecodedDatabaseIdField,
+        trans: ProvidesHistoryContext = DependsOnTrans,
+    ) -> ToolRequestModel:
+        tool_request = self._get_tool_request_or_raise_not_found(trans, id)
+        return tool_request_to_model(tool_request)
+
+    @router.get(
+        "/api/tool_requests/{id}/state",
+        summary="Get tool request state.",
+    )
+    def tool_request_state(
+        self,
+        id: DecodedDatabaseIdField,
+        trans: ProvidesHistoryContext = DependsOnTrans,
+    ) -> str:
+        tool_request = self._get_tool_request_or_raise_not_found(trans, id)
+        state = tool_request.state
+        if not state:
+            raise exceptions.InconsistentDatabase()
+        return cast(str, state)
+
+    def _get_tool_request_or_raise_not_found(
+        self, trans: ProvidesHistoryContext, id: DecodedDatabaseIdField
+    ) -> ToolRequest:
+        tool_request: Optional[ToolRequest] = cast(
+            Optional[ToolRequest], trans.app.model.context.query(ToolRequest).get(id)
+        )
+        if tool_request is None:
+            raise exceptions.ObjectNotFound()
+        assert tool_request
+        return tool_request
+
+    @router.get(
+        "/api/tools/{tool_id}/inputs",
+        summary="Get tool inputs.",
+    )
+    def tool_inputs(
+        self,
+        tool_id: str = ToolIDPathParam,
+        tool_version: Optional[str] = ToolVersionQueryParam,
+        trans: ProvidesHistoryContext = DependsOnTrans,
+    ) -> List[ToolParameterT]:
+        tool_run_ref = ToolRunReference(tool_id=tool_id, tool_version=tool_version, tool_uuid=None)
+        return self.service.inputs(trans, tool_run_ref)
+
 
 class ToolsController(BaseGalaxyAPIController, UsesVisualizationMixin):
     """
@@ -586,16 +654,17 @@ def create(self, trans: GalaxyWebTransaction, payload, **kwd):
         :type input_format: str
         """
         tool_id = payload.get("tool_id")
-        tool_uuid = payload.get("tool_uuid")
-        if tool_id in PROTECTED_TOOLS:
-            raise exceptions.RequestParameterInvalidException(
-                f"Cannot execute tool [{tool_id}] directly, must use alternative endpoint."
-            )
-        if tool_id is None and tool_uuid is None:
-            raise exceptions.RequestParameterInvalidException("Must specify a valid tool_id to use this endpoint.")
+        validate_not_protected(tool_id)
         return self.service._create(trans, payload, **kwd)
 
 
+def validate_not_protected(tool_id: Optional[str]):
+    if tool_id in PROTECTED_TOOLS:
+        raise exceptions.RequestParameterInvalidException(
+            f"Cannot execute tool [{tool_id}] directly, must use alternative endpoint."
+ ) + + def _kwd_or_payload(kwd: Dict[str, Any]) -> Dict[str, Any]: if "payload" in kwd: kwd = cast(Dict[str, Any], kwd.get("payload")) diff --git a/lib/galaxy/webapps/galaxy/services/base.py b/lib/galaxy/webapps/galaxy/services/base.py index dcf91e80f2f2..144fc61b9381 100644 --- a/lib/galaxy/webapps/galaxy/services/base.py +++ b/lib/galaxy/webapps/galaxy/services/base.py @@ -23,13 +23,19 @@ ) from galaxy.managers.context import ProvidesUserContext from galaxy.managers.model_stores import create_objects_from_store -from galaxy.model import User +from galaxy.model import ( + ToolRequest, + User, +) from galaxy.model.store import ( get_export_store_factory, ModelExportStore, ) from galaxy.schema.fields import EncodedDatabaseIdField -from galaxy.schema.schema import AsyncTaskResultSummary +from galaxy.schema.schema import ( + AsyncTaskResultSummary, + ToolRequestModel, +) from galaxy.security.idencoding import IdEncodingHelper from galaxy.short_term_storage import ( ShortTermStorageAllocator, @@ -193,3 +199,28 @@ def async_task_summary(async_result: AsyncResult) -> AsyncTaskResultSummary: name=name, queue=queue, ) + + +def tool_request_to_model(tool_request: ToolRequest) -> ToolRequestModel: + implicit_collection_output_dicts = [] + for implicit_collection in tool_request.implicit_collections: + + name = implicit_collection.output_name + dataset_collection_instance = { + "src": "hdca", + "id": implicit_collection.dataset_collection.id, + } + implicit_collection_output_dict = { + "name": name, + "dataset_collection_instance": dataset_collection_instance, + } + implicit_collection_output_dicts.append(implicit_collection_output_dict) + + as_dict = { + "id": tool_request.id, + "request": tool_request.request, + "state": tool_request.state, + "state_message": tool_request.state_message, + "implicit_collection_outputs": implicit_collection_output_dicts, + } + return ToolRequestModel.model_validate(as_dict) diff --git a/lib/galaxy/webapps/galaxy/services/histories.py b/lib/galaxy/webapps/galaxy/services/histories.py index 32e8a9fa8a1c..764ce5a748a1 100644 --- a/lib/galaxy/webapps/galaxy/services/histories.py +++ b/lib/galaxy/webapps/galaxy/services/histories.py @@ -70,6 +70,7 @@ ShareHistoryWithStatus, ShareWithPayload, StoreExportPayload, + ToolRequestModel, WriteStoreToPayload, ) from galaxy.schema.tasks import ( @@ -87,6 +88,7 @@ model_store_storage_target, ServesExportStores, ServiceBase, + tool_request_to_model, ) from galaxy.webapps.galaxy.services.notifications import NotificationService from galaxy.webapps.galaxy.services.sharable import ShareableService @@ -533,6 +535,13 @@ def published( ] return rval + def tool_requests( + self, trans: ProvidesHistoryContext, history_id: DecodedDatabaseIdField + ) -> List[ToolRequestModel]: + history = self.manager.get_accessible(history_id, trans.user, current_history=trans.history) + tool_requests = history.tool_requests + return [tool_request_to_model(tr) for tr in tool_requests] + def citations(self, trans: ProvidesHistoryContext, history_id: DecodedDatabaseIdField): """ Return all the citations for the tools used to produce the datasets in diff --git a/lib/galaxy/webapps/galaxy/services/jobs.py b/lib/galaxy/webapps/galaxy/services/jobs.py index c90b50068cc2..191a5e291cf5 100644 --- a/lib/galaxy/webapps/galaxy/services/jobs.py +++ b/lib/galaxy/webapps/galaxy/services/jobs.py @@ -1,3 +1,4 @@ +import logging from enum import Enum from typing import ( Any, @@ -6,24 +7,83 @@ Optional, ) +from pydantic import ( + BaseModel, + Field, +) + from galaxy import 
( exceptions, model, ) +from galaxy.celery.tasks import queue_jobs from galaxy.managers import hdas from galaxy.managers.base import security_check -from galaxy.managers.context import ProvidesUserContext +from galaxy.managers.context import ( + ProvidesHistoryContext, + ProvidesUserContext, +) +from galaxy.managers.histories import HistoryManager from galaxy.managers.jobs import ( JobManager, JobSearch, view_show_job, ) -from galaxy.model import Job -from galaxy.schema.fields import DecodedDatabaseIdField -from galaxy.schema.jobs import JobAssociation -from galaxy.schema.schema import JobIndexQueryPayload +from galaxy.model import ( + Job, + ToolRequest, + ToolSource as ToolSourceModel, +) +from galaxy.model.base import transaction +from galaxy.schema.fields import ( + DecodedDatabaseIdField, + EncodedDatabaseIdField, +) +from galaxy.schema.jobs import ( + JobAssociation, + JobOutputCollectionAssociation, +) +from galaxy.schema.schema import ( + AsyncTaskResultSummary, + JobIndexQueryPayload, +) +from galaxy.schema.tasks import ( + QueueJobs, + ToolSource, +) from galaxy.security.idencoding import IdEncodingHelper -from galaxy.webapps.galaxy.services.base import ServiceBase +from galaxy.tool_util.parameters import ( + decode, + RequestToolState, +) +from galaxy.webapps.galaxy.services.base import ( + async_task_summary, + ServiceBase, +) +from .tools import ( + ToolRunReference, + validate_tool_for_running, +) + +log = logging.getLogger(__name__) + + +class JobRequest(BaseModel): + tool_id: Optional[str] = Field(default=None, title="tool_id", description="TODO") + tool_uuid: Optional[str] = Field(default=None, title="tool_uuid", description="TODO") + tool_version: Optional[str] = Field(default=None, title="tool_version", description="TODO") + history_id: Optional[DecodedDatabaseIdField] = Field(default=None, title="history_id", description="TODO") + inputs: Optional[Dict[str, Any]] = Field(default_factory=lambda: {}, title="Inputs", description="TODO") + use_cached_jobs: Optional[bool] = Field(default=None, title="use_cached_jobs") + rerun_remap_job_id: Optional[DecodedDatabaseIdField] = Field( + default=None, title="rerun_remap_job_id", description="TODO" + ) + send_email_notification: bool = Field(default=False, title="Send Email Notification", description="TODO") + + +class JobCreateResponse(BaseModel): + tool_request_id: EncodedDatabaseIdField + task_result: AsyncTaskResultSummary class JobIndexViewEnum(str, Enum): @@ -39,6 +99,7 @@ class JobsService(ServiceBase): job_manager: JobManager job_search: JobSearch hda_manager: hdas.HDAManager + history_manager: HistoryManager def __init__( self, @@ -46,11 +107,13 @@ def __init__( job_manager: JobManager, job_search: JobSearch, hda_manager: hdas.HDAManager, + history_manager: HistoryManager, ): super().__init__(security=security) self.job_manager = job_manager self.job_search = job_search self.hda_manager = hda_manager + self.history_manager = history_manager def show( self, @@ -149,3 +212,62 @@ def __dictify_association(self, trans, job_dataset_association) -> JobAssociatio else: dataset_dict = {"src": "ldda", "id": dataset.id} return JobAssociation(name=job_dataset_association.name, dataset=dataset_dict) + + def dictify_output_collection_associations(self, trans, job: model.Job) -> List[JobOutputCollectionAssociation]: + output_associations: List[JobOutputCollectionAssociation] = [] + for job_output_collection_association in job.output_dataset_collection_instances: + ref_dict = {"src": "hdca", "id": 
job_output_collection_association.dataset_collection_id} + output_associations.append( + JobOutputCollectionAssociation( + name=job_output_collection_association.name, + dataset_collection_instance=ref_dict, + ) + ) + return output_associations + + def create(self, trans: ProvidesHistoryContext, job_request: JobRequest) -> JobCreateResponse: + tool_run_reference = ToolRunReference(job_request.tool_id, job_request.tool_uuid, job_request.tool_version) + tool = validate_tool_for_running(trans, tool_run_reference) + history_id = job_request.history_id + target_history = None + if history_id is not None: + target_history = self.history_manager.get_owned(history_id, trans.user, current_history=trans.history) + inputs = job_request.inputs + request_state = RequestToolState(inputs or {}) + request_state.validate(tool) + request_internal_state = decode(request_state, tool, trans.security.decode_id) + tool_request = ToolRequest() + # TODO: hash and such... + tool_source_model = ToolSourceModel( + source=[p.model_dump() for p in tool.parameters], + hash="TODO", + ) + tool_request.request = request_internal_state.input_state + tool_request.tool_source = tool_source_model + tool_request.state = ToolRequest.states.NEW + tool_request.history = target_history + sa_session = trans.sa_session + sa_session.add(tool_source_model) + sa_session.add(tool_request) + with transaction(sa_session): + sa_session.commit() + tool_request_id = tool_request.id + tool_source = ToolSource( + raw_tool_source=tool.tool_source.to_string(), + tool_dir=tool.tool_dir, + ) + task_request = QueueJobs( + user=trans.async_request_user, + history_id=target_history and target_history.id, + tool_source=tool_source, + tool_request_id=tool_request_id, + use_cached_jobs=job_request.use_cached_jobs or False, + rerun_remap_job_id=job_request.rerun_remap_job_id, + ) + result = queue_jobs.delay(request=task_request) + return JobCreateResponse( + **{ + "tool_request_id": tool_request_id, + "task_result": async_task_summary(result), + } + ) diff --git a/lib/galaxy/webapps/galaxy/services/tools.py b/lib/galaxy/webapps/galaxy/services/tools.py index 6897965d112f..bd97238ef67e 100644 --- a/lib/galaxy/webapps/galaxy/services/tools.py +++ b/lib/galaxy/webapps/galaxy/services/tools.py @@ -4,8 +4,10 @@ from json import dumps from typing import ( Any, + cast, Dict, List, + NamedTuple, Optional, Union, ) @@ -34,7 +36,9 @@ FilesPayload, ) from galaxy.security.idencoding import IdEncodingHelper +from galaxy.tool_util.parameters import ToolParameterT from galaxy.tools import Tool +from galaxy.tools._types import InputFormatT from galaxy.tools.search import ToolBoxSearch from galaxy.webapps.galaxy.services._fetch_util import validate_and_normalize_targets from galaxy.webapps.galaxy.services.base import ServiceBase @@ -42,6 +46,39 @@ log = logging.getLogger(__name__) +class ToolRunReference(NamedTuple): + tool_id: Optional[str] + tool_uuid: Optional[str] + tool_version: Optional[str] + + +def get_tool(trans: ProvidesHistoryContext, tool_ref: ToolRunReference) -> Tool: + get_kwds = dict( + tool_id=tool_ref.tool_id, + tool_uuid=tool_ref.tool_uuid, + tool_version=tool_ref.tool_version, + ) + + tool = trans.app.toolbox.get_tool(**get_kwds) + if not tool: + log.debug(f"Not found tool with kwds [{tool_ref}]") + raise exceptions.ToolMissingException("Tool not found.") + return tool + + +def validate_tool_for_running(trans: ProvidesHistoryContext, tool_ref: ToolRunReference) -> Tool: + if trans.user_is_bootstrap_admin: + raise 
exceptions.RealUserRequiredException("Only real users can execute tools or run jobs.") + + if tool_ref.tool_id is None and tool_ref.tool_uuid is None: + raise exceptions.RequestParameterMissingException("Must specify a valid tool_id to use this endpoint.") + + tool = get_tool(trans, tool_ref) + if not tool.allow_user_access(trans.user): + raise exceptions.ItemAccessibilityException("Tool not accessible.") + return tool + + class ToolsService(ServiceBase): def __init__( self, @@ -55,6 +92,14 @@ def __init__( self.toolbox_search = toolbox_search self.history_manager = history_manager + def inputs( + self, + trans: ProvidesHistoryContext, + tool_ref: ToolRunReference, + ) -> List[ToolParameterT]: + tool = get_tool(trans, tool_ref) + return tool.parameters + def create_fetch( self, trans: ProvidesHistoryContext, @@ -100,37 +145,14 @@ def create_fetch( return self._create(trans, create_payload) def _create(self, trans: ProvidesHistoryContext, payload, **kwd): - if trans.user_is_bootstrap_admin: - raise exceptions.RealUserRequiredException("Only real users can execute tools or run jobs.") action = payload.get("action") if action == "rerun": raise Exception("'rerun' action has been deprecated") - # Get tool. - tool_version = payload.get("tool_version") - tool_id = payload.get("tool_id") - tool_uuid = payload.get("tool_uuid") - get_kwds = dict( - tool_id=tool_id, - tool_uuid=tool_uuid, - tool_version=tool_version, + tool_run_reference = ToolRunReference( + payload.get("tool_id"), payload.get("tool_uuid"), payload.get("tool_version") ) - if tool_id is None and tool_uuid is None: - raise exceptions.RequestParameterMissingException("Must specify either a tool_id or a tool_uuid.") - - tool = trans.app.toolbox.get_tool(**get_kwds) - if not tool: - log.debug(f"Not found tool with kwds [{get_kwds}]") - raise exceptions.ToolMissingException("Tool not found.") - if not tool.allow_user_access(trans.user): - raise exceptions.ItemAccessibilityException("Tool not accessible.") - if self.config.user_activation_on: - if not trans.user: - log.warning("Anonymous user attempts to execute tool, but account activation is turned on.") - elif not trans.user.active: - log.warning( - f'User "{trans.user.email}" attempts to execute tool, but account activation is turned on and user account is not active.' - ) + tool = validate_tool_for_running(trans, tool_run_reference) # Set running history from payload parameters. 
# History not set correctly as part of this API call for @@ -166,7 +188,10 @@ def _create(self, trans: ProvidesHistoryContext, payload, **kwd): inputs.get("use_cached_job", "false") ) preferred_object_store_id = payload.get("preferred_object_store_id") - input_format = str(payload.get("input_format", "legacy")) + input_format_raw = str(payload.get("input_format", "legacy")) + if input_format_raw not in ["legacy", "21.01"]: + raise exceptions.RequestParameterInvalidException(f"invalid input format {input_format_raw}") + input_format = cast(InputFormatT, input_format_raw) if "data_manager_mode" in payload: incoming["__data_manager_mode"] = payload["data_manager_mode"] vars = tool.handle_input( diff --git a/lib/galaxy_test/api/conftest.py b/lib/galaxy_test/api/conftest.py index 74d8958b9158..bb21408aa00a 100644 --- a/lib/galaxy_test/api/conftest.py +++ b/lib/galaxy_test/api/conftest.py @@ -148,7 +148,7 @@ def required_tool(dataset_populator: DatasetPopulator, history_id: str, required return tool -@pytest.fixture(params=["legacy", "21.01"]) +@pytest.fixture(params=["legacy", "21.01", "request"]) def tool_input_format(request) -> Iterator[DescribeToolInputs]: yield DescribeToolInputs(request.param) diff --git a/lib/galaxy_test/api/test_tool_execute.py b/lib/galaxy_test/api/test_tool_execute.py index 95bf43e27921..7263747ed817 100644 --- a/lib/galaxy_test/api/test_tool_execute.py +++ b/lib/galaxy_test/api/test_tool_execute.py @@ -23,15 +23,33 @@ @requires_tool_id("multi_data_param") -def test_multidata_param(target_history: TargetHistory, required_tool: RequiredTool): +def test_multidata_param( + target_history: TargetHistory, required_tool: RequiredTool, tool_input_format: DescribeToolInputs +): hda1 = target_history.with_dataset("1\t2\t3").src_dict hda2 = target_history.with_dataset("4\t5\t6").src_dict - execution = required_tool.execute.with_inputs( - { - "f1": {"batch": False, "values": [hda1, hda2]}, - "f2": {"batch": False, "values": [hda2, hda1]}, - } + inputs = ( + tool_input_format.when.flat( + { + "f1": {"batch": False, "values": [hda1, hda2]}, + "f2": {"batch": False, "values": [hda2, hda1]}, + } + ) + .when.nested( + { + "f1": {"batch": False, "values": [hda1, hda2]}, + "f2": {"batch": False, "values": [hda2, hda1]}, + "advanced": {"full": "no"}, # this shouldn't be needed is it outside branch? 
+ } + ) + .when.request( + { + "f1": [hda1, hda2], + "f2": [hda2, hda1], + } + ) ) + execution = required_tool.execute.with_inputs(inputs) execution.assert_has_job(0).with_output("out1").with_contents("1\t2\t3\n4\t5\t6\n") execution.assert_has_job(0).with_output("out2").with_contents("4\t5\t6\n1\t2\t3\n") @@ -249,20 +267,33 @@ def test_multi_run_in_repeat( multi_run_in_repeat_datasets: MultiRunInRepeatFixtures, tool_input_format: DescribeToolInputs, ): - inputs = tool_input_format.when.flat( - { - "input1": {"batch": False, "values": [multi_run_in_repeat_datasets.common_dataset]}, - "queries_0|input2": {"batch": True, "values": multi_run_in_repeat_datasets.repeat_datasets}, - } - ).when.nested( - { - "input1": {"batch": False, "values": [multi_run_in_repeat_datasets.common_dataset]}, - "queries": [ - { - "input2": {"batch": True, "values": multi_run_in_repeat_datasets.repeat_datasets}, - } - ], - } + inputs = ( + tool_input_format.when.flat( + { + "input1": {"batch": False, "values": [multi_run_in_repeat_datasets.common_dataset]}, + "queries_0|input2": {"batch": True, "values": multi_run_in_repeat_datasets.repeat_datasets}, + } + ) + .when.nested( + { + "input1": {"batch": False, "values": [multi_run_in_repeat_datasets.common_dataset]}, + "queries": [ + { + "input2": {"batch": True, "values": multi_run_in_repeat_datasets.repeat_datasets}, + } + ], + } + ) + .when.request( + { + "input1": multi_run_in_repeat_datasets.common_dataset, + "queries": [ + { + "input2": {"__class__": "Batch", "values": multi_run_in_repeat_datasets.repeat_datasets}, + } + ], + } + ) ) execute = required_tool.execute.with_inputs(inputs) _check_multi_run_in_repeat(execute) @@ -275,20 +306,33 @@ def test_multi_run_in_repeat_mismatch( tool_input_format: DescribeToolInputs, ): """Same test as above but without the batch wrapper around the common dataset shared between multirun.""" - inputs = tool_input_format.when.flat( - { - "input1": multi_run_in_repeat_datasets.common_dataset, - "queries_0|input2": {"batch": True, "values": multi_run_in_repeat_datasets.repeat_datasets}, - } - ).when.nested( - { - "input1": multi_run_in_repeat_datasets.common_dataset, - "queries": [ - { - "input2": {"batch": True, "values": multi_run_in_repeat_datasets.repeat_datasets}, - } - ], - } + inputs = ( + tool_input_format.when.flat( + { + "input1": multi_run_in_repeat_datasets.common_dataset, + "queries_0|input2": {"batch": True, "values": multi_run_in_repeat_datasets.repeat_datasets}, + } + ) + .when.nested( + { + "input1": multi_run_in_repeat_datasets.common_dataset, + "queries": [ + { + "input2": {"batch": True, "values": multi_run_in_repeat_datasets.repeat_datasets}, + } + ], + } + ) + .when.request( + { + "input1": multi_run_in_repeat_datasets.common_dataset, + "queries": [ + { + "input2": {"__class__": "Batch", "values": multi_run_in_repeat_datasets.repeat_datasets}, + } + ], + } + ) ) execute = required_tool.execute.with_inputs(inputs) _check_multi_run_in_repeat(execute) @@ -346,18 +390,39 @@ def test_multirun_on_multiple_inputs_unlinked( two_multi_run_datasets: TwoMultiRunsFixture, tool_input_format: DescribeToolInputs, ): - inputs = tool_input_format.when.flat( - { - "input1": {"batch": True, "linked": False, "values": two_multi_run_datasets.first_two_datasets}, - "queries_0|input2": {"batch": True, "linked": False, "values": two_multi_run_datasets.second_two_datasets}, - } - ).when.nested( - { - "input1": {"batch": True, "linked": False, "values": two_multi_run_datasets.first_two_datasets}, - "queries": [ - {"input2": {"batch": 
True, "linked": False, "values": two_multi_run_datasets.second_two_datasets}}, - ], - } + inputs = ( + tool_input_format.when.flat( + { + "input1": {"batch": True, "linked": False, "values": two_multi_run_datasets.first_two_datasets}, + "queries_0|input2": { + "batch": True, + "linked": False, + "values": two_multi_run_datasets.second_two_datasets, + }, + } + ) + .when.nested( + { + "input1": {"batch": True, "linked": False, "values": two_multi_run_datasets.first_two_datasets}, + "queries": [ + {"input2": {"batch": True, "linked": False, "values": two_multi_run_datasets.second_two_datasets}}, + ], + } + ) + .when.request( + { + "input1": {"__class__": "Batch", "linked": False, "values": two_multi_run_datasets.first_two_datasets}, + "queries": [ + { + "input2": { + "__class__": "Batch", + "linked": False, + "values": two_multi_run_datasets.second_two_datasets, + } + }, + ], + } + ) ) execute = required_tool.execute.with_inputs(inputs) execute.assert_has_n_jobs(4) @@ -372,7 +437,9 @@ def test_map_over_collection( target_history: TargetHistory, required_tool: RequiredTool, tool_input_format: DescribeToolInputs ): hdca = target_history.with_pair(["123", "456"]) - inputs = tool_input_format.when.any({"input1": {"batch": True, "values": [hdca.src_dict]}}) + legacy = {"input1": {"batch": True, "values": [hdca.src_dict]}} + request = {"input1": {"__class__": "Batch", "values": [hdca.src_dict]}} + inputs = tool_input_format.when.flat(legacy).when.nested(legacy).when.request(request) execute = required_tool.execute.with_inputs(inputs) execute.assert_has_n_jobs(2).assert_creates_n_implicit_collections(1) output_collection = execute.assert_creates_implicit_collection(0) diff --git a/lib/galaxy_test/api/test_tool_execution.py b/lib/galaxy_test/api/test_tool_execution.py new file mode 100644 index 000000000000..226542cf5da7 --- /dev/null +++ b/lib/galaxy_test/api/test_tool_execution.py @@ -0,0 +1,135 @@ +""" +""" + +from typing import ( + Any, + Dict, +) + +import requests + +from galaxy_test.base.api_asserts import assert_status_code_is_ok +from galaxy_test.base.populators import ( + DatasetPopulator, + skip_without_tool, +) +from ._framework import ApiTestCase + + +class TestToolExecution(ApiTestCase): + dataset_populator: DatasetPopulator + + def setUp(self): + super().setUp() + self.dataset_populator = DatasetPopulator(self.galaxy_interactor) + + @skip_without_tool("gx_int") + def test_validation(self): + with self.dataset_populator.test_history() as history_id: + self._assert_request_validates("gx_int", history_id, {"parameter": 5}) + self._assert_request_invalid("gx_int", history_id, {"parameter": None}) + self._assert_request_invalid("gx_int", history_id, {"parameter": "5"}) + + @skip_without_tool("gx_int") + def test_execution(self): + with self.dataset_populator.test_history() as history_id: + response = self._run("gx_int", history_id, {"parameter": 5}) + assert_status_code_is_ok(response) + response_json = response.json() + tool_request_id = response_json.get("tool_request_id") + task_result = response_json["task_result"] + history_tool_requests = self.dataset_populator.get_history_tool_requests(history_id) + assert tool_request_id in [tr["id"] for tr in history_tool_requests] + self.dataset_populator.wait_on_task_object(task_result) + state = self.dataset_populator.wait_on_tool_request(tool_request_id) + assert state + jobs = self.galaxy_interactor.jobs_for_tool_request(tool_request_id) + self.dataset_populator.wait_for_jobs(jobs, assert_ok=True) + + @skip_without_tool("gx_data") + def 
test_execution_with_src_urls(self): + with self.dataset_populator.test_history() as history_id: + response = self._run( + "gx_data", + history_id, + { + "parameter": { + "src": "url", + "url": "https://raw.githubusercontent.com/galaxyproject/planemo/7be1bf5b3971a43eaa73f483125bfb8cabf1c440/tests/data/hello.txt", + "ext": "txt", + } + }, + ) + assert_status_code_is_ok(response) + response_json = response.json() + tool_request_id = response_json.get("tool_request_id") + task_result = response_json["task_result"] + self.dataset_populator.wait_on_task_object(task_result) + state = self.dataset_populator.wait_on_tool_request(tool_request_id) + assert state, str(self.dataset_populator.get_tool_request(tool_request_id)) + jobs = self.galaxy_interactor.jobs_for_tool_request(tool_request_id) + self.dataset_populator.wait_for_jobs(jobs, assert_ok=True) + if len(jobs) != 1: + raise Exception(f"Found incorrect number of jobs for tool request - was expecting a single job {jobs}") + assert len(jobs) == 1, jobs + job_id = jobs[0]["id"] + job_outputs = self.galaxy_interactor.job_outputs(job_id) + assert len(job_outputs) == 1 + job_output = job_outputs[0] + assert job_output["name"] == "output" + content = self.dataset_populator.get_history_dataset_content(history_id, dataset=job_output["dataset"]) + assert content == "Hello World!" + + # verify input was not left deferred and materialized before the job started + input_dataset_details = self.dataset_populator.get_history_dataset_details(history_id, hid=1) + assert input_dataset_details["state"] == "ok", input_dataset_details + + @skip_without_tool("gx_data") + def test_execution_with_deferred_src_urls(self): + with self.dataset_populator.test_history() as history_id: + response = self._run( + "gx_data", + history_id, + { + "parameter": { + "src": "url", + "url": "https://raw.githubusercontent.com/galaxyproject/planemo/7be1bf5b3971a43eaa73f483125bfb8cabf1c440/tests/data/hello.txt", + "ext": "txt", + "deferred": True, + } + }, + ) + assert_status_code_is_ok(response) + response_json = response.json() + tool_request_id = response_json.get("tool_request_id") + task_result = response_json["task_result"] + self.dataset_populator.wait_on_task_object(task_result) + state = self.dataset_populator.wait_on_tool_request(tool_request_id) + assert state, str(self.dataset_populator.get_tool_request(tool_request_id)) + jobs = self.galaxy_interactor.jobs_for_tool_request(tool_request_id) + self.dataset_populator.wait_for_jobs(jobs, assert_ok=True) + if len(jobs) != 1: + raise Exception(f"Found incorrect number of jobs for tool request - was expecting a single job {jobs}") + assert len(jobs) == 1, jobs + job_id = jobs[0]["id"] + job_outputs = self.galaxy_interactor.job_outputs(job_id) + assert len(job_outputs) == 1 + job_output = job_outputs[0] + assert job_output["name"] == "output" + content = self.dataset_populator.get_history_dataset_content(history_id, dataset=job_output["dataset"]) + assert content == "Hello World!" 
+
+            # verify the input was left deferred - it must have been materialized just for the job
+            input_dataset_details = self.dataset_populator.get_history_dataset_details(history_id, hid=1)
+            assert input_dataset_details["state"] == "deferred", input_dataset_details
+
+    def _assert_request_validates(self, tool_id: str, history_id: str, inputs: Dict[str, Any]):
+        response = self._run(tool_id, history_id, inputs)
+        assert response.status_code == 200
+
+    def _assert_request_invalid(self, tool_id: str, history_id: str, inputs: Dict[str, Any]):
+        response = self._run(tool_id, history_id, inputs)
+        assert response.status_code == 400
+
+    def _run(self, tool_id: str, history_id: str, inputs: Dict[str, Any]) -> requests.Response:
+        return self.dataset_populator.tool_request_raw(tool_id, inputs, history_id)
diff --git a/lib/galaxy_test/base/populators.py b/lib/galaxy_test/base/populators.py
index 3e47c70c18f8..93ae66c832e3 100644
--- a/lib/galaxy_test/base/populators.py
+++ b/lib/galaxy_test/base/populators.py
@@ -144,6 +144,7 @@
 DEFAULT_TIMEOUT = 60  # Secs to wait for state to turn ok
 SKIP_FLAKEY_TESTS_ON_ERROR = os.environ.get("GALAXY_TEST_SKIP_FLAKEY_TESTS_ON_ERROR", None)
+INPUT_FORMAT_T = Literal["legacy", "21.01", "request"]
 
 PRIVATE_ROLE_TYPE = "private"
@@ -1013,6 +1014,15 @@ def run_tool_raw(self, tool_id: Optional[str], inputs: dict, history_id: str, **kwds):
         payload = self.run_tool_payload(tool_id, inputs, history_id, **kwds)
         return self.tools_post(payload)
 
+    def tool_request_raw(self, tool_id: str, inputs: Dict[str, Any], history_id: str) -> Response:
+        payload = {
+            "tool_id": tool_id,
+            "history_id": history_id,
+            "inputs": inputs,
+        }
+        response = self._post("jobs", data=payload, json=True)
+        return response
+
     def run_tool(self, tool_id: str, inputs: dict, history_id: str, **kwds):
         tool_response = self.run_tool_raw(tool_id, inputs, history_id, **kwds)
         api_asserts.assert_status_code_is(tool_response, 200)
@@ -1528,8 +1538,38 @@ def is_ready():
         wait_on(is_ready, "waiting for download to become ready")
         assert is_ready()
 
+    def wait_on_tool_request(self, tool_request_id: str):
+        # should this defer to the interactor's copy of this method?
+
+        def state():
+            state_response = self._get(f"tool_requests/{tool_request_id}/state")
+            state_response.raise_for_status()
+            return state_response.json()
+
+        def is_ready():
+            is_complete = state() in ["submitted", "failed"]
+            return True if is_complete else None
+
+        wait_on(is_ready, "waiting for tool request to submit")
+        return state() == "submitted"
+
+    def get_tool_request(self, tool_request_id: str) -> Dict[str, Any]:
+        response = self._get(f"tool_requests/{tool_request_id}")
+        api_asserts.assert_status_code_is_ok(response)
+        return response.json()
+
+    def get_history_tool_requests(self, history_id: str) -> List[Dict[str, Any]]:
+        response = self._get(f"histories/{history_id}/tool_requests")
+        api_asserts.assert_status_code_is_ok(response)
+        return response.json()
+
     def wait_on_task(self, async_task_response: Response):
-        task_id = async_task_response.json()["id"]
+        response_json = async_task_response.json()
+        return self.wait_on_task_object(response_json)
+
+    def wait_on_task_object(self, async_task_json: Dict[str, Any]):
+        assert "id" in async_task_json, f"Task response {async_task_json} does not contain expected 'id' field."
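+        # the "id" here is the Celery task id surfaced by async_task_summary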
+        task_id = async_task_json["id"]
         return self.wait_on_task_id(task_id)
 
     def wait_on_task_id(self, task_id: str):
@@ -3680,10 +3720,10 @@ def execute(self) -> "DescribeToolExecution":
 
 class DescribeToolInputs:
-    _input_format: str = "legacy"
+    _input_format: INPUT_FORMAT_T = "legacy"
     _inputs: Optional[Dict[str, Any]]
 
-    def __init__(self, input_format: str):
+    def __init__(self, input_format: INPUT_FORMAT_T):
         self._input_format = input_format
         self._inputs = None
 
@@ -3697,7 +3737,12 @@ def flat(self, inputs: Dict[str, Any]) -> Self:
         return self
 
     def nested(self, inputs: Dict[str, Any]) -> Self:
-        if self._input_format == "21.01":
+        if self._input_format in ["21.01", "request"]:
+            self._inputs = inputs
+            return self
+
+    def request(self, inputs: Dict[str, Any]) -> Self:
+        if self._input_format in ["request"]:
             self._inputs = inputs
             return self
 
@@ -3710,7 +3755,8 @@ def when(self) -> Self:
 class DescribeToolExecution:
     _history_id: Optional[str] = None
     _execute_response: Optional[Response] = None
-    _input_format: Optional[str] = None
+    _tool_request_id: Optional[str] = None  # the tool request ID when input_format == "request"
+    _input_format: Optional[INPUT_FORMAT_T] = None
     _inputs: Dict[str, Any]
 
     def __init__(self, dataset_populator: BaseDatasetPopulator, tool_id: str):
@@ -3739,14 +3785,27 @@ def with_nested_inputs(self, inputs: Dict[str, Any]) -> Self:
         self._input_format = "21.01"
         return self
 
+    def with_request(self, inputs: Dict[str, Any]) -> Self:
+        self._inputs = inputs
+        self._input_format = "request"
+        return self
+
     def _execute(self):
         kwds = {}
         if self._input_format is not None:
             kwds["input_format"] = self._input_format
         history_id = self._ensure_history_id
-        self._execute_response = self._dataset_populator.run_tool_raw(
-            self._tool_id, self._inputs, history_id, assert_ok=False, **kwds
-        )
+        if self._input_format == "request":
+            execute_response = self._dataset_populator.tool_request_raw(self._tool_id, self._inputs, history_id)
+            api_asserts.assert_status_code_is_ok(execute_response)
+            response_json = execute_response.json()
+            tool_request_id = response_json.get("tool_request_id")
+            self._dataset_populator.wait_on_tool_request(tool_request_id)
+            self._execute_response = execute_response
+        else:
+            self._execute_response = self._dataset_populator.run_tool_raw(
+                self._tool_id, self._inputs, history_id, assert_ok=False, **kwds
+            )
 
     @property
     def _ensure_history_id(self) -> str:
@@ -3764,25 +3823,56 @@ def _assert_executed_ok(self) -> Dict[str, Any]:
         execute_response = self._execute_response
         assert execute_response is not None
         api_asserts.assert_status_code_is_ok(execute_response)
+        if self._input_format == "request":
+            response_json = execute_response.json()
+            tool_request_id = response_json.get("tool_request_id")
+            task_result = response_json["task_result"]
+            self._dataset_populator.wait_on_task_object(task_result)
+            self._tool_request_id = tool_request_id
         return execute_response.json()
 
+    @property
+    def _jobs(self) -> List[Dict[str, Any]]:
+        if self._input_format == "request":
+            tool_request_id = self._tool_request_id
+            assert tool_request_id, "request not executed"
+            jobs = list(reversed(self._dataset_populator.galaxy_interactor.jobs_for_tool_request(tool_request_id)))
+        else:
+            response = self._assert_executed_ok()
+            jobs = response["jobs"]
+        return jobs
+
+    @property
+    def _implicit_collections(self) -> List[Dict[str, Any]]:
+        if self._input_format == "request":
+            tool_request_id = self._tool_request_id
+            assert tool_request_id, "request not executed"
+            response =
self._dataset_populator.get_tool_request(tool_request_id) + collections = [c["dataset_collection_instance"] for c in response["implicit_collection_outputs"]] + else: + response = self._assert_executed_ok() + collections = response["implicit_collections"] + return collections + def assert_has_n_jobs(self, n: int) -> Self: - response = self._assert_executed_ok() - jobs = response["jobs"] - if len(jobs) != n: - raise AssertionError(f"Expected tool execution to produce {n} jobs but it produced {len(jobs)}") + self._assert_executed_ok() + jobs = self._jobs + num_jobs = len(jobs) + if num_jobs != n: + raise AssertionError(f"Expected tool execution to produce {n} jobs but it produced {num_jobs}") return self def assert_creates_n_implicit_collections(self, n: int) -> Self: - response = self._assert_executed_ok() - collections = response["implicit_collections"] + self._assert_executed_ok() + collections = self._implicit_collections if len(collections) != n: raise AssertionError(f"Expected tool execution to produce {n} implicit but it produced {len(collections)}") return self def assert_creates_implicit_collection(self, index: Union[str, int]) -> "DescribeToolExecutionOutputCollection": - response = self._assert_executed_ok() - collections = response["implicit_collections"] + self._assert_executed_ok() + collections = self._implicit_collections assert isinstance(index, int) # TODO: implement and then prefer str. history_id = self._ensure_history_id return DescribeToolExecutionOutputCollection(self._dataset_populator, history_id, collections[index]["id"]) @@ -3792,8 +3882,8 @@ def assert_has_single_job(self) -> DescribeJob: return self.assert_has_n_jobs(1).assert_has_job(0) def assert_has_job(self, job_index: int = 0) -> DescribeJob: - response = self._assert_executed_ok() - job = response["jobs"][job_index] + self._assert_executed_ok() + job = self._jobs[job_index] history_id = self._ensure_history_id return DescribeJob(self._dataset_populator, history_id, job["id"]) @@ -3805,8 +3895,8 @@ def that_fails(self) -> DescribeFailure: if execute_response.status_code != 200: return DescribeFailure(execute_response) else: - response = self._assert_executed_ok() - jobs = response["jobs"] + self._assert_executed_ok() + jobs = self._jobs for job in jobs: final_state = self._dataset_populator.wait_for_job(job["id"]) assert final_state == "error" diff --git a/test/functional/test_toolbox_pytest.py b/test/functional/test_toolbox_pytest.py index 896e3609913e..cd6314a9fc85 100644 --- a/test/functional/test_toolbox_pytest.py +++ b/test/functional/test_toolbox_pytest.py @@ -1,11 +1,16 @@ import os from typing import ( + cast, List, NamedTuple, ) import pytest +from galaxy.tool_util.verify.interactor import ( + DEFAULT_USE_LEGACY_API, + UseLegacyApiT, +) from galaxy_test.api._framework import ApiTestCase from galaxy_test.driver.driver_util import GalaxyTestDriver @@ -61,4 +66,7 @@ class TestFrameworkTools(ApiTestCase): @pytest.mark.parametrize("testcase", cases(), ids=idfn) def test_tool(self, testcase: ToolTest): - self._test_driver.run_tool_test(testcase.tool_id, testcase.test_index, tool_version=testcase.tool_version) + use_legacy_api = cast(UseLegacyApiT, os.environ.get("GALAXY_TEST_USE_LEGACY_TOOL_API", DEFAULT_USE_LEGACY_API)) + self._test_driver.run_tool_test( + testcase.tool_id, testcase.test_index, tool_version=testcase.tool_version, use_legacy_api=use_legacy_api + ) diff --git a/test/unit/tool_util/test_parameter_convert.py b/test/unit/tool_util/test_parameter_convert.py index 38efa9105aed..b781ca491c19 
100644 --- a/test/unit/tool_util/test_parameter_convert.py +++ b/test/unit/tool_util/test_parameter_convert.py @@ -43,7 +43,19 @@ def test_decode_data(): assert decoded_state.input_state["parameter"]["id"] == EXAMPLE_ID_1 -def test_encode_collection(): +def test_decode_data_batch(): + tool_source = tool_source_for("parameters/gx_data") + bundle = input_models_for_tool_source(tool_source) + request_state = RequestToolState( + {"parameter": {"__class__": "Batch", "values": [{"src": "hda", "id": EXAMPLE_ID_1_ENCODED}]}} + ) + request_state.validate(bundle) + decoded_state = decode(request_state, bundle, _fake_decode) + assert decoded_state.input_state["parameter"]["values"][0]["src"] == "hda" + assert decoded_state.input_state["parameter"]["values"][0]["id"] == EXAMPLE_ID_1 + + +def test_decode_collection(): tool_source = tool_source_for("parameters/gx_data_collection") bundle = input_models_for_tool_source(tool_source) request_state = RequestToolState({"parameter": {"src": "hdca", "id": EXAMPLE_ID_1_ENCODED}}) @@ -119,6 +131,22 @@ def test_landing_encode_data(): assert encoded_state.input_state["parameter"]["id"] == EXAMPLE_ID_1_ENCODED +def test_landing_encode_data_batch(): + tool_source = tool_source_for("parameters/gx_data") + bundle = input_models_for_tool_source(tool_source) + request_state = LandingRequestToolState( + {"parameter": {"__class__": "Batch", "values": [{"src": "hda", "id": EXAMPLE_ID_1_ENCODED}]}} + ) + request_state.validate(bundle) + decoded_state = landing_decode(request_state, bundle, _fake_decode) + assert decoded_state.input_state["parameter"]["values"][0]["src"] == "hda" + assert decoded_state.input_state["parameter"]["values"][0]["id"] == EXAMPLE_ID_1 + + encoded_state = landing_encode(decoded_state, bundle, _fake_encode) + assert encoded_state.input_state["parameter"]["values"][0]["src"] == "hda" + assert encoded_state.input_state["parameter"]["values"][0]["id"] == EXAMPLE_ID_1_ENCODED + + def test_dereference(): tool_source = tool_source_for("parameters/gx_data") bundle = input_models_for_tool_source(tool_source) From 1a9fc382cb174f39add598b99ba84bed19e32a79 Mon Sep 17 00:00:00 2001 From: John Chilton Date: Mon, 7 Oct 2024 11:11:24 -0400 Subject: [PATCH 05/11] Refactoring that lets the tool request API work. --- lib/galaxy/tools/parameters/basic.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/lib/galaxy/tools/parameters/basic.py b/lib/galaxy/tools/parameters/basic.py index bbd0927c3e60..cab76cf4a81d 100644 --- a/lib/galaxy/tools/parameters/basic.py +++ b/lib/galaxy/tools/parameters/basic.py @@ -2485,7 +2485,10 @@ def from_json(self, value, trans, other_values=None): rval = value elif isinstance(value, MutableMapping) and "src" in value and "id" in value: if value["src"] == "hdca": - rval = cast(HistoryDatasetCollectionAssociation, src_id_to_item(sa_session=trans.sa_session, value=value, security=trans.security)) + rval = cast( + HistoryDatasetCollectionAssociation, + src_id_to_item(sa_session=trans.sa_session, value=value, security=trans.security), + ) elif isinstance(value, list): if len(value) > 0: value = value[0] From 90e0c63b07f7f611d24ee3f407919d58ffe7c1f4 Mon Sep 17 00:00:00 2001 From: John Chilton Date: Wed, 18 Sep 2024 15:46:08 -0400 Subject: [PATCH 06/11] Blah.... 
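
The one-line change below relaxes the select parameter legal-value
check to also accept the string coercion of the incoming value. This
appears to matter when the decoded tool request state carries typed
JSON values (an integer, say) for options that are stored as strings.
A minimal sketch of the new behavior, with hypothetical values:

    legal_values = {"1", "2", "3"}

    def is_legal(value):
        # mirrors the updated check: accept the value itself or its string form
        return value in legal_values or str(value) in legal_values

    assert 1 not in legal_values  # the raw integer alone does not match
    assert is_legal(1)            # but its string coercion does
    assert is_legal("2")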
--- lib/galaxy/tools/parameters/basic.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/galaxy/tools/parameters/basic.py b/lib/galaxy/tools/parameters/basic.py index cab76cf4a81d..6aed9c85344f 100644 --- a/lib/galaxy/tools/parameters/basic.py +++ b/lib/galaxy/tools/parameters/basic.py @@ -1087,7 +1087,7 @@ def _select_from_json(self, value, trans, other_values=None, require_legal_value ) if is_runtime_value(value): return None - if value in legal_values: + if value in legal_values or str(value) in legal_values: return value elif value in fallback_values: return fallback_values[value] From 18ffa8cd208ca83950ad08704e73af7fe269d2fb Mon Sep 17 00:00:00 2001 From: John Chilton Date: Tue, 19 Nov 2024 09:50:16 -0500 Subject: [PATCH 07/11] Rebuild schema for tool request APIs... --- client/src/api/schema/schema.ts | 1669 ++++++++++++++++++++++++++++++- 1 file changed, 1640 insertions(+), 29 deletions(-) diff --git a/client/src/api/schema/schema.ts b/client/src/api/schema/schema.ts index 1e9ab944ad2e..7241bcb4d165 100644 --- a/client/src/api/schema/schema.ts +++ b/client/src/api/schema/schema.ts @@ -2502,6 +2502,23 @@ export interface paths { patch?: never; trace?: never; }; + "/api/histories/{history_id}/tool_requests": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + /** Return all the tool requests for the tools submitted to this history. */ + get: operations["tool_requests_api_histories__history_id__tool_requests_get"]; + put?: never; + post?: never; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; "/api/histories/{history_id}/unpublish": { parameters: { query?: never; @@ -2814,7 +2831,8 @@ export interface paths { /** Index */ get: operations["index_api_jobs_get"]; put?: never; - post?: never; + /** Create */ + post: operations["create_api_jobs_post"]; delete?: never; options?: never; head?: never; @@ -4381,6 +4399,40 @@ export interface paths { patch?: never; trace?: never; }; + "/api/tool_requests/{id}": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + /** Get tool request state. */ + get: operations["get_tool_request_api_tool_requests__id__get"]; + put?: never; + post?: never; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/api/tool_requests/{id}/state": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + /** Get tool request state. */ + get: operations["tool_request_state_api_tool_requests__id__state_get"]; + put?: never; + post?: never; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; "/api/tool_shed_repositories": { parameters: { query?: never; @@ -4449,6 +4501,23 @@ export interface paths { patch?: never; trace?: never; }; + "/api/tools/{tool_id}/inputs": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + /** Get tool inputs. 
*/ + get: operations["tool_inputs_api_tools__tool_id__inputs_get"]; + put?: never; + post?: never; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; "/api/tours": { parameters: { query?: never; @@ -6276,6 +6345,39 @@ export interface components { ) | ("cloud" | "quota" | "no_quota" | "restricted" | "user_defined"); }; + /** BaseUrlParameterModel */ + BaseUrlParameterModel: { + /** Argument */ + argument?: string | null; + /** Help */ + help?: string | null; + /** + * Hidden + * @default false + */ + hidden: boolean; + /** + * Is Dynamic + * @default false + */ + is_dynamic: boolean; + /** Label */ + label?: string | null; + /** Name */ + name: string; + /** + * Optional + * @default false + */ + optional: boolean; + /** + * Parameter Type + * @default gx_baseurl + * @constant + * @enum {string} + */ + parameter_type: "gx_baseurl"; + }; /** BasicRoleModel */ BasicRoleModel: { /** @@ -6391,6 +6493,48 @@ export interface components { /** Targets */ targets: unknown; }; + /** BooleanParameterModel */ + BooleanParameterModel: { + /** Argument */ + argument?: string | null; + /** Falsevalue */ + falsevalue?: string | null; + /** Help */ + help?: string | null; + /** + * Hidden + * @default false + */ + hidden: boolean; + /** + * Is Dynamic + * @default false + */ + is_dynamic: boolean; + /** Label */ + label?: string | null; + /** Name */ + name: string; + /** + * Optional + * @default false + */ + optional: boolean; + /** + * Parameter Type + * @default gx_boolean + * @constant + * @enum {string} + */ + parameter_type: "gx_boolean"; + /** Truevalue */ + truevalue?: string | null; + /** + * Value + * @default false + */ + value: boolean | null; + }; /** BroadcastNotificationContent */ BroadcastNotificationContent: { /** @@ -6705,6 +6849,41 @@ export interface components { * @enum {string} */ ColletionSourceType: "hda" | "ldda" | "hdca" | "new_collection"; + /** ColorParameterModel */ + ColorParameterModel: { + /** Argument */ + argument?: string | null; + /** Help */ + help?: string | null; + /** + * Hidden + * @default false + */ + hidden: boolean; + /** + * Is Dynamic + * @default false + */ + is_dynamic: boolean; + /** Label */ + label?: string | null; + /** Name */ + name: string; + /** + * Optional + * @default false + */ + optional: boolean; + /** + * Parameter Type + * @default gx_color + * @constant + * @enum {string} + */ + parameter_type: "gx_color"; + /** Value */ + value?: string | null; + }; /** CompositeDataElement */ CompositeDataElement: { /** Md5 */ @@ -6854,6 +7033,82 @@ export interface components { */ source: string | null; }; + /** ConditionalParameterModel */ + ConditionalParameterModel: { + /** Argument */ + argument?: string | null; + /** Help */ + help?: string | null; + /** + * Hidden + * @default false + */ + hidden: boolean; + /** + * Is Dynamic + * @default false + */ + is_dynamic: boolean; + /** Label */ + label?: string | null; + /** Name */ + name: string; + /** + * Optional + * @default false + */ + optional: boolean; + /** + * Parameter Type + * @default gx_conditional + * @constant + * @enum {string} + */ + parameter_type: "gx_conditional"; + /** Test Parameter */ + test_parameter: + | components["schemas"]["BooleanParameterModel"] + | components["schemas"]["SelectParameterModel"]; + /** Whens */ + whens: components["schemas"]["ConditionalWhen"][]; + }; + /** ConditionalWhen */ + ConditionalWhen: { + /** Discriminator */ + discriminator: boolean | string; + /** Is Default When */ + is_default_when: boolean; + /** 
Parameters */ + parameters: ( + | components["schemas"]["CwlIntegerParameterModel"] + | components["schemas"]["CwlFloatParameterModel"] + | components["schemas"]["CwlStringParameterModel"] + | components["schemas"]["CwlBooleanParameterModel"] + | components["schemas"]["CwlNullParameterModel"] + | components["schemas"]["CwlFileParameterModel"] + | components["schemas"]["CwlDirectoryParameterModel"] + | components["schemas"]["CwlUnionParameterModel"] + | components["schemas"]["TextParameterModel"] + | components["schemas"]["IntegerParameterModel"] + | components["schemas"]["FloatParameterModel"] + | components["schemas"]["BooleanParameterModel"] + | components["schemas"]["HiddenParameterModel"] + | components["schemas"]["SelectParameterModel"] + | components["schemas"]["DataParameterModel"] + | components["schemas"]["DataCollectionParameterModel"] + | components["schemas"]["DataColumnParameterModel"] + | components["schemas"]["DirectoryUriParameterModel"] + | components["schemas"]["RulesParameterModel"] + | components["schemas"]["DrillDownParameterModel"] + | components["schemas"]["GroupTagParameterModel"] + | components["schemas"]["BaseUrlParameterModel"] + | components["schemas"]["GenomeBuildParameterModel"] + | components["schemas"]["ColorParameterModel"] + | components["schemas"]["ConditionalParameterModel"] + | components["schemas"]["RepeatParameterModel"] + | components["schemas"]["SectionParameterModel"] + )[]; + }; /** ConnectAction */ ConnectAction: { /** @@ -7769,6 +8024,155 @@ export interface components { */ username_and_slug?: string | null; }; + /** CwlBooleanParameterModel */ + CwlBooleanParameterModel: { + /** Name */ + name: string; + /** + * Parameter Type + * @default cwl_boolean + * @constant + * @enum {string} + */ + parameter_type: "cwl_boolean"; + }; + /** CwlDirectoryParameterModel */ + CwlDirectoryParameterModel: { + /** Argument */ + argument?: string | null; + /** Help */ + help?: string | null; + /** + * Hidden + * @default false + */ + hidden: boolean; + /** + * Is Dynamic + * @default false + */ + is_dynamic: boolean; + /** Label */ + label?: string | null; + /** Name */ + name: string; + /** + * Optional + * @default false + */ + optional: boolean; + /** + * Parameter Type + * @default cwl_directory + * @constant + * @enum {string} + */ + parameter_type: "cwl_directory"; + }; + /** CwlFileParameterModel */ + CwlFileParameterModel: { + /** Argument */ + argument?: string | null; + /** Help */ + help?: string | null; + /** + * Hidden + * @default false + */ + hidden: boolean; + /** + * Is Dynamic + * @default false + */ + is_dynamic: boolean; + /** Label */ + label?: string | null; + /** Name */ + name: string; + /** + * Optional + * @default false + */ + optional: boolean; + /** + * Parameter Type + * @default cwl_file + * @constant + * @enum {string} + */ + parameter_type: "cwl_file"; + }; + /** CwlFloatParameterModel */ + CwlFloatParameterModel: { + /** Name */ + name: string; + /** + * Parameter Type + * @default cwl_float + * @constant + * @enum {string} + */ + parameter_type: "cwl_float"; + }; + /** CwlIntegerParameterModel */ + CwlIntegerParameterModel: { + /** Name */ + name: string; + /** + * Parameter Type + * @default cwl_integer + * @constant + * @enum {string} + */ + parameter_type: "cwl_integer"; + }; + /** CwlNullParameterModel */ + CwlNullParameterModel: { + /** Name */ + name: string; + /** + * Parameter Type + * @default cwl_null + * @constant + * @enum {string} + */ + parameter_type: "cwl_null"; + }; + /** CwlStringParameterModel */ + 
CwlStringParameterModel: { + /** Name */ + name: string; + /** + * Parameter Type + * @default cwl_string + * @constant + * @enum {string} + */ + parameter_type: "cwl_string"; + }; + /** CwlUnionParameterModel */ + CwlUnionParameterModel: { + /** Name */ + name: string; + /** + * Parameter Type + * @default cwl_union + * @constant + * @enum {string} + */ + parameter_type: "cwl_union"; + /** Parameters */ + parameters: ( + | components["schemas"]["CwlIntegerParameterModel"] + | components["schemas"]["CwlFloatParameterModel"] + | components["schemas"]["CwlStringParameterModel"] + | components["schemas"]["CwlBooleanParameterModel"] + | components["schemas"]["CwlNullParameterModel"] + | components["schemas"]["CwlFileParameterModel"] + | components["schemas"]["CwlDirectoryParameterModel"] + | components["schemas"]["CwlUnionParameterModel"] + )[]; + }; /** * DCESummary * @description Dataset Collection Element summary information. @@ -7858,14 +8262,93 @@ export interface components { */ populated?: boolean; }; - /** DataElementsFromTarget */ - DataElementsFromTarget: { + /** DataCollectionParameterModel */ + DataCollectionParameterModel: { + /** Argument */ + argument?: string | null; + /** Collection Type */ + collection_type?: string | null; /** - * Auto Decompress - * @description Decompress compressed data before sniffing? + * Extensions + * @default [ + * "data" + * ] + */ + extensions: string[]; + /** Help */ + help?: string | null; + /** + * Hidden * @default false */ - auto_decompress: boolean; + hidden: boolean; + /** + * Is Dynamic + * @default false + */ + is_dynamic: boolean; + /** Label */ + label?: string | null; + /** Name */ + name: string; + /** + * Optional + * @default false + */ + optional: boolean; + /** + * Parameter Type + * @default gx_data_collection + * @constant + * @enum {string} + */ + parameter_type: "gx_data_collection"; + /** Value */ + value: Record | null; + }; + /** DataColumnParameterModel */ + DataColumnParameterModel: { + /** Argument */ + argument?: string | null; + /** Help */ + help?: string | null; + /** + * Hidden + * @default false + */ + hidden: boolean; + /** + * Is Dynamic + * @default false + */ + is_dynamic: boolean; + /** Label */ + label?: string | null; + /** Multiple */ + multiple: boolean; + /** Name */ + name: string; + /** + * Optional + * @default false + */ + optional: boolean; + /** + * Parameter Type + * @default gx_data_column + * @constant + * @enum {string} + */ + parameter_type: "gx_data_column"; + }; + /** DataElementsFromTarget */ + DataElementsFromTarget: { + /** + * Auto Decompress + * @description Decompress compressed data before sniffing? 
+ * @default false + */ + auto_decompress: boolean; /** Destination */ destination: | components["schemas"]["HdaDestination"] @@ -7914,6 +8397,55 @@ export interface components { * @enum {string} */ DataItemSourceType: "hda" | "ldda" | "hdca" | "dce" | "dc"; + /** DataParameterModel */ + DataParameterModel: { + /** Argument */ + argument?: string | null; + /** + * Extensions + * @default [ + * "data" + * ] + */ + extensions: string[]; + /** Help */ + help?: string | null; + /** + * Hidden + * @default false + */ + hidden: boolean; + /** + * Is Dynamic + * @default false + */ + is_dynamic: boolean; + /** Label */ + label?: string | null; + /** Max */ + max?: number | null; + /** Min */ + min?: number | null; + /** + * Multiple + * @default false + */ + multiple: boolean; + /** Name */ + name: string; + /** + * Optional + * @default false + */ + optional: boolean; + /** + * Parameter Type + * @default gx_data + * @constant + * @enum {string} + */ + parameter_type: "gx_data"; + }; /** DatasetAssociationRoles */ DatasetAssociationRoles: { /** @@ -8544,6 +9076,49 @@ export interface components { */ username: string; }; + /** DirectoryUriParameterModel */ + DirectoryUriParameterModel: { + /** Argument */ + argument?: string | null; + /** Help */ + help?: string | null; + /** + * Hidden + * @default false + */ + hidden: boolean; + /** + * Is Dynamic + * @default false + */ + is_dynamic: boolean; + /** Label */ + label?: string | null; + /** Name */ + name: string; + /** + * Optional + * @default false + */ + optional: boolean; + /** + * Parameter Type + * @default gx_directory_uri + * @constant + * @enum {string} + */ + parameter_type: "gx_directory_uri"; + /** + * Validators + * @default [] + */ + validators: ( + | components["schemas"]["LengthParameterValidatorModel"] + | components["schemas"]["RegexParameterValidatorModel"] + | components["schemas"]["ExpressionParameterValidatorModel"] + | components["schemas"]["EmptyFieldParameterValidatorModel"] + )[]; + }; /** DisconnectAction */ DisconnectAction: { /** @@ -8587,6 +9162,59 @@ export interface components { /** Version */ version: string; }; + /** DrillDownOptionsDict */ + DrillDownOptionsDict: { + /** Name */ + name: string | null; + /** Options */ + options: components["schemas"]["DrillDownOptionsDict"][]; + /** Selected */ + selected: boolean; + /** Value */ + value: string; + }; + /** DrillDownParameterModel */ + DrillDownParameterModel: { + /** Argument */ + argument?: string | null; + /** Help */ + help?: string | null; + /** + * Hidden + * @default false + */ + hidden: boolean; + /** + * Hierarchy + * @enum {string} + */ + hierarchy: "recurse" | "exact"; + /** + * Is Dynamic + * @default false + */ + is_dynamic: boolean; + /** Label */ + label?: string | null; + /** Multiple */ + multiple: boolean; + /** Name */ + name: string; + /** + * Optional + * @default false + */ + optional: boolean; + /** Options */ + options?: components["schemas"]["DrillDownOptionsDict"][] | null; + /** + * Parameter Type + * @default gx_drill_down + * @constant + * @enum {string} + */ + parameter_type: "gx_drill_down"; + }; /** DrsObject */ DrsObject: { /** @@ -8679,6 +9307,28 @@ export interface components { * @enum {string} */ ElementsFromType: "archive" | "bagit" | "bagit_archive" | "directory"; + /** EmptyFieldParameterValidatorModel */ + EmptyFieldParameterValidatorModel: { + /** + * Implicit + * @default false + */ + implicit: boolean; + /** Message */ + message?: string | null; + /** + * Negate + * @default false + */ + negate: boolean; + /** + * Type 
+ * @default empty_field + * @constant + * @enum {string} + */ + type: "empty_field"; + }; /** EncodedDataItemSourceId */ EncodedDataItemSourceId: { /** @@ -8994,6 +9644,35 @@ export interface components { }; /** ExportTaskListResponse */ ExportTaskListResponse: components["schemas"]["ObjectExportTaskResponse"][]; + /** + * ExpressionParameterValidatorModel + * @description Check if a one line python expression given expression evaluates to True. + * + * The expression is given is the content of the validator tag. + */ + ExpressionParameterValidatorModel: { + /** Expression */ + expression: string; + /** + * Implicit + * @default false + */ + implicit: boolean; + /** Message */ + message?: string | null; + /** + * Negate + * @default false + */ + negate: boolean; + /** + * Type + * @default expression + * @constant + * @enum {string} + */ + type: "expression"; + }; /** ExtraFileEntry */ ExtraFileEntry: { /** @description The class of this entry, either File or Directory. */ @@ -9360,6 +10039,50 @@ export interface components { /** Step */ step: components["schemas"]["StepReferenceByOrderIndex"] | components["schemas"]["StepReferenceByLabel"]; }; + /** FloatParameterModel */ + FloatParameterModel: { + /** Argument */ + argument?: string | null; + /** Help */ + help?: string | null; + /** + * Hidden + * @default false + */ + hidden: boolean; + /** + * Is Dynamic + * @default false + */ + is_dynamic: boolean; + /** Label */ + label?: string | null; + /** Max */ + max?: number | null; + /** Min */ + min?: number | null; + /** Name */ + name: string; + /** + * Optional + * @default false + */ + optional: boolean; + /** + * Parameter Type + * @default gx_float + * @constant + * @enum {string} + */ + parameter_type: "gx_float"; + /** + * Validators + * @default [] + */ + validators: components["schemas"]["InRangeParameterValidatorModel"][]; + /** Value */ + value?: number | null; + }; /** FolderLibraryFolderItem */ FolderLibraryFolderItem: { /** Can Manage */ @@ -9489,6 +10212,41 @@ export interface components { /** Tags */ tags?: string[] | null; }; + /** GenomeBuildParameterModel */ + GenomeBuildParameterModel: { + /** Argument */ + argument?: string | null; + /** Help */ + help?: string | null; + /** + * Hidden + * @default false + */ + hidden: boolean; + /** + * Is Dynamic + * @default false + */ + is_dynamic: boolean; + /** Label */ + label?: string | null; + /** Multiple */ + multiple: boolean; + /** Name */ + name: string; + /** + * Optional + * @default false + */ + optional: boolean; + /** + * Parameter Type + * @default gx_genomebuild + * @constant + * @enum {string} + */ + parameter_type: "gx_genomebuild"; + }; /** * GroupCreatePayload * @description Payload schema for creating a group. 
@@ -9599,6 +10357,41 @@ export interface components { */ url: string; }; + /** GroupTagParameterModel */ + GroupTagParameterModel: { + /** Argument */ + argument?: string | null; + /** Help */ + help?: string | null; + /** + * Hidden + * @default false + */ + hidden: boolean; + /** + * Is Dynamic + * @default false + */ + is_dynamic: boolean; + /** Label */ + label?: string | null; + /** Multiple */ + multiple: boolean; + /** Name */ + name: string; + /** + * Optional + * @default false + */ + optional: boolean; + /** + * Parameter Type + * @default gx_group_tag + * @constant + * @enum {string} + */ + parameter_type: "gx_group_tag"; + }; /** GroupUpdatePayload */ GroupUpdatePayload: { /** name of the group */ @@ -11198,6 +11991,51 @@ export interface components { HelpForumUser: { [key: string]: unknown; }; + /** HiddenParameterModel */ + HiddenParameterModel: { + /** Argument */ + argument?: string | null; + /** Help */ + help?: string | null; + /** + * Hidden + * @default false + */ + hidden: boolean; + /** + * Is Dynamic + * @default false + */ + is_dynamic: boolean; + /** Label */ + label?: string | null; + /** Name */ + name: string; + /** + * Optional + * @default false + */ + optional: boolean; + /** + * Parameter Type + * @default gx_hidden + * @constant + * @enum {string} + */ + parameter_type: "gx_hidden"; + /** + * Validators + * @default [] + */ + validators: ( + | components["schemas"]["LengthParameterValidatorModel"] + | components["schemas"]["RegexParameterValidatorModel"] + | components["schemas"]["ExpressionParameterValidatorModel"] + | components["schemas"]["EmptyFieldParameterValidatorModel"] + )[]; + /** Value */ + value: string | null; + }; /** * HistoryActiveContentCounts * @description Contains the number of active, deleted or hidden items in a History. 
@@ -11610,6 +12448,42 @@ export interface components { */ uri: string; }; + /** InRangeParameterValidatorModel */ + InRangeParameterValidatorModel: { + /** + * Exclude Max + * @default false + */ + exclude_max: boolean; + /** + * Exclude Min + * @default false + */ + exclude_min: boolean; + /** + * Implicit + * @default false + */ + implicit: boolean; + /** Max */ + max?: number | null; + /** Message */ + message?: string | null; + /** Min */ + min?: number | null; + /** + * Negate + * @default false + */ + negate: boolean; + /** + * Type + * @default in_range + * @constant + * @enum {string} + */ + type: "in_range"; + }; /** InputDataCollectionStep */ InputDataCollectionStep: { /** @@ -11853,6 +12727,47 @@ export interface components { /** Uninstalled */ uninstalled: boolean; }; + /** IntegerParameterModel */ + IntegerParameterModel: { + /** Argument */ + argument?: string | null; + /** Help */ + help?: string | null; + /** + * Hidden + * @default false + */ + hidden: boolean; + /** + * Is Dynamic + * @default false + */ + is_dynamic: boolean; + /** Label */ + label?: string | null; + /** Max */ + max?: number | null; + /** Min */ + min?: number | null; + /** Name */ + name: string; + /** Optional */ + optional: boolean; + /** + * Parameter Type + * @default gx_integer + * @constant + * @enum {string} + */ + parameter_type: "gx_integer"; + /** + * Validators + * @default [] + */ + validators: components["schemas"]["InRangeParameterValidatorModel"][]; + /** Value */ + value?: number | null; + }; /** InvocationCancellationHistoryDeletedResponse */ InvocationCancellationHistoryDeletedResponse: { /** @@ -12778,6 +13693,15 @@ export interface components { */ stdout?: string | null; }; + /** JobCreateResponse */ + JobCreateResponse: { + task_result: components["schemas"]["AsyncTaskResultSummary"]; + /** + * Tool Request Id + * @example 0123456789ABCDEF + */ + tool_request_id: string; + }; /** JobDestinationParams */ JobDestinationParams: { /** @@ -13056,6 +13980,19 @@ export interface components { */ name: string; }; + /** JobOutputCollectionAssociation */ + JobOutputCollectionAssociation: { + /** + * dataset_collection_instance + * @description Reference to the associated item. + */ + dataset_collection_instance: components["schemas"]["EncodedDataItemSourceId"]; + /** + * name + * @description Name of the job parameter. + */ + name: string; + }; /** JobParameter */ JobParameter: { /** @@ -13084,6 +14021,47 @@ export interface components { | string | null; }; + /** JobRequest */ + JobRequest: { + /** + * history_id + * @description TODO + */ + history_id?: string | null; + /** + * Inputs + * @description TODO + */ + inputs?: Record | null; + /** + * rerun_remap_job_id + * @description TODO + */ + rerun_remap_job_id?: string | null; + /** + * Send Email Notification + * @description TODO + * @default false + */ + send_email_notification: boolean; + /** + * tool_id + * @description TODO + */ + tool_id?: string | null; + /** + * tool_uuid + * @description TODO + */ + tool_uuid?: string | null; + /** + * tool_version + * @description TODO + */ + tool_version?: string | null; + /** use_cached_jobs */ + use_cached_jobs?: boolean | null; + }; /** * JobSourceType * @description Available types of job sources (model classes) that produce dataset collections. 
@@ -13216,6 +14194,15 @@ export interface components { */ user_email?: string | null; }; + /** LabelValue */ + LabelValue: { + /** Label */ + label: string; + /** Selected */ + selected: boolean; + /** Value */ + value: string; + }; /** * LabelValuePair * @description Generic Label/Value pair model. @@ -13264,6 +14251,32 @@ export interface components { */ LIBRARY_MODIFY_in: string[] | string | null; }; + /** LengthParameterValidatorModel */ + LengthParameterValidatorModel: { + /** + * Implicit + * @default false + */ + implicit: boolean; + /** Max */ + max?: number | null; + /** Message */ + message?: string | null; + /** Min */ + min?: number | null; + /** + * Negate + * @default false + */ + negate: boolean; + /** + * Type + * @default length + * @constant + * @enum {string} + */ + type: "length"; + }; /** LibraryAvailablePermissions */ LibraryAvailablePermissions: { /** @@ -14400,6 +15413,28 @@ export interface components { */ slug: string; }; + /** NoOptionsParameterValidatorModel */ + NoOptionsParameterValidatorModel: { + /** + * Implicit + * @default false + */ + implicit: boolean; + /** Message */ + message?: string | null; + /** + * Negate + * @default false + */ + negate: boolean; + /** + * Type + * @default no_options + * @constant + * @enum {string} + */ + type: "no_options"; + }; /** * NotificationBroadcastUpdateRequest * @description A notification update request specific for broadcasting. @@ -15537,6 +16572,37 @@ export interface components { /** Workflow */ workflow: string; }; + /** + * RegexParameterValidatorModel + * @description Check if a regular expression **matches** the value, i.e. appears + * at the beginning of the value. To enforce a match of the complete value use + * ``$`` at the end of the expression. The expression is given is the content + * of the validator tag. Note that for ``selects`` each option is checked + * separately. 
+ */ + RegexParameterValidatorModel: { + /** Expression */ + expression: string; + /** + * Implicit + * @default false + */ + implicit: boolean; + /** Message */ + message?: string | null; + /** + * Negate + * @default false + */ + negate: boolean; + /** + * Type + * @default regex + * @constant + * @enum {string} + */ + type: "regex"; + }; /** ReloadFeedback */ ReloadFeedback: { /** Failed */ @@ -15628,6 +16694,73 @@ export interface components { */ action_type: "remove_unlabeled_workflow_outputs"; }; + /** RepeatParameterModel */ + RepeatParameterModel: { + /** Argument */ + argument?: string | null; + /** Help */ + help?: string | null; + /** + * Hidden + * @default false + */ + hidden: boolean; + /** + * Is Dynamic + * @default false + */ + is_dynamic: boolean; + /** Label */ + label?: string | null; + /** Max */ + max?: number | null; + /** Min */ + min?: number | null; + /** Name */ + name: string; + /** + * Optional + * @default false + */ + optional: boolean; + /** + * Parameter Type + * @default gx_repeat + * @constant + * @enum {string} + */ + parameter_type: "gx_repeat"; + /** Parameters */ + parameters: ( + | components["schemas"]["CwlIntegerParameterModel"] + | components["schemas"]["CwlFloatParameterModel"] + | components["schemas"]["CwlStringParameterModel"] + | components["schemas"]["CwlBooleanParameterModel"] + | components["schemas"]["CwlNullParameterModel"] + | components["schemas"]["CwlFileParameterModel"] + | components["schemas"]["CwlDirectoryParameterModel"] + | components["schemas"]["CwlUnionParameterModel"] + | components["schemas"]["TextParameterModel"] + | components["schemas"]["IntegerParameterModel"] + | components["schemas"]["FloatParameterModel"] + | components["schemas"]["BooleanParameterModel"] + | components["schemas"]["HiddenParameterModel"] + | components["schemas"]["SelectParameterModel"] + | components["schemas"]["DataParameterModel"] + | components["schemas"]["DataCollectionParameterModel"] + | components["schemas"]["DataColumnParameterModel"] + | components["schemas"]["DirectoryUriParameterModel"] + | components["schemas"]["RulesParameterModel"] + | components["schemas"]["DrillDownParameterModel"] + | components["schemas"]["GroupTagParameterModel"] + | components["schemas"]["BaseUrlParameterModel"] + | components["schemas"]["GenomeBuildParameterModel"] + | components["schemas"]["ColorParameterModel"] + | components["schemas"]["ConditionalParameterModel"] + | components["schemas"]["RepeatParameterModel"] + | components["schemas"]["SectionParameterModel"] + )[]; + }; /** Report */ Report: { /** Markdown */ @@ -15735,6 +16868,39 @@ export interface components { RootModel_Dict_str__int__: { [key: string]: number; }; + /** RulesParameterModel */ + RulesParameterModel: { + /** Argument */ + argument?: string | null; + /** Help */ + help?: string | null; + /** + * Hidden + * @default false + */ + hidden: boolean; + /** + * Is Dynamic + * @default false + */ + is_dynamic: boolean; + /** Label */ + label?: string | null; + /** Name */ + name: string; + /** + * Optional + * @default false + */ + optional: boolean; + /** + * Parameter Type + * @default gx_rules + * @constant + * @enum {string} + */ + parameter_type: "gx_rules"; + }; /** SearchJobsPayload */ SearchJobsPayload: { /** @@ -15755,6 +16921,108 @@ export interface components { } & { [key: string]: unknown; }; + /** SectionParameterModel */ + SectionParameterModel: { + /** Argument */ + argument?: string | null; + /** Help */ + help?: string | null; + /** + * Hidden + * @default false + */ + hidden: 
boolean; + /** + * Is Dynamic + * @default false + */ + is_dynamic: boolean; + /** Label */ + label?: string | null; + /** Name */ + name: string; + /** + * Optional + * @default false + */ + optional: boolean; + /** + * Parameter Type + * @default gx_section + * @constant + * @enum {string} + */ + parameter_type: "gx_section"; + /** Parameters */ + parameters: ( + | components["schemas"]["CwlIntegerParameterModel"] + | components["schemas"]["CwlFloatParameterModel"] + | components["schemas"]["CwlStringParameterModel"] + | components["schemas"]["CwlBooleanParameterModel"] + | components["schemas"]["CwlNullParameterModel"] + | components["schemas"]["CwlFileParameterModel"] + | components["schemas"]["CwlDirectoryParameterModel"] + | components["schemas"]["CwlUnionParameterModel"] + | components["schemas"]["TextParameterModel"] + | components["schemas"]["IntegerParameterModel"] + | components["schemas"]["FloatParameterModel"] + | components["schemas"]["BooleanParameterModel"] + | components["schemas"]["HiddenParameterModel"] + | components["schemas"]["SelectParameterModel"] + | components["schemas"]["DataParameterModel"] + | components["schemas"]["DataCollectionParameterModel"] + | components["schemas"]["DataColumnParameterModel"] + | components["schemas"]["DirectoryUriParameterModel"] + | components["schemas"]["RulesParameterModel"] + | components["schemas"]["DrillDownParameterModel"] + | components["schemas"]["GroupTagParameterModel"] + | components["schemas"]["BaseUrlParameterModel"] + | components["schemas"]["GenomeBuildParameterModel"] + | components["schemas"]["ColorParameterModel"] + | components["schemas"]["ConditionalParameterModel"] + | components["schemas"]["RepeatParameterModel"] + | components["schemas"]["SectionParameterModel"] + )[]; + }; + /** SelectParameterModel */ + SelectParameterModel: { + /** Argument */ + argument?: string | null; + /** Help */ + help?: string | null; + /** + * Hidden + * @default false + */ + hidden: boolean; + /** + * Is Dynamic + * @default false + */ + is_dynamic: boolean; + /** Label */ + label?: string | null; + /** Multiple */ + multiple: boolean; + /** Name */ + name: string; + /** + * Optional + * @default false + */ + optional: boolean; + /** Options */ + options?: components["schemas"]["LabelValue"][] | null; + /** + * Parameter Type + * @default gx_select + * @constant + * @enum {string} + */ + parameter_type: "gx_select"; + /** Validators */ + validators: components["schemas"]["NoOptionsParameterValidatorModel"][]; + }; /** ServerDirElement */ ServerDirElement: { /** Md5 */ @@ -16742,31 +18010,86 @@ export interface components { /** Name */ name: string; /** - * Type + * Type + * @constant + * @enum {string} + */ + type: "string"; + }; + /** TestUpdateInstancePayload */ + TestUpdateInstancePayload: { + /** Variables */ + variables?: { + [key: string]: string | boolean | number; + } | null; + }; + /** TestUpgradeInstancePayload */ + TestUpgradeInstancePayload: { + /** Secrets */ + secrets: { + [key: string]: string; + }; + /** Template Version */ + template_version: number; + /** Variables */ + variables: { + [key: string]: string | boolean | number; + }; + }; + /** TextParameterModel */ + TextParameterModel: { + /** + * Area + * @default false + */ + area: boolean; + /** Argument */ + argument?: string | null; + /** + * Default Options + * @default [] + */ + default_options: components["schemas"]["LabelValue"][]; + /** Help */ + help?: string | null; + /** + * Hidden + * @default false + */ + hidden: boolean; + /** + * Is Dynamic + * 
@default false + */ + is_dynamic: boolean; + /** Label */ + label?: string | null; + /** Name */ + name: string; + /** + * Optional + * @default false + */ + optional: boolean; + /** + * Parameter Type + * @default gx_text * @constant * @enum {string} */ - type: "string"; - }; - /** TestUpdateInstancePayload */ - TestUpdateInstancePayload: { - /** Variables */ - variables?: { - [key: string]: string | boolean | number; - } | null; - }; - /** TestUpgradeInstancePayload */ - TestUpgradeInstancePayload: { - /** Secrets */ - secrets: { - [key: string]: string; - }; - /** Template Version */ - template_version: number; - /** Variables */ - variables: { - [key: string]: string | boolean | number; - }; + parameter_type: "gx_text"; + /** + * Validators + * @default [] + */ + validators: ( + | components["schemas"]["LengthParameterValidatorModel"] + | components["schemas"]["RegexParameterValidatorModel"] + | components["schemas"]["ExpressionParameterValidatorModel"] + | components["schemas"]["EmptyFieldParameterValidatorModel"] + )[]; + /** Value */ + value?: string | null; }; /** ToolDataDetails */ ToolDataDetails: { @@ -16848,6 +18171,40 @@ export interface components { */ values: string; }; + /** ToolRequestModel */ + ToolRequestModel: { + /** + * ID + * @description Encoded ID of the role + * @example 0123456789ABCDEF + */ + id: string; + /** Implicit Collection Outputs */ + implicit_collection_outputs: components["schemas"]["ToolRequestOutputCollectionAssociation"][]; + /** Request */ + request: Record; + state: components["schemas"]["ToolRequestState"]; + /** State Message */ + state_message: string | null; + }; + /** ToolRequestOutputCollectionAssociation */ + ToolRequestOutputCollectionAssociation: { + /** + * dataset_collection_instance + * @description Reference to the associated item. + */ + dataset_collection_instance: components["schemas"]["EncodedDataItemSourceId"]; + /** + * name + * @description Name of the job output. + */ + name: string; + }; + /** + * ToolRequestState + * @enum {string} + */ + ToolRequestState: "new" | "submitted" | "failed"; /** ToolStep */ ToolStep: { /** @@ -26892,6 +28249,50 @@ export interface operations { }; }; }; + tool_requests_api_histories__history_id__tool_requests_get: { + parameters: { + query?: never; + header?: { + /** @description The user ID that will be used to effectively make this API call. Only admins and designated users can make API calls on behalf of other users. */ + "run-as"?: string | null; + }; + path: { + /** @description The encoded database identifier of the History. */ + history_id: string; + }; + cookie?: never; + }; + requestBody?: never; + responses: { + /** @description Successful Response */ + 200: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["ToolRequestModel"][]; + }; + }; + /** @description Request Error */ + "4XX": { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["MessageExceptionModel"]; + }; + }; + /** @description Server Error */ + "5XX": { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["MessageExceptionModel"]; + }; + }; + }; + }; unpublish_api_histories__history_id__unpublish_put: { parameters: { query?: never; @@ -27809,6 +29210,8 @@ export interface operations { invocation_id?: string | null; /** @description Limit listing of jobs to those that match the specified implicit collection job ID. 
If none, jobs from any implicit collection execution (or from no implicit collection execution) may be returned. */ implicit_collection_jobs_id?: string | null; + /** @description Limit listing of jobs to those that were created from the supplied tool request ID. If none, jobs from any tool request (or from no workflows) may be returned. */ + tool_request_id?: string | null; /** @description Sort results by specified field. */ order_by?: components["schemas"]["JobIndexSortByEnum"]; /** @description A mix of free text and GitHub-style tags used to filter the index operation. @@ -27901,6 +29304,51 @@ export interface operations { }; }; }; + create_api_jobs_post: { + parameters: { + query?: never; + header?: { + /** @description The user ID that will be used to effectively make this API call. Only admins and designated users can make API calls on behalf of other users. */ + "run-as"?: string | null; + }; + path?: never; + cookie?: never; + }; + requestBody: { + content: { + "application/json": components["schemas"]["JobRequest"]; + }; + }; + responses: { + /** @description Successful Response */ + 200: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["JobCreateResponse"]; + }; + }; + /** @description Request Error */ + "4XX": { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["MessageExceptionModel"]; + }; + }; + /** @description Server Error */ + "5XX": { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["MessageExceptionModel"]; + }; + }; + }; + }; search_jobs_api_jobs_search_post: { parameters: { query?: never; @@ -28390,7 +29838,10 @@ export interface operations { [name: string]: unknown; }; content: { - "application/json": components["schemas"]["JobOutputAssociation"][]; + "application/json": ( + | components["schemas"]["JobOutputAssociation"] + | components["schemas"]["JobOutputCollectionAssociation"] + )[]; }; }; /** @description Request Error */ @@ -32738,6 +34189,92 @@ export interface operations { }; }; }; + get_tool_request_api_tool_requests__id__get: { + parameters: { + query?: never; + header?: { + /** @description The user ID that will be used to effectively make this API call. Only admins and designated users can make API calls on behalf of other users. */ + "run-as"?: string | null; + }; + path: { + id: string; + }; + cookie?: never; + }; + requestBody?: never; + responses: { + /** @description Successful Response */ + 200: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["ToolRequestModel"]; + }; + }; + /** @description Request Error */ + "4XX": { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["MessageExceptionModel"]; + }; + }; + /** @description Server Error */ + "5XX": { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["MessageExceptionModel"]; + }; + }; + }; + }; + tool_request_state_api_tool_requests__id__state_get: { + parameters: { + query?: never; + header?: { + /** @description The user ID that will be used to effectively make this API call. Only admins and designated users can make API calls on behalf of other users. 
*/ + "run-as"?: string | null; + }; + path: { + id: string; + }; + cookie?: never; + }; + requestBody?: never; + responses: { + /** @description Successful Response */ + 200: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": string; + }; + }; + /** @description Request Error */ + "4XX": { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["MessageExceptionModel"]; + }; + }; + /** @description Server Error */ + "5XX": { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["MessageExceptionModel"]; + }; + }; + }; + }; index_api_tool_shed_repositories_get: { parameters: { query?: { @@ -32916,6 +34453,80 @@ export interface operations { }; }; }; + tool_inputs_api_tools__tool_id__inputs_get: { + parameters: { + query?: { + tool_version?: string | null; + }; + header?: { + /** @description The user ID that will be used to effectively make this API call. Only admins and designated users can make API calls on behalf of other users. */ + "run-as"?: string | null; + }; + path: { + /** @description The tool ID for the lineage stored in Galaxy's toolbox. */ + tool_id: string; + }; + cookie?: never; + }; + requestBody?: never; + responses: { + /** @description Successful Response */ + 200: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": ( + | components["schemas"]["CwlIntegerParameterModel"] + | components["schemas"]["CwlFloatParameterModel"] + | components["schemas"]["CwlStringParameterModel"] + | components["schemas"]["CwlBooleanParameterModel"] + | components["schemas"]["CwlNullParameterModel"] + | components["schemas"]["CwlFileParameterModel"] + | components["schemas"]["CwlDirectoryParameterModel"] + | components["schemas"]["CwlUnionParameterModel"] + | components["schemas"]["TextParameterModel"] + | components["schemas"]["IntegerParameterModel"] + | components["schemas"]["FloatParameterModel"] + | components["schemas"]["BooleanParameterModel"] + | components["schemas"]["HiddenParameterModel"] + | components["schemas"]["SelectParameterModel"] + | components["schemas"]["DataParameterModel"] + | components["schemas"]["DataCollectionParameterModel"] + | components["schemas"]["DataColumnParameterModel"] + | components["schemas"]["DirectoryUriParameterModel"] + | components["schemas"]["RulesParameterModel"] + | components["schemas"]["DrillDownParameterModel"] + | components["schemas"]["GroupTagParameterModel"] + | components["schemas"]["BaseUrlParameterModel"] + | components["schemas"]["GenomeBuildParameterModel"] + | components["schemas"]["ColorParameterModel"] + | components["schemas"]["ConditionalParameterModel"] + | components["schemas"]["RepeatParameterModel"] + | components["schemas"]["SectionParameterModel"] + )[]; + }; + }; + /** @description Request Error */ + "4XX": { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["MessageExceptionModel"]; + }; + }; + /** @description Server Error */ + "5XX": { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["MessageExceptionModel"]; + }; + }; + }; + }; index_api_tours_get: { parameters: { query?: never; From 1ee3064430f9b98e5d4b01f74be666ca0764531f Mon Sep 17 00:00:00 2001 From: John Chilton Date: Mon, 4 Nov 2024 13:22:55 -0500 Subject: [PATCH 08/11] Tool Landing API... 
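
Tool landing requests now mirror the workflow landing machinery: the
client-supplied request state is decoded eagerly via landing_decode
against the tool's parameter models, and create, claim, and get
endpoints are exposed under /api/tool_landings. A rough usage sketch
of the three endpoints follows; the Galaxy URL, API key, tool id, and
request state are hypothetical, and the response is assumed to carry
the landing request's uuid:

    import requests

    GALAXY_URL = "http://localhost:8080"
    API_KEY = "<api key>"

    # create a landing request (the route is public, so no auth is required)
    create = requests.post(
        f"{GALAXY_URL}/api/tool_landings",
        json={"tool_id": "cat1", "request_state": {}, "public": True},
    )
    create.raise_for_status()
    uuid = create.json()["uuid"]

    # claim the request as the authenticated user, then read it back
    auth = {"x-api-key": API_KEY}
    requests.post(f"{GALAXY_URL}/api/tool_landings/{uuid}/claim", json={}, headers=auth).raise_for_status()
    landing = requests.get(f"{GALAXY_URL}/api/tool_landings/{uuid}", headers=auth).json()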
--- lib/galaxy/managers/landing.py | 30 +++++++++++-- lib/galaxy/managers/tools.py | 27 ++++++++++++ lib/galaxy/webapps/galaxy/api/tools.py | 47 ++++++++++++++++++--- lib/galaxy/webapps/galaxy/services/jobs.py | 6 +-- lib/galaxy/webapps/galaxy/services/tools.py | 29 +++---------- lib/galaxy_test/api/test_landing.py | 31 ++++++++++++++ lib/galaxy_test/base/populators.py | 10 +++-- test/unit/app/managers/test_landing.py | 34 ++++++++++++++- 8 files changed, 173 insertions(+), 41 deletions(-) diff --git a/lib/galaxy/managers/landing.py b/lib/galaxy/managers/landing.py index 5012d2280134..b5bfd49a7359 100644 --- a/lib/galaxy/managers/landing.py +++ b/lib/galaxy/managers/landing.py @@ -30,9 +30,20 @@ WorkflowLandingRequest, ) from galaxy.security.idencoding import IdEncodingHelper -from galaxy.structured_app import StructuredApp +from galaxy.structured_app import ( + MinimalManagerApp, + StructuredApp, +) +from galaxy.tool_util.parameters import ( + landing_decode, + LandingRequestToolState, +) from galaxy.util import safe_str_cmp from .context import ProvidesUserContext +from .tools import ( + get_tool_from_toolbox, + ToolRunReference, +) LandingRequestModel = Union[ToolLandingRequestModel, WorkflowLandingRequestModel] @@ -44,16 +55,27 @@ def __init__( sa_session: galaxy_scoped_session, security: IdEncodingHelper, workflow_contents_manager: WorkflowContentsManager, + app: MinimalManagerApp, ): self.sa_session = sa_session self.security = security self.workflow_contents_manager = workflow_contents_manager + self.app = app def create_tool_landing_request(self, payload: CreateToolLandingRequestPayload, user_id=None) -> ToolLandingRequest: + tool_id = payload.tool_id + tool_version = payload.tool_version + request_state = payload.request_state + + ref = ToolRunReference(tool_id=tool_id, tool_version=tool_version, tool_uuid=None) + tool = get_tool_from_toolbox(self.app.toolbox, ref) + landing_request_state = LandingRequestToolState(request_state or {}) + internal_landing_request_state = landing_decode(landing_request_state, tool, self.security.decode_id) + model = ToolLandingRequestModel() - model.tool_id = payload.tool_id - model.tool_version = payload.tool_version - model.request_state = payload.request_state + model.tool_id = tool_id + model.tool_version = tool_version + model.request_state = internal_landing_request_state.input_state model.uuid = uuid4() model.client_secret = payload.client_secret model.public = payload.public diff --git a/lib/galaxy/managers/tools.py b/lib/galaxy/managers/tools.py index c6dbe471dc84..fa4c42eadefd 100644 --- a/lib/galaxy/managers/tools.py +++ b/lib/galaxy/managers/tools.py @@ -1,5 +1,6 @@ import logging from typing import ( + NamedTuple, Optional, TYPE_CHECKING, Union, @@ -16,8 +17,10 @@ model, ) from galaxy.exceptions import DuplicatedIdentifierException +from galaxy.managers.context import ProvidesUserContext from galaxy.model import DynamicTool from galaxy.tool_util.cwl import tool_proxy +from galaxy.tools import Tool from .base import ( ModelManager, raise_filter_err, @@ -30,6 +33,30 @@ from galaxy.managers.base import OrmFilterParsersType +class ToolRunReference(NamedTuple): + tool_id: Optional[str] + tool_uuid: Optional[str] + tool_version: Optional[str] + + +def get_tool_from_trans(trans: ProvidesUserContext, tool_ref: ToolRunReference) -> Tool: + return get_tool_from_toolbox(trans.app.toolbox, tool_ref) + + +def get_tool_from_toolbox(toolbox, tool_ref: ToolRunReference) -> Tool: + get_kwds = dict( + tool_id=tool_ref.tool_id, + 
tool_uuid=tool_ref.tool_uuid, + tool_version=tool_ref.tool_version, + ) + + tool = toolbox.get_tool(**get_kwds) + if not tool: + log.debug(f"Not found tool with kwds [{tool_ref}]") + raise exceptions.ToolMissingException("Tool not found.") + return tool + + class DynamicToolManager(ModelManager): """Manages dynamic tools stored in Galaxy's database.""" diff --git a/lib/galaxy/webapps/galaxy/api/tools.py b/lib/galaxy/webapps/galaxy/api/tools.py index 6914f9ab7bf3..dc0c2ecaeb28 100644 --- a/lib/galaxy/webapps/galaxy/api/tools.py +++ b/lib/galaxy/webapps/galaxy/api/tools.py @@ -17,6 +17,7 @@ Request, UploadFile, ) +from pydantic import UUID4 from starlette.datastructures import UploadFile as StarletteUploadFile from galaxy import ( @@ -26,16 +27,26 @@ ) from galaxy.datatypes.data import get_params_and_input_name from galaxy.managers.collections import DatasetCollectionManager -from galaxy.managers.context import ProvidesHistoryContext +from galaxy.managers.context import ( + ProvidesHistoryContext, + ProvidesUserContext, +) from galaxy.managers.hdas import HDAManager from galaxy.managers.histories import HistoryManager +from galaxy.managers.landing import LandingRequestManager +from galaxy.managers.tools import ToolRunReference from galaxy.model import ToolRequest from galaxy.schema.fetch_data import ( FetchDataFormPayload, FetchDataPayload, ) from galaxy.schema.fields import DecodedDatabaseIdField -from galaxy.schema.schema import ToolRequestModel +from galaxy.schema.schema import ( + ClaimLandingPayload, + CreateToolLandingRequestPayload, + ToolLandingRequest, + ToolRequestModel, +) from galaxy.tool_util.parameters import ToolParameterT from galaxy.tool_util.verify import ToolTestDescriptionDict from galaxy.tools.evaluation import global_tool_errors @@ -49,16 +60,14 @@ from galaxy.webapps.base.controller import UsesVisualizationMixin from galaxy.webapps.base.webapp import GalaxyWebTransaction from galaxy.webapps.galaxy.services.base import tool_request_to_model -from galaxy.webapps.galaxy.services.tools import ( - ToolRunReference, - ToolsService, -) +from galaxy.webapps.galaxy.services.tools import ToolsService from . 
import ( APIContentTypeRoute, as_form, BaseGalaxyAPIController, depends, DependsOnTrans, + LandingUuidPathParam, Router, ) @@ -105,6 +114,7 @@ async def get_files(request: Request, files: Optional[List[UploadFile]] = None): @router.cbv class FetchTools: service: ToolsService = depends(ToolsService) + landing_manager: LandingRequestManager = depends(LandingRequestManager) @router.post("/api/tools/fetch", summary="Upload files to Galaxy", route_class_override=JsonApiRoute) def fetch_json(self, payload: FetchDataPayload = Body(...), trans: ProvidesHistoryContext = DependsOnTrans): @@ -161,6 +171,31 @@ def _get_tool_request_or_raise_not_found( assert tool_request return tool_request + @router.post("/api/tool_landings", public=True) + def create_landing( + self, + trans: ProvidesUserContext = DependsOnTrans, + tool_landing_request: CreateToolLandingRequestPayload = Body(...), + ) -> ToolLandingRequest: + return self.landing_manager.create_tool_landing_request(tool_landing_request) + + @router.post("/api/tool_landings/{uuid}/claim") + def claim_landing( + self, + trans: ProvidesUserContext = DependsOnTrans, + uuid: UUID4 = LandingUuidPathParam, + payload: Optional[ClaimLandingPayload] = Body(...), + ) -> ToolLandingRequest: + return self.landing_manager.claim_tool_landing_request(trans, uuid, payload) + + @router.get("/api/tool_landings/{uuid}") + def get_landing( + self, + trans: ProvidesUserContext = DependsOnTrans, + uuid: UUID4 = LandingUuidPathParam, + ) -> ToolLandingRequest: + return self.landing_manager.get_tool_landing_request(trans, uuid) + @router.get( "/api/tools/{tool_id}/inputs", summary="Get tool inputs.", diff --git a/lib/galaxy/webapps/galaxy/services/jobs.py b/lib/galaxy/webapps/galaxy/services/jobs.py index 191a5e291cf5..610e1c468d78 100644 --- a/lib/galaxy/webapps/galaxy/services/jobs.py +++ b/lib/galaxy/webapps/galaxy/services/jobs.py @@ -29,6 +29,7 @@ JobSearch, view_show_job, ) +from galaxy.managers.tools import ToolRunReference from galaxy.model import ( Job, ToolRequest, @@ -60,10 +61,7 @@ async_task_summary, ServiceBase, ) -from .tools import ( - ToolRunReference, - validate_tool_for_running, -) +from .tools import validate_tool_for_running log = logging.getLogger(__name__) diff --git a/lib/galaxy/webapps/galaxy/services/tools.py b/lib/galaxy/webapps/galaxy/services/tools.py index bd97238ef67e..256031433304 100644 --- a/lib/galaxy/webapps/galaxy/services/tools.py +++ b/lib/galaxy/webapps/galaxy/services/tools.py @@ -7,7 +7,6 @@ cast, Dict, List, - NamedTuple, Optional, Union, ) @@ -25,6 +24,10 @@ ProvidesUserContext, ) from galaxy.managers.histories import HistoryManager +from galaxy.managers.tools import ( + get_tool_from_trans, + ToolRunReference, +) from galaxy.model import ( LibraryDatasetDatasetAssociation, PostJobAction, @@ -46,26 +49,6 @@ log = logging.getLogger(__name__) -class ToolRunReference(NamedTuple): - tool_id: Optional[str] - tool_uuid: Optional[str] - tool_version: Optional[str] - - -def get_tool(trans: ProvidesHistoryContext, tool_ref: ToolRunReference) -> Tool: - get_kwds = dict( - tool_id=tool_ref.tool_id, - tool_uuid=tool_ref.tool_uuid, - tool_version=tool_ref.tool_version, - ) - - tool = trans.app.toolbox.get_tool(**get_kwds) - if not tool: - log.debug(f"Not found tool with kwds [{tool_ref}]") - raise exceptions.ToolMissingException("Tool not found.") - return tool - - def validate_tool_for_running(trans: ProvidesHistoryContext, tool_ref: ToolRunReference) -> Tool: if trans.user_is_bootstrap_admin: raise exceptions.RealUserRequiredException("Only 
real users can execute tools or run jobs.") @@ -73,7 +56,7 @@ def validate_tool_for_running(trans: ProvidesHistoryContext, tool_ref: ToolRunRe if tool_ref.tool_id is None and tool_ref.tool_uuid is None: raise exceptions.RequestParameterMissingException("Must specify a valid tool_id to use this endpoint.") - tool = get_tool(trans, tool_ref) + tool = get_tool_from_trans(trans, tool_ref) if not tool.allow_user_access(trans.user): raise exceptions.ItemAccessibilityException("Tool not accessible.") return tool @@ -97,7 +80,7 @@ def inputs( trans: ProvidesHistoryContext, tool_ref: ToolRunReference, ) -> List[ToolParameterT]: - tool = get_tool(trans, tool_ref) + tool = get_tool_from_trans(trans, tool_ref) return tool.parameters def create_fetch( diff --git a/lib/galaxy_test/api/test_landing.py b/lib/galaxy_test/api/test_landing.py index 53182fadae2a..a8f99acb1e01 100644 --- a/lib/galaxy_test/api/test_landing.py +++ b/lib/galaxy_test/api/test_landing.py @@ -5,9 +5,14 @@ ) from galaxy.schema.schema import ( + CreateToolLandingRequestPayload, CreateWorkflowLandingRequestPayload, WorkflowLandingRequest, ) +from galaxy_test.base.api_asserts import ( + assert_error_code_is, + assert_status_code_is, +) from galaxy_test.base.populators import ( DatasetPopulator, skip_without_tool, @@ -25,6 +30,32 @@ def setUp(self): self.dataset_populator = DatasetPopulator(self.galaxy_interactor) self.workflow_populator = WorkflowPopulator(self.galaxy_interactor) + @skip_without_tool("create_2") + def test_tool_landing(self): + request = CreateToolLandingRequestPayload( + tool_id="create_2", + tool_version=None, + request_state={"sleep_time": 0}, + ) + response = self.dataset_populator.create_tool_landing(request) + assert response.tool_id == "create_2" + assert response.state == "unclaimed" + response = self.dataset_populator.claim_tool_landing(response.uuid) + assert response.tool_id == "create_2" + assert response.state == "claimed" + + @skip_without_tool("gx_int") + def test_tool_landing_invalid(self): + request = CreateToolLandingRequestPayload( + tool_id="gx_int", + tool_version=None, + request_state={"parameter": "foobar"}, + ) + response = self.dataset_populator.create_tool_landing_raw(request) + assert_status_code_is(response, 400) + assert_error_code_is(response, 400008) + assert "Input should be a valid integer" in response.text + @skip_without_tool("cat1") def test_create_public_workflow_landing_authenticated_user(self): request = _get_simple_landing_payload(self.workflow_populator, public=True) diff --git a/lib/galaxy_test/base/populators.py b/lib/galaxy_test/base/populators.py index 93ae66c832e3..e593092fc3bc 100644 --- a/lib/galaxy_test/base/populators.py +++ b/lib/galaxy_test/base/populators.py @@ -795,13 +795,17 @@ def _wait_for_purge(): return self._get(dataset_url) def create_tool_landing(self, payload: CreateToolLandingRequestPayload) -> ToolLandingRequest: - create_url = "tool_landings" - json = payload.model_dump(mode="json") - create_response = self._post(create_url, json, json=True, anon=True) + create_response = self.create_tool_landing_raw(payload) api_asserts.assert_status_code_is(create_response, 200) create_response.raise_for_status() return ToolLandingRequest.model_validate(create_response.json()) + def create_tool_landing_raw(self, payload: CreateToolLandingRequestPayload) -> Response: + create_url = "tool_landings" + json = payload.model_dump(mode="json") + create_response = self._post(create_url, json, json=True, anon=True) + return create_response + def create_workflow_landing(self, payload: 
CreateWorkflowLandingRequestPayload) -> WorkflowLandingRequest: create_url = "workflow_landings" json = payload.model_dump(mode="json") diff --git a/test/unit/app/managers/test_landing.py b/test/unit/app/managers/test_landing.py index f2ccb059b4bf..b6774e1dc452 100644 --- a/test/unit/app/managers/test_landing.py +++ b/test/unit/app/managers/test_landing.py @@ -1,3 +1,7 @@ +from typing import ( + cast, + List, +) from uuid import uuid4 from galaxy.config import GalaxyAppConfiguration @@ -22,6 +26,11 @@ ToolLandingRequest, WorkflowLandingRequest, ) +from galaxy.structured_app import MinimalManagerApp +from galaxy.tool_util.parameters import ( + DataParameterModel, + ToolParameterT, +) from galaxy.workflow.trs_proxy import TrsProxy from .base import BaseTestCase @@ -37,13 +46,36 @@ CLIENT_SECRET = "mycoolsecret" +class MockApp: + + @property + def toolbox(self): + return MockToolbox() + + +class MockToolbox: + + def get_tool(self, tool_id, tool_uuid, tool_version): + return MockTool() + + +class MockTool: + + @property + def parameters(self) -> List[ToolParameterT]: + return [DataParameterModel(name="input1")] + + class TestLanding(BaseTestCase): def setUp(self): super().setUp() self.workflow_contents_manager = WorkflowContentsManager(self.app, self.app.trs_proxy) self.landing_manager = LandingRequestManager( - self.trans.sa_session, self.app.security, self.workflow_contents_manager + self.trans.sa_session, + self.app.security, + self.workflow_contents_manager, + cast(MinimalManagerApp, MockApp()), ) self.trans.app.trs_proxy = TrsProxy(GalaxyAppConfiguration(override_tempdir=False)) From f48162dbe7b2dec5a37e90c3ac7c21b25afc936c Mon Sep 17 00:00:00 2001 From: John Chilton Date: Mon, 7 Oct 2024 12:19:54 -0400 Subject: [PATCH 09/11] Regenerate schema --- client/src/api/schema/schema.ts | 220 ++++++++++++++++++++++++++++++++ 1 file changed, 220 insertions(+) diff --git a/client/src/api/schema/schema.ts b/client/src/api/schema/schema.ts index 7241bcb4d165..eb7a48ed0542 100644 --- a/client/src/api/schema/schema.ts +++ b/client/src/api/schema/schema.ts @@ -4399,6 +4399,57 @@ export interface paths { patch?: never; trace?: never; }; + "/api/tool_landings": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: never; + /** Create Landing */ + post: operations["create_landing_api_tool_landings_post"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/api/tool_landings/{uuid}": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + /** Get Landing */ + get: operations["get_landing_api_tool_landings__uuid__get"]; + put?: never; + post?: never; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/api/tool_landings/{uuid}/claim": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: never; + /** Claim Landing */ + post: operations["claim_landing_api_tool_landings__uuid__claim_post"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; "/api/tool_requests/{id}": { parameters: { query?: never; @@ -7551,6 +7602,22 @@ export interface components { */ url: string; }; + /** CreateToolLandingRequestPayload */ + CreateToolLandingRequestPayload: { + /** Client Secret */ + client_secret?: string | null; + /** + * Public + * @default false + */ + public: boolean; + /** Request State */ + request_state?: Record | null; + /** Tool Id 
*/ + tool_id: string; + /** Tool Version */ + tool_version?: string | null; + }; /** * CreateType * @enum {string} @@ -18171,6 +18238,22 @@ export interface components { */ values: string; }; + /** ToolLandingRequest */ + ToolLandingRequest: { + /** Request State */ + request_state?: Record | null; + state: components["schemas"]["LandingRequestState"]; + /** Tool Id */ + tool_id: string; + /** Tool Version */ + tool_version?: string | null; + /** + * UUID + * Format: uuid4 + * @description Universal unique identifier for this dataset. + */ + uuid: string; + }; /** ToolRequestModel */ ToolRequestModel: { /** @@ -34189,6 +34272,143 @@ export interface operations { }; }; }; + create_landing_api_tool_landings_post: { + parameters: { + query?: never; + header?: { + /** @description The user ID that will be used to effectively make this API call. Only admins and designated users can make API calls on behalf of other users. */ + "run-as"?: string | null; + }; + path?: never; + cookie?: never; + }; + requestBody: { + content: { + "application/json": components["schemas"]["CreateToolLandingRequestPayload"]; + }; + }; + responses: { + /** @description Successful Response */ + 200: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["ToolLandingRequest"]; + }; + }; + /** @description Request Error */ + "4XX": { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["MessageExceptionModel"]; + }; + }; + /** @description Server Error */ + "5XX": { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["MessageExceptionModel"]; + }; + }; + }; + }; + get_landing_api_tool_landings__uuid__get: { + parameters: { + query?: never; + header?: { + /** @description The user ID that will be used to effectively make this API call. Only admins and designated users can make API calls on behalf of other users. */ + "run-as"?: string | null; + }; + path: { + /** @description The UUID used to identify a persisted landing request. */ + uuid: string; + }; + cookie?: never; + }; + requestBody?: never; + responses: { + /** @description Successful Response */ + 200: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["ToolLandingRequest"]; + }; + }; + /** @description Request Error */ + "4XX": { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["MessageExceptionModel"]; + }; + }; + /** @description Server Error */ + "5XX": { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["MessageExceptionModel"]; + }; + }; + }; + }; + claim_landing_api_tool_landings__uuid__claim_post: { + parameters: { + query?: never; + header?: { + /** @description The user ID that will be used to effectively make this API call. Only admins and designated users can make API calls on behalf of other users. */ + "run-as"?: string | null; + }; + path: { + /** @description The UUID used to identify a persisted landing request. 
*/ + uuid: string; + }; + cookie?: never; + }; + requestBody: { + content: { + "application/json": components["schemas"]["ClaimLandingPayload"] | null; + }; + }; + responses: { + /** @description Successful Response */ + 200: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["ToolLandingRequest"]; + }; + }; + /** @description Request Error */ + "4XX": { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["MessageExceptionModel"]; + }; + }; + /** @description Server Error */ + "5XX": { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["MessageExceptionModel"]; + }; + }; + }; + }; get_tool_request_api_tool_requests__id__get: { parameters: { query?: never; From 45ee35f44a97df8fdb06325100ce785ae30685ba Mon Sep 17 00:00:00 2001 From: John Chilton Date: Mon, 30 Sep 2024 15:35:00 -0400 Subject: [PATCH 10/11] More json schema API options... --- lib/galaxy/tool_util/parameters/__init__.py | 2 + lib/galaxy/webapps/galaxy/api/__init__.py | 13 +++++ lib/galaxy/webapps/galaxy/api/tools.py | 64 ++++++++++++++++++++- lib/galaxy_test/api/test_tools.py | 27 +++++++++ lib/tool_shed/webapp/api2/tools.py | 43 +++++++++++--- 5 files changed, 139 insertions(+), 10 deletions(-) diff --git a/lib/galaxy/tool_util/parameters/__init__.py b/lib/galaxy/tool_util/parameters/__init__.py index 22dd7e6053aa..3435f81d25e1 100644 --- a/lib/galaxy/tool_util/parameters/__init__.py +++ b/lib/galaxy/tool_util/parameters/__init__.py @@ -66,6 +66,7 @@ ValidationFunctionT, ) from .state import ( + HasToolParameters, JobInternalToolState, LandingRequestInternalToolState, LandingRequestToolState, @@ -140,6 +141,7 @@ "ToolState", "TestCaseToolState", "ToolParameterT", + "HasToolParameters", "to_json_schema_string", "test_case_state", "validate_test_cases_for_tool_source", diff --git a/lib/galaxy/webapps/galaxy/api/__init__.py b/lib/galaxy/webapps/galaxy/api/__init__.py index 95ec6cf4069a..8d2db1544981 100644 --- a/lib/galaxy/webapps/galaxy/api/__init__.py +++ b/lib/galaxy/webapps/galaxy/api/__init__.py @@ -81,6 +81,11 @@ from galaxy.schema.fields import DecodedDatabaseIdField from galaxy.security.idencoding import IdEncodingHelper from galaxy.structured_app import StructuredApp +from galaxy.tool_util.parameters import ( + HasToolParameters, + to_json_schema_string, + ToolState, +) from galaxy.web.framework.decorators import require_admin_message from galaxy.webapps.base.controller import BaseAPIController from galaxy.webapps.galaxy.api.cbv import cbv @@ -611,6 +616,14 @@ async def _as_form(**data): return cls +def json_schema_response_for_tool_state_model( + state_type: Type[ToolState], has_parameters: HasToolParameters +) -> Response: + pydantic_model = state_type.parameter_model_for(has_parameters) + json_str = to_json_schema_string(pydantic_model) + return Response(content=json_str, media_type="application/json") + + async def try_get_request_body_as_json(request: Request) -> Optional[Any]: """Returns the request body as a JSON object if the content type is JSON.""" if "application/json" in request.headers.get("content-type", ""): diff --git a/lib/galaxy/webapps/galaxy/api/tools.py b/lib/galaxy/webapps/galaxy/api/tools.py index dc0c2ecaeb28..d2591bf7335a 100644 --- a/lib/galaxy/webapps/galaxy/api/tools.py +++ b/lib/galaxy/webapps/galaxy/api/tools.py @@ -15,6 +15,7 @@ Path, Query, Request, + Response, UploadFile, ) from pydantic import UUID4 @@ -47,7 +48,12 @@ 
ToolLandingRequest, ToolRequestModel, ) -from galaxy.tool_util.parameters import ToolParameterT +from galaxy.tool_util.parameters import ( + LandingRequestToolState, + RequestToolState, + TestCaseToolState, + ToolParameterT, +) from galaxy.tool_util.verify import ToolTestDescriptionDict from galaxy.tools.evaluation import global_tool_errors from galaxy.util.zipstream import ZipstreamWrapper @@ -67,6 +73,7 @@ BaseGalaxyAPIController, depends, DependsOnTrans, + json_schema_response_for_tool_state_model, LandingUuidPathParam, Router, ) @@ -209,6 +216,61 @@ def tool_inputs( tool_run_ref = ToolRunReference(tool_id=tool_id, tool_version=tool_version, tool_uuid=None) return self.service.inputs(trans, tool_run_ref) + @router.get( + "/api/tools/{tool_id}/parameter_request_schema", + operation_id="tools__parameter_request_schema", + summary="Return a JSON schema description of the tool's inputs for the tool request API that will be added to Galaxy at some point", + description="The tool request schema includes validation of map/reduce concepts that can be consumed by the tool execution API and not just the request for a single execution.", + ) + def tool_state_request( + self, + tool_id: str = ToolIDPathParam, + tool_version: Optional[str] = ToolVersionQueryParam, + trans: ProvidesHistoryContext = DependsOnTrans, + ) -> Response: + tool_run_ref = ToolRunReference(tool_id=tool_id, tool_version=tool_version, tool_uuid=None) + inputs = self.service.inputs(trans, tool_run_ref) + return json_schema_response_for_tool_state_model( + RequestToolState, + inputs, + ) + + @router.get( + "/api/tools/{tool_id}/parameter_landing_request_schema", + operation_id="tools__parameter_landing_request_schema", + summary="Return a JSON schema description of the tool's inputs for the tool landing request API.", + ) + def tool_state_landing_request( + self, + tool_id: str = ToolIDPathParam, + tool_version: Optional[str] = ToolVersionQueryParam, + trans: ProvidesHistoryContext = DependsOnTrans, + ) -> Response: + tool_run_ref = ToolRunReference(tool_id=tool_id, tool_version=tool_version, tool_uuid=None) + inputs = self.service.inputs(trans, tool_run_ref) + return json_schema_response_for_tool_state_model( + LandingRequestToolState, + inputs, + ) + + @router.get( + "/api/tools/{tool_id}/parameter_test_case_xml_schema", + operation_id="tools__parameter_test_case_xml_schema", + summary="Return a JSON schema description of the tool's inputs for test case construction.", + ) + def tool_state_test_case_xml( + self, + tool_id: str = ToolIDPathParam, + tool_version: Optional[str] = ToolVersionQueryParam, + trans: ProvidesHistoryContext = DependsOnTrans, + ) -> Response: + tool_run_ref = ToolRunReference(tool_id=tool_id, tool_version=tool_version, tool_uuid=None) + inputs = self.service.inputs(trans, tool_run_ref) + return json_schema_response_for_tool_state_model( + TestCaseToolState, + inputs, + ) + class ToolsController(BaseGalaxyAPIController, UsesVisualizationMixin): """ diff --git a/lib/galaxy_test/api/test_tools.py b/lib/galaxy_test/api/test_tools.py index 41d982d246c9..ff2d9a6fe7a0 100644 --- a/lib/galaxy_test/api/test_tools.py +++ b/lib/galaxy_test/api/test_tools.py @@ -13,6 +13,8 @@ from uuid import uuid4 import pytest +from jsonschema import validate +from jsonschema.exceptions import ValidationError from requests import ( get, put, @@ -247,6 +249,31 @@ def test_legacy_biotools_xref_injection(self): assert xref["reftype"] == "bio.tools" assert xref["value"] == "bwa" + @skip_without_tool("gx_int") + def 
test_tool_schemas(self): + tool_id = "gx_int" + + def get_jsonschema(state_type: str): + schema_url = self._api_url(f"tools/{tool_id}/parameter_{state_type}_schema") + schema_response = get(schema_url) + schema_response.raise_for_status() + return schema_response.json() + + request_schema = get_jsonschema("request") + validate(instance={"parameter": 5}, schema=request_schema) + with pytest.raises(ValidationError): + validate(instance={"parameter": "Foobar"}, schema=request_schema) + + test_case_schema = get_jsonschema("test_case_xml") + validate(instance={"parameter": 5}, schema=test_case_schema) + with pytest.raises(ValidationError): + validate(instance={"parameter": "Foobar"}, schema=test_case_schema) + + landing_schema = get_jsonschema("landing_request") + validate(instance={"parameter": 5}, schema=landing_schema) + with pytest.raises(ValidationError): + validate(instance={"parameter": "Foobar"}, schema=landing_schema) + @skip_without_tool("test_data_source") @skip_if_github_down def test_data_source_ok_request(self): diff --git a/lib/tool_shed/webapp/api2/tools.py b/lib/tool_shed/webapp/api2/tools.py index be5d04da9aeb..b8709060eda9 100644 --- a/lib/tool_shed/webapp/api2/tools.py +++ b/lib/tool_shed/webapp/api2/tools.py @@ -9,9 +9,11 @@ from galaxy.tool_util.models import ParsedTool from galaxy.tool_util.parameters import ( + LandingRequestToolState, RequestToolState, - to_json_schema_string, + TestCaseToolState, ) +from galaxy.webapps.galaxy.api import json_schema_response_for_tool_state_model from tool_shed.context import SessionRequestContext from tool_shed.managers.tools import ( parsed_tool_model_cached_for, @@ -57,11 +59,6 @@ ) -def json_schema_response(pydantic_model) -> Response: - json_str = to_json_schema_string(pydantic_model) - return Response(content=json_str, media_type="application/json") - - @router.cbv class FastAPITools: app: ToolShedApp = depends(ToolShedApp) @@ -158,15 +155,43 @@ def show_tool( @router.get( "/api/tools/{tool_id}/versions/{tool_version}/parameter_request_schema", - operation_id="tools__parameter_request_model", + operation_id="tools__parameter_request_schema", summary="Return a JSON schema description of the tool's inputs for the tool request API that will be added to Galaxy at some point", description="The tool request schema includes validation of map/reduce concepts that can be consumed by the tool execution API and not just the request for a single execution.", ) - def tool_state( + def tool_state_request( + self, + trans: SessionRequestContext = DependsOnTrans, + tool_id: str = TOOL_ID_PATH_PARAM, + tool_version: str = TOOL_VERSION_PATH_PARAM, + ) -> Response: + parsed_tool = parsed_tool_model_cached_for(trans, tool_id, tool_version) + return json_schema_response_for_tool_state_model(RequestToolState, parsed_tool.inputs) + + @router.get( + "/api/tools/{tool_id}/versions/{tool_version}/parameter_landing_request_schema", + operation_id="tools__parameter_landing_request_schema", + summary="Return a JSON schema description of the tool's inputs for the tool landing request API.", + ) + def tool_state_landing_request( + self, + trans: SessionRequestContext = DependsOnTrans, + tool_id: str = TOOL_ID_PATH_PARAM, + tool_version: str = TOOL_VERSION_PATH_PARAM, + ) -> Response: + parsed_tool = parsed_tool_model_cached_for(trans, tool_id, tool_version) + return json_schema_response_for_tool_state_model(LandingRequestToolState, parsed_tool.inputs) + + @router.get( + "/api/tools/{tool_id}/versions/{tool_version}/parameter_test_case_xml_schema", + 
operation_id="tools__parameter_test_case_xml_schema", + summary="Return a JSON schema description of the tool's inputs for test case construction.", + ) + def tool_state_test_case_xml( self, trans: SessionRequestContext = DependsOnTrans, tool_id: str = TOOL_ID_PATH_PARAM, tool_version: str = TOOL_VERSION_PATH_PARAM, ) -> Response: parsed_tool = parsed_tool_model_cached_for(trans, tool_id, tool_version) - return json_schema_response(RequestToolState.parameter_model_for(parsed_tool.inputs)) + return json_schema_response_for_tool_state_model(TestCaseToolState, parsed_tool.inputs) From d73b2724a5fc9e25f4bb54be5b5c916ddb17b381 Mon Sep 17 00:00:00 2001 From: John Chilton Date: Mon, 30 Sep 2024 18:55:09 -0400 Subject: [PATCH 11/11] Update API schema for more tool state schema APIs... --- client/src/api/schema/schema.ts | 192 ++++++++++++++++++ .../webapp/frontend/src/schema/schema.ts | 124 ++++++++++- 2 files changed, 314 insertions(+), 2 deletions(-) diff --git a/client/src/api/schema/schema.ts b/client/src/api/schema/schema.ts index eb7a48ed0542..1797563507d9 100644 --- a/client/src/api/schema/schema.ts +++ b/client/src/api/schema/schema.ts @@ -4569,6 +4569,60 @@ export interface paths { patch?: never; trace?: never; }; + "/api/tools/{tool_id}/parameter_landing_request_schema": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + /** Return a JSON schema description of the tool's inputs for the tool landing request API. */ + get: operations["tools__parameter_landing_request_schema"]; + put?: never; + post?: never; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/api/tools/{tool_id}/parameter_request_schema": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + /** + * Return a JSON schema description of the tool's inputs for the tool request API that will be added to Galaxy at some point + * @description The tool request schema includes validation of map/reduce concepts that can be consumed by the tool execution API and not just the request for a single execution. + */ + get: operations["tools__parameter_request_schema"]; + put?: never; + post?: never; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/api/tools/{tool_id}/parameter_test_case_xml_schema": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + /** Return a JSON schema description of the tool's inputs for test case construction. */ + get: operations["tools__parameter_test_case_xml_schema"]; + put?: never; + post?: never; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; "/api/tours": { parameters: { query?: never; @@ -34747,6 +34801,144 @@ export interface operations { }; }; }; + tools__parameter_landing_request_schema: { + parameters: { + query?: { + tool_version?: string | null; + }; + header?: { + /** @description The user ID that will be used to effectively make this API call. Only admins and designated users can make API calls on behalf of other users. */ + "run-as"?: string | null; + }; + path: { + /** @description The tool ID for the lineage stored in Galaxy's toolbox. 
*/ + tool_id: string; + }; + cookie?: never; + }; + requestBody?: never; + responses: { + /** @description Successful Response */ + 200: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": unknown; + }; + }; + /** @description Request Error */ + "4XX": { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["MessageExceptionModel"]; + }; + }; + /** @description Server Error */ + "5XX": { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["MessageExceptionModel"]; + }; + }; + }; + }; + tools__parameter_request_schema: { + parameters: { + query?: { + tool_version?: string | null; + }; + header?: { + /** @description The user ID that will be used to effectively make this API call. Only admins and designated users can make API calls on behalf of other users. */ + "run-as"?: string | null; + }; + path: { + /** @description The tool ID for the lineage stored in Galaxy's toolbox. */ + tool_id: string; + }; + cookie?: never; + }; + requestBody?: never; + responses: { + /** @description Successful Response */ + 200: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": unknown; + }; + }; + /** @description Request Error */ + "4XX": { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["MessageExceptionModel"]; + }; + }; + /** @description Server Error */ + "5XX": { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["MessageExceptionModel"]; + }; + }; + }; + }; + tools__parameter_test_case_xml_schema: { + parameters: { + query?: { + tool_version?: string | null; + }; + header?: { + /** @description The user ID that will be used to effectively make this API call. Only admins and designated users can make API calls on behalf of other users. */ + "run-as"?: string | null; + }; + path: { + /** @description The tool ID for the lineage stored in Galaxy's toolbox. */ + tool_id: string; + }; + cookie?: never; + }; + requestBody?: never; + responses: { + /** @description Successful Response */ + 200: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": unknown; + }; + }; + /** @description Request Error */ + "4XX": { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["MessageExceptionModel"]; + }; + }; + /** @description Server Error */ + "5XX": { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["MessageExceptionModel"]; + }; + }; + }; + }; index_api_tours_get: { parameters: { query?: never; diff --git a/lib/tool_shed/webapp/frontend/src/schema/schema.ts b/lib/tool_shed/webapp/frontend/src/schema/schema.ts index 14aab45a3c36..d52683b36513 100644 --- a/lib/tool_shed/webapp/frontend/src/schema/schema.ts +++ b/lib/tool_shed/webapp/frontend/src/schema/schema.ts @@ -532,6 +532,23 @@ export interface paths { patch?: never trace?: never } + "/api/tools/{tool_id}/versions/{tool_version}/parameter_landing_request_schema": { + parameters: { + query?: never + header?: never + path?: never + cookie?: never + } + /** Return a JSON schema description of the tool's inputs for the tool landing request API. 
*/ + get: operations["tools__parameter_landing_request_schema"] + put?: never + post?: never + delete?: never + options?: never + head?: never + patch?: never + trace?: never + } "/api/tools/{tool_id}/versions/{tool_version}/parameter_request_schema": { parameters: { query?: never @@ -543,7 +560,24 @@ export interface paths { * Return a JSON schema description of the tool's inputs for the tool request API that will be added to Galaxy at some point * @description The tool request schema includes validation of map/reduce concepts that can be consumed by the tool execution API and not just the request for a single execution. */ - get: operations["tools__parameter_request_model"] + get: operations["tools__parameter_request_schema"] + put?: never + post?: never + delete?: never + options?: never + head?: never + patch?: never + trace?: never + } + "/api/tools/{tool_id}/versions/{tool_version}/parameter_test_case_xml_schema": { + parameters: { + query?: never + header?: never + path?: never + cookie?: never + } + /** Return a JSON schema description of the tool's inputs for test case construction. */ + get: operations["tools__parameter_test_case_xml_schema"] put?: never post?: never delete?: never @@ -4422,7 +4456,93 @@ export interface operations { } } } - tools__parameter_request_model: { + tools__parameter_landing_request_schema: { + parameters: { + query?: never + header?: never + path: { + /** @description See also https://ga4gh.github.io/tool-registry-service-schemas/DataModel/#trs-tool-and-trs-tool-version-ids */ + tool_id: string + /** @description The full version string defined on the Galaxy tool wrapper. */ + tool_version: string + } + cookie?: never + } + requestBody?: never + responses: { + /** @description Successful Response */ + 200: { + headers: { + [name: string]: unknown + } + content: { + "application/json": unknown + } + } + /** @description Request Error */ + "4XX": { + headers: { + [name: string]: unknown + } + content: { + "application/json": components["schemas"]["MessageExceptionModel"] + } + } + /** @description Server Error */ + "5XX": { + headers: { + [name: string]: unknown + } + content: { + "application/json": components["schemas"]["MessageExceptionModel"] + } + } + } + } + tools__parameter_request_schema: { + parameters: { + query?: never + header?: never + path: { + /** @description See also https://ga4gh.github.io/tool-registry-service-schemas/DataModel/#trs-tool-and-trs-tool-version-ids */ + tool_id: string + /** @description The full version string defined on the Galaxy tool wrapper. */ + tool_version: string + } + cookie?: never + } + requestBody?: never + responses: { + /** @description Successful Response */ + 200: { + headers: { + [name: string]: unknown + } + content: { + "application/json": unknown + } + } + /** @description Request Error */ + "4XX": { + headers: { + [name: string]: unknown + } + content: { + "application/json": components["schemas"]["MessageExceptionModel"] + } + } + /** @description Server Error */ + "5XX": { + headers: { + [name: string]: unknown + } + content: { + "application/json": components["schemas"]["MessageExceptionModel"] + } + } + } + } + tools__parameter_test_case_xml_schema: { parameters: { query?: never header?: never