diff --git a/alchemiscale/interface/api.py b/alchemiscale/interface/api.py
index 3aff63a1..0784ea49 100644
--- a/alchemiscale/interface/api.py
+++ b/alchemiscale/interface/api.py
@@ -7,10 +7,13 @@
 from typing import Dict, List, Optional, Union
 from collections import Counter
 
-from fastapi import FastAPI, APIRouter, Body, Depends, HTTPException
+from fastapi import FastAPI, APIRouter, Body, Depends, HTTPException, Request
 from fastapi import status as http_status
 from fastapi.middleware.gzip import GZipMiddleware
 
+import json
+from gufe.tokenization import JSON_HANDLER
+
 from ..base.api import (
     GufeJSONResponse,
     scope_params,
@@ -100,18 +103,25 @@ def check_existence(
 
 
 @router.post("/networks", response_model=ScopedKey)
-def create_network(
+async def create_network(
     *,
-    network: List = Body(embed=True),
-    scope: Scope = Body(embed=True),
-    state: str = Body(embed=True),
+    request: Request,
     n4js: Neo4jStore = Depends(get_n4js_depends),
     token: TokenData = Depends(get_token_data_depends),
 ):
+    # we handle the request directly so we can decode with a custom JSON decoder;
+    # this is important for properly handling GUFE objects
+    body = await request.body()
+    body_ = json.loads(body.decode("utf-8"), cls=JSON_HANDLER.decoder)
+
+    scope = Scope.parse_obj(body_["scope"])
     validate_scopes(scope, token)
 
+    network = body_["network"]
     an = KeyedChain(network).to_gufe()
+    state = body_["state"]
+
     try:
         an_sk, _, _ = n4js.assemble_network(network=an, scope=scope, state=state)
     except ValueError as e:
diff --git a/alchemiscale/storage/statestore.py b/alchemiscale/storage/statestore.py
index f7a30c4d..fd9c8bbd 100644
--- a/alchemiscale/storage/statestore.py
+++ b/alchemiscale/storage/statestore.py
@@ -862,19 +862,26 @@ def query_networks(
             gufe_key_pattern=None if key is None else str(key),
         )
 
+        where_params = dict(
+            name_pattern="an.name",
+            org_pattern="an.`_org`",
+            campaign_pattern="an.`_campaign`",
+            project_pattern="an.`_project`",
+            state_pattern="nm.state",
+            gufe_key_pattern="an.`_gufe_key`",
+        )
+
+        conditions = []
+
         for k, v in query_params.items():
-            if v is None:
-                query_params[k] = ".*"
+            if v is not None:
+                conditions.append(f"{where_params[k]} =~ ${k}")
 
-        q = """
+        where_clause = "WHERE " + " AND ".join(conditions) if len(conditions) else ""
+
+        q = f"""
         MATCH (an:AlchemicalNetwork)<-[:MARKS]-(nm:NetworkMark)
-        WHERE
-            an.name =~ $name_pattern
-        AND an.`_gufe_key` =~ $gufe_key_pattern
-        AND an.`_org` =~ $org_pattern
-        AND an.`_campaign` =~ $campaign_pattern
-        AND an.`_project` =~ $project_pattern
-        AND nm.state =~ $state_pattern
+        {where_clause}
         RETURN an._scoped_key as sk
         """
diff --git a/alchemiscale/tests/integration/compute/client/test_compute_client.py b/alchemiscale/tests/integration/compute/client/test_compute_client.py
index 185eada4..99777c41 100644
--- a/alchemiscale/tests/integration/compute/client/test_compute_client.py
+++ b/alchemiscale/tests/integration/compute/client/test_compute_client.py
@@ -162,10 +162,17 @@ def test_claim_taskhub_task(
 
         taskhub_sks = compute_client.query_taskhubs([scope_test])
 
+        remaining_tasks = n4js_preloaded.get_taskhub_unclaimed_tasks(taskhub_sks[0])
+        assert len(remaining_tasks) == 3
+
         # claim a single task; should get highest priority task
         task_sks = compute_client.claim_taskhub_tasks(
             taskhub_sks[0], compute_service_id=compute_service_id
         )
+
+        remaining_tasks = n4js_preloaded.get_taskhub_unclaimed_tasks(taskhub_sks[0])
+        assert len(remaining_tasks) == 2
+
         all_tasks = n4js_preloaded.get_taskhub_tasks(taskhub_sks[0], return_gufe=True)
         assert len(task_sks) == 1
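Reviewer note on the `create_network` handler above: FastAPI's stock `Body(...)` parsing uses the standard `json` decoder, which leaves gufe's custom-encoded types (NumPy arrays, bytes, etc.) as plain codec dicts; decoding the raw body with `JSON_HANDLER.decoder` restores them before `KeyedChain(network).to_gufe()` runs. A minimal sketch of the round-trip, assuming gufe's default codec set includes NumPy support (the payload below is illustrative, not the real wire format):

```python
import json

import numpy as np
from gufe.tokenization import JSON_HANDLER

# illustrative payload; the real request body carries a keyed-chain
# representation of an AlchemicalNetwork plus scope and state
payload = {"state": "active", "positions": np.arange(3.0)}

# client side: encode with gufe's codecs
wire = json.dumps(payload, cls=JSON_HANDLER.encoder)

# server side: a plain json.loads() would leave `positions` as a codec dict;
# JSON_HANDLER.decoder reconstructs the original ndarray
body_ = json.loads(wire, cls=JSON_HANDLER.decoder)
assert isinstance(body_["positions"], np.ndarray)
```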
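Reviewer note on the `query_networks` change above: the old code substituted `.*` for absent filters, but in Cypher a regex comparison against a `null` property evaluates to `null`, so networks created with `name=None` never matched `an.name =~ '.*'` and were silently dropped. Building the `WHERE` clause only from filters that were actually supplied avoids this. A standalone sketch of the pattern, with illustrative filter values:

```python
# filters as a caller might supply them; None means "don't filter on this"
query_params = dict(name_pattern="tyk2.*", org_pattern=None, state_pattern="active")

# maps each parameter to the node property it constrains
where_params = dict(name_pattern="an.name", org_pattern="an.`_org`", state_pattern="nm.state")

# only parameters that were actually given become WHERE conditions
conditions = [f"{where_params[k]} =~ ${k}" for k, v in query_params.items() if v is not None]

# no filters at all -> no WHERE clause, so unnamed networks still match
where_clause = "WHERE " + " AND ".join(conditions) if conditions else ""

q = f"""
MATCH (an:AlchemicalNetwork)<-[:MARKS]-(nm:NetworkMark)
{where_clause}
RETURN an._scoped_key as sk
"""
print(q)
# MATCH (an:AlchemicalNetwork)<-[:MARKS]-(nm:NetworkMark)
# WHERE an.name =~ $name_pattern AND nm.state =~ $state_pattern
# RETURN an._scoped_key as sk
```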
diff --git a/alchemiscale/tests/integration/compute/conftest.py b/alchemiscale/tests/integration/compute/conftest.py
index 86beb1ce..d66b20c5 100644
--- a/alchemiscale/tests/integration/compute/conftest.py
+++ b/alchemiscale/tests/integration/compute/conftest.py
@@ -81,8 +81,10 @@ def n4js_preloaded(
     n4js: Neo4jStore = n4js_fresh
 
     # Set up tasks from select set of transformations
+    # we need to use second_network_an2 because its edges
+    # are a subset of network_tyk2's edges
     transformations = sorted(
-        filter(lambda x: type(x) is not NonTransformation, network_tyk2.edges)
+        filter(lambda x: type(x) is not NonTransformation, second_network_an2.edges)
     )[0:3]
 
     # set starting contents for many of the tests in this module
diff --git a/alchemiscale/tests/integration/storage/test_statestore.py b/alchemiscale/tests/integration/storage/test_statestore.py
index ca6a0b2d..2632524b 100644
--- a/alchemiscale/tests/integration/storage/test_statestore.py
+++ b/alchemiscale/tests/integration/storage/test_statestore.py
@@ -206,7 +206,7 @@ def test_get_network(self, n4js, network_tyk2, scope_test):
 
     def test_query_networks(self, n4js, network_tyk2, scope_test, multiple_scopes):
         an = network_tyk2
-        an2 = AlchemicalNetwork(edges=list(an.edges)[:-2], name="incomplete")
+        an2 = AlchemicalNetwork(edges=list(an.edges)[:-2], name=None)
 
         sk: ScopedKey = n4js.assemble_network(an, scope_test)[0]
         sk2: ScopedKey = n4js.assemble_network(an2, scope_test)[0]
diff --git a/devtools/conda-envs/alchemiscale-client.yml b/devtools/conda-envs/alchemiscale-client.yml
index f2ed56e1..6f583986 100644
--- a/devtools/conda-envs/alchemiscale-client.yml
+++ b/devtools/conda-envs/alchemiscale-client.yml
@@ -3,6 +3,7 @@ channels:
   - jaimergp/label/unsupported-cudatoolkit-shim
   - conda-forge
   - openeye
+
 dependencies:
   - pip
   - python =3.10
diff --git a/devtools/conda-envs/alchemiscale-compute.yml b/devtools/conda-envs/alchemiscale-compute.yml
index 2e033e00..56b21cef 100644
--- a/devtools/conda-envs/alchemiscale-compute.yml
+++ b/devtools/conda-envs/alchemiscale-compute.yml
@@ -2,6 +2,7 @@ name: alchemiscale-compute
 channels:
   - conda-forge
   - openeye
+
 dependencies:
   - pip
   - python =3.10
diff --git a/devtools/conda-envs/alchemiscale-server.yml b/devtools/conda-envs/alchemiscale-server.yml
index 8dee891b..f8909d40 100644
--- a/devtools/conda-envs/alchemiscale-server.yml
+++ b/devtools/conda-envs/alchemiscale-server.yml
@@ -3,6 +3,7 @@ channels:
   - jaimergp/label/unsupported-cudatoolkit-shim
   - conda-forge
   - openeye
+
 dependencies:
   - pip
   - python =3.10
@@ -10,6 +11,7 @@ dependencies:
   # alchemiscale dependencies
   - gufe=0.9.5
   - openfe=0.14.0
+  - openmmforcefields>=0.12.0
 
   - requests
   - click
diff --git a/devtools/conda-envs/docs.yml b/devtools/conda-envs/docs.yml
index 6e20b645..b0718227 100644
--- a/devtools/conda-envs/docs.yml
+++ b/devtools/conda-envs/docs.yml
@@ -1,6 +1,7 @@
 name: alchemiscale-docs
 channels:
   - conda-forge
+
 dependencies:
   - sphinx>=5.0,<6
   - sphinx_rtd_theme
diff --git a/devtools/conda-envs/test.yml b/devtools/conda-envs/test.yml
index 14b41d89..fdab69d7 100644
--- a/devtools/conda-envs/test.yml
+++ b/devtools/conda-envs/test.yml
@@ -2,12 +2,13 @@ name: alchemiscale-test
 channels:
   - jaimergp/label/unsupported-cudatoolkit-shim
   - conda-forge
+
 dependencies:
   - pip
 
   # alchemiscale dependencies
-  - gufe>=0.9.5
-  - openfe>=0.14.0
+  - gufe>=1.0.0
+  - openfe>=1.0.1
   - openmmforcefields>=0.12.0
   - pydantic<2.0
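Reviewer note: the `name=None` change in `test_query_networks` above exercises exactly the null case that motivated the `query_networks` refactor. Cypher uses three-valued logic, so any comparison involving `null` yields `null`, which `WHERE` treats as false. A quick way to confirm this against a local Neo4j instance (connection details are hypothetical; requires the `neo4j` driver and a running server):

```python
from neo4j import GraphDatabase

# hypothetical local instance; adjust URI and credentials for your setup
driver = GraphDatabase.driver("bolt://localhost:7687", auth=("neo4j", "password"))

with driver.session() as session:
    # a null property never matches a regex, not even the catch-all '.*'
    record = session.run("RETURN null =~ '.*' AS hit").single()
    assert record["hit"] is None  # null, not True -- WHERE filters the row out

driver.close()
```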
diff --git a/docs/operations.rst b/docs/operations.rst
index 5b050985..0a14610e 100644
--- a/docs/operations.rst
+++ b/docs/operations.rst
@@ -69,15 +69,20 @@ Creating a database dump
 **With the Neo4j service shut down**, navigate to the directory containing your database data, set ``$BACKUPS_DIR`` to the absolute path of your choice and ``$NEO4J_VERSION`` to the version of Neo4j you are using, then run::
 
+    # create the dump `neo4j.dump`
     docker run --rm \
         -v $(pwd):/var/lib/neo4j/data \
         -v ${BACKUPS_DIR}:/tmp \
         --entrypoint /bin/bash \
         neo4j:${NEO4J_VERSION} \
-        -c "neo4j-admin dump --to /tmp/neo4j-$(date -I).dump"
+        -c "neo4j-admin database dump --to-path /tmp neo4j"
 
-This will create a new database dump in the ``$BACKUPS_DIR`` directory.
+    # create a copy of the dump with a timestamp
+    cp ${BACKUPS_DIR}/neo4j.dump ${BACKUPS_DIR}/neo4j-$(date -I).dump
 
+This will create a new database dump in the ``$BACKUPS_DIR`` directory.
+Note that this command will fail if ``neo4j.dump`` already exists in this directory.
+It is recommended to copy this file to one with a timestamp (e.g. ``neo4j-$(date -I).dump``), as above.
 
 Restoring from a database dump
 ==============================
@@ -86,12 +91,16 @@
 To later restore from a database dump, navigate to the directory containing your database data.
 
 **With the Neo4j service shut down**, choose ``$DUMP_DATE`` and set ``$NEO4J_VERSION`` to the version of Neo4j you are using, then run::
 
+    # create a copy of the timestamped dump to `neo4j.dump`
+    cp ${BACKUPS_DIR}/neo4j-${DUMP_DATE}.dump ${BACKUPS_DIR}/neo4j.dump
+
+    # load the dump `neo4j.dump`
     docker run --rm \
         -v $(pwd):/var/lib/neo4j/data \
         -v ${BACKUPS_DIR}:/tmp \
         --entrypoint /bin/bash \
         neo4j:${NEO4J_VERSION} \
-        -c "neo4j-admin load --from /tmp/neo4j-${DUMP_DATE}.dump"
+        -c "neo4j-admin database load --from-path=/tmp neo4j"
 
 You may need to perform a ``chown -R`` following this operation to set correct ownership of the newly-loaded database contents.
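Reviewer note on the docs change: Neo4j 5 renamed `neo4j-admin dump`/`load` to `neo4j-admin database dump`/`load`, and the dump filename is now fixed to `<database>.dump` (the command refuses to overwrite an existing file), hence the explicit timestamped copy step. For operators who script their backups, a hypothetical Python wrapper of the same dump-then-copy flow (directory, version, and database name are placeholders, not part of the docs):

```python
import datetime
import shutil
import subprocess
from pathlib import Path

backups_dir = Path("/backups")   # stands in for $BACKUPS_DIR
neo4j_version = "5.16.0"         # stands in for $NEO4J_VERSION

# dump the `neo4j` database; neo4j-admin 5.x always writes neo4j.dump
# and fails if that file already exists in the target directory
subprocess.run(
    [
        "docker", "run", "--rm",
        "-v", f"{Path.cwd()}:/var/lib/neo4j/data",
        "-v", f"{backups_dir}:/tmp",
        "--entrypoint", "/bin/bash",
        f"neo4j:{neo4j_version}",
        "-c", "neo4j-admin database dump --to-path /tmp neo4j",
    ],
    check=True,
)

# keep a timestamped copy so the next dump does not collide (same as `date -I`)
stamp = datetime.date.today().isoformat()
shutil.copy2(backups_dir / "neo4j.dump", backups_dir / f"neo4j-{stamp}.dump")
```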