From fe6f6258c7ee99552cbee130a2ff7ae68e3a5865 Mon Sep 17 00:00:00 2001 From: Anirban Chaudhuri <75496534+anirban-chaudhuri@users.noreply.github.com> Date: Mon, 10 Jul 2023 09:43:49 -0400 Subject: [PATCH] Updating from main to fix model loading errors (#211) * Change setup to fix MIRA name error (#205) * changed mira version before bug * fix error * Add utilities for loading distributions from AMR (#200) * added mira distribution loading * added normal distribution * fixed Normal2 and Normal3 * nit * added minimal mira_distribution_to_pyro test * Symbolic Rate law to Pytorch Rate law (#201) * I believe I wrote the correct code, based on experiments in the notebook. Will test next. * FAILED test/test_mira/test_rate_law.py::TestRateLaw::test_rate_law_compilation - AttributeError: 'ScaledBetaNoisePetriNetODESystem' object has no attribute 'beta' * Added Symbolic_Deriv_Experiments notebook * Something weird is happening. I can confirm that 'beta' is an attribute of ScaledBetaNoisePetriNetODESystem after setting up the model, but then it can't be found at sample time * Clarified the bug in the Symbolic derivatives notebook * Expected and actual derivative match * Time varying parameter rate law correctly read * Thought we added this already * Added kwargs to from_askenet and from_mira and compile_rate_law_p to load_petri_net * Blocked on https://github.com/indralab/mira/issues/189 but tests pass by making compile_rate_law_p False by default * Removed unnecessary pygraphviz dependency * Unit test to fail when concept name does not equal rate law symbols * All tests pass with default compile_rate_law_p = False * Merged from main. removed dependency on older version of mira * point mira to the github repo main branch * point mira to the github repo main branch * load_and_calibrate_and_sample(..., compile_rate_law_p=True) works with the caveat that the ScaledBetaNoisePetriNetODESystem solution was returning very slightly negative values, so I set mean = torch.abs(solution[var_name]) to address the issue * merged changes to MiraPetriNetODESystem and ScaledBetaNoisePetriNetODESystem from main. 
ScaledBetaNoisePetriNetODESystem has default compiled_rate_law_p=True * observation_function for ScaledBetaNoisePetriNetODESystem now uses torch.maximum(solution[var_name], torch.tensor(1e-9)) to deal with overshooting derivatives * aggregate parameters is now by default opt-out, and AMR models with multiple parameters per transition can be interpreted using compile_rate_law --------- Co-authored-by: Sam Witty Co-authored-by: Jeremy Zucker --- ...eling_Representation_Issues_Examples.ipynb | 163 +++++- .../Symbolic_Deriv_Experiments.ipynb | 536 ++++++++++++++++++ notebook/integration_demo/data.csv | 2 +- notebook/integration_demo/demo_ensemble.ipynb | 97 ++-- setup.cfg | 3 +- src/pyciemss/PetriNetODE/interfaces.py | 9 +- src/pyciemss/utils/__init__.py | 3 +- test/models/sir.json | 1 + .../test_ensemble/test_ensemble_interfaces.py | 6 +- test/test_mira/test_petrinet_derivs.py | 12 +- test/test_mira/test_rate_law.py | 167 ++++++ test/test_petrinet_ode/data.csv | 2 +- .../expected_intervened_samples.csv | 2 +- test/test_petrinet_ode/test_ode_interfaces.py | 15 +- test/test_utils/test_interface_utils.py | 80 +++ 15 files changed, 1017 insertions(+), 81 deletions(-) create mode 100644 notebook/Examples_for_TA2_Model_Representation/Symbolic_Deriv_Experiments.ipynb create mode 100644 test/models/sir.json create mode 100644 test/test_mira/test_rate_law.py create mode 100644 test/test_utils/test_interface_utils.py diff --git a/notebook/Examples_for_TA2_Model_Representation/Modeling_Representation_Issues_Examples.ipynb b/notebook/Examples_for_TA2_Model_Representation/Modeling_Representation_Issues_Examples.ipynb index 852932958..ae6a76813 100644 --- a/notebook/Examples_for_TA2_Model_Representation/Modeling_Representation_Issues_Examples.ipynb +++ b/notebook/Examples_for_TA2_Model_Representation/Modeling_Representation_Issues_Examples.ipynb @@ -11,7 +11,17 @@ }, { "cell_type": "code", - "execution_count": 1, + "execution_count": 3, + "id": "ecbb71f2-4ac5-459f-8c3b-825b5f520bf3", + "metadata": {}, + "outputs": [], + "source": [ + "from collections.abc import Callable" + ] + }, + { + "cell_type": "code", + "execution_count": 13, "id": "8d9914c4", "metadata": {}, "outputs": [], @@ -21,11 +31,158 @@ "from mira.modeling.viz import GraphicalModel\n", "from mira.modeling import Model\n", "from mira.modeling.askenet.petrinet import AskeNetPetriNetModel\n", - "\n", + "import torch\n", "from pyciemss.interfaces import setup_model, calibrate, intervene\n", "from pyciemss.PetriNetODE.interfaces import load_petri_model, setup_petri_model\n", + "from collections.abc import Callable\n", + "from typing import Tuple\n", + "import sympy\n", + "import sympytorch" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "id": "f6cb8d58-c4f1-4301-bd49-46875ba35571", + "metadata": {}, + "outputs": [], + "source": [ + "def make_model() -> Callable[[float, Tuple[torch.Tensor]], Tuple[torch.Tensor]]:\n", + " \"\"\"Compile the deriv function during initialization.\"\"\"\n", + " state_variables = \"beta, total_population, susceptible_population, infectious_population, gamma, recovered_population\"\n", + " beta, total_pop,S, I, gamma, R = sympy.symbols(state_variables)\n", + " susceptible = Concept(name=\"susceptible_population\", identifiers={\"ido\": \"0000514\"})\n", + " infectious = Concept(name=\"infectious_population\", identifiers={\"ido\": \"0000513\"}) # http://purl.obolibrary.org/obo/IDO_0000513\n", + " recovered = Concept(name=\"recovered_population\", identifiers={\"ido\": \"0000592\"})\n", + " \n", + " # Set a 
value for the total population\n", + " total_pop = 100000 \n", + " S_to_I = ControlledConversion(\n", + " controller = infectious,\n", + " subject=susceptible,\n", + " outcome=infectious,\n", + " rate_law=(beta/total_pop)*S*I\n", + " )\n", + " I_to_R = NaturalConversion(\n", + " subject=infectious,\n", + " outcome=recovered,\n", + " rate_law=gamma*I\n", + " )\n", + " template_model = TemplateModel(\n", + " templates=[S_to_I, I_to_R],\n", + " parameters={\n", + " 'beta': Parameter(name='beta', value=0.55), # transmission rate\n", + " 'total_population': Parameter(name='total_population', value=total_pop),\n", + " 'gamma': Parameter(name='gamma', value=0.2), # recovery rate\n", + " },\n", + " initials={\n", + " 'susceptible_population': (Initial(concept=self.susceptible, value=total_pop-1)),\n", + " 'infectious_population': (Initial(concept=self.infectious, value=1)),\n", + " 'recovered_population': (Initial(concept=self.recovered, value=0))\n", + " }\n", + " )\n", + " model=Model(template_model)\n", + " return model" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "id": "6a1478f5-4a77-44e2-a729-1cf4f200eebd", + "metadata": {}, + "outputs": [ + { + "ename": "NameError", + "evalue": "name 'self' is not defined", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mNameError\u001b[0m Traceback (most recent call last)", + "Cell \u001b[0;32mIn[18], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m model \u001b[38;5;241m=\u001b[39m \u001b[43mmake_model\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n", + "Cell \u001b[0;32mIn[17], line 26\u001b[0m, in \u001b[0;36mmake_model\u001b[0;34m()\u001b[0m\n\u001b[1;32m 11\u001b[0m S_to_I \u001b[38;5;241m=\u001b[39m ControlledConversion(\n\u001b[1;32m 12\u001b[0m controller \u001b[38;5;241m=\u001b[39m infectious,\n\u001b[1;32m 13\u001b[0m subject\u001b[38;5;241m=\u001b[39msusceptible,\n\u001b[1;32m 14\u001b[0m outcome\u001b[38;5;241m=\u001b[39minfectious,\n\u001b[1;32m 15\u001b[0m rate_law\u001b[38;5;241m=\u001b[39m(beta\u001b[38;5;241m/\u001b[39mtotal_pop)\u001b[38;5;241m*\u001b[39mS\u001b[38;5;241m*\u001b[39mI\n\u001b[1;32m 16\u001b[0m )\n\u001b[1;32m 17\u001b[0m I_to_R \u001b[38;5;241m=\u001b[39m NaturalConversion(\n\u001b[1;32m 18\u001b[0m subject\u001b[38;5;241m=\u001b[39minfectious,\n\u001b[1;32m 19\u001b[0m outcome\u001b[38;5;241m=\u001b[39mrecovered,\n\u001b[1;32m 20\u001b[0m rate_law\u001b[38;5;241m=\u001b[39mgamma\u001b[38;5;241m*\u001b[39mI\n\u001b[1;32m 21\u001b[0m )\n\u001b[1;32m 22\u001b[0m template_model \u001b[38;5;241m=\u001b[39m TemplateModel(\n\u001b[1;32m 23\u001b[0m templates\u001b[38;5;241m=\u001b[39m[S_to_I, I_to_R],\n\u001b[1;32m 24\u001b[0m parameters\u001b[38;5;241m=\u001b[39m{\n\u001b[1;32m 25\u001b[0m \u001b[38;5;124m'\u001b[39m\u001b[38;5;124mbeta\u001b[39m\u001b[38;5;124m'\u001b[39m: Parameter(name\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mbeta\u001b[39m\u001b[38;5;124m'\u001b[39m, value\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m0.55\u001b[39m), \u001b[38;5;66;03m# transmission rate\u001b[39;00m\n\u001b[0;32m---> 26\u001b[0m \u001b[38;5;124m'\u001b[39m\u001b[38;5;124mtotal_population\u001b[39m\u001b[38;5;124m'\u001b[39m: Parameter(name\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mtotal_population\u001b[39m\u001b[38;5;124m'\u001b[39m, value\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241m.\u001b[39mtotal_pop),\n\u001b[1;32m 
27\u001b[0m \u001b[38;5;124m'\u001b[39m\u001b[38;5;124mgamma\u001b[39m\u001b[38;5;124m'\u001b[39m: Parameter(name\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mgamma\u001b[39m\u001b[38;5;124m'\u001b[39m, value\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m0.2\u001b[39m), \u001b[38;5;66;03m# recovery rate\u001b[39;00m\n\u001b[1;32m 28\u001b[0m },\n\u001b[1;32m 29\u001b[0m initials\u001b[38;5;241m=\u001b[39m{\n\u001b[1;32m 30\u001b[0m \u001b[38;5;124m'\u001b[39m\u001b[38;5;124msusceptible_population\u001b[39m\u001b[38;5;124m'\u001b[39m: (Initial(concept\u001b[38;5;241m=\u001b[39m\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39msusceptible, value\u001b[38;5;241m=\u001b[39m(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mtotal_pop\u001b[38;5;241m-\u001b[39m\u001b[38;5;241m1\u001b[39m))),\n\u001b[1;32m 31\u001b[0m \u001b[38;5;124m'\u001b[39m\u001b[38;5;124minfectious_population\u001b[39m\u001b[38;5;124m'\u001b[39m: (Initial(concept\u001b[38;5;241m=\u001b[39m\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39minfectious, value\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m1\u001b[39m)),\n\u001b[1;32m 32\u001b[0m \u001b[38;5;124m'\u001b[39m\u001b[38;5;124mrecovered_population\u001b[39m\u001b[38;5;124m'\u001b[39m: (Initial(concept\u001b[38;5;241m=\u001b[39m\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mrecovered, value\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m0\u001b[39m))\n\u001b[1;32m 33\u001b[0m }\n\u001b[1;32m 34\u001b[0m )\n\u001b[1;32m 35\u001b[0m model\u001b[38;5;241m=\u001b[39mModel(template_model)\n\u001b[1;32m 36\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m model\n", + "\u001b[0;31mNameError\u001b[0m: name 'self' is not defined" + ] + } + ], + "source": [ + "model = make_model()" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "fb719d87-c045-4d46-a764-501f6e788758", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "tensor([-0.9900, -0.0100, 1.0000])" + ] + }, + "execution_count": 7, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "import sympy, torch, sympytorch\n", + "from torch import tensor\n", + "total_pop, S, I, R, beta, gamma = sympy.symbols('total_pop, S, I, R, beta, gamma')\n", + "S_to_I = beta*I*S/total_pop\n", + "I_to_R = gamma*I\n", + "\n", + "# = sympytorch.SymPyModule(expressions=[S_to_I, I_to_R])\n", + "dSdt = -S_to_I\n", + "dIdt = S_to_I - I_to_R\n", + "dRdt = I_to_R\n", "\n", - "import sympy" + "compile_deriv = sympytorch.SymPyModule(expressions=[dSdt, dIdt, dRdt])\n", + "\n", + "compiled_deriv = compile_deriv(beta=getattr(self, 'beta'),\n", + " gamma=getattr(self, 'gamma'),\n", + " S=states['S'],\n", + " I=states['I'],\n", + " R=states['R'],\n", + " total_pop=sum(states[i] for i in states)\n", + " )\n", + "\n", + "compile_deriv(beta=tensor(1.),\n", + " gamma=tensor(1.),\n", + " S=tensor(99.),\n", + " I=tensor(1.),\n", + " R=tensor(0.0),\n", + " total_pop=tensor(100.)\n", + " )" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "40c5d7eb-72f2-41a7-9b15-efec7458447c", + "metadata": {}, + "outputs": [], + "source": [ + " \n", + "x_ = torch.rand(3)\n", + "out = mod(x_name=x_) # out has shape (3, 2)\n", + "\n", + "assert torch.equal(out[:, 0], x_.cos())\n", + "assert torch.equal(out[:, 1], 2 * x_.sin())\n", + "assert out.requires_grad # from the two Parameters initialised as 1.0 and 2.0\n", + "assert {x.item() for x in mod.parameters()} == {1.0, 2.0}" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": 
"efda5703-eb9f-4118-ad38-685bb0fe18d4", + "metadata": {}, + "outputs": [], + "source": [ + "def get_fluxes(rate_law):\n", + " \n", + " " ] }, { diff --git a/notebook/Examples_for_TA2_Model_Representation/Symbolic_Deriv_Experiments.ipynb b/notebook/Examples_for_TA2_Model_Representation/Symbolic_Deriv_Experiments.ipynb new file mode 100644 index 000000000..a2259ac35 --- /dev/null +++ b/notebook/Examples_for_TA2_Model_Representation/Symbolic_Deriv_Experiments.ipynb @@ -0,0 +1,536 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "a5bc3fe2", + "metadata": {}, + "source": [ + "# Symbolic Derivative experiments\n" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "d30c01d7-0e65-42ab-af46-c018d1932c1c", + "metadata": {}, + "outputs": [], + "source": [ + "import sys" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "8d9914c4", + "metadata": {}, + "outputs": [], + "source": [ + "import mira\n", + "from mira.metamodel import Concept, ControlledConversion, GroupedControlledConversion, Initial, NaturalConversion, Parameter, Template, TemplateModel\n", + "from mira.modeling.viz import GraphicalModel\n", + "from mira.modeling import Model\n", + "from mira.modeling.askenet.petrinet import AskeNetPetriNetModel\n", + "from torch import tensor\n", + "import torch\n", + "from pyciemss.interfaces import sample, calibrate\n", + "from pyciemss.PetriNetODE.interfaces import load_petri_model, sample_petri, calibrate_petri, setup_petri_model, load_and_sample_petri_model\n", + "from pyciemss.PetriNetODE.base import get_name\n", + "from torch import tensor\n", + "import sympy\n", + "from sympytorch import SymPyModule" + ] + }, + { + "cell_type": "markdown", + "id": "16a74229-8332-4190-9736-1689cdb8624f", + "metadata": {}, + "source": [ + "# MIRA SIR Model" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "6f281623-e5dd-4a5f-84b9-c2a12b68dc92", + "metadata": {}, + "outputs": [], + "source": [ + "beta, gamma, S, I, R, total_population = sympy.symbols('beta, gamma, susceptible_population, I, recovered_population, total_population')\n", + "\n", + "susceptible = Concept(name=\"susceptible_population\", identifiers={\"ido\": \"0000514\"})\n", + "infected = Concept(name=\"I\", identifiers={\"ido\": \"0000573\"}) # http://purl.obolibrary.org/obo/IDO_0000573\n", + "recovered = Concept(name=\"recovered_population\", identifiers={\"ido\": \"0000592\"})\n", + "total_pop = 100000\n", + "\n", + "S_to_I = ControlledConversion(\n", + " controller = infected,\n", + " subject=susceptible,\n", + " outcome=infected,\n", + " rate_law=beta*S*I/(S + I + R)\n", + ")\n", + "I_to_R = NaturalConversion(\n", + " subject=infected,\n", + " outcome=recovered,\n", + " rate_law=gamma*I\n", + ")\n", + "tm = TemplateModel(\n", + " templates=[S_to_I, I_to_R],\n", + " parameters={\n", + " 'beta': Parameter(name='beta', value=0.55), # transmission rate\n", + " 'gamma': Parameter(name='gamma', value=0.2), # recovery rate\n", + " },\n", + " initials={\n", + " 'susceptible_population': (Initial(concept=susceptible, value=(total_pop - 1))), \n", + " 'I': (Initial(concept=infected, value=1)),\n", + " 'recovered_population': (Initial(concept=recovered, value=0)),\n", + " }\n", + ")\n", + "\n", + "model = mira.modeling.Model(tm)" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "6a0e316d-f86e-43ca-a73a-cbf85ef6340b", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{'beta': ,\n", + " 'gamma': }" + ] + }, + "execution_count": 5, + "metadata": {}, + 
"output_type": "execute_result" + } + ], + "source": [ + "model.parameters" + ] + }, + { + "cell_type": "markdown", + "id": "1cd73da2-8e54-464d-a968-12d8a31e8d19", + "metadata": {}, + "source": [ + "## SIR DynamicalSystem" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "52336ec5-589e-4694-8d23-6c2a0f460471", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "ScaledBetaNoisePetriNetODESystem(\n", + "\tbeta = Uniform(low: 0.4950000047683716, high: 0.6050000190734863),\n", + "\tgamma = Uniform(low: 0.18000000715255737, high: 0.2199999988079071),\n", + "\tpseudocount = 1.0\n", + ")" + ] + }, + "execution_count": 6, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "sir_raw = load_petri_model(model)\n", + "sir = setup_petri_model(sir_raw, \n", + " 0.0, \n", + " start_state= {\n", + " k: v.value \n", + " for k,v in model.template_model.initials.items()\n", + " }\n", + " )\n", + "sir" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "e6ba1615-df53-49f3-9156-fd1dd5281075", + "metadata": {}, + "outputs": [], + "source": [ + "sir.param_prior()" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "0df72036-a0e7-469b-a693-c9a002095ed0", + "metadata": {}, + "outputs": [], + "source": [ + "assert isinstance(getattr(sir, 'beta'), torch.Tensor)" + ] + }, + { + "cell_type": "markdown", + "id": "b9b8d2eb-2ccb-4095-80dc-da86cc12c235", + "metadata": {}, + "source": [ + "### Convert MIRA SympyExprStr to regular Sympy Expr" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "4ddd5b20-ac09-4cd4-990a-1ac740e4f017", + "metadata": {}, + "outputs": [], + "source": [ + "def extract_sympy(sympy_expr_str: mira.metamodel.templates.SympyExprStr) -> sympy.Expr:\n", + " \"\"\"Convert the mira SympyExprStr to a sympy.Expr.\"\"\"\n", + " return sympy.sympify(str(sympy_expr_str), \n", + " locals={str(x): x \n", + " for x in sympy_expr_str.free_symbols})\n" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "bf9ec456-0909-41f9-bdbf-9ba44a202717", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "OrderedDict([('I', ),\n", + " ('recovered_population', ),\n", + " ('susceptible_population',\n", + " )])" + ] + }, + "execution_count": 10, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "sir.var_order" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "id": "c19bd34d-3bad-4443-8f81-ed89ab891130", + "metadata": {}, + "outputs": [], + "source": [ + "timepoints = [0.1, 0.2, 0.3, 0.4]\n", + "nsamples = 3\n", + "\n", + "try:\n", + " samples = sample_petri(sir, timepoints, nsamples)\n", + "except AttributeError as e:\n", + " print(e)" + ] + }, + { + "cell_type": "markdown", + "id": "9e5eb248-4bf1-40bd-8504-04a204d9fa87", + "metadata": {}, + "source": [ + "### Confirm that symbolic fluxes can be compiled to numeric fluxes" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "id": "3da6b243-792f-4cc1-bb53-30502aff8d87", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "tensor([0.5863, 0.1905])" + ] + }, + "execution_count": 12, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "states = {k: tensor(v.value) for k,v in model.template_model.initials.items()}\n", + "symbolic_derivs = {k: 0. 
for k in states}\n", + "\n", + " \n", + "flux = SymPyModule(expressions=[beta*S*I/(S + I + R), gamma*I])\n", + "fluxes = flux(beta=getattr(sir, 'beta'),\n", + " gamma=getattr(sir,'gamma'),\n", + " susceptible_population=states['susceptible_population'],\n", + " I=states['I'],\n", + " recovered_population=states['recovered_population'])\n", + "fluxes" + ] + }, + { + "cell_type": "markdown", + "id": "16c8ed11-2cab-4955-aba0-47abab10e34f", + "metadata": {}, + "source": [ + "### Confirm that a symbolic derivative can be compiled to numeric derivatives\n" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "id": "b6f2d2a1-8d4a-4af2-8cac-29f2b673f7ff", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "tensor([-0.5863, 0.3958, 0.1905])" + ] + }, + "execution_count": 13, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "states = {k: tensor(v.value) for k,v in model.template_model.initials.items()}\n", + "symbolic_derivs = {k: 0. for k in states}\n", + "\n", + "\n", + "symbolic_derivs['susceptible_population'] = -beta*S*I/(S + I + R)\n", + "symbolic_derivs['I'] = beta*S*I/(S + I + R) - gamma*I\n", + "symbolic_derivs['recovered_population'] = gamma*I\n", + "\n", + "\n", + "numeric_deriv = SymPyModule(expressions=list(symbolic_derivs.values()))\n", + "numeric_deriv(beta=getattr(sir, 'beta'),\n", + " gamma=getattr(sir,'gamma'),\n", + " susceptible_population=states['susceptible_population'],\n", + " I=states['I'],\n", + " recovered_population=states['recovered_population'])" + ] + }, + { + "cell_type": "markdown", + "id": "3ee821a1-0570-4771-80a5-be447697785f", + "metadata": {}, + "source": [ + "### Confirm that Mira rate laws can be compiled to numeric derivatives" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "id": "5496ada3-3d65-4219-a298-5ff160d90c58", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "tensor([-0.5863, 0.3958, 0.1905])" + ] + }, + "execution_count": 14, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "states = {k: tensor(v.value) for k,v in model.template_model.initials.items()}\n", + "symbolic_derivs = {k: 0. for k in states}\n", + "\n", + "\n", + "symbolic_derivs['susceptible_population'] = -extract_sympy(S_to_I.rate_law)\n", + "symbolic_derivs['I'] = extract_sympy(S_to_I.rate_law) - extract_sympy(I_to_R.rate_law)\n", + "symbolic_derivs['recovered_population'] = extract_sympy(I_to_R.rate_law)\n", + "\n", + "\n", + "numeric_deriv = SymPyModule(expressions=list(symbolic_derivs.values()))\n", + "numeric_deriv(beta=getattr(sir, 'beta'),\n", + " gamma=getattr(sir,'gamma'),\n", + " susceptible_population=states['susceptible_population'],\n", + " I=states['I'],\n", + " recovered_population=states['recovered_population'])" + ] + }, + { + "cell_type": "markdown", + "id": "18e418f0-6372-416a-b951-745839c231cd", + "metadata": {}, + "source": [ + "### Confirm that symbolic fluxes can be converted to numeric derivatives" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "id": "9cf7dee8-a5a6-475c-a61e-7e4f508e1545", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "tensor([-0.5863, 0.3958, 0.1905])" + ] + }, + "execution_count": 15, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "states = {k: tensor(v.value) for k,v in model.template_model.initials.items()}\n", + "symbolic_derivs = {k: 0. 
for k in states}\n", + "\n", + "for t in sir.G.transitions.values():\n", + " flux = extract_sympy(t.template.rate_law)\n", + " for c in t.consumed:\n", + " symbolic_derivs[get_name(c)] -= flux\n", + " for p in t.produced:\n", + " symbolic_derivs[get_name(p)] += flux\n", + "\n", + "numeric_deriv = SymPyModule(expressions=list(symbolic_derivs.values()))\n", + "numeric_deriv(beta=getattr(sir, 'beta'),\n", + " gamma=getattr(sir,'gamma'),\n", + " susceptible_population=states['susceptible_population'],\n", + " I=states['I'],\n", + " recovered_population=states['recovered_population'])" + ] + }, + { + "cell_type": "markdown", + "id": "c1580b6b-4097-4538-b704-5bb232d3101b", + "metadata": {}, + "source": [ + "### Confirm that numeric derivatives can be compiled using mira model parameter objects and initial states" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "id": "ac4b97e0-3ee8-486e-8e53-d124459feea8", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "tensor([-0.5863, 0.3958, 0.1905])" + ] + }, + "execution_count": 16, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "states = {k: tensor(v.value) for k,v in model.template_model.initials.items()}\n", + "symbolic_derivs = {k: 0. for k in states}\n", + "\n", + "\n", + "for t in sir.G.transitions.values():\n", + " flux = extract_sympy(t.template.rate_law)\n", + " for c in t.consumed:\n", + " symbolic_derivs[c.data['name']] -= flux\n", + " for p in t.produced:\n", + " symbolic_derivs[p.data['name']] += flux\n", + "\n", + "deriv = SymPyModule(expressions=symbolic_derivs.values())\n", + "deriv(**{param.key: getattr(sir, param.key) \n", + " for param in sir.G.parameters.values()\n", + " },\n", + " **states)\n", + "\n", + " " + ] + }, + { + "cell_type": "markdown", + "id": "e5b4646d-6f92-4d3d-8a72-7fb022c3c581", + "metadata": {}, + "source": [ + "### Alternative approach using mira model parameter keys instead of model parameter objects" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "id": "4a66ec23-593c-4230-9407-89870c8be020", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "tensor([-0.5863, 0.3958, 0.1905])" + ] + }, + "execution_count": 17, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "states = {k: tensor(v.value) for k,v in model.template_model.initials.items()}\n", + "symbolic_derivs = {k: 0. 
for k in states}\n", + "\n", + "for t in sir.G.transitions.values():\n", + " flux = extract_sympy(t.template.rate_law)\n", + " for c in t.consumed:\n", + " symbolic_derivs[get_name(c)] -= flux\n", + " for p in t.produced:\n", + " symbolic_derivs[get_name(p)] += flux\n", + "\n", + "deriv = SymPyModule(expressions=list(symbolic_derivs.values()))\n", + "deriv(**{k: getattr(sir, k) for k in model.parameters},\n", + " **states)\n" + ] + }, + { + "cell_type": "markdown", + "id": "2df9e5c7-087f-4b86-b0bf-6e8807fe7b6a", + "metadata": {}, + "source": [ + "### ASKEM Model Representation" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "id": "3fb6cd74", + "metadata": {}, + "outputs": [], + "source": [ + "sir = load_and_sample_petri_model('https://raw.githubusercontent.com/DARPA-ASKEM/Model-Representations/main/petrinet/examples/sir_typed.json', \n", + " num_samples=3,\n", + " timepoints=timepoints,\n", + " compile_rate_law_p=True)\n" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "pyciemss-main", + "language": "python", + "name": "pyciemss-main" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.9" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/notebook/integration_demo/data.csv b/notebook/integration_demo/data.csv index fd6e9b3fe..4f9493f0c 100644 --- a/notebook/integration_demo/data.csv +++ b/notebook/integration_demo/data.csv @@ -1,3 +1,3 @@ -Timestep,Susceptible,Infected +Timestep,S,I 1.1,0.9,0.1 2.2,0.8 \ No newline at end of file diff --git a/notebook/integration_demo/demo_ensemble.ipynb b/notebook/integration_demo/demo_ensemble.ipynb index 1a2c00255..5b3f25fe0 100644 --- a/notebook/integration_demo/demo_ensemble.ipynb +++ b/notebook/integration_demo/demo_ensemble.ipynb @@ -18,9 +18,9 @@ "metadata": {}, "outputs": [], "source": [ - "DEMO_PATH = \"notebook/integration_demo/\"\n", + "DEMO_PATH = \"../../notebook/integration_demo/\"\n", "ASKENET_PATH_1 = \"https://raw.githubusercontent.com/DARPA-ASKEM/Model-Representations/main/petrinet/examples/sir_typed.json\"\n", - "ASKENET_PATH_2 = \"test/models/AMR_examples/SIDARTHE.amr.json\"\n", + "ASKENET_PATH_2 = \"../../test/models/AMR_examples/SIDARTHE.amr.json\"\n", "\n", "ASKENET_PATHS = [ASKENET_PATH_1, ASKENET_PATH_2]" ] @@ -37,20 +37,20 @@ "\n", "def solution_mapping2(model2_solution: dict) -> dict:\n", " mapped_solution = {}\n", - " mapped_solution[\"Susceptible\"] = (\n", + " mapped_solution[\"S\"] = (\n", " model2_solution[\"Susceptible\"]\n", " + model2_solution[\"Recognized\"]\n", " + model2_solution[\"Threatened\"]\n", " )\n", "\n", - " mapped_solution[\"Infected\"] = (\n", + " mapped_solution[\"I\"] = (\n", " model2_solution[\"Infected\"]\n", " + model2_solution[\"Ailing\"]\n", " + model2_solution[\"Diagnosed\"]\n", " )\n", "\n", " # Model 1 doesn't include dead people, and implicitly assumes that everyone who is infected will recover.\n", - " mapped_solution[\"Recovered\"] = (\n", + " mapped_solution[\"R\"] = (\n", " model2_solution[\"Healed\"] + model2_solution[\"Extinct\"]\n", " )\n", "\n", @@ -72,7 +72,16 @@ "cell_type": "code", "execution_count": 4, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/Users/zuck016/Projects/Proposals/ASKEM/build/pyciemss/src/pyciemss/PetriNetODE/base.py:330: UserWarning: Parameter (('Infected', 
('identity', 'ido:0000511')), ('Healed', ('identity', 'ido:0000592')), 'NaturalConversion', 'rate') has value None and will be set to Uniform(0, 1)\n", + " warnings.warn(warnings_string)\n" + ] + } + ], "source": [ "weights = [0.5, 0.5]\n", "num_samples = 100\n", @@ -97,53 +106,35 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 5, "metadata": {}, "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/Users/zuck016/Projects/Proposals/ASKEM/build/pyciemss/src/pyciemss/PetriNetODE/base.py:330: UserWarning: Parameter (('Infected', ('identity', 'ido:0000511')), ('Healed', ('identity', 'ido:0000592')), 'NaturalConversion', 'rate') has value None and will be set to Uniform(0, 1)\n", + " warnings.warn(warnings_string)\n" + ] + }, { "name": "stdout", "output_type": "stream", "text": [ - "iteration 0: loss = 56.12164771556854\n", - "iteration 25: loss = 49.60371994972229\n", - "iteration 50: loss = 33.30795359611511\n", - "iteration 75: loss = 20.579649567604065\n", - "iteration 100: loss = 21.66509783267975\n", - "iteration 125: loss = 18.753786206245422\n", - "iteration 150: loss = 16.53753125667572\n", - "iteration 175: loss = 17.371854722499847\n", - "iteration 200: loss = 15.905078649520874\n", - "iteration 225: loss = 13.789868116378784\n", - "iteration 250: loss = 13.533606469631195\n", - "iteration 275: loss = 14.916905522346497\n", - "iteration 300: loss = 13.741706490516663\n", - "iteration 325: loss = 14.56143856048584\n", - "iteration 350: loss = 12.594428539276123\n", - "iteration 375: loss = 14.353631019592285\n", - "iteration 400: loss = 14.124584197998047\n", - "iteration 425: loss = 16.33763426542282\n", - "iteration 450: loss = 15.56375253200531\n", - "iteration 475: loss = 15.068402886390686\n", - "iteration 500: loss = 14.676460444927216\n", - "iteration 525: loss = 15.804013967514038\n", - "iteration 550: loss = 14.724295616149902\n", - "iteration 575: loss = 14.85770708322525\n", - "iteration 600: loss = 14.058846235275269\n", - "iteration 625: loss = 13.119673490524292\n", - "iteration 650: loss = 15.249755263328552\n", - "iteration 675: loss = 14.653111934661865\n", - "iteration 700: loss = 14.084820747375488\n", - "iteration 725: loss = 12.390039920806885\n", - "iteration 750: loss = 15.382243156433105\n", - "iteration 775: loss = 13.437841176986694\n", - "iteration 800: loss = 15.685991406440735\n", - "iteration 825: loss = 14.873103141784668\n", - "iteration 850: loss = 14.577804327011108\n", - "iteration 875: loss = 12.592588305473328\n", - "iteration 900: loss = 16.371800184249878\n", - "iteration 925: loss = 12.398984611034393\n", - "iteration 950: loss = 14.811098098754883\n", - "iteration 975: loss = 15.616987764835358\n" + "iteration 0: loss = 66.16035544872284\n", + "iteration 25: loss = 42.31934851408005\n", + "iteration 50: loss = 33.17540234327316\n", + "iteration 75: loss = 27.15283751487732\n", + "iteration 100: loss = 21.151540756225586\n", + "iteration 125: loss = 17.88287889957428\n", + "iteration 150: loss = 19.490506768226624\n", + "iteration 175: loss = 16.146777868270874\n", + "iteration 200: loss = 16.498568773269653\n", + "iteration 225: loss = 15.53571480512619\n", + "iteration 250: loss = 15.07484495639801\n", + "iteration 275: loss = 14.595208406448364\n", + "iteration 300: loss = 14.831865012645721\n", + "iteration 325: loss = 15.439243197441101\n" ] } ], @@ -162,6 +153,7 @@ " timepoints,\n", " verbose=True,\n", " total_population=1000,\n", + " num_iterations=350,\n", ")\n", "\n", "# Save 
results\n", @@ -180,9 +172,9 @@ ], "metadata": { "kernelspec": { - "display_name": "askem-test", + "display_name": "pyciemss-main", "language": "python", - "name": "python3" + "name": "pyciemss-main" }, "language_info": { "codemirror_mode": { @@ -194,10 +186,9 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.11" - }, - "orig_nbformat": 4 + "version": "3.10.9" + } }, "nbformat": 4, - "nbformat_minor": 2 + "nbformat_minor": 4 } diff --git a/setup.cfg b/setup.cfg index d9ac1138e..c9ccf21f1 100644 --- a/setup.cfg +++ b/setup.cfg @@ -24,7 +24,8 @@ install_requires = h5netcdf dask requests - + sympytorch + zip_safe = false include_package_data = true python_requires = >=3.9 diff --git a/src/pyciemss/PetriNetODE/interfaces.py b/src/pyciemss/PetriNetODE/interfaces.py index 1fb5984eb..411d04bb5 100644 --- a/src/pyciemss/PetriNetODE/interfaces.py +++ b/src/pyciemss/PetriNetODE/interfaces.py @@ -57,6 +57,7 @@ def load_and_sample_petri_model( pseudocount: float = 1.0, start_time: float = -1e-10, method="dopri5", + compile_rate_law_p: bool = True ) -> pd.DataFrame: """ Load a petri net from a file, compile it into a probabilistic program, and sample from it. @@ -92,6 +93,7 @@ def load_and_sample_petri_model( petri_model_or_path=petri_model_or_path, add_uncertainty=True, pseudocount=pseudocount, + compile_rate_law_p=compile_rate_law_p ) # If the user doesn't override the start state, use the initial values from the model. @@ -137,6 +139,7 @@ def load_and_calibrate_and_sample_petri_model( num_particles: int = 1, autoguide=pyro.infer.autoguide.AutoLowRankMultivariateNormal, method="dopri5", + compile_rate_law_p: bool = True ) -> pd.DataFrame: """ Load a petri net from a file, compile it into a probabilistic program, calibrate it on data, @@ -186,6 +189,7 @@ def load_and_calibrate_and_sample_petri_model( model = load_petri_model( petri_model_or_path=petri_model_or_path, add_uncertainty=True, + compile_rate_law_p=compile_rate_law_p, pseudocount=pseudocount, ) @@ -551,17 +555,18 @@ def load_petri_model( petri_model_or_path: Union[str, mira.metamodel.TemplateModel, mira.modeling.Model], add_uncertainty=True, pseudocount=1.0, + compile_rate_law_p: bool = False ) -> PetriNetODESystem: """ Load a petri net from a file and compile it into a probabilistic program. 
""" if add_uncertainty: - model = ScaledBetaNoisePetriNetODESystem.from_askenet(petri_model_or_path) + model = ScaledBetaNoisePetriNetODESystem.from_askenet(petri_model_or_path, compile_rate_law_p=compile_rate_law_p) model.pseudocount = torch.tensor(pseudocount) return model else: - return MiraPetriNetODESystem.from_askenet(petri_model_or_path) + return MiraPetriNetODESystem.from_askenet(petri_model_or_path, compile_rate_law_p=compile_rate_law_p) @setup_model.register diff --git a/src/pyciemss/utils/__init__.py b/src/pyciemss/utils/__init__.py index caf945dee..6789e6032 100644 --- a/src/pyciemss/utils/__init__.py +++ b/src/pyciemss/utils/__init__.py @@ -14,6 +14,7 @@ deterministic, petri_to_ode, order_state, + reparameterize, unorder_state, duplicate_petri_net, intervene_petri_net) @@ -28,4 +29,4 @@ plot_intervention_line, plot_ouu_risk, sideaxis, - sideaxishist) \ No newline at end of file + sideaxishist) diff --git a/test/models/sir.json b/test/models/sir.json new file mode 100644 index 000000000..372b725b7 --- /dev/null +++ b/test/models/sir.json @@ -0,0 +1 @@ +{"name": "SIR Model", "schema": "https://raw.githubusercontent.com/DARPA-ASKEM/Model-Representations/petrinet_v0.5/petrinet/petrinet_schema.json", "schema_name": "petrinet", "description": "SIR model", "model_version": "0.1", "properties": {}, "model": {"states": [{"id": "Susceptible", "name": "Susceptible", "grounding": {"identifiers": {"ido": "0000514"}, "modifiers": {}}}, {"id": "Infected", "name": "Infected", "grounding": {"identifiers": {"ido": "0000511"}, "modifiers": {}}}, {"id": "Recovered", "name": "Recovered", "grounding": {"identifiers": {"ido": "0000592"}, "modifiers": {}}}], "transitions": [{"id": "t1", "input": ["Infected", "Susceptible"], "output": ["Infected", "Infected"], "properties": {"name": "t1"}}, {"id": "t2", "input": ["Infected"], "output": ["Recovered"], "properties": {"name": "t2"}}]}, "semantics": {"ode": {"rates": [{"target": "t1", "expression": "I*S*beta", "expression_mathml": "ISbeta"}, {"target": "t2", "expression": "I*gamma", "expression_mathml": "Igamma"}], "initials": [{"target": "Susceptible", "expression": "1000.00000000000", "expression_mathml": "1000.0"}, {"target": "Infected", "expression": "1.00000000000000", "expression_mathml": "1.0"}, {"target": "Recovered", "expression": "0.0", "expression_mathml": "0.0"}], "parameters": [{"id": "beta", "value": 2.7e-07, "distribution": {"type": "StandardUniform1", "parameters": {"minimum": 2.6e-07, "maximum": 2.8e-07}}}, {"id": "gamma", "value": 0.14, "distribution": {"type": "StandardUniform1", "parameters": {"minimum": 0.1, "maximum": 0.18}}}], "observables": [{"id": "noninf", "name": "noninf", "expression": "R + S", "expression_mathml": "RS"}], "time": {"id": "t"}}}, "metadata": {"license": null, "authors": [], "references": [], "time_scale": null, "time_start": null, "time_end": null, "locations": [], "pathogens": [], "diseases": [], "hosts": [], "model_types": []}} \ No newline at end of file diff --git a/test/test_ensemble/test_ensemble_interfaces.py b/test/test_ensemble/test_ensemble_interfaces.py index c593da2a0..1c6225a9e 100644 --- a/test/test_ensemble/test_ensemble_interfaces.py +++ b/test/test_ensemble/test_ensemble_interfaces.py @@ -32,20 +32,20 @@ def solution_mapping1(model1_solution: dict) -> dict: def solution_mapping2(model2_solution: dict) -> dict: mapped_solution = {} - mapped_solution["Susceptible"] = ( + mapped_solution["S"] = ( model2_solution["Susceptible"] + model2_solution["Recognized"] + model2_solution["Threatened"] ) - 
mapped_solution["Infected"] = ( + mapped_solution["I"] = ( model2_solution["Infected"] + model2_solution["Ailing"] + model2_solution["Diagnosed"] ) # Model 1 doesn't include dead people, and implicitly assumes that everyone who is infected will recover. - mapped_solution["Recovered"] = ( + mapped_solution["R"] = ( model2_solution["Healed"] + model2_solution["Extinct"] ) diff --git a/test/test_mira/test_petrinet_derivs.py b/test/test_mira/test_petrinet_derivs.py index b3cf35e95..eff3b082c 100644 --- a/test/test_mira/test_petrinet_derivs.py +++ b/test/test_mira/test_petrinet_derivs.py @@ -67,13 +67,13 @@ def setup_ASKENET(self): mira_model = model_from_url('https://raw.githubusercontent.com/DARPA-ASKEM/Model-Representations/main/petrinet/examples/sir.json') self.sir_askenet = load_petri_model(mira_model) # Initialize - self.askenet2hand = dict(Susceptible_sol='susceptible_population_sol', - Infected_sol='infected_population_sol', - Recovered_sol='immune_population_sol') + self.askenet2hand = dict(S_sol='susceptible_population_sol', + I_sol='infected_population_sol', + R_sol='immune_population_sol') - self.hand2askenet = dict(susceptible_population='Susceptible', - infected_population='Infected', - immune_population='Recovered') + self.hand2askenet = dict(susceptible_population='S', + infected_population='I', + immune_population='R') initial_state = {self.hand2askenet[k]: v for k, v in self.initial_state.items()} diff --git a/test/test_mira/test_rate_law.py b/test/test_mira/test_rate_law.py new file mode 100644 index 000000000..a9d513218 --- /dev/null +++ b/test/test_mira/test_rate_law.py @@ -0,0 +1,167 @@ +import pandas as pd +import unittest +import sympy +from sympytorch import SymPyModule +import mira +from pyciemss.PetriNetODE.interfaces import setup_petri_model, sample, load_petri_model, load_and_sample_petri_model +import torch +from mira.metamodel import Concept, ControlledConversion, GroupedControlledConversion, Initial, NaturalConversion, Parameter, Template, TemplateModel +from mira.modeling import Model +from mira.modeling.askenet.petrinet import AskeNetPetriNetModel +from collections.abc import Callable +from pyciemss.PetriNetODE.base import ScaledBetaNoisePetriNetODESystem +from pyciemss.utils import reparameterize + +class TestRateLaw(unittest.TestCase): + """Test the symbolic rate law.""" + def setUp(self): + """Set up the test fixtures.""" + beta, gamma, S, I, R, total_population = sympy.symbols('beta, gamma, susceptible_population, infected_population, recovered_population, total_population') + + susceptible = Concept(name="susceptible_population", identifiers={"ido": "0000514"}) + infected = Concept(name="infected_population", identifiers={"ido": "0000573"}) # http://purl.obolibrary.org/obo/IDO_0000573 + recovered = Concept(name="recovered_population", identifiers={"ido": "0000592"}) + total_pop = 100000 + + S_to_I = ControlledConversion( + controller = infected, + subject=susceptible, + outcome=infected, + rate_law=beta*S*I/(S + I + R) + ) + I_to_R = NaturalConversion( + subject=infected, + outcome=recovered, + rate_law=gamma*I + ) + self.tm = TemplateModel( + templates=[S_to_I, I_to_R], + parameters={ + 'beta': Parameter(name='beta', value=0.55), # transmission rate + 'gamma': Parameter(name='gamma', value=0.2), # recovery rate + }, + initials={ + 'susceptible_population': (Initial(concept=susceptible, value=(total_pop - 1))), + 'infected_population': (Initial(concept=infected, value=1)), + 'recovered_population': (Initial(concept=recovered, value=0)), + } + ) + + 
compiled_sir = ScaledBetaNoisePetriNetODESystem(mira.modeling.Model(self.tm), compile_rate_law_p=True) + uncompiled_sir = ScaledBetaNoisePetriNetODESystem(mira.modeling.Model(self.tm), compile_rate_law_p=False) + + + self.start_state = {k: v.value + for k,v in self.tm.initials.items()} + symbolic_derivs = {} + + + + symbolic_derivs['infected_population'] = beta*S*I/(S + I + R) - gamma*I + symbolic_derivs['recovered_population'] = gamma*I + symbolic_derivs['susceptible_population'] = -beta*S*I/(S + I + R) + + self.numeric_deriv = SymPyModule(expressions=list(symbolic_derivs.values())) + self.nsamples = 5 + self.timepoints = [1.0, 2.0, 3.0] + + self.compiled_sir = setup_petri_model(compiled_sir, 0.0, start_state=self.start_state) + self.uncompiled_sir = setup_petri_model(uncompiled_sir, 0.0, start_state=self.start_state) + + def test_rate_law_compilation(self): + """Test that the rate law can be compiled correctly.""" + self.uncompiled_sir.param_prior() + self.compiled_sir.param_prior() + compiled_trajectories = sample(self.compiled_sir, self.timepoints, self.nsamples) + for i in range(self.nsamples): + uncompiled_sir = reparameterize(self.uncompiled_sir, { + 'beta': compiled_trajectories['beta'][i], + 'gamma': compiled_trajectories['gamma'][i] + }) + uncompiled_trajectories = sample(uncompiled_sir, self.timepoints, 1) + for state_variable in compiled_trajectories: + if '_sol' in state_variable: + self.assertTrue( + torch.allclose( + compiled_trajectories[state_variable][i], + uncompiled_trajectories[state_variable][0], + atol=1e-4 + ), + f"Compiled {state_variable} trajectory {i}: {compiled_trajectories[state_variable][i]}\n" + f"Uncompiled {state_variable} trajectory: {uncompiled_trajectories[state_variable][0]}" + ) + + def test_extract_sympy(self): + """Test that the sympy expression can be extracted from the rate law.""" + for template in self.tm.templates: + rate_law = template.rate_law + expected_rate_law = sympy.sympify( + str(rate_law), locals={ + str(x): x + for x in rate_law.free_symbols + } + ) + self.assertEqual(str(expected_rate_law), str(rate_law)) + self.assertEqual(str(expected_rate_law), str(self.compiled_sir.extract_sympy(rate_law))) + self.assertEqual(expected_rate_law, self.compiled_sir.extract_sympy(rate_law)) + + def test_symbolic_flux_to_numeric_flux(self): + """Test that the symbolic flux can be converted to a numeric flux.""" + self.compiled_sir.param_prior() + expected_deriv = self.numeric_deriv( + beta=0.5, + gamma=0.2, + susceptible_population=99999.0, + infected_population=1.0, + recovered_population=0.0 + ) + actual_deriv = self.compiled_sir.compiled_rate_law( + beta=0.5, + gamma=0.2, + susceptible_population=99999.0, + infected_population=1.0, + recovered_population=0.0 + ) + self.assertTrue( + torch.allclose( + expected_deriv, actual_deriv, atol=1e-3 + ), + f"Expected deriv: {expected_deriv}\n" + f"Actual deriv {actual_deriv}" + ) + + def test_time_varying_parameter_rate_law(self): + """Test that the rate law can be compiled correctly.""" + url = 'https://raw.githubusercontent.com/indralab/mira/hackathon/notebooks/hackathon_2023.07/scenario1_c.json' + scenario1_c = load_petri_model(url, compile_rate_law_p=True) + expected_rate_law_str = 'kappa*(beta_nc + (beta_c - beta_nc)/(1 + exp(-k_2*(-t + t_1))) + (-beta_c + beta_s)/(1 + exp(-k_1*(-t + t_0))))' + expected_rate_law_symbolic = sympy.sympify(expected_rate_law_str) + param_vals = dict(kappa=1.0, beta_nc=0.5, beta_c=0.6, beta_s=0.4, k_1=0.1, k_2=0.1, t_0=0.0, t_1=1.0) + expected_rate_law_mod = 
SymPyModule(expressions=[expected_rate_law_symbolic]) + expected_rate_law_values = expected_rate_law_mod(**param_vals, **dict(t=torch.tensor(0.2))) + actual_rate_law_symbolic = scenario1_c.extract_sympy(scenario1_c.G.template_model.templates[0].rate_law) + actual_rate_law_mod = SymPyModule(expressions=[actual_rate_law_symbolic]) + actual_rate_law_values1 = actual_rate_law_mod(**param_vals, **dict(t=torch.tensor(23.0))) + actual_rate_law_values2 = actual_rate_law_mod(**param_vals, **dict(t=torch.tensor(0.2))) + + self.assertFalse(torch.allclose( + expected_rate_law_values, actual_rate_law_values1, atol=1e-3 + ), + f"Expected rate law value: {expected_rate_law_values}\n" + f"Actual rate law value {actual_rate_law_values1}") + + self.assertTrue(torch.allclose( + expected_rate_law_values, actual_rate_law_values2, atol=1e-3 + ), + f"Expected rate law value: {expected_rate_law_values}\n" + f"Actual rate law value {actual_rate_law_values2}") + + def test_askem_model_representation(self): + """Test that the rate law can be compiled correctly.""" + url='https://raw.githubusercontent.com/DARPA-ASKEM/Model-Representations/main/petrinet/examples/sir_typed.json' + samples = load_and_sample_petri_model(url, num_samples=self.nsamples, + timepoints=self.timepoints, + compile_rate_law_p=True) + self.assertTrue(isinstance(samples, pd.DataFrame)) +if __name__ == "__main__": + unittest.main() diff --git a/test/test_petrinet_ode/data.csv b/test/test_petrinet_ode/data.csv index fd6e9b3fe..4f9493f0c 100644 --- a/test/test_petrinet_ode/data.csv +++ b/test/test_petrinet_ode/data.csv @@ -1,3 +1,3 @@ -Timestep,Susceptible,Infected +Timestep,S,I 1.1,0.9,0.1 2.2,0.8 \ No newline at end of file diff --git a/test/test_petrinet_ode/expected_intervened_samples.csv b/test/test_petrinet_ode/expected_intervened_samples.csv index 06227624b..482c726cc 100644 --- a/test/test_petrinet_ode/expected_intervened_samples.csv +++ b/test/test_petrinet_ode/expected_intervened_samples.csv @@ -1,4 +1,4 @@ -timepoint_id,sample_id,beta_param,gamma_param,Infected_sol,Recovered_sol,Susceptible_sol +timepoint_id,sample_id,beta_param,gamma_param,I_sol,R_sol,S_sol 0,0,1.0,0.1,0.024186953902244568,0.00160835194401443,0.9742047786712646 1,0,1.0,0.1,0.02639336884021759,0.0018610984552651644,0.971746027469635 2,0,1.0,0.1,0.028793662786483765,0.002136865397915244,0.9690694808959961 diff --git a/test/test_petrinet_ode/test_ode_interfaces.py b/test/test_petrinet_ode/test_ode_interfaces.py index 2a352b99a..26b38da65 100644 --- a/test/test_petrinet_ode/test_ode_interfaces.py +++ b/test/test_petrinet_ode/test_ode_interfaces.py @@ -65,9 +65,6 @@ def setUp(self): interventions=self.interventions, ) - - - def test_samples_type(self): """Test that `samples` is a Pandas DataFrame""" for s in [self.samples, self.calibrated_samples]: @@ -272,9 +269,9 @@ def test_load_and_sample_petri_model(self): timepoints = [1.0, 1.1, 1.2, 1.3] num_samples = 3 initial_state = { - "Susceptible": 0.99, - "Infected": 0.01, - "Recovered": 0.0, + "S": 0.99, + "I": 0.01, + "R": 0.0, } expected_intervened_samples = pd.read_csv('test/test_petrinet_ode/expected_intervened_samples.csv') actual_intervened_samples = load_and_sample_petri_model(ASKENET_PATH, num_samples, timepoints, interventions = interventions, start_state=initial_state) @@ -288,9 +285,9 @@ def test_load_and_calibrate_and_sample_petri_model(self): timepoints = [1.0, 1.1, 1.2, 1.3] num_samples = 3 initial_state = { - "Susceptible": 0.99, - "Infected": 0.01, - "Recovered": 0.0, + "S": 0.99, + "I": 0.01, + "R": 0.0, } 
expected_intervened_samples = pd.read_csv('test/test_petrinet_ode/expected_intervened_samples.csv') data_path = 'test/test_petrinet_ode/data.csv' diff --git a/test/test_utils/test_interface_utils.py b/test/test_utils/test_interface_utils.py new file mode 100644 index 000000000..536541e98 --- /dev/null +++ b/test/test_utils/test_interface_utils.py @@ -0,0 +1,80 @@ +import unittest +import os +from pyciemss.utils.interface_utils import ( + convert_to_output_format, + interventions_and_sampled_params_to_interval, + assign_interventions_to_timepoints + ) +from torch import tensor +import pandas as pd +import numpy as np +import bisect +from pandas.testing import assert_frame_equal + +class Test_Interface_Utils(unittest.TestCase): + """Test interface_utils.py""" + def setUp(self): + """Set up test fixtures.""" + self.intervened_samples = { + 'beta': tensor([0.0263, 0.0271, 0.0246]), + 'gamma': tensor([0.1308, 0.1342, 0.1359]), + 'Infected_sol': tensor([[ 0.9491, 0.9008, 3.5336, 22.6421, 132.3466], + [ 0.9478, 0.8984, 3.5195, 22.5536, 131.8928], + [ 0.9459, 0.8946, 3.5016, 22.4412, 131.3172]]), + 'Recovered_sol': tensor([[0.0637, 0.1242, 0.5759, 1.6780, 8.0082], + [0.0653, 0.1273, 0.5787, 1.6764, 7.9834], + [0.0661, 0.1286, 0.5784, 1.6706, 7.9482]]), + 'Susceptible_sol': tensor([[999.9871, 999.9746, 996.8905, 976.6799, 860.6453], + [999.9868, 999.9745, 996.9033, 976.7705, 861.1242], + [999.9880, 999.9767, 996.9209, 976.8878, 861.7342]])} + self.interventions = [ + (1.1, 'beta', 1.0), + (2.1, 'gamma', 0.1), + (1.3, 'beta', 2.0), + (1.4, 'gamma', 0.3) + ] + self.timepoints = [0.5, 1.0, 2.0, 3.0, 4.0] + self.sampled_params = { + 'beta_param': [0.2, 0.25, 0.15], + 'gamma_param':[0.3, 0.25, 0.35] + } + + def test_convert_to_output_format(self): + """Test convert_to_output_format.""" + expected_output = pd.read_csv('test/test_utils/expected_output_format.csv') + assert_frame_equal( + expected_output, + convert_to_output_format(self.intervened_samples, + self.timepoints, + self.interventions), + check_exact=False, atol=1e-5, rtol=1e-5 + ) + + def test_intervention_to_interval(self): + """Test intervention_to_interval.""" + expected_intervals = { + 'beta_param': [ + {'start': -np.inf, 'end': 1.1, 'param_values': [0.2, 0.25, 0.15]}, + {'start': 1.1, 'end': 1.3, 'param_values': [1.0, 1.0, 1.0]}, + {'start': 1.3, 'end': np.inf, 'param_values': [2.0, 2.0, 2.0]}], + 'gamma_param': [ + {'start': -np.inf, 'end': 1.4, 'param_values': [0.3, 0.25, 0.35]}, + {'start': 1.4, 'end': 2.1, 'param_values': [0.3, 0.3, 0.3]}, + {'start': 2.1, 'end': np.inf, 'param_values': [0.1, 0.1, 0.1]}]} + + self.assertEqual(expected_intervals, + interventions_and_sampled_params_to_interval(self.interventions, self.sampled_params)) + + def test_assign_interventions_to_timepoints(self): + """Test assign_interventions_to_timepoints.""" + expected_intervened_values = { + 'beta_param': [ + 0.2, 0.2, 2.0, 2.0, 2.0, 0.25, 0.25, 2.0, 2.0, 2.0, 0.15, 0.15, 2.0, 2.0, 2.0 + ], + 'gamma_param': [ + 0.3, 0.3, 0.3, 0.1, 0.1, 0.25, 0.25, 0.3, 0.1, 0.1, 0.35, 0.35, 0.3, 0.1, 0.1 + ] + } + self.assertEqual(expected_intervened_values, assign_interventions_to_timepoints( + self.interventions, self.timepoints, self.sampled_params)) +
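
For reference, the rate-law compilation that the new compile_rate_law_p flag turns on follows the same recipe prototyped in the Symbolic_Deriv_Experiments notebook above: convert each MIRA rate law to a plain sympy expression, accumulate signed fluxes per state variable, compile the result into a torch module with sympytorch, and keep evaluated solutions away from zero the way the updated ScaledBetaNoisePetriNetODESystem observation_function does with torch.maximum(solution[var_name], torch.tensor(1e-9)). The sketch below is illustrative only, using the SIR symbols and parameter values from the notebooks; it is not the package's internal implementation.

    # Minimal sketch (assumes sympy, torch, and sympytorch are installed).
    import sympy
    import torch
    from sympytorch import SymPyModule

    beta, gamma, S, I, R = sympy.symbols("beta gamma S I R")

    # Signed fluxes per state variable, built from the two SIR rate laws.
    infection = beta * S * I / (S + I + R)
    recovery = gamma * I
    symbolic_derivs = {"S": -infection, "I": infection - recovery, "R": recovery}

    # sympytorch compiles the sympy expressions into a torch.nn.Module whose
    # call signature is the set of free symbols in the expressions.
    deriv = SymPyModule(expressions=list(symbolic_derivs.values()))

    states = {"S": torch.tensor(99999.0), "I": torch.tensor(1.0), "R": torch.tensor(0.0)}
    dstate = deriv(beta=torch.tensor(0.55), gamma=torch.tensor(0.2), **states)  # [dS/dt, dI/dt, dR/dt]

    # Guard against ODE solutions that overshoot slightly below zero before
    # they are used as the mean of the ScaledBeta observation noise.
    clamped = {k: torch.maximum(v, torch.tensor(1e-9)) for k, v in states.items()}

The same behaviour is exercised end to end by load_and_sample_petri_model(sir_typed_url, num_samples, timepoints, compile_rate_law_p=True), as in the test_askem_model_representation test added above.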