Skip to content

Commit

Permalink
Create settings.py
Browse files Browse the repository at this point in the history
  • Loading branch information
fivegrant committed Jul 24, 2023
1 parent 455b7da commit 2de50f6
Show file tree
Hide file tree
Showing 5 changed files with 134 additions and 111 deletions.
1 change: 0 additions & 1 deletion api.env.sample
Original file line number Diff line number Diff line change
@@ -1,5 +1,4 @@
REDIS_HOST=redis
REDIS_PORT=6379
TDS_URL=http://data-service-api:8000
STANDALONE_MODE=True
PYCIEMSS_OUTPUT_FILEPATH=result.csv
125 changes: 64 additions & 61 deletions service/models.py
Original file line number Diff line number Diff line change
Expand Up @@ -41,6 +41,33 @@ def from_rq(rq_status):
return Status(rq_status_to_tds_status[rq_status])


class ModelConfig(BaseModel):
    """Reference to one model configuration in an ensemble, with its weight."""

    # TDS model-configuration id
    id: str = Field(..., example="cd339570-047d-11ee-be55")
    # Maps each model's state/observable names onto the shared ensemble names.
    solution_mappings: dict[str, str] = Field(
        ...,
        example={"Infected": "Cases", "Hospitalizations": "hospitalized_population"},
    )
    # Relative weight of this model in the ensemble. The previous example was a
    # UUID string copy-pasted from `id`, which is not a valid float example.
    weight: float = Field(..., example=0.5)


class Dataset(BaseModel):
    """Reference to a TDS dataset used for calibration."""

    # Explicit Optional[...] instead of `str = Field(None, ...)`: these fields
    # may be omitted, and the bare `str` annotation contradicted the None
    # default (the file elsewhere uses explicit Optional, e.g. timespan).
    id: Optional[str] = Field(None, example="cd339570-047d-11ee-be55")
    filename: Optional[str] = Field(None, example="dataset.csv")
    mappings: Dict[str, str] = Field(
        default_factory=dict,
        description="Mappings from the dataset column names to the model names they should be replaced with.",
        example={'positive_tests': 'infected'},  # typo fix: was 'postive_tests'
    )

class InterventionObject(BaseModel):
    """A single intervention: set parameter `name` to `value` at `timestep`."""

    timestep: float  # simulation time at which the intervention takes effect
    name: str  # name of the model parameter being intervened on
    value: float  # new value assigned to that parameter

######################### Base operation request ############
class OperationRequest(BaseModel):
    """Common base for all operation request bodies: engine choice plus user."""

    engine: Engine = Field(..., example="ciemss")  # which simulation engine runs the job
    username: str = Field("not_provided", example="not_provided")  # requesting user, for job logs


######################### `simulate` Operation ############
class SimulateExtra(BaseModel):
class Config:
extra = ExtraEnum.allow
Expand All @@ -50,6 +77,17 @@ class Config:
)


class SimulatePostRequest(OperationRequest):
    """Request body for the `simulate` operation."""

    # TDS id of the model configuration to simulate
    model_config_id: str = Field(..., example="ba8da8d4-047d-11ee-be56")
    timespan: Timespan
    # Parameter interventions to apply during the run; defaults to none
    interventions: List[InterventionObject] = Field(default_factory=list, example=[{"timestep":1,"name":"beta","value":.4}])
    extra: SimulateExtra = Field(
        None,
        description="optional extra system specific arguments for advanced use cases",
    )


######################### `calibrate` Operation ############
class CalibrateExtra(BaseModel):
class Config:
extra = ExtraEnum.allow
Expand Down Expand Up @@ -82,6 +120,17 @@ class Config:
)


class CalibratePostRequest(OperationRequest):
    """Request body for the `calibrate` operation."""

    # TDS id of the model configuration to calibrate
    model_config_id: str = Field(..., example="c1cd941a-047d-11ee-be56")
    # Explicit Optional[...] instead of `Dataset = None`: the annotation
    # contradicted the None default and was inconsistent with `timespan` below.
    dataset: Optional[Dataset] = None
    timespan: Optional[Timespan] = None
    extra: CalibrateExtra = Field(
        None,
        description="optional extra system specific arguments for advanced use cases",
    )


######################### `ensemble-simulate` Operation ############
class EnsembleSimulateExtra(BaseModel):
class Config:
extra = ExtraEnum.allow
Expand All @@ -91,6 +140,19 @@ class Config:
)


class EnsembleSimulatePostRequest(OperationRequest):
    """Request body for the `ensemble-simulate` operation."""

    # Model configurations (with weights and solution mappings) in the ensemble.
    # default_factory=list instead of a literal [] default, consistent with
    # Dataset.mappings and SimulatePostRequest.interventions in this file.
    model_configs: List[ModelConfig] = Field(
        default_factory=list,
        example=[],
    )
    timespan: Timespan
    extra: EnsembleSimulateExtra = Field(
        None,
        description="optional extra system specific arguments for advanced use cases",
    )

######################### `ensemble-calibrate` Operation ############
class EnsembleCalibrateExtra(BaseModel):
class Config:
extra = ExtraEnum.allow
Expand All @@ -111,67 +173,7 @@ class Config:
"days", description="units in numbers of days", example="days"
)


class ModelConfig(BaseModel):
id: str = Field(..., example="cd339570-047d-11ee-be55")
solution_mappings: dict[str, str] = Field(..., example={"Infected": "Cases", "Hospitalizations": "hospitalized_population"})
weight: float = Field(..., example="cd339570-047d-11ee-be55")


class Dataset(BaseModel):
id: str = Field(None, example="cd339570-047d-11ee-be55")
filename: str = Field(None, example="dataset.csv")
mappings: Dict[str, str] = Field(
default_factory=dict,
description="Mappings from the dataset column names to the model names they should be replaced with.",
example={'postive_tests': 'infected'},
)

class InterventionObject(BaseModel):
timestep: float
name: str
value: float

class SimulatePostRequest(BaseModel):
engine: Engine = Field(..., example="ciemss")
username: str = Field("not_provided", example="not_provided")
model_config_id: str = Field(..., example="ba8da8d4-047d-11ee-be56")
timespan: Timespan
interventions: List[InterventionObject] = Field(default_factory=list, example=[{"timestep":1,"name":"beta","value":.4}])
extra: SimulateExtra = Field(
None,
description="optional extra system specific arguments for advanced use cases",
)


class CalibratePostRequest(BaseModel):
engine: Engine = Field(..., example="ciemss")
username: str = Field("not_provided", example="not_provided")
model_config_id: str = Field(..., example="c1cd941a-047d-11ee-be56")
dataset: Dataset = None
timespan: Optional[Timespan] = None
extra: CalibrateExtra = Field(
None,
description="optional extra system specific arguments for advanced use cases",
)


class EnsembleSimulatePostRequest(BaseModel):
engine: Engine = Field(..., example="ciemss")
username: str = Field("not_provided", example="not_provided")
model_configs: List[ModelConfig] = Field(
[],
example=[],
)
timespan: Timespan

extra: EnsembleSimulateExtra = Field(
None,
description="optional extra system specific arguments for advanced use cases",
)


class EnsembleCalibratePostRequest(BaseModel):
class EnsembleCalibratePostRequest(OperationRequest):
engine: Engine = Field(..., example="ciemss")
username: str = Field("not_provided", example="not_provided")
model_configs: List[ModelConfig] = Field(
Expand All @@ -185,6 +187,7 @@ class EnsembleCalibratePostRequest(BaseModel):
description="optional extra system specific arguments for advanced use cases",
)

######################### API Response ############
class JobResponse(BaseModel):
simulation_id: Optional[str] = Field(
None,
Expand Down
35 changes: 18 additions & 17 deletions service/operations.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@
import logging
import numpy as np
import requests
from settings import settings
from utils import (
make_job_dir,
update_tds_status,
Expand All @@ -27,9 +28,9 @@

TDS_CONFIGURATIONS = "/model_configurations/"
TDS_SIMULATIONS = "/simulations/"
OUTPUT_FILENAME = os.getenv("PYCIEMSS_OUTPUT_FILEPATH")
EVAL_OUTPUT_FILENAME = "eval.csv"
TDS_API = os.getenv("TDS_URL")
OUTPUT_FILENAME = settings.PYCIEMSS_OUTPUT_FILEPATH
EVAL_OUTPUT_FILENAME = settings.EVAL_OUTPUT_FILENAME
TDS_URL = settings.TDS_URL

logging.basicConfig()
logging.getLogger().setLevel(logging.DEBUG)
Expand All @@ -44,7 +45,7 @@ def simulate(*args, **kwargs):
job_id = kwargs.pop("job_id")
logging.debug(f"{job_id} (username - {username}): start simulate")

sim_results_url = TDS_API + TDS_SIMULATIONS + job_id
sim_results_url = TDS_URL + TDS_SIMULATIONS + job_id
job_dir = make_job_dir(job_id)
output_filename = os.path.join(job_dir, OUTPUT_FILENAME)
eval_output_filename = os.path.join(job_dir, EVAL_OUTPUT_FILENAME)
Expand All @@ -53,7 +54,7 @@ def simulate(*args, **kwargs):
update_tds_status(sim_results_url, status="running", start=True)

# Get model from TDS
amr_path = fetch_model(model_config_id, TDS_API, TDS_CONFIGURATIONS, job_dir)
amr_path = fetch_model(model_config_id, TDS_URL, TDS_CONFIGURATIONS, job_dir)

# Generate timepoints
time_count = end - start
Expand All @@ -67,7 +68,7 @@ def simulate(*args, **kwargs):
samples.to_csv(output_filename, index=False)
eval = output.get('quantiles')
eval.to_csv(eval_output_filename, index=False)
attach_files({output_filename: "result.csv", visualization_filename: "visualization.json", eval_output_filename: "eval.csv"}, TDS_API, TDS_SIMULATIONS, job_id)
attach_files({output_filename: "result.csv", visualization_filename: "visualization.json", eval_output_filename: "eval.csv"}, TDS_URL, TDS_SIMULATIONS, job_id)
logging.debug(f"{job_id} (username - {username}): finish simulate")

return
Expand All @@ -82,21 +83,21 @@ def calibrate_then_simulate(*args, **kwargs):
job_id = kwargs.pop("job_id")
logging.debug(f"{job_id} (username - {username}): start calibrate")

sim_results_url = TDS_API + TDS_SIMULATIONS + job_id
sim_results_url = TDS_URL + TDS_SIMULATIONS + job_id
job_dir = make_job_dir(job_id)
output_filename = os.path.join(job_dir, OUTPUT_FILENAME)
eval_output_filename = os.path.join(job_dir, EVAL_OUTPUT_FILENAME)
visualization_filename = os.path.join(job_dir, "./visualization.json")

update_tds_status(sim_results_url, status="running", start=True)

amr_path = fetch_model(model_config_id, TDS_API, TDS_CONFIGURATIONS, job_dir)
amr_path = fetch_model(model_config_id, TDS_URL, TDS_CONFIGURATIONS, job_dir)

# Generate timepoints
time_count = end - start
timepoints=[x for x in range(1,time_count+1)]

dataset_path = fetch_dataset(kwargs.pop("dataset"), TDS_API, job_dir)
dataset_path = fetch_dataset(kwargs.pop("dataset"), TDS_URL, job_dir)

output = load_and_calibrate_and_sample_petri_model(
amr_path,
Expand All @@ -111,7 +112,7 @@ def calibrate_then_simulate(*args, **kwargs):
samples.to_csv(output_filename, index=False)
eval = output.get('quantiles')
eval.to_csv(eval_output_filename, index=False)
attach_files({output_filename: "simulation.csv", visualization_filename: "visualization.json", eval_output_filename: "eval.csv"}, TDS_API, TDS_SIMULATIONS, job_id)
attach_files({output_filename: "simulation.csv", visualization_filename: "visualization.json", eval_output_filename: "eval.csv"}, TDS_URL, TDS_SIMULATIONS, job_id)

logging.debug(f"{job_id} (username - {username}): finish calibrate")

Expand All @@ -128,7 +129,7 @@ def ensemble_simulate(*args, **kwargs):
job_id = kwargs.pop("job_id")
logging.debug(f"{job_id} (username - {username}): start ensemble simulate")

sim_results_url = TDS_API + TDS_SIMULATIONS + job_id
sim_results_url = TDS_URL + TDS_SIMULATIONS + job_id
job_dir = make_job_dir(job_id)
output_filename = os.path.join(job_dir, OUTPUT_FILENAME)
eval_output_filename = os.path.join(job_dir, EVAL_OUTPUT_FILENAME)
Expand All @@ -138,7 +139,7 @@ def ensemble_simulate(*args, **kwargs):

weights = [config["weight"] for config in model_configs]
solution_mappings = [config["solution_mappings"] for config in model_configs]
amr_paths = [fetch_model(config["id"], TDS_API, TDS_CONFIGURATIONS, job_dir) for config in model_configs]
amr_paths = [fetch_model(config["id"], TDS_URL, TDS_CONFIGURATIONS, job_dir) for config in model_configs]

# Generate timepoints
time_count = end - start
Expand All @@ -159,7 +160,7 @@ def ensemble_simulate(*args, **kwargs):
samples.to_csv(output_filename, index=False)
eval = output.get('quantiles')
eval.to_csv(eval_output_filename, index=False)
attach_files({output_filename: "simulation.csv", visualization_filename: "visualization.json", eval_output_filename: "eval.csv"}, TDS_API, TDS_SIMULATIONS, job_id)
attach_files({output_filename: "simulation.csv", visualization_filename: "visualization.json", eval_output_filename: "eval.csv"}, TDS_URL, TDS_SIMULATIONS, job_id)
logging.debug(f"{job_id} (username - {username}): finish ensemble simulate")
return True

Expand All @@ -175,7 +176,7 @@ def ensemble_calibrate(*args, **kwargs):
job_id = kwargs.pop("job_id")
logging.debug(f"{job_id} (username - {username}): start ensemble calibrate")

sim_results_url = TDS_API + TDS_SIMULATIONS + job_id
sim_results_url = TDS_URL + TDS_SIMULATIONS + job_id
job_dir = make_job_dir(job_id)
output_filename = os.path.join(job_dir, OUTPUT_FILENAME)
eval_output_filename = os.path.join(job_dir, EVAL_OUTPUT_FILENAME)
Expand All @@ -185,9 +186,9 @@ def ensemble_calibrate(*args, **kwargs):

weights = [config["weight"] for config in model_configs]
solution_mappings = [config["solution_mappings"] for config in model_configs]
amr_paths = [fetch_model(config["id"], TDS_API, TDS_CONFIGURATIONS, job_dir) for config in model_configs]
amr_paths = [fetch_model(config["id"], TDS_URL, TDS_CONFIGURATIONS, job_dir) for config in model_configs]

dataset_path = fetch_dataset(dataset, TDS_API, job_dir)
dataset_path = fetch_dataset(dataset, TDS_URL, job_dir)

# Generate timepoints
time_count = end - start
Expand All @@ -209,6 +210,6 @@ def ensemble_calibrate(*args, **kwargs):
samples.to_csv(output_filename, index=False)
eval = output.get('quantiles')
eval.to_csv(eval_output_filename, index=False)
attach_files({output_filename: "simulation.csv", visualization_filename: "visualization.json", eval_output_filename: "eval.csv"}, TDS_API, TDS_SIMULATIONS, job_id)
attach_files({output_filename: "simulation.csv", visualization_filename: "visualization.json", eval_output_filename: "eval.csv"}, TDS_URL, TDS_SIMULATIONS, job_id)
logging.debug(f"{job_id} (username - {username}): finish ensemble calibrate")
return True
20 changes: 20 additions & 0 deletions service/settings.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,20 @@
"""
Configures pyciemss-service using environment variables
"""
from typing import Optional

from pydantic import BaseSettings


class Settings(BaseSettings):
    """
    pyciemss-service configuration

    Each field is read from an environment variable of the same name
    (pydantic BaseSettings behavior), falling back to the default below.
    """
    REDIS_HOST: str = "redis"  # hostname of the Redis instance backing the job queue
    REDIS_PORT: int = 6379  # port of that Redis instance
    TDS_URL: str = "http://data-service-api:8000"  # base URL of the data-service (TDS) API
    PYCIEMSS_OUTPUT_FILEPATH: str = "result.csv"  # filename for simulation sample output
    EVAL_OUTPUT_FILENAME: str = "eval.csv"  # filename for quantiles/eval output


# Module-level singleton imported by the rest of the service (e.g. operations.py).
settings = Settings()
Loading

0 comments on commit 2de50f6

Please sign in to comment.