From eef13f705b08972b29f733ba8642875927f4090a Mon Sep 17 00:00:00 2001
From: LegrandNico
Date: Mon, 2 Oct 2023 17:11:00 +0200
Subject: [PATCH] notebooks

---
 .../notebooks/0-Creating_networks.ipynb       |  6 ++---
 docs/source/notebooks/0-Creating_networks.md  |  8 +++----
 .../2-Using_custom_response_functions.ipynb   |  6 ++---
 .../2-Using_custom_response_functions.md      | 12 +++++-----
 .../Example_1_Heart_rate_variability.ipynb    | 10 ++++----
 .../Example_1_Heart_rate_variability.md       | 10 ++++----
 .../notebooks/Exercise_1_Using_the_HGF.ipynb  | 24 +++++++++----------
 .../notebooks/Exercise_1_Using_the_HGF.md     | 24 +++++++++----------
 8 files changed, 50 insertions(+), 50 deletions(-)

diff --git a/docs/source/notebooks/0-Creating_networks.ipynb b/docs/source/notebooks/0-Creating_networks.ipynb
index 5eec9ebc8..e3ca44a9c 100644
--- a/docs/source/notebooks/0-Creating_networks.ipynb
+++ b/docs/source/notebooks/0-Creating_networks.ipynb
@@ -84,7 +84,7 @@
    "outputs": [],
    "source": [
     "from pyhgf.typing import Indexes\n",
-    "parameters = {\"mu\": 0.0, \"pi\": 1.0}\n",
+    "parameters = {\"mean\": 0.0, \"precision\": 1.0}\n",
     "\n",
     "attributes = (parameters, parameters, parameters)\n",
     "edges = (\n",
@@ -99,7 +99,7 @@
    "id": "54c60f31-3554-48d5-906f-484f7b209545",
    "metadata": {},
    "source": [
-    "The code above illustrate the creation of a probabilistic network of 3 nodes with simple parameters sets $(\mu=0.0, \pi=1.0)$. Node 2 is the value parent of node 1. Node 3 is the value parent of node 2 and has no parents."
+    "The code above illustrates the creation of a probabilistic network of 3 nodes with simple parameter sets (mean = 0.0, precision = 1.0). Node 2 is the value parent of node 1. Node 3 is the value parent of node 2 and has no parents."
    ]
   },
   {
@@ -218,7 +218,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "hgf.attributes[3][\"pi\"] = 5.0"
+    "hgf.attributes[3][\"precision\"] = 5.0"
    ]
   },
   {
diff --git a/docs/source/notebooks/0-Creating_networks.md b/docs/source/notebooks/0-Creating_networks.md
index 374d26d36..a79e570e1 100644
--- a/docs/source/notebooks/0-Creating_networks.md
+++ b/docs/source/notebooks/0-Creating_networks.md
@@ -5,7 +5,7 @@ jupytext:
     extension: .md
     format_name: myst
     format_version: 0.13
-    jupytext_version: 1.14.7
+    jupytext_version: 1.15.1
 kernelspec:
   display_name: Python 3 (ipykernel)
   language: python
@@ -66,7 +66,7 @@ One of the advantages of reasoning this way is that it dissociates variables tha
 
 ```{code-cell} ipython3
 from pyhgf.typing import Indexes
-parameters = {"mu": 0.0, "pi": 1.0}
+parameters = {"mean": 0.0, "precision": 1.0}
 
 attributes = (parameters, parameters, parameters)
 edges = (
@@ -76,7 +76,7 @@ edges = (
 )
 ```
 
-The code above illustrate the creation of a probabilistic network of 3 nodes with simple parameters sets $(\mu=0.0, \pi=1.0)$. Node 2 is the value parent of node 1. Node 3 is the value parent of node 2 and has no parents.
+The code above illustrates the creation of a probabilistic network of 3 nodes with simple parameter sets (mean = 0.0, precision = 1.0). Node 2 is the value parent of node 1. Node 3 is the value parent of node 2 and has no parents.
 
 +++
 
@@ -95,7 +95,7 @@ hgf.plot_network()
 
 The simplest change we can make to a network is to change the values of some of its parameters. The parameters are stored in the `attributes` variable as a dictionary where the keys (integers) are node indexes. Therefore, modifying the expected precision of the third node in the previous example is as simple as:
 ```{code-cell} ipython3
-hgf.attributes[3]["pi"] = 5.0
+hgf.attributes[3]["precision"] = 5.0
 ```
 
 However, modifying parameter values *manually* should not be that common, as this is something we want the model to do dynamically as it receives new observations; it can nevertheless be useful, for example, to generate prior predictive samples by drawing some parameter values from a distribution.
diff --git a/docs/source/notebooks/2-Using_custom_response_functions.ipynb b/docs/source/notebooks/2-Using_custom_response_functions.ipynb
index 42a46c7e1..9b883421f 100644
--- a/docs/source/notebooks/2-Using_custom_response_functions.ipynb
+++ b/docs/source/notebooks/2-Using_custom_response_functions.ipynb
@@ -158,9 +158,9 @@
    "agent = HGF(\n",
    "    n_levels=2,\n",
    "    model_type=\"binary\",\n",
-    "    initial_mu={\"1\": .0, \"2\": .5},\n",
-    "    initial_pi={\"1\": .0, \"2\": 1e4},\n",
-    "    omega={\"2\": -4.0},\n",
+    "    initial_mean={\"1\": .0, \"2\": .5},\n",
+    "    initial_precision={\"1\": .0, \"2\": 1e4},\n",
+    "    tonic_volatility={\"2\": -4.0},\n",
    ").input_data(input_data=u)"
   ]
  },
diff --git a/docs/source/notebooks/2-Using_custom_response_functions.md b/docs/source/notebooks/2-Using_custom_response_functions.md
index 7428a1285..b1cd2aa61 100644
--- a/docs/source/notebooks/2-Using_custom_response_functions.md
+++ b/docs/source/notebooks/2-Using_custom_response_functions.md
@@ -86,9 +86,9 @@ slideshow:
 agent = HGF(
     n_levels=2,
     model_type="binary",
-    initial_mu={"1": .0, "2": .5},
-    initial_pi={"1": .0, "2": 1e4},
-    omega={"2": -4.0},
+    initial_mean={"1": .0, "2": .5},
+    initial_precision={"1": .0, "2": 1e4},
+    tonic_volatility={"2": -4.0},
 ).input_data(input_data=u)
 ```
 
@@ -121,7 +121,7 @@ slideshow:
 ---
 # a simple decision rule using the first level of the HGF
 np.random.seed(1)
-responses = np.random.binomial(p=agent.node_trajectories[1]["muhat"], n=1)
+responses = np.random.binomial(p=agent.node_trajectories[1]["expected_mean"], n=1)
 ```
 
 +++ {"editable": true, "slideshow": {"slide_type": ""}}
@@ -139,7 +139,7 @@
 plt.figure(figsize=(12, 3))
 jitter = responses * .1 + (1-responses) * -.1
 plt.scatter(np.arange(len(u)), u, label="Observations", color="#4c72b0", edgecolor="k", alpha=.2)
 plt.scatter(np.arange(len(responses)), responses + jitter, label="Responses", color="#c44e52", alpha=.2, edgecolor="k")
-plt.plot(agent.node_trajectories[1]["muhat"], label="Beliefs", linestyle="--")
+plt.plot(agent.node_trajectories[1]["expected_mean"], label="Beliefs", linestyle="--")
 plt.legend()
 plt.xlabel("Trials")
 ```
@@ -205,7 +205,7 @@ def response_function(hgf, response_function_parameters):
     responses = response_function_parameters[0]
 
     # the expected values at the first level of the HGF
-    beliefs = hgf.node_trajectories[1]["muhat"]
+    beliefs = hgf.node_trajectories[1]["expected_mean"]
     return jnp.sum(jnp.where(responses, -jnp.log(beliefs), -jnp.log(1.0 - beliefs)))
 ```
diff --git a/docs/source/notebooks/Example_1_Heart_rate_variability.ipynb b/docs/source/notebooks/Example_1_Heart_rate_variability.ipynb
index afdfa38c2..feddb81fc 100644
--- a/docs/source/notebooks/Example_1_Heart_rate_variability.ipynb
+++ b/docs/source/notebooks/Example_1_Heart_rate_variability.ipynb
@@ -753,11 +753,11 @@
    "hgf = HGF(\n",
    "    n_levels=2,\n",
    "    model_type=\"continuous\",\n",
-    "    initial_mu={\"1\": rr[0], \"2\": -4.0},\n",
-    "    initial_pi={\"1\": 1e4, \"2\": 1e1},\n",
-    "    omega={\"1\": -4.0, \"2\": omega_2},\n",
-    "    rho={\"1\": 0.0, \"2\": 0.0},\n",
-    "    kappas={\"1\": 1.0}).input_data(input_data=rr)"
+    "    initial_mean={\"1\": rr[0], \"2\": -4.0},\n",
+    "    initial_precision={\"1\": 1e4, \"2\": 1e1},\n",
+    "    tonic_volatility={\"1\": -4.0, \"2\": omega_2},\n",
+    "    tonic_drift={\"1\": 0.0, \"2\": 0.0},\n",
+    "    volatility_coupling={\"1\": 1.0}).input_data(input_data=rr)"
   ]
  },
 {
diff --git a/docs/source/notebooks/Example_1_Heart_rate_variability.md b/docs/source/notebooks/Example_1_Heart_rate_variability.md
index 5d8b59341..8862ddb49 100644
--- a/docs/source/notebooks/Example_1_Heart_rate_variability.md
+++ b/docs/source/notebooks/Example_1_Heart_rate_variability.md
@@ -127,11 +127,11 @@ omega_2 = az.summary(idata)["mean"]["omega_2"]
 hgf = HGF(
     n_levels=2,
     model_type="continuous",
-    initial_mu={"1": rr[0], "2": -4.0},
-    initial_pi={"1": 1e4, "2": 1e1},
-    omega={"1": -4.0, "2": omega_2},
-    rho={"1": 0.0, "2": 0.0},
-    kappas={"1": 1.0}).input_data(input_data=rr)
+    initial_mean={"1": rr[0], "2": -4.0},
+    initial_precision={"1": 1e4, "2": 1e1},
+    tonic_volatility={"1": -4.0, "2": omega_2},
+    tonic_drift={"1": 0.0, "2": 0.0},
+    volatility_coupling={"1": 1.0}).input_data(input_data=rr)
 ```
 
 ```{code-cell} ipython3
diff --git a/docs/source/notebooks/Exercise_1_Using_the_HGF.ipynb b/docs/source/notebooks/Exercise_1_Using_the_HGF.ipynb
index 8a5ec8e46..6079dd392 100644
--- a/docs/source/notebooks/Exercise_1_Using_the_HGF.ipynb
+++ b/docs/source/notebooks/Exercise_1_Using_the_HGF.ipynb
@@ -222,9 +222,9 @@
    "two_levels_continuous_hgf = HGF(\n",
    "    n_levels=2,\n",
    "    model_type=\"continuous\",\n",
-    "    initial_mu={\"1\": 1.04, \"2\": 0.0},\n",
-    "    initial_pi={\"1\": 1e4, \"2\": 1e1},\n",
-    "    omega={\"1\": -8.0, \"2\": -1.0},\n",
+    "    initial_mean={\"1\": 1.04, \"2\": 0.0},\n",
+    "    initial_precision={\"1\": 1e4, \"2\": 1e1},\n",
+    "    tonic_volatility={\"1\": -8.0, \"2\": -1.0},\n",
    ")"
   ]
  },
@@ -875,9 +875,9 @@
    "hgf = HGF(\n",
    "    n_levels=2,\n",
    "    model_type=\"continuous\",\n",
-    "    initial_mu={\"1\": timeserie[0], \"2\": .5},\n",
-    "    initial_pi={\"1\": 1e4, \"2\": 1e1},\n",
-    "    omega={\"1\":-6.0, \"2\": -3.0},\n",
+    "    initial_mean={\"1\": timeserie[0], \"2\": .5},\n",
+    "    initial_precision={\"1\": 1e4, \"2\": 1e1},\n",
+    "    tonic_volatility={\"1\":-6.0, \"2\": -3.0},\n",
    ")\n",
    "\n",
    "# add new observations\n",
@@ -975,9 +975,9 @@
    "two_levels_hgf = HGF(\n",
    "    n_levels=2,\n",
    "    model_type=\"binary\",\n",
-    "    initial_mu={\"1\": .0, \"2\": 0.0},\n",
-    "    initial_pi={\"1\": np.nan, \"2\": 1.0},\n",
-    "    omega={\"2\": -5},\n",
+    "    initial_mean={\"1\": .0, \"2\": 0.0},\n",
+    "    initial_precision={\"1\": np.nan, \"2\": 1.0},\n",
+    "    tonic_volatility={\"2\": -5},\n",
    ")"
   ]
  },
@@ -2334,9 +2334,9 @@
    "    three_levels_df = HGF(\n",
    "        n_levels=3,\n",
    "        model_type=\"binary\",\n",
-    "        initial_mu={\"1\": .0, \"2\": 0.0, \"3\": 1.0},\n",
-    "        initial_pi={\"1\": .0, \"2\": 1.0, \"3\": 1.0},\n",
-    "        omega={\"2\": omega_2, \"3\": -6.0},\n",
+    "        initial_mean={\"1\": .0, \"2\": 0.0, \"3\": 1.0},\n",
+    "        initial_precision={\"1\": .0, \"2\": 1.0, \"3\": 1.0},\n",
+    "        tonic_volatility={\"2\": omega_2, \"3\": -6.0},\n",
    "        verbose=False\n",
    "    ).input_data(input_data=u).to_pandas()\n",
    "    \n",
diff --git a/docs/source/notebooks/Exercise_1_Using_the_HGF.md b/docs/source/notebooks/Exercise_1_Using_the_HGF.md
index fc9711686..295dc246d 100644
--- a/docs/source/notebooks/Exercise_1_Using_the_HGF.md
+++ b/docs/source/notebooks/Exercise_1_Using_the_HGF.md
@@ -143,9 +143,9 @@ This requires propagating updates on sufficient statistics and sending precision
 two_levels_continuous_hgf = HGF(
     n_levels=2,
model_type="continuous", - initial_mu={"1": 1.04, "2": 0.0}, - initial_pi={"1": 1e4, "2": 1e1}, - omega={"1": -8.0, "2": -1.0}, + initial_mean={"1": 1.04, "2": 0.0}, + initial_precision={"1": 1e4, "2": 1e1}, + tonic_volatility={"1": -8.0, "2": -1.0}, ) ``` @@ -280,9 +280,9 @@ timeserie = aarhus_weather_df["t2m"][:24*30].to_numpy() hgf = HGF( n_levels=2, model_type="continuous", - initial_mu={"1": timeserie[0], "2": .5}, - initial_pi={"1": 1e4, "2": 1e1}, - omega={"1":-6.0, "2": -3.0}, + initial_mean={"1": timeserie[0], "2": .5}, + initial_precision={"1": 1e4, "2": 1e1}, + tonic_volatility={"1":-6.0, "2": -3.0}, ) # add new observations @@ -325,9 +325,9 @@ Fitting data to a binary HGF is quite similar to the continuous one (note that ` two_levels_hgf = HGF( n_levels=2, model_type="binary", - initial_mu={"1": .0, "2": 0.0}, - initial_pi={"1": np.nan, "2": 1.0}, - omega={"2": -5}, + initial_mean={"1": .0, "2": 0.0}, + initial_precision={"1": np.nan, "2": 1.0}, + tonic_volatility={"2": -5}, ) ``` @@ -743,9 +743,9 @@ for _ in range(20): three_levels_df = HGF( n_levels=3, model_type="binary", - initial_mu={"1": .0, "2": 0.0, "3": 1.0}, - initial_pi={"1": .0, "2": 1.0, "3": 1.0}, - omega={"2": omega_2, "3": -6.0}, + initial_mean={"1": .0, "2": 0.0, "3": 1.0}, + initial_precision={"1": .0, "2": 1.0, "3": 1.0}, + tonic_volatility={"2": omega_2, "3": -6.0}, verbose=False ).input_data(input_data=u).to_pandas()
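
Note for reviewers: this patch is a pure rename of the HGF parameters across the example notebooks; the numerical behaviour is unchanged. As a quick reference, below is a minimal sketch of the mapping it applies. The `from pyhgf.model import HGF` import path and the illustrative input values are assumptions taken from the surrounding notebook context, not part of this diff.

```python
# Renaming summary (illustrative sketch, not part of the patch):
#   constructor keywords:  initial_mu -> initial_mean
#                          initial_pi -> initial_precision
#                          omega      -> tonic_volatility
#                          rho        -> tonic_drift
#                          kappas     -> volatility_coupling
#   attribute/trajectory keys: "mu" -> "mean", "pi" -> "precision",
#                              "muhat" -> "expected_mean"

import numpy as np
from pyhgf.model import HGF  # assumed import path, as used in the pyhgf documentation

input_data = np.array([1.04, 1.06, 1.03, 1.05])  # illustrative values only

# a two-level continuous HGF written with the new argument names
hgf = HGF(
    n_levels=2,
    model_type="continuous",
    initial_mean={"1": input_data[0], "2": 0.0},  # was: initial_mu
    initial_precision={"1": 1e4, "2": 1e1},       # was: initial_pi
    tonic_volatility={"1": -8.0, "2": -1.0},      # was: omega
    tonic_drift={"1": 0.0, "2": 0.0},             # was: rho
    volatility_coupling={"1": 1.0},               # was: kappas
).input_data(input_data=input_data)

# the expected means at the first level, under the new trajectory key
beliefs = hgf.node_trajectories[1]["expected_mean"]  # was: ["muhat"]
```

The new names spell out what each parameter does (for example, `omega` becomes `tonic_volatility`, the tonic component of a node's volatility), which makes the notebooks easier to read than the Greek-letter shorthand.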