
Commit

Fig2 (#11)
* added generative models, plots, tests, fitting infections per node, and more
nwlandry authored Nov 15, 2023
1 parent c1e2752 commit c7c5332
Showing 25 changed files with 1,030 additions and 311 deletions.
4 changes: 3 additions & 1 deletion .gitignore
@@ -148,7 +148,9 @@ tutorials/test.*

# airspeed velocity files
Data/frac_vs_beta
-Data/erdos-renyi_experiment
+Data/erdos-renyi
+Data/watts-strogatz
+Data/sbm

#slurm config files
config.json
1 change: 1 addition & 0 deletions Data/erdos-renyi.json

Large diffs are not rendered by default.

1 change: 1 addition & 0 deletions Data/watts-strogatz.json

Large diffs are not rendered by default.

File renamed without changes.
67 changes: 67 additions & 0 deletions collect_erdos_renyi.py
@@ -0,0 +1,67 @@
import json
import os

import numpy as np

from lcs import *

plist = set()
clist = set()
rlist = set()
beta = []
frac = []


data_dir = "Data/erdos-renyi/"

# first pass: collect the parameter values (p, c, r) encoded in each filename
for f in os.listdir(data_dir):
    d = f.split(".json")[0].split("_")
    p = float(d[0])
    c = int(d[1])
    r = int(d[2])

    plist.add(p)
    clist.add(c)
    rlist.add(r)

clist = sorted(clist)
plist = sorted(plist)
rlist = sorted(rlist)

c_dict = {c: i for i, c in enumerate(clist)}
p_dict = {p: i for i, p in enumerate(plist)}
r_dict = {r: i for i, r in enumerate(rlist)}


ps = np.zeros((len(clist), len(plist), len(rlist)))
sps = np.zeros((len(clist), len(plist), len(rlist)))

# second pass: fill the similarity arrays, indexed by
# (contagion function, probability, realization)
for f in os.listdir(data_dir):
    d = f.split(".json")[0].split("_")
    p = float(d[0])
    c = int(d[1])
    r = int(d[2])

    i = c_dict[c]
    j = p_dict[p]
    k = r_dict[r]

    fname = os.path.join(data_dir, f)

    with open(fname, "r") as file:
        data = json.loads(file.read())

    A = np.array(data["A"])
    samples = np.array(data["samples"])

    ps[i, j, k] = posterior_similarity(samples, A)
    sps[i, j, k] = samplewise_posterior_similarity(samples, A)

data = {}
data["p"] = plist
data["sps"] = sps.tolist()
data["ps"] = ps.tolist()
datastring = json.dumps(data)

with open("Data/erdos-renyi.json", "w") as output_file:
output_file.write(datastring)
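
For context, `posterior_similarity` and `samplewise_posterior_similarity` come from the `lcs` package and are not shown in this diff. The sketch below is one plausible form of these metrics (an assumption, not the package's actual code), assuming `samples` stacks sampled adjacency matrices along axis 0 and `A` is the ground-truth matrix:

import numpy as np


def posterior_similarity_sketch(samples, A):
    # one minus the normalized L1 distance between the ground truth
    # and the posterior mean adjacency matrix
    Q = samples.mean(axis=0)
    return 1 - np.abs(A - Q).sum() / np.abs(A + Q).sum()


def samplewise_posterior_similarity_sketch(samples, A):
    # the same distance, computed against each sample and then averaged
    f = 0.0
    for Q in samples:
        f += 1 - np.abs(A - Q).sum() / np.abs(A + Q).sum()
    return f / len(samples)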
4 changes: 2 additions & 2 deletions collect_frac_vs_beta.py
@@ -67,8 +67,8 @@

    ipn[i, j, k] = infections_per_node(x)

-    psmat[i, j, k] = posterior_similarity(A, samples)
-    spsmat[i, j, k] = samplewise_posterior_similarity(A, samples)
+    psmat[i, j, k] = posterior_similarity(samples, A)
+    spsmat[i, j, k] = samplewise_posterior_similarity(samples, A)
    it += 1
    print(it, flush=True)

77 changes: 77 additions & 0 deletions collect_watts-strogatz.py
@@ -0,0 +1,77 @@
import json
import os

import numpy as np

from lcs import *

plist = set()
clist = set()
rlist = set()
beta = []
frac = []


data_dir = "Data/watts-strogatz/"

# first pass: collect the parameter values (p, c, r) encoded in each filename
for f in os.listdir(data_dir):
    d = f.split(".json")[0].split("_")
    try:
        p = float(d[0])
        c = int(d[1])
        r = int(d[2])
    except (ValueError, IndexError):
        # fall back for filenames whose probability token was split in two
        p = float(d[0] + "-" + d[1])
        c = int(d[2])
        r = int(d[3])

    plist.add(p)
    clist.add(c)
    rlist.add(r)

clist = sorted(clist)
plist = sorted(plist)
rlist = sorted(rlist)

c_dict = {c: i for i, c in enumerate(clist)}
p_dict = {p: i for i, p in enumerate(plist)}
r_dict = {r: i for i, r in enumerate(rlist)}


ps = np.zeros((len(clist), len(plist), len(rlist)))
sps = np.zeros((len(clist), len(plist), len(rlist)))

# second pass: fill the similarity arrays, indexed by
# (contagion function, probability, realization)
for f in os.listdir(data_dir):
    d = f.split(".json")[0].split("_")
    try:
        p = float(d[0])
        c = int(d[1])
        r = int(d[2])
    except (ValueError, IndexError):
        p = float(d[0] + "-" + d[1])
        c = int(d[2])
        r = int(d[3])

    i = c_dict[c]
    j = p_dict[p]
    k = r_dict[r]

    fname = os.path.join(data_dir, f)

    with open(fname, "r") as file:
        data = json.loads(file.read())

    A = np.array(data["A"])
    samples = np.array(data["samples"])

    ps[i, j, k] = posterior_similarity(samples, A)
    sps[i, j, k] = samplewise_posterior_similarity(samples, A)

data = {}
data["p"] = plist
data["sps"] = sps.tolist()
data["ps"] = ps.tolist()
datastring = json.dumps(data)

with open("Data/watts-strogatz.json", "w") as output_file:
output_file.write(datastring)
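
As a quick sanity check of the aggregated file, here is a minimal sketch (assuming only the JSON layout written above) that loads the results and averages the samplewise posterior similarity over realizations:

import json

import numpy as np

with open("Data/watts-strogatz.json", "r") as f:
    results = json.load(f)

sps = np.array(results["sps"])  # shape: (contagion function, probability, realization)
print(results["p"])  # the sorted probability values
print(sps.mean(axis=2))  # mean similarity over realizations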
3 changes: 1 addition & 2 deletions convergence/determine_mcmc_parameters.ipynb
@@ -21,8 +21,7 @@
"metadata": {},
"outputs": [],
"source": [
"G = nx.karate_club_graph()\n",
"A = nx.adjacency_matrix(G, weight=None).todense()\n",
"A = zkc()\n",
"n = np.size(A, axis=0)"
]
},
119 changes: 119 additions & 0 deletions erdos-renyi.py
@@ -0,0 +1,119 @@
import json
import multiprocessing as mp
import os

import numpy as np

from lcs import *


def target_ipn(n, p, gamma, c, mode, rho0, tmax, realizations):
    # average infections per node over independent G(n, p) realizations
    x0 = np.zeros(n)
    x0[random.sample(range(n), int(round(rho0 * n)))] = 1
    ipn = 0
    for _ in range(realizations):
        A = erdos_renyi(n, p)
        x = contagion_process(A, gamma, c, x0, tmin=0, tmax=tmax)
        ipn += infections_per_node(x, mode) / realizations
    return ipn


def single_inference(
    fname, gamma, c, b, rho0, A, tmax, p_c, p_rho, nsamples, burn_in, skip
):
    # simulate a contagion process on A, sample adjacency matrices from the
    # posterior, and write the inputs and samples to a JSON file
    n = np.size(A, axis=0)
    x0 = np.zeros(n)
    x0[random.sample(range(n), int(round(rho0 * n)))] = 1

    x = contagion_process(A, gamma, c, x0, tmin=0, tmax=tmax)
    p = beta(p_rho[0], p_rho[1]).rvs()
    A0 = erdos_renyi(n, p)
    samples = infer_adjacency_matrix(
        x, A0, p_rho, p_c, nsamples=nsamples, burn_in=burn_in, skip=skip
    )

    # json dict
    data = {}
    data["gamma"] = gamma
    data["c"] = c.tolist()
    data["b"] = b
    data["p-rho"] = p_rho.tolist()
    data["p-c"] = p_c.tolist()
    data["x"] = x.tolist()
    data["A"] = A.tolist()
    data["samples"] = samples.tolist()

    datastring = json.dumps(data)

    with open(fname, "w") as output_file:
        output_file.write(datastring)


data_dir = "Data/erdos-renyi"
os.makedirs(data_dir, exist_ok=True)

for f in os.listdir(data_dir):
    os.remove(os.path.join(data_dir, f))

n = 50

n_processes = len(os.sched_getaffinity(0))
realizations = 10
probabilities = np.linspace(0.0, 1.0, 33)

# MCMC parameters
burn_in = 100000
nsamples = 100
skip = 1500
p_c = np.ones((2, n))
p_rho = np.array([1, 1])
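# burn_in discards the initial MCMC sweeps and skip thins the chain between
# retained samples; p_c and p_rho look like Beta prior hyperparameters (all
# ones giving uniform Beta(1,1) priors), though that reading is an assumption
# not documented in this diff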

# contagion functions and parameters
cf1 = lambda nu, beta: 1 - (1 - beta) ** nu # simple contagion
cf2 = lambda nu, beta: beta * (nu >= 2) # complex contagion, tau=2
cf3 = lambda nu, beta: beta * (nu >= 3) # complex contagion, tau=3
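
# worked example for b = 0.04: cf1 gives 1 - (1 - 0.04)**2 = 0.0784 with
# nu = 2 infected neighbors, while the threshold functions cf2 and cf3
# return b only once nu reaches 2 or 3, respectively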

cfs = [cf1, cf2, cf3]

rho0 = 1.0
gamma = 0.1
b = 0.04
mode = "max"

tmax = 1000


arglist = []
for p in probabilities:
    # calibrate: compute the target infections per node for the simple
    # contagion, then rescale b for each complex contagion to match it
    c = cfs[0](np.arange(n), b)
    ipn = target_ipn(n, p, gamma, c, mode, rho0, tmax, 1000)
    for i, cf in enumerate(cfs):
        if i != 0:
            A = erdos_renyi(n, p)
            bscaled = fit_ipn(0.5, ipn, cf, gamma, A, rho0, tmax, mode)
        else:
            bscaled = b
        c = cf(np.arange(n), bscaled)
        print((p, i), flush=True)

        for r in range(realizations):
            A = erdos_renyi(n, p)
            arglist.append(
                (
                    f"{data_dir}/{p}_{i}_{r}",
                    gamma,
                    c,
                    bscaled,
                    rho0,
                    A,
                    tmax,
                    p_c,
                    p_rho,
                    nsamples,
                    burn_in,
                    skip,
                )
            )

with mp.Pool(processes=n_processes) as pool:
    pool.starmap(single_inference, arglist)
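
`fit_ipn` is imported from `lcs` and its internals are not part of this diff. As a rough sketch of the calibration idea (a hypothetical `fit_ipn_sketch`, assuming the simulated infections per node increase monotonically with the infection rate and that the real signature may differ), one could bisect on the rate until the simulation matches the simple-contagion target:

import random

import numpy as np

from lcs import contagion_process, infections_per_node


def fit_ipn_sketch(target, cf, gamma, A, rho0, tmax, mode,
                   lo=0.0, hi=1.0, iters=20, realizations=10):
    n = A.shape[0]
    x0 = np.zeros(n)
    x0[random.sample(range(n), int(round(rho0 * n)))] = 1
    for _ in range(iters):
        b = 0.5 * (lo + hi)
        c = cf(np.arange(n), b)
        est = 0.0
        for _ in range(realizations):
            x = contagion_process(A, gamma, c, x0, tmin=0, tmax=tmax)
            est += infections_per_node(x, mode) / realizations
        if est > target:
            hi = b  # too many infections: lower the rate
        else:
            lo = b  # too few: raise it
    return 0.5 * (lo + hi)

Whatever the actual implementation, the fitted bscaled parameterizes each complex contagion so that all three contagion functions produce comparable infection activity on the same networks.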
6 changes: 2 additions & 4 deletions fitting_ipn.ipynb
@@ -18,8 +18,7 @@
"source": [
"import numpy as np\n",
"import matplotlib.pyplot as plt\n",
"from lcs import *\n",
"import networkx as nx"
"from lcs import *"
]
},
{
@@ -28,8 +27,7 @@
"metadata": {},
"outputs": [],
"source": [
"G = nx.karate_club_graph()\n",
"A = nx.adjacency_matrix(G, weight=None).todense()\n",
"A = zkc()\n",
"n = A.shape[0]"
]
},
7 changes: 3 additions & 4 deletions frac_vs_beta.py
@@ -44,8 +44,7 @@ def single_inference(
for f in os.listdir(data_dir):
    os.remove(os.path.join(data_dir, f))

-G = nx.karate_club_graph()
-A = nx.adjacency_matrix(G, weight=None).todense()
+A = zkc()
n = A.shape[0]

n_processes = len(os.sched_getaffinity(0))
@@ -54,7 +53,7 @@ def single_inference(
nb = 33

# MCMC parameters
-burn_in = 10000
+burn_in = 100000
nsamples = 1000
skip = 2000
p_c = np.ones((2, n))
@@ -79,7 +78,7 @@
for k in range(realizations):
    arglist.append(
        (
-            f"Data/frac_vs_beta/{b}-{f}-{k}",
+            f"Data/frac_vs_beta/{b}_{f}_{k}",
            gamma,
            c,
            rho0,
7 changes: 3 additions & 4 deletions infer_contagion_functions.py
@@ -6,8 +6,7 @@

from lcs import *

-G = nx.karate_club_graph()
-A = nx.adjacency_matrix(G, weight=None).todense()
+A = zkc()
n = A.shape[0]

p_gamma = [1, 1]
@@ -39,7 +38,7 @@
    p_gamma,
    p_c,
    nsamples=1000,
-    burn_in=30000,
+    burn_in=100000,
    skip=1000,
    nspa=10,
    return_likelihood=True,
@@ -69,7 +68,7 @@
    p_gamma,
    p_c,
    nsamples=1000,
-    burn_in=30000,
+    burn_in=100000,
    skip=1000,
    nspa=10,
    return_likelihood=True,
