Commit 683e5cb
update control flow, integrators, operators, and docs (#205)
chaoming0625 authored May 14, 2022
2 parents fc16951 + 8ef3a56 commit 683e5cb
Showing 64 changed files with 3,129 additions and 3,848 deletions.
2 changes: 1 addition & 1 deletion brainpy/__init__.py
@@ -1,6 +1,6 @@
# -*- coding: utf-8 -*-

__version__ = "2.1.10"
__version__ = "2.1.11"


try:
5 changes: 2 additions & 3 deletions brainpy/datasets/chaotic_systems.py
@@ -3,7 +3,7 @@
import jax.numpy as jnp

from brainpy import math as bm, dyn
from brainpy.integrators import odeint, ddeint, JointEq, IntegratorRunner
from brainpy.integrators import odeint, JointEq, IntegratorRunner

__all__ = [
'henon_map_series',
@@ -172,8 +172,7 @@ def mackey_glass_series(duration, dt=0.1, beta=2., gamma=1., tau=2., n=9.65,
xdelay = bm.TimeDelay(inits, tau, dt=dt, interp_method='round')
xdelay.data.value = inits + 0.2 * (rng.random((xdelay.num_delay_step,) + inits.shape) - 0.5)

@ddeint(method=method,
state_delays={'x': xdelay})
@odeint(method=method, state_delays={'x': xdelay})
def mg_eq(x, t):
xtau = xdelay(t - tau)
return beta * xtau / (1 + xtau ** n) - gamma * x
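This hunk tracks the commit's integrator consolidation: the separate `ddeint` entry point is removed, and delay equations now go through `odeint` with a `state_delays` mapping that ties a state name to its delay buffer. A minimal sketch of the unified API, assuming BrainPy 2.1.11 (the constants and the runner settings are illustrative, not taken from this diff):

    import brainpy.math as bm
    from brainpy.integrators import odeint, IntegratorRunner

    beta, gamma, tau, n = 2., 1., 2., 9.65
    xdelay = bm.TimeDelay(bm.ones(1), tau, dt=0.1, interp_method='round')

    @odeint(method='rk4', state_delays={'x': xdelay})
    def mg_eq(x, t):
      # the delayed state is read from the buffer registered in `state_delays`
      xtau = xdelay(t - tau)
      return beta * xtau / (1 + xtau ** n) - gamma * x

    runner = IntegratorRunner(mg_eq, monitors=['x'], inits={'x': bm.ones(1)}, dt=0.1)
    runner.run(100.)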
5 changes: 3 additions & 2 deletions brainpy/dyn/__init__.py
@@ -5,11 +5,12 @@
"""


from .base import *
from .neurons import *
from .synapses import *
from .channels import *
from .base import *
from .others import *
from .utils import *
from .runners import *

from . import neurons, synapses, channels, rates, utils, runners
from . import neurons, synapses, channels, rates, others, utils, runners
1 change: 1 addition & 0 deletions brainpy/dyn/networks/__init__.py
@@ -0,0 +1 @@
# -*- coding: utf-8 -*-
1 change: 0 additions & 1 deletion brainpy/dyn/neurons/__init__.py
@@ -2,5 +2,4 @@

from .biological_models import *
from .fractional_models import *
from .input_models import *
from .reduced_models import *
187 changes: 187 additions & 0 deletions brainpy/dyn/neurons/biological_models.py
@@ -14,6 +14,7 @@
'HH',
'MorrisLecar',
'PinskyRinzelModel',
'WangBuzsakiModel',
]


@@ -204,6 +205,7 @@ def __init__(
h_initializer: Union[Initializer, Callable, Tensor] = OneInit(0.6),
n_initializer: Union[Initializer, Callable, Tensor] = OneInit(0.32),
method: str = 'exp_auto',
keep_size: bool = False,
name: str = None
):
# initialization
@@ -287,6 +289,7 @@ def update(self, t, dt):
self.input[:] = 0.



class MorrisLecar(NeuGroup):
r"""The Morris-Lecar neuron model.
@@ -383,6 +386,7 @@ def __init__(
W_initializer: Union[Callable, Initializer, Tensor] = OneInit(0.02),
V_initializer: Union[Callable, Initializer, Tensor] = Uniform(-70., -60.),
method: str = 'exp_auto',
keep_size: bool = False,
name: str = None
):
# initialization
@@ -626,6 +630,7 @@ def __init__(
Ca_initializer: Union[Initializer, Callable, Tensor] = OneInit(0.2),
# others
method: str = 'exp_auto',
keep_size: bool = False,
name: str = None,
):
# initialization
@@ -805,3 +810,185 @@ def inf_q(self, Ca):
alpha = self.alpha_q(Ca)
beta = self.beta_q(Ca)
return alpha / (alpha + beta)


class WangBuzsakiModel(NeuGroup):
r"""Wang-Buzsaki model [9]_, an implementation of a modified Hodgkin-Huxley model.
Each neuron is described by a single compartment and obeys the current balance equation:

.. math::

   C_{m} \frac{d V}{d t}=-I_{\mathrm{Na}}-I_{\mathrm{K}}-I_{\mathrm{L}}-I_{\mathrm{syn}}+I_{\mathrm{app}}

where :math:`C_{m}=1 \mu \mathrm{F} / \mathrm{cm}^{2}` and :math:`I_{\mathrm{app}}` is the
injected current (in :math:`\mu \mathrm{A} / \mathrm{cm}^{2}`). The leak current
:math:`I_{\mathrm{L}}=g_{\mathrm{L}}\left(V-E_{\mathrm{L}}\right)` has a conductance
:math:`g_{\mathrm{L}}=0.1 \mathrm{mS} / \mathrm{cm}^{2}`, so that the passive time constant
is :math:`\tau_{0}=C_{m} / g_{\mathrm{L}}=10 \mathrm{ms}`, and :math:`E_{\mathrm{L}}=-65 \mathrm{mV}`.

The spike-generating :math:`\mathrm{Na}^{+}` and :math:`\mathrm{K}^{+}` voltage-dependent ion
currents (:math:`I_{\mathrm{Na}}` and :math:`I_{\mathrm{K}}`) are of the Hodgkin-Huxley type
(Hodgkin and Huxley, 1952). The transient sodium current
:math:`I_{\mathrm{Na}}=g_{\mathrm{Na}} m_{\infty}^{3} h\left(V-E_{\mathrm{Na}}\right)`,
where the activation variable :math:`m` is assumed fast and substituted by its steady-state
function :math:`m_{\infty}=\alpha_{m} /\left(\alpha_{m}+\beta_{m}\right)`, with
:math:`\alpha_{m}(V)=-0.1(V+35) /(\exp (-0.1(V+35))-1)` and
:math:`\beta_{m}(V)=4 \exp (-(V+60) / 18)`.
The inactivation variable :math:`h` obeys first-order kinetics:

.. math::

   \frac{d h}{d t}=\phi\left(\alpha_{h}(1-h)-\beta_{h} h\right)

where :math:`\alpha_{h}(V)=0.07 \exp (-(V+58) / 20)` and
:math:`\beta_{h}(V)=1 /(\exp (-0.1(V+28))+1)`, with
:math:`g_{\mathrm{Na}}=35 \mathrm{mS} / \mathrm{cm}^{2}`,
:math:`E_{\mathrm{Na}}=55 \mathrm{mV}`, and :math:`\phi=5`.

The delayed rectifier :math:`I_{\mathrm{K}}=g_{\mathrm{K}} n^{4}\left(V-E_{\mathrm{K}}\right)`,
where the activation variable :math:`n` obeys:

.. math::

   \frac{d n}{d t}=\phi\left(\alpha_{n}(1-n)-\beta_{n} n\right)

with :math:`\alpha_{n}(V)=-0.01(V+34) /(\exp (-0.1(V+34))-1)` and
:math:`\beta_{n}(V)=0.125 \exp (-(V+44) / 80)`; :math:`g_{\mathrm{K}}=9 \mathrm{mS} / \mathrm{cm}^{2}`
and :math:`E_{\mathrm{K}}=-90 \mathrm{mV}`.

Parameters
----------
size: sequence of int, int
The size of the neuron group.
ENa: float, JaxArray, ndarray, Initializer, callable
The reversal potential of sodium. Default is 55 mV.
gNa: float, JaxArray, ndarray, Initializer, callable
The maximum conductance of the sodium channel. Default is 35 mS/cm^2.
EK: float, JaxArray, ndarray, Initializer, callable
The reversal potential of potassium. Default is -90 mV.
gK: float, JaxArray, ndarray, Initializer, callable
The maximum conductance of the potassium channel. Default is 9 mS/cm^2.
EL: float, JaxArray, ndarray, Initializer, callable
The reversal potential of the leak channel. Default is -65 mV.
gL: float, JaxArray, ndarray, Initializer, callable
The conductance of the leak channel. Default is 0.1 mS/cm^2.
V_th: float, JaxArray, ndarray, Initializer, callable
The threshold for spike detection. Default is 20 mV.
C: float, JaxArray, ndarray, Initializer, callable
The membrane capacitance. Default is 1 uF/cm^2.
phi: float, JaxArray, ndarray, Initializer, callable
The temperature-dependent rate factor. Default is 5.
V_initializer: JaxArray, ndarray, Initializer, callable
The initializer of the membrane potential.
h_initializer: JaxArray, ndarray, Initializer, callable
The initializer of the h gating variable.
n_initializer: JaxArray, ndarray, Initializer, callable
The initializer of the n gating variable.
method: str
The numerical integration method.
name: str
The group name.
References
----------
.. [9] Wang, X.J. and Buzsaki, G., (1996) Gamma oscillation by synaptic
inhibition in a hippocampal interneuronal network model. Journal of
neuroscience, 16(20), pp.6402-6413.
"""

def __init__(
self,
size: Shape,
ENa: Union[float, Tensor, Initializer, Callable] = 55.,
gNa: Union[float, Tensor, Initializer, Callable] = 35.,
EK: Union[float, Tensor, Initializer, Callable] = -90.,
gK: Union[float, Tensor, Initializer, Callable] = 9.,
EL: Union[float, Tensor, Initializer, Callable] = -65,
gL: Union[float, Tensor, Initializer, Callable] = 0.1,
V_th: Union[float, Tensor, Initializer, Callable] = 20.,
phi: Union[float, Tensor, Initializer, Callable] = 5.0,
C: Union[float, Tensor, Initializer, Callable] = 1.0,
V_initializer: Union[Initializer, Callable, Tensor] = OneInit(-65.),
h_initializer: Union[Initializer, Callable, Tensor] = OneInit(0.6),
n_initializer: Union[Initializer, Callable, Tensor] = OneInit(0.32),
method: str = 'exp_auto',
keep_size: bool = False,
name: str = None
):
# initialization
super(WangBuzsakiModel, self).__init__(size=size, name=name)

# parameters
self.ENa = init_param(ENa, self.num, allow_none=False)
self.EK = init_param(EK, self.num, allow_none=False)
self.EL = init_param(EL, self.num, allow_none=False)
self.gNa = init_param(gNa, self.num, allow_none=False)
self.gK = init_param(gK, self.num, allow_none=False)
self.gL = init_param(gL, self.num, allow_none=False)
self.C = init_param(C, self.num, allow_none=False)
self.phi = init_param(phi, self.num, allow_none=False)
self.V_th = init_param(V_th, self.num, allow_none=False)

# initializers
check_initializer(h_initializer, 'h_initializer', allow_none=False)
check_initializer(n_initializer, 'n_initializer', allow_none=False)
check_initializer(V_initializer, 'V_initializer', allow_none=False)
self._h_initializer = h_initializer
self._n_initializer = n_initializer
self._V_initializer = V_initializer

# variables
self.h = bm.Variable(init_param(self._h_initializer, (self.num,)))
self.n = bm.Variable(init_param(self._n_initializer, (self.num,)))
self.V = bm.Variable(init_param(self._V_initializer, (self.num,)))
self.input = bm.Variable(bm.zeros(self.num))
self.spike = bm.Variable(bm.zeros(self.num, dtype=bool))

# integral
self.integral = odeint(method=method, f=self.derivative)

def reset(self):
self.h.value = init_param(self._h_initializer, (self.num,))
self.n.value = init_param(self._n_initializer, (self.num,))
self.V.value = init_param(self._V_initializer, (self.num,))
self.input[:] = 0
self.spike[:] = False

def m_inf(self, V):
alpha = -0.1 * (V + 35) / (bm.exp(-0.1 * (V + 35)) - 1)
beta = 4. * bm.exp(-(V + 60.) / 18.)
return alpha / (alpha + beta)

def dh(self, h, t, V):
alpha = 0.07 * bm.exp(-(V + 58) / 20)
beta = 1 / (bm.exp(-0.1 * (V + 28)) + 1)
dhdt = alpha * (1 - h) - beta * h
return self.phi * dhdt

def dn(self, n, t, V):
alpha = -0.01 * (V + 34) / (bm.exp(-0.1 * (V + 34)) - 1)
beta = 0.125 * bm.exp(-(V + 44) / 80)
dndt = alpha * (1 - n) - beta * n
return self.phi * dndt

def dV(self, V, t, h, n, I_ext):
INa = self.gNa * self.m_inf(V) ** 3 * h * (V - self.ENa)
IK = self.gK * n ** 4 * (V - self.EK)
IL = self.gL * (V - self.EL)
dVdt = (- INa - IK - IL + I_ext) / self.C
return dVdt

@property
def derivative(self):
return JointEq([self.dV, self.dh, self.dn])

def update(self, t, dt):
V, h, n = self.integral(self.V, self.h, self.n, t, self.input, dt=dt)
self.spike.value = bm.logical_and(self.V < self.V_th, V >= self.V_th)
self.V.value = V
self.h.value = h
self.n.value = n
self.input[:] = 0.
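Since the activation variable m is replaced by its steady state and the model has no explicit reset, `update` flags a spike whenever the membrane potential crosses `V_th` from below between two integration steps. A minimal usage sketch for the new class, assuming BrainPy 2.1.11 (the drive amplitude and run length are illustrative):

    import brainpy as bp

    group = bp.dyn.WangBuzsakiModel(10)  # 10 neurons with default parameters
    runner = bp.dyn.DSRunner(group,
                             monitors=['V', 'spike'],
                             inputs=('input', 1.))  # constant drive, in uA/cm^2
    runner.run(200.)  # 200 ms of model time
    bp.visualize.raster_plot(runner.mon.ts, runner.mon.spike, show=True)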


6 changes: 4 additions & 2 deletions brainpy/dyn/neurons/fractional_models.py
@@ -93,7 +93,8 @@ def __init__(
V_initializer: Union[Initializer, Callable, Tensor] = OneInit(2.5),
w_initializer: Union[Initializer, Callable, Tensor] = ZeroInit(),
y_initializer: Union[Initializer, Callable, Tensor] = ZeroInit(),
name: str = None
name: str = None,
keep_size: bool = False,
):
super(FractionalFHR, self).__init__(size, name=name)

@@ -128,7 +129,7 @@ def __init__(
# integral function
self.integral = GLShortMemory(self.derivative,
alpha=alpha,
num_memory=num_memory,
num_step=num_memory,
inits=[self.V, self.w, self.y])

def reset(self):
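The keyword change above renames `num_memory` to `num_step` in the Grunwald-Letnikov short-memory integrator. A hedged sketch of an updated call site (the import path, `derivative`, and the initial values are assumptions for illustration, not taken from this diff):

    from brainpy.integrators.fde import GLShortMemory  # assumed location

    integral = GLShortMemory(derivative,   # joint derivative of (V, w, y)
                             alpha=0.97,   # fractional order
                             num_step=500, # was `num_memory` before this commit
                             inits=[V0, w0, y0])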
@@ -233,6 +234,7 @@ def __init__(
V_th: Union[float, Tensor, Initializer, Callable] = 30.,
V_initializer: Union[Initializer, Callable, Tensor] = OneInit(-65.),
u_initializer: Union[Initializer, Callable, Tensor] = OneInit(0.20 * -65.),
keep_size: bool = False,
name: str = None
):
# initialization
19 changes: 12 additions & 7 deletions brainpy/dyn/neurons/reduced_models.py
@@ -89,7 +89,7 @@ def __init__(
V_initializer: Union[Initializer, Callable, Tensor] = ZeroInit(),
noise: Union[float, Tensor, Initializer, Callable] = None,
noise_type: str = 'value',
keep_size: bool=False,
keep_size: bool = False,
method: str = 'exp_auto',
name: str = None
):
@@ -99,18 +99,15 @@ def __init__(
# parameters
self.keep_size = keep_size
self.noise_type = noise_type
if noise_type not in ['func', 'value']:
raise ValueError(f'noise_type only supports `func` and `value`, but we got {noise_type}')
size = self.size if keep_size else self.num
self.V_rest = init_param(V_rest, size, allow_none=False)
self.V_reset = init_param(V_reset, size, allow_none=False)
self.V_th = init_param(V_th, size, allow_none=False)
self.tau = init_param(tau, size, allow_none=False)
self.tau_ref = init_param(tau_ref, size, allow_none=False)
if noise_type == 'func':
self.noise = noise
else:
self.noise = init_param(noise, size, allow_none=True)
if noise_type not in ['func', 'value']:
raise ValueError(f'noise_type only supports `func` and `value`, but we got {noise_type}')
self.noise = noise if (noise_type == 'func') else init_param(noise, size, allow_none=True)

# initializers
check_initializer(V_initializer, 'V_initializer')
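The reordered constructor now validates `noise_type` before `noise` is consumed: with `'value'` the amplitude is broadcast through `init_param`, and with `'func'` the callable is stored as-is. A hedged sketch of the two modes on the LIF group this hunk appears to belong to (the constructor and the noise-function signature are assumptions based on the lines above):

    import brainpy as bp
    import brainpy.math as bm

    # 'value': a scalar (or array) noise amplitude broadcast over the group
    lif_a = bp.dyn.LIF(100, noise=0.5, noise_type='value')

    # 'func': a user-supplied diffusion term with the drift's signature
    def noise_fn(V, t):
      return 0.1 * bm.ones_like(V)  # illustrative constant diffusion

    lif_b = bp.dyn.LIF(100, noise=noise_fn, noise_type='func')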
@@ -260,6 +257,7 @@ def __init__(
tau: Union[float, Tensor, Initializer, Callable] = 10.,
tau_ref: Union[float, Tensor, Initializer, Callable] = 1.7,
V_initializer: Union[Initializer, Callable, Tensor] = ZeroInit(),
keep_size: bool = False,
method: str = 'exp_auto',
name: str = None
):
@@ -403,6 +401,7 @@ def __init__(
V_initializer: Union[Initializer, Callable, Tensor] = ZeroInit(),
w_initializer: Union[Initializer, Callable, Tensor] = ZeroInit(),
method: str = 'exp_auto',
keep_size: bool = False,
name: str = None
):
super(AdExIF, self).__init__(size=size, name=name)
@@ -543,6 +542,7 @@ def __init__(
tau: Union[float, Tensor, Initializer, Callable] = 10.,
tau_ref: Union[float, Tensor, Initializer, Callable] = 0.,
V_initializer: Union[Initializer, Callable, Tensor] = ZeroInit(),
keep_size: bool = False,
method: str = 'exp_auto',
name: str = None
):
@@ -688,6 +688,7 @@ def __init__(
V_initializer: Union[Initializer, Callable, Tensor] = ZeroInit(),
w_initializer: Union[Initializer, Callable, Tensor] = ZeroInit(),
method: str = 'exp_auto',
keep_size: bool = False,
name: str = None
):
super(AdQuaIF, self).__init__(size=size, name=name)
@@ -851,6 +852,7 @@ def __init__(
I2_initializer: Union[Initializer, Callable, Tensor] = ZeroInit(),
Vth_initializer: Union[Initializer, Callable, Tensor] = OneInit(-50.),
method: str = 'exp_auto',
keep_size: bool = False,
name: str = None
):
# initialization
@@ -1013,6 +1015,7 @@ def __init__(
V_initializer: Union[Initializer, Callable, Tensor] = ZeroInit(),
u_initializer: Union[Initializer, Callable, Tensor] = OneInit(),
method: str = 'exp_auto',
keep_size: bool = False,
name: str = None
):
# initialization
@@ -1185,6 +1188,7 @@ def __init__(
y_initializer: Union[Initializer, Callable, Tensor] = OneInit(-10.),
z_initializer: Union[Initializer, Callable, Tensor] = ZeroInit(),
method: str = 'exp_auto',
keep_size: bool = False,
name: str = None
):
# initialization
@@ -1340,6 +1344,7 @@ def __init__(
V_initializer: Union[Initializer, Callable, Tensor] = ZeroInit(),
w_initializer: Union[Initializer, Callable, Tensor] = ZeroInit(),
method: str = 'exp_auto',
keep_size: bool = False,
name: str = None
):
# initialization