diff --git a/codecov.yml b/codecov.yml index 0399157b..c349802b 100644 --- a/codecov.yml +++ b/codecov.yml @@ -3,13 +3,14 @@ coverage: round: nearest range: "50...90" ignore: - - "neurolib/models/**/timeIntegration.py" + - "neurolib/models/**/timeIntegration.py" - "neurolib/utils/devutils.py" - "neurolib/utils/atlases.py" + - "neurolib/models/multimodel/builder/aln.py" status: project: default: enabled: yes target: auto threshold: 5% - patch: off \ No newline at end of file + patch: off diff --git a/neurolib/models/model.py b/neurolib/models/model.py index b9f37778..faf13d47 100644 --- a/neurolib/models/model.py +++ b/neurolib/models/model.py @@ -397,7 +397,7 @@ def setOutput(self, name, data, append=False, removeICs=False): assert isinstance(data, np.ndarray), "Output must be a `numpy.ndarray`." # remove initial conditions from output - if removeICs and name is not "t": + if removeICs and name != "t": if data.ndim == 1: data = data[self.startindt :] elif data.ndim == 2: diff --git a/neurolib/models/multimodel/builder/aln.py b/neurolib/models/multimodel/builder/aln.py new file mode 100644 index 00000000..86d997c7 --- /dev/null +++ b/neurolib/models/multimodel/builder/aln.py @@ -0,0 +1,1029 @@ +import logging +import os +from copy import deepcopy + +import numba +import numpy as np +import symengine as se +from h5py import File +from jitcdde import input as system_input + +from ..builder.base.constants import EXC, INH, LAMBDA_SPEED +from ..builder.base.network import Network, SingleCouplingExcitatoryInhibitoryNode +from ..builder.base.neural_mass import NeuralMass + +DEFAULT_QUANTITIES_CASCADE_FILENAME = "quantities_cascade.h5" + +ALN_EXC_DEFAULT_PARAMS = { + # number of inputs per neuron from EXC/INH + "Ke": 800.0, + "Ki": 200.0, + # postsynaptic potential amplitude for global connectome + "c_gl": 0.4, + # number of incoming EXC connections (to EXC population) from each area + "Ke_gl": 250.0, + # synaptic time constant EXC/INH + "tau_se": 2.0, # ms + "tau_si": 5.0, # ms + # internal Fokker-Planck noise due to random coupling + "sigmae_ext": 1.5, # mV/sqrt(ms) + # maximum synaptic current between EXC/INH nodes in mV/ms + "Jee_max": 2.43, + "Jei_max": -3.3, + # single neuron parameters + "C": 200.0, + "gL": 10.0, + # external drives + "ext_exc_current": 0.0, + "ext_exc_rate": 0.0, + # adaptation current model parameters + # subthreshold adaptation conductance + "a": 15.0, # nS + # spike-triggered adaptation increment + "b": 40.0, # pA + "EA": -80.0, + "tauA": 200.0, + "lambda": LAMBDA_SPEED, +} + +ALN_INH_DEFAULT_PARAMS = { + # number of inputs per neuron from EXC/INH + "Ke": 800.0, + "Ki": 200.0, + # postsynaptic potential amplitude for global connectome + "c_gl": 0.4, + # number of incoming EXC connections (to EXC population) from each area + "Ke_gl": 250.0, + # synaptic time constant EXC/INH + "tau_se": 2.0, # ms + "tau_si": 5.0, # ms + # internal Fokker-Planck noise due to random coupling + "sigmai_ext": 1.5, # mV/sqrt(ms) + # maximum synaptic current between EXC/INH nodes in mV/ms + "Jie_max": 2.60, + "Jii_max": -1.64, + # single neuron parameters + "C": 200.0, + "gL": 10.0, + # external drives + "ext_inh_current": 0.0, + "ext_inh_rate": 0.0, + "lambda": LAMBDA_SPEED, +} +# matrices as [to, from], masses as (EXC, INH) +# EXC is index 0, INH is index 1 +ALN_NODE_DEFAULT_CONNECTIVITY = np.array([[0.3, 0.5], [0.3, 0.5]]) +# same but delays, in ms +ALN_NODE_DEFAULT_DELAYS = np.array([[4.0, 2.0], [4.0, 2.0]]) + + +@numba.njit() +def _get_interpolation_values(xi, yi, sigma_range, mu_range, 
d_sigma, d_mu):
+    """
+    Return values needed for interpolation: bilinear (2D) interpolation
+    within ranges, linear (1D) if "one edge" is crossed, corner value if
+    "two edges" are crossed. Defined as a jitted function for compatibility
+    with the numba backend.
+
+    :param xi: interpolation value on x-axis, i.e. I_sigma
+    :type xi: float
+    :param yi: interpolation value on y-axis, i.e. I_mu
+    :type yi: float
+    :param sigma_range: range of x-axis, i.e. sigma values
+    :type sigma_range: np.ndarray
+    :param mu_range: range of y-axis, i.e. mu values
+    :type mu_range: np.ndarray
+    :param d_sigma: grid coarseness of the x-axis, i.e. sigma values
+    :type d_sigma: float
+    :param d_mu: grid coarseness of the y-axis, i.e. mu values
+    :type d_mu: float
+    :return: index of the lower interpolation value on x-axis, index of the
+        lower interpolation value on y-axis, distance of xi to the lower
+        value, distance of yi to the lower value
+    :rtype: (int, int, float, float)
+    """
+    # within all boundaries
+    if xi >= sigma_range[0] and xi < sigma_range[-1] and yi >= mu_range[0] and yi < mu_range[-1]:
+        xid = (xi - sigma_range[0]) / d_sigma
+        xid1 = np.floor(xid)
+        dxid = xid - xid1
+        yid = (yi - mu_range[0]) / d_mu
+        yid1 = np.floor(yid)
+        dyid = yid - yid1
+        return int(xid1), int(yid1), dxid, dyid
+
+    # outside one boundary
+    if yi < mu_range[0]:
+        yid1 = 0
+        dyid = 0.0
+        if xi >= sigma_range[0] and xi < sigma_range[-1]:
+            xid = (xi - sigma_range[0]) / d_sigma
+            xid1 = np.floor(xid)
+            dxid = xid - xid1
+        elif xi < sigma_range[0]:
+            xid1 = 0
+            dxid = 0.0
+        else:  # xi >= x(end)
+            xid1 = -1
+            dxid = 0.0
+        return int(xid1), int(yid1), dxid, dyid
+
+    if yi >= mu_range[-1]:
+        yid1 = -1
+        dyid = 0.0
+        if xi >= sigma_range[0] and xi < sigma_range[-1]:
+            xid = (xi - sigma_range[0]) / d_sigma
+            xid1 = np.floor(xid)
+            dxid = xid - xid1
+        elif xi < sigma_range[0]:
+            xid1 = 0
+            dxid = 0.0
+        else:  # xi >= x(end)
+            xid1 = -1
+            dxid = 0.0
+        return int(xid1), int(yid1), dxid, dyid
+
+    if xi < sigma_range[0]:
+        xid1 = 0
+        dxid = 0.0
+        yid = (yi - mu_range[0]) / d_mu
+        yid1 = np.floor(yid)
+        dyid = yid - yid1
+        return int(xid1), int(yid1), dxid, dyid
+
+    if xi >= sigma_range[-1]:
+        xid1 = -1
+        dxid = 0.0
+        yid = (yi - mu_range[0]) / d_mu
+        yid1 = np.floor(yid)
+        dyid = yid - yid1
+        return int(xid1), int(yid1), dxid, dyid
+
+
+@numba.njit()
+def _table_lookup(
+    current_mu,
+    current_sigma,
+    sigma_range,
+    mu_range,
+    d_sigma,
+    d_mu,
+    transfer_function_table,
+):
+    """
+    Translate mean and std. deviation of the current to the selected quantity
+    using the linear-nonlinear lookup table for ALN. Defined as a jitted
+    function for compatibility with the numba backend.
+    """
+    x_idx, y_idx, dx_idx, dy_idx = _get_interpolation_values(
+        current_sigma, current_mu, sigma_range, mu_range, d_sigma, d_mu
+    )
+    return (
+        transfer_function_table[y_idx, x_idx] * (1 - dx_idx) * (1 - dy_idx)
+        + transfer_function_table[y_idx, x_idx + 1] * dx_idx * (1 - dy_idx)
+        + transfer_function_table[y_idx + 1, x_idx] * (1 - dx_idx) * dy_idx
+        + transfer_function_table[y_idx + 1, x_idx + 1] * dx_idx * dy_idx
+    )
+
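A quick numeric check of the four-corner weighting used in `_table_lookup` above, as a minimal numpy sketch on a made-up grid (the toy table and the `bilinear_lookup` helper are illustrative only; the real tables come from the precomputed HDF5 file). Because the toy function is linear in both axes, bilinear interpolation reproduces it exactly:

    import numpy as np

    sigma_range = np.arange(0.0, 5.0, 0.5)  # toy x-axis, d_sigma = 0.5
    mu_range = np.arange(-2.0, 4.0, 0.25)   # toy y-axis, d_mu = 0.25
    d_sigma, d_mu = 0.5, 0.25
    # toy table indexed as [mu, sigma]; f(mu, sigma) = mu + 10 * sigma
    table = mu_range[:, None] + 10.0 * sigma_range[None, :]

    def bilinear_lookup(xi, yi):
        # same four-corner weighting as `_table_lookup`
        xid = (xi - sigma_range[0]) / d_sigma
        yid = (yi - mu_range[0]) / d_mu
        x0, y0 = int(np.floor(xid)), int(np.floor(yid))
        dx, dy = xid - x0, yid - y0
        return (
            table[y0, x0] * (1 - dx) * (1 - dy)
            + table[y0, x0 + 1] * dx * (1 - dy)
            + table[y0 + 1, x0] * (1 - dx) * dy
            + table[y0 + 1, x0 + 1] * dx * dy
        )

    # linear functions are interpolated exactly: 0.6 + 10 * 1.7 = 17.6
    print(bilinear_lookup(1.7, 0.6))  # -> 17.6 (up to float rounding)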
+class ALNMass(NeuralMass):
+    """
+    Adaptive linear-nonlinear neural mass model of exponential
+    integrate-and-fire (AdEx) neurons.
+
+    References:
+        Cakan C., Obermayer K. (2020). Biophysically grounded mean-field models of
+        neural populations under electrical stimulation. PLoS Comput Biol, 16(4),
+        e1007822.
+
+        Augustin, M., Ladenbauer, J., Baumann, F., & Obermayer, K. (2017).
+        Low-dimensional spike rate models derived from networks of adaptive
+        integrate-and-fire neurons: comparison and implementation. PLoS Comput
+        Biol, 13(6), e1005545.
+    """
+
+    name = "ALN neural mass model"
+    label = "ALN"
+    # define python callback function name for table lookup (linear-nonlinear
+    # approximation of Fokker-Planck equation)
+    python_callbacks = ["firing_rate_lookup", "voltage_lookup", "tau_lookup"]
+
+    num_noise_variables = 1
+
+    def __init__(self, params, lin_nonlin_transfer_function_filename=None, seed=None):
+        """
+        :param lin_nonlin_transfer_function_filename: filename for precomputed
+            transfer functions of the ALN model; if None, will look for it in
+            this directory
+        :type lin_nonlin_transfer_function_filename: str|None
+        :param seed: seed for random number generator
+        :type seed: int|None
+        """
+        params = self._rescale_strengths(params)
+        super().__init__(params=params, seed=seed)
+        # use the same transfer-function file as neurolib's native ALN model
+        lin_nonlin_transfer_function_filename = lin_nonlin_transfer_function_filename or os.path.join(
+            os.path.dirname(os.path.realpath(__file__)),
+            "..",
+            "..",
+            "aln",
+            "aln-precalc",
+            DEFAULT_QUANTITIES_CASCADE_FILENAME,
+        )
+        self._load_lin_nonlin_transfer_function(lin_nonlin_transfer_function_filename)
+
+    def _load_lin_nonlin_transfer_function(self, filename):
+        """
+        Load precomputed transfer functions from an h5 file.
+        """
+        # load transfer functions from file
+        logging.info(f"Loading precomputed transfer functions from {filename}")
+        loaded_h5 = File(filename, "r")
+        self.mu_range = np.array(loaded_h5["mu_vals"])
+        self.d_mu = self.mu_range[1] - self.mu_range[0]
+        self.sigma_range = np.array(loaded_h5["sigma_vals"])
+        self.d_sigma = self.sigma_range[1] - self.sigma_range[0]
+        self.firing_rate_transfer_function = np.array(loaded_h5["r_ss"])
+        self.voltage_transfer_function = np.array(loaded_h5["V_mean_ss"])
+        self.tau_transfer_function = np.array(loaded_h5["tau_mu_exp"])
+        logging.info("All transfer functions loaded.")
+        # close the file
+        loaded_h5.close()
+        self.lin_nonlin_fname = filename
+
+    def describe(self):
+        return {
+            **super().describe(),
+            "lin_nonlin_transfer_function_filename": self.lin_nonlin_fname,
+        }
+
+    def _callbacks(self):
+        """
+        Construct list of python callbacks for ALN model.
+        """
+        callbacks_list = [
+            (self.callback_functions["firing_rate_lookup"], self.firing_rate_lookup, 2),
+            (self.callback_functions["voltage_lookup"], self.voltage_lookup, 2),
+            (self.callback_functions["tau_lookup"], self.tau_lookup, 2),
+        ]
+        self._validate_callbacks(callbacks_list)
+        return callbacks_list
+
+    def _numba_callbacks(self):
+        """
+        Define numba callbacks - these have to be different from the jitcdde
+        callbacks because of the internals.
+        """
+
+        def _table_numba_gen(sigma_range, mu_range, d_sigma, d_mu, transfer_function):
+            """
+            Function generator for numba callbacks. This works similarly to
+            `functools.partial` (i.e. sets some of the arguments of the inner
+            function), but the result can afterwards be jitted with
+            `numba.njit()`, while partial functions cannot.
+ """ + + def inner(current_mu, current_sigma): + return _table_lookup( + current_mu, + current_sigma, + sigma_range, + mu_range, + d_sigma, + d_mu, + transfer_function, + ) + + return inner + + return [ + ( + "firing_rate_lookup", + numba.njit( + _table_numba_gen( + self.sigma_range, + self.mu_range, + self.d_sigma, + self.d_mu, + self.firing_rate_transfer_function, + ) + ), + ), + ( + "voltage_lookup", + numba.njit( + _table_numba_gen( + self.sigma_range, self.mu_range, self.d_sigma, self.d_mu, self.voltage_transfer_function + ) + ), + ), + ( + "tau_lookup", + numba.njit( + _table_numba_gen( + self.sigma_range, self.mu_range, self.d_sigma, self.d_mu, self.tau_transfer_function + ) + ), + ), + ] + + def firing_rate_lookup(self, y, current_mu, current_sigma): + """ + Translate mean and std. deviation of the current to firing rate using + linear-nonlinear lookup table for ALN. + """ + return _table_lookup( + current_mu, + current_sigma, + self.sigma_range, + self.mu_range, + self.d_sigma, + self.d_mu, + self.firing_rate_transfer_function, + ) + + def voltage_lookup(self, y, current_mu, current_sigma): + """ + Translate mean and std. deviation of the current to voltage using + precomputed transfer functions of the aln model. + """ + return _table_lookup( + current_mu, + current_sigma, + self.sigma_range, + self.mu_range, + self.d_sigma, + self.d_mu, + self.voltage_transfer_function, + ) + + def tau_lookup(self, y, current_mu, current_sigma): + """ + Translate mean and std. deviation of the current to tau - membrane time + constant using precomputed transfer functions of the aln model. + """ + return _table_lookup( + current_mu, + current_sigma, + self.sigma_range, + self.mu_range, + self.d_sigma, + self.d_mu, + self.tau_transfer_function, + ) + + +class ExcitatoryALNMass(ALNMass): + """ + Excitatory ALN neural mass. Contains firing rate adaptation current. + """ + + name = "ALN excitatory neural mass" + label = f"ALN{EXC}" + num_state_variables = 7 + coupling_variables = {6: f"r_mean_{EXC}"} + mass_type = EXC + state_variable_names = [ + "I_mu", + "I_A", + "I_syn_mu_exc", + "I_syn_mu_inh", + "I_syn_sigma_exc", + "I_syn_sigma_inh", + "r_mean", + ] + required_couplings = [ + "node_exc_exc", + "node_exc_exc_sq", + "node_exc_inh", + "node_exc_inh_sq", + "network_exc_exc", + "network_exc_exc_sq", + ] + required_params = [ + "Ke", + "Ki", + "c_gl", + "Ke_gl", + "tau_se", + "tau_si", + "sigmae_ext", + "Jee_max", + "Jei_max", + "taum", + "C", + "ext_exc_current", + "ext_exc_rate", + "a", + "b", + "EA", + "tauA", + "lambda", + ] + + @staticmethod + def _rescale_strengths(params): + """ + Rescale connection strengths. + """ + params = deepcopy(params) + assert isinstance(params, dict) + params["c_gl"] = params["c_gl"] * params["tau_se"] / params["Jee_max"] + + params["taum"] = params["C"] / params["gL"] + return params + + def __init__(self, params=None, lin_nonlin_transfer_function_filename=None, seed=None): + super().__init__( + params=params or ALN_EXC_DEFAULT_PARAMS, + lin_nonlin_transfer_function_filename=lin_nonlin_transfer_function_filename, + seed=seed, + ) + + def update_params(self, params_dict): + """ + Update parameters as in the base class but also rescale. 
+ """ + # if we are changing C_m or g_L, update tau_m as well + if any(k in params_dict for k in ("C", "gL")): + C_m = params_dict["C"] if "C" in params_dict else self.params["C"] + g_L = params_dict["gL"] if "gL" in params_dict else self.params["gL"] + params_dict["taum"] = C_m / g_L + + # if we are changing any of the J_exc_max, tau_syn_exc or c_global, rescale c_global + if any(k in params_dict for k in ("c_gl", "Jee_max", "tau_se")): + # get original c_global + c_global = ( + params_dict["c_gl"] + if "c_gl" in params_dict + else self.params["c_gl"] * (self.params["Jee_max"] / self.params["tau_se"]) + ) + tau_syn_exc = params_dict["tau_se"] if "tau_se" in params_dict else self.params["tau_se"] + J_exc_max = params_dict["Jee_max"] if "Jee_max" in params_dict else self.params["Jee_max"] + params_dict["c_gl"] = c_global * tau_syn_exc / J_exc_max + + # update all parameters finally + super().update_params(params_dict) + + def _initialize_state_vector(self): + """ + Initialize state vector. + """ + np.random.seed(self.seed) + self.initial_state = ( + np.random.uniform(0, 1, self.num_state_variables) * np.array([3.0, 200.0, 0.5, 0.5, 0.001, 0.001, 0.01]) + ).tolist() + + def _compute_couplings(self, coupling_variables): + """ + Helper that computes coupling from other nodes and network. + """ + exc_coupling = ( + self.params["Ke"] * coupling_variables["node_exc_exc"] + + self.params["c_gl"] * self.params["Ke_gl"] * coupling_variables["network_exc_exc"] + + self.params["c_gl"] * self.params["Ke_gl"] * self.params["ext_exc_rate"] + ) + inh_coupling = self.params["Ki"] * coupling_variables["node_exc_inh"] + exc_coupling_squared = ( + self.params["Ke"] * coupling_variables["node_exc_exc_sq"] + + self.params["c_gl"] ** 2 * self.params["Ke_gl"] * coupling_variables["network_exc_exc_sq"] + + self.params["c_gl"] ** 2 * self.params["Ke_gl"] * self.params["ext_exc_rate"] + ) + inh_coupling_squared = self.params["Ki"] * coupling_variables["node_exc_inh_sq"] + + return ( + exc_coupling, + inh_coupling, + exc_coupling_squared, + inh_coupling_squared, + ) + + def _derivatives(self, coupling_variables): + ( + I_mu, + I_adaptation, + I_syn_mu_exc, + I_syn_mu_inh, + I_syn_sigma_exc, + I_syn_sigma_inh, + firing_rate, + ) = self._unwrap_state_vector() + + exc_inp, inh_inp, exc_inp_sq, inh_inp_sq = self._compute_couplings(coupling_variables) + + I_sigma = se.sqrt( + 2 + * self.params["Jee_max"] ** 2 + * I_syn_sigma_exc + * self.params["tau_se"] + * self.params["taum"] + / ((1.0 + exc_inp) * self.params["taum"] + self.params["tau_se"]) + + 2 + * self.params["Jei_max"] ** 2 + * I_syn_sigma_inh + * self.params["tau_si"] + * self.params["taum"] + / ((1.0 + inh_inp) * self.params["taum"] + self.params["tau_si"]) + + self.params["sigmae_ext"] ** 2 + ) + + # get values from linear-nonlinear lookup table + firing_rate_now = self.callback_functions["firing_rate_lookup"](I_mu - I_adaptation / self.params["C"], I_sigma) + voltage = self.callback_functions["voltage_lookup"](I_mu - I_adaptation / self.params["C"], I_sigma) + tau = self.callback_functions["tau_lookup"](I_mu - I_adaptation / self.params["C"], I_sigma) + + d_I_mu = ( + self.params["Jee_max"] * I_syn_mu_exc + + self.params["Jei_max"] * I_syn_mu_inh + + system_input(self.noise_input_idx[0]) + + self.params["ext_exc_current"] + - I_mu + ) / tau + + d_I_adaptation = ( + self.params["a"] * (voltage - self.params["EA"]) + - I_adaptation + + self.params["tauA"] * self.params["b"] * firing_rate_now + ) / self.params["tauA"] + + d_I_syn_mu_exc = ((1.0 - I_syn_mu_exc) * 
exc_inp - I_syn_mu_exc) / self.params["tau_se"]
+
+        d_I_syn_mu_inh = ((1.0 - I_syn_mu_inh) * inh_inp - I_syn_mu_inh) / self.params["tau_si"]
+
+        d_I_syn_sigma_exc = (
+            (1.0 - I_syn_mu_exc) ** 2 * exc_inp_sq
+            + (exc_inp_sq - 2.0 * self.params["tau_se"] * (exc_inp + 1.0)) * I_syn_sigma_exc
+        ) / (self.params["tau_se"] ** 2)
+
+        d_I_syn_sigma_inh = (
+            (1.0 - I_syn_mu_inh) ** 2 * inh_inp_sq
+            + (inh_inp_sq - 2.0 * self.params["tau_si"] * (inh_inp + 1.0)) * I_syn_sigma_inh
+        ) / (self.params["tau_si"] ** 2)
+        # firing rate as a dummy dynamical variable with infinitely fast
+        # fixed-point dynamics
+        d_firing_rate = -self.params["lambda"] * (firing_rate - firing_rate_now)
+
+        return [
+            d_I_mu,
+            d_I_adaptation,
+            d_I_syn_mu_exc,
+            d_I_syn_mu_inh,
+            d_I_syn_sigma_exc,
+            d_I_syn_sigma_inh,
+            d_firing_rate,
+        ]
+
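The `_rescale_strengths`/`update_params` pair above implements an invertible rescaling of `c_gl`. A minimal round-trip on plain floats, using the excitatory defaults (no mass is instantiated; the numbers are only illustrative):

    tau_se, Jee_max, c_gl_raw = 2.0, 2.43, 0.4  # from ALN_EXC_DEFAULT_PARAMS

    # at construction, c_gl is stored in rescaled form
    c_gl_stored = c_gl_raw * tau_se / Jee_max

    # update_params first recovers the raw value from the stored one ...
    c_gl_recovered = c_gl_stored * (Jee_max / tau_se)
    assert abs(c_gl_recovered - c_gl_raw) < 1e-12

    # ... and then re-applies the rescaling with the updated parameter
    new_Jee_max = 3.0
    print(c_gl_recovered * tau_se / new_Jee_max)  # 0.4 * 2.0 / 3.0, as asserted in test_aln.py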
+class InhibitoryALNMass(ALNMass):
+    """
+    Inhibitory ALN neural mass. In contrast to the excitatory mass, the
+    inhibitory mass does not contain a firing rate adaptation current.
+    """
+
+    name = "ALN inhibitory neural mass"
+    label = f"ALN{INH}"
+    num_state_variables = 6
+    coupling_variables = {5: f"r_mean_{INH}"}
+    mass_type = INH
+    state_variable_names = [
+        "I_mu",
+        "I_syn_mu_exc",
+        "I_syn_mu_inh",
+        "I_syn_sigma_exc",
+        "I_syn_sigma_inh",
+        "r_mean",
+    ]
+    required_couplings = [
+        "node_inh_exc",
+        "node_inh_exc_sq",
+        "node_inh_inh",
+        "node_inh_inh_sq",
+    ]
+    required_params = [
+        "Ke",
+        "Ki",
+        "c_gl",
+        "Ke_gl",
+        "tau_se",
+        "tau_si",
+        "sigmai_ext",
+        "Jie_max",
+        "Jii_max",
+        "taum",
+        "C",
+        "ext_inh_current",
+        "ext_inh_rate",
+        "lambda",
+    ]
+
+    @staticmethod
+    def _rescale_strengths(params):
+        """
+        Rescale connection strengths.
+        """
+        params = deepcopy(params)
+        assert isinstance(params, dict)
+        params["c_gl"] = params["c_gl"] * params["tau_se"] / params["Jie_max"]
+
+        params["taum"] = params["C"] / params["gL"]
+        return params
+
+    def __init__(self, params=None, lin_nonlin_transfer_function_filename=None, seed=None):
+        super().__init__(
+            params=params or ALN_INH_DEFAULT_PARAMS,
+            lin_nonlin_transfer_function_filename=lin_nonlin_transfer_function_filename,
+            seed=seed,
+        )
+
+    def update_params(self, params_dict):
+        """
+        Update parameters as in the base class but also rescale.
+        """
+        # if we are changing C_m or g_L, update tau_m as well
+        if any(k in params_dict for k in ("C", "gL")):
+            C_m = params_dict["C"] if "C" in params_dict else self.params["C"]
+            g_L = params_dict["gL"] if "gL" in params_dict else self.params["gL"]
+            params_dict["taum"] = C_m / g_L
+
+        # if we are changing any of Jie_max, tau_se or c_gl, rescale c_gl
+        if any(k in params_dict for k in ("c_gl", "Jie_max", "tau_se")):
+            # get original c_global
+            c_global = (
+                params_dict["c_gl"]
+                if "c_gl" in params_dict
+                else self.params["c_gl"] * (self.params["Jie_max"] / self.params["tau_se"])
+            )
+            tau_syn_exc = params_dict["tau_se"] if "tau_se" in params_dict else self.params["tau_se"]
+            J_exc_max = params_dict["Jie_max"] if "Jie_max" in params_dict else self.params["Jie_max"]
+            params_dict["c_gl"] = c_global * tau_syn_exc / J_exc_max
+
+        # finally, update all parameters
+        super().update_params(params_dict)
+
+    def _initialize_state_vector(self):
+        """
+        Initialize state vector.
+        """
+        np.random.seed(self.seed)
+        self.initial_state = (
+            np.random.uniform(0, 1, self.num_state_variables) * np.array([3.0, 0.5, 0.5, 0.01, 0.01, 0.01])
+        ).tolist()
+
+    def _compute_couplings(self, coupling_variables):
+        """
+        Helper that computes coupling from other nodes and the network.
+        """
+        exc_coupling = (
+            self.params["Ke"] * coupling_variables["node_inh_exc"]
+            + self.params["c_gl"] * self.params["Ke_gl"] * self.params["ext_inh_rate"]
+        )
+        inh_coupling = self.params["Ki"] * coupling_variables["node_inh_inh"]
+        exc_coupling_squared = (
+            self.params["Ke"] * coupling_variables["node_inh_exc_sq"]
+            + self.params["c_gl"] ** 2 * self.params["Ke_gl"] * self.params["ext_inh_rate"]
+        )
+        inh_coupling_squared = self.params["Ki"] * coupling_variables["node_inh_inh_sq"]
+
+        return (
+            exc_coupling,
+            inh_coupling,
+            exc_coupling_squared,
+            inh_coupling_squared,
+        )
+
+    def _derivatives(self, coupling_variables):
+        (
+            I_mu,
+            I_syn_mu_exc,
+            I_syn_mu_inh,
+            I_syn_sigma_exc,
+            I_syn_sigma_inh,
+            firing_rate,
+        ) = self._unwrap_state_vector()
+
+        exc_inp, inh_inp, exc_inp_sq, inh_inp_sq = self._compute_couplings(coupling_variables)
+
+        I_sigma = se.sqrt(
+            2
+            * self.params["Jie_max"] ** 2
+            * I_syn_sigma_exc
+            * self.params["tau_se"]
+            * self.params["taum"]
+            / ((1.0 + exc_inp) * self.params["taum"] + self.params["tau_se"])
+            + 2
+            * self.params["Jii_max"] ** 2
+            * I_syn_sigma_inh
+            * self.params["tau_si"]
+            * self.params["taum"]
+            / ((1.0 + inh_inp) * self.params["taum"] + self.params["tau_si"])
+            + self.params["sigmai_ext"] ** 2
+        )
+
+        # get values from linear-nonlinear lookup table
+        firing_rate_now = self.callback_functions["firing_rate_lookup"](I_mu, I_sigma)
+        tau = self.callback_functions["tau_lookup"](I_mu, I_sigma)
+
+        d_I_mu = (
+            self.params["Jie_max"] * I_syn_mu_exc
+            + self.params["Jii_max"] * I_syn_mu_inh
+            + system_input(self.noise_input_idx[0])
+            + self.params["ext_inh_current"]
+            - I_mu
+        ) / tau
+
+        d_I_syn_mu_exc = ((1.0 - I_syn_mu_exc) * exc_inp - I_syn_mu_exc) / self.params["tau_se"]
+
+        d_I_syn_mu_inh = ((1.0 - I_syn_mu_inh) * inh_inp - I_syn_mu_inh) / self.params["tau_si"]
+
+        d_I_syn_sigma_exc = (
+            (1.0 - I_syn_mu_exc) ** 2 * exc_inp_sq
+            + (exc_inp_sq - 2.0 * self.params["tau_se"] * (exc_inp + 1.0)) * I_syn_sigma_exc
+        ) / (self.params["tau_se"] ** 2)
+
+        d_I_syn_sigma_inh = (
+            (1.0 - I_syn_mu_inh) ** 2 * inh_inp_sq
+            + (inh_inp_sq - 2.0 * self.params["tau_si"] * (inh_inp + 1.0)) * I_syn_sigma_inh
+        ) / (self.params["tau_si"] ** 2)
+        # firing rate as a dummy dynamical variable with infinitely fast
+        # fixed-point dynamics
+        d_firing_rate = -self.params["lambda"] * (firing_rate - firing_rate_now)
+
+        return [
+            d_I_mu,
+            d_I_syn_mu_exc,
+            d_I_syn_mu_inh,
+            d_I_syn_sigma_exc,
+            d_I_syn_sigma_inh,
+            d_firing_rate,
+        ]
+
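For intuition about the `I_sigma` term in the `_derivatives` methods above, a sketch evaluating the same expression with plain numpy, using the excitatory default parameters and made-up state values (symengine is only required for the symbolic derivatives themselves):

    import numpy as np

    taum, tau_se, tau_si = 20.0, 2.0, 5.0           # ms; taum = C / gL = 200 / 10
    Jee_max, Jei_max, sigmae_ext = 2.43, -3.3, 1.5  # defaults above
    I_syn_sigma_exc, I_syn_sigma_inh = 0.001, 0.001 # toy state values
    exc_inp, inh_inp = 0.1, 0.05                    # toy couplings

    I_sigma = np.sqrt(
        2 * Jee_max ** 2 * I_syn_sigma_exc * tau_se * taum / ((1.0 + exc_inp) * taum + tau_se)
        + 2 * Jei_max ** 2 * I_syn_sigma_inh * tau_si * taum / ((1.0 + inh_inp) * taum + tau_si)
        + sigmae_ext ** 2
    )
    print(I_sigma)  # ~1.53: for small synaptic variance the external noise dominates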
+class ALNNode(SingleCouplingExcitatoryInhibitoryNode):
+    """
+    Default ALN network node with one excitatory (featuring an adaptation
+    current) and one inhibitory population.
+    """
+
+    name = "ALN neural mass node"
+    label = "ALNNode"
+
+    sync_variables = [
+        "node_exc_exc",
+        "node_inh_exc",
+        "node_exc_inh",
+        "node_inh_inh",
+        # squared variants
+        "node_exc_exc_sq",
+        "node_inh_exc_sq",
+        "node_exc_inh_sq",
+        "node_inh_inh_sq",
+    ]
+
+    default_network_coupling = {
+        "network_exc_exc": 0.0,
+        "network_exc_exc_sq": 0.0,
+    }
+
+    default_output = f"r_mean_{EXC}"
+
+    def _rescale_connectivity(self):
+        """
+        Rescale connection strengths for ALN. Should also work for ALN nodes
+        with an arbitrary number of masses of any type.
+        """
+        # create tau and J_max matrices for rescaling
+        tau_mat = np.zeros_like(self.connectivity)
+        J_mat = np.zeros_like(self.connectivity)
+        for col, mass_from in enumerate(self.masses):
+            # taus are constant per column and depend only on the "from" mass
+            tau_mat[:, col] = mass_from.params[f"tau_s{mass_from.mass_type.lower()[0]}"]
+            # Js are specific: take J from the "to" mass, of the type of the "from" mass
+            for row, mass_to in enumerate(self.masses):
+                J_mat[row, col] = mass_to.params[f"J{mass_to.mass_type.lower()[0]}{mass_from.mass_type.lower()[0]}_max"]
+
+        # multiplication by tau makes the increase of synaptic activity due to
+        # a single input spike invariant to tau, and division by J ensures
+        # that mu = J*s results in a PSP of exactly c for a single spike
+        self.connectivity = (self.connectivity * tau_mat) / np.abs(J_mat)
+
+    def __init__(
+        self,
+        exc_params=None,
+        inh_params=None,
+        exc_lin_nonlin_transfer_function_filename=None,
+        inh_lin_nonlin_transfer_function_filename=None,
+        connectivity=ALN_NODE_DEFAULT_CONNECTIVITY,
+        delays=ALN_NODE_DEFAULT_DELAYS,
+        exc_seed=None,
+        inh_seed=None,
+    ):
+        """
+        :param exc_params: parameters for the excitatory mass
+        :type exc_params: dict|None
+        :param inh_params: parameters for the inhibitory mass
+        :type inh_params: dict|None
+        :param exc_lin_nonlin_transfer_function_filename: filename for precomputed
+            linear-nonlinear transfer functions for the excitatory ALN mass; if
+            None, will look for it in this directory
+        :type exc_lin_nonlin_transfer_function_filename: str|None
+        :param inh_lin_nonlin_transfer_function_filename: filename for precomputed
+            linear-nonlinear transfer functions for the inhibitory ALN mass; if
+            None, will look for it in this directory
+        :type inh_lin_nonlin_transfer_function_filename: str|None
+        :param connectivity: local connectivity matrix
+        :type connectivity: np.ndarray
+        :param delays: local delay matrix
+        :type delays: np.ndarray
+        :param exc_seed: seed for random number generator for the excitatory
+            mass
+        :type exc_seed: int|None
+        :param inh_seed: seed for random number generator for the inhibitory
+            mass
+        :type inh_seed: int|None
+        """
+        excitatory_mass = ExcitatoryALNMass(
+            params=exc_params,
+            lin_nonlin_transfer_function_filename=exc_lin_nonlin_transfer_function_filename,
+            seed=exc_seed,
+        )
+        excitatory_mass.index = 0
+        inhibitory_mass = InhibitoryALNMass(
+            params=inh_params,
+            lin_nonlin_transfer_function_filename=inh_lin_nonlin_transfer_function_filename,
+            seed=inh_seed,
+        )
+        inhibitory_mass.index = 1
+        super().__init__(
+            neural_masses=[excitatory_mass, inhibitory_mass],
+            local_connectivity=connectivity,
+            local_delays=delays,
+        )
+        self._rescale_connectivity()
+
+    def update_params(self, params_dict):
+        """
+        Rescale connectivity after params update if connectivity was updated.
+        """
+        rescale_flag = "local_connectivity" in params_dict
+        super().update_params(params_dict)
+        if rescale_flag:
+            self._rescale_connectivity()
+
+    def _sync(self):
+        """
+        Apart from the basic EXC<->INH connectivity, also construct the
+        squared variants.
+        """
+        connectivity_sq = self.connectivity ** 2 * self.inputs
+        sq_connectivity = [
+            (
+                # exc -> exc squared connectivity
+                self.sync_symbols[f"node_exc_exc_sq_{self.index}"],
+                sum([connectivity_sq[row, col] for row in self.excitatory_masses for col in self.excitatory_masses]),
+            ),
+            (
+                # exc -> inh squared connectivity
+                self.sync_symbols[f"node_inh_exc_sq_{self.index}"],
+                sum([connectivity_sq[row, col] for row in self.inhibitory_masses for col in self.excitatory_masses]),
+            ),
+            (
+                # inh -> exc squared connectivity
+                self.sync_symbols[f"node_exc_inh_sq_{self.index}"],
+                sum([connectivity_sq[row, col] for row in self.excitatory_masses for col in self.inhibitory_masses]),
+            ),
+            (
+                # inh -> inh squared connectivity
+                self.sync_symbols[f"node_inh_inh_sq_{self.index}"],
+                sum([connectivity_sq[row, col] for row in self.inhibitory_masses for col in self.inhibitory_masses]),
+            ),
+        ]
+        return super()._sync() + sq_connectivity
+
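What `ALNNode._sync` computes for the default two-mass node, written out with plain numpy on made-up inputs (indices 0 = EXC, 1 = INH; the values are illustrative only):

    import numpy as np

    connectivity = np.array([[0.3, 0.5], [0.3, 0.5]])  # [to, from], as above
    inputs = np.array([[0.1, 0.2], [0.1, 0.2]])        # toy delayed firing rates, [to, from]

    connectivity_sq = connectivity ** 2 * inputs
    exc, inh = [0], [1]  # indices of excitatory/inhibitory masses

    node_exc_exc_sq = sum(connectivity_sq[r, c] for r in exc for c in exc)
    node_exc_inh_sq = sum(connectivity_sq[r, c] for r in exc for c in inh)
    print(node_exc_exc_sq, node_exc_inh_sq)  # 0.009, 0.05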
+class ALNNetwork(Network):
+    """
+    Whole-brain network of adaptive exponential integrate-and-fire mean-field
+    excitatory and inhibitory nodes.
+    """
+
+    name = "ALN neural mass network"
+    label = "ALNNet"
+
+    sync_variables = ["network_exc_exc", "network_exc_exc_sq"]
+
+    def __init__(
+        self,
+        connectivity_matrix,
+        delay_matrix,
+        exc_mass_params=None,
+        inh_mass_params=None,
+        exc_lin_nonlin_transfer_function_filename=None,
+        inh_lin_nonlin_transfer_function_filename=None,
+        local_connectivity=ALN_NODE_DEFAULT_CONNECTIVITY,
+        local_delays=ALN_NODE_DEFAULT_DELAYS,
+        exc_seed=None,
+        inh_seed=None,
+    ):
+        """
+        :param connectivity_matrix: connectivity matrix for coupling between
+            nodes, defined as [from, to]
+        :type connectivity_matrix: np.ndarray
+        :param delay_matrix: delay matrix between nodes, if None, delays are
+            all zeros, in ms, defined as [from, to]
+        :type delay_matrix: np.ndarray|None
+        :param exc_mass_params: parameters for each excitatory ALN neural
+            mass, if None, will use default
+        :type exc_mass_params: list[dict]|dict|None
+        :param inh_mass_params: parameters for each inhibitory ALN neural
+            mass, if None, will use default
+        :type inh_mass_params: list[dict]|dict|None
+        :param exc_lin_nonlin_transfer_function_filename: filename for precomputed
+            linear-nonlinear transfer functions for the excitatory ALN masses;
+            if None, will look for it in this directory
+        :type exc_lin_nonlin_transfer_function_filename: list[str]|str|None
+        :param inh_lin_nonlin_transfer_function_filename: filename for precomputed
+            linear-nonlinear transfer functions for the inhibitory ALN masses;
+            if None, will look for it in this directory
+        :type inh_lin_nonlin_transfer_function_filename: list[str]|str|None
+        :param local_connectivity: local within-node connectivity matrix
+        :type local_connectivity: np.ndarray
+        :param local_delays: local within-node delay matrix
+        :type local_delays: list[np.ndarray]|np.ndarray
+        :param exc_seed: seed for random number generator for the excitatory
+            masses
+        :type exc_seed: int|None
+        :param inh_seed: seed for random number generator for the inhibitory
+            masses
+        :type inh_seed: int|None
+        """
+        num_nodes = connectivity_matrix.shape[0]
+        exc_mass_params = self._prepare_mass_params(exc_mass_params, num_nodes)
+        inh_mass_params = self._prepare_mass_params(inh_mass_params, num_nodes)
+        exc_lin_nonlin_transfer_function_filename = self._prepare_mass_params(
+            exc_lin_nonlin_transfer_function_filename, num_nodes, native_type=str
+        )
+        inh_lin_nonlin_transfer_function_filename = self._prepare_mass_params(
+            inh_lin_nonlin_transfer_function_filename, num_nodes, native_type=str
+        )
+        local_connectivity = self._prepare_mass_params(local_connectivity, num_nodes, native_type=np.ndarray)
+        local_delays = self._prepare_mass_params(local_delays, num_nodes, native_type=np.ndarray)
+        exc_seeds = self._prepare_mass_params(exc_seed, num_nodes, native_type=int)
+        inh_seeds = self._prepare_mass_params(inh_seed, num_nodes, native_type=int)
+
+        nodes = []
+        for (
+            i,
+            (exc_params, inh_params, exc_transfer_function, inh_transfer_function, local_conn, local_dels),
+        ) in enumerate(
+            zip(
+                exc_mass_params,
+                inh_mass_params,
+                exc_lin_nonlin_transfer_function_filename,
+                inh_lin_nonlin_transfer_function_filename,
+                local_connectivity,
+                local_delays,
+            )
+        ):
+            node = ALNNode(
+                exc_params=exc_params,
+                inh_params=inh_params,
+                exc_lin_nonlin_transfer_function_filename=exc_transfer_function,
+                inh_lin_nonlin_transfer_function_filename=inh_transfer_function,
+                connectivity=local_conn,
+                delays=local_dels,
+                exc_seed=exc_seeds[i],
+                inh_seed=inh_seeds[i],
+            )
+            node.index = i
+            node.idx_state_var = i * node.num_state_variables
+            # set correct indices of noise input
+            for mass in node:
+                mass.noise_input_idx = [2 * i + mass.index]
+            nodes.append(node)
+
+        super().__init__(nodes=nodes, connectivity_matrix=connectivity_matrix, delay_matrix=delay_matrix)
+        # assert we have 2 sync variables
+        assert len(self.sync_variables) == 2
+
+    def _sync(self):
+        """
+        Overload the sync method since the ALN model requires squared coupling
+        weights and non-trivial coupling indices.
+        """
+        # get coupling variable index from excitatory mass within each node
+        coupling_var_idx = set(sum([list(node[0].coupling_variables.keys()) for node in self], []))
+        assert len(coupling_var_idx) == 1
+        coupling_var_idx = next(iter(coupling_var_idx))
+        return (
+            # regular additive coupling
+            self._additive_coupling(within_node_idx=coupling_var_idx, symbol="network_exc_exc")
+            # additive coupling with squared weights
+            + self._additive_coupling(
+                within_node_idx=coupling_var_idx,
+                symbol="network_exc_exc_sq",
+                # the multiplier is the connectivity again, so the weights end
+                # up squared
+                connectivity_multiplier=self.connectivity,
+            )
+            + super()._sync()
+        )
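The two network coupling terms in `ALNNetwork._sync` above amount to the following arithmetic for each node (a sketch with made-up numbers; the actual implementation builds symbolic jitcdde expressions via `_additive_coupling`, and the transpose reflects the documented [from, to] orientation of `connectivity_matrix`):

    import numpy as np

    connectivity = np.array([[0.0, 0.7], [0.4, 0.0]])  # toy global coupling, [from, to]
    r_exc = np.array([0.01, 0.02])                     # toy delayed EXC rates per node

    # in-coupling of node i sums C[j, i] * r_j over source nodes j
    network_exc_exc = connectivity.T @ r_exc
    # passing the connectivity as `connectivity_multiplier` squares the weights
    network_exc_exc_sq = (connectivity * connectivity).T @ r_exc

    print(network_exc_exc)     # [0.008, 0.007]
    print(network_exc_exc_sq)  # [0.0032, 0.0049]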
diff --git a/neurolib/models/multimodel/builder/base/constants.py b/neurolib/models/multimodel/builder/base/constants.py
index d57bddd6..63641b13 100644
--- a/neurolib/models/multimodel/builder/base/constants.py
+++ b/neurolib/models/multimodel/builder/base/constants.py
@@ -1,7 +1,3 @@
-"""
-Set of constants for model building.
-"""
-
 ### -- NAMING CONVENTIONS --
 # names for excitatory and inhibitory masses
 EXC = "EXC"
diff --git a/neurolib/models/multimodel/builder/base/network.py b/neurolib/models/multimodel/builder/base/network.py
index a7ce0ea4..634afb8e 100644
--- a/neurolib/models/multimodel/builder/base/network.py
+++ b/neurolib/models/multimodel/builder/base/network.py
@@ -1,6 +1,3 @@
-"""
-Base for "network" and "node" operations on neural masses.
-"""
 import logging
 
 import numpy as np
diff --git a/neurolib/models/multimodel/builder/base/neural_mass.py b/neurolib/models/multimodel/builder/base/neural_mass.py
index 1e20deac..531d2284 100644
--- a/neurolib/models/multimodel/builder/base/neural_mass.py
+++ b/neurolib/models/multimodel/builder/base/neural_mass.py
@@ -1,6 +1,3 @@
-"""
-Base for all mass models.
-""" import numpy as np import symengine as se from jitcdde import y as state_vector @@ -8,7 +5,7 @@ class NeuralMass: """ - Represent a neural population with given parameters and equations, in + Represents a neural population with given parameters and equations, in particular, the derivatives of the state vector. """ @@ -53,7 +50,7 @@ class NeuralMass: # list of callbacks functions in pure python, that are called from the # symbolic derivative, useful when part of neural dynamics cannot be - # expressed as symbolic expression (e.g. table lookups in AdEx models) + # expressed as symbolic expression (e.g. table lookups in aln model) # provide names of the functions here - this name shall be used in # definition of the derivative; # NOTE callbacks should be defined as jitted function (i.e. decorated with diff --git a/neurolib/models/multimodel/builder/fitzhugh_nagumo.py b/neurolib/models/multimodel/builder/fitzhugh_nagumo.py index 15214bf3..9f74c8cb 100644 --- a/neurolib/models/multimodel/builder/fitzhugh_nagumo.py +++ b/neurolib/models/multimodel/builder/fitzhugh_nagumo.py @@ -1,42 +1,24 @@ -""" -FitzHugh–Nagumo model. - -Main references: - FitzHugh, R. (1955). Mathematical models of threshold phenomena in the - nerve membrane. The bulletin of mathematical biophysics, 17(4), 257-278. - - Nagumo, J., Arimoto, S., & Yoshizawa, S. (1962). An active pulse - transmission line simulating nerve axon. Proceedings of the IRE, 50(10), - 2061-2070. - -Additional reference: - Kostova, T., Ravindran, R., & Schonbek, M. (2004). FitzHugh–Nagumo - revisited: Types of bifurcations, periodical forcing and stability regions - by a Lyapunov functional. International journal of bifurcation and chaos, - 14(03), 913-925. -""" - import numpy as np from jitcdde import input as system_input from ..builder.base.network import Network, Node from ..builder.base.neural_mass import NeuralMass -DEFAULT_PARAMS = { +FHN_DEFAULT_PARAMS = { "alpha": 3.0, "beta": 4.0, "gamma": -1.5, "delta": 0.0, "epsilon": 0.5, "tau": 20.0, - "ext_input_x": 1.0, - "ext_input_y": 0.0, + "x_ext": 1.0, + "y_ext": 0.0, } class FitzHughNagumoMass(NeuralMass): """ - FitzHugh-Nagumo neural mass. + FitzHugh-Nagumo model. """ name = "FitzHugh-Nagumo mass" @@ -53,13 +35,13 @@ class FitzHughNagumoMass(NeuralMass): "delta", "epsilon", "tau", - "ext_input_x", - "ext_input_y", + "x_ext", + "y_ext", ] required_couplings = ["network_x", "network_y"] def __init__(self, params=None, seed=None): - super().__init__(params=params or DEFAULT_PARAMS, seed=seed) + super().__init__(params=params or FHN_DEFAULT_PARAMS, seed=seed) def _initialize_state_vector(self): """ @@ -78,14 +60,14 @@ def _derivatives(self, coupling_variables): - y + coupling_variables["network_x"] + system_input(self.noise_input_idx[0]) - + self.params["ext_input_x"] + + self.params["x_ext"] ) d_y = ( (x - self.params["delta"] - self.params["epsilon"] * y) / self.params["tau"] + coupling_variables["network_y"] + system_input(self.noise_input_idx[1]) - + self.params["ext_input_y"] + + self.params["y_ext"] ) return [d_x, d_y] diff --git a/neurolib/models/multimodel/builder/hopf.py b/neurolib/models/multimodel/builder/hopf.py index 6a59d58f..88271cc2 100644 --- a/neurolib/models/multimodel/builder/hopf.py +++ b/neurolib/models/multimodel/builder/hopf.py @@ -1,40 +1,28 @@ -""" -Hopf normal form model. - -References: - Landau, L. D. (1944). On the problem of turbulence. In Dokl. Akad. Nauk USSR - (Vol. 44, p. 311). - - Stuart, J. T. (1960). 
On the non-linear mechanics of wave disturbances in - stable and unstable parallel flows Part 1. The basic behaviour in plane - Poiseuille flow. Journal of Fluid Mechanics, 9(3), 353-370. - - Kuznetsov, Y. A. (2013). Elements of applied bifurcation theory (Vol. 112). - Springer Science & Business Media. - - Deco, G., Cabral, J., Woolrich, M. W., Stevner, A. B., Van Hartevelt, T. J., - & Kringelbach, M. L. (2017). Single or multiple frequency generators in - on-going brain activity: A mechanistic whole-brain model of empirical MEG - data. Neuroimage, 152, 538-550. -""" - import numpy as np from jitcdde import input as system_input from ..builder.base.network import Network, Node from ..builder.base.neural_mass import NeuralMass -DEFAULT_PARAMS = { +HOPF_DEFAULT_PARAMS = { "a": 0.25, - "omega": 0.2, - "ext_input_x": 0.0, - "ext_input_y": 0.0, + "w": 0.2, + "x_ext": 0.0, + "y_ext": 0.0, } class HopfMass(NeuralMass): """ Hopf normal form (Landau-Stuart oscillator). + + References: + Landau, L. D. (1944). On the problem of turbulence. In Dokl. Akad. Nauk USSR + (Vol. 44, p. 311). + + Stuart, J. T. (1960). On the non-linear mechanics of wave disturbances in + stable and unstable parallel flows Part 1. The basic behaviour in plane + Poiseuille flow. Journal of Fluid Mechanics, 9(3), 353-370. """ name = "Hopf normal form mass" @@ -44,11 +32,11 @@ class HopfMass(NeuralMass): num_noise_variables = 2 coupling_variables = {0: "x", 1: "y"} state_variable_names = ["x", "y"] - required_params = ["a", "omega", "ext_input_x", "ext_input_y"] + required_params = ["a", "w", "x_ext", "y_ext"] required_couplings = ["network_x", "network_y"] def __init__(self, params=None, seed=None): - super().__init__(params=params or DEFAULT_PARAMS, seed=seed) + super().__init__(params=params or HOPF_DEFAULT_PARAMS, seed=seed) def _initialize_state_vector(self): """ @@ -62,18 +50,18 @@ def _derivatives(self, coupling_variables): d_x = ( (self.params["a"] - x ** 2 - y ** 2) * x - - self.params["omega"] * y + - self.params["w"] * y + coupling_variables["network_x"] + system_input(self.noise_input_idx[0]) - + self.params["ext_input_x"] + + self.params["x_ext"] ) d_y = ( (self.params["a"] - x ** 2 - y ** 2) * y - + self.params["omega"] * x + + self.params["w"] * x + coupling_variables["network_y"] + system_input(self.noise_input_idx[1]) - + self.params["ext_input_y"] + + self.params["y_ext"] ) return [d_x, d_y] diff --git a/neurolib/models/multimodel/builder/thalamus.py b/neurolib/models/multimodel/builder/thalamus.py index 7bc71f86..48452488 100644 --- a/neurolib/models/multimodel/builder/thalamus.py +++ b/neurolib/models/multimodel/builder/thalamus.py @@ -1,13 +1,3 @@ -""" -Thalamic mass models. - -References: - Costa, M. S., Weigenand, A., Ngo, H. V. V., Marshall, L., Born, J., - Martinetz, T., & Claussen, J. C. (2016). A thalamocortical neural mass - model of the EEG during NREM sleep and its response to auditory stimulation. - PLoS computational biology, 12(9). 
-""" - import numpy as np from jitcdde import input as system_input from symengine import exp @@ -16,7 +6,7 @@ from ..builder.base.network import SingleCouplingExcitatoryInhibitoryNode from ..builder.base.neural_mass import NeuralMass -DEFAULT_PARAMS_TCR = { +TCR_DEFAULT_PARAMS = { "tau": 20.0, # ms "Q_max": 400.0e-3, # 1/ms "theta": -58.5, # mV @@ -49,7 +39,7 @@ "ext_current": 0.0, "lambda": LAMBDA_SPEED, } -DEFAULT_PARAMS_TRN = { +TRN_DEFAULT_PARAMS = { "tau": 20.0, # ms "Q_max": 400.0e-3, # 1/ms "theta": -58.5, # mV @@ -72,12 +62,18 @@ "lambda": LAMBDA_SPEED, } # matrix as [to, from], masses as (TCR, TRN) -DEFAULT_THALAMIC_CONNECTIVITY = np.array([[0.0, 5.0], [3.0, 25.0]]) +THALAMUS_NODE_DEFAULT_CONNECTIVITY = np.array([[0.0, 5.0], [3.0, 25.0]]) class ThalamicMass(NeuralMass): """ - Base for thalamic neural populations due to Costa et al. + Base for thalamic neural populations + + Reference: + Costa, M. S., Weigenand, A., Ngo, H. V. V., Marshall, L., Born, J., + Martinetz, T., & Claussen, J. C. (2016). A thalamocortical neural mass + model of the EEG during NREM sleep and its response to auditory stimulation. + PLoS computational biology, 12(9). """ name = "Thalamic mass" @@ -172,7 +168,7 @@ class ThalamocorticalMass(ThalamicMass): ] def __init__(self, params=None): - super().__init__(params=params or DEFAULT_PARAMS_TCR) + super().__init__(params=params or TCR_DEFAULT_PARAMS) def _initialize_state_vector(self): """ @@ -336,7 +332,7 @@ class ThalamicReticularMass(ThalamicMass): ] def __init__(self, params=None): - super().__init__(params=params or DEFAULT_PARAMS_TRN) + super().__init__(params=params or TRN_DEFAULT_PARAMS) def _m_inf_T(self, voltage): return 1.0 / (1.0 + exp(-(voltage + 52.0) / 7.4)) @@ -420,7 +416,7 @@ class ThalamicNode(SingleCouplingExcitatoryInhibitoryNode): default_output = f"q_mean_{EXC}" def __init__( - self, tcr_params=None, trn_params=None, connectivity=DEFAULT_THALAMIC_CONNECTIVITY, + self, tcr_params=None, trn_params=None, connectivity=THALAMUS_NODE_DEFAULT_CONNECTIVITY, ): """ :param tcr_params: parameters for the excitatory (TCR) mass diff --git a/neurolib/models/multimodel/builder/wilson_cowan.py b/neurolib/models/multimodel/builder/wilson_cowan.py index 75b85bfe..affaf8ad 100644 --- a/neurolib/models/multimodel/builder/wilson_cowan.py +++ b/neurolib/models/multimodel/builder/wilson_cowan.py @@ -1,18 +1,3 @@ -""" -Wilson-Cowan model. - -Main reference: - Wilson, H. R., & Cowan, J. D. (1972). Excitatory and inhibitory - interactions in localized populations of model neurons. Biophysical journal, - 12(1), 1-24. - -Additional reference: - Papadopoulos, L., Lynn, C. W., Battaglia, D., & Bassett, D. S. (2020). - Relations between large scale brain connectivity and effects of regional - stimulation depend on collective dynamical state. arXiv preprint - arXiv:2002.00094. 
-""" - import numpy as np from jitcdde import input as system_input from symengine import exp @@ -21,16 +6,21 @@ from ..builder.base.network import Network, SingleCouplingExcitatoryInhibitoryNode from ..builder.base.neural_mass import NeuralMass -DEFAULT_PARAMS_EXC = {"a": 1.5, "mu": 3.0, "tau": 2.5, "ext_input": 1.0} -DEFAULT_PARAMS_INH = {"a": 1.5, "mu": 3.0, "tau": 3.75, "ext_input": 0.0} +WC_EXC_DEFAULT_PARAMS = {"a": 1.5, "mu": 3.0, "tau": 2.5, "ext_input": 1.0} +WC_INH_DEFAULT_PARAMS = {"a": 1.5, "mu": 3.0, "tau": 3.75, "ext_input": 0.0} # matrix as [to, from], masses as (EXC, INH) -DEFAULT_WC_NODE_CONNECTIVITY = np.array([[16.0, 12.0], [15.0, 3.0]]) +WC_NODE_DEFAULT_CONNECTIVITY = np.array([[16.0, 12.0], [15.0, 3.0]]) class WilsonCowanMass(NeuralMass): """ Wilson-Cowan neural mass. Can be excitatory or inhibitory, depending on the parameters. + + Reference: + Wilson, H. R., & Cowan, J. D. (1972). Excitatory and inhibitory + interactions in localized populations of model neurons. Biophysical journal, + 12(1), 1-24. """ name = "Wilson-Cowan mass" @@ -66,7 +56,7 @@ class ExcitatoryWilsonCowanMass(WilsonCowanMass): required_couplings = ["node_exc_exc", "node_exc_inh", "network_exc_exc"] def __init__(self, params=None, seed=None): - super().__init__(params=params or DEFAULT_PARAMS_EXC, seed=seed) + super().__init__(params=params or WC_EXC_DEFAULT_PARAMS, seed=seed) def _derivatives(self, coupling_variables): [x] = self._unwrap_state_vector() @@ -95,7 +85,7 @@ class InhibitoryWilsonCowanMass(WilsonCowanMass): required_couplings = ["node_inh_exc", "node_inh_inh", "network_inh_exc"] def __init__(self, params=None, seed=None): - super().__init__(params=params or DEFAULT_PARAMS_INH, seed=seed) + super().__init__(params=params or WC_INH_DEFAULT_PARAMS, seed=seed) def _derivatives(self, coupling_variables): [x] = self._unwrap_state_vector() @@ -127,7 +117,7 @@ class WilsonCowanNode(SingleCouplingExcitatoryInhibitoryNode): default_output = f"q_mean_{EXC}" def __init__( - self, exc_params=None, inh_params=None, connectivity=DEFAULT_WC_NODE_CONNECTIVITY, exc_seed=None, inh_seed=None + self, exc_params=None, inh_params=None, connectivity=WC_NODE_DEFAULT_CONNECTIVITY, exc_seed=None, inh_seed=None ): """ :param exc_params: parameters for the excitatory mass @@ -173,7 +163,7 @@ def __init__( delay_matrix, exc_mass_params=None, inh_mass_params=None, - local_connectivity=DEFAULT_WC_NODE_CONNECTIVITY, + local_connectivity=WC_NODE_DEFAULT_CONNECTIVITY, exc_seed=None, inh_seed=None, ): diff --git a/neurolib/models/multimodel/builder/wong_wang.py b/neurolib/models/multimodel/builder/wong_wang.py index 00f69d80..009415bf 100644 --- a/neurolib/models/multimodel/builder/wong_wang.py +++ b/neurolib/models/multimodel/builder/wong_wang.py @@ -1,25 +1,3 @@ -""" -Wong-Wang model. Contains both: - - classical Wong-Wang with one network node containing one excitatory and - one inhibitory mass - - Reduced Wong-Wang model with one mass per node - -Main reference: - [original] Wong, K. F., & Wang, X. J. (2006). A recurrent network mechanism - of time integration in perceptual decisions. Journal of Neuroscience, 26(4), - 1314-1328. - -Additional references: - [reduced] Deco, G., Ponce-Alvarez, A., Mantini, D., Romani, G. L., Hagmann, - P., & Corbetta, M. (2013). Resting-state functional connectivity emerges - from structurally and dynamically shaped slow linear fluctuations. Journal - of Neuroscience, 33(27), 11239-11252. - - [original] Deco, G., Ponce-Alvarez, A., Hagmann, P., Romani, G. 
L., Mantini, - D., & Corbetta, M. (2014). How local excitation–inhibition ratio impacts the - whole brain dynamics. Journal of Neuroscience, 34(23), 7886-7898. -""" - import numpy as np from jitcdde import input as system_input from symengine import exp @@ -28,7 +6,7 @@ from ..builder.base.network import Network, Node, SingleCouplingExcitatoryInhibitoryNode from ..builder.base.neural_mass import NeuralMass -DEFAULT_PARAMS_EXC = { +WW_EXC_DEFAULT_PARAMS = { "a": 0.31, # nC^-1 "b": 0.125, # kHz "d": 160.0, # ms @@ -40,7 +18,7 @@ "J_I": 1.0, # nA "lambda": LAMBDA_SPEED, } -DEFAULT_PARAMS_INH = { +WW_INH_DEFAULT_PARAMS = { "a": 0.615, # nC^-1 "b": 0.177, # kHz "d": 87.0, # ms @@ -50,7 +28,7 @@ "J_NMDA": 0.15, # nA "lambda": LAMBDA_SPEED, } -DEFAULT_PARAMS_REDUCED = { +WW_REDUCED_DEFAULT_PARAMS = { "a": 0.27, # nC^-1 "b": 0.108, # kHz "d": 154.0, # ms @@ -63,13 +41,33 @@ } # matrix as [to, from], masses as (EXC, INH) -DEFAULT_WW_NODE_CONNECTIVITY = np.array([[1.4, 1.0], [1.0, 1.0]]) +WW_NODE_DEFAULT_CONNECTIVITY = np.array([[1.4, 1.0], [1.0, 1.0]]) class WongWangMass(NeuralMass): """ Wong-Wang neural mass. Can be excitatory or inhibitory, depending on the parameters. Also a base for reduced Wong-Wang mass. + + Wong-Wang model. Contains both: + - classical Wong-Wang with one network node containing one excitatory and + one inhibitory mass + - Reduced Wong-Wang model with one mass per node + + Main reference: + [original] Wong, K. F., & Wang, X. J. (2006). A recurrent network mechanism + of time integration in perceptual decisions. Journal of Neuroscience, 26(4), + 1314-1328. + + Additional references: + [reduced] Deco, G., Ponce-Alvarez, A., Mantini, D., Romani, G. L., Hagmann, + P., & Corbetta, M. (2013). Resting-state functional connectivity emerges + from structurally and dynamically shaped slow linear fluctuations. Journal + of Neuroscience, 33(27), 11239-11252. + + [original] Deco, G., Ponce-Alvarez, A., Hagmann, P., Romani, G. L., Mantini, + D., & Corbetta, M. (2014). How local excitation–inhibition ratio impacts the + whole brain dynamics. Journal of Neuroscience, 34(23), 7886-7898. 
""" name = "Wong-Wang mass" @@ -118,7 +116,7 @@ class ExcitatoryWongWangMass(WongWangMass): ] def __init__(self, params=None, seed=None): - super().__init__(params=params or DEFAULT_PARAMS_EXC, seed=seed) + super().__init__(params=params or WW_EXC_DEFAULT_PARAMS, seed=seed) def _derivatives(self, coupling_variables): [s, firing_rate] = self._unwrap_state_vector() @@ -165,7 +163,7 @@ class InhibitoryWongWangMass(WongWangMass): ] def __init__(self, params=None, seed=None): - super().__init__(params=params or DEFAULT_PARAMS_INH, seed=seed) + super().__init__(params=params or WW_INH_DEFAULT_PARAMS, seed=seed) def _derivatives(self, coupling_variables): [s, firing_rate] = self._unwrap_state_vector() @@ -207,7 +205,7 @@ class ReducedWongWangMass(WongWangMass): ] def __init__(self, params=None, seed=None): - super().__init__(params=params or DEFAULT_PARAMS_REDUCED, seed=seed) + super().__init__(params=params or WW_REDUCED_DEFAULT_PARAMS, seed=seed) def _derivatives(self, coupling_variables): [s, firing_rate] = self._unwrap_state_vector() @@ -242,7 +240,7 @@ class WongWangNode(SingleCouplingExcitatoryInhibitoryNode): default_output = f"S_{EXC}" def __init__( - self, exc_params=None, inh_params=None, connectivity=DEFAULT_WW_NODE_CONNECTIVITY, exc_seed=None, inh_seed=None, + self, exc_params=None, inh_params=None, connectivity=WW_NODE_DEFAULT_CONNECTIVITY, exc_seed=None, inh_seed=None, ): """ :param exc_params: parameters for the excitatory mass @@ -314,7 +312,7 @@ def __init__( delay_matrix, exc_mass_params=None, inh_mass_params=None, - local_connectivity=DEFAULT_WW_NODE_CONNECTIVITY, + local_connectivity=WW_NODE_DEFAULT_CONNECTIVITY, exc_seed=None, inh_seed=None, ): diff --git a/neurolib/models/thalamus/model.py b/neurolib/models/thalamus/model.py index d307a87d..f74296e5 100644 --- a/neurolib/models/thalamus/model.py +++ b/neurolib/models/thalamus/model.py @@ -6,6 +6,13 @@ class ThalamicMassModel(Model): """ Two population thalamic model + + Reference: + Costa, M. S., Weigenand, A., Ngo, H. V. V., Marshall, L., Born, J., + Martinetz, T., & Claussen, J. C. (2016). A thalamocortical neural mass + model of the EEG during NREM sleep and its response to auditory stimulation. + PLoS computational biology, 12(9). + """ name = "thalamus" diff --git a/setup.py b/setup.py index 0be0ca7e..0632f8cb 100644 --- a/setup.py +++ b/setup.py @@ -11,7 +11,7 @@ setuptools.setup( name="neurolib", - version="0.5.8", + version="0.5.9", description="Easy whole-brain neural mass modeling", long_description=long_description, long_description_content_type="text/markdown", diff --git a/tests/multimodel/test_aln.py b/tests/multimodel/test_aln.py new file mode 100644 index 00000000..f986e4e3 --- /dev/null +++ b/tests/multimodel/test_aln.py @@ -0,0 +1,328 @@ +""" +Set of tests for adaptive exponential integrate-and-fire mean-field model. 
+"""
+
+import unittest
+
+import numba
+import numpy as np
+import xarray as xr
+from jitcdde import jitcdde_input
+from neurolib.models.aln import ALNModel
+from neurolib.models.multimodel.builder.aln import (
+    ALN_EXC_DEFAULT_PARAMS,
+    ALN_INH_DEFAULT_PARAMS,
+    ALN_NODE_DEFAULT_CONNECTIVITY,
+    ALNNetwork,
+    ALNNode,
+    ExcitatoryALNMass,
+    InhibitoryALNMass,
+    _get_interpolation_values,
+    _table_lookup,
+)
+from neurolib.models.multimodel.builder.base.constants import EXC
+from neurolib.models.multimodel.builder.model_input import ZeroInput
+
+# these keys are not tested since they are rescaled on the fly
+PARAMS_NOT_TEST_KEYS = ["c_gl", "taum"]
+
+
+def _strip_keys(dict_test, strip_keys=PARAMS_NOT_TEST_KEYS):
+    return {k: v for k, v in dict_test.items() if k not in strip_keys}
+
+
+SEED = 42
+DURATION = 100.0
+DT = 0.01
+CORR_THRESHOLD = 0.9
+NEUROLIB_VARIABLES_TO_TEST = [("r_mean_EXC", "rates_exc"), ("r_mean_INH", "rates_inh")]
+
+# dictionary as backend name: format in which the noise is passed
+BACKENDS_TO_TEST = {
+    "jitcdde": lambda x: x.as_cubic_splines(),
+    "numba": lambda x: x.as_array(),
+}
+
+
+class TestALNCallbacks(unittest.TestCase):
+    SIGMA_TEST = 3.2
+    MU_TEST = 1.7
+    INTERP_EXPECTED = (37, 117, 0.8000000000000185, 0.7875000000002501)
+    FIRING_RATE_EXPECTED = 0.09444942503533124
+    VOLTAGE_EXPECTED = -56.70455755705249
+    TAU_EXPECTED = 0.4487499999999963
+
+    @classmethod
+    def setUpClass(cls):
+        cls.mass = ExcitatoryALNMass()
+
+    def test_get_interpolation_values(self):
+        self.assertTrue(callable(_get_interpolation_values))
+        print(type(_get_interpolation_values))
+        self.assertTrue(isinstance(_get_interpolation_values, numba.core.registry.CPUDispatcher))
+        interp_result = _get_interpolation_values(
+            self.SIGMA_TEST,
+            self.MU_TEST,
+            self.mass.sigma_range,
+            self.mass.mu_range,
+            self.mass.d_sigma,
+            self.mass.d_mu,
+        )
+        self.assertTupleEqual(interp_result, self.INTERP_EXPECTED)
+
+    def test_table_lookup(self):
+        self.assertTrue(callable(_table_lookup))
+        self.assertTrue(isinstance(_table_lookup, numba.core.registry.CPUDispatcher))
+        firing_rate = _table_lookup(
+            self.SIGMA_TEST,
+            self.MU_TEST,
+            self.mass.sigma_range,
+            self.mass.mu_range,
+            self.mass.d_sigma,
+            self.mass.d_mu,
+            self.mass.firing_rate_transfer_function,
+        )
+        self.assertEqual(firing_rate, self.FIRING_RATE_EXPECTED)
+
+        voltage = _table_lookup(
+            self.SIGMA_TEST,
+            self.MU_TEST,
+            self.mass.sigma_range,
+            self.mass.mu_range,
+            self.mass.d_sigma,
+            self.mass.d_mu,
+            self.mass.voltage_transfer_function,
+        )
+        self.assertEqual(voltage, self.VOLTAGE_EXPECTED)
+
+        tau = _table_lookup(
+            self.SIGMA_TEST,
+            self.MU_TEST,
+            self.mass.sigma_range,
+            self.mass.mu_range,
+            self.mass.d_sigma,
+            self.mass.d_mu,
+            self.mass.tau_transfer_function,
+        )
+        self.assertEqual(tau, self.TAU_EXPECTED)
+
+
+class ALNMassTestCase(unittest.TestCase):
+    def _run_node(self, node, duration, dt):
+        coupling_variables = {k: 0.0 for k in node.required_couplings}
+        noise = ZeroInput(duration, dt, independent_realisations=node.num_noise_variables).as_cubic_splines()
+        system = jitcdde_input(
+            node._derivatives(coupling_variables),
+            input=noise,
+            callback_functions=node._callbacks(),
+        )
+        system.constant_past(np.array(node.initial_state))
+        system.adjust_diff()
+        times = np.arange(dt, duration + dt, dt)
+        return np.vstack([system.integrate(time) for time in times])
+
+
+class TestALNMass(ALNMassTestCase):
+    def _create_exc_mass(self):
+        exc = ExcitatoryALNMass()
+        exc.index = 0
+        exc.idx_state_var = 0
+        exc.init_mass()
+        return exc
+
+    
def _create_inh_mass(self): + inh = InhibitoryALNMass() + inh.index = 0 + inh.idx_state_var = 0 + inh.init_mass() + return inh + + def test_init(self): + aln_exc = self._create_exc_mass() + aln_inh = self._create_inh_mass() + self.assertTrue(isinstance(aln_exc, ExcitatoryALNMass)) + self.assertTrue(isinstance(aln_inh, InhibitoryALNMass)) + self.assertDictEqual(_strip_keys(aln_exc.params), _strip_keys(ALN_EXC_DEFAULT_PARAMS)) + self.assertDictEqual(_strip_keys(aln_inh.params), _strip_keys(ALN_INH_DEFAULT_PARAMS)) + # test cascade + np.testing.assert_equal(aln_exc.mu_range, aln_inh.mu_range) + np.testing.assert_equal(aln_exc.sigma_range, aln_inh.sigma_range) + np.testing.assert_equal(aln_exc.firing_rate_transfer_function, aln_inh.firing_rate_transfer_function) + np.testing.assert_equal(aln_exc.voltage_transfer_function, aln_inh.voltage_transfer_function) + np.testing.assert_equal(aln_exc.tau_transfer_function, aln_inh.tau_transfer_function) + for aln in [aln_exc, aln_inh]: + # test cascade + self.assertTrue(callable(getattr(aln, "firing_rate_lookup"))) + self.assertTrue(callable(getattr(aln, "voltage_lookup"))) + self.assertTrue(callable(getattr(aln, "tau_lookup"))) + # test callbacks + self.assertEqual(len(aln._callbacks()), 3) + self.assertTrue(all(len(callback) == 3 for callback in aln._callbacks())) + # test numba callbacks + self.assertEqual(len(aln._numba_callbacks()), 3) + for numba_callbacks in aln._numba_callbacks(): + self.assertEqual(len(numba_callbacks), 2) + self.assertTrue(isinstance(numba_callbacks[0], str)) + self.assertTrue(isinstance(numba_callbacks[1], numba.core.registry.CPUDispatcher)) + # test derivatives + coupling_variables = {k: 0.0 for k in aln.required_couplings} + self.assertEqual( + len(aln._derivatives(coupling_variables)), + aln.num_state_variables, + ) + self.assertEqual(len(aln.initial_state), aln.num_state_variables) + self.assertEqual(len(aln.noise_input_idx), aln.num_noise_variables) + + def test_update_rescale_params(self): + # update params that have to do something with rescaling + UPDATE_PARAMS = {"C": 150.0, "Jee_max": 3.0} + aln = self._create_exc_mass() + aln.update_params(UPDATE_PARAMS) + self.assertEqual(aln.params["taum"], 15.0) + self.assertEqual(aln.params["c_gl"], 0.4 * aln.params["tau_se"] / 3.0) + + def test_run(self): + aln_exc = self._create_exc_mass() + aln_inh = self._create_inh_mass() + for aln in [aln_exc, aln_inh]: + result = self._run_node(aln, DURATION, DT) + self.assertTrue(isinstance(result, np.ndarray)) + self.assertTupleEqual(result.shape, (int(DURATION / DT), aln.num_state_variables)) + + +class TestALNNode(unittest.TestCase): + def _create_node(self): + node = ALNNode(exc_seed=SEED, inh_seed=SEED) + node.index = 0 + node.idx_state_var = 0 + node.init_node() + return node + + def test_init(self): + aln = self._create_node() + self.assertTrue(isinstance(aln, ALNNode)) + self.assertEqual(len(aln), 2) + self.assertDictEqual(_strip_keys(aln[0].params), _strip_keys(ALN_EXC_DEFAULT_PARAMS)) + self.assertDictEqual(_strip_keys(aln[1].params), _strip_keys(ALN_INH_DEFAULT_PARAMS)) + self.assertTrue(hasattr(aln, "_rescale_connectivity")) + self.assertEqual(len(aln._sync()), 4 * len(aln)) + self.assertEqual(len(aln.default_network_coupling), 2) + np.testing.assert_equal( + np.array(sum([alnm.initial_state for alnm in aln], [])), + aln.initial_state, + ) + + def test_update_rescale_params(self): + aln = self._create_node() + # update connectivity and check rescaling + old_rescaled = aln.connectivity.copy() + 
+
+
+class TestALNNode(unittest.TestCase):
+    def _create_node(self):
+        node = ALNNode(exc_seed=SEED, inh_seed=SEED)
+        node.index = 0
+        node.idx_state_var = 0
+        node.init_node()
+        return node
+
+    def test_init(self):
+        aln = self._create_node()
+        self.assertTrue(isinstance(aln, ALNNode))
+        self.assertEqual(len(aln), 2)
+        self.assertDictEqual(_strip_keys(aln[0].params), _strip_keys(ALN_EXC_DEFAULT_PARAMS))
+        self.assertDictEqual(_strip_keys(aln[1].params), _strip_keys(ALN_INH_DEFAULT_PARAMS))
+        self.assertTrue(hasattr(aln, "_rescale_connectivity"))
+        self.assertEqual(len(aln._sync()), 4 * len(aln))
+        self.assertEqual(len(aln.default_network_coupling), 2)
+        np.testing.assert_equal(
+            np.array(sum([alnm.initial_state for alnm in aln], [])),
+            aln.initial_state,
+        )
+
+    def test_update_rescale_params(self):
+        aln = self._create_node()
+        # update connectivity and check rescaling
+        old_rescaled = aln.connectivity.copy()
+        aln.update_params({"local_connectivity": 2 * ALN_NODE_DEFAULT_CONNECTIVITY})
+        np.testing.assert_equal(aln.connectivity, 2 * old_rescaled)
+
+    def test_run(self):
+        aln = self._create_node()
+        all_results = []
+        for backend, noise_func in BACKENDS_TO_TEST.items():
+            result = aln.run(
+                DURATION, DT, noise_func(ZeroInput(DURATION, DT, aln.num_noise_variables)), backend=backend
+            )
+            self.assertTrue(isinstance(result, xr.Dataset))
+            self.assertEqual(len(result), aln.num_state_variables)
+            self.assertTrue(all(state_var in result for state_var in aln.state_variable_names[0]))
+            self.assertTrue(
+                all(result[state_var].shape == (int(DURATION / DT), 1) for state_var in aln.state_variable_names[0])
+            )
+            all_results.append(result)
+        # check that results from different backends agree
+        for state_var in all_results[0]:
+            corr_mat = np.corrcoef(
+                np.vstack([result[state_var].values.flatten().astype(float) for result in all_results])
+            )
+            self.assertTrue(np.greater(corr_mat, CORR_THRESHOLD).all())
+
+    def test_compare_w_neurolib_native_model(self):
+        """
+        Compare with neurolib's native ALN model.
+        """
+        # run the MultiModel version
+        aln_multi = self._create_node()
+        multi_result = aln_multi.run(DURATION, DT, ZeroInput(DURATION, DT).as_array(), backend="numba")
+        # run neurolib's model
+        aln_neurolib = ALNModel(seed=SEED)
+        aln_neurolib.params["duration"] = DURATION
+        aln_neurolib.params["dt"] = DT
+        aln_neurolib.run()
+        for (var_multi, var_neurolib) in NEUROLIB_VARIABLES_TO_TEST:
+            corr_mat = np.corrcoef(aln_neurolib[var_neurolib], multi_result[var_multi].values.T)
+            self.assertTrue(np.greater(corr_mat, CORR_THRESHOLD).all())
+
+
+class TestALNNetwork(unittest.TestCase):
+    SC = np.random.rand(2, 2)
+    np.fill_diagonal(SC, 0.0)
+    DELAYS = np.array([[0.0, 7.8], [7.8, 0.0]])
+
+    def test_init(self):
+        aln = ALNNetwork(self.SC, self.DELAYS)
+        self.assertTrue(isinstance(aln, ALNNetwork))
+        self.assertEqual(len(aln), self.SC.shape[0])
+        self.assertEqual(aln.initial_state.shape[0], aln.num_state_variables)
+        self.assertEqual(aln.default_output, f"r_mean_{EXC}")
+
+    def test_run(self):
+        aln = ALNNetwork(self.SC, self.DELAYS, exc_seed=SEED, inh_seed=SEED)
+        all_results = []
+        for backend, noise_func in BACKENDS_TO_TEST.items():
+            result = aln.run(
+                DURATION,
+                DT,
+                noise_func(ZeroInput(DURATION, DT, aln.num_noise_variables)),
+                backend=backend,
+            )
+            self.assertTrue(isinstance(result, xr.Dataset))
+            self.assertEqual(len(result), aln.num_state_variables / aln.num_nodes)
+            self.assertTrue(all(result[result_].shape == (int(DURATION / DT), aln.num_nodes) for result_ in result))
+            all_results.append(result)
+        # check that results from different backends agree
+        for state_var in all_results[0]:
+            all_ts = np.vstack([result[state_var].values.flatten().astype(float) for result in all_results])
+            if np.isnan(all_ts).any():
+                continue
+            corr_mat = np.corrcoef(all_ts)
+            print(corr_mat)
+            self.assertTrue(np.greater(corr_mat, CORR_THRESHOLD).all())
+ """ + aln_multi = ALNNetwork(self.SC, self.DELAYS, exc_seed=SEED, inh_seed=SEED) + multi_result = aln_multi.run(DURATION, DT, ZeroInput(DURATION, DT).as_array(), backend="numba") + # run neurolib's model + aln_neurolib = ALNModel(Cmat=self.SC, Dmat=self.DELAYS, seed=SEED) + aln_neurolib.params["duration"] = DURATION + aln_neurolib.params["dt"] = DT + # there is no "global coupling" parameter in MultiModel + aln_neurolib.params["K_gl"] = 1.0 + # delays <-> length matrix + aln_neurolib.params["signalV"] = 1.0 + aln_neurolib.params["sigma_ou"] = 0.0 + # match initial state at least for current - this seems to be enough + aln_neurolib.params["mufe_init"] = np.array( + [aln_multi[0][0].initial_state[0], aln_multi[1][0].initial_state[0]] + ) + aln_neurolib.params["mufi_init"] = np.array( + [aln_multi[0][1].initial_state[0], aln_multi[1][1].initial_state[0]] + ) + aln_neurolib.run() + for (var_multi, var_neurolib) in NEUROLIB_VARIABLES_TO_TEST: + for node_idx in range(len(aln_multi)): + corr_mat = np.corrcoef( + aln_neurolib[var_neurolib][node_idx, :], multi_result[var_multi].values.T[node_idx, :] + ) + print(corr_mat) + self.assertTrue(np.greater(corr_mat, CORR_THRESHOLD).all()) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/multimodel/test_fitzhugh_nagumo.py b/tests/multimodel/test_fitzhugh_nagumo.py index 620edcf3..6933edd9 100644 --- a/tests/multimodel/test_fitzhugh_nagumo.py +++ b/tests/multimodel/test_fitzhugh_nagumo.py @@ -3,13 +3,13 @@ """ import unittest -import numba + import numpy as np import xarray as xr from jitcdde import jitcdde_input from neurolib.models.fhn import FHNModel from neurolib.models.multimodel.builder.fitzhugh_nagumo import ( - DEFAULT_PARAMS, + FHN_DEFAULT_PARAMS, FitzHughNagumoMass, FitzHughNagumoNetwork, FitzHughNagumoNode, @@ -51,7 +51,7 @@ def _create_mass(self): def test_init(self): fhn = self._create_mass() self.assertTrue(isinstance(fhn, FitzHughNagumoMass)) - self.assertDictEqual(fhn.params, DEFAULT_PARAMS) + self.assertDictEqual(fhn.params, FHN_DEFAULT_PARAMS) coupling_variables = {k: 0.0 for k in fhn.required_couplings} self.assertEqual(len(fhn._derivatives(coupling_variables)), fhn.num_state_variables) self.assertEqual(len(fhn.initial_state), fhn.num_state_variables) @@ -76,7 +76,7 @@ def test_init(self): fhn = self._create_node() self.assertTrue(isinstance(fhn, FitzHughNagumoNode)) self.assertEqual(len(fhn), 1) - self.assertDictEqual(fhn[0].params, DEFAULT_PARAMS) + self.assertDictEqual(fhn[0].params, FHN_DEFAULT_PARAMS) self.assertEqual(len(fhn.default_network_coupling), 2) np.testing.assert_equal(np.array(fhn[0].initial_state), fhn.initial_state) @@ -85,7 +85,10 @@ def test_run(self): all_results = [] for backend, noise_func in BACKENDS_TO_TEST.items(): result = fhn.run( - DURATION, DT, noise_func(ZeroInput(DURATION, DT, fhn.num_noise_variables)), backend=backend, + DURATION, + DT, + noise_func(ZeroInput(DURATION, DT, fhn.num_noise_variables)), + backend=backend, ) self.assertTrue(isinstance(result, xr.Dataset)) self.assertEqual(len(result), fhn.num_state_variables) @@ -134,7 +137,10 @@ def test_run(self): all_results = [] for backend, noise_func in BACKENDS_TO_TEST.items(): result = fhn.run( - DURATION, DT, noise_func(ZeroInput(DURATION, DT, fhn.num_noise_variables)), backend=backend, + DURATION, + DT, + noise_func(ZeroInput(DURATION, DT, fhn.num_noise_variables)), + backend=backend, ) self.assertTrue(isinstance(result, xr.Dataset)) self.assertEqual(len(result), fhn.num_state_variables / fhn.num_nodes) diff --git 
diff --git a/tests/multimodel/test_hopf.py b/tests/multimodel/test_hopf.py
index 01f10592..e3a472b6 100644
--- a/tests/multimodel/test_hopf.py
+++ b/tests/multimodel/test_hopf.py
@@ -7,7 +7,7 @@
 import xarray as xr
 from jitcdde import jitcdde_input
 from neurolib.models.hopf import HopfModel
-from neurolib.models.multimodel.builder.hopf import DEFAULT_PARAMS, HopfMass, HopfNetwork, HopfNode
+from neurolib.models.multimodel.builder.hopf import HOPF_DEFAULT_PARAMS, HopfMass, HopfNetwork, HopfNode
 from neurolib.models.multimodel.builder.model_input import ZeroInput
 
 SEED = 42
@@ -45,7 +45,7 @@ def _create_mass(self):
     def test_init(self):
         hopf = self._create_mass()
         self.assertTrue(isinstance(hopf, HopfMass))
-        self.assertDictEqual(hopf.params, DEFAULT_PARAMS)
+        self.assertDictEqual(hopf.params, HOPF_DEFAULT_PARAMS)
         coupling_variables = {k: 0.0 for k in hopf.required_couplings}
         self.assertEqual(len(hopf._derivatives(coupling_variables)), hopf.num_state_variables)
         self.assertEqual(len(hopf.initial_state), hopf.num_state_variables)
@@ -70,7 +70,7 @@ def test_init(self):
         hopf = self._create_node()
         self.assertTrue(isinstance(hopf, HopfNode))
         self.assertEqual(len(hopf), 1)
-        self.assertDictEqual(hopf[0].params, DEFAULT_PARAMS)
+        self.assertDictEqual(hopf[0].params, HOPF_DEFAULT_PARAMS)
         self.assertEqual(len(hopf.default_network_coupling), 2)
         np.testing.assert_equal(np.array(hopf[0].initial_state), hopf.initial_state)
@@ -79,7 +79,10 @@ def test_run(self):
         all_results = []
         for backend, noise_func in BACKENDS_TO_TEST.items():
             result = hopf.run(
-                DURATION, DT, noise_func(ZeroInput(DURATION, DT, hopf.num_noise_variables)), backend=backend,
+                DURATION,
+                DT,
+                noise_func(ZeroInput(DURATION, DT, hopf.num_noise_variables)),
+                backend=backend,
             )
             self.assertTrue(isinstance(result, xr.Dataset))
             self.assertEqual(len(result), hopf.num_state_variables)
@@ -128,7 +131,10 @@ def test_run(self):
         all_results = []
         for backend, noise_func in BACKENDS_TO_TEST.items():
             result = hopf.run(
-                DURATION, DT, noise_func(ZeroInput(DURATION, DT, hopf.num_noise_variables)), backend=backend,
+                DURATION,
+                DT,
+                noise_func(ZeroInput(DURATION, DT, hopf.num_noise_variables)),
+                backend=backend,
             )
             self.assertTrue(isinstance(result, xr.Dataset))
             self.assertEqual(len(result), hopf.num_state_variables / hopf.num_nodes)
diff --git a/tests/multimodel/test_model_input.py b/tests/multimodel/test_model_input.py
index 1ad9d336..525a2272 100644
--- a/tests/multimodel/test_model_input.py
+++ b/tests/multimodel/test_model_input.py
@@ -59,7 +59,13 @@ def test_generate_input(self):
 
 class TestOrnsteinUhlenbeckProcess(unittest.TestCase):
     def test_generate_input(self):
         ou = OrnsteinUhlenbeckProcess(
-            duration=DURATION, dt=DT, mu=3.0, sigma=0.1, tau=2 * DT, independent_realisations=2, seed=42,
+            duration=DURATION,
+            dt=DT,
+            mu=3.0,
+            sigma=0.1,
+            tau=2 * DT,
+            independent_realisations=2,
+            seed=42,
         ).generate_input()
         self.assertTrue(isinstance(ou, np.ndarray))
         self.assertTupleEqual(ou.shape, SHAPE)
@@ -70,7 +76,11 @@ class TestStepInput(unittest.TestCase):
 
     def test_generate_input(self):
         step = StepInput(
-            duration=DURATION, dt=DT, step_size=self.STEP_SIZE, independent_realisations=2, seed=42,
+            duration=DURATION,
+            dt=DT,
+            step_size=self.STEP_SIZE,
+            independent_realisations=2,
+            seed=42,
         ).generate_input()
         self.assertTrue(isinstance(step, np.ndarray))
         self.assertTupleEqual(step.shape, SHAPE)
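
All of these input classes share the constructor pattern visible in the hunks (duration, dt, shape parameters, `independent_realisations`, `seed`) and a `generate_input()` method; the suite's `SHAPE` constant fixes the expected array shape. A usage sketch, assuming only the signatures and import path shown in these diffs (the amplitude/period values are made up):

    from neurolib.models.multimodel.builder.model_input import (
        OrnsteinUhlenbeckProcess,
        SinusoidalInput,
    )

    DURATION, DT = 100.0, 0.01  # ms

    # coloured noise fluctuating around a mean drive of 3.0
    ou = OrnsteinUhlenbeckProcess(
        duration=DURATION, dt=DT, mu=3.0, sigma=0.1, tau=2 * DT,
        independent_realisations=2, seed=42,
    ).generate_input()

    # deterministic oscillatory drive
    sin = SinusoidalInput(
        duration=DURATION, dt=DT, amplitude=1.0, period=25.0,
        independent_realisations=2, seed=42,
    ).generate_input()

    print(ou.shape, sin.shape)  # both equal the suite's SHAPE constant
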
@@ -97,7 +107,12 @@ class TestSinusoidalInput(unittest.TestCase):
 
     def test_generate_input(self):
         sin = SinusoidalInput(
-            duration=DURATION, dt=DT, amplitude=self.AMPLITUDE, period=self.PERIOD, independent_realisations=2, seed=42,
+            duration=DURATION,
+            dt=DT,
+            amplitude=self.AMPLITUDE,
+            period=self.PERIOD,
+            independent_realisations=2,
+            seed=42,
         ).generate_input()
         self.assertTrue(isinstance(sin, np.ndarray))
         self.assertTupleEqual(sin.shape, SHAPE)
@@ -125,7 +140,12 @@ class TestSquareInput(unittest.TestCase):
 
     def test_generate_input(self):
         sq = SquareInput(
-            duration=DURATION, dt=DT, amplitude=self.AMPLITUDE, period=self.PERIOD, independent_realisations=2, seed=42,
+            duration=DURATION,
+            dt=DT,
+            amplitude=self.AMPLITUDE,
+            period=self.PERIOD,
+            independent_realisations=2,
+            seed=42,
         ).generate_input()
         self.assertTrue(isinstance(sq, np.ndarray))
         self.assertTupleEqual(sq.shape, SHAPE)
diff --git a/tests/multimodel/test_thalamus.py b/tests/multimodel/test_thalamus.py
index 1c3e314a..ae1f7401 100644
--- a/tests/multimodel/test_thalamus.py
+++ b/tests/multimodel/test_thalamus.py
@@ -9,8 +9,8 @@
 from jitcdde import jitcdde_input
 from neurolib.models.multimodel.builder.model_input import ZeroInput
 from neurolib.models.multimodel.builder.thalamus import (
-    DEFAULT_PARAMS_TCR,
-    DEFAULT_PARAMS_TRN,
+    TCR_DEFAULT_PARAMS,
+    TRN_DEFAULT_PARAMS,
     ThalamicNode,
     ThalamicReticularMass,
     ThalamocorticalMass,
@@ -65,8 +65,8 @@ def test_init(self):
         tcr = self._create_tcr_mass()
         trn = self._create_trn_mass()
         self.assertTrue(isinstance(tcr, ThalamocorticalMass))
         self.assertTrue(isinstance(trn, ThalamicReticularMass))
-        self.assertDictEqual(tcr.params, DEFAULT_PARAMS_TCR)
-        self.assertDictEqual(trn.params, DEFAULT_PARAMS_TRN)
+        self.assertDictEqual(tcr.params, TCR_DEFAULT_PARAMS)
+        self.assertDictEqual(trn.params, TRN_DEFAULT_PARAMS)
         for thlm in [tcr, trn]:
             coupling_variables = {k: 0.0 for k in thlm.required_couplings}
             self.assertEqual(
@@ -97,8 +97,8 @@ def test_init(self):
         thlm = self._create_node()
         self.assertTrue(isinstance(thlm, ThalamicNode))
         self.assertEqual(len(thlm), 2)
-        self.assertDictEqual(thlm[0].params, DEFAULT_PARAMS_TCR)
-        self.assertDictEqual(thlm[1].params, DEFAULT_PARAMS_TRN)
+        self.assertDictEqual(thlm[0].params, TCR_DEFAULT_PARAMS)
+        self.assertDictEqual(thlm[1].params, TRN_DEFAULT_PARAMS)
         self.assertEqual(len(thlm.default_network_coupling), 2)
         np.testing.assert_equal(
             np.array(sum([thlmm.initial_state for thlmm in thlm], [])),
diff --git a/tests/multimodel/test_wilson_cowan.py b/tests/multimodel/test_wilson_cowan.py
index 25382373..b2d8aae4 100644
--- a/tests/multimodel/test_wilson_cowan.py
+++ b/tests/multimodel/test_wilson_cowan.py
@@ -10,8 +10,8 @@
 from neurolib.models.multimodel.builder.base.constants import EXC
 from neurolib.models.multimodel.builder.model_input import ZeroInput
 from neurolib.models.multimodel.builder.wilson_cowan import (
-    DEFAULT_PARAMS_EXC,
-    DEFAULT_PARAMS_INH,
+    WC_EXC_DEFAULT_PARAMS,
+    WC_INH_DEFAULT_PARAMS,
     ExcitatoryWilsonCowanMass,
     InhibitoryWilsonCowanMass,
     WilsonCowanNetwork,
@@ -22,7 +22,7 @@
 SEED = 42
 DURATION = 100.0
 DT = 0.01
-CORR_THRESHOLD = 0.99
+CORR_THRESHOLD = 0.93
 NEUROLIB_VARIABLES_TO_TEST = [("q_mean_EXC", "exc"), ("q_mean_INH", "inh")]
 
 # dictionary as backend name: format in which the noise is passed
@@ -63,8 +63,8 @@ def test_init(self):
         wc_exc = self._create_exc_mass()
         wc_inh = self._create_inh_mass()
         self.assertTrue(isinstance(wc_exc, ExcitatoryWilsonCowanMass))
         self.assertTrue(isinstance(wc_inh, InhibitoryWilsonCowanMass))
-        self.assertDictEqual(wc_exc.params, DEFAULT_PARAMS_EXC)
-        self.assertDictEqual(wc_inh.params, DEFAULT_PARAMS_INH)
+        self.assertDictEqual(wc_exc.params, WC_EXC_DEFAULT_PARAMS)
+        self.assertDictEqual(wc_inh.params, WC_INH_DEFAULT_PARAMS)
         for wc in [wc_exc, wc_inh]:
             coupling_variables = {k: 0.0 for k in wc.required_couplings}
             self.assertEqual(len(wc._derivatives(coupling_variables)), wc.num_state_variables)
@@ -92,18 +92,24 @@ def test_init(self):
         wc = self._create_node()
         self.assertTrue(isinstance(wc, WilsonCowanNode))
         self.assertEqual(len(wc), 2)
-        self.assertDictEqual(wc[0].params, DEFAULT_PARAMS_EXC)
-        self.assertDictEqual(wc[1].params, DEFAULT_PARAMS_INH)
+        self.assertDictEqual(wc[0].params, WC_EXC_DEFAULT_PARAMS)
+        self.assertDictEqual(wc[1].params, WC_INH_DEFAULT_PARAMS)
         self.assertEqual(len(wc.default_network_coupling), 2)
         np.testing.assert_equal(
-            np.array(sum([wcm.initial_state for wcm in wc], [])), wc.initial_state,
+            np.array(sum([wcm.initial_state for wcm in wc], [])),
+            wc.initial_state,
         )
 
     def test_run(self):
         wc = self._create_node()
         all_results = []
         for backend, noise_func in BACKENDS_TO_TEST.items():
-            result = wc.run(DURATION, DT, noise_func(ZeroInput(DURATION, DT, wc.num_noise_variables)), backend=backend,)
+            result = wc.run(
+                DURATION,
+                DT,
+                noise_func(ZeroInput(DURATION, DT, wc.num_noise_variables)),
+                backend=backend,
+            )
             self.assertTrue(isinstance(result, xr.Dataset))
             self.assertEqual(len(result), wc.num_state_variables)
             self.assertTrue(all(state_var in result for state_var in wc.state_variable_names[0]))
@@ -139,8 +145,8 @@ def test_compare_w_neurolib_native_model(self):
 
 
 class TestWilsonCowanNetwork(unittest.TestCase):
-    SC = np.array(([[0.0, 1.0], [0.0, 0.0]]))
-    DELAYS = np.array([[0.0, 0.0], [0.0, 0.0]])
+    SC = np.array(([[0.0, 1.0], [1.0, 0.0]]))
+    DELAYS = np.array([[0.0, 10.0], [10.0, 0.0]])
 
     def test_init(self):
         wc = WilsonCowanNetwork(self.SC, self.DELAYS)
@@ -153,16 +159,23 @@ def test_run(self):
         wc = WilsonCowanNetwork(self.SC, self.DELAYS, exc_seed=SEED, inh_seed=SEED)
         all_results = []
         for backend, noise_func in BACKENDS_TO_TEST.items():
-            result = wc.run(DURATION, DT, noise_func(ZeroInput(DURATION, DT, wc.num_noise_variables)), backend=backend,)
+            result = wc.run(
+                DURATION,
+                DT,
+                noise_func(ZeroInput(DURATION, DT, wc.num_noise_variables)),
+                backend=backend,
+            )
             self.assertTrue(isinstance(result, xr.Dataset))
             self.assertEqual(len(result), wc.num_state_variables / wc.num_nodes)
             self.assertTrue(all(result[result_].shape == (int(DURATION / DT), wc.num_nodes) for result_ in result))
             all_results.append(result)
         # test results are the same from different backends
         for state_var in all_results[0]:
-            corr_mat = np.corrcoef(
-                np.vstack([result[state_var].values.flatten().astype(float) for result in all_results])
-            )
+            all_ts = np.vstack([result[state_var].values.flatten().astype(float) for result in all_results])
+            if np.isnan(all_ts).any():
+                continue
+            corr_mat = np.corrcoef(all_ts)
+            print(corr_mat)
             self.assertTrue(np.greater(corr_mat, CORR_THRESHOLD).all())
@@ -186,9 +199,12 @@ def test_compare_w_neurolib_native_model(self):
         wc_neurolib.run()
         for (var_multi, var_neurolib) in NEUROLIB_VARIABLES_TO_TEST:
             for node_idx in range(len(wc_multi)):
-                corr_mat = np.corrcoef(
-                    wc_neurolib[var_neurolib][node_idx, :], multi_result[var_multi].values.T[node_idx, :]
-                )
+                neurolib_ts = wc_neurolib[var_neurolib][node_idx, :]
+                multi_ts = multi_result[var_multi].values.T[node_idx, :]
+                if np.isnan(neurolib_ts).any() or np.isnan(multi_ts).any():
+                    continue
+                corr_mat = np.corrcoef(neurolib_ts, multi_ts)
+                print(var_multi, node_idx, corr_mat)
                 self.assertTrue(np.greater(corr_mat, CORR_THRESHOLD).all())
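
The NaN guards added above are not cosmetic: `np.corrcoef` propagates NaNs, so a single NaN sample (e.g. from a diverged integration) turns the relevant correlation-matrix entries into NaN, and a NaN compared against the threshold is False — the test would then fail for a reason unrelated to backend disagreement. A minimal demonstration:

    import numpy as np

    a = np.array([1.0, 2.0, 3.0, np.nan])
    b = np.array([1.0, 2.0, 3.0, 4.0])

    corr = np.corrcoef(a, b)
    print(corr)                          # entries involving `a` are nan
    print(np.greater(corr, 0.9).all())   # False, despite the finite samples agreeing
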
diff --git a/tests/multimodel/test_wong_wang.py b/tests/multimodel/test_wong_wang.py
index f1756d40..8238d22e 100644
--- a/tests/multimodel/test_wong_wang.py
+++ b/tests/multimodel/test_wong_wang.py
@@ -9,9 +9,9 @@
 from neurolib.models.multimodel.builder.base.constants import EXC
 from neurolib.models.multimodel.builder.model_input import ZeroInput
 from neurolib.models.multimodel.builder.wong_wang import (
-    DEFAULT_PARAMS_EXC,
-    DEFAULT_PARAMS_INH,
-    DEFAULT_PARAMS_REDUCED,
+    WW_EXC_DEFAULT_PARAMS,
+    WW_INH_DEFAULT_PARAMS,
+    WW_REDUCED_DEFAULT_PARAMS,
     ExcitatoryWongWangMass,
     InhibitoryWongWangMass,
     ReducedWongWangMass,
@@ -64,8 +64,8 @@ def test_init(self):
         ww_exc = self._create_exc_mass()
         ww_inh = self._create_inh_mass()
         self.assertTrue(isinstance(ww_exc, ExcitatoryWongWangMass))
         self.assertTrue(isinstance(ww_inh, InhibitoryWongWangMass))
-        self.assertDictEqual(ww_exc.params, DEFAULT_PARAMS_EXC)
-        self.assertDictEqual(ww_inh.params, DEFAULT_PARAMS_INH)
+        self.assertDictEqual(ww_exc.params, WW_EXC_DEFAULT_PARAMS)
+        self.assertDictEqual(ww_inh.params, WW_INH_DEFAULT_PARAMS)
         for ww in [ww_exc, ww_inh]:
             coupling_variables = {k: 0.0 for k in ww.required_couplings}
             self.assertEqual(len(ww._derivatives(coupling_variables)), ww.num_state_variables)
@@ -92,7 +92,7 @@ def _create_mass(self):
     def test_init(self):
         rww = self._create_mass()
         self.assertTrue(isinstance(rww, ReducedWongWangMass))
-        self.assertDictEqual(rww.params, DEFAULT_PARAMS_REDUCED)
+        self.assertDictEqual(rww.params, WW_REDUCED_DEFAULT_PARAMS)
         coupling_variables = {k: 0.0 for k in rww.required_couplings}
         self.assertEqual(len(rww._derivatives(coupling_variables)), rww.num_state_variables)
         self.assertEqual(len(rww.initial_state), rww.num_state_variables)
@@ -117,18 +117,24 @@ def test_init(self):
         ww = self._create_node()
         self.assertTrue(isinstance(ww, WongWangNode))
         self.assertEqual(len(ww), 2)
-        self.assertDictEqual(ww[0].params, DEFAULT_PARAMS_EXC)
-        self.assertDictEqual(ww[1].params, DEFAULT_PARAMS_INH)
+        self.assertDictEqual(ww[0].params, WW_EXC_DEFAULT_PARAMS)
+        self.assertDictEqual(ww[1].params, WW_INH_DEFAULT_PARAMS)
         self.assertEqual(len(ww.default_network_coupling), 2)
         np.testing.assert_equal(
-            np.array(sum([wwm.initial_state for wwm in ww], [])), ww.initial_state,
+            np.array(sum([wwm.initial_state for wwm in ww], [])),
+            ww.initial_state,
         )
 
     def test_run(self):
         ww = self._create_node()
         all_results = []
         for backend, noise_func in BACKENDS_TO_TEST.items():
-            result = ww.run(DURATION, DT, noise_func(ZeroInput(DURATION, DT, ww.num_noise_variables)), backend=backend,)
+            result = ww.run(
+                DURATION,
+                DT,
+                noise_func(ZeroInput(DURATION, DT, ww.num_noise_variables)),
+                backend=backend,
+            )
             self.assertTrue(isinstance(result, xr.Dataset))
             self.assertEqual(len(result), ww.num_state_variables)
             self.assertTrue(all(state_var in result for state_var in ww.state_variable_names[0]))
@@ -156,7 +162,7 @@ def test_init(self):
         rww = self._create_node()
         self.assertTrue(isinstance(rww, ReducedWongWangNode))
         self.assertEqual(len(rww), 1)
-        self.assertDictEqual(rww[0].params, DEFAULT_PARAMS_REDUCED)
+        self.assertDictEqual(rww[0].params, WW_REDUCED_DEFAULT_PARAMS)
         self.assertEqual(len(rww.default_network_coupling), 1)
         np.testing.assert_equal(np.array(rww[0].initial_state), rww.initial_state)
@@ -197,7 +203,12 @@ def test_run(self):
         ww = WongWangNetwork(self.SC, self.DELAYS, exc_seed=SEED, inh_seed=SEED)
         all_results = []
         for backend, noise_func in BACKENDS_TO_TEST.items():
-            result = ww.run(DURATION, DT, noise_func(ZeroInput(DURATION, DT, ww.num_noise_variables)), backend=backend,)
+            result = ww.run(
+                DURATION,
+                DT,
+                noise_func(ZeroInput(DURATION, DT, ww.num_noise_variables)),
+                backend=backend,
+            )
             self.assertTrue(isinstance(result, xr.Dataset))
             self.assertEqual(len(result), ww.num_state_variables / ww.num_nodes)
             self.assertTrue(all(result[result_].shape == (int(DURATION / DT), ww.num_nodes) for result_ in result))
@@ -226,7 +237,10 @@ def test_run(self):
         all_results = []
         for backend, noise_func in BACKENDS_TO_TEST.items():
             result = rww.run(
-                DURATION, DT, noise_func(ZeroInput(DURATION, DT, rww.num_noise_variables)), backend=backend,
+                DURATION,
+                DT,
+                noise_func(ZeroInput(DURATION, DT, rww.num_noise_variables)),
+                backend=backend,
             )
             self.assertTrue(isinstance(result, xr.Dataset))
             self.assertEqual(len(result), rww.num_state_variables / rww.num_nodes)
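
Every `test_run` in these files repeats the same cross-backend pattern: run the model once per backend, stack each state variable's time series, and require all pairwise correlations to exceed `CORR_THRESHOLD`. A hypothetical helper distilling that pattern (names invented for illustration; not part of the patch):

    import numpy as np

    def backends_agree(results, state_var, threshold=0.9):
        """True if `state_var` correlates above `threshold` across all backends.

        `results` is a list of xarray Datasets, one per backend run.
        """
        ts = np.vstack([r[state_var].values.flatten().astype(float) for r in results])
        if np.isnan(ts).any():
            # mirror the tests above: skip variables with undefined correlations
            return True
        return bool(np.greater(np.corrcoef(ts), threshold).all())
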