diff --git a/404.html b/404.html
index a474a98..3bcfd9b 100644
--- a/404.html
+++ b/404.html
@@ -699,6 +699,86 @@
[navigation sidebar updated with links to the four new pages — Example 5.1 oc phenomenological model deterministic, Example 5.2 oc wc model deterministic, Example 5.3 oc wc model noisy, Example 5.4 oc aln model deterministic; the same sidebar update is applied to contributing/index.html and to every existing example page (examples 0 through 4.2)]

diff --git a/examples/example-5.1-oc-phenomenological-model-deterministic/index.html b/examples/example-5.1-oc-phenomenological-model-deterministic/index.html
new file mode 100644
index 0000000..e1f8d01
--- /dev/null
+++ b/examples/example-5.1-oc-phenomenological-model-deterministic/index.html
@@ -0,0 +1,1953 @@

    Example 5.1 oc phenomenological model deterministic


    Optimal control of deterministic phenomenological models


    This notebook shows how to compute the optimal control (OC) signal for phenomenological models (FHN, Hopf) for a simple example task.

    import matplotlib.pyplot as plt
    +import numpy as np
    +import os
    +
    +while os.getcwd().split(os.sep)[-1] != "neurolib":
    +    os.chdir('..')
    +
    +# We import the model, stimuli, and the optimal control package
    +from neurolib.models.fhn import FHNModel
    +from neurolib.models.hopf import HopfModel
    +from neurolib.utils.stimulus import ZeroInput
    +from neurolib.control.optimal_control import oc_fhn
    +from neurolib.control.optimal_control import oc_hopf
    +from neurolib.utils.plot_oc import plot_oc_singlenode, plot_oc_network
    +
    +# This will reload all imports as soon as the code changes
    +%load_ext autoreload
    +%autoreload 3
    +

We stimulate the system with a known control signal, define the resulting activity as the target, and compute the optimal control for this target. We define the weights such that only precision is penalized (w_p=1, w_2=0); hence, the optimal control signal should converge to the input signal.

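For orientation, the total cost that the algorithm minimizes is presumably of the standard quadratic form used for such OC problems (the exact normalization in neurolib may differ), with w_p weighting the precision term and w_2 the control energy:

$$ J(u) = \int_0^T \Big( \frac{w_p}{2} \sum_{n,k} \big(x_{n,k}(t) - x^{\mathrm{target}}_{n,k}(t)\big)^2 + \frac{w_2}{2} \sum_{n,k} u_{n,k}(t)^2 \Big)\, dt $$

With w_2 = 0, deviations from the target are penalized while the control itself is free, which is why the recovered control can converge to the original stimulus.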
# We instantiate the model
    +model = FHNModel()
    +# model = HopfModel()    # OC can be computed for the Hopf model completely analogously
    +
    +# Some parameters to define stimulation signals
    +dt = model.params["dt"]
    +duration = 10.
    +amplitude = 1.
    +period = duration/4.
    +
    +# We define a "zero-input", and a sine-input
    +zero_input = ZeroInput().generate_input(duration=duration+dt, dt=dt)
    +input = np.copy(zero_input)
    +input[0,1:-2] = np.sin(2.*np.pi*np.arange(0,duration-0.2, dt)/period) # other functions or random values can be used as well
    +
    +# We set the duration of the simulation and the initial values
    +model.params["duration"] = duration
    +x_init = 0.
    +y_init = 0.
    +model.params["xs_init"] = np.array([[x_init]])
    +model.params["ys_init"] = np.array([[y_init]])
    +
    # We set the stimulus in x and y variables, and run the simulation
    +model.params["x_ext"] = input
    +model.params["y_ext"] = zero_input
    +model.run()
    +
    +# Define the result of the stimulation as target
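+# (target has shape (N, variables, timesteps + 1): the initial state is
+# prepended to the simulated trajectory along the time axis)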
    +target = np.concatenate((np.concatenate( (model.params["xs_init"], model.params["ys_init"]), axis=1)[:,:, np.newaxis], np.stack( (model.x, model.y), axis=1)), axis=2)
    +target_input = np.concatenate( (input,zero_input), axis=0)[np.newaxis,:,:]
    +
    +# Remove stimuli and re-run the simulation
    +model.params["x_ext"] = zero_input
    +model.params["y_ext"] = zero_input
    +control = np.concatenate( (zero_input,zero_input), axis=0)[np.newaxis,:,:]
    +model.run()
    +
    +# combine initial value and simulation result to one array
    +state = np.concatenate((np.concatenate( (model.params["xs_init"], model.params["ys_init"]), axis=1)[:,:, np.newaxis], np.stack( (model.x, model.y), axis=1)), axis=2)
    +
    +plot_oc_singlenode(duration, dt, state, target, control, target_input)
    +
    # We set the external stimulation to zero. This is the "initial guess" for the OC algorithm
    +model.params["x_ext"] = zero_input
    +model.params["y_ext"] = zero_input
    +
    +# We load the optimal control class
+# print_array (optional parameter) defines for which iterations intermediate results will be printed
    +# Parameters will be taken from the input model
    +if model.name == 'fhn':
    +    model_controlled = oc_fhn.OcFhn(model, target, print_array=np.arange(0,501,25))
    +elif model.name == 'hopf':
    +    model_controlled = oc_hopf.OcHopf(model, target, print_array=np.arange(0,501,25))
    +
+# By default, the weights are set to w_p = 1 and w_2 = 0, meaning that energy costs do not contribute.
+# The algorithm will then produce a control such that the signal matches the target as closely as possible, regardless of the strength of the required control input.
    +# If you want to adjust the ratio of precision and energy weight, you can change the values in the weights dictionary
    +model_controlled.weights["w_p"] = 1. # default value 1
    +model_controlled.weights["w_2"] = 0. # default value 0
    +
    +# We run 500 iterations of the optimal control gradient descent algorithm
    +model_controlled.optimize(500)
    +
    +state = model_controlled.get_xs()
    +control = model_controlled.control
    +
    +plot_oc_singlenode(duration, dt, state, target, control, target_input, model_controlled.cost_history)
    +
    +Compute control for a deterministic system
    +Cost in iteration 0: 0.5533851530971279
    +Cost in iteration 25: 0.2424229146183965
    +Cost in iteration 50: 0.1584467235220361
    +Cost in iteration 75: 0.12000029040838786
    +Cost in iteration 100: 0.09606458437628636
    +Cost in iteration 125: 0.07875899052824148
    +Cost in iteration 150: 0.06567349888722097
    +Cost in iteration 175: 0.055617171219608186
    +Cost in iteration 200: 0.04682087916195195
    +Cost in iteration 225: 0.03978086855629591
    +Cost in iteration 250: 0.03392391540076884
    +Cost in iteration 275: 0.028992099916335258
    +Cost in iteration 300: 0.024790790776996006
    +Cost in iteration 325: 0.021330380416435698
    +Cost in iteration 350: 0.018279402174332753
    +Cost in iteration 375: 0.01576269909191436
    +Cost in iteration 400: 0.013565848707923062
    +Cost in iteration 425: 0.011714500580338114
    +Cost in iteration 450: 0.009981011218383677
    +Cost in iteration 475: 0.008597600155106654
    +Cost in iteration 500: 0.007380756958683128
    +Final cost : 0.007380756958683128
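Since w_2 = 0 in this run, only the precision term contributed to the printed cost. A hedged sketch of the opposite trade-off (the weight value is purely illustrative and not taken from this example):

# Hypothetical variation: add an energy penalty, so the optimizer trades
# target precision against input strength; with w_2 > 0 the recovered
# control generally undershoots the original stimulus.
model_controlled.weights["w_p"] = 1.
model_controlled.weights["w_2"] = 1e-2  # illustrative value, tune per task
model_controlled.optimize(100)          # continues from the current control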
    # Do another 100 iterations if you want to.
    +# Repeated execution will continue with further 100 iterations.
    +model_controlled.optimize(100)
    +state = model_controlled.get_xs()
    +control = model_controlled.control
    +plot_oc_singlenode(duration, dt, state, target, control, target_input, model_controlled.cost_history)
    +
    +Compute control for a deterministic system
    +Cost in iteration 0: 0.007380756958683128
    +Cost in iteration 25: 0.0063153874519220445
    +Cost in iteration 50: 0.00541103301473969
    +Cost in iteration 75: 0.004519862815977447
    +Cost in iteration 100: 0.003828425847813115
    +Final cost : 0.003828425847813115

    Network of neural populations (no delay)


Let us now study a simple 2-node network of FHN oscillators. We first define the coupling matrix and the distance matrix. We can then initialize the model.

    cmat = np.array( [[0., 0.5], [1., 0.]] )  # diagonal elements are zero, connection strength is 1 (0.5) from node 0 to node 1 (from node 1 to node 0)
    +dmat = np.array( [[0., 0.], [0., 0.]] )  # no delay
    +
    +if model.name == 'fhn':
    +    model = FHNModel(Cmat=cmat, Dmat=dmat)
    +elif model.name == 'hopf':
    +    model = HopfModel(Cmat=cmat, Dmat=dmat)
    +
    +# we define the control input matrix to enable or disable certain channels and nodes
    +control_mat = np.zeros( (model.params.N, len(model.state_vars)) )
    +control_mat[0,0] = 1. # only allow inputs in x-channel in node 0
    +
+if control_mat[0,0] == 0. and control_mat[1,0] == 0.:
+    # if x were an input channel, high connection strength could lead to numerical issues;
+    # since it is not, the coupling can safely be increased
+    model.params.K_gl = 5. # increase for stronger connectivity, WARNING: too high a value will cause numerical problems
    +
    +model.params["duration"] = duration
    +zero_input = ZeroInput().generate_input(duration=duration+dt, dt=dt)
    +input = np.copy(zero_input)
    +input[0,1:-3] = np.sin(np.arange(0,duration-0.3, dt)) # other functions or random values can be used as well
    +model.params["xs_init"] = np.vstack( [x_init, x_init] )
    +model.params["ys_init"] = np.vstack( [y_init, y_init] )
    +
    +# We set the stimulus in x and y variables, and run the simulation
    +input_nw = np.concatenate( (np.vstack( [control_mat[0,0] * input, control_mat[0,1] * input] )[np.newaxis,:,:],
    +                            np.vstack( [control_mat[1,0] * input, control_mat[1,1] * input] )[np.newaxis,:,:]), axis=0)
    +zero_input_nw = np.concatenate( (np.vstack( [zero_input, zero_input] )[np.newaxis,:,:],
    +                                 np.vstack( [zero_input, zero_input] )[np.newaxis,:,:]), axis=0)
    +
    +model.params["x_ext"] = input_nw[:,0,:]
    +model.params["y_ext"] = input_nw[:,1,:]
    +
    +model.run()
    +
    +# Define the result of the stimulation as target
    +target = np.concatenate( (np.concatenate( (model.params["xs_init"], model.params["ys_init"]), axis=1)[:,:, np.newaxis], np.stack( (model.x, model.y), axis=1)), axis=2)
    +
    +# Remove stimuli and re-run the simulation
    +model.params["x_ext"] = zero_input_nw[:,0,:]
    +model.params["y_ext"] = zero_input_nw[:,0,:]
    +model.run()
    +
    +# combine initial value and simulation result to one array
    +state =  np.concatenate( (np.concatenate( (model.params["xs_init"], model.params["ys_init"]), axis=1)[:,:, np.newaxis], np.stack( (model.x, model.y), axis=1)), axis=2)
    +
    +plot_oc_network(model.params.N, duration, dt, state, target, zero_input_nw, input_nw)
    +
# we define the precision matrix to specify in which nodes and channels we measure deviations from the target
+cost_mat = np.zeros( (model.params.N, len(model.output_vars)) )
+cost_mat[1,0] = 1. # only measure in the x-channel in node 1
    +
    +# We set the external stimulation to zero. This is the "initial guess" for the OC algorithm
    +model.params["x_ext"] = zero_input_nw[:,0,:]
    +model.params["y_ext"] = zero_input_nw[:,0,:]
    +
    +# We load the optimal control class
+# print_array (optional parameter) defines for which iterations intermediate results will be printed
    +# Parameters will be taken from the input model
    +if model.name == 'fhn':
    +    model_controlled = oc_fhn.OcFhn(model, target, print_array=np.arange(0,501,25), control_matrix=control_mat, cost_matrix=cost_mat)
    +elif model.name == 'hopf':
    +    model_controlled = oc_hopf.OcHopf(model, target, print_array=np.arange(0,501,25), control_matrix=control_mat, cost_matrix=cost_mat)
    +
    +# We run 500 iterations of the optimal control gradient descent algorithm
    +model_controlled.optimize(500)
    +
    +state = model_controlled.get_xs()
    +control = model_controlled.control
    +
    +plot_oc_network(model.params.N, duration, dt, state, target, control, input_nw, model_controlled.cost_history, model_controlled.step_sizes_history)
    +
    +Compute control for a deterministic system
    +Cost in iteration 0: 0.26634675059119883
    +Cost in iteration 25: 0.007720097126561841
    +Cost in iteration 50: 0.0034680947661811417
    +Cost in iteration 75: 0.0019407060206991053
    +Cost in iteration 100: 0.0014869014234351792
    +Cost in iteration 125: 0.0012416880831819742
    +Cost in iteration 150: 0.001092671530708714
    +Cost in iteration 175: 0.0009785714578839102
    +Cost in iteration 200: 0.0008690983607758308
    +Cost in iteration 225: 0.0007820993626886098
    +Cost in iteration 250: 0.0007014496869583778
    +Cost in iteration 275: 0.0006336452348537255
    +Cost in iteration 300: 0.0005674277634957603
    +Cost in iteration 325: 0.0005103364437866347
    +Cost in iteration 350: 0.0004672824975699639
    +Cost in iteration 375: 0.0004270480894871664
    +Cost in iteration 400: 0.00038299359917410083
    +Cost in iteration 425: 0.00033863450743146543
    +Cost in iteration 450: 0.0002822096745731488
    +Cost in iteration 475: 0.00025498430139333237
    +Cost in iteration 500: 0.0002317087704141942
    +Final cost : 0.0002317087704141942
    # Do another 100 iterations if you want to.
    +# Repeated execution will continue with further 100 iterations.
    +model_controlled.optimize(100)
    +state = model_controlled.get_xs()
    +control = model_controlled.control
    +plot_oc_network(model.params.N, duration, dt, state, target, control, input_nw, model_controlled.cost_history, model_controlled.step_sizes_history)
    +
    +Compute control for a deterministic system
    +Cost in iteration 0: 0.0002317087704141942
    +Cost in iteration 25: 0.00021249031308297534
    +Cost in iteration 50: 0.00019830797443039547
    +Cost in iteration 75: 0.0001844977342872052
    +Cost in iteration 100: 0.00017230020232441738
    +Final cost : 0.00017230020232441738

    Delayed network of neural populations


    We now consider a network topology with delayed signalling between the two nodes.

    cmat = np.array( [[0., 0.], [1., 0.]] ) # diagonal elements are zero, connection strength is 1 from node 0 to node 1
    +dmat = np.array( [[0., 0.], [18, 0.]] ) # distance from 0 to 1, delay is computed by dividing by the signal speed params.signalV
    +
    +if model.name == 'fhn':
    +    model = FHNModel(Cmat=cmat, Dmat=dmat)
    +elif model.name == 'hopf':
    +    model = HopfModel(Cmat=cmat, Dmat=dmat)
    +
    +duration, dt = 2000., 0.1
    +model.params.duration = duration
    +model.params.dt = dt
    +
    +# change coupling parameters for faster and stronger connection between nodes
    +model.params.K_gl = 1.
    +
    +model.params.x_ext = np.zeros((1,))
    +model.params.y_ext = np.zeros((1,))
    +
    +model.run()
    +
    +e0 = model.x[0,-1]
    +e1 = model.x[1,-1]
    +i0 = model.y[0,-1]
    +i1 = model.y[1,-1]
    +
    +maxdelay = model.getMaxDelay()
    +
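+# a delayed system needs an initial history: the steady-state values are
+# replicated over maxdelay + 1 time steps so that delayed inputs are
+# well defined from the start of the simulation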
    +model.params["xs_init"] = np.array([[e0] * (maxdelay + 1), [e1] * (maxdelay + 1) ])
    +model.params["ys_init"] = np.array([[i0] * (maxdelay + 1), [i1] * (maxdelay + 1) ])
    +
    +duration = 6.
    +model.params.duration = duration
    +time = np.arange(dt, duration+dt, dt)
    +
    +# we define the control input matrix to enable or disable certain channels and nodes
    +control_mat = np.zeros( (model.params.N, len(model.state_vars)) )
+control_mat[0,0] = 1. # only allow inputs in the x-channel in node 0
    +
    +zero_input = ZeroInput().generate_input(duration=duration+dt, dt=dt)
    +input = np.copy(zero_input)
    +input[0,10] = 1. 
    +input[0,20] = 1.
    +input[0,30] = 1. # Three pulses as control input
    +
    +input_nw = np.concatenate( (np.vstack( [control_mat[0,0] * input, control_mat[0,1] * input] )[np.newaxis,:,:],
    +                            np.vstack( [control_mat[1,0] * input, control_mat[1,1] * input] )[np.newaxis,:,:]), axis=0)
    +zero_input_nw = np.concatenate( (np.vstack( [zero_input, zero_input] )[np.newaxis,:,:],
    +                                 np.vstack( [zero_input, zero_input] )[np.newaxis,:,:]), axis=0)
    +
    +model.params["x_ext"] = input_nw[:,0,:]
    +model.params["y_ext"] = input_nw[:,1,:]
    +
    +model.params["xs_init"] = np.array([[e0] * (maxdelay + 1), [e1] * (maxdelay + 1) ])
    +model.params["ys_init"] = np.array([[i0] * (maxdelay + 1), [i1] * (maxdelay + 1) ])
    +model.run()
    +
    +# Define the result of the stimulation as target
    +target = np.concatenate( (np.stack( (model.params["xs_init"][:,-1], model.params["ys_init"][:,-1]), axis=1)[:,:, np.newaxis], np.stack( (model.x, model.y), axis=1)), axis=2)
    +
    +# Remove stimuli and re-run the simulation
    +model.params["x_ext"] = zero_input_nw[:,0,:]
    +model.params["y_ext"] = zero_input_nw[:,0,:]
    +model.run()
    +
    +# combine initial value and simulation result to one array
    +state =  np.concatenate( (np.stack( (model.params["xs_init"][:,-1], model.params["ys_init"][:,-1]), axis=1)[:,:, np.newaxis], np.stack( (model.x, model.y), axis=1)), axis=2)
    +plot_oc_network(model.params.N, duration, dt, state, target, zero_input_nw, input_nw)
    +
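For reference, a minimal sketch of the distance-to-delay conversion mentioned in the dmat comment above (signalV = 20 is an assumed value here; check model.params.signalV for the actual default):

import numpy as np

dmat = np.array([[0., 0.], [18., 0.]])   # distances between nodes
signalV, dt = 20., 0.1                   # assumed signal speed and step size
delay_steps = np.around(dmat / signalV / dt).astype(int)
print(delay_steps[1, 0])                 # -> 9 steps of dt, i.e. a 0.9 time-unit delay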
    # We set the external stimulation to zero. This is the "initial guess" for the OC algorithm
    +model.params["x_ext"] = zero_input_nw[:,0,:]
    +model.params["y_ext"] = zero_input_nw[:,0,:]
    +
    +# We load the optimal control class
+# print_array (optional parameter) defines for which iterations intermediate results will be printed
    +# Parameters will be taken from the input model
    +if model.name == "fhn":
    +    model_controlled = oc_fhn.OcFhn(model, target, print_array=np.arange(0,501,25), control_matrix=control_mat, cost_matrix=cost_mat)
    +elif model.name == "hopf":
    +    model_controlled = oc_hopf.OcHopf(model, target, print_array=np.arange(0,501,25), control_matrix=control_mat, cost_matrix=cost_mat)
    +
    +# We run 500 iterations of the optimal control gradient descent algorithm
    +model_controlled.optimize(500)
    +
    +state = model_controlled.get_xs()
    +control = model_controlled.control
    +plot_oc_network(model.params.N, duration, dt, state, target, control, input_nw, model_controlled.cost_history, model_controlled.step_sizes_history)
    +
    +Compute control for a deterministic system
    +Cost in iteration 0: 0.0011947065709511494
    +Cost in iteration 25: 1.8995713965492315e-05
    +Cost in iteration 50: 1.2661264833225136e-05
    +Cost in iteration 75: 9.010644155785715e-06
    +Cost in iteration 100: 6.820944851923922e-06
    +Cost in iteration 125: 5.474911745391518e-06
    +Cost in iteration 150: 4.530608100186918e-06
    +Cost in iteration 175: 3.927022075378679e-06
    +Cost in iteration 200: 3.506301912798229e-06
    +Cost in iteration 225: 3.1905412820140275e-06
    +Cost in iteration 250: 2.9567061175703895e-06
    +Cost in iteration 275: 2.7741407209279735e-06
    +Cost in iteration 300: 2.625794937490633e-06
    +Cost in iteration 325: 2.502192369572658e-06
    +Cost in iteration 350: 2.3959920314309043e-06
    +Cost in iteration 375: 2.303282831253012e-06
    +Cost in iteration 400: 2.220451776797742e-06
    +Cost in iteration 425: 2.1458248650643056e-06
    +Cost in iteration 450: 2.0775097671229942e-06
    +Cost in iteration 475: 2.0119242553645737e-06
    +Cost in iteration 500: 1.953220604966201e-06
    +Final cost : 1.953220604966201e-06
# perform another 100 iterations to improve the result
+# repeat execution to add another 100 iterations
    +model_controlled.optimize(100)
    +state = model_controlled.get_xs()
    +control = model_controlled.control
    +plot_oc_network(model.params.N, duration, dt, state, target, control, input_nw, model_controlled.cost_history, model_controlled.step_sizes_history)
    +
    +Compute control for a deterministic system
    +Cost in iteration 0: 1.953220604966201e-06
    +Cost in iteration 25: 1.8983582753730346e-06
    +Cost in iteration 50: 1.8467668220809676e-06
    +Cost in iteration 75: 1.798071064385974e-06
    +Cost in iteration 100: 1.7518998980010873e-06
    +Final cost : 1.7518998980010873e-06
diff --git a/examples/example-5.2-oc-wc-model-deterministic/index.html b/examples/example-5.2-oc-wc-model-deterministic/index.html
new file mode 100644
index 0000000..25b7bf9
--- /dev/null
+++ b/examples/example-5.2-oc-wc-model-deterministic/index.html
@@ -0,0 +1,1922 @@

    Example 5.2 oc wc model deterministic


    Optimal control of the Wilson-Cowan model


    This notebook shows how to compute the optimal control (OC) signal for the Wilson-Cowan model for a simple example task.

    import matplotlib.pyplot as plt
    +import numpy as np
    +import os
    +
    +while os.getcwd().split(os.sep)[-1] != "neurolib":
    +    os.chdir('..')
    +
    +# We import the model, stimuli, and the optimal control package
    +from neurolib.models.wc import WCModel
    +from neurolib.utils.stimulus import ZeroInput
    +from neurolib.control.optimal_control import oc_wc
    +from neurolib.utils.plot_oc import plot_oc_singlenode, plot_oc_network
    +
    +# This will reload all imports as soon as the code changes
    +%load_ext autoreload
    +%autoreload 2
    +

We stimulate the system with a known control signal, define the resulting activity as the target, and compute the optimal control for this target. We define the weights such that only precision is penalized (w_p=1, w_2=0); hence, the optimal control signal should converge to the input signal.

# We instantiate the model
    +model = WCModel()
    +
    +# Some parameters to define stimulation signals
    +dt = model.params["dt"]
    +duration = 10.
    +amplitude = 1.
    +period = duration /4.
    +
    +# We define a "zero-input", and a sine-input
    +zero_input = ZeroInput().generate_input(duration=duration+dt, dt=dt)
    +input = np.copy(zero_input)
    +input[0,1:-1] = amplitude * np.sin(2.*np.pi*np.arange(0,duration-0.1, dt)/period) # other functions or random values can be used as well
    +
    +# We set the duration of the simulation and the initial values
    +model.params["duration"] = duration
    +x_init = 0.011225367461896877
    +y_init = 0.013126741089502588
    +model.params["exc_init"] = np.array([[x_init]])
    +model.params["inh_init"] = np.array([[y_init]])
    +
    # We set the stimulus in x and y variables, and run the simulation
    +model.params["exc_ext"] = input
    +model.params["inh_ext"] = zero_input
    +model.run()
    +
    +# Define the result of the stimulation as target
    +target = np.concatenate((np.concatenate( (model.params["exc_init"], model.params["inh_init"]), axis=1)[:,:, np.newaxis],
    +    np.stack( (model.exc, model.inh), axis=1)), axis=2)
    +target_input = np.concatenate( (input,zero_input), axis=0)[np.newaxis,:,:]
    +
    +# Remove stimuli and re-run the simulation
    +model.params["exc_ext"] = zero_input
    +model.params["inh_ext"] = zero_input
    +control = np.concatenate( (zero_input,zero_input), axis=0)[np.newaxis,:,:]
    +model.run()
    +
    +# combine initial value and simulation result to one array
    +state = np.concatenate((np.concatenate( (model.params["exc_init"], model.params["inh_init"]), axis=1)[:,:, np.newaxis],
    +    np.stack( (model.exc, model.inh), axis=1)), axis=2)
    +
    +plot_oc_singlenode(duration, dt, state, target, control, target_input)
    +
    # We set the external stimulation to zero. This is the "initial guess" for the OC algorithm
    +model.params["exc_ext"] = zero_input
    +model.params["inh_ext"] = zero_input
    +
    +# We load the optimal control class
+# print_array (optional parameter) defines for which iterations intermediate results will be printed
    +# Parameters will be taken from the input model
    +model_controlled = oc_wc.OcWc(model, target, print_array=np.arange(0,501,25))
    +model_controlled.weights["w_p"] = 1. # default value 1
    +model_controlled.weights["w_2"] = 0. # default value 0
    +
    +# We run 500 iterations of the optimal control gradient descent algorithm
    +model_controlled.optimize(500)
    +
    +state = model_controlled.get_xs()
    +control = model_controlled.control
    +
    +plot_oc_singlenode(duration, dt, state, target, control, target_input, model_controlled.cost_history)
    +
    +Compute control for a deterministic system
    +Cost in iteration 0: 0.00041810554198290294
    +Cost in iteration 25: 1.0532102454109209e-05
    +Cost in iteration 50: 3.925315729100555e-06
    +Cost in iteration 75: 2.1054588334476998e-06
    +Cost in iteration 100: 1.398320694183479e-06
    +Cost in iteration 125: 1.0229387100203843e-06
    +Cost in iteration 150: 7.974333735234386e-07
    +Cost in iteration 175: 6.521115340266662e-07
    +Cost in iteration 200: 5.444869100157712e-07
    +Cost in iteration 225: 4.64536510299819e-07
    +Cost in iteration 250: 4.017338930501393e-07
    +Cost in iteration 275: 3.5110841320809306e-07
    +Cost in iteration 300: 3.096084004886465e-07
    +Cost in iteration 325: 2.752219772816687e-07
    +Cost in iteration 350: 2.466122217504442e-07
    +Cost in iteration 375: 2.2171404739100818e-07
    +Cost in iteration 400: 2.0072190143053269e-07
    +Cost in iteration 425: 1.8306021177634902e-07
    +Cost in iteration 450: 1.6681651877735875e-07
    +Cost in iteration 475: 1.5334951215981366e-07
    +Cost in iteration 500: 1.409261374589448e-07
    +Final cost : 1.409261374589448e-07
    # Do another 100 iterations if you want to.
    +# Repeated execution will continue with further 100 iterations.
    +model_controlled.optimize(100)
    +state = model_controlled.get_xs()
    +control = model_controlled.control
    +plot_oc_singlenode(duration, dt, state, target, control, target_input, model_controlled.cost_history)
    +Compute control for a deterministic system
    +Cost in iteration 0: 1.409261374589448e-07
    +Cost in iteration 25: 1.3051113114486073e-07
    +Cost in iteration 50: 1.2069164098268257e-07
    +Cost in iteration 75: 1.1215971283577606e-07
    +Cost in iteration 100: 1.0456327452784617e-07
    +Final cost : 1.0456327452784617e-07

    Network case


Let us now study a simple 2-node network of Wilson-Cowan oscillators. We first define the coupling matrix and the distance matrix. We can then initialize the model.

    cmat = np.array( [[0., 0.5], [1., 0.]] )  # diagonal elements are zero, connection strength is 1 (0.5) from node 0 to node 1 (from node 1 to node 0)
    +dmat = np.array( [[0., 0.], [0., 0.]] )  # no delay
    +
    +model = WCModel(Cmat=cmat, Dmat=dmat)
    +
    +# we define the control input matrix to enable or disable certain channels and nodes
    +control_mat = np.zeros( (model.params.N, len(model.state_vars)) )
    +control_mat[0,0] = 1. # only allow inputs in x-channel in node 0
    +
    +model.params.K_gl = 5.
    +
    +model.params["duration"] = duration
    +zero_input = ZeroInput().generate_input(duration=duration+dt, dt=dt)
    +input = np.copy(zero_input)
    +input[0,1:-3] = np.sin(np.arange(0,duration-0.3, dt)) # other functions or random values can be used as well
    +model.params["exc_init"] = np.vstack( [x_init, x_init] )
    +model.params["inh_init"] = np.vstack( [y_init, y_init] )
    +
    +
    +# We set the stimulus in x and y variables, and run the simulation
    +input_nw = np.concatenate( (np.vstack( [control_mat[0,0] * input, control_mat[0,1] * input] )[np.newaxis,:,:],
    +                            np.vstack( [control_mat[1,0] * input, control_mat[1,1] * input] )[np.newaxis,:,:]), axis=0)
    +zero_input_nw = np.concatenate( (np.vstack( [zero_input, zero_input] )[np.newaxis,:,:],
    +                                 np.vstack( [zero_input, zero_input] )[np.newaxis,:,:]), axis=0)
    +
    +model.params["exc_ext"] = input_nw[:,0,:]
    +model.params["inh_ext"] = input_nw[:,1,:]
    +
    +model.run()
    +
    +# Define the result of the stimulation as target
    +target = np.concatenate( (np.concatenate( (model.params["exc_init"], model.params["inh_init"]), axis=1)[:,:, np.newaxis], np.stack( (model.exc, model.inh), axis=1)), axis=2)
    +
    +# Remove stimuli and re-run the simulation
    +model.params["exc_ext"] = zero_input_nw[:,0,:]
    +model.params["inh_ext"] = zero_input_nw[:,0,:]
    +model.run()
    +
    +# combine initial value and simulation result to one array
    +state =  np.concatenate( (np.concatenate( (model.params["exc_init"], model.params["inh_init"]), axis=1)[:,:, np.newaxis], np.stack( (model.exc, model.inh), axis=1)), axis=2)
    +
    +plot_oc_network(model.params.N, duration, dt, state, target, zero_input_nw, input_nw)
# we define the precision matrix to specify in which nodes and channels we measure deviations from the target
+cost_mat = np.zeros( (model.params.N, len(model.output_vars)) )
+cost_mat[1,0] = 1. # only measure in the exc-channel in node 1
    +
    +# We set the external stimulation to zero. This is the "initial guess" for the OC algorithm
    +model.params["exc_ext"] = zero_input_nw[:,0,:]
    +model.params["inh_ext"] = zero_input_nw[:,0,:]
    +
    +# We load the optimal control class
+# print_array (optional parameter) defines for which iterations intermediate results will be printed
    +# Parameters will be taken from the input model
    +model_controlled = oc_wc.OcWc(model, target, print_array=np.arange(0,501,25), control_matrix=control_mat, cost_matrix=cost_mat)
    +
    +# We run 500 iterations of the optimal control gradient descent algorithm
    +model_controlled.optimize(500)
    +
    +state = model_controlled.get_xs()
    +control = model_controlled.control
    +
    +plot_oc_network(model.params.N, duration, dt, state, target, control, input_nw, model_controlled.cost_history, model_controlled.step_sizes_history)
    +Compute control for a deterministic system
    +Cost in iteration 0: 8.117061134315108e-06
    +Cost in iteration 25: 4.0329637221407195e-07
    +Cost in iteration 50: 2.133706589679289e-07
    +Cost in iteration 75: 1.0846418185856119e-07
    +Cost in iteration 100: 6.237553898673198e-08
    +Cost in iteration 125: 3.607365058691262e-08
    +Cost in iteration 150: 2.2496421814207724e-08
    +Cost in iteration 175: 1.5886138922670738e-08
    +Cost in iteration 200: 1.1727415781910453e-08
    +Cost in iteration 225: 9.005487959890062e-09
    +Cost in iteration 250: 7.191281120908631e-09
    +Cost in iteration 275: 5.835744371001404e-09
    +Cost in iteration 300: 4.915806895112334e-09
    +Cost in iteration 325: 4.206672224203755e-09
    +Cost in iteration 350: 3.6916483993194285e-09
    +Cost in iteration 375: 3.2948161905145206e-09
    +Cost in iteration 400: 2.9837006122863342e-09
    +Cost in iteration 425: 2.7310136209212046e-09
    +Cost in iteration 450: 2.5267282859627983e-09
    +Cost in iteration 475: 2.352356874896669e-09
    +Cost in iteration 500: 2.2057268519628175e-09
    +Final cost : 2.2057268519628175e-09
    # Do another 100 iterations if you want to.
    +# Repeated execution will continue with further 100 iterations.
    +model_controlled.optimize(100)
    +state = model_controlled.get_xs()
    +control = model_controlled.control
    +plot_oc_network(model.params.N, duration, dt, state, target, control, input_nw, model_controlled.cost_history, model_controlled.step_sizes_history)
    +Compute control for a deterministic system
    +Cost in iteration 0: 2.2057268519628175e-09
    +Cost in iteration 25: 2.079569265893922e-09
    +Cost in iteration 50: 1.969986550217457e-09
    +Cost in iteration 75: 1.874389888067335e-09
    +Cost in iteration 100: 1.7855706988225455e-09
    +Final cost : 1.7855706988225455e-09

    Delayed network of neural populations


    We now consider a network topology with delayed signalling between the two nodes.

    cmat = np.array( [[0., 0.], [1., 0.]] ) # diagonal elements are zero, connection strength is 1 from node 0 to node 1
    +dmat = np.array( [[0., 0.], [18, 0.]] ) # distance from 0 to 1, delay is computed by dividing by the signal speed params.signalV
    +
    +model = WCModel(Cmat=cmat, Dmat=dmat)
    +
    +duration, dt = 2000., 0.1
    +model.params.duration = duration
    +model.params.dt = dt
    +model.params.K_gl = 10.
    +
    +model.run()
    +
    +e0 = model.exc[0,-1]
    +e1 = model.exc[1,-1]
    +i0 = model.inh[0,-1]
    +i1 = model.inh[1,-1]
    +
    +maxdelay = model.getMaxDelay()
    +
    +model.params["exc_init"] = np.array([[e0] * (maxdelay + 1), [e1] * (maxdelay + 1) ])
    +model.params["inh_init"] = np.array([[i0] * (maxdelay + 1), [i1] * (maxdelay + 1) ])
    +
    +duration = 6.
    +model.params.duration = duration
    +model.run()
    +
    +# we define the control input matrix to enable or disable certain channels and nodes
    +control_mat = np.zeros( (model.params.N, len(model.state_vars)) )
    +control_mat[0,0] = 1. # only allow inputs in E-channel in node 0
    +
    +zero_input = ZeroInput().generate_input(duration=duration+dt, dt=dt)
    +input = zero_input.copy()
    +input[0,10] = 1. 
    +input[0,20] = 1.
    +input[0,30] = 1. # Three pulses as control input
    +
    +input_nw = np.concatenate( (np.vstack( [control_mat[0,0] * input, control_mat[0,1] * input] )[np.newaxis,:,:],
    +                            np.vstack( [control_mat[1,0] * input, control_mat[1,1] * input] )[np.newaxis,:,:]), axis=0)
    +zero_input_nw = np.concatenate( (np.vstack( [zero_input, zero_input] )[np.newaxis,:,:],
    +                                 np.vstack( [zero_input, zero_input] )[np.newaxis,:,:]), axis=0)
    +
    +model.params["exc_ext"] = input_nw[:,0,:]
    +model.params["inh_ext"] = input_nw[:,1,:]
    +model.run()
    +
    +# Define the result of the stimulation as target
    +target = np.concatenate( (np.stack( (model.params["exc_init"][:,-1], model.params["inh_init"][:,-1]), axis=1)[:,:, np.newaxis], np.stack( (model.exc, model.inh), axis=1)), axis=2)
    +
    +# Remove stimuli and re-run the simulation
    +model.params["exc_ext"] = zero_input_nw[:,0,:]
    +model.params["inh_ext"] = zero_input_nw[:,0,:]
    +model.run()
    +
    +# combine initial value and simulation result to one array
    +state = np.concatenate( (np.stack( (model.params["exc_init"][:,-1], model.params["inh_init"][:,-1]), axis=1)[:,:, np.newaxis], np.stack( (model.exc, model.inh), axis=1)), axis=2)
    +plot_oc_network(model.params.N, duration, dt, state, target, zero_input_nw, input_nw)
    # We load the optimal control class
+# print_array (optional parameter) defines for which iterations intermediate results will be printed
    +# Parameters will be taken from the input model
    +model_controlled = oc_wc.OcWc(model, target, print_array=np.arange(0,501,25), control_matrix=control_mat, cost_matrix=cost_mat)
    +
    +# We run 500 iterations of the optimal control gradient descent algorithm
    +model_controlled.optimize(500)
    +
    +state = model_controlled.get_xs()
    +control = model_controlled.control
    +plot_oc_network(model.params.N, duration, dt, state, target, control, input_nw, model_controlled.cost_history, model_controlled.step_sizes_history)
    +Compute control for a deterministic system
    +Cost in iteration 0: 1.792835053390993e-07
    +Cost in iteration 25: 3.224858708247228e-10
    +Cost in iteration 50: 1.0235990384283723e-10
    +Cost in iteration 75: 8.627681277851615e-11
    +Cost in iteration 100: 8.09708890397755e-11
    +Cost in iteration 125: 6.901547805762654e-11
    +Cost in iteration 150: 6.563898918059379e-11
    +Cost in iteration 175: 6.358322097910284e-11
    +Cost in iteration 200: 5.819126634851626e-11
    +Cost in iteration 225: 5.598411882794661e-11
    +Cost in iteration 250: 5.458351655389417e-11
    +Cost in iteration 275: 5.101837452145287e-11
    +Cost in iteration 300: 4.9526343719852504e-11
    +Cost in iteration 325: 4.872279762423021e-11
    +Cost in iteration 350: 4.599347400927492e-11
    +Cost in iteration 375: 4.5049466495032303e-11
    +Cost in iteration 400: 4.32863678958512e-11
    +Cost in iteration 425: 4.241565430129624e-11
    +Cost in iteration 450: 4.121896579349796e-11
    +Cost in iteration 475: 4.036542019862459e-11
    +Cost in iteration 500: 3.990804399212831e-11
    +Final cost : 3.990804399212831e-11
# perform another 100 iterations to improve the result
+# repeat execution to add another 100 iterations
+# convergence to the input stimulus is relatively slow for the WC model
    +model_controlled.optimize(100)
    +state = model_controlled.get_xs()
    +control = model_controlled.control
    +plot_oc_network(model.params.N, duration, dt, state, target, control, input_nw, model_controlled.cost_history, model_controlled.step_sizes_history)
    +Compute control for a deterministic system
    +Cost in iteration 0: 3.990804399212831e-11
    +Cost in iteration 25: 3.8701660107380814e-11
    +Cost in iteration 50: 3.8275743610357815e-11
    +Cost in iteration 75: 3.731362663528545e-11
    +Cost in iteration 100: 3.694171527929222e-11
    +Final cost : 3.694171527929222e-11
diff --git a/examples/example-5.3-oc-wc-model-noisy/index.html b/examples/example-5.3-oc-wc-model-noisy/index.html
new file mode 100644
index 0000000..b263252
--- /dev/null
+++ b/examples/example-5.3-oc-wc-model-noisy/index.html
@@ -0,0 +1,1803 @@

    Example 5.3 oc wc model noisy


Optimal control of the noisy Wilson-Cowan model


    This notebook shows how to compute the optimal control (OC) signal for the noisy WC model for a simple example task.

    import matplotlib.pyplot as plt
    +import numpy as np
    +import os
    +
    +while os.getcwd().split(os.sep)[-1] != "neurolib":
    +    os.chdir('..')
    +
    +# We import the model, stimuli, and the optimal control package
    +from neurolib.models.wc import WCModel
    +from neurolib.utils.stimulus import ZeroInput
    +from neurolib.control.optimal_control import oc_wc
    +from neurolib.utils.plot_oc import plot_oc_singlenode, plot_oc_network
    +
    +# This will reload all imports as soon as the code changes
    +%load_ext autoreload
    +%autoreload 2
    +

We stimulate the system with a known control signal, define the resulting activity as the target, and compute the optimal control for this target. We define the weights such that only precision is penalized (w_p=1, w_2=0); hence, the optimal control signal should converge to the input signal.

# We instantiate the model
    +model = WCModel()
    +
    +# Set noise strength to zero to define target state
    +model.params.sigma_ou = 0.
    +
    +# Some parameters to define stimulation signals
    +dt = model.params["dt"]
    +duration = 40.
    +amplitude = 1.
    +period = duration / 4.
    +
    +# We define a "zero-input", and a sine-input
    +zero_input = ZeroInput().generate_input(duration=duration+dt, dt=dt)
    +input = np.copy(zero_input)
    +input[0,1:-1] = amplitude * np.sin(2.*np.pi*np.arange(0,duration-0.1, dt)/period) # other functions or random values can be used as well
    +
    +# We set the duration of the simulation and the initial values
    +model.params["duration"] = duration
    +x_init = 0.011225367461896877
    +y_init = 0.013126741089502588
    +model.params["exc_init"] = np.array([[x_init]])
    +model.params["inh_init"] = np.array([[y_init]])
    +
    +# We set the stimulus in x and y variables, and run the simulation
    +model.params["exc_ext"] = input
    +model.params["inh_ext"] = zero_input
    +model.run()
    +
    +# Define the result of the stimulation as target
    +target = np.concatenate((np.concatenate( (model.params["exc_init"], model.params["inh_init"]), axis=1)[:,:, np.newaxis],
    +    np.stack( (model.exc, model.inh), axis=1)), axis=2)
    +target_input = np.concatenate( (input,zero_input), axis=0)[np.newaxis,:,:]
    +
    +# Remove stimuli and re-run the simulation
+# Change the sigma_ou parameter to adjust the noise strength
    +model.params['sigma_ou'] = 0.1
    +model.params['tau_ou'] = 1.
    +model.params["exc_ext"] = zero_input
    +model.params["inh_ext"] = zero_input
    +control = np.concatenate( (zero_input,zero_input), axis=0)[np.newaxis,:,:]
    +model.run()
    +
+# combine initial value and simulation result into one array
    +state = np.concatenate((np.concatenate( (model.params["exc_init"], model.params["inh_init"]), axis=1)[:,:, np.newaxis],
    +    np.stack( (model.exc, model.inh), axis=1)), axis=2)
    +
    +plot_oc_singlenode(duration, dt, state, target, control, target_input)
    +
    + +
    +
    +
    +
    +
    + +
    +
    +
    +
    +
    +
    +
    +
    +

The target is a periodic oscillation of the x and y variables (computed in the deterministic, noise-free system).
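Throughout the OC examples, state and target arrays follow a (nodes, variables, time) layout; a minimal sketch to check the target assembled above (the shape is inferred from the concatenation in the previous cell):

# sketch: the target uses the (N, V, T) layout, here one node with the exc and inh variables
print(target.shape)  # expected: (1, 2, number_of_time_steps)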

    +

The noisy, undisturbed system fluctuates around zero.

    +

For the optimization, you can now set several new parameters:
- M: the number of noise realizations that the algorithm averages over. Default=1
- M_validation: the number of noise realizations the final cost is computed from. Default=1000
- validate_per_step: if True, the cost for each step is computed by averaging over M_validation instead of M realizations; this takes much longer. Default=False
- method: determines how the noise averages are computed. Results may vary between methods depending on the specific task. Choose from ['3']. Default='3'

    +

Please note:
- A higher number of iterations does not promise better results for computations in noisy systems. The cost will level off at some iteration number and start increasing again afterwards, so make sure not to perform too many iterations (see the sketch below).
- M and M_validation should increase with the sigma_ou model parameter.
- validate_per_step does not impact the control result.
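To see where the cost levels off, you can inspect the cost history directly after an optimization run; a minimal sketch, using the same cost_history attribute that the plotting helpers below consume:

import matplotlib.pyplot as plt

# sketch: run after model_controlled.optimize(...) has finished
plt.plot(model_controlled.cost_history)  # one cost value per iteration
plt.xlabel("iteration")
plt.ylabel("cost")
plt.show()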

    +

    Let's first optimize with the following parameters: M=20, iterations=100

    +
    +
    +
    +
    +
    + +
    # We set the external stimulation to zero. This is the "initial guess" for the OC algorithm
    +model.params["exc_ext"] = zero_input
    +model.params["inh_ext"] = zero_input
    +
    +# We load the optimal control class
+# print_array (optional parameter) defines for which iterations intermediate results will be printed
    +# Parameters will be taken from the input model
    +model_controlled = oc_wc.OcWc(model, target, print_array=np.arange(0,101,10),
    +        M=20, M_validation=500, validate_per_step=True)
    +
    +# We run 100 iterations of the optimal control gradient descent algorithm
    +model_controlled.optimize(100)
    +
    +state = model_controlled.get_xs()
    +control = model_controlled.control
    +
    +plot_oc_singlenode(duration, dt, state, target, control, target_input, model_controlled.cost_history)
    +
    + +
    +
    +
    +
    +
    +
    +Compute control for a noisy system
    +Mean cost in iteration 0: 0.0486299027821106
    +Mean cost in iteration 10: 0.02795683316984877
    +Mean cost in iteration 20: 0.027101411958439722
    +Mean cost in iteration 30: 0.026543919519260453
    +Mean cost in iteration 40: 0.026707819124178123
    +Mean cost in iteration 50: 0.026786489900410732
    +Mean cost in iteration 60: 0.026412584686262147
    +Mean cost in iteration 70: 0.026425089398826186
    +Mean cost in iteration 80: 0.026760368474147204
    +Mean cost in iteration 90: 0.026954163211574594
    +Mean cost in iteration 100: 0.027106734179733114
    +Minimal cost found at iteration 36
    +Final cost validated with 500 noise realizations : 0.02719992592343364
    +
    +
    +
    +
    +
    +
    + +
    +
    +
    +
    +
    +
    +
    +
    +

    Let's do the same thing with different parameters: M=100, iterations=30

    +
    +
    +
    +
    +
    + +
    # We set the external stimulation to zero. This is the "initial guess" for the OC algorithm
    +model.params["exc_ext"] = zero_input
    +model.params["inh_ext"] = zero_input
    +
    +# We load the optimal control class
+# print_array (optional parameter) defines for which iterations intermediate results will be printed
    +# Parameters will be taken from the input model
    +model_controlled = oc_wc.OcWc(model, target,print_array=np.arange(0,31,5),
    +        M=100, M_validation=500, validate_per_step=True)
    +
    +# We run 30 iterations of the optimal control gradient descent algorithm
    +model_controlled.optimize(30)
    +
    +state = model_controlled.get_xs()
    +control = model_controlled.control
    +
    +plot_oc_singlenode(duration, dt, state, target, control, target_input, model_controlled.cost_history)
    +
    + +
    +
    +
    +
    +
    +
    +Compute control for a noisy system
    +Mean cost in iteration 0: 0.044519683319845585
    +Mean cost in iteration 5: 0.049139417017223554
    +Mean cost in iteration 10: 0.050857609671347954
    +Mean cost in iteration 15: 0.04663531486878592
    +Mean cost in iteration 20: 0.046747345271133535
    +Mean cost in iteration 25: 0.05112611753258763
    +Mean cost in iteration 30: 0.04785865829049892
    +Minimal cost found at iteration 27
    +Final cost validated with 500 noise realizations : 0.045416281905513174
    +
    +
    +
    +
    +
    +
    + +
    +
    +
    +
    +
    +
    +
    +
    +

    Network case

    +
    +
    +
    +
    +
    +
    +

Let us now study a simple 2-node network of model oscillators. We first need to define the coupling matrix and the delay matrix. We can then initialize the model.
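The indexing convention, as stated in the comments of the next cell, is that cmat[i, j] holds the connection strength from node j to node i; a minimal sketch of the same matrix, for illustration only:

import numpy as np

# sketch of the coupling convention used below: cmat[i, j] couples node j -> node i
cmat_sketch = np.array([[0., 0.5],   # row 0: node 1 -> node 0 with strength 0.5
                        [1.0, 0.]])  # row 1: node 0 -> node 1 with strength 1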

    +
    +
    +
    +
    +
    + +
    cmat = np.array( [[0., 0.5], [1.0, 0.]] )  # diagonal elements are zero, connection strength is 1 (0.5) from node 0 to node 1 (from node 1 to node 0)
    +dmat = np.array( [[0., 0.], [0., 0.]] )  # no delay
    +
    +model = WCModel(Cmat=cmat, Dmat=dmat)
    +
    +# we define the control input matrix to enable or disable certain channels and nodes
    +control_mat = np.zeros( (model.params.N, len(model.state_vars)) )
    +control_mat[0,0] = 1. # only allow inputs in x-channel in node 0
    +
    +model.params.K_gl = 10.
    +
    +# Set noise strength to zero to define target state
    +model.params['sigma_ou'] = 0.
    +
    +model.params["duration"] = duration
    +zero_input = ZeroInput().generate_input(duration=duration+dt, dt=dt)
    +input = np.copy(zero_input)
    +input[0,1:-1] = amplitude * np.sin(2.*np.pi*np.arange(0,duration-0.1, dt)/period) # other functions or random values can be used as well
    +model.params["exc_init"] = np.vstack( [0.01255381969006173, 0.01190300495001282] )
    +model.params["inh_init"] = np.vstack( [0.013492631513639169, 0.013312224583806076] )
    +
    +
    +# We set the stimulus in x and y variables, and run the simulation
    +input_nw = np.concatenate( (np.vstack( [control_mat[0,0] * input, control_mat[0,1] * input] )[np.newaxis,:,:],
    +                            np.vstack( [control_mat[1,0] * input, control_mat[1,1] * input] )[np.newaxis,:,:]), axis=0)
    +zero_input_nw = np.concatenate( (np.vstack( [zero_input, zero_input] )[np.newaxis,:,:],
    +                                 np.vstack( [zero_input, zero_input] )[np.newaxis,:,:]), axis=0)
    +
    +model.params["exc_ext"] = input_nw[:,0,:]
    +model.params["inh_ext"] = input_nw[:,1,:]
    +
    +model.run()
    +
    +# Define the result of the stimulation as target
    +target = np.concatenate( (np.concatenate( (model.params["exc_init"], model.params["inh_init"]), axis=1)[:,:, np.newaxis], np.stack( (model.exc, model.inh), axis=1)), axis=2)
    +
    +# Remove stimuli and re-run the simulation
    +model.params['sigma_ou'] = 0.03
    +model.params['tau_ou'] = 1.
    +model.params["exc_ext"] = zero_input_nw[:,0,:]
    +model.params["inh_ext"] = zero_input_nw[:,0,:]
    +model.run()
    +
+# combine initial value and simulation result into one array
    +state =  np.concatenate( (np.concatenate( (model.params["exc_init"], model.params["inh_init"]), axis=1)[:,:, np.newaxis], np.stack( (model.exc, model.inh), axis=1)), axis=2)
    +
    +plot_oc_network(model.params.N, duration, dt, state, target, zero_input_nw, input_nw)
    +
    + +
    +
    +
    +
    +
    + +
    +
    +
    +
    +
    +
    +
    +
    +

    Let's optimize with the following parameters: M=20, iterations=100

    +
    +
    +
    +
    +
    + +
# we define the precision matrix to specify in which nodes and channels we measure deviations from the target
    +cost_mat = np.zeros( (model.params.N, len(model.output_vars)) )
    +cost_mat[1,0] = 1. # only measure in x-channel in node 1
    +
    +# We set the external stimulation to zero. This is the "initial guess" for the OC algorithm
    +model.params["exc_ext"] = zero_input_nw[:,0,:]
    +model.params["inh_ext"] = zero_input_nw[:,0,:]
    +
    +# We load the optimal control class
+# print_array (optional parameter) defines for which iterations intermediate results will be printed
    +# Parameters will be taken from the input model
    +model_controlled = oc_wc.OcWc(model,
    +                                target,
    +                                print_array=np.arange(0,101,10),
    +                                control_matrix=control_mat,
    +                                cost_matrix=cost_mat,
    +                                M=20,
    +                                M_validation=500,
    +                                validate_per_step=True)
    +
    +# We run 100 iterations of the optimal control gradient descent algorithm
    +model_controlled.optimize(100)
    +
    +state = model_controlled.get_xs()
    +control = model_controlled.control
    +
    +plot_oc_network(model.params.N, duration, dt, state, target, control, input_nw, model_controlled.cost_history, model_controlled.step_sizes_history)
    +
    + +
    +
    +
    +
    +
    +
    +Compute control for a noisy system
    +Mean cost in iteration 0: 0.0161042019653286
    +Mean cost in iteration 10: 0.029701202083900886
    +Mean cost in iteration 20: 0.02055100392146934
    +Mean cost in iteration 30: 0.01824138412316584
    +Mean cost in iteration 40: 0.01774943248604246
    +Mean cost in iteration 50: 0.00938616563892467
    +Mean cost in iteration 60: 0.013815979179667275
    +Mean cost in iteration 70: 0.011677029951767951
    +Mean cost in iteration 80: 0.03103645422939053
    +Mean cost in iteration 90: 0.018355469642118635
    +Mean cost in iteration 100: 0.021407393453975455
    +Minimal cost found at iteration 67
    +Final cost validated with 500 noise realizations : 0.02038125379192151
    +
    +
    +
    +
    +
    +
    + +
    +
    +
    +
    +
    +
    +
    +
    +

    Let's do the same thing with different parameters: M=100, iterations=30

    +
    +
    +
    +
    +
    + +
    # We set the external stimulation to zero. This is the "initial guess" for the OC algorithm
    +model.params["exc_ext"] = zero_input_nw[:,0,:]
    +model.params["inh_ext"] = zero_input_nw[:,0,:]
    +
    +# We load the optimal control class
+# print_array (optional parameter) defines for which iterations intermediate results will be printed
    +# Parameters will be taken from the input model
    +model_controlled = oc_wc.OcWc(model,
    +                                target,
    +                                print_array=np.arange(0,31,5),
    +                                control_matrix=control_mat,
    +                                cost_matrix=cost_mat,
    +                                M=100,
    +                                M_validation=500,
    +                                validate_per_step=True)
    +
    +# We run 30 iterations of the optimal control gradient descent algorithm
    +model_controlled.optimize(30)
    +
    +state = model_controlled.get_xs()
    +control = model_controlled.control
    +
    +plot_oc_network(model.params.N, duration, dt, state, target, control, input_nw, model_controlled.cost_history, model_controlled.step_sizes_history)
    +
    + +
    +
    +
    +
    +
    +
    +Compute control for a noisy system
    +Mean cost in iteration 0: 0.01775755329403377
    +Mean cost in iteration 5: 0.010280452998278504
    +Mean cost in iteration 10: 0.01594708289308906
    +Mean cost in iteration 15: 0.028644745813145765
    +Mean cost in iteration 20: 0.030889247442364865
    +Mean cost in iteration 25: 0.02629869930972565
    +Mean cost in iteration 30: 0.017322464091192105
    +Minimal cost found at iteration 21
    +Final cost validated with 500 noise realizations : 0.04481574197020663
    +
    +
    +
    +
    +
    +
    + +
    +
    +
    +
    +
    +
    +
    + +
    
    +
    + +
    +
    + + + + + + +
    +
    + + +
    + +
    + + + +
    +
    +
    +
\ No newline at end of file
diff --git a/examples/example-5.4-oc-aln-model-deterministic/index.html b/examples/example-5.4-oc-aln-model-deterministic/index.html
new file mode 100644
index 0000000..eac0536
--- /dev/null
+++ b/examples/example-5.4-oc-aln-model-deterministic/index.html
@@ -0,0 +1,2130 @@

    Example 5.4 oc aln model deterministic

    + + + + +
    +
    +
    +

    Binder

    +
    +
    +
    +
    +
    +
    +

    Optimal control of the ALN model

    +

    This notebook shows how to compute the optimal control (OC) signal for the ALN model for a simple example task.

    +
    +
    +
    +
    +
    + +
    import matplotlib.pyplot as plt
    +import numpy as np
    +import os
    +
    +while os.getcwd().split(os.sep)[-1] != "neurolib":
    +    os.chdir('..')
    +
    +# We import the model, stimuli, and the optimal control package
    +from neurolib.models.aln import ALNModel
    +from neurolib.utils.stimulus import ZeroInput
    +from neurolib.control.optimal_control import oc_aln
    +from neurolib.utils.plot_oc import plot_oc_singlenode, plot_oc_network
    +
    +# This will reload all imports as soon as the code changes
    +%load_ext autoreload
    +%autoreload 2
    +
    +
    +# This function reads out the final state of a simulation
    +def getfinalstate(model):
    +    N = model.params.Cmat.shape[0]
    +    V = len(model.state_vars)
+    T = model.getMaxDelay() + 1  # a delayed system needs the last maxDelay+1 time steps as its state
    +    state = np.zeros((N, V, T))
    +    for v in range(V):
    +        if "rates" in model.state_vars[v] or "IA" in model.state_vars[v]:
    +            for n in range(N):
    +                state[n, v, :] = model.state[model.state_vars[v]][n, -T:]
    +        else:
    +            for n in range(N):
    +                state[n, v, :] = model.state[model.state_vars[v]][n]
    +    return state
    +
    +
+# This function writes a final state back into the model's init parameters
+def setinitstate(model, state):
    +    N = model.params.Cmat.shape[0]
    +    V = len(model.init_vars)
    +    T = model.getMaxDelay() + 1
    +
    +    for n in range(N):
    +        for v in range(V):
    +            if "rates" in model.init_vars[v] or "IA" in model.init_vars[v]:
    +                model.params[model.init_vars[v]] = state[:, v, -T:]
    +            else:
    +                model.params[model.init_vars[v]] = state[:, v, -1]
    +
    +    return
    +
+# This function stacks the initial values and the simulation output into one (N, V, T) array
+def getstate(model):
    +    state = np.concatenate(  ( np.concatenate((model.params["rates_exc_init"][:, np.newaxis, -1],
    +                                            model.params["rates_inh_init"][:, np.newaxis, -1],
    +                                            model.params["IA_init"][:, np.newaxis, -1], ), axis=1, )[:, :, np.newaxis],
    +                            np.stack((model.rates_exc, model.rates_inh, model.IA), axis=1),),axis=2,  )
    +
    +    return state
    +
    + +
    +
    +
    +
    +
    +

We stimulate the system with a known control signal, define the resulting activity as target, and compute the optimal control for this target. We define the weights such that only the precision term (the deviation from the target) is penalized, not the control strength (w_p=1, w_2=0). Hence, the optimal control signal should converge to the input signal.

    +

    We first study current inputs. We will later proceed to rate inputs.
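For orientation, the ALN model exposes two current-type and two rate-type external input channels, which the cells below address by name; a minimal sketch (to be run once the model is created in the next cell), assuming input_vars lists them:

# sketch: list the ALN model's external input channels
for name in model.input_vars:
    print(name)
# expected (as used below): ext_exc_current, ext_inh_current, ext_exc_rate, ext_inh_rate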

    +
    +
    +
    +
    +
    + +
    # We import the model
    +model = ALNModel()
    +model.params.duration = 10000
    +model.params.mue_ext_mean = 2. # up state
    +model.run()
    +setinitstate(model, getfinalstate(model))
    +
    +# Some parameters to define stimulation signals
    +dt = model.params["dt"]
    +duration = 10.
    +amplitude = 1.
    +period = duration /4.
    +
    +# We define a "zero-input", and a sine-input
    +zero_input = ZeroInput().generate_input(duration=duration+dt, dt=dt)
    +input = np.copy(zero_input)
    +input[0,1:-1] = amplitude * np.sin(2.*np.pi*np.arange(0,duration-0.1, dt)/period) # other functions or random values can be used as well
    +
    +# We set the duration of the simulation and the initial values
    +model.params["duration"] = duration
    +
    +# We set the stimulus in x and y variables, and run the simulation
    +model.params["ext_exc_current"] = input
    +model.params["ext_inh_current"] = zero_input
    +model.params["ext_exc_rate"] = zero_input
    +model.params["ext_inh_rate"] = zero_input
    +
    +model.run()
    +
    +# Define the result of the stimulation as target
    +target = getstate(model)
    +target_input = np.concatenate( (input, zero_input, zero_input, zero_input), axis=0)[np.newaxis,:,:]
    +
    +# Remove stimuli and re-run the simulation
    +model.params["ext_exc_current"] = zero_input
    +model.params["ext_inh_current"] = zero_input
    +control = np.concatenate( (zero_input, zero_input, zero_input, zero_input), axis=0)[np.newaxis,:,:]
    +model.run()
    +
+# combine initial value and simulation result into one array
    +state = getstate(model)
    +
    +plot_oc_singlenode(duration, dt, state, target, control, target_input)
    +
    + +
    +
    +
    +
    +
    + +
    +
    +
    +
    +
    +
    +
    + +
    # We load the optimal control class
+# print_array (optional parameter) defines for which iterations intermediate results will be printed
    +# Parameters will be taken from the input model
    +control_mat = np.zeros((1,len(model.input_vars)))
    +control_mat[0,0] = 1.
    +cost_mat = np.zeros((1,len(model.output_vars)))
    +cost_mat[0,0] = 1.
    +
    +model_controlled = oc_aln.OcAln(model, target, print_array=np.arange(0,501,25), control_matrix=control_mat, cost_matrix=cost_mat)
    +model_controlled.weights["w_p"] = 1. # default value 1
    +model_controlled.weights["w_2"] = 0. # default value 0
    +
    +# We run 500 iterations of the optimal control gradient descent algorithm
    +model_controlled.optimize(500)
    +
    +state = model_controlled.get_xs()
    +control = model_controlled.control
    +
    +plot_oc_singlenode(duration, dt, state, target, control, target_input, model_controlled.cost_history)
    +
    + +
    +
    +
    +
    +
    +
    +Compute control for a deterministic system
    +Cost in iteration 0: 314.1247597247194
    +Cost in iteration 25: 0.13317432824531167
    +Cost in iteration 50: 0.025934764241784855
    +Cost in iteration 75: 0.010689714898934012
    +Cost in iteration 100: 0.006042649711908977
    +Cost in iteration 125: 0.003852074448389804
    +Cost in iteration 150: 0.0026454397557471756
    +Cost in iteration 175: 0.0019048498068881534
    +Cost in iteration 200: 0.0014175325285176437
    +Cost in iteration 225: 0.0010832777739798686
    +Cost in iteration 250: 0.0008270405756069322
    +Cost in iteration 275: 0.000647747907643482
    +Cost in iteration 300: 0.0005135789763737352
    +Cost in iteration 325: 0.00041166220430455887
    +Cost in iteration 350: 0.00033334319584000865
    +Cost in iteration 375: 0.0002682483135493626
    +Cost in iteration 400: 0.00021897331522083166
    +Cost in iteration 425: 0.0001797951466810639
    +Cost in iteration 450: 0.0001484385297291106
    +Cost in iteration 475: 0.00012322292996632452
    +Cost in iteration 500: 0.0001019978308262297
    +Final cost : 0.0001019978308262297
    +
    +
    +
    +
    +
    +
    + +
    +
    +
    +
    +
    +
    +
    + +
    # Do another 100 iterations if you want to.
    +# Repeated execution will continue with further 100 iterations.
    +model_controlled.optimize(100)
    +state = model_controlled.get_xs()
    +control = model_controlled.control
    +plot_oc_singlenode(duration, dt, state, target, control, target_input, model_controlled.cost_history)
    +
    + +
    +
    +
    +
    +
    +
    +Compute control for a deterministic system
    +Cost in iteration 0: 0.0001019978308262297
    +Cost in iteration 25: 8.503577269809191e-05
    +Cost in iteration 50: 7.113629148054069e-05
    +Cost in iteration 75: 5.970536946996868e-05
    +Cost in iteration 100: 5.02763560369055e-05
    +Final cost : 5.02763560369055e-05
    +
    +
    +
    +
    +
    +
    + +
    +
    +
    +
    +
    +
    +
    +
    +

Let us now look at a scenario with rate-type control inputs.

    +
    +
    +
    +
    +
    + +
    amplitude = 40.
    +offset = 60.
    +period = duration /4.
    +
    +# We define a "zero-input", and a sine-input
    +zero_input = ZeroInput().generate_input(duration=duration+dt, dt=dt)
    +input = np.copy(zero_input)
    +input[0,1:-1] = offset + amplitude * np.sin(2.*np.pi*np.arange(0,duration-0.1, dt)/period) # other functions or random values can be used as well
    +
    +# We set the stimulus in x and y variables, and run the simulation
    +model.params["ext_exc_current"] = zero_input
    +model.params["ext_inh_current"] = zero_input
    +model.params["ext_exc_rate"] = input * 1e-3 # rate inputs need to be converted to kHz
    +model.params["ext_inh_rate"] = zero_input
    +
    +model.run()
    +
    +# Define the result of the stimulation as target
    +target = getstate(model)
    +target_input = np.concatenate( (zero_input, zero_input, input, zero_input), axis=0)[np.newaxis,:,:]
    +
    +# Remove stimuli and re-run the simulation
    +model.params["ext_exc_rate"] = zero_input
    +control = np.concatenate( (zero_input, zero_input, zero_input, zero_input), axis=0)[np.newaxis,:,:]
    +model.run()
    +
+# combine initial value and simulation result into one array
    +state = getstate(model)
    +
    +plot_oc_singlenode(duration, dt, state, target, control, target_input, plot_control_vars=[2,3])
    +
    + +
    +
    +
    +
    +
    + +
    +
    +
    +
    +
    +
    +
    + +
    # Control matrix needs to be adjusted for rate inputs
    +control_mat = np.zeros((1,len(model.input_vars)))
    +control_mat[0,2] = 1.
    +
    +model_controlled = oc_aln.OcAln(model, target, print_array=np.arange(0,501,25), control_matrix=control_mat, cost_matrix=cost_mat)
    +model_controlled.weights["w_p"] = 1. # default value 1
    +model_controlled.weights["w_2"] = 0. # default value 0
    +
    +# We run 500 iterations of the optimal control gradient descent algorithm
    +model_controlled.optimize(500)
    +
    +state = model_controlled.get_xs()
    +control = model_controlled.control
    +
    +plot_oc_singlenode(duration, dt, state, target, control*1e3, target_input, model_controlled.cost_history, plot_control_vars=[2,3])
    +
    + +
    +
    +
    +
    +
    +
    +Compute control for a deterministic system
    +Cost in iteration 0: 27.349397232974408
    +Cost in iteration 25: 0.0006390076320320428
    +Cost in iteration 50: 0.00014311978667798868
    +Cost in iteration 75: 8.017957661471726e-05
    +Cost in iteration 100: 5.679617359217007e-05
    +Cost in iteration 125: 4.306794192661556e-05
    +Cost in iteration 150: 3.376433119895472e-05
    +Cost in iteration 175: 2.7066420641127278e-05
    +Cost in iteration 200: 2.2059610014723193e-05
    +Cost in iteration 225: 1.8212160897041168e-05
    +Cost in iteration 250: 1.5191277735291038e-05
    +Cost in iteration 275: 1.2778303406474285e-05
    +Cost in iteration 300: 1.0888696043551817e-05
    +Cost in iteration 325: 9.243703911351409e-06
    +Cost in iteration 350: 7.899581967191086e-06
    +Cost in iteration 375: 6.787562684851147e-06
    +Cost in iteration 400: 5.859013881863671e-06
    +Cost in iteration 425: 5.077487368901499e-06
    +Cost in iteration 450: 4.439379983051779e-06
    +Cost in iteration 475: 3.85899283693207e-06
    +Cost in iteration 500: 3.3690715490197364e-06
    +Final cost : 3.3690715490197364e-06
    +
    +
    +
    +
    +
    +
    + +
    +
    +
    +
    +
    +
    +
    + +
    # Do another 100 iterations if you want to.
    +# Repeated execution will continue with further 100 iterations.
    +model_controlled.optimize(100)
    +state = model_controlled.get_xs()
    +control = model_controlled.control
    +plot_oc_singlenode(duration, dt, state, target, control*1e3, target_input, model_controlled.cost_history, plot_control_vars=[2,3])
    +
    + +
    +
    +
    +
    +
    +
    +Compute control for a deterministic system
    +Cost in iteration 0: 3.3690715490197364e-06
    +Cost in iteration 25: 2.9515384676759174e-06
    +Cost in iteration 50: 2.593417209868494e-06
    +Cost in iteration 75: 2.2845622320483142e-06
    +Cost in iteration 100: 2.024231674713015e-06
    +Final cost : 2.024231674713015e-06
    +
    +
    +
    +
    +
    +
    + +
    +
    +
    +
    +
    +
    +
    +
    +

    Network case

    +

Let us now study a simple 2-node network of model oscillators. We first define the coupling matrix and the distance matrix. We can then initialize the model.

    +
    +
    +
    +
    +
    + +
    cmat = np.array( [[0., 0.5], [1., 0.]] )  # diagonal elements are zero, connection strength is 1 (0.5) from node 0 to node 1 (from node 1 to node 0)
    +dmat = np.array( [[0., 0.], [0., 0.]] )  # no delay
    +
    +model = ALNModel(Cmat=cmat, Dmat=dmat)
    +model.params.duration = 10000
    +model.params.mue_ext_mean = 2. # up state
    +model.params.de = 0.0
    +model.params.di = 0.0
    +model.run()
    +setinitstate(model, getfinalstate(model))
    +
    +# we define the control input matrix to enable or disable certain channels and nodes
    +control_mat = np.zeros( (model.params.N, len(model.input_vars)) )
    +control_mat[0,0] = 1. # only allow inputs in x-channel in node 0
    +
    +amplitude = 1.
    +model.params["duration"] = duration
    +zero_input = ZeroInput().generate_input(duration=duration+dt, dt=dt)
    +input = np.copy(zero_input)
    +input[0,1:-3] = amplitude * np.sin(2.*np.pi*np.arange(0,duration-0.3, dt)/period) # other functions or random values can be used as well
    +
    +# We set the stimulus in x and y variables, and run the simulation
    +input_nw = np.concatenate( (np.vstack( [control_mat[0,0] * input, control_mat[0,1] * input, control_mat[0,2] * input, control_mat[0,3] * input] )[np.newaxis,:,:],
    +                            np.vstack( [control_mat[1,0] * input, control_mat[1,1] * input, control_mat[1,2] * input, control_mat[1,3] * input] )[np.newaxis,:,:]), axis=0)
    +zero_input_nw = np.concatenate( (np.vstack( [zero_input, zero_input, zero_input, zero_input] )[np.newaxis,:,:],
    +                                 np.vstack( [zero_input, zero_input, zero_input, zero_input] )[np.newaxis,:,:]), axis=0)
    +
    +model.params["ext_exc_current"] = input_nw[:,0,:]
    +model.params["ext_inh_current"] = input_nw[:,1,:]
    +model.params["ext_exc_rate"] = input_nw[:,2,:]
    +model.params["ext_inh_rate"] = input_nw[:,3,:]
    +model.run()
    +
    +# Define the result of the stimulation as target
    +target = getstate(model)
    +
    +# Remove stimuli and re-run the simulation
    +model.params["ext_exc_current"] = zero_input_nw[:,0,:]
    +model.params["ext_inh_current"] = zero_input_nw[:,1,:]
    +model.params["ext_exc_rate"] = zero_input_nw[:,2,:]
    +model.params["ext_inh_rate"] = zero_input_nw[:,3,:]
    +model.run()
    +
+# combine initial value and simulation result into one array
    +state = getstate(model)
    +plot_oc_network(model.params.N, duration, dt, state, target, zero_input_nw, input_nw)
    +
    + +
    +
    +
    +
    +
    + +
    +
    +
    +
    +
    +
    +
    + +
# we define the precision matrix to specify in which nodes and channels we measure deviations from the target
    +cost_mat = np.zeros( (model.params.N, len(model.output_vars)) )
+cost_mat[1,0] = 1. # only measure in the E-channel (rates_exc) in node 1
    +
    +# We set the external stimulation to zero. This is the "initial guess" for the OC algorithm
    +model.params["ext_exc_current"] = zero_input_nw[:,0,:]
    +model.params["ext_inh_current"] = zero_input_nw[:,1,:]
    +model.params["ext_exc_rate"] = zero_input_nw[:,2,:]
    +model.params["ext_inh_rate"] = zero_input_nw[:,3,:]
    +
    +# We load the optimal control class
+# print_array (optional parameter) defines for which iterations intermediate results will be printed
    +# Parameters will be taken from the input model
    +model_controlled = oc_aln.OcAln(model, target, print_array=np.arange(0,501,25), control_matrix=control_mat, cost_matrix=cost_mat)
    +
    +# We run 500 iterations of the optimal control gradient descent algorithm
    +model_controlled.optimize(500)
    +
    +state = model_controlled.get_xs()
    +control = model_controlled.control
    +
    +plot_oc_network(model.params.N, duration, dt, state, target, control, input_nw, model_controlled.cost_history, model_controlled.step_sizes_history)
    +
    + +
    +
    +
    +
    +
    +
    +Compute control for a deterministic system
    +Cost in iteration 0: 0.05681899553888795
    +Cost in iteration 25: 0.009049511507864006
    +Cost in iteration 50: 0.00385727901608276
    +Cost in iteration 75: 0.0018622667677526768
    +Cost in iteration 100: 0.000987085765866294
    +Cost in iteration 125: 0.000572356512723035
    +Cost in iteration 150: 0.0003547474327963845
    +Cost in iteration 175: 0.0002363751625995732
    +Cost in iteration 200: 0.0001619919185800181
    +Cost in iteration 225: 0.00011952382655835105
    +Cost in iteration 250: 9.020890267478555e-05
    +Cost in iteration 275: 7.169979753138072e-05
    +Cost in iteration 300: 5.8948947006216384e-05
    +Cost in iteration 325: 4.953649496402098e-05
    +Cost in iteration 350: 4.2578616654798227e-05
    +Cost in iteration 375: 3.721358584763165e-05
    +Cost in iteration 400: 3.294916084298363e-05
    +Cost in iteration 425: 2.9490826042506942e-05
    +Cost in iteration 450: 2.6637122691294857e-05
    +Cost in iteration 475: 2.418022517349344e-05
    +Cost in iteration 500: 2.213935579529806e-05
    +Final cost : 2.213935579529806e-05
    +
    +
    +
    +
    +
    +
    + +
    +
    +
    +
    +
    +
    +
    + +
# Do another 100 iterations if you want to.
+# Repeated execution will continue with a further 100 iterations.
+model_controlled.zero_step_encountered = False  # reset the flag set when a zero step size was encountered, so optimization can continue
    +model_controlled.optimize(100)
    +
    +state = model_controlled.get_xs()
    +control = model_controlled.control
    +plot_oc_network(model.params.N, duration, dt, state, target, control, input_nw, model_controlled.cost_history, model_controlled.step_sizes_history)
    +
    + +
    +
    +
    +
    +
    +
    +Compute control for a deterministic system
    +Cost in iteration 0: 2.213935579529806e-05
    +Cost in iteration 25: 2.039986650084248e-05
    +Cost in iteration 50: 1.890816061870718e-05
    +Cost in iteration 75: 1.7543052398445186e-05
    +Cost in iteration 100: 1.6372947909519095e-05
    +Cost in iteration 125: 1.535146855935076e-05
    +Cost in iteration 150: 1.4407226990366437e-05
    +Cost in iteration 175: 1.3578403645605011e-05
    +Cost in iteration 200: 1.2839061879178726e-05
    +Cost in iteration 225: 1.215663786521688e-05
    +Cost in iteration 250: 1.1540904218753432e-05
    +Cost in iteration 275: 1.098339286406832e-05
    +Cost in iteration 300: 1.0476920392110899e-05
    +Cost in iteration 325: 1.001955972944213e-05
    +Cost in iteration 350: 9.57055264939235e-06
    +Cost in iteration 375: 9.17392006953542e-06
    +Cost in iteration 400: 8.809334792766664e-06
    +Cost in iteration 425: 8.475824235515095e-06
    +Cost in iteration 450: 8.147435560163446e-06
    +Cost in iteration 475: 7.852707565165967e-06
    +Cost in iteration 500: 7.579247993018956e-06
    +Final cost : 7.579247993018956e-06
    +
    +
    +
    +
    +
    +
    + +
    +
    +
    +
    +
    +
    +
    +
    +

    Delayed network of neural populations

    +

    We now consider a network topology with delayed signalling between the two nodes.
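The signalling delay follows from the distance matrix and the model's signal speed (see the comment on dmat in the next cell); a minimal sketch, assuming params.signalV holds the signal speed:

# sketch: effective delays are distances divided by the signal speed
# (dmat and model are defined in the next cell)
delay_matrix = dmat / model.params.signalV
print(delay_matrix[1, 0])  # delay from node 0 to node 1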

    +
    +
    +
    +
    +
    + +
    cmat = np.array( [[0., 0.], [1., 0.]] ) # diagonal elements are zero, connection strength is 1 from node 0 to node 1
    +dmat = np.array( [[0., 0.], [18, 0.]] ) # distance from 0 to 1, delay is computed by dividing by the signal speed params.signalV
    +
    +model = ALNModel(Cmat=cmat, Dmat=dmat)
    +
    +model.params.mue_ext_mean = 2. # up state
    +model.run()
    +setinitstate(model, getfinalstate(model))
    +
    +duration = 6.
    +model.params.duration = duration
    +model.run()
    +
    +# we define the control input matrix to enable or disable certain channels and nodes
    +control_mat = np.zeros( (model.params.N, len(model.state_vars)) )
    +control_mat[0,0] = 1. # only allow inputs in E-channel in node 0
    +
    +zero_input = ZeroInput().generate_input(duration=duration+dt, dt=dt)
    +input = zero_input.copy()
    +input[0,10] = 10. 
    +input[0,20] = 10.
    +input[0,30] = 10. # Three pulses as control input
    +
    +input_nw = np.concatenate( (np.vstack( [control_mat[0,0] * input, control_mat[0,1] * input, control_mat[0,2] * input, control_mat[0,3] * input] )[np.newaxis,:,:],
    +                            np.vstack( [control_mat[1,0] * input, control_mat[1,1] * input, control_mat[1,2] * input, control_mat[1,3] * input] )[np.newaxis,:,:]), axis=0)
    +zero_input_nw = np.concatenate( (np.vstack( [zero_input, zero_input, zero_input, zero_input] )[np.newaxis,:,:],
    +                                 np.vstack( [zero_input, zero_input, zero_input, zero_input] )[np.newaxis,:,:]), axis=0)
    +
    +model.params["ext_exc_current"] = input_nw[:,0,:]
    +model.params["ext_inh_current"] = input_nw[:,1,:]
    +model.params["ext_exc_rate"] = input_nw[:,2,:]
    +model.params["ext_inh_rate"] = input_nw[:,3,:]
    +model.run()
    +
    +# Define the result of the stimulation as target
    +target = getstate(model)
    +
    +# Remove stimuli and re-run the simulation
    +model.params["ext_exc_current"] = zero_input_nw[:,0,:]
    +model.params["ext_inh_current"] = zero_input_nw[:,1,:]
    +model.params["ext_exc_rate"] = zero_input_nw[:,2,:]
    +model.params["ext_inh_rate"] = zero_input_nw[:,3,:]
    +model.run()
    +
+# combine initial value and simulation result into one array
    +state = getstate(model)
    +plot_oc_network(model.params.N, duration, dt, state, target, zero_input_nw, input_nw)
    +
    + +
    +
    +
    +
    +
    + +
    +
    +
    +
    +
    +
    +
    + +
    # We load the optimal control class
+# print_array (optional parameter) defines for which iterations intermediate results will be printed
    +# Parameters will be taken from the input model
    +model.params["ext_exc_current"] = zero_input_nw[:,0,:]
    +model.params["ext_inh_current"] = zero_input_nw[:,1,:]
    +model.params["ext_exc_rate"] = zero_input_nw[:,2,:]
    +model.params["ext_inh_rate"] = zero_input_nw[:,3,:]
    +model_controlled = oc_aln.OcAln(model, target, print_array=np.arange(0,501,25), control_matrix=control_mat, cost_matrix=cost_mat)
    +
    +# We run 500 iterations of the optimal control gradient descent algorithm
    +model_controlled.optimize(500)
    +
    +state = model_controlled.get_xs()
    +control = model_controlled.control
    +plot_oc_network(model.params.N, duration, dt, state, target, control, input_nw, model_controlled.cost_history, model_controlled.step_sizes_history)
    +
    + +
    +
    +
    +
    +
    +
    +Compute control for a deterministic system
    +Cost in iteration 0: 0.3591626682338002
    +Cost in iteration 25: 0.0009615249415297563
    +Cost in iteration 50: 0.0007333032937119198
    +Cost in iteration 75: 0.0006259951827765307
    +Cost in iteration 100: 0.0005505407696329882
    +Cost in iteration 125: 0.0004885380123600698
    +Cost in iteration 150: 0.00043735661984762556
    +Cost in iteration 175: 0.00039467203255346946
    +Cost in iteration 200: 0.00035575090435742684
    +Cost in iteration 225: 0.00032290389213762856
    +Cost in iteration 250: 0.0002955564149879958
    +Cost in iteration 275: 0.0002706822302509814
    +Cost in iteration 300: 0.0002481078663686744
    +Cost in iteration 325: 0.0002287228008388444
    +Cost in iteration 350: 0.00021138912691190224
    +Cost in iteration 375: 0.00019614824660540533
    +Cost in iteration 400: 0.00018255547996069997
    +Cost in iteration 425: 0.00017091020493998155
    +Cost in iteration 450: 0.00016022332136043902
    +Cost in iteration 475: 0.0001503441843619978
    +Cost in iteration 500: 0.00014206923879279553
    +Final cost : 0.00014206923879279553
    +
    +
    +
    +
    +
    +
    + +
    +
    +
    +
    +
    +
    +
    + +
# perform another 100 iterations to improve the result
+# repeat execution to add another 100 iterations
+# convergence to the input stimulus is relatively slow for the ALN model
    +model_controlled.optimize(100)
    +state = model_controlled.get_xs()
    +control = model_controlled.control
    +plot_oc_network(model.params.N, duration, dt, state, target, control, input_nw, model_controlled.cost_history, model_controlled.step_sizes_history)
    +
    + +
    +
    +
    +
    +
    +
    +Compute control for a deterministic system
    +Cost in iteration 0: 0.00014206923879279553
    +Cost in iteration 25: 0.0001344899989412419
    +Cost in iteration 50: 0.00012771190226165116
    +Cost in iteration 75: 0.00012170773950612534
    +Cost in iteration 100: 0.0001161846252066297
    +Final cost : 0.0001161846252066297
    +
    +
    +
    +
    +
    +
    + +
    +
    +
    +
    +
    +
    +
    + +
    
    +
    + +
    +
    + + + + + + +
    +
    + + +
    + +
    + + + +
    +
    +
    +
    + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/exploration/boxsearch/index.html b/exploration/boxsearch/index.html index e96ae1d..a28a8f8 100644 --- a/exploration/boxsearch/index.html +++ b/exploration/boxsearch/index.html @@ -9,7 +9,7 @@ - + @@ -708,6 +708,86 @@ + + + + + +
  • + + + + + Example 5.1 oc phenomenological model deterministic + + + + +
  • + + + + + + + + + +
  • + + + + + Example 5.2 oc wc model deterministic + + + + +
  • + + + + + + + + + +
  • + + + + + Example 5.3 oc wc model noisy + + + + +
  • + + + + + + + + + +
  • + + + + + Example 5.4 oc aln model deterministic + + + + +
  • + + + + diff --git a/index.html b/index.html index e76b6b2..4c8c780 100644 --- a/index.html +++ b/index.html @@ -323,6 +323,13 @@ Evolutionary optimization + + +
  • + + Optimal control + +
  • @@ -852,6 +859,86 @@ + + + + + +
  • + + + + + Example 5.1 oc phenomenological model deterministic + + + + +
  • + + + + + + + + + +
  • + + + + + Example 5.2 oc wc model deterministic + + + + +
  • + + + + + + + + + +
  • + + + + + Example 5.3 oc wc model noisy + + + + +
  • + + + + + + + + + +
  • + + + + + Example 5.4 oc aln model deterministic + + + + +
  • + + + + @@ -1289,6 +1376,13 @@ Evolutionary optimization + + +
  • + + Optimal control + +
  • @@ -1434,6 +1528,10 @@

    Project layout

├── optimize/                   # Optimization submodule
    ├── evolution/              # Evolutionary optimization
    └── exploration/            # Parameter exploration
+├── control/optimal_control/   # Optimal control submodule
+    ├── oc.py                  # Optimal control base class
+    ├── cost_functions.py      # cost functions for OC
+    └── /.../                  # Implemented OC models
├── data/                       # Empirical datasets (structural, functional)
├── utils/                      # Utility belt
    ├── atlases.py              # Atlases (Region names, coordinates)
@@ -1456,6 +1554,7 @@

    Examples

  • Example 0.6 - Minimal example of how to implement your own model in neurolib
  • Example 1.2 - Parameter exploration of a brain network and fitting to BOLD data
  • Example 2.0 - A simple example of the evolutionary optimization framework
  • +
  • Example 5.2 - Example of optimal control of the noise-free Wilson-Cowan model
  • A basic overview of the functionality of neurolib is also given in the following.

    Single node

    @@ -1580,6 +1679,26 @@

    Evolutionary optimization

    +

    Optimal control

    +

The optimal control module enables you to compute efficient stimulation for your neural model. If you know what your output should look like, this module computes the optimal input. Detailed example notebooks can be found in the examples folder (examples 5.1, 5.2, 5.3, 5.4). In optimal control computations, you trade precision with respect to a target against control strength. You can determine how much each contribution affects the result by setting the weights accordingly (a short sketch follows the code below).

    +

    To compute an optimal control signal, you need to create a model (e.g., an FHN model) and define a target state (e.g., a sine curve with period 2). +

import numpy as np
+from neurolib.models.fhn import FHNModel
    +model = FHNModel()
    +
    +duration = 10.
    +model.params["duration"] = duration
    +dt = model.params["dt"]
    +
    +period = 2.
    +target = np.sin(2. * np.pi * np.arange(0, duration+dt, dt) / period)
    +
+You can then create a controlled model and run the iterative optimization to find the most efficient control input. The optimal control signal and the controlled model activity can then be read from the controlled model. 
from neurolib.control.optimal_control import oc_fhn
+model_controlled = oc_fhn.OcFhn(model, target)
    +model_controlled.optimize(500) # run 500 iterations
    +optimal_control = model_controlled.control
    +optimal_state = model_controlled.get_xs()
    +
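The trade-off between precision and control strength described above is set through the weights dictionary of the controlled model (attribute names as used in example 5.4); a minimal sketch, to be run before optimize():

# sketch: weights trade target precision against control strength
model_controlled.weights["w_p"] = 1.  # precision weight: penalize deviation from the target
model_controlled.weights["w_2"] = 0.  # L2 weight: penalize control strength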

    +

    For a comprehensive study on optimal control of the Wilson-Cowan model based on the neurolib optimal control module, see Salfenmoser, L. & Obermayer, K. Optimal control of a Wilson–Cowan model of neural population dynamics. Chaos 33, 043135 (2023). https://doi.org/10.1063/5.0144682.

    More information

    Built With

    neurolib is built using other amazing open source projects:

    @@ -1606,9 +1725,10 @@

    How to cite

    Get in touch

    Caglar Cakan (cakan@ni.tu-berlin.de)
    Department of Software Engineering and Theoretical Computer Science, Technische Universität Berlin, Germany
    -Bernstein Center for Computational Neuroscience Berlin, Germany

    +Bernstein Center for Computational Neuroscience Berlin, Germany

    Acknowledgments

    This work was supported by the Deutsche Forschungsgemeinschaft (DFG, German Research Foundation) with the project number 327654276 (SFB 1315) and the Research Training Group GRK1589/2.

    +

    The optimal control module was developed by Lena Salfenmoser and Martin Krück supported by the DFG project 163436311 (SFB 910).

    diff --git a/models/model/index.html b/models/model/index.html index 04eb304..4a1bc5f 100644 --- a/models/model/index.html +++ b/models/model/index.html @@ -708,6 +708,86 @@ + + + + + +
  • + + + + + Example 5.1 oc phenomenological model deterministic + + + + +
  • + + + + + + + + + +
  • + + + + + Example 5.2 oc wc model deterministic + + + + +
  • + + + + + + + + + +
  • + + + + + Example 5.3 oc wc model noisy + + + + +
  • + + + + + + + + + +
  • + + + + + Example 5.4 oc aln model deterministic + + + + +
  • + + + + diff --git a/models/parameters/index.html b/models/parameters/index.html index ebb119a..8701e40 100644 --- a/models/parameters/index.html +++ b/models/parameters/index.html @@ -708,6 +708,86 @@ + + + + + +
  • + + + + + Example 5.1 oc phenomenological model deterministic + + + + +
  • + + + + + + + + + +
  • + + + + + Example 5.2 oc wc model deterministic + + + + +
  • + + + + + + + + + +
  • + + + + + Example 5.3 oc wc model noisy + + + + +
  • + + + + + + + + + +
  • + + + + + Example 5.4 oc aln model deterministic + + + + +
  • + + + + diff --git a/optimization/evolution/index.html b/optimization/evolution/index.html index e76b52c..84c17de 100644 --- a/optimization/evolution/index.html +++ b/optimization/evolution/index.html @@ -708,6 +708,86 @@ + + + + + +
  • + + + + + Example 5.1 oc phenomenological model deterministic + + + + +
  • + + + + + + + + + +
  • + + + + + Example 5.2 oc wc model deterministic + + + + +
  • + + + + + + + + + +
  • + + + + + Example 5.3 oc wc model noisy + + + + +
  • + + + + + + + + + +
  • + + + + + Example 5.4 oc aln model deterministic + + + + +
  • + + + + diff --git a/search/search_index.json b/search/search_index.json index 9429f77..23f3fad 100644 --- a/search/search_index.json +++ b/search/search_index.json @@ -1 +1 @@ -{"config":{"lang":["en"],"separator":"[\\s\\-]+","pipeline":["stopWordFilter"]},"docs":[{"location":"","title":"Home","text":""},{"location":"#what-is-neurolib","title":"What is neurolib?","text":"

    neurolib is a simulation and optimization framework for whole-brain modeling. It allows you to implement your own neural mass models which can simulate fMRI BOLD activity. neurolib helps you to analyse your simulations, to load and handle structural and functional brain data, and to use powerful evolutionary algorithms to tune your model's parameters and fit it to empirical data.

You can choose from different neural mass models to simulate the activity of each brain area. The main implementation is a mean-field model of spiking adaptive exponential integrate-and-fire neurons (AdEx) called ALNModel where each brain area contains two populations of excitatory and inhibitory neurons. An analysis and validation of the ALNModel model can be found in our paper.

    \ud83d\udcda Please read the gentle introduction to neurolib for an overview of the basic functionality and the science behind whole-brain simulations or read the documentation for getting started.

To browse the source code of neurolib, visit our GitHub repository.

    \ud83d\udcdd Cite the following paper if you use neurolib for your own research:

    Cakan, C., Jajcay, N. & Obermayer, K. neurolib: A Simulation Framework for Whole-Brain Neural Mass Modeling. Cogn. Comput. (2021).

    The figure below shows a schematic of how a brain network is constructed:

    Examples: Single node simulation \u00b7 Whole-brain network \u00b7 Parameter exploration \u00b7 Evolutionary optimization

    "},{"location":"#whole-brain-modeling","title":"Whole-brain modeling","text":"

    Typically, in whole-brain modeling, diffusion tensor imaging (DTI) is used to infer the structural connectivity (the connection strengths) between different brain areas. In a DTI scan, the direction of the diffusion of molecules is measured across the whole brain. Using tractography, this information can yield the distribution of axonal fibers in the brain that connect distant brain areas, called the connectome. Together with an atlas that divides the brain into distinct areas, a matrix can be computed that encodes how many fibers go from one area to another, the so-called structural connectivity (SC) matrix. This matrix defines the coupling strengths between brain areas and acts as an adjacency matrix of the brain network. The fiber length determines the signal transmission delay between all brain areas. Combining the structural data with a computational model of the neuronal activity of each brain area, we can create a dynamical model of the whole brain.

    The resulting whole-brain model consists of interconnected brain areas, with each brain area having their internal neural dynamics. The neural activity can also be used to simulate hemodynamic BOLD activity using the Balloon-Windkessel model, which can be compared to empirical fMRI data. Often, BOLD activity is used to compute correlations of activity between brain areas, the so called resting state functional connectivity, resulting in a matrix with correlations between each brain area. This matrix can then be fitted to empirical fMRI recordings of the resting-state activity of the brain.

    Below is an animation of the neuronal activity of a whole-brain model plotted on a brain.

    "},{"location":"#installation","title":"Installation","text":"

    The easiest way to get going is to install the pypi package using pip:

    pip install neurolib\n
    Alternatively, you can also clone this repository and install all dependencies with

    git clone https://github.com/neurolib-dev/neurolib.git\ncd neurolib/\npip install -r requirements.txt\npip install .\n
    It is recommended to clone or fork the entire repository since it will also include all examples and tests.

    "},{"location":"#project-layout","title":"Project layout","text":"
    neurolib/                   # Main module\n\u251c\u2500\u2500 models/                     # Neural mass models\n    \u251c\u2500\u2500model.py                 # Base model class\n    \u2514\u2500\u2500 /.../               # Implemented mass models\n\u251c\u2500\u2500 optimize/                   # Optimization submodule\n    \u251c\u2500\u2500 evolution/              # Evolutionary optimization\n    \u2514\u2500\u2500 exploration/            # Parameter exploration\n\u251c\u2500\u2500 data/                   # Empirical datasets (structural, functional)\n\u251c\u2500\u2500 utils/                  # Utility belt\n    \u251c\u2500\u2500 atlases.py              # Atlases (Region names, coordinates)\n    \u251c\u2500\u2500 collections.py          # Custom data types\n    \u251c\u2500\u2500 functions.py            # Useful functions\n    \u251c\u2500\u2500 loadData.py             # Dataset loader\n    \u251c\u2500\u2500 parameterSpace.py           # Parameter space\n    \u251c\u2500\u2500 saver.py                # Save simulation outputs\n    \u251c\u2500\u2500 signal.py               # Signal processing functions\n    \u2514\u2500\u2500 stimulus.py             # Stimulus construction\n\u251c\u2500\u2500 examples/                   # Example Jupyter notebooks\n\u251c\u2500\u2500 docs/                   # Documentation \n\u2514\u2500\u2500 tests/                  # Automated tests\n
    "},{"location":"#examples","title":"Examples","text":"

    Example IPython Notebooks on how to use the library can be found in the ./examples/ directory, don't forget to check them out! You can run the examples in your browser using Binder by clicking here or one of the following links:

    A basic overview of the functionality of neurolib is also given in the following.

    "},{"location":"#single-node","title":"Single node","text":"

This example is available in detail as an IPython Notebook.

    To create a single aln model with the default parameters, simply run

    from neurolib.models.aln import ALNModel\n\nmodel = ALNModel()\nmodel.params['sigma_ou'] = 0.1 # add some noise\n\nmodel.run()\n

    The results from this small simulation can be plotted easily:

    import matplotlib.pyplot as plt\nplt.plot(model.t, model.output.T)\n

    "},{"location":"#whole-brain-network","title":"Whole-brain network","text":"

A detailed example is available as an IPython Notebook.

    To simulate a whole-brain network model, first we need to load a DTI and a resting-state fMRI dataset. neurolib already provides some example data for you:

    from neurolib.utils.loadData import Dataset\n\nds = Dataset(\"gw\")\n
    The dataset that we just loaded, looks like this:

    We initialize a model with the dataset and run it:

    model = ALNModel(Cmat = ds.Cmat, Dmat = ds.Dmat)\nmodel.params['duration'] = 5*60*1000 # in ms, simulates for 5 minutes\n\nmodel.run(bold=True)\n
    This can take several minutes to compute, since we are simulating 80 brain regions for 5 minutes realtime. Note that we specified bold=True which simulates the BOLD model in parallel to the neuronal model. The resulting firing rates and BOLD functional connectivity looks like this:

    The quality of the fit of this simulation can be computed by correlating the simulated functional connectivity matrix above to the empirical resting-state functional connectivity for each subject of the dataset. This gives us an estimate of how well the model reproduces inter-areal BOLD correlations. As a rule of thumb, a value above 0.5 is considered good.

    We can compute the quality of the fit of the simulated data using func.fc() which calculates a functional connectivity matrix of N (N = number of brain regions) time series. We use func.matrix_correlation() to compare this matrix to empirical data.

    scores = [func.matrix_correlation(func.fc(model.BOLD.BOLD[:, 5:]), fcemp) for fcemp in ds.FCs]\n\nprint(\"Correlation per subject:\", [f\"{s:.2}\" for s in scores])\nprint(f\"Mean FC/FC correlation: {np.mean(scores):.2}\")\n
    Correlation per subject: ['0.34', '0.61', '0.54', '0.7', '0.54', '0.64', '0.69', '0.47', '0.59', '0.72', '0.58']\nMean FC/FC correlation: 0.58\n

    "},{"location":"#parameter-exploration","title":"Parameter exploration","text":"

A detailed example of a single-node exploration is available as an IPython Notebook. For an example of a brain network exploration, see this Notebook.

    Whenever you work with a model, it is of great importance to know what kind of dynamics it exhibits given a certain set of parameters. It is often useful to get an overview of the state space of a given model of interest. For example in the case of aln, the dynamics depends a lot on the mean inputs to the excitatory and the inhibitory population. neurolib makes it very easy to quickly explore parameter spaces of a given model:

    # create model\nmodel = ALNModel()\n# define the parameter space to explore\nparameters = ParameterSpace({\"mue_ext_mean\": np.linspace(0, 3, 21),  # input to E\n        \"mui_ext_mean\": np.linspace(0, 3, 21)}) # input to I\n\n# define exploration              \nsearch = BoxSearch(model, parameters)\n\nsearch.run()                \n
That's it! You can now use the built-in functions to load the simulation results from disk and perform your analysis:

    search.loadResults()\n\n# calculate maximum firing rate for each parameter\nfor i in search.dfResults.index:\n    search.dfResults.loc[i, 'max_r'] = np.max(search.results[i]['rates_exc'][:, -int(1000/model.params['dt']):])\n
    We can plot the results to get something close to a bifurcation diagram!

    "},{"location":"#evolutionary-optimization","title":"Evolutionary optimization","text":"

A detailed example is available as an IPython Notebook.

neurolib also implements evolutionary parameter optimization, which works particularly well with brain networks. In an evolutionary algorithm, each simulation is represented as an individual and the parameters of the simulation, for example coupling strengths or noise level values, are represented as the genes of each individual. An individual is part of a population. In each generation, individuals are evaluated and ranked according to a fitness criterion. For whole-brain network simulations, this could be the fit of the simulated activity to empirical data. Then, individuals with a high fitness value are selected as parents and mate to create offspring. These offspring undergo random mutations of their genes. After all offspring are evaluated, the best individuals of the population are selected to transition into the next generation. This process goes on for a given number of generations until a stopping criterion is reached. This could be a predefined maximum number of generations or when a large enough population with high fitness values is found.

    An example genealogy tree is shown below. You can see the evolution starting at the top and individuals reproducing generation by generation. The color indicates the fitness.

neurolib makes it very easy to set up your own evolutionary optimization; everything else is handled under the hood. You can choose between two implemented evolutionary algorithms: adaptive is a Gaussian-mutation and rank-selection algorithm with an adaptive step size that ensures convergence (a schematic is shown in the image below), and nsga2 is an implementation of the popular multi-objective optimization algorithm by Deb et al. 2002 (a sketch of how to select the algorithm is shown at the end of this section).

Of course, if you like, you can dig deeper and define your own selection, mutation, and mating operators. In the following demonstration, we will simply evaluate the fitness of each individual as its distance to the unit circle. After a couple of generations of mating, mutating, and selecting, only individuals that lie close to the circle should survive:

from neurolib.utils.parameterSpace import ParameterSpace\nfrom neurolib.optimize.evolution import Evolution\n\ndef optimize_me(traj):\n    ind = evolution.getIndividualFromTraj(traj)\n\n    # let's make a circle\n    fitness_result = abs((ind.x**2 + ind.y**2) - 1)\n\n    # gather results\n    fitness_tuple = (fitness_result,)\n    result_dict = {\"result\": [fitness_result]}\n\n    return fitness_tuple, result_dict\n\n# we define a parameter space and its boundaries\npars = ParameterSpace(['x', 'y'], [[-5.0, 5.0], [-5.0, 5.0]])\n\n# initialize the evolution and go\nevolution = Evolution(optimize_me, pars, weightList=[-1.0], POP_INIT_SIZE=100, POP_SIZE=50, NGEN=10)\nevolution.run()\n

    That's it! Now we can check the results:

    evolution.loadResults()\nevolution.info(plot=True)\n

This gives us a summary of the last generation and plots a distribution of the individuals (and their parameters). Below is an animation of 10 generations of the evolutionary process. As you can see, after a couple of generations, all remaining individuals lie very close to the unit circle.
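If you would rather use the nsga2 algorithm mentioned above, the choice can be made when constructing the optimizer. A minimal sketch, assuming the Evolution constructor accepts an algorithm keyword argument (as in recent neurolib versions):

# sketch: select the evolutionary algorithm at construction time\n# (the 'algorithm' keyword is an assumption here; 'adaptive' is the default)\nevolution = Evolution(optimize_me, pars, weightList=[-1.0],\n                      POP_INIT_SIZE=100, POP_SIZE=50, NGEN=10,\n                      algorithm='nsga2')\nevolution.run()\n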

    "},{"location":"#more-information","title":"More information","text":""},{"location":"#built-with","title":"Built With","text":"

neurolib is built using other amazing open-source projects.

    "},{"location":"#how-to-cite","title":"How to cite","text":"

    Cakan, C., Jajcay, N. & Obermayer, K. neurolib: A Simulation Framework for Whole-Brain Neural Mass Modeling. Cogn. Comput. (2021). https://doi.org/10.1007/s12559-021-09931-9

    @article{cakan2021,\nauthor={Cakan, Caglar and Jajcay, Nikola and Obermayer, Klaus},\ntitle={neurolib: A Simulation Framework for Whole-Brain Neural Mass Modeling},\njournal={Cognitive Computation},\nyear={2021},\nmonth={Oct},\nissn={1866-9964},\ndoi={10.1007/s12559-021-09931-9},\nurl={https://doi.org/10.1007/s12559-021-09931-9}\n}\n

    "},{"location":"#get-in-touch","title":"Get in touch","text":"

    Caglar Cakan (cakan@ni.tu-berlin.de) Department of Software Engineering and Theoretical Computer Science, Technische Universit\u00e4t Berlin, Germany Bernstein Center for Computational Neuroscience Berlin, Germany

    "},{"location":"#acknowledgments","title":"Acknowledgments","text":"

    This work was supported by the Deutsche Forschungsgemeinschaft (DFG, German Research Foundation) with the project number 327654276 (SFB 1315) and the Research Training Group GRK1589/2.

    "},{"location":"contributing/","title":"Contributing to neurolib","text":"

Thank you for your interest in contributing to neurolib. We welcome bug reports through the issues tab and pull requests for fixes or improvements. You are warmly invited to join our development efforts and make brain network modeling easier and more useful for all researchers.

    "},{"location":"contributing/#pull-requests","title":"Pull requests","text":"

To propose a change to neurolib's code, you should first fork the repository to your own GitHub account. Then, create a branch and make your changes. You can then open a pull request against neurolib's repository, and we will review and discuss your proposed changes.

More information on how to make pull requests can be found in the GitHub help pages.

    "},{"location":"contributing/#maintaining-code","title":"Maintaining code","text":"

Please be aware that we have a conservative policy for implementing new functionality. All new features need to be maintained, sometimes forever. We are a small team of developers and can only maintain a limited amount of code. Therefore, ideally, you should also feel responsible for the changes you propose and maintain them after they become part of neurolib.

    "},{"location":"contributing/#code-style","title":"Code style","text":"

We are using the black code formatter with the additional argument --line-length=120. It's called the \"uncompromising formatter\" because it is completely deterministic and you have literally no control over how your code will look. We like that! We recommend using black directly in your IDE, for example in VSCode.
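For example, formatting the whole package from the command line could look like this (the path is illustrative):

# format all Python files in the package with a 120-character line length\nblack --line-length=120 neurolib/\n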

    "},{"location":"contributing/#commenting-code","title":"Commenting Code","text":"

We are using the sphinx format for commenting code. Comments are incredibly important to us since neurolib is supposed to be a library of user-facing code. It's encouraged to read the code, change it, and build something on top of it. Our users are coders. Please write as many comments as you can, including a description of each function and method and its arguments, but also single-line comments for the code itself.
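A docstring in this sphinx style could look like the following (the function itself is just an illustration):

def scale_rates(rates, factor=1.0):\n    \"\"\"Scale a firing-rate timeseries by a constant factor.\n\n    :param rates: firing rates of shape (N, t)\n    :type rates: numpy.ndarray\n    :param factor: multiplicative scaling factor, defaults to 1.0\n    :type factor: float, optional\n    :return: scaled firing rates, same shape as the input\n    :rtype: numpy.ndarray\n    \"\"\"\n    # a single-line comment explaining the step below\n    return rates * factor\n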

    "},{"location":"contributing/#implementing-a-neural-mass-model","title":"Implementing a neural mass model","text":"

    You are very welcome to implement your favorite neural mass model and contribute it to neurolib.

    "},{"location":"contributing/#contributing-examples","title":"Contributing examples","text":"

    We very much welcome example contributions since they help new users to learn how to make use of neurolib. They can include basic usage examples or tutorials of neurolib's features, or a demonstration of how to solve a specific scientific task using neural mass models or whole-brain networks.

    "},{"location":"contributing/#contributing-brain-data","title":"Contributing brain data","text":"

We have a few small datasets already in neurolib so everyone can start simulating right away. If you'd like to contribute more data to the project, please feel invited to do so. We're looking for more structural connectivity matrices and fiber length matrices in the MATLAB matrix .mat format (which can be loaded with scipy.io.loadmat). We also appreciate BOLD data, EEG data, or MEG data. Other modalities could be useful as well. Please be aware that the data has to be in a parcellated form, i.e., the brain areas need to be organized according to an atlas like the AAL2 atlas (or others).
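For reference, a sketch of how such a contributed file could be checked before submission (the file name and variable key are hypothetical):

import scipy.io\n\n# 'my_dataset.mat' and the key 'Cmat' are placeholders for your own file\nmat = scipy.io.loadmat('my_dataset.mat')\nCmat = mat['Cmat']  # structural connectivity, expected shape (N, N)\nprint(Cmat.shape)\n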

    "},{"location":"examples/example-0-aln-minimal/","title":"Example 0 aln minimal","text":"
    # change to the root directory of the project\nimport os\nif os.getcwd().split(\"/\")[-1] == \"examples\":\n    os.chdir('..')\n\n# This will reload all imports as soon as the code changes\n%load_ext autoreload\n%autoreload 2    \n
    try:\n    import matplotlib.pyplot as plt\nexcept ImportError:\n    import sys\n    !{sys.executable} -m pip install matplotlib\n    import matplotlib.pyplot as plt\n\nimport numpy as np\nimport scipy\n\n# Let's import the aln model\nfrom neurolib.models.aln import ALNModel\n\n# Some useful functions are provided here\nimport neurolib.utils.functions as func\n\n# a nice color map\nplt.rcParams['image.cmap'] = 'plasma'\n
    # Create the model\nmodel = ALNModel()\n\n# Each model comes with a set of default parameters which are a dictionary. \n# Let's change the parameter that controls the duration of a simulation to 10s.\nmodel.params['duration'] = 10.0 * 1000  \n\n# For convenience, we could also use:\nmodel.params.duration = 10.0 * 1000\n\n# In the aln model an Ornstein-Uhlenbeck process is simulated in parallel\n# as the source of input noise fluctuations. Here we can set the variance\n# of the process. \n# For more info: https://en.wikipedia.org/wiki/Ornstein%E2%80%93Uhlenbeck_process \n# Let's add some noise.\nmodel.params['sigma_ou'] = 0.1\n\n# Finally, we run the model\nmodel.run()\n

Accessing the outputs is straightforward. Every model's outputs are stored in the model.outputs attribute. Using the specific name of each of the model's outputs, they can also be accessed as keys of the Model object, i.e. model['rates_exc'].

    plt.plot(model['t'], model['rates_exc'].T, lw=2, c='k')\nplt.xlabel(\"t [ms]\")\nplt.ylabel(\"Rate [Hz]\")\nplt.xlim(1000, 2000);\n
    # Outputs are also available as an xr DataArray\nxr = model.xr()\nprint(xr.dims)\n# outputs can also be accessed via attributes in dot.notation\nprint(\"rates_exc\", model.rates_exc)\n
    \n('output', 'space', 'time')\nrates_exc [[0.54644307 0.48676051 0.43664265 ... 0.06910043 0.06969732 0.07031085]]\n\n

Bifurcation diagrams can give us an overview of how different parameters of the model affect its dynamics. The simplest method for drawing a bifurcation diagram is to change the relevant parameter step by step and record the model's behavior in response to these changes. In this example, we want to see how the model's dynamics change with respect to the external input currents to the excitatory population. These input currents could stem from couplings with other nodes in a brain network, or they could model external factors like electrical stimulation.

    Below, you can see a schematic of the aln model. As you can see, a single node consists of one excitatory (red) and one inhibitory population (blue). The parameter that controls the mean input to the excitatory population is \\(\\mu_{E}\\) or model.params[\"mue_ext_mean\"] .

    Let's first decrease the duration of a single run so we can scan the parameter space a bit faster and let's also disable the noisy input.

    model.params['duration'] = 2.0 * 1000  \nmodel.params['sigma_ou'] = 0.0\n

    Let's fix the input to the inhibitory population:

    model.params['mui_ext_mean'] = 0.5\n

    We draw a one-dimensional bifurcation diagram, so it is enough to loop through different values of mue_ext_mean and record the minimum and maximum of the rate for each parameter.

    max_rate_e = []\nmin_rate_e = []\n# these are the different input values that we want to scan\nmue_inputs = np.linspace(0, 2, 50)\nfor mue in mue_inputs:\n    # Note: this has to be a vector since it is input for all nodes\n    # (but we have only one node in this example)\n    model.params['mue_ext_mean'] = mue\n    model.run()\n    # we add the maximum and the minimum of the last second of the \n    # simulation to a list\n    max_rate_e.append(np.max(model.output[0, -int(1000/model.params['dt']):]))\n    min_rate_e.append(np.min(model.output[0, -int(1000/model.params['dt']):]))\n

    Let's plot the results!

    plt.plot(mue_inputs, max_rate_e, c='k', lw = 2)\nplt.plot(mue_inputs, min_rate_e, c='k', lw = 2)\nplt.title(\"Bifurcation diagram of the aln model\")\nplt.xlabel(\"Input to excitatory population\")\nplt.ylabel(\"Min / max firing rate\")\n
    \nText(0, 0.5, 'Min / max firing rate')\n

    neurolib comes with some example datasets for exploring its functionality. Please be aware that these datasets are not tested and should not be used for your research, only for experimentation with the software.

A dataset for whole-brain modeling can consist of the following parts: a structural connectivity matrix of coupling strengths between brain areas, a fiber length (or delay) matrix from which signal transmission delays are computed, and empirical functional data such as BOLD functional connectivity matrices.

We can load a Dataset by passing its name to the constructor.

    from neurolib.utils.loadData import Dataset\nds = Dataset(\"gw\")\n

We now create the aln model with a structural connectivity matrix and a delay matrix. In order to achieve a good fit of the BOLD activity to the empirical data, the model has to run for quite a while. As a rule of thumb, a simulation of resting-state BOLD activity should not be shorter than 3 minutes and preferably longer than 5 minutes of real time. If the empirical recordings are, for example, 10 minutes long, then ideally a 10-minute simulation would be used to compare the output of the model to the resting-state recording.

    model = ALNModel(Cmat = ds.Cmat, Dmat = ds.Dmat)\n\nmodel.params['duration'] = 0.2*60*1000 \n# Info: value 0.2*60*1000 is low for testing\n# use 5*60*1000 for real simulation\n

    After some optimization to the resting-state fMRI data of the dataset, we found a set of parameters that creates interesting whole-brain dynamics. We set the mean input of the excitatory and the inhibitory population to be close to the E-I limit cycle.

    model.params['mue_ext_mean'] = 1.57\nmodel.params['mui_ext_mean'] = 1.6\n# We set an appropriate level of noise\nmodel.params['sigma_ou'] = 0.09\n# And turn on adaptation with a low value of spike-triggered adaptation currents.\nmodel.params['b'] = 5.0\n

Let's have a look at what the data looks like. We can access the data of each model by calling its internal attributes. Here, we plot the structural connectivity matrix via model.params['Cmat'] and the fiber length matrix via model.params['lengthMat']. Of course, we can also access the dataset using the Dataset object itself. For example, the functional connectivity matrices of the BOLD timeseries in the dataset are given as a list in ds.FCs.

    from matplotlib.colors import LogNorm\nfig, axs = plt.subplots(1, 3, figsize=(12,8), dpi=75)\nfig.subplots_adjust(wspace=0.28)\n\nim = axs[0].imshow(model.params['Cmat'], norm=LogNorm(vmin=10e-5, vmax=np.max(model.params['Cmat'])))\naxs[0].set_title(\"Cmat\")\nfig.colorbar(im, ax=axs[0],fraction=0.046, pad=0.04)\nim = axs[1].imshow(model.params['lengthMat'], cmap='inferno')\naxs[1].set_title(\"Dmat\")\nfig.colorbar(im, ax=axs[1],fraction=0.046, pad=0.04)\nim = axs[2].imshow(ds.FCs[0], cmap='inferno')\naxs[2].set_title(\"Empirical FC\")\nfig.colorbar(im, ax=axs[2],fraction=0.046, pad=0.04)\n
    \n<matplotlib.colorbar.Colorbar at 0x12c0ac5c0>\n

We run the model with BOLD simulation by passing bold=True. This simulates the Balloon-Windkessel BOLD model in parallel to the neural population model in order to estimate the blood oxygen levels from the underlying neural activity. The output of the BOLD model can be used to compare the simulated data to empirical fMRI data (for example, resting-state fMRI).

To save (a lot of) RAM, we can run the simulation in chunkwise mode. In this mode, the model is simulated for a length of chunksize steps (not time in ms, but actual integration steps!), and the output of that chunk is used to automatically reinitialize the model with the appropriate initial conditions. This allows for a serial continuation of the simulation without having to store all the data in memory and is particularly useful for very long simulations or for many parallel ones.

    model.run(chunkwise=True, chunksize = 100000, bold=True)\n
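Note that chunksize is measured in integration steps: with the default dt of 0.1 ms, a chunk of 100000 steps corresponds to 10 seconds of simulated activity. A small sketch for converting a chunk length in ms into steps:

# convert a desired chunk length in ms into integration steps\nchunk_ms = 10 * 1000  # 10 seconds\nchunksize = int(chunk_ms / model.params['dt'])  # 100000 steps for dt = 0.1 ms\nprint(chunksize)\n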
    model.outputs\n
    \n{'t': array([0.000e+00, 1.000e-01, 2.000e-01, ..., 9.598e+02, 9.599e+02,\n        9.600e+02]),\n 'rates_exc': array([[0.00835719, 0.00840018, 0.008441  , ..., 0.07789972, 0.07678947,\n         0.07575822]]),\n 'rates_inh': array([[6.67987791, 6.74212832, 6.82498266, ..., 9.74761859, 9.76436539,\n         9.75417725]]),\n 'BOLD': {'t': array([1.00000e-01, 2.00010e+03, 4.00010e+03, 6.00010e+03, 8.00010e+03,\n         1.00001e+04, 1.20001e+04, 1.40001e+04, 1.60001e+04, 1.80001e+04,\n         2.00001e+04, 2.20001e+04, 2.40001e+04]),\n  'BOLD': array([[1.37324205e-10, 2.32894551e-02, 2.52461497e-02, 1.57354848e-02,\n          9.56109432e-03, 1.05825534e-02, 1.12229272e-02, 1.22928019e-02,\n          1.53881680e-02, 1.50792887e-02, 1.27970412e-02, 1.30106312e-02,\n          1.40587017e-02]])}}\n

For convenience, the outputs can also be accessed directly as attributes of the model named after each output, like model.rates_exc. The outputs are also available as xr DataArrays via model.xr().

    Since we used bold=True to simulate BOLD, we can also access model.BOLD.BOLD for the actual BOLD activity, and model.BOLD.t for the time steps of the BOLD simulation (which are downsampled to 0.5 Hz by default).
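We can verify the BOLD sampling rate directly from the time axis; 0.5 Hz corresponds to one sample every 2000 ms, as visible in the output above:

# spacing of the BOLD time axis in ms (2000 ms = 0.5 Hz sampling)\nprint(np.diff(model.BOLD.t_BOLD)[:3])\n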

    # Plot functional connectivity and BOLD timeseries (z-scored)\nfig, axs = plt.subplots(1, 2, figsize=(6, 2), dpi=75, gridspec_kw={'width_ratios' : [1, 2]})\naxs[0].imshow(func.fc(model.BOLD.BOLD[:, 5:]))\naxs[1].imshow(scipy.stats.mstats.zscore(model.BOLD.BOLD[:, model.BOLD.t_BOLD>10000], axis=1), aspect='auto', extent=[model.BOLD.t_BOLD[model.BOLD.t_BOLD>10000][0], model.BOLD.t_BOLD[-1], 0, model.params['N']]);\n\naxs[0].set_title(\"FC\")\naxs[0].set_xlabel(\"Node\")\naxs[0].set_ylabel(\"Node\")\naxs[1].set_xlabel(\"t [ms]\")\n\n# the results of the model are also accessible through an xarray DataArray\nfig, axs = plt.subplots(1, 1, figsize=(6, 2), dpi=75)\nplt.plot(model.xr().time, model.xr().loc['rates_exc'].T);\n
    scores = [func.matrix_correlation(func.fc(model.BOLD.BOLD[:, 5:]), fcemp) for fcemp in ds.FCs]\n\nprint(\"Correlation per subject:\", [f\"{s:.2}\" for s in scores])\nprint(f\"Mean FC/FC correlation: {np.mean(scores):.2}\")\n
    \nCorrelation per subject: ['0.52', '0.54', '0.67', '0.49', '0.69']\nMean FC/FC correlation: 0.58\n\n
    "},{"location":"examples/example-0-aln-minimal/#the-neural-mass-model","title":"The neural mass model","text":"

In this example, we will learn about the basics of neurolib. We will create a two-population mean-field model of exponential integrate-and-fire neurons called the aln model. We will learn how to create a Model, set some parameters, and run a simulation. We will also see how we can easily access the output of each simulation.

    "},{"location":"examples/example-0-aln-minimal/#aln-the-adaptive-linear-nonlinear-cascade-model","title":"aln - the adaptive linear-nonlinear cascade model","text":"

    The adaptive linear-nonlinear (aln) cascade model is a low-dimensional population model of spiking neural networks. Mathematically, it is a dynamical system of non-linear ODEs. The dynamical variables of the system simulated in the aln model describe the average firing rate and other macroscopic variables of a randomly connected, delay-coupled network of excitatory and inhibitory adaptive exponential integrate-and-fire neurons (AdEx) with non-linear synaptic currents.

Ultimately, the model is the result of several steps of model reduction, starting from the Fokker-Planck equation of the AdEx neuron subject to white-noise input, evaluated for many values of the input mean \(\mu\) and variance \(\sigma\). The resulting mean firing rates and mean membrane potentials are stored in a lookup table and serve as the nonlinear firing rate transfer function, \(r = \Phi(\mu, \sigma)\).
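Conceptually, evaluating the transfer function then amounts to an interpolation in that precomputed table. A schematic sketch with a made-up table (the real aln tables are precomputed from the AdEx reduction):

import numpy as np\nfrom scipy.interpolate import RegularGridInterpolator\n\n# hypothetical grid of input means and standard deviations\nmus = np.linspace(0.0, 5.0, 50)\nsigmas = np.linspace(0.5, 5.0, 30)\n# rate_table[i, j] would hold the steady-state rate for (mus[i], sigmas[j]);\n# here it is filled with a made-up smooth function purely for illustration\nrate_table = np.tanh(mus[:, None]) * sigmas[None, :]\n\n# Phi(mu, sigma) as linear interpolation in the lookup table\nPhi = RegularGridInterpolator((mus, sigmas), rate_table)\nprint(Phi([[1.2, 1.5]]))  # rate at mu = 1.2, sigma = 1.5\n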

    "},{"location":"examples/example-0-aln-minimal/#basic-use","title":"Basic use","text":""},{"location":"examples/example-0-aln-minimal/#simulating-a-single-aln-node","title":"Simulating a single aln node","text":"

    To create a single node, we simply instantiate the model without any arguments.

    "},{"location":"examples/example-0-aln-minimal/#accessing-the-outputs","title":"Accessing the outputs","text":""},{"location":"examples/example-0-aln-minimal/#bifurcation-diagram","title":"Bifurcation diagram","text":""},{"location":"examples/example-0-aln-minimal/#whole-brain-model","title":"Whole-brain model","text":""},{"location":"examples/example-0-aln-minimal/#run-model","title":"Run model","text":""},{"location":"examples/example-0-aln-minimal/#results","title":"Results","text":"

The outputs of the model can be accessed using the attribute model.outputs.

    "},{"location":"examples/example-0-aln-minimal/#plot-simulated-activity","title":"Plot simulated activity","text":""},{"location":"examples/example-0-aln-minimal/#correlation-of-simulated-bold-to-empirical-data","title":"Correlation of simulated BOLD to empirical data","text":"

    We can compute the element-wise Pearson correlation of the functional connectivity matrices of the simulated data to the empirical data to estimate how well the model captures the inter-areal BOLD correlations found in empirical resting-state recordings.
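In essence, this boils down to correlating the (off-diagonal) entries of the two matrices. A rough sketch of such an element-wise comparison (neurolib's func.matrix_correlation may differ in details, e.g. in how the diagonal is treated):

import numpy as np\n\ndef fc_correlation(fc1, fc2):\n    # compare only the upper triangle: FC matrices are symmetric\n    # and the diagonal is trivially 1\n    iu = np.triu_indices_from(fc1, k=1)\n    return np.corrcoef(fc1[iu], fc2[iu])[0, 1]\n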

    "},{"location":"examples/example-0.1-hopf-minimal/","title":"Example 0.1 hopf minimal","text":"
    # change to the root directory of the project\nimport os\nif os.getcwd().split(\"/\")[-1] == \"examples\":\n    os.chdir('..')\n\n%load_ext autoreload\n%autoreload 2\n
    try:\n    import matplotlib.pyplot as plt\nexcept ImportError:\n    import sys\n    !{sys.executable} -m pip install matplotlib\n    import matplotlib.pyplot as plt\n\nimport numpy as np\n\n# Let's import the Hopf model\nfrom neurolib.models.hopf import HopfModel\n\n# Some useful functions are provided here\nimport neurolib.utils.functions as func\n\n# a nice color map\nplt.rcParams['image.cmap'] = 'plasma'\n
    model = HopfModel()\nmodel.params['duration'] = 1.0*1000\nmodel.params['sigma_ou'] = 0.03\n\nmodel.run()\n
    plt.plot(model.t, model.x.T, c='k', lw = 2)\n# alternatively plot the results in the xarray:\n# plt.plot(hopfModel.xr[0, 0].time, hopfModel.xr[0, 0].values)\nplt.xlabel(\"t [ms]\")\nplt.ylabel(\"Activity\")\n
    \nText(0, 0.5, 'Activity')\n
    model = HopfModel()\nmodel.params['duration'] = 2.0*1000\n
    max_x = []\nmin_x = []\n# these are the different input values that we want to scan\na_s = np.linspace(-2, 2, 50)\nfor a in a_s:\n    model.params['a'] = a\n    model.run()\n    # we add the maximum and the minimum of the last second of the \n    # simulation to a list\n    max_x.append(np.max(model.x[0, -int(1000/model.params['dt']):]))\n    min_x.append(np.min(model.x[0, -int(1000/model.params['dt']):]))\n
    plt.plot(a_s, max_x, c='k', lw = 2)\nplt.plot(a_s, min_x, c='k', lw = 2)\nplt.title(\"Bifurcation diagram of the Hopf oscillator\")\nplt.xlabel(\"a\")\nplt.ylabel(\"Min / max x\")\n
    \nText(0, 0.5, 'Min / max x')\n
    from neurolib.utils.loadData import Dataset\n\nds = Dataset(\"hcp\")\n
    model = HopfModel(Cmat = ds.Cmat, Dmat = ds.Dmat)\n
    model.params['w'] = 1.0\nmodel.params['signalV'] = 0\nmodel.params['duration'] = 20 * 1000 \nmodel.params['sigma_ou'] = 0.14\nmodel.params['K_gl'] = 0.6\n\nmodel.run(chunkwise=True)\n
    plt.plot(model.t, model.x[::5, :].T, alpha=0.8);\nplt.xlim(0, 200)\nplt.xlabel(\"t [ms]\")\n
    \nText(0.5, 0, 't [ms]')\n
    fig, axs = plt.subplots(1, 2, figsize=(8, 2))\naxs[0].imshow(func.fc(model.x[:, -10000:]))\naxs[1].plot(model.t, model.x[::5, :].T, alpha=0.8);\n
    scores = [func.matrix_correlation(func.fc(model.x[:, -int(5000/model.params['dt']):]), fcemp) for fcemp in ds.FCs]\nprint(\"Correlation per subject:\", [f\"{s:.2}\" for s in scores])\nprint(\"Mean FC/FC correlation: {:.2f}\".format(np.mean(scores)))\n
    \nCorrelation per subject: ['0.54', '0.63', '0.66', '0.53', '0.55', '0.55', '0.69']\nMean FC/FC correlation: 0.59\n\n
    "},{"location":"examples/example-0.1-hopf-minimal/#single-node-simulation","title":"Single node simulation","text":""},{"location":"examples/example-0.1-hopf-minimal/#bifurcation-diagram","title":"Bifurcation diagram","text":""},{"location":"examples/example-0.1-hopf-minimal/#brain-network","title":"Brain network","text":""},{"location":"examples/example-0.2-basic_analysis/","title":"Example 0.2 basic analysis","text":"
    # change to the root directory of the project\nimport os\nif os.getcwd().split(\"/\")[-1] == \"examples\":\n    os.chdir('..')\n\n# This will reload all imports as soon as the code changes\n%load_ext autoreload\n%autoreload 2    \n
    try:\n    import matplotlib.pyplot as plt\nexcept ImportError:\n    import sys\n    !{sys.executable} -m pip install matplotlib\n    import matplotlib.pyplot as plt\n\nfrom functools import partial\nimport numpy as np\n\nfrom neurolib.models.aln import ALNModel\nfrom neurolib.utils.loadData import Dataset\nfrom neurolib.utils.signal import RatesSignal, BOLDSignal\n\nplt.rcParams['image.cmap'] = 'plasma'\n
    ds = Dataset(\"gw\")\n# simulates the whole-brain model\naln = ALNModel(Cmat = ds.Cmat, Dmat = ds.Dmat)\n# Resting state fits\naln.params['mue_ext_mean'] = 1.57\naln.params['mui_ext_mean'] = 1.6\naln.params['sigma_ou'] = 0.09\naln.params['b'] = 5.0\naln.params['duration'] = 0.2*60*1000 \n# info: value 0.2*60*1000 is low for testing\n# use 5*60*1000 for real simulation\naln.run(chunkwise=True, bold = True)\n
    \nWARNING:root:aln: BOLD simulation is supported only with chunkwise integration. Enabling chunkwise integration.\n\n

Now we can cast the modeling result into our Signal class. Signal is the parent base class for any neuro signal. We also provide three child classes for particular signals: RatesSignal (for the firing rate of the populations), VoltageSignal (for the average membrane potential of the populations), and BOLDSignal (for simulated BOLD). They only differ in name, labels, and units. Nothing fancy. Of course, you can implement your own class for your particular results very easily:

    from neurolib.utils.signal import Signal\n\n\nclass PostSynapticCurrentSignal(Signal):\n    name = \"Population post-synaptic current signal\"\n    label = \"I_syn\"\n    signal_type = \"post_current\"\n    unit = \"mA\"\n

    and that's it. All useful methods and attributes are directly inherited from the Signal parent.

    # Create Signal out of firing rates\nfr = RatesSignal.from_model_output(aln, group=\"\", time_in_ms=True)\n# optional description\nfr.description = \"Output of the ALN model with default SC and fiber lengths\"\n\n# Create Signal out of BOLD simulated timeseries\nbold = BOLDSignal.from_model_output(aln, group=\"BOLD\", time_in_ms=True)\nbold.description = \"Simulated BOLD of the ALN model with default SC and fiber lengths\"\n
    # let's check what's inside\nprint(fr)\nprint(bold)\n
    \nPopulation firing rate representing rate signal with unit of Hz with user-provided description: `Output of the ALN model with default SC and fiber lengths`. Shape of the signal is (2, 80, 8831) with dimensions ('output', 'space', 'time').\nPopulation blood oxygen level-dependent signal representing bold signal with unit of % with user-provided description: `Simulated BOLD of the ALN model with default SC and fiber lengths`. Shape of the signal is (1, 80, 7) with dimensions ('output', 'space', 'time').\n\n

    Signal automatically computes useful attributes like dt, sampling rate, starting and ending times.

    # inherent attributes\nprint(\"Inherent attributes:\")\nprint(fr.name)\nprint(fr.label)\nprint(fr.unit)\nprint(fr.signal_type)\nprint(fr.description)\n\n# computed attributes\nprint(\"\\nComputed attributes:\")\nprint(fr.dt)\nprint(fr.sampling_frequency)\nprint(fr.start_time)\nprint(fr.end_time)\nprint(fr.shape)\n
    \nInherent attributes:\nPopulation firing rate\nq\nHz\nrate\nOutput of the ALN model with default SC and fiber lengths\n\nComputed attributes:\n0.0001\n10000.0\n0.0\n0.883\n(2, 80, 8831)\n\n
    # internal representation of the signal is just xarray's DataArray\nprint(fr.data)\n# xarray is just pandas on steroids, i.e. it supports multi-dimensional arrays, not only 2D\n\n# if you'd need simple numpy array just call .values on signal's data\nprint(type(fr.data.values))\nprint(fr.data.values.shape)\n
    \n<xarray.DataArray (output: 2, space: 80, time: 8831)>\narray([[[1.33261450e-02, 1.36917651e-02, 1.40695947e-02, ...,\n         3.48158384e-03, 3.46784876e-03, 3.46133411e-03],\n        [6.13965587e-01, 6.25356604e-01, 6.34768740e-01, ...,\n         3.59993904e-01, 3.54528049e-01, 3.49018287e-01],\n        [6.36038906e-02, 6.35557804e-02, 6.33770702e-02, ...,\n         4.42949449e-02, 4.37566338e-02, 4.32171260e-02],\n        ...,\n        [2.50859629e-03, 2.52563325e-03, 2.54037707e-03, ...,\n         8.00547429e-03, 7.78636724e-03, 7.61333390e-03],\n        [5.95617787e-02, 6.07513850e-02, 6.20942706e-02, ...,\n         3.26872805e-02, 3.33536801e-02, 3.40569905e-02],\n        [4.96090615e-02, 4.84730168e-02, 4.73428175e-02, ...,\n         1.05820581e-01, 1.05724932e-01, 1.05846529e-01]],\n\n       [[4.17821712e+00, 4.21196680e+00, 4.23883558e+00, ...,\n         1.01836901e+01, 1.00264571e+01, 9.86191716e+00],\n        [6.83616353e+00, 6.91560104e+00, 6.97566672e+00, ...,\n         8.07743197e+00, 8.08297235e+00, 8.07994833e+00],\n        [6.57108005e+00, 6.49135703e+00, 6.43050397e+00, ...,\n         8.93701663e+00, 8.95484465e+00, 8.97588108e+00],\n        ...,\n        [8.75902323e+00, 8.81787556e+00, 8.89506320e+00, ...,\n         7.48694404e+00, 7.41863238e+00, 7.35970182e+00],\n        [4.15841271e+00, 4.15037911e+00, 4.15057015e+00, ...,\n         6.61282248e+00, 6.60115808e+00, 6.58597805e+00],\n        [9.52609773e+00, 9.39861579e+00, 9.28794497e+00, ...,\n         5.83291993e+00, 5.90070345e+00, 5.94592106e+00]]])\nCoordinates:\n  * output   (output) <U9 'rates_exc' 'rates_inh'\n  * space    (space) int64 0 1 2 3 4 5 6 7 8 9 ... 70 71 72 73 74 75 76 77 78 79\n  * time     (time) float64 0.0 0.0001 0.0002 0.0003 ... 0.8828 0.8829 0.883\n<class 'numpy.ndarray'>\n(2, 80, 8831)\n\n

Now let's see what Signal can do. Just a side note: all operations can either be performed in place (everything happens inside the signal object), or an altered signal is returned with the same attributes as the original one.

# basic operations\nnorm = fr.normalize(std=True, inplace=False)\n# are all temporal means close to zero?\nprint(np.allclose(norm.data.mean(dim=\"time\"), 0.))\n# and are all temporal stds close to 1?\nprint(np.allclose(norm.data.std(dim=\"time\"), 1.0))\nplt.plot(fr[\"rates_exc\"].data.sel({\"space\": 0}), label=\"original\")\nplt.plot(norm[\"rates_exc\"].data.sel({\"space\": 0}), label=\"normalised\")\n\n# you can detrend the signal, all of it, or by segments (given as indices within the signal)\n# let's first normalise (so inplace=False), then detrend (here we can use inplace=True)\ndetrended = fr.normalize(std=True, inplace=False)\ndetrended.detrend(inplace=True)\nplt.plot(detrended[\"rates_exc\"].data.sel({\"space\": 0}), label=\"normalised & detrended\")\n# detrend piecewise between breakpoints every 1000 samples\ndetrended_segments = fr.detrend(segments=np.arange(1000, 8000, 1000), inplace=False)\nplt.legend()\n
    \nTrue\nTrue\n\n
    \n<matplotlib.legend.Legend at 0x1301a4320>\n

The sampling frequency is too high, so let's resample:

    print(fr.sampling_frequency)\nplt.plot(fr.data.time, fr[\"rates_exc\"].data.sel({\"space\": 0}), label=\"original\")\nfr.resample(to_frequency=1000., inplace=True)\nprint(fr.sampling_frequency)\nplt.plot(fr.data.time, fr[\"rates_exc\"].data.sel({\"space\": 0}), label=\"resampled\")\nplt.legend()\n
    \n10000.0\n1000.0\n\n
    \n<matplotlib.legend.Legend at 0x131e0e940>\n
# init again to start fresh\nfr = RatesSignal.from_model_output(aln, group=\"\", time_in_ms=True)\nplt.plot(fr.data.time, fr[\"rates_exc\"].data.sel({\"space\": 0}), label=\"original\")\n\n# first resample\nfr.resample(to_frequency=1000., inplace=True)\n\n# next detrend\nfr.detrend(inplace=True)\nprint(fr.start_time, fr.end_time)\n\n# next pad with 0s for 0.5 seconds in order to suppress edge effects when filtering\npadded = fr.pad(how_much=0.5, in_seconds=True, padding_type=\"constant\", side=\"both\",\n                constant_values=0., inplace=False)\nprint(padded.start_time, padded.end_time)\n\n# now filter - by default uses mne; if not installed, falls back to scipy's basic IIR filter\npadded.filter(low_freq=8., high_freq=12., inplace=True)\n\n# now cut back to the original length\nfiltered = padded.sel([fr.start_time, fr.end_time], inplace=False)\nprint(filtered.start_time, filtered.end_time)\n\nplt.plot(filtered.data.time, filtered[\"rates_exc\"].data.sel({\"space\": 0}), label=r\"filtered $\\alpha$\")\n\n# finally, get phase and amplitude via Hilbert transform\nphase = filtered.hilbert_transform(return_as=\"phase_wrapped\", inplace=False)\nplt.plot(phase.data.time, phase[\"rates_exc\"].data.sel({\"space\": 0}), label=r\"phase $\\alpha$\")\namplitude = filtered.hilbert_transform(return_as=\"amplitude\", inplace=False)\nplt.plot(amplitude.data.time, amplitude[\"rates_exc\"].data.sel({\"space\": 0}), label=r\"amplitude $\\alpha$\")\nplt.legend()\n
    \n0.0 0.882\n-0.5 1.382\nSetting up band-pass filter from 8 - 12 Hz\n\nFIR filter parameters\n---------------------\nDesigning a one-pass, zero-phase, non-causal bandpass filter:\n- Windowed time-domain design (firwin) method\n- Hamming window with 0.0194 passband ripple and 53 dB stopband attenuation\n- Lower passband edge: 8.00\n- Lower transition bandwidth: 2.00 Hz (-6 dB cutoff frequency: 7.00 Hz)\n- Upper passband edge: 12.00 Hz\n- Upper transition bandwidth: 3.00 Hz (-6 dB cutoff frequency: 13.50 Hz)\n- Filter length: 1651 samples (1.651 sec)\n\n0.0 0.882\n\n
    \n<matplotlib.legend.Legend at 0x1322e6e80>\n
# in case you forget what happened during the processing, you can easily check all steps:\nprint(phase.preprocessing_steps)\nprint(amplitude.preprocessing_steps)\n
    \nresample to 1000.0Hz -> detrend -> 0.5s constant both sides padding -> filter: low 8.0Hz - high 12.0Hz -> select x:0.882s -> Hilbert - wrapped phase\nresample to 1000.0Hz -> detrend -> 0.5s constant both sides padding -> filter: low 8.0Hz - high 12.0Hz -> select x:0.882s -> Hilbert - amplitude\n\n
    # and you can save your signal for future generations! (saved as netCDF file)\nphase.save(\"phase_from_some_experiment\")\n
    # and then load it\nphase_loaded = RatesSignal.from_file(\"phase_from_some_experiment\")\n# compare whether it is the same\nprint(phase == phase_loaded)\n# the attributes are saved/loaded as well\nprint(phase_loaded.name)\nprint(phase_loaded.unit)\nprint(phase_loaded.preprocessing_steps)\n# delete file\nos.remove(\"phase_from_some_experiment.nc\")\n
    \nTrue\nPopulation firing rate\nHz\nresample to 1000.0Hz -> detrend -> 0.5s constant both sides padding -> filter: low 8.0Hz - high 12.0Hz -> select x:0.882s -> Hilbert - wrapped phase\n\n
# this iterates over the whole data and returns one 1D temporal slice at a time; each slice is a Signal\nfor name, ts in fr.iterate(return_as=\"signal\"):\n    print(name, type(ts), ts.start_time, ts.end_time)\n\n# this iterates over the whole data and returns one 1D temporal slice at a time; each slice is a DataArray\nfor name, ts in fr.iterate(return_as=\"xr\"):\n    print(name, type(ts), ts.shape)\n
\n('rates_exc', 0) <class 'neurolib.utils.signal.RatesSignal'> 0.0 0.882\n('rates_exc', 1) <class 'neurolib.utils.signal.RatesSignal'> 0.0 0.882\n('rates_exc', 2) <class 'neurolib.utils.signal.RatesSignal'> 0.0 0.882\n...\n('rates_inh', 78) <class 'neurolib.utils.signal.RatesSignal'> 0.0 0.882\n('rates_inh', 79) <class 'neurolib.utils.signal.RatesSignal'> 0.0 0.882\n('rates_exc', 0) <class 'xarray.core.dataarray.DataArray'> (883,)\n('rates_exc', 1) <class 'xarray.core.dataarray.DataArray'> (883,)\n('rates_exc', 2) <class 'xarray.core.dataarray.DataArray'> (883,)\n...\n('rates_inh', 78) <class 'xarray.core.dataarray.DataArray'> (883,)\n('rates_inh', 79) <class 'xarray.core.dataarray.DataArray'> (883,)\n\n
    # sliding window - let's iterate over temporal windows of 0.5 seconds, with 0.1s translation and a boxcar window function\nfor window in fr.sliding_window(length=0.5, step=0.1, window_function=\"boxcar\", lengths_in_seconds=True):\n    print(type(window), window.shape, window.start_time, window.end_time)\n
    \n<class 'neurolib.utils.signal.RatesSignal'> (2, 80, 500) 0.0 0.499\n<class 'neurolib.utils.signal.RatesSignal'> (2, 80, 500) 0.1 0.599\n<class 'neurolib.utils.signal.RatesSignal'> (2, 80, 500) 0.2 0.699\n<class 'neurolib.utils.signal.RatesSignal'> (2, 80, 500) 0.3 0.799\n\n
    # apply 1D function - Signal supports applying a 1D function per temporal slice\n# both are supported: functions that reduce the temporal dimension (e.g. mean, which reduces a timeseries of length N to one number)\n# and functions that preserve the shape\n\n# reduce\nmean = fr.apply(partial(np.mean, axis=-1), inplace=False)\n# mean is now an xr.DataArray, not a Signal; but the coordinates except time are preserved\nprint(type(mean), mean.shape, mean.coords)\n\n# preserve shape\nabsolute_value = fr.apply(np.abs, inplace=False)\n# still a Signal\nprint(absolute_value.shape)\n
    \nWARNING:root:Shape changed after operation! Old shape: (2, 80, 883), new shape: (2, 80); Cannot cast to Signal class, returing as `xr.DataArray`\n\n
    \n<class 'xarray.core.dataarray.DataArray'> (2, 80) Coordinates:\n  * output   (output) <U9 'rates_exc' 'rates_inh'\n  * space    (space) int64 0 1 2 3 4 5 6 7 8 9 ... 70 71 72 73 74 75 76 77 78 79\n(2, 80, 883)\n\n
    # basic FC from excitatory rates - using correlation\nfc_exc = fr[\"rates_exc\"].functional_connectivity(fc_function=np.corrcoef)\n# the result is a DataArray with space coordinates\nprint(type(fc_exc), fc_exc.shape, fc_exc.coords)\nplt.subplot(1,2,1)\nplt.title(\"Correlation FC\")\nplt.imshow(fc_exc.values)\n\n# FC from covariance\nfc_cov_exc = fr[\"rates_exc\"].functional_connectivity(fc_function=np.cov)\nplt.subplot(1,2,2)\nplt.title(\"Covariance FC\")\nplt.imshow(fc_cov_exc.values)\n\n# fc_function can be any function that takes a (nodes x time) array and transforms it into a (nodes x nodes) connectivity matrix\n
    \n<class 'xarray.core.dataarray.DataArray'> (80, 80) Coordinates:\n  * space    (space) int64 0 1 2 3 4 5 6 7 8 9 ... 70 71 72 73 74 75 76 77 78 79\n\n
    \n<matplotlib.image.AxesImage at 0x131f1db70>\n
    # band-specific FC\nBANDS = {\n    \"delta\": {\"low_freq\": 2, \"high_freq\": 4},\n    \"theta\": {\"low_freq\": 4, \"high_freq\": 8},\n    \"alpha\": {\"low_freq\": 8, \"high_freq\": 12},\n    \"beta\": {\"low_freq\": 12, \"high_freq\": 30},\n    \"low_gamma\": {\"low_freq\": 30, \"high_freq\": 60},\n    \"high_gamma\": {\"low_freq\": 60, \"high_freq\": 120},\n}\npadded = fr.pad(how_much=0.5, in_seconds=True, padding_type=\"constant\", side=\"both\",\n                constant_values=0., inplace=False)\n\nplt.figure(figsize=(20, 4))\nfor ii, (band, filt_spec) in enumerate(BANDS.items()):\n    print(f\"Processing {band}...\")\n    filtered = padded.filter(**filt_spec, inplace=False)\n    filtered.sel([fr.start_time, fr.end_time], inplace=True)\n    plt.subplot(1, len(BANDS), ii + 1)\n    fc = filtered[\"rates_exc\"].functional_connectivity(fc_function=np.corrcoef)\n    print(filtered.preprocessing_steps)\n    plt.imshow(fc)\n    plt.title(f\"{band}: {filt_spec['low_freq']}-{filt_spec['high_freq']}Hz\")\nplt.show()\n
    \nProcessing delta...\nSetting up band-pass filter from 2 - 4 Hz\n\nFIR filter parameters\n---------------------\nDesigning a one-pass, zero-phase, non-causal bandpass filter:\n- Windowed time-domain design (firwin) method\n- Hamming window with 0.0194 passband ripple and 53 dB stopband attenuation\n- Lower passband edge: 2.00\n- Lower transition bandwidth: 2.00 Hz (-6 dB cutoff frequency: 1.00 Hz)\n- Upper passband edge: 4.00 Hz\n- Upper transition bandwidth: 2.00 Hz (-6 dB cutoff frequency: 5.00 Hz)\n- Filter length: 1651 samples (1.651 sec)\n\nresample to 1000.0Hz -> detrend -> 0.5s constant both sides padding -> filter: low 2Hz - high 4Hz -> select x:0.882s\nProcessing theta...\nSetting up band-pass filter from 4 - 8 Hz\n\nFIR filter parameters\n---------------------\nDesigning a one-pass, zero-phase, non-causal bandpass filter:\n- Windowed time-domain design (firwin) method\n- Hamming window with 0.0194 passband ripple and 53 dB stopband attenuation\n- Lower passband edge: 4.00\n- Lower transition bandwidth: 2.00 Hz (-6 dB cutoff frequency: 3.00 Hz)\n- Upper passband edge: 8.00 Hz\n- Upper transition bandwidth: 2.00 Hz (-6 dB cutoff frequency: 9.00 Hz)\n- Filter length: 1651 samples (1.651 sec)\n\nresample to 1000.0Hz -> detrend -> 0.5s constant both sides padding -> filter: low 4Hz - high 8Hz -> select x:0.882s\nProcessing alpha...\nSetting up band-pass filter from 8 - 12 Hz\n\nFIR filter parameters\n---------------------\nDesigning a one-pass, zero-phase, non-causal bandpass filter:\n- Windowed time-domain design (firwin) method\n- Hamming window with 0.0194 passband ripple and 53 dB stopband attenuation\n- Lower passband edge: 8.00\n- Lower transition bandwidth: 2.00 Hz (-6 dB cutoff frequency: 7.00 Hz)\n- Upper passband edge: 12.00 Hz\n- Upper transition bandwidth: 3.00 Hz (-6 dB cutoff frequency: 13.50 Hz)\n- Filter length: 1651 samples (1.651 sec)\n\nresample to 1000.0Hz -> detrend -> 0.5s constant both sides padding -> filter: low 8Hz - high 12Hz -> select x:0.882s\nProcessing beta...\nSetting up band-pass filter from 12 - 30 Hz\n\nFIR filter parameters\n---------------------\nDesigning a one-pass, zero-phase, non-causal bandpass filter:\n- Windowed time-domain design (firwin) method\n- Hamming window with 0.0194 passband ripple and 53 dB stopband attenuation\n- Lower passband edge: 12.00\n- Lower transition bandwidth: 3.00 Hz (-6 dB cutoff frequency: 10.50 Hz)\n- Upper passband edge: 30.00 Hz\n- Upper transition bandwidth: 7.50 Hz (-6 dB cutoff frequency: 33.75 Hz)\n- Filter length: 1101 samples (1.101 sec)\n\nresample to 1000.0Hz -> detrend -> 0.5s constant both sides padding -> filter: low 12Hz - high 30Hz -> select x:0.882s\nProcessing low_gamma...\nSetting up band-pass filter from 30 - 60 Hz\n\nFIR filter parameters\n---------------------\nDesigning a one-pass, zero-phase, non-causal bandpass filter:\n- Windowed time-domain design (firwin) method\n- Hamming window with 0.0194 passband ripple and 53 dB stopband attenuation\n- Lower passband edge: 30.00\n- Lower transition bandwidth: 7.50 Hz (-6 dB cutoff frequency: 26.25 Hz)\n- Upper passband edge: 60.00 Hz\n- Upper transition bandwidth: 15.00 Hz (-6 dB cutoff frequency: 67.50 Hz)\n- Filter length: 441 samples (0.441 sec)\n\nresample to 1000.0Hz -> detrend -> 0.5s constant both sides padding -> filter: low 30Hz - high 60Hz -> select x:0.882s\nProcessing high_gamma...\nSetting up band-pass filter from 60 - 1.2e+02 Hz\n\nFIR filter parameters\n---------------------\nDesigning a one-pass, zero-phase, non-causal bandpass 
filter:\n- Windowed time-domain design (firwin) method\n- Hamming window with 0.0194 passband ripple and 53 dB stopband attenuation\n- Lower passband edge: 60.00\n- Lower transition bandwidth: 15.00 Hz (-6 dB cutoff frequency: 52.50 Hz)\n- Upper passband edge: 120.00 Hz\n- Upper transition bandwidth: 30.00 Hz (-6 dB cutoff frequency: 135.00 Hz)\n- Filter length: 221 samples (0.221 sec)\n\nresample to 1000.0Hz -> detrend -> 0.5s constant both sides padding -> filter: low 60Hz - high 120Hz -> select x:0.882s\n\n
    # time-varying FC\nfor window in fr.sliding_window(length=0.5, step=0.2, window_function=\"boxcar\", lengths_in_seconds=True):\n    fc = window[\"rates_exc\"].functional_connectivity(fc_function=np.corrcoef)\n    plt.imshow(fc)\n    plt.title(f\"FC: {window.start_time}-{window.end_time}s\")\n    plt.show()\n
    "},{"location":"examples/example-0.2-basic_analysis/#introduction","title":"Introduction","text":""},{"location":"examples/example-0.2-basic_analysis/#run-the-aln-model","title":"Run the ALN model","text":"

    First, let us run a network model given the structural connectivity and fiber lengths.
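
    A minimal sketch of such a run, mirroring the pattern used in the other examples (the duration here is illustrative):

    from neurolib.models.aln import ALNModel\nfrom neurolib.utils.loadData import Dataset\n\n# structural connectivity (Cmat) and fiber lengths (Dmat)\nds = Dataset(\"hcp\")\nmodel = ALNModel(Cmat=ds.Cmat, Dmat=ds.Dmat)\nmodel.params['duration'] = 10 * 1000  # 10 seconds, in ms\nmodel.run(chunkwise=True)  # integrate in chunks to save RAM\n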

    "},{"location":"examples/example-0.2-basic_analysis/#more-complete-example","title":"More complete example","text":"

    Let's do a more complete example. Say you run the model and want to extract the phase and amplitude of the \(\alpha\) band (i.e. 8-12Hz) for some phase-amplitude coupling analyses.
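
    One possible sketch of that workflow, combining the Signal methods filter() and apply() with scipy.signal.hilbert (assuming fr is the RatesSignal from above):

    from scipy.signal import hilbert\n\n# band-pass the signal to the alpha band (8-12Hz)\nalpha = fr.filter(low_freq=8, high_freq=12, inplace=False)\n\n# instantaneous amplitude and phase via the analytic signal;\n# apply() runs the function over the underlying array, and the shape is preserved\namplitude = alpha.apply(lambda x: np.abs(hilbert(x, axis=-1)), inplace=False)\nphase = alpha.apply(lambda x: np.angle(hilbert(x, axis=-1)), inplace=False)\n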

    "},{"location":"examples/example-0.2-basic_analysis/#saving-loading","title":"Saving / loading","text":""},{"location":"examples/example-0.2-basic_analysis/#iterators","title":"Iterators","text":"

    Sometimes it is useful to apply a function or inspect the data in a loop. That's why Signal supports both: iterating over space / output variables and applying a 1D function over the temporal dimension.

    "},{"location":"examples/example-0.2-basic_analysis/#functional-connectivity","title":"Functional connectivity","text":"

    A lot of modelling effort goes into fitting the experimental functional connectivity with the modelled one. That's why the Signal class supports functional connectivity computation; combined with its other methods (like filtering and iterating over temporal windows), we can compute time-resolved FC or band-specific FC within a couple of lines.

    "},{"location":"examples/example-0.3-fhn-minimal/","title":"Example 0.3 fhn minimal","text":"
    # change to the root directory of the project\nimport os\nif os.getcwd().split(\"/\")[-1] == \"examples\":\n    os.chdir('..')\n\n# This will reload all imports as soon as the code changes\n%load_ext autoreload\n%autoreload 2    \n
    try:\n    import matplotlib.pyplot as plt\nexcept ImportError:\n    import sys\n    !{sys.executable} -m pip install matplotlib\n    import matplotlib.pyplot as plt\n\nimport numpy as np\n\n# Let's import the fhn model\nfrom neurolib.models.fhn import FHNModel\n\n# Some useful functions are provided here\nimport neurolib.utils.functions as func\n\n# a nice color map\nplt.rcParams['image.cmap'] = 'plasma'\n
    model = FHNModel()\nmodel.params['duration'] = 2.0*1000\n

    Let's draw a simple one-dimensional bifurcation diagram of this model to orient ourselves in the parameter space.

    max_x = []\nmin_x = []\n# these are the different input values that we want to scan\nx_inputs = np.linspace(0, 2, 50)\nfor x_ext in x_inputs:\n    # Note: this has to be a vector since it is input for all nodes\n    # (but we have only one node in this example)\n    model.params['x_ext'] = [x_ext]\n    model.run()\n    # we add the maximum and the minimum of the last second of the \n    # simulation to a list\n    max_x.append(np.max(model.x[0, -int(1000/model.params['dt']):]))\n    min_x.append(np.min(model.x[0, -int(1000/model.params['dt']):]))\n
    plt.plot(x_inputs, max_x, c='k', lw = 2)\nplt.plot(x_inputs, min_x, c='k', lw = 2)\nplt.title(\"Bifurcation diagram of the FHN oscillator\")\nplt.xlabel(\"Input to x\")\nplt.ylabel(\"Min / max x\")\n
    \nText(0, 0.5, 'Min / max x')\n

    In this model, a Hopf bifurcation occurs at two input values, where the oscillation appears and disappears again. We can see the oscillatory region at input values from roughly 0.75 to 1.3.

    from neurolib.utils.loadData import Dataset\n\nds = Dataset(\"hcp\")\n
    model = FHNModel(Cmat = ds.Cmat, Dmat = ds.Dmat)\n
    model.params['duration'] = 10 * 1000 \n# add some noise\nmodel.params['sigma_ou'] = .01\n# set the global coupling strength of the brain network\nmodel.params['K_gl'] = 1.0\n# let's put all nodes close to the limit cycle such that\n# noise can kick them in and out of the oscillation\n# all nodes get the same constant input\nmodel.params['x_ext'] = [0.72] * model.params['N']\n\nmodel.run(chunkwise=True, append_outputs=True)\n
    plt.plot(model.t, model.x[::5, :].T, alpha=0.8);\nplt.xlabel(\"t [ms]\")\n
    \nText(0.5, 0, 't [ms]')\n
    fig, axs = plt.subplots(1, 2, figsize=(8, 2))\naxs[0].imshow(func.fc(model.x[:, -10000:]))\naxs[1].plot(model.t, model.x[::5, :].T, alpha=0.8);\n
    scores = [func.matrix_correlation(func.fc(model.x[:, -int(5000/model.params['dt']):]), fcemp) for fcemp in ds.FCs]\nprint(\"Correlation per subject:\", [f\"{s:.2}\" for s in scores])\nprint(\"Mean FC/FC correlation: {:.2f}\".format(np.mean(scores)))\n
    \nCorrelation per subject: ['0.41', '0.5', '0.5', '0.48', '0.49', '0.45', '0.54']\nMean FC/FC correlation: 0.48\n\n
    "},{"location":"examples/example-0.3-fhn-minimal/#the-fitz-hugh-nagumo-oscillator","title":"The Fitz-Hugh Nagumo oscillator","text":"

    In this notebook, the basic use of the implementation of the FitzHugh-Nagumo (fhn) model is presented. Usually, the fhn model is used to represent a single neuron (for example in Cakan et al. (2014), \"Heterogeneous delays in neural networks\"). This is due to the difference in timescales of the two equations that define the FHN model: the first equation governs the \"fast variable\", whereas the second one governs the \"slow variable\". This makes it possible to create a model with a very fast spiking mechanism but a slow refractory period.

    In our case, we use a less common parameterization of the fhn model. Inspired by the paper by Kostova et al. (2004), \"FitzHugh\u2013Nagumo revisited: Types of bifurcations, periodical forcing and stability regions by a Lyapunov functional\", the implementation in neurolib produces slowly oscillating dynamics and has the advantage of incorporating an external input term that causes a Hopf bifurcation. This means that the model roughly approximates the behaviour of the aln model: for low input values there is a low-activity fixed point, for intermediate inputs there is an oscillatory region, and for high input values the system settles in a high-activity fixed point. Thus, it offers a simple way of exploring the dynamics of a neural mass model with these properties, such as the aln model.

    We want to start by producing a bifurcation diagram of a single node. With neurolib, this can be done with a couple of lines of code, as seen further below.

    "},{"location":"examples/example-0.3-fhn-minimal/#single-node-simulation","title":"Single node simulation","text":""},{"location":"examples/example-0.3-fhn-minimal/#brain-network","title":"Brain network","text":""},{"location":"examples/example-0.4-wc-minimal/","title":"Example 0.4 wc minimal","text":"
    # change to the root directory of the project\nimport os\nif os.getcwd().split(\"/\")[-1] == \"examples\":\n    os.chdir('..')\n
    %load_ext autoreload\n%autoreload 2\n
    import matplotlib.pyplot as plt\nimport numpy as np\nimport glob\n\nfrom neurolib.models.wc import WCModel\n\nimport neurolib.utils.loadData as ld\nimport neurolib.utils.functions as func\n\n# a nice color map\nplt.rcParams['image.cmap'] = 'plasma'\n
    model = WCModel()\nmodel.params['duration'] = 2.0*1000\n

    Let's draw a simple one-dimensional bifurcation diagram of this model to orient ourselves in the parameter space.

    max_exc = []\nmin_exc = []\n# these are the different input values that we want to scan\nexc_inputs = np.linspace(0, 3.5, 50)\nfor exc_ext in exc_inputs:\n    # Note: this has to be a vector since it is input for all nodes\n    # (but we have only one node in this example)\n    model.params['exc_ext'] = exc_ext\n    model.run()\n    # we add the maximum and the minimum of the last second of the \n    # simulation to a list\n    max_exc.append(np.max(model.exc[0, -int(1000/model.params['dt']):]))\n    min_exc.append(np.min(model.exc[0, -int(1000/model.params['dt']):]))\n
    plt.plot(exc_inputs, max_exc, c='k', lw = 2)\nplt.plot(exc_inputs, min_exc, c='k', lw = 2)\nplt.title(\"Bifurcation diagram of the Wilson-Cowan model\")\nplt.xlabel(\"Input to exc\")\nplt.ylabel(\"Min / max exc\")\n
    \nText(0,0.5,'Min / max exc')\n
    model = WCModel()\nmodel.params['duration'] = 1.0*1000\nmodel.params['sigma_ou'] = 0.01\n\nmodel.run()\n
    plt.plot(model.t, model.exc.T, c='k', lw = 2)\nplt.xlabel(\"t [ms]\")\nplt.ylabel(\"Activity\")\n
    \nText(0,0.5,'Activity')\n
    from neurolib.utils.loadData import Dataset\n\nds = Dataset(\"hcp\")\n
    model = WCModel(Cmat = ds.Cmat, Dmat = ds.Dmat)\n
    model.params['exc_ext'] = 0.65\n\nmodel.params['signalV'] = 0\nmodel.params['duration'] = 20 * 1000 \nmodel.params['sigma_ou'] = 0.14\nmodel.params['K_gl'] = 3.15\n\n\nmodel.run(chunkwise=True)\n
    fig, axs = plt.subplots(1, 2, figsize=(10, 3))\naxs[0].imshow(func.fc(model.exc[:, -10000:]))\naxs[1].plot(model.t, model.exc[::5, :].T, alpha=0.8);\naxs[1].set_xlim(0, 200)\n
    \n(0, 200)\n
    scores = [func.matrix_correlation(func.fc(model.exc[:, -int(5000/model.params['dt']):]), fcemp) for fcemp in ds.FCs]\nprint(\"Correlation per subject:\", [f\"{s:.2}\" for s in scores])\nprint(\"Mean FC/FC correlation: {:.2f}\".format(np.mean(scores)))\n
    \nCorrelation per subject: ['0.13', '0.14', '0.13', '0.12', '0.11', '0.12', '0.12']\nMean FC/FC correlation: 0.13\n\n
    "},{"location":"examples/example-0.4-wc-minimal/#the-wilson-cowan-model","title":"The Wilson-Cowan model","text":"

    In this notebook, the basic use of the implementation of the Wilson-Cowan (wc) model is presented.

    In the wc model, the activity of a particular brain region is defined by a coupled system of excitatory (E) and inhibitory (I) neuronal populations, with the mean firing rates of the E and I pools being the dynamic variables, as first described by Wilson and Cowan in 1972 (H.R. Wilson and J.D. Cowan. Excitatory and inhibitory interactions in localized populations of model neurons. Biophys. J., 12:1\u201324 (1972)).

    "},{"location":"examples/example-0.4-wc-minimal/#bifurcation-diagram","title":"Bifurcation diagram","text":""},{"location":"examples/example-0.4-wc-minimal/#single-node-simulation","title":"Single node simulation","text":""},{"location":"examples/example-0.4-wc-minimal/#brain-network","title":"Brain network","text":""},{"location":"examples/example-0.5-kuramoto/","title":"Example 0.5 kuramoto","text":"
    # change to the root directory of the project\nimport os\nif os.getcwd().split(\"/\")[-1] == \"examples\":\n    os.chdir('..')\n\n# This will reload all imports as soon as the code changes\n%load_ext autoreload\n%autoreload 2    \n
    try:\n    import matplotlib.pyplot as plt\nexcept ImportError:\n    import sys\n    !{sys.executable} -m pip install matplotlib\n    import matplotlib.pyplot as plt\n\nimport numpy as np\n\n# Let's import the Kuramoto model\nfrom neurolib.models.kuramoto import KuramotoModel\n\n# Some useful functions are provided here\nimport neurolib.utils.functions as func\n
    model = KuramotoModel()\nmodel.params['duration'] = 10\nmodel.run()\n
    theta = model['theta'].T\ntheta_capped = np.mod(theta, 2*np.pi) # cap theta to [0, 2*pi]\n\nplt.plot(model.t, theta_capped)\nplt.xlabel(\"Time\")\nplt.ylabel(\"Theta\")\nplt.yticks(np.arange(0, 2*np.pi+0.1, np.pi/2), [r\"$0$\", r\"$\\pi/2$\", r\"$\\pi$\", r\"$3\\pi/2$\", r\"$2\\pi$\",])  # y-axis ticks in multiples of pi\nplt.show()\n

    Here we simulate networks of oscillators. We will simulate a network of 8 oscillators with a global coupling strength of 0.3. We initialize a connectivity matrix with all-to-all connectivity and simulate the network for 30 milliseconds (dt is assumed to be in ms). We will also plot the phase values over time.

    # setting parameters\nN = 8\ncmat = np.ones((N, N)) # fully connected network\ndmat = np.zeros((N,N))\n\nnetwork_model = KuramotoModel(Cmat=cmat, Dmat=dmat)\nnetwork_model.params['duration'] = 0.03*1000\nnetwork_model.params['k']=0.3\nnetwork_model.run()\n
    theta = network_model['theta'].T\n# cap the phase to be between 0 and 2pi\ntheta_capped = np.mod(theta, 2*np.pi)\n\n# set up the figure\nfig, ax = plt.subplots(1, 1, figsize=(16, 8))\n\nplt.plot(network_model.t, theta_capped)\nplt.xlabel(\"Time [ms]\")\nplt.ylabel(\"Theta\")\nplt.yticks(np.arange(0, 2*np.pi+0.1, np.pi/2), [r\"$0$\", r\"$\\pi/2$\", r\"$\\pi$\", r\"$3\\pi/2$\", r\"$2\\pi$\",])  # y-axis ticks in multiples of pi\nplt.show()\n

    We can see that the nodes synchronize after around 25 ms. This takes a while because the connections between the nodes are not very strong. Now we increase the global coupling to 1 to see whether synchronization happens faster.

    network_model.params['k']=1\nnetwork_model.run()\n
    theta = network_model['theta'].T\n# cap the phase to be between 0 and 2pi\ntheta_capped = np.mod(theta, 2*np.pi)\n\n# set up the figure\nfig, ax = plt.subplots(1, 1, figsize=(16, 8))\n\nplt.plot(network_model.t, theta_capped)\nplt.xlabel(\"Time [ms]\")\nplt.ylabel(\"Theta\")\nplt.yticks(np.arange(0, 2*np.pi+0.1, np.pi/2), [r\"$0$\", r\"$\\pi/2$\", r\"$\\pi$\", r\"$3\\pi/2$\", r\"$2\\pi$\",])  # y-axis ticks in multiples of pi\nplt.show()\n

    Now synchronization happens after about 7 ms, which is faster than in the previous simulation.
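
    To quantify synchronization instead of eyeballing it, we can compute the Kuramoto order parameter \(r(t) = |\frac{1}{N} \sum_j e^{i\theta_j(t)}|\), which is close to 0 for incoherent phases and approaches 1 at full synchrony. This cell is a sketch of that standard measure, not part of the original notebook:

    # Kuramoto order parameter over time: r(t) = |mean_j exp(i*theta_j(t))|\ntheta = network_model['theta']  # shape (nodes, time)\nr = np.abs(np.mean(np.exp(1j * theta), axis=0))\n\nplt.plot(network_model.t, r)\nplt.xlabel(\"Time [ms]\")\nplt.ylabel(\"Order parameter r\")\nplt.show()\n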

    "},{"location":"examples/example-0.5-kuramoto/#kuramoto-model","title":"Kuramoto Model","text":"

    In this notebook, we will simulate the Kuramoto model. The Kuramoto model is defined by the following differential equation: $$ \frac{d \theta_i}{dt} = \omega_i + \zeta_i + \frac{K}{N} \sum_{j=1}^N A_{ij} \sin(\theta_j(t - \tau_{ij}) - \theta_i(t)) + h_i(t)$$ Here, \(\theta_i\) is the phase of oscillator \(i\), \(\omega_i\) is the natural frequency of oscillator \(i\), \(\zeta_i\) is the noise term, \(K\) is the global coupling strength, \(A\) is the coupling matrix, \(\tau_{ij}\) is the coupling delay between oscillators \(i\) and \(j\), and \(h_i(t)\) is the external input to oscillator \(i\).

    The Kuramoto model describes synchronization between oscillators. Nodes in the network are influenced not only by their own natural frequency but also by the other nodes in the network. The strength of this influence is determined by the global coupling and the connectivity matrix, and the degree of synchronization depends on the strength of the coupling. The Kuramoto model is relatively simple, mathematically tractable, and easy to understand. It was first described in 1975 by Yoshiki Kuramoto (Y. Kuramoto. Self-entrainment of a population of coupled non-linear oscillators. In International Symposium on Mathematical Problems in Theoretical Physics, H. Araki, Ed. Berlin, Heidelberg: Springer Berlin Heidelberg, 1975, pp. 420\u2013422).

    "},{"location":"examples/example-0.5-kuramoto/#single-node-simulation","title":"Single node simulation","text":"

    Here we will simulate a single node with no noise. We cap the phase values to lie between 0 and 2*pi and plot them over time.

    "},{"location":"examples/example-0.5-kuramoto/#network-simulation","title":"Network simulation","text":""},{"location":"examples/example-0.6-external-stimulus/","title":"Example 0.6 external stimulus","text":"
    # change to the root directory of the project\nimport os\n\nif os.getcwd().split(\"/\")[-1] in [\"examples\", \"dev\"]:\n    os.chdir(\"..\")\n\n# This will reload all imports as soon as the code changes\n%load_ext autoreload\n%autoreload 2\n
    try:\n    import matplotlib.pyplot as plt\nexcept ImportError:\n    import sys\n    !{sys.executable} -m pip install matplotlib\n    import matplotlib.pyplot as plt\n\n# Some useful functions are provided here\nimport neurolib.utils.functions as func\nimport neurolib.utils.stimulus as stim\nimport numpy as np\nimport scipy\n# Let's import the aln model\nfrom neurolib.models.aln import ALNModel\n
    duration = 5000  # 5 seconds\ndt = 0.1\ninp = stim.ZeroInput(n=2).as_array(duration, dt)\nplt.plot(inp.T);\n
    inp = stim.WienerProcess(n=2).as_array(duration, dt)\nplt.plot(inp.T, alpha=0.8);\n
    inp = stim.OrnsteinUhlenbeckProcess(mu=1.3, sigma=0.04, tau=10.0, n=2).as_array(duration, dt)\nplt.plot(inp.T);\n
    inp = stim.StepInput(step_size=1.43, n=2).as_array(duration, dt)\nplt.plot(inp.T);\n
    # you can also set stim_start and stim_end - in ms\ninp = stim.StepInput(\n    step_size=1.43, start=1200, end=2400, n=2\n).as_array(duration, dt)\nplt.plot(inp.T);\n
    # frequency in Hz; dc_bias=True shifts input by its amplitude\ninp = stim.SinusoidalInput(\n    amplitude=2.5, frequency=2.0, start=1200, dc_bias=True\n).as_array(duration, dt)\ninp2 = stim.SinusoidalInput(amplitude=2.5, frequency=2.0).as_array(\n    duration, dt\n)\nplt.plot(inp.T)\nplt.plot(inp2.T);\n
    # frequency in Hz; dc_bias=True shifts input by its amplitude\ninp = stim.SquareInput(\n    amplitude=2.5, frequency=2.0, start=1200, dc_bias=True\n).as_array(duration, dt)\ninp2 = stim.SquareInput(amplitude=2.5, frequency=2.0).as_array(\n    duration, dt\n)\nplt.plot(inp.T)\nplt.plot(inp2.T);\n
    # ramp_length in ms\ninp = stim.LinearRampInput(inp_max=1.7, ramp_length=3000, start=600).as_array(\n    duration, dt\n)\ninp2 = stim.LinearRampInput(inp_max=-0.7, ramp_length=2000, start=1600).as_array(\n    duration, dt\n)\nplt.plot(inp.T)\nplt.plot(inp2.T);\n
    inp = stim.ExponentialInput(inp_max=2.5, exp_coef=5.0, exp_type=\"rise\").as_array(\n    duration, dt\n)\ninp2 = stim.ExponentialInput(inp_max=2.5, exp_coef=35.0, exp_type=\"rise\").as_array(\n    duration, dt\n)\ninp3 = stim.ExponentialInput(inp_max=2.5, exp_coef=15.0, exp_type=\"decay\").as_array(\n    duration, dt\n)\nplt.plot(inp.T)\nplt.plot(inp2.T)\nplt.plot(inp3.T);\n
    inp = stim.RectifiedInput(amplitude=1.2).as_array(duration, dt)\nplt.plot(inp.T);\n
    # let's create some basic inputs\nou = stim.OrnsteinUhlenbeckProcess(mu=0.1, sigma=0.04, tau=2.0, n=2)\nsq = stim.SquareInput(amplitude=0.2, frequency=1.7, n=2)\nsin = stim.SinusoidalInput(amplitude=0.7, frequency=1.0, n=2)\n\n_, axs = plt.subplots(nrows=3, ncols=1, sharex=True)\nfor i, inp in enumerate([ou, sq, sin]):\n    axs[i].plot(inp.as_array(duration, dt).T);\n
    summed = ou + sq + sin\nplt.plot(summed.as_array(duration, dt).T);\n
    # same lengths - use &\nconc = ou & sq & sin\nplt.plot(conc.as_array(duration, dt).T);\n
    # can also do different length ratios, but for this you need to call ConcatenatedStimulus directly\nconc = stim.ConcatenatedStimulus([ou, sq, sin], length_ratios=[0.5, 2, 5])\nplt.plot(conc.as_array(duration, dt).T);\n
    beast = (ou + sq) & (sq + sin)\nplt.plot(beast.as_array(duration, dt).T);\n
    beast = stim.ConcatenatedStimulus([ou + sq, ou + sin], [2, 5])\nplt.plot(beast.as_array(duration, dt).T);\n
    class PoissonNoiseWithExpKernel(stim.Stimulus):\n    \"\"\"\n    Poisson noise with an exponential kernel.\n    By subclassing `stim.Stimulus` we get the option to select `start` and `end`.\n    \"\"\"\n\n    def __init__(\n        self, amp, freq, tau_syn, start=None, end=None, n=1, seed=None\n    ):\n        # save parameters as attributes\n        self.freq = freq\n        self.amp = amp\n        self.tau_syn = tau_syn\n        # pass other params to parent class\n        super().__init__(\n            start=start, end=end, n=n, seed=seed\n        )\n\n    def generate_input(self, duration, dt):\n        # this is a helper function that creates self.times vector\n        self._get_times(duration=duration, dt=dt)\n        # do the magic here: prepare output vector\n        x = np.zeros((self.n, self.times.shape[0]))\n        # compute total number of spikes\n        total_spikes = int(self.freq * (self.times[-1] - self.times[0]) / 1000.0)\n        # randomly put spikes into the output vector\n        spike_indices = np.random.choice(\n            x.shape[1], (self.n, total_spikes), replace=True\n        )\n        x[np.arange(x.shape[0])[:, None], spike_indices] = 1.0\n        # create exponential kernel\n        time_spike_end = -self.tau_syn * np.log(0.001)\n        arg_spike_end = np.argmin(np.abs(self.times - time_spike_end))\n        spike_kernel = np.exp(-self.times[:arg_spike_end] / self.tau_syn)\n        # convolve over dimensions\n        x = np.apply_along_axis(np.convolve, axis=1, arr=x, v=spike_kernel, mode=\"same\")\n        # self._trim_stim takes care of trimming the stimulus based on start and end\n        return self._trim_stim(x * self.amp)\n
    # test ride\ninp = PoissonNoiseWithExpKernel(\n    amp=1.2, freq=20.0, tau_syn=50.0, n=1, end=4000\n).as_array(duration, dt)\ninp2 = PoissonNoiseWithExpKernel(\n    amp=2.2, freq=10.0, tau_syn=20.0, n=1, start=1000\n).as_array(duration, dt)\nplt.plot(inp.T)\nplt.plot(inp2.T);\n
    # sum and concat test\npois = PoissonNoiseWithExpKernel(freq=20.0, amp=1.2, tau_syn=50.0, n=2)\n\nsummed = pois + sin\nplt.plot(summed.as_array(duration, dt).T);\n
    concat = pois & sin\nplt.plot(concat.as_array(duration, dt).T);\n
    model = ALNModel()\nmodel.params[\"duration\"] = 5 * 1000\nmodel.params[\"sigma_ou\"] = 0.2  # we add some noise\n

    After creating a stimulus object, we can simply call its to_model(model) function and our stimulus is generated.

    stimulus = stim.SinusoidalInput(amplitude=1.0, frequency=1.0).to_model(model)\n

    The stimulus is then set as an input current parameter to the model. The parameter that models a current into the excitatory population is called ext_exc_current. For the inhibitory population, we can use ext_inh_current. We can also set a firing rate input, which will then be integrated over the synapses, using the parameter model.params['ext_exc_rate'].
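
    For instance, the same stimulus could be applied to the inhibitory population instead (a hypothetical variation; in this notebook we only drive the excitatory population):

    # drive the inhibitory population instead of the excitatory one\nmodel.params[\"ext_inh_current\"] = stim.SinusoidalInput(amplitude=1.0, frequency=1.0).to_model(model)\n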

    model.params[\"ext_exc_current\"] = stimulus\nmodel.run()\n

    When we plot the timeseries, we can see that the oscillatory activity locks to the stimulus.

    plt.figure(figsize=(10, 3), dpi=150)\nplt.title(\"1 Hz stimulus\")\nax1 = plt.gca()\nax1.plot(model.t, model.output.T, c=\"k\")\nax2 = plt.gca().twinx()\nax2.plot(model.t, stimulus.squeeze(), lw=2, c=\"r\", alpha=0.8)\nax1.set_xlabel(\"Time [ms]\")\nax1.set_ylabel(\"Activity [Hz]\")\nax2.set_ylabel(\"Stimulus [mV/ms]\", color=\"r\")\nax2.tick_params(axis=\"y\", labelcolor=\"r\")\n
    from neurolib.utils.loadData import Dataset\n\nds = Dataset(\"hcp\")\n
    model = ALNModel(Cmat=ds.Cmat, Dmat=ds.Dmat)\n\n# we chose a parameterization in which the brain network oscillates slowly\n# between up- and down-states\n\nmodel.params[\"mue_ext_mean\"] = 2.56\nmodel.params[\"mui_ext_mean\"] = 3.52\nmodel.params[\"b\"] = 4.67\nmodel.params[\"tauA\"] = 1522.68\nmodel.params[\"sigma_ou\"] = 0.40\n\nmodel.params[\"duration\"] = 0.2 * 60 * 1000\n
    def plot_output_and_spectrum(model, individual=False, vertical_mark=None):\n\"\"\"A simple plotting function for the timeseries\n    and the power spectrum of the activity.\n    \"\"\"\n    fig, axs = plt.subplots(\n        1, 2, figsize=(8, 2), dpi=150, gridspec_kw={\"width_ratios\": [2, 1]}\n    )\n    axs[0].plot(model.t, model.output.T, lw=1)\n    axs[0].set_xlabel(\"Time [ms]\")\n    axs[0].set_ylabel(\"Activity [Hz]\")\n\n    frs, powers = func.getMeanPowerSpectrum(model.output, dt=model.params.dt)\n    axs[1].plot(frs, powers, c=\"k\")\n\n    if individual:\n        for o in model.output:\n            frs, powers = func.getPowerSpectrum(o, dt=model.params.dt)\n            axs[1].plot(frs, powers)\n\n    axs[1].set_xlabel(\"Frequency [Hz]\")\n    axs[1].set_ylabel(\"Power\")\n\n    plt.show()\n
    model.run(chunkwise=True)\n
    plot_output_and_spectrum(model)\n

    neurolib helps you to create a few basic stimuli out of the box, using the stimulus classes from neurolib.utils.stimulus introduced above.

    # construct a stimulus\n# we want the same 25Hz input to all the nodes\nac_stimulus = stim.SinusoidalInput(amplitude=0.2, frequency=25.0).to_model(model)\nprint(ac_stimulus.shape)\n\n# to_model() returns one row per node, so neurolib will apply this stimulus to *all nodes*.\nmodel.params[\"ext_exc_current\"] = ac_stimulus * 5.0\n
    \n(80, 120000)\n\n
    model.run(chunkwise=True)\n
    plot_output_and_spectrum(model)\n
    # now we create a multi-dimensional input at 25Hz\nac_stimulus = stim.SinusoidalInput(amplitude=0.2, frequency=25.0).to_model(model)\nprint(ac_stimulus.shape)\n\n# We set the input to a bunch of nodes to zero.\n# This will have the effect that only nodes 0 to 4 will be stimulated!\nac_stimulus[5:, :] = 0\n\n# multiply the stimulus amplitude\nmodel.params[\"ext_exc_current\"] = ac_stimulus * 5.0\n
    \n(80, 120000)\n\n
    model.run(chunkwise=True)\n

    We can see that the spectrum has a peak at the frequency we stimulated with, but only in a subset of nodes (where we stimulated).

    plot_output_and_spectrum(model, individual=True)\n
    "},{"location":"examples/example-0.6-external-stimulus/#stimulation-example","title":"Stimulation example","text":"

    This notebook will demonstrate how to construct stimuli using a variety of different predefined classes in neurolib.

    You can then apply them as an input to a whole-brain model. As an example, we will see how to add an external current to the excitatory population of the ALNModel.

    "},{"location":"examples/example-0.6-external-stimulus/#lets-talk-stimuli","title":"Let's talk stimuli","text":"

    neurolib offers a range of external stimuli you can apply to your models. These range from basic noise processes, like a Wiener process or an Ornstein-Uhlenbeck process, to simpler forms of input such as sinusoids, rectified inputs, etc. All stimuli are based on the ModelInput class and are available in the neurolib.utils.stimulus subpackage. In the following, we detail the implemented inputs and also show how to easily implement your own custom stimulus.

    All inputs are initialized as classes. Three different functions are provided for generating the actual stimulus as a usable input: as_array(duration, dt) returns a numpy array; as_cubic_splines(duration, dt) returns a CubicHermiteSpline object, a spline representation of the given input, which is useful for the jitcdde backend in MultiModel; and to_model(model), the easiest one, which infers duration, dt and the number of nodes from the simulated model itself and returns a numpy array of the appropriate shape.

    Each stimulus type has its own init function with attributes that apply to that specific kind of stimulus. However, all of them include the attributes n and seed. n controls how many spatial dimensions the stimulus has: for stochastic inputs, such as a noisy Ornstein-Uhlenbeck process, it sets the number of independent realizations that are returned, while for a deterministic stimulus, such as the sinusoidal input, every realization is just a copy of the same signal.
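
    A small sketch of what n and seed do (the parameter values are illustrative):

    # three independent but reproducible realizations of a stochastic stimulus\nou_demo = stim.OrnsteinUhlenbeckProcess(mu=0.0, sigma=0.05, tau=5.0, n=3, seed=42)\nprint(ou_demo.as_array(duration, dt).shape)  # (3, timesteps): one row per realization\n\n# a deterministic stimulus returns identical copies in every row\nsin_demo = stim.SinusoidalInput(amplitude=1.0, frequency=1.0, n=3)\narr = sin_demo.as_array(duration, dt)\nprint(np.allclose(arr[0], arr[1]))  # True\n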

    "},{"location":"examples/example-0.6-external-stimulus/#zero-input-for-convenience","title":"Zero input - for convenience","text":"

    You'll probably never use it, but you know, it's there... Maybe you can use it as a \"pause\" when concatenating two different stimuli.
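
    For example, as a silent gap between two stimuli (a quick sketch using the & concatenation explained further below):

    # sine - pause - sine, concatenated in time\ntone = stim.SinusoidalInput(amplitude=1.0, frequency=2.0)\npaused = tone & stim.ZeroInput() & tone\nplt.plot(paused.as_array(duration, dt).T);\n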

    "},{"location":"examples/example-0.6-external-stimulus/#wienerprocess","title":"WienerProcess","text":"

    Basic Wiener process \(dW\), i.e. random numbers drawn from \(N(0, \sqrt{dt})\).
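
    In plain numpy, equivalent increments could be generated like this (a sketch of the definition above, not neurolib's code):

    # dW: independent Gaussian increments with standard deviation sqrt(dt)\nn_steps = int(duration / dt)\ndW = np.random.normal(loc=0.0, scale=np.sqrt(dt), size=(2, n_steps))\nplt.plot(dW.T, alpha=0.8);\n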

    "},{"location":"examples/example-0.6-external-stimulus/#ornstein-uhlenbeck-process","title":"Ornstein-Uhlenbeck process","text":"

    Ornstein-Uhlenbeck process, i.e. \(dx = (\mu - x)/\tau \cdot dt + \sigma \cdot dW\).
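
    The same process written as an explicit Euler-Maruyama loop (a sketch of the stated equation, not neurolib's implementation; the parameters match the cell above):

    # Euler-Maruyama integration of dx = (mu - x)/tau * dt + sigma * dW\nmu, sigma, tau = 1.3, 0.04, 10.0\nn_steps = int(duration / dt)\nx = np.zeros(n_steps)\nx[0] = mu\nfor i in range(1, n_steps):\n    dW = np.random.normal(0.0, np.sqrt(dt))\n    x[i] = x[i-1] + (mu - x[i-1]) / tau * dt + sigma * dW\nplt.plot(np.arange(n_steps) * dt, x);\n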

    "},{"location":"examples/example-0.6-external-stimulus/#step-input","title":"Step input","text":"

    Just a bias, or a DC offset, that you can use in combination with other types of stimuli.

    "},{"location":"examples/example-0.6-external-stimulus/#sinusoidal-input","title":"Sinusoidal input","text":""},{"location":"examples/example-0.6-external-stimulus/#square-input","title":"Square input","text":""},{"location":"examples/example-0.6-external-stimulus/#linear-ramp","title":"Linear ramp","text":"

    When you need to get somewhere slowly but surely.

    "},{"location":"examples/example-0.6-external-stimulus/#exponential-input","title":"Exponential input","text":"

    When you need to get there fast.

    "},{"location":"examples/example-0.6-external-stimulus/#rectifiedinput","title":"RectifiedInput","text":"

    A mix of inputs that starts with a negative step, followed by an exponential rise and a subsequent decay to zero. Useful for detecting bistability.

    "},{"location":"examples/example-0.6-external-stimulus/#operations-on-stimuli","title":"Operations on stimuli","text":"

    Sometimes you need to concatenate inputs in the temporal dimension to create a mix of different stimuli. This is easy with neurolib's stimuli. All of them allow two operations: + for a sum of different stimuli and & to concatenate them (one after another). Below, we will show some of the weird combinations you can make.

    "},{"location":"examples/example-0.6-external-stimulus/#sum","title":"Sum","text":""},{"location":"examples/example-0.6-external-stimulus/#concatenation","title":"Concatenation","text":""},{"location":"examples/example-0.6-external-stimulus/#mixing-the-operations","title":"Mixing the operations","text":"

    You should be able to use as many + and & as you want. Go crazy.

    "},{"location":"examples/example-0.6-external-stimulus/#creating-a-custom-stimulus","title":"Creating a custom stimulus","text":"

    Creating a custom stimulus is very easy, and you can build your own library of stimuli as inputs for your model. There are three necessary steps: 1. Subclass stim.Input for a basic input, or stim.Stimulus to have the option to set start and end times. 2. Define an __init__() method with the necessary parameters of your stimulus and set the appropriate attributes. 3. Define a generate_input(duration, dt) method, which returns a numpy array with shape (space, time), and that's it. Everything else described above is taken care of. Your new input class will also support operations like + and &.

    Below we implement a new stimulus class that represents currents caused by a Poisson spike train convolved with an exponential kernel.

    "},{"location":"examples/example-0.6-external-stimulus/#using-stimuli-in-neurolib","title":"Using stimuli in neurolib","text":"

    First, we initialize a single node.

    "},{"location":"examples/example-0.6-external-stimulus/#brain-network-stimulation","title":"Brain network stimulation","text":""},{"location":"examples/example-0.6-external-stimulus/#without-stimulation","title":"Without stimulation","text":""},{"location":"examples/example-0.6-external-stimulus/#constructing-a-stimulus","title":"Constructing a stimulus","text":""},{"location":"examples/example-0.6-external-stimulus/#focal-stimulation","title":"Focal stimulation","text":"

    In the previous example, the stimulus was applied to all nodes simultaneously. We can also apply stimulation to a specific set of nodes.

    "},{"location":"examples/example-0.7-custom-model/","title":"Example 0.7 custom model","text":"

    This notebook demonstrates how to implement your own model in neurolib. There are two main parts of each model: its class that inherits from the Model base class and its timeIntegration() function.

    # change to the root directory of the project\nimport os\nif os.getcwd().split(\"/\")[-2] == \"neurolib\":\n    os.chdir('..')\n\n%load_ext autoreload\n%autoreload 2\n
    try:\n    import matplotlib.pyplot as plt\nexcept ImportError:\n    import sys\n    !{sys.executable} -m pip install matplotlib\n    import matplotlib.pyplot as plt\n

    In this example we will implement a linear model with the following equation:

    \(\frac{d}{dt} x_i(t) = - \frac{x_i(t)}{\tau} + \sum_{j=1}^{N} K G_{ij} x_j(t)\).

    Here, we simulate \\(N\\) nodes that are coupled in a network. \\(x_i\\) are the elements of an \\(N\\)-dimensional state vector, \\(\\tau\\) is the decay time constant, \\(G\\) is the adjacency matrix and \\(K\\) is the global coupling strength.
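
    For a single uncoupled node (\(K = 0\)) the equation reduces to \(\dot{x} = -x/\tau\) with the exact solution \(x(t) = x(0)\, e^{-t/\tau}\), which gives us a quick sanity check for the Euler scheme we are about to implement (an illustrative cell, not part of the original notebook):

    import numpy as np\n\ntau, dt, duration = 10.0, 0.1, 100.0\nsteps = int(duration / dt)\nx = np.empty(steps + 1)\nx[0] = 1.0\nfor i in range(1, steps + 1):\n    x[i] = x[i-1] + (-x[i-1] / tau) * dt  # Euler step\n# the numerical endpoint should be close to the exact solution\nprint(x[-1], np.exp(-duration / tau))\n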

    We first create a class for the model called LinearModel which inherits lots of functionality from the Model base class. We define state_vars and default_output so that neurolib knows how to handle the variables of the system. Next, we define init_vars in order to use the autochunk integration scheme, so we can save a lot of RAM when we run very long simulations.

    class LinearModel(Model):\n    state_vars = [\"x\"]\n    default_output = \"x\"\n    init_vars = [\"x_init\"]\n
    Next we define a simple parameter dictionary called params. In here, we can define all the necessary parameters of the model and change their values later. In this example, we set the timescale \(\tau\) to 10 ms, the coupling strength \(K\) to 0.01, the integration time step dt to 0.1 ms, and the duration to 100 ms.
    params = dict(tau=10, K=1e-2, dt=1e-1, duration=100)\n
    We are now ready to set up the constructor of our model! This method is supposed to set up the model and prepare it for integration. All the magic happens in the background! We pass the self.timeIntegration function and the parameter dictionary self.params to the base class using super().__init__().
    def __init__(self, Cmat=np.zeros((1,1))):\n        self.params['Cmat'] = Cmat\n        super().__init__(self.timeIntegration, self.params)\n
    That wasn't too bad, was it? We are finally ready to define the time integration method that prepares all variables and passes them to the function that will crunch the numbers. Here we prepare the numpy arrays that will hold the simulation results. We have to prepare them before we can execute the numba code.

    def timeIntegration(self, p):\n    N = p['Cmat'].shape[0]\n    t = np.arange(1, p['duration']/p['dt'] + 1) # holds time steps\n    x = np.ndarray((N, len(t)+1)) # holds variable x\n
    Next, we make use of a neurolib convention to prepare the initial conditions of our model. If you remember, we defined init_vars above in order to use the autochunk feature. The autochunk feature will automatically fill this parameter with the last state of the last simulated chunk so the model integration can be continued without having to remember the entire output and state variables of the model indefinitely. In this line, we check whether x_init is set or not (which it will be, when we use chunkwise integration). If it is not set, we simply use random initial conditions using rand((N, 1)). Remember that the convention for array dimensions is array[space, time], meaning that we only fill in the first time step with the initial condition.
    # either use predefined initial conditions or random ones\nx[:, :1] = p.get('x_init') if p.get('x_init') is not None else rand((N, 1))\n
    We're ready to call our accelerated integration part and return the results \ud83d\ude80!
    return njit_integrate(x, t, p['tau'], p['K'], N, p['Cmat'], p['dt'])\n

    Remember to put this function outside of the class definition, so we can use numba acceleration to greatly increase the performance of our code. We first have to let numba know which part of the code to precompile. We do this by simply placing the decorator @numba.njit in the line above the integration function. An easy way of getting 100x faster code! \u2764\ufe0f numba!

    @numba.njit\ndef njit_integrate(x, t, tau, K, N, Cmat, dt):\n
    Next, we do some simple math. We first loop over all time steps. If you have prepared the array t as described above, you can simply loop over its length. In the next line, we calculate the coupling term from the model equation above. However, instead of looping over the sum, we use a little trick and simply compute the dot product between the coupling matrix G and the state vector x. This results in an N-dimensional vector that carries the amount of input each node receives at each time step. Finally, we loop over all nodes and add everything up.

    for i in range(1, 1 + len(t)): # loop over time\n    inp = Cmat.dot(x[:, i-1]) # input vector\n    for n in range(N): # loop over nodes\n
    In the next line, we integrate the equation shown above. This integration scheme is called Euler integration and is the simplest way of solving an ODE. The idea is best expressed as x_next = x_before + f(x) * dt, where f(x) is simply the time derivative \(\frac{d}{dt} x_i(t)\) shown above.
    x[n, i] = x[n, i-1] + (- x[n, i-1] / tau + K * inp[n]) * dt # model equations\n
    We're done! The only thing left to do is to return the data so that neurolib can take over from here on. The outputs of this simulation will be available in the model.outputs attribute. You can see an example time series below.
    return t, x\n

    import numba\nimport numpy as np\nfrom numpy.random import random as rand\nfrom neurolib.models.model import Model\n\nclass LinearModel(Model):\n    state_vars = [\"x\"]\n    default_output = \"x\"\n    init_vars = [\"x_init\"]\n    params = dict(tau=10, K=1e-2, dt=1e-1, duration=100)\n    def __init__(self, Cmat=np.zeros((1,1))):\n        self.params['Cmat'] = Cmat\n        super().__init__(self.timeIntegration, self.params)\n\n    def timeIntegration(self, p):\n        p['N'] = p['Cmat'].shape[0] # number of nodes\n        t = np.arange(1, p['duration']/p['dt'] + 1) # holds time steps\n        x = np.ndarray((p['N'], len(t)+1)) # holds variable x\n        # either use predefined initial conditions or random ones\n        x[:, :1] = p['x_init'] if 'x_init' in p else rand((p['N'], 1))\n        return njit_integrate(x, t, p['tau'], p['K'], p['N'], p['Cmat'], p['dt'])\n\n@numba.njit\ndef njit_integrate(x, t, tau, K, N, Cmat, dt):\n    for i in range(1, 1 + len(t)): # loop over time\n        inp = Cmat.dot(x[:, i-1]) # input vector\n        for n in range(N): # loop over nodes\n            x[n, i] = x[n, i-1] +\\\n            (- x[n, i-1] / tau + K * inp[n]) * dt # model equations\n    return t, x\n

    We prepare a \"mock\" connectivity matrix, simply consisting of 12x12 random numbers, meaning that we will simulate 12 LinearModel's in a network.

    Cmat = rand((12, 12)) # use a random connectivity matrix\nmodel = LinearModel(Cmat) # initialize the model\n

    That's it, we are finally ready to run the model.

    model.run()\n
    plt.plot(model.t, model.output.T);\nplt.xlabel(\"Time [ms]\")\nplt.ylabel(\"Activity $x$\")\n
    \nText(0, 0.5, 'Activity $x$')\n

    Since we've followed the model implementation guidelines, the model is also compatible with chunkwise integration and can produce a BOLD signal. Let's try it out!

    model.params.duration=200000\nmodel.run(chunkwise=True, append_outputs=True, bold=True)\n
    plt.plot(model.BOLD.t_BOLD, model.BOLD.BOLD.T);\nplt.xlabel(\"Time [ms]\")\nplt.ylabel(\"BOLD activity\")\n
    \nText(0, 0.5, 'BOLD activity')\n
    "},{"location":"examples/example-0.7-custom-model/#minimal-model-implementation","title":"Minimal model implementation","text":""},{"location":"examples/example-0.7-custom-model/#model-equations","title":"Model equations","text":""},{"location":"examples/example-0.7-custom-model/#implementation","title":"Implementation","text":""},{"location":"examples/example-0.7-custom-model/#numba-time-integration","title":"Numba time integration","text":""},{"location":"examples/example-0.7-custom-model/#code","title":"Code","text":""},{"location":"examples/example-0.7-custom-model/#running-the-model","title":"Running the model","text":""},{"location":"examples/example-0.7-custom-model/#plot-outputs","title":"Plot outputs","text":""},{"location":"examples/example-0.7-custom-model/#bold-and-autochunk","title":"BOLD and autochunk","text":""},{"location":"examples/example-1-aln-parameter-exploration/","title":"Example 1 aln parameter exploration","text":"
    # change to the root directory of the project\nimport os\nif os.getcwd().split(\"/\")[-1] == \"examples\":\n    os.chdir('..')\n\n# This will reload all imports as soon as the code changes\n%load_ext autoreload\n%autoreload 2    \n
    try:\n    import matplotlib.pyplot as plt\nexcept ImportError:\n    import sys\n    !{sys.executable} -m pip install matplotlib\n    import matplotlib.pyplot as plt\n\nimport numpy as np\n\nfrom neurolib.models.aln import ALNModel\nfrom neurolib.utils.parameterSpace import ParameterSpace\nfrom neurolib.optimize.exploration import BoxSearch\n\n# a nice color map\nplt.rcParams['image.cmap'] = 'plasma'\n
    aln = ALNModel()\n
    parameters = ParameterSpace({\"mue_ext_mean\": np.linspace(0, 3, 2), \"mui_ext_mean\": np.linspace(0, 3, 2)})\n# info: chose np.linspace(0, 3, 21) or more, values here are low for testing\nsearch = BoxSearch(aln, parameters, filename=\"example-1.hdf\")\n
    search.run()\n
    search.loadResults()\n
    print(\"Number of results: {}\".format(len(search.results)))\n
    # Example analysis of the results\n# The .results attribute is a list and can be indexed by the run \n# number (which is also the index of the pandas dataframe .dfResults).\n# Here we compute the maximum firing rate of the node in the last second\n# and add the result (a float) to the pandas dataframe.\nfor i in search.dfResults.index:\n    search.dfResults.loc[i, 'max_r'] = np.max(search.results[i]['rates_exc'][:, -int(1000/aln.params['dt']):])\n
    plt.imshow(search.dfResults.pivot_table(values='max_r', index = 'mui_ext_mean', columns='mue_ext_mean'), \\\n           extent = [min(search.dfResults.mue_ext_mean), max(search.dfResults.mue_ext_mean),\n                     min(search.dfResults.mui_ext_mean), max(search.dfResults.mui_ext_mean)], origin='lower')\nplt.colorbar(label='Maximum rate [Hz]')\nplt.xlabel(\"Input to E\")\nplt.ylabel(\"Input to I\")\n
    \nText(0, 0.5, 'Input to I')\n
    "},{"location":"examples/example-1.1-custom-parameter-exploration/","title":"Example 1.1 custom parameter exploration","text":"
    # change to the root directory of the project\nimport os\nif os.getcwd().split(\"/\")[-1] == \"examples\":\n    os.chdir('..')\n\n# This will reload all imports as soon as the code changes\n%load_ext autoreload\n%autoreload 2    \n
    try:\n    import matplotlib.pyplot as plt\nexcept ImportError:\n    import sys\n    !{sys.executable} -m pip install matplotlib\n    import matplotlib.pyplot as plt\n\nimport numpy as np\n\nfrom neurolib.utils.parameterSpace import ParameterSpace\nfrom neurolib.optimize.exploration import BoxSearch\n
    def explore_me(traj):\n    pars = search.getParametersFromTraj(traj)\n    # let's calculate the distance to a circle\n    computation_result = abs((pars['x']**2 + pars['y']**2) - 1)\n    result_dict = {\"distance\" : computation_result}\n    search.saveToPypet(result_dict, traj)\n
    parameters = ParameterSpace({\"x\": np.linspace(-2, 2, 2), \"y\": np.linspace(-2, 2, 2)})\n# info: chose np.linspace(-2, 2, 40) or more, values here are low for testing\nsearch = BoxSearch(evalFunction = explore_me, parameterSpace = parameters, filename=\"example-1.1.hdf\")\n
    search.run()\n
    search.loadResults()\nprint(\"Number of results: {}\".format(len(search.results)))\n

    The runs are also ordered in a simple pandas dataframe called search.dfResults. We cycle through all results by calling search.results[i] and load the desired result (here the distance to the circle) into the dataframe.

    for i in search.dfResults.index:\n    search.dfResults.loc[i, 'distance'] = search.results[i]['distance']\n\nsearch.dfResults\n
    x y distance\n0 -2.0 -2.000000 7.000000\n1 -2.0 -1.897436 6.600263\n2 -2.0 -1.794872 6.221565\n3 -2.0 -1.692308 5.863905\n4 -2.0 -1.589744 5.527285\n... ... ... ...\n1595 2.0 1.589744 5.527285\n1596 2.0 1.692308 5.863905\n1597 2.0 1.794872 6.221565\n1598 2.0 1.897436 6.600263\n1599 2.0 2.000000 7.000000\n

    1600 rows \u00d7 3 columns

    And of course a plot can visualize the results very easily.

    plt.imshow(search.dfResults.pivot_table(values='distance', index = 'x', columns='y'), \\\n           extent = [min(search.dfResults.x), max(search.dfResults.x),\n                     min(search.dfResults.y), max(search.dfResults.y)], origin='lower')\nplt.colorbar(label='Distance to the unit circle')\n
    \n<matplotlib.colorbar.Colorbar at 0x124a71588>\n
    "},{"location":"examples/example-1.1-custom-parameter-exploration/#a-simple-parameter-exploration","title":"A simple parameter exploration","text":"

    This notebook demonstrates a very simple parameter exploration of a custom function that we have defined. It is a simple function that returns the distance to a unit circle, so we expect our parameter exploration to resemble a circle.

    "},{"location":"examples/example-1.1-custom-parameter-exploration/#define-the-evaluation-function","title":"Define the evaluation function","text":"

    Here we define a very simple evaluation function. The function needs to take in traj as an argument, which is the pypet trajectory. This is how the function knows what parameters were assigned to it. Using the builtin function search.getParametersFromTraj(traj) we can then retrieve the parameters for this run. They are returned as a dictionary and can be accessed in the function.

    In the last step, we use search.saveToPypet(result_dict, traj) to save the results to the pypet trajectory and to an HDF. In between, the computational magic happens!

    "},{"location":"examples/example-1.1-custom-parameter-exploration/#define-the-parameter-space-and-exploration","title":"Define the parameter space and exploration","text":"

    Here we define which space we want to cover. For this, we use the builtin class ParameterSpace which provides a very easy interface to the exploration. To initialize the exploration, we simply pass the evaluation function and the parameter space to the BoxSearch class.

    "},{"location":"examples/example-1.1-custom-parameter-exploration/#run","title":"Run","text":"

    And off we go!

    "},{"location":"examples/example-1.1-custom-parameter-exploration/#get-results","title":"Get results","text":"

    We can easily obtain the results from pypet. First we call search.loadResults() to make sure that the results are loaded from the hdf file to our instance.

    "},{"location":"examples/example-1.2-brain-network-exploration/","title":"Example 1.2 brain network exploration","text":"
    #hide\n# change to the root directory of the project\nimport os\nif os.getcwd().split(\"/\")[-1] == \"examples\":\n    os.chdir('..')\n\n# This will reload all imports as soon as the code changes\n%load_ext autoreload\n%autoreload 2    \n
    \nThe autoreload extension is already loaded. To reload it, use:\n  %reload_ext autoreload\n\n
    #hide\nimport logging\nlogging.getLogger().setLevel(logging.INFO)\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n
    #hide\ntry:\n    import matplotlib.pyplot as plt\nexcept ImportError:\n    import sys\n    !{sys.executable} -m pip install matplotlib\n    import matplotlib.pyplot as plt\n\nimport numpy as np\n\n# Let's import all the necessary functions for the parameter\nfrom neurolib.models.fhn import FHNModel\nfrom neurolib.utils.parameterSpace import ParameterSpace\nfrom neurolib.optimize.exploration import BoxSearch\n\n# load some utilty functions for explorations\nimport neurolib.utils.pypetUtils as pu\nimport neurolib.utils.paths as paths\nimport neurolib.optimize.exploration.explorationUtils as eu\n\n# The brain network dataset\nfrom neurolib.utils.loadData import Dataset\n\n# Some useful functions are provided here\nimport neurolib.utils.functions as func\n\n# a nice color map\nplt.rcParams['image.cmap'] = 'plasma'\n

    We load a dataset (in this case the hcp dataset from the Human Connectome Project) and initialize a model to run on each node of the brain network (here the FHNModel, which is the FitzHugh-Nagumo model).

    ds = Dataset(\"hcp\")\nmodel = FHNModel(Cmat = ds.Cmat, Dmat = ds.Dmat)\nmodel.params.duration = 20 * 1000 #ms\n# testing: model.params.duration = 20 * 1000 #ms\n# original: model.params.duration = 5 * 60 * 1000 #ms\n

    Running the model is as simple as entering model.run(chunkwise=True).

    We define a parameter range to explore. Our first parameter is x_ext, which is the input to each node of the FHNModel in a brain network; it is therefore a list with N entries, one per node. Our next parameter is K_gl, the global coupling strength. Finally, we have the coupling parameter, which defines how each FHNModel is coupled to its adjacent nodes: either via additive coupling (activity += input) or via diffusive coupling (activity += (input - activity)).
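
    Schematically, per time step, the two modes differ in what each node receives (a toy numpy sketch of the idea; neurolib's timeIntegration functions are authoritative for the exact conventions):

    N = 4\nG = np.random.rand(N, N)  # toy adjacency matrix\nx = np.random.rand(N)     # node activities at the current step\nK_gl, dt = 1.0, 0.1\n\n# additive: each node simply gains the summed input from its neighbours\nadditive_step = x + K_gl * G.dot(x) * dt\n\n# diffusive: coupling via differences, sum_j G_ij * (x_j - x_i)\ndiffusive_step = x + K_gl * (G.dot(x) - G.sum(axis=1) * x) * dt\n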

    parameters = ParameterSpace({\"x_ext\": [np.ones((model.params['N'],)) * a for a in  np.linspace(0, 2, 2)] # testing: 2, original: 41\n                             ,\"K_gl\": np.linspace(0, 2, 2) # testing: 2, original: 41\n                             ,\"coupling\" : [\"additive\", \"diffusive\"]\n                            }, kind=\"grid\")\nsearch = BoxSearch(model=model, parameterSpace=parameters, filename=\"example-1.2.0.hdf\")\n

    We run the exploration simply by calling the run() function of the BoxSearch class. We can pass parameters to this function, which will be passed directly to the FHNModel.run() function of the simulated model. This way, we can easily specify to run the simulation chunkwise, without storing all the activity in memory, and to simulate BOLD activity as well.

    Note that the default behaviour of the BoxSearch class is to save the default_output of each model and, if BOLD is simulated, the BOLD data as well. If the exploration is initialized with BoxSearch(saveAllModelOutputs=True), it will save all outputs of the model instead. This can obviously create a lot of data to store, so please use this option at your own discretion.
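    For illustration, the flag is simply passed when constructing the search (a sketch reusing the model and parameters defined above):

    search = BoxSearch(model=model, parameterSpace=parameters, filename=\"example-1.2.0.hdf\", saveAllModelOutputs=True)\n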

    search.run(chunkwise=True, bold=True)\n

    A simple helper function for getting the trajectories of an hdf file created by pypet can be found in pypetUtils.py (aka pu). This way, you can see which explorations are stored in the file and decide later which one you want to load for analysis.

    pu.getTrajectorynamesInFile(os.path.join(paths.HDF_DIR, \"example-1.2.0.hdf\"))\n
    \n['results-2020-04-08-02H-01M-53S', 'results-2020-04-08-02H-50M-09S']\n

    The default behaviour is to load the latest exploration. Its name is also stored in search.trajectoryName:

    search.trajectoryName\n
    \n'results-2020-04-08-02H-50M-09S'\n

    Now we load all results. As said above, the newest exploration is loaded by default. You can load results from earlier explorations by adding the argument trajectoryName=results-from-earlier, and you can choose another hdf file by using the argument filename=/path/to/explorations.hdf.
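    For example, to load the older of the two explorations listed above, one could call (a sketch; the trajectory name is taken from the output of pu.getTrajectorynamesInFile()):

    search.loadResults(filename=os.path.join(paths.HDF_DIR, \"example-1.2.0.hdf\"), trajectoryName=\"results-2020-04-08-02H-01M-53S\")\n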

    Remember that search.loadResults() will load all results into memory. This can consume a lot of RAM, depending on how big the exploration was.

    search.loadResults()\n
    print(\"Number of results: {}\".format(len(search.results)))\n

    One way of loading a single result without loading everything else into RAM is to use the built-in function search.getRun(). However, you need to know which runId you're looking for! For this, you can run search.loadDfResults() to create a pandas.DataFrame search.dfResults with all parameters (this also happens when you call search.loadResults()).
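    A short sketch of this workflow (assuming the exploration from above, where the runId is simply the row index of search.dfResults):

    search.loadDfResults()\n# e.g. look up the runId of the first run with diffusive coupling,\n# then load only that run with search.getRun()\nrId = search.dfResults[search.dfResults.coupling == \"diffusive\"].index[0]\n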

    search.getRun(6).params\n
    \n{'x_ext': array([0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n        0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n        0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n        0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n        0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]),\n 'K_gl': 0.15000000000000002,\n 'coupling': 'additive'}\n

    After loading the results with search.loadResults(), they are available as a simple list via search.results. Let's look at the time series of one result.

    rId = 2 # test:2, original: 1327\nplt.plot(search.results[rId].t, search.results[rId].x.T);\nplt.xlabel(\"Time [ms]\")\nplt.ylabel(\"Activity\")\n
    \nText(0, 0.5, 'Activity')\n

    Using search.loadResults() also created a pandas.DataFrame with the parameters of each individual run, indexed by its runId.

    search.dfResults.iloc[-4:]\n
    x_ext K_gl coupling fc max_x amp_x 3358 2.0 1.95 additive 0.304496 2.446207 1.463651e+00 3359 2.0 1.95 diffusive 0.221238 0.872110 2.275957e-14 3360 2.0 2.00 additive 0.310389 2.489208 1.503437e+00 3361 2.0 2.00 diffusive 0.226729 0.872110 2.253753e-14

    If you remember from before, the external input parameter x_ext is a list of length N (one entry per node). Since all entries are the same in this example, we reduce the parameter to the first entry of each list.

    search.dfResults.x_ext = [a[0] for a in list(search.dfResults.x_ext)]\n
    search.dfResults.iloc[-4:]\n
    x_ext K_gl coupling 3358 2.0 1.95 additive 3359 2.0 1.95 diffusive 3360 2.0 2.00 additive 3361 2.0 2.00 diffusive

    We can use eu.processExplorationResults() from explorationUtils.py (aka eu) to process the results from the simulation and store them in search.dfResults, our pandas.DataFrame of all results:

    eu.processExplorationResults(search, model=model, ds=ds, bold_transient=10000)\n

    This finally gives us a dataframe with the parameters and the corresponding postprocessed values, which we can access using search.dfResults.

    We can use the utility function eu.findCloseResults() to navigate this DataFrame and find, for example, the runId of a run with a specific parameter configuration.

    eu.findCloseResults(search.dfResults, dist=0.2, K_gl=0.5, x_ext = 1.0)\n
    x_ext K_gl coupling fc max_x amp_x 1324 0.80 0.30 additive 0.364910 1.192267 1.428502 1325 0.80 0.30 diffusive 0.302487 0.576765 0.467873 1326 0.80 0.35 additive 0.337226 1.241613 1.511995 1327 0.80 0.35 diffusive 0.187238 0.547917 0.423548 1328 0.80 0.40 additive 0.200489 1.287626 1.590182 ... ... ... ... ... ... ... 1909 1.15 0.55 diffusive 0.363809 0.772698 0.577180 1910 1.15 0.60 additive 0.348988 1.234206 1.050313 1911 1.15 0.60 diffusive 0.278103 0.768822 0.566546 1912 1.15 0.65 additive 0.371943 1.276929 1.091328 1913 1.15 0.65 diffusive 0.292993 0.762355 0.550818

    128 rows \u00d7 6 columns

    To understand what is happening in eu.processExplorationResults(), it helps to see how we could do the postprocessing on the loaded data ourselves. Let's calculate the correlation to empirical functional connectivity using the built-in functions func.fc() and func.matrix_correlation().

    mean_corr = np.mean([func.matrix_correlation(func.fc(search.results[rId]['BOLD']), fc) for fc in ds.FCs])\n\nprint(f\"Mean correlation of run {rId} with empirical FC matrices is {mean_corr:.02}\")\n
    \nMean correlation of run 3324 with empirical FC matrices is 0.28\n\n

    Another useful function is eu.plotExplorationResults(), which helps you visualize the results of the exploration. You can specify which parameters go on the x- and y-axes using the par1=[parameter_name, parameter_label] and par2 arguments, and you can define by which parameter plane the results should be \"sliced\".

    plot_key_label = \"Maximum of output\"\neu.plotExplorationResults(search.dfResults, par1=['x_ext', '$x_{ext}$'], par2=['K_gl', '$K$'], plot_key='max_x',  by=['coupling'], by_label = ['coupling'], plot_key_label=plot_key_label, one_figure=True)\n

    We want to find parameters for which the brain network model produces realistic BOLD functional connectivity. For this, we calculated the entry fc in search.dfResults by taking the func.fc() of the model.BOLD time series and comparing it to empirical data using func.matrix_correlation.

    Below, the average of this value across all subjects of the dataset is plotted. A higher value (brighter color) means a better fit to the empirical data. Observe how the best solutions tend to cluster at the edges of bifurcations, indicating that correlations in the network are generated by multiple nodes undergoing a bifurcation together, such as the transition from the constant-activity (fixed point) solution to an oscillation.

    plot_key_label = \"FC correlation\"\neu.plotExplorationResults(search.dfResults, par1=['x_ext', '$x_{ext}$'], par2=['K_gl', '$K$'], plot_key='fc',  by=['coupling'], by_label = ['coupling'], plot_key_label=plot_key_label, one_figure=True)\n
    "},{"location":"examples/example-1.2-brain-network-exploration/#parameter-exploration-of-a-brain-network-model","title":"Parameter exploration of a brain network model","text":"

    This notebook demonstrates how to scan the parameter space of a brain network model using neurolib. We will simulate BOLD activity and compare the results to empirical data to identify optimal parameters of the model.

    The steps outlined in this notebook are the following:

    1. We load a DTI and resting-state fMRI dataset (hcp) and set up a brain network using the FHNModel.
    2. We simulate the system for a range of different parameter configurations.
    3. We load the simulated data from disk.
    4. We postprocess the results and obtain the model fit.
    5. Finally, we plot the results in the parameter space of the exploration.
    "},{"location":"examples/example-1.2-brain-network-exploration/#1-set-up-a-brain-network","title":"1. Set up a brain network","text":""},{"location":"examples/example-1.2-brain-network-exploration/#2-run-the-exploration","title":"2. Run the exploration","text":""},{"location":"examples/example-1.2-brain-network-exploration/#3-load-results","title":"3. Load results","text":""},{"location":"examples/example-1.2-brain-network-exploration/#4-postprocessing","title":"4. Postprocessing","text":""},{"location":"examples/example-1.2-brain-network-exploration/#5-plot","title":"5. Plot","text":""},{"location":"examples/example-1.2-brain-network-exploration/#bold-functional-connectivity","title":"BOLD functional connectivity","text":""},{"location":"examples/example-1.2.1-brain-exploration-postprocessing/","title":"Example 1.2.1 brain exploration postprocessing","text":"
    #hide\n# change to the root directory of the project\nimport os\nif os.getcwd().split(\"/\")[-1] == \"examples\":\n    os.chdir('..')\n\n# This will reload all imports as soon as the code changes\n%load_ext autoreload\n%autoreload 2    \n
    #hide\ntry:\n    import matplotlib.pyplot as plt\nexcept ImportError:\n    import sys\n    !{sys.executable} -m pip install matplotlib\n    import matplotlib.pyplot as plt\n# a nice color map\nplt.rcParams['image.cmap'] = 'plasma'\n\nimport numpy as np\n\nfrom neurolib.models.aln import ALNModel\nfrom neurolib.utils.parameterSpace import ParameterSpace\nfrom neurolib.optimize.exploration import BoxSearch\nimport neurolib.utils.functions as func\n\nfrom neurolib.utils.loadData import Dataset\nds = Dataset(\"hcp\")\n
    model = ALNModel(Cmat = ds.Cmat, Dmat = ds.Dmat) # simulates the whole-brain model in 10s chunks by default if bold == True\n# Resting state fits\nmodel.params['mue_ext_mean'] = 1.57\nmodel.params['mui_ext_mean'] = 1.6\n#model.params['sigma_ou'] = 0.09\nmodel.params['b'] = 5.0\nmodel.params['dt'] = 0.2\nmodel.params['duration'] = 0.2 * 1000 #ms\n# testing: model.params['duration'] = 0.2 * 60 * 1000 #ms\n# real: model.params['duration'] = 1.0 * 60 * 1000 #ms\n
    \nMainProcess root INFO     aln: Model initialized.\n\n
    def evaluateSimulation(traj):\n    # get the model from the trajectory using `search.getModelFromTraj(traj)`\n    model = search.getModelFromTraj(traj)\n    # initialize the model with random initial conditions\n    model.randomICs()\n    defaultDuration = model.params['duration']\n    invalid_result = {\"fc\" : np.nan, \"fcd\" : np.nan}\n\n    # -------- STAGEWISE EVALUATION  --------\n    stagewise = True\n    if stagewise:\n        # -------- stage wise simulation --------\n\n        # Stage 1 : simulate for a few seconds to see if there is any activity\n        # ---------------------------------------\n        model.params['duration'] = 3*1000.\n        model.run()\n\n        # check if stage 1 was successful\n        amplitude = np.max(model.output[:, model.t > 500]) - np.min(model.output[:, model.t > 500])\n        if amplitude < 0.05:\n            search.saveToPypet(invalid_result, traj)\n            return invalid_result, {}\n\n        # Stage 2: simulate BOLD for a few seconds to see if it moves\n        # ---------------------------------------\n        model.params['duration'] = 30*1000.\n        model.run(chunkwise=True, bold = True)\n\n        if np.max(np.std(model.outputs.BOLD.BOLD[:, 10:15], axis=1)) < 1e-5:\n            search.saveToPypet(invalid_result, traj)\n            return invalid_result, {}\n\n    # Stage 3: full and final simulation\n    # ---------------------------------------\n    model.params['duration'] = defaultDuration\n    model.run(chunkwise=True, bold = True)\n\n    # -------- POSTPROCESSING  --------\n    # FC matrix correlation to all subject rs-fMRI\n    BOLD_TRANSIENT = 10000\n    fc_score = np.mean([func.matrix_correlation(func.fc(model.BOLD.BOLD[:, model.BOLD.t_BOLD > BOLD_TRANSIENT]), fc) for fc in ds.FCs])\n\n    # FCD to all subject rs-fMRI\n    try:\n        fcd_score = np.mean([func.ts_kolmogorov(model.BOLD.BOLD[:, model.BOLD.t_BOLD > BOLD_TRANSIENT], ds.BOLDs[i]) for i in range(len(ds.BOLDs))])\n    except:\n        fcd_score = np.nan\n\n    # let's build the results dictionary\n    result_dict = {\"fc\" : fc_score, \"fcd\" : fcd_score}\n    # we could also save the output of the model by adding to the results_dict like this:\n    # result_dict = {\"fc\" : fc_score, \"fcd\" : fcd_score, \"outputs\" : model.outputs}\n\n    # Save the results to pypet. \n    # Remember: This has to be a dictionary!\n    search.saveToPypet(result_dict, traj)\n
    parameters = ParameterSpace({\"mue_ext_mean\": np.linspace(0, 3.0, 2), \"mui_ext_mean\": np.linspace(0.2, 3.0, 2)})\n# info: choose np.linspace(0, 3, 21) or more, values here are low for testing\nsearch = BoxSearch(evalFunction = evaluateSimulation, model=model, parameterSpace=parameters, filename=\"example-1.2.1.hdf\")\n
    \nMainProcess root INFO     Number of processes: 80\nMainProcess pypet.storageservice.HDF5StorageService INFO     I will use the hdf5 file `/mnt/raid/data/cakan/hdf/example-1.2.1.hdf`.\nMainProcess pypet.environment.Environment INFO     Environment initialized.\n/home/cakan/anaconda/lib/python3.7/site-packages/pypet/parameter.py:884: FutureWarning: Conversion of the second argument of issubdtype from `str` to `str` is deprecated. In future, it will be treated as `np.str_ == np.dtype(str).type`.\n  if np.issubdtype(dtype, np.str):\nMainProcess root INFO     Number of parameter configurations: 4\nMainProcess root INFO     BoxSearch: Environment initialized.\n\n
    search.run()\n
    \nMainProcess pypet.environment.Environment INFO     I am preparing the Trajectory for the experiment and initialise the store.\nMainProcess pypet.environment.Environment INFO     Initialising the storage for the trajectory.\nMainProcess pypet.storageservice.HDF5StorageService INFO     Initialising storage or updating meta data of Trajectory `results-2020-04-08-01H-16M-48S`.\nMainProcess pypet.storageservice.HDF5StorageService INFO     Finished init or meta data update for `results-2020-04-08-01H-16M-48S`.\nMainProcess pypet.environment.Environment INFO     \n************************************************************\nSTARTING runs of trajectory\n`results-2020-04-08-01H-16M-48S`.\n************************************************************\n\nMainProcess pypet.storageservice.HDF5StorageService INFO     Initialising storage or updating meta data of Trajectory `results-2020-04-08-01H-16M-48S`.\nMainProcess pypet.storageservice.HDF5StorageService INFO     Finished init or meta data update for `results-2020-04-08-01H-16M-48S`.\nMainProcess pypet.environment.Environment INFO     Starting multiprocessing with at most 80 processes running at the same time.\nMainProcess pypet INFO     PROGRESS: Finished 0/4 runs [                    ]  0.0%\nMainProcess pypet INFO     PROGRESS: Finished 1/4 runs [=====               ] 25.0%, remaining: 0:00:02\nMainProcess pypet INFO     PROGRESS: Finished 2/4 runs [==========          ] 50.0%, remaining: 0:00:00\nMainProcess pypet INFO     PROGRESS: Finished 3/4 runs [===============     ] 75.0%, remaining: 0:00:09\nMainProcess pypet INFO     PROGRESS: Finished 4/4 runs [====================]100.0%\nMainProcess pypet.storageservice.HDF5StorageService INFO     Initialising storage or updating meta data of Trajectory `results-2020-04-08-01H-16M-48S`.\nMainProcess pypet.storageservice.HDF5StorageService INFO     Finished init or meta data update for `results-2020-04-08-01H-16M-48S`.\nMainProcess pypet.environment.Environment INFO     \n************************************************************\nFINISHED all runs of trajectory\n`results-2020-04-08-01H-16M-48S`.\n************************************************************\n\nMainProcess pypet.environment.Environment INFO     \n************************************************************\nSTARTING FINAL STORING of trajectory\n`results-2020-04-08-01H-16M-48S`\n************************************************************\n\nMainProcess pypet.storageservice.HDF5StorageService INFO     Start storing Trajectory `results-2020-04-08-01H-16M-48S`.\nMainProcess pypet.storageservice.HDF5StorageService INFO     Storing branch `config`.\n/home/cakan/anaconda/lib/python3.7/site-packages/pypet/storageservice.py:4597: FutureWarning: Conversion of the second argument of issubdtype from `str` to `str` is deprecated. In future, it will be treated as `np.str_ == np.dtype(str).type`.\n  if (np.issubdtype(val.dtype, str) or\n/home/cakan/anaconda/lib/python3.7/site-packages/pypet/storageservice.py:4598: FutureWarning: Conversion of the second argument of issubdtype from `bytes` to `bytes` is deprecated. In future, it will be treated as `np.bytes_ == np.dtype(bytes).type`.\n  np.issubdtype(val.dtype, bytes)):\nMainProcess pypet.storageservice.HDF5StorageService INFO     Storing branch `parameters`.\n/home/cakan/anaconda/lib/python3.7/site-packages/pypet/storageservice.py:3110: FutureWarning: Conversion of the second argument of issubdtype from `str` to `str` is deprecated. 
In future, it will be treated as `np.str_ == np.dtype(str).type`.\n  np.issubdtype(data.dtype, str)):\nMainProcess pypet.storageservice.HDF5StorageService INFO     Finished storing Trajectory `results-2020-04-08-01H-16M-48S`.\nMainProcess pypet.environment.Environment INFO     \n************************************************************\nFINISHED FINAL STORING of trajectory\n`results-2020-04-08-01H-16M-48S`.\n************************************************************\n\nMainProcess pypet.environment.Environment INFO     All runs of trajectory `results-2020-04-08-01H-16M-48S` were completed successfully.\n\n
    search.loadResults()\nprint(\"Number of results: {}\".format(len(search.results)))\n
    \nMainProcess root INFO     Loading results from /mnt/raid/data/cakan/hdf/example-1.2.1.hdf\n/mnt/antares_raid/home/cakan/projects/neurolib/neurolib/utils/pypetUtils.py:21: H5pyDeprecationWarning: The default file mode will change to 'r' (read-only) in h5py 3.0. To suppress this warning, pass the mode you need to h5py.File(), or set the global default h5.get_config().default_file_mode, or set the environment variable H5PY_DEFAULT_READONLY=1. Available modes are: 'r', 'r+', 'w', 'w-'/'x', 'a'. See the docs for details.\n  hdf = h5py.File(filename)\nMainProcess root INFO     Analyzing trajectory results-2020-04-08-01H-16M-48S\nMainProcess pypet.storageservice.HDF5StorageService INFO     I will use the hdf5 file `/mnt/raid/data/cakan/hdf/example-1.2.1.hdf`.\nMainProcess pypet.storageservice.HDF5StorageService INFO     Loading trajectory `results-2020-04-08-01H-16M-48S`.\nMainProcess pypet.storageservice.HDF5StorageService INFO     Loading branch `config` in mode `2`.\nMainProcess pypet.storageservice.HDF5StorageService INFO     Loading branch `parameters` in mode `2`.\nMainProcess pypet.storageservice.HDF5StorageService INFO     Loading branch `results` in mode `1`.\nMainProcess root INFO     Creating pandas dataframe ...\nMainProcess root INFO     Creating results dictionary ...\n100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 4/4 [00:00<00:00, 219.06it/s]\nMainProcess root INFO     All results loaded.\n\n
    \nNumber of results: 4\n\n
    for i in search.dfResults.index:\n    search.dfResults.loc[i, 'bold_cc'] = np.mean(search.results[i]['fc'])\nsearch.dfResults\n
    mue_ext_mean mui_ext_mean bold_cc 0 0.0 0.0 0.174085 1 0.0 0.1 0.113122 2 0.0 0.2 0.488884 3 0.0 0.3 0.000000 4 0.0 0.4 0.000000 ... ... ... ... 956 3.0 2.6 -0.223068 957 3.0 2.7 -0.220481 958 3.0 2.8 -0.232276 959 3.0 2.9 -0.182681 960 3.0 3.0 -0.228365

    961 rows \u00d7 3 columns

    plt.figure(dpi=150)\nplt.imshow(search.dfResults.pivot_table(values='bold_cc', index = 'mui_ext_mean', columns='mue_ext_mean'), \\\n           extent = [min(search.dfResults.mue_ext_mean), max(search.dfResults.mue_ext_mean),\n                     min(search.dfResults.mui_ext_mean), max(search.dfResults.mui_ext_mean)], origin='lower')\nplt.colorbar(label='Mean correlation to empirical rs-FC')\nplt.xlabel(\"Input to E\")\nplt.ylabel(\"Input to I\")\n
    \nText(0, 0.5, 'Input to I')\n
    "},{"location":"examples/example-1.2.1-brain-exploration-postprocessing/#parameter-exploration-with-custom-run-function-and-postprocessing","title":"Parameter exploration with custom run function and postprocessing","text":"

    This notebook demonstrates how to scan the parameter space of a brain network model using neurolib with a custom evaluation function, in order to quickly find regions of interest. The evaluation function is designed to speed up the exploration by focusing on regions where the simulated dynamics meet certain criteria. For this, the simulation is run in multiple successive stages that increase in duration.

    "},{"location":"examples/example-1.2.1-brain-exploration-postprocessing/#iterative-evaluation","title":"Iterative evaluation","text":"

    The evaluation of a simulation takes multiple steps: a short run first checks whether there is any activity at all, a longer run then checks whether the BOLD signal varies, and only if both checks pass is the full-length simulation carried out (see the three stages in the evaluation function above).

    "},{"location":"examples/example-1.2.1-brain-exploration-postprocessing/#postprocessing","title":"Postprocessing","text":"

    In this scenario, we want to postprocess the simulated data as soon as the simulation is done and before writing the results to the hard disk. After the full simulation has run, the functional connectivity (FC) of the BOLD signal is computed and compared to the empirical FC dataset. The Pearson correlation of the FC matrices is computed and the average across subjects is taken. We then tell pypet to save these postprocessed results along with the model output.
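    Condensed, that postprocessing step corresponds to these lines from the evaluation function above (with BOLD_TRANSIENT as defined there):

    BOLD_TRANSIENT = 10000\nfc_score = np.mean([func.matrix_correlation(func.fc(model.BOLD.BOLD[:, model.BOLD.t_BOLD > BOLD_TRANSIENT]), fc) for fc in ds.FCs])\nsearch.saveToPypet({\"fc\" : fc_score}, traj)\n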

    "},{"location":"examples/example-1.2.1-brain-exploration-postprocessing/#set-up-model","title":"Set up model","text":""},{"location":"examples/example-1.2.1-brain-exploration-postprocessing/#define-evaluation-function","title":"Define evaluation function","text":""},{"location":"examples/example-1.2.1-brain-exploration-postprocessing/#set-up-parameter-exploration","title":"Set up parameter exploration","text":""},{"location":"examples/example-1.2.1-brain-exploration-postprocessing/#load-data","title":"Load data","text":""},{"location":"examples/example-1.2.1-brain-exploration-postprocessing/#plot","title":"Plot","text":""},{"location":"examples/example-1.3-aln-bifurcation-diagram/","title":"Example 1.3 aln bifurcation diagram","text":"
    # change into the root directory of the project\nimport os\nif os.getcwd().split(\"/\")[-1] == \"examples\":\n    os.chdir('..')\n
    import logging\nlogger = logging.getLogger()\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\n#logger.setLevel(logging.DEBUG)\n#logging.disable(logging.WARNING)\n#logging.disable(logging.WARN)\n\n%load_ext autoreload\n%autoreload 2\n
    import numpy as np\nimport matplotlib.pyplot as plt\n\nfrom neurolib.models.aln import ALNModel\nfrom neurolib.utils.parameterSpace import ParameterSpace\nfrom neurolib.optimize.exploration import BoxSearch\nimport neurolib.utils.functions as func\nimport neurolib.utils.stimulus as stim\nimport neurolib.optimize.exploration.explorationUtils as eu\nimport neurolib.utils.devutils as du\nfrom neurolib.utils.loadData import Dataset\n
    plt.style.use(\"seaborn-white\")\nplt.rcParams['image.cmap'] = 'plasma'\n
    model = ALNModel()\nmodel.params['dt'] = 0.1 # Integration time step, ms\nmodel.params['duration'] = 20 * 1000 # Simulation time, ms\n\nmodel.params['save_dt'] = 10.0 # 10 ms sampling steps for saving data, should be multiple of dt\nmodel.params[\"tauA\"] = 600.0 # Adaptation timescale, ms\n

    The aln model has a region of bistability, in which two states are stable at the same time: the low-activity down-state and the high-activity up-state. We can find these states by constructing a stimulus that uncovers the bistable nature of the system: Initially, we apply a negative push to make sure the system is in the down-state. We then relax this stimulus slowly and wait for the system to settle. Next, we apply a sharp positive push to reach the up-state and again release the stimulus slowly back to zero. A remaining difference between the two states after the stimulus has relaxed back to zero is a signature of bistability.

    # we place the system in the bistable region\nmodel.params['mue_ext_mean'] = 2.5\nmodel.params['mui_ext_mean'] = 2.5\n\n# construct a stimulus\nrect_stimulus = stim.RectifiedInput(amplitude=0.2).to_model(model)\nmodel.params['ext_exc_current'] = rect_stimulus * 5.0    \n\nmodel.run()\n
    plt.figure(figsize=(5, 3), dpi=150)\nplt.plot(model.t, model.output.T, lw = 3, c='k', label='rate')\nplt.plot(model.t, (rect_stimulus * 100).squeeze(), lw = 3, c='r', label=\"stimulus\")\nplt.text(3000, 7, 'down-state', fontsize=16)\nplt.text(15000, 35, 'up-state', fontsize=16)\nplt.legend(fontsize=14)\nplt.xlim(1, model.t[-1])\nplt.xlabel(\"Time [ms]\")\nplt.ylabel(\"Activity [Hz]\")\n
    \nText(0, 0.5, 'Activity [Hz]')\n

    Let's construct a rather lengthy evaluation function which does exactly that for every parameter configuration that we want to explore. We will also measure other quantities, such as the dominant frequency and amplitude of oscillations and the maximum rate of the excitatory population.

    def evaluateSimulation(traj):\n    # get the model from the trajectory using `search.getModelFromTraj(traj)`\n    model = search.getModelFromTraj(traj)\n    # initialize the model with random initial conditions\n    model.randomICs()\n    defaultDuration = model.params['duration']\n\n    # -------- stage wise simulation --------\n\n    # Stage 3: full and final simulation\n    # ---------------------------------------    \n    model.params['duration'] = defaultDuration\n\n    rect_stimulus = stim.RectifiedInput(amplitude=0.2).to_model(model)\n    model.params['ext_exc_current'] = rect_stimulus * 5.0    \n\n    model.run()\n\n    # up down difference    \n    state_length = 2000\n    last_state = (model.t > defaultDuration - state_length)\n    down_window = (defaultDuration/2-state_length<model.t) & (model.t<defaultDuration/2) # time period in ms where we expect the down-state\n    up_window = (defaultDuration-state_length<model.t) & (model.t<defaultDuration) # and up state\n    up_state_rate = np.mean(model.output[:, up_window], axis=1)\n    down_state_rate = np.mean(model.output[:, down_window], axis=1)\n    up_down_difference = np.max(up_state_rate - down_state_rate)\n\n    # check rates!\n    max_amp_output = np.max(\n          np.max(model.output[:, up_window], axis=1) \n        - np.min(model.output[:, up_window], axis=1)\n    )\n    max_output = np.max(model.output[:, up_window])\n\n    model_frs, model_pwrs = func.getMeanPowerSpectrum(model.output, \n                                                      dt=model.params.dt, \n                                                      maxfr=40, \n                                                      spectrum_windowsize=10)\n    max_power = np.max(model_pwrs)     \n\n    model_frs, model_pwrs = func.getMeanPowerSpectrum(model.output[:, up_window], dt=model.params.dt, maxfr=40, spectrum_windowsize=5)\n    domfr = model_frs[np.argmax(model_pwrs)]    \n\n    result = {\n        \"end\" : 3,\n        \"max_output\": max_output, \n        \"max_amp_output\" : max_amp_output,\n        \"max_power\" : max_power,\n        #\"model_pwrs\" : model_pwrs,\n        #\"output\": model.output[:, ::int(model.params['save_dt']/model.params['dt'])],\n        \"domfr\" : domfr,\n        \"up_down_difference\" : up_down_difference\n    }\n\n    search.saveToPypet(result, traj)\n    return    \n

    Let's now define the parameter space over which we want to search. We apply a grid search over the mean external input parameters to the excitatory and the inhibitory population, mue_ext_mean/mui_ext_mean, and do this for two values of the spike-frequency adaptation strength \(b\), once without and once with adaptation.

    # low number of parameters for testing:\nparameters = ParameterSpace({\"mue_ext_mean\": np.linspace(0.0, 4, 2), \n                             \"mui_ext_mean\": np.linspace(0.0, 4, 2),\n                              \"b\": [0.0, 20.0]\n                             }, kind=\"grid\")\n# real: \n# parameters = ParameterSpace({\"mue_ext_mean\": np.linspace(0.0, 4, 41), \n#                              \"mui_ext_mean\": np.linspace(0.0, 4, 41),\n#                               \"b\": [0.0, 20.0]\n#                              })\nsearch = BoxSearch(evalFunction = evaluateSimulation, model=model, parameterSpace=parameters, filename='example-1.3-aln-bifurcation-diagram.hdf')\n
    \nMainProcess pypet.storageservice.HDF5StorageService INFO     I will use the hdf5 file `./data/hdf/example-1.3-aln-bifurcation-diagram.hdf`.\nMainProcess pypet.environment.Environment INFO     Environment initialized.\nMainProcess root INFO     Number of parameter configurations: 3362\nMainProcess root INFO     BoxSearch: Environment initialized.\n\n
    search.run()\n
    \nMainProcess pypet.environment.Environment INFO     I am preparing the Trajectory for the experiment and initialise the store.\nMainProcess pypet.environment.Environment INFO     Initialising the storage for the trajectory.\nMainProcess pypet.storageservice.HDF5StorageService INFO     Initialising storage or updating meta data of Trajectory `results-2021-06-19-01H-23M-48S`.\nMainProcess pypet.storageservice.HDF5StorageService INFO     Finished init or meta data update for `results-2021-06-19-01H-23M-48S`.\nMainProcess pypet.environment.Environment INFO     \n************************************************************\nSTARTING runs of trajectory\n`results-2021-06-19-01H-23M-48S`.\n************************************************************\n\nMainProcess pypet.storageservice.HDF5StorageService INFO     Initialising storage or updating meta data of Trajectory `results-2021-06-19-01H-23M-48S`.\nMainProcess pypet.storageservice.HDF5StorageService INFO     Finished init or meta data update for `results-2021-06-19-01H-23M-48S`.\nMainProcess pypet.environment.Environment INFO     Starting multiprocessing with at most 8 processes running at the same time.\nMainProcess pypet INFO     PROGRESS: Finished    0/3362 runs [                    ]  0.0%\nMainProcess pypet INFO     PROGRESS: Finished  169/3362 runs [=                   ]  5.0%, remaining: 0:01:27\nMainProcess pypet INFO     PROGRESS: Finished  337/3362 runs [==                  ] 10.0%, remaining: 0:01:24\nMainProcess pypet INFO     PROGRESS: Finished  505/3362 runs [===                 ] 15.0%, remaining: 0:01:27\nMainProcess pypet INFO     PROGRESS: Finished  673/3362 runs [====                ] 20.0%, remaining: 0:01:26\nMainProcess pypet INFO     PROGRESS: Finished  841/3362 runs [=====               ] 25.0%, remaining: 0:01:26\nMainProcess pypet INFO     PROGRESS: Finished 1009/3362 runs [======              ] 30.0%, remaining: 0:01:24\nMainProcess pypet INFO     PROGRESS: Finished 1177/3362 runs [=======             ] 35.0%, remaining: 0:01:19\nMainProcess pypet INFO     PROGRESS: Finished 1345/3362 runs [========            ] 40.0%, remaining: 0:01:15\nMainProcess pypet INFO     PROGRESS: Finished 1513/3362 runs [=========           ] 45.0%, remaining: 0:01:10\nMainProcess pypet INFO     PROGRESS: Finished 1681/3362 runs [==========          ] 50.0%, remaining: 0:01:05\nMainProcess pypet INFO     PROGRESS: Finished 1850/3362 runs [===========         ] 55.0%, remaining: 0:00:59\nMainProcess pypet INFO     PROGRESS: Finished 2018/3362 runs [============        ] 60.0%, remaining: 0:00:55\nMainProcess pypet INFO     PROGRESS: Finished 2186/3362 runs [=============       ] 65.0%, remaining: 0:00:49\nMainProcess pypet INFO     PROGRESS: Finished 2354/3362 runs [==============      ] 70.0%, remaining: 0:00:42\nMainProcess pypet INFO     PROGRESS: Finished 2522/3362 runs [===============     ] 75.0%, remaining: 0:00:36\nMainProcess pypet INFO     PROGRESS: Finished 2690/3362 runs [================    ] 80.0%, remaining: 0:00:29\nMainProcess pypet INFO     PROGRESS: Finished 2858/3362 runs [=================   ] 85.0%, remaining: 0:00:22\nMainProcess pypet INFO     PROGRESS: Finished 3026/3362 runs [==================  ] 90.0%, remaining: 0:00:15\nMainProcess pypet INFO     PROGRESS: Finished 3194/3362 runs [=================== ] 95.0%, remaining: 0:00:07\nMainProcess pypet INFO     PROGRESS: Finished 3362/3362 runs [====================]100.0%\nMainProcess pypet.storageservice.HDF5StorageService INFO     Initialising storage or 
updating meta data of Trajectory `results-2021-06-19-01H-23M-48S`.\nMainProcess pypet.storageservice.HDF5StorageService INFO     Finished init or meta data update for `results-2021-06-19-01H-23M-48S`.\nMainProcess pypet.environment.Environment INFO     \n************************************************************\nFINISHED all runs of trajectory\n`results-2021-06-19-01H-23M-48S`.\n************************************************************\n\nMainProcess pypet.environment.Environment INFO     \n************************************************************\nSTARTING FINAL STORING of trajectory\n`results-2021-06-19-01H-23M-48S`\n************************************************************\n\nMainProcess pypet.storageservice.HDF5StorageService INFO     Start storing Trajectory `results-2021-06-19-01H-23M-48S`.\nMainProcess pypet.storageservice.HDF5StorageService INFO     Storing branch `config`.\nMainProcess pypet.storageservice.HDF5StorageService INFO     Storing branch `parameters`.\nMainProcess pypet.storageservice.HDF5StorageService INFO     Finished storing Trajectory `results-2021-06-19-01H-23M-48S`.\nMainProcess pypet.environment.Environment INFO     \n************************************************************\nFINISHED FINAL STORING of trajectory\n`results-2021-06-19-01H-23M-48S`.\n************************************************************\n\nMainProcess pypet.environment.Environment INFO     All runs of trajectory `results-2021-06-19-01H-23M-48S` were completed successfully.\n\n
    search.loadResults(all=False)\n
    \nMainProcess root INFO     Loading results from ./data/hdf/example-1.3-aln-bifurcation-diagram.hdf\nMainProcess root INFO     Analyzing trajectory results-2021-06-19-01H-23M-48S\nMainProcess pypet.storageservice.HDF5StorageService INFO     I will use the hdf5 file `./data/hdf/example-1.3-aln-bifurcation-diagram.hdf`.\nMainProcess pypet.storageservice.HDF5StorageService INFO     Loading trajectory `results-2021-06-19-01H-23M-48S`.\nMainProcess pypet.storageservice.HDF5StorageService INFO     Loading branch `config` in mode `2`.\nMainProcess pypet.storageservice.HDF5StorageService INFO     Loading branch `parameters` in mode `2`.\nMainProcess pypet.storageservice.HDF5StorageService INFO     Loading branch `results` in mode `1`.\nMainProcess root INFO     Creating `dfResults` dataframe ...\nMainProcess root INFO     Aggregating results to `dfResults` ...\n100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 3362/3362 [00:22<00:00, 152.47it/s]\nMainProcess root INFO     All results loaded.\n\n
    search.dfResults\n
    mue_ext_mean mui_ext_mean b up_down_difference max_power max_output max_amp_output end domfr 0 0.0 0.0 0.0 0.000019 17.665778 0.038741 3.747003e-16 3.0 0.500025 1 0.0 0.0 20.0 0.000009 5.151548 0.037037 2.042323e-05 3.0 0.500025 2 0.0 0.1 0.0 0.000010 15.441657 0.024629 1.110223e-16 3.0 0.500025 3 0.0 0.1 20.0 0.000006 4.140089 0.024062 8.806806e-06 3.0 0.500025 4 0.0 0.2 0.0 0.000006 12.466037 0.012277 7.112366e-17 3.0 0.500025 ... ... ... ... ... ... ... ... ... ... 3357 4.0 3.8 20.0 0.000038 14.426393 29.965853 1.263433e-06 3.0 0.000000 3358 4.0 3.9 0.0 0.000674 58.447442 93.451476 2.842171e-14 3.0 0.000000 3359 4.0 3.9 20.0 0.000038 14.482713 29.928097 1.243205e-06 3.0 0.000000 3360 4.0 4.0 0.0 0.000674 58.494615 93.380582 1.421085e-14 3.0 0.000000 3361 4.0 4.0 20.0 0.000038 14.534853 29.891972 1.224562e-06 3.0 0.000000

    3362 rows \u00d7 9 columns

    Let's draw the bifurcation diagrams. We will use a white contour for oscillatory areas (measured by max_amp_output) and a green dashed line for the bistable region (measured by up_down_difference). We can use the function explorationUtils.plotExplorationResults() for this.

    plot_key_label = \"Max. $r_E$\"\neu.plotExplorationResults(search.dfResults, \n                          par1=['mue_ext_mean', '$\\mu_e$'], \n                          par2=['mui_ext_mean', '$\\mu_i$'], \n                          by=['b'], \n                          plot_key='max_output',\n                          plot_clim=[0.0, 80.0],\n                          nan_to_zero=False,\n                          plot_key_label=plot_key_label, \n                          one_figure=False,\n                          contour=[\"max_amp_output\", \"up_down_difference\"],\n                          contour_color=[['white'], ['springgreen']],\n                          contour_levels=[[10], [10]],\n                          contour_alpha=[1.0, 1.0],\n                          contour_kwargs={0 : {\"linewidths\" : (5,)}, 1 : {\"linestyles\" : \"--\", \"linewidths\" : (5,)}},\n                          #alpha_mask=\"relative_amplitude_BOLD\",\n                          mask_threshold=0.1,\n                          mask_alpha=0.2)\n
    "},{"location":"examples/example-1.3-aln-bifurcation-diagram/#bifurcation-diagram-of-the-aln-model","title":"Bifurcation diagram of the aln model","text":"

    In this notebook, we will discover how easy it is to draw bifurcation diagrams in neurolib using its powerful BoxSearch class.

    Bifurcation diagrams are an important tool for understanding a dynamical system, be it a single neuron model or a whole-brain network. They show how a system behaves when certain parameters of the model are changed: whether the system transitions into an oscillation, for example, or whether it remains in a fixed point (of sustained constant activity).

    We will use this to draw a map of the aln model: Since the aln model consists of two populations of AdEx neurons, we will change its inputs to the excitatory and to the inhibitory population independently, and do so for two different values of the spike-frequency adaptation strength \(b\). We will measure the activity of the system, identify regions of oscillatory activity, and discover bistable states, in which the system can be in two different stable states for the same set of parameters.

    "},{"location":"examples/example-1.3-aln-bifurcation-diagram/#create-the-model","title":"Create the model","text":""},{"location":"examples/example-1.3-aln-bifurcation-diagram/#measuring-bistability","title":"Measuring bistability","text":""},{"location":"examples/example-1.3-aln-bifurcation-diagram/#define-evaluation-function","title":"Define evaluation function","text":""},{"location":"examples/example-1.3-aln-bifurcation-diagram/#exploration-parameters","title":"Exploration parameters","text":""},{"location":"examples/example-1.3-aln-bifurcation-diagram/#run","title":"Run","text":""},{"location":"examples/example-1.3-aln-bifurcation-diagram/#analysis","title":"Analysis","text":""},{"location":"examples/example-1.3-aln-bifurcation-diagram/#the-results-dataframe","title":"The results dataframe","text":""},{"location":"examples/example-1.3-aln-bifurcation-diagram/#plotting-2d-bifurcation-diagrams","title":"Plotting 2D bifurcation diagrams","text":""},{"location":"examples/example-2-evolutionary-optimization-minimal/","title":"Example 2 evolutionary optimization minimal","text":"
    # change to the root directory of the project\nimport os\nif os.getcwd().split(\"/\")[-1] == \"examples\":\n    os.chdir('..')\n\n# This will reload all imports as soon as the code changes\n%load_ext autoreload\n%autoreload 2    \n
    try:\n    import matplotlib.pyplot as plt\nexcept ImportError:\n    import sys\n    !{sys.executable} -m pip install matplotlib seaborn\n    import matplotlib.pyplot as plt\n\nimport numpy as np\nimport logging\n\nfrom neurolib.utils.parameterSpace import ParameterSpace\nfrom neurolib.optimize.evolution import Evolution\n\nimport neurolib.optimize.evolution.evolutionaryUtils as eu\nimport neurolib.utils.functions as func\n\ndef optimize_me(traj):\n    ind = evolution.getIndividualFromTraj(traj)\n    logging.info(\"Hello, I am {}\".format(ind.id))\n    logging.info(\"You can also call me {}, or simply ({:.2}, {:.2}).\".format(ind.params, ind.x, ind.y))\n\n    # let's make a circle\n    computation_result = abs((ind.x**2 + ind.y**2) - 1)\n    # DEAP wants a tuple as fitness, ALWAYS!\n    fitness_tuple = (computation_result ,)\n\n    # we also require a dictionary with at least a single result for storing the results in the hdf\n    result_dict = {}\n\n    return fitness_tuple, result_dict\n\n\npars = ParameterSpace(['x', 'y'], [[-5.0, 5.0], [-5.0, 5.0]])\nevolution = Evolution(optimize_me, pars, weightList = [-1.0],\n                      POP_INIT_SIZE=10, POP_SIZE = 6, NGEN=4, filename=\"example-2.0.hdf\")\n# info: choose POP_INIT_SIZE=100, POP_SIZE = 50, NGEN=10 for real exploration, \n# values here are low for testing: POP_INIT_SIZE=10, POP_SIZE = 6, NGEN=4\n\nevolution.run(verbose = True)\n
    evolution.loadResults()\n
    evolution.info(plot=True)\n
    gens, all_scores = evolution.getScoresDuringEvolution(reverse=True)\n\nimport matplotlib.pyplot as plt\nplt.figure(figsize=(8, 4), dpi=200)   \nplt.plot(gens, np.nanmean(all_scores, axis=1))\nplt.fill_between(gens, np.nanmin(all_scores, axis=1), np.nanmax(all_scores, axis=1), alpha=0.3)\nplt.xlabel(\"Generation #\")\nplt.ylabel(\"Score\")\n
    "},{"location":"examples/example-2-evolutionary-optimization-minimal/#simple-example-of-the-evolutionary-optimization-framework","title":"Simple example of the evolutionary optimization framework","text":"

    This notebook provides a simple example of the use of the evolutionary optimization framework built into the library. Under the hood, the implementation of the evolutionary algorithm is powered by deap, and pypet takes care of the parallelization and storage of the simulation data for us.

    Here we demonstrate how to fit the parameters of the evaluation function optimize_me, which simply computes the distance of the parameters to the unit circle and returns this as the fitness_tuple that DEAP expects.

    "},{"location":"examples/example-2.0.1-save-and-load-evolution/","title":"Example 2.0.1 save and load evolution","text":"

    In this example, we will demonstrate how to save an evolutionary optimization on one machine or instance and load the results on another machine. This is useful when the optimization is carried out on a different computer than the one on which the results are analyzed.

    # change to the root directory of the project\nimport os\nif os.getcwd().split(\"/\")[-2] == \"neurolib\":\n    os.chdir('..')\n\n# This will reload all imports as soon as the code changes\n%load_ext autoreload\n%autoreload 2    \n
    # prepare logging\nimport logging\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\n\n%load_ext autoreload\n%autoreload 2\n

    We import the modules that we need for the evolution.

    from neurolib.utils.parameterSpace import ParameterSpace\nfrom neurolib.optimize.evolution import Evolution\nimport numpy as np\n

    We will simply run the basic optimization on a circle from Example 2.

    def optimize_me(traj):\n    ind = evolution.getIndividualFromTraj(traj)\n    result = tuple([abs((ind.x**2 + ind.y**2) - 1)])\n    return result, {\"random_output\" : np.random.randint(100)}\n\n\npars = ParameterSpace(['x', 'y'], [[-5.0, 5.0], [-5.0, 5.0]])\nevolution = Evolution(optimize_me, pars, weightList = [-1.0],\n                      POP_INIT_SIZE=10, POP_SIZE = 6, NGEN=4, filename=\"example-2.0.1.hdf\")\n\nevolution.run(verbose = True)\n

    Now that the optimization is done, we can serialize and save the evolution using the dill module.

    EVOLUTION_DILL = \"saved_evolution.dill\"\nevolution.saveEvolution(EVOLUTION_DILL)\n
    \nMainProcess root INFO     Saving evolution to saved_evolution.dill\n\n

    Here, we pretend that we're on a completely new machine. We need to instantiate the Evolution class in order to fill it with the data from the previous optimization. For this, we create a \"mock\" evolution with some placeholder parameters and then load the dill file to replace the mock values with the real ones.

    # initialize mock evolution for loading previously generated data\npars = ParameterSpace(['mock'], \n                      [[0, 1]])\nevaluateSimulation = lambda x: x\nevolution_new = Evolution(evaluateSimulation, \n                      pars)\nevolution_new = evolution_new.loadEvolution(EVOLUTION_DILL)\n
    \nMainProcess root INFO     weightList not set, assuming single fitness value to be maximized.\nMainProcess root INFO     Trajectory Name: results-2021-02-15-12H-13M-39S\nMainProcess root INFO     Storing data to: ./data/hdf/evolution.hdf\nMainProcess root INFO     Trajectory Name: results-2021-02-15-12H-13M-39S\nMainProcess root INFO     Number of cores: 8\nMainProcess pypet.storageservice.HDF5StorageService INFO     I will use the hdf5 file `./data/hdf/evolution.hdf`.\nMainProcess pypet.environment.Environment INFO     Environment initialized.\nMainProcess root INFO     Evolution: Using algorithm: adaptive\n/Users/caglar/anaconda/lib/python3.7/site-packages/deap/creator.py:141: RuntimeWarning: A class named 'FitnessMulti' has already been created and it will be overwritten. Consider deleting previous creation of that class or rename it.\n  RuntimeWarning)\n/Users/caglar/anaconda/lib/python3.7/site-packages/deap/creator.py:141: RuntimeWarning: A class named 'Individual' has already been created and it will be overwritten. Consider deleting previous creation of that class or rename it.\n  RuntimeWarning)\nMainProcess root INFO     Evolution: Individual generation: <function randomParametersAdaptive at 0x7fd122dfa950>\nMainProcess root INFO     Evolution: Mating operator: <function cxBlend at 0x7fd122dcdb70>\nMainProcess root INFO     Evolution: Mutation operator: <function gaussianAdaptiveMutation_nStepSizes at 0x7fd122dfad90>\nMainProcess root INFO     Evolution: Parent selection: <function selRank at 0x7fd122dfaae8>\nMainProcess root INFO     Evolution: Selection operator: <function selBest_multiObj at 0x7fd122dfab70>\n\n

    Now, we should be able to do everything we want with the new evolution object.

    dfEvolution = evolution_new.dfEvolution()\ndfEvolution\n
    x y score id gen f0 0 1.767126 0.547244 -2.422212 1 0 2.422212 1 1.908967 -0.899728 -3.453668 7 0 3.453668 2 2.047736 1.437642 -5.260036 9 0 5.260036 3 -1.521826 2.259241 -6.420126 8 0 6.420126 4 -0.898959 2.578525 -6.456920 0 0 6.456920 5 2.622927 -1.558091 -8.307394 3 0 8.307394 6 0.517562 1.942211 -3.040056 10 1 3.040056 7 -1.820438 2.712097 -9.669464 11 1 9.669464 8 0.777049 1.272183 -1.222253 12 1 1.222253 9 3.143349 0.980240 -9.841516 13 1 9.841516 10 2.267286 -0.238797 -4.197609 14 1 4.197609 11 2.098299 3.682854 -16.966271 15 1 16.966271 12 -1.746393 0.288008 -2.132837 16 2 2.132837 13 0.759040 0.168302 -0.395532 17 2 0.395532 14 -1.477419 2.202671 -6.034527 18 2 6.034527 15 0.384431 3.804135 -13.619231 19 2 13.619231 16 1.236164 -2.969863 -9.348190 20 2 9.348190 17 1.478068 0.033220 -1.185788 21 2 1.185788 18 2.544810 3.003174 -14.495107 22 3 14.495107 19 0.606182 -0.408578 -0.465607 23 3 0.465607 20 0.741795 0.783160 -0.163599 24 3 0.163599 21 1.678066 2.696300 -9.085941 25 3 9.085941 22 1.190213 -3.732895 -14.351114 26 3 14.351114 23 -2.492132 -1.219275 -6.697355 27 3 6.697355

    We can also load the hdf file in which all simulated output was stored (\"random_output\" in the evaluation function above).

    evolution_new.loadResults()\n
    \nMainProcess root INFO     Loading results from ./data/hdf/example-2.0.1.hdf\nMainProcess root INFO     Analyzing trajectory results-2021-02-15-12H-13M-24S\nMainProcess pypet.storageservice.HDF5StorageService INFO     I will use the hdf5 file `./data/hdf/example-2.0.1.hdf`.\nMainProcess pypet.storageservice.HDF5StorageService INFO     Loading trajectory `results-2021-02-15-12H-13M-24S`.\nMainProcess pypet.storageservice.HDF5StorageService INFO     Loading branch `config` in mode `2`.\nMainProcess pypet.storageservice.HDF5StorageService INFO     Loading branch `derived_parameters` in mode `1`.\nMainProcess pypet.storageservice.HDF5StorageService INFO     Loading branch `parameters` in mode `2`.\nMainProcess pypet.storageservice.HDF5StorageService INFO     Loading branch `results` in mode `1`.\n\n

    We can load the output from the hdf file by passing the argument outputs=True to the dfEvolution() method:

    evolution_new.dfEvolution(outputs=True)\n
    x y score id gen random_output f0 0 1.767126 0.547244 -2.422212 1 0 1.0 2.422212 1 1.908967 -0.899728 -3.453668 7 0 1.0 3.453668 2 2.047736 1.437642 -5.260036 9 0 1.0 5.260036 3 -1.521826 2.259241 -6.420126 8 0 1.0 6.420126 4 -0.898959 2.578525 -6.456920 0 0 1.0 6.456920 5 2.622927 -1.558091 -8.307394 3 0 1.0 8.307394 6 0.517562 1.942211 -3.040056 10 1 51.0 3.040056 7 -1.820438 2.712097 -9.669464 11 1 51.0 9.669464 8 0.777049 1.272183 -1.222253 12 1 51.0 1.222253 9 3.143349 0.980240 -9.841516 13 1 51.0 9.841516 10 2.267286 -0.238797 -4.197609 14 1 51.0 4.197609 11 2.098299 3.682854 -16.966271 15 1 51.0 16.966271 12 -1.746393 0.288008 -2.132837 16 2 36.0 2.132837 13 0.759040 0.168302 -0.395532 17 2 36.0 0.395532 14 -1.477419 2.202671 -6.034527 18 2 36.0 6.034527 15 0.384431 3.804135 -13.619231 19 2 36.0 13.619231 16 1.236164 -2.969863 -9.348190 20 2 36.0 9.348190 17 1.478068 0.033220 -1.185788 21 2 36.0 1.185788 18 2.544810 3.003174 -14.495107 22 3 23.0 14.495107 19 0.606182 -0.408578 -0.465607 23 3 23.0 0.465607 20 0.741795 0.783160 -0.163599 24 3 23.0 0.163599 21 1.678066 2.696300 -9.085941 25 3 23.0 9.085941 22 1.190213 -3.732895 -14.351114 26 3 23.0 14.351114 23 -2.492132 -1.219275 -6.697355 27 3 23.0 6.697355
    evolution.info()\n
    \n> Simulation parameters\nHDF file storage: ./data/hdf/example-2.0.1.hdf\nTrajectory Name: results-2021-02-15-12H-13M-24S\nDuration of evaluating initial population 0:00:01.093011\nDuration of evolution 0:00:08.117928\nEval function: <function optimize_me at 0x7fd124ee4840>\nParameter space: {'x': [-5.0, 5.0], 'y': [-5.0, 5.0]}\n> Evolution parameters\nNumber of generations: 4\nInitial population size: 10\nPopulation size: 6\n> Evolutionary operators\nMating operator: <function cxBlend at 0x7fd122dcdb70>\nMating paramter: {'alpha': 0.5}\nSelection operator: <function selBest_multiObj at 0x7fd122dfab70>\nSelection paramter: {}\nParent selection operator: <function selRank at 0x7fd122dfaae8>\nComments: no comments\n--- Info summary ---\nValid: 6\nMean score (weighted fitness): -0.93\nParameter distribution (Generation 3):\nx:   mean: 0.4360,   std: 1.0159\ny:   mean: 0.3560,   std: 0.5401\n--------------------\nBest 5 individuals:\nPrinting 5 individuals\nIndividual 0\n    Fitness values:  0.16\n    Score:  -0.16\n    Weighted fitness:  -0.16\n    Stats mean 0.16 std 0.00 min 0.16 max 0.16\n    model.params[\"x\"] = 0.74\n    model.params[\"y\"] = 0.78\nIndividual 1\n    Fitness values:  0.4\n    Score:  -0.4\n    Weighted fitness:  -0.4\n    Stats mean 0.40 std 0.00 min 0.40 max 0.40\n    model.params[\"x\"] = 0.76\n    model.params[\"y\"] = 0.17\nIndividual 2\n    Fitness values:  0.47\n    Score:  -0.47\n    Weighted fitness:  -0.47\n    Stats mean 0.47 std 0.00 min 0.47 max 0.47\n    model.params[\"x\"] = 0.61\n    model.params[\"y\"] = -0.41\nIndividual 3\n    Fitness values:  1.19\n    Score:  -1.19\n    Weighted fitness:  -1.19\n    Stats mean 1.19 std 0.00 min 1.19 max 1.19\n    model.params[\"x\"] = 1.48\n    model.params[\"y\"] = 0.03\nIndividual 4\n    Fitness values:  1.22\n    Score:  -1.22\n    Weighted fitness:  -1.22\n    Stats mean 1.22 std 0.00 min 1.22 max 1.22\n    model.params[\"x\"] = 0.78\n    model.params[\"y\"] = 1.27\n--------------------\n\n
    \n/Users/caglar/anaconda/lib/python3.7/site-packages/neurolib/optimize/evolution/evolutionaryUtils.py:212: UserWarning: This figure includes Axes that are not compatible with tight_layout, so results might be incorrect.\n  plt.tight_layout()\n\n
    \nMainProcess root INFO     Saving plot to ./data/figures/results-2021-02-15-12H-13M-24S_hist_3.png\n\n
    \nThere are 6 valid individuals\nMean score across population: -0.93\n\n
    \n<Figure size 432x288 with 0 Axes>\n
    "},{"location":"examples/example-2.0.1-save-and-load-evolution/#saving-and-loading-evolution","title":"Saving and loading Evolution","text":""},{"location":"examples/example-2.0.1-save-and-load-evolution/#save-evolution","title":"Save evolution","text":""},{"location":"examples/example-2.0.1-save-and-load-evolution/#load-evolution","title":"Load evolution","text":""},{"location":"examples/example-2.1-evolutionary-optimization-aln/","title":"Example 2.1 evolutionary optimization aln","text":"
    # change to the root directory of the project\nimport os\nif os.getcwd().split(\"/\")[-1] == \"examples\":\n    os.chdir('..')\n\n# This will reload all imports as soon as the code changes\n%load_ext autoreload\n%autoreload 2\n
    try:\n    import matplotlib.pyplot as plt\nexcept ImportError:\n    import sys\n    !{sys.executable} -m pip install matplotlib seaborn\n    import matplotlib.pyplot as plt\n\nimport numpy as np\nimport logging \n\nfrom neurolib.models.aln import ALNModel\nfrom neurolib.utils.parameterSpace import ParameterSpace\nfrom neurolib.optimize.evolution import Evolution\nimport neurolib.utils.functions as func\n\nimport neurolib.optimize.evolution.deapUtils as deapUtils\n\n# a nice color map\nplt.rcParams['image.cmap'] = 'plasma'\n
    aln = ALNModel()\n
    # Here we define our evaluation function. This function will\n# be called repeatedly and perform a single simulation. The object\n# that is passed to the function, `traj`, is a pypet trajectory\n# and serves as a \"bridge\" to load the parameter set of this \n# particular trajectory and execute a run.\n# Then the power spectrum of the run is computed and its maximum\n# is fitted to the target of 25 Hz peak frequency.\ndef evaluateSimulation(traj):\n    # The trajectory id is provided as an attribute\n    rid = traj.id\n    logging.info(\"Running run id {}\".format(rid))\n    # this function provides a model with the particular\n    # parameter set for this given run\n    model = evolution.getModelFromTraj(traj)\n    # parameters can also be modified after loading\n    model.params['dt'] = 0.1\n    model.params['duration'] = 2*1000.\n    # and the simulation is run\n    model.run()\n\n    # compute power spectrum\n    frs, powers = func.getPowerSpectrum(model.rates_exc[:, -int(1000/model.params['dt']):], dt=model.params['dt'])\n    # find the peak frequency\n    domfr = frs[np.argmax(powers)] \n    # fitness evaluation: let's try to find a 25 Hz oscillation\n    fitness = abs(domfr - 25) \n    # deap needs a fitness *tuple*!\n    fitness_tuple = ()\n    # more fitness values could be added\n    fitness_tuple += (fitness, )\n    # we need to return the fitness tuple and the outputs of the model\n    return fitness_tuple, model.outputs\n

    The evolutionary algorithm tries to find the optimal parameter set that will maximize (or minimize) a certain fitness function.

    This is achieved by seeding an initial population of size POP_INIT_SIZE that is randomly sampled from the parameter space parameterSpace. INIT: After simulating the initial population using evalFunction, only a subset of the individuals is kept, as defined by POP_SIZE.

    START: Members of the remaining population are chosen based on their fitness (using rank selection) to mate and produce offspring. The parameters of the offspring are drawn from a normal distribution centered on the mean of the two parents' parameters. The offspring population is then evaluated, and the process loops back to START:

    This process is repeated for NGEN generations.
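    To make the scheme concrete, here is a self-contained toy sketch of such a loop for the circle fitness from Example 2 (an illustration only, not neurolib's actual implementation; the real operators differ in detail):

    # toy sketch of the INIT/START loop described above -- not neurolib's implementation\nimport numpy as np\n\nrng = np.random.default_rng(0)\nPOP_INIT_SIZE, POP_SIZE, NGEN = 100, 50, 10\n\ndef fitness(pop):\n    # distance to the unit circle, as in the optimize_me example\n    return np.abs(pop[:, 0] ** 2 + pop[:, 1] ** 2 - 1)\n\n# INIT: random population in the parameter space, keep the POP_SIZE fittest\npop = rng.uniform(-5, 5, size=(POP_INIT_SIZE, 2))\npop = pop[np.argsort(fitness(pop))][:POP_SIZE]\n\nfor gen in range(NGEN):\n    # START: rank selection -- fitter individuals are more likely to mate\n    ranks = np.argsort(np.argsort(fitness(pop)))\n    probs = (POP_SIZE - ranks) / np.sum(POP_SIZE - ranks)\n    parents = pop[rng.choice(POP_SIZE, size=(POP_SIZE, 2), p=probs)]\n    # offspring parameters are drawn around the mean of the two parents\n    offspring = rng.normal(parents.mean(axis=1), 0.5)\n    # evaluate the offspring, select the next population, loop back to START\n    pop = np.concatenate([pop, offspring])\n    pop = pop[np.argsort(fitness(pop))][:POP_SIZE]\n\nprint(\"best:\", pop[0], \"fitness:\", float(fitness(pop[:1])[0]))\n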

    # Here we define the parameters and the range in which we want\n# to perform the evolutionary optimization.\n# Create a `ParameterSpace` \npars = ParameterSpace(['mue_ext_mean', 'mui_ext_mean'], [[0.0, 4.0], [0.0, 4.0]])\n# Initialize evolution with\n# :evaluateSimulation: The function that returns a fitness, \n# :pars: The parameter space and its boundaries to optimize\n# :model: The model that should be passed to the evaluation function\n# :weightList: A list of optimization weights for the `fitness_tuple`,\n#              positive values will lead to a maximization, negative \n#              values to a minimization. The length of this list must\n#              be the same as the length of the `fitness_tuple`.\n# \n# :POP_INIT_SIZE: The size of the initial population that will be \n#              randomly sampled in the parameter space `pars`.\n#              Should be higher than POP_SIZE. 50-200 might be a good\n#              range to start experimenting with.\n# :POP_SIZE: Size of the population that should evolve. Must be an\n#              even number. 20-100 might be a good range to start with.\n# :NGEN: Number of generations to simulate the evolution for. A good\n#              range to start with might be 20-100.\n\nweightList = [-1.0]\n\nevolution = Evolution(evalFunction = evaluateSimulation, parameterSpace = pars, model = aln, weightList = [-1.0],\n                      POP_INIT_SIZE=4, POP_SIZE = 4, NGEN=2, filename=\"example-2.1.hdf\")\n# info: choose POP_INIT_SIZE=50, POP_SIZE = 20, NGEN=20 for real exploration, \n# values are lower here for testing\n
    # Enabling `verbose = True` will print statistics and generate plots \n# of the current population for each generation.\nevolution.run(verbose = False)\n
    # the current population is always accessible via\npop = evolution.pop\n# we can also use the functions registered to deap\n# to select the best of the population:\nbest_10 = evolution.toolbox.selBest(pop, k=10)\n# Remember, we performed a minimization, so a fitness\n# of 0 is optimal\nprint(\"Best individual\", best_10[0], \"fitness\", best_10[0].fitness)\n
    \nBest individual [1.182184510022096, 0.29660620374273683, 0.4936712969767474, 0.07875430013351538] fitness (0.0,)\n\n

    We can look at the current population by calling evolution.dfPop(), which returns a pandas DataFrame with the parameters of each individual, its id, its generation of birth, its outputs, and its fitness (called \"f0\" here).

    evolution.dfPop(outputs=True)\n
    The scalar columns of the returned DataFrame are shown below; the time-series columns t, rates_exc, rates_inh, and IA contain the raw outputs of each run and are truncated here:

        mue_ext_mean  mui_ext_mean  score   id  gen   f0
    0       1.182185      0.296606    0.0  294   13  0.0
    1       1.114270      0.240422    0.0  368   16  0.0
    2       0.910558      0.075463    0.0  403   18  0.0
    3       1.188440      0.356385   -1.0  171    7  1.0
    4       1.007371      0.113623   -1.0  177    7  1.0
    5       1.031484      0.120989   -1.0  192    8  1.0
    6       0.900787      0.038763   -1.0  193    8  1.0
    7       1.217021      0.213936   -1.0  245   10  1.0
    8       1.241895      0.365758   -1.0  248   10  1.0
    9       1.062928      0.265389   -1.0  267   11  1.0
    10      1.007366      0.110587   -1.0  286   12  1.0
    11      0.904612      0.123308   -1.0  320   14  1.0
    12      1.119281      0.188307   -1.0  330   15  1.0
    13      1.158463      0.227194   -1.0  342   15  1.0
    14      1.053327      0.281852   -1.0  344   15  1.0
    15      1.124747      0.318747   -1.0  360   16  1.0
    16      1.266317      0.360644   -1.0  364   16  1.0
    17      1.329988      0.388133   -1.0  365   16  1.0
    18      0.986030      0.189384   -1.0  390   18  1.0
    19      0.896915      0.125212   -1.0  399   18  1.0

    20 rows × 10 columns

    You can also view all individuals that were created during the entire evolution by calling evolution.dfEvolution():

    evolution.dfEvolution()\n
         mue_ext_mean  mui_ext_mean  score   id  gen    f0
    0        1.400310      1.209331   -4.0   39    0   4.0
    1        1.173593      0.662050   -5.0   31    0   5.0
    2        1.134601      0.809371   -6.0   22    0   6.0
    3        0.992049      0.694590   -6.0   29    0   6.0
    4        1.470708      1.073607   -7.0   47    0   7.0
    ..            ...           ...    ...  ...  ...   ...
    395      1.881591      0.299691  -24.0  425   19  24.0
    396      0.681422      0.489003   -8.0  426   19   8.0
    397      1.430791      0.268028  -24.0  427   19  24.0
    398      1.275903      0.534227   -3.0  428   19   3.0
    399      0.870652      0.326687   -5.0  429   19   5.0

    400 rows × 6 columns

    # a simple overview of the current population (in this case the\n# last one) is given via the `info()` method. This provides a\n# histogram of the score (= mean fitness) as well as scatterplots\n# and density estimates across orthogonal parameter space cross\n# sections.\nevolution.info(plot=True)\n
    \n> Simulation parameters\nHDF file storage: ./data/hdf/example-2.1.hdf\nTrajectory Name: results-2020-07-02-14H-20M-45S\nDuration of evaluating initial population 0:00:29.656935\nDuration of evolution 0:03:50.565418\nModel: <class 'neurolib.models.aln.model.ALNModel'>\nModel name: aln\nEval function: <function evaluateSimulation at 0x10ba8cae8>\nParameter space: {'mue_ext_mean': [0.0, 4.0], 'mui_ext_mean': [0.0, 4.0]}\n> Evolution parameters\nNumber of generations: 20\nInitial population size: 50\nPopulation size: 20\n> Evolutionary operators\nMating operator: <function cxBlend at 0x11dcaf510>\nMating paramter: {'alpha': 0.5}\nSelection operator: <function selBest_multiObj at 0x11f4d9d08>\nSelection paramter: {}\nParent selection operator: <function selRank at 0x11f4d9c80>\nComments: no comments\n--- Info summary ---\nValid: 20\nMean score (weighted fitness): -0.85\nParameter distribution (Generation 19):\nmue_ext_mean:    mean: 1.0852,   std: 0.1270\nmui_ext_mean:    mean: 0.2200,   std: 0.1042\n--------------------\nBest 5 individuals:\nPrinting 5 individuals\nIndividual 0\n    Fitness values:  0.0\n    Score:  0.0\n    Weighted fitness:  -0.0\n    Stats mean 0.00 std 0.00 min 0.00 max 0.00\n    model.params[\"mue_ext_mean\"] = 1.18\n    model.params[\"mui_ext_mean\"] = 0.30\nIndividual 1\n    Fitness values:  0.0\n    Score:  0.0\n    Weighted fitness:  -0.0\n    Stats mean 0.00 std 0.00 min 0.00 max 0.00\n    model.params[\"mue_ext_mean\"] = 1.11\n    model.params[\"mui_ext_mean\"] = 0.24\nIndividual 2\n    Fitness values:  0.0\n    Score:  0.0\n    Weighted fitness:  -0.0\n    Stats mean 0.00 std 0.00 min 0.00 max 0.00\n    model.params[\"mue_ext_mean\"] = 0.91\n    model.params[\"mui_ext_mean\"] = 0.08\nIndividual 3\n    Fitness values:  1.0\n    Score:  -1.0\n    Weighted fitness:  -1.0\n    Stats mean 1.00 std 0.00 min 1.00 max 1.00\n    model.params[\"mue_ext_mean\"] = 1.19\n    model.params[\"mui_ext_mean\"] = 0.36\nIndividual 4\n    Fitness values:  1.0\n    Score:  -1.0\n    Weighted fitness:  -1.0\n    Stats mean 1.00 std 0.00 min 1.00 max 1.00\n    model.params[\"mue_ext_mean\"] = 1.01\n    model.params[\"mui_ext_mean\"] = 0.11\n--------------------\n\n
    \nMainProcess root INFO     Saving plot to ./data/figures/results-2020-07-02-14H-20M-45S_hist_19.png\n\n
    \nThere are 20 valid individuals\nMean score across population: -0.85\n\n
    \n<Figure size 432x288 with 0 Axes>\n

    neurolib keeps track of all individuals during the evolution. You can see all individuals from each generation by calling evolution.history. The object evolution.tree provides a network description of the genealogy of the evolution: each individual (indexed by its unique .id) is connected to its parents. We can use this object in combination with the network library networkx to plot the tree:

    # we put this into a try/except block since networkx and pydot\n# are optional dependencies that we don't test against\ntry:\n    import matplotlib.pyplot as plt\n    import networkx as nx\n    from networkx.drawing.nx_pydot import graphviz_layout\n\n    G = nx.DiGraph(evolution.tree)\n    G = G.reverse()     # Make the graph top-down\n    pos = graphviz_layout(G, prog='dot')\n    plt.figure(figsize=(8, 8))\n    nx.draw(G, pos, node_size=50, alpha=0.5, node_color=list(evolution.id_score.values()), with_labels=False)\n    plt.show()\nexcept Exception:\n    print(\"It looks like networkx or pydot are not installed\")\n
    \n/Users/caglar/anaconda/lib/python3.7/site-packages/networkx/drawing/nx_pylab.py:579: MatplotlibDeprecationWarning: \nThe iterable function was deprecated in Matplotlib 3.1 and will be removed in 3.3. Use np.iterable instead.\n  if not cb.iterable(width):\n/Users/caglar/anaconda/lib/python3.7/site-packages/networkx/drawing/nx_pylab.py:676: MatplotlibDeprecationWarning: \nThe iterable function was deprecated in Matplotlib 3.1 and will be removed in 3.3. Use np.iterable instead.\n  if cb.iterable(node_size):  # many node sizes\n\n
    "},{"location":"examples/example-2.1-evolutionary-optimization-aln/#evolutionary-parameter-search-with-a-single-neural-mass-model","title":"Evolutionary parameter search with a single neural mass model","text":"

    This notebook provides a simple example for the use of the evolutionary optimization framework built into the library. Under the hood, the implementation of the evolutionary algorithm is powered by deap, and pypet takes care of the parallelization and storage of the simulation data for us.

    We want to optimize for a simple target: finding a parameter configuration that produces activity whose power spectrum peaks at 25 Hz.

    In this notebook, we will also plot the evolutionary genealogy tree, to visualize how the population evolves over generations.

    "},{"location":"examples/example-2.1-evolutionary-optimization-aln/#model-definition","title":"Model definition","text":""},{"location":"examples/example-2.1-evolutionary-optimization-aln/#initialize-and-run-evolution","title":"Initialize and run evolution","text":""},{"location":"examples/example-2.1-evolutionary-optimization-aln/#analysis","title":"Analysis","text":""},{"location":"examples/example-2.1-evolutionary-optimization-aln/#population","title":"Population","text":""},{"location":"examples/example-2.1-evolutionary-optimization-aln/#plotting-genealogy-tree","title":"Plotting genealogy tree","text":""},{"location":"examples/example-2.2-evolution-brain-network-aln-resting-state-fit/","title":"Example 2.2 evolution brain network aln resting state fit","text":"
    # change to the root directory of the project\nimport os\nif os.getcwd().split(\"/\")[-1] == \"examples\":\n    os.chdir('..')\n\n# This will reload all imports as soon as the code changes\n%load_ext autoreload\n%autoreload 2    \n
    try:\n    import matplotlib.pyplot as plt\nexcept ImportError:\n    import sys\n    !{sys.executable} -m pip install matplotlib seaborn\n    import matplotlib.pyplot as plt\n\nimport numpy as np\nimport logging \n\nfrom neurolib.models.aln import ALNModel\nfrom neurolib.utils.parameterSpace import ParameterSpace\nfrom neurolib.optimize.evolution import Evolution\nimport neurolib.utils.functions as func\n\nfrom neurolib.utils.loadData import Dataset\nds = Dataset(\"hcp\")\n\n# a nice color map\nplt.rcParams['image.cmap'] = 'plasma'\n

    We create a brain network model using the empirical dataset ds:

    model = ALNModel(Cmat = ds.Cmat, Dmat = ds.Dmat) # simulates the whole-brain model in 10s chunks by default if bold == True\n# Resting state fits\nmodel.params['mue_ext_mean'] = 1.57\nmodel.params['mui_ext_mean'] = 1.6\nmodel.params['sigma_ou'] = 0.09\nmodel.params['b'] = 5.0\nmodel.params['signalV'] = 2\nmodel.params['dt'] = 0.2\nmodel.params['duration'] = 0.2 * 60 * 1000 #ms\n# testing: model.params['duration'] = 0.2 * 60 * 1000 #ms\n# real: model.params['duration'] = 1.0 * 60 * 1000 #ms\n

    Our evaluation function does the following: first, it simulates the model for a short time to check whether there is sufficient activity. This speeds up the evolution considerably, since large regions of the state space show almost no neuronal activity. Only then do we simulate the model for the full duration and compute the fitness using the empirical dataset.

    def evaluateSimulation(traj):\n    rid = traj.id\n    model = evolution.getModelFromTraj(traj)\n    defaultDuration = model.params['duration']\n    invalid_result = (np.nan,)* len(ds.BOLDs)\n\n    # -------- stage wise simulation --------\n\n    # Stage 1: simulate for a few seconds to see if there is any activity\n    # ---------------------------------------\n    model.params['duration'] = 3*1000.\n    model.run()\n\n    # check if stage 1 was successful\n    if np.max(model.output[:, model.t > 500]) > 160 or np.max(model.output[:, model.t > 500]) < 10:\n        return invalid_result, {}\n\n    # Stage 2: full and final simulation\n    # ---------------------------------------\n    model.params['duration'] = defaultDuration\n    model.run(chunkwise=True, bold = True)\n\n    # -------- fitness evaluation here --------\n\n    scores = []\n    for i, fc in enumerate(ds.FCs):\n        fc_score = func.matrix_correlation(func.fc(model.BOLD.BOLD[:, 5:]), fc)\n        scores.append(fc_score)\n\n    # one fitness value per empirical FC matrix (multi-objective)\n    fitness_tuple = tuple(scores)\n    return fitness_tuple, {}\n

    We specify the parameter space that we want to search.

    pars = ParameterSpace(['mue_ext_mean', 'mui_ext_mean', 'b', 'sigma_ou', 'Ke_gl', 'signalV'], \n                      [[0.0, 3.0], [0.0, 3.0], [0.0, 100.0], [0.0, 0.3], [0.0, 500.0], [0.0, 400.0]])\n

    Note that we choose algorithm='nsga2' when we create the Evolution. This uses the multi-objective optimization algorithm by Deb et al. 2002. Although we have only one kind of objective here (the FC fit), we could in principle add more objectives, such as the fit of the FCD matrix. For this, we would add these values to the fitness tuple in the evaluation function above and add more weights in the definition of the Evolution, as sketched below. Positive weights cause an objective to be maximized, negative weights cause it to be minimized. Please refer to the DEAP documentation for more information.
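
    As a hedged sketch, a second objective such as an FCD fit could be added inside the evaluation function as follows. func.fcd and func.matrix_kolmogorov appear elsewhere in neurolib, but treat the exact calls below as assumptions and check the API before relying on them.

    # Sketch: a two-objective fitness, FC correlation (to be maximized)\n# plus FCD distance (to be minimized). The helper calls are assumptions.\nfc_score = func.matrix_correlation(func.fc(model.BOLD.BOLD[:, 5:]), ds.FCs[0])\nfcd_dist = func.matrix_kolmogorov(func.fcd(model.BOLD.BOLD[:, 5:]),\n                                  func.fcd(ds.BOLDs[0]))\nfitness_tuple = (fc_score, fcd_dist)\n\n# ... and in the definition of the Evolution:\nweightList = [1.0, -1.0]\n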

    evolution = Evolution(evaluateSimulation, pars, algorithm = 'nsga2', weightList = [1.0] * len(ds.BOLDs), model = model, POP_INIT_SIZE=4, POP_SIZE = 4, NGEN=2, filename=\"example-2.2.hdf\")\n#testing: evolution = Evolution(evaluateSimulation, pars, algorithm = 'nsga2', weightList = [1.0] * len(ds.BOLDs), model = model, POP_INIT_SIZE=4, POP_SIZE = 4, NGEN=2)\n# real: evolution = Evolution(evaluateSimulation, pars, algorithm = 'nsga2', weightList = [1.0] * len(ds.BOLDs), model = model, POP_INIT_SIZE=1600, POP_SIZE = 160, NGEN=100)\n

    That's it, we can run the evolution now.

    evolution.run(verbose = False)\n

    We could now save the full evolution object for later analysis using evolution.saveEvolution().
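
    A minimal sketch of this is given below; the filename and the reloading pattern are assumptions, so check the Evolution API for the exact signatures.

    # save the evolution object to disk for later analysis\n# (the filename is illustrative)\nevolution.saveEvolution(\"data/evolution-2.2.dill\")\n# in a fresh session, it could be restored along the lines of:\n# evolution = evolution.loadEvolution(\"data/evolution-2.2.dill\")\n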

    The info() method gives us a useful overview of the evolution, like a summary of the evolution parameters, the statistics of the population and also scatterplots of the individuals in our search space.

    evolution.info()\n
    \n--- Info summary ---\nValid: 160\nMean score (weighted fitness): 0.53\nParameters dictribution (Generation 99):\nmue_ext_mean:    mean: 0.147,    std: 0.02449\nmui_ext_mean:    mean: 0.1343,   std: 0.05387\nb:   mean: 93.05,    std: 5.84\nsigma_ou:    mean: 0.05296,  std: 0.01099\nKe_gl:   mean: 233.1,    std: 20.57\nsignalV:     mean: 344.3,    std: 68.9\n--------------------\nBest 5 individuals:\nPrinting 5 individuals\nIndividual 0 pars mue_ext_mean 0.1557, mui_ext_mean 0.08049, b 96.18, sigma_ou 0.05687, Ke_gl 222.8, signalV 354.9\n    Fitness values:  0.5426 0.4137 0.6459 0.5287 0.552 0.7209 0.5181 0.4997 0.42 0.4226 0.4279 0.5029 0.652 0.5667 0.5394 0.5894 0.472 0.6361 0.5217 0.5899 0.6456 0.5204 0.637 0.7114\nIndividual 1 pars mue_ext_mean 0.172, mui_ext_mean 0.1519, b 83.8, sigma_ou 0.06809, Ke_gl 219.2, signalV 308.3\n    Fitness values:  0.5798 0.4495 0.6525 0.4953 0.5876 0.7077 0.5263 0.5381 0.4222 0.4486 0.4347 0.5051 0.6232 0.5411 0.5383 0.5532 0.4716 0.6162 0.5442 0.5476 0.6644 0.5176 0.5826 0.6867\nIndividual 2 pars mue_ext_mean 0.09511, mui_ext_mean 0.1325, b 84.53, sigma_ou 0.04644, Ke_gl 206.9, signalV 382.1\n    Fitness values:  0.5212 0.4309 0.6206 0.5142 0.551 0.6844 0.5321 0.4911 0.4151 0.4368 0.4358 0.4803 0.6534 0.535 0.5388 0.5712 0.4784 0.659 0.5016 0.5962 0.6281 0.5063 0.6328 0.7097\nIndividual 3 pars mue_ext_mean 0.1333, mui_ext_mean 0.1794, b 92.41, sigma_ou 0.04781, Ke_gl 247.8, signalV 374.4\n    Fitness values:  0.5359 0.4445 0.622 0.4913 0.5438 0.717 0.5579 0.4572 0.3963 0.4511 0.4247 0.4688 0.6558 0.5271 0.5403 0.5763 0.4736 0.6079 0.4863 0.6064 0.6628 0.5144 0.6055 0.6958\nIndividual 4 pars mue_ext_mean 0.2655, mui_ext_mean 0.2683, b 88.81, sigma_ou 0.04314, Ke_gl 231.0, signalV 371.8\n    Fitness values:  0.5668 0.4402 0.6421 0.5091 0.5613 0.6858 0.4896 0.516 0.4525 0.437 0.4513 0.5346 0.5927 0.5819 0.5021 0.5367 0.4718 0.6038 0.563 0.5354 0.5889 0.5078 0.5844 0.7061\n--------------------\nThere are 160 valid individuals\nMean score across population: 0.53\n\n
    \n<Figure size 432x288 with 0 Axes>\n
    # This will load results from disk in case the session is \n# started newly and the trajectory is not in memory\ntraj = evolution.loadResults()\n
    gens, all_scores = evolution.getScoresDuringEvolution(reverse=True)\n
    plt.figure(figsize=(8, 4), dpi=200)   \nplt.plot(gens, np.nanmean(all_scores, axis=1))\nplt.fill_between(gens, np.nanmin(all_scores, axis=1), np.nanmax(all_scores, axis=1), alpha=0.3)\nplt.xlabel(\"Generation #\")\nplt.ylabel(\"Score\")\n
    \nText(0, 0.5, 'Score')\n
    "},{"location":"examples/example-2.2-evolution-brain-network-aln-resting-state-fit/#evolutionary-optimization-of-a-whole-brain-model","title":"Evolutionary optimization of a whole-brain model","text":"

    This notebook provides an example for the use of the evolutionary optimization framework built into the library. Under the hood, the implementation of the evolutionary algorithm is powered by deap, and pypet takes care of the parallelization and storage of the simulation data for us.

    We want to optimize a whole-brain network that produces simulated BOLD activity (fMRI data) similar to the empirical dataset. We measure the fitness of each simulation by computing func.matrix_correlation of the functional connectivity func.fc(model.BOLD.BOLD) to the empirical data ds.FCs. Individuals closest to the empirical data get a higher fitness and thus a higher chance of reproduction and survival.

    "},{"location":"examples/example-2.2-evolution-brain-network-aln-resting-state-fit/#analysis","title":"Analysis","text":""},{"location":"examples/example-3-meg-functional-connectivity/","title":"Example 3 meg functional connectivity","text":"

    In this example we will learn how to use neurolib to simulate resting state functional connectivity of MEG recordings.

    In the first part of the notebook, we will compute the frequency-specific functional connectivity matrix of an exemplary resting-state MEG recording from the YouR-Study (Uhlhaas, P.J., Gajwani, R., Gross, J. et al. The Youth Mental Health Risk and Resilience Study (YouR-Study). BMC Psychiatry 17, 43 (2017)).

    To this end, we follow the approach presented in Hipp, J., Hawellek, D., Corbetta, M. et al., Large-scale cortical correlation structure of spontaneous oscillatory activity. Nat Neurosci 15, 884–890 (2012).

    In the second part of this notebook, we will use a whole-brain model to simulate brain activity and compute the functional connectivity matrix of the simulated signal envelopes, as was done for the empirical MEG data. The parameters of this model were previously optimized with neurolib's evolutionary algorithms (not shown here).

    Finally, we will compute the fit (Pearson correlation) of the simulated functional connectivity to the empirical MEG data, which was used as a fitting objective in a previous optimization procedure.
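
    The core of the frequency-specific envelope correlation can be sketched in a few lines of scipy. This is a hedged, self-contained illustration of the Hipp et al. approach, not the exact pipeline of this notebook; the band edges and filter order are assumptions.

    # Illustrative envelope-FC computation for a (regions x time) array\n# sampled at fs Hz; band edges and filter order are assumptions.\nimport numpy as np\nfrom scipy.signal import butter, sosfiltfilt, hilbert\n\ndef envelope_fc(data, fs, band=(8.0, 12.0)):\n    # band-pass filter each region's signal in the band of interest\n    sos = butter(4, band, btype=\"bandpass\", fs=fs, output=\"sos\")\n    filtered = sosfiltfilt(sos, data, axis=1)\n    # amplitude envelope via the analytic signal (Hilbert transform)\n    envelope = np.abs(hilbert(filtered, axis=1))\n    # functional connectivity: pairwise Pearson correlation of envelopes\n    return np.corrcoef(envelope)\n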

    # change to the root directory of the project\nimport os\nif os.getcwd().split(\"/\")[-1] == \"examples\":\n    os.chdir('..')\n\n# This will reload all imports as soon as the code changes\n%load_ext autoreload\n%autoreload 2    \n
    import os\nimport numpy as np\nimport xarray as xr\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport ipywidgets as widgets\nfrom IPython.utils import io\nimport warnings\nwarnings.simplefilter(action='ignore', category=FutureWarning)\nimport time\nimport pandas as pd\n
    \n/Users/caglar/anaconda/lib/python3.7/site-packages/pandas/compat/_optional.py:138: UserWarning: Pandas requires version '2.7.0' or newer of 'numexpr' (version '2.6.9' currently installed).\n  warnings.warn(msg, UserWarning)\n\n
    from neurolib.utils.signal import Signal \n\nsignal = Signal.from_file(os.path.join('examples', 'data','rs-meg.nc'))\nregion_labels = signal.data.regions.values\nnr_regions = len(region_labels)\ndisplay(signal.data)\n
    <xarray.DataArray (regions: 94, time: 6000)>\narray([[-0.17628077, -0.33449804, -0.27283166, ...,  0.20004052,\n         0.19379806,  0.0271034 ],\n       [ 0.00513031,  0.0319704 ,  0.18478207, ...,  0.22112991,\n         0.30244658,  0.21108818],\n       [ 0.01999333, -0.1601617 , -0.21931987, ...,  0.01844522,\n        -0.03713842,  0.08175757],\n       ...,\n       [-0.2381615 , -0.34838511, -0.50638238, ..., -0.08763395,\n        -0.05396606, -0.06218967],\n       [-0.09900261, -0.1525903 , -0.16444704, ...,  0.04080438,\n         0.01664182,  0.15847579],\n       [ 0.13203698,  0.17482835,  0.21212731, ..., -0.19971229,\n        -0.01869223, -0.16379495]])\nCoordinates:\n  * time     (time) float64 0.0 0.01 0.02 0.03 0.04 ... 59.96 59.97 59.98 59.99\n  * regions  (regions) object 'PreCG.L' 'PreCG.R' 'SFG.L' ... 'ITG.L' 'ITG.R'\nAttributes:\n    name:             rest meg\n    label:            \n    signal_type:      \n    unit:             T\n    description:      MEG recording in AAL2 space\n    process_steps_0:  resample to 100.0Hz
