├── .github └── workflows │ ├── ci.yml │ ├── documentation.yml │ └── test-notebooks.yml ├── .gitignore ├── CHANGELOG.md ├── CONTRIBUTING.md ├── LICENSE ├── MANIFEST.in ├── README.md ├── codecov.yml ├── docs ├── README.md ├── contributing.md ├── examples ├── exploration │ └── boxsearch.md ├── models │ ├── model.md │ └── parameters.md ├── optimization │ └── evolution.md └── utils │ ├── dataset.md │ ├── functions.md │ ├── parameterspace.md │ ├── signal.md │ └── stimulus.md ├── examples ├── data │ ├── AAL2_atlas_data │ │ ├── AAL2.nii │ │ └── AAL2.xml │ └── rs-meg.nc ├── example-0-aln-minimal.ipynb ├── example-0.1-hopf-minimal.ipynb ├── example-0.2-basic_analysis.ipynb ├── example-0.3-fhn-minimal.ipynb ├── example-0.4-wc-minimal.ipynb ├── example-0.5-kuramoto.ipynb ├── example-0.6-external-stimulus.ipynb ├── example-0.7-custom-model.ipynb ├── example-0.8-leadfield-matrix.ipynb ├── example-1-aln-parameter-exploration.ipynb ├── example-1.1-custom-parameter-exploration.ipynb ├── example-1.2-brain-network-exploration.ipynb ├── example-1.2.1-brain-exploration-postprocessing.ipynb ├── example-1.3-aln-bifurcation-diagram.ipynb ├── example-2-evolutionary-optimization-minimal.ipynb ├── example-2.0.1-save-and-load-evolution.ipynb ├── example-2.1-evolutionary-optimization-aln.ipynb ├── example-2.2-evolution-brain-network-aln-resting-state-fit.ipynb ├── example-3-meg-functional-connectivity.ipynb ├── example-4-multimodel-intro.ipynb ├── example-4.1-multimodel-custom-model.ipynb ├── example-4.2-multimodel-backends-and-optimization.ipynb ├── example-5.1-oc-phenomenological-model-deterministic.ipynb ├── example-5.2-oc-wc-model-deterministic.ipynb ├── example-5.3-oc-wc-model-noisy.ipynb └── example-5.4-oc-aln-model-deterministic.ipynb ├── mkdocs.yml ├── neurolib ├── __init__.py ├── control │ ├── __init__.py │ └── optimal_control │ │ ├── __init__.py │ │ ├── cost_functions.py │ │ ├── oc.py │ │ ├── oc_aln │ │ ├── __init__.py │ │ └── oc_aln.py │ │ ├── oc_fhn │ │ ├── __init__.py │ │ └── oc_fhn.py │ │ ├── oc_hopf │ │ ├── __init__.py │ │ └── oc_hopf.py │ │ └── oc_wc │ │ ├── __init__.py │ │ └── oc_wc.py ├── data │ └── datasets │ │ ├── gw │ │ └── subjects │ │ │ ├── NAP_001 │ │ │ ├── functional │ │ │ │ └── BOLD_rsfMRI.mat │ │ │ └── structural │ │ │ │ ├── DTI_CM.mat │ │ │ │ └── DTI_LEN.mat │ │ │ ├── NAP_002 │ │ │ ├── functional │ │ │ │ └── BOLD_rsfMRI.mat │ │ │ └── structural │ │ │ │ ├── DTI_CM.mat │ │ │ │ └── DTI_LEN.mat │ │ │ ├── NAP_007 │ │ │ ├── functional │ │ │ │ └── BOLD_rsfMRI.mat │ │ │ └── structural │ │ │ │ ├── DTI_CM.mat │ │ │ │ └── DTI_LEN.mat │ │ │ ├── NAP_009 │ │ │ ├── functional │ │ │ │ └── BOLD_rsfMRI.mat │ │ │ └── structural │ │ │ │ ├── DTI_CM.mat │ │ │ │ └── DTI_LEN.mat │ │ │ └── NAP_013 │ │ │ ├── functional │ │ │ └── BOLD_rsfMRI.mat │ │ │ └── structural │ │ │ ├── DTI_CM.mat │ │ │ └── DTI_LEN.mat │ │ └── hcp │ │ └── subjects │ │ ├── 101309 │ │ ├── functional │ │ │ └── TC_rsfMRI_REST1_LR.mat │ │ └── structural │ │ │ ├── DTI_CM.mat │ │ │ ├── DTI_LEN.mat │ │ │ ├── nvoxel.txt │ │ │ └── waytotal.txt │ │ ├── 102311 │ │ ├── functional │ │ │ └── TC_rsfMRI_REST1_LR.mat │ │ └── structural │ │ │ ├── DTI_CM.mat │ │ │ ├── DTI_LEN.mat │ │ │ ├── nvoxel.txt │ │ │ └── waytotal.txt │ │ ├── 102816 │ │ ├── functional │ │ │ └── TC_rsfMRI_REST1_LR.mat │ │ └── structural │ │ │ ├── DTI_CM.mat │ │ │ ├── DTI_LEN.mat │ │ │ ├── nvoxel.txt │ │ │ └── waytotal.txt │ │ ├── 131217 │ │ ├── functional │ │ │ └── TC_rsfMRI_REST1_LR.mat │ │ └── structural │ │ │ ├── DTI_CM.mat │ │ │ ├── DTI_LEN.mat │ │ │ ├── nvoxel.txt │ │ │ └── waytotal.txt 
│ │ ├── 211619 │ │ ├── functional │ │ │ └── TC_rsfMRI_REST1_LR.mat │ │ └── structural │ │ │ ├── DTI_CM.mat │ │ │ ├── DTI_LEN.mat │ │ │ ├── nvoxel.txt │ │ │ └── waytotal.txt │ │ ├── 213522 │ │ ├── functional │ │ │ └── TC_rsfMRI_REST1_LR.mat │ │ └── structural │ │ │ ├── DTI_CM.mat │ │ │ ├── DTI_LEN.mat │ │ │ ├── nvoxel.txt │ │ │ └── waytotal.txt │ │ └── 377451 │ │ ├── functional │ │ └── TC_rsfMRI_REST1_LR.mat │ │ └── structural │ │ ├── DTI_CM.mat │ │ ├── DTI_LEN.mat │ │ ├── nvoxel.txt │ │ └── waytotal.txt ├── models │ ├── __init__.py │ ├── aln │ │ ├── __init__.py │ │ ├── aln-precalc │ │ │ ├── precompute_quantities │ │ │ │ ├── calculate_quantities_cascade.py │ │ │ │ ├── methods_cascade.py │ │ │ │ └── params.py │ │ │ └── quantities_cascade.h5 │ │ ├── loadDefaultParams.py │ │ ├── model.py │ │ └── timeIntegration.py │ ├── bold │ │ ├── __init__.py │ │ ├── model.py │ │ └── timeIntegration.py │ ├── fhn │ │ ├── __init__.py │ │ ├── loadDefaultParams.py │ │ ├── model.py │ │ └── timeIntegration.py │ ├── hopf │ │ ├── __init__.py │ │ ├── loadDefaultParams.py │ │ ├── model.py │ │ └── timeIntegration.py │ ├── kuramoto │ │ ├── __init__.py │ │ ├── loadDefaultParams.py │ │ ├── model.py │ │ └── timeIntegration.py │ ├── model.py │ ├── multimodel │ │ ├── __init__.py │ │ ├── builder │ │ │ ├── __init__.py │ │ │ ├── aln.py │ │ │ ├── base │ │ │ │ ├── __init__.py │ │ │ │ ├── backend.py │ │ │ │ ├── constants.py │ │ │ │ ├── network.py │ │ │ │ ├── neural_mass.py │ │ │ │ └── params.py │ │ │ ├── fitzhugh_nagumo.py │ │ │ ├── hopf.py │ │ │ ├── thalamus.py │ │ │ ├── wilson_cowan.py │ │ │ └── wong_wang.py │ │ └── model.py │ ├── thalamus │ │ ├── __init__.py │ │ ├── loadDefaultParams.py │ │ ├── model.py │ │ └── timeIntegration.py │ ├── wc │ │ ├── __init__.py │ │ ├── loadDefaultParams.py │ │ ├── model.py │ │ └── timeIntegration.py │ └── ww │ │ ├── __init__.py │ │ ├── loadDefaultParams.py │ │ ├── model.py │ │ └── timeIntegration.py ├── optimize │ ├── __init__.py │ ├── evolution │ │ ├── __init__.py │ │ ├── deapUtils.py │ │ ├── evolution.py │ │ └── evolutionaryUtils.py │ └── exploration │ │ ├── __init__.py │ │ ├── exploration.py │ │ └── explorationUtils.py └── utils │ ├── __init__.py │ ├── atlases.py │ ├── collections.py │ ├── devutils.py │ ├── functions.py │ ├── leadfield.py │ ├── loadData.py │ ├── model_utils.py │ ├── parameterSpace.py │ ├── paths.py │ ├── plot_oc.py │ ├── pypetUtils.py │ ├── pypet_logging.ini │ ├── saver.py │ ├── signal.py │ └── stimulus.py ├── requirements.txt ├── resources ├── adex.png ├── aln.png ├── brain_slow_waves_small.gif ├── element_logo.png ├── evolution_animated.gif ├── evolution_minimal.png ├── evolution_tree.png ├── evolutionary-algorithm.png ├── exploration_aln.png ├── gw_data.png ├── gw_simulated.png ├── icon_block.png ├── pipeline.jpg ├── readme_header.png └── single_timeseries.png ├── setup.py └── tests ├── control └── optimal_control │ ├── test_oc.py │ ├── test_oc_aln.py │ ├── test_oc_cost_functions.py │ ├── test_oc_fhn.py │ ├── test_oc_fhn_noisy.py │ ├── test_oc_hopf.py │ ├── test_oc_utils.py │ └── test_oc_wc.py ├── multimodel ├── base │ ├── test_backend.py │ ├── test_network.py │ ├── test_neural_mass.py │ └── test_params.py ├── test_aln.py ├── test_fitzhugh_nagumo.py ├── test_hopf.py ├── test_thalamus.py ├── test_wilson_cowan.py └── test_wong_wang.py ├── test_autochunk.py ├── test_collections.py ├── test_datasets.py ├── test_evolution.py ├── test_evolutionUtils.py ├── test_exploration.py ├── test_explorationUtils.py ├── test_functions.py ├── test_imports.py ├── test_models.py ├── 
test_parameterspace.py ├── test_signal.py ├── test_stimulus.py └── test_subsampling.py /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: ci 2 | 3 | on: 4 | push: 5 | pull_request: 6 | 7 | jobs: 8 | build: 9 | runs-on: ${{ matrix.os }} 10 | strategy: 11 | fail-fast: false 12 | matrix: 13 | os: [ubuntu-latest, macos-latest] 14 | python-version: [3.7, 3.8] 15 | exclude: 16 | - os: macos-latest 17 | python-version: 3.8 18 | 19 | steps: 20 | - uses: actions/checkout@v2 21 | - name: Set up Python ${{ matrix.python-version }} 🚜 22 | uses: actions/setup-python@v2 23 | with: 24 | python-version: ${{ matrix.python-version }} 25 | - name: Install dependencies 🛠 26 | run: | 27 | python -m pip install --upgrade pip setuptools 28 | pip install flake8 codecov pytest-cov wheel matplotlib seaborn mne 29 | if [ -f requirements.txt ]; then pip install -r requirements.txt; fi 30 | pip install . 31 | - name: Lint with flake8 🎓 32 | run: | 33 | # stop the build if there are Python syntax errors or undefined names 34 | flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics 35 | # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide 36 | flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics 37 | - name: Test with pytest 🧪 38 | run: | 39 | PYTHONPATH=. pytest --durations=0 --cov-report=xml --cov=neurolib tests/ 40 | - name: Upload coverage to Codecov 📊 41 | uses: codecov/codecov-action@v1 42 | with: 43 | file: ./coverage.xml 44 | files: ./coverage1.xml,./coverage2.xml 45 | flags: unittests 46 | env_vars: OS,PYTHON 47 | name: codecov-umbrella 48 | verbose: true 49 | -------------------------------------------------------------------------------- /.github/workflows/documentation.yml: -------------------------------------------------------------------------------- 1 | name: documentation 2 | 3 | on: 4 | push: 5 | branches: 6 | - master 7 | pull_request: 8 | branches: 9 | - master 10 | 11 | jobs: 12 | build: 13 | runs-on: ${{ matrix.os }} 14 | strategy: 15 | fail-fast: false 16 | matrix: 17 | os: [ubuntu-latest] 18 | python-version: [3.7] 19 | 20 | steps: 21 | - name: Copy Repository Contents ↩ 22 | uses: actions/checkout@v2 23 | with: 24 | persist-credentials: false 25 | - name: Set up Python ${{ matrix.python-version }} 🚜 26 | uses: actions/setup-python@v2 27 | with: 28 | python-version: ${{ matrix.python-version }} 29 | - name: Install dependencies 🛠 30 | run: | 31 | python -m pip install --upgrade pip 32 | pip install mkdocs mkdocs-material mkdocstrings mkdocstrings-python mknotebooks Pygments livereload 33 | if [ -f requirements.txt ]; then pip install -r requirements.txt; fi 34 | pip install . 
35 | - name: Build documentation 👷‍♀️ 36 | run: | 37 | mkdocs build 38 | - name: Predeploy on PR ✈️ 39 | if: github.event_name == 'pull_request' 40 | uses: JamesIves/github-pages-deploy-action@4.0.0 41 | with: 42 | folder: site 43 | target-folder: "predeploy" 44 | token: ${{ secrets.DOC_ACCESS_TOKEN }} 45 | branch: master 46 | commit-message: "PR predeploy" 47 | repository-name: neurolib-dev/neurolib-dev.github.io 48 | - name: Deploy 🚀 49 | if: github.event_name == 'push' 50 | uses: JamesIves/github-pages-deploy-action@4.0.0 51 | with: 52 | folder: site 53 | token: ${{ secrets.DOC_ACCESS_TOKEN }} 54 | branch: master 55 | repository-name: neurolib-dev/neurolib-dev.github.io 56 | -------------------------------------------------------------------------------- /.github/workflows/test-notebooks.yml: -------------------------------------------------------------------------------- 1 | name: notebooks 2 | 3 | on: 4 | push: 5 | pull_request: 6 | 7 | jobs: 8 | build: 9 | runs-on: ${{ matrix.os }} 10 | strategy: 11 | fail-fast: false 12 | matrix: 13 | os: [ubuntu-latest, macos-latest] 14 | python-version: [3.7] 15 | 16 | steps: 17 | - uses: actions/checkout@v2 18 | - name: Set up Python ${{ matrix.python-version }} 🚜 19 | uses: actions/setup-python@v2 20 | with: 21 | python-version: ${{ matrix.python-version }} 22 | - name: Install dependencies 🛠 23 | run: | 24 | python -m pip install --upgrade pip 25 | pip install treon wheel setuptools jupyterlab matplotlib seaborn 26 | if [ -f requirements.txt ]; then pip install -r requirements.txt; fi 27 | pip uninstall -y importlib-metadata 28 | pip install "importlib-metadata<5.0" 29 | pip install . 30 | - name: Test notebooks with treon 🧪 31 | run: | 32 | treon examples/ 33 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # ipython notebook 10 | .ipynb_checkpoints/ 11 | 12 | # Mac 13 | .DS_Store 14 | 15 | logs/ 16 | *.hdf 17 | hdf/ 18 | 19 | # Distribution / packaging 20 | .Python 21 | build/ 22 | develop-eggs/ 23 | dist/ 24 | downloads/ 25 | eggs/ 26 | .eggs/ 27 | lib/ 28 | lib64/ 29 | parts/ 30 | sdist/ 31 | var/ 32 | wheels/ 33 | pip-wheel-metadata/ 34 | share/python-wheels/ 35 | *.egg-info/ 36 | .installed.cfg 37 | *.egg 38 | MANIFEST 39 | 40 | # PyInstaller 41 | # Usually these files are written by a python script from a template 42 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
43 | *.manifest 44 | *.spec 45 | 46 | # Installer logs 47 | pip-log.txt 48 | pip-delete-this-directory.txt 49 | 50 | # Unit test / coverage reports 51 | htmlcov/ 52 | .tox/ 53 | .nox/ 54 | .coverage 55 | .coverage.* 56 | .cache 57 | nosetests.xml 58 | coverage.xml 59 | *.cover 60 | *.py,cover 61 | .hypothesis/ 62 | .pytest_cache/ 63 | 64 | # Translations 65 | *.mo 66 | *.pot 67 | 68 | # Django stuff: 69 | *.log 70 | local_settings.py 71 | db.sqlite3 72 | db.sqlite3-journal 73 | 74 | # Flask stuff: 75 | instance/ 76 | .webassets-cache 77 | 78 | # Scrapy stuff: 79 | .scrapy 80 | 81 | # Sphinx documentation 82 | docs/_build/ 83 | 84 | # PyBuilder 85 | target/ 86 | 87 | # Jupyter Notebook 88 | .ipynb_checkpoints 89 | 90 | # IPython 91 | profile_default/ 92 | ipython_config.py 93 | 94 | # pyenv 95 | .python-version 96 | 97 | # pipenv 98 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 99 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 100 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 101 | # install all needed dependencies. 102 | #Pipfile.lock 103 | 104 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow 105 | __pypackages__/ 106 | 107 | # Celery stuff 108 | celerybeat-schedule 109 | celerybeat.pid 110 | 111 | # SageMath parsed files 112 | *.sage.py 113 | 114 | # Environments 115 | .env 116 | .venv 117 | env/ 118 | venv/ 119 | ENV/ 120 | env.bak/ 121 | venv.bak/ 122 | 123 | # Spyder project settings 124 | .spyderproject 125 | .spyproject 126 | 127 | # Rope project settings 128 | .ropeproject 129 | 130 | # mkdocs documentation 131 | /site 132 | 133 | # mypy 134 | .mypy_cache/ 135 | .dmypy.json 136 | dmypy.json 137 | 138 | # Pyre type checker 139 | .pyre/ 140 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | **v0.5.10** 2 | 3 | - models now have the parameter `sampling_dt` which will downsample the output to a specified step size (in ms) 4 | - loadData: add subject-wise length matrices `ds.Dmats` 5 | 6 | **v0.5.9** 7 | 8 | - `ALN` model added to the multimodel framework 9 | - `ThalamicMassModel` now works with autochunk for very long simulations with minimal RAM usage! 10 | 11 | **v0.5.8** 12 | 13 | - Hotfix: include `pypet_logging.ini` in pypi package 14 | - Evolution: new method `getIndividualFromHistory()` 15 | 16 | **v0.5.7** 17 | 18 | - `example-0.5`: Demonstrating the use of external stimuli on brain networks 19 | - `example-1.3`: 2D bifurcation diagrams using `pypet` 20 | - `bold`: BOLD numerical overflow bug fixed 21 | - `evolution`: dfEvolution and dfPop fix 22 | - `exploration`: fix seed for random initial conditions 23 | - various minor bugfixes 24 | 25 | **v0.5.5** 26 | 27 | - Hotfix for RNG seed in exploration: Seed `None` is now converted to `"None"` for for `pypet` compatibility only when saving the `model.params` to the trajectory. 28 | - Fix: `dfEvolution` drops duplicate entries from the `evolution.history`. 29 | 30 | **v0.5.4** 31 | 32 | - New function `func.construct_stimulus()` 33 | - New example of stimulus usage: `examples/example-0.5-aln-external-stimulus.ipynb` 34 | - Fixed RNG seed bug, where the seed value None was converted to 0 (because of pypet) and lead to a predictable random number generator 35 | 36 | **v0.5.3** 37 | 38 | - `ALNModel` now records adaptation currents! 
Accessible via model.outputs.IA 39 | 40 | **v0.5.1** 41 | 42 | *Evolution:* 43 | 44 | - NSGA-2 algorithm implemented (Deb et al. 2002) 45 | - Preselect complete algorithms (using `algorithm="adaptive"` or `"nsga2"`) 46 | - Implement custom operators for all evolutionary operations 47 | - Keep track of the evolution history using `evolution.history` 48 | - Genealogy `evolution.tree` available from `evolution.buildEvolutionTree()` that is `networkx` compatible [1] 49 | - Continue working: `saveEvolution()` and `loadEvolution()` can load an evolution from another session [2] 50 | - Overview dataframe `evolution.dfPop` now has all fitness values as well 51 | - Get scores using `getScores()` 52 | - Plot evolution progress with `evolutionaryUtils.plotProgress()` 53 | 54 | *Exploration:* 55 | 56 | - Use `loadResults(all=True)` to load all simulated results from disk to memory (available as `.results`) or use `all=False` to load runs individually from hdf. Both options populate `dfResults`. 57 | - `loadResults()` has memory cap to avoid filling up RAM 58 | - `loadDfResults()` creates the parameter table from a previous simulation 59 | - `explorationUtils.plotExplorationResults()` for plotting 2D slices of the explored results with some advanced functions like alpha maps and contours for predefined regions. 60 | 61 | *devUtils* 62 | 63 | - A module that we are using for development and research with some nice features. Please do not rely on this file since there might be breaking changes in the future. 64 | - `plot_outputs()` like a true numerical simlord 65 | - `model_fit()` to compute the model's FC and FCD fit to the dataset, could be usefull for everyone 66 | - `getPowerSpectrum()` does what is says 67 | - `getMeanPowerSpectrum()` same 68 | - a very neat `rolling_window()` from a `numpy` PR that never got accepted 69 | 70 | *Other:* 71 | 72 | - Data loading: 73 | - `Dataset` can load different SC matrix normalizations: `"max", "waytotal", "nvoxel"` 74 | - Can precompute FCD matrices to avoid having to do it later (`fcd=True`) 75 | - `neurolib/utils/atlas.py` added with aal2 region names (thanks @jajcayn) and coordinates of centers of regions (from scans of @caglorithm's brain 🤯) 76 | - `ParameterSpace` has `.lowerBound` and `.upperBound`. 77 | - `pypet` finally doesn't create a billion log files anymore due to a custom log config 78 | 79 | **v0.5.0** 80 | 81 | - **New model**: Thalamus model `ThalamicMassModel` (thanks to @jajcayn) 82 | - Model by Costa et al. 2016, PLOS Computational Biology 83 | - New tools for parameter exploration: `explorationUtils.py` aka `eu` 84 | - Postprocessing of exploration results using `eu.processExplorationResults()` 85 | - Find parameters of explored simulations using `eu.findCloseResults()` 86 | - Plot exploration results via `eu.plotExplorationResults()` (see example image below) 87 | - Custom transformation of the inputs to the `BOLDModel`. 88 | - This is particularly handy for phenomenological models (such as `FHNModel`, `HopfModel` and `WCModel`) which do not produce firing rate outputs with units in `Hz`. 
89 | - Improvements 90 | - Models can now generate random initial conditions using `model.randomICs()` 91 | - `model.params['bold'] = True` forces BOLD simulation 92 | - `BoxSearch` class: `search.run()` passes arguments to `model.run()` 93 | - BOLD output time array renamed to `t_BOLD` 94 | 95 | **v0.4.1** 96 | 97 | - **New model:** Wilson-Cowan neural mass model implemented (thanks to @ChristophMetzner) 98 | - Simulations now start their output at `t=dt` (as opposed to `t=0` before). Everything before is now considered an initial condition. 99 | - Fix: Running a simulation chunkwise (using `model.run(chunkwise=True)`) and normally (using `model.run()`) produces output of the same length 100 | - Fix: `aln` network coupling bug, which became apparent when simulating chunkwise with `model.run(chunkwise=True, chunksize=1)` 101 | - Fix: Correct use of seed for RNG 102 | - Fix: Matrices are not normalized to max-1 anymore before each run. 103 | - Fix: Kolmogorov distance of FCD matrices and timeseries 104 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing to neurolib 2 | 3 | Thank you for your interest in contributing to `neurolib`. We welcome bug reports through the issues tab and pull requests for fixes or improvements. You are warmly invited to join our development efforts and make brain network modeling easier and more useful for all researchers. 4 | 5 | ## Pull requests 6 | 7 | To propose a change to `neurolib`'s code, you should first fork the repository to your own GitHub account. 8 | Then, create a branch and make your changes. You can then send a pull request to neurolib's own repository 9 | and we will review and discuss your proposed changes. 10 | 11 | 12 | More information on how to make pull requests can be found in the 13 | [GitHub help](https://docs.github.com/en/github/collaborating-with-issues-and-pull-requests/creating-a-pull-request) pages. 14 | 15 | ### Maintaining code 16 | 17 | Please be aware that we have a conservative policy for implementing new functionality. All new features need to be maintained, sometimes forever. We are a small team of developers and can only maintain a limited amount of code. Therefore, ideally, you should also feel responsible for the changes you have proposed and maintain them after they become part of `neurolib`. 18 | 19 | ## Code style 20 | 21 | We are using the [black](https://github.com/psf/black) code formatter with the additional argument `--line-length=120`. 22 | It's called the "uncompromising formatter" because it is completely deterministic and you have literally no control over what your code will look like. 23 | We like that! We recommend using black directly in your IDE, 24 | for example in [VSCode](https://marcobelo.medium.com/setting-up-python-black-on-visual-studio-code-5318eba4cd00). 25 | 26 | ### Commenting Code 27 | 28 | We are using the [sphinx format](https://sphinx-rtd-tutorial.readthedocs.io/en/latest/docstrings.html) for commenting code. Comments are incredibly important to us since `neurolib` is supposed to be a library of user-facing code. You are encouraged to read the code, change it, and build something on top of it. Our users are coders. Please write as many comments as you can, including a description of each function and method and its arguments, but also single-line comments for the code itself.
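To illustrate this docstring style, here is a minimal, hypothetical helper written in the sphinx format (the function, its parameters, and its behavior are made up for this example and are not part of `neurolib`):

```python
import numpy as np


def scale_connectivity(Cmat, factor=1.0):
    """Scale a structural connectivity matrix by a constant factor.

    :param Cmat: Structural connectivity matrix (adjacency matrix) of coupling strengths
    :type Cmat: numpy.ndarray
    :param factor: Scaling factor applied to every entry of the matrix, defaults to 1.0
    :type factor: float, optional

    :return: Scaled copy of the connectivity matrix
    :rtype: numpy.ndarray
    """
    # work on a copy so the caller's matrix is left untouched
    scaled = np.array(Cmat, dtype=float, copy=True)
    # single-line comments like this one describe the individual steps
    return scaled * factor
```

This is also the format that `mkdocstrings` (configured with `docstring_style: sphinx` in `mkdocs.yml`) picks up when building the API documentation pages.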
29 | 30 | ## Implementing a neural mass model 31 | 32 | You are very welcome to implement your favorite neural mass model and contribute it to `neurolib`. 33 | 34 | * The easiest way of implementing a model is to copy a model directory and adapt the relevant parts of it to your own model. Please have a look at how other models are implemented. We recommend having a look at the `HopfModel`, which is a fairly simple model. 35 | * All models inherit from the `Model` base class which can be found in `neurolib/models/model.py`. 36 | * You can also check out the [model implementation example](https://neurolib-dev.github.io/examples/example-0.6-custom-model/) to find out how a model is implemented. 37 | * All models need to pass tests. Tests are located in the `tests/` directory of the project. A model should be added to the test files `tests/test_models.py` and `tests/test_autochunk.py`. However, you should also make sure that your model supports as many `neurolib` features as possible, such as exploration and optimization. If you did everything right, this should be the case. 38 | * As of now, models consist of three parts: 39 | * The `model.py` file contains the class of the model. Here the model specifies attributes such as its name, its state variables, and its initial value parameters. Additionally, in the constructor (the `__init__()` method), the model loads its default parameters. 40 | * The `loadDefaultParams.py` file contains a function (`loadDefaultParams()`) which has the arguments `Cmat` for the structural connectivity matrix, `Dmat` for the delay matrix, and `seed` for the seed of the random number generator. This function returns a dictionary (or `dotdict`, see `neurolib/utils/collections.py`) with all parameters inside. 41 | * The `timeIntegration.py` file contains a `timeIntegration()` function which takes the argument `params` from the previous step. Here, we need to prepare the numerical integration. We load all relevant parameters from the `params` dictionary and pass them to the main integration loop. The integration loop is written such that it can be accelerated by `numba` ([numba's page](https://numba.pydata.org/)), which speeds up the integration by a factor of around 1000. 42 | 43 | ## Contributing examples 44 | 45 | We very much welcome example contributions since they help new users learn how to make use of `neurolib`. They can include basic usage examples or tutorials of `neurolib`'s features, or a demonstration of how to solve a specific scientific task using neural mass models or whole-brain networks. 46 | 47 | * Examples are provided as Jupyter Notebooks in the `/examples/` directory of the project repository. 48 | * Notebooks should have a brief description of what they are trying to accomplish at the beginning. 49 | * It is recommended to change the working directory to the root directory at the very beginning of the notebook (`os.chdir('..')`). 50 | * Notebooks should be structured with different subheadings (Markdown style). Please also describe in words what you are doing in code. 51 | 52 | 53 | ## Contributing brain data 54 | 55 | We have a few small datasets already in neurolib so everyone can start simulating right away. If you'd like to contribute more data to the project, please feel invited to do so. We're looking for more structural connectivity matrices and fiber length matrices in the MATLAB matrix `.mat` format (which can be loaded with `scipy.io.loadmat`; see the short sketch below). We also appreciate BOLD data, EEG data, or MEG data.
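As a rough sketch of how such a contributed connectivity matrix could be inspected before opening a pull request (the file name and the key lookup below are assumptions for this illustration, not a fixed convention):

```python
import numpy as np
from scipy.io import loadmat

# the file name is only an example; adapt it to your dataset
mat = loadmat("DTI_CM.mat")

# loadmat returns a dict; skip the MATLAB header entries that start with "__"
data_keys = [key for key in mat if not key.startswith("__")]
Cmat = np.asarray(mat[data_keys[0]], dtype=float)

# a parcellated structural connectivity matrix should be square (regions x regions)
assert Cmat.ndim == 2 and Cmat.shape[0] == Cmat.shape[1], "expected a square matrix"
print(f"{Cmat.shape[0]} regions, maximum coupling strength: {Cmat.max():.3f}")
```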
Other modalities could be useful as well. Please be aware that the data has to be in a parcellated form, i.e., the brain areas need to be organized according to an atlas like the AAL2 atlas (or others). 56 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2020 neurolib-dev 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include requirements.txt 2 | include neurolib/models/aln/aln-precalc/quantities_cascade.h5 3 | include neurolib/utils/pypet_logging.ini 4 | 5 | # datasets 6 | # recursive-include neurolib/data * 7 | 8 | recursive-include neurolib/data/datasets/hcp * 9 | recursive-include neurolib/data/datasets/gw * 10 | -------------------------------------------------------------------------------- /codecov.yml: -------------------------------------------------------------------------------- 1 | coverage: 2 | precision: 2 3 | round: nearest 4 | range: "50...90" 5 | ignore: 6 | - "neurolib/models/**/timeIntegration.py" 7 | - "neurolib/utils/devutils.py" 8 | - "neurolib/utils/atlases.py" 9 | - "neurolib/models/multimodel/builder/aln.py" 10 | status: 11 | project: 12 | default: 13 | enabled: yes 14 | target: auto 15 | threshold: 5% 16 | patch: off 17 | -------------------------------------------------------------------------------- /docs/README.md: -------------------------------------------------------------------------------- 1 | ../README.md -------------------------------------------------------------------------------- /docs/contributing.md: -------------------------------------------------------------------------------- 1 | ../CONTRIBUTING.md -------------------------------------------------------------------------------- /docs/examples: -------------------------------------------------------------------------------- 1 | ../examples -------------------------------------------------------------------------------- /docs/exploration/boxsearch.md: -------------------------------------------------------------------------------- 1 | # BoxSearch 2 | 3 | ::: neurolib.optimize.exploration.exploration.BoxSearch -------------------------------------------------------------------------------- /docs/models/model.md: 
-------------------------------------------------------------------------------- 1 | # Models 2 | 3 | Models are the core of `neurolib`. The `Model` superclass will help you to load, simulate, and analyse models. It also makes it very easy to implement your own neural mass model (see [Example 0.6 custom model](/examples/example-0.6-custom-model/)). 4 | 5 | ## Loading a model 6 | To load a model, we need to import the submodule of a model and instantiate it. This example shows how to load a single node of the `ALNModel`. See [Example 0 aln minimal](/examples/example-0-aln-minimal/) on how to simulate a whole-brain network using this model. 7 | 8 | 9 | ``` 10 | from neurolib.models.aln import ALNModel # Import the model 11 | model = ALNModel() # Create an instance 12 | model.run() # Run it 13 | ``` 14 | 15 | ## Model base class methods 16 | 17 | ::: neurolib.models.model.Model -------------------------------------------------------------------------------- /docs/models/parameters.md: -------------------------------------------------------------------------------- 1 | # Parameters 2 | 3 | Model parameters in `neurolib` are stored as a dictionary-like object `params` as one of a model's attributes. Changing parameters is straightforward: 4 | 5 | ``` python 6 | from neurolib.models.aln import ALNModel # Import the model 7 | model = ALNModel() # Create an instance 8 | 9 | model.params['duration'] = 10 * 1000 # in ms 10 | model.run() # Run it 11 | ``` 12 | 13 | Parameters are `dotdict` objects that can also be accessed using the more simple syntax `model.params.parameter_name = 123` (see [Collections](/utils/collections/)). 14 | 15 | ## Default parameters 16 | 17 | The default parameters of a model are stored in the `loadDefaultParams.py` within each model's directory. This function is called by the `model.py` file upon initialisation and returns all necessary parameters of the model. 18 | 19 | Below is an example function that prepares the structural connectivity matrices `Cmat` and `Dmat`, all parameters of the model, and its initial values. 20 | 21 | ``` python 22 | def loadDefaultParams(Cmat=None, Dmat=None, seed=None): 23 | """Load default parameters for a model 24 | 25 | :param Cmat: Structural connectivity matrix (adjacency matrix) of coupling strengths, will be normalized to 1. If not given, then a single node simulation will be assumed, defaults to None 26 | :type Cmat: numpy.ndarray, optional 27 | :param Dmat: Fiber length matrix, will be used for computing the delay matrix together with the signal transmission speed parameter `signalV`, defaults to None 28 | :type Dmat: numpy.ndarray, optional 29 | :param seed: Seed for the random number generator, defaults to None 30 | :type seed: int, optional 31 | 32 | :return: A dictionary with the default parameters of the model 33 | :rtype: dict 34 | """ 35 | 36 | params = dotdict({}) 37 | 38 | ### runtime parameters 39 | params.dt = 0.1 # ms 0.1ms is reasonable 40 | params.duration = 2000 # Simulation duration (ms) 41 | np.random.seed(seed) # seed for RNG of noise and ICs 42 | # set seed to 0 if None, pypet will complain otherwise 43 | params.seed = seed or 0 44 | 45 | # make sure that seed=0 remains None 46 | if seed == 0: 47 | seed = None 48 | 49 | # ------------------------------------------------------------------------ 50 | # global whole-brain network parameters 51 | # ------------------------------------------------------------------------ 52 | 53 | # the coupling parameter determines how nodes are coupled. 
54 | # "diffusive" for diffusive coupling, "additive" for additive coupling 55 | params.coupling = "diffusive" 56 | 57 | params.signalV = 20.0 58 | params.K_gl = 0.6 # global coupling strength 59 | 60 | if Cmat is None: 61 | params.N = 1 62 | params.Cmat = np.zeros((1, 1)) 63 | params.lengthMat = np.zeros((1, 1)) 64 | 65 | else: 66 | params.Cmat = Cmat.copy() # coupling matrix 67 | np.fill_diagonal(params.Cmat, 0) # no self connections 68 | params.N = len(params.Cmat) # number of nodes 69 | params.lengthMat = Dmat 70 | 71 | # ------------------------------------------------------------------------ 72 | # local node parameters 73 | # ------------------------------------------------------------------------ 74 | 75 | # external input parameters: 76 | params.tau_ou = 5.0 # ms Timescale of the Ornstein-Uhlenbeck noise process 77 | params.sigma_ou = 0.0 # mV/ms/sqrt(ms) noise intensity 78 | params.x_ou_mean = 0.0 # mV/ms (OU process) [0-5] 79 | params.y_ou_mean = 0.0 # mV/ms (OU process) [0-5] 80 | 81 | # neural mass model parameters 82 | params.a = 0.25 # Hopf bifurcation parameter 83 | params.w = 0.2 # Oscillator frequency, 32 Hz at w = 0.2 84 | 85 | # ------------------------------------------------------------------------ 86 | 87 | # initial values of the state variables 88 | params.xs_init = 0.5 * np.random.uniform(-1, 1, (params.N, 1)) 89 | params.ys_init = 0.5 * np.random.uniform(-1, 1, (params.N, 1)) 90 | 91 | # Ornstein-Uhlenbeck noise state variables 92 | params.x_ou = np.zeros((params.N,)) 93 | params.y_ou = np.zeros((params.N,)) 94 | 95 | # values of the external inputs 96 | params.x_ext = np.zeros((params.N,)) 97 | params.y_ext = np.zeros((params.N,)) 98 | 99 | return params 100 | 101 | ``` -------------------------------------------------------------------------------- /docs/optimization/evolution.md: -------------------------------------------------------------------------------- 1 | # Evolution 2 | 3 | ::: neurolib.optimize.evolution.evolution.Evolution -------------------------------------------------------------------------------- /docs/utils/dataset.md: -------------------------------------------------------------------------------- 1 | # Dataset 2 | 3 | ::: neurolib.utils.loadData.Dataset -------------------------------------------------------------------------------- /docs/utils/functions.md: -------------------------------------------------------------------------------- 1 | # Functions 2 | 3 | ::: neurolib.utils.functions -------------------------------------------------------------------------------- /docs/utils/parameterspace.md: -------------------------------------------------------------------------------- 1 | # ParameterSpace 2 | 3 | ::: neurolib.utils.parameterSpace.ParameterSpace -------------------------------------------------------------------------------- /docs/utils/signal.md: -------------------------------------------------------------------------------- 1 | # Signal 2 | 3 | ::: neurolib.utils.signal.Signal -------------------------------------------------------------------------------- /docs/utils/stimulus.md: -------------------------------------------------------------------------------- 1 | # Stimulus 2 | 3 | ::: neurolib.utils.stimulus -------------------------------------------------------------------------------- /examples/data/AAL2_atlas_data/AAL2.nii: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/neurolib-dev/neurolib/9b6b2b8f082c0cfa212f05576ead55bf23046d6f/examples/data/AAL2_atlas_data/AAL2.nii -------------------------------------------------------------------------------- /examples/data/rs-meg.nc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neurolib-dev/neurolib/9b6b2b8f082c0cfa212f05576ead55bf23046d6f/examples/data/rs-meg.nc -------------------------------------------------------------------------------- /examples/example-2-evolutionary-optimization-minimal.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Simple example of the evolutionary optimization framework\n", 8 | "\n", 9 | "This notebook provides a simple example for the use of the evolutionary optimization framework builtin to the library. Under the hood, the implementation of the evolutionary algorithm is powered by `deap` and `pypet` cares about the parallelization and storage of the simulation data for us. \n", 10 | "\n", 11 | "Here we demonstrate how to fit parameters of a the evaluation function `optimize_me` which simply computes the distance of the parameters to the unit circle and returns this as the `fitness_tuple` that DEAP expects." 12 | ] 13 | }, 14 | { 15 | "cell_type": "code", 16 | "execution_count": 1, 17 | "metadata": {}, 18 | "outputs": [], 19 | "source": [ 20 | "# change to the root directory of the project\n", 21 | "import os\n", 22 | "if os.getcwd().split(\"/\")[-1] == \"examples\":\n", 23 | " os.chdir('..')\n", 24 | " \n", 25 | "# This will reload all imports as soon as the code changes\n", 26 | "%load_ext autoreload\n", 27 | "%autoreload 2 " 28 | ] 29 | }, 30 | { 31 | "cell_type": "code", 32 | "execution_count": null, 33 | "metadata": {}, 34 | "outputs": [], 35 | "source": [ 36 | "try:\n", 37 | " import matplotlib.pyplot as plt\n", 38 | "except ImportError:\n", 39 | " import sys\n", 40 | " !{sys.executable} -m pip install matplotlib seaborn\n", 41 | " import matplotlib.pyplot as plt\n", 42 | " \n", 43 | "import numpy as np\n", 44 | "import logging\n", 45 | "\n", 46 | "from neurolib.utils.parameterSpace import ParameterSpace\n", 47 | "from neurolib.optimize.evolution import Evolution\n", 48 | "\n", 49 | "import neurolib.optimize.evolution.evolutionaryUtils as eu\n", 50 | "import neurolib.utils.functions as func\n", 51 | "\n", 52 | "def optimize_me(traj):\n", 53 | " ind = evolution.getIndividualFromTraj(traj)\n", 54 | " logging.info(\"Hello, I am {}\".format(ind.id))\n", 55 | " logging.info(\"You can also call me {}, or simply ({:.2}, {:.2}).\".format(ind.params, ind.x, ind.y))\n", 56 | " \n", 57 | " # let's make a circle\n", 58 | " computation_result = abs((ind.x**2 + ind.y**2) - 1)\n", 59 | " # DEAP wants a tuple as fitness, ALWAYS!\n", 60 | " fitness_tuple = (computation_result ,)\n", 61 | " \n", 62 | " # we also require a dictionary with at least a single result for storing the results in the hdf\n", 63 | " result_dict = {}\n", 64 | " \n", 65 | " return fitness_tuple, result_dict\n", 66 | "\n", 67 | " \n", 68 | "pars = ParameterSpace(['x', 'y'], [[-5.0, 5.0], [-5.0, 5.0]])\n", 69 | "evolution = Evolution(optimize_me, pars, weightList = [-1.0],\n", 70 | " POP_INIT_SIZE=10, POP_SIZE = 6, NGEN=4, filename=\"example-2.0.hdf\")\n", 71 | "# info: chose POP_INIT_SIZE=100, POP_SIZE = 50, NGEN=10 for real exploration, \n", 72 | "# values here are low for testing: 
POP_INIT_SIZE=10, POP_SIZE = 6, NGEN=4\n", 73 | "\n", 74 | "evolution.run(verbose = True)" 75 | ] 76 | }, 77 | { 78 | "cell_type": "code", 79 | "execution_count": null, 80 | "metadata": {}, 81 | "outputs": [], 82 | "source": [ 83 | "evolution.loadResults()" 84 | ] 85 | }, 86 | { 87 | "cell_type": "code", 88 | "execution_count": null, 89 | "metadata": {}, 90 | "outputs": [], 91 | "source": [ 92 | "evolution.info(plot=True)" 93 | ] 94 | }, 95 | { 96 | "cell_type": "code", 97 | "execution_count": null, 98 | "metadata": {}, 99 | "outputs": [], 100 | "source": [ 101 | "gens, all_scores = evolution.getScoresDuringEvolution(reverse=True)\n", 102 | "\n", 103 | "import matplotlib.pyplot as plt\n", 104 | "plt.figure(figsize=(8, 4), dpi=200) \n", 105 | "plt.plot(gens, np.nanmean(all_scores, axis=1))\n", 106 | "plt.fill_between(gens, np.nanmin(all_scores, axis=1), np.nanmax(all_scores, axis=1), alpha=0.3)\n", 107 | "plt.xlabel(\"Generation #\")\n", 108 | "plt.ylabel(\"Score\")" 109 | ] 110 | } 111 | ], 112 | "metadata": { 113 | "kernelspec": { 114 | "display_name": "Python 3", 115 | "language": "python", 116 | "name": "python3" 117 | }, 118 | "language_info": { 119 | "codemirror_mode": { 120 | "name": "ipython", 121 | "version": 3 122 | }, 123 | "file_extension": ".py", 124 | "mimetype": "text/x-python", 125 | "name": "python", 126 | "nbconvert_exporter": "python", 127 | "pygments_lexer": "ipython3", 128 | "version": "3.7.3" 129 | } 130 | }, 131 | "nbformat": 4, 132 | "nbformat_minor": 5 133 | } 134 | -------------------------------------------------------------------------------- /mkdocs.yml: -------------------------------------------------------------------------------- 1 | site_name: Neurolib 2 | repo_name: 'neurolib-dev/neurolib' 3 | repo_url: https://github.com/neurolib-dev/neurolib 4 | 5 | theme: 6 | name: "material" 7 | 8 | extra: 9 | generator: false 10 | social: 11 | - icon: fontawesome/brands/twitter 12 | link: https://twitter.com/neurolib-dev 13 | name: neurolib on Twitter 14 | 15 | plugins: 16 | - search 17 | - mkdocstrings: 18 | handlers: 19 | python: 20 | selection: 21 | docstring_style: sphinx 22 | - mknotebooks: 23 | binder: true 24 | binder_service_name: "gh" 25 | binder_branch: "master" 26 | 27 | markdown_extensions: 28 | - codehilite 29 | - pymdownx.arithmatex: 30 | generic: true 31 | - pymdownx.highlight 32 | - pymdownx.superfences 33 | 34 | extra_javascript: 35 | - js/config.js 36 | - https://polyfill.io/v3/polyfill.min.js?features=es6 37 | - https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-mml-chtml.js 38 | 39 | google_analytics: 40 | - UA-189722517-1 41 | - auto 42 | -------------------------------------------------------------------------------- /neurolib/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neurolib-dev/neurolib/9b6b2b8f082c0cfa212f05576ead55bf23046d6f/neurolib/__init__.py -------------------------------------------------------------------------------- /neurolib/control/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neurolib-dev/neurolib/9b6b2b8f082c0cfa212f05576ead55bf23046d6f/neurolib/control/__init__.py -------------------------------------------------------------------------------- /neurolib/control/optimal_control/__init__.py: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/neurolib-dev/neurolib/9b6b2b8f082c0cfa212f05576ead55bf23046d6f/neurolib/control/optimal_control/__init__.py -------------------------------------------------------------------------------- /neurolib/control/optimal_control/oc_aln/__init__.py: -------------------------------------------------------------------------------- 1 | from .oc_aln import OcAln 2 | -------------------------------------------------------------------------------- /neurolib/control/optimal_control/oc_fhn/__init__.py: -------------------------------------------------------------------------------- 1 | from .oc_fhn import OcFhn 2 | -------------------------------------------------------------------------------- /neurolib/control/optimal_control/oc_fhn/oc_fhn.py: -------------------------------------------------------------------------------- 1 | import numba 2 | 3 | from neurolib.control.optimal_control.oc import OC 4 | from neurolib.models.fhn.timeIntegration import compute_hx, compute_hx_nw, Dxdoth, Duh 5 | 6 | 7 | class OcFhn(OC): 8 | """Class for optimal control specific to neurolib's implementation of the FitzHugh-Nagumo (FHN) model. 9 | 10 | :param model: Instance of FHN model (can describe a single FHN node or a network of coupled FHN nodes. 11 | :type model: neurolib.models.fhn.model.FHNModel 12 | """ 13 | 14 | def __init__( 15 | self, 16 | model, 17 | target, 18 | weights=None, 19 | maximum_control_strength=None, 20 | print_array=[], 21 | cost_interval=(None, None), 22 | control_interval=(None, None), 23 | cost_matrix=None, 24 | control_matrix=None, 25 | M=1, 26 | M_validation=0, 27 | validate_per_step=False, 28 | ): 29 | super().__init__( 30 | model, 31 | target, 32 | weights=weights, 33 | maximum_control_strength=maximum_control_strength, 34 | print_array=print_array, 35 | cost_interval=cost_interval, 36 | control_interval=control_interval, 37 | cost_matrix=cost_matrix, 38 | control_matrix=control_matrix, 39 | M=M, 40 | M_validation=M_validation, 41 | validate_per_step=validate_per_step, 42 | ) 43 | 44 | assert self.model.name == "fhn" 45 | 46 | def compute_dxdoth(self): 47 | """Derivative of systems dynamics wrt. change of systems variables.""" 48 | return Dxdoth(self.N, self.dim_vars) 49 | 50 | def get_model_params(self): 51 | """Model params as an ordered tuple 52 | 53 | :rtype: tuple""" 54 | return ( 55 | self.model.params.alpha, 56 | self.model.params.beta, 57 | self.model.params.gamma, 58 | self.model.params.tau, 59 | self.model.params.epsilon, 60 | ) 61 | 62 | def Duh(self): 63 | """4 x 4 Jacobian of systems dynamics wrt. external inputs (control signals) to all 'state_vars'. There are no 64 | inputs to the noise variables 'x_ou' and 'y_ou' in the model. 65 | 66 | :rtype: np.ndarray of shape 4 x 4 67 | """ 68 | return Duh( 69 | self.N, 70 | self.dim_in, 71 | self.dim_vars, 72 | self.T, 73 | self.state_vars_dict, 74 | ) 75 | 76 | def compute_hx_list(self): 77 | """List of Jacobians without and with time delays (e.g. in the ALN model) and list of respective time step delays as integers (0 for undelayed) 78 | 79 | :return: List of Jacobian matrices, list of time step delays 80 | : rtype: List of np.ndarray, List of integers 81 | """ 82 | hx = self.compute_hx() 83 | return numba.typed.List([hx]), numba.typed.List([0]) 84 | 85 | def compute_hx(self): 86 | """Jacobians of FHN model wrt. its 'state_vars' at each time step. 87 | 88 | :return: Array that contains Jacobians for all nodes in all time steps. 
89 | :rtype: np.ndarray of shape N x T x 4 x 4 90 | """ 91 | return compute_hx( 92 | self.model_params, 93 | self.model.params["K_gl"], 94 | self.model.params["Cmat"], 95 | self.model.params["coupling"], 96 | self.N, 97 | self.dim_vars, 98 | self.T, 99 | self.get_xs(), 100 | self.state_vars_dict, 101 | ) 102 | 103 | def compute_hx_nw(self): 104 | """Jacobians for each time step for the network coupling. 105 | 106 | :return: Jacobians for network connectivity in all time steps. 107 | :rtype: np.ndarray of shape N x N x T x 4 x 4 108 | """ 109 | return compute_hx_nw( 110 | self.model.params["K_gl"], 111 | self.model.params["Cmat"], 112 | self.N, 113 | self.dim_vars, 114 | self.T, 115 | self.state_vars_dict, 116 | ) 117 | -------------------------------------------------------------------------------- /neurolib/control/optimal_control/oc_hopf/__init__.py: -------------------------------------------------------------------------------- 1 | from .oc_hopf import OcHopf 2 | -------------------------------------------------------------------------------- /neurolib/control/optimal_control/oc_hopf/oc_hopf.py: -------------------------------------------------------------------------------- 1 | import numba 2 | 3 | from neurolib.control.optimal_control.oc import OC 4 | from neurolib.models.hopf.timeIntegration import compute_hx, compute_hx_nw, Dxdoth, Duh 5 | 6 | 7 | class OcHopf(OC): 8 | """Class for optimal control specific to neurolib's implementation of the Stuart-Landau model with Hopf 9 | bifurcation ("Hopf model"). 10 | 11 | :param model: Instance of Hopf model (can describe a single Hopf node or a network of coupled Hopf nodes. 12 | :type model: neurolib.models.hopf.model.HopfModel 13 | """ 14 | 15 | # Remark: very similar to FHN! 16 | def __init__( 17 | self, 18 | model, 19 | target, 20 | weights=None, 21 | maximum_control_strength=None, 22 | print_array=[], 23 | cost_interval=(None, None), 24 | control_interval=(None, None), 25 | cost_matrix=None, 26 | control_matrix=None, 27 | M=1, 28 | M_validation=0, 29 | validate_per_step=False, 30 | ): 31 | super().__init__( 32 | model, 33 | target, 34 | weights=weights, 35 | maximum_control_strength=maximum_control_strength, 36 | print_array=print_array, 37 | cost_interval=cost_interval, 38 | control_interval=control_interval, 39 | cost_matrix=cost_matrix, 40 | control_matrix=control_matrix, 41 | M=M, 42 | M_validation=M_validation, 43 | validate_per_step=validate_per_step, 44 | ) 45 | 46 | assert self.model.name == "hopf" 47 | 48 | def compute_dxdoth(self): 49 | """Derivative of systems dynamics wrt. change of systems variables.""" 50 | return Dxdoth(self.N, self.dim_vars) 51 | 52 | def get_model_params(self): 53 | """Model params as an ordered tuple. 54 | 55 | :rtype: tuple 56 | """ 57 | return ( 58 | self.model.params.a, 59 | self.model.params.w, 60 | ) 61 | 62 | def Duh(self): 63 | """4 x 4 Jacobian of systems dynamics wrt. external inputs (control signals) to all 'state_vars'. There are no 64 | inputs to the noise variables 'x_ou' and 'y_ou' in the model. 65 | 66 | :rtype: np.ndarray of shape 4 x 4 67 | """ 68 | return Duh( 69 | self.N, 70 | self.dim_in, 71 | self.dim_vars, 72 | self.T, 73 | self.state_vars_dict, 74 | ) 75 | 76 | def compute_hx_list(self): 77 | """List of Jacobians without and with time delays (e.g. 
in the ALN model) and list of respective time step delays as integers (0 for undelayed) 78 | 79 | :return: List of Jacobian matrices, list of time step delays 80 | : rtype: List of np.ndarray, List of integers 81 | """ 82 | hx = self.compute_hx() 83 | return numba.typed.List([hx]), numba.typed.List([0]) 84 | 85 | def compute_hx(self): 86 | """Jacobians of Hopf model wrt. its 'state_vars' at each time step. 87 | 88 | :return: Array that contains Jacobians for all nodes in all time steps. 89 | :rtype: np.ndarray of shape N x T x 4 x 4 90 | """ 91 | return compute_hx( 92 | self.model_params, 93 | self.model.params["K_gl"], 94 | self.model.params["Cmat"], 95 | self.model.params["coupling"], 96 | self.N, 97 | self.dim_vars, 98 | self.T, 99 | self.get_xs(), 100 | self.state_vars_dict, 101 | ) 102 | 103 | def compute_hx_nw(self): 104 | """Jacobians for each time step for the network coupling. 105 | 106 | :return: Jacobians for network connectivity in all time steps. 107 | :rtype: np.ndarray of shape N x N x T x (4x4) 108 | """ 109 | return compute_hx_nw( 110 | self.model.params["K_gl"], 111 | self.model.params["Cmat"], 112 | self.N, 113 | self.dim_vars, 114 | self.T, 115 | self.state_vars_dict, 116 | ) 117 | -------------------------------------------------------------------------------- /neurolib/control/optimal_control/oc_wc/__init__.py: -------------------------------------------------------------------------------- 1 | from .oc_wc import OcWc 2 | -------------------------------------------------------------------------------- /neurolib/control/optimal_control/oc_wc/oc_wc.py: -------------------------------------------------------------------------------- 1 | import numba 2 | 3 | from neurolib.control.optimal_control.oc import OC 4 | from neurolib.models.wc.timeIntegration import compute_hx, compute_hx_nw, Duh, Dxdoth 5 | 6 | 7 | class OcWc(OC): 8 | """Class for optimal control specific to neurolib's implementation of the two-population Wilson-Cowan model 9 | ("WCmodel"). 10 | 11 | :param model: Instance of Wilson-Cowan model (can describe a single Wilson-Cowan node or a network of coupled 12 | Wilson-Cowan nodes. 13 | :type model: neurolib.models.wc.model.WCModel 14 | """ 15 | 16 | def __init__( 17 | self, 18 | model, 19 | target, 20 | weights=None, 21 | print_array=[], 22 | cost_interval=(None, None), 23 | control_interval=(None, None), 24 | cost_matrix=None, 25 | control_matrix=None, 26 | M=1, 27 | M_validation=0, 28 | validate_per_step=False, 29 | ): 30 | super().__init__( 31 | model, 32 | target, 33 | weights=weights, 34 | print_array=print_array, 35 | cost_interval=cost_interval, 36 | control_interval=control_interval, 37 | cost_matrix=cost_matrix, 38 | control_matrix=control_matrix, 39 | M=M, 40 | M_validation=M_validation, 41 | validate_per_step=validate_per_step, 42 | ) 43 | 44 | assert self.model.name == "wc" 45 | 46 | def compute_dxdoth(self): 47 | """Derivative of systems dynamics wrt. change of systems variables.""" 48 | return Dxdoth(self.N, self.dim_vars) 49 | 50 | def get_model_params(self): 51 | """Model params as an ordered tuple. 
52 | 53 | :rtype: tuple 54 | """ 55 | return ( 56 | self.model.params.tau_exc, 57 | self.model.params.tau_inh, 58 | self.model.params.a_exc, 59 | self.model.params.a_inh, 60 | self.model.params.mu_exc, 61 | self.model.params.mu_inh, 62 | self.model.params.c_excexc, 63 | self.model.params.c_inhexc, 64 | self.model.params.c_excinh, 65 | self.model.params.c_inhinh, 66 | self.model.params.exc_ext_baseline, 67 | self.model.params.inh_ext_baseline, 68 | ) 69 | 70 | def Duh(self): 71 | """Jacobian of systems dynamics wrt. external control input. 72 | 73 | :return: N x 4 x 4 x T Jacobians. 74 | :rtype: np.ndarray 75 | """ 76 | 77 | xs = self.get_xs() 78 | xsd = self.get_xs_delay() 79 | 80 | return Duh( 81 | self.model_params, 82 | self.N, 83 | self.dim_in, 84 | self.dim_vars, 85 | self.T, 86 | self.control[:, self.state_vars_dict["exc"], :], 87 | self.control[:, self.state_vars_dict["inh"], :], 88 | xs[:, self.state_vars_dict["exc"], :], 89 | xs[:, self.state_vars_dict["inh"], :], 90 | self.model.params.K_gl, 91 | self.model.params.Cmat, 92 | self.Dmat_ndt, 93 | xsd[:, self.state_vars_dict["exc"], :], 94 | self.state_vars_dict, 95 | ) 96 | 97 | def compute_hx_list(self): 98 | """List of Jacobians without and with time delays (e.g. in the ALN model) and list of respective time step delays as integers (0 for undelayed) 99 | 100 | :return: List of Jacobian matrices, list of time step delays 101 | : rtype: List of np.ndarray, List of integers 102 | """ 103 | hx = self.compute_hx() 104 | return numba.typed.List([hx]), numba.typed.List([0]) 105 | 106 | def compute_hx(self): 107 | """Jacobians of WCModel wrt. the 'e'- and 'i'-variable for each time step. 108 | 109 | :return: N x T x 4 x 4 Jacobians. 110 | :rtype: np.ndarray 111 | """ 112 | return compute_hx( 113 | self.model_params, 114 | self.model.params.K_gl, 115 | self.model.Cmat, 116 | self.Dmat_ndt, 117 | self.N, 118 | self.dim_vars, 119 | self.T, 120 | self.get_xs(), 121 | self.get_xs_delay(), 122 | self.control, 123 | self.state_vars_dict, 124 | ) 125 | 126 | def compute_hx_nw(self): 127 | """Jacobians for each time step for the network coupling. 
128 | 129 | :return: N x N x T x (4x4) array 130 | :rtype: np.ndarray 131 | """ 132 | 133 | xs = self.get_xs() 134 | 135 | return compute_hx_nw( 136 | self.model_params, 137 | self.model.params.K_gl, 138 | self.model.Cmat, 139 | self.Dmat_ndt, 140 | self.N, 141 | self.dim_vars, 142 | self.T, 143 | xs[:, self.state_vars_dict["exc"], :], 144 | xs[:, self.state_vars_dict["inh"], :], 145 | self.get_xs_delay()[:, self.state_vars_dict["exc"], :], 146 | self.control[:, self.state_vars_dict["exc"], :], 147 | self.state_vars_dict, 148 | ) 149 | -------------------------------------------------------------------------------- /neurolib/data/datasets/gw/subjects/NAP_001/functional/BOLD_rsfMRI.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neurolib-dev/neurolib/9b6b2b8f082c0cfa212f05576ead55bf23046d6f/neurolib/data/datasets/gw/subjects/NAP_001/functional/BOLD_rsfMRI.mat -------------------------------------------------------------------------------- /neurolib/data/datasets/gw/subjects/NAP_001/structural/DTI_CM.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neurolib-dev/neurolib/9b6b2b8f082c0cfa212f05576ead55bf23046d6f/neurolib/data/datasets/gw/subjects/NAP_001/structural/DTI_CM.mat -------------------------------------------------------------------------------- /neurolib/data/datasets/gw/subjects/NAP_001/structural/DTI_LEN.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neurolib-dev/neurolib/9b6b2b8f082c0cfa212f05576ead55bf23046d6f/neurolib/data/datasets/gw/subjects/NAP_001/structural/DTI_LEN.mat -------------------------------------------------------------------------------- /neurolib/data/datasets/gw/subjects/NAP_002/functional/BOLD_rsfMRI.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neurolib-dev/neurolib/9b6b2b8f082c0cfa212f05576ead55bf23046d6f/neurolib/data/datasets/gw/subjects/NAP_002/functional/BOLD_rsfMRI.mat -------------------------------------------------------------------------------- /neurolib/data/datasets/gw/subjects/NAP_002/structural/DTI_CM.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neurolib-dev/neurolib/9b6b2b8f082c0cfa212f05576ead55bf23046d6f/neurolib/data/datasets/gw/subjects/NAP_002/structural/DTI_CM.mat -------------------------------------------------------------------------------- /neurolib/data/datasets/gw/subjects/NAP_002/structural/DTI_LEN.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neurolib-dev/neurolib/9b6b2b8f082c0cfa212f05576ead55bf23046d6f/neurolib/data/datasets/gw/subjects/NAP_002/structural/DTI_LEN.mat -------------------------------------------------------------------------------- /neurolib/data/datasets/gw/subjects/NAP_007/functional/BOLD_rsfMRI.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neurolib-dev/neurolib/9b6b2b8f082c0cfa212f05576ead55bf23046d6f/neurolib/data/datasets/gw/subjects/NAP_007/functional/BOLD_rsfMRI.mat -------------------------------------------------------------------------------- /neurolib/data/datasets/gw/subjects/NAP_007/structural/DTI_CM.mat: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/neurolib-dev/neurolib/9b6b2b8f082c0cfa212f05576ead55bf23046d6f/neurolib/data/datasets/gw/subjects/NAP_007/structural/DTI_CM.mat -------------------------------------------------------------------------------- /neurolib/data/datasets/gw/subjects/NAP_007/structural/DTI_LEN.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neurolib-dev/neurolib/9b6b2b8f082c0cfa212f05576ead55bf23046d6f/neurolib/data/datasets/gw/subjects/NAP_007/structural/DTI_LEN.mat -------------------------------------------------------------------------------- /neurolib/data/datasets/gw/subjects/NAP_009/functional/BOLD_rsfMRI.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neurolib-dev/neurolib/9b6b2b8f082c0cfa212f05576ead55bf23046d6f/neurolib/data/datasets/gw/subjects/NAP_009/functional/BOLD_rsfMRI.mat -------------------------------------------------------------------------------- /neurolib/data/datasets/gw/subjects/NAP_009/structural/DTI_CM.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neurolib-dev/neurolib/9b6b2b8f082c0cfa212f05576ead55bf23046d6f/neurolib/data/datasets/gw/subjects/NAP_009/structural/DTI_CM.mat -------------------------------------------------------------------------------- /neurolib/data/datasets/gw/subjects/NAP_009/structural/DTI_LEN.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neurolib-dev/neurolib/9b6b2b8f082c0cfa212f05576ead55bf23046d6f/neurolib/data/datasets/gw/subjects/NAP_009/structural/DTI_LEN.mat -------------------------------------------------------------------------------- /neurolib/data/datasets/gw/subjects/NAP_013/functional/BOLD_rsfMRI.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neurolib-dev/neurolib/9b6b2b8f082c0cfa212f05576ead55bf23046d6f/neurolib/data/datasets/gw/subjects/NAP_013/functional/BOLD_rsfMRI.mat -------------------------------------------------------------------------------- /neurolib/data/datasets/gw/subjects/NAP_013/structural/DTI_CM.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neurolib-dev/neurolib/9b6b2b8f082c0cfa212f05576ead55bf23046d6f/neurolib/data/datasets/gw/subjects/NAP_013/structural/DTI_CM.mat -------------------------------------------------------------------------------- /neurolib/data/datasets/gw/subjects/NAP_013/structural/DTI_LEN.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neurolib-dev/neurolib/9b6b2b8f082c0cfa212f05576ead55bf23046d6f/neurolib/data/datasets/gw/subjects/NAP_013/structural/DTI_LEN.mat -------------------------------------------------------------------------------- /neurolib/data/datasets/hcp/subjects/101309/functional/TC_rsfMRI_REST1_LR.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neurolib-dev/neurolib/9b6b2b8f082c0cfa212f05576ead55bf23046d6f/neurolib/data/datasets/hcp/subjects/101309/functional/TC_rsfMRI_REST1_LR.mat -------------------------------------------------------------------------------- /neurolib/data/datasets/hcp/subjects/101309/structural/DTI_CM.mat: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/neurolib-dev/neurolib/9b6b2b8f082c0cfa212f05576ead55bf23046d6f/neurolib/data/datasets/hcp/subjects/101309/structural/DTI_CM.mat -------------------------------------------------------------------------------- /neurolib/data/datasets/hcp/subjects/101309/structural/DTI_LEN.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neurolib-dev/neurolib/9b6b2b8f082c0cfa212f05576ead55bf23046d6f/neurolib/data/datasets/hcp/subjects/101309/structural/DTI_LEN.mat -------------------------------------------------------------------------------- /neurolib/data/datasets/hcp/subjects/101309/structural/nvoxel.txt: -------------------------------------------------------------------------------- 1 | 3766 30128.000000 2 | 3784 30272.000000 3 | 5247 41976.000000 4 | 5726 45808.000000 5 | 5064 40512.000000 6 | 5285 42280.000000 7 | 1322 10576.000000 8 | 1696 13568.000000 9 | 2609 20872.000000 10 | 2384 19072.000000 11 | 938 7504.000000 12 | 982 7856.000000 13 | 1270 10160.000000 14 | 1526 12208.000000 15 | 2460 19680.000000 16 | 2347 18776.000000 17 | 438 3504.000000 18 | 446 3568.000000 19 | 3314 26512.000000 20 | 2207 17656.000000 21 | 812 6496.000000 22 | 1088 8704.000000 23 | 946 7568.000000 24 | 931 7448.000000 25 | 754 6032.000000 26 | 811 6488.000000 27 | 642 5136.000000 28 | 877 7016.000000 29 | 700 5600.000000 30 | 781 6248.000000 31 | 272 2176.000000 32 | 280 2240.000000 33 | 2165 17320.000000 34 | 2131 17048.000000 35 | 1748 13984.000000 36 | 1518 12144.000000 37 | 2241 17928.000000 38 | 2396 19168.000000 39 | 626 5008.000000 40 | 480 3840.000000 41 | 1236 9888.000000 42 | 1230 9840.000000 43 | 1309 10472.000000 44 | 1369 10952.000000 45 | 339 2712.000000 46 | 334 2672.000000 47 | 2527 20216.000000 48 | 2055 16440.000000 49 | 1715 13720.000000 50 | 1633 13064.000000 51 | 2358 18864.000000 52 | 2537 20296.000000 53 | 1711 13688.000000 54 | 1829 14632.000000 55 | 3601 28808.000000 56 | 2334 18672.000000 57 | 1251 10008.000000 58 | 1162 9296.000000 59 | 2695 21560.000000 60 | 3083 24664.000000 61 | 4141 33128.000000 62 | 4246 33968.000000 63 | 2240 17920.000000 64 | 2532 20256.000000 65 | 2698 21584.000000 66 | 1595 12760.000000 67 | 1517 12136.000000 68 | 2169 17352.000000 69 | 1374 10992.000000 70 | 2026 16208.000000 71 | 3951 31608.000000 72 | 3483 27864.000000 73 | 1517 12136.000000 74 | 933 7464.000000 75 | 1080 8640.000000 76 | 1185 9480.000000 77 | 1311 10488.000000 78 | 1339 10712.000000 79 | 413 3304.000000 80 | 324 2592.000000 81 | 1135 9080.000000 82 | 1070 8560.000000 83 | 317 2536.000000 84 | 384 3072.000000 85 | 2542 20336.000000 86 | 3243 25944.000000 87 | 1589 12712.000000 88 | 1618 12944.000000 89 | 5083 40664.000000 90 | 4614 36912.000000 91 | 1019 8152.000000 92 | 1457 11656.000000 93 | 3547 28376.000000 94 | 3756 30048.000000 95 | -------------------------------------------------------------------------------- /neurolib/data/datasets/hcp/subjects/101309/structural/waytotal.txt: -------------------------------------------------------------------------------- 1 | 13145863 2 | 10274154 3 | 16466859 4 | 17081722 5 | 16047650 6 | 13832979 7 | 4144984 8 | 4543567 9 | 8322682 10 | 5856283 11 | 3123257 12 | 2598082 13 | 4102181 14 | 4845148 15 | 6902061 16 | 8808517 17 | 1032638 18 | 1163528 19 | 9799408 20 | 7700227 21 | 2566326 22 | 3150054 23 | 2313316 24 | 2167774 25 | 1373941 26 | 1637623 27 | 1719440 28 | 
2177893 29 | 1908047 30 | 1512678 31 | 737158 32 | 428157 33 | 7382741 34 | 6930718 35 | 5385834 36 | 5247739 37 | 7533183 38 | 9378937 39 | 2038698 40 | 1617885 41 | 3402647 42 | 3927800 43 | 3324352 44 | 4378557 45 | 875051 46 | 1044052 47 | 8061831 48 | 7810102 49 | 5381282 50 | 5994349 51 | 7172813 52 | 8423797 53 | 4980405 54 | 5873509 55 | 10890900 56 | 7555625 57 | 3790102 58 | 3631957 59 | 7499873 60 | 7892802 61 | 13509768 62 | 11181686 63 | 6954885 64 | 6022555 65 | 9097161 66 | 5686553 67 | 5137519 68 | 7139712 69 | 4541130 70 | 6628195 71 | 12376108 72 | 12604585 73 | 3851121 74 | 3039646 75 | 3560491 76 | 4012713 77 | 3936865 78 | 4796674 79 | 1015913 80 | 818981 81 | 4447503 82 | 4539772 83 | 1028988 84 | 1052209 85 | 9335928 86 | 10344444 87 | 3075206 88 | 2606820 89 | 16306748 90 | 14630105 91 | 1731337 92 | 2407121 93 | 9649513 94 | 9521774 95 | -------------------------------------------------------------------------------- /neurolib/data/datasets/hcp/subjects/102311/functional/TC_rsfMRI_REST1_LR.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neurolib-dev/neurolib/9b6b2b8f082c0cfa212f05576ead55bf23046d6f/neurolib/data/datasets/hcp/subjects/102311/functional/TC_rsfMRI_REST1_LR.mat -------------------------------------------------------------------------------- /neurolib/data/datasets/hcp/subjects/102311/structural/DTI_CM.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neurolib-dev/neurolib/9b6b2b8f082c0cfa212f05576ead55bf23046d6f/neurolib/data/datasets/hcp/subjects/102311/structural/DTI_CM.mat -------------------------------------------------------------------------------- /neurolib/data/datasets/hcp/subjects/102311/structural/DTI_LEN.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neurolib-dev/neurolib/9b6b2b8f082c0cfa212f05576ead55bf23046d6f/neurolib/data/datasets/hcp/subjects/102311/structural/DTI_LEN.mat -------------------------------------------------------------------------------- /neurolib/data/datasets/hcp/subjects/102311/structural/nvoxel.txt: -------------------------------------------------------------------------------- 1 | 3618 28944.000000 2 | 3472 27776.000000 3 | 5205 41640.000000 4 | 5279 42232.000000 5 | 4800 38400.000000 6 | 5076 40608.000000 7 | 1297 10376.000000 8 | 1622 12976.000000 9 | 2527 20216.000000 10 | 2287 18296.000000 11 | 848 6784.000000 12 | 913 7304.000000 13 | 1195 9560.000000 14 | 1464 11712.000000 15 | 2115 16920.000000 16 | 2629 21032.000000 17 | 420 3360.000000 18 | 495 3960.000000 19 | 2969 23752.000000 20 | 2344 18752.000000 21 | 752 6016.000000 22 | 960 7680.000000 23 | 950 7600.000000 24 | 943 7544.000000 25 | 757 6056.000000 26 | 699 5592.000000 27 | 552 4416.000000 28 | 894 7152.000000 29 | 614 4912.000000 30 | 676 5408.000000 31 | 251 2008.000000 32 | 280 2240.000000 33 | 2209 17672.000000 34 | 1989 15912.000000 35 | 1513 12104.000000 36 | 1438 11504.000000 37 | 2086 16688.000000 38 | 2542 20336.000000 39 | 565 4520.000000 40 | 499 3992.000000 41 | 1156 9248.000000 42 | 1178 9424.000000 43 | 1220 9760.000000 44 | 1359 10872.000000 45 | 289 2312.000000 46 | 385 3080.000000 47 | 2253 18024.000000 48 | 1976 15808.000000 49 | 1729 13832.000000 50 | 1667 13336.000000 51 | 2357 18856.000000 52 | 2520 20160.000000 53 | 1695 13560.000000 54 | 1659 13272.000000 55 | 3473 27784.000000 56 | 2240 17920.000000 57 | 1112 
8896.000000 58 | 1018 8144.000000 59 | 2771 22168.000000 60 | 2819 22552.000000 61 | 3943 31544.000000 62 | 4043 32344.000000 63 | 2312 18496.000000 64 | 2408 19264.000000 65 | 2545 20360.000000 66 | 1465 11720.000000 67 | 1374 10992.000000 68 | 2137 17096.000000 69 | 1267 10136.000000 70 | 1801 14408.000000 71 | 3668 29344.000000 72 | 3578 28624.000000 73 | 1322 10576.000000 74 | 957 7656.000000 75 | 1236 9888.000000 76 | 1121 8968.000000 77 | 1175 9400.000000 78 | 1271 10168.000000 79 | 397 3176.000000 80 | 358 2864.000000 81 | 1060 8480.000000 82 | 1050 8400.000000 83 | 329 2632.000000 84 | 373 2984.000000 85 | 2413 19304.000000 86 | 3289 26312.000000 87 | 1466 11728.000000 88 | 1490 11920.000000 89 | 4891 39128.000000 90 | 4450 35600.000000 91 | 882 7056.000000 92 | 1296 10368.000000 93 | 3308 26464.000000 94 | 3650 29200.000000 95 | -------------------------------------------------------------------------------- /neurolib/data/datasets/hcp/subjects/102311/structural/waytotal.txt: -------------------------------------------------------------------------------- 1 | 10966800 2 | 7608188 3 | 16206023 4 | 16262408 5 | 15221981 6 | 13919799 7 | 3930853 8 | 4551069 9 | 7494166 10 | 5569196 11 | 3061829 12 | 2570452 13 | 4260607 14 | 4950013 15 | 7132929 16 | 8719271 17 | 1054703 18 | 1194763 19 | 10425606 20 | 8595258 21 | 2628955 22 | 3511383 23 | 1879240 24 | 2078601 25 | 1237432 26 | 1363353 27 | 1691314 28 | 1953598 29 | 1991296 30 | 1725554 31 | 802488 32 | 424644 33 | 7019979 34 | 6950435 35 | 5343542 36 | 5558694 37 | 7528504 38 | 9018882 39 | 2016936 40 | 1582720 41 | 3691618 42 | 3648174 43 | 3427400 44 | 4322642 45 | 915797 46 | 1069963 47 | 7289622 48 | 7427919 49 | 5964728 50 | 6090753 51 | 6890530 52 | 7757290 53 | 5455375 54 | 5722769 55 | 11753308 56 | 7056740 57 | 3972967 58 | 3700832 59 | 7666400 60 | 8947179 61 | 11449636 62 | 10301140 63 | 6927893 64 | 6756796 65 | 9238669 66 | 5482130 67 | 4754830 68 | 6565746 69 | 4658519 70 | 6269692 71 | 12181267 72 | 13263282 73 | 3098569 74 | 2945143 75 | 2635919 76 | 3597158 77 | 4080581 78 | 4706866 79 | 1058433 80 | 872880 81 | 4595630 82 | 4739409 83 | 975649 84 | 1016294 85 | 9018849 86 | 10436254 87 | 3264756 88 | 3530723 89 | 16498476 90 | 15636330 91 | 2447436 92 | 2761838 93 | 10534622 94 | 10091916 95 | -------------------------------------------------------------------------------- /neurolib/data/datasets/hcp/subjects/102816/functional/TC_rsfMRI_REST1_LR.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neurolib-dev/neurolib/9b6b2b8f082c0cfa212f05576ead55bf23046d6f/neurolib/data/datasets/hcp/subjects/102816/functional/TC_rsfMRI_REST1_LR.mat -------------------------------------------------------------------------------- /neurolib/data/datasets/hcp/subjects/102816/structural/DTI_CM.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neurolib-dev/neurolib/9b6b2b8f082c0cfa212f05576ead55bf23046d6f/neurolib/data/datasets/hcp/subjects/102816/structural/DTI_CM.mat -------------------------------------------------------------------------------- /neurolib/data/datasets/hcp/subjects/102816/structural/DTI_LEN.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neurolib-dev/neurolib/9b6b2b8f082c0cfa212f05576ead55bf23046d6f/neurolib/data/datasets/hcp/subjects/102816/structural/DTI_LEN.mat 
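The per-subject files above (resting-state time series, structural DTI_CM/DTI_LEN matrices, and the region-wise voxel and streamline counts in nvoxel.txt/waytotal.txt) are what neurolib's dataset loader consumes. A minimal sketch, assuming the Dataset utility from neurolib.utils.loadData and the Cmat/Dmat attribute names used in the package's examples:

    from neurolib.utils.loadData import Dataset

    ds = Dataset("hcp")      # or "gw"; loads the subject folders shipped with the package
    print(ds.Cmat.shape)     # group-averaged structural connectivity matrix (N x N)
    print(ds.Dmat.shape)     # fiber length matrix, used to compute signal delays (N x N)

The resulting matrices are typically passed straight into a model constructor, e.g. ALNModel(Cmat=ds.Cmat, Dmat=ds.Dmat).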
-------------------------------------------------------------------------------- /neurolib/data/datasets/hcp/subjects/102816/structural/nvoxel.txt: -------------------------------------------------------------------------------- 1 | 3465 27720.000000 2 | 3388 27104.000000 3 | 4817 38536.000000 4 | 5131 41048.000000 5 | 4464 35712.000000 6 | 4846 38768.000000 7 | 1193 9544.000000 8 | 1556 12448.000000 9 | 2359 18872.000000 10 | 2121 16968.000000 11 | 837 6696.000000 12 | 929 7432.000000 13 | 1014 8112.000000 14 | 1450 11600.000000 15 | 2020 16160.000000 16 | 2152 17216.000000 17 | 357 2856.000000 18 | 408 3264.000000 19 | 2841 22728.000000 20 | 2040 16320.000000 21 | 789 6312.000000 22 | 979 7832.000000 23 | 911 7288.000000 24 | 898 7184.000000 25 | 695 5560.000000 26 | 723 5784.000000 27 | 594 4752.000000 28 | 741 5928.000000 29 | 668 5344.000000 30 | 721 5768.000000 31 | 313 2504.000000 32 | 228 1824.000000 33 | 2032 16256.000000 34 | 1880 15040.000000 35 | 1516 12128.000000 36 | 1404 11232.000000 37 | 1910 15280.000000 38 | 2281 18248.000000 39 | 570 4560.000000 40 | 496 3968.000000 41 | 1109 8872.000000 42 | 1105 8840.000000 43 | 1168 9344.000000 44 | 1307 10456.000000 45 | 321 2568.000000 46 | 356 2848.000000 47 | 2215 17720.000000 48 | 1856 14848.000000 49 | 1605 12840.000000 50 | 1544 12352.000000 51 | 2050 16400.000000 52 | 2367 18936.000000 53 | 1529 12232.000000 54 | 1636 13088.000000 55 | 3196 25568.000000 56 | 2053 16424.000000 57 | 1018 8144.000000 58 | 1123 8984.000000 59 | 2519 20152.000000 60 | 2716 21728.000000 61 | 3683 29464.000000 62 | 3656 29248.000000 63 | 2095 16760.000000 64 | 2211 17688.000000 65 | 2528 20224.000000 66 | 1334 10672.000000 67 | 1291 10328.000000 68 | 2047 16376.000000 69 | 1224 9792.000000 70 | 1828 14624.000000 71 | 3324 26592.000000 72 | 3184 25472.000000 73 | 1331 10648.000000 74 | 805 6440.000000 75 | 1098 8784.000000 76 | 1066 8528.000000 77 | 1042 8336.000000 78 | 1137 9096.000000 79 | 364 2912.000000 80 | 316 2528.000000 81 | 1057 8456.000000 82 | 1024 8192.000000 83 | 301 2408.000000 84 | 325 2600.000000 85 | 2258 18064.000000 86 | 3170 25360.000000 87 | 1322 10576.000000 88 | 1431 11448.000000 89 | 4468 35744.000000 90 | 4343 34744.000000 91 | 915 7320.000000 92 | 1284 10272.000000 93 | 3179 25432.000000 94 | 3436 27488.000000 95 | -------------------------------------------------------------------------------- /neurolib/data/datasets/hcp/subjects/102816/structural/waytotal.txt: -------------------------------------------------------------------------------- 1 | 10687367 2 | 10178132 3 | 16944527 4 | 17981887 5 | 15088661 6 | 16496606 7 | 3960503 8 | 5280129 9 | 8147693 10 | 6895160 11 | 3078179 12 | 2991467 13 | 4076279 14 | 5591541 15 | 7506829 16 | 8562525 17 | 1323226 18 | 1372066 19 | 10932936 20 | 8606539 21 | 2834796 22 | 3532273 23 | 3053872 24 | 2661394 25 | 1736711 26 | 1955320 27 | 1651277 28 | 2369832 29 | 2448854 30 | 2101578 31 | 687087 32 | 613005 33 | 7271939 34 | 7571128 35 | 6069031 36 | 5836360 37 | 8122203 38 | 9180148 39 | 2155328 40 | 1622213 41 | 4060072 42 | 4434494 43 | 3612566 44 | 4181443 45 | 999319 46 | 1175972 47 | 8684625 48 | 8057291 49 | 6094210 50 | 6167371 51 | 7165611 52 | 8043520 53 | 5863661 54 | 6091871 55 | 11514758 56 | 8072369 57 | 3960103 58 | 3871337 59 | 8170229 60 | 8666804 61 | 12684219 62 | 12143684 63 | 7227266 64 | 7009715 65 | 9451001 66 | 5945296 67 | 5183586 68 | 7766193 69 | 4729430 70 | 6989517 71 | 13039828 72 | 13745931 73 | 4003347 74 | 3108965 75 | 3375472 76 | 3606567 77 | 4504910 
78 | 5043458 79 | 1061833 80 | 1010110 81 | 4856695 82 | 4691025 83 | 998419 84 | 1075568 85 | 9133529 86 | 12007675 87 | 3334483 88 | 3761380 89 | 16843885 90 | 15719872 91 | 1965298 92 | 2557071 93 | 9935626 94 | 10813278 95 | -------------------------------------------------------------------------------- /neurolib/data/datasets/hcp/subjects/131217/functional/TC_rsfMRI_REST1_LR.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neurolib-dev/neurolib/9b6b2b8f082c0cfa212f05576ead55bf23046d6f/neurolib/data/datasets/hcp/subjects/131217/functional/TC_rsfMRI_REST1_LR.mat -------------------------------------------------------------------------------- /neurolib/data/datasets/hcp/subjects/131217/structural/DTI_CM.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neurolib-dev/neurolib/9b6b2b8f082c0cfa212f05576ead55bf23046d6f/neurolib/data/datasets/hcp/subjects/131217/structural/DTI_CM.mat -------------------------------------------------------------------------------- /neurolib/data/datasets/hcp/subjects/131217/structural/DTI_LEN.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neurolib-dev/neurolib/9b6b2b8f082c0cfa212f05576ead55bf23046d6f/neurolib/data/datasets/hcp/subjects/131217/structural/DTI_LEN.mat -------------------------------------------------------------------------------- /neurolib/data/datasets/hcp/subjects/131217/structural/nvoxel.txt: -------------------------------------------------------------------------------- 1 | 3624 28992.000000 2 | 3483 27864.000000 3 | 5270 42160.000000 4 | 5254 42032.000000 5 | 4804 38432.000000 6 | 5113 40904.000000 7 | 1319 10552.000000 8 | 1701 13608.000000 9 | 2498 19984.000000 10 | 2154 17232.000000 11 | 876 7008.000000 12 | 988 7904.000000 13 | 1177 9416.000000 14 | 1464 11712.000000 15 | 2029 16232.000000 16 | 2264 18112.000000 17 | 370 2960.000000 18 | 380 3040.000000 19 | 2964 23712.000000 20 | 2484 19872.000000 21 | 841 6728.000000 22 | 911 7288.000000 23 | 1004 8032.000000 24 | 796 6368.000000 25 | 701 5608.000000 26 | 809 6472.000000 27 | 591 4728.000000 28 | 730 5840.000000 29 | 696 5568.000000 30 | 647 5176.000000 31 | 315 2520.000000 32 | 280 2240.000000 33 | 2253 18024.000000 34 | 2129 17032.000000 35 | 1538 12304.000000 36 | 1550 12400.000000 37 | 1927 15416.000000 38 | 2505 20040.000000 39 | 582 4656.000000 40 | 519 4152.000000 41 | 1153 9224.000000 42 | 1125 9000.000000 43 | 1207 9656.000000 44 | 1445 11560.000000 45 | 304 2432.000000 46 | 311 2488.000000 47 | 2290 18320.000000 48 | 1966 15728.000000 49 | 1633 13064.000000 50 | 1620 12960.000000 51 | 2394 19152.000000 52 | 2583 20664.000000 53 | 1718 13744.000000 54 | 1655 13240.000000 55 | 3464 27712.000000 56 | 2186 17488.000000 57 | 1104 8832.000000 58 | 1048 8384.000000 59 | 2696 21568.000000 60 | 2878 23024.000000 61 | 3966 31728.000000 62 | 3997 31976.000000 63 | 2262 18096.000000 64 | 2282 18256.000000 65 | 2519 20152.000000 66 | 1459 11672.000000 67 | 1408 11264.000000 68 | 2048 16384.000000 69 | 1378 11024.000000 70 | 1910 15280.000000 71 | 3535 28280.000000 72 | 3424 27392.000000 73 | 1403 11224.000000 74 | 921 7368.000000 75 | 1198 9584.000000 76 | 1133 9064.000000 77 | 1044 8352.000000 78 | 1187 9496.000000 79 | 350 2800.000000 80 | 335 2680.000000 81 | 1206 9648.000000 82 | 1088 8704.000000 83 | 361 2888.000000 84 | 385 3080.000000 85 | 2481 19848.000000 86 | 3098 
24784.000000 87 | 1351 10808.000000 88 | 1517 12136.000000 89 | 4842 38736.000000 90 | 4353 34824.000000 91 | 970 7760.000000 92 | 1404 11232.000000 93 | 3324 26592.000000 94 | 3619 28952.000000 95 | -------------------------------------------------------------------------------- /neurolib/data/datasets/hcp/subjects/131217/structural/waytotal.txt: -------------------------------------------------------------------------------- 1 | 10029010 2 | 9567402 3 | 14345542 4 | 14666385 5 | 13726440 6 | 14556491 7 | 4070420 8 | 4855998 9 | 8309629 10 | 6869615 11 | 3412607 12 | 3225286 13 | 4279604 14 | 5197281 15 | 6048160 16 | 8592433 17 | 1191209 18 | 1304188 19 | 8388457 20 | 7913735 21 | 2756927 22 | 3545006 23 | 2486119 24 | 2505470 25 | 1408548 26 | 1474018 27 | 1527587 28 | 1876431 29 | 2338682 30 | 2094045 31 | 861843 32 | 609119 33 | 7437285 34 | 7102839 35 | 5646320 36 | 5607684 37 | 7100203 38 | 8838820 39 | 2037793 40 | 1572826 41 | 3436210 42 | 3954947 43 | 3199689 44 | 4329844 45 | 1019179 46 | 1167394 47 | 7247327 48 | 7132141 49 | 6081742 50 | 6090320 51 | 6754118 52 | 7418593 53 | 5842129 54 | 5886012 55 | 12099289 56 | 8065808 57 | 4097031 58 | 3986392 59 | 7552408 60 | 8697303 61 | 11436749 62 | 10658274 63 | 5939706 64 | 5716560 65 | 8671430 66 | 4244130 67 | 4656214 68 | 5688472 69 | 4178319 70 | 6044349 71 | 11677681 72 | 11760680 73 | 3326926 74 | 2757000 75 | 2536556 76 | 2645428 77 | 4354497 78 | 4658250 79 | 975505 80 | 879720 81 | 4489525 82 | 4472890 83 | 981139 84 | 1076749 85 | 8519938 86 | 9732200 87 | 3116950 88 | 3848281 89 | 15381727 90 | 14774349 91 | 2012786 92 | 3601345 93 | 9914893 94 | 9871503 95 | -------------------------------------------------------------------------------- /neurolib/data/datasets/hcp/subjects/211619/functional/TC_rsfMRI_REST1_LR.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neurolib-dev/neurolib/9b6b2b8f082c0cfa212f05576ead55bf23046d6f/neurolib/data/datasets/hcp/subjects/211619/functional/TC_rsfMRI_REST1_LR.mat -------------------------------------------------------------------------------- /neurolib/data/datasets/hcp/subjects/211619/structural/DTI_CM.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neurolib-dev/neurolib/9b6b2b8f082c0cfa212f05576ead55bf23046d6f/neurolib/data/datasets/hcp/subjects/211619/structural/DTI_CM.mat -------------------------------------------------------------------------------- /neurolib/data/datasets/hcp/subjects/211619/structural/DTI_LEN.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neurolib-dev/neurolib/9b6b2b8f082c0cfa212f05576ead55bf23046d6f/neurolib/data/datasets/hcp/subjects/211619/structural/DTI_LEN.mat -------------------------------------------------------------------------------- /neurolib/data/datasets/hcp/subjects/211619/structural/nvoxel.txt: -------------------------------------------------------------------------------- 1 | 3685 29480.000000 2 | 3618 28944.000000 3 | 5320 42560.000000 4 | 5570 44560.000000 5 | 4662 37296.000000 6 | 5279 42232.000000 7 | 1236 9888.000000 8 | 1659 13272.000000 9 | 2697 21576.000000 10 | 2277 18216.000000 11 | 914 7312.000000 12 | 952 7616.000000 13 | 1215 9720.000000 14 | 1591 12728.000000 15 | 2090 16720.000000 16 | 2523 20184.000000 17 | 456 3648.000000 18 | 412 3296.000000 19 | 3019 24152.000000 20 | 2392 19136.000000 21 | 876 7008.000000 22 | 888 
7104.000000 23 | 1020 8160.000000 24 | 861 6888.000000 25 | 740 5920.000000 26 | 792 6336.000000 27 | 673 5384.000000 28 | 925 7400.000000 29 | 699 5592.000000 30 | 700 5600.000000 31 | 278 2224.000000 32 | 280 2240.000000 33 | 2048 16384.000000 34 | 2137 17096.000000 35 | 1588 12704.000000 36 | 1417 11336.000000 37 | 1975 15800.000000 38 | 2584 20672.000000 39 | 550 4400.000000 40 | 498 3984.000000 41 | 1133 9064.000000 42 | 1130 9040.000000 43 | 1232 9856.000000 44 | 1409 11272.000000 45 | 270 2160.000000 46 | 349 2792.000000 47 | 2335 18680.000000 48 | 2049 16392.000000 49 | 1603 12824.000000 50 | 1717 13736.000000 51 | 2358 18864.000000 52 | 2580 20640.000000 53 | 1776 14208.000000 54 | 1506 12048.000000 55 | 3382 27056.000000 56 | 2347 18776.000000 57 | 1231 9848.000000 58 | 1154 9232.000000 59 | 2613 20904.000000 60 | 2900 23200.000000 61 | 4076 32608.000000 62 | 4083 32664.000000 63 | 2361 18888.000000 64 | 2337 18696.000000 65 | 2615 20920.000000 66 | 1487 11896.000000 67 | 1384 11072.000000 68 | 2000 16000.000000 69 | 1277 10216.000000 70 | 1992 15936.000000 71 | 3574 28592.000000 72 | 3640 29120.000000 73 | 1375 11000.000000 74 | 1028 8224.000000 75 | 1185 9480.000000 76 | 1234 9872.000000 77 | 1153 9224.000000 78 | 1061 8488.000000 79 | 419 3352.000000 80 | 445 3560.000000 81 | 1178 9424.000000 82 | 1190 9520.000000 83 | 374 2992.000000 84 | 415 3320.000000 85 | 2472 19776.000000 86 | 3269 26152.000000 87 | 1534 12272.000000 88 | 1543 12344.000000 89 | 4898 39184.000000 90 | 4512 36096.000000 91 | 976 7808.000000 92 | 1351 10808.000000 93 | 3593 28744.000000 94 | 3614 28912.000000 95 | -------------------------------------------------------------------------------- /neurolib/data/datasets/hcp/subjects/211619/structural/waytotal.txt: -------------------------------------------------------------------------------- 1 | 10398538 2 | 9246269 3 | 15708299 4 | 15666068 5 | 14393038 6 | 14138492 7 | 4304837 8 | 5290726 9 | 7784857 10 | 6449042 11 | 3325567 12 | 3132574 13 | 4467752 14 | 5947665 15 | 6576272 16 | 8827763 17 | 1174618 18 | 1276997 19 | 9175506 20 | 8016282 21 | 2794122 22 | 3560872 23 | 2796917 24 | 2509063 25 | 1484810 26 | 1475818 27 | 1710328 28 | 1730097 29 | 2472165 30 | 1970220 31 | 808176 32 | 644888 33 | 7722799 34 | 7504416 35 | 5098526 36 | 4960826 37 | 7275749 38 | 9619263 39 | 1964768 40 | 1632464 41 | 4109021 42 | 4481319 43 | 3729823 44 | 3981459 45 | 1005586 46 | 1178714 47 | 7652316 48 | 7808286 49 | 5264653 50 | 6219350 51 | 6705933 52 | 7155246 53 | 5180420 54 | 6007842 55 | 12023276 56 | 7794323 57 | 3507675 58 | 3128813 59 | 7334646 60 | 8528352 61 | 11862416 62 | 10866126 63 | 6846400 64 | 6496392 65 | 9082769 66 | 5420382 67 | 4838296 68 | 7192947 69 | 4352050 70 | 6752078 71 | 11597640 72 | 12793903 73 | 3023700 74 | 2915516 75 | 1855776 76 | 2010063 77 | 4099054 78 | 4850462 79 | 1231868 80 | 1115559 81 | 4311681 82 | 4351596 83 | 1002149 84 | 1100464 85 | 9407529 86 | 11348068 87 | 3864930 88 | 3709641 89 | 17054864 90 | 15321522 91 | 2659428 92 | 3736468 93 | 9387413 94 | 9854565 95 | -------------------------------------------------------------------------------- /neurolib/data/datasets/hcp/subjects/213522/functional/TC_rsfMRI_REST1_LR.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neurolib-dev/neurolib/9b6b2b8f082c0cfa212f05576ead55bf23046d6f/neurolib/data/datasets/hcp/subjects/213522/functional/TC_rsfMRI_REST1_LR.mat 
-------------------------------------------------------------------------------- /neurolib/data/datasets/hcp/subjects/213522/structural/DTI_CM.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neurolib-dev/neurolib/9b6b2b8f082c0cfa212f05576ead55bf23046d6f/neurolib/data/datasets/hcp/subjects/213522/structural/DTI_CM.mat -------------------------------------------------------------------------------- /neurolib/data/datasets/hcp/subjects/213522/structural/DTI_LEN.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neurolib-dev/neurolib/9b6b2b8f082c0cfa212f05576ead55bf23046d6f/neurolib/data/datasets/hcp/subjects/213522/structural/DTI_LEN.mat -------------------------------------------------------------------------------- /neurolib/data/datasets/hcp/subjects/213522/structural/nvoxel.txt: -------------------------------------------------------------------------------- 1 | 3928 31424.000000 2 | 4025 32200.000000 3 | 5405 43240.000000 4 | 6013 48104.000000 5 | 5133 41064.000000 6 | 5784 46272.000000 7 | 1429 11432.000000 8 | 1763 14104.000000 9 | 2793 22344.000000 10 | 2493 19944.000000 11 | 1000 8000.000000 12 | 1110 8880.000000 13 | 1327 10616.000000 14 | 1680 13440.000000 15 | 2440 19520.000000 16 | 2653 21224.000000 17 | 427 3416.000000 18 | 442 3536.000000 19 | 3430 27440.000000 20 | 2587 20696.000000 21 | 895 7160.000000 22 | 1072 8576.000000 23 | 1015 8120.000000 24 | 1008 8064.000000 25 | 689 5512.000000 26 | 871 6968.000000 27 | 670 5360.000000 28 | 877 7016.000000 29 | 755 6040.000000 30 | 788 6304.000000 31 | 329 2632.000000 32 | 289 2312.000000 33 | 2422 19376.000000 34 | 2323 18584.000000 35 | 1794 14352.000000 36 | 1692 13536.000000 37 | 2282 18256.000000 38 | 2616 20928.000000 39 | 656 5248.000000 40 | 498 3984.000000 41 | 1307 10456.000000 42 | 1269 10152.000000 43 | 1341 10728.000000 44 | 1652 13216.000000 45 | 314 2512.000000 46 | 406 3248.000000 47 | 2380 19040.000000 48 | 2230 17840.000000 49 | 1699 13592.000000 50 | 1852 14816.000000 51 | 2455 19640.000000 52 | 2837 22696.000000 53 | 1635 13080.000000 54 | 1880 15040.000000 55 | 3838 30704.000000 56 | 2549 20392.000000 57 | 1239 9912.000000 58 | 1248 9984.000000 59 | 3088 24704.000000 60 | 3262 26096.000000 61 | 4391 35128.000000 62 | 4455 35640.000000 63 | 2306 18448.000000 64 | 2696 21568.000000 65 | 2860 22880.000000 66 | 1690 13520.000000 67 | 1534 12272.000000 68 | 2392 19136.000000 69 | 1491 11928.000000 70 | 2151 17208.000000 71 | 4131 33048.000000 72 | 3991 31928.000000 73 | 1637 13096.000000 74 | 1090 8720.000000 75 | 1128 9024.000000 76 | 1281 10248.000000 77 | 1227 9816.000000 78 | 1346 10768.000000 79 | 383 3064.000000 80 | 391 3128.000000 81 | 1134 9072.000000 82 | 1209 9672.000000 83 | 365 2920.000000 84 | 424 3392.000000 85 | 2775 22200.000000 86 | 3572 28576.000000 87 | 1695 13560.000000 88 | 1769 14152.000000 89 | 5502 44016.000000 90 | 4990 39920.000000 91 | 1015 8120.000000 92 | 1506 12048.000000 93 | 3712 29696.000000 94 | 4070 32560.000000 95 | -------------------------------------------------------------------------------- /neurolib/data/datasets/hcp/subjects/213522/structural/waytotal.txt: -------------------------------------------------------------------------------- 1 | 10377619 2 | 9866963 3 | 15757546 4 | 16605214 5 | 14339746 6 | 14434958 7 | 3855505 8 | 4963272 9 | 8095608 10 | 5971674 11 | 3194813 12 | 2573835 13 | 4027106 14 | 5341351 15 | 7349403 16 | 8252102 
17 | 1238279 18 | 1253264 19 | 9542891 20 | 7546482 21 | 2844859 22 | 3540304 23 | 2775048 24 | 2606047 25 | 1563271 26 | 1880865 27 | 2028979 28 | 2432969 29 | 2353850 30 | 2030339 31 | 675211 32 | 303222 33 | 7393958 34 | 7231027 35 | 5215103 36 | 5495140 37 | 7649337 38 | 8593193 39 | 1922108 40 | 1428147 41 | 3689391 42 | 4006962 43 | 3288407 44 | 4049712 45 | 1044960 46 | 1168995 47 | 8302125 48 | 7516776 49 | 5685215 50 | 6255337 51 | 6840748 52 | 7115164 53 | 5384956 54 | 6074972 55 | 11585840 56 | 8105013 57 | 4191697 58 | 3943834 59 | 7597057 60 | 8002390 61 | 12733062 62 | 12200012 63 | 6539690 64 | 6302187 65 | 9043370 66 | 4986317 67 | 4993668 68 | 6869375 69 | 4407649 70 | 5808615 71 | 11631184 72 | 11864306 73 | 3746038 74 | 2845462 75 | 3085425 76 | 3078070 77 | 4045517 78 | 4476885 79 | 872217 80 | 892947 81 | 4848106 82 | 4576826 83 | 999681 84 | 1084781 85 | 9523059 86 | 11547451 87 | 4037583 88 | 4114694 89 | 17249718 90 | 15463079 91 | 2746578 92 | 3928949 93 | 10337362 94 | 10203052 95 | -------------------------------------------------------------------------------- /neurolib/data/datasets/hcp/subjects/377451/functional/TC_rsfMRI_REST1_LR.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neurolib-dev/neurolib/9b6b2b8f082c0cfa212f05576ead55bf23046d6f/neurolib/data/datasets/hcp/subjects/377451/functional/TC_rsfMRI_REST1_LR.mat -------------------------------------------------------------------------------- /neurolib/data/datasets/hcp/subjects/377451/structural/DTI_CM.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neurolib-dev/neurolib/9b6b2b8f082c0cfa212f05576ead55bf23046d6f/neurolib/data/datasets/hcp/subjects/377451/structural/DTI_CM.mat -------------------------------------------------------------------------------- /neurolib/data/datasets/hcp/subjects/377451/structural/DTI_LEN.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neurolib-dev/neurolib/9b6b2b8f082c0cfa212f05576ead55bf23046d6f/neurolib/data/datasets/hcp/subjects/377451/structural/DTI_LEN.mat -------------------------------------------------------------------------------- /neurolib/data/datasets/hcp/subjects/377451/structural/nvoxel.txt: -------------------------------------------------------------------------------- 1 | 3674 29392.000000 2 | 3625 29000.000000 3 | 5550 44400.000000 4 | 5558 44464.000000 5 | 5003 40024.000000 6 | 5483 43864.000000 7 | 1377 11016.000000 8 | 1767 14136.000000 9 | 2463 19704.000000 10 | 2388 19104.000000 11 | 1020 8160.000000 12 | 1176 9408.000000 13 | 1220 9760.000000 14 | 1463 11704.000000 15 | 2082 16656.000000 16 | 2652 21216.000000 17 | 401 3208.000000 18 | 470 3760.000000 19 | 3071 24568.000000 20 | 2512 20096.000000 21 | 814 6512.000000 22 | 963 7704.000000 23 | 937 7496.000000 24 | 946 7568.000000 25 | 728 5824.000000 26 | 772 6176.000000 27 | 540 4320.000000 28 | 723 5784.000000 29 | 748 5984.000000 30 | 776 6208.000000 31 | 274 2192.000000 32 | 215 1720.000000 33 | 2331 18648.000000 34 | 2185 17480.000000 35 | 1515 12120.000000 36 | 1649 13192.000000 37 | 1998 15984.000000 38 | 2548 20384.000000 39 | 514 4112.000000 40 | 520 4160.000000 41 | 1186 9488.000000 42 | 1266 10128.000000 43 | 1310 10480.000000 44 | 1420 11360.000000 45 | 254 2032.000000 46 | 306 2448.000000 47 | 2510 20080.000000 48 | 2078 16624.000000 49 | 1715 13720.000000 50 | 1843 14744.000000 
51 | 2389 19112.000000 52 | 2609 20872.000000 53 | 1725 13800.000000 54 | 1587 12696.000000 55 | 3536 28288.000000 56 | 2345 18760.000000 57 | 1237 9896.000000 58 | 1103 8824.000000 59 | 2762 22096.000000 60 | 2906 23248.000000 61 | 4059 32472.000000 62 | 4130 33040.000000 63 | 2269 18152.000000 64 | 2374 18992.000000 65 | 2717 21736.000000 66 | 1497 11976.000000 67 | 1482 11856.000000 68 | 2260 18080.000000 69 | 1345 10760.000000 70 | 2037 16296.000000 71 | 3666 29328.000000 72 | 3739 29912.000000 73 | 1465 11720.000000 74 | 1061 8488.000000 75 | 1244 9952.000000 76 | 1203 9624.000000 77 | 1181 9448.000000 78 | 1336 10688.000000 79 | 394 3152.000000 80 | 341 2728.000000 81 | 1129 9032.000000 82 | 1127 9016.000000 83 | 394 3152.000000 84 | 418 3344.000000 85 | 2679 21432.000000 86 | 3139 25112.000000 87 | 1551 12408.000000 88 | 1680 13440.000000 89 | 4973 39784.000000 90 | 4467 35736.000000 91 | 1052 8416.000000 92 | 1532 12256.000000 93 | 3473 27784.000000 94 | 3717 29736.000000 95 | -------------------------------------------------------------------------------- /neurolib/data/datasets/hcp/subjects/377451/structural/waytotal.txt: -------------------------------------------------------------------------------- 1 | 11798647 2 | 9995560 3 | 16318562 4 | 16112929 5 | 13299905 6 | 12821532 7 | 4287302 8 | 4762230 9 | 8293835 10 | 5710949 11 | 3369923 12 | 3018002 13 | 4209445 14 | 5582641 15 | 7582486 16 | 9101922 17 | 1176494 18 | 1263999 19 | 10732865 20 | 8627477 21 | 2901662 22 | 3541797 23 | 2492836 24 | 2163959 25 | 1454816 26 | 1329954 27 | 1699039 28 | 1863042 29 | 2522941 30 | 2037021 31 | 794391 32 | 598550 33 | 7371793 34 | 6765325 35 | 5405219 36 | 5338322 37 | 7746932 38 | 9191657 39 | 1931228 40 | 1534293 41 | 3576365 42 | 3560822 43 | 3164411 44 | 4211001 45 | 946592 46 | 1185043 47 | 8314999 48 | 7949125 49 | 6148414 50 | 6155163 51 | 7069036 52 | 7655192 53 | 5305437 54 | 5319879 55 | 11158865 56 | 6964595 57 | 3804841 58 | 3668137 59 | 7357588 60 | 8165837 61 | 13123086 62 | 11614846 63 | 6063869 64 | 5333274 65 | 8876930 66 | 5555381 67 | 4924717 68 | 6850330 69 | 4509910 70 | 5758798 71 | 11962221 72 | 12740125 73 | 3973506 74 | 3407653 75 | 2437203 76 | 3316161 77 | 4216615 78 | 4648679 79 | 1014852 80 | 782144 81 | 4430730 82 | 4517691 83 | 932393 84 | 1076259 85 | 8678946 86 | 10889503 87 | 3970034 88 | 3729677 89 | 16992992 90 | 15140951 91 | 2674366 92 | 3732039 93 | 10152079 94 | 8732410 95 | -------------------------------------------------------------------------------- /neurolib/models/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neurolib-dev/neurolib/9b6b2b8f082c0cfa212f05576ead55bf23046d6f/neurolib/models/__init__.py -------------------------------------------------------------------------------- /neurolib/models/aln/__init__.py: -------------------------------------------------------------------------------- 1 | from .model import ALNModel 2 | -------------------------------------------------------------------------------- /neurolib/models/aln/aln-precalc/quantities_cascade.h5: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neurolib-dev/neurolib/9b6b2b8f082c0cfa212f05576ead55bf23046d6f/neurolib/models/aln/aln-precalc/quantities_cascade.h5 -------------------------------------------------------------------------------- /neurolib/models/aln/model.py: 
-------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | from . import loadDefaultParams as dp 4 | from . import timeIntegration as ti 5 | from ..model import Model 6 | 7 | 8 | class ALNModel(Model): 9 | """ 10 | Multi-population mean-field model with exciatory and inhibitory neurons per population. 11 | """ 12 | 13 | name = "aln" 14 | description = "Adaptive linear-nonlinear model of exponential integrate-and-fire neurons" 15 | 16 | init_vars = [ 17 | "rates_exc_init", 18 | "rates_inh_init", 19 | "mufe_init", 20 | "mufi_init", 21 | "IA_init", 22 | "seem_init", 23 | "seim_init", 24 | "siem_init", 25 | "siim_init", 26 | "seev_init", 27 | "seiv_init", 28 | "siev_init", 29 | "siiv_init", 30 | "mue_ou", 31 | "mui_ou", 32 | ] 33 | 34 | state_vars = [ 35 | "rates_exc", 36 | "rates_inh", 37 | "mufe", 38 | "mufi", 39 | "IA", 40 | "seem", 41 | "seim", 42 | "siem", 43 | "siim", 44 | "seev", 45 | "seiv", 46 | "siev", 47 | "siiv", 48 | "mue_ou", 49 | "mui_ou", 50 | ] 51 | output_vars = ["rates_exc", "rates_inh", "IA"] 52 | default_output = "rates_exc" 53 | input_vars = ["ext_exc_current", "ext_inh_current", "ext_exc_rate", "ext_inh_rate"] 54 | default_input = "ext_exc_rate" 55 | 56 | def __init__(self, params=None, Cmat=None, Dmat=None, lookupTableFileName=None, seed=None): 57 | """ 58 | :param params: parameter dictionary of the model 59 | :param Cmat: Global connectivity matrix (connects E to E) 60 | :param Dmat: Distance matrix between all nodes (in mm) 61 | :param lookupTableFileName: Filename for precomputed transfer functions and tables 62 | :param seed: Random number generator seed 63 | """ 64 | 65 | # Global attributes 66 | self.Cmat = Cmat # Connectivity matrix 67 | self.Dmat = Dmat # Delay matrix 68 | self.lookupTableFileName = lookupTableFileName # Filename for aLN lookup functions 69 | self.seed = seed # Random seed 70 | 71 | integration = ti.timeIntegration 72 | 73 | # load default parameters if none were given 74 | if params is None: 75 | params = dp.loadDefaultParams( 76 | Cmat=self.Cmat, Dmat=self.Dmat, lookupTableFileName=self.lookupTableFileName, seed=self.seed 77 | ) 78 | 79 | # Initialize base class Model 80 | super().__init__(integration=integration, params=params) 81 | 82 | def getMaxDelay(self): 83 | # compute maximum delay of model 84 | ndt_de = round(self.params["de"] / self.params["dt"]) 85 | ndt_di = round(self.params["di"] / self.params["dt"]) 86 | max_dmat_delay = super().getMaxDelay() 87 | return int(max(max_dmat_delay, ndt_de, ndt_di)) 88 | -------------------------------------------------------------------------------- /neurolib/models/bold/__init__.py: -------------------------------------------------------------------------------- 1 | from .model import BOLDModel 2 | from .timeIntegration import simulateBOLD 3 | -------------------------------------------------------------------------------- /neurolib/models/bold/model.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | from .timeIntegration import simulateBOLD 4 | 5 | 6 | class BOLDModel: 7 | """ 8 | Balloon-Windkessel BOLD simulator class. 9 | BOLD activity is downsampled to 0.5 Hz by default. 10 | 11 | BOLD simulation results are saved in t_BOLD, BOLD instance attributes. 
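A minimal usage sketch tying the ALNModel defined above to the BOLD machinery below; parameter and attribute names follow the neurolib examples (treat them as assumptions here), and the 5-minute duration is an arbitrary choice:

    from neurolib.models.aln import ALNModel

    model = ALNModel()
    model.params["duration"] = 5 * 60 * 1000     # ms
    model.run(chunkwise=True, bold=True)         # integrate in chunks and feed the rates into BOLDModel
    rates = model.rates_exc                      # default output of the ALN model, in Hz
    t_bold, bold = model.BOLD.t_BOLD, model.BOLD.BOLD   # BOLD signal downsampled to 0.5 Hz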
12 | """ 13 | 14 | def __init__(self, N, dt, normalize_input=False, normalize_max=50): 15 | self.N = N 16 | self.dt = dt # dt of input activity in ms 17 | self.samplingRate_NDt = int(round(2000 / dt)) # downsample (0.5 Hz fMRI sampling rate) 18 | 19 | self.normalize_input = normalize_input 20 | self.normalize_max = normalize_max 21 | # return arrays 22 | self.t_BOLD = np.array([], dtype="f", ndmin=2) 23 | self.BOLD = np.array([], dtype="f", ndmin=2) 24 | self.all_Rates = np.array([], dtype="f", ndmin=2) 25 | self.BOLD_chunk = np.array([], dtype="f", ndmin=2) 26 | 27 | self.idxLastT = 0 # Index of the last computed t 28 | 29 | # initialize BOLD model variables 30 | self.X_BOLD = np.ones((N,)) 31 | # Vasso dilatory signal 32 | self.F_BOLD = np.ones((N,)) 33 | # Blood flow 34 | self.Q_BOLD = np.ones((N,)) 35 | # Deoxyhemoglobin 36 | self.V_BOLD = np.ones((N,)) 37 | # Blood volume 38 | 39 | def run(self, activity): 40 | """Runs the Balloon-Windkessel BOLD simulation. 41 | 42 | Parameters: 43 | :param activity: Neuronal firing rate in Hz 44 | 45 | :param activity: Neuronal firing rate in Hz 46 | :type activity: numpy.ndarray 47 | """ 48 | 49 | # Compute the BOLD signal for the chunk 50 | BOLD_chunk, self.X_BOLD, self.F_BOLD, self.Q_BOLD, self.V_BOLD = simulateBOLD( 51 | activity, 52 | self.dt * 1e-3, 53 | X=self.X_BOLD, 54 | F=self.F_BOLD, 55 | Q=self.Q_BOLD, 56 | V=self.V_BOLD, 57 | ) 58 | 59 | # downsample BOLD 60 | BOLD_resampled = BOLD_chunk[ 61 | :, self.samplingRate_NDt - np.mod(self.idxLastT - 1, self.samplingRate_NDt) :: self.samplingRate_NDt 62 | ] 63 | t_new_idx = self.idxLastT + np.arange(activity.shape[1]) 64 | t_BOLD_resampled = ( 65 | t_new_idx[self.samplingRate_NDt - np.mod(self.idxLastT - 1, self.samplingRate_NDt) :: self.samplingRate_NDt] 66 | * self.dt 67 | ) 68 | 69 | self.t_BOLD = t_BOLD_resampled 70 | self.BOLD = BOLD_resampled 71 | self.BOLD_chunk = BOLD_resampled 72 | 73 | self.idxLastT = self.idxLastT + activity.shape[1] 74 | -------------------------------------------------------------------------------- /neurolib/models/bold/timeIntegration.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import numba 3 | 4 | 5 | def simulateBOLD(Z, dt, voxelCounts=None, X=None, F=None, Q=None, V=None): 6 | """Simulate BOLD activity using the Balloon-Windkessel model. 7 | See Friston 2000, Friston 2003 and Deco 2013 for reference on how the BOLD signal is simulated. 8 | The returned BOLD signal should be downsampled to be comparable to a recorded fMRI signal. 9 | 10 | :param Z: Synaptic activity 11 | :type Z: numpy.ndarray 12 | :param dt: dt of input activity in s 13 | :type dt: float 14 | :param voxelCounts: Number of voxels in each region (not used yet!) 
# TODO 15 | :type voxelCounts: numpy.ndarray 16 | :param X: Initial values of Vasodilatory signal, defaults to None 17 | :type X: numpy.ndarray, optional 18 | :param F: Initial values of Blood flow, defaults to None 19 | :type F: numpy.ndarray, optional 20 | :param Q: Initial values of Deoxyhemoglobin, defaults to None 21 | :type Q: numpy.ndarray, optional 22 | :param V: Initial values of Blood volume, defaults to None 23 | :type V: numpy.ndarray, optional 24 | 25 | :return: BOLD, X, F, Q, V 26 | :rtype: (numpy.ndarray,) 27 | """ 28 | 29 | N = np.shape(Z)[0] 30 | 31 | # Balloon-Windkessel model parameters (from Friston 2003): 32 | # Friston paper: Nonlinear responses in fMRI: The balloon model, Volterra kernels, and other hemodynamics 33 | # Note: the distribution of each Balloon-Windkessel models parameters are given per voxel 34 | # Since we usually average the empirical fMRI of each voxel for a given area, the standard 35 | # deviation of the gaussian distribution should be divided by the number of voxels in each area 36 | # voxelCountsSqrtInv = 1 / np.sqrt(voxelCounts) 37 | # 38 | # See Friston 2003, Table 1 mean values and variances: 39 | # rho = np.random.normal(0.34, np.sqrt(0.0024) / np.sqrt( sum(voxelCounts) ) ) # Capillary resting net oxygen extraction 40 | # alpha = np.random.normal(0.32, np.sqrt(0.0015) / np.sqrt( sum(voxelCounts) ) ) # Grubb's vessel stiffness exponent 41 | # V0 = 0.02 42 | # k1 = 7 * rho 43 | # k2 = 2.0 44 | # k3 = 2 * rho - 0.2 45 | # Gamma = np.random.normal(0.41 * np.ones(N), np.sqrt(0.002) * voxelCountsSqrtInv) # Rate constant for autoregulatory feedback by blood flow 46 | # K = np.random.normal(0.65 * np.ones(N), np.sqrt(0.015) * voxelCountsSqrtInv) # Vasodilatory signal decay 47 | # Tau = np.random.normal(0.98 * np.ones(N), np.sqrt(0.0568) * voxelCountsSqrtInv) # Transit time 48 | # 49 | # If no voxel counts are given, we can use scalar values for each region's parameter: 50 | rho = 0.34 # Capillary resting net oxygen extraction (dimensionless), E_0 in Friston2000 51 | alpha = 0.32 # Grubb's vessel stiffness exponent (dimensionless), \alpha in Friston2000 52 | V0 = 0.02 # Resting blood volume fraction (dimensionless) 53 | k1 = 7 * rho # (dimensionless) 54 | k2 = 2.0 # (dimensionless) 55 | k3 = 2 * rho - 0.2 # (dimensionless) 56 | Gamma = 0.41 * np.ones((N,)) # Rate constant for autoregulatory feedback by blood flow (1/s) 57 | K = 0.65 * np.ones((N,)) # Vasodilatory signal decay (1/s) 58 | Tau = 0.98 * np.ones((N,)) # Transit time (s) 59 | 60 | # initialize state variables 61 | # NOTE: We need to use np.copy() because these variables 62 | # will be overwritten later and numba doesn't like to do that 63 | # with anything that was defined outside the scope of the @njit'ed function 64 | X = np.zeros((N,)) if X is None else np.copy(X) # Vasso dilatory signal 65 | F = np.zeros((N,)) if F is None else np.copy(F) # Blood flow 66 | Q = np.zeros((N,)) if Q is None else np.copy(Q) # Deoxyhemoglobin 67 | V = np.zeros((N,)) if V is None else np.copy(V) # Blood volume 68 | 69 | BOLD = np.zeros(np.shape(Z)) 70 | # return integrateBOLD_numba(BOLD, X, Q, F, V, Z, dt, N, rho, alpha, V0, k1, k2, k3, Gamma, K, Tau) 71 | BOLD, X, F, Q, V = integrateBOLD_numba(BOLD, X, Q, F, V, Z, dt, N, rho, alpha, V0, k1, k2, k3, Gamma, K, Tau) 72 | return BOLD, X, F, Q, V 73 | 74 | 75 | @numba.njit 76 | def integrateBOLD_numba(BOLD, X, Q, F, V, Z, dt, N, rho, alpha, V0, k1, k2, k3, Gamma, K, Tau): 77 | """Integrate the Balloon-Windkessel model. 
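        A short, self-contained sketch of calling the simulateBOLD wrapper above directly
        (it is re-exported from neurolib.models.bold, as shown in the package's __init__.py;
        the input values here are arbitrary):

            import numpy as np
            from neurolib.models.bold import simulateBOLD

            N = 3
            Z = np.random.rand(N, 50000) * 0.01          # synaptic activity for 3 regions, dt = 1 ms
            # the BOLDModel class above starts the hemodynamic state variables at 1.0
            BOLD, X, F, Q, V = simulateBOLD(
                Z, dt=1e-3, X=np.ones(N), F=np.ones(N), Q=np.ones(N), V=np.ones(N)
            )
            print(BOLD.shape)                            # same shape as Z; downsample before comparing to fMRI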
78 | 79 | Reference: 80 | 81 | Friston et al. (2000), Nonlinear responses in fMRI: The balloon model, Volterra kernels, and other hemodynamics. 82 | Friston et al. (2003), Dynamic causal modeling 83 | 84 | Variable names in Friston2000: 85 | X = x1, Q = x4, V = x3, F = x2 86 | 87 | Friston2003: see Equation (3) 88 | 89 | NOTE: A very small constant EPS is added to F to avoid F become too small / negative 90 | and cause a floating point error in EQ. Q due to the exponent **(1 / F[j]) 91 | 92 | """ 93 | 94 | EPS = 1e-120 # epsilon for softening 95 | 96 | for i in range(len(Z[0, :])): # loop over all timesteps 97 | # component-wise loop for compatibilty with numba 98 | for j in range(N): # loop over all areas 99 | F[j] = max(F[j], EPS) 100 | 101 | X[j] = X[j] + dt * (Z[j, i] - K[j] * X[j] - Gamma[j] * (F[j] - 1)) 102 | Q[j] = Q[j] + dt / Tau[j] * (F[j] / rho * (1 - (1 - rho) ** (1 / F[j])) - Q[j] * V[j] ** (1 / alpha - 1)) 103 | V[j] = V[j] + dt / Tau[j] * (F[j] - V[j] ** (1 / alpha)) 104 | F[j] = F[j] + dt * X[j] 105 | 106 | BOLD[j, i] = V0 * (k1 * (1 - Q[j]) + k2 * (1 - Q[j] / V[j]) + k3 * (1 - V[j])) 107 | return BOLD, X, F, Q, V 108 | -------------------------------------------------------------------------------- /neurolib/models/fhn/__init__.py: -------------------------------------------------------------------------------- 1 | from .model import FHNModel 2 | -------------------------------------------------------------------------------- /neurolib/models/fhn/loadDefaultParams.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | from ...utils.collections import dotdict 4 | 5 | 6 | def loadDefaultParams(Cmat=None, Dmat=None, seed=None): 7 | """Load default parameters for the FHN model 8 | 9 | :param Cmat: Structural connectivity matrix (adjacency matrix) of coupling strengths, will be normalized to 1. If not given, then a single node simulation will be assumed, defaults to None 10 | :type Cmat: numpy.ndarray, optional 11 | :param Dmat: Fiber length matrix, will be used for computing the delay matrix together with the signal transmission speed parameter `signalV`, defaults to None 12 | :type Dmat: numpy.ndarray, optional 13 | :param seed: Seed for the random number generator, defaults to None 14 | :type seed: int, optional 15 | 16 | :return: A dictionary with the default parameters of the model 17 | :rtype: dict 18 | """ 19 | 20 | params = dotdict({}) 21 | 22 | ### runtime parameters 23 | params.dt = 0.1 # ms 0.1ms is reasonable 24 | params.duration = 2000 # Simulation duration (ms) 25 | np.random.seed(seed) # seed for RNG of noise and ICs 26 | params.seed = seed 27 | 28 | # ------------------------------------------------------------------------ 29 | # global whole-brain network parameters 30 | # ------------------------------------------------------------------------ 31 | 32 | # the coupling parameter determines how nodes are coupled. 
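    # Schematically (see timeIntegration for the exact implementation): with diffusive coupling
    # the network input to node i is K_gl * sum_j Cmat[i, j] * (x_j(t - D_ij) - x_i(t)),
    # while additive coupling sums the delayed inputs K_gl * sum_j Cmat[i, j] * x_j(t - D_ij)
    # without subtracting the node's own state.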
33 | # "diffusive" for diffusive coupling, "additive" for additive coupling 34 | params.coupling = "diffusive" 35 | 36 | # signal transmission speec between areas 37 | params.signalV = 20.0 38 | params.K_gl = 0.6 # global coupling strength 39 | 40 | if Cmat is None: 41 | params.N = 1 42 | params.Cmat = np.zeros((1, 1)) 43 | params.lengthMat = np.zeros((1, 1)) 44 | 45 | else: 46 | params.Cmat = Cmat.copy() # coupling matrix 47 | np.fill_diagonal(params.Cmat, 0) # no self connections 48 | params.N = len(params.Cmat) # number of nodes 49 | params.lengthMat = Dmat 50 | 51 | # ------------------------------------------------------------------------ 52 | # local node parameters 53 | # ------------------------------------------------------------------------ 54 | 55 | # external input parameters: 56 | params.tau_ou = 5.0 # ms Timescale of the Ornstein-Uhlenbeck noise process 57 | params.sigma_ou = 0.0 # mV/ms/sqrt(ms) noise intensity 58 | params.x_ou_mean = 0.0 # mV/ms (OU process) [0-5] 59 | params.y_ou_mean = 0.0 # mV/ms (OU process) [0-5] 60 | 61 | # neural mass model parameters 62 | params.alpha = 3.0 # Eqpsilon in Kostova et al. (2004) FitzHugh–Nagumo revisited: Types of bifurcations, periodical forcing and stability regions by a Lyapunov functional 63 | params.beta = 4.0 # eps(1+lam) 64 | params.gamma = -1.5 # lam eps 65 | params.delta = 0.0 66 | params.epsilon = 0.5 # a 67 | params.tau = 20.0 68 | # ------------------------------------------------------------------------ 69 | 70 | params.xs_init = 0.05 * np.random.uniform(0, 1, (params.N, 1)) 71 | params.ys_init = 0.05 * np.random.uniform(0, 1, (params.N, 1)) 72 | 73 | # Ornstein-Uhlenbeck noise state variables 74 | params.x_ou = np.zeros((params.N,)) 75 | params.y_ou = np.zeros((params.N,)) 76 | 77 | # values of the external inputs 78 | params.x_ext = np.ones((params.N,)) 79 | params.y_ext = np.zeros((params.N,)) 80 | 81 | return params 82 | -------------------------------------------------------------------------------- /neurolib/models/fhn/model.py: -------------------------------------------------------------------------------- 1 | from . import loadDefaultParams as dp 2 | from . import timeIntegration as ti 3 | from ..model import Model 4 | 5 | 6 | class FHNModel(Model): 7 | """ 8 | Fitz-Hugh Nagumo oscillator. 
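    Example, a minimal network sketch (the connectivity and fiber-length values are arbitrary
    placeholders; outputs are accessible as attributes, e.g. model.x, as in the other neurolib models):

        import numpy as np
        from neurolib.models.fhn import FHNModel

        Cmat = np.random.rand(4, 4)
        np.fill_diagonal(Cmat, 0)
        Dmat = np.random.rand(4, 4) * 100      # fiber lengths, converted to delays via signalV

        model = FHNModel(Cmat=Cmat, Dmat=Dmat)
        model.params["duration"] = 1000        # ms
        model.run()
        x = model.x                            # default output variable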
9 | """ 10 | 11 | name = "fhn" 12 | description = "Fitz-Hugh Nagumo oscillator" 13 | 14 | init_vars = ["xs_init", "ys_init", "x_ou", "y_ou"] 15 | state_vars = ["x", "y", "x_ou", "y_ou"] 16 | output_vars = ["x", "y"] 17 | default_output = "x" 18 | input_vars = ["x_ext", "y_ext"] 19 | default_input = "x_ext" 20 | 21 | # because this is not a rate model, the input 22 | # to the bold model must be transformed 23 | boldInputTransform = lambda self, x: x * 50 24 | 25 | def __init__(self, params=None, Cmat=None, Dmat=None, seed=None): 26 | 27 | self.Cmat = Cmat 28 | self.Dmat = Dmat 29 | self.seed = seed 30 | 31 | # the integration function must be passed 32 | integration = ti.timeIntegration 33 | 34 | # load default parameters if none were given 35 | if params is None: 36 | params = dp.loadDefaultParams(Cmat=self.Cmat, Dmat=self.Dmat, seed=self.seed) 37 | 38 | # Initialize base class Model 39 | super().__init__(integration=integration, params=params) 40 | -------------------------------------------------------------------------------- /neurolib/models/hopf/__init__.py: -------------------------------------------------------------------------------- 1 | from .model import HopfModel 2 | -------------------------------------------------------------------------------- /neurolib/models/hopf/loadDefaultParams.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | from ...utils.collections import dotdict 4 | 5 | 6 | def loadDefaultParams(Cmat=None, Dmat=None, seed=None): 7 | """Load default parameters for the Hopf model 8 | 9 | :param Cmat: Structural connectivity matrix (adjacency matrix) of coupling strengths, will be normalized to 1. If not given, then a single node simulation will be assumed, defaults to None 10 | :type Cmat: numpy.ndarray, optional 11 | :param Dmat: Fiber length matrix, will be used for computing the delay matrix together with the signal transmission speed parameter `signalV`, defaults to None 12 | :type Dmat: numpy.ndarray, optional 13 | :param seed: Seed for the random number generator, defaults to None 14 | :type seed: int, optional 15 | 16 | :return: A dictionary with the default parameters of the model 17 | :rtype: dict 18 | """ 19 | 20 | params = dotdict({}) 21 | 22 | ### runtime parameters 23 | params.dt = 0.1 # ms 0.1ms is reasonable 24 | params.duration = 2000 # Simulation duration (ms) 25 | np.random.seed(seed) # seed for RNG of noise and ICs 26 | # set seed to 0 if None, pypet will complain otherwise 27 | params.seed = seed 28 | 29 | # ------------------------------------------------------------------------ 30 | # global whole-brain network parameters 31 | # ------------------------------------------------------------------------ 32 | 33 | # the coupling parameter determines how nodes are coupled. 
34 | # "diffusive" for diffusive coupling, "additive" for additive coupling 35 | params.coupling = "diffusive" 36 | 37 | params.signalV = 20.0 38 | params.K_gl = 0.6 # global coupling strength 39 | 40 | if Cmat is None: 41 | params.N = 1 42 | params.Cmat = np.zeros((1, 1)) 43 | params.lengthMat = np.zeros((1, 1)) 44 | 45 | else: 46 | params.Cmat = Cmat.copy() # coupling matrix 47 | np.fill_diagonal(params.Cmat, 0) # no self connections 48 | params.N = len(params.Cmat) # number of nodes 49 | params.lengthMat = Dmat 50 | 51 | # ------------------------------------------------------------------------ 52 | # local node parameters 53 | # ------------------------------------------------------------------------ 54 | 55 | # external input parameters: 56 | params.tau_ou = 5.0 # ms Timescale of the Ornstein-Uhlenbeck noise process 57 | params.sigma_ou = 0.0 # mV/ms/sqrt(ms) noise intensity 58 | params.x_ou_mean = 0.0 # mV/ms (OU process) [0-5] 59 | params.y_ou_mean = 0.0 # mV/ms (OU process) [0-5] 60 | 61 | # neural mass model parameters 62 | params.a = 0.25 # Hopf bifurcation parameter 63 | params.w = 0.2 # Oscillator frequency, 32 Hz at w = 0.2 64 | 65 | # ------------------------------------------------------------------------ 66 | 67 | # initial values of the state variables 68 | params.xs_init = 0.5 * np.random.uniform(-1, 1, (params.N, 1)) 69 | params.ys_init = 0.5 * np.random.uniform(-1, 1, (params.N, 1)) 70 | 71 | # Ornstein-Uhlenbeck noise state variables 72 | params.x_ou = np.zeros((params.N,)) 73 | params.y_ou = np.zeros((params.N,)) 74 | 75 | # values of the external inputs 76 | params.x_ext = np.zeros((params.N,)) 77 | params.y_ext = np.zeros((params.N,)) 78 | 79 | return params 80 | -------------------------------------------------------------------------------- /neurolib/models/hopf/model.py: -------------------------------------------------------------------------------- 1 | from . import loadDefaultParams as dp 2 | from . import timeIntegration as ti 3 | from ..model import Model 4 | 5 | 6 | class HopfModel(Model): 7 | """ 8 | Stuart-Landau model with Hopf bifurcation. 
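    Example, a minimal single-node sketch (the parameter values are arbitrary choices; a > 0 puts
    the node on its limit cycle, a < 0 in the damped-oscillation regime):

        from neurolib.models.hopf import HopfModel

        model = HopfModel()
        model.params["a"] = 0.25           # Hopf bifurcation parameter
        model.params["duration"] = 2000    # ms
        model.run()
        x, t = model.x, model.t            # default output and time axis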
9 | """ 10 | 11 | name = "hopf" 12 | description = "Stuart-Landau model with Hopf bifurcation" 13 | 14 | init_vars = ["xs_init", "ys_init", "x_ou", "y_ou"] 15 | state_vars = ["x", "y", "x_ou", "y_ou"] 16 | output_vars = ["x", "y"] 17 | default_output = "x" 18 | input_vars = ["x_ext", "y_ext"] 19 | default_input = "x_ext" 20 | 21 | # because this is not a rate model, the input 22 | # to the bold model must be transformed 23 | boldInputTransform = lambda self, x: (x + 1) * 4 24 | 25 | def __init__(self, params=None, Cmat=None, Dmat=None, seed=None): 26 | 27 | self.Cmat = Cmat 28 | self.Dmat = Dmat 29 | self.seed = seed 30 | 31 | # the integration function must be passed 32 | integration = ti.timeIntegration 33 | 34 | # load default parameters if none were given 35 | if params is None: 36 | params = dp.loadDefaultParams(Cmat=self.Cmat, Dmat=self.Dmat, seed=self.seed) 37 | 38 | # Initialize base class Model 39 | super().__init__(integration=integration, params=params) 40 | -------------------------------------------------------------------------------- /neurolib/models/kuramoto/__init__.py: -------------------------------------------------------------------------------- 1 | from .model import KuramotoModel 2 | -------------------------------------------------------------------------------- /neurolib/models/kuramoto/loadDefaultParams.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | from neurolib.utils.collections import dotdict 4 | 5 | 6 | def loadDefaultParams(Cmat=None, Dmat=None, seed=None): 7 | """Load default parameters for the Kuramoto Model model 8 | 9 | :param Cmat: Structural connectivity matrix (adjacency matrix) of coupling strengths, will be normalized to 1. If not given, then a single node simulation will be assumed, defaults to None 10 | :type Cmat: numpy.ndarray, optional 11 | :param Dmat: Fiber length matrix, will be used for computing the delay matrix together with the signal transmission speed parameter `signalV`, defaults to None 12 | :type Dmat: numpy.ndarray, optional 13 | :param seed: Seed for the random number generator, defaults to None 14 | :type seed: int, optional 15 | 16 | :return: A dictionary with the default parameters of the model 17 | :rtype: dict 18 | """ 19 | params = dotdict({}) 20 | 21 | ### runtime parameters 22 | 23 | params.dt = 0.1 24 | params.duration = 2000 25 | 26 | np.random.seed(seed) 27 | params.seed = seed 28 | 29 | # model parameters 30 | params.N = 1 31 | params.k = 2 32 | 33 | # connectivity 34 | if Cmat is None: 35 | params.N = 1 36 | params.Cmat = np.zeros((1, 1)) 37 | params.lengthMat = np.zeros((1, 1)) 38 | else: 39 | params.Cmat = Cmat.copy() # coupling matrix 40 | np.fill_diagonal(params.Cmat, 0) # no self connections 41 | params.N = len(params.Cmat) # override number of nodes 42 | params.lengthMat = Dmat 43 | 44 | params.omega = np.ones((params.N,)) * np.pi 45 | 46 | params.signalV = 20.0 47 | 48 | # Ornstein-Uhlenbeck process 49 | params.tau_ou = 5.0 # ms Timescale of the Ornstein-Uhlenbeck noise process 50 | params.sigma_ou = 0.0 # 1/ms/sqrt(ms) noise intensity 51 | 52 | # init values 53 | params.theta_init = np.random.uniform(low=0, high=2*np.pi, size=(params.N, 1)) 54 | 55 | # Ornstein-Uhlenbeck process 56 | params.theta_ou = np.zeros((params.N,)) 57 | 58 | # external input 59 | params.theta_ext = np.zeros((params.N,)) 60 | 61 | return params 62 | -------------------------------------------------------------------------------- /neurolib/models/kuramoto/model.py: 
-------------------------------------------------------------------------------- 1 | from . import loadDefaultParams as dp 2 | from . import timeIntegration as ti 3 |  4 | from neurolib.models.model import Model 5 |  6 |  7 | class KuramotoModel(Model): 8 | """ 9 | Kuramoto Model 10 |  11 | Based on: 12 | Kuramoto, Yoshiki (1975). H. Araki (ed.). Lecture Notes in Physics, International Symposium on Mathematical Problems in Theoretical Physics. 13 | """ 14 |  15 | name = "kuramoto" 16 | description = "Kuramoto Model" 17 |  18 | init_vars = ["theta_init", "theta_ou"] 19 | state_vars = ["theta", "theta_ou"] 20 | output_vars = ["theta"] 21 | default_output = "theta" 22 | input_vars = ["theta_ext"] 23 | default_input = "theta_ext" 24 |  25 | def __init__(self, params=None, Cmat=None, Dmat=None, seed=None): 26 | self.Cmat = Cmat 27 | self.Dmat = Dmat 28 | self.seed = seed 29 |  30 | integration = ti.timeIntegration 31 |  32 | if params is None: 33 | params = dp.loadDefaultParams(Cmat=self.Cmat, Dmat=self.Dmat, seed=self.seed) 34 |  35 | super().__init__(params=params, integration=integration) -------------------------------------------------------------------------------- /neurolib/models/kuramoto/timeIntegration.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import numba 3 |  4 | from ...utils import model_utils as mu 5 |  6 |  7 | def timeIntegration(params): 8 | """ 9 | Sets up parameters for time integration of the Kuramoto model 10 |  11 | :param params: Parameter dictionary of the model 12 | :type params: dict 13 |  14 | :return: Integrated activity of the model 15 | :rtype: (numpy.ndarray, ) 16 | """ 17 | dt = params["dt"] # Time step for the Euler integration (ms) 18 | duration = params["duration"] # Simulation duration (ms) 19 | RNGseed = params["seed"] # seed for RNG 20 |  21 | np.random.seed(RNGseed) 22 |  23 | # ------------------------------------------------------------------------ 24 | # model parameters 25 | # ------------------------------------------------------------------------ 26 |  27 | N = params["N"] # number of oscillators 28 |  29 | omega = params["omega"] # frequencies of oscillators 30 |  31 | # Ornstein-Uhlenbeck noise parameters 32 | tau_ou = params["tau_ou"] # noise time constant 33 | sigma_ou = params["sigma_ou"] # noise strength 34 |  35 | # ------------------------------------------------------------------------ 36 | # global coupling parameters 37 | # ------------------------------------------------------------------------ 38 |  39 | # Connectivity matrix and Delay 40 | Cmat = params["Cmat"] 41 |  42 | # Interareal connection delay 43 | lengthMat = params["lengthMat"] 44 | signalV = params["signalV"] 45 | k = params["k"] # coupling strength 46 |  47 | if N == 1: 48 | Dmat = np.zeros((N, N)) 49 | else: 50 | # Interareal connection delays, Dmat(i,j) Connection from jth node to ith (ms) 51 | Dmat = mu.computeDelayMatrix(lengthMat, signalV) 52 |  53 | # no self-feedback delay 54 | Dmat[np.eye(len(Dmat)) == 1] = np.zeros(len(Dmat)) 55 | Dmat = Dmat.astype(int) 56 | Dmat_ndt = np.around(Dmat / dt).astype(int) # delay matrix in multiples of dt 57 |  58 | # ------------------------------------------------------------------------ 59 | # Initialization 60 | # ------------------------------------------------------------------------ 61 |  62 | t = np.arange(1, round(duration, 6) / dt + 1) * dt # Time variable (ms) 63 | sqrt_dt = np.sqrt(dt) 64 |  65 | max_global_delay = np.max(Dmat_ndt) # maximum global delay 66 | startind = int(max_global_delay + 1) # start
simulation after delay 67 | 68 | # Placeholders 69 | theta_ou = params['theta_ou'].copy() 70 | theta = np.zeros((N, startind + len(t))) 71 | 72 | theta_ext = mu.adjustArrayShape(params["theta_ext"], theta) 73 | 74 | # ------------------------------------------------------------------------ 75 | # initial values 76 | # ------------------------------------------------------------------------ 77 | 78 | if params["theta_init"].shape[1] == 1: 79 | theta_init = np.dot(params["theta_init"], np.ones((1, startind))) 80 | else: 81 | theta_init = params["theta_init"][:, -startind:] 82 | 83 | # put noise to instantiated array to save memory 84 | theta[:, :startind] = theta_init 85 | theta[:, startind:] = np.random.standard_normal((N, len(t))) 86 | 87 | 88 | k_n = k/N # auxiliary variable 89 | 90 | # ------------------------------------------------------------------------ 91 | # time integration 92 | # ------------------------------------------------------------------------ 93 | 94 | return timeIntegration_njit_elementwise( 95 | startind, 96 | t, 97 | dt, 98 | sqrt_dt, 99 | N, 100 | omega, 101 | k_n, 102 | Cmat, 103 | Dmat, 104 | theta, 105 | theta_ext, 106 | tau_ou, 107 | sigma_ou, 108 | theta_ou, 109 | ) 110 | 111 | 112 | @numba.njit 113 | def timeIntegration_njit_elementwise( 114 | startind, 115 | t, 116 | dt, 117 | sqrt_dt, 118 | N, 119 | omega, 120 | k_n, 121 | Cmat, 122 | Dmat, 123 | theta, 124 | theta_ext, 125 | tau_ou, 126 | sigma_ou, 127 | theta_ou, 128 | ): 129 | """ 130 | Kuramoto Model 131 | """ 132 | for i in range(startind, startind+len(t)): 133 | # Kuramoto model 134 | for no in range(N): 135 | noise_theta = theta[no, i] 136 | theta_input_d = 0.0 137 | 138 | # adding input from other nodes 139 | for m in range(N): 140 | theta_input_d += k_n * Cmat[no, m] * np.sin(theta[m, i-1-Dmat[no, m]] - theta[no, i-1]) 141 | 142 | theta_rhs = omega[no] + theta_input_d + theta_ou[no] + theta_ext[no, i-1] 143 | 144 | # time integration 145 | theta[no, i] = theta[no, i-1] + dt * theta_rhs 146 | 147 | # phase reset 148 | theta[no, i] = np.mod(theta[no, i], 2*np.pi) 149 | 150 | # Ornstein-Uhlenbeck 151 | theta_ou[no] = theta_ou[no] - theta_ou[no] * dt / tau_ou + sigma_ou * sqrt_dt * noise_theta 152 | 153 | return t, theta, theta_ou 154 | -------------------------------------------------------------------------------- /neurolib/models/multimodel/__init__.py: -------------------------------------------------------------------------------- 1 | from .builder.aln import ALNNetwork, ALNNode 2 | from .builder.fitzhugh_nagumo import FitzHughNagumoNetwork, FitzHughNagumoNode 3 | from .builder.hopf import HopfNetwork, HopfNode 4 | from .builder.thalamus import ThalamicNode 5 | from .builder.wilson_cowan import WilsonCowanNetwork, WilsonCowanNode 6 | from .builder.wong_wang import ReducedWongWangNetwork, ReducedWongWangNode, WongWangNetwork, WongWangNode 7 | from .model import MultiModel 8 | -------------------------------------------------------------------------------- /neurolib/models/multimodel/builder/__init__.py: -------------------------------------------------------------------------------- 1 | from .aln import ALNNetwork, ALNNode 2 | from .base.network import Network, Node, SingleCouplingExcitatoryInhibitoryNode 3 | from .base.neural_mass import NeuralMass 4 | from .fitzhugh_nagumo import FitzHughNagumoNetwork, FitzHughNagumoNode 5 | from .hopf import HopfNetwork, HopfNode 6 | from .thalamus import ThalamicNode 7 | from .wilson_cowan import WilsonCowanNetwork, WilsonCowanNode 8 | from .wong_wang import 
ReducedWongWangNetwork, ReducedWongWangNode, WongWangNetwork, WongWangNode 9 | -------------------------------------------------------------------------------- /neurolib/models/multimodel/builder/base/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neurolib-dev/neurolib/9b6b2b8f082c0cfa212f05576ead55bf23046d6f/neurolib/models/multimodel/builder/base/__init__.py -------------------------------------------------------------------------------- /neurolib/models/multimodel/builder/base/constants.py: -------------------------------------------------------------------------------- 1 | ### -- NAMING CONVENTIONS -- 2 | # names for excitatory and inhibitory masses 3 | EXC = "EXC" 4 | INH = "INH" 5 | # names for matrices 6 | NODE_CONNECTIVITY = "local_connectivity" 7 | NODE_DELAYS = "local_delays" 8 | NETWORK_CONNECTIVITY = "connectivity" 9 | NETWORK_DELAYS = "delays" 10 | 11 | # speed for dummy fixed-point-dynamical variable 12 | LAMBDA_SPEED = 10.0 13 | -------------------------------------------------------------------------------- /neurolib/models/multimodel/builder/base/params.py: -------------------------------------------------------------------------------- 1 | """ 2 | Set of convenience functions for parameter handling in MultiModel. 3 | """ 4 | 5 | import numpy as np 6 | import sympy as sp 7 | from sympy.core import symbol 8 | 9 | 10 | def count_float_params(param_dict): 11 | """ 12 | Count number of float parameters in a dictionary, do not count noise 13 | parameters. 14 | """ 15 | return len( 16 | { 17 | k: v 18 | for k, v in param_dict.items() 19 | if isinstance(v, (int, float)) 20 | if not any(["input" in sp for sp in k.split(".")]) 21 | } 22 | ) 23 | 24 | 25 | def float_params_to_vector_symbolic(param_dict): 26 | """ 27 | Transforms float / array / int parameters to symbolic ones. Assumes flat 28 | dictionary with dot as a separator. Does not translate noise parameters. 29 | 30 | :param param_dict: dictionary with parameters and their values 31 | :type param_dict: dict 32 | :return: dictionary with parameters and symbols 33 | :rtype: dict 34 | """ 35 | param_vec = sp.MatrixSymbol("param", n=count_float_params(param_dict), m=1) 36 | 37 | cnt = 0 38 | symbol_dict = {} 39 | for k, v in param_dict.items(): 40 | splitted_key = k.split(".") 41 | if any(["input" in sp for sp in splitted_key]): 42 | continue 43 | if isinstance(v, (float, int)): 44 | symbol_dict[k] = param_vec[cnt, 0] 45 | cnt += 1 46 | elif isinstance(v, np.ndarray): 47 | symbol_dict[k] = sp.MatrixSymbol(k.replace(".", "DOT"), *v.shape) 48 | else: 49 | raise ValueError(f"Cannot handle {type(v)} type of {k}") 50 | return symbol_dict 51 | 52 | 53 | def float_params_to_individual_symbolic(param_dict): 54 | """ 55 | Transforms float / array / int parameters to symbolic ones. Assumes flat 56 | dictionary with dot as a separator. Does not translate noise parameters. All 57 | parameters are Symbols, compatible with symengine. 
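:param param_dict: dictionary with parameters and their values
:type param_dict: dict
:return: dictionary with parameters and symbols
:rtype: dict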
58 | """ 59 | symbol_dict = {} 60 | for k, v in param_dict.items(): 61 | splitted_key = k.split(".") 62 | if any(["input" in sp for sp in splitted_key]): 63 | continue 64 | if isinstance(v, (float, int)): 65 | symbol_dict[k] = sp.Symbol(k.replace(".", "DOT")) 66 | elif isinstance(v, np.ndarray): 67 | symbol_base_str = k.replace(".", "DOT") 68 | symbol_dict[k] = np.array( 69 | [sp.Symbol(f"{symbol_base_str}_{i}_{j}") for i in range(v.shape[0]) for j in range(v.shape[1])] 70 | ).reshape(v.shape) 71 | else: 72 | raise ValueError(f"Cannot handle {type(v)} type of {k}") 73 | return symbol_dict 74 | -------------------------------------------------------------------------------- /neurolib/models/multimodel/builder/fitzhugh_nagumo.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from jitcdde import input as system_input 3 | 4 | from ....utils.stimulus import OrnsteinUhlenbeckProcess 5 | from ..builder.base.network import Network, Node 6 | from ..builder.base.neural_mass import NeuralMass 7 | 8 | FHN_DEFAULT_PARAMS = { 9 | "alpha": 3.0, 10 | "beta": 4.0, 11 | "gamma": -1.5, 12 | "delta": 0.0, 13 | "epsilon": 0.5, 14 | "tau": 20.0, 15 | "x_ext": 1.0, 16 | "y_ext": 0.0, 17 | } 18 | 19 | 20 | class FitzHughNagumoMass(NeuralMass): 21 | """ 22 | FitzHugh-Nagumo model. 23 | """ 24 | 25 | name = "FitzHugh-Nagumo mass" 26 | label = "FHNmass" 27 | 28 | num_state_variables = 2 29 | num_noise_variables = 2 30 | coupling_variables = {0: "x", 1: "y"} 31 | state_variable_names = ["x", "y"] 32 | required_params = [ 33 | "alpha", 34 | "beta", 35 | "gamma", 36 | "delta", 37 | "epsilon", 38 | "tau", 39 | "x_ext", 40 | "y_ext", 41 | ] 42 | required_couplings = ["network_x", "network_y"] 43 | _noise_input = [ 44 | OrnsteinUhlenbeckProcess(mu=0.0, sigma=0.0, tau=5.0), 45 | OrnsteinUhlenbeckProcess(mu=0.0, sigma=0.0, tau=5.0), 46 | ] 47 | 48 | def __init__(self, params=None, seed=None): 49 | super().__init__(params=params or FHN_DEFAULT_PARAMS, seed=seed) 50 | 51 | def _initialize_state_vector(self): 52 | """ 53 | Initialize state vector. 54 | """ 55 | np.random.seed(self.seed) 56 | self.initial_state = (0.05 * np.random.uniform(-1, 1, size=(self.num_state_variables,))).tolist() 57 | 58 | def _derivatives(self, coupling_variables): 59 | [x, y] = self._unwrap_state_vector() 60 | 61 | d_x = ( 62 | -self.params["alpha"] * x ** 3 63 | + self.params["beta"] * x ** 2 64 | + self.params["gamma"] * x 65 | - y 66 | + coupling_variables["network_x"] 67 | + system_input(self.noise_input_idx[0]) 68 | + self.params["x_ext"] 69 | ) 70 | 71 | d_y = ( 72 | (x - self.params["delta"] - self.params["epsilon"] * y) / self.params["tau"] 73 | + coupling_variables["network_y"] 74 | + system_input(self.noise_input_idx[1]) 75 | + self.params["y_ext"] 76 | ) 77 | 78 | return [d_x, d_y] 79 | 80 | 81 | class FitzHughNagumoNode(Node): 82 | """ 83 | Default FitzHugh-Nagumo node with 1 neural mass modelled as FitzHugh-Nagumo 84 | oscillator. 
85 | """ 86 | 87 | name = "FitzHugh-Nagumo node" 88 | label = "FHNnode" 89 | 90 | default_network_coupling = {"network_x": 0.0, "network_y": 0.0} 91 | default_output = "x" 92 | output_vars = ["x", "y"] 93 | 94 | def __init__(self, params=None, seed=None): 95 | """ 96 | :param params: parameters of the FitzHugh-Nagumo mass 97 | :type params: dict|None 98 | :param seed: seed for random number generator 99 | :type seed: int|None 100 | """ 101 | fhn_mass = FitzHughNagumoMass(params, seed=seed) 102 | fhn_mass.index = 0 103 | super().__init__(neural_masses=[fhn_mass]) 104 | 105 | def _sync(self): 106 | return [] 107 | 108 | 109 | class FitzHughNagumoNetwork(Network): 110 | """ 111 | Whole brain network of FitzHugh-Nagumo oscillators. 112 | """ 113 | 114 | name = "FitzHugh-Nagumo network" 115 | label = "FHNnet" 116 | 117 | sync_variables = ["network_x", "network_y"] 118 | # define default coupling in FitzHugh-Nagumo network 119 | default_coupling = {"network_x": "diffusive", "network_y": "none"} 120 | output_vars = ["x", "y"] 121 | 122 | def __init__( 123 | self, 124 | connectivity_matrix, 125 | delay_matrix, 126 | mass_params=None, 127 | seed=None, 128 | ): 129 | """ 130 | :param connectivity_matrix: connectivity matrix for between nodes 131 | coupling, typically DTI structural connectivity, matrix as [to, 132 | from] 133 | :type connectivity_matrix: np.ndarray 134 | :param delay_matrix: delay matrix between nodes, typically derived from 135 | length matrix, if None, delays are all zeros, in ms, matrix as 136 | [to, from] 137 | :type delay_matrix: np.ndarray|None 138 | :param mass_params: parameters for each Hopf normal form neural 139 | mass, if None, will use default 140 | :type mass_params: list[dict]|dict|None 141 | :type y_coupling: str 142 | :param seed: seed for random number generator 143 | :type seed: int|None 144 | """ 145 | mass_params = self._prepare_mass_params(mass_params, connectivity_matrix.shape[0]) 146 | seeds = self._prepare_mass_params(seed, connectivity_matrix.shape[0], native_type=int) 147 | 148 | nodes = [] 149 | for i, node_params in enumerate(mass_params): 150 | node = FitzHughNagumoNode(params=node_params, seed=seeds[i]) 151 | node.index = i 152 | node.idx_state_var = i * node.num_state_variables 153 | nodes.append(node) 154 | 155 | super().__init__( 156 | nodes=nodes, 157 | connectivity_matrix=connectivity_matrix, 158 | delay_matrix=delay_matrix, 159 | ) 160 | # get all coupling variables 161 | all_couplings = [mass.coupling_variables for node in self.nodes for mass in node.masses] 162 | # assert they are the same 163 | assert all(all_couplings[0] == coupling for coupling in all_couplings) 164 | # invert as to name: idx 165 | self.coupling_symbols = {v: k for k, v in all_couplings[0].items()} 166 | -------------------------------------------------------------------------------- /neurolib/models/multimodel/builder/hopf.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from jitcdde import input as system_input 3 | 4 | from ....utils.stimulus import OrnsteinUhlenbeckProcess 5 | from ..builder.base.network import Network, Node 6 | from ..builder.base.neural_mass import NeuralMass 7 | 8 | HOPF_DEFAULT_PARAMS = { 9 | "a": 0.25, 10 | "w": 0.2, 11 | "x_ext": 0.0, 12 | "y_ext": 0.0, 13 | } 14 | 15 | 16 | class HopfMass(NeuralMass): 17 | """ 18 | Hopf normal form (Landau-Stuart oscillator). 19 | 20 | References: 21 | Landau, L. D. (1944). On the problem of turbulence. In Dokl. Akad. Nauk USSR 22 | (Vol. 44, p. 311). 
23 | 24 | Stuart, J. T. (1960). On the non-linear mechanics of wave disturbances in 25 | stable and unstable parallel flows Part 1. The basic behaviour in plane 26 | Poiseuille flow. Journal of Fluid Mechanics, 9(3), 353-370. 27 | """ 28 | 29 | name = "Hopf normal form mass" 30 | label = "HopfMass" 31 | 32 | num_state_variables = 2 33 | num_noise_variables = 2 34 | coupling_variables = {0: "x", 1: "y"} 35 | state_variable_names = ["x", "y"] 36 | required_params = ["a", "w", "x_ext", "y_ext"] 37 | required_couplings = ["network_x", "network_y"] 38 | _noise_input = [ 39 | OrnsteinUhlenbeckProcess(mu=0.0, sigma=0.0, tau=5.0), 40 | OrnsteinUhlenbeckProcess(mu=0.0, sigma=0.0, tau=5.0), 41 | ] 42 | 43 | def __init__(self, params=None, seed=None): 44 | super().__init__(params=params or HOPF_DEFAULT_PARAMS, seed=seed) 45 | 46 | def _initialize_state_vector(self): 47 | """ 48 | Initialize state vector. 49 | """ 50 | np.random.seed(self.seed) 51 | self.initial_state = (0.5 * np.random.uniform(-1, 1, size=(self.num_state_variables,))).tolist() 52 | 53 | def _derivatives(self, coupling_variables): 54 | [x, y] = self._unwrap_state_vector() 55 | 56 | d_x = ( 57 | (self.params["a"] - x ** 2 - y ** 2) * x 58 | - self.params["w"] * y 59 | + coupling_variables["network_x"] 60 | + system_input(self.noise_input_idx[0]) 61 | + self.params["x_ext"] 62 | ) 63 | 64 | d_y = ( 65 | (self.params["a"] - x ** 2 - y ** 2) * y 66 | + self.params["w"] * x 67 | + coupling_variables["network_y"] 68 | + system_input(self.noise_input_idx[1]) 69 | + self.params["y_ext"] 70 | ) 71 | 72 | return [d_x, d_y] 73 | 74 | 75 | class HopfNode(Node): 76 | """ 77 | Default Hopf normal form node with 1 neural mass modelled as Landau-Stuart 78 | oscillator. 79 | """ 80 | 81 | name = "Hopf normal form node" 82 | label = "HopfNode" 83 | 84 | default_network_coupling = {"network_x": 0.0, "network_y": 0.0} 85 | default_output = "x" 86 | output_vars = ["x", "y"] 87 | 88 | def __init__(self, params=None, seed=None): 89 | """ 90 | :param params: parameters of the Hopf mass 91 | :type params: dict|None 92 | :param seed: seed for random number generator 93 | :type seed: int|None 94 | """ 95 | hopf_mass = HopfMass(params, seed=seed) 96 | hopf_mass.index = 0 97 | super().__init__(neural_masses=[hopf_mass]) 98 | 99 | def _sync(self): 100 | return [] 101 | 102 | 103 | class HopfNetwork(Network): 104 | """ 105 | Whole brain network of Hopf normal form oscillators. 
106 | """ 107 | 108 | name = "Hopf normal form network" 109 | label = "HopfNet" 110 | 111 | sync_variables = ["network_x", "network_y"] 112 | # define default coupling in Hopf network 113 | default_coupling = {"network_x": "diffusive", "network_y": "none"} 114 | output_vars = ["x", "y"] 115 | 116 | def __init__( 117 | self, 118 | connectivity_matrix, 119 | delay_matrix, 120 | mass_params=None, 121 | seed=None, 122 | ): 123 | """ 124 | :param connectivity_matrix: connectivity matrix for between nodes 125 | coupling, typically DTI structural connectivity, matrix as [to, 126 | from] 127 | :type connectivity_matrix: np.ndarray 128 | :param delay_matrix: delay matrix between nodes, typically derived from 129 | length matrix, if None, delays are all zeros, in ms, matrix as 130 | [to, from] 131 | :type delay_matrix: np.ndarray|None 132 | :param mass_params: parameters for each Hopf normal form neural 133 | mass, if None, will use default 134 | :type mass_params: list[dict]|dict|None 135 | :param x_coupling: how to couple `x` variables in the nodes, 136 | "diffusive", "additive", or "none" 137 | :type x_coupling: str 138 | :param y_coupling: how to couple `y` variables in the nodes, 139 | "diffusive", "additive", or "none" 140 | :type y_coupling: str 141 | :param seed: seed for random number generator 142 | :type seed: int|None 143 | """ 144 | mass_params = self._prepare_mass_params(mass_params, connectivity_matrix.shape[0]) 145 | seeds = self._prepare_mass_params(seed, connectivity_matrix.shape[0], native_type=int) 146 | 147 | nodes = [] 148 | for i, node_params in enumerate(mass_params): 149 | node = HopfNode(params=node_params, seed=seeds[i]) 150 | node.index = i 151 | node.idx_state_var = i * node.num_state_variables 152 | nodes.append(node) 153 | 154 | super().__init__( 155 | nodes=nodes, 156 | connectivity_matrix=connectivity_matrix, 157 | delay_matrix=delay_matrix, 158 | ) 159 | # get all coupling variables 160 | all_couplings = [mass.coupling_variables for node in self.nodes for mass in node.masses] 161 | # assert they are the same 162 | assert all(all_couplings[0] == coupling for coupling in all_couplings) 163 | # invert as to name: idx 164 | self.coupling_symbols = {v: k for k, v in all_couplings[0].items()} 165 | -------------------------------------------------------------------------------- /neurolib/models/thalamus/__init__.py: -------------------------------------------------------------------------------- 1 | from .model import ThalamicMassModel 2 | -------------------------------------------------------------------------------- /neurolib/models/thalamus/loadDefaultParams.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | from ...utils.collections import dotdict 4 | 5 | 6 | def loadDefaultParams(seed=None): 7 | """ 8 | Load default parameters for the thalamic mass model due to Costa et al. 9 | Subscript t (_t) referes to thalamocortical relay population (TCR), while 10 | sucscript r (_r) referes to the thalamic reticular nuclei (TRN). 
11 | 12 | :return: A dictionary with the default parameters of the model 13 | :rtype: dict 14 | """ 15 | 16 | params = dotdict({}) 17 | 18 | ### runtime parameters 19 | # thalamus is really sensitive, so either you integrate with very small dt or use an adaptive integration step 20 | params.dt = 0.01 # ms 21 | params.duration = 60000 # Simulation duration (ms) 22 | np.random.seed(seed) # seed for RNG of noise and ICs 23 | params.seed = seed 24 | 25 | # local parameters for both populations 26 | params.tau = 20.0 27 | params.Q_max = 400.0e-3 # 1/ms 28 | params.theta = -58.5 # mV 29 | params.sigma = 6.0 30 | params.C1 = 1.8137993642 31 | params.C_m = 1.0 # muF/cm^2 32 | params.gamma_e = 70.0e-3 # 1/ms 33 | params.gamma_r = 100.0e-3 # 1/ms 34 | params.g_L = 1.0 # AU 35 | params.g_GABA = 1.0 # ms 36 | params.g_AMPA = 1.0 # ms 37 | params.g_LK = 0.018 # mS/cm^2 38 | params.E_AMPA = 0.0 # mV 39 | params.E_GABA = -70.0 # mV 40 | params.E_L = -70.0 # mV 41 | params.E_K = -100.0 # mV 42 | params.E_Ca = 120.0 # mV 43 | 44 | # specific thalamo-cortical neurons population - TCR (excitatory) 45 | params.g_T_t = 3.0 # mS/cm^2 46 | params.g_h = 0.062 # mS/cm^2 47 | params.E_h = -40.0 # mV 48 | params.alpha_Ca = -51.8e-6 # nmol 49 | params.tau_Ca = 10.0 # ms 50 | params.Ca_0 = 2.4e-4 51 | params.k1 = 2.5e7 52 | params.k2 = 4.0e-4 53 | params.k3 = 1.0e-1 54 | params.k4 = 1.0e-3 55 | params.n_P = 4.0 56 | params.g_inc = 2.0 57 | # connectivity 58 | params.N_tr = 5.0 59 | # noise 60 | params.d_phi = 0.0 61 | 62 | # specific thalamic reticular nuclei population - TRN (inhibitory) 63 | params.g_T_r = 2.3 # mS/cm^2 64 | # connectivity 65 | params.N_rt = 3.0 66 | params.N_rr = 25.0 67 | 68 | # external input 69 | params.ext_current_t = 0.0 70 | params.ext_current_r = 0.0 71 | 72 | # init 73 | ( 74 | params.V_t_init, 75 | params.V_r_init, 76 | params.Q_t_init, 77 | params.Q_r_init, 78 | params.Ca_init, 79 | params.h_T_t_init, 80 | params.h_T_r_init, 81 | params.m_h1_init, 82 | params.m_h2_init, 83 | params.s_et_init, 84 | params.s_gt_init, 85 | params.s_er_init, 86 | params.s_gr_init, 87 | params.ds_et_init, 88 | params.ds_gt_init, 89 | params.ds_er_init, 90 | params.ds_gr_init, 91 | ) = generateRandomICs(seed=seed) 92 | 93 | # always 1 node only - no network of multiple "thalamuses" 94 | params.N = 1 95 | params.Cmat = np.zeros((1, 1)) 96 | params.lengthMat = np.zeros((1, 1)) 97 | 98 | return params 99 | 100 | 101 | def generateRandomICs(seed=None): 102 | """Generates random Initial Conditions for the interareal network 103 | 104 | :returns: A tuple of 15 floats for representing initial state of the 105 | thalamus 106 | """ 107 | np.random.seed(seed) 108 | 109 | V_t_init = np.random.uniform(-75, -50, (1,)) 110 | V_r_init = np.random.uniform(-75, -50, (1,)) 111 | Q_t_init = np.random.uniform(0.0, 200.0, (1,)) 112 | Q_r_init = np.random.uniform(0.0, 200.0, (1,)) 113 | Ca_init = 2.4e-4 114 | h_T_t_init = 0.0 115 | h_T_r_init = 0.0 116 | m_h1_init = 0.0 117 | m_h2_init = 0.0 118 | s_et_init = 0.0 119 | s_gt_init = 0.0 120 | s_er_init = 0.0 121 | s_gr_init = 0.0 122 | ds_et_init = 0.0 123 | ds_gt_init = 0.0 124 | ds_er_init = 0.0 125 | ds_gr_init = 0.0 126 | 127 | return ( 128 | V_t_init, 129 | V_r_init, 130 | Q_t_init, 131 | Q_r_init, 132 | np.array(Ca_init), 133 | np.array(h_T_t_init), 134 | np.array(h_T_r_init), 135 | np.array(m_h1_init), 136 | np.array(m_h2_init), 137 | np.array(s_et_init), 138 | np.array(s_gt_init), 139 | np.array(s_er_init), 140 | np.array(s_gr_init), 141 | np.array(ds_et_init), 142 | 
np.array(ds_gt_init), 143 | np.array(ds_er_init), 144 | np.array(ds_gr_init), 145 | ) 146 | -------------------------------------------------------------------------------- /neurolib/models/thalamus/model.py: -------------------------------------------------------------------------------- 1 | from ..model import Model 2 | from . import loadDefaultParams as dp 3 | from . import timeIntegration as ti 4 | 5 | 6 | class ThalamicMassModel(Model): 7 | """ 8 | Two population thalamic model 9 | 10 | Reference: 11 | Costa, M. S., Weigenand, A., Ngo, H. V. V., Marshall, L., Born, J., 12 | Martinetz, T., & Claussen, J. C. (2016). A thalamocortical neural mass 13 | model of the EEG during NREM sleep and its response to auditory stimulation. 14 | PLoS computational biology, 12(9). 15 | 16 | """ 17 | 18 | name = "thalamus" 19 | description = "Two population thalamic mass model" 20 | 21 | init_vars = [ 22 | "V_t_init", 23 | "V_r_init", 24 | "Q_t_init", 25 | "Q_r_init", 26 | "Ca_init", 27 | "h_T_t_init", 28 | "h_T_r_init", 29 | "m_h1_init", 30 | "m_h2_init", 31 | "s_et_init", 32 | "s_gt_init", 33 | "s_er_init", 34 | "s_gr_init", 35 | "ds_et_init", 36 | "ds_gt_init", 37 | "ds_er_init", 38 | "ds_gr_init", 39 | ] 40 | state_vars = [ 41 | "V_t", 42 | "V_r", 43 | "Q_t", 44 | "Q_r", 45 | "Ca", 46 | "h_T_t", 47 | "h_T_r", 48 | "m_h1", 49 | "m_h2", 50 | "s_et", 51 | "s_gt", 52 | "s_er", 53 | "s_gr", 54 | "ds_et", 55 | "ds_gt", 56 | "ds_er", 57 | "ds_gr", 58 | ] 59 | output_vars = ["V_t", "V_r", "Q_t", "Q_r"] 60 | default_output = "Q_t" 61 | input_vars = [] 62 | default_input = None 63 | 64 | def __init__(self, params=None, seed=None): 65 | self.seed = seed 66 | 67 | # the integration function must be passed 68 | integration = ti.timeIntegration 69 | 70 | # load default parameters if none were given 71 | if params is None: 72 | params = dp.loadDefaultParams(seed=seed) 73 | 74 | # Initialize base class Model 75 | super().__init__(integration=integration, params=params) 76 | 77 | def randomICs(self): 78 | ics = dp.generateRandomICs() 79 | for idx, iv in enumerate(self.init_vars): 80 | self.params[iv] = ics[idx] 81 | -------------------------------------------------------------------------------- /neurolib/models/wc/__init__.py: -------------------------------------------------------------------------------- 1 | from .model import WCModel 2 | -------------------------------------------------------------------------------- /neurolib/models/wc/loadDefaultParams.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | from ...utils.collections import dotdict 4 | 5 | 6 | def loadDefaultParams(Cmat=None, Dmat=None, seed=None): 7 | """Load default parameters for the Wilson-Cowan model 8 | 9 | :param Cmat: Structural connectivity matrix (adjacency matrix) of coupling strengths, will be normalized to 1. 
If not given, then a single node simulation will be assumed, defaults to None 10 | :type Cmat: numpy.ndarray, optional 11 | :param Dmat: Fiber length matrix, will be used for computing the delay matrix together with the signal transmission speed parameter `signalV`, defaults to None 12 | :type Dmat: numpy.ndarray, optional 13 | :param seed: Seed for the random number generator, defaults to None 14 | :type seed: int, optional 15 | 16 | :return: A dictionary with the default parameters of the model 17 | :rtype: dict 18 | """ 19 | 20 | params = dotdict({}) 21 | 22 | ### runtime parameters 23 | params.dt = 0.1 # ms 0.1ms is reasonable 24 | params.duration = 2000 # Simulation duration (ms) 25 | np.random.seed(seed) # seed for RNG of noise and ICs 26 | params.seed = seed 27 | 28 | # ------------------------------------------------------------------------ 29 | # global whole-brain network parameters 30 | # ------------------------------------------------------------------------ 31 | 32 | # signal transmission speed between areas 33 | params.signalV = 20.0 34 | params.K_gl = 0.6 # global coupling strength 35 | 36 | if Cmat is None: 37 | params.N = 1 38 | params.Cmat = np.zeros((1, 1)) 39 | params.lengthMat = np.zeros((1, 1)) 40 | 41 | else: 42 | params.Cmat = Cmat.copy() # coupling matrix 43 | np.fill_diagonal(params.Cmat, 0) # no self connections 44 | params.N = len(params.Cmat) # number of nodes 45 | params.lengthMat = Dmat 46 | 47 | # ------------------------------------------------------------------------ 48 | # local node parameters 49 | # ------------------------------------------------------------------------ 50 | 51 | # external input parameters: 52 | params.tau_ou = 5.0 # ms Timescale of the Ornstein-Uhlenbeck noise process 53 | params.sigma_ou = 0.0 # noise intensity 54 | params.exc_ou_mean = 0.0 # OU process mean 55 | params.inh_ou_mean = 0.0 # OU process mean 56 | 57 | # neural mass model parameters 58 | params.tau_exc = 2.5 # excitatory time constant 59 | params.tau_inh = 3.75 # inhibitory time constant 60 | params.c_excexc = 16 # local E-E coupling 61 | params.c_excinh = 15 # local E-I coupling 62 | params.c_inhexc = 12 # local I-E coupling 63 | params.c_inhinh = 3 # local I-I coupling 64 | params.a_exc = 1.5 # excitatory gain 65 | params.a_inh = 1.5 # inhibitory gain 66 | params.mu_exc = 3.0 # excitatory firing threshold 67 | params.mu_inh = 3.0 # inhibitory firing threshold 68 | 69 | # values of the external inputs 70 | params.exc_ext_baseline = 0 # baseline external input to E (static) 71 | params.inh_ext_baseline = 0 # baseline external input to I (static) 72 | params.exc_ext = 0 # time-dependent external input to E 73 | params.inh_ext = 0 # time-dependent external input to I 74 | 75 | # ------------------------------------------------------------------------ 76 | 77 | params.exc_init = 0.05 * np.random.uniform(0, 1, (params.N, 1)) 78 | params.inh_init = 0.05 * np.random.uniform(0, 1, (params.N, 1)) 79 | 80 | # Ornstein-Uhlenbeck noise state variables 81 | params.exc_ou = np.zeros((params.N,)) 82 | params.inh_ou = np.zeros((params.N,)) 83 | 84 | return params 85 | -------------------------------------------------------------------------------- /neurolib/models/wc/model.py: -------------------------------------------------------------------------------- 1 | from . import loadDefaultParams as dp 2 | from . 
import timeIntegration as ti 3 | from ..model import Model 4 | 5 | 6 | class WCModel(Model): 7 | """ 8 | The two-population Wilson-Cowan model 9 | """ 10 | 11 | name = "wc" 12 | description = "Wilson-Cowan model" 13 | 14 | init_vars = ["exc_init", "inh_init", "exc_ou", "inh_ou"] 15 | state_vars = ["exc", "inh", "exc_ou", "inh_ou"] 16 | output_vars = ["exc", "inh"] 17 | default_output = "exc" 18 | input_vars = ["exc_ext", "inh_ext"] 19 | default_input = "exc_ext" 20 | 21 | # because this is not a rate model, the input 22 | # to the bold model must be transformed 23 | boldInputTransform = lambda self, x: x * 50 24 | 25 | def __init__(self, params=None, Cmat=None, Dmat=None, seed=None): 26 | 27 | self.Cmat = Cmat 28 | self.Dmat = Dmat 29 | self.seed = seed 30 | 31 | # the integration function must be passed 32 | integration = ti.timeIntegration 33 | 34 | # load default parameters if none were given 35 | if params is None: 36 | params = dp.loadDefaultParams(Cmat=self.Cmat, Dmat=self.Dmat, seed=self.seed) 37 | 38 | # Initialize base class Model 39 | super().__init__(integration=integration, params=params) 40 | -------------------------------------------------------------------------------- /neurolib/models/ww/__init__.py: -------------------------------------------------------------------------------- 1 | from .model import WWModel 2 | -------------------------------------------------------------------------------- /neurolib/models/ww/loadDefaultParams.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | from ...utils.collections import dotdict 4 | 5 | 6 | def loadDefaultParams(Cmat=None, Dmat=None, seed=None): 7 | """Load default parameters for the Wong-Wang model 8 | 9 | :param Cmat: Structural connectivity matrix (adjacency matrix) of coupling strengths, will be normalized to 1. 
If not given, then a single node simulation will be assumed, defaults to None 10 | :type Cmat: numpy.ndarray, optional 11 | :param Dmat: Fiber length matrix, will be used for computing the delay matrix together with the signal transmission speed parameter `signalV`, defaults to None 12 | :type Dmat: numpy.ndarray, optional 13 | :param seed: Seed for the random number generator, defaults to None 14 | :type seed: int, optional 15 |  16 | :return: A dictionary with the default parameters of the model 17 | :rtype: dict 18 | """ 19 |  20 | params = dotdict({}) 21 |  22 | ### runtime parameters 23 | params.dt = 0.1 # ms 0.1ms is reasonable 24 | params.duration = 2000 # Simulation duration (ms) 25 | np.random.seed(seed) # seed for RNG of noise and ICs 26 | params.seed = seed 27 |  28 | # ------------------------------------------------------------------------ 29 | # global whole-brain network parameters 30 | # ------------------------------------------------------------------------ 31 |  32 | # signal transmission speed between areas 33 | params.signalV = 20.0 34 | params.K_gl = 0.6 # global coupling strength 35 |  36 | if Cmat is None: 37 | params.N = 1 38 | params.Cmat = np.zeros((1, 1)) 39 | params.lengthMat = np.zeros((1, 1)) 40 |  41 | else: 42 | params.Cmat = Cmat.copy() # coupling matrix 43 | np.fill_diagonal(params.Cmat, 0) # no self connections 44 | params.N = len(params.Cmat) # number of nodes 45 | params.lengthMat = Dmat 46 |  47 | # ------------------------------------------------------------------------ 48 | # local node parameters 49 | # ------------------------------------------------------------------------ 50 |  51 | # # the coupling parameter determines how nodes are coupled. 52 | # # "original" for original wong-wang model, "reduced" for reduced wong-wang model 53 | # params.version = "original" 54 |  55 | # external noise parameters: 56 | params.tau_ou = 5.0 # ms Timescale of the Ornstein-Uhlenbeck noise process 57 | params.sigma_ou = 0.0 # noise intensity 58 | params.exc_ou_mean = 0.0 # OU process mean 59 | params.inh_ou_mean = 0.0 # OU process mean 60 |  61 | # neural mass model parameters 62 | params.a_exc = 0.31 # nC^-1 63 | params.b_exc = 0.125 # kHz 64 | params.d_exc = 160.0 # ms 65 | params.tau_exc = 100.0 # ms 66 | params.gamma_exc = 0.641 67 | params.w_exc = 1.0 68 | params.exc_current_baseline = 0.382 # nA, baseline external input current (static) 69 | params.exc_current = 0 # time-dependent external input current to E 70 |  71 | params.a_inh = 0.615 # nC^-1 72 | params.b_inh = 0.177 # kHz 73 | params.d_inh = 87.0 # ms 74 | params.tau_inh = 10.0 # ms 75 | params.w_inh = 0.7 76 | params.inh_current_baseline = 0.382 # nA, baseline external input current (static) 77 | params.inh_current = 0 # time-dependent external input current to I 78 |  79 | params.J_NMDA = 0.15 # nA, excitatory synaptic coupling 80 | params.J_I = 1.0 # nA, inhibitory synaptic coupling 81 | params.w_ee = 1.4 # excitatory feedback coupling strength 82 |  83 | # ------------------------------------------------------------------------ 84 |  85 | params.ses_init = 0.05 * np.random.uniform(0, 1, (params.N, 1)) 86 | params.sis_init = 0.05 * np.random.uniform(0, 1, (params.N, 1)) 87 |  88 | # Ornstein-Uhlenbeck noise state variables 89 | params.exc_ou = np.zeros((params.N,)) 90 | params.inh_ou = np.zeros((params.N,)) 91 |  92 | return params 93 | -------------------------------------------------------------------------------- /neurolib/models/ww/model.py:
-------------------------------------------------------------------------------- 1 | from . import loadDefaultParams as dp 2 | from . import timeIntegration as ti 3 | from ..model import Model 4 | 5 | 6 | class WWModel(Model): 7 | """ 8 | Wong-Wang model. Original version and reduced version. 9 | 10 | Main reference: 11 | [original] Wong, K. F., & Wang, X. J. (2006). A recurrent network mechanism 12 | of time integration in perceptual decisions. Journal of Neuroscience, 26(4), 13 | 1314-1328. 14 | 15 | Additional references: 16 | [reduced] Deco, G., Ponce-Alvarez, A., Mantini, D., Romani, G. L., Hagmann, 17 | P., & Corbetta, M. (2013). Resting-state functional connectivity emerges 18 | from structurally and dynamically shaped slow linear fluctuations. Journal 19 | of Neuroscience, 33(27), 11239-11252. 20 | 21 | [original] Deco, G., Ponce-Alvarez, A., Hagmann, P., Romani, G. L., Mantini, 22 | D., & Corbetta, M. (2014). How local excitation–inhibition ratio impacts the 23 | whole brain dynamics. Journal of Neuroscience, 34(23), 7886-7898. 24 | """ 25 | 26 | name = "wongwang" 27 | description = "Wong-Wang neural mass model" 28 | 29 | init_vars = ["r_exc", "r_inh", "ses_init", "sis_init", "exc_ou", "inh_ou"] 30 | state_vars = ["r_exc", "r_inh", "se", "si", "exc_ou", "inh_ou"] 31 | output_vars = ["r_exc", "r_inh", "se", "si"] 32 | default_output = "r_exc" 33 | 34 | def __init__(self, params=None, Cmat=None, Dmat=None, seed=None): 35 | 36 | self.Cmat = Cmat 37 | self.Dmat = Dmat 38 | self.seed = seed 39 | 40 | # the integration function must be passed 41 | integration = ti.timeIntegration 42 | 43 | # load default parameters if none were given 44 | if params is None: 45 | params = dp.loadDefaultParams(Cmat=self.Cmat, Dmat=self.Dmat, seed=self.seed) 46 | 47 | # Initialize base class Model 48 | super().__init__(integration=integration, params=params) 49 | -------------------------------------------------------------------------------- /neurolib/optimize/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neurolib-dev/neurolib/9b6b2b8f082c0cfa212f05576ead55bf23046d6f/neurolib/optimize/__init__.py -------------------------------------------------------------------------------- /neurolib/optimize/evolution/__init__.py: -------------------------------------------------------------------------------- 1 | from .evolution import Evolution 2 | -------------------------------------------------------------------------------- /neurolib/optimize/exploration/__init__.py: -------------------------------------------------------------------------------- 1 | from .exploration import BoxSearch 2 | -------------------------------------------------------------------------------- /neurolib/utils/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neurolib-dev/neurolib/9b6b2b8f082c0cfa212f05576ead55bf23046d6f/neurolib/utils/__init__.py -------------------------------------------------------------------------------- /neurolib/utils/model_utils.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | 4 | def adjustArrayShape(original, target): 5 | """ 6 | Tiles and then cuts an array (or list or float) such that 7 | it has the same shape as target at the end. 8 | This is used to make sure that any input parameter like external current has 9 | the same shape as the rate array. 
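:param original: scalar, list, or array to be tiled and cut (e.g. an external input)
:param target: array whose shape should be matched, e.g. the rate array of shape (N, t)
:return: array with the same shape as `target`
:rtype: numpy.ndarray

Example (illustrative): adjustArrayShape(1.0, np.zeros((2, 5))) returns a (2, 5) array of ones.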
10 | """ 11 | 12 | # make an ext_exc_current ARRAY from a LIST or INT 13 | if not hasattr(original, "__len__"): 14 | original = [original] 15 | original = np.array(original) 16 | 17 | # repeat original in y until larger (or same size) as target 18 | 19 | # tile until N 20 | 21 | # either (x,) shape or (y,x) shape 22 | if len(original.shape) == 1: 23 | # if original.shape[0] > 1: 24 | rep_y = target.shape[0] 25 | elif target.shape[0] > original.shape[0]: 26 | rep_y = int(target.shape[0] / original.shape[0]) + 1 27 | else: 28 | rep_y = 1 29 | 30 | # tile once so the array has shape (N,1) 31 | original = np.tile(original, (rep_y, 1)) 32 | 33 | # tile until t 34 | 35 | if target.shape[1] > original.shape[1]: 36 | rep_x = int(target.shape[1] / original.shape[1]) + 1 37 | else: 38 | rep_x = 1 39 | original = np.tile(original, (1, rep_x)) 40 | 41 | # cut from end because the beginning can be initial condition 42 | original = original[: target.shape[0], -target.shape[1] :] 43 | 44 | return original 45 | 46 | 47 | def computeDelayMatrix(lengthMat, signalV, segmentLength=1): 48 | """ 49 | Compute the delay matrix from the fiber length matrix and the signal 50 | velocity 51 | 52 | :param lengthMat: A matrix containing the connection length in 53 | segment 54 | :param signalV: Signal velocity in m/s 55 | :param segmentLength: Length of a single segment in mm 56 | 57 | :returns: A matrix of connexion delay in ms 58 | """ 59 | 60 | normalizedLenMat = lengthMat * segmentLength 61 | if signalV > 0: 62 | Dmat = normalizedLenMat / signalV # Interareal delays in ms 63 | else: 64 | Dmat = lengthMat * 0.0 65 | return Dmat 66 | -------------------------------------------------------------------------------- /neurolib/utils/paths.py: -------------------------------------------------------------------------------- 1 | """ 2 | This file is a configuration file that contains default directories for saving data 3 | """ 4 | import os 5 | 6 | HDF_DIR = "./data/hdf/" 7 | FIGURES_DIR = "./data/figures/" 8 | 9 | PYPET_LOGGING_CONFIG = os.path.join(os.path.dirname(__file__), "pypet_logging.ini") 10 | -------------------------------------------------------------------------------- /neurolib/utils/plot_oc.py: -------------------------------------------------------------------------------- 1 | import matplotlib.pyplot as plt 2 | import numpy as np 3 | 4 | colors = ["red", "blue", "green", "orange"] 5 | 6 | 7 | def plot_oc_singlenode( 8 | duration, 9 | dt, 10 | state, 11 | target, 12 | control, 13 | orig_input, 14 | cost_array=(), 15 | plot_state_vars=[0, 1], 16 | plot_control_vars=[0, 1], 17 | ): 18 | """Plot target and controlled dynamics for a single node. 19 | :param duration: Duration of simulation (in ms). 20 | :type duration: float 21 | :param dt: Time discretization (in ms). 22 | :type dt: float 23 | :param state: The state of the system controlled with the found oc-input. 24 | :type state: np.ndarray 25 | :param target: The target state. 26 | :type target: np.ndarray 27 | :param control: The control signal found by the oc-algorithm. 28 | :type control: np.ndarray 29 | :param orig_input: The inputs that were used to generate target time series. 30 | :type orig_input: np.ndarray 31 | :param cost_array: Array of costs in optimization iterations. 
32 | :type cost_array: np.ndarray, optional 33 | :param plot_state_vars: List of indices of state variables that should be plotted 34 | :type plot_state_vars: List, optional 35 | :param plot_control_vars: List of indices of control variables that should be plotted 36 | :type plot_control_vars: List, optional 37 | 38 | """ 39 | fig, ax = plt.subplots(3, 1, figsize=(8, 6), constrained_layout=True) 40 | 41 | # Plot the target (dashed line) and unperturbed activity 42 | t_array = np.arange(0, duration + dt, dt) 43 | 44 | # Plot the controlled state and the initial/ original state (dashed line) 45 | for v in plot_state_vars: 46 | ax[0].plot( 47 | t_array, state[0, v, :], label="state var " + str(v), color=colors[v] 48 | ) 49 | ax[0].plot( 50 | t_array, 51 | target[0, v, :], 52 | linestyle="dashed", 53 | label="target var " + str(v), 54 | color=colors[v], 55 | ) 56 | ax[0].legend(loc="upper right") 57 | ax[0].set_title("Activity without stimulation and target activity") 58 | 59 | # Plot the computed control signal and the initial/ original control signal (dashed line) 60 | for v in plot_control_vars: 61 | ax[1].plot( 62 | t_array, 63 | control[0, v, :], 64 | label="stimulation var " + str(v), 65 | color=colors[v], 66 | ) 67 | ax[1].plot( 68 | t_array, 69 | orig_input[0, v, :], 70 | linestyle="dashed", 71 | label="input var " + str(v), 72 | color=colors[v], 73 | ) 74 | ax[1].legend(loc="upper right") 75 | ax[1].set_title("Active stimulation and input stimulation") 76 | ax[2].plot(cost_array) 77 | ax[2].set_title("Cost throughout optimization.") 78 | plt.show() 79 | 80 | 81 | def plot_oc_network( 82 | N, 83 | duration, 84 | dt, 85 | state, 86 | target, 87 | control, 88 | orig_input, 89 | cost_array=(), 90 | step_array=(), 91 | plot_state_vars=[0, 1], 92 | plot_control_vars=[0, 1], 93 | ): 94 | """Plot target and controlled dynamics for a network of N nodes. 95 | :param N: Number of nodes in the network. 96 | :type N: int 97 | :param duration: Duration of simulation (in ms). 98 | :type duration: float 99 | :param dt: Time discretization (in ms). 100 | :type dt: float 101 | :param state: The state of the system controlled with the found oc-input. 102 | :type state: np.ndarray 103 | :param target: The target state. 104 | :type target: np.ndarray 105 | :param control: The control signal found by the oc-algorithm. 106 | :type control: np.ndarray 107 | :param orig_input: The inputs that were used to generate target time series. 108 | :type orig_input: np.ndarray 109 | :param cost_array: Array of costs in optimization iterations. 110 | :type cost_array: np.ndarray, optional 111 | :param step_array: Array of step sizes in optimization iterations. 
112 | :type step_array: np.ndarray, optional 113 | :param plot_state_vars: List of indices of state variables that should be plotted 114 | :type plot_state_vars: List, optional 115 | :param plot_control_vars: List of indices of control variables that should be plotted 116 | :type plot_control_vars: List, optional 117 | """ 118 |  119 | t_array = np.arange(0, duration + dt, dt) 120 | fig, ax = plt.subplots(3, N, figsize=(12, 8), constrained_layout=True) 121 |  122 | # Plot the controlled state and the initial/ original state (dashed line) 123 | for n in range(N): 124 | for v in plot_state_vars: 125 | ax[0, n].plot( 126 | t_array, state[n, v, :], label="state var " + str(v), color=colors[v] 127 | ) 128 | ax[0, n].plot( 129 | t_array, 130 | target[n, v, :], 131 | linestyle="dashed", 132 | label="target var " + str(v), 133 | color=colors[v], 134 | ) 135 | # ax[0, n].legend(loc="upper right") 136 | ax[0, n].set_title(f"Activity and target, node {n}") 137 |  138 | # Plot the computed control signal and the initial/ original control signal (dashed line) 139 | for v in plot_control_vars: 140 | ax[1, n].plot( 141 | t_array, 142 | control[n, v, :], 143 | label="stimulation var " + str(v), 144 | color=colors[v], 145 | ) 146 | ax[1, n].plot( 147 | t_array, 148 | orig_input[n, v, :], 149 | linestyle="dashed", 150 | label="input var " + str(v), 151 | color=colors[v], 152 | ) 153 | ax[1, n].set_title(f"Stimulation and input, node {n}") 154 |  155 | ax[2, 0].plot(cost_array) 156 | ax[2, 0].set_title("Cost throughout optimization.") 157 | ax[2, 1].plot(step_array) 158 | ax[2, 1].set_title("Step size throughout optimization.") 159 | ax[2, 1].set_ylim(bottom=0, top=None) 160 |  161 |  162 | plt.show() 163 | -------------------------------------------------------------------------------- /neurolib/utils/pypetUtils.py: -------------------------------------------------------------------------------- 1 | import h5py 2 | import pypet 3 | import pathlib 4 | import logging 5 | import copy 6 |  7 | from .collections import dotdict 8 |  9 |  10 | def getTrajectorynamesInFile(filename): 11 | """ 12 | Return a list of all pypet trajectory names in a given hdf5 file. 13 |  14 | :param filename: Name of the hdf file 15 | :type filename: str 16 |  17 | :return: List of strings containing the trajectory names 18 | :rtype: list[str] 19 | """ 20 | assert pathlib.Path(filename).exists(), f"{filename} does not exist!" 21 | hdf = h5py.File(filename, "r") 22 | all_traj_names = list(hdf.keys()) 23 | hdf.close() 24 | return all_traj_names 25 |  26 |  27 | def loadPypetTrajectory(filename, trajectoryName): 28 | """Read HDF file with simulation results and return the chosen trajectory. 29 |  30 | :param filename: HDF file path 31 | :type filename: str 32 |  33 | :return: pypet trajectory 34 | """ 35 | assert pathlib.Path(filename).exists(), f"{filename} does not exist!" 36 | logging.info(f"Loading results from {filename}") 37 |  38 | # if trajectoryName is not specified, load the most recent trajectory 39 | if trajectoryName is None: 40 | trajectoryName = getTrajectorynamesInFile(filename)[-1] 41 | logging.info(f"Analyzing trajectory {trajectoryName}") 42 |  43 | pypetTrajectory = pypet.Trajectory(trajectoryName, add_time=False) 44 | pypetTrajectory.f_load(trajectoryName, filename=filename, force=True) 45 | pypetTrajectory.v_auto_load = True 46 | return pypetTrajectory 47 |  48 |  49 | def getRun(runId, pypetTrajectory, pypetShortNames=True): 50 | """Load the simulated data of a run and its parameters from a pypetTrajectory.
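A typical (illustrative) call chain: getRun(0, loadPypetTrajectory("results.hdf", None)), where "results.hdf" stands in for the HDF file written by an exploration.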
51 | 52 | :param runId: ID of the run 53 | :type runId: int 54 | :param pypetTrajectory: Pypet trajectory to get run from. 55 | :type pypetTrajectory: pypet.Trajectory 56 | :param pypetShortNames: Use pypet short names as keys for the results dictionary. Use if you are experiencing errors due to natural naming collisions. 57 | :type pypetShortNames: bool 58 | 59 | :return: Dictionary with simulated data and parameters of the run. 60 | :type return: dict 61 | """ 62 | exploredParameters = pypetTrajectory.f_get_explored_parameters() 63 | niceParKeys = [p.split(".")[-1] for p in exploredParameters.keys()] 64 | 65 | pypetTrajectory.results[runId].f_load() 66 | result = pypetTrajectory.results[runId].f_to_dict(fast_access=True, short_names=pypetShortNames) 67 | pypetTrajectory.results[runId].f_remove() 68 | 69 | # convert to dotdict 70 | result = dotdict(result) 71 | 72 | # Postprocess result keys if pypet short names aren't used 73 | # Before: results.run_00000001.outputs.rates_inh 74 | # After: outputs.rates_inh 75 | if pypetShortNames == False: 76 | new_dict = {} 77 | for key, value in result.items(): 78 | new_key = "".join(key.split(".", 2)[2:]) 79 | new_dict[new_key] = result[key] 80 | result = copy.deepcopy(new_dict) 81 | 82 | # add parameters of this run 83 | result["params"] = {} 84 | 85 | for nicep, p in zip(niceParKeys, exploredParameters.keys()): 86 | result["params"][nicep] = exploredParameters[p].f_get_range()[runId] 87 | 88 | return result 89 | -------------------------------------------------------------------------------- /neurolib/utils/pypet_logging.ini: -------------------------------------------------------------------------------- 1 | [loggers] 2 | keys=root 3 | 4 | [logger_root] 5 | handlers=stream 6 | level=INFO 7 | 8 | [formatters] 9 | keys=stream 10 | 11 | [formatter_file] 12 | format=%(asctime)s %(name)s %(levelname)-8s %(message)s 13 | 14 | [formatter_stream] 15 | format=%(processName)-10s %(name)s %(levelname)-8s %(message)s 16 | 17 | [handlers] 18 | keys=stream 19 | 20 | [handler_file_error] 21 | class=FileHandler 22 | level=ERROR 23 | args=('logs/$traj/$env/ERROR.txt',) 24 | formatter=file 25 | 26 | [handler_file_main] 27 | class=FileHandler 28 | args=('logs/$traj/$env/LOG.txt',) 29 | formatter=file 30 | 31 | [handler_stream] 32 | class=StreamHandler 33 | level=INFO 34 | args=() 35 | formatter=stream 36 | 37 | 38 | [multiproc_loggers] 39 | keys=root 40 | 41 | [multiproc_logger_root] 42 | handlers=stream 43 | level=ERROR 44 | 45 | [multiproc_formatters] 46 | keys=file 47 | 48 | [multiproc_formatter_file] 49 | format=%(asctime)s %(name)s %(levelname)-8s %(message)s 50 | 51 | [multiproc_handlers] 52 | keys=stream 53 | 54 | 55 | [multiproc_handler_stream] 56 | class=StreamHandler 57 | level=ERROR 58 | args=() 59 | formatter=file 60 | 61 | [multiproc_handler_file_error] 62 | class=FileHandler 63 | level=ERROR 64 | args=('logs/$traj/$env/$run_$host_$proc_ERROR.txt',) 65 | formatter=file 66 | 67 | [multiproc_handler_file_main] 68 | class=FileHandler 69 | args=('logs/$traj/$env/$run_$host_$proc_LOG.txt',) 70 | formatter=file 71 | 72 | -------------------------------------------------------------------------------- /neurolib/utils/saver.py: -------------------------------------------------------------------------------- 1 | """ 2 | Saving model output. 
3 | """ 4 | 5 | import json 6 | 7 | import pickle 8 | from copy import deepcopy 9 | 10 | import os 11 | import numpy as np 12 | import xarray as xr 13 | 14 | 15 | def save_to_pickle(datafield, filename): 16 | """ 17 | Save datafield to pickle file. Keep in mind that restoring a pickle 18 | requires that the internal structure of the types for the pickled data 19 | remain unchanged, o.e. not recommended for long-term storage. 20 | 21 | :param datafield: datafield or dataarray to save 22 | :type datafield: xr.Dataset|xr.DataArray 23 | :param filename: filename 24 | :type filename: str 25 | """ 26 | assert isinstance(datafield, (xr.DataArray, xr.Dataset)) 27 | if not filename.endswith(".pkl"): 28 | filename += ".pkl" 29 | with open(filename, "wb") as handle: 30 | pickle.dump(datafield, handle, protocol=pickle.HIGHEST_PROTOCOL) 31 | 32 | 33 | def save_to_netcdf(datafield, filename): 34 | """ 35 | Save datafield to NetCDF. NetCDF cannot handle structured attributes, 36 | hence they are stripped and if there are some, they are saved as json 37 | with the same filename. 38 | 39 | :param datafield: datafield or dataarray to save 40 | :type datafield: xr.Dataset|xr.DataArray 41 | :param filename: filename 42 | :type filename: str 43 | """ 44 | assert isinstance(datafield, (xr.DataArray, xr.Dataset)) 45 | datafield = deepcopy(datafield) 46 | if not filename.endswith(".nc"): 47 | filename += ".nc" 48 | if datafield.attrs: 49 | attributes_copy = deepcopy(datafield.attrs) 50 | _save_attrs_json(attributes_copy, filename) 51 | datafield.attrs = {} 52 | datafield.to_netcdf(filename) 53 | 54 | 55 | def _save_attrs_json(attrs, filename): 56 | """ 57 | Save attributes to json. 58 | 59 | :param attrs: attributes to save 60 | :type attrs: dict 61 | :param filename: filename for the json file 62 | :type filename: str 63 | """ 64 | 65 | def sanitise_attrs(attrs): 66 | sanitised = {} 67 | for k, v in attrs.items(): 68 | if isinstance(v, list): 69 | sanitised[k] = [ 70 | sanitise_attrs(vv) if isinstance(vv, dict) else vv.tolist() if isinstance(vv, np.ndarray) else vv 71 | for vv in v 72 | ] 73 | elif isinstance(v, dict): 74 | sanitised[k] = sanitise_attrs(v) 75 | elif isinstance(v, np.ndarray): 76 | sanitised[k] = v.tolist() 77 | else: 78 | sanitised[k] = v 79 | return sanitised 80 | 81 | filename = os.path.splitext(filename)[0] + ".json" 82 | with open(filename, "w") as handle: 83 | json.dump(sanitise_attrs(attrs), handle) 84 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | psutil 2 | h5py 3 | numba 4 | numpy 5 | scipy 6 | xarray 7 | pandas 8 | tqdm 9 | pypet 10 | deap 11 | dill 12 | sympy 13 | jitcdde 14 | dpath 15 | -------------------------------------------------------------------------------- /resources/adex.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neurolib-dev/neurolib/9b6b2b8f082c0cfa212f05576ead55bf23046d6f/resources/adex.png -------------------------------------------------------------------------------- /resources/aln.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neurolib-dev/neurolib/9b6b2b8f082c0cfa212f05576ead55bf23046d6f/resources/aln.png -------------------------------------------------------------------------------- /resources/brain_slow_waves_small.gif: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/neurolib-dev/neurolib/9b6b2b8f082c0cfa212f05576ead55bf23046d6f/resources/brain_slow_waves_small.gif -------------------------------------------------------------------------------- /resources/element_logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neurolib-dev/neurolib/9b6b2b8f082c0cfa212f05576ead55bf23046d6f/resources/element_logo.png -------------------------------------------------------------------------------- /resources/evolution_animated.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neurolib-dev/neurolib/9b6b2b8f082c0cfa212f05576ead55bf23046d6f/resources/evolution_animated.gif -------------------------------------------------------------------------------- /resources/evolution_minimal.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neurolib-dev/neurolib/9b6b2b8f082c0cfa212f05576ead55bf23046d6f/resources/evolution_minimal.png -------------------------------------------------------------------------------- /resources/evolution_tree.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neurolib-dev/neurolib/9b6b2b8f082c0cfa212f05576ead55bf23046d6f/resources/evolution_tree.png -------------------------------------------------------------------------------- /resources/evolutionary-algorithm.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neurolib-dev/neurolib/9b6b2b8f082c0cfa212f05576ead55bf23046d6f/resources/evolutionary-algorithm.png -------------------------------------------------------------------------------- /resources/exploration_aln.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neurolib-dev/neurolib/9b6b2b8f082c0cfa212f05576ead55bf23046d6f/resources/exploration_aln.png -------------------------------------------------------------------------------- /resources/gw_data.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neurolib-dev/neurolib/9b6b2b8f082c0cfa212f05576ead55bf23046d6f/resources/gw_data.png -------------------------------------------------------------------------------- /resources/gw_simulated.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neurolib-dev/neurolib/9b6b2b8f082c0cfa212f05576ead55bf23046d6f/resources/gw_simulated.png -------------------------------------------------------------------------------- /resources/icon_block.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neurolib-dev/neurolib/9b6b2b8f082c0cfa212f05576ead55bf23046d6f/resources/icon_block.png -------------------------------------------------------------------------------- /resources/pipeline.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neurolib-dev/neurolib/9b6b2b8f082c0cfa212f05576ead55bf23046d6f/resources/pipeline.jpg -------------------------------------------------------------------------------- /resources/readme_header.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/neurolib-dev/neurolib/9b6b2b8f082c0cfa212f05576ead55bf23046d6f/resources/readme_header.png -------------------------------------------------------------------------------- /resources/single_timeseries.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neurolib-dev/neurolib/9b6b2b8f082c0cfa212f05576ead55bf23046d6f/resources/single_timeseries.png -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | import setuptools 2 | 3 | from os import path 4 | 5 | this_directory = path.abspath(path.dirname(__file__)) 6 | with open(path.join(this_directory, "README.md"), encoding="utf-8") as f: 7 | long_description = f.read() 8 | 9 | with open("requirements.txt") as f: 10 | requirements = f.read().splitlines() 11 | 12 | setuptools.setup( 13 | name="neurolib", 14 | version="0.6.1", 15 | description="Easy whole-brain neural mass modeling", 16 | long_description=long_description, 17 | long_description_content_type="text/markdown", 18 | url="https://github.com/neurolib-dev/neurolib", 19 | author="Caglar Cakan", 20 | author_email="cakan@ni.tu-berlin.de", 21 | license="MIT", 22 | packages=setuptools.find_packages(), 23 | classifiers=[ 24 | "Programming Language :: Python :: 3", 25 | "License :: OSI Approved :: MIT License", 26 | "Operating System :: OS Independent", 27 | ], 28 | python_requires=">=3.6", 29 | install_requires=requirements, 30 | include_package_data=True, 31 | ) 32 | -------------------------------------------------------------------------------- /tests/control/optimal_control/test_oc_hopf.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import numpy as np 3 | 4 | from neurolib.models.hopf import HopfModel 5 | from neurolib.control.optimal_control import oc_hopf 6 | 7 | import test_oc_utils as test_oc_utils 8 | 9 | p = test_oc_utils.params 10 | 11 | 12 | class TestHopf(unittest.TestCase): 13 | """ 14 | Test hopf in neurolib/optimal_control/ 15 | """ 16 | 17 | # tests if the control from OC computation coincides with a random input used for target forward-simulation 18 | # single-node case 19 | def test_1n(self): 20 | print("Test OC in single-node system") 21 | model = HopfModel() 22 | test_oc_utils.setinitzero_1n(model) 23 | model.params["duration"] = p.TEST_DURATION_6 24 | 25 | for input_channel in [0, 1]: 26 | cost_mat = np.zeros((model.params.N, len(model.output_vars))) 27 | control_mat = np.zeros((model.params.N, len(model.state_vars))) 28 | control_mat[0, input_channel] = 1.0 # only allow inputs to input_channel 29 | cost_mat[0, np.abs(input_channel - 1).astype(int)] = 1.0 # only measure other channel 30 | 31 | test_oc_utils.set_input(model, p.ZERO_INPUT_1N_6) 32 | model.params[model.input_vars[input_channel]] = p.TEST_INPUT_1N_6 33 | model.run() 34 | target = test_oc_utils.gettarget_1n(model) 35 | 36 | test_oc_utils.set_input(model, p.ZERO_INPUT_1N_6) 37 | model_controlled = oc_hopf.OcHopf(model, target) 38 | 39 | model_controlled.control = np.concatenate( 40 | [ 41 | control_mat[0, 0] * p.INIT_INPUT_1N_6[:, np.newaxis, :], 42 | control_mat[0, 1] * p.INIT_INPUT_1N_6[:, np.newaxis, :], 43 | ], 44 | axis=1, 45 | ) 46 | model_controlled.update_input() 47 | 48 | control_coincide = False 49 | 50 | for i in range(p.LOOPS): 51 | 
model_controlled.optimize(p.ITERATIONS) 52 | control = model_controlled.control 53 | 54 | c_diff = (np.abs(control[0, input_channel, :] - p.TEST_INPUT_1N_6[0, :]),) 55 | 56 | if np.amax(c_diff) < p.LIMIT_DIFF: 57 | control_coincide = True 58 | break 59 | 60 | if model_controlled.zero_step_encountered: 61 | break 62 | 63 | self.assertTrue(control_coincide) 64 | 65 | # tests if the control from OC computation coincides with a random input used for target forward-simulation 66 | # network case 67 | def test_2n(self): 68 | print("Test OC in 2-node network") 69 | 70 | dmat = np.array([[0.0, p.TEST_DELAY], [p.TEST_DELAY, 0.0]]) 71 | cmat = np.array([[0.0, 1.0], [1.0, 0.0]]) 72 | 73 | model = HopfModel(Cmat=cmat, Dmat=dmat) 74 | test_oc_utils.setinitzero_2n(model) 75 | 76 | cost_mat = np.zeros((model.params.N, len(model.output_vars))) 77 | control_mat = np.zeros((model.params.N, len(model.state_vars))) 78 | control_mat[0, 0] = 1.0 79 | cost_mat[1, 0] = 1.0 80 | 81 | model.params.duration = p.TEST_DURATION_8 82 | 83 | for coupling in ["additive", "diffusive"]: 84 | model.params.coupling = coupling 85 | 86 | model.params["x_ext"] = p.TEST_INPUT_2N_8 87 | model.params["y_ext"] = p.ZERO_INPUT_2N_8 88 | 89 | model.run() 90 | target = test_oc_utils.gettarget_2n(model) 91 | model.params["x_ext"] = p.ZERO_INPUT_2N_8 92 | 93 | model_controlled = oc_hopf.OcHopf( 94 | model, 95 | target, 96 | control_matrix=control_mat, 97 | cost_matrix=cost_mat, 98 | ) 99 | 100 | model_controlled.control = np.concatenate( 101 | [p.INIT_INPUT_2N_8[:, np.newaxis, :], p.ZERO_INPUT_2N_8[:, np.newaxis, :]], axis=1 102 | ) 103 | model_controlled.update_input() 104 | 105 | control_coincide = False 106 | 107 | for i in range(p.LOOPS): 108 | model_controlled.optimize(p.ITERATIONS) 109 | control = model_controlled.control 110 | 111 | c_diff_max = np.amax(np.abs(control[0, 0, :] - p.TEST_INPUT_2N_8[0, :])) 112 | 113 | if c_diff_max < p.LIMIT_DIFF: 114 | control_coincide = True 115 | break 116 | 117 | if model_controlled.zero_step_encountered: 118 | break 119 | 120 | self.assertTrue(control_coincide) 121 | 122 | # Arbitrary network and control setting, get_xs() returns correct array shape (despite initial values array longer than 1) 123 | def test_get_xs(self): 124 | print("Test state shape agrees with target shape") 125 | 126 | cmat = np.array([[0.0, 1.0], [1.0, 0.0]]) 127 | dmat = np.array([[0.0, 0.0], [0.0, 0.0]]) # no delay 128 | model = HopfModel(Cmat=cmat, Dmat=dmat) 129 | model.params.duration = p.TEST_DURATION_6 130 | 131 | model.params["x_ext"] = p.TEST_INPUT_2N_6 132 | model.params["y_ext"] = -p.TEST_INPUT_2N_6 133 | 134 | target = np.ones((model.params.N, 2, p.TEST_INPUT_2N_6.shape[1])) 135 | 136 | model_controlled = oc_hopf.OcHopf( 137 | model, 138 | target, 139 | ) 140 | 141 | model_controlled.optimize(1) 142 | xs = model_controlled.get_xs() 143 | self.assertTrue(xs.shape == target.shape) 144 | 145 | 146 | if __name__ == "__main__": 147 | unittest.main() 148 | -------------------------------------------------------------------------------- /tests/control/optimal_control/test_oc_utils.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from neurolib.utils.collections import dotdict 3 | 4 | params = dotdict({}) 5 | 6 | params.LIMIT_DIFF = 1e-4 7 | params.LIMIT_DIFF_ID = 1e-12 8 | params.TEST_DURATION_6 = 0.6 9 | params.TEST_DURATION_8 = 0.8 10 | params.TEST_DURATION_10 = 1.0 11 | params.TEST_DURATION_12 = 1.2 12 | params.TEST_DELAY = 0.2 13 | params.ITERATIONS = 
10000 14 | params.LOOPS = 100 15 | 16 | ################################################### 17 | ZERO_INPUT_1N_6 = np.zeros((1, 1 + int(np.around(params.TEST_DURATION_6 / 0.1, 1)))) 18 | TEST_INPUT_1N_6 = ZERO_INPUT_1N_6.copy() 19 | INIT_INPUT_1N_6 = ZERO_INPUT_1N_6.copy() 20 | 21 | TEST_INPUT_1N_6[0, 1] = 2.0 # no negative values because rate inputs should be positive 22 | TEST_INPUT_1N_6[0, 2] = 0.5 23 | TEST_INPUT_1N_6[0, 3] = 1.0 24 | 25 | INIT_INPUT_1N_6[0, 1] = TEST_INPUT_1N_6[0, 1] - 1e-2 26 | INIT_INPUT_1N_6[0, 2] = TEST_INPUT_1N_6[0, 2] + 1e-3 27 | INIT_INPUT_1N_6[0, 3] = TEST_INPUT_1N_6[0, 3] + 1e-2 28 | 29 | params.ZERO_INPUT_1N_6 = ZERO_INPUT_1N_6 30 | params.TEST_INPUT_1N_6 = TEST_INPUT_1N_6 31 | params.INIT_INPUT_1N_6 = INIT_INPUT_1N_6 32 | 33 | params.INT_INPUT_1N_6 = np.sum(TEST_INPUT_1N_6**2) 34 | 35 | ################################################### 36 | ZERO_INPUT_1N_8 = np.zeros((1, 1 + int(np.around(params.TEST_DURATION_8 / 0.1, 1)))) 37 | TEST_INPUT_1N_8 = ZERO_INPUT_1N_8.copy() 38 | INIT_INPUT_1N_8 = ZERO_INPUT_1N_8.copy() 39 | 40 | TEST_INPUT_1N_8[:, : 1 + int(np.around(params.TEST_DURATION_6 / 0.1, 1))] = TEST_INPUT_1N_6 41 | INIT_INPUT_1N_8[:, : 1 + int(np.around(params.TEST_DURATION_6 / 0.1, 1))] = INIT_INPUT_1N_6 42 | 43 | params.ZERO_INPUT_1N_8 = ZERO_INPUT_1N_8 44 | params.TEST_INPUT_1N_8 = TEST_INPUT_1N_8 45 | params.INIT_INPUT_1N_8 = INIT_INPUT_1N_8 46 | 47 | ################################################### 48 | ZERO_INPUT_1N_10 = np.zeros((1, 1 + int(np.around(params.TEST_DURATION_10 / 0.1, 1)))) 49 | TEST_INPUT_1N_10 = ZERO_INPUT_1N_10.copy() 50 | INIT_INPUT_1N_10 = ZERO_INPUT_1N_10.copy() 51 | 52 | TEST_INPUT_1N_10[:, : 1 + int(np.around(params.TEST_DURATION_6 / 0.1, 1))] = TEST_INPUT_1N_6 53 | INIT_INPUT_1N_10[:, : 1 + int(np.around(params.TEST_DURATION_6 / 0.1, 1))] = INIT_INPUT_1N_6 54 | 55 | params.ZERO_INPUT_1N_10 = ZERO_INPUT_1N_10 56 | params.TEST_INPUT_1N_10 = TEST_INPUT_1N_10 57 | params.INIT_INPUT_1N_10 = INIT_INPUT_1N_10 58 | 59 | ################################################### 60 | ZERO_INPUT_1N_12 = np.zeros((1, 1 + int(np.around(params.TEST_DURATION_12 / 0.1, 1)))) 61 | TEST_INPUT_1N_12 = ZERO_INPUT_1N_12.copy() 62 | INIT_INPUT_1N_12 = ZERO_INPUT_1N_12.copy() 63 | 64 | TEST_INPUT_1N_12[:, : 1 + int(np.around(params.TEST_DURATION_6 / 0.1, 1))] = TEST_INPUT_1N_6 65 | INIT_INPUT_1N_12[:, : 1 + int(np.around(params.TEST_DURATION_6 / 0.1, 1))] = INIT_INPUT_1N_6 66 | 67 | params.ZERO_INPUT_1N_12 = ZERO_INPUT_1N_12 68 | params.TEST_INPUT_1N_12 = TEST_INPUT_1N_12 69 | params.INIT_INPUT_1N_12 = INIT_INPUT_1N_12 70 | 71 | ################################################### 72 | ZERO_INPUT_2N_6 = np.zeros((2, 1 + int(np.around(params.TEST_DURATION_6 / 0.1, 1)))) 73 | TEST_INPUT_2N_6 = ZERO_INPUT_2N_6.copy() 74 | INIT_INPUT_2N_6 = ZERO_INPUT_2N_6.copy() 75 | 76 | TEST_INPUT_2N_6[0, :] = TEST_INPUT_1N_6[0, :] 77 | INIT_INPUT_2N_6[0, :] = INIT_INPUT_1N_6[0, :] 78 | 79 | params.ZERO_INPUT_2N_6 = ZERO_INPUT_2N_6 80 | params.TEST_INPUT_2N_6 = TEST_INPUT_2N_6 81 | params.INIT_INPUT_2N_6 = INIT_INPUT_2N_6 82 | 83 | ################################################### 84 | ZERO_INPUT_2N_8 = np.zeros((2, 1 + int(np.around(params.TEST_DURATION_8 / 0.1, 1)))) 85 | TEST_INPUT_2N_8 = ZERO_INPUT_2N_8.copy() 86 | INIT_INPUT_2N_8 = ZERO_INPUT_2N_8.copy() 87 | 88 | TEST_INPUT_2N_8[0, :] = TEST_INPUT_1N_8[0, :] 89 | INIT_INPUT_2N_8[0, :] = INIT_INPUT_1N_8[0, :] 90 | 91 | params.ZERO_INPUT_2N_8 = ZERO_INPUT_2N_8 92 | params.TEST_INPUT_2N_8 = TEST_INPUT_2N_8 93 | 
params.INIT_INPUT_2N_8 = INIT_INPUT_2N_8 94 | 95 | ################################################### 96 | ZERO_INPUT_2N_10 = np.zeros((2, 1 + int(np.around(params.TEST_DURATION_10 / 0.1, 1)))) 97 | TEST_INPUT_2N_10 = ZERO_INPUT_2N_10.copy() 98 | INIT_INPUT_2N_10 = ZERO_INPUT_2N_10.copy() 99 | 100 | TEST_INPUT_2N_10[0, :] = TEST_INPUT_1N_10[0, :] 101 | INIT_INPUT_2N_10[0, :] = INIT_INPUT_1N_10[0, :] 102 | 103 | params.ZERO_INPUT_2N_10 = ZERO_INPUT_2N_10 104 | params.TEST_INPUT_2N_10 = TEST_INPUT_2N_10 105 | params.INIT_INPUT_2N_10 = INIT_INPUT_2N_10 106 | 107 | ################################################### 108 | ZERO_INPUT_2N_12 = np.zeros((2, 1 + int(np.around(params.TEST_DURATION_12 / 0.1, 1)))) 109 | TEST_INPUT_2N_12 = ZERO_INPUT_2N_12.copy() 110 | INIT_INPUT_2N_12 = ZERO_INPUT_2N_12.copy() 111 | 112 | TEST_INPUT_2N_12[0, :] = TEST_INPUT_1N_12[0, :] 113 | INIT_INPUT_2N_12[0, :] = INIT_INPUT_1N_12[0, :] 114 | 115 | params.ZERO_INPUT_2N_12 = ZERO_INPUT_2N_12 116 | params.TEST_INPUT_2N_12 = TEST_INPUT_2N_12 117 | params.INIT_INPUT_2N_12 = INIT_INPUT_2N_12 118 | 119 | 120 | def gettarget_1n(model): 121 | return np.concatenate( 122 | ( 123 | np.concatenate((model.params[model.init_vars[0]], model.params[model.init_vars[0]]), axis=1)[ 124 | :, :, np.newaxis 125 | ], 126 | np.stack((model[model.state_vars[0]], model[model.state_vars[1]]), axis=1), 127 | ), 128 | axis=2, 129 | ) 130 | 131 | 132 | def gettarget_2n(model): 133 | return np.concatenate( 134 | ( 135 | np.stack( 136 | (model.params[model.init_vars[0]][:, -1], model.params[model.init_vars[1]][:, -1]), 137 | axis=1, 138 | )[:, :, np.newaxis], 139 | np.stack((model[model.state_vars[0]], model[model.state_vars[1]]), axis=1), 140 | ), 141 | axis=2, 142 | ) 143 | 144 | 145 | def setinitzero_1n(model): 146 | for init_var in model.init_vars: 147 | if "ou" in init_var: 148 | continue 149 | model.params[init_var] = np.array([[0.0]]) 150 | 151 | 152 | def setinitzero_2n(model): 153 | for init_var in model.init_vars: 154 | if "ou" in init_var: 155 | continue 156 | model.params[init_var] = np.zeros((2, 1)) 157 | 158 | 159 | def set_input(model, testinput): 160 | for iv in model.input_vars: 161 | model.params[iv] = testinput 162 | -------------------------------------------------------------------------------- /tests/multimodel/base/test_neural_mass.py: -------------------------------------------------------------------------------- 1 | """ 2 | Tests of modelling class. 
3 | """ 4 | 5 | import unittest 6 | 7 | import symengine as se 8 | from neurolib.models.multimodel.builder.base.neural_mass import NeuralMass 9 | from neurolib.utils.stimulus import OrnsteinUhlenbeckProcess, ZeroInput 10 | 11 | 12 | class MassTest(NeuralMass): 13 | required_params = ["a", "b"] 14 | num_state_variables = 1 15 | num_noise_variables = 2 16 | helper_variables = ["helper_test"] 17 | python_callbacks = ["test_callback"] 18 | _noise_input = [ZeroInput(), ZeroInput()] 19 | 20 | 21 | class TestNeuralMass(unittest.TestCase): 22 | 23 | PARAMS = {"a": 1.2, "b": 11.9} 24 | 25 | def test_init(self): 26 | mass = MassTest(self.PARAMS) 27 | self.assertTrue(isinstance(mass, NeuralMass)) 28 | self.assertTrue(isinstance(mass.__str__(), str)) 29 | self.assertEqual(mass.__str__(), mass.__repr__()) 30 | self.assertTrue(isinstance(mass.describe(), dict)) 31 | mass._initialize_state_vector() 32 | self.assertEqual(len(mass.initial_state), mass.num_state_variables) 33 | self.assertTrue(hasattr(mass, "DESCRIPTION_FIELD")) 34 | self.assertTrue(all(hasattr(mass, field) for field in mass.DESCRIPTION_FIELD)) 35 | self.assertTrue(hasattr(mass, "_derivatives")) 36 | self.assertTrue(hasattr(mass, "_validate_params")) 37 | self.assertTrue(hasattr(mass, "_validate_callbacks")) 38 | self.assertTrue(hasattr(mass, "_initialize_state_vector")) 39 | self.assertTrue(all(isinstance(symb, se.Symbol) for symb in mass.helper_symbols.values())) 40 | # callbacks are UndefFunction for now 41 | self.assertTrue(all(isinstance(callback, se.UndefFunction) for callback in mass.callback_functions.values())) 42 | 43 | def test_derivatives(self): 44 | mass = MassTest(self.PARAMS) 45 | self.assertRaises(NotImplementedError, mass._derivatives) 46 | 47 | def test_validate_params(self): 48 | mass = MassTest(self.PARAMS) 49 | self.assertDictEqual(self.PARAMS, mass.params) 50 | 51 | def test_update_params(self): 52 | UPDATE_WITH = {"a": 2.4, "input_0": {"seed": 12}} 53 | 54 | mass = MassTest(self.PARAMS) 55 | mass.index = 0 56 | mass.init_mass(start_idx_for_noise=0) 57 | self.assertEqual(mass.params["a"], self.PARAMS["a"]) 58 | mass.update_params(UPDATE_WITH) 59 | self.assertEqual(mass.params["a"], UPDATE_WITH["a"]) 60 | self.assertEqual(mass.params["input_0"]["seed"], UPDATE_WITH["input_0"]["seed"]) 61 | 62 | def test_init_mass(self): 63 | mass = MassTest(self.PARAMS) 64 | self.assertFalse(mass.initialised) 65 | mass.index = 0 66 | mass.init_mass(start_idx_for_noise=6) 67 | self.assertTrue(mass.initialised) 68 | self.assertListEqual(mass.initial_state, [0.0] * mass.num_state_variables) 69 | self.assertListEqual(mass.noise_input_idx, [6, 7]) 70 | 71 | def test_set_noise_input(self): 72 | mass = MassTest(self.PARAMS) 73 | self.assertTrue(all(isinstance(noise, ZeroInput) for noise in mass.noise_input)) 74 | mass.noise_input = [OrnsteinUhlenbeckProcess(0.0, 0.0, 1.0), ZeroInput()] 75 | self.assertTrue(isinstance(mass.noise_input[0], OrnsteinUhlenbeckProcess)) 76 | self.assertTrue(isinstance(mass.noise_input[1], ZeroInput)) 77 | 78 | def test_unwrap_state_vector(self): 79 | mass = MassTest(self.PARAMS) 80 | mass.idx_state_var = 0 81 | self.assertTrue(hasattr(mass, "_unwrap_state_vector")) 82 | state_vec = mass._unwrap_state_vector() 83 | self.assertTrue(isinstance(state_vec, list)) 84 | self.assertEqual(len(state_vec), mass.num_state_variables) 85 | self.assertTrue(all(isinstance(vec, se.Function) for vec in state_vec)) 86 | 87 | 88 | if __name__ == "__main__": 89 | unittest.main() 90 | 
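The MassTest fixture above exercises the full construction path of a custom NeuralMass. A minimal usage sketch follows, assuming only the API shown in the test (required_params, the state/noise variable counts, init_mass, update_params, noise_input); the class name, parameter values, and index choices are illustrative and not part of neurolib itself.

# Minimal sketch, mirroring the MassTest fixture above; names and values are illustrative.
from neurolib.models.multimodel.builder.base.neural_mass import NeuralMass
from neurolib.utils.stimulus import OrnsteinUhlenbeckProcess, ZeroInput

class ExampleMass(NeuralMass):
    required_params = ["a", "b"]
    num_state_variables = 1
    num_noise_variables = 2
    helper_variables = []      # none needed for this sketch
    python_callbacks = []      # none needed for this sketch
    _noise_input = [ZeroInput(), ZeroInput()]

mass = ExampleMass({"a": 1.2, "b": 11.9})
mass.index = 0                            # set before init_mass, as in test_update_params above
mass.init_mass(start_idx_for_noise=0)     # allocates noise input indices and the initial state
mass.update_params({"a": 2.4})            # parameters can be changed after initialisation
mass.noise_input = [OrnsteinUhlenbeckProcess(0.0, 0.0, 1.0), ZeroInput()]  # swap noise inputs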
-------------------------------------------------------------------------------- /tests/multimodel/base/test_params.py: -------------------------------------------------------------------------------- 1 | """ 2 | Simple test for parameters. 3 | """ 4 | 5 | import unittest 6 | 7 | import numpy as np 8 | import sympy as sp 9 | from neurolib.models.multimodel.builder.base.params import ( 10 | count_float_params, 11 | float_params_to_individual_symbolic, 12 | float_params_to_vector_symbolic, 13 | ) 14 | 15 | 16 | class TestSymbolicParams(unittest.TestCase): 17 | def test_count_float_params(self): 18 | DICT_IN = {"a": 40.0, "b": 15, "conn": np.random.rand(4, 4), "a.input": 12.5} 19 | length = count_float_params(DICT_IN) 20 | self.assertEqual(length, 2) 21 | 22 | def test_float_params_to_vector_symbolic(self): 23 | DICT_IN = {"a": 40.0, "b": 15, "conn": np.random.rand(4, 4)} 24 | result = float_params_to_vector_symbolic(DICT_IN) 25 | self.assertEqual(len(result), len(DICT_IN)) 26 | self.assertEqual(result.keys(), DICT_IN.keys()) 27 | for k, v in result.items(): 28 | self.assertTrue(isinstance(v, (sp.matrices.expressions.matexpr.MatrixElement, sp.MatrixSymbol))) 29 | if isinstance(v, sp.MatrixSymbol): 30 | self.assertTupleEqual(v.shape, DICT_IN[k].shape) 31 | 32 | def test_float_params_to_individual_symbolic(self): 33 | DICT_IN = {"a": 40.0, "b": 15, "conn": np.random.rand(4, 4)} 34 | result = float_params_to_individual_symbolic(DICT_IN) 35 | self.assertEqual(len(result), len(DICT_IN)) 36 | self.assertEqual(result.keys(), DICT_IN.keys()) 37 | for k, v in result.items(): 38 | self.assertTrue(isinstance(v, (sp.Symbol, np.ndarray))) 39 | if isinstance(v, np.ndarray): 40 | self.assertTupleEqual(v.shape, DICT_IN[k].shape) 41 | self.assertTrue(all([isinstance(element, sp.Symbol) for element in v.flatten()])) 42 | 43 | 44 | if __name__ == "__main__": 45 | unittest.main() 46 | -------------------------------------------------------------------------------- /tests/test_autochunk.py: -------------------------------------------------------------------------------- 1 | import copy 2 | import unittest 3 | import logging 4 | 5 | import numpy as np 6 | import pytest 7 | from neurolib.models.aln import ALNModel 8 | from neurolib.models.fhn import FHNModel 9 | from neurolib.models.hopf import HopfModel 10 | from neurolib.models.thalamus import ThalamicMassModel 11 | from neurolib.models.wc import WCModel 12 | from neurolib.models.ww import WWModel 13 | from neurolib.utils.loadData import Dataset 14 | 15 | 16 | class AutochunkTests(unittest.TestCase): 17 | """ 18 | Base class for autochunk tests. 19 | 20 | Cycles through all models and runs autochunk on them and compares the 21 | results to a normal run. Expect the outputs to be equal. 22 | 23 | All individual model runs are blow this base class. 
24 | """ 25 | 26 | durations = [0.1, 0.5, 10.5, 22.3] 27 | chunksizes = [1, 5, 7, 33, 55, 123] 28 | signalVs = [0, 1, 10, 10000] 29 | 30 | def single_node_test(self, model): 31 | for duration in self.durations: 32 | for chunksize in self.chunksizes: 33 | # full run 34 | m1 = model() 35 | m1.params["duration"] = duration 36 | pars_bak = copy.deepcopy(m1.params) 37 | m1.run() 38 | # chunkwise run 39 | m2 = model() 40 | m2.params = pars_bak.copy() 41 | m2.run(chunkwise=True, chunksize=chunksize, append_outputs=True) 42 | # check 43 | self.assertTupleEqual(m1.output.shape, m2.output.shape) 44 | difference = np.sum(np.abs(m1.output - m2.output)) 45 | print( 46 | f"single node model: {model.name}, duration: {duration}, chunksize: {chunksize}, difference: {difference}" 47 | ) 48 | self.assertAlmostEqual(difference, 0.0) 49 | 50 | def network_test(self, model): 51 | ds = Dataset("hcp") 52 | for duration in self.durations: 53 | for chunksize in self.chunksizes: 54 | for signalV in self.signalVs: 55 | # full run 56 | m1 = model(Cmat=ds.Cmat, Dmat=ds.Dmat) 57 | m1.params.signalV = signalV 58 | m1.params["duration"] = duration 59 | pars_bak = copy.deepcopy(m1.params) 60 | m1.run() 61 | # chunkwise run 62 | m2 = model(Cmat=ds.Cmat, Dmat=ds.Dmat) 63 | m2.params = pars_bak.copy() 64 | m2.run(chunkwise=True, chunksize=chunksize, append_outputs=True) 65 | # check 66 | self.assertTupleEqual(m1.output.shape, m2.output.shape) 67 | difference = np.sum(np.abs(m1.output - m2.output)) 68 | print( 69 | f"network model: {model.name}, duration: {duration}, chunksize: {chunksize}, difference: {difference}" 70 | ) 71 | self.assertAlmostEqual(difference, 0.0) 72 | 73 | 74 | class TestALNAutochunk(AutochunkTests): 75 | def test_single(self): 76 | self.single_node_test(ALNModel) 77 | 78 | def test_network(self): 79 | self.network_test(ALNModel) 80 | 81 | def test_onstep_input_autochunk(self): 82 | """Tests passing an input array to a model.""" 83 | model = ALNModel() 84 | model.params["duration"] = 1000 85 | duration_dt = int(model.params["duration"] / model.params["dt"]) 86 | ous = np.zeros((model.params["N"], duration_dt)) 87 | 88 | # prepare input 89 | inp_exc_current = np.zeros((model.params["N"], duration_dt)) 90 | inp_inh_current = np.zeros((model.params["N"], duration_dt)) 91 | inp_exc_rate = np.zeros((model.params["N"], duration_dt)) 92 | inp_inh_rate = np.zeros((model.params["N"], duration_dt)) 93 | 94 | for n in range(model.params["N"]): 95 | fr = 1 96 | inp_exc_current[n, :] = np.sin(np.linspace(0, fr * 2 * np.pi, duration_dt)) * 0.1 97 | 98 | for i in range(duration_dt): 99 | inputs = [inp_exc_current[:, i], inp_inh_current[:, i], inp_exc_rate[:, i], inp_inh_rate[:, i]] 100 | model.autochunk(inputs=inputs, append_outputs=True) 101 | 102 | 103 | class TestFHNAutochunk(AutochunkTests): 104 | def test_single(self): 105 | self.single_node_test(FHNModel) 106 | 107 | def test_network(self): 108 | self.network_test(FHNModel) 109 | 110 | 111 | class TestHopfAutochunk(AutochunkTests): 112 | def test_single(self): 113 | self.single_node_test(HopfModel) 114 | 115 | def test_network(self): 116 | self.network_test(HopfModel) 117 | 118 | 119 | class TestWCAutochunk(AutochunkTests): 120 | def test_single(self): 121 | self.single_node_test(WCModel) 122 | 123 | def test_network(self): 124 | self.network_test(WCModel) 125 | 126 | 127 | class TestWWAutochunk(AutochunkTests): 128 | def test_single(self): 129 | self.single_node_test(WWModel) 130 | 131 | def test_network(self): 132 | self.network_test(WWModel) 133 | 134 | 135 | 
class TestThalamusAutochunk(AutochunkTests): 136 | @pytest.mark.xfail 137 | def test_single(self): 138 | self.single_node_test(ThalamicMassModel) 139 | 140 | 141 | class ChunkziseImpliesChunksize(unittest.TestCase): 142 | """ 143 | Simply test whether the model runs as expected when 144 | model.run(chunksize=1000) is used without chunkwise=True 145 | """ 146 | 147 | def test_chunksize(self): 148 | model = HopfModel() 149 | chunksize = 100 150 | model.run(chunksize=chunksize) 151 | self.assertEqual(model.output.shape[1], chunksize) 152 | 153 | 154 | if __name__ == "__main__": 155 | unittest.main() 156 | -------------------------------------------------------------------------------- /tests/test_collections.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import unittest 3 | 4 | from neurolib.models.multimodel import MultiModel 5 | from neurolib.models.multimodel.builder.wilson_cowan import WilsonCowanNode 6 | from neurolib.utils.collections import ( 7 | BACKWARD_REPLACE, 8 | FORWARD_REPLACE, 9 | _sanitize_keys, 10 | flat_dict_to_nested, 11 | flatten_nested_dict, 12 | sanitize_dot_dict, 13 | star_dotdict, 14 | unwrap_star_dotdict, 15 | ) 16 | 17 | 18 | class TestCollections(unittest.TestCase): 19 | NESTED_DICT = {"a": {"b": "c", "d": "e"}} 20 | FLAT_DICT_DOT = {"a.b": "c", "a.d": "e"} 21 | PARAM_DICT = { 22 | "mass0": {"a": 0.4, "b": 1.2, "c": "float", "noise": {"b": 12.0}}, 23 | "mass1": {"a": 0.4, "b": 1.2, "c": "int"}, 24 | } 25 | PARAMS_ALL_A = {"mass0.a": 0.4, "mass1.a": 0.4} 26 | PARAMS_ALL_B = {"mass0.b": 1.2, "mass0.noise.b": 12.0, "mass1.b": 1.2} 27 | PARAMS_ALL_B_MINUS = {"mass0.b": 1.2, "mass1.b": 1.2} 28 | PARAMS_ALL_B_MINUS_CHANGED = {"mass0.b": 2.7, "mass1.b": 2.7} 29 | PARAMS_ALL_A_CHANGED = {"mass0.a": 0.7, "mass1.a": 0.7} 30 | 31 | def test_flatten_nested_dict(self): 32 | flat = flatten_nested_dict(self.NESTED_DICT, sep=".") 33 | self.assertDictEqual(flat, self.FLAT_DICT_DOT) 34 | 35 | def test_flat_unflat(self): 36 | flat = flatten_nested_dict(self.NESTED_DICT, sep=".") 37 | unflat = flat_dict_to_nested(flat) 38 | self.assertDictEqual(self.NESTED_DICT, unflat) 39 | 40 | def test_star_dotdict(self): 41 | params = star_dotdict(flatten_nested_dict(self.PARAM_DICT), sep=".") 42 | self.assertTrue(isinstance(params, star_dotdict)) 43 | # try get params by star 44 | self.assertDictEqual(params["*a"], self.PARAMS_ALL_A) 45 | # change params by star 46 | params["*a"] = 0.7 47 | self.assertDictEqual(params["*a"], self.PARAMS_ALL_A_CHANGED) 48 | # delete params 49 | del params["*a"] 50 | self.assertFalse(params["*a"]) 51 | 52 | def test_star_dotdict_minus(self): 53 | params = star_dotdict(flatten_nested_dict(self.PARAM_DICT), sep=".") 54 | self.assertTrue(isinstance(params, star_dotdict)) 55 | # get params by star 56 | self.assertDictEqual(params["*b"], self.PARAMS_ALL_B) 57 | # get params by star and minus 58 | self.assertDictEqual(params["*b|noise"], self.PARAMS_ALL_B_MINUS) 59 | # change params by star and minus 60 | params["*b|noise"] = 2.7 61 | self.assertDictEqual(params["*b|noise"], self.PARAMS_ALL_B_MINUS_CHANGED) 62 | # delete params by star and minus 63 | del params["*b|noise"] 64 | self.assertFalse(params["*b|noise"]) 65 | # check whether the `b` with noise stayed 66 | self.assertEqual(len(params["*b"]), 1) 67 | 68 | def test_sanitize_keys(self): 69 | k = "mass1.tau*|noise" 70 | k_san = _sanitize_keys(k, FORWARD_REPLACE) 71 | self.assertEqual(k_san, k.replace("*", "STAR").replace("|", "MINUS").replace(".", "DOT")) 
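        # Round trip: FORWARD_REPLACE maps "*" -> "STAR", "|" -> "MINUS", "." -> "DOT"
        # (as the expected value in the assertEqual above shows); BACKWARD_REPLACE
        # inverts the mapping, so the original key should be recovered below.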
72 | k_back = _sanitize_keys(k_san, BACKWARD_REPLACE) 73 | self.assertEqual(k, k_back) 74 | 75 | def test_sanitize_dotdict(self): 76 | dct = {"mass1*tau": 2.5, "mass2*tau": 4.1, "mass2.x": 12.0} 77 | should_be = {"mass1STARtau": 2.5, "mass2STARtau": 4.1, "mass2DOTx": 12.0} 78 | dct_san = sanitize_dot_dict(dct, FORWARD_REPLACE) 79 | self.assertDictEqual(dct_san, should_be) 80 | dct_back = sanitize_dot_dict(dct_san, BACKWARD_REPLACE) 81 | self.assertDictEqual(dct_back, dct) 82 | 83 | def test_unwrap_star_dotdict(self): 84 | wc = MultiModel.init_node(WilsonCowanNode()) 85 | dct = {"*tau": 2.5} 86 | should_be = { 87 | "WCnode_0.WCmassEXC_0.tau": 2.5, 88 | "WCnode_0.WCmassEXC_0.input_0.tau": 2.5, 89 | "WCnode_0.WCmassINH_1.tau": 2.5, 90 | "WCnode_0.WCmassINH_1.input_0.tau": 2.5, 91 | } 92 | unwrapped = unwrap_star_dotdict(dct, wc) 93 | self.assertDictEqual(unwrapped, should_be) 94 | 95 | dct = {"STARtau": 2.5} 96 | should_be = { 97 | "WCnode_0.WCmassEXC_0.tau": 2.5, 98 | "WCnode_0.WCmassEXC_0.input_0.tau": 2.5, 99 | "WCnode_0.WCmassINH_1.tau": 2.5, 100 | "WCnode_0.WCmassINH_1.input_0.tau": 2.5, 101 | } 102 | unwrapped = unwrap_star_dotdict(dct, wc, replaced_dict=BACKWARD_REPLACE) 103 | self.assertDictEqual(unwrapped, should_be) 104 | 105 | # test exception with logging message 106 | dct = {"STARtau": 2.5, "*key_not_there": 12.0} 107 | should_be = { 108 | "WCnode_0.WCmassEXC_0.tau": 2.5, 109 | "WCnode_0.WCmassEXC_0.input_0.tau": 2.5, 110 | "WCnode_0.WCmassINH_1.tau": 2.5, 111 | "WCnode_0.WCmassINH_1.input_0.tau": 2.5, 112 | "*key_not_there": 12.0, 113 | } 114 | root_logger = logging.getLogger() 115 | with self.assertLogs(root_logger, level="INFO") as cm: 116 | unwrapped = unwrap_star_dotdict(dct, wc, replaced_dict=BACKWARD_REPLACE) 117 | self.assertDictEqual(unwrapped, should_be) 118 | print(cm.output) 119 | self.assertTrue("INFO:root:Key `*key_not_there` cannot be resolved." 
in cm.output[0]) 120 | 121 | 122 | if __name__ == "__main__": 123 | unittest.main() 124 | -------------------------------------------------------------------------------- /tests/test_datasets.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | from neurolib.utils.loadData import Dataset 4 | 5 | 6 | class TestDatasets(unittest.TestCase): 7 | def test_dataset_gw(self): 8 | ds = Dataset("gw", fcd=True) 9 | self.assertTupleEqual(ds.Cmat.shape, (80, 80)) 10 | self.assertTupleEqual(ds.Dmat.shape, (80, 80)) 11 | self.assertTupleEqual(ds.FCs[0].shape, (80, 80)) 12 | self.assertTrue(len(ds.FCs) == len(ds.BOLDs)) 13 | 14 | def test_dataset_hcp(self): 15 | ds = Dataset("hcp") 16 | ds = Dataset("hcp", normalizeCmats="waytotal") 17 | ds = Dataset("hcp", normalizeCmats="nvoxel") 18 | 19 | self.assertTupleEqual(ds.Cmat.shape, (80, 80)) 20 | self.assertTupleEqual(ds.Dmat.shape, (80, 80)) 21 | self.assertTupleEqual(ds.FCs[0].shape, (80, 80)) 22 | self.assertTrue(len(ds.FCs) == len(ds.BOLDs)) 23 | 24 | 25 | if __name__ == "__main__": 26 | unittest.main() 27 | -------------------------------------------------------------------------------- /tests/test_evolutionUtils.py: -------------------------------------------------------------------------------- 1 | import random 2 | import sys 3 | import unittest 4 | 5 | import neurolib.optimize.evolution.deapUtils as du 6 | import neurolib.optimize.evolution.evolutionaryUtils as eu 7 | import numpy as np 8 | import pytest 9 | from neurolib.optimize.evolution import Evolution 10 | from neurolib.utils.parameterSpace import ParameterSpace 11 | 12 | 13 | class TestEvolutinUtils(unittest.TestCase): 14 | """ 15 | Test functions in neurolib/utils/functions.py 16 | """ 17 | 18 | @classmethod 19 | def setUpClass(cls): 20 | pars = ParameterSpace( 21 | ["mue_ext_mean", "mui_ext_mean", "b"], 22 | [[0.0, 3.0], [0.0, 3.0], [0.0, 100.0]], 23 | ) 24 | fitness_length = 3 25 | evolution = Evolution( 26 | lambda v: v, 27 | pars, 28 | weightList=np.repeat(1, fitness_length), 29 | POP_INIT_SIZE=4, 30 | POP_SIZE=4, 31 | NGEN=2, 32 | filename="TestEvolutionUtils.hdf", 33 | ) 34 | 35 | cls.evolution = evolution 36 | 37 | # fake population 38 | pop = evolution.toolbox.population(n=100) 39 | 40 | for i, p in enumerate(pop): 41 | if random.random() < 0.1: 42 | fitnessesResult = [np.nan] * fitness_length 43 | else: 44 | fitnessesResult = np.random.random(fitness_length) 45 | p.id = i 46 | p.fitness.values = fitnessesResult 47 | p.fitness.score = np.nansum(p.fitness.wvalues) / (len(p.fitness.wvalues)) 48 | p.gIdx = 0 49 | 50 | cls.pop = pop 51 | cls.evolution.pop = pop 52 | cls.evolution.gIdx = 1 53 | 54 | def test_getValidPopulation(self): 55 | self.evolution.getValidPopulation(self.pop) 56 | self.evolution.getInvalidPopulation(self.pop) 57 | 58 | def test_individualToDict(self): 59 | self.evolution.individualToDict(self.pop[0]) 60 | 61 | def test_randomParametersAdaptive(self): 62 | du.randomParametersAdaptive(self.evolution.paramInterval) 63 | 64 | def test_mutateUntilValid(self): 65 | du.mutateUntilValid(self.pop, self.evolution.paramInterval, self.evolution.toolbox, maxTries=10) 66 | 67 | @pytest.mark.skipif(sys.platform == "darwin", reason="plotting does not work on macOS") 68 | def test_plots(self): 69 | _ = pytest.importorskip("matplotlib") 70 | eu.plotPopulation(self.evolution, plotScattermatrix=True) 71 | 72 | 73 | class TestEvolutionCrossover(unittest.TestCase): 74 | def test_all_crossovers(self): 75 | def evo(traj): 76 | 
return (1,), {} 77 | 78 | pars = ParameterSpace(["x"], [[0.0, 4.0]]) 79 | evolution = Evolution(evalFunction=evo, parameterSpace=pars, filename="TestEvolutionCrossover.hdf") 80 | evolution.runInitial() 81 | init_pop = evolution.pop.copy() 82 | 83 | # perform crossover methods 84 | ind1, ind2 = init_pop[:2] 85 | du.cxNormDraw_adapt(ind1, ind2, 0.4) 86 | du.cxUniform_adapt(ind1, ind2, 0.4) 87 | du.cxUniform_normDraw_adapt(ind1, ind2, 0.4) 88 | 89 | 90 | if __name__ == "__main__": 91 | unittest.main() 92 | -------------------------------------------------------------------------------- /tests/test_explorationUtils.py: -------------------------------------------------------------------------------- 1 | import random 2 | import string 3 | import sys 4 | import unittest 5 | 6 | import neurolib.optimize.exploration.explorationUtils as eu 7 | import numpy as np 8 | import pytest 9 | from neurolib.models.aln import ALNModel 10 | from neurolib.optimize.exploration import BoxSearch 11 | from neurolib.utils.loadData import Dataset 12 | from neurolib.utils.parameterSpace import ParameterSpace 13 | 14 | 15 | def randomString(stringLength=10): 16 | """Generate a random string of fixed length""" 17 | letters = string.ascii_lowercase 18 | return "".join(random.choice(letters) for i in range(stringLength)) 19 | 20 | 21 | class TestExplorationUtils(unittest.TestCase): 22 | """ 23 | Test functions in neurolib/optimize/exploration/explorationUtils.py 24 | """ 25 | 26 | @classmethod 27 | def setUpClass(cls): 28 | ds = Dataset("hcp") 29 | model = ALNModel(Cmat=ds.Cmat, Dmat=ds.Dmat) 30 | model.params.duration = 11 * 1000 # ms 31 | model.params.dt = 0.2 32 | parameters = ParameterSpace( 33 | { 34 | "mue_ext_mean": np.linspace(0, 3, 2), 35 | "mui_ext_mean": np.linspace(0, 3, 2), 36 | "b": [0.0, 10.0], 37 | }, 38 | kind="grid", 39 | ) 40 | search = BoxSearch( 41 | model=model, parameterSpace=parameters, filename=f"test_exploration_utils_{randomString(20)}.hdf" 42 | ) 43 | 44 | search.run(chunkwise=True, bold=True) 45 | 46 | search.loadResults() 47 | 48 | cls.model = model 49 | cls.search = search 50 | cls.ds = ds 51 | 52 | def test_processExplorationResults(self): 53 | eu.processExplorationResults(self.search, model=self.model, ds=self.ds, bold_transient=0) 54 | 55 | def test_findCloseResults(self): 56 | eu.findCloseResults(self.search.dfResults, dist=1, mue_ext_mean=0, mui_ext_mean=0.0) 57 | 58 | @pytest.mark.skipif(sys.platform in ["darwin", "win32"], reason="Testing plots does not work on macOS or Windows") 59 | def test_plotExplorationResults(self): 60 | eu.processExplorationResults(self.search, model=self.model, ds=self.ds, bold_transient=0) 61 | 62 | # one_figure = True 63 | # alpha_mask 64 | # contour 65 | eu.plotExplorationResults( 66 | self.search.dfResults, 67 | par1=["mue_ext_mean", "$mue$"], 68 | par2=["mui_ext_mean", "$mui$"], 69 | plot_key="max_" + self.model.default_output, 70 | contour="max_" + self.model.default_output, 71 | alpha_mask="max_" + self.model.default_output, 72 | by=["b"], 73 | by_label=["b"], 74 | plot_key_label="testlabel", 75 | one_figure=True, 76 | ) 77 | 78 | # one_figure = False 79 | eu.plotExplorationResults( 80 | self.search.dfResults, 81 | par1=["mue_ext_mean", "$mue$"], 82 | par2=["mui_ext_mean", "$mui$"], 83 | plot_key="max_" + self.model.default_output, 84 | by_label=["b"], 85 | plot_key_label="testlabel", 86 | one_figure=False, 87 | ) 88 | 89 | @pytest.mark.skipif(sys.platform in ["darwin", "win32"], reason="Testing plots does not work on macOS or Windows") 90 | def 
test_plotRun(self): 91 | eu.plotResult(self.search, 0) 92 | 93 | 94 | if __name__ == "__main__": 95 | unittest.main() 96 | -------------------------------------------------------------------------------- /tests/test_functions.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | import numpy as np 4 | 5 | import neurolib.utils.functions as func 6 | from neurolib.models.aln import ALNModel 7 | from neurolib.utils.loadData import Dataset 8 | 9 | 10 | class TestFunctions(unittest.TestCase): 11 | """ 12 | Test functions in neurolib/utils/functions.py 13 | """ 14 | 15 | @classmethod 16 | def setUpClass(cls): 17 | ds = Dataset("gw") 18 | aln = ALNModel(Cmat=ds.Cmat, Dmat=ds.Dmat) 19 | 20 | # Resting state fits 21 | aln.params["mue_ext_mean"] = 1.57 22 | aln.params["mui_ext_mean"] = 1.6 23 | aln.params["sigma_ou"] = 0.09 24 | aln.params["b"] = 5.0 25 | aln.params["duration"] = 0.2 * 60 * 1000 26 | aln.run(bold=True, chunkwise=True) 27 | 28 | cls.model = aln 29 | cls.ds = Dataset("gw") 30 | 31 | cls.single_node = ALNModel() 32 | 33 | def test_kuramoto(self): 34 | kuramoto = func.kuramoto(self.model.rates_exc[:, ::10], smoothing=1.0) 35 | 36 | def test_fc(self): 37 | FC = func.fc(self.model.BOLD.BOLD) 38 | 39 | def test_fcd(self): 40 | rFCD = func.fcd(self.model.rates_exc, stepsize=100) 41 | 42 | def test_matrix_correlation(self): 43 | FC = func.fc(self.model.BOLD.BOLD) 44 | cc = func.matrix_correlation(FC, self.ds.FCs[0]) 45 | 46 | def test_weighted_correlation(self): 47 | x = self.model.rates_exc[0, :] 48 | y = self.model.rates_exc[1, :] 49 | w = np.ones(x.shape) 50 | cc = func.weighted_correlation(x, y, w) 51 | 52 | def test_ts_kolmogorov(self): 53 | func.ts_kolmogorov( 54 | self.model.rates_exc[::20, :], 55 | self.model.rates_exc, 56 | stepsize=250, 57 | windowsize=30, 58 | ) 59 | 60 | def test_matrix_kolmogorov(self): 61 | func.matrix_kolmogorov( 62 | func.fc(self.model.rates_exc[::20, :]), 63 | func.fc(self.model.rates_exc[::20, :]), 64 | ) 65 | 66 | def test_getPowerSpectrum(self): 67 | fr, pw = func.getPowerSpectrum(self.model.rates_exc[0, :], dt=self.model.params["dt"]) 68 | 69 | def test_getMeanPowerSpectrum(self): 70 | fr, pw = func.getMeanPowerSpectrum(self.model.rates_exc, dt=self.model.params["dt"]) 71 | 72 | 73 | if __name__ == "__main__": 74 | unittest.main() 75 | -------------------------------------------------------------------------------- /tests/test_imports.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | 4 | class TestImports(unittest.TestCase): 5 | @classmethod 6 | def setUpClass(cls): 7 | from neurolib.models.fhn import FHNModel 8 | 9 | fhn = FHNModel() 10 | fhn.run() 11 | cls.model = fhn 12 | 13 | from neurolib.utils.parameterSpace import ParameterSpace 14 | 15 | pars = ParameterSpace({"x": [1, 2]}) 16 | cls.pars = pars 17 | 18 | def test_model_imports(self): 19 | from neurolib.models.hopf import HopfModel 20 | 21 | hopf = HopfModel() 22 | from neurolib.models.aln import ALNModel 23 | 24 | aaln = ALNModel() 25 | from neurolib.models.fhn import FHNModel 26 | 27 | def test_utils_imports(self): 28 | 29 | from neurolib.utils.parameterSpace import ParameterSpace 30 | 31 | pars = ParameterSpace({"x": [1, 2]}) 32 | 33 | from neurolib.utils.signal import Signal 34 | 35 | signal = Signal(self.model.xr()) 36 | 37 | def test_optimize_imports(self): 38 | from neurolib.optimize.evolution import Evolution 39 | 40 | evolution = Evolution(evalFunction=(lambda f: f), 
parameterSpace=self.pars) 41 | 42 | from neurolib.optimize.exploration import BoxSearch 43 | 44 | search = BoxSearch(evalFunction=(lambda f: f), parameterSpace=self.pars) 45 | 46 | 47 | if __name__ == "__main__": 48 | unittest.main() 49 | -------------------------------------------------------------------------------- /tests/test_parameterspace.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import numpy as np 3 | from neurolib.utils.parameterSpace import ParameterSpace 4 | 5 | 6 | class TestParameterSpace(unittest.TestCase): 7 | def test_parameterspace_init(self): 8 | # init from list 9 | _ = ParameterSpace(["a", "b"], [[3], [3]]) 10 | 11 | # init from dict 12 | _ = ParameterSpace({"a": [1, 2], "b": [1, 2]}) 13 | 14 | # init from dict with numpy arrays 15 | _ = ParameterSpace({"a": np.zeros((3)), "b": np.ones((33))}) 16 | 17 | def test_parameterspace_kind(self): 18 | # 'point' 19 | par = ParameterSpace(["a", "b"], [[10], [3.0]]) 20 | self.assertEqual(par.kind, "point") 21 | parametrization = par.get_parametrization() 22 | self.assertTrue(all(len(lst) == 1 for lst in parametrization.values())) 23 | 24 | # 'bound' 25 | par = ParameterSpace(["a", "b"], [[3.0, 5.0], [0.0, 3.0]]) 26 | self.assertEqual(par.kind, "bound") 27 | parametrization = par.get_parametrization() 28 | self.assertTrue(all(len(lst) == 2 for lst in parametrization.values())) 29 | 30 | # 'grid' 31 | par = ParameterSpace(["a", "b"], [[3.0, 3.5, 5.0], [0.0, 3.0]]) 32 | self.assertEqual(par.kind, "grid") 33 | parametrization = par.get_parametrization() 34 | self.assertTrue(all(len(lst) == 6 for lst in parametrization.values())) 35 | 36 | # 'sequence' 37 | par = ParameterSpace(["a", "b"], [[3.0, 3.5, 5.0], [0.0, 3.0]], kind="sequence") 38 | self.assertEqual(par.kind, "sequence") 39 | parametrization = par.get_parametrization() 40 | self.assertTrue(all(len(lst) == 5 for lst in parametrization.values())) 41 | 42 | # `explicit` 43 | par = ParameterSpace(["a", "b"], [[3.0, 3.5, 12.5], [0.0, 3.0, 17.2]], kind="explicit") 44 | self.assertEqual(par.kind, "explicit") 45 | parametrization = par.get_parametrization() 46 | self.assertTrue(all(len(lst) == 3 for lst in parametrization.values())) 47 | 48 | def test_inflate_to_sequence(self): 49 | SHOULD_BE = { 50 | "a": [1, 2, None, None, None, None, None], 51 | "b": [None, None, 3, 4, 5, None, None], 52 | "c": [None, None, None, None, None, 12.0, 54.0], 53 | } 54 | par = ParameterSpace([], []) 55 | param_dict = {"a": [1, 2], "b": [3, 4, 5], "c": [12.0, 54.0]} 56 | 57 | result = par._inflate_to_sequence(param_dict) 58 | self.assertTrue(all(len(lst) == 7 for lst in result.values())) 59 | self.assertDictEqual(result, SHOULD_BE) 60 | 61 | def test_parameterspace_attributes(self): 62 | par = ParameterSpace(["a", "b"], [[10, 8], [3.0]]) 63 | par.a 64 | par["a"] 65 | par.b 66 | par["c"] = [1, 2, 3] 67 | par.lowerBound 68 | par.upperBound 69 | 70 | def test_conversions(self): 71 | par = ParameterSpace({"a": [1, 2], "b": [1, 2]}) 72 | par.named_tuple_constructor 73 | par.named_tuple 74 | 75 | par.dict() 76 | 77 | 78 | if __name__ == "__main__": 79 | unittest.main() 80 | -------------------------------------------------------------------------------- /tests/test_subsampling.py: -------------------------------------------------------------------------------- 1 | import copy 2 | import logging 3 | import unittest 4 | 5 | import numpy as np 6 | from neurolib.models.aln import ALNModel 7 | 8 | 9 | class TestSubsampling(unittest.TestCase): 10 | 
""" 11 | Tests wheteher model outputs are subsampled as expected. 12 | """ 13 | 14 | def test_subsample_aln(self): 15 | model = ALNModel() 16 | model.params["duration"] = 100 # 100 ms 17 | model.params["dt"] = 0.1 # 0.1 ms 18 | # default behaviour should be no subsampling 19 | self.assertIs(model.params.get("sampling_dt", None), None) 20 | model.run() 21 | full_output = model.output.copy() 22 | 23 | # subsample the same model 24 | model.params["sampling_dt"] = 10 # 10 ms 25 | model.run() 26 | subsample_output = model.output.copy() 27 | 28 | sample_every = int(model.params["sampling_dt"] / model.params["dt"]) 29 | self.assertEqual( 30 | full_output[:, ::sample_every].shape[1], subsample_output.shape[1] 31 | ), "Subsampling returned unexpected output shape" 32 | self.assertTrue( 33 | (full_output[:, ::sample_every] == subsample_output).all() 34 | ), "Subsampling returned unexpected output values" 35 | 36 | def test_subsample_aln_chunkwise(self): 37 | model = ALNModel(seed=42) 38 | model.params["sigma_ou"] = 0.0 39 | model.params["duration"] = 1000 # 1000 ms 40 | model.params["dt"] = 0.1 # 0.1 ms 41 | 42 | # default behaviour should be no subsampling 43 | self.assertIs(model.params.get("sampling_dt", None), None) 44 | 45 | model.run(chunksize=3000) 46 | full_output = model.output.copy() 47 | 48 | # subsample the model (same seed) 49 | model = ALNModel(seed=42) 50 | model.params["sigma_ou"] = 0.0 51 | model.params["duration"] = 1000 # 1000 ms 52 | model.params["dt"] = 0.1 # 0.1 ms 53 | model.params["sampling_dt"] = 10.0 # 10 ms 54 | model.run(chunksize=3000) 55 | subsample_output = model.output.copy() 56 | sample_every = int(model.params["sampling_dt"] / model.params["dt"]) 57 | 58 | self.assertEqual( 59 | full_output[:, ::sample_every].shape[1], subsample_output.shape[1] 60 | ), "Subsampling returned unexpected output shape" 61 | self.assertTrue( 62 | (full_output[:, ::sample_every] == subsample_output).all() 63 | ), "Subsampling returned unexpected output values" 64 | 65 | def test_sampling_dt_smaller_than_dt(self): 66 | model = ALNModel() 67 | model.params["dt"] = 10 # 0.1 ms 68 | model.params["sampling_dt"] = 9 69 | with self.assertRaises(AssertionError): 70 | model.run() 71 | 72 | def test_sampling_dt_lower_than_duration(self): 73 | model = ALNModel() 74 | model.params["dt"] = 0.1 # 0.1 ms 75 | model.params["duration"] = 10 76 | model.params["sampling_dt"] = 30 77 | with self.assertRaises(AssertionError): 78 | model.run() 79 | 80 | def test_sampling_dt_invalid(self): 81 | model = ALNModel() 82 | model.params["sampling_dt"] = -30 83 | with self.assertRaises(ValueError): 84 | model.run() 85 | 86 | def test_sampling_dt_divisible_chunksize(self): 87 | model = ALNModel() 88 | model.params["dt"] = 0.1 # 0.1 ms 89 | model.params["sampling_dt"] = 11.11 90 | with self.assertRaises(AssertionError): 91 | model.run(chunksize=3000) 92 | 93 | def test_sampling_dt_divisible_last_chunksize(self): 94 | model = ALNModel() 95 | model.params["dt"] = 0.1 # 0.1 ms 96 | model.params["sampling_dt"] = 0.21 97 | with self.assertRaises(AssertionError): 98 | model.run(chunksize=210) 99 | 100 | def test_sampling_dt_greater_than_chunksize(self): 101 | model = ALNModel() 102 | model.params["dt"] = 0.1 # 0.1 ms 103 | model.params["sampling_dt"] = 30 104 | with self.assertRaises(AssertionError): 105 | model.run(chunksize=210) 106 | 107 | 108 | if __name__ == "__main__": 109 | unittest.main() 110 | --------------------------------------------------------------------------------