├── .gitignore
├── .readthedocs.yaml
├── LICENSE
├── MANIFEST.in
├── README.md
├── docs
│   ├── Makefile
│   ├── make.bat
│   ├── requirements.txt
│   └── source
│       ├── conf.py
│       ├── examples
│       ├── examples.rst
│       ├── implementation.rst
│       ├── index.rst
│       ├── installation.rst
│       └── logo.png
├── examples
│   ├── 2020_moving_beyond_generalization
│   │   ├── Example1.ipynb
│   │   ├── Example2.ipynb
│   │   └── data
│   │       ├── Ex2_datasample1.npz
│   │       └── Ex2_datasample2.npz
│   ├── 2021_learning_nonstationary_dynamics
│   │   ├── Example1.ipynb
│   │   ├── Example2.ipynb
│   │   ├── Example3.ipynb
│   │   └── data
│   │       ├── Ex3_datasample1.npz
│   │       └── Ex3_datasample2.npz
│   └── 2024_the_dynamics_and_geometry
│       ├── Example1_single_neuron_optimization.ipynb
│       ├── Example2_population_optimization.ipynb
│       ├── Example3_gpu_optimization.ipynb
│       ├── Example4_viterbi.ipynb
│       └── data
│           └── data_for_example.zip
├── neuralflow
│   ├── PDE_Solve.py
│   ├── __init__.py
│   ├── base_cuda.py
│   ├── base_optimizer.py
│   ├── c_get_gamma.c
│   ├── c_get_gamma.pyx
│   ├── cuda_kernels.cu
│   ├── data_generation.py
│   ├── feature_complexity
│   │   └── fc_base.py
│   ├── firing_rate_models.py
│   ├── gradients.py
│   ├── grid.py
│   ├── model.py
│   ├── optimization.py
│   ├── peq_models.py
│   ├── settings.py
│   ├── spike_data.py
│   ├── utilities
│   │   ├── __init__.py
│   │   ├── psth.py
│   │   ├── rank_nullspace.py
│   │   ├── spline_padding.py
│   │   └── visualization_functions.py
│   └── viterbi.py
├── pyproject.toml
├── requirements.txt
├── setup.py
└── tests
    ├── EV_problems_library.py
    ├── test_PDESolve.py
    ├── test_gradients.py
    ├── test_model_selection.py
    ├── test_optimization.py
    ├── test_shared_optimization.py
    ├── test_spike_data.py
    ├── test_viterbi.py
    ├── testhelper_PDE_Solve.py
    └── visualization.py
/.gitignore:
--------------------------------------------------------------------------------
1 | # Mac OS stuff
2 | .DS_Store
3 |
4 | # Py-dev stuff
5 | .project
6 | .pydevproject
7 | */.settings
8 |
9 | # Vim stuff
10 | *.swp
11 |
12 | # PyCharm stuff
13 | .idea/
14 |
15 | # Data and testing folders
16 | brain_flow/tests/spikeData/*
17 | venv/*
18 |
19 | # Byte-compiled / optimized / DLL files
20 | __pycache__/
21 | *.py[cod]
22 | *$py.class
23 |
24 | # C extensions
25 | *.so
26 | brain_flow/energy_model/c_get_gamma.c
27 |
28 | # Distribution / packaging
29 | .Python
30 | env/
31 | build/
32 | develop-eggs/
33 | dist/
34 | downloads/
35 | eggs/
36 | .eggs/
37 | lib/
38 | lib64/
39 | parts/
40 | sdist/
41 | var/
42 | *.egg-info/
43 | .installed.cfg
44 | *.egg
45 |
46 | # PyInstaller
47 | # Usually these files are written by a python script from a template
48 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
49 | *.manifest
50 | *.spec
51 |
52 | # Installer logs
53 | pip-log.txt
54 | pip-delete-this-directory.txt
55 |
56 | # Unit test / coverage reports
57 | htmlcov/
58 | .tox/
59 | .coverage
60 | .coverage.*
61 | .cache
62 | nosetests.xml
63 | coverage.xml
64 | *,cover
65 | .hypothesis/
66 |
67 | # Translations
68 | *.mo
69 | *.pot
70 |
71 | # Django stuff:
72 | *.log
73 |
74 | # Sphinx documentation
75 | docs/_build/
76 |
77 | # PyBuilder
78 | target/
79 |
80 | # Ipython Notebook
81 | .ipynb_checkpoints
82 |
83 | # vscode stuff
84 | .vscode/
85 |
86 | # images
87 | *.png
88 |
--------------------------------------------------------------------------------
/.readthedocs.yaml:
--------------------------------------------------------------------------------
1 | version: 2
2 |
3 | # Set the OS, Python version and other tools you might need
4 | build:
5 | os: ubuntu-22.04
6 | tools:
7 | python: "3.11"
8 |
9 | # Build documentation in the "docs/" directory with Sphinx
10 | sphinx:
11 | configuration: docs/source/conf.py
12 |
13 | # install requirements
14 | python:
15 | install:
16 | - requirements: docs/requirements.txt
17 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2024 Engel Lab
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/MANIFEST.in:
--------------------------------------------------------------------------------
1 | recursive-include neuralflow *.cu
2 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # NeuralFlow - version 3
2 |
3 | ## Short description
4 |
5 | Computational framework for modeling neural activity with continuous latent Langevin dynamics.
6 |
7 | Quick installation: ```pip install git+https://github.com/engellab/neuralflow```
8 |
9 | The source code for the following publications:
10 |
11 | 1) **M Genkin, KV Shenoy, C Chandrasekaran, TA Engel, The dynamics and geometry of choice in premotor cortex, bioRxiv (2023)**
12 |
13 | **Link:** https://www.ncbi.nlm.nih.gov/pmc/articles/PMC10401920/
14 |
15 | 2) **Genkin, M., Hughes, O. and Engel, T.A. Learning non-stationary Langevin dynamics from stochastic observations of latent trajectories. Nat Commun 12, 5986 (2021)**.
16 |
17 | **Link:** https://rdcu.be/czqGP
18 |
19 | 3) **Genkin, M., Engel, T.A. Moving beyond generalization to accurate interpretation of flexible models. Nat Mach Intell 2, 674–683 (2020)**.
20 |
21 | **Link:** https://www.nature.com/articles/s42256-020-00242-6/
22 |
23 | **Free access:** https://rdcu.be/b9cW3
24 |
25 | ## Installation
26 | Package only: `pip install git+https://github.com/engellab/neuralflow`
27 |
28 | Package with examples:
29 |
30 | git clone https://github.com/engellab/neuralflow
31 | cd neuralflow
32 | pip install .
33 |
34 | If you have issues with the Cython extension and want to use the precompiled .c file instead, open setup.py and change line 7 to USE_CYTHON = 0.
35 |
36 | ## GPU support
37 |
38 | If your platform has a CUDA-enabled GPU, install the cupy package. Then you can use the
39 | GPU device for optimization.
40 | The package passes unit tests with cupy-cuda12x==12.2.0
41 |
42 | ## Documentation
43 |
44 | https://neuralflow.readthedocs.io/
45 |
46 | ## Getting started
47 |
48 | See the examples in the `examples` folder.
49 |
50 | ## Deep dive
51 |
52 | See the unit tests in the `tests` folder.
53 |
--------------------------------------------------------------------------------
/docs/Makefile:
--------------------------------------------------------------------------------
1 | # Minimal makefile for Sphinx documentation
2 | #
3 |
4 | # You can set these variables from the command line, and also
5 | # from the environment for the first two.
6 | SPHINXOPTS ?=
7 | SPHINXBUILD ?= sphinx-build
8 | SOURCEDIR = source
9 | BUILDDIR = build
10 |
11 | # Put it first so that "make" without argument is like "make help".
12 | help:
13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
14 |
15 | .PHONY: help Makefile
16 |
17 | # Catch-all target: route all unknown targets to Sphinx using the new
18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
19 | %: Makefile
20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
21 |
--------------------------------------------------------------------------------
/docs/make.bat:
--------------------------------------------------------------------------------
1 | @ECHO OFF
2 |
3 | pushd %~dp0
4 |
5 | REM Command file for Sphinx documentation
6 |
7 | if "%SPHINXBUILD%" == "" (
8 | set SPHINXBUILD=sphinx-build
9 | )
10 | set SOURCEDIR=source
11 | set BUILDDIR=build
12 |
13 | if "%1" == "" goto help
14 |
15 | %SPHINXBUILD% >NUL 2>NUL
16 | if errorlevel 9009 (
17 | echo.
18 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
19 | echo.installed, then set the SPHINXBUILD environment variable to point
20 | echo.to the full path of the 'sphinx-build' executable. Alternatively you
21 | echo.may add the Sphinx directory to PATH.
22 | echo.
23 | echo.If you don't have Sphinx installed, grab it from
24 | echo.http://sphinx-doc.org/
25 | exit /b 1
26 | )
27 |
28 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
29 | goto end
30 |
31 | :help
32 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
33 |
34 | :end
35 | popd
36 |
--------------------------------------------------------------------------------
/docs/requirements.txt:
--------------------------------------------------------------------------------
1 | # Additional requirements for building the documentation at Read the Docs
2 | numpydoc
3 | nbsphinx
4 | sphinx-rtd-theme
5 | Cython
6 | numpy
7 | -e .
8 |
--------------------------------------------------------------------------------
/docs/source/conf.py:
--------------------------------------------------------------------------------
1 | # Configuration file for the Sphinx documentation builder.
2 | #
3 | # This file only contains a selection of the most common options. For a full
4 | # list see the documentation:
5 | # https://www.sphinx-doc.org/en/master/usage/configuration.html
6 |
7 | # -- Path setup --------------------------------------------------------------
8 |
9 | # If extensions (or modules to document with autodoc) are in another directory,
10 | # add these directories to sys.path here. If the directory is relative to the
11 | # documentation root, use os.path.abspath to make it absolute, like shown here.
12 | #
13 | # import os
14 | # import sys
15 |
16 |
17 | import sys
18 | import os
19 | on_rtd = os.environ.get('READTHEDOCS') == 'True'
20 | if on_rtd:
21 | sys.path.append('../..')
22 | sys.path.append('..')
23 | else:
24 | #pass
25 | sys.path.insert(0, os.path.abspath('../..'))
26 | #sys.path.insert(0, os.path.abspath('..'))
27 |
28 |
29 | # -- Project information -----------------------------------------------------
30 |
31 |
32 | project = 'neuralflow'
33 | copyright = '2020, Mikhail Genkin and Tatiana Engel'
34 | author = 'Mikhail Genkin and Tatiana Engel'
35 |
36 |
37 |
38 | # The full version, including alpha/beta/rc tags
39 | release = '1.0.0'
40 |
41 |
42 | # -- General configuration ---------------------------------------------------
43 |
44 | # Add any Sphinx extension module names here, as strings. They can be
45 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
46 | # ones.
47 | extensions = ['sphinx.ext.autodoc', 'numpydoc', 'nbsphinx', 'sphinx.ext.mathjax', 'sphinx.ext.napoleon']
48 | html_theme = 'sphinx_rtd_theme'
49 | napoleon_google_docstring = True
50 | napoleon_use_param = False
51 | napoleon_use_ivar = True
52 |
53 | # Add any paths that contain templates here, relative to this directory.
54 | templates_path = ['_templates']
55 |
56 | #numpydoc_show_class_members = False
57 |
58 | # The suffix(es) of source filenames.
59 | # You can specify multiple suffix as a list of string:
60 | #
61 | # source_suffix = ['.rst', '.md']
62 | source_suffix = '.rst'
63 |
64 | # The master toctree document.
65 | master_doc = 'index'
66 |
67 | # The language for content autogenerated by Sphinx. Refer to documentation
68 | # for a list of supported languages.
69 | #
70 | # This is also used if you do content translation via gettext catalogs.
71 | # Usually you set "language" from the command line for these cases.
72 | language = None
73 |
74 | # List of patterns, relative to source directory, that match files and
75 | # directories to ignore when looking for source files.
76 | # This pattern also affects html_static_path and html_extra_path.
77 | exclude_patterns = ['build', 'Thumbs.db', '.DS_Store']
78 |
79 | # autodoc_default_options = {
80 | # 'members': False,
81 | # }
82 |
83 | add_module_names = False
84 |
85 | # The name of the Pygments (syntax highlighting) style to use.
86 | pygments_style = 'sphinx'
87 |
88 | # -- Options for HTML output -------------------------------------------------
89 |
90 | # The theme to use for HTML and HTML Help pages. See the documentation for
91 | # a list of builtin themes.
92 | #
93 | html_theme = 'sphinx_rtd_theme'
94 |
95 | # Theme options are theme-specific and customize the look and feel of a theme
96 | # further. For a list of options available for each theme, see the
97 | # documentation.
98 | #
99 | # html_theme_options = {}
100 |
101 | html_theme_options = {
102 | #'canonical_url': 'https://logomaker.readthedocs.io',
103 | #'analytics_id': 'UA-XXXXXXX-1', # Provided by Google in your dashboard
104 | 'logo_only': False,
105 | 'display_version': True,
106 | 'prev_next_buttons_location': 'none', #'bottom',
107 | 'style_external_links': False,
108 | #'vcs_pageview_mode': '',
109 | 'style_nav_header_background': 'white',
110 | # Toc options
111 | 'collapse_navigation': False,
112 | 'sticky_navigation': True,
113 | 'navigation_depth': 3,
114 | 'includehidden': True,
115 | 'titles_only': False
116 | }
117 |
118 | # Add any paths that contain custom static files (such as style sheets) here,
119 | # relative to this directory. They are copied after the builtin static files,
120 | # so a file named "default.css" will overwrite the builtin "default.css".
121 | #html_static_path = ['_static']
122 |
123 | html_logo = 'logo.png'
124 |
125 | # to customize CSS
126 | # def setup(app):
127 | # app.add_stylesheet('my_theme.css')
128 |
129 | # Custom sidebar templates, must be a dictionary that maps document names
130 | # to template names.
131 | #
132 | # The default sidebars (for documents that don't match any pattern) are
133 | # defined by theme itself. Builtin themes are using these templates by
134 | # default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
135 | # 'searchbox.html']``.
136 | #
137 | # html_sidebars = {}
138 |
139 | # -- Options for HTMLHelp output ---------------------------------------------
140 |
141 | # Output file base name for HTML help builder.
142 | htmlhelp_basename = 'neuralflowdoc'
143 |
144 |
145 | # -- Options for LaTeX output ------------------------------------------------
146 |
147 | latex_elements = {
148 | # The paper size ('letterpaper' or 'a4paper').
149 | #
150 | # 'papersize': 'letterpaper',
151 |
152 | # The font size ('10pt', '11pt' or '12pt').
153 | #
154 | # 'pointsize': '10pt',
155 |
156 | # Additional stuff for the LaTeX preamble.
157 | #
158 | # 'preamble': '',
159 |
160 | # Latex figure (float) alignment
161 | #
162 | # 'figure_align': 'htbp',
163 | }
164 |
165 | # Grouping the document tree into LaTeX files. List of tuples
166 | # (source start file, target name, title,
167 | # author, documentclass [howto, manual, or own class]).
168 | latex_documents = [
169 | (master_doc, 'neuralflow.tex', 'neuralflow Documentation',
170 | 'Mikhail Genkin and Tatiana A. Engel', 'manual'),
171 | ]
172 |
173 |
174 | # -- Options for manual page output ------------------------------------------
175 |
176 | # One entry per manual page. List of tuples
177 | # (source start file, name, description, authors, manual section).
178 | man_pages = [
179 | (master_doc, 'neuralflow', 'neuralflow Documentation',
180 | [author], 1)
181 | ]
182 |
183 | # autodoc_default_options = {
184 | # 'members': True,
185 | # 'special-members': '__call__',
186 | # }
187 |
188 | # -- Options for Texinfo output ----------------------------------------------
189 |
190 | # Grouping the document tree into Texinfo files. List of tuples
191 | # (source start file, target name, title, author,
192 | # dir menu entry, description, category)
193 | texinfo_documents = [
194 | (master_doc, 'neuralflow', 'neuralflow Documentation',
195 | author, 'neuralflow', 'Modeling neural activity with latent Langevin dynamics.',
196 | 'Miscellaneous'),
197 | ]
198 |
--------------------------------------------------------------------------------
/docs/source/examples:
--------------------------------------------------------------------------------
1 | ../../examples
--------------------------------------------------------------------------------
/docs/source/examples.rst:
--------------------------------------------------------------------------------
1 | .. _examples:
2 |
3 | Examples
4 | ========
5 |
6 | Here we provide examples from each of our papers [#Genkin2020]_, [#Genkin2021]_, and [#Genkin2023]_. These examples can also be accessed as Jupyter notebooks from our `GitHub repository <https://github.com/engellab/neuralflow>`_.
7 |
8 | **************************************************************************
9 | Moving beyond generalization to accurate interpretation of flexible models
10 | **************************************************************************
11 |
12 | The first example generates synthetic data from a double-well potential
13 | and uses this data to fit the model potential. It reproduces Figure 3 in the main text [#Genkin2020]_.
14 | The second example demonstrates our feature consistency method for model selection in the case of stationary dynamics. It reproduces Figure 5 in the main text [#Genkin2020]_.
15 |
16 | .. toctree::
17 | :maxdepth: 2
18 |
19 | examples/2020_moving_beyond_generalization/Example1.ipynb
20 | examples/2020_moving_beyond_generalization/Example2.ipynb
21 |
22 | *********************************************************************************************
23 | Learning non-stationary Langevin dynamics from stochastic observations of latent trajectories
24 | *********************************************************************************************
25 |
26 | The first example generates synthetic data from the ramping dynamics and optimizes the model potential on this data. It also demonstrates the importance of various non-stationary components for accurate model inference. It reproduces Figures 2 and 3 in the main text [#Genkin2021]_.
27 | The second example generates two synthetic datasets from ramping and stepping dynamics, and uses
28 | this data to infer the model potentials. It also infers the model potential, the initial distribution of the latent states, and the noise magnitude from data generated using the ramping dynamics. It reproduces Figure 4 in the main text [#Genkin2021]_.
29 | The third example demonstrates feature consistency analysis for model selection in the case of non-stationary data. It reproduces Figure 5a-c in the main text [#Genkin2021]_.
30 |
31 | .. toctree::
32 | :maxdepth: 2
33 |
34 | examples/2021_learning_nonstationary_dynamics/Example1.ipynb
35 | examples/2021_learning_nonstationary_dynamics/Example2.ipynb
36 | examples/2021_learning_nonstationary_dynamics/Example3.ipynb
37 |
38 | *********************************************************************************************
39 | The dynamics and geometry of choice in premotor cortex
40 | *********************************************************************************************
41 |
42 | The first example fits a single-neuron model to PMd data and selects an optimal
43 | model using feature consistency analysis, see Figure 3 in [#Genkin2023]_.
44 | The second example fits a population model to PMd data and selects an optimal
45 | model using feature consistency analysis, see Figure 4 in [#Genkin2023]_.
46 |
47 | .. toctree::
48 | :maxdepth: 2
49 |
50 | examples/2024_the_dynamics_and_geometry/Example1_single_neuron_optimization.ipynb
51 | examples/2024_the_dynamics_and_geometry/Example2_population_optimization.ipynb
52 | examples/2024_the_dynamics_and_geometry/Example3_gpu_optimization.ipynb
53 | examples/2024_the_dynamics_and_geometry/Example4_viterbi.ipynb
54 |
55 |
56 | References
57 | ----------
58 |
59 | .. [#Genkin2020] `Genkin, M., Engel, T.A. Moving beyond generalization to accurate interpretation of flexible models. Nat Mach Intell 2, 674–683 (2020). <https://www.nature.com/articles/s42256-020-00242-6>`_
60 |
61 | .. [#Genkin2021] `Genkin, M., Hughes, O. and Engel, T.A. Learning non-stationary Langevin dynamics from stochastic observations of latent trajectories. Nat Commun 12, 5986 (2021). <https://rdcu.be/czqGP>`_
62 |
63 | .. [#Genkin2023] `Genkin M, Shenoy KV, Chandrasekaran C, Engel TA. The dynamics and geometry of choice in premotor cortex. bioRxiv [Preprint] (2023). <https://www.ncbi.nlm.nih.gov/pmc/articles/PMC10401920/>`_
--------------------------------------------------------------------------------
/docs/source/implementation.rst:
--------------------------------------------------------------------------------
1 | Implementation
2 | ==============
3 |
4 |
5 | Core classes
6 | ------------
7 |
8 | .. autoclass:: neuralflow.model.model
9 | :members:
10 |
11 | .. autoclass:: neuralflow.grid.GLLgrid
12 | :members:
13 |
14 | .. autoclass:: neuralflow.spike_data.SpikeData
15 | :members:
16 |
17 | Optimization routines
18 | ---------------------
19 |
20 | .. autoclass:: neuralflow.optimization.Optimization
21 | :members:
22 |
23 | .. autoclass:: neuralflow.base_optimizer.adam_opt
24 | :members:
25 |
26 | .. autoclass:: neuralflow.base_optimizer.gd_opt
27 | :members:
28 |
29 | .. autoclass:: neuralflow.gradients.Grads
30 | :members:
31 |
32 | .. autoclass:: neuralflow.PDE_Solve.PDESolve
33 | :members:
34 |
35 | Predefined peq models
36 | ---------------------
37 |
38 | .. automodule:: neuralflow.peq_models
39 | :members:
40 |
41 | Predefined firing rate models
42 | -----------------------------
43 |
44 | .. automodule:: neuralflow.firing_rate_models
45 | :members:
46 |
47 | Feature complexity analysis
48 | ---------------------------
49 |
50 | .. autoclass:: neuralflow.feature_complexity.fc_base.FC_tools
51 | :members:
52 |
53 |
54 | Data generation from Langevin dynamics
55 | --------------------------------------
56 |
57 | .. autoclass:: neuralflow.data_generation.SyntheticData
58 | :members:
59 |
60 |
61 | Other modules and classes
62 | ---------------------------
63 |
64 | .. automodule:: neuralflow.viterbi
65 | :members:
--------------------------------------------------------------------------------
/docs/source/index.rst:
--------------------------------------------------------------------------------
1 | ==============================================================================
2 | NeuralFlow: modeling neural activity with continuous latent Langevin dynamics
3 | ==============================================================================
4 |
5 | NeuralFlow is a Python package for modeling neural spiking activity with continuous latent Langevin dynamics.
6 | The driving force is optimized from data by a gradient-descent algorithm directly in the space of continuous functions.
7 | Non-stationary data distributions can be modeled by inferring the initial distribution of the latent states and
8 | the noise magnitude from data, using absorbing or reflecting boundary conditions.
9 | Each neuron has its own tuning function f(x) that links the neuron's firing rate to
10 | the latent state.
11 |
12 | The modeling results can be interpreted within dynamical systems theory.
13 | The package includes optimization functions (ADAM and gradient descent), data generation functions for generating
14 | spike data from a specific Langevin model, and core modules such as spike data, PDE solver, and model.
15 | The Viterbi algorithm for inferring the most probable latent path is also included.
16 |
17 | In addition, we include feature consistency analysis for model selection, based on feature consistency between
18 | the models optimized on two data samples.
19 |
20 | To get started, see the examples. For a deeper understanding of the code, refer to the unit tests.
21 | For more information, see Genkin and Engel (2020) [#Genkin2020]_, Genkin, Hughes and Engel (2021) [#Genkin2021]_,
22 | and Genkin et al. (2023) [#Genkin2023]_.
23 |
24 | .. toctree::
25 | :maxdepth: 2
26 |
27 |
28 | installation
29 | examples
30 | implementation
31 |
32 |
33 |
34 |
35 | References
36 | ----------
37 |
38 | .. [#Genkin2020] `Genkin, M., Engel, T.A. Moving beyond generalization to accurate interpretation of flexible models. Nat Mach Intell 2, 674–683 (2020). <https://www.nature.com/articles/s42256-020-00242-6>`_
39 |
40 | .. [#Genkin2021] `Genkin, M., Hughes, O. and Engel, T.A. Learning non-stationary Langevin dynamics from stochastic observations of latent trajectories. Nat Commun 12, 5986 (2021). <https://rdcu.be/czqGP>`_
41 |
42 | .. [#Genkin2023] `Genkin M, Shenoy KV, Chandrasekaran C, Engel TA. The dynamics and geometry of choice in premotor cortex. bioRxiv [Preprint] (2023). <https://www.ncbi.nlm.nih.gov/pmc/articles/PMC10401920/>`_
--------------------------------------------------------------------------------
/docs/source/installation.rst:
--------------------------------------------------------------------------------
1 | .. _installation:
2 |
3 | Installation
4 | ============
5 |
6 | Package only
7 | ------------
8 | The package can be installed from a Git repository with pip. ::
9 |
10 | pip install git+https://github.com/engellab/neuralflow
11 |
12 |
13 | Package and examples
14 | --------------------
15 | To get the package with the examples, one needs to clone the repository. The examples are provided as Jupyter notebook
16 | (ipynb) files, so the ``jupyter`` package has to be preinstalled. With Conda as a package manager, one may opt to
17 | create a new environment::
18 |
19 | conda create -n neuralflow jupyter pip && conda activate neuralflow
20 |
21 | Alternatively, one can work in the base environment (make sure that the ``jupyter`` package is installed).
22 | Clone the repository and go to the repository root directory::
23 |
24 | git clone https://github.com/engellab/neuralflow
25 | cd neuralflow
26 |
27 | Install the package from a local copy::
28 |
29 | pip install .
30 |
31 | After that, you should be able to run the examples in the ``examples`` folder.
32 | If you have issues with the Cython extension and want to use the precompiled .c file instead, open setup.py and change line 7 to USE_CYTHON = 0.
33 |
34 |
35 | CUDA support
36 | ------------
37 |
38 | Optimization can be performed on a CUDA-enabled GPU. For GPU support, the ``cupy``
39 | package has to be installed on a machine with a CUDA-enabled GPU. The package
40 | was tested with cupy version 12.2.0. Note that double-precision computations
41 | are absolutely necessary for our framework, so optimization benefits from
42 | GPU acceleration only if a scientific-grade GPU (e.g. Tesla V100) is used.
43 | Gaming GPU performance is approximately the same as CPU performance, since
44 | gaming GPUs have few double-precision multiprocessors.
--------------------------------------------------------------------------------
/docs/source/logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/engellab/neuralflow/508cf7b6de2759d90dda774837ab90e0a0fbfb52/docs/source/logo.png
--------------------------------------------------------------------------------
/examples/2020_moving_beyond_generalization/data/Ex2_datasample1.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/engellab/neuralflow/508cf7b6de2759d90dda774837ab90e0a0fbfb52/examples/2020_moving_beyond_generalization/data/Ex2_datasample1.npz
--------------------------------------------------------------------------------
/examples/2020_moving_beyond_generalization/data/Ex2_datasample2.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/engellab/neuralflow/508cf7b6de2759d90dda774837ab90e0a0fbfb52/examples/2020_moving_beyond_generalization/data/Ex2_datasample2.npz
--------------------------------------------------------------------------------
/examples/2021_learning_nonstationary_dynamics/data/Ex3_datasample1.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/engellab/neuralflow/508cf7b6de2759d90dda774837ab90e0a0fbfb52/examples/2021_learning_nonstationary_dynamics/data/Ex3_datasample1.npz
--------------------------------------------------------------------------------
/examples/2021_learning_nonstationary_dynamics/data/Ex3_datasample2.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/engellab/neuralflow/508cf7b6de2759d90dda774837ab90e0a0fbfb52/examples/2021_learning_nonstationary_dynamics/data/Ex3_datasample2.npz
--------------------------------------------------------------------------------
/examples/2024_the_dynamics_and_geometry/data/data_for_example.zip:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/engellab/neuralflow/508cf7b6de2759d90dda774837ab90e0a0fbfb52/examples/2024_the_dynamics_and_geometry/data/data_for_example.zip
--------------------------------------------------------------------------------
/neuralflow/PDE_Solve.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | """This class solves the following eigenvector-eigenvalue problems:
4 | 1) Fokker-Planck equation
5 | 2) Modified Fokker-Planck equation
6 | 3) Sturm-Liouville problem
7 | """
8 |
9 | from neuralflow.grid import GLLgrid
10 | from neuralflow.utilities.rank_nullspace import nullspace
11 | import numpy as np
12 | import numpy.matlib
13 | import numbers
14 | import scipy
15 | from scipy import linalg
16 | from copy import deepcopy
17 |
18 |
19 | class PDESolve:
20 | """Numerical solution of Stourm-Liouville problem and Fokker-Planck.
21 |
22 |
23 | Parameters
24 | ----------
25 | xbegin : float, optional
26 | The left boundary of the latent state. The default is -1.
27 | xend : float
28 | The right boundary of the latent state. The default is 1.
29 | Np : int
30 |         The degree of the Lagrange interpolation polynomial, also the
31 | number of grid points at each element. The default is 8.
32 | Ne : int
33 | Number of SEM elements. The default is 64.
34 | BoundCond : dict
35 | A dictionary that specifies boundary conditions (Dirichlet, Neumann
36 | or Robin). The default is {'leftB': 'Dirichlet',
37 | 'rightB': 'Dirichlet'}.
38 | In case of Robin, the dictionary must also contain 'leftBCoeff'
39 | and/or 'rightBCoeff', each are dictionaries with two entries
40 | 'c1' and 'c2' that specify BCs coefficients in the following format:
41 | c1*y[xbegin]+c2*y'[xbegin]=0 (left boundary)
42 | c1*y[xend]+c2*y'[xend]=0 (right boundary)
43 | Nv : int, optional
44 | Number of retained eigenvalues and eigenvectors of the operator H.
45 | If set to None, will be equal to grid.N-2, which is the maximum
46 |         possible value. If Dirichlet BCs are used, it is strongly
47 | recommended to set this to None to avoid spurious high-freq
48 | oscillations in the fitted functions. The default is None.
49 | with_cuda : bool, optional
50 | Whether to include GPU support. For GPU optimization, the platform
51 | has to be cuda-enabled, and cupy package has to be installed. The
52 | default is False.
53 | """
54 |
55 | # List of available methods
56 | _available_BCs = ['Dirichlet', 'Neumann', 'Robin']
57 |
58 | def __init__(self, xbegin=-1.0, xend=1.0, Np=8, Ne=64,
59 | BoundCond={'leftB': 'Dirichlet', 'rightB': 'Dirichlet'},
60 | Nv=None, with_cuda=False
61 | ):
62 | """
63 |
64 |
65 | Public methods
66 | ------
67 | set_BoundCond, solve_EV
68 |
69 | """
70 |
71 | self.BoundCond = deepcopy(BoundCond)
72 | self.with_cuda = with_cuda
73 |
74 | if with_cuda:
75 | import neuralflow.base_cuda as cuda
76 | self.cuda = cuda
77 | self.cuda_var = cuda.var()
78 |
79 | # Check inputs
80 | self._check_inputs()
81 |
82 | # Convert given boundary condition into a vector BC_ of size (1,4)
83 | self._get_BC()
84 |
85 | # get grid
86 | self.grid = GLLgrid(xbegin, xend, Np, Ne, with_cuda)
87 |
88 | self.Nv = Nv
89 | if Nv is None:
90 | self.Nv = self.grid.N-2
91 |
92 | # Get the Nullspace
93 | self._get_Nullspace()
94 |
95 | @classmethod
96 | def init_from_grid(cls, grid, boundary_mode=None, BoundCond=None):
97 | """Init from grid object
98 |
99 |
100 | Parameters
101 | ----------
102 | grid : neuralflow.grid
103 | Grid object.
104 | boundary_mode : str, optional
105 | Absorbing or reflecting. If None, BoundCond will be used for
106 | boundary conditions. The default is None.
107 | BoundCond : dict
108 | A dictionary that specifies boundary conditions (Dirichlet, Neumann
109 | or Robin). The default is {'leftB': 'Dirichlet',
110 | 'rightB': 'Dirichlet'}.
111 | In case of Robin, the dictionary must also contain 'leftBCoeff'
112 | and/or 'rightBCoeff', each are dictionaries with two entries
113 | 'c1' and 'c2' that specify BCs coefficients in the following
114 | format:
115 | c1*y[xbegin]+c2*y'[xbegin]=0 (left boundary)
116 | c1*y[xend]+c2*y'[xend]=0 (right boundary)
117 |
118 | Returns
119 | -------
120 | self
121 | Initialized PDESolve object.
122 |
123 | """
124 | pde_solve_params = {
125 | 'xbegin': grid.xbegin, 'xend': grid.xend, 'Np': grid.Np,
126 | 'Ne': grid.Ne, 'with_cuda': grid.with_cuda}
127 | if boundary_mode is not None:
128 | if boundary_mode == 'absorbing':
129 | pde_solve_params['BoundCond'] = {
130 | 'leftB': 'Dirichlet', 'rightB': 'Dirichlet'
131 | }
132 | elif boundary_mode == 'reflecting':
133 | pde_solve_params['BoundCond'] = {
134 | 'leftB': 'Neumann', 'rightB': 'Neumann'
135 | }
136 | else:
137 | pde_solve_params['BoundCond'] = BoundCond
138 | return cls(**pde_solve_params)
139 |
140 | def set_BoundCond(self, BoundCond):
141 | """Set new boundary conditions for the Stourm-Liouville probelem
142 |
143 |
144 | Parameters
145 | ----------
146 | BoundCond : dictionary or str
147 | If str, can be 'absorbing' or 'reflecting'.
148 | Alternatively, specify boundary conditions as a dict:
149 | keys : 'leftB', 'rightB', (optionally: 'leftBCoeff', 'rightBCoeff')
150 |             values : 'Dirichlet', 'Neumann' or 'Robin'. If 'Robin', additionally
151 | specify coefficients as a dictionary with two keys: [c1,c2],
152 | consistent with the boundary condition of the form:
153 | c1*y(B)+c2*y'(B)=0.
154 |             Example: {'leftB': 'Robin', 'leftBCoeff': {'c1': 1, 'c2': 2},
155 |             'rightB': 'Robin', 'rightBCoeff': {'c1': 3, 'c2': 4}}
156 |
157 | """
158 | # Check parameters, set new boundary conditions and calculate new
159 | # Nullspace projector
160 | if type(BoundCond) is str:
161 | if BoundCond == 'absorbing':
162 | bc = {'leftB': 'Dirichlet', 'rightB': 'Dirichlet'}
163 | elif BoundCond == 'reflecting':
164 | bc = {'leftB': 'Neumann', 'rightB': 'Neumann'}
165 | else:
166 |                 raise ValueError('Unknown boundary conditions/mode')
167 | else:
168 | bc = BoundCond
169 |
170 | self.BoundCond = bc
171 | self._check_inputs()
172 | self._get_BC()
173 | self._get_Nullspace()
174 |
175 | def solve_EV(self, peq=None, D=1, q=None, w=None, mode='hdark', fr=None,
176 | device='CPU'):
177 | """Solve the Sturm-Liouville/FP/FPE eigenvalue-eigenvector problem.
178 |
179 |
180 | Parameters
181 | ----------
182 | peq : numpy array, dtype=float
183 |             Equilibrium probability distribution that determines the potential
184 | Phi(x), or a function p(x) in S-L problem.
185 | D : float
186 | Noise magnitude.
187 | q : numpy array, dtype=float
188 | A function q(x) in the S-L problem. The default value is None, in
189 | this case q(x)=0.
190 | w : numpy array, dtype=float
191 | A function w(x) in the S-L problem (non-negative). The default is
192 | None, in this case w(x)=1.
193 | mode : str
194 |             Specify mode. Available modes:
195 | 'normal': solve Sturm-Liouville problem, ignore D and fr.
196 | 'h0': solve for eigenvalues and vectors of FP operator H0.
197 | 'hdark': solve for eigenvalues and vector of FP operator H.
198 | The default is 'hdark'.
199 | fr : numpy array
200 | The firing rate function (required for 'hdark' mode).
201 | This firing rate function is an elementwise sum of the firing rate
202 | functions of all the neuronal responses. The default is None.
203 | device : str
204 | Can be 'CPU' or 'GPU'.
205 |
206 | Returns
207 | -------
208 | lQ : numpy array (Nv,), dtype=float
209 | The least Nv eigenvalues for the eigenvalue problem of H0 operator.
210 | QxOrig : numpy array (N,Nv), dtype=float
211 | The corresponding scaled eigenvectors
212 | Qx : numpy array (N,Nv), dtype=float
213 | The eigenvectors of EV problem of H0 operator (only for 'h0' and
214 | 'hdark' modes).
215 | lQd: numpy array (Nv,), dtype=float
216 | The eigenvalues of H operator (only for 'hdark' mode).
217 | Qd: numpy array (Nv,Nv), dtype=float
218 | The corresponding eigenvectors in H0 basis (only for 'hdark' mode).
219 |
220 | """
221 |
222 | assert (mode in {'normal', 'h0', 'hdark'}), 'Incorrect mode!'
223 |
224 | if device not in ['CPU', 'GPU']:
225 | raise ValueError(f'Unknown device {device}')
226 | elif device == 'GPU' and not self.with_cuda:
227 | raise ValueError('Initialize the class variable with with_cuda = '
228 | 'True to support GPU computations')
229 |
230 | lib = self.cuda.cp if device == 'GPU' else np
231 |
232 | # Fill peq and w with ones if needed
233 | if peq is None:
234 | peq = lib.ones(self.grid.N, dtype='float64')
235 | if w is None:
236 | w = lib.ones(self.grid.N, dtype='float64')
237 | Nv = self.Nv
238 | if Nv is None:
239 | Nv = self.grid.N - 2
240 |
241 | # If mode is normal do not use D. Otherwise, multiply peq by D and
242 | # flip sign
243 | if mode == 'normal':
244 | self._setmat(peq, q, w, device)
245 | else:
246 | self._setmat(-D * peq, q, w, device)
247 |
248 | if device == 'GPU':
249 | eigh, cpx = self.cuda.cp.linalg.eigh, self.cuda.cupyx
250 | stiffmat = self.cuda_var.stiffmat_
251 | massmat = self.cuda_var.massmat_
252 | dmassmat = self.cuda_var.dmassmat_
253 | NullM_ = self.cuda_var.NullM_
254 | w_d = self.grid.cuda_var.w_d
255 | else:
256 | eigh = linalg.eigh
257 | stiffmat, massmat = self.stiffmat_, self.massmat_
258 | NullM_ = self.NullM_
259 | w_d = self.grid.w_d
260 |
261 | # Solve EV
262 | if device == 'CPU':
263 |             # In scipy >= 1.14.0 the argument eigvals changed to subset_by_index
264 |             major, minor = [int(el) for el in scipy.__version__.split('.')[:2]]
265 |             if (major, minor) >= (1, 14):
266 | lQ, QxOrig = eigh(
267 | stiffmat, massmat, subset_by_index=(0, Nv - 1)
268 | )
269 | else:
270 | lQ, QxOrig = eigh(stiffmat, massmat, eigvals=(0, Nv - 1))
271 | else:
272 |             # cupy does not support the generalized EV problem, so convert it
273 |             # to a conventional EV problem by multiplying both sides by the inverted mass matrix
274 | if self.BC_[1] == 0 and self.BC_[3] == 0:
275 | # With Dirichlet BCs the projected massmat is symmetric
276 | temp1 = cpx.rsqrt(dmassmat)
277 | lQ, QxOrig = eigh(temp1[:, None]*stiffmat*temp1)
278 | else:
279 | # Invert and take Cholesky to find W^(-1/2)
280 | temp1 = lib.linalg.cholesky(lib.linalg.inv(massmat))
281 | lQ, QxOrig = eigh(temp1.T.dot(stiffmat.dot(temp1)))
282 | lQ = lQ[0:Nv]
283 | QxOrig = QxOrig[:, 0:Nv]
284 | if self.BC_[1] == 0 and self.BC_[3] == 0:
285 | QxOrig = QxOrig*temp1[:, None]
286 | else:
287 | QxOrig = temp1.dot(QxOrig)
288 |
289 |         # Transform back to the N-dimensional basis
290 | QxOrig = NullM_.dot(QxOrig)
291 |
292 | # Rescale eigenvectors by sqrt(peq) to obtain original eigenvectors of
293 | # FP operator
294 | if mode == 'h0' or mode == 'hdark':
295 | if device == 'CPU':
296 | Qx = np.diag(lib.sqrt(peq), 0).dot(QxOrig)
297 | else:
298 | Qx = lib.sqrt(peq)[:, None]*QxOrig
299 |
300 | # Perform additional computations for 'hdark' mode
301 | if mode == 'hdark':
302 |
303 | # Eigenvalue/vectors of dark operator
304 | if device == 'CPU':
305 | Kd = np.diag(lQ) + Qx.T.dot(np.diag(w_d*fr, 0).dot(Qx))
306 | else:
307 | Kd = lib.diag(lQ) + (Qx.T*(w_d*fr)).dot(Qx)
308 | lQd, Qd = eigh(Kd)
309 | Qd = lib.asarray(Qd, order='C')
310 |
311 | if lib.diff(lQd).min() < 0:
312 | raise ValueError('Error! Returned EVVd not sorted')
313 | # assert(all(lQd[i] <= lQd[i + 1] for i in range(len(lQd) - 1))
314 | # ), 'Error! Returned EVVd not sorted'
315 |
316 | # return
317 | if mode == 'normal':
318 | return lQ, QxOrig
319 | elif mode == 'h0':
320 | return lQ, QxOrig, Qx
321 | elif mode == 'hdark':
322 | return lQ, QxOrig, Qx, lQd, Qd
323 |
324 | def _setmat(self, p, q, w, device):
325 | """Calculate stiffness and mass matrices.
326 | Sets stiffmat_full_, massmat_full_, stiffmat_red_, massmat_red_
327 | matrices
328 | """
329 |
330 | if device == 'GPU':
331 | lib = self.cuda.cp
332 | dmat_ = self.grid.cuda_var.dmat_
333 | dmat_d = self.grid.cuda_var.dmat_d
334 | NullM_ = self.cuda_var.NullM_
335 | w_d = self.grid.cuda_var.w_d
336 | w_ = self.grid.cuda_var.w_
337 | else:
338 | lib = np
339 | dmat_ = self.grid.dmat_
340 | dmat_d = self.grid.dmat_d
341 | NullM_ = self.NullM_
342 | w_d = self.grid.w_d
343 | w_ = self.grid.w_
344 |
345 | N, Np, Ne = self.grid.N, self.grid.Np, self.grid.Ne
346 | stiffmat = lib.zeros((N, N), dtype='float64')
347 | # Patch stiffness matrix
348 | pr_node_temp = 0
349 | for i in range(0, Ne):
350 | idx_s, idx_e = i * (Np - 1), i * (Np - 1) + Np
351 | stiffmat[idx_s:idx_e, idx_s:idx_e] = - dmat_.T.dot(
352 | lib.diag(w_ * p[idx_s:idx_e])).dot(dmat_)
353 | stiffmat[idx_s, idx_s] = stiffmat[idx_s, idx_s]+pr_node_temp
354 | pr_node_temp = stiffmat[idx_e-1, idx_e-1].copy()
355 |
356 | # Add diagonal part proportional to q(x)
357 | if q is not None:
358 | stiffmat += lib.diag(q*w_d, 0)
359 |
360 | # Take care of terms from integration by parts:
361 | stiffmat[0, :] -= p[0]*dmat_d[0, :]
362 | stiffmat[-1, :] += p[-1]*dmat_d[-1, :]
363 |
364 | # Massmat
365 | massmat = lib.diag(w_d*w, 0)
366 |
367 |         # Project onto the null space of the boundary-condition operator (preserves BCs)
368 | massmat = NullM_.T.dot(massmat.dot(NullM_))
369 | stiffmat = NullM_.T.dot(stiffmat.dot(NullM_))
370 |
371 | if device == 'GPU':
372 | self.cuda_var.stiffmat_ = stiffmat
373 | self.cuda_var.massmat_ = massmat
374 | self.cuda_var.dmassmat_ = lib.diag(massmat)
375 | else:
376 | self.stiffmat_ = stiffmat
377 | self.massmat_ = massmat
378 |
379 | def _get_BC(self):
380 | """Create _BC array that contains boundary condition coefficients that
381 | is consistent with the following representation:
382 | BC_[0]*y(xbegin)+BC_[1]*y'(xbegin)=0
383 | BC_[2]*y(xend)+BC_[3]*y'(xend)=0
384 | """
385 |
386 | if isinstance(self.BoundCond, np.ndarray):
387 | self.BC_ = self.BoundCond
388 | else:
389 | self.BC_ = np.zeros(4)
390 | if self.BoundCond['leftB'] == 'Robin':
391 | self.BC_[0] = self.BoundCond['leftBCoeff']['c1']
392 | self.BC_[1] = self.BoundCond['leftBCoeff']['c2']
393 | else:
394 | self.BC_[:2] = {
395 | 'Dirichlet': np.array([1, 0]),
396 | 'Neumann': np.array([0, 1])
397 | }.get(self.BoundCond['leftB'])
398 |
399 | if self.BoundCond['rightB'] == 'Robin':
400 | self.BC_[2] = self.BoundCond['rightBCoeff']['c1']
401 | self.BC_[3] = self.BoundCond['rightBCoeff']['c2']
402 | else:
403 | self.BC_[2:] = {
404 | 'Dirichlet': np.array([1, 0]),
405 | 'Neumann': np.array([0, 1])
406 | }.get(self.BoundCond['rightB'])
407 |
408 | def _get_Nullspace(self):
409 | """Calculates Nullspace of a projection on boundary conditions operator
410 | """
411 |
412 | BCmat = np.zeros((2, self.grid.N))
413 | BCmat[0, :] = (
414 | np.append(self.BC_[0], np.zeros((1, self.grid.N - 1))) +
415 | self.BC_[1] * self.grid.dmat_d[0, :]
416 | )
417 | BCmat[1, :] = (
418 | np.append(np.zeros((1, self.grid.N - 1)), self.BC_[2]) +
419 | self.BC_[3] * self.grid.dmat_d[-1, :]
420 | )
421 | self.NullM_ = nullspace(BCmat)
422 | if self.with_cuda:
423 | self.cuda_var.NullM_ = self.cuda.cp.asarray(self.NullM_)
424 |
425 | def _check_inputs(self):
426 | """Check the initialized parameters.
427 | """
428 |
429 | # BoundCond set as array
430 | if isinstance(self.BoundCond, np.ndarray):
431 | if (
432 | len(self.BoundCond) != 4 or
433 | np.abs(self.BoundCond[0]) + np.abs(self.BoundCond[1]) == 0 or
434 | np.abs(self.BoundCond[2]) + np.abs(self.BoundCond[3]) == 0
435 | ):
436 | raise ValueError('Incorrect Boundary conditions')
437 | else:
438 | for bc in ['leftB', 'rightB']:
439 | if (
440 | bc not in self.BoundCond or
441 | self.BoundCond[bc] not in PDESolve._available_BCs
442 | ):
443 | raise ValueError('Incorrect Boundary Conditions')
444 |
445 | if self.BoundCond[bc] == 'Robin':
446 | if (
447 | f'{bc}Coeff' not in self.BoundCond.keys() or
448 | 'c1' not in self.BoundCond[f'{bc}Coeff'].keys() or
449 | 'c2' not in self.BoundCond[f'{bc}Coeff'].keys() or
450 | not isinstance(
451 | self.BoundCond[f'{bc}Coeff']['c1'], numbers.Number) or
452 | not isinstance(
453 | self.BoundCond[f'{bc}Coeff']['c2'], numbers.Number)
454 | ):
455 |                     raise ValueError(f'{bc}Coeff dictionary is incorrect')
456 |
457 | if type(self.with_cuda) is not bool:
458 | try:
459 | self.with_cuda = bool(self.with_cuda)
460 | except Exception as e:
461 | raise TypeError(f'with_cuda must be boolean. Exception: {e}')
462 |
--------------------------------------------------------------------------------
/neuralflow/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 | Import packages
3 | """
4 |
5 | from .data_generation import SyntheticData
6 | from . import firing_rate_models
7 | from .gradients import Grads
8 | from .grid import GLLgrid
9 | from .model import model
10 | from .optimization import Optimization
11 | from .PDE_Solve import PDESolve
12 | from . import peq_models
13 | from .spike_data import SpikeData
14 |
15 | __all__ = ['SyntheticData', 'firing_rate_models', 'Grads', 'GLLgrid', 'model',
16 | 'Optimization', 'PDESolve', 'peq_models', 'SpikeData']
17 |
--------------------------------------------------------------------------------
/neuralflow/base_cuda.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding: utf-8 -*-
3 | """ Cuda warmup functions and modules. This is a module (not class) since
4 | we need a singleton pattern, and modules implement singleton
5 | """
6 |
7 | import cupy as cp
8 | import numpy as np
9 | import cupyx # Will be used when imported, do not remove
10 | import os
11 |
12 | handle = cp.cuda.device.get_cublas_handle()
13 | streams_ = []
14 | device_ = cp.cuda.Device()
15 | max_threads = cp.cuda.Device().attributes['MaxThreadsPerBlock']
16 |
17 | # We will create num_threads_ x num_threads_ 2D grid of threads
18 | num_threads_ = int(np.sqrt(max_threads))
19 |
20 | # l2 norm that is faster than linalg.norm. Not used in the current version
21 | l2norm = cp.ReductionKernel(
22 | 'T x', 'T y', 'x * x', 'a + b', 'y = sqrt(a)', '0', 'l2norm'
23 | )
24 |
25 |
26 | class var:
27 | pass
28 |
29 |
30 | def _update_streams(nstreams):
31 | """If streams is not defined, or there are fewer streams than nstreams -
32 | destroy the previous streams and create new nstreams streams.
33 | """
34 | global streams_
35 | if len(streams_) < nstreams:
36 | if len(streams_) > 0:
37 | memory_pool = cp.cuda.MemoryPool()
38 | for stream in streams_:
39 | memory_pool.free_all_blocks(stream=stream)
40 | streams_ = [cp.cuda.stream.Stream() for _ in range(nstreams)]
41 |
42 |
43 | def _free_streams_memory():
44 | device_.synchronize()
45 | memory_pool = cp.cuda.MemoryPool()
46 | for stream in streams_:
47 | memory_pool.free_all_blocks(stream=stream)
48 |
49 |
50 | def import_custom_functions():
51 | """Read custom cuda kernels from source files
52 | """
53 | global _G0, _G1
54 | with open(os.path.join(os.path.dirname(__file__), 'cuda_kernels.cu')) as f:
55 | code = f.read()
56 | module = cp.RawModule(code=code)
57 | _G0 = module.get_function('G0_d_gpu')
58 | _G1 = module.get_function('G1_d_gpu')
59 |
--------------------------------------------------------------------------------
/neuralflow/c_get_gamma.pyx:
--------------------------------------------------------------------------------
1 | """This source file contains cython implementation of G and G0 functions.
2 | """
3 | cimport numpy as np
4 |
5 | def G1_d(np.ndarray[np.float64_t, ndim=3] GI,
6 | int Nv, np.ndarray[np.float64_t, ndim=1] tempExp,
7 | np.ndarray[np.float64_t, ndim=2] alpha,
8 | np.ndarray[np.float64_t, ndim=1] btemp,
9 | int i, int nnum):
10 | """
11 | Gamma function needed for gradients w.r.t. FR and C (for double
12 | precision calculations)
13 | """
14 |
15 | cdef int j, k
16 | for j in range(Nv):
17 | for k in range(Nv):
18 | GI[j, k, nnum]+=alpha[j,i] * tempExp[j]*btemp[k]
19 |
20 | def G0_d(np.ndarray[np.float64_t, ndim=2] G,
21 | int Nv, np.ndarray[np.float64_t, ndim=1] lQd,
22 | np.ndarray[np.float64_t, ndim=1] tempExp,
23 | np.ndarray[np.float64_t, ndim=2] alpha,
24 | np.ndarray[np.float64_t, ndim=1] btemp,
25 | double dt, int i):
26 | """
27 | Gamma-function needed for all of the gradients (for double
28 | precision calculations)
29 | """
30 |
31 | cdef int j, k
32 | for j in range(Nv):
33 | for k in range(Nv):
34 | if j==k:
35 | G[j,k] += alpha[j, i] * btemp[j] * dt * tempExp[j]
36 | else:
37 | G[j,k] += alpha[j, i] * btemp[k] * (tempExp[j] - tempExp[k]) / (lQd[k] - lQd[j])
--------------------------------------------------------------------------------
/neuralflow/cuda_kernels.cu:
--------------------------------------------------------------------------------
1 | // This file contains Cuda kernels for GPU calculation of the Gamma functions
2 | // Also see c_get_gamma.pyx for CPU analogues
3 |
4 | extern "C" {
5 |
6 | __global__ void G0_d_gpu(double* G, const int Nv, const double* alpha,
7 | const double* dark_exp, const double* lQd,
8 | const double* btemp, const double* dt_arr, const int i){
9 | // Gamma-function needed for gradients w.r.t. driving force F (for double precision calculations)
10 | const int numThreadsx = blockDim.x * gridDim.x;
11 | const int threadIDx = blockIdx.x * blockDim.x + threadIdx.x;
12 | const int numThreadsy = blockDim.y * gridDim.y;
13 | const int threadIDy = blockIdx.y * blockDim.y + threadIdx.y;
14 | const int ic = threadIDx*Nv + threadIDy;
15 | const double dt = dt_arr[0];
16 | if (threadIDx < Nv && threadIDy < Nv)
17 | if (threadIDx==threadIDy)
18 | G[ic]+=alpha[threadIDx+i*Nv]*btemp[threadIDy]*dt*dark_exp[threadIDx+i*Nv];
19 | else
20 | G[ic]+=alpha[threadIDx+i*Nv]*btemp[threadIDy]*(dark_exp[threadIDx+i*Nv]-dark_exp[threadIDy+i*Nv])/(lQd[threadIDy]-lQd[threadIDx]);
21 | }
22 |
23 | __global__ void G1_d_gpu(double* GI, const int Nv, const double* alpha,
24 | const double* dark_exp,
25 | const double* btemp, const int i, const int* nid) {
26 | // Gamma function needed for gradients w.r.t. firing rates (for double precision calculations)
27 | const int numThreadsx = blockDim.x * gridDim.x;
28 | const int threadIDx = blockIdx.x * blockDim.x + threadIdx.x;
29 | const int numThreadsy = blockDim.y * gridDim.y;
30 | const int threadIDy = blockIdx.y * blockDim.y + threadIdx.y;
31 | const int ic = nid[i]*Nv*Nv + threadIDx*Nv + threadIDy;
32 | if (threadIDx < Nv && threadIDy < Nv)
33 | GI[ic]+=alpha[threadIDx+i*Nv]*btemp[threadIDy]*dark_exp[threadIDx+i*Nv];
34 | }
35 | }
--------------------------------------------------------------------------------
/neuralflow/data_generation.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """This source file contains SyntheticData class for synthetic data generation.
3 | """
4 |
5 | import logging
6 | import numpy as np
7 | import math
8 | from tqdm import tqdm
9 | from neuralflow.settings import implemented_bms
10 | from scipy.interpolate import interp1d
11 | from neuralflow.spike_data import SpikeData
12 |
13 | logger = logging.getLogger(__name__)
14 |
15 |
16 | class SyntheticData:
17 | """Spike data generation from a Langevin model
18 |
19 |
20 | Parameters
21 | ----------
22 | model : model
23 | An instance of neuralflow.model.
24 | boundary_mode : ENUM('absorbing', 'reflecting')
25 | Boundary behavior.
26 | dt : float, optional
27 | Time bin size for Langevin ODE integration. The default is 0.0001.
28 | record_trial_end : bool, optional
29 | Whether or not to include trial end. Usually True is the best
30 | choice. The default is True.
31 |
32 | Notes
33 | ------
34 | No GPU support. Data generation is usually pretty fast, so CPU is
35 | sufficient for this purpose.
36 |
37 | """
38 |
39 | def __init__(
40 | self, model, boundary_mode, dt=0.0001,
41 | record_trial_end=True
42 | ):
43 | self.model = model
44 | self.grid = model.grid
45 | if boundary_mode not in implemented_bms:
46 | raise ValueError(f'Unknown boundary mode {boundary_mode}')
47 | self.boundary_mode = boundary_mode
48 | self.num_neuron = model.num_neuron
49 | self.dt = dt
50 | self.record_trial_end = record_trial_end
51 |
52 | def generate_data(
53 | self, trial_start=0, trial_end=1, num_trials=2, model_num=0
54 | ):
55 | """Generate spike data and latent trajectories.
56 |
57 |
58 | Parameters
59 | ----------
60 | trial_start : float or list, optional
61 | Trial start time. To specify the same trial start time for all
62 | trials, provide a single float, or provide a list for each trial.
63 | The default is 0.
64 | trial_end : float or list, optional
65 | Trial end time. To specify the same trial end time for all
66 | trials, provide a single float, or provide a list for each trial.
67 | Note that for absorbing boundary mode this serves as a timeout and
68 | the actual trial end time can be smaller if the boundary is reached
69 | before this time. The default is 1.
70 | num_trials : int, optional
71 | Number of trials to be generated. The default is 2.
72 | model_num : int, optional
73 | Which model to use for data generation. The default is 0.
74 |
75 | Returns
76 | -------
77 | data : numpy array (num_trials, 2), dtype = np.ndarray.
78 | Data in ISI format. See spike_data class for details.
79 | time_bins : numpy array (num_trials,), dtype = np.ndarray
80 | For each trial contains times at which latent trajectory was
81 | recorded. Each entry is 1D numpy array of floats.
82 | x : numpy array (num_trials,), dtype = np.ndarray
83 | Latent trajectories for each trial. Each entry is 1D array of
84 | floats. Same shape as time_bins
85 | """
86 |
87 | if not np.isscalar(trial_start) and len(trial_start) != num_trials:
88 | raise ValueError(
89 |                 'trial_start should be a float or a list of length num_trials'
90 | )
91 |
92 | if np.isscalar(trial_start):
93 | trial_start = [trial_start] * num_trials
94 |
95 | if not np.isscalar(trial_end) and len(trial_end) != num_trials:
96 | raise ValueError(
97 |                 'trial_end should be a float or a list of length num_trials'
98 | )
99 | if np.isscalar(trial_end):
100 | trial_end = [trial_end] * num_trials
101 |
102 | time_epoch = [(s, e) for s, e in zip(trial_start, trial_end)]
103 |
104 | peq, p0, D, fr = self.model.get_params(model_num)
105 |
106 |         # Use the firing rate function if available
107 | fr_lambda = self.model.get_fr_lambda()
108 |
109 | return self._generate_data(peq, p0, D, fr, fr_lambda, time_epoch)
110 |
111 | def _generate_data(self, peq, p0, D, fr, fr_lambda, time_epoch):
112 | """Generates synthetic spike data and latent trajectories from a
113 | given model defined by (peq,p0,D,firing_rate_model).
114 | """
115 | num_trial = len(time_epoch)
116 |
117 | # generate diffusion trajectories
118 | x, time_bins = self._generate_diffusion(peq, p0, D, time_epoch)
119 |
120 | # initialize data arrays
121 | spikes = np.empty((self.num_neuron, num_trial), dtype=np.ndarray)
122 |
123 | # compute firing rates and spikes
124 | for iTrial in range(num_trial):
125 | for iCell in range(self.num_neuron):
126 | if fr_lambda is not None:
127 | # Just evaluate firing rate at each x(t)
128 | rt = fr_lambda[iCell](x[iTrial])
129 | else:
130 | # Interpolate firing rate functions between the grid points
131 | fr_interp = interp1d(
132 | self.grid.x_d, fr[..., iCell], kind='cubic'
133 | )
134 | rt = fr_interp(x[iTrial])
135 | # Generate spikes from rate
136 | spikes[iCell, iTrial] = self._generate_inhom_poisson(
137 | time_bins[iTrial][0:rt.shape[0]], rt
138 | )
139 |
140 | # Calculate the actual time epoch with the actual end of trial times
141 | time_epoch_actual = [
142 | (time_epoch[i][0], time_bins[i][-1] + self.dt)
143 | for i in range(num_trial)
144 | ]
145 | # transform spikes to ISIs
146 | data = SpikeData.transform_spikes_to_isi(
147 | spikes, time_epoch_actual, self.record_trial_end
148 | )
149 | return data, time_bins, x
150 |
151 | def _generate_inhom_poisson(self, time, rate):
152 | """Generate spike sequence from a given rate of inhomogenious Poisson
153 | process lambda(t)
154 | """
155 | # calculate cumulative rate
156 | deltaT = time[1:] - time[:-1]
157 | r = np.cumsum(rate[0:-1] * deltaT)
158 | r = np.insert(r, 0, 0)
159 | deltaR = r[1:] - r[:-1]
160 |
161 | # generate 1.5 times as many spikes as expected on average for an
162 | # exponential distribution with rate 1
163 | numX = math.ceil(1.5 * r[-1])
164 |
165 | # generate exponentially distributed arrival times with average rate 1
166 | notEnough = True
167 | x = np.empty(0)
168 | xend = 0.0
169 | while notEnough:
170 | x = np.append(
171 | x, xend + np.cumsum(np.random.exponential(1.0, numX)))
172 | # check that we generated enough spikes
173 | if len(x) > 0:
174 | xend = x[-1]
175 | notEnough = xend < r[-1]
176 |
177 | # trim extra spikes
178 | x = x[x <= r[-1]]
179 |
180 | if len(x) == 0:
181 | spikes = np.empty(0)
182 | else:
183 | # for each x find the index of the last cumulative rate value <= x
184 | indJ = [np.where(r <= x[iSpike])[0][-1]
185 | for iSpike in range(len(x))]
186 | # compute rescaled spike times
187 | spikes = time[indJ] + (x - r[indJ]) * deltaT[indJ] / deltaR[indJ]
188 |
189 | return spikes
190 |
191 | def _generate_diffusion(self, peq, p0, D, time_epoch):
192 | """Sample latent trajectory by numerical integration of Langevin
193 | equation
194 | """
195 |
196 | num_trial = len(time_epoch)
197 |
198 | # pre-allocate output
199 | x = np.empty(num_trial, dtype=np.ndarray)
200 | time_bins = np.empty(num_trial, dtype=np.ndarray)
201 |
202 | # sample initial condition from the equilibrium distribution
203 | x0 = self._sample_from_p(p0, num_trial)
204 |
205 | # compute force profile from the potential
206 | force = self.model.force_from_peq(peq)
207 |
208 | # clip very high force values near the boundary (so that the particle
209 | # does not travel too far during one time step dt)
210 | ind = (np.abs(force) > 0.05 / (D * self.dt)) & (self.grid.x_d < 0)
211 | force[ind] = 0.05 / (D * self.dt)
212 | ind = (np.abs(force) > 0.05 / (D * self.dt)) & (self.grid.x_d > 0)
213 | force[ind] = -0.05 / (D * self.dt)
214 |
215 | N = len(force)
216 |
217 | for iTrial in tqdm(range(num_trial)):
218 |
219 | # generate time bins
220 | time_bins[iTrial] = np.arange(
221 | time_epoch[iTrial][0], time_epoch[iTrial][1], self.dt
222 | )
223 | num_bin = len(time_bins[iTrial]) - 1
224 | y = np.zeros(num_bin + 1)
225 | y[0] = x0[iTrial]
226 |
227 | # generate noise
228 | noise = (np.sqrt(self.dt * 2 * D) * np.random.randn(num_bin))
229 |
230 | # account for absorbing boundary trajectories ending early
231 | max_ind = num_bin + 1
232 |
233 | # Do Euler integration
234 | for iBin in range(num_bin):
235 | # find force at the current position by linear interpolation
236 | ind = np.argmax(self.grid.x_d - y[iBin] >= 0)
237 | if ind == 0:
238 | f = force[0]
239 | elif ind == N-1:
240 | f = force[-1]
241 | else:
242 | theta = (
243 | (y[iBin] - self.grid.x_d[ind - 1]) /
244 | (self.grid.x_d[ind] - self.grid.x_d[ind - 1])
245 | )
246 | f = (1.0 - theta) * force[ind - 1] + theta * force[ind]
247 | y[iBin + 1] = y[iBin] + D * f * self.dt + noise[iBin]
248 |
249 | # Handle boundaries:
250 | if self.boundary_mode == "reflecting":
251 | # Handle reflection. To verify, check the following:
252 | # Provided that abs(y[iBin + 1]) < 3*L:
253 | # 1) If y is within x domain, the output is y[iBin + 1]
254 | # 2) If y > self.grid.x_d[-1], the value is
255 | # 2 * self.grid.x_d[-1] - y[iBin + 1]
256 | # 3) If y < self.grid.x_d[0], the value is
257 | # 2 * self.grid.x_d[0] - y[iBin + 1]
258 | y[iBin + 1] = min(
259 | max(y[iBin + 1], 2 * self.grid.x_d[0] - y[iBin + 1]),
260 | 2 * self.grid.x_d[-1] - y[iBin + 1]
261 | )
262 |
263 | # Regenerate the value if the noise magnitude was very high.
264 | # This happens when y[iBin + 1] falls outside of the domain by
265 | # more than the domain length L. It should happen very rarely
266 | # because large force values are clipped. Repeat until the
267 | # noise keeps y[iBin + 1] within range
268 | error_state = True
269 | while error_state:
270 | if (
271 | y[iBin + 1] < self.grid.x_d[0] or
272 | y[iBin + 1] > self.grid.x_d[-1]
273 | ):
274 | y[iBin + 1] = (
275 | y[iBin] + D * f * self.dt +
276 | np.sqrt(self.dt * 2 * D) * np.random.randn()
277 | )
278 | y[iBin + 1] = min(
279 | max(y[iBin + 1],
280 | 2 * self.grid.x_d[0] - y[iBin + 1]
281 | ),
282 | 2 * self.grid.x_d[-1] - y[iBin + 1]
283 | )
284 | else:
285 | error_state = False
286 |
287 | elif self.boundary_mode == "absorbing":
288 | # Check termination condition
289 | if (
290 | y[iBin + 1] < self.grid.x_d[0] or
291 | y[iBin + 1] > self.grid.x_d[-1]
292 | ):
293 | max_ind = iBin
294 | break
295 | x[iTrial] = y[:max_ind]
296 | time_bins[iTrial] = time_bins[iTrial][:max_ind]
297 |
298 | return x, time_bins
299 |
300 | def _sample_from_p(self, p, num_sample):
301 | """Generate samples from a given probability distribution. Needed for
302 | initialization of the latent trajectories. Note: this function does
303 | not support delta-functions or extremely narrow p-functions.
304 | """
305 | x = np.zeros(num_sample)
306 | pcum = np.cumsum(p * self.grid.w_d)
307 | y = np.random.uniform(0, 1, num_sample)
308 | for iSample in range(num_sample):
309 | # find index of the element closest to y[iSample]
310 | ind = (np.abs(pcum - y[iSample])).argmin()
311 | x[iSample] = self.grid.x_d[ind]
312 | return x
313 |
--------------------------------------------------------------------------------
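The generator above samples spikes from an inhomogeneous Poisson process by
time rescaling: draw unit-rate exponential arrivals on the rescaled axis, then
map them back through the inverse of the cumulative rate. Below is a
standalone sketch of the same idea (illustrative code, not part of the
package; sample_inhom_poisson is a hypothetical name):

import numpy as np

def sample_inhom_poisson(time, rate, rng=np.random.default_rng(0)):
    # cumulative rate R(t), integrated with the left-point rule as above
    dt = np.diff(time)
    r = np.insert(np.cumsum(rate[:-1] * dt), 0, 0.0)
    # unit-rate exponential arrivals until R(T) is exceeded
    x, xend = np.empty(0), 0.0
    while xend < r[-1]:
        x = np.append(x, xend + np.cumsum(rng.exponential(1.0, 100)))
        xend = x[-1]
    x = x[x < r[-1]]
    # invert the piecewise-linear R(t) to obtain spike times
    ind = np.searchsorted(r, x, side='right') - 1
    return time[ind] + (x - r[ind]) * dt[ind] / np.diff(r)[ind]

t = np.linspace(0, 2, 2001)
spikes = sample_inhom_poisson(t, 10 + 40 * (1 + np.sin(np.pi * t)))

--------------------------------------------------------------------------------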
/neuralflow/firing_rate_models.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """Template firing rate functions."""
3 |
4 | import numpy as np
5 |
6 | # Minimum firing rate in Hz to prevent log(f(x)) from attaining too low values
7 | min_firing_rate = 1
8 |
9 |
10 | def custom(x, lambdafunc=None):
11 | """Custom fr model.
12 | Either supply a function, or leave None if the model of the generated data
13 | is not known (and define fr manually afterwards).
14 |
15 |
16 | Parameters
17 | ----------
18 | x : numpy array, dtype=float
19 | SEM grid points
20 | lambdafunc : function object.
21 | The default is None.
22 |
23 | Returns
24 | -------
25 | fr : numpy array
26 | Firing rate function evaluated on SEM grid
27 |
28 | """
29 | if lambdafunc is not None:
30 | fr = lambdafunc(x)
31 | return np.maximum(fr, min_firing_rate)
32 | else:
33 | return None
34 |
35 |
36 | def rectified_linear(x, slope=50.0, x_thresh=-1.0):
37 | """Rectified-linear firing rate model.
38 | r(x, slope, x_thresh) = max[slope*(x - x_thresh), min_firing_rate]
39 |
40 | Parameters
41 | ----------
42 | x : numpy array, dtype=float
43 | SEM grid points
44 | slope : float
45 | Firing-rate slope parameter. The default is 50.0.
46 | x_thresh : float
47 | Firing threshold parameter. The default is -1.0.
48 |
49 | Returns
50 | -------
51 | numpy array
52 | Firing rate function evaluated on SEM grid
53 | """
54 |
55 | return np.maximum(slope * (x - x_thresh), min_firing_rate)
56 |
57 |
58 | def linear(x, slope=50.0, bias=2):
59 | """Linear firing rate model.
60 | r(x, slope, bias) = max[slope * x + bias, min_firing_rate]
61 |
62 | Parameters
63 | ----------
64 | x : numpy array, dtype=float
65 | SEM grid points
66 | slope : float
67 | Firing-rate slope parameter. The default is 50.0.
68 | bias : float
69 | Firing threshold parameter. The default is 2.
70 |
71 | Returns
72 | -------
73 | numpy array
74 | Firing rate function evaluated on SEM grid
75 |
76 | """
77 | return np.maximum(slope * x + bias, min_firing_rate)
78 |
79 |
80 | def peaks(x, center=np.array([-0.5, 0.5]), width=np.array([0.2, 0.3]),
81 | amp=np.array([100, 80])):
82 | """Sum of gaussian peaks
83 | f= SUM(A*exp((x-x0)^2/w^2))
84 |
85 | Parameters
86 | ----------
87 | x : numpy array, dtype=float
88 | SEM grid points
89 | center : numpy array, dtype=float
90 | Centers of Gaussian peaks. The default is np.array([-0.5,0.5]).
91 | width : numpy array, dtype=float
92 | Widths of Gaussian peaks. The default is np.array([0.2,0.3]).
93 | amp : numpy array, dtype=float
94 | Magnitudes of Gaussian peaks. The default is np.array([100,80]).
95 |
96 | Returns
97 | -------
98 | numpy array
99 | Firing rate function evaluated on SEM grid
100 |
101 | """
102 |
103 | # Ensure array inputs (each is converted independently)
104 | center, width, amp = np.asarray(center), np.asarray(width), np.asarray(amp)
105 | out = np.asarray([
106 | amp[i]*np.exp(-(x-center[i])**2/width[i]**2)
107 | for i in range(np.size(center))
108 | ])
109 | return np.maximum(np.sum(out, axis=0), min_firing_rate)
110 |
111 |
112 | def sinus(x, bias=100, amp=30, freq=np.pi):
113 | """ Rectified sinusoid
114 |
115 |
116 | Parameters
117 | ----------
118 | x : numpy array, dtype=float
119 | SEM grid points
120 | bias : float
121 | Bias. The default is 100.
122 | amp : float
123 | Amplitude of the sinusoid. The default is 30.
124 | freq : float
125 | Frequency of the sinusoid. The default is np.pi.
126 |
127 | Returns
128 | -------
129 | numpy array
130 | Firing rate function evaluated on SEM grid
131 |
132 | """
133 | return np.maximum(amp*np.sin(freq*x)+bias, min_firing_rate)
134 |
135 |
136 | def cos_square(x, amp=1000, freq=np.pi/2, bias=0):
137 | """Cosine-squareed model with a given amplitude and frequency
138 |
139 |
140 | Parameters
141 | ----------
142 | x : numpy array, dtype=float
143 | SEM grid points
144 | amp : float
145 | Amplitude. The default is 1000.
146 | freq : float
147 | Frequency. The default is np.pi/2.
148 | bias : float
149 | Bias. The default is 0.
150 |
151 | Returns
152 | -------
153 | numpy array
154 | Firing rate function evaluated on SEM grid
155 |
156 | """
157 | return np.maximum(amp*(np.cos(x*freq))**2+x*bias, min_firing_rate)
158 |
159 |
160 | firing_model_types_ = {
161 | 'custom': custom,
162 | 'rectified_linear': rectified_linear,
163 | 'linear': linear,
164 | 'peaks': peaks,
165 | 'sinus': sinus,
166 | 'cos_square': cos_square
167 | }
168 |
--------------------------------------------------------------------------------
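A minimal usage sketch for the templates above (assuming the package is
importable): firing_model_types_ maps template names to functions, and every
template is floored at min_firing_rate.

import numpy as np
from neuralflow.firing_rate_models import firing_model_types_

x = np.linspace(-1, 1, 101)           # stand-in for the SEM grid points
fr = firing_model_types_['peaks'](x)  # two Gaussian bumps with defaults
assert fr.min() >= 1                  # clipped at min_firing_rate = 1 Hz

--------------------------------------------------------------------------------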
/neuralflow/grid.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding: utf-8 -*-
3 | """
4 | Base class for computing the GLL grid, the associated weights, differentiation
5 | matrix and antiderivative matrix (assuming Lagrange polynomials as basis funcs)
6 | """
7 |
8 | import numpy as np
9 | import numpy.matlib
10 | from numpy.polynomial import legendre
11 | from functools import reduce
12 | from itertools import combinations
13 | from operator import mul
14 |
15 |
16 | class GLLgrid():
17 | """Grid class that calculates Gauss-Lobatto-Legendre grid and implements
18 | integration and differentiation on the grid.
19 |
20 | Parameters
21 | ----------
22 | xbegin : float
23 | The left boundary of the latent state. The default is -1.
24 | xend : float
25 | The right boundary of the latent state. The default is 1.
26 | Np : int
27 | The number of grid points at each element; the Lagrange
28 | interpolation polynomial has degree Np - 1. The default is 8.
29 | Ne : int
30 | Number of SEM elements. The default is 64.
31 | with_cuda : bool, optional
32 | Whether to include GPU support. For GPU optimization, the platform
33 | has to be cuda-enabled, and cupy package has to be installed. The
34 | default is False.
35 |
36 | """
37 |
38 | def __init__(self, xbegin=-1, xend=1, Np=8, Ne=64, with_cuda=False):
39 |
40 | self.xbegin = xbegin
41 | self.xend = xend
42 | self.Np = Np
43 | self.Ne = Ne
44 |
45 | # Check inputs
46 | self._check_inputs()
47 |
48 | # cuda
49 | self.with_cuda = with_cuda
50 | if with_cuda:
51 | import neuralflow.base_cuda as cuda
52 | self.cuda = cuda
53 | self.cuda_var = cuda.var()
54 |
55 | # Set N - total number of points
56 | self.N = (Np-1) * Ne + 1
57 |
58 | # Compute grid, weights, and differentiation matrix
59 | self._get_grid()
60 |
61 | # Calculate antiderivative matrix
62 | self._set_AD_mat()
63 |
64 | def __repr__(self):
65 | return (
66 | f'Grid(xbegin={self.xbegin}, xend={self.xend}, Np={self.Np}, '
67 | f'Ne={self.Ne}, {"with cuda" if self.with_cuda else "cpu only"})'
68 | )
69 |
70 | def __deepcopy__(self, memo):
71 | """ Note: deepcopy won't work if grid initializaed with with_cuda=True
72 | since self.cuda references a module (which are singletons in python)
73 | Simply create a new instance with the same parameters for deepcopy
74 | """
75 | return GLLgrid(
76 | self.xbegin, self.xend, self.Np, self.Ne, self.with_cuda
77 | )
78 |
79 | def Differentiate(self, f, result=None, device='CPU'):
80 | """ Take a derivative of function f
81 |
82 |
83 | Parameters
84 | ----------
85 | f : numpy array, dtype=float
86 | Function values evaluated on the grid
87 | result : numpy array, dtype=float
88 | A container for the results (to avoid additional allocation).
89 | If not provided, will return a result. The default is None.
90 | device : str, optional
91 | ENUM('CPU', 'GPU'). The default is 'CPU'.
92 |
93 | Returns
94 | -------
95 | numpy array
96 | If the result is not provided at the input, it will be returned.
97 |
98 | """
99 | if device not in ['CPU', 'GPU']:
100 | raise ValueError(f'Unknown device {device}')
101 | elif device == 'GPU' and not self.with_cuda:
102 | raise ValueError('Initialize the class variable with with_cuda = '
103 | 'True to support GPU computations')
104 |
105 | diff_mat = self.dmat_d if device == 'CPU' else self.cuda_var.dmat_d
106 |
107 | if result is None:
108 | return diff_mat.dot(f)
109 | else:
110 | diff_mat.dot(f, out=result)
111 |
112 | def Integrate(self, f, result=None, device='CPU'):
113 | """Indefinite integral of a function f using integration matrix.
114 |
115 |
116 | Parameters
117 | ----------
118 | f : numpy array, dtype=float
119 | Function values evaluated on the grid
120 | result : numpy array, dtype=float
121 | A container for the results (to avoid additional allocation).
122 | If not provided, will return a result. The default is None.
123 | device : str, optional. ENUM('CPU', 'GPU'). The default is 'CPU'.
124 | Returns
125 | -------
126 | numpy array
127 | If the result is not provided at the input, it will be returned.
128 |
129 | """
130 |
131 | if device not in ['CPU', 'GPU']:
132 | raise ValueError(f'Unknown device {device}')
133 | elif device == 'GPU' and not self.with_cuda:
134 | raise ValueError('Initialize the class variable with with_cuda = '
135 | 'True to support GPU computations')
136 |
137 | int_mat = self.AD_d if device == 'CPU' else self.cuda_var.AD_d
138 |
139 | if result is None:
140 | return int_mat.dot(f)
141 | else:
142 | int_mat.dot(f, out=result)
143 |
144 | def _get_grid(self):
145 | """Calculate grid nodes, corresponding weights and differentiation
146 | matrix (with SEM method)
147 |
148 |
149 | Sets
150 | ----
151 | x_d, w_d, dmat_d, dx, x_, w_, dmat_, ele_scale_
152 | """
153 |
154 | # Scaling factor:
155 | self.ele_scale_ = (self.xend - self.xbegin) / (2 * self.Ne)
156 |
157 | # Calculate local grid, weights, differentiation matrix
158 | self._get_single_element()
159 |
160 | # Now patch locals to get globals
161 | self.x_d = np.zeros(self.N)
162 | self.w_d = np.zeros(self.N)
163 | self.dmat_d = np.zeros((self.N, self.N), dtype='float64')
164 |
165 | for i in range(self.Ne):
166 | patch = np.arange(i * (self.Np - 1), i *
167 | (self.Np - 1) + self.Np)
168 |
169 | # Patch as described in SEM documentation
170 | self.x_d[patch] = self.x_ + (2 * i + 1) * self.ele_scale_
171 | self.w_d[patch] += self.w_
172 | self.dmat_d[np.ix_(patch, patch)] += self.dmat_
173 |
174 | self.x_d += self.xbegin
175 |
176 | # Divide the rows that correspond to primary nodes by 2.
177 | # This is because at primary nodes the derivative is the half-sum of
178 | # the derivative from the left and the derivative from the right
179 | for i in range(self.Ne - 1):
180 | self.dmat_d[i * (self.Np - 1) + self.Np - 1, :] /= 2.0
181 |
182 | # Copy the results to GPU memory
183 | if self.with_cuda:
184 | self.cuda_var.x_ = self.cuda.cp.asarray(self.x_)
185 | self.cuda_var.w_ = self.cuda.cp.asarray(self.w_)
186 | self.cuda_var.dmat_ = self.cuda.cp.asarray(self.dmat_)
187 | self.cuda_var.x_d = self.cuda.cp.asarray(self.x_d)
188 | self.cuda_var.w_d = self.cuda.cp.asarray(self.w_d)
189 | self.cuda_var.dmat_d = self.cuda.cp.asarray(self.dmat_d)
190 |
191 | def _get_single_element(self):
192 | """Calculate local grid nodes, corresponding weights and
193 | differentiation matrix using numpy.polynomial.legendre module
194 |
195 |
196 | Sets
197 | ----
198 | x_, w_, dmat_
199 |
200 | """
201 | # Interested in Legendre polynomial #(Np-1):
202 | coefs = np.append(np.zeros(self.Np - 1), 1)
203 |
204 | # Calculate grid points:
205 | self.x_ = np.append(
206 | np.append(-1, legendre.Legendre(coefs).deriv().roots()), 1)
207 |
208 | # Need legendre polynomial at grid points:
209 | Ln = legendre.legval(self.x_, coefs)
210 |
211 | # Calculate weights:
212 | self.w_ = 2 / ((self.Np - 1) * self.Np * Ln**2)
213 |
214 | # Calculate differentiation matrix:
215 | self.dmat_ = np.zeros((len(Ln), len(Ln)))
216 | for i in range(self.Np):
217 | for j in range(self.Np):
218 | if i != j:
219 | self.dmat_[i][j] = (
220 | Ln[i] / (Ln[j] * (self.x_[i] - self.x_[j]))
221 | )
222 | else:
223 | self.dmat_[i][i] = 0
224 | self.dmat_[0, 0] = -(self.Np - 1) * (self.Np) / 4
225 | self.dmat_[-1, -1] = (self.Np - 1) * (self.Np) / 4
226 |
227 | # Scale locals:
228 | self.x_ *= self.ele_scale_
229 | self.w_ *= self.ele_scale_
230 | self.dmat_ /= self.ele_scale_
231 |
232 | def _set_AD_mat(self):
233 | """Calculates Integration Matrix that can be used to calculate
234 | antiderivative
235 | """
236 |
237 | # Define local grid at a single element xi \in [-1;1]
238 | x_local = self.x_ / self.ele_scale_
239 |
240 | # Allocate local and global antiderivative matrix
241 | self.AD_ = np.zeros((self.Np, self.Np))
242 | self.AD_d = np.zeros((self.N, self.N))
243 |
244 | # Construct local matrix first:
245 | # integration coefficients of x, x^2, x^3, ... of Lagrange
246 | # interpolation polynomials
247 | coefs = np.zeros(self.Np)
248 | coefs[-1] = 1 / self.Np
249 | # Matrix with columns x, x^2, ..., x^N
250 | x_mat = (
251 | np.transpose(np.matlib.repmat(x_local, self.Np, 1))
252 | )**np.arange(1, self.Np + 1)
253 | for i in range(self.Np):
254 | # take all but the current grid point:
255 | inds = np.append(np.arange(i), np.arange(i + 1, self.Np))
256 | x_crop = x_local[inds]
257 |
258 | # Calculate integration coefficients and common denominator using
259 | # sums of all single, pairwise, triplewise, etc. combinations
260 | Combinations = [
261 | sum(reduce(mul, c)
262 | for c in combinations(x_crop, i + 1))
263 | for i in range(self.Np - 1)
264 | ]
265 | coefs[:-1] = (
266 | (-1)**np.arange(1 - self.Np % 2, self.Np - self.Np % 2)
267 | ) * Combinations[::-1] / np.arange(1, self.Np)
268 | denominator = np.prod(np.ones(self.Np - 1) * x_local[i] - x_crop)
269 |
270 | # Choose integration constant c0 such that F(-1)=0
271 | c0 = -np.sum((-1)**np.arange(1, self.Np + 1) * coefs)
272 |
273 | # Fill the i-th column of the local antiderivative matrix
274 | self.AD_[:, i] = (x_mat.dot(coefs) + c0) / denominator
275 |
276 | # Set first row to zero and scale
277 | self.AD_[0, :] = 0
278 | self.AD_ *= self.ele_scale_
279 |
280 | # Now calculate global AD matrix:
281 | for i in range(self.Ne):
282 | patch = np.arange(i * (self.Np - 1), i *
283 | (self.Np - 1) + self.Np)
284 | self.AD_d[np.ix_(patch, patch)] += self.AD_
285 | self.AD_d[np.ix_(
286 | np.arange(i * (self.Np - 1) + self.Np, self.N), patch
287 | )] += self.AD_[-1, :]
288 | if self.with_cuda:
289 | self.cuda_var.AD_d = self.cuda.cp.asarray(self.AD_d)
290 |
291 | def _check_inputs(self):
292 | if self.xend <= self.xbegin:
293 | raise ValueError('x interval length is <= 0')
294 | if not isinstance(self.Np, (int, np.integer)) or self.Np < 3:
295 | raise ValueError('Np should be an integer >= 3')
296 | if not isinstance(self.Ne, (int, np.integer)) or self.Ne < 1:
297 | raise ValueError('Ne should be an integer >= 1')
298 |
--------------------------------------------------------------------------------
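A short sanity-check sketch (assuming the package is importable): spectral
differentiation of a smooth function on the GLL grid is accurate to near
machine precision, and Integrate returns the antiderivative that vanishes at
xbegin.

import numpy as np
from neuralflow.grid import GLLgrid

grid = GLLgrid(xbegin=-1, xend=1, Np=8, Ne=16)
f = np.sin(np.pi * grid.x_d)
df = grid.Differentiate(f)            # approximates pi * cos(pi * x)
assert np.allclose(df, np.pi * np.cos(np.pi * grid.x_d), atol=1e-6)
F = grid.Integrate(df)                # antiderivative, zero at xbegin
assert np.allclose(F, f - f[0], atol=1e-6)

--------------------------------------------------------------------------------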
/neuralflow/peq_models.py:
--------------------------------------------------------------------------------
1 | """Template peq functions.
2 | The equilibrium probability distribution is related to the potential via U(x) = -log(peq).
3 | """
4 |
5 | import numpy as np
6 | from neuralflow.utilities.spline_padding import add_anchor_point
7 | from scipy.interpolate import CubicSpline, PPoly
8 |
9 |
10 | def custom(x, w, lambdafunc=None):
11 | """Custom model (for example, for real data fitting).
12 |
13 | Parameters
14 | ----------
15 | x : numpy array (N,), dtype=float
16 | Grid points in which the model will be evaluated. N is the number of
17 | grid points.
18 | w : numpy array (N,), dtype=float
19 | Weights used to evaluate integrals by the Gaussian quadrature.
20 | lambdafunc: function or None
21 | Either supply a function, or leave None if the model of the generated
22 | data is not known, in which case x and w are ignored.
23 |
24 | Returns
25 | -------
26 | peq : numpy array (N,), dtype=float
27 | Probability density distribution evaluated at grid points x.
28 | """
29 | if lambdafunc is not None:
30 | peq = lambdafunc(x)
31 | peq /= sum(w*peq)
32 | else:
33 | peq = None
34 | return peq
35 |
36 |
37 | def cos_square(x, w, xbegin=-1.0, xend=1.0):
38 | """Cosine-squareed model.
39 |
40 | y, L --> peq(y, L) ~ cos( y*pi/L )**2
41 |
42 | integral peq(x) dx =1
43 | y - x centered on the middle of the domain
44 | L - domain length
45 |
46 | Parameters
47 | ----------
48 | x : numpy array (N,), dtype=float
49 | Grid points in which the model will be evaluated. N is the number of
50 | grid points.
51 | w : numpy array (N,), dtype=float
52 | Weights used to evaluate integrals by the Gaussian quadrature.
53 | xbegin: float
54 | Left boundary of the x domain. The default is -1.
55 | xend: float
56 | Right boundary of the x-domain. The default is 1.
57 |
58 | Returns
59 | -------
60 | peq : numpy array (N,), dtype=float
61 | Probability density distribution evaluated at grid points x.
62 | """
63 | y = x-(xbegin+xend)/2
64 | L = xend-xbegin
65 | peq = (np.cos(y*np.pi/L))**2
66 | # normalization
67 | peq /= sum(w*peq)
68 | return peq
69 |
70 |
71 | def cos_fourth_power(x, w, xbegin=-1.0, xend=1.0):
72 | """Cosine^4 peq model.
73 |
74 | y, L --> peq(y, L) ~ cos( y*pi/L )**4, integral peq(x) dx =1
75 | where y - x centered on the middle of the domain, L - domain length
76 |
77 | Parameters
78 | ----------
79 | x : numpy array (N,), dtype=float
80 | Grid points in which the model will be evaluated. N is the number of
81 | grid points.
82 | w : numpy array (N,), dtype=float
83 | Weights used to evaluate integrals by the Gaussian quadrature.
84 | xbegin: float
85 | Left boundary of the x domain. The default is -1.
86 | xend: float
87 | Right boundary of the x-domain. The default is 1.
88 |
89 | Returns
90 | -------
91 | peq : numpy array, dtype=float
92 | Probability density distribution evaluated at grid points x.
93 | """
94 | y = x-(xbegin+xend)/2
95 | L = xend-xbegin
96 | peq = (np.cos(y*np.pi/L))**4
97 | # normalization
98 | peq /= sum(w*peq)
99 |
100 | return peq
101 |
102 |
103 | def stepping(x, w,
104 | interp_x=[-1, -0.5, 0, 0.3, 1],
105 | interp_y=[0, 4, 0, 1, -1],
106 | bc_left=[(1, 0), (1, 0)],
107 | bc_right=[(1, 0), (1, 0)]):
108 | """Stepping model.
109 |
110 | Parameters
111 | ----------
112 | x : numpy array (N,), dtype=float
113 | Grid points in which the model will be evaluated. N is the number of
114 | grid points.
115 | w : numpy array (N,), dtype=float
116 | Weights used to evaluate integrals by the Gaussian quadrature.
117 | interp_x: numpy array (5,), dtype=float, or list
118 | The x positions of the boundaries, maxima and minima of the potential,
119 | sorted from the left to the right. Contains 5 values: the left
120 | boundary, the first maximum, the minimum, the second maximum and the
121 | right boundary. The default is [-1, -0.5, 0, 0.3, 1], which corresponds
122 | to the stepping potential used in M. Genkin, O. Hughes, T.A. Engel,
123 | Nat Commun 12, 5986 (2021).
124 | interp_y: numpy array (5,), dtype=float, or list
125 | The corresponding y-values of the potential at the points specified by
126 | interp_x. The default is [0, 4, 0, 1, -1], which corresponds to the
127 | stepping potential used in M. Genkin, O. Hughes, T.A. Engel paper,
128 | Nat Commun 12, 5986 (2021).
129 | bc_left: list
130 | A list that contains two tuples that specify boundary conditions for
131 | the potential on the left boundary. The format is the same as in
132 | bc_type argument of scipy.interpolate.CubicSpline function. The default
133 | is [(1, 0), (1, 0)], which corresponds to a zero-derivative (Neumann)
134 | BCs for the potential.
135 | bc_right: list
136 | Same as bc_left, but for a right boundary. The default is
137 | [(1, 0), (1, 0)], which corresponds to a zero-derivative (Neumann) BCs
138 | for the potential.
139 |
140 | Returns
141 | -------
142 | peq : numpy array, dtype=float
143 | Probability density distribution evaluated at grid points x.
144 | """
145 | xv = np.array(interp_x)
146 | yv = np.array(interp_y)
147 | cs = np.zeros((xv.shape[0]+1, 4), dtype=x.dtype)
148 |
149 | # Find additional anchoring points on the left and right boundaries to
150 | # make the potential around minima/maxima symmetric
151 | x_add_l, y_add_l = add_anchor_point(xv[:3], yv[:3])
152 | x_add_r, y_add_r = add_anchor_point(xv[-3:][::-1], yv[-3:][::-1])
153 |
154 | # Add these points to the x and y arrays
155 | xv_new = np.concatenate(([xv[0], x_add_l], xv[1:-1], [x_add_r, xv[-1]]))
156 | yv_new = np.concatenate(([yv[0], y_add_l], yv[1:-1], [y_add_r, yv[-1]]))
157 |
158 | # Use three points for boundary splines, and two points for splines in the
159 | # bulk
160 | cs[0:2, :] = CubicSpline(xv_new[:3], yv_new[:3], bc_type=bc_left).c.T
161 | for i in range(1, xv.shape[0]-2):
162 | cs[i+1, :] = CubicSpline(xv[i:i+2], yv[i:i+2],
163 | bc_type=[(1, 0), (1, 0)]).c.T
164 | cs[-2:] = CubicSpline(xv_new[-3:], yv_new[-3:], bc_type=bc_right).c.T
165 |
166 | poly = PPoly(cs.T, xv_new)
167 | peq = np.exp(-poly(x))
168 | # normalization
169 | peq /= sum(w*peq)
170 | return peq
171 |
172 |
173 | def linear_pot(x, w, slope=1):
174 | """Ramping model derived from a linear potential
175 |
176 | V(x)=slope*x
177 | peq(x) = exp (-slope*x) / || exp (-slope*x) ||
178 |
179 | Parameters
180 | ----------
181 | x : numpy array (N,), dtype=float
182 | Grid points in which the model will be evaluated. N is the number of
183 | grid points.
184 | w : numpy array (N,), dtype=float
185 | Weights used to evaluate integrals by the Gaussian quadrature.
186 | slope: float
187 | Slope of the potential function.
188 |
189 | Returns
190 | -------
191 | peq : numpy array, dtype=float
192 | Probability density distribution evaluated at grid points x.
193 | """
194 | V = slope*x
195 | peq = np.exp(-V)
196 | # normalization
197 | peq /= sum(w*peq)
198 |
199 | return peq
200 |
201 |
202 | def manual(x, w, interp_x=[-1, -0.3, 0, 0.3, 1],
203 | interp_y=[0, 10, 5, 10, 0],
204 | bc_left=[(1, 0), (1, 0)],
205 | bc_right=[(1, 0), (1, 0)],
206 | stationary_points=None):
207 | """Define arbitrary model by specifiying minima and maxima of the
208 | potential
209 | """
210 |
211 | xv = np.array(interp_x)
212 | yv = np.array(interp_y)
213 | cs = np.zeros((xv.shape[0]-1, 4), dtype=x.dtype)
214 |
215 | i = 0
216 | while i < xv.shape[0]-1:
217 | if stationary_points is not None and stationary_points[i+1] == 0:
218 | cs[i:i+2, :] = CubicSpline(xv[i:i+3],
219 | yv[i:i+3], bc_type=[(1, 0), (1, 0)]).c.T
220 | i += 2
221 | else:
222 | cs[i, :] = CubicSpline(xv[i:i+2], yv[i:i+2],
223 | bc_type=[(1, 0), (1, 0)]).c.T
224 | i += 1
225 |
226 | poly = PPoly(cs.T, xv)
227 | peq = np.exp(-poly(x))
228 | peq /= sum(w*peq)
229 | return peq
230 |
231 |
232 | def single_well(x, w, xmin=0, miu=10.0, sig=0):
233 | """
234 | Peq derived from a single well quadratic potential.
235 |
236 | peq(x) = exp[ -U(x) ]
237 |
238 | y = x - xmin
239 |
240 | U(y) = 0.5 * miu * y**2 + sig*y
241 |
242 | integral peq(x) dx =1
243 |
244 | Parameters
245 | ----------
246 | x : numpy array (N,), dtype=float
247 | Grid points in which the model will be evaluated. N is the number of
248 | grid points.
249 | w : numpy array (N,), dtype=float
250 | Weights used to evaluate integrals by the Gaussian quadrature.
251 | xmin : float
252 | position of the minimum for sig=0 case. The default is 0.
253 | miu : float
254 | curvature (steepness) of the potential. The default is 10.
255 | sig : float
256 | asymmetry parameter, to bias the left vs. right side of the potential.
257 | The default is 0.
258 |
259 | Returns
260 | -------
261 | peq : numpy array, dtype=float
262 | Probability density distribution evaluated at grid points x.
263 | """
264 |
265 | y = x - xmin
266 |
267 | peq = np.exp(-(0.5*miu*y**2 + sig*y))
268 | # normalization
269 | peq /= sum(w*peq)
270 |
271 | return peq
272 |
273 |
274 | def double_well(x, w, xmax=0, xmin=0.3, depth=4.0, sig=0):
275 | """
276 | Peq derived from a double-well potential.
277 |
278 | peq(x) = exp[ -U(x) ]
279 |
280 | y = x - xmax
281 | ymin = xmin - xmax
282 |
283 | U(y) = depth * (y/ymin)**4 - 2*depth * (y/ymin)**2 +sig*y
284 |
285 | integral peq(x) dx =1
286 |
287 | Parameters
288 | ----------
289 | x : numpy array (N,), dtype=float
290 | Grid points in which the model will be evaluated. N is the number of
291 | grid points.
292 | w : numpy array (N,), dtype=float
293 | Weights used to evaluate integrals by the Gaussian quadrature.
294 | xmax : float
295 | position of the potential maximum for the sig=0 case. The default is 0.
296 | xmin : float
297 | position of the two symmetric minima for the sig=0 case. The default is 0.3.
298 | depth : float
299 | depth of the potential wells (U_max - U_min) for the symmetric case
300 | (sig=0). The default is 4.
301 | sig : float
302 | asymmetry parameter, to bias one well over the other. The default is 0.
303 |
304 | Returns
305 | -------
306 | peq : numpy array (N,), dtype=float
307 | Probability density distribution evaluated at grid points x.
308 | """
309 |
310 | y = x - xmax
311 | ymin = xmin - xmax
312 |
313 | peq = np.exp(-(depth*(y/ymin)**4 - 2*depth*(y/ymin)**2 + sig*y))
314 | # normalization
315 | peq /= sum(w*peq)
316 |
317 | return peq
318 |
319 |
320 | def uniform(x, w):
321 | """
322 | Uniform peq model, derived from a constant potential.
323 |
324 | peq = const
325 |
326 | integral peq(x) dx =1
327 |
328 | Parameters
329 | ----------
330 | x : numpy array (N,), dtype=float
331 | Grid points in which the model will be evaluated. N is the number of
332 | grid points.
333 | w : numpy array (N,), dtype=float
334 | Weights used to evaluate integrals by the Gaussian quadrature.
335 | Returns
336 | -------
337 | peq : numpy array (N,), dtype=float
338 | Probability density distribution evaluated at grid points x.
339 | """
340 |
341 | peq = np.ones(x.shape)
342 | # normalization
343 | peq /= sum(w*peq)
344 |
345 | return peq
346 |
347 |
348 | peq_model_mixtures = {
349 | 'cos_square': cos_square,
350 | 'single_well': single_well,
351 | 'double_well': double_well,
352 | 'uniform': uniform
353 | }
354 |
355 |
356 | def mixture(x, w, theta=0.5, model1={'model': 'cos_square', 'params': {}},
357 | model2={'model': 'double_well', 'params': {}}):
358 | """
359 | Convex mixture of two models.
360 |
361 | peq = theta*peq_model_1 + (1-theta)*peq_model_2
362 |
363 | integral peq(x) dx =1
364 |
365 | Parameters
366 | ----------
367 | x : numpy array (N,), dtype=float
368 | Grid points in which the model will be evaluated.
369 | w : numpy array (N,), dtype=float
370 | Weights used to evaluate integrals by the Gaussian quadrature.
371 | theta : float
372 | convex weight, 0<=theta<=1. The default is 0.5.
373 | model1 : dictionary.
374 | Dictionary that contains model name (one of the built-in models) and
375 | the corresponding parameters.
376 | The default is {'model': 'cos_square', 'params': {}}
377 | model2 : dictionary.
378 | Dictionary that contains model name (one of the built-in models) and
379 | the corresponding parameters.
380 | The default is {'model': 'double_well', 'params': {}}
381 |
382 | Returns
383 | -------
384 | peq : numpy array (N,), dtype=float
385 | Probability density distribution evaluated at grid points x.
386 | """
387 | assert 0 <= theta <= 1
388 |
389 | if not model1['model'] in peq_model_mixtures:
390 | raise ValueError(
391 | "peq_model type for model mixtures should be one of "
392 | f"{peq_model_mixtures.keys()}, {model1['model']} was given."
393 | )
394 | if not model2['model'] in peq_model_mixtures:
395 | raise ValueError(
396 | "peq_model type for model mixtures should be one of "
397 | f"{peq_model_mixtures.keys()}, {model2['model']} was given."
398 | )
399 |
400 | peq1 = peq_model_mixtures[model1['model']](x, w, **model1['params'])
401 | peq2 = peq_model_mixtures[model2['model']](x, w, **model2['params'])
402 |
403 | peq = theta*peq1 + (1-theta)*peq2
404 |
405 | # normalization
406 | peq /= sum(w*peq)
407 |
408 | return peq
409 |
410 |
411 | peq_model_types_ = {
412 | 'cos_square': cos_square,
413 | 'cos_fourth_power': cos_fourth_power,
414 | 'single_well': single_well,
415 | 'double_well': double_well,
416 | 'stepping': stepping,
417 | 'linear_pot': linear_pot,
418 | 'uniform': uniform,
419 | 'mixture': mixture,
420 | 'custom': custom,
421 | 'manual': manual
422 | }
423 |
--------------------------------------------------------------------------------
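A minimal sketch tying these templates to the grid module (assuming the
package is importable): every template returns a density normalized against
the supplied quadrature weights.

import numpy as np
from neuralflow.grid import GLLgrid
from neuralflow.peq_models import peq_model_types_

grid = GLLgrid(xbegin=-1, xend=1, Np=8, Ne=16)
peq = peq_model_types_['double_well'](grid.x_d, grid.w_d, depth=4.0)
assert np.isclose(np.sum(peq * grid.w_d), 1.0)  # integrates to 1
U = -np.log(peq)  # recover the (shifted) potential for inspection

--------------------------------------------------------------------------------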
/neuralflow/settings.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding: utf-8 -*-
3 | """ Settings and default parameters
4 | """
5 | import numpy as np
6 |
7 | # Important: these are the allowed min and max values.
8 | # During optimization, the updated values will be
9 | # clipped to these numbers. These values may be
10 | # changed depending on the situation
11 | MINIMUM_PEQ = 1.0e-5
12 | MINIMUM_D = 0.01
13 |
14 | # Default optimization settings
15 | opt_settings = {
16 | 'max_epochs': 100,
17 | 'mini_batch_number': 1,
18 | 'params_to_opt': ['F', 'F0', 'D', 'Fr', 'C'],
19 | 'beta1': 0.9,
20 | 'beta2': 0.99,
21 | 'epsilon': 10**-8,
22 | 'etaf': 0,
23 | }
24 |
25 | # Default LS settings
26 | line_search_setting = {
27 | 'max_fun_eval': 3,
28 | 'epoch_schedule': np.array([]),
29 | 'nSearchPerEpoch': 1
30 | }
31 |
32 | # For checking parameters and throwing exceptions
33 | implemented_bms = ['absorbing', 'reflecting']
34 | implemented_optimizers = ['ADAM', 'GD']
35 |
36 | # Line search hyperparams
37 | _cmin = 1
38 | _cmax = 150
39 | _dmin = 0.02
40 | _dmax = 10
41 |
--------------------------------------------------------------------------------
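These dictionaries act as defaults, so a caller can copy and override them
rather than mutate the module-level objects in place. A small sketch (how the
optimizer consumes these settings is defined elsewhere in the package):

from copy import deepcopy
import neuralflow.settings as settings

my_opts = deepcopy(settings.opt_settings)
my_opts['max_epochs'] = 500             # run more epochs
my_opts['params_to_opt'] = ['F', 'D']   # restrict the optimized parameters

--------------------------------------------------------------------------------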
/neuralflow/spike_data.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding: utf-8 -*-
3 | """Class for spiking data storage and manipulation
4 | """
5 | import numpy as np
6 | import numbers
7 | from collections.abc import Iterable
8 | import logging
9 |
10 | logger = logging.getLogger(__name__)
11 |
12 |
13 | class SpikeData:
14 | """A class for storing spike data in an ISI format and also for conversion
15 | between spiketimes and ISI formats
16 |
17 |
18 | Parameters
19 | ----------
20 | data : numpy array
21 | Two formats are supported:
22 | 1) ISIs format: the data is numpy array of the size (N,2) of type
23 | np.ndarray, where each element is a 1D array. N is the number of
24 | trials, and for each trial the first column contains all of the
25 | inter-spike intervals (ISIs) in seconds, and the second column
26 | contains the corresponding neuronal IDs (trial termination, if
27 | recorded, is indicated with -1). Neuronal Ids start from zero.
28 | ISIs are represented as 1D arrays of floats, and neuronal indices
29 | as 1D array of integers.
30 | data[i][0] - 1D array, ISIs of type float for the trial i.
31 | The last entry can optionally be the time interval between
32 | the last spike and trial termination time.
33 | data[i][1] - 1D array, neuronal IDs of type int64 for the trial
34 | i. The last entry is -1 if the trial termination time is
35 | recorded.
36 | Example: create data with 3 neurons (ids 0,1,2) and two trials.
37 | Trial 0 started at time 0 s and ended at 1.55 s, where neuron
38 | 1 spiked at 0.05 s and neuron 2 spiked at 0.55 s. Trial 1 also
39 | started at 0 s, ended at 10.05 s, where neuron 0 spiked at 0.05
40 | s, neuron 1 spiked at 3.05 s, and neuron 2 spiked at 1.05 and
41 | 6.05 s.
42 | ISIs = np.empty((2, 2), dtype=np.ndarray)
43 | ISIs[0][0] = np.array([0.05,0.5,1])
44 | ISIs[0][1] = np.array([1,2,-1])
45 | ISIs[1][0] = np.array([0.05,1,2,3,4])
46 | ISIs[1][1] = np.array([0,2,1,2,-1])
47 | 2) spiketimes format, where the data is a numpy array of size
48 | (num_neuron, N) of type object, N is the number of trials. Each
49 | entry is a 1D array that specifies the spike times of one neuron
50 | on one trial. In this case the time_epoch array specifies each
51 | trial start and end times. The example above in spiketimes format would
52 | be the following:
53 | spiketimes = np.array(
54 | [
55 | [np.array([], dtype=np.float64), np.array([0.05])],
56 | [np.array([0.05]), np.array([3.05])],
57 | [np.array([0.55]), np.array([1.05, 6.05])]
58 | ],
59 | dtype=object
60 | )
61 | timeepoch = [(0, 1.55), (0, 10.05)]
62 | dformat : str, optional
63 | ENUM('spiketimes', 'ISIs'). The default is 'ISIs'.
64 | time_epoch : list, optional
65 | For each trial, specify trial start time and trial end time as a
66 | tuple. Only needed for spiketimes format. The default is None.
67 | num_neuron : int, optional
68 | Number of neurons in the data. If not provided, will be inferred.
69 | The default is None.
70 | with_trial_end : bool, optional
71 | Whether trial end time is recorded. The default is True.
72 | with_cuda : bool, optional
73 | Whether to include GPU support. For GPU optimization, the platform
74 | has to be cuda-enabled, and cupy package has to be installed. The
75 | default is False.
76 | """
77 |
78 | def __init__(self, data, dformat='ISIs', time_epoch=None,
79 | num_neuron=None, with_trial_end=True, with_cuda=False
80 | ):
81 | """
82 | Public methods
83 | ------
84 | to_GPU, trial_average_fr, change_format
85 |
86 | """
87 |
88 | self.data = data
89 |
90 | if dformat not in ['ISIs', 'spiketimes']:
91 | raise ValueError('Format has to be ISIs or spiketimes')
92 | self.dformat = dformat
93 | self.time_epoch = time_epoch
94 | self.num_neuron = num_neuron
95 | self.with_trial_end = with_trial_end
96 | self._check_and_fix_inputs()
97 | self.with_cuda = with_cuda
98 | if self.with_cuda:
99 | import neuralflow.base_cuda as cuda
100 | self.cuda = cuda
101 | self.cuda_var = cuda.var()
102 |
103 | def to_GPU(self):
104 | """ Copy data to GPU memory.
105 | """
106 | if not self.with_cuda:
107 | raise ValueError(
108 | 'Initialize spikedata with with_cuda = True for GPU support'
109 | )
110 |
111 | if self.dformat == 'spiketimes':
112 | data = SpikeData.transform_spikes_to_isi(
113 | self.data, self.time_epoch
114 | )
115 | else:
116 | data = self.data
117 |
118 | # In Cuda the data will be stored in a nested list format
119 | self.cuda_var.data = [[] for _ in range(data.shape[0])]
120 |
121 | for iTrial, seq in enumerate(data):
122 | self.cuda_var.data[iTrial].append(
123 | self.cuda.cp.asarray(seq[0], dtype='float64')
124 | )
125 | self.cuda_var.data[iTrial].append(
126 | self.cuda.cp.asarray(seq[1], dtype='int32')
127 | )
128 |
129 | def trial_average_fr(self):
130 | """For each neuron, compute trial-average firing rate. This is needed
131 | to scale the initial guess of the firing rate function to match the
132 | average spike rate in the data.
133 | """
134 | if self.dformat != 'ISIs':
135 | logger.warning('Only ISIs format is supported')
136 | return None
137 | tot_spikes = np.zeros(self.num_neuron)
138 | tot_times = np.empty(len(self.data))
139 | for trial in range(len(self.data)):
140 | tot_times[trial] = np.sum(self.data[trial][0])
141 | sp_count, _ = np.histogram(
142 | self.data[trial][1],
143 | bins=np.arange(-0.5, self.num_neuron, 1)
144 | )
145 | tot_spikes += sp_count
146 | tot_time = np.sum(tot_times)
147 | return tot_spikes/tot_time
148 |
149 | def change_format(self, new_format, record_trial_end=True):
150 | """Convert the data between spiketimes and ISIs format.
151 |
152 |
153 | Parameters
154 | ----------
155 | new_format : str
156 | ENUM('spiketimes', 'ISIs').
157 | record_trial_end : bool, optional
158 | Whether to record trial end time in ISIs format.
159 | The default is True.
160 |
161 | """
162 | if self.dformat == new_format:
163 | logger.info(f'The data format is already {new_format}')
164 | elif new_format == 'ISIs':
165 | self.data = SpikeData.transform_spikes_to_isi(
166 | self.data, self.time_epoch, record_trial_end
167 | )
168 | self.dformat = new_format
169 | self.time_epoch = None
170 | logger.info(f'Data is in {new_format} format')
171 | elif new_format == 'spiketimes':
172 | self.data, self.time_epoch = SpikeData.transform_isis_to_spikes(
173 | self.data
174 | )
175 | self.dformat = new_format
176 | logger.info(f'Data is in {new_format} format')
177 | else:
178 | raise ValueError('Unknown format')
179 |
180 | @staticmethod
181 | def transform_spikes_to_isi(spikes, time_epoch, record_trial_end=True):
182 | """Convert spike times to ISI format, which is a suitable format for
183 | optimization.
184 | """
185 |
186 | num_neuron, num_trial = spikes.shape
187 |
188 | # initialize data array
189 | data = np.empty((num_trial, 2), dtype=np.ndarray)
190 |
191 | # indices of neurons that spiked
192 | spike_ind = np.empty(num_neuron, dtype=np.ndarray)
193 |
194 | # transform spikes to interspike intervals format
195 | for iTrial in range(num_trial):
196 | for iCell in range(num_neuron):
197 | spike_ind[iCell] = iCell * \
198 | np.ones(len(spikes[iCell, iTrial]), dtype=int)
199 | all_spikes = np.concatenate(spikes[:, iTrial], axis=0)
200 | all_spike_ind = np.concatenate(spike_ind[:], axis=0)
201 | # create data array
202 | data[iTrial, 0] = np.zeros(len(all_spikes) + record_trial_end)
203 |
204 | if all_spikes.shape[0] == 0:
205 | data[iTrial, 1] = np.zeros(0)
206 | # If no spikes emitted, set to trial beginning time
207 | last_spike_time = time_epoch[iTrial][0]
208 | else:
209 | # sort spike times and neuron index arrays
210 | ind_sort = np.argsort(all_spikes)
211 | all_spikes = all_spikes[ind_sort]
212 | all_spike_ind = all_spike_ind[ind_sort]
213 | data[iTrial, 0][1:len(all_spikes)] = np.diff(all_spikes)
214 | data[iTrial, 0][0] = all_spikes[0] - time_epoch[iTrial][0]
215 | last_spike_time = all_spikes[-1]
216 |
217 | if record_trial_end:
218 | data[iTrial, 0][-1] = time_epoch[iTrial][1] - last_spike_time
219 | # assign indices of neurons which fired, trial end is marked
220 | # with -1
221 | data[iTrial, 1] = np.concatenate((all_spike_ind, [-1]))
222 | else:
223 | data[iTrial, 1] = all_spike_ind
224 | return data
225 |
226 | @staticmethod
227 | def transform_isis_to_spikes(data):
228 | """ Convert ISIs to spikes
229 | """
230 |
231 | num_neuron = max([seq[1].max() for seq in data]) + 1
232 | num_trial = data.shape[0]
233 |
234 | spikes = np.empty((num_neuron, num_trial), dtype=np.ndarray)
235 | time_epoch = []
236 |
237 | for iTrial, seq in enumerate(data):
238 | all_spikes = np.cumsum(seq[0])
239 | time_epoch.append((0, all_spikes[-1]))
240 | for iCell in range(num_neuron):
241 | spikes[iCell, iTrial] = all_spikes[seq[1] == iCell]
242 |
243 | return spikes, time_epoch
244 |
245 | def _check_and_fix_inputs(self):
246 | if self.dformat == 'spiketimes':
247 | if not isinstance(self.data, Iterable):
248 | raise TypeError(
249 | 'Spikes should be an ndarray or a list of length '
250 | 'num_neuron'
251 | )
252 | num_neuron = len(self.data)
253 |
254 | # Record number of neurons
255 | if self.num_neuron is None:
256 | self.num_neuron = num_neuron
257 | elif self.num_neuron != num_neuron:
258 | raise ValueError(
259 | f'Provided number of neurons is {self.num_neuron}, detected '
260 | f'number of neurons is {num_neuron}'
261 | )
262 |
263 | # A simple (non-nested) list/array would mean 1 neuron and 1 trial
264 | if num_neuron == 1 and not isinstance(self.data[0], Iterable):
265 | self.data = [[self.data]]
266 |
267 | for iseq, seq in enumerate(self.data):
268 | if not isinstance(seq, Iterable):
269 | raise TypeError(
270 | 'Each entry in spikes should be a list or array of '
271 | 'length num_trials'
272 | )
273 | if not isinstance(seq[0], Iterable):
274 | # This means 1 trial
275 | self.data[iseq] = [self.data[iseq]]
276 | for iseq, seq in enumerate(self.data):
277 | if iseq == 0:
278 | num_trial = len(seq)
279 | else:
280 | if len(seq) != num_trial:
281 | raise ValueError(
282 | f'Neuron {iseq} has {len(seq)} trials, while '
283 | f'neuron 0 has {num_trial} trials'
284 | )
285 | for isubseq, subseq in enumerate(seq):
286 | if not isinstance(subseq, np.ndarray):
287 | self.data[iseq][isubseq] = np.array(
288 | self.data[iseq][isubseq]
289 | )
290 | for iseq, seq in enumerate(self.data):
291 | for isubseq, subseq in enumerate(seq):
292 | if not all(np.diff(subseq) >= 0):
293 | raise ValueError(
294 | f'Spikes for neuron {iseq} are not sorted on trial'
295 | f' {isubseq}'
296 | )
297 |
298 | for i, (el1, el2) in enumerate(self.time_epoch):
299 | if not isinstance(el1, numbers.Number):
300 | raise TypeError(
301 | 'Each of the trial_start should be a number'
302 | )
303 | if not isinstance(el2, numbers.Number):
304 | raise TypeError('Each of the trial_end should be a number')
305 | if el2 < el1:
306 | raise ValueError(
307 | 'trial_end should be greater than trial_start'
308 | )
309 | for neuron in range(num_neuron):
310 | if self.data[neuron][i].size == 0:
311 | continue
312 | if el1 > self.data[neuron][i][0]:
313 | raise ValueError(
314 | f'On trial {i} one of the neurons spiked before '
315 | 'trial_start'
316 | )
317 | if el2 < self.data[neuron][i][-1]:
318 | raise ValueError(
319 | f'On trial {i} one of the neurons spiked after '
320 | 'trial_end'
321 | )
322 |
323 | elif self.dformat == 'ISIs':
324 | if isinstance(self.data, list):
325 | self.data = np.asarray(self.data, dtype='object')
326 | num_trials = len(self.data)
327 | if num_trials == 0:
328 | raise ValueError('No data provided')
329 |
330 | for trial_num, seq in enumerate(self.data):
331 | if len(seq) != 2:
332 | raise ValueError(
333 | 'For each trial two entries should be provided: ISIs '
334 | 'and neuron Ids'
335 | )
336 | ISIs = seq[0]
337 | if isinstance(ISIs, list):
338 | self.data[trial_num][0] = np.asarray(
339 | self.data[trial_num][0], dtype=np.float64
340 | )
341 | ISIs = self.data[trial_num][0]
342 | if not isinstance(ISIs, np.ndarray):
343 | raise TypeError('ISIs should be passed as numpy array')
344 | if len(ISIs.shape) > 1:
345 | raise ValueError('ISIs should be 1D array')
346 | ISIs = ISIs.astype(np.float64)
347 | if not all(ISIs >= 0):
348 | raise ValueError(
349 | f'Trial {trial_num} contains negative ISIs'
350 | )
351 | Ids = seq[1]
352 | if isinstance(Ids, list):
353 | self.data[trial_num][1] = np.asarray(Ids, int)
354 | Ids = self.data[trial_num][1]
355 | if not isinstance(Ids, np.ndarray):
356 | raise TypeError('Neural ids should be passed as np array')
357 | if len(Ids.shape) > 1:
358 | raise ValueError('Neural ids should be 1D array')
359 | Ids = Ids.astype('int')
360 | self.data[trial_num][1] = Ids
361 | if np.any(Ids[:-1] < 0) or (
362 | self.num_neuron is not None and
363 | np.any(Ids >= self.num_neuron)
364 | ):
365 | raise ValueError(
366 | 'Neural ids should be integers that start from 0'
367 | )
368 | if len(Ids) > 0 and (Ids[-1] < -1):
369 | raise ValueError(
370 | 'The last neural id should be -1 or nonnegative int'
371 | )
372 | if ISIs.shape != Ids.shape:
373 | raise ValueError(
374 | 'ISIs and neural ids must have the same size'
375 | )
376 | num_neuron = max([seq[1].max() for seq in self.data]) + 1
377 |
378 | # Record number of neurons
379 | if self.num_neuron is None:
380 | self.num_neuron = num_neuron
381 | elif self.num_neuron != num_neuron:
382 | raise ValueError(
383 | f'Provided number of neurons is {self.num_neuron}, detected '
384 | f'number of neurons is {num_neuron}'
385 | )
386 |
387 | # Ensure that every trial end time is recorded
388 | if self.with_trial_end:
389 | if not all([seq[1][-1] == -1 for seq in self.data]):
390 | raise ValueError(
391 | 'On each trial the last entry should be an ISI between'
392 | ' the last spike and trial termination time'
393 | )
394 | else:
395 | # Delete trial end time
396 | for trial_num, seq in enumerate(self.data):
397 | if seq[1][-1] == -1:
398 | logger.debug(
399 | f'Removing trial end time from trial {trial_num}'
400 | )
401 | self.data[trial_num][0] = self.data[trial_num][0][:-1]
402 | self.data[trial_num][1] = self.data[trial_num][1][:-1]
403 |
--------------------------------------------------------------------------------
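A round-trip sketch built from the docstring example above (assuming the
package is importable): converting ISIs to spiketimes and back is lossless
when trial end times are recorded.

import numpy as np
from neuralflow.spike_data import SpikeData

ISIs = np.empty((2, 2), dtype=np.ndarray)
ISIs[0][0] = np.array([0.05, 0.5, 1])
ISIs[0][1] = np.array([1, 2, -1])
ISIs[1][0] = np.array([0.05, 1, 2, 3, 4])
ISIs[1][1] = np.array([0, 2, 1, 2, -1])

sd = SpikeData(ISIs, dformat='ISIs')
print(sd.trial_average_fr())    # trial-averaged rate for each of 3 neurons
sd.change_format('spiketimes')  # ISIs -> spike times (in place)
sd.change_format('ISIs')        # ... and back

--------------------------------------------------------------------------------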
/neuralflow/utilities/__init__.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 |
--------------------------------------------------------------------------------
/neuralflow/utilities/psth.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | """ Utility functions to compute PSTH (trial-average firing rates)
4 | """
5 |
6 | import numpy as np
7 | import scipy
8 |
9 |
10 | def FiringRate(spikes, time_window, dt, tbegin, tend):
11 | """Calculates firing rate from spike data using rectangular time_window
12 | with step dt.
13 |
14 |
15 | Parameters
16 | ----------
17 | spikes : numpy array
18 | Spike times.
19 | time_window : float
20 | The size of rectangular moving window.
21 | dt : float
22 | The distance between midpoints of consecutive bins.
23 | tbegin : float
24 | Start time.
25 | tend : float
26 | End time.
27 |
28 | Returns
29 | -------
30 | time_bins : numpy array
31 | An array of time bins centers.
32 | rate : numpy array
33 | Estimated firing rate at each time bin.
34 |
35 | """
36 | time_bins = np.linspace(
37 | tbegin + time_window / 2,
38 | tend - time_window / 2,
39 | int((tend - tbegin - time_window) / dt)
40 | )
41 | if len(time_bins) == 0:
42 | time_bins = np.array([0.5 * (tbegin + tend)])
43 | spikes = spikes.reshape((-1, 1))
44 | time_bins = time_bins.reshape((1, -1))
45 | rate = np.sum(
46 | np.abs(spikes - time_bins) < time_window / 2, axis=0
47 | ) / time_window
48 | return time_bins, rate
49 |
50 |
51 | def extract_psth(spike_data, RTs, time_window, dt, tbegin, tend):
52 | """Extract psth for plotting
53 | """
54 |
55 | num_neurons, num_trials = spike_data.shape
56 |
57 | rate = np.zeros((np.linspace(
58 | tbegin+time_window/2, tend-time_window/2,
59 | int((tend-tbegin-time_window)/dt)
60 | ).size,
61 | num_trials
62 | ))
63 | rates, rates_SEM = [], []
64 | hand_rt_median = np.median(RTs)
65 | for neuron in range(num_neurons):
66 | for trial in range(num_trials):
67 | tb, rate[:, trial] = FiringRate(
68 | spike_data[neuron, trial], time_window, dt, tbegin, tend
69 | )
70 | rate[np.squeeze(tb) > RTs[trial], trial] = np.nan
71 |
72 | rates.append(np.nanmean(rate, axis=1)[np.squeeze(tb) < hand_rt_median])
73 | rates_SEM.append(
74 | scipy.stats.sem(rate, axis=1, nan_policy='omit')[
75 | np.squeeze(tb) < hand_rt_median
76 | ]
77 | )
78 | return tb, rates, rates_SEM
79 |
--------------------------------------------------------------------------------
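A quick consistency sketch (assuming the package is importable): for a
homogeneous Poisson train the moving-window estimate should hover around the
true rate.

import numpy as np
from neuralflow.utilities.psth import FiringRate

rng = np.random.default_rng(0)
spikes = np.cumsum(rng.exponential(1 / 20, 500))  # ~20 Hz Poisson train
spikes = spikes[spikes < 10]                      # keep 10 s of data
tb, rate = FiringRate(spikes, time_window=1.0, dt=0.1, tbegin=0, tend=10)
print(rate.mean())                                # close to 20

--------------------------------------------------------------------------------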
/neuralflow/utilities/rank_nullspace.py:
--------------------------------------------------------------------------------
1 | """This source file contains some auxilliary functions for PDE Solve."""
2 | import numpy as np
3 | from numpy.linalg import svd
4 |
5 |
6 | def rank(A, atol=1e-13, rtol=0):
7 | """Estimate the rank (i.e. the dimension of the nullspace) of a matrix.
8 |
9 | The algorithm used by this function is based on the singular value
10 | decomposition of `A`.
11 |
12 | Parameters
13 | ----------
14 | A : ndarray
15 | A should be at most 2-D. A 1-D array with length n will be treated
16 | as a 2-D with shape (1, n)
17 | atol : float
18 | The absolute tolerance for a zero singular value. Singular values
19 | smaller than `atol` are considered to be zero.
20 | rtol : float
21 | The relative tolerance. Singular values less than rtol*smax are
22 | considered to be zero, where smax is the largest singular value.
23 |
24 | If both `atol` and `rtol` are positive, the combined tolerance is the
25 | maximum of the two; that is::
26 | tol = max(atol, rtol * smax)
27 | Singular values smaller than `tol` are considered to be zero.
28 |
29 | Return value
30 | ------------
31 | r : int
32 | The estimated rank of the matrix.
33 |
34 | See also
35 | --------
36 | numpy.linalg.matrix_rank
37 | matrix_rank is basically the same as this function, but it does not
38 | provide the option of the absolute tolerance.
39 | """
40 |
41 | A = np.atleast_2d(A)
42 | s = svd(A, compute_uv=False)
43 | tol = max(atol, rtol * s[0])
44 | rank = int((s >= tol).sum())
45 | return rank
46 |
47 |
48 | def nullspace(A, atol=1e-13, rtol=0):
49 | """Compute an approximate basis for the nullspace of A.
50 |
51 | The algorithm used by this function is based on the singular value
52 | decomposition of `A`.
53 |
54 | Parameters
55 | ----------
56 | A : ndarray
57 | A should be at most 2-D. A 1-D array with length k will be treated
58 | as a 2-D with shape (1, k)
59 | atol : float
60 | The absolute tolerance for a zero singular value. Singular values
61 | smaller than `atol` are considered to be zero.
62 | rtol : float
63 | The relative tolerance. Singular values less than rtol*smax are
64 | considered to be zero, where smax is the largest singular value.
65 |
66 | If both `atol` and `rtol` are positive, the combined tolerance is the
67 | maximum of the two; that is::
68 | tol = max(atol, rtol * smax)
69 | Singular values smaller than `tol` are considered to be zero.
70 |
71 | Return value
72 | ------------
73 | ns : ndarray
74 | If `A` is an array with shape (m, k), then `ns` will be an array
75 | with shape (k, n), where n is the estimated dimension of the
76 | nullspace of `A`. The columns of `ns` are a basis for the
77 | nullspace; each element in numpy.dot(A, ns) will be approximately
78 | zero.
79 | """
80 |
81 | A = np.atleast_2d(A)
82 | u, s, vh = svd(A)
83 | tol = max(atol, rtol * s[0])
84 | nnz = (s >= tol).sum()
85 | ns = vh[nnz:].conj().T
86 | return ns
87 |
--------------------------------------------------------------------------------
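A verification sketch: a matrix with one dependent row has rank 2, and the
returned nullspace basis annihilates it.

import numpy as np
from neuralflow.utilities.rank_nullspace import rank, nullspace

A = np.array([[1., 2., 3.],
              [4., 5., 6.],
              [5., 7., 9.]])     # third row = first row + second row
print(rank(A))                   # 2
ns = nullspace(A)                # shape (3, 1)
print(np.allclose(A @ ns, 0.0))  # True

--------------------------------------------------------------------------------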
/neuralflow/utilities/spline_padding.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | from scipy.interpolate import CubicSpline, PPoly
3 | import numpy as np
4 |
5 |
6 | def add_anchor_point(x, y):
7 | """Auxilliary function to create stepping potential. Find additional
8 | anchoring point (x_add, y_add), such that x[0] < x_add < x[1], and y_add =
9 | y_spline (x_add_mirror), where y_spline is spline between x[1] and x[2],
10 | and x_add_mirror is a mirror point of x_add w.r.t. x[1], e.g.
11 | |x_add-x[1]|=|x[1]-x_add_mirror|. This additional point will force the
12 | barriers to have symmetrical shape.
13 |
14 | Params:
15 | x: x - values at three right or three left boundary points, e.g.
16 | interp_x[0:3], or interp_x[-3:][::-1] (reverse order on the right
17 | boundary).
18 | y: corresponding y values
19 | Returns:
20 | x_add, y_add - additional point in between x[0] < x_add < x[1] that
21 | can be used for spline interpolation.
22 |
23 |
24 | Parameters
25 | ----------
26 | x : np.array
27 | Values at three right or three left boundary points, e.g.
28 | interp_x[0:3], or interp_x[-3:][::-1] (reverse order on the right
29 | boundary).
30 | y : np.array
31 | The corresponding y values.
32 |
33 | Returns
34 | -------
35 | x_add, y_add
36 |
37 | """
38 | # Start with the middle point
39 | x_add_mirror = 0.5 * (x[1] + x[2])
40 | not_found = True
41 | cpoly = PPoly(CubicSpline(np.sort(x[1:3]), np.sort(
42 | y[1:3]), bc_type=[(1, 0), (1, 0)]).c, np.sort([x[1], x[2]]))
43 | first_peak_is_maximum = y[0] <= y[1]
44 | while not_found:
45 | # Check if y-value at anchor point exceeds value at the boundary and
46 | # that x-value is within an interval
47 | if first_peak_is_maximum:
48 | if (
49 | cpoly(x_add_mirror) > y[0] and
50 | np.abs(x_add_mirror - x[1]) < np.abs(x[1] - x[0])
51 | ):
52 | x_add = 2 * x[1] - x_add_mirror
53 | if x[1] > x[0]:
54 | poly2 = PPoly(
55 | CubicSpline(
56 | [x[0], x_add, x[1]],
57 | [y[0], cpoly(x_add_mirror), y[1]],
58 | bc_type=[(1, 0), (1, 0)]
59 | ).c,
60 | [x[0], x_add, x[1]]
61 | )
62 | else:
63 | poly2 = PPoly(
64 | CubicSpline(
65 | [x[1], x_add, x[0]],
66 | [y[1], cpoly(x_add_mirror), y[0]],
67 | bc_type=[(1, 0), (1, 0)]
68 | ).c,
69 | [x[1], x_add, x[0]]
70 | )
71 | x_dense = np.linspace(x[0], x[1], 100)
72 | if all(poly2(x_dense) <= y[1]):
73 | not_found = False
74 | else:
75 | if cpoly(x_add_mirror) < y[0]:
76 | if np.abs(x_add_mirror - x[1]) < np.abs(x[1] - x[0]):
77 | x_add = 2 * x[1] - x_add_mirror
78 | if x[1] > x[0]:
79 | poly2 = PPoly(
80 | CubicSpline(
81 | [x[0], x_add, x[1]],
82 | [y[0], cpoly(x_add_mirror), y[1]],
83 | bc_type=[(1, 0), (1, 0)]
84 | ).c,
85 | [x[0], x_add, x[1]]
86 | )
87 | else:
88 | poly2 = PPoly(
89 | CubicSpline(
90 | [x[1], x_add, x[0]],
91 | [y[1], cpoly(x_add_mirror), y[0]],
92 | bc_type=[(1, 0), (1, 0)]
93 | ).c,
94 | [x[1], x_add, x[0]]
95 | )
96 | x_dense = np.linspace(x[0], x[1], 100)
97 | if all(poly2(x_dense) >= y[1]):
98 | not_found = False
99 | x_add_mirror = 0.5 * (x[1] + x_add_mirror)
100 | return x_add, cpoly(2 * x[1] - x_add)
101 |
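102 | # Usage sketch (illustrative addition, not part of the original file):
103 | # find an anchor point for three made-up boundary knots of a stepping
104 | # potential; y[0] <= y[1] exercises the "maximum" branch.
105 | if __name__ == '__main__':
106 | x_demo = np.array([-1.0, -0.5, 0.5])
107 | y_demo = np.array([0.0, 2.0, 1.0])
108 | x_add, y_add = add_anchor_point(x_demo, y_demo)
109 | print(x_add, y_add)  # a point with x_demo[0] < x_add < x_demo[1]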
--------------------------------------------------------------------------------
/neuralflow/utilities/visualization_functions.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """ Functions for visualization used in 2021_learning_nonstationary_dynamics
3 | """
4 |
5 | import numpy as np
6 | import matplotlib.pyplot as plt
7 |
8 |
9 | def plot_spikes(data, handle, colormap, spike_spacing=0.7, spike_height=0.4,
10 | spike_linewidth=1.5):
11 | """Visualize spike rasters
12 |
13 |
14 | Parameters
15 | ----------
16 | data : numpy array (N,2), dtype=np.ndarray.
17 | Spike data in ISIs format.
18 | handle : matplotlib.axes._subplots.AxesSubplot
19 | Handle of the matplotlib axes
20 | colormap : List
21 | For each trial contains color in RGB format.
22 | spike_spacing : float
23 | Vertical spacing between the spikes. The default is 0.7.
24 | spike_height : float
25 | Height of the spike ticks. The default is 0.4.
26 | spike_linewidth : float
27 | Width of the spike ticks. The default is 1.5.
28 |
29 | Returns
30 | -------
31 | None.
32 |
33 | """
34 | num_trials = data.shape[0]
35 |
36 | # Convert ISIs into spike times, discard the trial end time, and
37 | # convert seconds into ms
38 | spikes = []
39 | for i in range(num_trials):
40 | if data[i][1][-1] == -1:
41 | spikes.append(1000 * np.cumsum(data[i][0][:-1], axis=0))
42 | else:
43 | spikes.append(1000 * np.cumsum(data[i][0], axis=0))
44 |
45 | # Plot spike rasters
46 | for i in range(num_trials):
47 | for sp in spikes[num_trials - i - 1]:
48 | handle.plot(
49 | [sp, sp],
50 | [spike_spacing * i - spike_height / 2,
51 | spike_spacing * i + spike_height / 2],
52 | linewidth=spike_linewidth,
53 | color=colormap[num_trials - i - 1]
54 | )
55 |
56 | # Some adjustments
57 | handle.set_yticklabels([])
58 | handle.set_yticks([])
59 | handle.spines["top"].set_visible(False)
60 | handle.spines["right"].set_visible(False)
61 | handle.spines["left"].set_visible(False)
62 |
63 |
64 | def plot_fitting_results(figure_handle1, figure_handle2, init_model, results,
65 | gt_model, ll_gt, iterations, colors, num_model=0):
66 | """Visualise fitting results: plot negative loglikelihood vs. iteration
67 | number, and fitted potential functions on the selected iterations.
68 | This format was used for learning_nonstationary_dynamics (2.0.0 version)
69 |
70 | Parameters
71 | ----------
72 | figure_handle1,figure_handle2 : matplotlib handles
73 | Where to plot negative loglikelihood vs. iteration number and the fitted
74 | potentials, respectively.
75 | em_fit : EnergyModel
76 | Fitted EnergyModel object.
77 | em_gt : EnergyModel
78 | Ground-truth EnergyModel object which was used to generate the data.
79 | fit_options : dictionary
80 | Options used for fitting.
81 | iterations : numpy array
82 | List of iterations on which the potential function will be plotted.
83 | Same size as the colors list.
84 | colors : list
85 | List with RGB colors, where each entry is a list with three RGB values.
86 | These colors will be used to plot model potential on the selected
87 | iterations. Same size as iterations array.
88 |
89 | Returns
90 | -------
91 | """
92 |
93 | # Plot Relative loglikelihood
94 | ax = plt.subplot(figure_handle1)
95 | rel_lls = (ll_gt - results['logliks'][num_model]) / ll_gt
96 | iterations_all = np.arange(1, rel_lls.size + 1)
97 | ax.plot(iterations_all, rel_lls, linewidth=3, color=[0.35, 0.35, 0.35])
98 | ax.plot(iterations_all, np.zeros_like(iterations_all),
99 | '--', linewidth=3, color=[0.5, 0.5, 0.5])
100 | for i, iteration in enumerate(iterations):
101 | ax.plot(iterations_all[iteration], rel_lls[iteration],
102 | '.', markersize=60, color=colors[i])
103 |
104 | plt.ylabel(r'Relative $\log\mathscr{L}$', fontsize=15)
105 | plt.xlabel('Iteration number', fontsize=15)
106 | plt.xscale('log')
107 | ax.tick_params(axis='both', which='major', labelsize=15)
108 |
109 | # Plot the fitted potential functions
110 | ax = plt.subplot(figure_handle2)
111 |
112 | # Potential is negative log of peq
113 | ax.plot(gt_model.grid.x_d, -np.log(gt_model.peq[num_model]), linewidth=3,
114 | color='black', label='Ground-truth')
115 | ax.plot(
116 | init_model.grid.x_d, -np.log(results['peq'][0][num_model, :]),
117 | linewidth=3, color=[0.5, 0.5, 0.5], label='Initialization'
118 | )
119 | for i, iteration in enumerate(iterations):
120 | ax.plot(
121 | init_model.grid.x_d,
122 | -np.log(results['peq'][iteration][num_model, :]),
123 | linewidth=3, color=colors[i],
124 | label='Iteration {}'.format(iteration)
125 | )
126 | plt.legend(fontsize=15)
127 | plt.xlabel(r'Latent state, $x$', fontsize=15)
128 | plt.ylabel(r'Potential, $\Phi(x)$', fontsize=15)
129 | ax.tick_params(axis='both', which='major', labelsize=15)
130 |
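131 | # Usage sketch (illustrative addition, not part of the original file):
132 | # raster of two made-up trials in ISI format. data[i][0] holds the
133 | # inter-spike intervals in seconds; data[i][1] holds the neuron ids,
134 | # with a final id of -1 marking the interval to the trial end.
135 | if __name__ == '__main__':
136 | demo = np.empty((2, 2), dtype=np.ndarray)
137 | demo[0][0] = np.array([0.05, 0.10, 0.20, 0.15])
138 | demo[0][1] = np.array([0, 0, 0, -1])
139 | demo[1][0] = np.array([0.08, 0.12, 0.30])
140 | demo[1][1] = np.array([0, 0, -1])
141 | fig, ax = plt.subplots()
142 | plot_spikes(demo, ax, [[0.2, 0.4, 0.8], [0.8, 0.3, 0.3]])
143 | plt.show()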
--------------------------------------------------------------------------------
/neuralflow/viterbi.py:
--------------------------------------------------------------------------------
1 |
2 | # -*- coding: utf-8 -*-
3 | """Viterbi algorithm. Only supported on CPU
4 | """
5 |
6 | import numpy as np
7 |
8 |
9 | class Viterbi:
10 |
11 | """Viterbi algorithm
12 |
13 |
14 | Parameters
15 | ----------
16 | grad : neuralflow.Grads
17 | Gradient object is needed for solving FPE.
18 |
19 | Returns
20 | -------
21 | None.
22 | """
23 |
24 | def __init__(self, grad):
25 | """
26 | """
27 | self.grad = grad
28 |
29 | def run_viterbi(self, data, model, model_num=0):
30 | """Viterbi algorithm
31 | Finds the latent path X that maximizes joint probability P(X,Y). The
32 | path X is sampled at trial start/trial end time, and at the time of
33 | each of spike.
34 |
35 |
36 | Parameters
37 | ----------
38 | data : neuralflow.spike_data.SpikeData.
39 | SpikeData object in ISI format.
40 | model : neuralflow.model.model
41 | A model object.
42 | model_num : model number to be used. The default is 0.
43 |
44 | Returns
45 | -------
46 | trajectories : numpy array, dtype=np.ndarray
47 | Latent trajectory sampled at spike times for each trial in the
48 | data.
49 | state_inds : indeces of trajectory latent states on the gird.
50 |
51 | """
52 |
53 | # Compute EV solution
54 | EV_solution = self.grad._get_EV_solution(model, model_num)
55 |
56 | trajectories = np.empty(data.data.shape[0], dtype=np.ndarray)
57 | state_inds = np.empty(data.data.shape[0], dtype=np.ndarray)
58 |
59 | for trial in range(data.data.shape[0]):
60 | data_cur = data.data[trial]
61 | trajectories[trial], state_inds[trial] = self._Viterbi_trial(
62 | data_cur, model, model_num, EV_solution
63 | )
64 | return trajectories, state_inds
65 |
66 | def _Viterbi_trial(self, data, model, model_num, EV_solution):
67 | """Viterbi Algorithm for a single trial
68 | """
69 |
70 | # Extract ISI and neuron_id for convenience
71 | seq, nid = data
72 |
73 | # Sequence length
74 | S_Total = len(seq)
75 |
76 | grid = self.grad.pde_solver.grid
77 |
78 | # Exclude points on the boundary for the absorbing boundary mode. This
79 | # is equivalent to setting the probability of the latent trajectory to
80 | # zero at the boundary before the trial end
81 | margin = 1
82 |
83 | # For some reason Viterbi does not work well with the reflecting mode.
84 | # For a reflective boundary there is probability accumulation at the
85 | # domain boundaries, causing the trajectory to always stay exactly at
86 | # the boundary at all times. We fix this by setting a large margin where
87 | # the probability is forced to be zero.
88 | # Ideally, the margin for reflective boundary should be zero
89 | margin_ref = grid.N//30
90 |
91 | # Qxd is the transformation matrix from H-basis to SEM; QxdOrig is the
92 | # same but with scaled EVs
93 | Qxd = EV_solution["Qx"].dot(EV_solution["Qd"])
94 | QxdOrig = EV_solution["QxOrig"].dot(EV_solution["Qd"])
95 |
96 | # Model parameters
97 | peq, p0, D, fr = model.get_params(model_num)
98 |
99 | # Initialize the atemp for forward pass.
100 | atemp = np.log(p0)
101 |
102 | # Store the latent states for traceback
103 | states = np.zeros((grid.N, S_Total + 1)).astype(int)
104 |
105 | # Precalculate propagation matrix exp(-lambda_i*dt_j)
106 | prop_mat = np.exp(np.outer(-EV_solution["lQd"], seq))
107 |
108 | # Forward pass
109 | for i in range(1, S_Total + 1):
110 | # Propagate delta-function probability density in latent state
111 | temp = (
112 | Qxd.dot(QxdOrig.T * prop_mat[:, i-1][:, np.newaxis]) *
113 | np.sqrt(peq)[:, np.newaxis]
114 | )
115 | # Exclude boundaries
116 | if self.grad.boundary_mode == 'absorbing' and margin > 0:
117 | temp[:, 0:margin] = 0
118 | temp[:, -margin:] = 0
119 | temp[0:margin, :] = 0
120 | temp[-margin:, :] = 0
121 | elif self.grad.boundary_mode == 'reflecting' and margin_ref > 0:
122 | temp[:, 0:margin_ref] = 0
123 | temp[:, -margin_ref:] = 0
124 | temp[0:margin_ref, :] = 0
125 | temp[-margin_ref:, :] = 0
126 |
127 | # Multiply (sum up logs) with the previous vector of probabilities
128 | reduced_arg = np.log(np.maximum(temp, 10**-10)) + atemp
129 | # Max over previous state
130 | states[:, i] = np.argmax(reduced_arg, axis=1)
131 | # Record the maximum for each state
132 | atemp = reduced_arg[np.arange(grid.N), states[:, i]]
133 | # Emit spike
134 | if i != S_Total or not self.grad.with_trial_end:
135 | atemp += np.log(fr[:, nid[i-1]])
136 | # Exclude boundaries for absorbing boundary mode, as we can't hit
137 | # a boundary before trial end time
138 | if self.grad.boundary_mode == 'absorbing' and margin > 0:
139 | atemp[0:margin] = atemp[-margin:] = 0
140 | elif self.grad.boundary_mode == 'reflecting' and margin_ref > 0:
141 | atemp[0:margin_ref] = atemp[-margin_ref:] = 0
142 |
143 | # trajectory and state indices
144 | trajectory = np.zeros(S_Total + 1)
145 | state_inds = np.zeros(S_Total + 1, dtype=int)
146 |
147 | if self.grad.boundary_mode == 'absorbing':
148 | # Force to end at the boundary
149 | idx = (margin if atemp[margin] > atemp[-1 - margin]
150 | else grid.N - 1 - margin)
151 | trajectory[-1] = grid.x_d[0] if idx == margin else grid.x_d[-1]
152 | state_inds[-1] = 0 if idx == margin else grid.N-1
153 | else:
154 | idx = np.argmax(atemp)
155 | trajectory[-1] = grid.x_d[idx]
156 | state_inds[-1] = idx
157 |
158 | # Traceback
159 | for i in range(S_Total, 0, -1):
160 | idx = states[idx, i]
161 | trajectory[i-1] = grid.x_d[idx]
162 | state_inds[i-1] = idx
163 |
164 | return trajectory, state_inds.astype(int)
165 |
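166 | # Usage sketch (illustrative addition, not part of the original file),
167 | # assembled from the constructors used in tests/test_viterbi.py and
168 | # tests/test_gradients.py; all parameter values below are made up.
169 | if __name__ == '__main__':
170 | from neuralflow.model import model
171 | from neuralflow.grid import GLLgrid
172 | from neuralflow.gradients import Grads
173 | from neuralflow.data_generation import SyntheticData
174 | from neuralflow.spike_data import SpikeData
175 | pde_solve_params = {'xbegin': -1, 'xend': 1, 'Np': 8, 'Ne': 16}
176 | grid = GLLgrid(**pde_solve_params)
177 | em = model.new_model(
178 | peq_model={'model': 'linear_pot', 'params': {'slope': -1}},
179 | p0_model={'model': 'single_well', 'params': {'miu': 100}},
180 | D=0.2,
181 | fr_model=[{'model': 'linear', 'params': {'slope': 50, 'bias': 60}}],
182 | grid=grid,
183 | )
184 | dg = SyntheticData(em, 'absorbing')
185 | data, _, _ = dg.generate_data(0, 1, 5, 0)
186 | spike_data = SpikeData(data, 'ISIs')
187 | grad = Grads(pde_solve_params, 'absorbing', ['F'], em.fr.shape[2],
188 | device='CPU')
189 | trajectories, state_inds = Viterbi(grad).run_viterbi(spike_data, em)
190 | print(trajectories[0])  # latent path of the first trial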
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [build-system]
2 | requires = ["setuptools", "wheel", "Cython", "numpy"]
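3 |
4 | # Assumed addition (not in the original file): make the implied setuptools
5 | # backend explicit; without it pip falls back to the legacy behaviour.
6 | build-backend = "setuptools.build_meta"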
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | # Additional requirements for project compilation at readthedocs
2 | numpydoc
3 | nbsphinx
4 |
5 | # Additional requirements for Windows (not pip-installable, needed to
6 | # build the Cython extension):
7 | # Microsoft Visual C++ 14.0 or greater
8 |
9 | # Additional requirements for Mac (not pip-installable):
10 | # Xcode command line tools (a C/C++ compiler such as clang)
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | from setuptools import setup
2 | from setuptools.extension import Extension
3 | import numpy
6 |
7 | USE_CYTHON = 1 # change to 0 to build the extension from c.
8 |
9 | ext = '.pyx' if USE_CYTHON else '.c'
10 |
11 |
12 | extensions = [
13 | Extension(
14 | "neuralflow.c_get_gamma",
15 | ["neuralflow/c_get_gamma" + ext],
16 | include_dirs=[numpy.get_include(), "neuralflow/"]
17 | )
18 | ]
19 |
20 | if USE_CYTHON:
21 | from Cython.Build import cythonize
22 | extensions = cythonize(extensions, build_dir="neuralflow/build")
23 |
24 |
25 | setup(name='neuralflow',
26 | description='Modeling neural spiking activity with continuous latent Langevin dynamics',
27 | version='3.0.0',
28 | ext_modules=extensions,
29 | packages=["neuralflow", "neuralflow.utilities", "neuralflow.feature_complexity"],
30 | keywords='Neuroscience, Machine learning, Langevin modeling',
31 | author='Mikhail Genkin and Tatiana A. Engel',
32 | author_email='engel@cshl.edu',
33 | license='MIT',
34 | include_package_data=True,
35 | setup_requires=['numpy'],
36 | install_requires=[
37 | 'numpy',
38 | 'matplotlib',
39 | 'pandas',
40 | 'scipy',
41 | 'tqdm',
42 | 'scikit-learn',
43 | ],
44 | zip_safe=False)
45 |
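46 | # Build note (illustrative addition, not part of the original file): the
47 | # Cython extension can be compiled in place before running the tests with
48 | #
49 | #     python setup.py build_ext --inplace
50 | #
51 | # or the package installed in editable mode with `pip install -e .`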
--------------------------------------------------------------------------------
/tests/EV_problems_library.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | This file contains tests for Sturm–Liouville eigenvalue problems with known
4 | analytical solutions. All eigenvectors are normalized by their max(abs()) value.
5 |
6 | Input:
7 | Nv : int
8 | Number of EigenValues and EigenVectors to output.
9 | xgrid : np.array or None
10 | Array of x-values where the exact solution will be evaluated.
11 | mode : ENUM('domain_and_bc', 'exact_solution').
12 | Either return xbegin, xend, and boundary conditions, or the exact
13 | solution.
14 | Output:
15 | if mode == 'domain_and_bc':
16 | BC : a dictionary that specifies the boundary conditions
17 | xbegin : float
18 | x-position of the domain's left end
19 | xend : float
20 | x-position of the domain's right end
21 | else:
22 | p(x) - function p calculated at xgrid
23 | q(x) - function q calculated at xgrid
24 | w(x) - function w calculated at xgrid
25 | BC - boundary conditions
26 | Evals - array of size Nv with analytical solutions for the eigenvalues
27 | Evects - array (len(xgrid),Nv) with analytical solutions for the
28 | eigenvectors
29 | """
30 | import numpy as np
32 | import scipy.special as specfuns
33 |
34 | # Sine test: -u''(x)=lambda*u(x); u(0)=u(1)=0
35 |
36 |
37 | def SineTest(Nv, xgrid, mode):
38 | xbegin, xend = 0, 1
39 | if mode == 'domain_and_bc':
40 | BC = {'leftB': 'Dirichlet', 'rightB': 'Dirichlet'}
41 | return BC, xbegin, xend
42 | else:
43 | assert (
44 | np.abs(xgrid[0]-xbegin) < 1e-5 and np.abs(xgrid[-1]-xend) < 1e-5
45 | ), 'Incorrect x-interval'
47 | EVals = [((n+1)*np.pi)**2 for n in range(0, Nv)]
48 | EVects = np.sin(np.outer(xgrid, np.sqrt(EVals)))
49 | EVects /= np.max(np.abs(EVects), 0)
50 | p = -np.ones(len(xgrid))
51 | q = None
52 | w = np.ones(len(xgrid))
53 | return p, q, w, EVals, EVects
54 |
55 | # Cosine test: -u''(x)=lambda*u(x); u'(0)=u'(1)=0
56 |
57 |
58 | def CosineTest(Nv, xgrid, mode):
59 | xbegin, xend = 0, 1
60 | if mode == 'domain_and_bc':
61 | BC = {'leftB': 'Neumann', 'rightB': 'Neumann'}
62 | return BC, xbegin, xend
63 | else:
64 | assert (
65 | np.abs(xgrid[0]-xbegin) < 1e-5 and np.abs(xgrid[-1]-xend) < 1e-5
66 | ), 'Incorrect x-interval'
68 | EVals = [(n*np.pi)**2 for n in range(0, Nv)]
69 | EVects = np.cos(np.outer(xgrid, np.sqrt(EVals)))
70 | EVects /= np.max(np.abs(EVects), 0)
71 | p = -np.ones(len(xgrid))
72 | q = None
73 | w = np.ones(len(xgrid))
74 | return p, q, w, EVals, EVects
75 |
76 | # Sine Test2: -u''(x)=lambda*u(x), u(0)=0, u'(1)=0
77 |
78 |
79 | def Sine2Test(Nv, xgrid, mode):
80 | xbegin, xend = 0, 1
81 | if mode == 'domain_and_bc':
82 | BC = {'leftB': 'Dirichlet', 'rightB': 'Neumann'}
83 | return BC, xbegin, xend
84 | else:
85 | assert (
86 | np.abs(xgrid[0]-xbegin) < 1e-5 and np.abs(xgrid[-1]-xend) < 1e-5
87 | ), 'Incorrect x-interval'
89 | EVals = [((n+1/2)*np.pi)**2 for n in range(0, Nv)]
90 | EVects = np.sin(np.outer(xgrid, np.sqrt(EVals)))
91 | EVects /= np.max(np.abs(EVects), 0)
92 | p = -np.ones(len(xgrid))
93 | q = None
94 | w = np.ones(len(xgrid))
95 | return p, q, w, EVals, EVects
96 |
97 | # Mixed test: -u''(x)=lambda*u(x); u(0)+u'(0)=u(1)+u'(1)=0
98 |
99 |
100 | def MixedCosineSineTest(Nv, xgrid, mode):
101 | xbegin, xend = 0, 1
102 | if mode == 'domain_and_bc':
103 | BC = {
104 | 'leftB': 'Robin', 'leftBCoeff': {'c1': 1, 'c2': 1},
105 | 'rightB': 'Robin', 'rightBCoeff': {'c1': 1, 'c2': 1}
106 | }
107 | return BC, xbegin, xend
108 | else:
109 | assert (
110 | np.abs(xgrid[0]-xbegin) < 1e-5 and np.abs(xgrid[-1]-xend) < 1e-5
111 | ), 'Incorrect x-interval'
113 | EVals = [(n*np.pi)**2 for n in range(1, Nv)]
114 | EVects = (
115 | np.sin(np.outer(xgrid, np.sqrt(EVals))) -
116 | np.cos(np.outer(xgrid, np.sqrt(EVals)))*np.sqrt(EVals)
117 | )
118 | EVals = np.append(-1, EVals)
119 | EVects = np.append(
120 | np.reshape(np.exp(-xgrid), (len(xgrid), 1)), EVects, axis=1
121 | )
122 | EVects /= np.max(np.abs(EVects), 0)
123 | p = -np.ones(len(xgrid))
124 | q = None
125 | w = np.ones(len(xgrid))
126 | return p, q, w, EVals, EVects
127 |
128 | # -x^2y''(x)-2xy(x)=lambda*y, y(1)+2y'(1)=0, y(2)+4y'(2)=0
129 |
130 |
131 | def SquarePeqMixedTest(Nv, xgrid, mode):
132 | xbegin, xend = 1, 2
133 | if mode == 'domain_and_bc':
134 | BC = {
135 | 'leftB': 'Robin', 'leftBCoeff': {'c1': 1, 'c2': 2},
136 | 'rightB': 'Robin', 'rightBCoeff': {'c1': 1, 'c2': 4}
137 | }
138 | return BC, xbegin, xend
139 | else:
140 | assert (
141 | np.abs(xgrid[0]-xbegin) < 1e-5 and np.abs(xgrid[-1]-xend) < 1e-5
142 | ), 'Incorrect x-interval'
144 | EVals = [1/4 + (n*np.pi/np.log(2))**2 for n in range(0, Nv)]
145 | EVects = (
146 | np.cos(np.outer(
147 | np.log(xgrid), [np.pi*n/np.log(2) for n in range(0, Nv)]
148 | )) /
149 | np.sqrt(xgrid)[:, np.newaxis]
150 | )
151 | EVects /= np.max(np.abs(EVects), 0)
152 | p = -xgrid**2
153 | q = None
154 | w = np.ones(len(xgrid))
155 | return p, q, w, EVals, EVects
156 |
157 | # -(xy')'+y/x=lambda*x*y, y(0)=0, y'(1)=0
158 |
159 |
160 | def BesselTest(Nv, xgrid, mode):
161 | xbegin, xend = 0, 1
162 | if mode == 'domain_and_bc':
163 | BC = {'leftB': 'Dirichlet', 'rightB': 'Neumann'}
164 | return BC, xbegin, xend
165 | else:
166 | assert (
167 | np.abs(xgrid[0]-xbegin) < 1e-5 and np.abs(xgrid[-1]-xend) < 1e-5
168 | ), 'Incorrect x-interval'
170 | EVals = specfuns.jnp_zeros(1, Nv)**2
171 | EVects = specfuns.j1(np.outer(xgrid, np.sqrt(EVals)))
172 | EVects /= np.max(np.abs(EVects), 0)
173 | p = -xgrid
174 | # Replace Inf with 10**10 as Inf is not supported by the solver
175 | q = np.append(10**10, 1/xgrid[1:])
176 | w = xgrid
177 | return p, q, w, EVals, EVects
178 |
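179 | # Usage sketch (illustrative addition, not part of the original file):
180 | # evaluate the exact solution of the sine problem on a uniform grid and
181 | # verify the Dirichlet boundary conditions.
182 | if __name__ == '__main__':
183 | xgrid = np.linspace(0, 1, 101)
184 | p, q, w, EVals, EVects = SineTest(5, xgrid, 'exact_solution')
185 | print(EVals[0])  # pi**2 ~ 9.87
186 | print(EVects[0, 0], EVects[-1, 0])  # ~0 at both Dirichlet ends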
--------------------------------------------------------------------------------
/tests/test_PDESolve.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding: utf-8 -*-
3 | """Test PDE solver
4 | """
5 |
6 | import unittest
7 | from neuralflow import PDE_Solve
8 | import numpy as np
9 | from testhelper_PDE_Solve import PerformTest
10 | from pkg_resources import working_set
11 |
12 | GPU_support = any([pkg.key.startswith('cupy') for pkg in working_set])
13 |
14 |
15 | class TestPDESolve(unittest.TestCase):
16 |
17 | @classmethod
18 | def setUpClass(cls):
19 | # Parameters for testing EV problem.
20 |
21 | # Number of elements
22 | cls.Ne = 128
23 | # Number of points per element.
24 | cls.Np = 8
25 | # Number of EV/EVects to compare with analytical solution
26 | cls.Nv = 10
27 | # Name of the problem, see EV_problems_library.py
28 | cls.EVTestNames = ['Sine', 'Sine2', 'Cosine', 'MixedCosineSine',
29 | 'SquarePeqMixed', 'Bessel']
30 |
31 | # Test derivative and antiderivative matrices
32 | # Define (f(x), f'(x), and integral(f(x)))
33 | # antiderivative is zero at x=xbegin
34 | cls.test_functions = [lambda x, x0: (x**2, 2*x, (x**3-x0**3)/3),
35 | lambda x, x0: (
36 | np.sin(x), np.cos(x), np.cos(x0) - np.cos(x)
37 | )
38 | ]
39 |
40 | def testInputs(self):
41 | """"A few test for input parameters
42 | """
43 | with self.assertRaises(ValueError):
44 | xbegin, xend = -1, -1
45 | _ = PDE_Solve.PDESolve(xbegin, xend)
46 | with self.assertRaises(ValueError):
47 | Np = 1
48 | _ = PDE_Solve.PDESolve(Np=Np)
49 | with self.assertRaises(ValueError):
50 | Ne = 0
51 | _ = PDE_Solve.PDESolve(Ne=Ne)
52 | with self.assertRaises(ValueError):
53 | BoundCond = {'leftB': 'Neumann', 'rightB': 'Neuman'}
54 | _ = PDE_Solve.PDESolve(BoundCond=BoundCond)
55 |
56 | def testEVsolver(self):
57 | """Solve EV problem and compare numerical solution with analytical for
58 | six problems with known analytical solution.
59 | """
60 | for name in self.EVTestNames:
61 | _, ErrVec, ErrVal = PerformTest(
62 | self.Ne, self.Nv, self.Np, name, False
63 | )
64 | self.assertTrue(
65 | all(ErrVec < 10**-6) and all(ErrVal < 10**-6),
66 | f'{name} error is larger than 10^-6'
67 | )
68 |
69 | @unittest.skipIf(not GPU_support, 'cupy not installed')
70 | def testEVsolverGPU(self):
71 | """Solve EV problem and compare numerical solution with analytical for
72 | six problems with known analytical solution.
73 | """
74 | for name in self.EVTestNames:
75 | _, ErrVec, ErrVal = PerformTest(
76 | self.Ne, self.Nv, self.Np, name, True
77 | )
78 | self.assertTrue(
79 | all(ErrVec < 10**-6) and all(ErrVal < 10**-6),
80 | f'{name} error is larger than 10^-6'
81 | )
82 |
83 | def testIntegrate(self):
84 | """Test antiderivative and integration functions"""
85 |
86 | # Initialize class instance
87 | xbegin, xend = -2, 2
88 | solver = PDE_Solve.PDESolve(xbegin, xend, self.Np, self.Ne)
89 | for f in self.test_functions:
90 | res = f(solver.grid.x_d, xbegin)
91 | # Compute derivative
92 | f_derivative = solver.grid.Differentiate(res[0])
93 | # Compute antiderivative
94 | f_integral = solver.grid.Integrate(res[0])
95 | self.assertTrue(np.allclose(f_derivative, res[1]))
96 | self.assertTrue(np.allclose(f_integral, res[2]))
97 |
98 | @unittest.skipIf(not GPU_support, 'cupy not installed')
99 | def testIntegrateGPU(self):
100 | """Test antiderivative and integration functions"""
101 |
102 | import cupy as cp
103 | # Initialize class instance
104 | xbegin, xend = -2, 2
105 | solver = PDE_Solve.PDESolve(
106 | xbegin, xend, self.Np, self.Ne, with_cuda=True
107 | )
108 | for f in self.test_functions:
109 | res = list(f(solver.grid.cuda_var.x_d, xbegin))
110 | # Compute derivative
111 | f_derivative = solver.grid.Differentiate(res[0], device='GPU')
112 | # Compute antiderivative
113 | f_integral = solver.grid.Integrate(res[0], device='GPU')
114 | self.assertTrue(cp.allclose(f_derivative, res[1]))
115 | self.assertTrue(cp.allclose(f_integral, res[2]))
116 |
117 |
118 | if __name__ == '__main__':
119 | unittest.main()
120 |
--------------------------------------------------------------------------------
/tests/test_gradients.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | """Unittests for the gradients. It tests the gradients
4 | of loglikelihood w.r.t. each of the parameters (F,D,F0,Fr,C) and compares the
5 | result with finite difference approximation.
6 | """
7 |
8 | import unittest
9 | from neuralflow.model import model
10 | from neuralflow.data_generation import SyntheticData
11 | from neuralflow.gradients import Grads
12 | from neuralflow.spike_data import SpikeData
13 | from neuralflow.grid import GLLgrid
14 | from neuralflow import peq_models
15 | from neuralflow import firing_rate_models
16 | import numpy as np
17 | from itertools import product
18 | from pkg_resources import working_set
19 |
20 | GPU_support = any([pkg.key.startswith('cupy') for pkg in working_set])
21 |
22 |
23 | def ConvexSum(Left, Right, theta): return Left*theta + Right*(1-theta)
24 |
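25 | # Illustrative note (addition, not in the original file): the tests below
26 | # compare the analytic directional derivative, obtained from the gradient
27 | # by the chain rule, with a central finite difference
28 | # (L(theta - eps) - L(theta + eps)) / ((theta - eps) - (theta + eps)).
29 | # The same check on a scalar function would read:
30 | #
31 | #     f = lambda th: th**3
32 | #     th, eps = 0.5, 1e-3
33 | #     fd = (f(th - eps) - f(th + eps)) / (-2 * eps)  # ~ 3 * th**2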
25 |
26 | class TestGradients(unittest.TestCase):
27 | """Computes gradient numerically and compares to finite diffrence
28 | result. Takes two different models on the left end and the right ends (F1
29 | and F2), and defines a family of models
30 | F(theta) = theta * M1 + (1 - theta) * M2. Calculates the
31 | gradient at theta = 0.5 and compares to finite difference approximation.
32 | """
33 | @classmethod
34 | def setUpClass(cls):
35 | """Define the model and some unittest hyperparameters
36 | """
37 |
38 | cls.boundary_modes = ['absorbing', 'reflecting']
39 | cls.pde_solve_params = {'xbegin': -1, 'xend': 1, 'Np': 8, 'Ne': 32}
40 |
41 | # Initialize grid with and without GPU support (so that there is no
42 | # error on platforms without cuda-enabled GPU)
43 | cls.grid = GLLgrid(**cls.pde_solve_params)
44 | if GPU_support:
45 | cls.grid_with_cuda = GLLgrid(
46 | **cls.pde_solve_params, with_cuda=True
47 | )
48 |
49 | # Ground-truth model for data generation
50 | cls.em = model.new_model(
51 | peq_model={'model': 'linear_pot', 'params': {'slope': 5}},
52 | p0_model={'model': 'single_well', 'params': {'miu': 100}},
53 | D=0.1,
54 | fr_model=[
55 | {"model": "linear", "params": {"slope": 50, "bias": 60}},
56 | {"model": "linear", "params": {"slope": -50, "bias": 60}}
57 | ],
58 | grid=cls.grid,
59 | )
60 | cls.num_neurons = cls.em.fr.shape[2]
61 |
62 | # Generate data for absorbing and reflecting modes
63 | cls.data = []
64 | for bm in cls.boundary_modes:
65 | dg = SyntheticData(cls.em, bm)
66 | data, _, _ = dg.generate_data(0, 1, 10, 0)
67 | cls.data.append(data)
68 |
69 | # Theta is a 1D variable that runs from 0 to 1. Gradients will be
70 | # calculated at 0.5
71 | cls.theta = 0.5
72 |
73 | # Epsilon for finite difference and two values of theta where logliks
74 | # will be calculated
75 | eps = 1e-3
76 | cls.theta1, cls.theta2 = cls.theta + eps, cls.theta - eps
77 |
78 | # Maximum allowed relative difference for grad vs. finite difference
79 | # estimate
80 | cls.tol = 1e-04
81 |
82 | def FD_grad_testing(self, grad, em_c, em_1, em_2, boundary_mode,
83 | with_cuda):
84 | """ Compute gradient along the line at em_c and approximate it with
85 | the difference in logliks.
86 | """
87 |
88 | # Each boundary mode has its own data
89 | data = SpikeData(
90 | self.data[self.boundary_modes.index(boundary_mode)],
91 | 'ISIs',
92 | with_cuda=with_cuda
93 | )
94 |
95 | if with_cuda:
96 | data.to_GPU()
97 | data = data.cuda_var.data
98 | else:
99 | data = data.data
100 |
101 | # Calculate gradient at theta
102 | gr = grad.get_grad_data(data, em_c, mode='gradient')
103 |
104 | # Calculate logliks at theta1 and theta2
105 | ll1 = grad.get_grad_data(data, em_1, mode='loglik')
106 | ll2 = grad.get_grad_data(data, em_2, mode='loglik')
107 |
108 | # Finite difference estimate
109 | fd_approx = (ll2 - ll1) / (self.theta2 - self.theta1)
110 | return gr, fd_approx
111 |
112 | def ForceTesting(self, with_cuda, boundary_mode, non_equilibrium):
113 |
114 | device = 'GPU' if with_cuda else 'CPU'
115 |
116 | # Left end: peq and force
117 | peqLeft = peq_models.linear_pot(self.grid.x_d, self.grid.w_d, 5)
118 | FLeft = self.em.force_from_peq(peqLeft)
119 |
120 | # Right end
121 | peqRight = peq_models.double_well(
122 | self.grid.x_d, self.grid.w_d, 0, 0.7, 2.0, 0
123 | )
124 | FRight = self.em.force_from_peq(peqRight)
125 |
126 | # Calculate forces at theta, theta1, and theta2
127 | # In general, F(theta) = Fleft*theta+ FRight*(1-theta)
128 | F = ConvexSum(FLeft, FRight, self.theta)
129 | F1 = ConvexSum(FLeft, FRight, self.theta1)
130 | F2 = ConvexSum(FLeft, FRight, self.theta2)
131 |
132 | # Calculate the corresponding peqs
133 | peq = self.em.peq_from_force(F)
134 | peq1 = self.em.peq_from_force(F1)
135 | peq2 = self.em.peq_from_force(F2)
136 |
137 | if with_cuda:
138 | grid = self.grid_with_cuda
139 | else:
140 | grid = self.grid
141 |
142 | if non_equilibrium:
143 | p0 = self.em.p0
144 | else:
145 | p0 = None
146 |
147 | em_c = model.new_model(
148 | peq, p0, self.em.D, self.em.fr, grid, with_cuda=with_cuda
149 | )
150 | em_1 = model.new_model(
151 | peq1, p0, self.em.D, self.em.fr, grid, with_cuda=with_cuda
152 | )
153 | em_2 = model.new_model(
154 | peq2, p0, self.em.D, self.em.fr, grid, with_cuda=with_cuda
155 | )
156 |
157 | grad = Grads(
158 | self.pde_solve_params, boundary_mode, ['F'], self.num_neurons,
159 | device=device
160 | )
161 |
162 | # Compute gradient and finite difference approximation
163 | gr, fd_approx = self.FD_grad_testing(
164 | grad, em_c, em_1, em_2, boundary_mode, with_cuda
165 | )
166 |
167 | # Gradient along theta direction by chain rule
168 | if with_cuda:
169 | gr['F'] = grad.cuda.cp.asnumpy(gr['F'])
170 | dLdth_grad = np.sum(gr['F'] * (FLeft-FRight) * self.grid.w_d)
171 | # Relative difference
172 | reldiff = np.abs((dLdth_grad - fd_approx)/(fd_approx))
173 | return reldiff
174 |
175 | def DTesting(self, with_cuda, boundary_mode):
176 |
177 | device = 'GPU' if with_cuda else 'CPU'
178 |
179 | # Left/right end D values
180 | DLeft = 5
181 | DRight = 15
182 |
183 | # Calculate D at theta, theta1, theta2
184 | D = ConvexSum(DLeft, DRight, self.theta)
185 | D1 = ConvexSum(DLeft, DRight, self.theta1)
186 | D2 = ConvexSum(DLeft, DRight, self.theta2)
187 |
188 | if with_cuda:
189 | grid = self.grid_with_cuda
190 | else:
191 | grid = self.grid
192 |
193 | em_c = model.new_model(
194 | self.em.peq, self.em.p0, D, self.em.fr, grid, with_cuda=with_cuda
195 | )
196 | em_1 = model.new_model(
197 | self.em.peq, self.em.p0, D1, self.em.fr, grid, with_cuda=with_cuda
198 | )
199 | em_2 = model.new_model(
200 | self.em.peq, self.em.p0, D2, self.em.fr, grid, with_cuda=with_cuda
201 | )
202 |
203 | grad = Grads(
204 | self.pde_solve_params, boundary_mode, ['D'], self.num_neurons,
205 | device=device
206 | )
207 |
208 | # Compute gradient and finite difference approximation
209 | gr, fd_approx = self.FD_grad_testing(
210 | grad, em_c, em_1, em_2, boundary_mode, with_cuda
211 | )
212 |
213 | # dLikelihood/dtheta with gradient by chain rule:
214 | dLdth_grad = gr['D'] * (DLeft-DRight)
215 |
216 | # Relative difference
217 | reldiff = np.abs((dLdth_grad - fd_approx)/(fd_approx))
218 | return reldiff
219 |
220 | def F0Testing(self, with_cuda, boundary_mode, non_equilibrium):
221 |
222 | device = 'GPU' if with_cuda else 'CPU'
223 |
224 | # Left end: p0 and F0
225 | p0Left = peq_models.linear_pot(self.grid.x_d, self.grid.w_d, 5)
226 | F0Left = self.em.force_from_peq(p0Left)
227 |
228 | # Right end: p0
229 | p0Right = peq_models.double_well(
230 | self.grid.x_d, self.grid.w_d, 0, 0.7, 2.0, 0
231 | )
232 | F0Right = self.em.force_from_peq(p0Right)
233 |
234 | # Calculate F0 at theta, theta1, and theta2
235 | # In general, F0(theta) = F0left*theta+ F0Right*(1-theta)
236 | F0 = ConvexSum(F0Left, F0Right, self.theta)
237 | F01 = ConvexSum(F0Left, F0Right, self.theta1)
238 | F02 = ConvexSum(F0Left, F0Right, self.theta2)
239 |
240 | p0 = self.em.peq_from_force(F0)
241 | p0_1 = self.em.peq_from_force(F01)
242 | p0_2 = self.em.peq_from_force(F02)
243 |
244 | if with_cuda:
245 | grid = self.grid_with_cuda
246 | else:
247 | grid = self.grid
248 |
249 | em_c = model.new_model(
250 | self.em.peq, p0, self.em.D, self.em.fr, grid, with_cuda=with_cuda
251 | )
252 | em_1 = model.new_model(
253 | self.em.peq, p0_1, self.em.D, self.em.fr, grid,
254 | with_cuda=with_cuda
255 | )
256 | em_2 = model.new_model(
257 | self.em.peq, p0_2, self.em.D, self.em.fr, grid,
258 | with_cuda=with_cuda
259 | )
260 |
261 | grad = Grads(
262 | self.pde_solve_params, boundary_mode, ['F0'], self.num_neurons,
263 | device=device
264 | )
265 |
266 | # Compute gradient and finite difference approximation
267 | gr, fd_approx = self.FD_grad_testing(
268 | grad, em_c, em_1, em_2, boundary_mode, with_cuda
269 | )
270 | if device == 'GPU':
271 | gr['F0'] = grad.cuda.cp.asnumpy(gr['F0'])
272 |
273 | # dLikelihood/dtheta by chain rule:
274 | dLdth_grad = np.sum(gr['F0'] * (F0Left-F0Right) * self.grid.w_d)
275 |
276 | # Relative difference
277 | reldiff = np.abs((dLdth_grad - fd_approx)/(fd_approx))
278 |
279 | return reldiff
280 |
281 | def FrTesting(self, with_cuda, boundary_mode):
282 |
283 | device = 'GPU' if with_cuda else 'CPU'
284 | # Left end: fr and Fr
285 | frLeft = np.stack([
286 | firing_rate_models.linear(self.grid.x_d, 50, 60),
287 | firing_rate_models.sinus(self.grid.x_d, 50, 30)
288 | ]).T
289 | # Ensure constant C is 1 by dividing by the fr(xbegin)
290 | frLeft /= frLeft[0, :]
291 | FrLeft = self.em.Fr_from_fr(frLeft)
292 |
293 | # Right end: fr and Fr
294 | frRight = np.stack([
295 | firing_rate_models.linear(self.grid.x_d, -50, 60),
296 | firing_rate_models.sinus(self.grid.x_d, 40, 20)
297 | ]).T
298 | frRight /= frRight[0, :]
299 | FrRight = self.em.Fr_from_fr(frRight)
300 |
301 | # Calculate Fr at theta, theta1, and theta2
302 | # In general, Fr(theta) = Frleft*theta+ FrRight*(1-theta)
303 | Fr = ConvexSum(FrLeft, FrRight, self.theta)
304 | Fr1 = ConvexSum(FrLeft, FrRight, self.theta1)
305 | Fr2 = ConvexSum(FrLeft, FrRight, self.theta2)
306 |
307 | # Calculate the corresponding frs
308 | fr = self.em.fr_from_Fr(Fr)
309 | fr_1 = self.em.fr_from_Fr(Fr1)
310 | fr_2 = self.em.fr_from_Fr(Fr2)
311 |
312 | if with_cuda:
313 | grid = self.grid_with_cuda
314 | else:
315 | grid = self.grid
316 |
317 | em_c = model.new_model(
318 | self.em.peq, self.em.p0, self.em.D, fr, grid, with_cuda=with_cuda
319 | )
320 | em_1 = model.new_model(
321 | self.em.peq, self.em.p0, self.em.D, fr_1, grid,
322 | with_cuda=with_cuda
323 | )
324 | em_2 = model.new_model(
325 | self.em.peq, self.em.p0, self.em.D, fr_2, grid,
326 | with_cuda=with_cuda
327 | )
328 |
329 | grad = Grads(
330 | self.pde_solve_params, boundary_mode, ['Fr'], self.num_neurons,
331 | device=device
332 | )
333 |
334 | # Compute gradient and finite difference approximation
335 | gr, fd_approx = self.FD_grad_testing(
336 | grad, em_c, em_1, em_2, boundary_mode, with_cuda
337 | )
338 | if device == 'GPU':
339 | gr['Fr'] = grad.cuda.cp.asnumpy(gr['Fr'])
340 |
341 | # dLikelihood/dtheta by chain rule:
342 | dLdth_grad = np.sum(
343 | gr['Fr']*(FrLeft-FrRight)*self.grid.w_d[:, np.newaxis]
344 | )
345 |
346 | # Relative difference
347 | reldiff = np.abs((dLdth_grad - fd_approx)/(fd_approx))
348 |
349 | return reldiff
350 |
351 | # @profile
352 | def CTesting(self, with_cuda, boundary_mode):
353 |
354 | device = 'GPU' if with_cuda else 'CPU'
355 |
356 | # Left/right C values
357 | CLeft = np.array([1, 2])
358 | CRight = np.array([5, 3])
359 |
360 | # Calculate C at theta, theta1, theta2
361 | C = ConvexSum(CLeft, CRight, self.theta)
362 | C1 = ConvexSum(CLeft, CRight, self.theta1)
363 | C2 = ConvexSum(CLeft, CRight, self.theta2)
364 |
365 | # Calculate the corresponding firing rates:
366 | fr = self.em.fr[0]/self.em.fr[0][0, :]*C
367 | fr_1 = self.em.fr[0]/self.em.fr[0][0, :]*C1
368 | fr_2 = self.em.fr[0]/self.em.fr[0][0, :]*C2
369 |
370 | if with_cuda:
371 | grid = self.grid_with_cuda
372 | else:
373 | grid = self.grid
374 |
375 | em_c = model.new_model(
376 | self.em.peq, self.em.p0, self.em.D, fr, grid, with_cuda=with_cuda
377 | )
378 | em_1 = model.new_model(
379 | self.em.peq, self.em.p0, self.em.D, fr_1, grid,
380 | with_cuda=with_cuda
381 | )
382 | em_2 = model.new_model(
383 | self.em.peq, self.em.p0, self.em.D, fr_2, grid,
384 | with_cuda=with_cuda
385 | )
386 |
387 | grad = Grads(
388 | self.pde_solve_params, boundary_mode, ['C'], self.num_neurons,
389 | device=device
390 | )
391 |
392 | # Compute gradient and finite difference approximation
393 | gr, fd_approx = self.FD_grad_testing(
394 | grad, em_c, em_1, em_2, boundary_mode, with_cuda
395 | )
396 | if device == 'GPU':
397 | gr['C'] = grad.cuda.cp.asnumpy(gr['C'])
398 |
399 | # dLikelihood/dtheta with gradient by chain rule:
400 | dLdth_grad = np.sum(gr['C']*(CLeft-CRight))
401 |
402 | # Relative difference
403 | reldiff = np.abs((dLdth_grad - fd_approx)/(fd_approx))
404 |
405 | return reldiff
406 |
407 | def testFGrad_CPU(self):
408 | for bm, eq_mode in product(self.boundary_modes, [True]):  # False disabled
409 | test_name = f'Test grad F, CPU, {bm}, non-equilibrium = {eq_mode}'
410 | with self.subTest(test_name, bm=bm, eq_mode=eq_mode):
411 | reldiff = self.ForceTesting(False, bm, eq_mode)
412 | print(f'CPU, {bm}, non-equilibrium = {eq_mode}, F-grad '
413 | f' reldiff: {reldiff :.12f}')
414 | self.assertTrue(reldiff < self.tol, 'Grad F test failed')
415 |
416 | def testDGrad_CPU(self):
417 | for bm in self.boundary_modes:
418 | test_name = f'Test grad D, CPU, {bm}'
419 | with self.subTest(test_name, bm=bm):
420 | reldiff = self.DTesting(False, bm)
421 | print(f'CPU, {bm}, D-grad reldiff: {reldiff :.12f}')
422 | self.assertTrue(reldiff < self.tol, 'Grad D test failed')
423 |
424 | def testF0Grad_CPU(self):
425 | for bm, eq_mode in product(self.boundary_modes, [True, False]):
426 | test_name = f'Test grad F0, CPU, {bm}, non-equilibrium = {eq_mode}'
427 | with self.subTest(test_name, bm=bm, eq_mode=eq_mode):
428 | reldiff = self.F0Testing(False, bm, eq_mode)
429 | print(f'CPU, {bm}, non-equilibrium = {eq_mode}, F0-grad '
430 | f' reldiff: {reldiff :.12f}')
431 | self.assertTrue(reldiff < self.tol, 'Grad F0 test failed')
432 |
433 | def testFrGrad_CPU(self):
434 | for bm in self.boundary_modes:
435 | test_name = f'Test grad Fr, CPU, {bm}'
436 | with self.subTest(test_name, bm=bm):
437 | reldiff = self.FrTesting(False, bm)
438 | print(f'CPU, {bm}, Fr-grad reldiff: {reldiff :.12f}')
439 | self.assertTrue(reldiff < self.tol, 'Grad Fr test failed')
440 |
441 | def testCGrad_CPU(self):
442 | for bm in self.boundary_modes:
443 | test_name = f'Test grad C, CPU, {bm}'
444 | with self.subTest(test_name, bm=bm):
445 | reldiff = self.CTesting(False, bm)
446 | print(f'CPU, {bm}, C-grad reldiff: {reldiff :.12f}')
447 | self.assertTrue(reldiff < self.tol, 'Grad C test failed')
448 |
449 | @unittest.skipIf(not GPU_support, 'cupy not installed')
450 | def testFGrad_GPU(self):
451 | for bm, eq_mode in product(self.boundary_modes, [True, False]):
452 | test_name = f'Test grad F, GPU, {bm}, non-equilibrium = {eq_mode}'
453 | with self.subTest(test_name, bm=bm, eq_mode=eq_mode):
454 | reldiff = self.ForceTesting(True, bm, eq_mode)
455 | print(f'GPU, {bm}, non-equilibrium = {eq_mode}, F-grad '
456 | f' reldiff: {reldiff :.12f}')
457 | self.assertTrue(reldiff < self.tol, 'Grad F test failed')
458 |
459 | @unittest.skipIf(not GPU_support, 'cupy not installed')
460 | def testDGrad_GPU(self):
461 | for bm in self.boundary_modes:
462 | test_name = f'Test grad D, GPU, {bm}'
463 | with self.subTest(test_name, bm=bm):
464 | reldiff = self.DTesting(True, bm)
465 | print(f'GPU, {bm}, D-grad reldiff: {reldiff :.12f}')
466 | self.assertTrue(reldiff < self.tol, 'Grad D test failed')
467 |
468 | @unittest.skipIf(not GPU_support, 'cupy not installed')
469 | def testF0Grad_GPU(self):
470 | for bm, eq_mode in product(self.boundary_modes, [True, False]):
471 | test_name = f'Test grad F0, GPU, {bm}, non-equilibrium = {eq_mode}'
472 | with self.subTest(test_name, bm=bm, eq_mode=eq_mode):
473 | reldiff = self.F0Testing(True, bm, eq_mode)
474 | print(f'GPU, {bm}, non-equilibrium = {eq_mode}, F0-grad '
475 | f' reldiff: {reldiff :.12f}')
476 | self.assertTrue(reldiff < self.tol, 'Grad F0 test failed')
477 |
478 | @unittest.skipIf(not GPU_support, 'cupy not installed')
479 | def testFrGrad_GPU(self):
480 | for bm in self.boundary_modes:
481 | test_name = f'Test grad Fr, GPU, {bm}'
482 | with self.subTest(test_name, bm=bm):
483 | reldiff = self.FrTesting(True, bm)
484 | print(f'GPU, {bm}, Fr-grad reldiff: {reldiff :.12f}')
485 | self.assertTrue(reldiff < self.tol, 'Grad Fr test failed')
486 |
487 | @unittest.skipIf(not GPU_support, 'cupy not installed')
488 | def testCGrad_GPU(self):
489 | for bm in self.boundary_modes:
490 | test_name = f'Test grad C, GPU, {bm}'
491 | with self.subTest(test_name, bm=bm):
492 | reldiff = self.CTesting(True, bm)
493 | print(f'GPU, {bm}, C-grad reldiff: {reldiff :.12f}')
494 | self.assertTrue(reldiff < self.tol, 'Grad C test failed')
495 |
496 |
497 | if __name__ == '__main__':
498 | unittest.main()
499 |
--------------------------------------------------------------------------------
/tests/test_model_selection.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | A few tests for model selection using fc_tools.
4 | Mostly tests that there are no errors in model selection.
5 | """
6 |
7 | import logging
8 | import unittest
9 | from neuralflow.model import model
10 | from neuralflow.data_generation import SyntheticData
11 | from neuralflow.spike_data import SpikeData
12 | from neuralflow.grid import GLLgrid
13 | from neuralflow.feature_complexity.fc_base import FC_tools
14 | from neuralflow.optimization import Optimization
15 | from copy import deepcopy
16 |
17 |
18 | class TestModelSelection(unittest.TestCase):
19 | """
20 | """
21 | @classmethod
22 | def setUpClass(cls):
23 | """Define the model and some unittest hyperparameters
24 | """
25 |
26 | cls.boundary_modes = ['absorbing', 'reflecting']
27 | cls.pde_solve_params = {'xbegin': -1, 'xend': 1, 'Np': 8, 'Ne': 16}
28 |
29 | cls.grid = GLLgrid(**cls.pde_solve_params)
30 |
31 | # Ground-truth model for data generation
32 | cls.em = model.new_model(
33 | peq_model={'model': 'linear_pot', 'params': {'slope': 5}},
34 | p0_model={'model': 'single_well', 'params': {'miu': 100}},
35 | D=0.3,
36 | fr_model=[
37 | {"model": "linear", "params": {"slope": 50, "bias": 60}},
38 | {"model": "linear", "params": {"slope": -50, "bias": 60}}
39 | ],
40 | grid=cls.grid,
41 | )
42 | cls.num_neurons = cls.em.fr.shape[2]
43 |
44 | # Generate data for absorbing and reflecting modes
45 | cls.data = {bm: [] for bm in cls.boundary_modes}
46 | for bm in cls.boundary_modes:
47 | dg = SyntheticData(cls.em, bm)
48 | data, _, _ = dg.generate_data(0, 1, 10, 0)
49 | cls.data[bm].append(SpikeData(data, 'ISIs'))
50 | data, _, _ = dg.generate_data(0, 1, 10, 0)
51 | cls.data[bm].append(SpikeData(data, 'ISIs'))
52 |
53 | cls.opt_params = {'max_epochs': 20, 'mini_batch_number': 2,
54 | 'learning_rate': {'alpha': 0.5}}
55 |
56 | # visualization
57 | cls.n_epochs_to_disp = 5
58 |
59 | # This determines which parameters are shared and which are not
60 | cls.params_size = {'peq': 1, 'D': 1, 'fr': 1, 'p0': 1}
61 |
62 | cls.model_param_mapping = {
63 | 'F': 'peq_model', 'F0': 'p0_model', 'D': 'D', 'Fr': 'fr_model',
64 | 'C': 'fr_model'
65 | }
66 | cls.params_to_opt = ['F', 'F0', 'D', 'Fr', 'C']
67 |
68 | cls.initial_guess = {
69 | 'peq_model': {'model': 'uniform', 'params': {}},
70 | 'p0_model': {'model': 'uniform', 'params': {}},
71 | 'D': 1,
72 | 'fr_model': [
73 | {"model": "linear", "params": {"slope": -5, "bias": 15}},
74 | {"model": "linear", "params": {"slope": 10, "bias": 100}},
75 | ],
76 | 'grid': cls.grid
77 | }
78 |
79 | def test_FC_equilibrium(self):
80 | """ Optimize the model on two datasets and perform feature complexity
81 | analysis
82 | """
83 | # Set p0_model to None for the equilibrium case: setting p0 to None
84 | # makes the model equilibrium
85 | init_model_params = deepcopy(self.initial_guess)
86 | init_model_params['p0_model'] = None
87 | init_model = model.new_model(**init_model_params)
88 | assert not init_model.non_equilibrium, \
89 | 'For some reason the model object is not equilibrium'
90 |
91 | # Also don't optimize F0 and D. We exclude p0 since the model is
92 | # equilibrium, so there is no p0. We exclude D since the formula
93 | # developed in the Genkin & Engel (2020) paper does not use D.
94 | params_to_opt = ['F', 'C', 'Fr']
95 |
96 | for t_num, bm in enumerate(self.boundary_modes):
97 | with self.subTest(
98 | f'Testing equilibrium model selection, boundary mode {bm}',
99 | bm=bm
100 | ):
101 |
102 | optimization1 = Optimization(
103 | self.data[bm][0],
104 | init_model,
105 | 'ADAM',
106 | {**self.opt_params, **{'params_to_opt': params_to_opt}},
107 | pde_solve_params=self.pde_solve_params,
108 | boundary_mode=bm,
109 | )
110 | # run optimization
111 | optimization1.run_optimization()
112 |
113 | optimization2 = Optimization(
114 | self.data[bm][1],  # second dataset for the consistency check
115 | init_model,
116 | 'ADAM',
117 | {**self.opt_params, **{'params_to_opt': params_to_opt}},
118 | pde_solve_params=self.pde_solve_params,
119 | boundary_mode=bm,
120 | )
121 | # run optimization
122 | optimization2.run_optimization()
123 | fc = FC_tools(non_equilibrium=False, model=init_model,
124 | boundary_mode=bm, terminal_time=1)
125 | FCs1, min_inds_1, FCs2, min_inds_2, JS, FC_opt_ind = (
126 | fc.FeatureConsistencyAnalysis(
127 | optimization1.results, optimization2.results,
128 | 0.015, 3, 0
129 | )
130 | )
131 |
132 | def test_FC_nonequilibrium(self):
133 | """ Optimize the model on two datasets and perform feature complexity
134 | analysis
135 | """
136 | init_model = model.new_model(**self.initial_guess)
137 | # In this case the model should be non-equilibrium, since p0_model
138 | # is specified
139 | assert init_model.non_equilibrium, \
140 | 'For some reason the model object is not non-equilibrium'
141 | params_to_opt = self.params_to_opt
142 |
143 | for t_num, bm in enumerate(self.boundary_modes):
144 | with self.subTest(
145 | 'Testing nonequilibrium model selection, boundary mode '
146 | f'{bm}',
147 | bm=bm
148 | ):
149 |
150 | optimization1 = Optimization(
151 | self.data[bm][0],
152 | init_model,
153 | 'ADAM',
154 | {**self.opt_params, **{'params_to_opt': params_to_opt}},
155 | pde_solve_params=self.pde_solve_params,
156 | boundary_mode=bm,
157 | )
158 | # run optimization
159 | optimization1.run_optimization()
160 |
161 | optimization2 = Optimization(
162 | self.data[bm][1],  # second dataset for the consistency check
163 | init_model,
164 | 'ADAM',
165 | {**self.opt_params, **{'params_to_opt': params_to_opt}},
166 | pde_solve_params=self.pde_solve_params,
167 | boundary_mode=bm,
168 | )
169 | # run optimization
170 | optimization2.run_optimization()
171 | fc = FC_tools(non_equilibrium=True, model=init_model,
172 | boundary_mode=bm, terminal_time=1)
173 | FCs1, min_inds_1, FCs2, min_inds_2, JS, FC_opt_ind = (
174 | fc.FeatureConsistencyAnalysis(
175 | optimization1.results, optimization2.results,
176 | 0.015, 3, 0
177 | )
178 | )
179 |
180 |
181 | if __name__ == '__main__':
182 | logging.basicConfig(level=logging.INFO)
183 | logging.getLogger().setLevel(logging.DEBUG)
184 | unittest.main()
185 |
--------------------------------------------------------------------------------
/tests/test_shared_optimization.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding: utf-8 -*-
3 | """Unittests for the optimization. It tests various
4 | optimization settings. It also visualizes the results
5 |
6 | Testcases include:
7 | testForceInference : Adam optimization of the driving force F that defines
8 | potential Phi(x).
9 | testP0Inference : Adam optimization of p0(x) through F0.
10 | testDInference : Adam optimization of D.
11 | testFrInference : Adam optimization of Fr (and plots fr).
12 | testCInference : Adam optimization of C.
13 | testDLineSearch : optimization of D by line search.
14 | testCLineSearch : optimization of C by line search.
15 | The following parameters can be adjusted in the setUpClass method:
16 | -pde_solve_params.
17 | -em_gt parameters for the ground-truth model.
18 | -num_trials for data generation.
19 | -with_validation = True/False - whether to calculate validation score.
20 | -max_epochs - number of iterations
21 | -optimizer
22 | -sgd_options - to do mini-batch descent.
23 |
24 | """
25 |
26 |
27 | import logging
28 | import unittest
29 | from neuralflow.model import model
30 | from visualization import PlotResults
31 | from neuralflow.data_generation import SyntheticData
32 | from neuralflow.spike_data import SpikeData
33 | from neuralflow.grid import GLLgrid
34 | from neuralflow.optimization import Optimization
35 | from itertools import product
36 | import pathlib
37 | import numpy as np
38 | from pkg_resources import working_set
39 | GPU_support = any([pkg.key.startswith('cupy') for pkg in working_set])
40 | logger = logging.getLogger(__name__)
41 |
42 |
43 | class TestSharedOptimization(unittest.TestCase):
44 | """Perform optimization
45 | """
46 | @classmethod
47 | def setUpClass(cls):
48 | """
49 | """
50 |
51 | # Initialize pde solver and energy model class
52 | cls.boundary_mode = ['reflecting', 'absorbing']
53 |
54 | # Feel free to change
55 | cls.pde_solve_params = {'xbegin': -1, 'xend': 1, 'Np': 8, 'Ne': 16}
56 |
57 | cls.grid_cpu = GLLgrid(**cls.pde_solve_params)
58 | if GPU_support:
59 | cls.grid_gpu = GLLgrid(**cls.pde_solve_params, with_cuda=True)
60 |
61 | # Feel free to test different ground-truths
62 | gt_param_1 = {
63 | 'peq_model': {
64 | "model": "manual",
65 | "params": {
66 | "interp_x": [-1, -0.25, 1],
67 | "interp_y": [2, 4, 0],
68 | "bc_left": [[1, 0], [1, 0]],
69 | "bc_right": [[1, 0], [1, 0]]
70 | }
71 | },
72 | 'p0_model': {
73 | "model": "single_well", "params": {"miu": 200, "xmin": 0}
74 | },
75 | 'D': 0.2,
76 | 'fr_model': [
77 | {"model": "linear", "params": {"slope": 50, "bias": 60}},
78 | {"model": "peaks", "params": {
79 | "center": [-0.3], "width": [0.7], "amp": [60]
80 | }},
81 | {"model": "peaks", "params": {
82 | "center": [-0.6, 0.6], "width": [0.3, 0.5], "amp": [30, 80]
83 | }}
84 | ]
85 | }
86 | em_gt_1 = model.new_model(**gt_param_1, grid=cls.grid_cpu)
87 |
88 | # Feel free to test different ground-truths
89 | gt_param_2 = {
90 | 'peq_model': {"model": "linear_pot", "params": {"slope": -3}},
91 | 'p0_model': {"model": "single_well",
92 | "params": {"miu": 200, "xmin": 0}
93 | },
94 | 'D': 0.2,
95 | 'fr_model': [
96 | {"model": "linear", "params": {"slope": 50, "bias": 60}},
97 | {"model": "peaks", "params": {
98 | "center": [-0.3], "width": [0.7], "amp": [60]
99 | }},
100 | {"model": "peaks", "params": {
101 | "center": [-0.6, 0.6], "width": [0.3, 0.5], "amp": [30, 80]
102 | }}
103 | ]
104 | }
105 | em_gt_2 = model.new_model(**gt_param_2, grid=cls.grid_cpu)
106 |
107 | # Create a model from these two models
108 | cls.em_gt_cpu = model(
109 | np.concatenate((em_gt_1.peq, em_gt_2.peq), axis=0),
110 | np.concatenate((em_gt_1.p0, em_gt_2.p0), axis=0),
111 | np.concatenate((em_gt_1.D, em_gt_2.D), axis=0),
112 | np.concatenate((em_gt_1.fr, em_gt_2.fr), axis=0),
113 | params_size={'peq': 2, 'D': 2, 'fr': 2, 'p0': 2},
114 | grid=cls.grid_cpu
115 | )
116 | if GPU_support:
117 | cls.em_gt_gpu = model(
118 | np.concatenate((em_gt_1.peq, em_gt_2.peq), axis=0),
119 | np.concatenate((em_gt_1.p0, em_gt_2.p0), axis=0),
120 | np.concatenate((em_gt_1.D, em_gt_2.D), axis=0),
121 | np.concatenate((em_gt_1.fr, em_gt_2.fr), axis=0),
122 | params_size={'peq': 2, 'D': 2, 'fr': 2, 'p0': 2},
123 | grid=cls.grid_gpu,
124 | with_cuda=True
125 | )
126 |
127 | # Synthetic data generation
128 | with_cv = True
129 | num_training_trials = [50, 30]
130 | num_val_trials = [max(el // 10, 3) for el in num_training_trials]
131 | cls.dataTR = {bm: [] for bm in cls.boundary_mode}
132 | cls.dataCV = {bm: [] for bm in cls.boundary_mode}
133 |
134 | for samp in range(2):
135 | for bm in cls.boundary_mode:
136 | dg = SyntheticData(cls.em_gt_cpu, bm)
137 | logger.info(
138 | f'Generating {num_training_trials[samp]} trials of training '
139 | f'data using {bm} boundary mode, datasample {samp}'
140 | )
141 | data, _, _ = dg.generate_data(
142 | 0, 1, num_training_trials[samp], samp)
143 | cls.dataTR[bm].append(
144 | SpikeData(data, 'ISIs', with_cuda=GPU_support))
145 | if with_cv:
146 | logger.info(
147 | f'Generating {num_val_trials[samp]} trials of validation '
148 | f'data using {bm} boundary mode, datasample {samp}'
149 | )
150 | dataCV, _, _ = dg.generate_data(
151 | 0, 1, num_val_trials[samp], samp)
152 | cls.dataCV[bm].append(
153 | SpikeData(dataCV, 'ISIs', with_cuda=GPU_support)
154 | )
155 |
156 | # optimization
157 | cls.optimizers = ['ADAM', 'GD']
158 | cls.opt_params = {'max_epochs': 20, 'mini_batch_number': 10}
159 | cls.opt_params_ls = {'max_epochs': 1, 'mini_batch_number': 1}
160 |
161 | cls.ls_options_simultaneous_inference = {
162 | 'C_opt': {'epoch_schedule': [1, 10]},
163 | 'D_opt': {'epoch_schedule': [1, 10]}
164 | }
165 |
166 | # visualization
167 | cls.n_epochs_to_disp = 5
168 |
169 | # This determines which parameters are shared and which are not
170 | cls.params_size = {'peq': 2, 'D': 2, 'fr': 2, 'p0': 2}
171 |
172 | cls.model_param_mapping = {
173 | 'F': 'peq_model', 'F0': 'p0_model', 'D': 'D', 'Fr': 'fr_model',
174 | 'C': 'fr_model'
175 | }
176 | cls.params_to_opt = ['F', 'F0', 'D', 'Fr', 'C']
177 |
178 | cls.initial_guess = {
179 | 'peq_model': {'model': 'uniform', 'params': {}},
180 | 'p0_model': {'model': 'uniform', 'params': {}},
181 | 'D': 3,
182 | 'fr_model': [
183 | {"model": "linear", "params": {"slope": -5, "bias": 15}},
184 | {"model": "linear", "params": {"slope": 10, "bias": 100}},
185 | {"model": "linear", "params": {"slope": 10, "bias": 100}}
186 | ],
187 | }
188 | cls.hyperparam = {
189 | 'ADAM_alpha': {'simultaneous': 0.1},
190 | 'GD_lr': {
191 | 'F': 0.05, 'F0': 0.05, 'D': 0.005, 'Fr': 0.0005, 'C': 0.2
192 | }
193 | }
194 |
195 | # Create temporary directory for test results
196 | cls.res_fold = 'unit_test_shared_opt_results'
197 | pathlib.Path(cls.res_fold).mkdir(parents=True, exist_ok=True)
198 |
199 | cls.ll_gt = {bm: [None]*2 for bm in cls.boundary_mode}
200 | cls.ll_gt_cv = {bm: [None]*2 for bm in cls.boundary_mode}
201 |
202 | # Counter of the test number
203 | cls.test_num = 0
204 |
205 | def SharedSimultaneousInferenceTesting(self, with_cuda):
206 | """Inference of all of the parameters together
207 | """
208 | device = 'CPU' if not with_cuda else 'GPU'
209 | for t_num, (optimizer, bm) in enumerate(
210 | product(self.optimizers, self.boundary_mode)
211 | ):
212 | with self.subTest(
213 | f'Testing {optimizer}, boundary mode {bm}, simultaneous '
214 | 'inference',
215 | optimizer=optimizer, bm=bm
216 | ):
217 | self.test_num += 1
218 | logger.info(
219 | f'Running test {self.test_num}, optimizer = {optimizer}, '
220 | f'boundary mode {bm}, with_cuda {with_cuda}'
221 | )
222 |
223 | # learning rate is optimizer-dependent
224 | if optimizer == 'ADAM':
225 | lr = {
226 | 'alpha': self.hyperparam['ADAM_alpha']['simultaneous']
227 | }
228 | else:
229 | lr = self.hyperparam['GD_lr'].copy()
230 |
231 | # Optimization params
232 | opt_params = {
233 | **self.opt_params,
234 | 'params_to_opt': list(self.hyperparam['GD_lr'].keys()),
235 | 'learning_rate': lr
236 | }
237 |
238 | init_model = model.new_model(
239 | **self.initial_guess,
240 | grid=self.grid_gpu if with_cuda else self.grid_cpu,
241 | params_size=self.params_size,
242 | with_cuda=with_cuda
243 | )
244 |
245 | optimization = Optimization(
246 | self.dataTR[bm],
247 | init_model,
248 | optimizer,
249 | opt_params,
250 | self.ls_options_simultaneous_inference,
251 | pde_solve_params=self.pde_solve_params,
252 | boundary_mode=bm,
253 | dataCV=self.dataCV[bm],
254 | device=device
255 | )
256 |
257 | # run optimization
258 | optimization.run_optimization()
259 |
260 | # Compute ground-truth loglik
261 | em_gt = self.em_gt_gpu if with_cuda else self.em_gt_cpu
262 | for samp in range(2):
263 | if self.ll_gt[bm][samp] is None:
264 | self.ll_gt[bm][samp] = (
265 | optimization.optimizer.gradient.get_grad_data(
266 | optimization.optimizer.get_dataTR(samp),
267 | em_gt,
268 | samp
269 | )
270 | )
271 | if (
272 | self.dataCV[bm] is not None and
273 | self.ll_gt_cv[bm][samp] is None
274 | ):
275 | self.ll_gt_cv[bm][samp] = (
276 | optimization.optimizer.gradient.get_grad_data(
277 | optimization.optimizer.get_dataCV(
278 | samp), em_gt, samp
279 | )
280 | )
281 |
282 | # Visualize the results
283 | PlotResults(
284 | f'Opt = {optimizer}, boundary mode = {bm}',
285 | ['F', 'Fr', 'F0', 'D', 'C'],
286 | self.n_epochs_to_disp,
287 | optimization.results,
288 | self.em_gt_cpu,
289 | self.ll_gt[bm],
290 | self.ll_gt_cv[bm],
291 | self.res_fold
292 | )
293 |
294 | if self.res_fold is not None:
295 | logger.info(f'Test result saved into {self.res_fold}')
296 |
297 | def test_SharedSimultaneousInferenceCPU(self):
298 | self.SharedSimultaneousInferenceTesting(False)
299 |
300 | @unittest.skipIf(not GPU_support, 'cupy not installed')
301 | def test_SharedSimultaneousInferenceGPU(self):
302 | self.SharedSimultaneousInferenceTesting(True)
303 |
304 |
305 | if __name__ == '__main__':
306 | logging.basicConfig(level=logging.INFO)
307 | logging.getLogger().setLevel(logging.DEBUG)
308 | unittest.main()
309 |
--------------------------------------------------------------------------------
/tests/test_spike_data.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding: utf-8 -*-
3 | """
4 | Created on Wed Sep 27 20:22:31 2023
5 |
6 | @author: mikhailgenkin
7 | """
8 |
9 | import unittest
10 | 
11 | import numpy as np
12 | 
13 | from neuralflow.spike_data import SpikeData
14 |
15 |
16 | class TestSpikeData(unittest.TestCase):
17 |
18 | @classmethod
19 | def setUpClass(cls):
20 | # Sample spike data in ISI format: 2 trials, 3 neurons (ids = 0, 1, 2)
21 | cls.ISIs = np.empty((2, 2), dtype=np.ndarray)
22 | cls.ISIs[0][0] = np.array([0.05, 0.5, 1])
23 | cls.ISIs[1][0] = np.array([0.05, 1, 2, 3, 4])
24 | cls.ISIs[1][1] = np.array([0, 2, 1, 2, -1])
25 | cls.ISIs[0][1] = np.array([1, 2, -1])
26 |
27 | # Equivalent data in spiketimes format
28 | cls.spiketimes = np.array(
29 | [
30 | [np.array([], dtype=np.float64), np.array([0.05])],
31 | [np.array([0.05]), np.array([3.05])],
32 | [np.array([0.55]), np.array([1.05, 6.05])]
33 | ],
34 | dtype=object
35 | )
36 | cls.timeepoch = [(0, 1.55), (0, 10.05)]
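        # ISI format, illustrated by the arrays above: ISIs[trial][0] holds
        # the inter-spike intervals (including the final interval from the
        # last spike to the end of the trial), and ISIs[trial][1] holds the
        # id of the neuron firing at the end of each interval, with -1
        # marking the trial-end interval. Worked example, trial 0: spikes at
        # t = 0.05 (neuron 1) and t = 0.05 + 0.5 = 0.55 (neuron 2); the
        # trial ends at t = 0.55 + 1 = 1.55, matching timeepoch[0].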
37 |
38 | def testtransformations(self):
39 |         """Test neuron autodetection and the ISI <-> spike-times round trip.
40 |         """
41 |
42 | sdata = SpikeData(self.ISIs, dformat='ISIs')
43 |
44 | # Test neuron number autodetection
45 | self.assertTrue(sdata.num_neuron == 3)
46 |
47 | spikes, timebins = SpikeData.transform_isis_to_spikes(self.ISIs)
48 |
49 | # Test transformation from ISIs to Spikes
50 | self.assertTrue(
51 | all(
52 | [np.allclose(seq1, seq2, atol=10**-8)
53 | for seq1, seq2 in zip(spikes.flat, self.spiketimes.flat)]
54 | )
55 | )
56 |         self.assertTrue(
57 |             all(
58 |                 [np.allclose(np.array(s1), np.array(s2), atol=10**-8)
59 |                  for s1, s2 in zip(timebins, self.timeepoch)]
60 |             )
61 |         )
62 | 
63 |
64 | # Transform spiketimes back to ISI format
65 | ISIs = SpikeData.transform_spikes_to_isi(spikes, timebins)
66 |
67 | # Assert equivalence
68 | self.assertTrue(
69 | all(
70 | [np.allclose(seq1, seq2, atol=10**-8)
71 | for seq1, seq2 in zip(ISIs.flat, self.ISIs.flat)]
72 | )
73 | )
74 |
75 |
76 | if __name__ == '__main__':
77 | unittest.main()
78 |
--------------------------------------------------------------------------------
/tests/test_viterbi.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding: utf-8 -*-
3 | """Tests and visualize Viterbi algorithm
4 | """
5 |
6 | import logging
7 | import unittest
8 | import numpy as np
9 | import matplotlib.pyplot as plt
10 | import matplotlib
11 | from neuralflow.model import model
12 | from neuralflow.spike_data import SpikeData
13 | from neuralflow.data_generation import SyntheticData
14 | from neuralflow.grid import GLLgrid
15 | from neuralflow.gradients import Grads
16 | from neuralflow.viterbi import Viterbi
17 |
18 | logger = logging.getLogger(__name__)
19 |
20 |
21 | class TestViterbi(unittest.TestCase):
22 |     """Test Viterbi decoding on synthetic data.
23 |     """
24 | @classmethod
25 | def setUpClass(cls):
26 |         """Create the ground-truth model and generate synthetic data.
27 |         """
28 |
29 | # Initialize pde solver and energy model class
30 | cls.boundary_mode = ['absorbing', 'reflecting']
31 |
32 | # Feel free to change
33 | cls.pde_solve_params = {'xbegin': -1, 'xend': 1, 'Np': 8, 'Ne': 16}
34 |
35 | cls.grid = GLLgrid(**cls.pde_solve_params)
36 |
37 | # Feel free to test different ground-truths
38 | cls.gt_param = {
39 | 'peq_model': {'model': 'linear_pot', 'params': {'slope': -1}},
40 | 'p0_model': {'model': 'single_well', 'params': {'miu': 100}},
41 | 'D': 0.5,
42 |             'fr_model': [
43 |                 {"model": "linear", "params": {"slope": 50, "bias": 60}},
44 |                 {"model": "linear", "params": {"slope": -50, "bias": 60}},
45 |             ]
46 | }
47 | cls.gt_model = model.new_model(**cls.gt_param, grid=cls.grid)
48 |
49 | # Synthetic data generation
50 | num_trials = 3
51 | cls.data, cls.latent_trajectories, cls.time_bins = {}, {}, {}
52 | for bm in cls.boundary_mode:
53 | dg = SyntheticData(cls.gt_model, bm)
54 | logger.info(f'Generating {num_trials} trials of data using {bm} '
55 | 'boundary mode')
56 | data, cls.time_bins[bm], cls.latent_trajectories[bm] = (
57 | dg.generate_data(0, 3, num_trials, 0)
58 | )
59 | cls.data[bm] = SpikeData(data, 'ISIs')
60 |
61 | def test_Viterbi(self):
62 | """ Use Viterbi to predict most likely trajectory on each trial. Plot
63 | the predicted trajectories against the ground-truth trajectories.
64 | """
65 |
66 |         # Run Viterbi for each boundary mode
67 | for bm in self.boundary_mode:
68 | with self.subTest(
69 | f'Testing Viterbi with {bm} boundary mode', bm=bm
70 | ):
71 | logger.info(f'Running Viterbi with {bm} boundary mode')
72 |
73 | grad = Grads(
74 | self.pde_solve_params, bm,
75 | num_neuron=self.gt_model.num_neuron,
76 | )
77 | viterbi = Viterbi(grad)
78 | trajectories_vit, _ = viterbi.run_viterbi(
79 | self.data[bm], self.gt_model
80 | )
81 |
82 | # Plot the results
83 | norm = matplotlib.colors.Normalize(vmin=0, vmax=10)
84 | cmap = matplotlib.cm.tab10
85 | colors = {key: cmap(norm(key))[:-1] for key in range(10)}
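                # Map each trial index to a fixed tab10 color ([:-1] drops
                # the alpha channel) so that the ground-truth and Viterbi
                # curves of a given trial share a color.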
86 | plt.figure()
87 | for t in range(min(len(self.data[bm].data), 3)):
88 | plt.plot(
89 | self.time_bins[bm][t], self.latent_trajectories[bm][t],
90 | color=colors[t],
91 | label=f'ground-truth trajectories, trial {t}',
92 | alpha=0.5
93 | )
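                    # x-axis: absolute spike times recovered from the ISI
                    # representation as a cumulative sum of the intervals,
                    # prepended with the trial start time 0.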
94 | plt.plot(
95 | np.concatenate(
96 | ([0], np.cumsum(self.data[bm].data[t][0]))),
97 | trajectories_vit[t], '--', color=colors[t],
98 | linewidth=2,
99 | label=f'Viterbi trajectories, trial {t}',
100 | )
101 | plt.title(f'Viterbi, boundary_mode = {bm}')
102 | plt.legend()
103 |
104 |
105 | if __name__ == '__main__':
106 | logging.basicConfig(level=logging.INFO)
107 | logging.getLogger().setLevel(logging.INFO)
108 | unittest.main()
109 |
--------------------------------------------------------------------------------
/tests/testhelper_PDE_Solve.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Tests the PDESolve class: pick a test problem from TestDict below.
4 | Plots error and execution time vs. N and fits power-law curves (log-log plot).
5 | """
6 |
7 | from neuralflow.PDE_Solve import PDESolve
8 | from pkg_resources import working_set
9 | import EV_problems_library as EV_lib
10 | from scipy.optimize import curve_fit
11 | import matplotlib.pyplot as plt
12 | import time
13 | import numpy as np
14 |
15 | GPU_support = any([pkg.key.startswith('cupy') for pkg in working_set])
16 |
17 |
18 | # Dictionary of available tests
19 | TestDict = {
20 | 'Sine': EV_lib.SineTest,
21 | 'Sine2': EV_lib.Sine2Test,
22 | 'Cosine': EV_lib.CosineTest,
23 | 'MixedCosineSine': EV_lib.MixedCosineSineTest,
24 | 'SquarePeqMixed': EV_lib.SquarePeqMixedTest,
25 | 'Bessel': EV_lib.BesselTest,
26 | }
27 |
28 | # Relative L2 error between exact and approximate solutions
29 |
30 |
31 | def L2Error(sol1, sol2):
32 | return np.linalg.norm(sol1-sol2, axis=0)/np.linalg.norm(sol1, axis=0)
33 |
34 | # Power-law model used for curve fitting
35 |
36 |
37 | def PowerLawFunction(x, b, c):
38 | return b*x**c
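# Note: the fits in the main block below linearize this power law in log-log
# space, log E = log(b) + c*log(N), so curve_fit returns log(b) as the
# intercept; it is converted back with np.exp before plotting.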
39 |
40 |
41 | def ErrorAndExecPlot(Nseq, L2Err, ExecTime, fit1, fit2, f2start, f1end):
42 | """Plot the results
43 | """
44 |     # Plot error and error fit (if successful)
45 | fig, ax1 = plt.gcf(), plt.gca()
46 | ax1.loglog(Nseq, L2Err, 'bo', label='L2 Error')
47 | if fit1 is not None:
48 | ax1.loglog(
49 | Nseq[:f1end], PowerLawFunction(Nseq[:f1end], *fit1), '-b',
50 | label='fit: E=%5.3f*N^%5.3f' % tuple(fit1)
51 | )
52 |
53 | ax1.set_xlabel('N, number of points')
54 |     ax1.set_ylabel('L2 error', color='b')
55 | ax1.tick_params('y', colors='b')
56 | ax1.legend(loc='upper center')
57 |
58 |     # Plot exec. time and its fit (if successful)
59 | ax2 = ax1.twinx()
60 | ax2.loglog(Nseq, ExecTime, 'gx', label='Execution time')
61 | if fit2 is not None:
62 | ax2.loglog(
63 | Nseq[f2start:], PowerLawFunction(Nseq[f2start:], *fit2), '-g',
64 | label='fit: T=%5.3f*N^%5.3f' % tuple(fit2)
65 | )
66 |
67 |     ax2.set_ylabel('Execution Time', color='g')
68 |     ax2.tick_params('y', colors='g')
69 | ax2.legend(loc='lower center')
70 | fig.tight_layout()
71 | plt.show()
72 |
73 |
74 | def PerformTest(Ne, Np, Nv, name, with_cuda):
75 | """ Solve EV problem and compute L2 error with the exact solution
76 | """
77 |
78 | # obtain interval bounds and BC:
79 | BC, xbegin, xend = TestDict[name](Nv, None, 'domain_and_bc')
80 |
81 | # Initialize class instance
82 | solver = PDESolve(
83 | xbegin, xend, Np, Ne, BoundCond=BC, Nv=Nv, with_cuda=with_cuda
84 | )
85 |
86 | # Obtain peq, boundary conditions, exact solution
87 | peq, q, w, EVal_exact, EVec_exact = TestDict[name](
88 | Nv, solver.grid.x_d, 'exact_solution'
89 | )
90 |
91 | device = 'GPU' if with_cuda else 'CPU'
92 | # Solve and scale eigenvectors
93 | if with_cuda:
94 | # Need to transfer inputs to GPU
95 | import cupy as cp
96 | peq = cp.asarray(peq, dtype='float64')
97 | if q is not None:
98 | q = cp.asarray(q, dtype='float64')
99 | w = cp.asarray(w, dtype='float64')
100 | start = time.time()
101 | EVal, EVec = solver.solve_EV(
102 | peq=peq, q=q, w=w, mode='normal', device=device
103 | )
104 | end = time.time()
105 | if with_cuda:
106 | # Transfer outputs back to CPU
107 | EVal = cp.asnumpy(EVal)
108 | EVec = cp.asnumpy(EVec)
109 | ExecTime = end-start
110 | EVec /= np.max(np.abs(EVec), 0)
111 |
112 | # Calculate error based on eigenvectors and error based on eigenvalues:
113 | L2ErrEVec = np.minimum(L2Error(EVec_exact, EVec),
114 | L2Error(EVec_exact, -EVec))
115 | L2ErrEVal = np.zeros(Nv)
116 | for j in range(Nv):
117 | L2ErrEVal[j] = (
118 | np.abs(EVal_exact[j]-EVal[j])
119 | if np.abs(EVal_exact[j]) < 10**(-3) else
120 | np.abs(EVal_exact[j]-EVal[j])/np.abs(EVal_exact[j])
121 | )
122 |
123 | return ExecTime, L2ErrEVec, L2ErrEVal
124 |
125 |
126 | if __name__ == '__main__':
127 | # Test parameters, feel free to change
128 |     # Try other tests (see TestDict above for the available tests)
129 | TestName = 'Bessel'
130 |
131 | with_cuda = False
132 | if with_cuda and not GPU_support:
133 | raise Exception(
134 | 'Cupy package not found. Install cupy package on cuda-enabled '
135 | 'platform and rerun this'
136 | )
137 |
138 | Nv = 10 # Number of eigenvectors/eigenvalues for error calculation
139 | Np = 6 # Degree of a single element
140 |
141 | # Specify different Ne (number of elements) for the test
142 | Ne_seq = np.array(
143 | [3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 20, 30, 40, 50, 75, 100,
144 | 150, 200, 250, 300, 1000],
145 | dtype='int'
146 | )
147 |
148 | # Total number of points:
149 | N_seq = (Np-1) * Ne_seq + 1
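    # Each element contributes Np GLL nodes and adjacent elements share an
    # endpoint, hence (Np - 1)*Ne + 1 unique grid points in total.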
150 |
151 | # Allocate Errors based on eigenvectors and eigenvalues, measure execution
152 | # times
153 | L2ErrEVec = np.zeros((len(N_seq), Nv))
154 | L2ErrEVal = np.zeros((len(N_seq), Nv))
155 | ExecTime = np.zeros(len(N_seq))
156 |
157 |     # Now calculate and compare:
158 | for i, Ne in enumerate(Ne_seq):
159 | ExecTime[i], L2ErrEVec[i, :], L2ErrEVal[i, :] = PerformTest(
160 | Ne, Np, Nv, TestName, with_cuda
161 | )
162 | print(f'Ne = {Ne} calculated, execution time = {ExecTime[i]:.5f} s')
163 |
164 | # Curve fit with power function (or linear function in log-log)
165 | ErrToFit = np.mean(L2ErrEVec[:, 1:], 1)
166 | endpoint = np.where(ErrToFit > 10**-10)
167 | if len(endpoint[0]) == 0:
168 | c1 = None
169 | endpoint = ([[-1]])
170 | else:
171 | c1, pc1 = curve_fit(
172 | lambda x, a, b: a+b*x,
173 | np.log(np.array(N_seq)[endpoint]),
174 | np.log(ErrToFit[endpoint]), bounds=([0, -100], [100, 0])
175 | )
176 | # convert coefficient from log to linear for plotting
177 | c1[0] = np.exp(c1[0])
178 |
179 | startpoint = np.where(ExecTime > 0.05)
180 | if len(startpoint[0]) == 0:
181 | c2 = None
182 | startpoint = ([[-1]])
183 | else:
184 | c2, pc2 = curve_fit(
185 | lambda x, a, b: a+b*x,
186 |             np.log(np.array(N_seq)[startpoint]),
187 |             np.log(ExecTime[startpoint]),  # fit log(ExecTime) vs log(N)
188 | bounds=([-100, 0], [0, 10])
189 | )
190 | c2[0] = np.exp(c2[0])
191 |
192 | # Plot the results
193 | plt.clf()
194 | ErrorAndExecPlot(
195 | N_seq, ErrToFit, ExecTime, c1, c2, startpoint[0][0], endpoint[0][-1]
196 | )
197 |
--------------------------------------------------------------------------------
/tests/visualization.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding: utf-8 -*-
3 | """ Visualize optimization results in unittests
4 | """
5 |
6 | import matplotlib.pyplot as plt
7 | import matplotlib.gridspec as gridspec
8 | import numpy as np
9 | import os
10 |
11 |
12 | def PlotResults(title, params_to_opt, nresults, results, em_gt, ll_gt,
13 | ll_gt_cv, save_fold=None):
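    """Plot loglik traces and fitted parameters from a unittest run.

    A summary inferred from the usage below: `results` is the optimization
    results dict with 'iter_num', 'logliks' (optionally 'logliksCV') and the
    fitted parameters keyed as 'peq', 'p0', 'D', 'fr'; `nresults` is the
    number of intermediate iterations to highlight; `em_gt` is the
    ground-truth model; `ll_gt` and `ll_gt_cv` hold the ground-truth
    training/validation logliks per datasample; the figure is saved into
    `save_fold` if provided.
    """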
14 | n_samples = results['logliks'].shape[0]
15 |
16 | title = (f'{title}, inference of {params_to_opt}')
17 |     if isinstance(params_to_opt, str):
18 | params_to_opt = [params_to_opt]
19 |
20 |     if 'C' in params_to_opt and 'Fr' in params_to_opt:
21 |         params_to_opt = [p for p in params_to_opt if p != 'C']  # both map to 'fr'; copy, don't mutate
22 |
23 | # size of parameters
24 | param_mapping = {'F': 'peq', 'F0': 'p0', 'C': 'fr', 'Fr': 'fr', 'D': 'D'}
25 | param_size = {
26 | p: results[param_mapping[p]][0].shape[0] for p in params_to_opt
27 | }
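    # param_size[p] == 1 means p is shared across datasamples;
    # param_size[p] == n_samples means p is fitted per datasample. This
    # drives the subplot count and the ', shared' label suffix below.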
28 | if n_samples > 1:
29 | title += (
30 | f', shared_params={[p for p in params_to_opt if param_size[p]==1]}'
31 | )
32 | num_plots = sum(param_size.values()) + n_samples
33 |     # Three columns of subplots; enough rows to fit them (ceil division)
34 |     num_rows = num_plots // 3 + (num_plots % 3 > 0)
35 |     fig = plt.figure(figsize=(25, 5*num_rows))
36 |     gs = gridspec.GridSpec(num_rows, 3, wspace=0.5, hspace=0.8)
37 |
38 | if nresults == 1:
39 | title += ' by line search'
40 |
41 | fig.suptitle(title)
42 | colors = plt.get_cmap('jet')(np.linspace(0.0, 1.0, nresults+1))
43 |
44 | subplot_num = -1
45 | for samp in range(n_samples):
46 | subplot_num += 1
47 | ax = plt.subplot(gs[subplot_num//3, subplot_num % 3])
48 | iters_to_plot = results['iter_num'].copy().astype('float')
49 |
50 |         # Log scale cannot display zero, so nudge the first iteration
51 | if results['iter_num'][0] == 0:
52 | iters_to_plot[0] = 10**-1
53 |
54 | # Scale loglik
55 | ll0_tr = results['logliks'][samp][0]
56 | ll_tr = (
57 | (results['logliks'][samp][results['iter_num']] - ll0_tr) /
58 | np.abs(ll0_tr)
59 | )
60 | ax.semilogx(
61 | iters_to_plot, ll_tr, '--', linewidth=3, color=[0.1, 0.1, 0.1],
62 | label=f'Training negative loglik, datasample {samp}'
63 | )
64 | plt.plot(
65 | results['iter_num'],
66 | (ll_gt[samp] - ll0_tr) / np.abs(ll0_tr) * np.ones_like(
67 | results['iter_num']
68 | ),
69 | '-', linewidth=3, color=[0.1, 0.1, 0.1],
70 | label='Ground-truth Training'
71 | )
72 | if ll_gt_cv is not None:
73 | ll0_cv = results['logliksCV'][samp][0]
74 | ll_cv = (
75 | (results['logliksCV'][samp][results['iter_num']] - ll0_cv)
76 | / np.abs(ll0_cv)
77 | )
78 | ax.semilogx(
79 | iters_to_plot, ll_cv, '-.', linewidth=3, color='blue',
80 |                 label=f'Validation negative loglik, datasample {samp}'
81 | )
82 | plt.plot(
83 | results['iter_num'],
84 | (ll_gt_cv[samp]-ll0_cv)/np.abs(ll0_cv) * np.ones_like(
85 | results['iter_num']
86 | ),
87 | '-', linewidth=3, color='blue',
88 | label='Ground-truth Validation'
89 | )
90 |
91 | iter_to_plot = (
92 | np.linspace(0, len(results['iter_num']) - 1, nresults+1)
93 | .round()
94 | .astype(int)
95 | )
96 | for i in range(nresults+1):
97 | iter_num = iter_to_plot[i]
98 | plt.plot(
99 | iters_to_plot[iter_num], ll_tr[iter_num], 'o',
100 | color=colors[i, :], markersize=15,
101 | label=f'Iteration {iter_num}'
102 | )
103 | if ll_gt_cv is not None:
104 | plt.plot(
105 | iters_to_plot[iter_num], ll_cv[iter_num], 'o',
106 | color=colors[i, :], markersize=15
107 | )
108 | plt.ylabel(r'Relative $\log\mathscr{L}$', fontsize=15)
109 | plt.xlabel('Iteration number', fontsize=15)
110 | plt.legend(ncol=2, prop={'size': 6})
111 | plt.gcf().suptitle(title)
112 |
113 |         for param in params_to_opt:
114 | if samp > param_size[param] - 1:
115 | continue
116 | subplot_num = subplot_num + 1
117 | ax = plt.subplot(gs[subplot_num//3, subplot_num % 3])
118 |
119 | if n_samples > 1:
120 | if param_size[param] > 1:
121 | suffix = f', DS {samp}'
122 | else:
123 | suffix = ', shared'
124 | else:
125 | suffix = ''
126 | if param == 'F':
127 | plt.plot(
128 | em_gt.grid.x_d, -np.log(em_gt.peq[samp]), color='black',
129 | linewidth=3, label='Ground-truth'
130 | )
131 | for i in range(nresults+1):
132 | iter_num = iter_to_plot[i]
133 | plt.plot(
134 | em_gt.grid.x_d,
135 | -np.log(results['peq'][iter_num][samp, :]),
136 | color=colors[i, :], linewidth=3,
137 | label=f'Iteration {results["iter_num"][iter_num]}'
138 | )
139 | plt.ylabel(r'Potential, $\Phi(x)$' + suffix, fontsize=15)
140 | plt.xlabel(r'Latent state, $x$', fontsize=15)
141 | elif param == 'F0':
142 | plt.plot(
143 | em_gt.grid.x_d, em_gt.p0[samp], color='black', linewidth=3,
144 | label='Ground-truth'
145 | )
146 | for i in range(nresults+1):
147 | iter_num = iter_to_plot[i]
148 | plt.plot(
149 | em_gt.grid.x_d, results['p0'][iter_num][samp, :],
150 | color=colors[i, :], linewidth=3,
151 | label=f'Iteration {results["iter_num"][iter_num]}'
152 | )
153 | plt.ylabel(r'$p_0(x)$' + suffix, fontsize=15)
154 | plt.xlabel(r'Latent state, $x$', fontsize=15)
155 | elif param == 'D':
156 | all_iters = results['iter_num']
157 | plt.plot(
158 | all_iters, np.ones_like(all_iters) * em_gt.D[samp],
159 | color='black', linewidth=3, label='Ground-truth'
160 | )
161 | plt.plot(
162 | all_iters,
163 | [results['D'][i][samp] for i in range(len(results['D']))],
164 | color=[0.5, 0.5, 0.5], linewidth=3
165 | )
166 | plt.ylabel(r'$D$' + suffix, fontsize=15)
167 | plt.xlabel(r'Epoch number', fontsize=15)
168 | elif param in ['Fr', 'C']:
169 | num_neurons = results['fr'][0].shape[2]
170 | for neuron in range(num_neurons):
171 |                     plt.plot(
172 |                         em_gt.grid.x_d,
173 |                         em_gt.fr[samp, :, neuron],
174 |                         color='black', linewidth=3,
175 |                         label='Ground-truth' if neuron == 0 else None
176 |                     )
177 | for i in range(nresults+1):
178 | iter_num = iter_to_plot[i]
179 | for neuron in range(num_neurons):
180 | plt.plot(em_gt.grid.x_d,
181 | results['fr'][iter_num][samp, :, neuron],
182 | color=colors[i, :], linewidth=3
183 | )
184 | plt.ylabel(r'$fr(x)$' + suffix, fontsize=15)
185 | plt.xlabel(r'Latent state, $x$', fontsize=15)
186 | plt.legend(ncol=2, prop={'size': 6})
187 |
188 | if save_fold is not None:
189 | plt.savefig(os.path.join(save_fold, f'{title}.png'))
190 | plt.close(plt.gcf())
191 |
--------------------------------------------------------------------------------