├── .travis.yml ├── AUTHORS ├── INSTALL ├── LICENSE ├── MANIFEST.in ├── Makefile ├── README.rst ├── THANKS ├── debian ├── changelog ├── compat ├── control ├── copyright └── rules ├── doc ├── .gitignore ├── Makefile ├── README.rst ├── analysis.rst ├── conf.py ├── datastore.rst ├── example.param ├── guidelines.rst ├── hdf5.rst ├── images │ ├── logo.svg │ ├── logo_apd.svg │ └── poster_NT_at_INCF_at_SfN.svg ├── index.rst ├── io.rst ├── management.rst ├── parameters.rst ├── plotting.rst ├── signals.rst ├── spike2.rst ├── stgen.rst ├── testdocs.py └── wikidoc.py ├── examples ├── matlab_vs_python │ ├── acml_rng_mt.py │ ├── append_test.py │ ├── smallnet.m │ ├── smallnet.py │ ├── smallnet_acml.py │ ├── smallnet_inlineC.py │ └── tweave2.py ├── parameter_search │ ├── __init__.py │ └── parameter_search_example.py ├── parameters │ ├── LGN_receptive_field.py │ ├── exported_model_parameters.tex │ ├── model_parameters.pdf │ ├── model_parameters.tex │ ├── parameter_to_latex.py │ ├── tables_exported_model_parameters.tex │ └── validation │ │ ├── data │ │ ├── conf1.yaml │ │ ├── conf2.yaml │ │ └── conf_schema1.yaml │ │ └── simple_validation.py ├── retina │ ├── benchmark_linear.py │ ├── benchmark_noise.py │ ├── benchmark_retina.py │ ├── results │ │ ├── fig-benchmark_linear.png │ │ ├── fig-benchmark_noise.png │ │ └── fig-benchmark_retina.png │ ├── retina.py │ └── test_parallel.py ├── sfn2008 │ ├── c_data │ ├── sfn_example_parameterspace.py │ ├── sfn_example_simulated_data.py │ ├── sfn_example_spike2.py │ ├── sfn_example_stgen.py │ ├── spike_data │ └── vm_data ├── single_neuron │ ├── CRF_neuron_vs_signal.py │ ├── SpikeTrain2Play.wav │ ├── fiber.param │ ├── playing_with_simple_single_neuron.py │ ├── results │ │ └── fig-CRF_neuron_vs_signal.png │ └── simple_single_neuron.py ├── spike2 │ ├── IF-Curve.py │ └── neurons_parameter_estimate.py └── stgen │ ├── inh_2Dmarkov_psth.py │ ├── inh_gamma_psth.py │ └── shotnoise_step.py ├── requirements.txt ├── setup.py ├── src ├── __init__.py ├── analysis.py ├── datastore │ ├── __init__.py │ ├── interface.py │ ├── keygenerators.py │ └── shelve_ds.py ├── export.py ├── io.py ├── optimize │ ├── __init__.py │ ├── optimizers.py │ └── parameter_search.py ├── parameters │ ├── __init__.py │ └── validators.py ├── plotting.py ├── random.py ├── sandbox.py ├── signals │ ├── __init__.py │ ├── analogs.py │ ├── intervals.py │ ├── pairs.py │ └── spikes.py ├── spike2 │ ├── __init__.py │ ├── sonpy │ │ ├── README │ │ ├── __init__.py │ │ ├── _marker.py │ │ ├── _waveform.py │ │ └── son.py │ └── spike2channels.py ├── stgen.py ├── tisean │ ├── __init__.py │ └── tisean.py ├── utilities │ └── __init__.py ├── visual_logging.py └── visualization │ └── __init__.py ├── std_params ├── PyNN │ └── IF_cond_exp_gsfa_grr │ │ └── muller_etal2007.param └── example.param └── test ├── analysis ├── crosscorrelate │ ├── out_matlab_int │ ├── out_matlab_int_lag_100 │ ├── out_matlab_int_lag_500 │ └── spike_data └── make_kernel │ ├── alp.mat │ ├── alp_reversed.mat │ ├── box.mat │ ├── epa.mat │ ├── exp.mat │ ├── exp_reversed.mat │ ├── gau.mat │ └── tri.mat ├── test_analogs.py ├── test_analysis.py ├── test_datastore.py ├── test_io.py ├── test_logging.py ├── test_optimize.py ├── test_parameters.py ├── test_plotting.py ├── test_random.py ├── test_spikes.py ├── test_stgen.py ├── test_utilities.py └── test_validators.py /.travis.yml: -------------------------------------------------------------------------------- 1 | # Idea from Matthew Brett of nipy. Pretend to be erlang and use apt, for now. 
2 | language: erlang 3 | install: 4 | - sudo apt-get install python-dev 5 | - sudo apt-get install python-numpy 6 | - sudo apt-get install python-scipy 7 | - sudo apt-get install python-matplotlib 8 | - sudo apt-get install ipython 9 | - sudo apt-get install python-tables 10 | - sudo apt-get install python-imaging 11 | - sudo apt-get install python-rpy 12 | - sudo apt-get install python-rpy2 13 | - sudo apt-get install python-yaml 14 | - sudo apt-get install python-pip 15 | - sudo apt-get install python-nose 16 | - sudo apt-get install python-coverage 17 | - pip install --user -r requirements.txt --use-mirrors 18 | - pip install --user . --use-mirrors 19 | script: 20 | - nosetests --with-coverage --cover-package=NeuroTools -------------------------------------------------------------------------------- /AUTHORS: -------------------------------------------------------------------------------- 1 | NeuroTools is a community-driven open-source project with many hard 2 | working contributors. They are ... 3 | 4 | Daniel Bruederle (bruederle@neuralensemble.org) 5 | Andrew Davison (apdavison@neuralensemble.org) 6 | Jens Kremkow (kremkow@neuralensemble.org) 7 | Eilif Muller (emuller@neuralensemble.org) 8 | Eric Mueller (eric.mueller@neuralensemble.org) 9 | Laurent Perrinet (perrinet@neuralensemble.org) 10 | Michael Schmuker (mschmuker@neuralensemble.org) 11 | Pierre Yger (yger@neuralensemble.org) 12 | 13 | On GitHub: 14 | Trevor Bekolay https://github.com/tbekolay 15 | Rick Gerkin https://github.com/rgerkin 16 | 17 | Module maintainers: 18 | 19 | signals: Pierre Yger 20 | Laurent Perrinet 21 | Jens Kremkow 22 | parameters: Andrew Davison 23 | io: Pierre Yger 24 | plotting: Daniel Bruederle 25 | analysis: Eilif Muller 26 | utilities: Daniel Bruederle 27 | stgen: Eilif Muller 28 | Michael Schmuker 29 | examples: Laurent Perrinet 30 | spike2: Jens Kremkow 31 | datastore: Andrew Davison 32 | 33 | Documentation manager: Pierre Yger 34 | Web-admin: Eilif Muller 35 | Promotion: Laurent Perrinet 36 | 37 | Testing manager: Andrew Davison 38 | 39 | Packaging manager: Eric Mueller 40 | 41 | 42 | NeuroTools software development is supported in part by the EU under 43 | the grant IST-2005-15879 (FACETS, http://www.facets-project.org). -------------------------------------------------------------------------------- /INSTALL: -------------------------------------------------------------------------------- 1 | NeuroTools Installation 2 | ======================= 3 | 4 | The simplest method is to use: 5 | 6 | $ pip install NeuroTools 7 | 8 | 9 | Compiling from source 10 | --------------------- 11 | 12 | First download the package files:: 13 | 14 | $ git clone https://github.com/meduz/NeuroTools.git 15 | $ cd NeuroTools 16 | 17 | Install as root (if you want a global install):: 18 | 19 | # python setup.py install 20 | 21 | or for those without root access, install to a writable location, something like:: 22 | 23 | $ python setup.py install --prefix=$HOME/opt/mystuff 24 | 25 | Then you need to add the location:: 26 | 27 | $HOME/opt/mystuff/lib/python2.7/site-packages/ 28 | 29 | to your PYTHONPATH or within python in your sys.path directive. 30 | 31 | Note, here lib is replaced by `lib64` on 64 bit systems, and `python2.7` is 32 | (obviously) replaced by your python version. 33 | 34 | Developers of NeuroTools may be interested in using the last-updated version 35 | from git. 
36 | A solution is to symbolically link the src folder to a folder included in the path:: 37 | 38 | cd my_local_site-packages_folder 39 | ln -s where_I_check-out_neuralensemble/NeuroTools/trunk/src NeuroTools 40 | 41 | and voila! 42 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include README.rst 2 | include doc/*.rst 3 | recursive-include examples *.py *.param 4 | include AUTHORS 5 | include INSTALL 6 | include LICENSE 7 | include THANKS 8 | 9 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | default: pypi_all 2 | 3 | pypi_all: pypi_tags pypi_push pypi_upload 4 | # https://docs.python.org/2/distutils/packageindex.html 5 | pypi_tags: 6 | git commit -am' tagging for PyPI ' 7 | # in case you wish to delete tags, visit http://wptheming.com/2011/04/add-remove-github-tags/ 8 | git tag 0.3.1 -m "Adds a tag so that we can put this on PyPI." 9 | git push --tags origin master 10 | 11 | pypi_push: 12 | python setup.py register 13 | 14 | pypi_upload: 15 | python setup.py sdist upload 16 | 17 | pypi_docs: 18 | rm web.zip 19 | #ipython nbconvert --to html $(NAME).ipynb 20 | #mv $(NAME).html index.html 21 | #runipy $(NAME).ipynb --html index.html 22 | zip web.zip index.html 23 | open https://pypi.python.org/pypi?action=pkg_edit&name=$NAME 24 | 25 | install_dev: 26 | pip uninstall -y $(NAME) 27 | pip install -e . 28 | 29 | clean: 30 | rm -fr *.pyc *.py~ build dist 31 | 32 | .PHONY: clean 33 | -------------------------------------------------------------------------------- /README.rst: -------------------------------------------------------------------------------- 1 | ========== 2 | NeuroTools 3 | ========== 4 | 5 | NeuroTools is a collection of tools for representing and analyzing 6 | neuroscientific data. 7 | 8 | NeuroTools is written in Python, and works best with PyNN, or one of the growing 9 | list of simulation engines with a Python front-end such as NEURON, NEST, PCSIM, 10 | FACETS Neuromorphic VLSI, Brian, MOOSE/GENESIS, Neurospaces/GENESIS. NeuroTools 11 | provides modules to facilitate simulation setup, parameterization and 12 | instrumentation, and data management, analysis and visualization. The 13 | data-related tools are equally suited to analysis of experimental data, although 14 | that is not the primary motivation for their development. 15 | 16 | NeuroTools aims to: 17 | 18 | 1. increase the productivity of individual modellers by automating, simplifying, 19 | and establishing best-practices for common tasks, 20 | 21 | 2. increase the productivity of the neuroscience modelling community by reducing 22 | the amount of code duplication across simulation communities, 23 | 24 | 3. increase the reliability of data analysis tools leveraging Linus's law: 25 | "given enough eyeballs, all bugs are shallow." 26 | 27 | Code status 28 | ----------- 29 | 30 | .. image:: https://travis-ci.org/NeuralEnsemble/NeuroTools.png?branch=master 31 | :target: https://travis-ci.org/NeuralEnsemble/NeuroTools 32 | .. image:: https://coveralls.io/repos/NeuralEnsemble/NeuroTools/badge.png 33 | :target: https://coveralls.io/r/NeuralEnsemble/NeuroTools 34 | 35 | This code is considered to be mature and will not evolve - we will only focus on bug fixes. 
36 | 37 | Development of a new analysis library takes place @ https://github.com/NeuralEnsemble/elephant 38 | 39 | Contributing to NeuroTools 40 | -------------------------- 41 | 42 | If you are interested in participating in NeuroTools development, please see 43 | the webpage at http://neuralensemble.org/NeuroTools 44 | 45 | Related projects 46 | ---------------- 47 | 48 | NeuroTools has evolved in different flavours: 49 | 50 | - a legacy version @ https://neuralensemble.kip.uni-heidelberg.de/svn/NeuroTools 51 | - a fork by Trevor Bekolay: https://github.com/tbekolay/neurotools 52 | - another fork (version 0.2.0) : https://github.com/meduz/NeuroTools 53 | 54 | Installation 55 | ------------ 56 | 57 | To install use: 58 | 59 | pip install NeuroTools 60 | 61 | For installation instructions, see doc/source/install.rst 62 | 63 | Documentation 64 | ------------- 65 | 66 | For documentation see the ``doc`` folder or http://packages.python.org/NeuroTools 67 | 68 | For examples see the ``examples`` folder. 69 | 70 | 71 | More information 72 | ---------------- 73 | 74 | - Home page: http://neuralensemble.org/NeuroTools 75 | - Mailing list: https://groups.google.com/forum/?fromgroups#!forum/neuralensemble 76 | - Documentation: http://packages.python.org/neo/ 77 | - Bug reports: https://github.com/NeuralEnsemble/NeuroTools/issues 78 | 79 | 80 | License 81 | ------- 82 | 83 | Copyright (C) 2008 Daniel Bruederle, Andrew Davison, Jens Kremkow 84 | Laurent Perrinet, Michael Schmuker, Eilif Muller, Eric Mueller, Pierre Yger 85 | 86 | NeuroTools is free software; you can redistribute it and/or modify 87 | it under the terms of the GNU General Public License as published by 88 | the Free Software Foundation; either version 2 of the License, or 89 | (at your option) any later version. 90 | 91 | This program is distributed in the hope that it will be useful, 92 | but WITHOUT ANY WARRANTY; without even the implied warranty of 93 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 94 | GNU General Public License for more details. 95 | 96 | You should have received a copy of the GNU General Public License along 97 | with this program; if not, write to the Free Software Foundation, Inc., 98 | 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 99 | 100 | The GNU General Public License does not permit this software to be 101 | redistributed in proprietary programs. 102 | 103 | See ``LICENSE``. 104 | 105 | -------------------------------------------------------------------------------- /THANKS: -------------------------------------------------------------------------------- 1 | Here we acknowledge the support of others not mentioned in AUTHORS: 2 | 3 | 4 | -------------------------------------------------------------------------------- /debian/changelog: -------------------------------------------------------------------------------- 1 | neurotools (0.1.0+svn446) karmic; urgency=low 2 | 3 | * debian/rules using pysupport stuff 4 | * svn revision 446 5 | 6 | -- Eric Mueller Wed, 12 May 2010 18:53:58 +0200 7 | 8 | neurotools (0.1.0+svn) intrepid; urgency=low 9 | 10 | * svn 11 | 12 | -- neurotools Mon, 17 Nov 2008 11:45:54 +0100 13 | 14 | neurotools (0.1.0) intrepid; urgency=low 15 | 16 | * Initial release. 
17 | 
18 |  -- neurotools   Fri, 14 Nov 2008 11:40:49 +0100
19 | 
--------------------------------------------------------------------------------
/debian/compat:
--------------------------------------------------------------------------------
1 | 7
2 | 
--------------------------------------------------------------------------------
/debian/control:
--------------------------------------------------------------------------------
1 | Source: neurotools
2 | Priority: optional
3 | Maintainer: 
4 | Build-Depends: debhelper (>= 7)
5 | Build-Depends-Indep: python-dev (>= 2.5) | python2.5-dev, python-central
6 | Standards-Version: 3.8.0
7 | Section: python
8 | Homepage: http://neuralensemble.org/NeuroTools
9 | XS-Python-Version: >= 2.5
10 | 
11 | Package: python-neurotools
12 | Architecture: all
13 | Depends: ${python:Depends}, python-numpy, python-scipy, python-matplotlib, python-tk
14 | Recommends: mencoder, nest2, python-rpy, texlive-latex-extra
15 | XB-Python-Version: ${python:Versions}
16 | Description: Tools to support neural simulation tasks.
17 |  - The signals module provides core functionality that allows manipulation of
18 |    and calculations with spike trains and analog signals.
19 |  - The spike2 module offers an easy way for reading data from CED's Spike2 Son
20 |    files into the NeuroTools environment.
21 |  - The parameters module contains classes to make managing large, hierarchical
22 |    parameter sets easier.
23 |  - The analysis module contains miscellaneous analysis functions.
24 |  - The stgen module contains various stochastic process generators relevant for
25 |    Neuroscience (OU, Poisson, inhomogeneous gamma, ...).
26 |  - The utilities sub-package contains srblib, an easy-to-use interface for
27 |    interacting with SRB servers.
28 |  - The io module is the gateway for all reading/writing of files, in different
29 |    formats, in NeuroTools.
30 |  - The plotting module contains a collection of tools for plotting and image
31 |    processing, based on Matplotlib and the Python Imaging Library.
32 |  - The datastore presents a consistent interface for persistent data storage
33 |    (e.g., for caching intermediate results), irrespective of storage back-end.
34 | 
35 | 
--------------------------------------------------------------------------------
/debian/copyright:
--------------------------------------------------------------------------------
1 | NeuroTools: Analysis, visualization and management of real and simulated
2 | neuroscience data. Copyright (C) 2008 Daniel Bruederle, Andrew Davison, Jens
3 | Kremkow, Laurent Perrinet, Michael Schmuker, Eilif Muller, Eric Mueller, Pierre
4 | Yger
5 | 
6 | NeuroTools is free software; you can redistribute it and/or modify it under the
7 | terms of the GNU General Public License as published by the Free Software
8 | Foundation; either version 2 of the License, or (at your option) any later
9 | version.
10 | 
11 | This program is distributed in the hope that it will be useful, but WITHOUT ANY
12 | WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
13 | PARTICULAR PURPOSE. See the GNU General Public License for more details.
14 | 
15 | You should have received a copy of the GNU General Public License along with
16 | this program; if not, write to the Free Software Foundation, Inc., 51 Franklin
17 | Street, Fifth Floor, Boston, MA 02110-1301 USA.
18 | 
19 | The GNU General Public License does not permit this software to be
20 | redistributed in proprietary programs.
21 | 
--------------------------------------------------------------------------------
/debian/rules:
--------------------------------------------------------------------------------
1 | #!/usr/bin/make -f
2 | # -*- makefile -*-
3 | 
4 | DEB_PYTHON_SYSTEM := pysupport
5 | 
6 | include /usr/share/cdbs/1/rules/debhelper.mk
7 | include /usr/share/cdbs/1/class/python-distutils.mk
8 | 
9 | clean::
10 | 	rm -rf build build-stamp configure-stamp build/ MANIFEST
11 | 	dh_clean
12 | 
--------------------------------------------------------------------------------
/doc/.gitignore:
--------------------------------------------------------------------------------
1 | _build
2 | 
--------------------------------------------------------------------------------
/doc/Makefile:
--------------------------------------------------------------------------------
1 | # Makefile for Sphinx documentation
2 | #
3 | 
4 | # You can set these variables from the command line.
5 | SPHINXOPTS    =
6 | SPHINXBUILD   = sphinx-build
7 | PAPER         =
8 | BUILDDIR      = _build
9 | 
10 | # Internal variables.
11 | PAPEROPT_a4     = -D latex_paper_size=a4
12 | PAPEROPT_letter = -D latex_paper_size=letter
13 | ALLSPHINXOPTS   = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
14 | # the i18n builder cannot share the environment and doctrees with the others
15 | I18NSPHINXOPTS  = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
16 | 
17 | .PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext
18 | 
19 | help:
20 | 	@echo "Please use \`make <target>' where <target> is one of"
21 | 	@echo "  html       to make standalone HTML files"
22 | 	@echo "  dirhtml    to make HTML files named index.html in directories"
23 | 	@echo "  singlehtml to make a single large HTML file"
24 | 	@echo "  pickle     to make pickle files"
25 | 	@echo "  json       to make JSON files"
26 | 	@echo "  htmlhelp   to make HTML files and a HTML help project"
27 | 	@echo "  qthelp     to make HTML files and a qthelp project"
28 | 	@echo "  devhelp    to make HTML files and a Devhelp project"
29 | 	@echo "  epub       to make an epub"
30 | 	@echo "  latex      to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
31 | 	@echo "  latexpdf   to make LaTeX files and run them through pdflatex"
32 | 	@echo "  text       to make text files"
33 | 	@echo "  man        to make manual pages"
34 | 	@echo "  texinfo    to make Texinfo files"
35 | 	@echo "  info       to make Texinfo files and run them through makeinfo"
36 | 	@echo "  gettext    to make PO message catalogs"
37 | 	@echo "  changes    to make an overview of all changed/added/deprecated items"
38 | 	@echo "  linkcheck  to check all external links for integrity"
39 | 	@echo "  doctest    to run all doctests embedded in the documentation (if enabled)"
40 | 
41 | clean:
42 | 	-rm -rf $(BUILDDIR)/*
43 | 
44 | html:
45 | 	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
46 | 	@echo
47 | 	@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
48 | 
49 | dirhtml:
50 | 	$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
51 | 	@echo
52 | 	@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
53 | 
54 | singlehtml:
55 | 	$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
56 | 	@echo
57 | 	@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
58 | 
59 | pickle:
60 | 	$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
61 | 	@echo
62 | 	@echo "Build finished; now you can process the pickle files."
63 | 64 | json: 65 | $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json 66 | @echo 67 | @echo "Build finished; now you can process the JSON files." 68 | 69 | htmlhelp: 70 | $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp 71 | @echo 72 | @echo "Build finished; now you can run HTML Help Workshop with the" \ 73 | ".hhp project file in $(BUILDDIR)/htmlhelp." 74 | 75 | qthelp: 76 | $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp 77 | @echo 78 | @echo "Build finished; now you can run "qcollectiongenerator" with the" \ 79 | ".qhcp project file in $(BUILDDIR)/qthelp, like this:" 80 | @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/neurotools.qhcp" 81 | @echo "To view the help file:" 82 | @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/neurotools.qhc" 83 | 84 | devhelp: 85 | $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp 86 | @echo 87 | @echo "Build finished." 88 | @echo "To view the help file:" 89 | @echo "# mkdir -p $$HOME/.local/share/devhelp/neurotools" 90 | @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/neurotools" 91 | @echo "# devhelp" 92 | 93 | epub: 94 | $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub 95 | @echo 96 | @echo "Build finished. The epub file is in $(BUILDDIR)/epub." 97 | 98 | latex: 99 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 100 | @echo 101 | @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." 102 | @echo "Run \`make' in that directory to run these through (pdf)latex" \ 103 | "(use \`make latexpdf' here to do that automatically)." 104 | 105 | latexpdf: 106 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 107 | @echo "Running LaTeX files through pdflatex..." 108 | $(MAKE) -C $(BUILDDIR)/latex all-pdf 109 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 110 | 111 | text: 112 | $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text 113 | @echo 114 | @echo "Build finished. The text files are in $(BUILDDIR)/text." 115 | 116 | man: 117 | $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man 118 | @echo 119 | @echo "Build finished. The manual pages are in $(BUILDDIR)/man." 120 | 121 | texinfo: 122 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 123 | @echo 124 | @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." 125 | @echo "Run \`make' in that directory to run these through makeinfo" \ 126 | "(use \`make info' here to do that automatically)." 127 | 128 | info: 129 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 130 | @echo "Running Texinfo files through makeinfo..." 131 | make -C $(BUILDDIR)/texinfo info 132 | @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." 133 | 134 | gettext: 135 | $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale 136 | @echo 137 | @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." 138 | 139 | changes: 140 | $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes 141 | @echo 142 | @echo "The overview file is in $(BUILDDIR)/changes." 143 | 144 | linkcheck: 145 | $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck 146 | @echo 147 | @echo "Link check complete; look for any errors in the above output " \ 148 | "or in $(BUILDDIR)/linkcheck/output.txt." 149 | 150 | doctest: 151 | $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest 152 | @echo "Testing of doctests in the sources finished, look at the " \ 153 | "results in $(BUILDDIR)/doctest/output.txt." 
154 | 
--------------------------------------------------------------------------------
/doc/README.rst:
--------------------------------------------------------------------------------
1 | =================================
2 | Notes on NeuroTools documentation
3 | =================================
4 | 
5 | The text files in this directory (including this one) are in reStructuredText_ format, which can easily be processed into HTML or LaTeX formats using Docutils_, e.g.::
6 | 
7 |     $ rst2html.py --initial-header-level=2 parameters.rst > parameters.html
8 | 
9 | More easily, you may use the provided ``Makefile``::
10 | 
11 |     $ make html
12 | 
13 | Many of the files contain examples of interactive python sessions. The validity of this code can be tested by running, for example::
14 | 
15 |     $ python testdocs.py parameters.rst
16 | 
17 | .. _reStructuredText: http://docutils.sourceforge.net/rst.html
18 | .. _Docutils: http://docutils.sourceforge.net/
--------------------------------------------------------------------------------
/doc/analysis.rst:
--------------------------------------------------------------------------------
1 | Analysis
2 | ========
3 | 
4 | Autodoc
5 | -------
6 | 
7 | .. automodule:: NeuroTools.analysis
8 |     :members:
9 |     :undoc-members:
--------------------------------------------------------------------------------
/doc/datastore.rst:
--------------------------------------------------------------------------------
1 | The ``datastore`` module
2 | ========================
3 | 
4 | The ``datastore`` module aims to present a consistent interface for persistent
5 | data storage, irrespective of storage back-end. The main intended usage is for
6 | caching of intermediate results. If an object's data takes a long time to
7 | compute, the object can save that data to a datastore the first time it is
8 | run; then, if an identical object is needed in future, it can simply retrieve
9 | its data from the store instead of computing it again.
10 | 
11 | Since it is intended for objects to be able to store part or all of their
12 | internal data, the storage/retrieval keys are based on the object identity and
13 | state.
14 | 
15 | We assume that an object's identity is uniquely defined by its type (which may
16 | also depend on the source code revision number) and its parameters, while its
17 | state is defined by its identity and by its inputs (we should possibly add some
18 | concept of time to this).
19 | 
20 | Hence, any object (which we call a 'component' in this context) must have
21 | the following attributes:
22 | 
23 | ``parameters``
24 |     a NeuroTools ``ParameterSet`` object
25 | 
26 | ``input``
27 |     another component or ``None``; we assume a single input. A list of
28 |     inputs should also be possible. We need to be wary of recurrent loops,
29 |     in which two components both have each other as direct or indirect
30 |     inputs.
31 | 
32 | ``version``
33 |     the source-code version
34 | 
35 | There are two advantages to using the ``datastore`` module rather than just
36 | using, say, ``shelve`` directly:
37 | 
38 | 1. You don't have to worry about keeping track of the key used to identify
39 |    your data in the store: the ``DataStore`` object takes care of this for
40 |    you.
41 | 2. You can use various different back-ends to store your data (local
42 |    filesystem, remote filesystem, database) and to manage the keys
43 |    (``shelve``, a database, the filesystem), and the interface remains the
44 |    same.
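
To make this concrete, here is a minimal sketch of a component exposing the
three required attributes (the class itself and its internals are hypothetical;
only the ``parameters``/``input``/``version`` attributes are prescribed by the
interface described above)::

    >>> from NeuroTools.parameters import ParameterSet
    >>> class CachedComponent(object):
    ...     version = "r1"                         # source-code version
    ...     def __init__(self, parameters, input=None):
    ...         # identity: type + parameters; state: identity + inputs
    ...         self.parameters = ParameterSet(parameters)
    ...         self.input = input                 # another component, or None
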
45 | 
46 | Creating a datastore
47 | --------------------
48 | 
49 | Two different storage backends are available, ``ShelveDataStore`` and
50 | ``DjangoORMDataStore``. It is also intended to
51 | be easy to write your own, custom storage backend. Whichever backend is used,
52 | after you have created your datastore, the interface is the same. For this
53 | example we will use the ``ShelveDataStore``::
54 | 
55 |     >>> from NeuroTools.datastore import ShelveDataStore
56 |     >>> datastore = ShelveDataStore(root_dir="/tmp")
57 | 
58 | Here we specify that the ``shelve`` files will be created in ``/tmp``. Now let
59 | us create a simple component whose data we wish to store::
60 | 
61 |     >>> class SimpleComponent(object):
62 |     ...     def __init__(self, parameters,
63 | 
64 | Autodoc
65 | -------
66 | 
67 | .. automodule:: NeuroTools.datastore
68 |     :members:
69 |     :undoc-members:
70 | 
--------------------------------------------------------------------------------
/doc/example.param:
--------------------------------------------------------------------------------
1 | {
2 |   "tau_refrac": 0.11,
3 |   "tau_m": 10.0,
4 |   "cm": 0.25,
5 |   "synI": {
6 |     "tau": 10.0,
7 |     "E": -75.0,
8 |   },
9 |   "synE": {
10 |     "tau": 1.5,
11 |     "E": 0.0,
12 |   },
13 |   "v_thresh": -57.0,
14 |   "v_reset": -70.0,
15 |   "v_rest": -70.0,
16 | }
--------------------------------------------------------------------------------
/doc/guidelines.rst:
--------------------------------------------------------------------------------
1 | Developer Guidelines
2 | ====================
3 | 
4 | Coding Style
5 | ------------
6 | 
7 | - Please read "The Python Coding Style Guidelines" in
8 |   `PEP-8 <http://www.python.org/dev/peps/pep-0008/>`_ and follow them
9 |   to a reasonable degree.
10 | - Perhaps use a tool like pylint to help you in improving your coding
11 |   style.
12 | 
13 | Dependency Checking
14 | -------------------
15 | 
16 | NeuroTools supplies a dependency checker to gracefully disable functionality
17 | for which the required dependencies are not installed, without affecting other
18 | functionality. If you would like to use these facilities to add a dependency
19 | on a non-standard python module, please see the ``check_dependency`` function or
20 | the various modules that use it for examples.
21 | 
22 | Testing
23 | -------
24 | 
25 | If you are looking to contribute functionality, please also provide
26 | tests for that functionality. See the ``test`` folder for examples of testing.
27 | 
28 | The complete NeuroTools test suite can be run as follows:
29 | 
30 | ::
31 | 
32 |     $ nosetests --with-coverage --cover-package=NeuroTools
33 | 
34 | The python "nose" unit-testing suite is a dependency here. On ubuntu/debian,
35 | install it using:
36 | 
37 | ::
38 | 
39 |     $ sudo apt-get install python-nose python-coverage
40 | 
41 | Tests for each package can be run individually, for example for the
42 | ``NeuroTools.stgen`` sub-package:
43 | 
44 | ::
45 | 
46 |     $ python test/test_stgen.py
47 | 
48 | License of contributed code
49 | ---------------------------
50 | 
51 | All of NeuroTools is under the GPL, with the copyright remaining with the
52 | contributors. In practice, since there may be many contributions to a given
53 | module, we have until now just attributed a global NeuroTools copyright to all
54 | contributors equally. It is therefore understood that all copyright holders
55 | would need to vote on any licensing changes.
Please contact existing core developers if you 56 | would like to submit code, but have a problem with submitting code to 57 | NeuroTools under these license terms, or if you would like to be added to the 58 | list of NeuroTools copyright holders. 59 | 60 | doc strings 61 | ----------- 62 | 63 | Here are described some guidelines for formatting and content of Python 64 | "doc strings" in NeuroTools 65 | 66 | Function doc strings 67 | ~~~~~~~~~~~~~~~~~~~~ 68 | 69 | Here is a template for providing doc strings for functions: 70 | 71 | :: 72 | 73 | def function(...): 74 | """ 75 | A description of what is the function doing and returning. Few lines 76 | 77 | Inputs: 78 | param1 - what is param 1 79 | param2 - what is param 2 80 | .... 81 | 82 | Examples: 83 | >> One or several examples showing how it can be used 84 | 85 | See also: 86 | functions related to, used by the function. 87 | """ 88 | 89 | Module doc strings 90 | ~~~~~~~~~~~~~~~~~~ 91 | 92 | The top level module doc string should inform the user about the 93 | structure of the module without having to look at the code The module 94 | docstring is the doc you can have if, for example, you do, in ipython 95 | (or with help() in python): 96 | 97 | :: 98 | 99 | >> import numpy 100 | >> ?numpy 101 | >> import scipy.optimize 102 | >> ?scipy.optimize 103 | 104 | As you can see, it's a summary of a module organization, its functions, 105 | and so on... It's a text that should be inserted at the top of the file, 106 | and I suggest the following template: 107 | 108 | :: 109 | 110 | """ 111 | == Name.of.the.module == 112 | 113 | A short text describing how the module is useful, crucial, powerful 114 | 115 | Classes 116 | --------- 117 | 118 | Object1 - The first type of object that can be created with the module 119 | Object2 - One other type of object and what we can do with 120 | 121 | 122 | Functions 123 | --------- 124 | 125 | function1 - A key function of the module, and what we can do with 126 | function2 - Same as before... 127 | 128 | """ 129 | 130 | An example for the stgen module would be as follows: 131 | 132 | :: 133 | 134 | """ 135 | NeuroTools.stgen 136 | ================ 137 | 138 | A collection of tools for stochastic process generation. 139 | 140 | Classes 141 | --------- 142 | 143 | StGen - Object to generate stochastic processes of various kinds 144 | and return them as SpikeTrain or AnalogSignal objects. 145 | 146 | Functions 147 | ----------- 148 | 149 | shotnoise_fromspikes - Convolves the provided spike train with shot 150 | decaying exponential. 151 | gamma_hazard - Compute the hazard function for a gamma 152 | process with parameters a,b. 153 | """ 154 | 155 | -------------------------------------------------------------------------------- /doc/hdf5.rst: -------------------------------------------------------------------------------- 1 | HDF Tools 2 | ========= 3 | 4 | 5 | HDF5 Overview 6 | ------------- 7 | 8 | - A `quick start `_ / `more complete `_ 9 | - Hierarchical Data Format version 5, known as HDF5, is a general purpose file format that can store every kind of data in a hierarchical manner. An HDF5 file could be described by a tree of nodes. Each node could contain other nodes or leaves and described with attributes. Leaves under a specified node corresponds to stored data and could also have several attributes to describe them. This format is well adapted to scientific data because it allows to store images, tables, matrices and more with efficient I/O and compression options. 
That's why it has been chosen as a common file format to share data between NeuroTools developers. `more details `_
10 | 
11 | HDF5 and Python
12 | ---------------
13 | 
14 | HDF5 is not only a file format: it also comes with a high-level C++ API for storing data in that format. It is not the only way to write HDF5 files, however. The PyTables Python library makes it easier to build HDF5 files in an object-oriented (OO) manner: it supports navigating the node tree, setting node attributes, and creating tables or arrays and inserting data into them. This library is mature and gives access to many practical functions, but it was necessary to extend it with new classes and functions to adapt it to the optical imaging and neural network simulation contexts.
15 | 
16 | Why develop an extension?
17 | -------------------------
18 | 
19 | Developing an extension in an OO manner keeps the code reusable and easily extendable, and avoids polluting the PyTables code base. Moreover, it allows us to create new object classes that are easy to distinguish from the base objects and that provide new functionality adapted to each object. Such an extension requires that users and developers who want to work on it have some knowledge of the underlying PyTables library, but fortunately a class (cf. the next paragraph) has been developed to simplify access to the PyTables classes and functions. It will replace the old hdf5tools and hdf5api in the NeuroTools folder.
20 | 
21 | Extension Overview
22 | ------------------
23 | 
24 | This extension is divided into 5 Python files:
25 | 
26 | #. FileExtension: the base class of the extension; inherits from the pytables File class. It can be considered a high-level library that gives access to all pytables objects and to the new extension objects.
27 | #. AdvancedTable: adds new functionality to the pytables Table class, such as sorts, insertions and queries on tables.
28 | #. Images: useful for converting common image file formats to the VLArray pytables class and for displaying images embedded in HDF5.
29 | #. Movie: encapsulates several common-format images located in a filesystem folder into a VLArray and displays them.
30 | #. Spikes, divided into 3 distinct classes:
31 | 
32 |    #. SpikeList: stores spike lists like [(reltime1, id1), ... (reltimeM, idM)]
33 |    #. SpikingNeurons: stores spike lists like [[reltime1, id1, ..., idM],...,[reltimeK, ..., idN]]
34 |    #. NeuronIndex: stores a neuron index defined like [[id1, posX1, posY1, posZ1], ..., [idN, posXN, posYN, posZN]] or [[id1, posX1, posY1, layer1], ..., [idN, posXN, posYN, layerM]]
35 | 
--------------------------------------------------------------------------------
/doc/io.rst:
--------------------------------------------------------------------------------
1 | The ``io`` module
2 | =================
3 | 
4 | This module is the gateway for all input/output operations in
5 | NeuroTools, especially regarding the interface with PyNN. It is in
6 | this module that you will find the Standard Formats currently supported
7 | by NeuroTools (text and pickle; hdf5 is planned for the near future), and if
8 | you want to implement your own ``load`` function, reading your own
9 | particular data structure for the ``signals`` module, you should read
10 | the documentation below.
11 | 
12 | File Handlers
13 | -------------
14 | 
15 | A File handler is an abstract object that has to implement some
16 | key methods in order to be able to read and write NeuroTools objects
17 | from a file (given in the constructor).
The idea is that if you want
18 | to design your own File handler, you just have to implement the
19 | abstract methods of the objects, i.e. ``write()`` (write an object
20 | to a file), ``read_spikes(params)`` (read data and return a SpikeList
21 | object) and ``read_analogs(params, type)`` (read data and return an
22 | analog signal according to ``type``). To get a better understanding, just
23 | have a look at the two file handlers implemented in NeuroTools, i.e.
24 | ``StandardTextFile`` and ``StandardPickleFile``.
25 | 
26 | The ``StandardTextFile`` class
27 | ------------------------------
28 | 
29 | Creation
30 | ~~~~~~~~
31 | 
32 | The ``StandardTextFile`` class inherits from ``FileHandler``.
33 | 
34 | Here is an example of creating a simple ``StandardTextFile`` object::
35 | 
36 |     >>> textfile = StandardTextFile("test.txt")
37 | 
38 | Usage
39 | ~~~~~
40 | 
41 | If you want to read a data file with spikes, and return a SpikeList object::
42 | 
43 |     >>> spklist = textfile.read_spikes({'id_list' : range(11), 't_start' : 0, 't_stop' : 1000})
44 | 
45 | More generally, the ``read_spikes()`` method of an object inheriting from ``FileHandler`` accepts arguments
46 | like id_list, t_start, t_stop, which are the ones used in the SpikeList constructor. Note that the ``StandardTextFile`` object has private functions, for internal use only, that check/read
47 | information in the headers of the text file. See io.py for a deeper understanding of its behavior.
48 | 
49 | A similar syntax is used for reading an analog signal object::
50 | 
51 |     >>> aslist = textfile.read_analogs('vm', {'id_list' : range(11)})
52 | 
53 | In the case of an ``AnalogSignal``, the ``type`` argument, chosen from [vm, conductance, current], specifies
54 | the type of the NeuroTools object returned by the function: either a ``VmList``, a ``ConductanceList`` or
55 | a ``CurrentList``.
56 | 
57 | If you want to save an object to a file, just do::
58 | 
59 |     >>> textfile.write(object)
60 | 
61 | object can be a SpikeList or any kind of AnalogSignalList.
62 | 
63 | 
64 | The ``StandardPickleFile`` class
65 | --------------------------------
66 | 
67 | Creation
68 | ~~~~~~~~
69 | 
70 | The ``StandardPickleFile`` class also inherits from ``FileHandler``.
71 | 
72 | Here is an example of creating a simple ``StandardPickleFile`` object::
73 | 
74 |     >>> pickfile = StandardPickleFile("test.pick")
75 | 
76 | Usage
77 | ~~~~~
78 | 
79 | If you want to read a data file with spikes, and return a SpikeList object::
80 | 
81 |     >>> spklist = pickfile.read_spikes({'id_list' : range(11), 't_start' : 0, 't_stop' : 1000})
82 | 
83 | Since this object inherits from ``FileHandler``, its behavior is *exactly* the
84 | same as that of the ``StandardTextFile``. A similar syntax is used for reading an analog signal object::
85 | 
86 |     >>> aslist = pickfile.read_analogs('vm', {'id_list' : range(11)})
87 | 
88 | In the case of an ``AnalogSignal``, the ``type`` argument, chosen from [vm, conductance, current], specifies
89 | the type of the NeuroTools object returned by the function: either a ``VmList``, a ``ConductanceList`` or
90 | a ``CurrentList``.
91 | 
92 | If you want to save an object to a file, just do::
93 | 
94 |     >>> pickfile.write(object)
95 | 
96 | object can be a SpikeList or any kind of AnalogSignalList.
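
Putting the pieces together, a complete round trip with the pickle handler
might look like this (a sketch only: it assumes that ``spikes`` is an existing
``SpikeList`` object, e.g. one produced by ``NeuroTools.stgen``, and that the
handler classes are imported from ``NeuroTools.io``)::

    >>> from NeuroTools.io import StandardPickleFile
    >>> outfile = StandardPickleFile("spikes.pick")
    >>> outfile.write(spikes)                       # save the SpikeList to disk
    >>> infile = StandardPickleFile("spikes.pick")
    >>> reloaded = infile.read_spikes({'id_list' : range(11), 't_start' : 0, 't_stop' : 1000})

Because both handlers implement the same ``FileHandler`` interface, swapping
``StandardPickleFile`` for ``StandardTextFile`` in the snippet above is the
only change needed to switch the storage format.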
97 | 98 | The ``YOURStandardFormatFile`` class 99 | ------------------------------------ 100 | 101 | As said before, you just have to implement some key functions, as defined in the ``FileHandler``:: 102 | 103 | >>> class YOURStandardFormatFile(FileHandler): 104 | def write(self, object): 105 | ### Your method here ######### 106 | ### Should save an object to the file self.filename### 107 | 108 | def read_spikes(self, params): 109 | ### Your method here, reading data from self.filename ######### 110 | ### Should read data and return a SpikeList object constrained by params 111 | from NeuroTools import signals 112 | return signals.SpikeList(...) 113 | 114 | def read_analogs(self, type, params): 115 | if not type in ["vm", "current", "conductance"]: 116 | raise Exception("The type %s is not available for the Analogs Signals" %type) 117 | ### Your method here reading data from self.filename ######### 118 | from NeuroTools import signals 119 | if type == 'vm': 120 | return signals.VmList(...) 121 | elif type == 'conductance': 122 | return signals.ConductanceList(...) 123 | elif type == 'current': 124 | return signals.CurrentList(...) 125 | 126 | Data Handlers 127 | ------------- 128 | 129 | The data handler is just a file input/output manager. This is just an interface for ``load/save`` functions. 130 | This is this kind of object which is created by all the ``load`` methods of NeuroTools.signals 131 | 132 | The ``DataHandler`` class 133 | ------------------------- 134 | 135 | You should not have to deal directly with this class, because this is just an interface. See io.py for more details 136 | 137 | Autodoc 138 | ------- 139 | 140 | .. automodule:: NeuroTools.io 141 | :members: 142 | :undoc-members: 143 | 144 | -------------------------------------------------------------------------------- /doc/management.rst: -------------------------------------------------------------------------------- 1 | Module maintainers 2 | ================== 3 | 4 | Ideally, each module should have approx. two maintainers. 5 | 6 | +-------------------+-----------------------------------------------------------------------------------------------------------------------------------+ 7 | | ``signals``: | Pierre Yger, `LaurentPerrinet `_, Jens Kremkow (others volunteers are more than welcome) | 8 | +-------------------+-----------------------------------------------------------------------------------------------------------------------------------+ 9 | | ``parameters``: | Andrew Davison | 10 | +-------------------+-----------------------------------------------------------------------------------------------------------------------------------+ 11 | | ``io``: | Pierre Yger and ... 
| 12 | +-------------------+-----------------------------------------------------------------------------------------------------------------------------------+ 13 | | ``plotting``: | Daniel Bruederle | 14 | +-------------------+-----------------------------------------------------------------------------------------------------------------------------------+ 15 | | ``analysis``: | Eilif Muller | 16 | +-------------------+-----------------------------------------------------------------------------------------------------------------------------------+ 17 | | ``utilities``: | Daniel Bruederle | 18 | +-------------------+-----------------------------------------------------------------------------------------------------------------------------------+ 19 | | ``stgen``: | Eilif Muller, Michael Schmuker | 20 | +-------------------+-----------------------------------------------------------------------------------------------------------------------------------+ 21 | | ``examples``: | `LaurentPerrinet `_ | 22 | +-------------------+-----------------------------------------------------------------------------------------------------------------------------------+ 23 | | ``spike2``: | Jens Kremkow | 24 | +-------------------+-----------------------------------------------------------------------------------------------------------------------------------+ 25 | | ``datastore``: | Andrew Davison | 26 | +-------------------+-----------------------------------------------------------------------------------------------------------------------------------+ 27 | 28 | Maintainers are responsible for 29 | 30 | - identifying missing functionality/tests/docs in their module 31 | - writing tickets using `GitHub's ticket system `_) 32 | - finding volunteers to write the code, tests and documentation 33 | 34 | **Documentation manager**: Pierre Yger 35 | 36 | +-------------------------------+--------------------+ 37 | | advertising: | Laurent Perrinet | 38 | +-------------------------------+--------------------+ 39 | 40 | *Responsibilities*: combining the documentation from the different modules into 41 | a coherent whole, ensuring consistent formatting, spell-checking, etc. 42 | 43 | **Testing manager**: Andrew Davison 44 | 45 | *Responsibilities*: 46 | 47 | - identifying areas of the codebase that are not well tested, and notifying 48 | the module maintainers 49 | - organizing/collecting tests that use several of the NeuroTools modules, i.e. 50 | integration tests rather than unit tests. 51 | 52 | **Packaging manager**: Eric Mueller 53 | 54 | *Responsibilities*: 55 | 56 | - setup.py, i.e. making sure that distutils installation works. 57 | - uploading packages to PyPI, software.incf.org, etc. 58 | - evaluating whether easy\_install would work for NeuroTools 59 | -------------------------------------------------------------------------------- /doc/spike2.rst: -------------------------------------------------------------------------------- 1 | The ``spike2`` module 2 | ========================= 3 | 4 | The ``spike2`` module offers an easy way for reading data from CED's Spike2 Son files into the NeuroTools environment. 
5 | 
6 | The main interaction with Spike2 files is provided by the SON Library, which was written by::
7 | 
8 |     Antonio Gonzalez
9 |     Department of Neuroscience
10 |     Karolinska Institutet
11 |     Antonio.Gonzalez at cantab.net
12 |     http://www.neuro.ki.se/broberger/
13 | 
14 | The ``spike2`` module integrates the SON library such that the loading function directly returns ``NeuroTools.signals`` objects.
15 | This makes it very easy to apply the same analysis routines to simulated and to real experimental data.
16 | 
17 | Loading channels
18 | ----------------
19 | 
20 | Usually SON files contain data from multiple channels. The channels can contain analog signals, like membrane potential traces, or discrete data, like markers. The ``spike2`` module knows which type of channel you are loading and thus returns the appropriate ``NeuroTools.signals`` object.
21 | 
22 | Here are some examples of how to load the data. For the following example we assume that the SON file contains the membrane potential trace in channel 1 and the markers in channel 2, while channel 3 holds data that we don't want to use.
23 | 
24 | To load the ``spike2`` module you could do the following::
25 | 
26 |     >>> import NeuroTools.spike2.spike2channels as spike2
27 | 
28 | To load all channels in the file, just use the load function::
29 | 
30 |     >>> all_channels = spike2.load(filename)
31 |     >>> all_channels.keys()
32 |     [1, 2, 3]
33 | 
34 | Since we don't need channel 3, we can use the channels parameter to specify which channels to load::
35 | 
36 |     >>> just_needed_channels = spike2.load(filename, channels=[1, 2])
37 |     >>> just_needed_channels.keys()
38 |     [1, 2]
39 | 
40 | What objects are returned?::
41 | 
42 |     >>> type(just_needed_channels[1])
43 |     <class 'NeuroTools.spike2.spike2channels.Adc'>
44 |     >>> type(just_needed_channels[2])
45 |     <class 'NeuroTools.spike2.spike2channels.Marker'>
46 | 
47 | What can you do with them? The Adc channel type, for instance, inherits from ``signals.AnalogSignal``, so you can compute an event-triggered average on the marker times (please note that the marker times are in seconds; we need them in milliseconds, hence the factor of 1000)::
48 | 
49 |     >>> vm = just_needed_channels[1]
50 |     >>> marker = just_needed_channels[2]
51 |     >>> vm.event_triggered_average(marker.times*1000., display=True)
52 | 
53 | In case your vm channel contains multiple stimulus conditions that you do not want to average over, but rather keep separate, you can use the slice_by_events function::
54 | 
55 |     >>> vm_slices = vm.slice_by_events(marker.times*1000., t_min=100, t_max=1000)
56 | 
57 | For further examples of what to do with ``NeuroTools.signals`` objects, please refer to the documentation for the ``signals`` module.
58 | 
59 | Autodoc
60 | -------
61 | 
62 | .. automodule:: NeuroTools.spike2
63 |     :members:
64 |     :undoc-members:
65 | 
66 | 
--------------------------------------------------------------------------------
/doc/stgen.rst:
--------------------------------------------------------------------------------
1 | The ``stgen`` module
2 | ====================
3 | 
4 | This module offers various stochastic generators for point processes that can
5 | be used as spike trains.
6 | 
7 | The StGen class
8 | ---------------
9 | 
10 | Creation
11 | ~~~~~~~~
12 | 
13 | Create an ``StGen`` object:
14 | 
15 | >>> st_gen = StGen()
16 | 
17 | This will initialize the stochastic generator and by default try to create a
18 | numpy random generator instance.
19 | 
20 | Optionally, you can also pass a random number generator instance to the
21 | constructor:
22 | 
23 | >>> import numpy
24 | >>> st_gen = StGen(rng = numpy.random.RandomState())
25 | 
26 | You can also use random number generators from the GNU Scientific Library (GSL):
27 | 
28 | >>> from pygsl.rng import rng
29 | >>> st_gen_gsl = StGen(rng = rng())
30 | 
31 | If you want to seed the random number generator with a specific seed, you can
32 | do so in the constructor:
33 | 
34 | >>> st_gen = StGen(seed = 1234567)
35 | 
36 | Alternatively, you can re-seed the random number generator after the StGen
37 | object has been created:
38 | 
39 | >>> st_gen.seed(7654321)
40 | 
41 | Poisson-distributed point processes
42 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
43 | 
44 | Using the ``StGen`` object, you can generate Poisson point processes, i.e.
45 | spike trains whose inter-spike intervals are exponentially distributed:
46 | 
47 | >>> st_gen = StGen()
48 | >>> spike_train_poisson = st_gen.poisson_generator(rate = 100.,
49 |                                                    tstart = 0.,
50 |                                                    tstop = 2500.)
51 | 
52 | This generates a NeuroTools.SpikeTrain object, containing spike times with an
53 | approximate rate of 100 Hz and a duration of 2.5 seconds.
54 | 
55 | If you want a numpy array of spike times rather than a SpikeTrain object,
56 | specify the array keyword:
57 | 
58 | >>> spike_train_array = st_gen.poisson_generator(rate = 100., array = True)
59 | 
60 | Dynamic Poisson-distributed point processes
61 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
62 | 
63 | StGen can also generate inhomogeneous Poisson processes, i.e. spike trains with
64 | dynamically changing rates:
65 | 
66 | >>> spike_train_dyn = st_gen.poissondyn_generator(rate = [50., 80., 30.],
67 |                                                   t = [0., 1000., 2000.],
68 |                                                   tstop = 2500.,
69 |                                                   array = False)
70 | 
71 | This will generate a SpikeTrain object containing spike times with an
72 | approximate rate of 50 Hz for one second, followed by 80 Hz for one second, and
73 | finally 30 Hz for half a second. Note that t[0] is used as tstart, and that all
74 | times are given in milliseconds.
75 | 
76 | Autodoc
77 | -------
78 | 
79 | .. automodule:: NeuroTools.stgen
80 |     :members:
81 |     :undoc-members:
82 | 
83 | 
--------------------------------------------------------------------------------
/doc/testdocs.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | """
3 | Script to run doctests.
4 | 
5 | Usage: testdocs.py [options] FILE
6 | 
7 | Options:
8 |   -h, --help  show this help message and exit
9 |   --strict    Use the original doctest output checker, not the more lax local
10 |               version.
11 | """
12 | 
13 | import doctest
14 | import sys
15 | import os.path
16 | from optparse import OptionParser
17 | 
18 | optionflags = doctest.IGNORE_EXCEPTION_DETAIL+doctest.NORMALIZE_WHITESPACE
19 | 
20 | class MyOutputChecker(doctest.OutputChecker):
21 |     """
22 |     Modification of doctest.OutputChecker to work better with the
23 |     users' manual:
24 |     * Often, we don't want to have the output that is printed
25 |       by Python in the manual, as it just takes up space without adding any
26 |       useful information.
27 |     """
28 | 
29 |     def __init__(self, strict):
30 |         self.strict = strict
31 | 
32 |     def check_output(self, want, got, optionflags):
33 |         if self.strict:
34 |             return doctest.OutputChecker.check_output(self, want, got, optionflags)
35 |         else:
36 |             if want == '':
37 |                 return True
38 |             else:
39 |                 try:
40 |                     int(want) and int(got)
41 |                     return True
42 |                 except ValueError:
43 |                     return doctest.OutputChecker.check_output(self, want, got, optionflags)
44 | 
45 | def mytestfile(filename, globs, optionflags, strict=False):
46 |     parser = doctest.DocTestParser()
47 |     if globs is None:
48 |         globs = {}
49 |     else:
50 |         globs = globs.copy()
51 |     name = os.path.basename(filename)
52 | 
53 |     runner = doctest.DocTestRunner(checker=MyOutputChecker(strict=strict), optionflags=optionflags)
54 |     # Read the file, convert it to a test, and run it.
55 |     s = open(filename).read()
56 |     test = parser.get_doctest(s, globs, name, filename, 0)
57 |     runner.run(test)
58 |     runner.summarize()
59 |     return runner.failures, runner.tries
60 | 
61 | # ==============================================================================
62 | if __name__ == "__main__":
63 | 
64 |     # Process command line
65 |     parser = OptionParser(usage="usage: %prog [options] FILE")
66 |     parser.add_option("--strict", action="store_true", dest="strict", default=False,
67 |                       help="Use the original doctest output checker, not the more lax local version.")
68 | 
69 |     (options, args) = parser.parse_args()
70 |     if len(args) == 1:
71 |         docfile = args[0]
72 |     else:
73 |         parser.print_help()
74 |         sys.exit(1)
75 | 
76 |     # Run test
77 |     exec("from NeuroTools.%s import *" % docfile.replace('.rst',''))
78 |     mytestfile(docfile, globs=globals(), optionflags=optionflags, strict=options.strict)
79 | 
80 |     sys.exit(0)
81 | 
--------------------------------------------------------------------------------
/examples/matlab_vs_python/acml_rng_mt.py:
--------------------------------------------------------------------------------
1 | 
2 | import numpy
3 | from scipy import weave
4 | 
5 | force = 0
6 | 
7 | def rng_init(seed=1234):
8 |     """ Initializes the ACML Mersenne Twister random number generator
9 |     with 1 seed
10 | 
11 |     (The seed initializes an ACML NAG generator to generate
12 |     the 624 seeds required for the ACML Mersenne Twister algo)
13 |     """
14 | 
15 |     global _state
16 | 
17 |     code = """
18 | 
19 |     int seeds[1];
20 |     seeds[0] = seed;
21 |     int lseeds = 1;
22 |     int lstate = 633;
23 |     int info;
24 | 
25 |     drandinitialize(3,1,seeds,&lseeds,_state,&lstate,&info);
26 | 
27 |     return_val = info;
28 | 
29 |     """
30 | 
31 |     info = weave.inline(code, ['_state','seed'],
32 |                         headers = ['<acml.h>'],
33 |                         libraries = ["acml","acml_mv"],
34 |                         include_dirs = ['/opt/acml4.0.1/gfortran64/include'],
35 |                         library_dirs = ['/opt/acml4.0.1/gfortran64/lib'],
36 |                         force = force)
37 | 
38 |     if info!=0:
39 |         raise Exception, "Error initializing ACML rng."
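# Example usage of this module (a sketch; it assumes ACML 4.0.1 and
# scipy.weave are installed at the paths hard-coded above):
#
#   >>> import acml_rng_mt as rng
#   >>> rng.rng_init(seed=42)                         # re-seed the generator
#   >>> x = rng.normal(loc=0.0, scale=1.0, size=(1000,))
#   >>> u = rng.uniform(low=0.0, high=1.0, size=(1000,))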
40 | 
41 | 
42 | 
43 | # Storage for rng state
44 | _state = numpy.zeros(633,numpy.intc)
45 | rng_init()
46 | 
47 | def normal(loc=0.0,scale=1.0,size=None):
48 |     """ ACML optimized version of numpy.random.normal """
49 |     global _state
50 | 
51 |     if size==None:
52 |         size=(1,)
53 | 
54 |     x = numpy.zeros(size,float)
55 |     n = len(x.flat)
56 | 
57 |     code = """
58 | 
59 |     int info;
60 |     int n_ = n;
61 |     double mean_ = loc;
62 |     double var_ = scale;
63 | 
64 |     drandgaussian_(&n,&mean_,&var_,_state,x,&info);
65 | 
66 |     """
67 | 
68 |     weave.inline(code, ['x','scale','loc','n','_state'],
69 |                  headers = ['<acml.h>'],
70 |                  libraries = ["acml","acml_mv"],
71 |                  include_dirs = ['/opt/acml4.0.1/gfortran64/include'],
72 |                  library_dirs = ['/opt/acml4.0.1/gfortran64/lib'],
73 |                  force = force)
74 | 
75 | 
76 |     return x
77 | 
78 | def uniform(low=0.0,high=1.0,size=None):
79 |     """ ACML optimized version of numpy.random.uniform """
80 |     global _state
81 | 
82 |     if size==None:
83 |         size=(1,)
84 | 
85 |     if low>=high:
86 |         raise ValueError, "Error: require low<high."
87 | 
88 |     x = numpy.zeros(size,float)
89 |     n = len(x.flat)
90 | 
91 |     code = """
92 | 
93 |     int info;
94 |     int n_ = n;
95 |     double low_ = low;
96 |     double high_ = high;
97 | 
98 |     dranduniform_(&n,&low_,&high_,_state,x,&info);
99 | 
100 |     """
101 | 
102 |     weave.inline(code, ['x','low','high','n','_state'],
103 |                  headers = ['<acml.h>'],
104 |                  libraries = ["acml", "acml_mv"],
105 |                  include_dirs = ['/opt/acml4.0.1/gfortran64/include'],
106 |                  library_dirs = ['/opt/acml4.0.1/gfortran64/lib'],
107 |                  force = force)
108 | 
109 | 
110 |     return x
111 | 
112 | 
113 | 
114 | def exponential(scale=1.0,size=None):
115 |     """ ACML optimized version of numpy.random.exponential """
116 |     global _state
117 | 
118 |     if size==None:
119 |         size=(1,)
120 | 
121 |     x = numpy.zeros(size,float)
122 |     n = len(x.flat)
123 | 
124 |     code = """
125 | 
126 |     int info;
127 |     int n_ = n;
128 |     double scale_ = scale;
129 | 
130 |     drandexponential_(&n,&scale_,_state,x,&info);
131 | 
132 |     """
133 | 
134 |     weave.inline(code, ['x','scale','n','_state'],
135 |                  headers = ['<acml.h>'],
136 |                  libraries = ["acml","acml_mv"],
137 |                  include_dirs = ['/opt/acml4.0.1/gfortran64/include'],
138 |                  library_dirs = ['/opt/acml4.0.1/gfortran64/lib'],
139 |                  force = force)
140 | 
141 | 
142 |     return x
143 | 
144 | 
145 | def randint(low,high=None,size=None):
146 |     """ ACML optimized version of numpy.random.randint """
147 | 
148 |     global _state
149 | 
150 |     if size==None:
151 |         size=(1,)
152 | 
153 |     if high==None:
154 |         high = low
155 |         low = 0
156 | 
157 |     x = numpy.zeros(size,numpy.intc)
158 |     n = len(x.flat)
159 | 
160 |     code = """
161 | 
162 |     int info;
163 |     int n_ = n;
164 |     int low_ = low;
165 |     int high_ = high;
166 | 
167 |     dranddiscreteuniform_(&n,&low_,&high_,_state,x,&info);
168 | 
169 |     """
170 | 
171 |     weave.inline(code, ['x','low','high','n','_state'],
172 |                  headers = ['<acml.h>'],
173 |                  libraries = ["acml","acml_mv"],
174 |                  include_dirs = ['/opt/acml4.0.1/gfortran64/include'],
175 |                  library_dirs = ['/opt/acml4.0.1/gfortran64/lib'],
176 |                  force = force)
177 | 
178 | 
179 |     return x
180 | 
181 | 
--------------------------------------------------------------------------------
/examples/matlab_vs_python/append_test.py:
--------------------------------------------------------------------------------
1 | import time
2 | import numpy
3 | from numpy import array, zeros, resize, append   # bare names used below
4 | n = 100000
5 | 
6 | print 'n=',n
7 | # fill a list
8 | 
9 | 
10 | t1 = time.time()
11 | 
12 | l = []
13 | 
14 | for i in xrange(n):
15 |     l.append(1.0)
16 | 
17 | t2 = time.time()
18 | 
19 | print 'list append: ',t2-t1
20 | 
21 | 
22 | # fill an array by concatenation
23 | 
24 | t1 = time.time()
25 | 
26 | a = array([])
27 | for i in xrange(n):
28 |     a = numpy.concatenate((a,[1.0]))
29 | 
30 | t2 = time.time()
31 | 
32 | print 'numpy.concatenate ',t2-t1
33 | 
34 | 
35 | # fill an array by resizing self
36 | 
37 | t1 = time.time()
38 | 
39 | a = array([])
40 | for i in xrange(n):
41
| a.resize(len(a)+1) 42 | a[-1] = 1.0 43 | 44 | t2 = time.time() 45 | 46 | print 'a.resize', t2-t1 47 | 48 | 49 | # fill an array by resize function 50 | 51 | t1 = time.time() 52 | 53 | a = array([]) 54 | for i in xrange(n): 55 | a = numpy.resize(a,len(a)+1) 56 | a[-1] = 1.0 57 | 58 | t2 = time.time() 59 | 60 | print 'a=numpy.resize(a,n) ', t2-t1 61 | 62 | 63 | # fill an array without resize 64 | 65 | t1 = time.time() 66 | 67 | a = zeros(n) 68 | for i in xrange(n): 69 | a[i] = 1.0 70 | 71 | t2 = time.time() 72 | 73 | print 'fill array of known size ',t2-t1 74 | 75 | # fill an array with append 76 | 77 | t1 = time.time() 78 | 79 | a = array([]) 80 | for i in xrange(n): 81 | a = append(a,1.0) 82 | 83 | t2 = time.time() 84 | 85 | print 'numpy.append ', t2-t1 86 | 87 | -------------------------------------------------------------------------------- /examples/matlab_vs_python/smallnet.m: -------------------------------------------------------------------------------- 1 | clear all; 2 | 3 | tic; 4 | 5 | % Created by Eugene M. Izhikevich, 2003 Modified by S. Fusi 2007 6 | % Excitatory neurons Inhibitory neurons 7 | Ne=1000; Ni=4; 8 | Je=250/Ne; % synaptic couplings 9 | Ji=0; 10 | 11 | reset=0; refr=2.5; % reset depolarization (mV) and refractory period (ms) 12 | connectivity=[heaviside(rand(Ne+Ni,Ne)-.9),heaviside(rand(Ne+Ni,Ni)-.9)]; % connectivity matrix 13 | S=[Je*rand(Ne+Ni,Ne),-Ji*rand(Ne+Ni,Ni)]; % synaptic couplings (mV) 14 | S=S.*connectivity; 15 | 16 | lambda=5; % leakage (mV/ms) 17 | dt=.05; sdt=sqrt(dt); 18 | mb=3; sb=4; % statistics of the background external current 19 | mue=mb; sigmae=sb; 20 | sigmai=0.0 21 | 22 | v=0.*ones(Ne+Ni,1); % Initial values of v 23 | r=0.*zeros(Ne+Ni,1); % refractory periods 24 | firings=[]; % spike timings 25 | 26 | disp(sprintf('mu(nu=5Hz)=%f\n',mb+Ne*Je*.015-lambda)); 27 | disp(sprintf('mu(nu=100Hz)=%f\n',mb+Ne*Je*.1-lambda)); 28 | 29 | toc; 30 | tic; 31 | 32 | duration=400; % total duration of the simulation (ms) 33 | ti=1; 34 | for t=1:dt:duration 35 | if(t>150) % activate a strong external input 36 | mue=6.5; 37 | sigmae=7.5; 38 | end 39 | 40 | if(t>300) % restore the initial statistics of the external current 41 | mue=mb; 42 | sigmae=sb; 43 | end 44 | 45 | Iext=[sigmae*randn(Ne,1);sigmai*randn(Ni,1)]; % external input 46 | fired=find(v>=20); % indices of spikes 47 | if ~isempty(fired) 48 | firings=[firings; t+0*fired, fired]; 49 | v(fired)=reset; 50 | r(fired)=refr; 51 | aux=v-dt*(lambda-mue)+sum(S(:,fired),2)+sdt*Iext; 52 | else 53 | aux=v-dt*(lambda-mue)+sdt*Iext; 54 | end 55 | nr=find(r<=0); % neurons which are not in the refractory period 56 | v(nr)=heaviside(aux(nr)).*aux(nr); 57 | nr=find(r>0); 58 | r(nr)=r(nr)-dt; 59 | vt(ti)=v(1); tt(ti)=t; 60 | ti=ti+1; 61 | end; 62 | 63 | toc; 64 | 65 | % ------------------------------------------------------------------------- 66 | % Plot everything 67 | % ------------------------------------------------------------------------- 68 | 69 | disp('Plotting...') 70 | 71 | tic; 72 | 73 | figure(1); 74 | 75 | subplot(3,1,1); 76 | vt(find(vt>=20))=65; 77 | plot(tt,vt); 78 | ylabel('V-V_{rest} (mV)'); 79 | 80 | subplot(3,1,2); 81 | if Ne<100 82 | ie=find(firings(:,2)<=Ne); 83 | else 84 | ie=find(firings(:,2)<=100); 85 | end 86 | plot(firings(ie,1),firings(ie,2),'.'); 87 | set(gca,'xlim',[0 duration]); 88 | ylabel('Neuron index'); 89 | 90 | subplot(3,1,3); 91 | k=1; 92 | binsize=1; 93 | for tbins=1:binsize:(duration-binsize) 94 | m(k)=size(find((firings(:,2)<=Ne) & (firings(:,1)>=tbins) & 
(firings(:,1)40))/Ne/binsize*1000.); 100 | line([0 duration],[ma ma]); 101 | h=line([20 20],[0 50]); set(h,'linestyle','--'); 102 | 103 | ylabel('Hz'); 104 | xlabel('time (ms)'); 105 | 106 | toc; 107 | -------------------------------------------------------------------------------- /examples/matlab_vs_python/smallnet.py: -------------------------------------------------------------------------------- 1 | # Created by Eugene M. Izhikevich, 2003 Modified by S. Fusi 2007 2 | # Ported to Python by Eilif Muller, 2008. 3 | # 4 | # Notes: 5 | # 6 | # Requires matplotlib,ipython,numpy>=1.0.3 7 | # On a debian/ubuntu based system: 8 | # $ apt-get install python-matplotlib python-numpy ipython 9 | # 10 | # Start ipython with threaded plotting support: 11 | # $ ipython -pylab 12 | # 13 | # At the resulting prompt, run the file by: 14 | # In [1]: execfile('smallnet.py') 15 | # 16 | # In [2]: myplot() 17 | 18 | # Modules required 19 | import numpy 20 | import numpy.random as random 21 | from itertools import repeat 22 | # Bug fix for numpy version 1.0.4 23 | numpy.lib.function_base.any = numpy.any 24 | 25 | # For measuring performance 26 | import time 27 | t1 = time.time() 28 | 29 | # Excitatory and inhibitory neuron counts 30 | Ne = 1000 31 | Ni = 4 32 | N = Ne+Ni 33 | 34 | # Synaptic couplings 35 | Je = 250.0/Ne 36 | Ji = 0.0 37 | 38 | # reset depolarization (mV) 39 | reset = 0.0 40 | 41 | # refractory period (ms) 42 | refr = 2.5 43 | 44 | # Synaptic couplings (mV) 45 | S = numpy.zeros((N,N)) 46 | S[:,:Ne] = Je*random.uniform(size=(N,Ne)) 47 | S[:,:Ni] = -Ji*random.uniform(size=(N,Ni)) 48 | 49 | # Connectivity 50 | S[:,:Ne][random.uniform(size=(N,Ne))-0.9<=0.0]=0.0 51 | S[:,Ne:][random.uniform(size=(N,Ni))-0.9<=0.0]=0.0 52 | 53 | # (mV/ms) (lambda is a python keyword) 54 | leak = 5.0 55 | dt = 0.05 56 | sdt = numpy.sqrt(dt) 57 | 58 | # Statistics of the background external current 59 | mb = 3.0; sb = 4.0 60 | mue = mb; sigmae=sb 61 | 62 | sigmai = 0.0 63 | 64 | # State variable v, initial value of 0 65 | v = numpy.zeros(N) 66 | 67 | # Refractory period state variable 68 | r = numpy.zeros(N) 69 | 70 | # Spike timings in a list 71 | firings = [] 72 | 73 | print 'mu(nu=5Hz)=%f' % (mb+Ne*Je*.015-leak,) 74 | print 'mu(nu=100Hz)=%f' % (mb+Ne*Je*.1-leak,) 75 | 76 | # total duration of the simulation (ms) 77 | duration = 400.0 78 | t = numpy.arange(0.0,400.0,dt) 79 | vt = numpy.zeros_like(t) 80 | 81 | t2 = time.time() 82 | print 'Elapsed time is ', str(t2-t1), ' seconds.' 83 | 84 | t1 = time.time() 85 | 86 | for i,ti in enumerate(t): 87 | # time for a strong external input 88 | if ti>150.0: 89 | mue = 6.5 90 | sigmae = 7.5 91 | 92 | # time to restore the initial statistics of the external current 93 | if ti>300.0: 94 | mue = mb 95 | sigmae = sb 96 | 97 | Iext = random.normal(size=N) 98 | Iext[:Ne]*=sigmae 99 | Iext[Ne:]*=sigmai 100 | 101 | # Which neurons fired? 
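    # The update below is a forward-Euler (Euler-Maruyama) step of the leaky
    # membrane equation: v <- v - dt*(leak - mue) + synaptic input + sdt*Iext.
    # The noise is scaled by sdt = sqrt(dt) rather than dt because a Gaussian
    # increment of a diffusion process must have variance, not amplitude,
    # proportional to the time step.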
102 | fired = numpy.nonzero(v>=20.0)[0] 103 | 104 | if len(fired)>0: 105 | 106 | # Save mean firing rate of the excitatory neurons 107 | 108 | v[fired] = reset 109 | r[fired] = refr 110 | 111 | # Append spikes to the spike list 112 | firings+=zip(fired,repeat(ti)) 113 | 114 | aux = v-dt*(leak-mue)+numpy.sum(S[:,fired],1)+sdt*Iext 115 | 116 | else: 117 | aux = v-dt*(leak-mue)+sdt*Iext; 118 | 119 | # Neurons not in the refractory period 120 | nr = numpy.nonzero(r<=0)[0] 121 | # Bound voltages above 0.0 122 | v[nr] = numpy.where(aux[nr]>=0.0,aux[nr],0.0) 123 | 124 | # Progress refractory variable 125 | nr = numpy.nonzero(r>0)[0] 126 | r[nr]-=dt 127 | 128 | # record the voltage trace of the zeroeth neuron 129 | vt[i] = v[0] 130 | 131 | 132 | t2 = time.time() 133 | print 'Elapsed time is ', str(t2-t1), ' seconds.' 134 | 135 | # ------------------------------------------------------------------------- 136 | # Plot everything 137 | # ------------------------------------------------------------------------- 138 | 139 | 140 | def myplot(): 141 | 142 | global firings 143 | 144 | t1 = time.time() 145 | 146 | figure() 147 | 148 | # Membrane potential trace of the zeroeth neuron 149 | subplot(3,1,1) 150 | 151 | vt[vt>=20.0]=65.0 152 | plot(t,vt) 153 | ylabel(r'$V-V_{rest}\ \left[\rm{mV}\right]$') 154 | 155 | # Raster plot of the spikes of the network 156 | subplot(3,1,2) 157 | myfirings = array(firings) 158 | myfirings_100 = myfirings[myfirings[:,0]=1.0.3 7 | # On a debian/ubuntu based system: 8 | # $ apt-get install python-matplotlib python-numpy ipython 9 | # 10 | # Start ipython with threaded plotting support: 11 | # $ ipython -pylab 12 | # 13 | # At the resulting prompt, run the file by: 14 | # In [1]: execfile('smallnet.py') 15 | 16 | # Modules required 17 | import numpy 18 | import numpy.random as random 19 | import acml_rng 20 | # Bug fix for numpy version 1.0.4 21 | numpy.lib.function_base.any = numpy.any 22 | 23 | # For measuring performance 24 | import time 25 | t1 = time.time() 26 | 27 | # Excitatory and inhibitory neuron counts 28 | Ne = 1000 29 | Ni = 4 30 | N = Ne+Ni 31 | 32 | # Synaptic couplings 33 | Je = 250.0/Ne 34 | Ji = 0.0 35 | 36 | # reset depolarization (mV) 37 | reset = 0.0 38 | 39 | # refractory period (ms) 40 | refr = 2.5 41 | 42 | # Synaptic couplings (mV) 43 | S = numpy.zeros((N,N)) 44 | S[:,:Ne] = Je*random.uniform(size=(N,Ne)) 45 | S[:,:Ni] = -Ji*random.uniform(size=(N,Ni)) 46 | 47 | # Connectivity 48 | S[:,:Ne][random.uniform(size=(N,Ne))-0.9<=0.0]=0.0 49 | S[:,Ne:][random.uniform(size=(N,Ni))-0.9<=0.0]=0.0 50 | 51 | # (mV/ms) (lambda is a python keyword) 52 | leak = 5.0 53 | dt = 0.05 54 | sdt = numpy.sqrt(dt) 55 | 56 | # Statistics of the background external current 57 | mb = 3.0; sb = 4.0 58 | mue = mb; sigmae=sb 59 | 60 | sigmai = 0.0 61 | 62 | # State variable v, initial value of 0 63 | v = numpy.zeros(N) 64 | 65 | # Refractory period state variable 66 | r = numpy.zeros(N) 67 | 68 | # Spike timings in a list 69 | firings = [] 70 | spikes = [[]]*N 71 | 72 | 73 | print 'mu(nu=5Hz)=%f' % (mb+Ne*Je*.015-leak,) 74 | print 'mu(nu=100Hz)=%f' % (mb+Ne*Je*.1-leak,) 75 | 76 | # total duration of the simulation (ms) 77 | duration = 400.0 78 | t = numpy.arange(0.0,400.0,dt) 79 | vt = numpy.zeros_like(t) 80 | 81 | t2 = time.time() 82 | print 'Elapsed time is ', str(t2-t1), ' seconds.' 
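# Optional sanity check for the ACML stream (a sketch, not in the original
# script; it assumes acml_rng.normal(1.0, n), the call form used in the loop
# below, returns n standard-normal samples). Uncomment to run:
#
# z = acml_rng.normal(1.0, 100000)
# assert abs(z.mean()) < 0.02
# assert abs(z.std() - 1.0) < 0.02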
83 | 84 | t1 = time.time() 85 | 86 | for i,ti in enumerate(t): 87 | # time for a strong external input 88 | if ti>150.0: 89 | mue = 6.5 90 | sigmae = 7.5 91 | 92 | # time to restore the initial statistics of the external current 93 | if ti>300.0: 94 | mue = mb 95 | sigmae = sb 96 | 97 | Iext = acml_rng.normal(1.0,N) 98 | Iext[:Ne]*=sigmae 99 | Iext[Ne:]*=sigmai 100 | 101 | # Which neurons fired? 102 | fired = numpy.nonzero(v>=20.0)[0] 103 | 104 | if len(fired)>0: 105 | 106 | # Save mean firing rate of the excitatory neurons 107 | 108 | v[fired] = reset 109 | r[fired] = refr 110 | 111 | # Append spikes to the spike list 112 | for n in fired: 113 | # Spikes are stored by a (neuron, time) pair 114 | # For easy plotting later 115 | firings.append((n,ti)) 116 | # and as a list for each neuron 117 | spikes[n].append(ti) 118 | 119 | aux = v-dt*(leak-mue)+numpy.sum(S[:,fired],1)+sdt*Iext 120 | 121 | else: 122 | aux = v-dt*(leak-mue)+sdt*Iext; 123 | 124 | # Neurons not in the refractory period 125 | nr = numpy.nonzero(r<=0)[0] 126 | # Bound voltages above 0.0 127 | v[nr] = numpy.where(aux[nr]>=0.0,aux[nr],0.0) 128 | 129 | # Progress refractory variable 130 | nr = numpy.nonzero(r>0)[0] 131 | r[nr]-=dt 132 | 133 | # record the voltage trace of the zeroeth neuron 134 | vt[i] = v[0] 135 | 136 | 137 | t2 = time.time() 138 | print 'Elapsed time is ', str(t2-t1), ' seconds.' 139 | 140 | # ------------------------------------------------------------------------- 141 | # Plot everything 142 | # ------------------------------------------------------------------------- 143 | 144 | 145 | def myplot(): 146 | 147 | global firings 148 | 149 | t1 = time.time() 150 | 151 | figure() 152 | 153 | # Membrane potential trace of the zeroeth neuron 154 | subplot(3,1,1) 155 | 156 | vt[vt>=20.0]=65.0 157 | plot(t,vt) 158 | ylabel(r'$V-V_{rest}\ \left[\rm{mV}\right]$') 159 | 160 | # Raster plot of the spikes of the network 161 | subplot(3,1,2) 162 | myfirings = array(firings) 163 | myfirings_100 = myfirings[myfirings[:,0]=1.0.3 7 | # On a debian/ubuntu based system: 8 | # $ apt-get install python-matplotlib python-numpy ipython 9 | # 10 | # Start ipython with threaded plotting support: 11 | # $ ipython -pylab 12 | # 13 | # At the resulting prompt, run the file by: 14 | # In [1]: execfile('smallnet.py') 15 | 16 | # Modules required 17 | import numpy 18 | import numpy.random as random 19 | # Bug fix for numpy version 1.0.4 for numpy.histogram 20 | numpy.lib.function_base.any = numpy.any 21 | 22 | # For inline C optimization 23 | from scipy import weave 24 | 25 | # For measuring performance 26 | import time 27 | t1 = time.time() 28 | 29 | # Excitatory and inhibitory neuron counts 30 | Ne = 1000 31 | Ni = 4 32 | N = Ne+Ni 33 | 34 | # Synaptic couplings 35 | Je = 250.0/Ne 36 | Ji = 0.0 37 | 38 | # Synaptic couplings (mV) 39 | #S = numpy.zeros((N,N)) 40 | #S[:,:Ne] = Je*random.uniform(size=(N,Ne)) 41 | #S[:,Ne:] = -Ji*random.uniform(size=(N,Ni)) 42 | 43 | # Connectivity 44 | #S[:,:Ne][random.uniform(size=(N,Ne))-0.9<=0.0]=0.0 45 | #S[:,Ne:][random.uniform(size=(N,Ni))-0.9<=0.0]=0.0 46 | 47 | # 10% Connectivity 48 | targets = [] 49 | weights = [] 50 | # excitatory 51 | for i in xrange(Ne): 52 | targets.append(random.permutation(numpy.arange(N))[:random.poisson(N*0.1)]) 53 | weights.append(Je*ones_like(targets[i])) 54 | # inhibitory 55 | for i in xrange(Ne,N): 56 | targets.append(random.permutation(numpy.arange(N))[:random.poisson(N*0.1)]) 57 | weights.append(-Ji*ones_like(targets[i])) 58 | 59 | # Statistics of the background 
external current 60 | #mb = 3.0; sb = 4.0 61 | #mue = mb; sigmae=sb 62 | 63 | #sigmai = 0.0 64 | 65 | # State variable v, initial value of 0 66 | v = numpy.zeros(N) 67 | 68 | # Refractory period state variable 69 | r = numpy.zeros(N) 70 | 71 | # storage for intermediate calculations 72 | I = numpy.zeros(N) 73 | Isyn = numpy.zeros(N) 74 | 75 | # Spike timings in a list 76 | spikes = [[] for x in xrange(N)] 77 | 78 | #print 'mu(nu=5Hz)=%f' % (mb+Ne*Je*.015-leak,) 79 | #print 'mu(nu=100Hz)=%f' % (mb+Ne*Je*.1-leak,) 80 | 81 | # total duration of the simulation (ms) 82 | dt = 0.05 83 | duration = 400.0 84 | t = numpy.arange(0.0,duration,dt) 85 | vt = numpy.zeros_like(t) 86 | 87 | # This is inline C code 88 | c_code = """ 89 | 90 | const double mb = 3.0; 91 | const double sb = 4.0; 92 | double mue = mb; 93 | double sigmae = sb; 94 | double sigmai = 0.0; 95 | //double dt = 0.05; // (ms) 96 | double leak = 5.0; // (mV/ms) 97 | double sdt = sqrt(dt); 98 | double reset = 0.0; //(mV) 99 | double refr = 2.5; //(ms) 100 | double threshold = 20.0; //(mv) 101 | double Je = 250.0/Ne; 102 | double Ji = 0.0; 103 | 104 | 105 | int i,j,k; 106 | 107 | // GSL random number generation setup 108 | 109 | const gsl_rng_type * T_gsl; 110 | gsl_rng * r_gsl; 111 | 112 | gsl_rng_env_setup(); 113 | 114 | T_gsl = gsl_rng_default; 115 | r_gsl = gsl_rng_alloc (T_gsl); 116 | 117 | py::list l; 118 | 119 | for(i=0;i150.0) { 122 | mue = 6.5; 123 | sigmae = 7.5; 124 | } 125 | 126 | // time to restore the initial statistics of the external input 127 | if (t(i)>300.0) { 128 | mue = mb; 129 | sigmae = sb; 130 | } 131 | 132 | // Noise plus synaptic input from last step 133 | for (j=0;j=threshold) { 148 | l = py::list((PyObject*)(spikes[j])); 149 | l.append(t(i)); 150 | for (k=0;k=0.0 ? v(j) : 0.0; 161 | } 162 | else { 163 | r(j)-=dt; 164 | } 165 | } 166 | 167 | vt(i) = v(0); 168 | 169 | } 170 | 171 | // Clean-up the GSL random number generator 172 | gsl_rng_free (r_gsl); 173 | 174 | l = py::list((PyObject*)spikes[0]); 175 | l.append(3.0); 176 | 177 | 178 | """ 179 | 180 | t2 = time.time() 181 | print 'Elapsed time is ', str(t2-t1), ' seconds.' 182 | 183 | t1 = time.time() 184 | 185 | 186 | weave.inline(c_code, ['v','r','t','vt','dt', 187 | 'spikes','I','Isyn','Ne','Ni','N','targets','weights'], 188 | type_converters=weave.converters.blitz, 189 | headers = ["", ""], 190 | libraries = ["gsl","gslcblas"]) 191 | 192 | t2 = time.time() 193 | print 'Elapsed time is ', str(t2-t1), ' seconds.' 194 | 195 | 196 | def myplot(): 197 | 198 | global firings 199 | 200 | t1 = time.time() 201 | 202 | figure() 203 | 204 | # Membrane potential trace of the zeroeth neuron 205 | subplot(3,1,1) 206 | 207 | vt[vt>=20.0]=65.0 208 | plot(t,vt) 209 | ylabel(r'$V-V_{rest}\ \left[\rm{mV}\right]$') 210 | 211 | # Raster plot of the spikes of the network 212 | subplot(3,1,2) 213 | myfirings = array(firings) 214 | myfirings_100 = myfirings[myfirings[:,0] ipcluster -n 2 & 20 | 21 | This will start two computation engines and a controller in the background. 22 | 23 | When the controller is up, run this script: 24 | 25 | > python parameter_search_example.py 26 | 27 | Calculation will start, and after a few seconds (depending on your hardware) 28 | it will save a png graphics file that illustrates the firing rate of a neuron 29 | as a function of the input rate and the weight of the synapse to your current 30 | directory. 31 | 32 | """ 33 | def model_network(param_dict): 34 | """ 35 | This model network consists of a spike source and a neuron (IF_curr_alpha). 
36 | The spike rate of the source and the weight can be specified in the 37 | param_dict. Returns the number of spikes fired during 1000 ms of simulation. 38 | 39 | Parameters: 40 | param_dict - dictionary with keys 41 | rate - the rate of the spike source (spikes/second) 42 | weight - weight of the connection source -> neuron 43 | 44 | Returns: 45 | dictionary with keys: 46 | source_rate - the rate of the spike source 47 | weight - weight of the connection source -> neuron 48 | neuron_rate - spike rate of the neuron 49 | """ 50 | #set up the network 51 | import pyNN.neuron as sim 52 | sim.setup(dt = 0.01, min_delay = 1., max_delay = 1., debug = False, 53 | quit_on_end = False) 54 | 55 | weight = param_dict['weight'] 56 | 57 | import NeuroTools.stgen as stgen 58 | stgen = stgen.StGen() 59 | spiketrain = stgen.poisson_generator(param_dict['rate'], t_stop = 1000.) 60 | source = sim.Population(1, sim.SpikeSourceArray, 61 | {'spike_times':spiketrain.spike_times}) 62 | neuron = sim.Population(1, sim.IF_cond_alpha) 63 | sim.Projection(source, neuron, 64 | method = sim.OneToOneConnector(weights = param_dict['weight'], 65 | delays = 1.)) 66 | 67 | #set recorder 68 | neuron.record() 69 | neuron.record_v() 70 | 71 | #run the simulation 72 | sim.run(1001.) 73 | sim.end() 74 | 75 | # count the number of spikes 76 | spikes = neuron.getSpikes() 77 | numspikes = len(spikes) 78 | 79 | # return everything, including the input parameters 80 | return {'source_rate':param_dict['rate'], 81 | 'weight':param_dict['weight'], 82 | 'neuron_rate':numspikes } 83 | 84 | def make_param_dict_list(): 85 | """ 86 | create a list of parameter dictionaries for the model network. 87 | """ 88 | # there is certainly a way to do this with NeuroTools. 89 | import numpy 90 | rates = numpy.linspace(start = 10., stop = 100., num = 5) 91 | weights = numpy.linspace(start = 0.1, stop = 1.0, num = 5) 92 | from NeuroTools.parameters import ParameterSet, ParameterSpace, ParameterRange 93 | params = ParameterSpace(ParameterSet({'rate':ParameterRange(rates), 94 | 'weight': ParameterRange(weights)})) 95 | dictlist = [p.as_dict() for p in params.iter_inner()] 96 | return dictlist 97 | 98 | def show_results(result): 99 | """ 100 | visualizes the result of the parameter search. 101 | Parameters: 102 | result - list of result dictionaries. 103 | """ 104 | import numpy 105 | rates = numpy.sort([r['source_rate'] for r in result]) 106 | weights = numpy.sort([r['weight'] for r in result]) 107 | neuron_rates = numpy.zeros((len(rates), len(weights))) 108 | for r_i in range(len(rates)): 109 | for w_i in range(len(weights)): 110 | neuron_rates[r_i, w_i] = [r['neuron_rate'] for r in result 111 | if (r['source_rate'] == rates[r_i]) 112 | and (r['weight'] == weights[w_i])][0] 113 | import NeuroTools.plotting as plotting 114 | pylab = plotting.get_display(True) 115 | pylab.rcParams.update(plotting.pylab_params()) 116 | subplot = pylab.imshow(neuron_rates, 117 | interpolation = 'nearest', 118 | origin = 'lower') 119 | plotting.set_labels(subplot.get_axes(), 120 | xlabel = 'rate', 121 | ylabel = 'weight') 122 | pylab.colorbar() 123 | # could add fancy xticks and yticks here 124 | import tempfile, os 125 | (fd, figfilename) = tempfile.mkstemp(prefix = 'parameter_search_result', 126 | suffix = '.png', 127 | dir = os.getcwd()) 128 | pylab.gcf().savefig(figfilename) 129 | 130 | def run_it(): 131 | """" 132 | Run the parameter search. 
133 | """ 134 | import NeuroTools.optimize.parameter_search as ps 135 | 136 | # search the parameter space 137 | param_dict_list = make_param_dict_list() 138 | srchr = ps.IPythonParameterSearcher( 139 | dict_iterable = param_dict_list, 140 | func = model_network) 141 | srchr.search() 142 | outlist = srchr.harvest() 143 | 144 | #return the results 145 | return outlist 146 | 147 | if __name__ == '__main__': 148 | results = run_it() 149 | show_results(results) 150 | -------------------------------------------------------------------------------- /examples/parameters/LGN_receptive_field.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | This script demonstrates a simple use of the parameter class in NeuroTools. 4 | 5 | It plots the spatio-teporal recptive field defined in Cai et al. (1997, J. 6 | of Neurophysiology) for different sets of parameters. These are defined 7 | around a central value, while the paramter_scan() function allows to explore 8 | different ranges. 9 | 10 | """ 11 | 12 | import numpy, pylab 13 | from NeuroTools.parameters import ParameterSpace 14 | from NeuroTools.parameters import ParameterRange 15 | from NeuroTools.sandbox import make_name 16 | # 17 | lw = 2. 18 | fs = 16 19 | 20 | def parameter_scan(p=None): 21 | if p is None: 22 | p = default_parameters() 23 | 24 | #p.K2 = ParameterRange([0.7,0.0]) 25 | # p.dt = ParameterRange([1.,6.77,10.,1000.]) # ms 26 | #p.td = ParameterRange([0.])#,6.,20.]) 27 | #p.t1 = ParameterRange([0.]) # ms 28 | #p.t2 = ParameterRange([0.]) # ms 29 | #g = 0.8 30 | #p.c1 = ParameterRange([0.14*g]) 31 | #p.c2 = ParameterRange([0.12*g]) 32 | p.sigma_c = ParameterRange([0.1, 0.4, 0.8]) 33 | p.K2 = ParameterRange([0., 0.7, 1.4]) 34 | create_figure() 35 | 36 | for experiment in p.iter_inner(): 37 | name = make_name(experiment, p.range_keys()) 38 | print name 39 | plot_receptive_field(p=experiment, label=name) 40 | 41 | def create_figure(): 42 | pylab.close('all') 43 | pylab.rcParams['figure.figsize'] = (10.0, 5.5) 44 | pylab.rcParams['figure.facecolor'] = 'white' 45 | pylab.figure() 46 | 47 | def plot_receptive_field(p=None, d2=True, label=None): 48 | if (p == None): 49 | p = default_parameters() 50 | if label == '': 51 | label = 'default' 52 | # temporal 53 | t = numpy.arange(0., p.duration, p.dt) 54 | 55 | # spatial 56 | x = numpy.arange(-p.size/2.+p.degree_per_pixel/2., p.size/2.+p.degree_per_pixel/2., p.degree_per_pixel) 57 | 58 | xm, tm = numpy.meshgrid(x, t) 59 | kernel = RF(xm, tm, p) 60 | 61 | kernel_shape = kernel[:, :].shape 62 | kernel_fra = kernel_shape[1]/6. 63 | ticks_st = numpy.arange(-kernel_shape[1]/2., kernel_shape[1]/2.+1, kernel_fra) 64 | ticks_st += kernel_shape[1]/2. 65 | ticks_st -= 0.5 66 | min_max_label = kernel_shape[1]*p.degree_per_pixel 67 | min_max_label /= 2. 
68 | ticks_st_label = numpy.arange(-min_max_label, min_max_label+1, min_max_label/3.).astype('int') 69 | degree_ticks_label = [str(i) for i in ticks_st_label] 70 | degree_ticks = ticks_st 71 | 72 | time_ticks = numpy.arange(kernel.shape[0])*p.dt 73 | time_vector = time_ticks 74 | time_ticks_label = numpy.arange(0., p.duration, p.dt) 75 | time_ticks_label = numpy.array([str(i) for i in time_ticks_label.round(0).astype('int')]) 76 | time_steps = int(30/p.dt) 77 | 78 | label_degree = 'degree' 79 | label_time = 'time (ms)' 80 | label_norm = 'normalized amplitude' 81 | norm_act_lim = (-0.4, 1.1) 82 | if not d2: 83 | # spatial 1d 84 | time = int(34./p.dt) 85 | pylab.subplot(1, 2, 1) 86 | pylab.plot(kernel[time, :], label='34 ms', linewidth=lw)#,label=label) 87 | pylab.xticks(degree_ticks,degree_ticks_label, fontsize=fs) 88 | pylab.yticks(fontsize=fs) 89 | pylab.xlabel(label_degree, fontsize=fs) 90 | pylab.ylabel(label_norm, fontsize=fs) 91 | pylab.ylim(norm_act_lim) 92 | pylab.title('spatial at 34 ms', fontsize=fs) 93 | 94 | # temporal 1d 95 | pylab.subplot(1, 2, 2) 96 | pylab.plot(time_vector, kernel[:, 44], label='x: 0.0, y: 0.0', linewidth=lw)#,label=label) 97 | pylab.xticks(time_ticks[0::time_steps], time_ticks_label[0::time_steps].tolist(), fontsize=fs) 98 | pylab.yticks(fontsize=fs) 99 | pylab.xlabel(label_time, fontsize=fs) 100 | # pylab.ylabel(label_norm,fontsize=fs) 101 | pylab.ylim(norm_act_lim) 102 | pylab.legend() 103 | pylab.title('temporal at 0.0 degree', fontsize=fs) 104 | 105 | else: 106 | pylab.close('all') 107 | pylab.rcParams['figure.figsize'] = (6.5, 6.0) 108 | pylab.rcParams['figure.facecolor'] = 'white' 109 | pylab.figure() 110 | 111 | pylab.pcolor(kernel.T, vmin=norm_act_lim[0], vmax=norm_act_lim[1], shading='flat') 112 | pylab.xticks(time_ticks[0::time_steps], time_ticks_label[0::time_steps].tolist(), fontsize=fs) 113 | pylab.yticks(degree_ticks, degree_ticks_label, fontsize=fs) 114 | pylab.xlabel(label_time, fontsize=fs) 115 | pylab.ylabel(label_degree, fontsize=fs) 116 | pylab.ylim(0, kernel.shape[1]) 117 | cb = pylab.colorbar() 118 | cb.set_label(label_norm, fontsize=fs) 119 | pylab.show() 120 | 121 | def default_parameters(): 122 | # receptive field parameters 123 | p = ParameterSpace({}) 124 | p.Ac = 1. 125 | p.As = 1./3. 126 | p.K1 = 1.05 127 | p.K2 = 0.7 128 | p.c1 = 0.14 129 | p.c2 = 0.12 130 | p.n1 = 7. 131 | p.n2 = 8. 132 | p.t1 = -6. # ms 133 | p.t2 = -6. # ms 134 | p.td = 6.0 # time differece between ON-OFF 135 | p.sigma_c = 0.3#0.4 # Allen 2006 # sigma of center gauss degree 136 | p.sigma_s = 1.5#p.sigma_c*1.5+0.4 # Allen 2006 # sigma of surround gauss degree 137 | 138 | # Kernel dims 139 | # temporal 140 | p.size = 10. # degree 141 | p.degree_per_pixel = 0.1133 142 | # spatial 143 | p.dt = 1.0 # ms 144 | p.duration = 200. 
# ms 145 | return p 146 | 147 | 148 | def RF(x, t, p): 149 | kernel = Fc(x, p)*Gc(t, p)-Fs(x, p)*Gs(t, p) 150 | kernel /= kernel.max() 151 | return kernel 152 | 153 | def Fc(x, p): 154 | return F(x, p.Ac, p.sigma_c) 155 | 156 | def Fs(x, p): 157 | return F(x, p.As, p.sigma_s) 158 | 159 | def Gc(t, p): 160 | return G(t, p) 161 | 162 | def Gs(t, p): 163 | return G(t-p.td, p) 164 | 165 | def F(x, A, sigma): 166 | return A*numpy.exp((-x**2)/(2*sigma**2)) 167 | 168 | def G(t, p): 169 | p1 = p.K1*(((p.c1*(t-p.t1))**p.n1)*numpy.exp((-p.c1*(t-p.t1))))/((p.n1**p.n1)*numpy.exp(-p.n1)) 170 | p2 = p.K2*(((p.c2*(t-p.t2))**p.n2)*numpy.exp((-p.c2*(t-p.t2))))/((p.n2**p.n2)*numpy.exp(-p.n2)) 171 | p3 = p1-p2 172 | return p3 173 | 174 | if __name__ == '__main__': 175 | parameter_scan() 176 | -------------------------------------------------------------------------------- /examples/parameters/exported_model_parameters.tex: -------------------------------------------------------------------------------- 1 | \hspace*{0.0cm} a : 23\\ 2 | \hspace*{0.0cm} b: \\ 3 | \hspace*{1.5cm} s : [1, 2, 3]\\ 4 | \hspace*{1.5cm} w : see Table~\ref{w} \\ 5 | \hspace*{0.0cm} \\ 6 | \hspace*{0.0cm} name : first experiment\\ 7 | \hspace*{0.0cm} orientation : see Table~\ref{orientation} \\ 8 | \hspace*{0.0cm} simulator : pyNN -------------------------------------------------------------------------------- /examples/parameters/model_parameters.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NeuralEnsemble/NeuroTools/d0da16cce71253e849729e9a6bf5fa04744bbcfc/examples/parameters/model_parameters.pdf -------------------------------------------------------------------------------- /examples/parameters/model_parameters.tex: -------------------------------------------------------------------------------- 1 | \documentclass[a4paper, 12pt]{article} 2 | 3 | \begin{document} 4 | 5 | \section{Parameters} 6 | \input{exported_model_parameters} 7 | \section{Parameter tables} 8 | \input{tables_exported_model_parameters} 9 | 10 | \end{document} -------------------------------------------------------------------------------- /examples/parameters/parameter_to_latex.py: -------------------------------------------------------------------------------- 1 | from NeuroTools.parameters import ParameterSet 2 | from NeuroTools.parameters import ParameterRange 3 | from NeuroTools.parameters import ParameterTable 4 | 5 | 6 | 7 | p = ParameterSet({}) 8 | p.orientation = ParameterTable(""" 9 | # RS RSa RSb FS FSa FSb 10 | RS 15. 15. 12 13 12 1 11 | FS 15. 15. 9 2 2 9 12 | LGN 0. 12. 0 2 0 0 13 | V2 3. 2. 2 2 9 7 14 | M1 0. 0. 8 2 2 1 15 | """) 16 | p.a = 23 17 | p.b = ParameterSet({}) 18 | p.b.s = ParameterRange([1,2,3]) 19 | p.b.w = ParameterTable(""" 20 | # RS FS 21 | all 1. 0. 22 | none -1. -2. 
23 | """) 24 | p.name = 'first experiment' 25 | p.simulator = 'pyNN' 26 | 27 | 28 | 29 | p.export('exported_model_parameters.tex',format='latex',**{'indent':1.5}) 30 | -------------------------------------------------------------------------------- /examples/parameters/tables_exported_model_parameters.tex: -------------------------------------------------------------------------------- 1 | \begin{table*}[ht] 2 | \begin{center} 3 | \begin{tabular}{ccc} 4 | & FS & RS \\ 5 | all & 0.0 & 1.0 \\ 6 | none & -2.0 & -1.0 \\ 7 | \end{tabular} 8 | \end{center} 9 | \caption{w} 10 | \label{w} 11 | \end{table*} 12 | \begin{table*}[ht] 13 | \begin{center} 14 | \begin{tabular}{ccccccc} 15 | & FSa & FSb & FS & RS & RSa & RSb \\ 16 | LGN & 0.0 & 0.0 & 2.0 & 0.0 & 12.0 & 0.0 \\ 17 | V2 & 9.0 & 7.0 & 2.0 & 3.0 & 2.0 & 2.0 \\ 18 | FS & 2.0 & 9.0 & 2.0 & 15.0 & 15.0 & 9.0 \\ 19 | M1 & 2.0 & 1.0 & 2.0 & 0.0 & 0.0 & 8.0 \\ 20 | RS & 12.0 & 1.0 & 13.0 & 15.0 & 15.0 & 12.0 \\ 21 | \end{tabular} 22 | \end{center} 23 | \caption{orientation} 24 | \label{orientation} 25 | \end{table*} 26 | -------------------------------------------------------------------------------- /examples/parameters/validation/data/conf1.yaml: -------------------------------------------------------------------------------- 1 | # user info 2 | username: joe 3 | email: joe@example.com 4 | 5 | # recipes 6 | recipes: 7 | all: /somewhere1/file1.xml 8 | specific: /somewhere2/file2.xml 9 | -------------------------------------------------------------------------------- /examples/parameters/validation/data/conf2.yaml: -------------------------------------------------------------------------------- 1 | # user info 2 | username: joe 3 | email: 1 4 | 5 | # recipes 6 | recipes: 7 | all: /somewhere1/file1.xml 8 | 9 | -------------------------------------------------------------------------------- /examples/parameters/validation/data/conf_schema1.yaml: -------------------------------------------------------------------------------- 1 | # user info 2 | username: '' 3 | email: '' 4 | 5 | # recipes 6 | recipes: 7 | all: '' 8 | specific: '' 9 | -------------------------------------------------------------------------------- /examples/parameters/validation/simple_validation.py: -------------------------------------------------------------------------------- 1 | import NeuroTools.parameters.validators 2 | import NeuroTools.parameters as ntp 3 | 4 | schema = ntp.ParameterSchema("./data/conf_schema1.yaml") 5 | p1 = ntp.ParameterSet("./data/conf1.yaml") 6 | p2 = ntp.ParameterSet("./data/conf2.yaml") 7 | 8 | v = ntp.CongruencyValidator() 9 | 10 | print v.validate(p1,schema) 11 | 12 | # Find first error 13 | try: 14 | v.validate(p2,schema) 15 | except Exception as e: 16 | print e 17 | # correct it 18 | p2[e.path] = '' 19 | 20 | # Find second error 21 | try: 22 | v.validate(p2,schema) 23 | except Exception as e: 24 | # 25 | print e 26 | 27 | -------------------------------------------------------------------------------- /examples/retina/benchmark_linear.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf8 -*- 3 | """ 4 | benchmark_linear.py 5 | ========================= 6 | 7 | Having fixed the background noise we are just studying now how different 8 | signal to noise ratios are integrated by the neurons. 9 | 10 | Laurent Perrinet, INCM, CNRS 11 | 12 | $ Id $ 13 | 14 | """ 15 | 16 | import os, sys, numpy, pylab, shelve 17 | 18 | N, N_exp = 1000, 6 19 | t_smooth = 100. 
# width (in ms) of the integration window 20 | from NeuroTools.parameters import ParameterSpace, ParameterRange 21 | snr = 2.0 * numpy.linspace(0.1,2.0,N_exp) 22 | p = ParameterSpace({'snr' : ParameterRange(list(snr))}) 23 | 24 | 25 | name = sys.argv[0].split('.')[0] # name of the current script withpout the '.py' part 26 | results = shelve.open('results/mat-' + name) 27 | try: 28 | 29 | temporal_ON = results['temporal_ON'] 30 | temporal_OFF = results['temporal_OFF'] 31 | lower_edges = results['lower_edges'] 32 | params = results['params'] 33 | #if (params == retina.params): raise('Parameters have changed') 34 | 35 | except: 36 | from retina import * 37 | retina = Retina(N) 38 | retina.params['amplitude'] = numpy.ones(retina.params['amplitude'].shape) 39 | 40 | 41 | # calculates the dimension of the parameter space 42 | results_dim, results_label = p.parameter_space_dimension_labels() 43 | 44 | # creates results array with size of parameter space dimension 45 | data = retina.run(retina.params,verbose=False) 46 | lower_edges = data['out_ON_DATA'].time_axis(t_smooth) 47 | N_smooth = len(lower_edges) 48 | 49 | temporal_ON, temporal_OFF = [],[] 50 | import progressbar # see http://projects.scipy.org/pipermail/scipy-dev/2008-January/008200.html 51 | pbar=progressbar.ProgressBar(widgets=[name, " ", progressbar.Percentage(), ' ', 52 | progressbar.Bar(), ' ', progressbar.ETA()], maxval=N_exp) 53 | for i_exp,experiment in enumerate(p.iter_inner()): 54 | params = retina.params 55 | params.update(experiment) # updates what changed in the dictionary 56 | # simulate the experiment and get its data 57 | data = retina.run(params,verbose=False) 58 | # calculating the index in the parameter space 59 | index = p.parameter_space_index(experiment) 60 | # put the data at the right position in the results array 61 | temporal_ON.append(sum(data['out_ON_DATA'].firing_rate(t_smooth))/N)# 62 | temporal_OFF.append(sum(data['out_OFF_DATA'].firing_rate(t_smooth))/N)# 63 | pbar.update(i_exp) 64 | 65 | 66 | results['lower_edges'] = lower_edges 67 | results['temporal_ON'] = temporal_ON 68 | results['temporal_OFF'] = temporal_OFF 69 | results['params'] = retina.params 70 | 71 | pbar.finish() 72 | 73 | results.close() 74 | 75 | ############################################################################### 76 | 77 | from NeuroTools.plotting import pylab_params 78 | 79 | """ Figure 1 80 | 81 | Prints to a figure the mean firing rate for the output (ON and OFF) as a function 82 | of the different parameter values. It's similar to a CRF function. 
83 | 
84 | """
85 | #pylab.close('all')
86 | #pylab.rcParams.update(pylab_params(fig_width_pt = 497.9/2., ratio = 1.))
87 | pylab.figure(1)
88 | #fmax = numpy.max([numpy.max(temporal_OFF[:]),numpy.max(temporal_ON[:])])
89 | 
90 | pylab.subplot(211)
91 | for i_exp in range(N_exp):
92 |     pylab.plot(lower_edges[:-1] + t_smooth/2, temporal_ON[i_exp],
93 |                label= '%5.2f' % p.snr._values[i_exp])
94 | pylab.xticks( numpy.round(numpy.linspace(0, params.simtime, 5),0) )
95 | pylab.ylabel('ON Firing frequency (Hz)')
96 | pylab.axis([0, params.simtime, 0.0, numpy.max(temporal_ON[:])])
97 | pylab.legend(loc='upper right')
98 | pylab.subplot(212)
99 | for i_exp in range(N_exp):
100 |     pylab.plot(lower_edges[:-1] + t_smooth/2, temporal_OFF[i_exp])
101 | pylab.xticks( numpy.round(numpy.linspace(0, params.simtime, 5),0) )
102 | pylab.ylabel('OFF Firing frequency (Hz)')
103 | pylab.xlabel('time (ms)')
104 | pylab.axis([0, params.simtime, 0.0, numpy.max(temporal_OFF[:]) ])
105 | 
106 | 
107 | if 0:
108 |     pylab.ion()
109 |     #pylab.show()
110 | else:
111 |     pylab.savefig('results/fig-' + name + '.pdf')
112 |     pylab.savefig('results/fig-' + name + '.png', dpi = 300)
113 | 
114 | 
-------------------------------------------------------------------------------- /examples/retina/benchmark_noise.py: --------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | #-*- coding: utf8 -*-
3 | """
4 | benchmark_noise.py
5 | ==================
6 | 
7 | Just studying how different background noise currents are integrated by the
8 | neurons on the retinal fibers.
9 | 
10 | Illustrates how one may use parameters to explore one set of parameters and
11 | compute a CRF function. See benchmark_linear to store time-varying values.
12 | 
13 | Laurent Perrinet, INCM, CNRS
14 | 
15 | $ Id $
16 | 
17 | """
18 | 
19 | import os, sys, numpy, shelve
20 | 
21 | N, N_exp_noise = 1000, 22
22 | from NeuroTools.parameters import *
23 | p = ParameterSpace({'noise_std' : ParameterRange(list(10.**(numpy.linspace(-.50,1.,N_exp_noise))))})
24 | 
25 | name = sys.argv[0].split('.')[0] # name of the current script without the '.py' part
26 | results = shelve.open('results/mat-' + name)
27 | try:
28 |     CRF = results['CRF']
29 | except:
30 |     # this is not mandatory but just an "easy_install progressbar" away
31 |     # else remove all corresponding lines in this code...
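    # A possible no-op fallback when progressbar is missing (a sketch, not in
    # the original script; it mimics only what is used here -- the widgets,
    # ProgressBar(widgets=..., maxval=...), update() and finish()):
    #
    # class _FakeWidget(object):
    #     def __init__(self, *args, **kwargs): pass
    # class _FakeProgressBar(object):
    #     def __init__(self, widgets=None, maxval=None): self.maxval = maxval
    #     def update(self, i): pass
    #     def finish(self): pass
    # class progressbar(object):
    #     Percentage = Bar = ETA = _FakeWidget
    #     ProgressBar = _FakeProgressBar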
32 | import progressbar # see http://projects.scipy.org/pipermail/scipy-dev/2008-January/008200.html 33 | import retina as model 34 | retina = model.Retina(N) 35 | retina.params['snr'] = 0 # no input 36 | 37 | # calculates the dimension of the parameter space 38 | results_dim, results_label = p.parameter_space_dimension_labels() 39 | 40 | # creates results array with size of parameter space dimension 41 | CRF = numpy.empty(results_dim) 42 | 43 | pbar=progressbar.ProgressBar(widgets=[name, " ", progressbar.Percentage(), ' ', 44 | progressbar.Bar(), ' ', progressbar.ETA()], maxval=numpy.prod(results_dim)) 45 | for i_exp,experiment in enumerate(p.iter_inner()): 46 | params = retina.params 47 | params.update(experiment) # updates what changed in the dictionary 48 | # simulate the experiment and get its data 49 | data = retina.run(params,verbose=False) 50 | # calculating the index in the parameter space 51 | index = p.parameter_space_index(experiment) 52 | # put the data at the right position in the results array 53 | CRF[index] = data['out_ON_DATA'].mean_rate()# 54 | pbar.update(i_exp) 55 | 56 | results['CRF'] = CRF 57 | 58 | pbar.finish() 59 | 60 | results.close() 61 | 62 | from NeuroTools.plotting import pylab_params 63 | 64 | """ Figure 1 65 | 66 | Prints to a figure the mean firing rate for the output (ON and OFF) as a function of the different parameter values. It's similar to a CRF function. 67 | 68 | TODO put standard deviation of activity, print CV 69 | 70 | """ 71 | 72 | import pylab 73 | 74 | pylab.figure(num = 1) 75 | 76 | pylab.plot(p.noise_std._values,CRF,'go-', label='line 1', linewidth=2) 77 | pylab.ylabel('Firing Frequency (Hz)') 78 | pylab.xlabel('Noise amplitude') 79 | 80 | 81 | if 0: 82 | pylab.show() 83 | else: 84 | pylab.savefig('results/fig-' + name + '.pdf') 85 | pylab.savefig('results/fig-' + name + '.png', dpi = 300) 86 | 87 | 88 | #TODO: make a plot showing that spontaneous activity is a point process with a known histogram -------------------------------------------------------------------------------- /examples/retina/benchmark_retina.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf8 -*- 3 | """ 4 | benchmark_retina.py 5 | =================== 6 | 7 | This should feed the "Hyper-Column" for benchmark one. 8 | (see https://facets.kip.uni-heidelberg.de/private/wiki/index.php/V1_hypercolumn#Benchmark_one ) 9 | 10 | The input is a discrete dirac simulating the response in Magnocellular Pathway 11 | M to primary visual cortex' layer 4C$\alpha$ then to 4B as defined in Benchmark 12 | one. The paramters tested is SNR sensitivity / spatial stability of V1 response. 13 | 14 | 15 | Illustrates how one many use parameters to explore different sets of parameters 16 | and compute their effect. See benchmark_noise and benchmark_linear for simpler 17 | examples. 
18 | 19 | Laurent Perrinet, INCM, CNRS 20 | 21 | $ Id $ 22 | 23 | """ 24 | 25 | import os, sys, numpy, pylab, shelve, progressbar 26 | 27 | N, N_snr, N_seeds = 1000, 5, 10 28 | from NeuroTools.parameters import * 29 | p = ParameterSpace({ 30 | 'snr' : ParameterRange(list(numpy.linspace(0.5,4.0,N_snr))), 31 | 'kernelseed' : ParameterRange(list([12345 + k for k in range(N_seeds)]))}) 32 | 33 | name = sys.argv[0].split('.')[0] # name of the current script withpout the '.py' part 34 | 35 | try: 36 | ############## MAKING THE SIMULATIONS ############### 37 | results = shelve.open('results/mat-' + name) 38 | try: 39 | DATA = results['DATA'] 40 | params = results['params'] 41 | except: 42 | from retina import * 43 | retina = Retina(N) 44 | # calculates the dimension of the parameter space 45 | results_dim, results_label = p.parameter_space_dimension_labels() 46 | 47 | DATA = [] 48 | pbar=progressbar.ProgressBar(widgets=[name, " ", progressbar.Percentage(), ' ', 49 | progressbar.Bar(), ' ', progressbar.ETA()], maxval=N_snr*N_seeds) 50 | for i_exp, experiment in enumerate(p.iter_inner()): 51 | params = retina.params 52 | params.update(experiment) # updates what changed in the dictionary 53 | # simulate the experiment and get its data 54 | data = retina.run(params)#,verbose=False) 55 | # store it 56 | DATA.append(data)# 57 | pbar.update(i_exp) 58 | 59 | results['DATA'] = DATA 60 | results['params'] = retina.params 61 | 62 | pbar.finish() 63 | results.close() 64 | results = shelve.open('results/mat-pre-' + name) 65 | ############## PRE-PROCESSING ########################### 66 | #boing # uncomment to force recomputing the pre-processing stage 67 | lower_edges = results['lower_edges'] 68 | temporal_ON = results['temporal_ON'] 69 | map_spatial_OFF = results['map_spatial_OFF'] 70 | temporal_OFF = results['temporal_OFF'] 71 | map_spatial_ON = results['map_spatial_ON'] 72 | lower_edges = results['lower_edges'] 73 | results.close() 74 | 75 | except: 76 | def temporal_mean(spike_list): 77 | return numpy.sum(spike_list.firing_rate(t_smooth),axis=0) 78 | 79 | t_smooth = 100. # ms. 
integration time to show fiber activity 80 | lower_edges = DATA[0]['out_ON_DATA'].time_axis(t_smooth) 81 | N_smooth = len(lower_edges)-1 82 | 83 | #N_snr = len(p.snr) 84 | temporal_ON, temporal_OFF = numpy.zeros((N_smooth,N_snr)), numpy.zeros((N_smooth,N_snr)) 85 | map_spatial_ON, map_spatial_OFF = numpy.zeros((N,N_snr)), numpy.zeros((N,N_snr)) 86 | 87 | # 88 | N_ret, simtime = params['N_ret'], params['simtime'] 89 | x = params['position'][0] 90 | y = params['position'][1] 91 | r2 = x**2 + y**2 92 | r = numpy.sqrt(r2) 93 | id_center = [int(k) for k in numpy.where( r2 < N_ret**2)[0]] 94 | # mean activity accross kernelseeds as a function of SNR 95 | for i_exp, experiment in enumerate(p.iter_inner()): 96 | # calculating the index in the parameter space 97 | index = p.parameter_space_index(experiment) 98 | # getting SpikeLists corresponding to the interesting parts (within the center) 99 | temporal_ON[:,index[1]] += temporal_mean(DATA[i_exp]['out_ON_DATA'].id_slice(id_center))/N_seeds 100 | temporal_OFF[:,index[1]] += temporal_mean(DATA[i_exp]['out_ON_DATA'].id_slice(id_center))/N_seeds 101 | map_spatial_ON[:,index[1]] += DATA[i_exp]['out_ON_DATA'].mean_rates(t_start=simtime/4.,t_stop=3*simtime/4.)#/N_seeds 102 | map_spatial_OFF[:,index[1]] += DATA[i_exp]['out_OFF_DATA'].mean_rates(t_start=simtime/4.,t_stop=3*simtime/4.)#/N_seeds 103 | 104 | results = shelve.open('results/mat-pre-' + name) 105 | results['temporal_ON'] = temporal_ON 106 | results['map_spatial_OFF'] = map_spatial_OFF 107 | results['temporal_OFF'] = temporal_OFF 108 | results['map_spatial_ON'] = map_spatial_ON 109 | results['lower_edges'] = lower_edges 110 | results.close() 111 | 112 | results.close() 113 | 114 | ############# MAKING FIGURE ############################ 115 | from NeuroTools.plotting import pylab_params 116 | from numpy import zeros, where, arange 117 | 118 | pylab.close('all') 119 | pylab.ioff() #pylab.ion() # 120 | 121 | """ Figure 122 | 123 | Prints to a figure the mean firing rate 124 | * in (x,y) accross time during the stimulation and 125 | * in t accross positions within the center 126 | for the output (ON and OFF) and for the different parameter values. 
127 | 128 | """ 129 | pylab.rcParams.update(pylab_params(fig_width_pt = 497.9) )#, text_fontsize=8)) 130 | pylab.figure(num = 1, dpi=300, facecolor='w', edgecolor='k') 131 | 132 | x = params['position'][0] 133 | y = params['position'][1] 134 | #Lmargin, Rmargin, dmargin, umargin = 0.05, 0.15, 0.05, 0.05 135 | #pylab.axes([Lmargin, dmargin , 1.0 - Rmargin- Lmargin,1.0-umargin-dmargin]) # [left, bottom, width, height] 136 | #pylab.subplot(131) 137 | pylab.axes([0.1, 0.33, .3/1.61 , .3]) # [left, bottom, width, height] 138 | pylab.scatter(x,y,c=params['amplitude'], faceted = False) #, edgecolors='none' 139 | pylab.title('Input',fontsize ='small') 140 | pylab.axis('equal') 141 | pylab.subplot(232) 142 | pylab.plot(lower_edges[:-1],temporal_ON) 143 | pylab.title('time course (ROI) ',fontsize = 'small') 144 | #pylab.title('time course ON',fontsize = 'small') 145 | pylab.xticks( numpy.linspace(0, params.simtime, 5) ) 146 | pylab.ylabel('ON activity (Hz / neuron)') 147 | #pylab.axis('tight') 148 | pylab.subplot(235) 149 | pylab.plot(lower_edges[:-1],temporal_OFF) 150 | #pylab.title('time course OFF',fontsize = 'small') 151 | pylab.xticks( numpy.linspace(0, params.simtime, 5) ) 152 | pylab.ylabel('OFF activity (Hz / neuron)') 153 | #pylab.axis('tight') 154 | pylab.xlabel('time (ms)') 155 | pylab.subplot(233) 156 | pylab.scatter(x, y, c= map_spatial_ON[:,-1], faceted = False) #, edgecolors='none' 157 | #pylab.title('spatial distribution ON',fontsize ='small') 158 | pylab.title('Output',fontsize ='small') 159 | pylab.subplot(236) 160 | pylab.scatter(x, y, c= map_spatial_OFF[:,-1], faceted = False) #, edgecolors='none' 161 | #pylab.title('spatial distribution OFF',fontsize ='small') 162 | 163 | if 0: 164 | pylab.ion() 165 | pylab.show() 166 | else: 167 | pylab.savefig('results/fig-' + name + '.pdf') 168 | pylab.savefig('results/fig-' + name + '.png', dpi = 300) 169 | 170 | 171 | -------------------------------------------------------------------------------- /examples/retina/results/fig-benchmark_linear.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NeuralEnsemble/NeuroTools/d0da16cce71253e849729e9a6bf5fa04744bbcfc/examples/retina/results/fig-benchmark_linear.png -------------------------------------------------------------------------------- /examples/retina/results/fig-benchmark_noise.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NeuralEnsemble/NeuroTools/d0da16cce71253e849729e9a6bf5fa04744bbcfc/examples/retina/results/fig-benchmark_noise.png -------------------------------------------------------------------------------- /examples/retina/results/fig-benchmark_retina.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NeuralEnsemble/NeuroTools/d0da16cce71253e849729e9a6bf5fa04744bbcfc/examples/retina/results/fig-benchmark_retina.png -------------------------------------------------------------------------------- /examples/retina/test_parallel.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf8 -*- 3 | """ 4 | test_parallel.py 5 | ================ 6 | 7 | Trying to task farming parameter exploration 8 | 9 | 99% inspired by parameter_search_example.py from Michael Schmuker 10 | 11 | To run this script, you first have to invoke an IPython controller and 12 | computation engines. 
If IPython is installed correctly and with parallel 13 | computation support, you can just type: 14 | 15 | > ipcluster -n 2 & 16 | 17 | This will start two computation engines and a controller in the background. 18 | 19 | When the controller is up, run this script: 20 | 21 | > python parameter_search_example.py 22 | 23 | Calculation will start, and after a few seconds (depending on your hardware) 24 | it will save a png graphics file that illustrates the firing rate of a neuron 25 | as a function of the input rate and the weight of the synapse to your current 26 | directory. 27 | 28 | 29 | Laurent Perrinet, INCM, CNRS 30 | 31 | $ Id $ 32 | 33 | 34 | """ 35 | 36 | def model_network(param_dict): 37 | """ 38 | This model network consists of a spike source and a neuron (IF_curr_alpha). 39 | The spike rate of the source and the weight can be specified in the 40 | param_dict. Returns the number of spikes fired during 1000 ms of simulation. 41 | 42 | Parameters: 43 | param_dict - dictionary with keys 44 | rate - the rate of the spike source (spikes/second) 45 | weight - weight of the connection source -> neuron 46 | 47 | Returns: 48 | dictionary with keys: 49 | source_rate - the rate of the spike source 50 | weight - weight of the connection source -> neuron 51 | neuron_rate - spike rate of the neuron 52 | """ 53 | #set up the network 54 | from retina import Retina 55 | retina = Retina(param_dict['N']) 56 | params = retina.params 57 | params.update(param_dict) # updates what changed in the dictionary 58 | # simulate the experiment and get its data 59 | data = retina.run(params)#,verbose=False) 60 | neuron_rate = data['out_ON_DATA'].mean_rate() 61 | print neuron_rate 62 | # return everything, including the input parameters 63 | return {'snr':param_dict['snr'], 64 | 'kernelseed':param_dict['kernelseed'], 65 | 'neuron_rate': neuron_rate} 66 | 67 | 68 | def make_param_dict_list(N): 69 | """ 70 | create a list of parameter dictionaries for the model network. 71 | """ 72 | N_snr, N_seeds = 5, 10 73 | from NeuroTools.parameters import ParameterSpace, ParameterRange 74 | import numpy 75 | params = ParameterSpace({ 76 | 'N' : N, 77 | 'snr' : ParameterRange(list(numpy.linspace(0.1,2.0,N_snr))), 78 | 'kernelseed' : ParameterRange(list([12345+ k for k in range(N_seeds)]))}) 79 | 80 | dictlist = [p.as_dict() for p in params.iter_inner()] 81 | return dictlist 82 | 83 | def show_results(result): 84 | """ 85 | visualizes the result of the parameter search. 86 | Parameters: 87 | result - list of result dictionaries. 88 | """ 89 | import numpy 90 | t_smooth = 100. # ms. 
integration time to show fiber activity
91 |     snrs = numpy.unique([r['snr'] for r in result])
92 |     neuron_rates = numpy.zeros(len(snrs))
93 |     for snr_i in range(len(snrs)):
94 |         # average the firing rate over the kernelseed repetitions
95 |         neuron_rates[snr_i] = numpy.mean([r['neuron_rate'] for r in result
96 |                                           if (r['snr'] == snrs[snr_i])])
97 |     import NeuroTools.plotting as plotting
98 |     pylab = plotting.get_display(True)
99 |     pylab.rcParams.update(plotting.pylab_params())
100 |     print snrs, neuron_rates
101 |     pylab.plot(snrs, neuron_rates, 'o-')
102 |     plotting.set_labels(pylab.gca(),
103 |                         xlabel = 'snr',
104 |                         ylabel = 'mean firing rate (Hz)')
105 |     # could add fancy xticks and yticks here
106 |     import tempfile, os
107 |     (fd, figfilename) = tempfile.mkstemp(prefix = 'parameter_search_result',
108 |                                          suffix = '.png',
109 |                                          dir = os.getcwd())
110 |     pylab.gcf().savefig(figfilename)
111 | 
112 | def run_it(N):
113 |     """
114 |     Run the parameter search.
115 |     """
116 | 
117 |     import sys
118 |     sys.path.append('../parameter_search/')
119 | 
120 |     import parameter_search as ps
121 |     # search the parameter space around the operating point
122 |     param_dict_list = make_param_dict_list(N)
123 |     srchr = ps.IPythonParameterSearcher(
124 |                 dict_iterable = param_dict_list,
125 |                 func = model_network)
126 |     srchr.search()
127 |     outlist = srchr.harvest()
128 | 
129 |     #return the results
130 |     return outlist
131 | 
132 | if __name__ == '__main__':
133 | 
134 |     results = run_it(N=100)
135 |     show_results(results)
136 | 
-------------------------------------------------------------------------------- /examples/sfn2008/sfn_example_parameterspace.py: --------------------------------------------------------------------------------
1 | """
2 | $Id: sfn_example_parameterspace.py 350 2008-11-16 00:51:40Z mschmucker $
3 | 
4 | Example to show off some capabilities of the parameters module.
5 | 
6 | - creates a ParameterSpace of c and jitter for the example shown in
7 |   sfn_example_stgen.py
8 | - the parameters c and jitter are scanned and the cc and the corrcoef
9 |   are calculated
10 | - all the cc's are plotted
11 | 
12 | Performed at the NeuroTools demo session, INCF booth,
13 | SfN annual meeting 2008, Washington, DC.
14 | """
15 | import numpy, pylab
16 | 
17 | import NeuroTools.stgen as stgen
18 | sg = stgen.StGen()
19 | 
20 | from NeuroTools.parameters import ParameterSpace
21 | from NeuroTools.parameters import ParameterRange
22 | from NeuroTools.sandbox import make_name
23 | 
24 | # creating a ParameterSpace
25 | p = ParameterSpace({})
26 | 
27 | # adding fixed parameters
28 | p.nu = 20. # rate [Hz]
29 | p.duration = 1000.
30 | 
31 | # adding ParameterRanges
32 | p.c = ParameterRange([0.0,0.01,0.1,0.5])
33 | p.jitter = ParameterRange([0.0,1.0,5.0,])
34 | 
35 | # calculation of the ParameterSpace dimension and the labels of the parameters
36 | # containing a range
37 | dims, labels = p.parameter_space_dimension_labels()
38 | print "dimensions: ", dims
39 | print ' labels: ', labels
40 | 
41 | def calc_cc(p):
42 |     """
43 |     Generate correlated spike trains from the ParameterSet.
44 | 
45 |     Parameter:
46 |     p - ParameterSet containing parameters nu (rate), c (correlation),
47 |         duration (in ms), jitter (in ms).
48 | 
49 |     Returns: (cc, time_axis_cc, corrcoef)
50 |     cc - cross-correlogram of the two binned spike trains
51 |     time_axis_cc - time axis for cross-correlation (for plotting)
52 |     corrcoef - correlation coefficient between the two SpikeTrains
53 |     """
54 |     rate_independent = (1-p.c)*p.nu
55 |     rate_shared = p.c*p.nu
56 | 
57 |     st1 = sg.poisson_generator(rate=rate_independent, t_stop = p.duration)
58 |     st2 = sg.poisson_generator(rate=rate_independent, t_stop = p.duration)
59 |     if p.c > 0.:
60 |         st3 = sg.poisson_generator(rate=rate_shared, t_stop = p.duration)
61 |         st1.merge(st3.jitter(p.jitter))
62 |         st2.merge(st3.jitter(p.jitter))
63 | 
64 |     cc = numpy.correlate(st1.time_histogram(time_bin = 1.0),
65 |                          st2.time_histogram(time_bin = 1.),mode = 'same')
66 |     corrcoef = numpy.corrcoef(st1.time_histogram(time_bin = 1.0),
67 |                               st2.time_histogram(time_bin = 1.))
68 |     time_axis_cc = numpy.linspace(-cc.shape[0]/2.,cc.shape[0]/2.,cc.shape[0])
69 |     return cc, time_axis_cc, corrcoef[0][1]
70 | 
71 | 
72 | # creating a results array, with the dimensions of the ParameterSpace
73 | corrcoef_results = numpy.empty(dims)
74 | 
75 | # scanning the ParameterSpace
76 | for experiment in p.iter_inner():
77 |     # calculation of the index in the space
78 |     index = p.parameter_space_index(experiment)
79 |     # performing the experiment
80 |     cc,time_axis_cc, corrcoef = calc_cc(experiment)
81 |     corrcoef_results[index] = corrcoef
82 |     # plotting the cc's
83 |     subplot_index = (dims[1]*index[0])+index[1]
84 |     pylab.subplot(dims[0],dims[1],subplot_index+1)
85 |     pylab.plot(time_axis_cc,cc)
86 |     pylab.title(make_name(experiment,p.range_keys()))
87 |     pylab.xlim(-30,30.)
88 |     pylab.ylim(0,10.)
89 | 
90 | 
91 | # plot the results
92 | pylab.matshow(corrcoef_results)
93 | pylab.xticks(numpy.arange(0.5,dims[1]+0.5,1.0),[str(i) for i in p.jitter._values])
94 | pylab.yticks(numpy.arange(0.5,dims[0]+0.5,1.0),[str(i) for i in p.c._values])
95 | pylab.xlim(0,dims[1])
96 | pylab.ylim(dims[0],0)
97 | pylab.xlabel('jitter (ms)')
98 | pylab.ylabel('correlation')
99 | ax = pylab.colorbar()
100 | ax.set_label('correlation')
101 | pylab.draw()
102 | 
103 | 
-------------------------------------------------------------------------------- /examples/sfn2008/sfn_example_simulated_data.py: --------------------------------------------------------------------------------
1 | """
2 | Example to show off some capabilities of the signals module
3 | and the SpikeList and AnalogSignalList class.
4 | 
5 | - loads spiking and voltage data simulated with pyNN
6 | - calculates various measurements (mean firing rate, cv...)
7 | - plots the signals
8 | - plots the spike-triggered averages
9 | 
10 | Performed at the NeuroTools demo session, INCF booth,
11 | SfN annual meeting 2008, Washington, DC.
12 | """
13 | import NeuroTools.signals as signals
14 | 
15 | # loading spiking data
16 | s = signals.load_spikelist('spike_data')
17 | 
18 | # raster plot
19 | s.raster_plot()
20 | 
21 | # mean rate
22 | print 'mean rate: ',s.mean_rate()
23 | print 'mean rates: ',s.mean_rates()
24 | # fano factor of isi
25 | print 'fano factor of isi: ',s.fano_factors_isi()
26 | # cv of isi
27 | print 'cv of isi: ',s.cv_isi()
28 | 
29 | # isi distribution
30 | hs = s.isi_hist(bins=20, display=True)
31 | 
32 | 
33 | # loading voltage data
34 | v = signals.load_vmlist('vm_data')
35 | 
36 | # plot all the signals
37 | v.plot()
38 | # plot only one AnalogSignal
39 | v[1].plot()
40 | 
41 | # spike triggered averages
42 | v.event_triggered_average(s,t_min=50.)
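# A short extension of the demo (not in the original): the same statistics
# can be computed on restricted views of the data. id_slice() is used this
# way in examples/retina/benchmark_retina.py; time_slice(t_start, t_stop) is
# assumed to exist with times in ms, and ids 0-9 to be present in the file.
s_sub = s.id_slice(range(10))       # keep only the first ten neuron ids
s_late = s.time_slice(500., 1000.)  # keep only spikes between 500 and 1000 ms
print 'mean rate, ids 0-9: ', s_sub.mean_rate()
print 'mean rate, 500-1000 ms: ', s_late.mean_rate()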
43 | 44 | -------------------------------------------------------------------------------- /examples/sfn2008/sfn_example_spike2.py: -------------------------------------------------------------------------------- 1 | import NeuroTools.spike2.spike2channels as spike2 2 | import pylab, numpy 3 | """ 4 | Example to show off some capabilities of the spike2 module 5 | and the SpikeTrain and AnalogSignal class. 6 | 7 | - loads content from a CED Son file which contains data from an IF-curve experiment 8 | - then the data is processed and an IF-curve is plotted 9 | 10 | Performed at the NeuroTools demo session, INCF booth, 11 | SfN annual meeting 2008, Washington, DC. 12 | """ 13 | 14 | # IF-curve 15 | 16 | filename = 'IF-Curve-example-data-provided-by-Florian-Rau-University-of-Freiburg-2008.smr' 17 | 18 | # load all channels in the file 19 | #all_channels = spike2.load(filename) 20 | # or only selected channels 21 | all_channels = spike2.load(filename,channels=[1,2,32]) 22 | 23 | # separate the channels, just for better readability 24 | vm = all_channels[2] 25 | dc = all_channels[1] 26 | dc_onset_marker = all_channels[32] 27 | 28 | pylab.close('all') 29 | # show original data 30 | # vm 31 | pylab.figure() 32 | pylab.plot(vm.time_axis(),vm.signal()) 33 | # currents 34 | pylab.figure() 35 | pylab.plot(dc.time_axis(),dc.signal()) 36 | # dc_onset_markers 37 | 38 | 39 | # cut out the dc and vm around a dc step; markers are in seconds, we need them in milliseconds 40 | dc_sequence = dc.slice_by_events(dc_onset_marker.times*1000,t_min=500,t_max=1000) 41 | vm_sequence = vm.slice_by_events(dc_onset_marker.times*1000,t_min=500,t_max=1000) 42 | 43 | xlim = [1000,27000] 44 | 45 | # first figure shows the data 46 | pylab.rcParams['figure.figsize'] = [15.,10.] 47 | pylab.figure() 48 | # plot dc signal with real time 49 | subplot = pylab.subplot(2,2,1) 50 | for dc_slice in dc_sequence.values(): 51 | # we plot each dc_slice with its real time 52 | pylab.plot(dc_slice.time_axis(),dc_slice.signal) 53 | # we show only part of the data 54 | pylab.xlim(xlim) 55 | pylab.ylabel('current (pA)') 56 | pylab.title('current with real time') 57 | xmin,xmax,ymin,ymax = subplot.axis() 58 | # plot dc signal with normalized time, such that each dc_slice starts at time 0.0 59 | pylab.subplot(2,2,2) 60 | for index, dc_slice in dc_sequence.items(): 61 | # we plot each dc_slice with its normalized time --> start at 0.0 ms 62 | if index <= 4: 63 | # only 5 slices are plotted 64 | pylab.plot(dc_slice.time_axis(normalized=True),dc_slice.signal) 65 | pylab.ylim([ymin,ymax]) 66 | pylab.title('current with normalized time') 67 | 68 | # plot vm signal with real time 69 | subplot = pylab.subplot(2,2,3) 70 | for vm_slice in vm_sequence.values(): 71 | pylab.plot(vm_slice.time_axis(),vm_slice.signal) 72 | pylab.xlim(xlim) 73 | pylab.xlabel('Time (ms)') 74 | pylab.ylabel('mp (mV)') 75 | pylab.title('mp with real time') 76 | xmin,xmax,ymin,ymax = subplot.axis() 77 | # plot vm signal with normalized time, such that each dc_slice starts at time 0.0 78 | pylab.subplot(2,2,4) 79 | for index, vm_slice in vm_sequence.items(): 80 | if index <= 4: 81 | pylab.plot(vm_slice.time_axis(normalized=True),vm_slice.signal) 82 | pylab.xlabel('Time (ms)') 83 | pylab.title('mp with normalized time') 84 | pylab.ylim([ymin,ymax]) 85 | pylab.savefig('IF_curve_data.png') 86 | pylab.close('all') 87 | 88 | 89 | # second figure shows the IF-curve 90 | pylab.rcParams['figure.figsize'] = [6.,5.]
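# For reference: threshold_detection (used below) reduces an analog trace to
# spike times by finding upward threshold crossings. A minimal numpy sketch
# of the idea (an illustration only, not the NeuroTools implementation; `v`
# is a sampled trace and `dt` its time step in ms; numpy is imported at the
# top of this file):
def detect_threshold_crossings(v, thr, dt):
    v = numpy.asarray(v)
    # sample indices where the trace passes thr from below
    idx = numpy.where((v[:-1] < thr) & (v[1:] >= thr))[0]
    return (idx + 1) * dt  # crossing times in ms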
91 | pylab.figure() 92 | dc_inputs = [] 93 | frs = [] 94 | 95 | # calculate the actual IF-curve 96 | for dc_slice, vm_slice in zip(dc_sequence.values(), vm_sequence.values()): 97 | # we calculate the max of the dc_slice 98 | input_dc = dc_slice.max() 99 | # we perform a threshold detection on the vm slice, which returns a SpikeTrain object that knows its mean_rate (Hz) 100 | fr = vm_slice.threshold_detection(0.0).mean_rate() 101 | # we append the values 102 | dc_inputs.append(input_dc) 103 | frs.append(fr) 104 | # and plot them 105 | pylab.plot(dc_inputs,frs,'ok',label='data') 106 | 107 | pylab.xlabel('I (pA)') 108 | pylab.ylabel('spikes/s') 109 | pylab.title('IF-curve') 110 | 111 | 112 | pylab.savefig('IF_curve_curve.png') 113 | 114 | -------------------------------------------------------------------------------- /examples/sfn2008/sfn_example_stgen.py: -------------------------------------------------------------------------------- 1 | """ 2 | $Id: sfn_example_stgen.py 362 2008-12-08 17:35:59Z LaurentPerrinet $ 3 | 4 | Example to show off some capabilities of the stgen module 5 | and the SpikeTrain class. 6 | 7 | - Creates two Poisson spike trains with approx. rate of 100 Hz. 8 | - Calculates rate, coefficient of variation and fano factor using the 9 | functions from SpikeTrain. 10 | - Calculates cross correlation between the two SpikeTrains. 11 | - Generates a third SpikeTrain with rate ~10Hz and merges it into the 12 | previous two, effectively injecting correlation. 13 | - Plots the cross correlation for the correlated and uncorrelated SpikeTrains. 14 | - Generates shot noise from one SpikeTrain using the StGen class and plots it. 15 | - Calculates spike triggered average from shot noise to reconstruct the initial 16 | shot noise kernel. 17 | 18 | Performed at the NeuroTools demo session, INCF booth, 19 | SfN annual meeting 2008, Washington, DC. 20 | """ 21 | import NeuroTools.stgen as stgen 22 | import pylab 23 | sg = stgen.StGen() 24 | duration = 10000. 25 | rate_independent = 100. #Hz 26 | rate_shared = 10.
#Hz, 10 % correlation 27 | 28 | st1 = sg.poisson_generator(rate=rate_independent, t_stop = duration) 29 | print "Spiketrain 1:" 30 | print "mean rate: %f" % st1.mean_rate() 31 | print "coefficient of variation: %f" % st1.cv_isi() 32 | print "fano factor: %f" % st1.fano_factor_isi() 33 | 34 | st2 = sg.poisson_generator(rate=rate_independent, t_stop = duration) 35 | print "Spiketrain 2:" 36 | print "mean rate: %f" % st2.mean_rate() 37 | print "coefficient of variation: %f" % st2.cv_isi() 38 | print "fano factor: %f" % st2.fano_factor_isi() 39 | 40 | # cross correlation 41 | import numpy 42 | cc = numpy.correlate(st1.time_histogram(time_bin = 1.), 43 | st2.time_histogram(time_bin = 1.), 44 | mode = 'same') 45 | time_axis = numpy.linspace(cc.shape[0]/-2., cc.shape[0]/2, cc.shape[0]) 46 | 47 | pylab.plot(time_axis, cc, label = 'no correlation', alpha = 0.5) 48 | 49 | # inject correlation into st1 and st2 50 | st3 = sg.poisson_generator(rate = rate_shared, t_stop = duration) 51 | st1.merge(st3) 52 | st2.merge(st3) 53 | 54 | cc2 = numpy.correlate(st1.time_histogram(time_bin = 1.), 55 | st2.time_histogram(time_bin = 1.), 56 | mode = 'same') 57 | 58 | pylab.plot(time_axis, cc2, label = '10% correlation', alpha = 0.5) 59 | pylab.legend() 60 | pylab.show() 61 | 62 | #generate shot noise from st1 63 | st1_shot = stgen.shotnoise_fromspikes(st1, 64 | q = 1.0, 65 | tau = 10., 66 | t_start = st1.t_start, 67 | t_stop = st1.t_stop) 68 | f = pylab.figure() 69 | pylab.plot(st1_shot.signal) 70 | f.gca().set_title('shot noise') 71 | f.gca().set_xlabel('time [ms]') 72 | 73 | # spike triggered average reveals the original shot-noise kernel 74 | sta = st1_shot.event_triggered_average(st1, display = True, average = True) 75 | 76 | 77 | -------------------------------------------------------------------------------- /examples/sfn2008/spike_data: -------------------------------------------------------------------------------- 1 | # dimensions = [2] 2 | # first_id = 0 3 | # last_id = 1 4 | # dt = 0.1 5 | 14.5 0 6 | 15.8 1 7 | 22.3 1 8 | 22.4 0 9 | 39.8 0 10 | 40.1 1 11 | 47.4 1 12 | 54.6 0 13 | 63.8 1 14 | 66 0 15 | 74.9 0 16 | 77.2 1 17 | 83.1 0 18 | 85.4 1 19 | 95.5 1 20 | 99.6 0 21 | 107.2 1 22 | 110.1 0 23 | 118.7 1 24 | 125.9 0 25 | 127.5 1 26 | 136.6 1 27 | 142.9 0 28 | 150.1 1 29 | 151.3 0 30 | 168.8 0 31 | 175.8 1 32 | 185.3 1 33 | 186.5 0 34 | 199.7 1 35 | 206.6 1 36 | 210.9 0 37 | 217.8 1 38 | 219.3 0 39 | 234.7 0 40 | 237.6 1 41 | 251.5 0 42 | 259.5 1 43 | 267.7 1 44 | 276.1 0 45 | 277 1 46 | 287 0 47 | 289.6 1 48 | 303.7 1 49 | 305.8 0 50 | 316.8 0 51 | 322.2 1 52 | 340.4 0 53 | 351.8 1 54 | 356.2 0 55 | 358.7 1 56 | 372.4 0 57 | 373 1 58 | 381.8 1 59 | 387 0 60 | 388.6 1 61 | 401.6 0 62 | 406.7 1 63 | 417.9 0 64 | 419.6 1 65 | 426.9 0 66 | 432.6 1 67 | 438.2 0 68 | 446.4 1 69 | 447.9 0 70 | 459.5 0 71 | 466.4 1 72 | 471.5 0 73 | 479.7 1 74 | 485.7 0 75 | 495 0 76 | 497.1 1 77 | -------------------------------------------------------------------------------- /examples/single_neuron/CRF_neuron_vs_signal.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf8 -*- 3 | """ 4 | CRF_neuron_vs_signal.py 5 | 6 | Testing the mean firing rate of a fiber for different signal strengths. 7 | Prints to a figure the mean firing rate for the output (ON and OFF) as a function 8 | of the different parameter values. It's similar to a CRF function. 
9 | 10 | Results illustrate that 11 | - the higher the value the more the neuron spikes (wouah!), 12 | - that this follows a ramp-type of function 13 | - and that noise "smoothes" the transition in theinput/output function. 14 | 15 | TODO: do a better plot as in benchmark_neuron_vs_noise.py 16 | 17 | $Id: CRF_neuron_vs_signal.py 362 2008-12-08 17:35:59Z LaurentPerrinet $ 18 | """ 19 | 20 | import os, sys, numpy, pylab, shelve 21 | 22 | from NeuroTools.parameters import * 23 | 24 | # this is not mandatory but just a "easy_install progressbar" away 25 | # else remove all corresponding 3 lines in this code... 26 | import progressbar # see http://projects.scipy.org/pipermail/scipy-dev/2008-January/008200.html 27 | 28 | 29 | N_exp_snr = 20 30 | N_exp_noise = 9 31 | 32 | ps = ParameterSpace({ 33 | 'snr' : ParameterRange(list(numpy.linspace(-1.,4.,N_exp_snr))), 34 | 'noise_std' : ParameterRange(list(10.**(numpy.linspace(-.50,1.,N_exp_noise))))}) 35 | 36 | 37 | name = sys.argv[0].split('.')[0] # name of the current script withpout the '.py' part 38 | results = shelve.open('results/mat-' + name) 39 | try: 40 | CRF = results['CRF'] 41 | except: 42 | 43 | # calculates the dimension of the parameter space 44 | results_dim, results_label = ps.parameter_space_dimension_labels() 45 | 46 | # creates results array with size of parameter space dimension 47 | import simple_single_neuron as model 48 | myFibers = model.FiberChannel() 49 | CRF = numpy.empty(results_dim) 50 | 51 | pbar=progressbar.ProgressBar(widgets=[name, " ", progressbar.Percentage(), ' ', 52 | progressbar.Bar(), ' ', progressbar.ETA()], maxval=numpy.prod(results_dim)) 53 | for i_exp,experiment in enumerate(ps.iter_inner()): 54 | params = myFibers.params 55 | params.update(experiment) # updates what changed in the dictionary 56 | # simulate the experiment and get its data 57 | data = myFibers.run(params,verbose=False) 58 | # calculating the index in the parameter space 59 | index = ps.parameter_space_index(experiment) 60 | # put the data at the right position in the results array 61 | CRF[index] = data.mean_rate()# 62 | pbar.update(i_exp) 63 | 64 | results['CRF'] = CRF 65 | pbar.finish() 66 | 67 | results.close() 68 | 69 | #numpy.array(p.noise_std._values),numpy.array(p.snr._values), 70 | #pylab.plot(ps.snr._values,CRF.transpose()) #color = (sin(2*pi*noise_list)**2,cos(2*pi*noise_list)**2,1)) 71 | for i_noise, noise in enumerate(ps.noise_std._values): 72 | pylab.plot(ps.snr._values,CRF[i_noise,:], label='noise = %5.3f' % noise) 73 | #pylab.yticks(p.noise_std._values[:2:]) 74 | pylab.ylabel('Firing Rate (Hz/neuron)') 75 | #pylab.xticks(p.snr._values[:2:]) 76 | pylab.xlabel('Signal') 77 | pylab.legend(loc = 'lower right') 78 | pylab.axis([numpy.min(ps.snr._values), numpy.max(ps.snr._values), 0.0, numpy.max(CRF[:])]) 79 | if 0: 80 | pylab.show() 81 | else: 82 | pylab.savefig('results/fig-' + name + '.pdf') 83 | pylab.savefig('results/fig-' + name + '.png') 84 | -------------------------------------------------------------------------------- /examples/single_neuron/SpikeTrain2Play.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NeuralEnsemble/NeuroTools/d0da16cce71253e849729e9a6bf5fa04744bbcfc/examples/single_neuron/SpikeTrain2Play.wav -------------------------------------------------------------------------------- /examples/single_neuron/fiber.param: -------------------------------------------------------------------------------- 1 | { 2 | "simulation": { 3 | "kernelseed": 
4321097, 4 | "dt": 0.1, 5 | "syn_delay": 1.0, 6 | "simtime": 4000.0, 7 | "connectseed": 12345789, 8 | }, 9 | "snr": 2.0, 10 | "noise_std": 2.0, 11 | "weight": 1.0, 12 | "N": 100, 13 | } -------------------------------------------------------------------------------- /examples/single_neuron/playing_with_simple_single_neuron.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf8 -*- 3 | """ 4 | Simple demo to: 5 | - produce a spike train using a simulator, 6 | - record the produced spike list to an audio file, 7 | - play it through your loudspeaker! 8 | 9 | $Id: playing_with_simple_single_neuron.py 326 2008-11-12 18:49:46Z LaurentPerrinet $ 10 | """ 11 | import os, numpy, shelve 12 | import simple_single_neuron as model 13 | 14 | 15 | def record(output, cfilename = 'SpikeTrain2Play.wav', fs=44100, enc = 'pcm16'): 16 | """ record the 'sound' produced by a neuron. Takes a spike train 17 | (`output`) and writes it to a WAV file. 18 | 19 | >>> record(my_spike_train) 20 | 21 | """ 22 | 23 | 24 | # from the spike list 25 | simtime_seconds = (output.t_stop - output.t_start)/1000. 26 | #time = numpy.linspace(0, simtime_seconds , fs*simtime_seconds) 27 | (trace,time) = numpy.histogram(output.spike_times*1000., fs*simtime_seconds) 28 | 29 | 30 | # TODO convolve with proper spike... 31 | spike = numpy.ones((fs/1000.,)) # one ms 32 | 33 | trace = numpy.convolve(trace, spike, mode='same')#/2.0 34 | trace /= numpy.abs(trace).max() * 1.1 35 | 36 | try: 37 | from scikits.audiolab import wavwrite 38 | except ImportError: 39 | raise ImportError("You need the scikits.audiolab package to produce sounds !") 40 | wavwrite(trace, cfilename, fs = fs, enc = enc) 41 | 42 | 43 | def play(output): 44 | """ 45 | plays a spike list to the audio output 46 | 47 | play(spike_list) where spike_list is a spike_list object 48 | 49 | see playing_with_simple_single_neuron.py for a sample use 50 | 51 | >>> play(my_spike_train) 52 | 53 | TODO: make it possible to play multiple spike trains in stereo 54 | """ 55 | 56 | 57 | from tempfile import mkstemp 58 | fd, cfilename = mkstemp('SpikeListPlay.wav') 59 | try: 60 | record(output, cfilename) 61 | import pyaudio 62 | import wave 63 | 64 | chunk = 1024 65 | wf = wave.open(cfilename, 'rb') 66 | p = pyaudio.PyAudio() 67 | 68 | # open stream 69 | stream = p.open(format = 70 | p.get_format_from_width(wf.getsampwidth()), 71 | channels = wf.getnchannels(), 72 | rate = wf.getframerate(), 73 | output = True) 74 | 75 | # read data 76 | data = wf.readframes(chunk) 77 | 78 | # play stream 79 | while data != '': 80 | stream.write(data) 81 | data = wf.readframes(chunk) 82 | 83 | stream.close() 84 | p.terminate() 85 | except: 86 | print "Error playing the SpikeTrain " 87 | # finally 88 | os.remove(cfilename) 89 | 90 | # Python 2.4 compatibility 91 | # finally: 92 | # os.remove(cfilename) 93 | 94 | 95 | # in this demo, we generate a simple model 96 | myFibers = model.FiberChannel(N=1) 97 | # generating a simple spike train 98 | output = myFibers.run(myFibers.params) 99 | st = output[0] # the spike train is the first element of the generated spike list 100 | # that we may either record as a WAV file 101 | record(st) 102 | # or directly play with the appropriate library 103 | play(st) -------------------------------------------------------------------------------- /examples/single_neuron/results/fig-CRF_neuron_vs_signal.png: --------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuralEnsemble/NeuroTools/d0da16cce71253e849729e9a6bf5fa04744bbcfc/examples/single_neuron/results/fig-CRF_neuron_vs_signal.png -------------------------------------------------------------------------------- /examples/single_neuron/simple_single_neuron.py: -------------------------------------------------------------------------------- 1 | #/usr/bin/python 2 | # -*- coding: utf8 -*- 3 | """ 4 | simple_single_neuron 5 | 6 | Running a single neuron using pyNN + NEST2 7 | 8 | Laurent Perrinet, INCM, CNRS -- October 2006 9 | 10 | Changes : 11 | Refactored to use the new NeuroTools-- Nov 2007 12 | Changed to use new pyNN syntax / NEST2-- May 2008 13 | 14 | 15 | $ Id $‚ 16 | 17 | """ 18 | 19 | 20 | import numpy 21 | import sys, os, tempfile 22 | # choosing the simulator 23 | import pyNN.nest2 as sim 24 | # the link to read SpikeList files with NeuroTools 25 | from NeuroTools.signals import load_spikelist 26 | # using parameters utility 27 | from NeuroTools.parameters import ParameterSet 28 | from pyNN.utility import Timer 29 | 30 | class FiberChannel(object): 31 | """ 32 | Model class for the fiber of simple neurons. 33 | 34 | """ 35 | def __init__(self,N=100): 36 | # simulator specific 37 | simulation_params = ParameterSet({'dt' : 0.1,# discretization step in simulations (ms) 38 | 'simtime' : 40000*0.1, # float; (ms) 39 | 'syn_delay' : 1.0, # float; (ms) 40 | 'kernelseed' : 4321097, # array with one element per thread 41 | 'connectseed' : 12345789 # seed for random generator(s) used during simulation 42 | }) 43 | # these may change 44 | self.params = ParameterSet({'simulation': simulation_params, 45 | 'N' : N, 46 | 'noise_std' : 6.0, # (nA??) standard deviation of the internal noise 47 | 'snr' : 1.0, # (nA??) size of the input signal 48 | 'weight' : 1.0 }, 49 | label="fiber_params") 50 | 51 | print self.params.pretty() 52 | 53 | def run(self,params, verbose =True): 54 | tmpdir = tempfile.mkdtemp() 55 | timer = Timer() 56 | timer.start() # start timer on construction 57 | 58 | # === Build the network ======================================================== 59 | if verbose: print "Setting up simulation" 60 | sim.setup(timestep=params.simulation.dt,max_delay=params.simulation.syn_delay, debug=False) 61 | 62 | N = params.N 63 | #dc_generator 64 | current_source = sim.DCSource( amplitude= params.snr, 65 | start=params.simulation.simtime/4, 66 | stop=params.simulation.simtime/4*3) 67 | 68 | # internal noise model (NEST specific) 69 | noise = sim.Population(N,'noise_generator',{'mean':0.,'std':params.noise_std}) 70 | # target population 71 | output = sim.Population(N , sim.IF_cond_exp) 72 | 73 | # initialize membrane potential 74 | numpy.random.seed(params.simulation.kernelseed) 75 | V_rest, V_spike = -70., -53. 
76 | output.tset('v_init',V_rest + numpy.random.rand(N,)* (V_spike -V_rest)) 77 | 78 | # Connecting the network 79 | conn = sim.OneToOneConnector(weights = params.weight) 80 | sim.Projection(noise, output, conn) 81 | 82 | for cell in output: 83 | cell.inject(current_source) 84 | 85 | output.record() 86 | 87 | # reads out time used for building 88 | buildCPUTime= timer.elapsedTime() 89 | 90 | # === Run simulation =========================================================== 91 | if verbose: print "Running simulation" 92 | 93 | timer.reset() # start timer on construction 94 | sim.run(params.simulation.simtime) 95 | simCPUTime = timer.elapsedTime() 96 | 97 | timer.reset() # start timer on construction 98 | 99 | output_filename = os.path.join(tmpdir,'output.gdf') 100 | #print output_filename 101 | output.printSpikes(output_filename)# 102 | output_DATA = load_spikelist(output_filename,N, 103 | t_start=0.0, t_stop=params.simulation.simtime) 104 | writeCPUTime = timer.elapsedTime() 105 | 106 | if verbose: 107 | print "\nFiber Network Simulation:" 108 | print "Number of Neurons : ", N 109 | print "Mean Output rate : ", output_DATA.mean_rate(), "Hz during ",params.simulation.simtime, "ms" 110 | print("Build time : %g s" % buildCPUTime) 111 | print("Simulation time : %g s" % simCPUTime) 112 | print("Writing time : %g s" % writeCPUTime) 113 | 114 | os.remove(output_filename) 115 | os.rmdir(tmpdir) 116 | 117 | return output_DATA 118 | 119 | if __name__ == '__main__': 120 | myFibers = FiberChannel(N=50) 121 | spikes = myFibers.run(myFibers.params) 122 | spikes.raster_plot() 123 | import pylab 124 | pylab.show() 125 | -------------------------------------------------------------------------------- /examples/spike2/IF-Curve.py: -------------------------------------------------------------------------------- 1 | import NeuroTools.spike2.spike2channels as spike2 2 | import pylab, numpy 3 | 4 | # helper function 5 | def fit_crf(xs,ys,arg): 6 | from scipy.optimize import leastsq 7 | def minimize(arg): 8 | res_max,half_sat,n = arg 9 | res = [res_max*(x**n/(x**n+half_sat)) - ys[i] for i,x in enumerate(xs)] 10 | return res 11 | 12 | fitres = leastsq(minimize,arg) 13 | return fitres 14 | 15 | # IF-curve 16 | 17 | filename = '/Users/jenskremkow/src/NeuroTools/examples/spike2/IF-Curve-example-data-provided-by-Florian-Rau-University-of-Freiburg-2008.smr' 18 | 19 | # load all channels in the file 20 | #all_channels = spike2.load(filename) 21 | # or only selected channels 22 | all_channels = spike2.load(filename,channels=[1,2,32]) 23 | 24 | # seperate the channels, just for better reading 25 | vm = all_channels[2] 26 | dc = all_channels[1] 27 | dc_onset_marker = all_channels[32] 28 | 29 | # cutout the dc and vm around a dc step, markers are in seconds, we need them in milliseonds 30 | dc_sequence = dc.slice_by_events(dc_onset_marker.times*1000,t_min=500,t_max=1000) 31 | vm_sequence = vm.slice_by_events(dc_onset_marker.times*1000,t_min=500,t_max=1000) 32 | 33 | xlim = [1000,27000] 34 | 35 | # first figure shows the data 36 | pylab.rcParams['figure.figsize'] = [15.,10.] 
37 | pylab.figure() 38 | # plot dc signal with real time 39 | subplot = pylab.subplot(2,2,1) 40 | for dc_slice in dc_sequence.values(): 41 | # we plot each dc_slice with its real time 42 | pylab.plot(dc_slice.time_axis(),dc_slice.signal) 43 | # we show only part of the data 44 | pylab.xlim(xlim) 45 | pylab.ylabel('current (pA)') 46 | pylab.title('current with real time') 47 | xmin,xmax,ymin,ymax = subplot.axis() 48 | # plot dc signal with normalized time, such that each dc_slice starts at time 0.0 49 | pylab.subplot(2,2,2) 50 | for index, dc_slice in dc_sequence.items(): 51 | # we plot each dc_slice with its normalized time --> start at 0.0 ms 52 | if index <= 4: 53 | # only 5 slices are plotted 54 | pylab.plot(dc_slice.time_axis(normalized=True),dc_slice.signal) 55 | pylab.ylim([ymin,ymax]) 56 | pylab.title('current with normalized time') 57 | 58 | # plot vm signal with real time 59 | subplot = pylab.subplot(2,2,3) 60 | for vm_slice in vm_sequence.values(): 61 | pylab.plot(vm_slice.time_axis(),vm_slice.signal) 62 | pylab.xlim(xlim) 63 | pylab.xlabel('Time (ms)') 64 | pylab.ylabel('mp (mV)') 65 | pylab.title('mp with real time') 66 | xmin,xmax,ymin,ymax = subplot.axis() 67 | # plot vm signal with normalized time, such that each dc_slice starts at time 0.0 68 | pylab.subplot(2,2,4) 69 | for index, vm_slice in vm_sequence.items(): 70 | if index <= 4: 71 | pylab.plot(vm_slice.time_axis(normalized=True),vm_slice.signal) 72 | pylab.xlabel('Time (ms)') 73 | pylab.title('mp with normalized time') 74 | pylab.ylim([ymin,ymax]) 75 | pylab.savefig('IF_curve_data.png') 76 | pylab.close('all') 77 | 78 | 79 | # second figure shows the IF-curve 80 | pylab.rcParams['figure.figsize'] = [6.,5.] 81 | pylab.figure() 82 | dc_inputs = [] 83 | frs = [] 84 | 85 | # calculate the actual IF-curve 86 | for dc_slice, vm_slice in zip(dc_sequence.values(), vm_sequence.values()): 87 | # we calculate the max of the dc_slice 88 | input_dc = dc_slice.max() 89 | # we perform a threshold detection on the vm slice, which returns a SpikeTrain object, this knows its mean_rate (Hz) 90 | fr = vm_slice.threshold_detection(0).mean_rate() 91 | # we append the values 92 | dc_inputs.append(input_dc) 93 | frs.append(fr) 94 | # and plot them 95 | pylab.plot(dc_inputs,frs,'ok',label='data') 96 | 97 | pylab.xlabel('I (pA)') 98 | pylab.ylabel('spikes/s') 99 | pylab.title('IF-curve') 100 | 101 | # we fit the IF curve 102 | res_max_fitted,half_sat_fitted,n_fitted = fit_crf(dc_inputs,frs,(12.,8.,1.))[0] 103 | x_fitted = numpy.arange(0.,1000.0,1.) 
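# The model fitted above is a saturating, Naka-Rushton-like curve,
#   r(x) = res_max * x**n / (x**n + half_sat),
# so res_max sets the saturation rate and half_sat sets where the curve
# bends; leastsq has minimized the residuals r(dc_inputs) - frs. The plot
# call below evaluates this curve with the fitted parameters over x_fitted.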
104 | pylab.plot(x_fitted,(res_max_fitted*(x_fitted**n_fitted/(x_fitted**n_fitted+half_sat_fitted))),label='fit',color='red') 105 | 106 | pylab.legend() 107 | 108 | pylab.savefig('IF_curve_curve.png') 109 | 110 | -------------------------------------------------------------------------------- /examples/spike2/neurons_parameter_estimate.py: -------------------------------------------------------------------------------- 1 | import NeuroTools.spike2.spike2channels as spike2 2 | import pylab, numpy 3 | from scipy.optimize import leastsq 4 | pylab.rcParams['text.usetex'] = True 5 | # helper function 6 | def fit_exp(xs,ys,arg): 7 | def minimize(arg): 8 | tau, offset, a = arg 9 | res = [a*numpy.exp(-x/tau)+offset - ys[i] for i,x in enumerate(xs)] 10 | return res 11 | fitres = leastsq(minimize,arg) 12 | return fitres 13 | filename = '/Users/jenskremkow/src/NeuroTools/examples/spike2/080814_cortex1_1cell001.smr' 14 | a = spike2.load(filename) 15 | current = a[1] 16 | vm = a[2] 17 | #marker = a[32] 18 | t_min = 200. 19 | t_max = 800. 20 | # since the markers don't really work, we detect the DC onsets from the current channel instead 21 | marker = current.threshold_detection(threshold=-30.,sign='below') 22 | # current average 23 | current_average, time_axis = current.event_triggered_average(marker,average=True,t_min=t_min,t_max=t_max, with_time = True) 24 | I_base = current_average[0:int(t_min/current.dt)].mean() 25 | I = current_average.min()-I_base 26 | # vm average 27 | vm_average= vm.event_triggered_average(marker,average=True,t_min=t_min,t_max=t_max) 28 | vm_base = vm_average[0:int(t_min/vm.dt)].mean() 29 | # vm average for the fit; instead of slicing vm_average, we simply recalculate it starting at t_min = 0.0 30 | vm_average_for_fit, time_axis_for_fit = vm.event_triggered_average(marker,average=True,t_min=0.0,t_max=60., with_time=True) 31 | # fit 32 | tau, offset, a = fit_exp(time_axis_for_fit,vm_average_for_fit,(10.,-69.,3.))[0] 33 | x = numpy.arange(0,200,0.1) 34 | y = a*numpy.exp(-x/tau)+offset 35 | vm_step = offset-vm_base 36 | R = vm_step/I 37 | C = tau/R 38 | # figure 39 | lw = 2. 40 | pylab.subplot(2,1,1) 41 | pylab.plot(time_axis,current_average,'k',lw=lw) 42 | pylab.ylim(-40,-10) 43 | pylab.ylabel('I (pA)') 44 | pylab.subplot(2,1,2) 45 | pylab.plot(time_axis,vm_average,'k',lw=lw) 46 | pylab.plot(x,y,'r',lw=lw) 47 | pylab.xlabel('Time (ms)') 48 | pylab.ylabel('U (mV)') 49 | pylab.ylim(-71.,-68.5) 50 | pylab.text(100,-69.,r'R: %0.3f; C: %0.3f; $\tau$: %0.2f'%(R,C,tau)) 51 | pylab.savefig('figure_neuron_parameters.png') 52 | -------------------------------------------------------------------------------- /examples/stgen/inh_2Dmarkov_psth.py: -------------------------------------------------------------------------------- 1 | # This script generates Figure 5 in: 2 | 3 | # Eilif Muller, Lars Buesing, Johannes Schemmel, and Karlheinz Meier 4 | # Spike-Frequency Adapting Neural Ensembles: Beyond Mean Adaptation and Renewal Theories 5 | # Neural Comput. 2007 19: 2958-3010. 6 | 7 | # i.e. the PSTH for a 2D adapting Markov process undergoing 8 | # a step change in statistics due to a step stimulus.
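# The script estimates the PSTH by accumulating spike histograms over many
# independent trials and normalizing by bin width and trial count. In outline
# (a sketch of the recipe used below, shown with the script's own names):
#
#   psth[1:] += numpy.histogram(st, t)[0]    # counts per bin, per trial
#   psth = psth.astype(float)
#   psth /= dt * float(trials) / 1000.0      # counts -> rate in Hz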
9 | 10 | 11 | import numpy,sys 12 | from NeuroTools import stgen 13 | from pylab import zeros_like, plot 14 | 15 | dt = 10.0 16 | t = numpy.arange(0,1000.0,dt) 17 | a = numpy.ones_like(t)*11.346 18 | bq = numpy.ones_like(t)*0.1231*14.48 19 | 20 | # stepup 21 | 22 | i_start = t.searchsorted(400.0,'right')-1 23 | i_end = t.searchsorted(600.0,'right')-1 24 | 25 | a[i_start:i_end] = 92.25 26 | bq[i_start:i_end] = 0.09793*14.48 27 | 28 | psth = zeros_like(t) 29 | 30 | stg = stgen.StGen() 31 | 32 | tau_s = 110.0 33 | tau_r = 1.97 34 | qrqs = 221.96 35 | t_stop = 1000.0 36 | 37 | trials = 5000 38 | print "Running %d trials of %.2f milliseconds" % (trials, t_stop) 39 | for i in xrange(trials): 40 | if i%100==0: 41 | print "%d" % i, 42 | sys.stdout.flush() 43 | st = stg.inh_2Dadaptingmarkov_generator(a,bq,tau_s,tau_r,qrqs,t,t_stop,array=True) 44 | psth[1:]+=numpy.histogram(st,t)[0] 45 | 46 | print "\n" 47 | 48 | # normalize 49 | 50 | psth = psth.astype(float) 51 | psth/= dt*float(trials)/1000.0 52 | 53 | # this is for correct 'steps' plotting only 54 | psth[0] = psth[1] 55 | 56 | plot(t,psth,linestyle='steps') 57 | 58 | print "Done." 59 | 60 | 61 | -------------------------------------------------------------------------------- /examples/stgen/inh_gamma_psth.py: -------------------------------------------------------------------------------- 1 | # Generate the PSTH for an inhomogeneous gamma renewal process 2 | # with a step change in the rate (b changes, a stays fixed) 3 | 4 | 5 | 6 | import numpy 7 | from NeuroTools import stgen 8 | from pylab import zeros_like, plot 9 | import sys 10 | 11 | dt = 10.0 12 | t = numpy.arange(0,1000.0,dt) 13 | rate = numpy.ones_like(t)*20.0 14 | 15 | # stepup 16 | 17 | i_start = t.searchsorted(400.0,'right')-1 18 | i_end = t.searchsorted(600.0,'right')-1 19 | 20 | rate[i_start:i_end] = 40.0 21 | 22 | a = numpy.ones_like(t)*3.0 23 | b = numpy.ones_like(t)/a/rate 24 | 25 | psth = zeros_like(t) 26 | 27 | stg = stgen.StGen() 28 | 29 | trials = 5000 30 | tsim = 1000.0 31 | print "Running %d trials of %.2f milliseconds" % (trials, tsim) 32 | for i in xrange(trials): 33 | if i%100==0: 34 | print "%d" % i, 35 | sys.stdout.flush() 36 | st = stg.inh_gamma_generator(a,b,t,1000.0,array=True) 37 | psth[1:]+=numpy.histogram(st,t)[0] 38 | 39 | print "\n" 40 | 41 | # normalize 42 | 43 | psth = psth.astype(float) 44 | psth/= dt*float(trials)/1000.0 45 | 46 | # this is for correct 'steps' plotting only 47 | psth[0] = psth[1] 48 | 49 | plot(t,psth,linestyle='steps') 50 | 51 | -------------------------------------------------------------------------------- /examples/stgen/shotnoise_step.py: -------------------------------------------------------------------------------- 1 | # generate shot noise from a step-rate Poisson process 2 | import numpy 3 | from NeuroTools import stgen 4 | from pylab import clf, plot, xlabel, ylabel, xticks, yticks 5 | clf() 6 | stg = stgen.StGen() 7 | 8 | rate = numpy.array([6.0,10.0,6.0,6.0]) 9 | t = numpy.array([0.0,100.0,200.0,300.0]) 10 | 11 | 12 | q = 2.0 13 | tau = 10.0 14 | 15 | trials = 1000 16 | st = stg.inh_poisson_generator(rate, t, 300.0) 17 | g,tsn = stgen.shotnoise_fromspikes(st,q,tau,dt=0.1,t_start=0.0,t_stop=300.0,array=True) 18 | for i in range(trials-1): 19 | st = stg.inh_poisson_generator(rate, t, 300.0) 20 | g_tmp,tmp = stgen.shotnoise_fromspikes(st,q,tau,dt=0.1,t_start=0.0,t_stop=300.0,array=True) 21 | g+=g_tmp 22 | 23 | plot(t,rate,'b:',ls='steps-post',lw=2) 24 | plot(tsn,g/q/tau/trials*1000.0,'r-',lw=2) 25 | xlabel('time [ms]',size=20) 26 | xticks(size=16) 27 | ylabel(r'$\nu(t)$ [Hz]',size=20) 28 | yticks(size=16) 29 | 30 |
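# Sanity check behind the plot above: for Poisson input at rate nu, the shot
# noise g(t) = sum_k q*exp(-(t - t_k)/tau) has mean q*nu*tau (Campbell's
# theorem; with tau in ms and nu in Hz the mean is q*nu*tau/1000). Dividing
# the trial-summed g by q*tau*trials and multiplying by 1000 therefore
# recovers the underlying rate profile nu(t) in Hz, which is why the red
# curve should track the dotted step.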
-------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | numpy>=1.2 2 | scipy 3 | matplotlib 4 | IPython 5 | tables>=1.4 6 | rpy2 7 | interval 8 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | from distutils.core import setup 4 | 5 | setup( 6 | name = "NeuroTools", 7 | version = "0.3.1", 8 | package_dir={'NeuroTools': 'src'}, 9 | packages = ['NeuroTools', 10 | 'NeuroTools.utilities', 11 | 'NeuroTools.tisean', 12 | 'NeuroTools.spike2', 13 | 'NeuroTools.signals', 14 | 'NeuroTools.spike2.sonpy', 15 | 'NeuroTools.datastore', 16 | 'NeuroTools.parameters', 17 | 'NeuroTools.optimize', 18 | ], 19 | package_data={'NeuroTools': ['doc/*.rst']}, 20 | author = "The NeuralEnsemble Community", 21 | author_email = "neurotools@neuralensemble.org", 22 | description = "NeuroTools is a collection of tools to support all tasks associated with the analysis of neural activity - from neurophysiology to neural simulations. ", 23 | long_description=open("README.rst").read(), 24 | license = "GPLv2", 25 | keywords = ('computational neuroscience', 'simulation', 'analysis', 'visualization', 'parameters'), 26 | url = "http://neuralensemble.org/NeuroTools", 27 | classifiers = ['Development Status :: 3 - Alpha', 28 | 'Environment :: Console', 29 | 'License :: OSI Approved :: GNU General Public License (GPL)', 30 | 'Operating System :: POSIX', 31 | 'Topic :: Scientific/Engineering', 32 | 'Topic :: Utilities', 33 | 'Programming Language :: Python :: 2', 34 | 'Programming Language :: Python :: 2.7', 35 | 'Programming Language :: Python :: 3', 36 | 'Programming Language :: Python :: 3.5', 37 | ], 38 | ) 39 | -------------------------------------------------------------------------------- /src/datastore/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | NeuroTools.datastore 3 | ==================== 4 | 5 | The `datastore` package aims to present a consistent interface for persistent 6 | data storage, irrespective of storage back-end. 7 | 8 | It is intended for objects to be able to store part or all of their internal 9 | data, and so the storage/retrieval keys are based on the object identity and 10 | state. 11 | 12 | We assume that an object's identity is uniquely defined by its type (which may 13 | also depend on the source code revision number) and its parameters, while its 14 | state is defined by its identity and by its inputs (we should possibly add some 15 | concept of time to this). 16 | 17 | Hence, any object (which we call a 'component' in this context) must have 18 | the following attributes: 19 | 20 | `parameters` 21 | a `neurotools` `ParameterSet` object 22 | 23 | `input` 24 | another component or `None`; we assume a single 25 | input for now. A list of inputs should also be possible. We need to be wary 26 | of recurrent loops, in which two components both have each other as direct or 27 | indirect inputs). 28 | 29 | `full_type` 30 | the object class and module 31 | 32 | `version` 33 | the source-code version 34 | 35 | Classes 36 | ------- 37 | 38 | ShelveDataStore - Persistent data store based on the `shelve` module and the 39 | filesystem. 
40 | DjangoORMDataStore - Persistent data store using the Django ORM 41 | (object-relational mapping - an object-oriented interface 42 | to an SQL database) to store/retrieve keys/indices 43 | with data stored using `pickle` on the filesystem. 44 | 45 | """ 46 | 47 | import warnings 48 | from shelve_ds import ShelveDataStore 49 | 50 | # other possibilities... 51 | # FileSystemDataStore 52 | # SRBDataStore 53 | # HttpDataStore 54 | # HDF5DataStore 55 | 56 | 57 | -------------------------------------------------------------------------------- /src/datastore/interface.py: -------------------------------------------------------------------------------- 1 | class AbstractDataStore(object): 2 | """ 3 | Abstract base class for a persistent data store. 4 | """ 5 | 6 | def retrieve(self, component, attribute_name): 7 | """ 8 | Retrieve data belonging to a component. 9 | 10 | The component must have the following attributes: 11 | `parameters`: a `NeuroTools` `ParameterSet` object 12 | `input`: another component, or None 13 | `full_type`: the object class and module 14 | `version`: the source-code version 15 | """ 16 | # construct a way to get the data if it exists 17 | # e.g. create a unique key, an SQL query 18 | # this will involve getting a key or something 19 | # for the input object 20 | # try to get the data 21 | # if the data exists, 22 | # return it 23 | # else 24 | # return None 25 | raise NotImplementedError() 26 | 27 | def store(self, component, attribute_name, data): 28 | """ 29 | Store data belonging to a component. 30 | 31 | The component must have the following attributes: 32 | `parameters`: a `NeuroTools` `ParameterSet` object 33 | `input`: another component, or None 34 | `full_type`: the object class and module 35 | `version`: the source-code version 36 | """ 37 | # check we know how to handle the data type 38 | # construct a way to store the data, e.g. create a unique key, 39 | # an SQL query, etc 40 | # store the data 41 | # possibly we could check if data already exists, and raise an Exception if 42 | # it is different to the new data (should probably be a flag to control this, 43 | # because it might be heavyweight) 44 | raise NotImplementedError() -------------------------------------------------------------------------------- /src/datastore/keygenerators.py: -------------------------------------------------------------------------------- 1 | """ 2 | Key generators for data store objects 3 | """ 4 | import hashlib 5 | import pickle 6 | import sys 7 | import os.path 8 | 9 | 10 | def full_type(component): 11 | """Returns a string representing the full type of the component.""" 12 | if component.__class__.__name__ == 'module': # component is a module 13 | if component.__name__ == "__main__": 14 | return os.path.basename(sys.argv[0][:-3]) 15 | else: 16 | return component.__name__ 17 | else: 18 | return component.__module__ + '.' + component.__class__.__name__ 19 | 20 | 21 | def hash_pickle(component): 22 | """ 23 | Key generator. 24 | 25 | Use pickle to convert the component state dictionary to a string, then 26 | hash this string to give a unique identifier of fixed length.
27 | """ 28 | state = {'type': full_type(component), 29 | 'version': component.version, 30 | #'parameters_uri': component.parameters._url} 31 | 'parameters': component.parameters} 32 | if component.input is None: 33 | state['input'] = 'None' 34 | else: 35 | state['input'] = hash_pickle(component.input) 36 | return hashlib.sha1(pickle.dumps(state)).hexdigest() 37 | 38 | 39 | def join_with_underscores(component): 40 | """ 41 | Key generator. 42 | 43 | Return a string that contains all necessary information about the 44 | component state. 45 | """ 46 | s = "%s-r%s_%s" % (full_type(component), 47 | component.version, 48 | #component.parameters._url) 49 | component.parameters) 50 | if component.input is not None: 51 | s += "%s" % join_with_underscores(component.input) 52 | # remove characters that don't go well in filesystem paths 53 | replace = lambda s, r: s.replace(r[0], r[1]) 54 | replacements = [('/', '_'), (' ', '_'), ('[', ''), 55 | (']', ''), (':', ''), (',', '')] 56 | s = reduce(replace, [s] + replacements) 57 | return s 58 | -------------------------------------------------------------------------------- /src/datastore/shelve_ds.py: -------------------------------------------------------------------------------- 1 | """ 2 | Implementation of a `shelve`-based DataStore 3 | """ 4 | 5 | from NeuroTools.datastore.interface import AbstractDataStore 6 | from NeuroTools.datastore.keygenerators import join_with_underscores 7 | import os.path, shelve 8 | import logging 9 | 10 | class ShelveDataStore(AbstractDataStore): 11 | """Persistent data store based on the `shelve` module and the filesystem.""" 12 | 13 | def __init__(self, root_dir, key_generator=join_with_underscores): 14 | """ 15 | `root_dir` is a filesystem directory below which all shelve files 16 | will be saved. 17 | `key_generator` is a function that accepts a mapping and returns a string. 
18 | """ 19 | if os.path.exists(root_dir): 20 | if not os.path.isdir(root_dir): # should also test if the directory is readable/writeable 21 | raise Exception("The supplied root_dir exists but is not a directory.") 22 | else: 23 | os.mkdir(root_dir) 24 | self._root_dir = root_dir 25 | self._generate_key = key_generator 26 | 27 | def retrieve(self, component, attribute_name): 28 | __doc__ = AbstractDataStore.retrieve.__doc__ 29 | storage_key = self._generate_key(component) 30 | path = os.path.join(self._root_dir, storage_key+".shelf") 31 | if os.path.exists(path): 32 | shelf = shelve.open(path, flag='r') # 'r' means read-only 33 | if attribute_name in shelf: 34 | data = shelf[attribute_name] 35 | else: 36 | data = None 37 | shelf.close() 38 | return data 39 | else: 40 | return None 41 | 42 | def store(self, component, attribute_name, data): 43 | __doc__ = AbstractDataStore.store.__doc__ 44 | storage_key = self._generate_key(component) 45 | try: 46 | path = os.path.join(self._root_dir, storage_key+".shelf") 47 | shelf = shelve.open(path, flag='c') # 'c' means "create if doesn't exist" 48 | except Exception, errmsg: 49 | if errmsg[1] == 'File name too long': 50 | logging.error("shelf filename: '%s' is too long", os.path.join(self._root_dir, storage_key+".shelf")) 51 | raise 52 | shelf.update({attribute_name: data}) 53 | shelf.close() 54 | 55 | -------------------------------------------------------------------------------- /src/export.py: -------------------------------------------------------------------------------- 1 | 2 | from parameters import ParameterRange 3 | from parameters import ParameterTable 4 | 5 | 6 | def parameters_to_latex(filename,d,indent=0.5): 7 | lines = [] 8 | tables = [] 9 | 10 | def remove_non_valid_characters(lines): 11 | non_valid_characters = [('_',' ')] 12 | if isinstance(lines,list): 13 | new_lines = [] 14 | for line in lines: 15 | for non_char in non_valid_characters: 16 | line = line.replace(non_char[0],non_char[1]) 17 | new_lines.append(line) 18 | elif isinstance(lines,str): 19 | for non_char in non_valid_characters: 20 | lines = lines.replace(non_char[0],non_char[1]) 21 | new_lines = lines 22 | return new_lines 23 | 24 | def latex_table(k,v): 25 | """ 26 | """ 27 | tables.append((k,v)) 28 | 29 | def add_latex_tables(): 30 | """ 31 | """ 32 | def write_first_row(content): 33 | line = ' &' 34 | for column in content.column_labels(): 35 | line += ' '+column 36 | line += ' &' 37 | return line[:-1] 38 | def write_follwing_rows(content,lines): 39 | for row in content.rows(): 40 | line = row[0]+' &' 41 | for value in row[1].values(): 42 | if isinstance(value, basestring): 43 | line += value+' &' 44 | else: 45 | line += ' %s &'%value 46 | lines.append(line[:-1]+'\\\ \n') 47 | 48 | for table in tables: 49 | name,content = table 50 | pos = 'c'*(len(content.column_labels())+1) 51 | lines.append('\\begin{table*}[ht]\n') 52 | lines.append('\\begin{center}\n') 53 | lines.append('\\begin{tabular}{%s} \n'%pos) 54 | lines.append(write_first_row(content)+'\\\ \n') 55 | write_follwing_rows(content,lines) 56 | lines.append('\\end{tabular} \n') 57 | lines.append('\\end{center}\n') 58 | lines.append('\\caption{%s}\n'% name) 59 | lines.append('\\label{%s}'%name+'\n') 60 | lines.append('\\end{table*}\n') 61 | 62 | def walk(d,indent, ind_incr): 63 | """ 64 | """ 65 | s = [] 66 | keys = d.keys() 67 | keys.sort() 68 | for key in keys: 69 | k = key 70 | v = d[key] 71 | if hasattr(v, 'items') and not isinstance(v,ParameterTable): 72 | s.append("\\hspace*{%scm} %s: " % (indent, k)) 73 | 
s.append(walk(v, indent+ind_incr, ind_incr)) 74 | s.append('\\hspace*{%scm} ' % indent) 75 | elif isinstance(v,ParameterRange): 76 | s.append("\\hspace*{%scm} %s : %s" % (indent, k, str(v._values))) 77 | elif isinstance(v,ParameterTable): 78 | s.append("\\hspace*{%scm} %s : see Table~\\ref{%s} " % (indent, k,k)) 79 | latex_table(k,v) 80 | elif isinstance(v, basestring): 81 | s.append("\\hspace*{%scm} %s : %s" % (indent, k, v)) 82 | else: 83 | s.append("\\hspace*{%scm} %s : %s" % (indent, k, v)) 84 | return '\\\ \n'.join(s) 85 | 86 | 87 | line = walk(d,0.0,indent) 88 | f = open(filename,'w') 89 | line = remove_non_valid_characters(line) 90 | f.write(line) 91 | f.close() 92 | 93 | add_latex_tables() 94 | f = open('tables_'+filename,'w') 95 | lines = remove_non_valid_characters(lines) 96 | f.writelines(lines) 97 | f.close() 98 | 99 | 100 | 101 | 102 | 103 | -------------------------------------------------------------------------------- /src/optimize/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | ======== 3 | Optimize 4 | ======== 5 | 6 | Optimize is a module to search parameter spaces for optima of an error function. 7 | 8 | $Id: __init__.py 366 2008-12-23 21:58:19Z mschmucker $ 9 | """ 10 | 11 | #__author__="Michael Schmuker" 12 | #__date__ ="$23.12.2008 10:55:36$" 13 | #__version__ = "$Rev$" 14 | # 15 | ##__all__ = ['parameter_search', 'optimizers'] 16 | # 17 | #from parameter_search import * 18 | #from optimizers import * 19 | -------------------------------------------------------------------------------- /src/optimize/optimizers.py: -------------------------------------------------------------------------------- 1 | """ 2 | =========== 3 | optimizers.py 4 | =========== 5 | 6 | Contains functions to search minima in parameter space. Uses parameter_search 7 | to do the actual searching. 8 | 9 | $Id: optimizers.py 366 2008-12-23 21:58:19Z mschmucker $ 10 | """ 11 | 12 | __author__="Michael Schmuker" 13 | __date__ ="$23.12.2008 11:17:16$" 14 | 15 | from NeuroTools.optimize import parameter_search 16 | from NeuroTools.parameters import ParameterSpace 17 | 18 | class GridSearcher(): 19 | """ 20 | Simple grid search algorithm. Calls the error function func with all 21 | parameters in grid. Returns the first parameter combination which yielded 22 | the minimal value for func, together with that value in a dictionary. 23 | """ 24 | def __init__(self, grid, func, 25 | searcher = parameter_search.ParameterSearcher, 26 | searcherargs = {}): 27 | """ 28 | Initialize the grid searcher. 29 | Parameters: 30 | 31 | grid - NeuroTools.ParameterSpace scpecifying the grid. 32 | func - function to minimize. It should take a dictionary with its 33 | parameters and return a float. 34 | searcher - the searcher backend to use. Should be of type 35 | NeuroTools.optimize.parameter_search.ParameterSearcher 36 | or a child thereof. Default is to use the plain 37 | ParameterSearcher. 38 | searcherargs - dictionary with additional keyword arguments for the searcher. 39 | """ 40 | import types 41 | if type(grid) != ParameterSpace: 42 | raise Exception("The grid must be defined as " + 43 | "NeuroTools.ParameterSpace.") 44 | self.grid = grid 45 | param_iter = grid.iter_inner() 46 | if type(func ) != types.FunctionType: 47 | raise Exception("func must be a function.") 48 | self.searcher = searcher(dict_iterable = param_iter, 49 | func = func, 50 | **searcherargs) 51 | 52 | def search(self): 53 | """ 54 | Do the actual searching. 
55 | """ 56 | min_params = None 57 | self.searcher.search() 58 | retvals = self.searcher.harvest() 59 | import numpy 60 | minindex = numpy.argmin(retvals) 61 | min_val = retvals[minindex] 62 | # retrieve the parameter combination that yielded the minimum value 63 | tmp_iter = self.grid.iter_inner() 64 | for i in range(minindex): 65 | tmp_iter.next() 66 | min_params = tmp_iter.next() 67 | return {'min_params': min_params, 'min_value':min_val} 68 | 69 | 70 | 71 | 72 | -------------------------------------------------------------------------------- /src/random.py: -------------------------------------------------------------------------------- 1 | """ 2 | NeuroTools.random 3 | ===================== 4 | 5 | A set of classes representing statistical distributions, with an interface that 6 | is compatible with the ParameterSpace class in the parameters module. 7 | 8 | Classes 9 | ------- 10 | 11 | GammaDist - gamma.pdf(x,a,b) = x**(a-1)*exp(-x/b)/gamma(a)/b**a 12 | NormalDist - normal distribution 13 | UniformDist - uniform distribution 14 | 15 | """ 16 | 17 | from NeuroTools import check_dependency 18 | 19 | import numpy, numpy.random 20 | 21 | 22 | class ParameterDist(object): 23 | 24 | def __init__(self,**params): 25 | self.params = params 26 | self.dist_name = 'ParameterDist' 27 | 28 | def __repr__(self): 29 | if len(self.params)==0: 30 | return '%s()'% (self.dist_name,) 31 | s = '%s('% (self.dist_name,) 32 | for key in self.params: 33 | s+='%s=%s,' % (key,str(self.params[key])) 34 | return s[:-1]+')' 35 | 36 | def next(self,n=1): 37 | raise NotImplementedError('This is an abstract base class and cannot be used directly') 38 | 39 | def from_stats(self,vals,bias=0.0,expand=1.0): 40 | self.__init__(mean=numpy.mean(vals)+bias, std=numpy.std(vals)*expand) 41 | 42 | def __eq__(self, o): 43 | # should we track the state of the rng and return False if it is different between self and o? 44 | if (type(self) == type(o) and 45 | self.dist_name == o.dist_name and 46 | self.params == o.params): 47 | return True 48 | else: 49 | return False 50 | 51 | class GammaDist(ParameterDist): 52 | """ 53 | gamma.pdf(x,a,b) = x**(a-1)*exp(-x/b)/gamma(a)/b**a 54 | 55 | Yields strictly positive numbers. 56 | Generally the distribution is implemented by scipy.stats.gamma.pdf(x/b,a)/b 57 | For more info, in ipython type: 58 | >>> ? scipy.stats.gamma 59 | 60 | """ 61 | 62 | def __init__(self,mean=None,std=None,repr_mode='ms',**params): 63 | """ 64 | repr_mode specifies how the dist is displayed, 65 | either mean,var ('ms', the default) or a,b ('ab') 66 | """ 67 | 68 | if check_dependency('scipy'): 69 | self.next = self._next_scipy 70 | 71 | self.repr_mode = repr_mode 72 | if 'm' in params and mean==None: 73 | mean = params['m'] 74 | if 's' in params and std==None: 75 | std = params['s'] 76 | 77 | # both mean and std not specified 78 | if (mean,std)==(None,None): 79 | if 'a' in params: 80 | a = params['a'] 81 | else: 82 | a = 1.0 83 | if 'b' in params: 84 | b = params['b'] 85 | else: 86 | b = 1.0 87 | else: 88 | if mean==None: 89 | mean = 0.0 90 | if std==None: 91 | std=1.0 92 | a = mean**2/std**2 93 | b = mean/a 94 | ParameterDist.__init__(self,a=a,b=b) 95 | self.dist_name = 'GammaDist' 96 | 97 | def _next_scipy(self,n=1): 98 | import scipy.stats 99 | return scipy.stats.gamma.rvs(self.params['a'],size=n)*self.params['b'] 100 | def _next_no_scipy(self,n=1): 101 | raise Exception('Error scipy was not found at import time. 
GammaDist realization disabled.') 102 | 103 | next = _next_no_scipy 104 | 105 | def mean(self): 106 | return self.params['a']*self.params['b'] 107 | 108 | def std(self): 109 | return self.params['b']*numpy.sqrt(self.params['a']) 110 | 111 | def __repr__(self): 112 | if self.repr_mode == 'ms': 113 | return '%s(m=%f,s=%f)' % (self.dist_name,self.mean(),self.std()) 114 | else: 115 | return '%s(a=%f,b=%f)' % (self.dist_name,self.params['a'],self.params['b']) 116 | 117 | 118 | class NormalDist(ParameterDist): 119 | """ 120 | normal distribution with parameters 121 | mean and std 122 | 123 | """ 124 | 125 | def __init__(self,mean=0.0,std=1.0): 126 | ParameterDist.__init__(self,mean=mean,std=std) 127 | self.dist_name = 'NormalDist' 128 | 129 | def next(self,n=1): 130 | return numpy.random.normal(loc=self.params['mean'],scale=self.params['std'],size=n) 131 | 132 | 133 | class UniformDist(ParameterDist): 134 | """ 135 | uniform distribution with min,max 136 | """ 137 | 138 | def __init__(self,min=0.0,max=1.0, return_type=float): 139 | ParameterDist.__init__(self,min=min,max=max) 140 | self.dist_name = 'UniformDist' 141 | self.return_type = return_type 142 | 143 | def next(self,n=1): 144 | vals = numpy.random.uniform(low=self.params['min'],high=self.params['max'],size=n) 145 | if self.return_type != float: 146 | vals = vals.astype(self.return_type) 147 | return vals 148 | 149 | def from_stats(self,vals,bias=0.0,expand=1.0): 150 | mn = numpy.min(vals) 151 | mx = numpy.max(vals) 152 | center = 0.5*(mx+mn)+bias 153 | hw = 0.5*(mx-mn)*expand 154 | self.__init__(min=center-hw,max=center+hw) 155 | -------------------------------------------------------------------------------- /src/signals/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | NeuroTools.signals 3 | ================== 4 | 5 | A collection of functions to create, manipulate and play with analog signals. 6 | 7 | Classes 8 | ------- 9 | 10 | AnalogSignal - object representing an analog signal, with its data. Can be used to do 11 | threshold detection, event triggered averages, ... 12 | AnalogSignalList - list of AnalogSignal objects, again with methods such as mean, std, plot, 13 | and so on 14 | VmList - AnalogSignalList object used for Vm traces 15 | ConductanceList - AnalogSignalList object used for conductance traces 16 | CurrentList - AnalogSignalList object used for current traces 17 | 18 | Functions 19 | --------- 20 | 21 | load_vmlist - function to load a VmList object (inherits from AnalogSignalList) from a file. 22 | Same comments on format as previously. 23 | load_currentlist - function to load a CurrentList object (inherits from AnalogSignalList) from a file. 24 | Same comments on format as previously. 25 | load_conductancelist - function to load a ConductanceList object (inherits from AnalogSignalList) from a file. 26 | Same comments on format as previously. load_conductancelist returns two 27 | ConductanceLists, one for the excitatory conductance and one for the inhibitory conductance 28 | load - a generic loader for all the previous load methods. 29 | """ 30 | 31 | from .spikes import * 32 | from .analogs import * 33 | 34 | def load(user_file, datatype): 35 | """ 36 | Convenient data loader for results produced by pyNN. Return the corresponding 37 | NeuroTools object. Datatype argument may become optional in the future, but 38 | for now it is necessary to specify the type of the recorded data.
To have a better control 39 | on the parameters of the NeuroTools objects, see the load_*** functions. 40 | 41 | Inputs: 42 | user_file - the user_file object with read/write methods. By default, if a string 43 | is provided, a StandardTextFile object is created 44 | datatype - A string to specify the type of the data: 45 | 's' : spikes 46 | 'g' : conductances 47 | 'v' : membrane traces 48 | 'c' : currents 49 | 50 | Examples: 51 | >> load("simulation.dat",'v') 52 | >> load("spikes.dat",'s') 53 | >> load(StandardPickleFile("simulation.dat"), 'g') 54 | >> load(StandardTextFile("test.dat"), 's') 55 | 56 | See also: 57 | load_spikelist, load_conductancelist, load_vmlist, load_currentlist 58 | """ 59 | if datatype in ('s', 'spikes'): 60 | return load_spikelist(user_file) 61 | elif datatype == 'v': 62 | return load_vmlist(user_file) 63 | elif datatype == 'c': 64 | return load_currentlist(user_file) 65 | elif datatype == 'g': 66 | return load_conductancelist(user_file) 67 | else: 68 | raise Exception("The datatype %s is not handled ! Should be 's','g','c' or 'v'" %datatype) 69 | -------------------------------------------------------------------------------- /src/signals/intervals.py: -------------------------------------------------------------------------------- 1 | from NeuroTools import check_dependency 2 | 3 | HAVE_INTERVAL = check_dependency('interval') 4 | 5 | if HAVE_INTERVAL: 6 | from interval import * 7 | 8 | import numpy 9 | 10 | class Interval(object): 11 | """ 12 | Interval(start_times, end_times). 13 | 14 | Inputs: 15 | start_times - A list of the start times for all the sub intervals considered, in ms 16 | end_times - A list of the stop times for all the sub intervals considered, in ms 17 | 18 | Examples: 19 | >> itv = Interval([0,100,200,300],[50,150,250,350]) 20 | >> itv.time_parameters() 21 | 0, 350 22 | """ 23 | 24 | def __init__(self, start_times, end_times) : 25 | """ 26 | Constructor of the Interval object. 27 | 28 | """ 29 | if HAVE_INTERVAL: 30 | self.start_times = start_times 31 | self.end_times = end_times 32 | # write the intervals to an interval object (pyinterval) 33 | scalar_types = (int, float, numpy.float, numpy.float32, numpy.float64, numpy.int, numpy.int8, 34 | numpy.int16, numpy.int32, numpy.int64) 35 | test = isinstance(start_times, scalar_types) 36 | if test: 37 | self.start_times = [self.start_times] 38 | test = isinstance(end_times, scalar_types) 39 | if test: 40 | self.end_times = [self.end_times] 41 | if len(self.start_times) != len(self.end_times) : 42 | raise Exception("There should be an equal number of starts and stops") 43 | self.interval_data = interval(*numpy.transpose(numpy.array([start_times,end_times]))) 44 | else: 45 | test = isinstance(start_times, int) or isinstance(start_times, float) 46 | assert test, "Interval package not present, start_times should be a number !" 47 | test = isinstance(end_times, int) or isinstance(end_times, float) 48 | assert test, "Interval package not present, end_times should be a number !"
49 | self.start_times = [start_times] 50 | self.end_times = [end_times] 51 | 52 | def intersect(self, itv) : 53 | self.interval_data = self.interval_data & itv.interval_data 54 | 55 | def union(self, itv) : 56 | self.interval_data = self.interval_data | itv.interval_data 57 | 58 | def __str__(self): 59 | return str(self.interval_data) 60 | 61 | def __len__(self): 62 | return numpy.shape(self.interval_data)[0] 63 | 64 | def __getslice__(self, i, j): 65 | """ 66 | Return the sub-intervals of the Interval that fall within [i, j] 67 | """ 68 | return self.interval_data & interval([i,j]) 69 | 70 | def time_parameters(self): 71 | """ 72 | Return the time parameters of the Interval (t_start, t_stop) 73 | """ 74 | bounds = self.interval_data.extrema 75 | return (bounds[0][0],bounds[-1][0]) 76 | 77 | def t_start(self): 78 | if HAVE_INTERVAL: 79 | return self.interval_data.extrema[0][0] 80 | else: 81 | return self.start_times[0] 82 | 83 | def t_stop(self): 84 | if HAVE_INTERVAL: 85 | return self.interval_data.extrema[-1][0] 86 | else: 87 | return self.end_times[0] 88 | 89 | def copy(self): 90 | """ 91 | Return a copy of the Interval object 92 | """ 93 | return Interval(self.start_times, self.end_times) 94 | 95 | def offset(self, start=None, end=None) : 96 | """ 97 | Globally modifies the intervals by offsetting the start and end of the stimulation. 98 | 99 | The start and/or stop arguments should be tuples (limit, offset) where limit 100 | defines the reference limit (0 : start, 1: end) from where the new value is 101 | defined by adding the offset value to the current start/end. 102 | """ 103 | n_intervals = len(list(self.interval_data)) 104 | new_iv = numpy.zeros((n_intervals,2)) 105 | if start is None : 106 | for i in range(n_intervals) : 107 | new_iv[i,0] = self.interval_data[i][0] 108 | else : 109 | if start[0] == 0 : 110 | for i in range(n_intervals) : 111 | new_iv[i,0] = self.interval_data[i][0] + start[1] 112 | if start[0] == 1 : 113 | for i in range(n_intervals) : 114 | new_iv[i,0] = self.interval_data[i][1] + start[1] 115 | 116 | if end is None : 117 | for i in range(n_intervals) : 118 | new_iv[i,1] = self.interval_data[i][1] 119 | else : 120 | if end[0] == 0 : 121 | for i in range(n_intervals) : 122 | new_iv[i,1] = self.interval_data[i][0] + end[1] 123 | if end[0] == 1 : 124 | for i in range(n_intervals) : 125 | new_iv[i,1] = self.interval_data[i][1] + end[1] 126 | 127 | self.interval_data = interval(*list(new_iv)) 128 | 129 | def total_duration(self) : 130 | """ 131 | Return the total duration of the interval 132 | """ 133 | tot_duration = 0 134 | for i in self.interval_data : 135 | tot_duration += i[1] - i[0] 136 | return tot_duration 137 | 138 | 139 | def slice_times(self, times): 140 | spikes_selector = numpy.zeros(len(times), dtype=numpy.bool) 141 | if HAVE_INTERVAL: 142 | for itv in self.interval_data : 143 | spikes_selector = spikes_selector + (times > itv[0])*(times <= itv[1]) 144 | else: 145 | spikes_selector = (times >= self.t_start()) & (times <= self.t_stop()) 146 | return numpy.extract(spikes_selector, times) 147 | 148 | 149 | 150 | #def build_psth(spiketrain, eventtrain, before_Dt, after_Dt, intervals=None): 151 | #""" 152 | #build a psth of the spikes around the events 153 | 154 | #If intervals != None, the eventtrain is restricted to the intervals provided. 155 | #""" 156 | ## tested : generates a correct PSTH when loaded with following data : 157 | ##spikes = [0.,1.,2.,3.,4.,5.,6.,7.,8.,9.,100.,101.,102.,103.,104.,105.,106.,200.,201.,202.,203.,204.]
158 | ##
159 | #if intervals != None:
160 | ## keep only the events that are included in IntervalTrain object
161 | #eventtrain = intervals.of_spikes(eventtrain)
162 | 
163 | #nRepeats = len(eventtrain.spike_times)
164 | ## accumulate spikes around the events.
165 | #spikes_around_event = []
166 | #for event_time in eventtrain.spike_times :
167 | #spikes_around_event.extend(np.extract((spiketrain.spike_times > event_time - before_Dt)*(spiketrain.spike_times <= event_time + after_Dt), spiketrain.spike_times) - event_time)
168 | 
169 | #return np.sort(spikes_around_event, kind="quicksort"), nRepeats
170 | 
-------------------------------------------------------------------------------- /src/spike2/__init__.py: --------------------------------------------------------------------------------
1 | """
2 | ==============
3 | Spike2 Library
4 | ==============
5 | For reading data from CED's Spike2 Son files into the NeuroTools environment.
6 | The data is read from the CED files using sonpy.
7 | 
8 | Jens Kremkow
9 | INCM-CNRS, Marseille, France
10 | ALUF, Freiburg, Germany
11 | 2008
12 | 
13 | 
14 | sonpy is written by:
15 | 
16 | Antonio Gonzalez
17 | Department of Neuroscience
18 | Karolinska Institutet
19 | Antonio.Gonzalez@cantab.net
20 | 
21 | http://www.neuro.ki.se/broberger/
22 | 
23 | ###########################################
24 | 
25 | Usage:
26 | 
27 | single_channel = load('filename',channels=[2])
28 | dict_of_all_channels = load('filename')
29 | 
30 | """
31 | 
32 | from sonpy import son
33 | 
34 | __all__ = ['spike2channels']
-------------------------------------------------------------------------------- /src/spike2/sonpy/README: --------------------------------------------------------------------------------
1 | SON LIBRARY for PYTHON.
2 | 
3 | For reading data from CED's Spike2 Son files.
4 | 
5 | Based on SON Library 2.0 for MATLAB, written by Malcolm Lidierth at
6 | King's College London. See http://www.kcl.ac.uk/depsta/biomedical/
7 | cfnr/lidierth.html
8 | 
9 | + Using the library +
10 | For details read the file __init__.py, or type the following in a Python
11 | shell (after placing the sonpy directory in your Python search path):
12 | >>> from sonpy import son
13 | >>> help(son)
14 | 
15 | 
16 | --
17 | Antonio Gonzalez
18 | Department of Neuroscience
19 | Karolinska Institutet
20 | Antonio.Gonzalez@cantab.net
21 | 
22 | http://www.neuro.ki.se/broberger/
23 | 
24 | June 2006
25 | 
-------------------------------------------------------------------------------- /src/spike2/sonpy/__init__.py: --------------------------------------------------------------------------------
1 | '''
2 | ===========
3 | SON LIBRARY
4 | ===========
5 | For reading data from CED's Spike2 Son files.
6 | 
7 | Based on SON Library 2.0 for MATLAB, written by Malcolm Lidierth at
8 | King's College London.
9 | (See http://www.kcl.ac.uk/depsta/biomedical/cfnr/lidierth.html)
10 | 
11 | Antonio Gonzalez
12 | Department of Neuroscience
13 | Karolinska Institutet
14 | Antonio.Gonzalez@cantab.net
15 | 
16 | http://www.neuro.ki.se/broberger/
17 | 
18 | June 2006
19 | 
20 | Last modified: 2006-07-18
21 | 
22 | ============
23 | Requirements
24 | ============
25 | To use this library you need SciPy (http://scipy.org).
26 | 
27 | =====
28 | Usage
29 | =====
30 | It can be used in any of three ways, depending on your needs:
31 | 
32 | (A) Reading a channel:
33 | 
34 | >>> from sonpy import son
35 | >>> filename = '/path/file.smr'
36 | >>> chan = son.Channel(chanNumber, filename)
37 | >>> chan.data() # Returns channel data.
Type help(chan.data) for details.
38 | >>> chan.type() # Channel type.
39 | >>> chan.fhead.[...] # File header. See dir(chan.fhead)
40 | >>> chan.info.[...] # Channel information. See dir(chan.info)
41 | 
42 | (B) Opening a Son file, then reading one or more channels:
43 | 
44 | >>> from sonpy import son
45 | >>> filename = '/path/file.smr'
46 | >>> f = son.File(filename)
47 | >>> f.chanlist() # Prints a list of available channels in file 'filename'.
48 | >>> f.getchannel(N) # Selects a channel 'N', where N is the channel number.
49 | >>> f.chNN.data() # Returns data from channel 'N'.
50 | >>> f.chNN.type() # Returns channel 'N' type.
51 | 
52 | (C) Reading Son file information:
53 | 
54 | >>> from sonpy import son
55 | >>> filename = '/path/file.smr'
56 | >>> f = son.FileHeader(filename)
57 | >>> dir(f) # Elements in file header.
58 | >>> f.chanlist() # Prints a list of available channels in file 'filename'.
59 | '''
60 | 
61 | import son
62 | __all__ = ['son']
63 | 
-------------------------------------------------------------------------------- /src/spike2/sonpy/_marker.py: --------------------------------------------------------------------------------
1 | def data(Chan, start=None, stop=None, timeunits='seconds', as_float=True):
2 | '''
3 | Reads data from an event/marker channel (i.e., an Event, Marker, AdcMark,
4 | RealMark, or TextMark channel) of a Son file.
5 | 
6 | 'start', 'stop' set the data limits to be returned, in blocks. If
7 | only start is set, data from only that block will be returned.
8 | 
9 | 'timeunits' scales time to the appropriate unit. Valid options are 'ticks',
10 | 'microseconds', 'milliseconds' or 'seconds'.
11 | 
12 | 'as_float' only makes sense for AdcMark or RealMark channels. If
13 | True, returns data as floating point values (scaling and applying the
14 | offset for Adc data). Otherwise, data will be of type int16.
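    A minimal usage sketch (the file path and channel number below are
    hypothetical, and a marker-type channel is assumed):
    >>> from sonpy import son
    >>> chan = son.Channel(3, '/path/file.smr')
    >>> events = chan.data(timeunits='milliseconds')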
15 | '''
16 | from scipy import io, zeros
17 | fid = Chan.fhead.fid
18 | blockheader = Chan.blockheader
19 | SizeOfHeader = 20 # Block header is 20 bytes long
20 | 
21 | # ====================================
22 | # = Set start and end blocks to read =
23 | # ====================================
24 | if not start and not stop:
25 | startBlock, endBlock = 0, Chan.info.blocks
26 | elif start and not stop:
27 | startBlock, endBlock = start-1, start
28 | elif start and stop:
29 | startBlock, endBlock = start-1, min([stop, Chan.info.blocks])
30 | 
31 | # == Sum of samples in required blocks ==
32 | nItems = sum(blockheader[4, startBlock:endBlock])
33 | 
34 | # =============
35 | # = Read data =
36 | # =============
37 | # + Event data +
38 | if Chan.info.kind in [2, 3, 4]:
39 | # pre-allocate memory:
40 | timings = zeros(nItems, 'int32')
41 | # read data:
42 | pointer = 0
43 | for block in range(startBlock, endBlock):
44 | fid.seek(blockheader[0, block] + SizeOfHeader)
45 | timings[pointer : pointer+blockheader[4, block]] =\
46 | io.fread(fid, blockheader[4, block], 'l')
47 | pointer += blockheader[4, block]
48 | 
49 | # + Marker data +
50 | elif Chan.info.kind == 5:
51 | # pre-allocate memory:
52 | timings = zeros(nItems, 'int32')
53 | markers = zeros([nItems, 4], 'uint8')
54 | # read data:
55 | count = 0
56 | for block in range(startBlock, endBlock):
57 | fid.seek(blockheader[0, block] + SizeOfHeader) # start of block
58 | for i in range(blockheader[4, block]): # loop for each marker
59 | timings[count] = io.fread(fid, 1, 'l') # time
60 | markers[count] = io.fread(fid, 4, 'B') # 4x marker bytes
61 | count += 1
62 | markers = [chr(x) for x in markers[:,0]]
63 | 
64 | # + AdcMark data +
65 | elif Chan.info.kind == 6:
66 | nValues = Chan.info.nExtra/2 # 2 because 2 bytes per int16 value
67 | # pre-allocate memory:
68 | timings = zeros(nItems, 'int32')
69 | markers = zeros([nItems, 4], 'uint8')
70 | adc = zeros([nItems, nValues], 'int16')
71 | # read data:
72 | count = 0
73 | for block in range(startBlock, endBlock):
74 | fid.seek(blockheader[0, block] + SizeOfHeader) # start of block
75 | for i in range(blockheader[4, block]): # loop for each marker
76 | timings[count] = io.fread(fid, 1, 'l') # time
77 | markers[count] = io.fread(fid, 4, 'B') # 4x marker bytes
78 | adc[count] = io.fread(fid, nValues, 'h')
79 | count += 1
80 | if as_float: # convert the raw int16 samples to scaled floats
81 | from _waveform import _adc_to_double
82 | adc = _adc_to_double(Chan, adc)
83 | 
84 | # + RealMark data +
85 | elif Chan.info.kind == 7:
86 | nValues = Chan.info.nExtra/4 # each value has 4 bytes (single precision)
87 | # pre-allocate:
88 | timings = zeros(nItems, 'int32')
89 | markers = zeros([nItems, 4], 'uint8')
90 | real = zeros([nItems, nValues], 'single')
91 | # read data:
92 | count = 0
93 | for block in range(startBlock, endBlock):
94 | fid.seek(blockheader[0, block] + SizeOfHeader) # start of block
95 | for i in range(blockheader[4, block]): # loop for each marker
96 | timings[count] = io.fread(fid, 1, 'l') # time
97 | markers[count] = io.fread(fid, 4, 'B') # 4x marker bytes
98 | real[count] = io.fread(fid, nValues, 'f')
99 | count += 1
100 | if not as_float: # int16 output was requested
101 | from _waveform import _real_to_adc
102 | real = _real_to_adc(real)
103 | 
104 | # + TextMark data +
105 | elif Chan.info.kind == 8:
106 | # pre-allocate memory:
107 | timings = zeros(nItems, 'int32')
108 | markers = zeros([nItems, 4], 'uint8')
109 | text = zeros([nItems, Chan.info.nExtra], 'S1')
110 | # read data:
111 | count = 0
112 | for block in range(startBlock, endBlock):
113 | fid.seek(blockheader[0, block] + SizeOfHeader) # start of block
114 | for i in range(blockheader[4, block]): # loop for each marker
115 | timings[count] = io.fread(fid, 1, 'l') # time
116 | markers[count] = io.fread(fid, 4, 'B') # 4x marker bytes
117 | text[count] = io.fread(fid, Chan.info.nExtra, 'c')
118 | count += 1
119 | 
120 | # ================
121 | # = Convert time =
122 | # ================
123 | timings = Chan.fhead._ticks_to_seconds(timings, timeunits)
124 | Chan.info.timeunits = timeunits
125 | Chan.info.Epochs = '%i--%i of %i block(s)'\
126 | %(startBlock+1, endBlock, Chan.info.blocks)
127 | 
128 | # ===============
129 | # = Return data =
130 | # ===============
131 | if Chan.info.kind in [2, 3, 4]:
132 | data = timings
133 | elif Chan.info.kind == 5:
134 | data = zip(timings, markers)
135 | elif Chan.info.kind == 6:
136 | data = zip(timings, markers, adc)
137 | elif Chan.info.kind == 7:
138 | data = zip(timings, markers, real)
139 | elif Chan.info.kind == 8:
140 | data = zip(timings, markers, text)
141 | return data
142 | 
-------------------------------------------------------------------------------- /src/spike2/spike2channels.py: --------------------------------------------------------------------------------
1 | import numpy
2 | import NeuroTools.signals as signals
3 | from NeuroTools.spike2.sonpy import son
4 | 
5 | """
6 | Jens Kremkow
7 | INCM-CNRS, Marseille, France
8 | ALUF, Freiburg, Germany
9 | 2008
10 | """
11 | 
12 | 
13 | def load(filename,channels=None,start=None,stop=None):
14 | if channels is None:
15 | fileheader = son.FileHeader(filename)
16 | channels = fileheader.return_chan_list()
17 | 
18 | if len(channels) == 1:
19 | return load_channel(filename,channel=channels[0],start=start,stop=stop)
20 | 
21 | channel_obj = {}
22 | for chan in channels:
23 | try:
24 | channel_obj[chan] = load_channel(filename,channel=chan,start=start,stop=stop)
25 | except ValueError:
26 | print 'channel ',chan,' could not be read'
27 | return channel_obj
28 | 
29 | def load_channel(filename,channel=1,start=None,stop=None):
30 | chan = son.Channel(channel,filename)
31 | data = numpy.array(chan.data(start=start,stop=stop))
32 | if chan.info.type() == 'Adc':
33 | dt = chan.info.dt[0]
34 | obj = Adc(data,dt)
35 | else:
36 | exec('obj = %s(data)'%chan.info.type()) # instantiate the class named after the channel type
37 | 
38 | obj.blockheader = chan.blockheader
39 | obj.fhead = chan.fhead
40 | obj.info = chan.info
41 | return obj
42 | 
43 | 
44 | class Channel(numpy.ndarray):
45 | """
46 | Spike2 channel types which are not yet supported by NeuroTools.signals.
47 | 
48 | timeunits = microseconds,milliseconds,seconds
49 | """
50 | def __new__(cls, data):
51 | return numpy.array(data).view(cls)
52 | def __init__(self,data):
53 | self.timeunits = None
54 | def time(self,timestamps=None,timeunits='seconds'):
55 | """Return the sample times (or the given timestamps) converted to `timeunits`."""
56 | 
57 | interval = self.info.lChanDvd
58 | if timeunits == 'ticks':
59 | interval = 1
60 | if interval == 0:
61 | interval = 1.
62 | 
63 | if timestamps is None:
64 | return self.fhead._ticks_to_seconds(numpy.arange(len(self))*interval,timeunits)
65 | else:
66 | return self.fhead._ticks_to_seconds(timestamps*interval,timeunits)
67 | 
68 | def time_to_ticks(self,timestamps,timeunits='seconds'):
69 | """
70 | Converts a timestamp into ticks. The timeunits of the timestamp should be given; the default is seconds.
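        A minimal sketch (`chan` here is hypothetical, e.g. a channel object
        returned by load_channel); two timestamps given in seconds become tick counts:
        >>> chan.time_to_ticks([0.5, 1.0], timeunits='seconds')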
71 | """ 72 | timestamps = numpy.array(timestamps) 73 | interval = self.info.lChanDvd[0] 74 | 75 | if timeunits is 'microseconds': timestamps = (timestamps/1e6) 76 | elif timeunits is 'milliseconds': timestamps = (timestamps/1e3) 77 | 78 | timestamps = (timestamps/float(interval))/((self.fhead.usPerTime*self.fhead.timePerADC))/self.fhead.dTimeBase 79 | return timestamps.round().astype(int) 80 | 81 | def threshold_detection(self,threshold=None,timeunits='milliseconds',return_events=False,return_SpikeTrain=False): 82 | """ 83 | 84 | """ 85 | if threshold == None: 86 | print 'please give a threhold' 87 | return 0 88 | 89 | above = numpy.where(self > threshold)[0] 90 | take = (numpy.diff(above))>1. 91 | take[0] = True 92 | 93 | time = self.time(timeunits=timeunits) 94 | self.events = time[above][take] 95 | if return_events: 96 | return self.events 97 | if return_SpikeTrain: 98 | if timeunits is not 'milliseconds': 99 | time = self.time(timeunits='milliseconds') 100 | events = time[above][take] 101 | return signals.SpikeTrain(events) 102 | else: 103 | return signals.SpikeTrain(self.events) 104 | 105 | 106 | 107 | 108 | 109 | 110 | 111 | class Adc(signals.AnalogSignal): 112 | """ 113 | Adc represented as analog signal. See NeuroTools.signal.AnalogSignal for further details. 114 | """ 115 | pass 116 | 117 | class EventFall(Channel): 118 | pass 119 | 120 | class EventRise(Channel): 121 | pass 122 | 123 | class EventBoth(Channel): 124 | pass 125 | 126 | class Marker(Channel): 127 | def __new__(cls, data): 128 | 129 | return numpy.array('times and markers').view(cls) 130 | def __init__(self,data): 131 | self.markers = data[:,1] 132 | self.times = data[:,0].astype('float') 133 | def get_times(self,marker=None): 134 | timeunits = 'seconds' 135 | if markers is None: print 'please give marker'; return None 136 | 137 | if timeunits == 'ticks': 138 | times = self.time_to_ticks(self.times,timeunits=timeunits) 139 | else: 140 | times = self.times 141 | 142 | return times[self.marker==marker] 143 | 144 | class AdcMark(Channel): 145 | pass 146 | 147 | class RealMark(Channel): 148 | pass 149 | 150 | class TextMark(Channel): 151 | pass 152 | 153 | class RealWave(Channel): 154 | pass 155 | 156 | -------------------------------------------------------------------------------- /src/tisean/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | NeuroTools.tisean 3 | ================== 4 | 5 | A collection of functions to create, manipulate and play with TISEAN functions 6 | """ 7 | 8 | from tisean import * 9 | -------------------------------------------------------------------------------- /src/utilities/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | utilities.py 3 | 4 | Routines and classes that make life easier. 
5 | """ 6 | 7 | def imsave(mat,filename): 8 | 9 | raise Exception("This function has been moved to NeuroTools.plotting and is now called save_2D_image(...)!") 10 | 11 | 12 | 13 | def progress_bar(progress): 14 | 15 | raise Exception("This function has been moved to NeuroTools.plotting!") 16 | 17 | 18 | 19 | def exportPNGZip(frame_list, filename, frame_duration): 20 | 21 | raise Exception("This function has been moved to NeuroTools.plotting and is now called save_2D_movie(...)!") 22 | 23 | 24 | 25 | def show(url): 26 | 27 | raise Exception("This function has been moved to 'facets.mixedutils' because it contains FACETS specific code!") 28 | 29 | 30 | 31 | def save_image(arr, filename): 32 | 33 | raise Exception("This function has been moved to 'facets.mixedutils' because it contains FACETS specific code!") 34 | -------------------------------------------------------------------------------- /src/visual_logging.py: -------------------------------------------------------------------------------- 1 | """ 2 | NeuroTools.visual_logging 3 | ========================= 4 | 5 | Log graphs, rather than text. This is useful when dealing with large data 6 | structures, such as arrays. x-y data is plotted as a PNG file, which is stored 7 | inside a zip archive. 8 | 9 | You can specify a logging level such that only graphs with an importance above 10 | that level will be created. e.g., if the logging level is set to WARNING, 11 | log graphs with a level of DEBUG or INFO will not be created. 12 | 13 | The interface is a restricted version of that available in the standard 14 | library's logging module. 15 | 16 | Functions 17 | --------- 18 | 19 | basicConfig - specify the zipfile that will be used to store the graphs, and 20 | the logging level (DEBUG, INFO, WARN, etc) 21 | debug - plots data with level DEBUG 22 | info - plots data with level INFO 23 | warning - plots data with level WARNING 24 | error - plots data with level ERROR 25 | critical - plots data with level CRITICAL 26 | exception - plots data with level ERROR 27 | log - plots data with a user-specified level 28 | 29 | """ 30 | 31 | import zipfile, atexit, os 32 | from NeuroTools import check_dependency 33 | from datetime import datetime 34 | from logging import CRITICAL, DEBUG, ERROR, FATAL, INFO, WARN, WARNING, NOTSET 35 | from time import sleep 36 | 37 | 38 | if check_dependency('matplotlib'): 39 | import matplotlib 40 | matplotlib.use('Agg') 41 | 42 | 43 | if check_dependency('pylab'): 44 | import pylab 45 | 46 | _filename = 'visual_log.zip' 47 | _zipfile = None 48 | _level = INFO 49 | _last_timestamp = '' 50 | 51 | def _remove_if_empty(): 52 | if len(_zipfile.namelist()) == 0 and os.path.exists(_filename): 53 | os.remove(_filename) 54 | 55 | def basicConfig(filename, level=INFO): 56 | global _zipfile, _filename, _level 57 | _filename = filename 58 | _level = level 59 | #_zipfile.close() 60 | if os.path.exists(filename) and zipfile.is_zipfile(filename): 61 | mode = 'a' 62 | else: 63 | mode = 'w' 64 | _zipfile = zipfile.ZipFile(filename, mode=mode, compression=zipfile.ZIP_DEFLATED) 65 | atexit.register(_zipfile.close) 66 | atexit.register(_remove_if_empty) 67 | 68 | def _reopen(): 69 | global _zipfile 70 | if (_zipfile.fp is None) or _zipfile.fp.closed: 71 | _zipfile = zipfile.ZipFile(_filename, mode='a', compression=zipfile.ZIP_DEFLATED) 72 | 73 | def flush(): 74 | """Until the zipfile is closed (normally on exit), the zipfile cannot 75 | be accessed by other tools. 
Calling flush() closes the zipfile, which 76 | will be reopened the next time a log function is called. 77 | """ 78 | _zipfile.close() 79 | 80 | def _get_timestamp(): 81 | """At the moment, it is not possible to create visual 82 | logs at a rate of more than one/second.""" 83 | global _last_timestamp 84 | timestamp = datetime.now().strftime('%Y%m%d-%H%M%S') 85 | while timestamp == _last_timestamp: 86 | sleep(0.1) 87 | timestamp = datetime.now().strftime('%Y%m%d-%H%M%S') 88 | _last_timestamp = timestamp 89 | return timestamp 90 | 91 | def _plot_fig(ydata, xdata, xlabel, ylabel, title, **kwargs): 92 | _reopen() 93 | timestamp = _get_timestamp() 94 | # create figure 95 | pylab.clf() 96 | if xdata is not None: 97 | pylab.plot(xdata, ydata, **kwargs) 98 | else: 99 | if hasattr(ydata, 'shape') and len(ydata.shape) > 1: 100 | pylab.matshow(ydata, **kwargs) 101 | pylab.colorbar() 102 | else: 103 | pylab.plot(ydata) 104 | pylab.xlabel(xlabel) 105 | pylab.ylabel(ylabel) 106 | pylab.title(title) 107 | # add it to the zipfile 108 | fig_name = timestamp + '.png' 109 | pylab.savefig(fig_name) 110 | _zipfile.write(fig_name, 111 | os.path.join(os.path.basename(os.path.splitext(_filename)[0]), fig_name)) 112 | os.remove(timestamp+'.png') 113 | 114 | def debug(ydata, xdata=None, xlabel='', ylabel='', title='', **kwargs): 115 | if _level <= DEBUG: 116 | _plot_fig(ydata, xdata, xlabel, ylabel, title, **kwargs) 117 | 118 | def info(ydata, xdata=None, xlabel='', ylabel='', title='', **kwargs): 119 | if _level <= INFO: 120 | _plot_fig(ydata, xdata, xlabel, ylabel, title, **kwargs) 121 | 122 | def warning(ydata, xdata=None, xlabel='', ylabel='', title='', **kwargs): 123 | if _level <= WARNING: 124 | _plot_fig(ydata, xdata, xlabel, ylabel, title, **kwargs) 125 | 126 | def error(ydata, xdata=None, xlabel='', ylabel='', title='', **kwargs): 127 | if _level <= ERROR: 128 | _plot_fig(ydata, xdata, xlabel, ylabel, title, **kwargs) 129 | 130 | def critical(ydata, xdata=None, xlabel='', ylabel='', title='', **kwargs): 131 | if _level <= CRITICAL: 132 | _plot_fig(ydata, xdata, xlabel, ylabel, title, **kwargs) 133 | 134 | def exception(ydata, xdata=None, xlabel='', ylabel='', title='', **kwargs): 135 | if _level <= ERROR: 136 | _plot_fig(ydata, xdata, xlabel, ylabel, title, **kwargs) 137 | 138 | def log(level, ydata, xdata=None, xlabel='', ylabel='', title='', **kwargs): 139 | if _level <= level: 140 | _plot_fig(ydata, xdata, xlabel, ylabel, title, **kwargs) 141 | 142 | def test(): 143 | test_file = 'visual_logging_test.zip' 144 | if os.path.exists(test_file): 145 | os.remove(test_file) 146 | basicConfig(test_file, level=DEBUG) 147 | xdata = pylab.arange(0, 2*pylab.pi, 0.02*pylab.pi) 148 | debug(pylab.sin(xdata), xdata, 'x', 'sin(x)', 'visual_logging test 1') 149 | flush() 150 | debug(0.5*pylab.sin(2*xdata-0.3), xdata, 'x', 'sin(2x-0.3)/2') 151 | debug(pylab.sqrt(xdata), xdata, 'x', 'sqrt(x)') 152 | flush() 153 | zf = zipfile.ZipFile(test_file, 'r') 154 | print zf.namelist() 155 | assert len(zf.namelist()) == 3, zf.namelist() 156 | zf.close() 157 | 158 | # ============================================================================== 159 | if __name__ == '__main__': 160 | test() 161 | -------------------------------------------------------------------------------- /std_params/PyNN/IF_cond_exp_gsfa_grr/muller_etal2007.param: -------------------------------------------------------------------------------- 1 | # These are the parameters for the excitatory and inhibitory neurons used in: 2 | # 3 | # Muller, E., Buesing, L., 
Schemmel, J., & Meier, K. (2007). Spike-frequency 4 | # adapting neural ensembles: Beyond mean adaptation and renewal theories. 5 | # Neural Computation, 19, 2958-3010. 6 | # 7 | # and 8 | # 9 | # Muller, E., Meier, K., & Schemmel, J. (2004). Methods for simulating 10 | # high-conductance states in neural microcircuits. Proc. of BICS2004. 11 | { 12 | 'excitatory' : \ 13 | {'v_thresh': -57.0, 'v_reset': -70.0, 'v_rest': -70.0, 14 | 'tau_refrac': 0.1, 'tau_m': 10.0, 'cm': 0.2895, 15 | 'e_rev_E': 0.0, 'e_rev_I': -75.0, 'tau_syn_E': 1.5, 16 | 'tau_syn_I': 10.0, 'e_rev_sfa': -70.0, 'q_sfa': 14.48, 17 | 'tau_sfa': 110.0, 'e_rev_rr': -70.0, 'q_rr': 3214.0, 18 | 'tau_rr': 1.97}, 19 | 20 | 'inhibitory' : \ 21 | {'v_thresh': -54.5, 'v_reset': -70.0, 'v_rest': -70.0, 22 | 'tau_refrac': 0.1, 'tau_m': 6.664, 'cm': 0.141, 23 | 'e_rev_E': 0.0, 'e_rev_I': -75.0, 'tau_syn_E': 1.5, 24 | 'tau_syn_I': 10.0, 'e_rev_sfa': -70.0, 'q_sfa': 0.0, 25 | 'tau_sfa': 110.0, 'e_rev_rr': -70.0, 'q_rr': 1565.0, 26 | 'tau_rr': 1.97} 27 | 28 | } -------------------------------------------------------------------------------- /std_params/example.param: -------------------------------------------------------------------------------- 1 | ParameterSet(dict( 2 | 3 | PS3 = { 4 | 'mylist': [1, 2, 3, 4], 5 | 'true': False, 6 | 'yourlist': [1, 2, {'e': 5, 'f': 6}], 7 | 'ps2': { 8 | 'ps': { 9 | 'a': 1, 10 | 'b': 2, 11 | }, 12 | 'c': 19, 13 | }, 14 | 'null': None, 15 | 'mydict': { 16 | 'c': 3, 17 | 'd': 4, 18 | }, 19 | 'hello': 'world', 20 | 'cells': url('https://neuralensemble.org/svn/NeuroTools/trunk/std_params/PyNN/IF_cond_exp_gsfa_grr/muller_etal2007.param'), 21 | }, 22 | 23 | ps = ParameterSet(dict(x=1,y=2, 24 | r = ParameterRange(['a','b']) 25 | )), 26 | range = ParameterRange([1.1,2.2]), 27 | range2 = ParameterRange([1,2]) 28 | 29 | )) -------------------------------------------------------------------------------- /test/analysis/crosscorrelate/spike_data: -------------------------------------------------------------------------------- 1 | # dimensions = [2] 2 | # first_id = 0 3 | # last_id = 1 4 | # dt = 0.1 5 | 14.5 0 6 | 15.8 1 7 | 22.3 1 8 | 22.4 0 9 | 39.8 0 10 | 40.1 1 11 | 47.4 1 12 | 54.6 0 13 | 63.8 1 14 | 66 0 15 | 74.9 0 16 | 77.2 1 17 | 83.1 0 18 | 85.4 1 19 | 95.5 1 20 | 99.6 0 21 | 107.2 1 22 | 110.1 0 23 | 118.7 1 24 | 125.9 0 25 | 127.5 1 26 | 136.6 1 27 | 142.9 0 28 | 150.1 1 29 | 151.3 0 30 | 168.8 0 31 | 175.8 1 32 | 185.3 1 33 | 186.5 0 34 | 199.7 1 35 | 206.6 1 36 | 210.9 0 37 | 217.8 1 38 | 219.3 0 39 | 234.7 0 40 | 237.6 1 41 | 251.5 0 42 | 259.5 1 43 | 267.7 1 44 | 276.1 0 45 | 277 1 46 | 287 0 47 | 289.6 1 48 | 303.7 1 49 | 305.8 0 50 | 316.8 0 51 | 322.2 1 52 | 340.4 0 53 | 351.8 1 54 | 356.2 0 55 | 358.7 1 56 | 372.4 0 57 | 373 1 58 | 381.8 1 59 | 387 0 60 | 388.6 1 61 | 401.6 0 62 | 406.7 1 63 | 417.9 0 64 | 419.6 1 65 | 426.9 0 66 | 432.6 1 67 | 438.2 0 68 | 446.4 1 69 | 447.9 0 70 | 459.5 0 71 | 466.4 1 72 | 471.5 0 73 | 479.7 1 74 | 485.7 0 75 | 495 0 76 | 497.1 1 77 | -------------------------------------------------------------------------------- /test/analysis/make_kernel/alp.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NeuralEnsemble/NeuroTools/d0da16cce71253e849729e9a6bf5fa04744bbcfc/test/analysis/make_kernel/alp.mat -------------------------------------------------------------------------------- /test/analysis/make_kernel/alp_reversed.mat: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/NeuralEnsemble/NeuroTools/d0da16cce71253e849729e9a6bf5fa04744bbcfc/test/analysis/make_kernel/alp_reversed.mat -------------------------------------------------------------------------------- /test/analysis/make_kernel/box.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NeuralEnsemble/NeuroTools/d0da16cce71253e849729e9a6bf5fa04744bbcfc/test/analysis/make_kernel/box.mat -------------------------------------------------------------------------------- /test/analysis/make_kernel/epa.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NeuralEnsemble/NeuroTools/d0da16cce71253e849729e9a6bf5fa04744bbcfc/test/analysis/make_kernel/epa.mat -------------------------------------------------------------------------------- /test/analysis/make_kernel/exp.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NeuralEnsemble/NeuroTools/d0da16cce71253e849729e9a6bf5fa04744bbcfc/test/analysis/make_kernel/exp.mat -------------------------------------------------------------------------------- /test/analysis/make_kernel/exp_reversed.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NeuralEnsemble/NeuroTools/d0da16cce71253e849729e9a6bf5fa04744bbcfc/test/analysis/make_kernel/exp_reversed.mat -------------------------------------------------------------------------------- /test/analysis/make_kernel/gau.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NeuralEnsemble/NeuroTools/d0da16cce71253e849729e9a6bf5fa04744bbcfc/test/analysis/make_kernel/gau.mat -------------------------------------------------------------------------------- /test/analysis/make_kernel/tri.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NeuralEnsemble/NeuroTools/d0da16cce71253e849729e9a6bf5fa04744bbcfc/test/analysis/make_kernel/tri.mat -------------------------------------------------------------------------------- /test/test_datastore.py: -------------------------------------------------------------------------------- 1 | """ 2 | Unit tests for the NeuroTools.datastore module 3 | 4 | """ 5 | 6 | import unittest 7 | import os 8 | import shutil 9 | from NeuroTools.datastore import * 10 | from NeuroTools.parameters import ParameterSet 11 | 12 | class DummyComponent(object): 13 | 14 | def __init__(self, input=None): 15 | self.parameters = ParameterSet({'a': 1, 'b': 2}) 16 | self.parameters._url = "http://www.example.com/parameters" 17 | self.version = 0.1 18 | self.input = input 19 | self.data = range(1000) 20 | 21 | class ShelveDataStoreTest(unittest.TestCase): 22 | 23 | def setUp(self): 24 | pass 25 | 26 | def tearDown(self): 27 | if os.path.exists('/tmp/test_datastore'): 28 | shutil.rmtree("/tmp/test_datastore") 29 | 30 | def test_create_with_existing_rootdir(self): 31 | ds = ShelveDataStore('/tmp') 32 | assert os.path.exists(ds._root_dir) 33 | 34 | def test_create_with_non_existing_rootdir(self): 35 | assert not os.path.exists('/tmp/test_datastore') 36 | ds = ShelveDataStore('/tmp/test_datastore') 37 | assert os.path.exists(ds._root_dir) 38 | 39 | def test_store_retrieve(self): 40 | ds = ShelveDataStore('/tmp') 41 | c = DummyComponent() 42 | ds.store(c, 'data', c.data) 43 | new_data = ds.retrieve(c, 
'data') 44 | self.assertEqual(c.data, new_data) 45 | 46 | # def test_with_really_long_key(self): 47 | # ds = ShelveDataStore('/tmp') 48 | # c = DummyComponent() 49 | # c.parameters._url = 'http://www.example.com/liurfsnlieugcns9g8cy4h43icpw349chgwp938gn93gcw398cgnw398gc39qcgwccg3o87cgnq48w37qgcf478gf249gvpn9347gfnc9w58gn954wgv7nwp937gvn9w34gv7nw3579gntvw9p35gntvw59pgvn5937gc5gdnergfdnw3497fgn547gcfw7np349gvnp5947cgn9ericneirscfgserciwrugniwerugnciwergcwnregc' 50 | # self.assertRaises(Exception, ds.store, c, 'data', c.data) 51 | 52 | def test_hash_pickle(self): 53 | ds = ShelveDataStore('/tmp', keygenerators.hash_pickle) 54 | c = DummyComponent() 55 | ds.store(c, 'data', c.data) 56 | new_data = ds.retrieve(c, 'data') 57 | self.assertEqual(c.data, new_data) 58 | 59 | def test_with_component_chain(self): 60 | ds_jwu = ShelveDataStore('/tmp', keygenerators.join_with_underscores) 61 | ds_hp = ShelveDataStore('/tmp', keygenerators.hash_pickle) 62 | c1 = DummyComponent() 63 | c2 = DummyComponent(input=c1) 64 | for ds in ds_jwu, ds_hp: 65 | ds.store(c2, 'data', c2.data) 66 | new_data = ds.retrieve(c2, 'data') 67 | self.assertEqual(c2.data, new_data) 68 | 69 | # 70 | # class DjangoORMDataStoreTest(unittest.TestCase): 71 | # 72 | # def setUp(self): 73 | # self.ds = DjangoORMDataStore(database_parameters={'DATABASE_ENGINE': 'sqlite3', 74 | # 'DATABASE_NAME': '/tmp/test_datastore.db'}, 75 | # data_root_dir='/tmp/test_datastore_django') 76 | # 77 | # def tearDown(self): 78 | # if os.path.exists('/tmp/test_datastore.db'): 79 | # os.remove('/tmp/test_datastore.db') 80 | # if os.path.exists('/tmp/test_datastore_django'): 81 | # shutil.rmtree('/tmp/test_datastore_django') 82 | # 83 | # #def test_create(self): 84 | # # pass 85 | # 86 | # def test_store_retrieve(self): 87 | # c = DummyComponent() 88 | # self.ds.store(c, 'data', c.data) 89 | # new_data = self.ds.retrieve(c, 'data') 90 | # self.assertEqual(c.data, new_data) 91 | # 92 | # 93 | if __name__ == '__main__': 94 | unittest.main() -------------------------------------------------------------------------------- /test/test_io.py: -------------------------------------------------------------------------------- 1 | """ 2 | Unit tests for the NeuroTools.io module 3 | """ -------------------------------------------------------------------------------- /test/test_logging.py: -------------------------------------------------------------------------------- 1 | """ 2 | Unit tests for the NeuroTools.visual_logging module as well as the logging 3 | helper tools in NeuroTools.__init__ 4 | """ -------------------------------------------------------------------------------- /test/test_plotting.py: -------------------------------------------------------------------------------- 1 | """ 2 | Unit tests for the NeuroTools.plotting module 3 | """ 4 | import matplotlib 5 | matplotlib.use('Agg') 6 | 7 | import unittest 8 | from NeuroTools import plotting 9 | import pylab 10 | import os 11 | 12 | 13 | 14 | class PylabParamsTest(unittest.TestCase): 15 | 16 | def runTest(self): 17 | 18 | # define arbitrary values 19 | fig_width_pt = 123.4 20 | ratio = 0.1234 21 | text_fontsize = 10 22 | tick_labelsize = 8 23 | useTex = False 24 | 25 | inches_per_pt = 1.0/72.27 # Convert pt to inch 26 | fig_width = fig_width_pt*inches_per_pt # width in inches 27 | fig_height = fig_width*ratio # height in inches 28 | 29 | testDict = { 30 | 'axes.labelsize' : text_fontsize, 31 | 'font.size' : text_fontsize, 32 | 'xtick.labelsize' : tick_labelsize, 33 | 'ytick.labelsize' : tick_labelsize, 34 | 
'text.usetex' : useTex, 35 | 'figure.figsize' : [fig_width, fig_height]} 36 | 37 | plotting.set_pylab_params(fig_width_pt=fig_width_pt, ratio=ratio, 38 | text_fontsize=text_fontsize, 39 | tick_labelsize=tick_labelsize, useTex=useTex) 40 | for k in testDict.keys(): 41 | assert pylab.rcParams.has_key(k) 42 | assert pylab.rcParams[k] == testDict[k] 43 | 44 | 45 | 46 | class GetDisplayTest(unittest.TestCase): 47 | 48 | def runTest(self): 49 | 50 | a = plotting.get_display(True) 51 | assert a != None 52 | a = plotting.get_display(False) 53 | assert a == None 54 | a = plotting.get_display(1234) 55 | assert a == 1234 56 | 57 | 58 | 59 | class ProgressBarTest(unittest.TestCase): 60 | 61 | def runTest(self): 62 | 63 | import time 64 | print '\nINFO: Testing progress bar...' 65 | for i in range(100): 66 | plotting.progress_bar(i/100.) 67 | time.sleep(.01) 68 | print '\n' 69 | 70 | 71 | 72 | class Save2DImageTest(unittest.TestCase): 73 | 74 | def runTest(self): 75 | 76 | import numpy 77 | mat = numpy.random.random([50,50]) 78 | filename = 'deleteme.png' 79 | if os.path.exists(filename): os.remove(filename) 80 | plotting.save_2D_image(mat, filename) 81 | assert os.path.exists(filename) 82 | os.remove(filename) 83 | 84 | 85 | 86 | class Save2DMovieTest(unittest.TestCase): 87 | 88 | def runTest(self): 89 | 90 | import numpy 91 | frames = [] 92 | duration = 0.1 93 | for i in range(10): 94 | frames.append(numpy.random.randint(0,255,[10,10])) 95 | filename = 'deleteme.zip' 96 | if os.path.exists(filename): os.remove(filename) 97 | plotting.save_2D_movie(frames, filename, duration) 98 | assert os.path.exists(filename) 99 | os.remove(filename) 100 | 101 | 102 | 103 | class SetLabelsTest(unittest.TestCase): 104 | 105 | def runTest(self): 106 | 107 | f = plotting.get_display(True) 108 | x = range(10) 109 | p = pylab.plot(x) 110 | plotting.set_labels(pylab, 'the x axis', 'the y axis') 111 | 112 | # set up a SimpleMultiplot with arbitrary values 113 | self.nrows = 1 114 | self.ncolumns = 1 115 | title = 'testMultiplot' 116 | xlabel = 'testXlabel' 117 | ylabel = 'testYlabel' 118 | scaling = ('linear','log') 119 | self.smt = plotting.SimpleMultiplot(nrows=self.nrows, ncolumns=self.ncolumns, title=title, xlabel=xlabel, ylabel=ylabel, scaling=scaling) 120 | plotting.set_labels(self.smt.panel(0), 'the x axis', 'the y axis') 121 | 122 | 123 | 124 | 125 | class SetAxisLimitsTest(unittest.TestCase): 126 | 127 | def runTest(self): 128 | 129 | f = plotting.get_display(True) 130 | x = range(10) 131 | pylab.plot(x) 132 | plotting.set_axis_limits(pylab, 0., 123., -123., 456.) 133 | 134 | # set up a SimpleMultiplot with arbitrary values 135 | self.nrows = 1 136 | self.ncolumns = 1 137 | title = 'testMultiplot' 138 | xlabel = 'testXlabel' 139 | ylabel = 'testYlabel' 140 | scaling = ('linear','log') 141 | self.smt = plotting.SimpleMultiplot(nrows=self.nrows, ncolumns=self.ncolumns, title=title, xlabel=xlabel, ylabel=ylabel, scaling=scaling) 142 | plotting.set_axis_limits(self.smt.panel(0), 0., 123., -123., 456.) 
143 | 144 | 145 | 146 | class SimpleMultiplotTest(unittest.TestCase): 147 | 148 | def setUp(self): 149 | 150 | # define arbitrary values 151 | self.nrows = 4 152 | self.ncolumns = 5 153 | title = 'testMultiplot' 154 | xlabel = 'testXlabel' 155 | ylabel = 'testYlabel' 156 | scaling = ('linear','log') 157 | self.smt = plotting.SimpleMultiplot(nrows=self.nrows, ncolumns=self.ncolumns, title=title, xlabel=xlabel, ylabel=ylabel, scaling=scaling) 158 | 159 | 160 | 161 | class SimpleMultiplotSaveTest(SimpleMultiplotTest): 162 | 163 | def runTest(self): 164 | 165 | filename = "deleteme.png" 166 | if os.path.exists(filename): os.remove(filename) 167 | self.smt.save(filename) 168 | assert os.path.exists(filename) 169 | os.remove(filename) 170 | 171 | 172 | 173 | class SimpleMultiplotSetFrameTest(SimpleMultiplotTest): 174 | 175 | def runTest(self): 176 | 177 | numPanels = self.nrows * self.ncolumns 178 | boollist = [True,False,False,True] 179 | for i in range(numPanels): 180 | ax_indexed = self.smt.panel(i) 181 | ax_next = self.smt.next_panel() 182 | assert ax_indexed == ax_next 183 | self.smt.set_frame(ax_indexed,boollist,linewidth=4) 184 | 185 | 186 | 187 | if __name__ == "__main__": 188 | unittest.main() 189 | -------------------------------------------------------------------------------- /test/test_random.py: -------------------------------------------------------------------------------- 1 | """ 2 | Unit tests for the NeuroTools.random module 3 | 4 | """ 5 | 6 | import matplotlib 7 | matplotlib.use('Agg') 8 | 9 | import unittest 10 | from NeuroTools.random import * 11 | 12 | class RandomDistributionTest(unittest.TestCase): 13 | 14 | def test_GammaDistFromStats(self): 15 | g = GammaDist() 16 | vals = [1,2,3] 17 | g.from_stats(vals) 18 | self.assertEqual(g.mean(), numpy.mean(vals)) 19 | self.assertAlmostEqual(g.std(), numpy.std(vals), 10) 20 | 21 | def test_GammaDistFromArgs(self): 22 | g1 = GammaDist(mean=2.0, std=0.5) 23 | g2 = GammaDist(**{'m': 2.0, 's': 0.5}) 24 | g3 = GammaDist(**{'a': 16.0, 'b': 0.125}) 25 | for g in g1, g2, g3: 26 | self.assertEqual(g.mean(), 2.0) 27 | self.assertEqual(g.std(), 0.5) 28 | 29 | def test_UniformDistFromStats(self): 30 | u = UniformDist() 31 | vals = range(-5, 5) 32 | u.from_stats(vals) 33 | outputs = u.next(100) 34 | assert min(outputs) > -5 # should have >= here? 35 | assert max(outputs) < 4 36 | 37 | def test_ParameterDist(self): 38 | pd = ParameterDist() 39 | self.assertRaises(NotImplementedError, pd.next) 40 | 41 | # ============================================================================== 42 | if __name__ == '__main__': 43 | unittest.main() 44 | 45 | -------------------------------------------------------------------------------- /test/test_utilities.py: -------------------------------------------------------------------------------- 1 | """ 2 | Unit tests for the NeuroTools.utilities module 3 | 4 | Also see test_srblib.py 5 | """ 6 | 7 | import unittest 8 | from NeuroTools import utilities 9 | 10 | 11 | 12 | class UtilitiesTest(unittest.TestCase): 13 | 14 | def runTest(self): 15 | 16 | print 'INFO: Up to this point, NeuroTools.utilities contains no functions or classes.' 17 | 18 | # these are dummy calls of the functions which just raise an exception, telling where the routine has been moved to. 
19 | # satisfies coverage :) 20 | try: utilities.imsave(None,None) 21 | except: pass 22 | try: utilities.progress_bar(None) 23 | except: pass 24 | try: utilities.exportPNGZip(None, None, None) 25 | except: pass 26 | try: utilities.show(None) 27 | except: pass 28 | try: utilities.save_image(None, None) 29 | except: pass 30 | 31 | 32 | 33 | if __name__ == "__main__": 34 | unittest.main() 35 | 36 | --------------------------------------------------------------------------------