├── .circleci
│   └── config.yml
├── .gitattributes
├── .gitignore
├── .gitmodules
├── .travis.yml
├── Dockerfile
├── README.md
├── asv.conf.json
├── benchmarks
│   ├── __init__.py
│   └── benchmarks.py
├── build.sh
├── codecov.yml
├── docs
│   ├── Makefile
│   ├── chapter1.ipynb
│   ├── chapter2.ipynb
│   ├── chapter3.ipynb
│   ├── chapter4.ipynb
│   ├── chapter5.ipynb
│   ├── data
│   │   └── NeuroML2
│   │       └── LEMS_2007One.xml
│   ├── morphology.ipynb
│   └── source
│       ├── conf.py
│       ├── index.rst
│       ├── modules.rst
│       ├── neuronunit.capabilities.rst
│       ├── neuronunit.models.rst
│       ├── neuronunit.neuroconstruct.rst
│       ├── neuronunit.rst
│       └── neuronunit.tests.rst
├── environment.yml
├── neuronunit
│   ├── __init__.py
│   ├── aibs.py
│   ├── allenapi
│   │   ├── __init__.py
│   │   ├── aibs.py
│   │   ├── allen_data_driven.py
│   │   ├── allen_data_efel_features_opt.py
│   │   ├── make_allen_tests.py
│   │   ├── make_allen_tests_from_id.py
│   │   ├── neuroelectroapi.py
│   │   └── utils.py
│   ├── bbp.py
│   ├── capabilities
│   │   ├── __init__.py
│   │   ├── channel.py
│   │   ├── morphology.py
│   │   └── spike_functions.py
│   ├── cellmodelp.py
│   ├── docs
│   │   └── Chapter6.ipynb
│   ├── examples
│   │   ├── agreement_df.ipynb
│   │   ├── begginer_friendly_backup.ipynb
│   │   ├── begginer_friendly_intro.ipynb
│   │   ├── brian_multi_comp_ca2_HH.ipynb
│   │   ├── cache-and-hash.ipynb
│   │   ├── cluster_script.py
│   │   ├── druckman_tests.ipynb
│   │   ├── geppeto_backend.py
│   │   ├── geppetto-prep.ipynb
│   │   ├── m2m_test.ipynb
│   │   ├── model_zoo
│   │   │   ├── LEMS_Test_ca_boyle.xml
│   │   │   └── models-1.ipynb
│   │   ├── nmldb.py
│   │   ├── reduced-model-simulation.ipynb
│   │   ├── round_trip_tests_five_param_two.ipynb
│   │   ├── rtt.ipynb
│   │   ├── serial-vs-parallel-rheobase-test.ipynb
│   │   └── upgrading_simulator_implemented_models_to_NU_models.ipynb
│   ├── models
│   │   ├── NeuroML2
│   │   │   ├── Izh2007One.net.nml
│   │   │   ├── LEMS_2007One.xml
│   │   │   ├── fragments
│   │   │   │   ├── Izh2007One-no-input-1.net.nml
│   │   │   │   ├── Izh2007One-no-input-2.net.nml
│   │   │   │   ├── LEMS_2007One-no-input-1.xml
│   │   │   │   └── LEMS_2007One-no-input-2.xml
│   │   │   └── results
│   │   │       └── .gitkeep
│   │   ├── __init__.py
│   │   ├── backends
│   │   │   ├── __init__.py
│   │   │   ├── base.py
│   │   │   ├── geppetto.py
│   │   │   ├── glif.py
│   │   │   ├── jNeuroML.py
│   │   │   ├── parse_glif.py
│   │   │   └── static.py
│   │   ├── channel.py
│   │   ├── lems.py
│   │   ├── morphology.py
│   │   ├── optimization_model_layer.py
│   │   ├── reduced.py
│   │   ├── static.py
│   │   ├── very_reduced.py
│   │   └── very_reduced_sans_lems.py
│   ├── neuroconstruct
│   │   ├── __init__.py
│   │   ├── capabilities.py
│   │   └── models.py
│   ├── neuroelectro.py
│   ├── neuromldb.py
│   ├── optimization
│   │   ├── __init__.py
│   │   ├── algorithms.py
│   │   ├── data_transport_container.py
│   │   ├── data_transport_container_orig.py
│   │   ├── model_parameters.py
│   │   ├── neuronunit_to_bpo.py
│   │   └── optimization_management.py
│   ├── plotting
│   │   └── plot_utils.py
│   ├── plottools.py
│   ├── tests
│   │   ├── .gitignore
│   │   ├── __init__.py
│   │   ├── base.py
│   │   ├── channel.py
│   │   ├── druckmann2013.py
│   │   ├── dynamics.py
│   │   ├── fi.py
│   │   ├── make_allen_tests.py
│   │   ├── morphology.py
│   │   ├── passive.py
│   │   ├── target_spike_current.py
│   │   └── waveform.py
│   └── unit_test
│       ├── __init__.py
│       ├── __main__.py
│       ├── adexp_opt.py
│       ├── base.py
│       ├── bbp.ipynb
│       ├── cache_edit.ipynb
│       ├── cache_tests.py
│       ├── cache_use.ipynb
│       ├── capabilities_tests.py
│       ├── doc_tests.py
│       ├── get_tau.ipynb
│       ├── import_tests.py
│       ├── izhi_opt.py
│       ├── misc_nb.py
│       ├── misc_tests.py
│       ├── model_tests.py
│       ├── nml_extra_capability_check.ipynb
│       ├── observation_tests.py
│       ├── opt_ephys_properties.py
│       ├── relative_diff_unit_test.ipynb
│       ├── resource_tests.py
│       ├── rheobase_dtc_test.py
│       ├── rheobase_model_test.py
│       ├── sciunit_tests.py
│       ├── scores_unit_test.py
│       ├── scores_unit_test.py.orig
│       ├── test_check.ipynb
│       ├── test_druckmann2013.py
│       ├── test_tests.py
│       ├── validate_observation_vm.ipynb
│       └── validate_params_vm.ipynb
├── pyproject.toml
├── readthedocs.yml
├── requirements.txt
├── setup.cfg
├── setup.py
└── test.sh
/.circleci/config.yml:
--------------------------------------------------------------------------------
1 | defaults: &defaults
2 | working_directory: ~/neuronunit
3 | docker:
4 | - image: continuumio/miniconda3
5 |
6 | inst_conda_bld: &inst_conda_bld
7 | - run: conda config --add channels conda-forge
8 | - run: conda config --set always_yes true
9 | - run: conda config --set quiet true
10 | - run: conda install conda-build
11 |
12 | version: 2
13 |
14 | jobs:
15 | build:
16 | <<: *defaults
17 | parallelism: 1
18 | steps:
19 | - checkout
20 | - run: git fetch --unshallow || true
21 | - run: apt-get update && apt-get install -y cpp gcc
22 | - run: apt-get install -y libx11-6 python-dev git build-essential
23 | - run: apt-get install -y autoconf automake gcc g++ make gfortran
24 | - run: apt-get install -y python-tables
25 | - run: apt-get install -y libhdf5-serial-dev
26 |
27 | - run: conda config --add channels conda-forge
28 | - run: conda config --set always_yes true
29 | - run: conda config --set quiet true
30 | - run: conda install conda-build
31 | - run: pip install pip --upgrade;
32 | - run: conda install numpy;
33 | - run: conda install numba;
34 | - run: conda install dask;
35 | - run: pip install tables
36 | - run: pip install scipy==1.5.4
37 | - run: pip install coverage
38 | - run: pip install cython
39 | - run: pip install asciiplotlib;
40 | - run: pip install ipfx
41 | - run: pip install streamlit
42 | - run: pip install scikit-learn
43 | - run: pip install seaborn
44 | - run: pip install frozendict
45 | - run: pip install igor
46 | #- run: pip install plotly
47 | - run: pip install allensdk==0.16.3
48 | - run: pip install --upgrade colorama
49 | - run: pip install -e .
50 | - run: rm -rf /opt/conda/lib/python3.8/site-packages/sciunit
51 | - run: git clone -b neuronunit https://github.com/russelljjarvis/jit_hub.git
52 | - run: cd jit_hub; pip install -e .; cd ..;
53 | - run: git clone -b neuronunit_reduced_cells https://github.com/russelljjarvis/BluePyOpt.git
54 | - run: cd BluePyOpt; pip install -e .
55 | - run: git clone -b dev https://github.com/russelljjarvis/sciunit.git
56 |
57 | - run: cd sciunit; pip install -e .; cd ..;
58 | - run: pip install git+https://github.com/russelljjarvis/eFEL
59 | - run: pip install coveralls
60 | - run: sh build.sh
61 | - run: sh test.sh;
62 | #- run: cd neuronunit/unit_test; coveralls -m unittest rheobase_model_test.py; cd -;
63 | #- run: cd neuronunit/unit_test; coverage report
64 | #- store_artifacts:
65 | # path: htmlcov
66 |
--------------------------------------------------------------------------------
/.gitattributes:
--------------------------------------------------------------------------------
1 | # Set the default behavior, in case people don't have core.autocrlf set.
2 | * text=auto
3 |
4 | # Explicitly declare text files you want to always be normalized and converted
5 | # to native line endings on checkout.
6 | *.c text
7 | *.h text
8 |
9 | # Declare files that will always have CRLF line endings on checkout.
10 | *.sln text eol=crlf
11 |
12 | # Denote all files that are truly binary and should not be modified.
13 | *.png binary
14 | *.jpg binary
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | *.vscode
2 | *.py[co]
3 | *.pkl
4 | *.p
5 | *do-not-add*
6 | neuronunit/examples/model_zoo/*.nml
7 | *.hoc
8 |
9 | # Packages
10 | *.egg
11 | *.egg-info
12 | dist
13 | build
14 | eggs
15 | parts
16 | bin
17 | var
18 | sdist
19 | develop-eggs
20 | .installed.cfg
21 | old_examples
22 |
23 | # Installer logs
24 | pip-log.txt
25 |
26 | # Unit test / coverage reports
27 | .coverage
28 | .tox
29 |
30 | #Translations
31 | *.mo
32 |
33 | #Mr Developer
34 | .mr.developer.cfg
35 |
36 | *.class
37 | *.DS_Store
38 | *sublime*
39 | *~
40 | *scratch.py
41 | *.ipynb_checkpoints
42 |
43 | x86_64
44 | temp_data/
45 | *.xcuserstate
46 | neuronunit.xcworkspace/contents.xcworkspacedata
47 | *.xccheckout
48 |
49 | .idea/
50 | *.pyproj
51 |
52 | hippounit_bak*
53 | scratch.ipynb
54 | docs/*.dat
55 | *.bak
56 | *.dir
57 | *.omt
58 | *.dat
59 | *.db
60 | *.xml
61 | docs/chapter*.py
62 | *#
63 | B95
64 | unit_test/bbp.py
65 | neuronunit/tests/NeuroML2/*.xml*
66 | unit_test/get_tau.py
67 | .eggs
68 | htmlcov
69 | *.mod
70 | *_nrn.py
71 | neuronunit/models/NeuroML2/*.py
72 | neuronunit/unit_test/bbp.py
73 | neuronunit/unit_test/get_tau.py
74 | scratch
75 | GeneratedFiles
76 | docs/tmp/
77 | tmp/
--------------------------------------------------------------------------------
/.gitmodules:
--------------------------------------------------------------------------------
1 | [submodule "nml/NeuroML2"]
2 | path = nml/NeuroML2
3 | url = http://github.com/NeuroML/NeuroML2
4 |
--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
1 | #### Quick miniconda with numpy, scipy, etc. from ####
2 | # https://conda.io/docs/travis.html ####
3 | language: python
4 | python:
5 | # We don't actually use the Travis Python, but this keeps it organized.
6 | - "3.5"
7 | - "3.6"
8 | - "3.7"
9 | - "3.8"
10 | install:
11 | - sudo apt-get update
12 | # We do this conditionally because it saves us some downloading if the
13 | # version is the same.
14 | - wget https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -O miniconda.sh
15 | - bash miniconda.sh -b -p $HOME/miniconda
16 | - export PATH="$HOME/miniconda/bin:$PATH"
17 | - hash -r
18 | - conda config --set always_yes yes --set changeps1 no
19 | - conda update -q --all
20 | # Useful for debugging any issues with conda
21 | - conda info -a
22 | - pip install -U pip
23 | - pip install .
24 | - pip install scikit-learn
25 | - pip install seaborn
26 | - pip install coveralls
27 | - pip install pylmeasure # required by morphology tests
28 | - sh build.sh
29 |
30 | ######################################################
31 |
32 | script:
33 | - export NC_HOME='.' # NeuroConstruct isn't used but tests need this
34 | # variable set to pass.
35 | #- cd neuronunit/unit_test; python -m unittest scores_unit_test.py; cd -;
36 | #- cd neuronunit/unit_test; python -m unittest rheobase_model_test.py; cd -;
37 | - sh test.sh
44 | after_success:
45 | - coveralls
46 |
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM scidash/neuronunit-optimization
2 | USER jovyan
3 | RUN pip install psutil
4 | ENV QT_QPA_PLATFORM offscreen
5 | RUN pip install dask
6 | RUN pip install distributed
7 | RUN sudo apt-get update
8 | RUN pip install ioloop
9 | RUN sudo chown -R jovyan /home/jovyan
10 | RUN pip install git+https://github.com/OpenSourceBrain/OpenCortex
11 | RUN git clone https://github.com/vrhaynes/AllenInstituteNeuroML.git
12 | RUN pip install PyLEMS
13 |
14 | # RUN sudo /opt/conda/bin/pip install git+https://github.com/python-quantities/python-quantities
15 | # RUN sudo /opt/conda/bin/pip install git+https://github.com/scidash/sciunit@dev
16 | RUN sudo chown -R jovyan /home/jovyan
17 | WORKDIR /home/jovyan/neuronunit/neuronunit/unit_test
19 | RUN git clone https://github.com/vrhaynes/AllenInstituteNeuroML.git
20 | RUN pip install git+https://github.com/OpenSourceBrain/OpenCortex
21 | # RUN sudo apt-get -y install ipython ipython-notebook
22 | # RUN sudo -H /opt/conda/bin/pip install jupyter
23 | # ADD neuronunit/unit_test/post_install.sh .
24 |
25 | RUN git clone https://github.com/OpenSourceBrain/osb-model-validation.git
26 | WORKDIR osb-model-validation
27 | RUN python setup.py install
28 | RUN pip --no-cache-dir install \
29 | ipykernel \
30 | jupyter \
31 | matplotlib \
32 | numpy \
33 | scipy \
34 | scikit-learn \
35 | pandas \
36 | Pillow
37 |
38 | RUN sudo /opt/conda/bin/python3 -m ipykernel.kernelspec
39 |
40 | # Then install the Jupyter Notebook using:
41 | RUN pip install jupyter
42 |
43 | RUN sudo /opt/conda/bin/pip uninstall -y tornado
44 | RUN pip install tornado==4.5.3
45 | RUN /opt/conda/bin/python3 -m pip install ipykernel
46 | RUN /opt/conda/bin/python3 -m ipykernel install --user
47 | RUN pip install deap
48 | WORKDIR $HOME
49 | # ADD . neuronunit
50 | # WORKDIR neuronunit
51 | # RUN sudo /opt/conda/bin/pip install -e .
52 | #RUN bash post_install.sh
53 | ENTRYPOINT /bin/bash
54 |
55 |
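56 | # Usage sketch with the standard Docker CLI (the image tag is illustrative):
57 | #   docker build -t neuronunit .
58 | #   docker run -it neuronunit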
--------------------------------------------------------------------------------
/asv.conf.json:
--------------------------------------------------------------------------------
1 | {
2 | // The version of the config file format. Do not change, unless
3 | // you know what you are doing.
4 | "version": 1,
5 |
6 | // The name of the project being benchmarked
7 | "project": "neuronunit",
8 |
9 | // The project's homepage
10 | "project_url": "https://github.com/russelljjarvis/neuronunit",
11 |
12 | // The URL or local path of the source code repository for the
13 | // project being benchmarked
14 | "repo": ".",
15 |
16 | // The Python project's subdirectory in your repo. If missing or
17 | // the empty string, the project is assumed to be located at the root
18 | // of the repository.
19 | // "repo_subdir": "",
20 |
21 | // Customizable commands for building, installing, and
22 | // uninstalling the project. See asv.conf.json documentation.
23 | //
24 | // "install_command": ["in-dir={env_dir} python -mpip install {wheel_file}"],
25 | // "uninstall_command": ["return-code=any python -mpip uninstall -y {project}"],
26 | // "build_command": [
27 | // "python setup.py build",
28 | // "PIP_NO_BUILD_ISOLATION=false python -mpip wheel --no-deps --no-index -w {build_cache_dir} {build_dir}"
29 | // ],
30 |
31 | // List of branches to benchmark. If not provided, defaults to "master"
32 | // (for git) or "default" (for mercurial).
33 | // "branches": ["master"], // for git
34 | // "branches": ["default"], // for mercurial
35 |
36 | // The DVCS being used. If not set, it will be automatically
37 | // determined from "repo" by looking at the protocol in the URL
38 | // (if remote), or by looking for special directories, such as
39 | // ".git" (if local).
40 | // "dvcs": "git",
41 |
42 | // The tool to use to create environments. May be "conda",
43 | // "virtualenv" or other value depending on the plugins in use.
44 | // If missing or the empty string, the tool will be automatically
45 | // determined by looking for tools on the PATH environment
46 | // variable.
47 | "environment_type": "virtualenv",
48 |
49 | // timeout in seconds for installing any dependencies in environment
50 | // defaults to 10 min
51 | //"install_timeout": 600,
52 |
53 | // the base URL to show a commit for the project.
54 | // "show_commit_url": "http://github.com/owner/project/commit/",
55 |
56 | // The Pythons you'd like to test against. If not provided, defaults
57 | // to the current version of Python used to run `asv`.
58 | // "pythons": ["2.7", "3.6"],
59 |
60 | // The list of conda channel names to be searched for benchmark
61 | // dependency packages in the specified order
62 | // "conda_channels": ["conda-forge", "defaults"],
63 |
64 | // The matrix of dependencies to test. Each key is the name of a
65 | // package (in PyPI) and the values are version numbers. An empty
66 | // list or empty string indicates to just test against the default
67 | // (latest) version. null indicates that the package is to not be
68 | // installed. If the package to be tested is only available from
69 | // PyPi, and the 'environment_type' is conda, then you can preface
70 | // the package name by 'pip+', and the package will be installed via
71 | // pip (with all the conda available packages installed first,
72 | // followed by the pip installed packages).
73 | //
74 | // "matrix": {
75 | // "numpy": ["1.6", "1.7"],
76 | // "six": ["", null], // test with and without six installed
77 | // "pip+emcee": [""], // emcee is only available for install with pip.
78 | // },
79 |
80 | // Combinations of libraries/python versions can be excluded/included
81 | // from the set to test. Each entry is a dictionary containing additional
82 | // key-value pairs to include/exclude.
83 | //
84 | // An exclude entry excludes entries where all values match. The
85 | // values are regexps that should match the whole string.
86 | //
87 | // An include entry adds an environment. Only the packages listed
88 | // are installed. The 'python' key is required. The exclude rules
89 | // do not apply to includes.
90 | //
91 | // In addition to package names, the following keys are available:
92 | //
93 | // - python
94 | // Python version, as in the *pythons* variable above.
95 | // - environment_type
96 | // Environment type, as above.
97 | // - sys_platform
98 | // Platform, as in sys.platform. Possible values for the common
99 | // cases: 'linux2', 'win32', 'cygwin', 'darwin'.
100 | //
101 | // "exclude": [
102 | // {"python": "3.2", "sys_platform": "win32"}, // skip py3.2 on windows
103 | // {"environment_type": "conda", "six": null}, // don't run without six on conda
104 | // ],
105 | //
106 | // "include": [
107 | // // additional env for python2.7
108 | // {"python": "2.7", "numpy": "1.8"},
109 | // // additional env if run on windows+conda
110 | // {"platform": "win32", "environment_type": "conda", "python": "2.7", "libpython": ""},
111 | // ],
112 |
113 | // The directory (relative to the current directory) that benchmarks are
114 | // stored in. If not provided, defaults to "benchmarks"
115 | // "benchmark_dir": "benchmarks",
116 |
117 | // The directory (relative to the current directory) to cache the Python
118 | // environments in. If not provided, defaults to "env"
119 | "env_dir": ".asv/env",
120 |
121 | // The directory (relative to the current directory) that raw benchmark
122 | // results are stored in. If not provided, defaults to "results".
123 | "results_dir": ".asv/results",
124 |
125 | // The directory (relative to the current directory) that the html tree
126 | // should be written to. If not provided, defaults to "html".
127 | "html_dir": ".asv/html",
128 |
129 | // The number of characters to retain in the commit hashes.
130 | // "hash_length": 8,
131 |
132 | // `asv` will cache results of the recent builds in each
133 | // environment, making them faster to install next time. This is
134 | // the number of builds to keep, per environment.
135 | // "build_cache_size": 2,
136 |
137 | // The commits after which the regression search in `asv publish`
138 | // should start looking for regressions. Dictionary whose keys are
139 | // regexps matching to benchmark names, and values corresponding to
140 | // the commit (exclusive) after which to start looking for
141 | // regressions. The default is to start from the first commit
142 | // with results. If the commit is `null`, regression detection is
143 | // skipped for the matching benchmark.
144 | //
145 | // "regressions_first_commits": {
146 | // "some_benchmark": "352cdf", // Consider regressions only after this commit
147 | // "another_benchmark": null, // Skip regression detection altogether
148 | // },
149 |
150 | // The thresholds for relative change in results, after which `asv
151 | // publish` starts reporting regressions. Dictionary of the same
152 | // form as in ``regressions_first_commits``, with values
153 | // indicating the thresholds. If multiple entries match, the
154 | // maximum is taken. If no entry matches, the default is 5%.
155 | //
156 | // "regressions_thresholds": {
157 | // "some_benchmark": 0.01, // Threshold of 1%
158 | // "another_benchmark": 0.5, // Threshold of 50%
159 | // },
160 | }
161 |
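162 | // Typical workflow with the standard asv CLI (asv strips these comments when
163 | // parsing): "asv run" executes the suites in benchmarks/, "asv publish"
164 | // renders results into "html_dir" above, and "asv preview" serves the report.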
--------------------------------------------------------------------------------
/benchmarks/__init__.py:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/benchmarks/benchmarks.py:
--------------------------------------------------------------------------------
1 | # Write the benchmarking functions here.
2 | # See "Writing benchmarks" in the asv docs for more information.
3 | from neuronunit.unit_test.opt_ephys_properties import testOptimizationEphysCase
4 | from neuronunit.unit_test.scores_unit_test import testOptimizationAllenMultiSpike
5 | from neuronunit.unit_test.rheobase_model_test import testModelRheobase
6 |
7 |
8 | class TimeSuite:
9 | """Time a full rheobase model-fitting run via its unit-test entry point."""
10 | def time_opt(self):
11 | # Reuse the unittest case: set it up, then run the optimization once.
12 | test = testModelRheobase("test_opt_1")
13 | test.setUp()
14 | test.test_opt_1()
15 |
16 |
17 | class MemSuite:
18 | def mem_list(self):
19 | return [0] * 256
20 |
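21 | # Note (standard asv naming conventions, not specific to this repo): methods
22 | # named "time_*" are timed repeatedly, and "mem_*" methods report the memory
23 | # footprint of the object they return, so both suites above are collected
24 | # automatically.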
--------------------------------------------------------------------------------
/build.sh:
--------------------------------------------------------------------------------
1 | apt-get update; apt-get install -y cpp gcc
2 | apt-get install -y libx11-6 python-dev git build-essential
3 | apt-get install -y autoconf automake gcc g++ make gfortran
4 | apt-get install -y python-tables
5 | apt-get install -y libhdf5-serial-dev
6 | conda install numpy;
7 | conda install numba;
8 | conda install dask;
9 | pip install pip --upgrade;
10 | pip install tables
11 | pip install scipy==1.5.4
12 | pip install -e .
13 | pip install coverage
14 | git clone -b neuronunit https://github.com/russelljjarvis/jit_hub.git
15 | cd jit_hub; pip install -e .; cd ..;
16 | pip install cython
17 | pip install asciiplotlib;
18 | git clone -b neuronunit_reduced_cells https://github.com/russelljjarvis/BluePyOpt.git
19 | cd BluePyOpt; pip install -e .
20 | pip install git+https://github.com/russelljjarvis/eFEL
21 | pip install ipfx
22 | pip install streamlit
23 | pip install scikit-learn
24 | pip install seaborn
25 | pip install frozendict
26 | pip install plotly
27 | pip install igor
28 | pip install pylmeasure
32 | pip install --upgrade colorama
33 | rm -rf /opt/conda/lib/python3.8/site-packages/sciunit
34 | git clone -b dev https://github.com/russelljjarvis/sciunit.git
35 | cd sciunit; pip install -e .; cd ..;
36 | pip install allensdk==0.16.3
37 |
--------------------------------------------------------------------------------
/codecov.yml:
--------------------------------------------------------------------------------
1 | coverage:
2 | range: "90..100"
3 |
4 | status:
5 | project:
6 | default:
7 | target: "90%"
8 | threshold: "5%"
9 | patch: false
10 |
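11 | # Notes on the settings above (standard Codecov semantics): "range" maps
12 | # 90-100% coverage onto the badge color scale, "threshold" tolerates up to a
13 | # 5% drop from the base commit before the project status fails, and
14 | # "patch: false" disables the per-patch coverage check.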
--------------------------------------------------------------------------------
/docs/Makefile:
--------------------------------------------------------------------------------
1 | # Minimal makefile for Sphinx documentation
2 | #
3 |
4 | # You can set these variables from the command line.
5 | SPHINXOPTS =
6 | SPHINXBUILD = sphinx-build
7 | SPHINXPROJ = NeuronUnit
8 | SOURCEDIR = source
9 | BUILDDIR = build
10 |
11 | # Put it first so that "make" without argument is like "make help".
12 | help:
13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
14 |
15 | .PHONY: help Makefile
16 |
17 | # Catch-all target: route all unknown targets to Sphinx using the new
18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
19 | %: Makefile
20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
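
# For example, "make html" forwards to "sphinx-build -M html source build",
# writing the rendered documentation to build/html.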
--------------------------------------------------------------------------------
/docs/data/NeuroML2/LEMS_2007One.xml:
--------------------------------------------------------------------------------
1 | <!-- The XML content of this file was stripped when this dump was generated;
2 |      only blank lines remain. A file with the same name exists at
3 |      neuronunit/models/NeuroML2/LEMS_2007One.xml. -->
--------------------------------------------------------------------------------
/docs/source/conf.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding: utf-8 -*-
3 | #
4 | # NeuronUnit documentation build configuration file, created by
5 | # sphinx-quickstart on Fri Mar 31 23:55:44 2017.
6 | #
7 | # This file is execfile()d with the current directory set to its
8 | # containing dir.
9 | #
10 | # Note that not all possible configuration values are present in this
11 | # autogenerated file.
12 | #
13 | # All configuration values have a default; values that are commented out
14 | # serve to show the default.
15 |
16 | # If extensions (or modules to document with autodoc) are in another directory,
17 | # add these directories to sys.path here. If the directory is relative to the
18 | # documentation root, use os.path.abspath to make it absolute, like shown here.
19 | #
20 | # import os
21 | # import sys
22 | # sys.path.insert(0, os.path.abspath('.'))
23 |
24 |
25 | # -- General configuration ------------------------------------------------
26 |
27 | # If your documentation needs a minimal Sphinx version, state it here.
28 | #
29 | # needs_sphinx = '1.0'
30 |
31 | # Add any Sphinx extension module names here, as strings. They can be
32 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
33 | # ones.
34 | extensions = ['sphinx.ext.autodoc',
35 | 'sphinx.ext.intersphinx',
36 | 'sphinx.ext.viewcode',
37 | 'sphinx.ext.githubpages']
38 |
39 | # Add any paths that contain templates here, relative to this directory.
40 | templates_path = ['_templates']
41 |
42 | # The suffix(es) of source filenames.
43 | # You can specify multiple suffix as a list of string:
44 | #
45 | # source_suffix = ['.rst', '.md']
46 | source_suffix = '.rst'
47 |
48 | # The master toctree document.
49 | master_doc = 'index'
50 |
51 | # General information about the project.
52 | project = 'NeuronUnit'
53 | copyright = '2017, Rick Gerkin'
54 | author = 'Rick Gerkin'
55 |
56 | # The version info for the project you're documenting, acts as replacement for
57 | # |version| and |release|, also used in various other places throughout the
58 | # built documents.
59 | #
60 | # The short X.Y version.
61 | version = ''
62 | # The full version, including alpha/beta/rc tags.
63 | release = ''
64 |
65 | # The language for content autogenerated by Sphinx. Refer to documentation
66 | # for a list of supported languages.
67 | #
68 | # This is also used if you do content translation via gettext catalogs.
69 | # Usually you set "language" from the command line for these cases.
70 | language = None
71 |
72 | # List of patterns, relative to source directory, that match files and
73 | # directories to ignore when looking for source files.
74 | # This patterns also effect to html_static_path and html_extra_path
75 | exclude_patterns = []
76 |
77 | # The name of the Pygments (syntax highlighting) style to use.
78 | pygments_style = 'sphinx'
79 |
80 | # If true, `todo` and `todoList` produce output, else they produce nothing.
81 | todo_include_todos = False
82 |
83 |
84 | # -- Options for HTML output ----------------------------------------------
85 |
86 | # The theme to use for HTML and HTML Help pages. See the documentation for
87 | # a list of builtin themes.
88 | #
89 | html_theme = 'bizstyle'
90 |
91 | # Theme options are theme-specific and customize the look and feel of a theme
92 | # further. For a list of options available for each theme, see the
93 | # documentation.
94 | #
95 | # html_theme_options = {}
96 |
97 | # Add any paths that contain custom static files (such as style sheets) here,
98 | # relative to this directory. They are copied after the builtin static files,
99 | # so a file named "default.css" will overwrite the builtin "default.css".
100 | html_static_path = ['_static']
101 |
102 |
103 | # -- Options for HTMLHelp output ------------------------------------------
104 |
105 | # Output file base name for HTML help builder.
106 | htmlhelp_basename = 'NeuronUnitdoc'
107 |
108 |
109 | # -- Options for LaTeX output ---------------------------------------------
110 |
111 | latex_elements = {
112 | # The paper size ('letterpaper' or 'a4paper').
113 | #
114 | # 'papersize': 'letterpaper',
115 |
116 | # The font size ('10pt', '11pt' or '12pt').
117 | #
118 | # 'pointsize': '10pt',
119 |
120 | # Additional stuff for the LaTeX preamble.
121 | #
122 | # 'preamble': '',
123 |
124 | # Latex figure (float) alignment
125 | #
126 | # 'figure_align': 'htbp',
127 | }
128 |
129 | # Grouping the document tree into LaTeX files. List of tuples
130 | # (source start file, target name, title,
131 | # author, documentclass [howto, manual, or own class]).
132 | latex_documents = [
133 | (master_doc, 'NeuronUnit.tex', 'NeuronUnit Documentation',
134 | 'Rick Gerkin', 'manual'),
135 | ]
136 |
137 |
138 | # -- Options for manual page output ---------------------------------------
139 |
140 | # One entry per manual page. List of tuples
141 | # (source start file, name, description, authors, manual section).
142 | man_pages = [
143 | (master_doc, 'neuronunit', 'NeuronUnit Documentation',
144 | [author], 1)
145 | ]
146 |
147 |
148 | # -- Options for Texinfo output -------------------------------------------
149 |
150 | # Grouping the document tree into Texinfo files. List of tuples
151 | # (source start file, target name, title, author,
152 | # dir menu entry, description, category)
153 | texinfo_documents = [
154 | (master_doc, 'NeuronUnit', 'NeuronUnit Documentation',
155 | author, 'NeuronUnit', 'Testing for neuron and ion channel models using the SciUnit framework.',
156 | 'Miscellaneous'),
157 | ]
158 |
159 |
160 |
161 |
162 | # Example configuration for intersphinx: refer to the Python standard library.
163 | intersphinx_mapping = {'https://docs.python.org/': None}
164 |
--------------------------------------------------------------------------------
/docs/source/index.rst:
--------------------------------------------------------------------------------
1 | .. NeuronUnit documentation master file, created by
2 | sphinx-quickstart on Fri Mar 31 23:55:44 2017.
3 | You can adapt this file completely to your liking, but it should at least
4 | contain the root `toctree` directive.
5 |
6 | Welcome to NeuronUnit's documentation!
7 | ======================================
8 |
9 | .. toctree::
10 | :maxdepth: 2
11 | :caption: Contents:
12 |
13 |
14 |
15 | Indices and tables
16 | ==================
17 |
18 | * :ref:`genindex`
19 | * :ref:`modindex`
20 | * :ref:`search`
21 |
--------------------------------------------------------------------------------
/docs/source/modules.rst:
--------------------------------------------------------------------------------
1 | neuronunit
2 | ==========
3 |
4 | .. toctree::
5 | :maxdepth: 4
6 |
7 | neuronunit
8 |
--------------------------------------------------------------------------------
/docs/source/neuronunit.capabilities.rst:
--------------------------------------------------------------------------------
1 | neuronunit.capabilities package
2 | ===============================
3 |
4 | Submodules
5 | ----------
6 |
7 | neuronunit.capabilities.channel module
8 | --------------------------------------
9 |
10 | .. automodule:: neuronunit.capabilities.channel
11 | :members:
12 | :undoc-members:
13 | :show-inheritance:
14 |
15 | neuronunit.capabilities.spike_functions module
16 | ----------------------------------------------
17 |
18 | .. automodule:: neuronunit.capabilities.spike_functions
19 | :members:
20 | :undoc-members:
21 | :show-inheritance:
22 |
23 |
24 | Module contents
25 | ---------------
26 |
27 | .. automodule:: neuronunit.capabilities
28 | :members:
29 | :undoc-members:
30 | :show-inheritance:
31 |
--------------------------------------------------------------------------------
/docs/source/neuronunit.models.rst:
--------------------------------------------------------------------------------
1 | neuronunit.models package
2 | =========================
3 |
4 | Submodules
5 | ----------
6 |
7 | neuronunit.models.backends module
8 | ---------------------------------
9 |
10 | .. automodule:: neuronunit.models.backends
11 | :members:
12 | :undoc-members:
13 | :show-inheritance:
14 |
15 | neuronunit.models.channel module
16 | --------------------------------
17 |
18 | .. automodule:: neuronunit.models.channel
19 | :members:
20 | :undoc-members:
21 | :show-inheritance:
22 |
23 | neuronunit.models.reduced module
24 | --------------------------------
25 |
26 | .. automodule:: neuronunit.models.reduced
27 | :members:
28 | :undoc-members:
29 | :show-inheritance:
30 |
31 |
32 | Module contents
33 | ---------------
34 |
35 | .. automodule:: neuronunit.models
36 | :members:
37 | :undoc-members:
38 | :show-inheritance:
39 |
--------------------------------------------------------------------------------
/docs/source/neuronunit.neuroconstruct.rst:
--------------------------------------------------------------------------------
1 | neuronunit.neuroconstruct package
2 | =================================
3 |
4 | Submodules
5 | ----------
6 |
7 | neuronunit.neuroconstruct.capabilities module
8 | ---------------------------------------------
9 |
10 | .. automodule:: neuronunit.neuroconstruct.capabilities
11 | :members:
12 | :undoc-members:
13 | :show-inheritance:
14 |
15 | neuronunit.neuroconstruct.models module
16 | ---------------------------------------
17 |
18 | .. automodule:: neuronunit.neuroconstruct.models
19 | :members:
20 | :undoc-members:
21 | :show-inheritance:
22 |
23 |
24 | Module contents
25 | ---------------
26 |
27 | .. automodule:: neuronunit.neuroconstruct
28 | :members:
29 | :undoc-members:
30 | :show-inheritance:
31 |
--------------------------------------------------------------------------------
/docs/source/neuronunit.rst:
--------------------------------------------------------------------------------
1 | neuronunit package
2 | ==================
3 |
4 | Subpackages
5 | -----------
6 |
7 | .. toctree::
8 |
9 | neuronunit.capabilities
10 | neuronunit.models
11 | neuronunit.neuroconstruct
12 | neuronunit.tests
13 |
14 | Submodules
15 | ----------
16 |
17 | neuronunit.aibs module
18 | ----------------------
19 |
20 | .. automodule:: neuronunit.aibs
21 | :members:
22 | :undoc-members:
23 | :show-inheritance:
24 |
25 | neuronunit.neuroelectro module
26 | ------------------------------
27 |
28 | .. automodule:: neuronunit.neuroelectro
29 | :members:
30 | :undoc-members:
31 | :show-inheritance:
32 |
33 | neuronunit.plottools module
34 | ---------------------------
35 |
36 | .. automodule:: neuronunit.plottools
37 | :members:
38 | :undoc-members:
39 | :show-inheritance:
40 |
41 |
42 | Module contents
43 | ---------------
44 |
45 | .. automodule:: neuronunit
46 | :members:
47 | :undoc-members:
48 | :show-inheritance:
49 |
--------------------------------------------------------------------------------
/docs/source/neuronunit.tests.rst:
--------------------------------------------------------------------------------
1 | neuronunit.tests package
2 | ========================
3 |
4 | Submodules
5 | ----------
6 |
7 | neuronunit.tests.analysis module
8 | --------------------------------
9 |
10 | .. automodule:: neuronunit.tests.analysis
11 | :members:
12 | :undoc-members:
13 | :show-inheritance:
14 |
15 | neuronunit.tests.channel module
16 | -------------------------------
17 |
18 | .. automodule:: neuronunit.tests.channel
19 | :members:
20 | :undoc-members:
21 | :show-inheritance:
22 |
23 | neuronunit.tests.dynamics module
24 | --------------------------------
25 |
26 | .. automodule:: neuronunit.tests.dynamics
27 | :members:
28 | :undoc-members:
29 | :show-inheritance:
30 |
31 | neuronunit.tests.exhaustive_search module
32 | -----------------------------------------
33 |
34 | .. automodule:: neuronunit.tests.exhaustive_search
35 | :members:
36 | :undoc-members:
37 | :show-inheritance:
38 |
39 | neuronunit.tests.gbevaluator module
40 | -----------------------------------
41 |
42 | .. automodule:: neuronunit.tests.gbevaluator
43 | :members:
44 | :undoc-members:
45 | :show-inheritance:
46 |
47 | neuronunit.tests.get_neab module
48 | --------------------------------
49 |
50 | .. automodule:: neuronunit.tests.get_neab
51 | :members:
52 | :undoc-members:
53 | :show-inheritance:
54 |
55 | neuronunit.tests.model_parameters module
56 | ----------------------------------------
57 |
58 | .. automodule:: neuronunit.tests.model_parameters
59 | :members:
60 | :undoc-members:
61 | :show-inheritance:
62 |
63 | neuronunit.tests.nsga module
64 | ----------------------------
65 |
66 | .. automodule:: neuronunit.tests.nsga
67 | :members:
68 | :undoc-members:
69 | :show-inheritance:
70 |
71 | neuronunit.tests.rheobase_old module
72 | ------------------------------------
73 |
74 | .. automodule:: neuronunit.tests.rheobase_old
75 | :members:
76 | :undoc-members:
77 | :show-inheritance:
78 |
79 | neuronunit.tests.rheobase_old2 module
80 | -------------------------------------
81 |
82 | .. automodule:: neuronunit.tests.rheobase_old2
83 | :members:
84 | :undoc-members:
85 | :show-inheritance:
86 |
87 | neuronunit.tests.rheobase_old3 module
88 | -------------------------------------
89 |
90 | .. automodule:: neuronunit.tests.rheobase_old3
91 | :members:
92 | :undoc-members:
93 | :show-inheritance:
94 |
95 | neuronunit.tests.rheobase_only module
96 | -------------------------------------
97 |
98 | .. automodule:: neuronunit.tests.rheobase_only
99 | :members:
100 | :undoc-members:
101 | :show-inheritance:
102 |
103 | neuronunit.tests.stdputil module
104 | --------------------------------
105 |
106 | .. automodule:: neuronunit.tests.stdputil
107 | :members:
108 | :undoc-members:
109 | :show-inheritance:
110 |
111 | neuronunit.tests.test_all module
112 | --------------------------------
113 |
114 | .. automodule:: neuronunit.tests.test_all
115 | :members:
116 | :undoc-members:
117 | :show-inheritance:
118 |
119 |
120 | Module contents
121 | ---------------
122 |
123 | .. automodule:: neuronunit.tests
124 | :members:
125 | :undoc-members:
126 | :show-inheritance:
127 |
--------------------------------------------------------------------------------
/environment.yml:
--------------------------------------------------------------------------------
1 | name: neuronunit_docs
2 | dependencies:
3 | - python==3.5
4 | - matplotlib>=1.5
5 | - sphinx>=1.5
6 | - pip:
7 | - neo==0.4
8 | - elephant
9 | - dask
10 | - numba
11 | - streamlit
12 | - scikit-learn
13 | - seaborn
14 | - frozendict
15 | - plotly
16 | - asciiplotlib
17 | - ipfx
18 | - git+https://github.com/russelljjarvis/jit_hub@neuronunit
19 | - git+https://github.com/russelljjarvis/BluePyOpt@neuronunit_reduced_cells
20 | - git+https://github.com/russelljjarvis/sciunit@dev
21 |
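22 | # Build this docs environment with: conda env create -f environment.yml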
--------------------------------------------------------------------------------
/neuronunit/__init__.py:
--------------------------------------------------------------------------------
1 | """NeuronUnit.
2 |
3 | Testing for neuron and ion channel models
4 | using the SciUnit framework.
5 | """
6 |
7 | import os
8 | import platform
9 |
10 | try:
11 | import sciunit
12 | assert sciunit
13 | except ImportError as e:
14 | print("NeuronUnit requires SciUnit: http://github.com/scidash/sciunit")
15 | raise e
16 |
17 | IMPLEMENTATION = platform.python_implementation()
18 | JYTHON = IMPLEMENTATION == 'Jython'
19 | CPYTHON = IMPLEMENTATION == 'CPython'
20 | DIR = os.path.dirname(os.path.realpath(__file__))
21 |
--------------------------------------------------------------------------------
/neuronunit/aibs.py:
--------------------------------------------------------------------------------
1 | """NeuronUnit module for interaction with the AIBS Cell Types Database."""
2 |
3 | import shelve
4 | import requests
5 |
6 | import numpy as np
7 | import quantities as pq
8 | from allensdk.api.queries.cell_types_api import CellTypesApi
9 |
10 |
11 | def is_aibs_up():
12 | """Check whether the AIBS Cell Types Database API is working."""
13 | url = ("http://api.brain-map.org/api/v2/data/query.xml?criteria=model"
14 | "::Specimen,rma::criteria,[id$eq320654829],rma::include,"
15 | "ephys_result(well_known_files(well_known_file_type"
16 | "[name$eqNWBDownload]))")
17 | request = requests.get(url)
18 | return request.status_code == 200
19 |
20 |
21 | def get_sweep_params(dataset_id, sweep_id):
22 | """Get sweep parameters.
23 |
24 | Get those corresponding to the sweep with id 'sweep_id' from
25 | the dataset with id 'dataset_id'.
26 | """
27 | ct = CellTypesApi()
28 | experiment_params = ct.get_ephys_sweeps(dataset_id)
29 | sp = None
30 | for sp in experiment_params:
31 | if sp['id'] == sweep_id:
32 | sweep_num = sp['sweep_number']
33 | if sweep_num is None:
34 | msg = "Sweep with ID %d not found in dataset with ID %d."
35 | raise Exception(msg % (sweep_id, dataset_id))
36 | break
37 | return sp
38 |
39 |
40 | def get_sp(experiment_params, sweep_ids):
41 | """Get sweep parameters.
42 |
43 | A candidate method for replacing 'get_sweep_params'.
44 | This fix is necessary due to changes in the allensdk.
45 | Warning: This method may not properly convey the original meaning
46 | of 'get_sweep_params'.
47 | """
48 | sp = None
49 | for sp in experiment_params:
50 | for sweep_id in sweep_ids:
51 | if sp['id'] == sweep_id:
52 | sweep_num = sp['sweep_number']
53 | if sweep_num is None:
54 | raise Exception('Sweep with ID %d not found.' % sweep_id)
55 | break
56 | return sp
57 |
58 |
59 | def get_observation(dataset_id, kind, cached=True, quiet=False):
60 | """Get an observation.
61 |
62 | Get an observation of kind 'kind' from the dataset with id 'dataset_id',
63 | optionally using the cached value retrieved previously.
64 | """
65 | db = shelve.open('aibs-cache') if cached else {}
66 | identifier = '%d_%s' % (dataset_id, kind)
67 | if identifier in db:
68 | print("Getting %s cached data value for from AIBS dataset %s"
69 | % (kind.title(), dataset_id))
70 | value = db[identifier]
71 | else:
72 | print("Getting %s data value for from AIBS dataset %s"
73 | % (kind.title(), dataset_id))
74 | ct = CellTypesApi()
75 | cmd = ct.get_cell(dataset_id) # Cell metadata
76 |
77 | if kind == 'rheobase':
78 | if 'ephys_features' in cmd:
79 | value = cmd['ephys_features'][0]['threshold_i_long_square'] # newer API
80 | else:
81 | value = cmd['ef__threshold_i_long_square'] # older API
82 |
83 | value = np.round(value, 2) # Round to nearest hundredth of a pA.
84 | value *= pq.pA # Apply units.
85 |
86 | else:
87 | value = cmd[kind]
88 |
89 | db[identifier] = value
90 |
91 | if cached:
92 | db.close()
93 | return {'value': value}
94 |
95 |
96 | def get_value_dict(experiment_params, sweep_ids, kind):
97 | """Get a dictionary of data values from the experiment.
98 |
99 | A candidate method for replacing 'get_observation'.
100 | This fix is necessary due to changes in the allensdk.
101 | Warning: Together with 'get_sp' this method may not properly
102 | convey the meaning of 'get_observation'.
103 | """
104 | if kind == str('rheobase'):
105 | sp = get_sp(experiment_params, sweep_ids)
106 | value = sp['stimulus_absolute_amplitude']
107 | value = np.round(value, 2) # Round to nearest hundredth of a pA.
108 | value *= pq.pA # Apply units.
109 | return {'value': value}
110 |
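111 | # Usage sketch (requires network access to the AIBS API; the dataset ID
112 | # below is only illustrative and should be replaced with a real specimen ID):
113 | # if is_aibs_up():
114 | #     obs = get_observation(dataset_id=354190013, kind='rheobase')
115 | #     print(obs['value'])  # a quantities scalar with pA units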
--------------------------------------------------------------------------------
/neuronunit/allenapi/__init__.py:
--------------------------------------------------------------------------------
1 | """Allen API for NeuronUnit"""
2 |
3 | import warnings
4 |
--------------------------------------------------------------------------------
/neuronunit/allenapi/aibs.py:
--------------------------------------------------------------------------------
1 | """NeuronUnit module for interaction with the Allen Brain Insitute
2 | Cell Types database"""
3 | # import logging
4 | # logger = logging.getLogger(name)
5 | # logging.info("test")
6 | import matplotlib as mpl
7 |
8 | try:
9 | mpl.use("agg")
10 | except:
11 | pass
12 | import matplotlib.pyplot as plt
13 | import shelve
14 | import requests
15 | import numpy as np
16 | import quantities as pq
17 | from allensdk.api.queries.cell_types_api import CellTypesApi
18 | from allensdk.core.cell_types_cache import CellTypesCache
19 | from allensdk.api.queries.glif_api import GlifApi
20 | import os
21 | import pickle
22 | from allensdk.api.queries.biophysical_api import BiophysicalApi
23 |
25 | from allensdk.ephys.extract_cell_features import extract_cell_features
26 | from collections import defaultdict
27 | from allensdk.core.nwb_data_set import NwbDataSet
28 |
29 | from neuronunit import models
30 | from neo.core import AnalogSignal
31 | import quantities as qt
32 | from types import MethodType
33 |
37 |
38 | import neo
39 | from elephant.spike_train_generation import threshold_detection
40 | from quantities import mV, ms
41 | from numba import jit
42 | import sciunit
43 | import math
44 | import pdb
46 |
47 |
48 | def is_aibs_up():
49 | """Check whether the AIBS Cell Types Database API is working."""
50 | url = (
51 | "http://api.brain-map.org/api/v2/data/query.xml?criteria=model"
52 | "::Specimen,rma::criteria,[id$eq320654829],rma::include,"
53 | "ephys_result(well_known_files(well_known_file_type"
54 | "[name$eqNWBDownload]))"
55 | )
56 | request = requests.get(url)
57 | return request.status_code == 200
58 |
59 |
60 | def get_observation(dataset_id, kind, cached=True, quiet=False):
61 | """Get an observation.
62 |
63 | Get an observation of kind 'kind' from the dataset with id 'dataset_id',
64 | optionally using the cached value retrieved previously.
65 | """
66 |
67 | db = shelve.open("aibs-cache") if cached else {}
68 | identifier = "%d_%s" % (dataset_id, kind)
69 | if identifier in db:
70 | print(
71 | "Getting %s cached data value for from AIBS dataset %s"
72 | % (kind.title(), dataset_id)
73 | )
74 | value = db[identifier]
75 | else:
76 | print(
77 | "Getting %s data value for from AIBS dataset %s"
78 | % (kind.title(), dataset_id)
79 | )
80 | ct = CellTypesApi()
81 | cmd = ct.get_cell(dataset_id) # Cell metadata
82 |
83 | if kind == "rheobase":
84 | if "ephys_features" in cmd:
85 | value = cmd["ephys_features"][0]["threshold_i_long_square"] # newer API
86 | else:
87 | value = cmd["ef__threshold_i_long_square"] # older API
88 |
89 | value = np.round(value, 2) # Round to nearest hundredth of a pA.
90 | value *= pq.pA # Apply units.
91 |
92 | else:
93 | value = cmd[kind]
94 |
95 | db[identifier] = value
96 |
97 | if cached:
98 | db.close()
99 | return {"value": value}
100 |
101 |
102 | def get_value_dict(experiment_params, sweep_ids, kind):
103 | """Get a dictionary of data values from the experiment.
104 |
105 | A candidate method for replacing 'get_observation'.
106 | This fix is necessary due to changes in the allensdk.
107 | Warning: Together with 'get_sp' this method may not properly
108 | convey the meaning of 'get_observation'.
109 | """
110 |
111 | if kind == str("rheobase"):
112 | sp = get_sp(experiment_params, sweep_ids)
113 | value = sp["stimulus_absolute_amplitude"]
114 | value = np.round(value, 2) # Round to nearest hundredth of a pA.
115 | value *= pq.pA # Apply units.
116 | return {"value": value}
117 |
118 |
119 | """Auxiliary helper functions for analysis of spiking."""
120 |
121 |
122 | def find_nearest(array, value):
123 | array = np.asarray(array)
124 | idx = (np.abs(array - value)).argmin()
125 | return (array[idx], idx)
126 |
127 |
128 | def inject_square_current(model, current):
129 | if isinstance(current, dict):
130 | current = float(current["amplitude"])
131 | data_set = model.data_set
132 | numbers = data_set.get_sweep_numbers()
133 | injections = [np.max(data_set.get_sweep(sn)["stimulus"]) for sn in numbers]
134 | # pick the experimental sweep whose injection is closest to the request
135 | (nearest, idx) = find_nearest(injections, current)
136 | index = np.asarray(numbers)[idx]
137 | sweep_data = data_set.get_sweep(index)
138 | temp_vm = sweep_data["response"]
139 | injection = sweep_data["stimulus"]
140 | sampling_rate = sweep_data["sampling_rate"]
141 | vm = AnalogSignal(temp_vm, sampling_rate=sampling_rate * qt.Hz, units=qt.V)
142 | model._vm = vm
143 | return model._vm
144 |
145 |
146 | def get_membrane_potential(model):
147 | return model._vm
148 |
149 |
150 | def get_spike_train(vm, threshold=0.0 * mV):
151 | """
152 | Inputs:
153 | vm: a neo.core.AnalogSignal corresponding to a membrane potential trace.
154 | threshold: the value (in mV) above which vm has to cross for there
155 | to be a spike. Scalar float.
156 |
157 | Returns:
158 | a neo.core.SpikeTrain containing the times of spikes.
159 | """
160 | spike_train = threshold_detection(vm, threshold=threshold)
161 | return spike_train
162 |
163 |
164 | def get_spike_count(model):
165 | vm = model.get_membrane_potential()
166 | train = get_spike_train(vm)
167 | return len(train)
168 |
169 |
170 | def appropriate_features(sweeps):
171 | currents = []  # stimulus parameters gathered from the ramp sweeps
172 | for s in sweeps:
173 | if s["ramp"]:
174 | currents.append({"amplitude": s["stimulus_absolute_amplitude"],
175 | "duration": s["stimulus_duration"],
176 | "delay": s["stimulus_start_time"]})
177 | return currents
178 |
179 |
180 | def get_features(specimen_id=485909730):
181 | ctc = CellTypesCache()  # downloads and caches Cell Types data locally
182 | data_set = ctc.get_ephys_data(specimen_id)
183 | sweeps = ctc.get_ephys_sweeps(specimen_id)
184 |
185 | # group the sweeps by stimulus
186 | sweep_numbers = defaultdict(list)
187 | for sweep in sweeps:
188 | sweep_numbers[sweep["stimulus_name"]].append(sweep["sweep_number"])
189 |
190 | # calculate features
191 | cell_features = extract_cell_features(
192 | data_set,
193 | sweep_numbers["Ramp"],
194 | sweep_numbers["Short Square"],
195 | sweep_numbers["Long Square"],
196 | )
197 | return cell_features
198 | def get_sweep_params(dataset_id, sweep_id):
199 | """Get sweep parameters.
200 |
201 | Get those corresponding to the sweep with id 'sweep_id' from
202 | the dataset with id 'dataset_id'.
203 | """
204 |
205 | ct = CellTypesApi()
206 | experiment_params = ct.get_ephys_sweeps(dataset_id)
207 | sp = None
208 | for sp in experiment_params:
209 | if sp["id"] == sweep_id:
210 | sweep_num = sp["sweep_number"]
211 | if sweep_num is None:
212 | msg = "Sweep with ID %d not found in dataset with ID %d."
213 | raise Exception(msg % (sweep_id, dataset_id))
214 | break
215 | return sp
216 |
217 |
218 | def get_sp(experiment_params, sweep_ids):
219 |
220 | """Get sweep parameters.
221 | A candidate method for replacing 'get_sweep_params'.
222 | This fix is necessary due to changes in the allensdk.
223 | Warning: This method may not properly convey the original meaning
224 | of 'get_sweep_params'.
225 | """
226 |
227 | sp = None
228 | for sp in experiment_params:
229 | for sweep_id in sweep_ids:
230 | if sp["id"] == sweep_id:
231 | sweep_num = sp["sweep_number"]
232 | if sweep_num is None:
233 | raise Exception("Sweep with ID %d not found." % sweep_id)
234 | break
235 | return sp
236 |
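237 |
238 | # Usage sketch (assumes the Allen SDK can download the default specimen
239 | # 485909730; the key names follow the Allen SDK feature-extraction examples):
240 | # cell_features = get_features()
241 | # print(cell_features["long_squares"]["rheobase_i"])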
--------------------------------------------------------------------------------
/neuronunit/allenapi/make_allen_tests.py:
--------------------------------------------------------------------------------
1 | from neuronunit.tests.base import VmTest
2 | import pickle
3 | import numpy as np
4 | from allensdk.core.cell_types_cache import CellTypesCache
5 | from neuronunit.models.optimization_model_layer import OptimizationModel
6 |
7 | from neuronunit.optimization.optimization_management import (
8 | multi_spiking_feature_extraction,
9 | )
10 | from sciunit.scores import RelativeDifferenceScore
11 |
12 |
13 | class AllenTest(VmTest):
14 | def __init__(
15 | self,
16 | observation={"mean": None, "std": None},
17 | name="generic_allen",
18 | prediction={"mean": None, "std": None},
19 | ):
20 | super(AllenTest, self).__init__(observation, name)
21 | self.name = name
22 | self.score_type = RelativeDifferenceScore
23 | self.observation = observation
24 | self.prediction = prediction
25 |
26 | aliases = ""
27 |
28 | def generate_prediction(self, model=None):
29 | if self.prediction is None:
30 | dtc = OptimizationModel()
31 | dtc.backend = model.backend
32 | dtc.attrs = model.attrs
33 | dtc.rheobase = model.rheobase
34 | dtc.tests = [self]
35 | dtc = multi_spiking_feature_extraction(dtc)
36 | dtc, ephys0 = allen_wave_predictions(dtc, thirty=True)
37 | dtc, ephys1 = allen_wave_predictions(dtc, thirty=False)
38 | if self.name in ephys0.keys():
39 | feature = ephys0[self.name]
40 | self.prediction = {}
41 | self.prediction["value"] = feature
42 | # self.prediction['std'] = feature
43 | if self.name in ephys1.keys():
44 | feature = ephys1[self.name]
45 | self.prediction = {}
46 | self.prediction["value"] = feature
47 | # self.prediction['std'] = feature
48 | return self.prediction
49 | # ephys1.update()
50 | # if not len(self.prediction.keys()):
51 |
52 | def compute_params(self):
53 | self.params["t_max"] = (
54 | self.params["delay"] + self.params["duration"] + self.params["padding"]
55 | )
56 |
57 | # @property
58 | # def prediction(self):
59 | # return self._prediction
60 |
61 | # @property
62 | # def observation(self):
63 | # return self._observation
64 |
65 | # @observation.setter
66 | def set_observation(self, value):
67 | self.observation = {}
68 | self.observation["mean"] = value
69 | self.observation["std"] = value
70 |
71 | # @prediction.setter
72 | def set_prediction(self, value):
73 | self.prediction = {}
74 | self.prediction["mean"] = value
75 | self.prediction["std"] = value
76 |
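77 | # Minimal usage sketch (hypothetical feature name and values; observation
78 | # and prediction must share units):
79 | # test = AllenTest(name="spike_width")
80 | # test.set_observation(1.2)
81 | # test.set_prediction(1.1)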
--------------------------------------------------------------------------------
/neuronunit/bbp.py:
--------------------------------------------------------------------------------
1 | """NeuronUnit module for interaction with the Blue Brain Project data."""
2 |
3 | import os
4 | import zipfile
5 | import json
6 |
7 | import requests
8 | import matplotlib.pyplot as plt
9 | from neo.io import IgorIO
10 | from io import BytesIO
11 | from urllib.request import urlopen, URLError
12 |
13 |
14 | def is_bbp_up():
15 | """Check whether the BBP microcircuit portal is up."""
16 | url = "http://microcircuits.epfl.ch/data/released_data/B95.zip"
17 | request = requests.get(url)
18 | return request.status_code == 200
19 |
20 |
21 | def list_curated_data():
22 | """List all curated datasets as of July 1st, 2017.
23 |
24 | Includes those found at
25 | http://microcircuits.epfl.ch/#/article/article_4_eph
26 | """
27 | url = "http://microcircuits.epfl.ch/data/articles/article_4_eph.json"
28 | cells = []
29 | try:
30 | response = urlopen(url)
31 | except URLError:
32 | print ("Could not find list of curated data at %s" % URL)
33 | else:
34 | data = json.load(response)
35 | table = data['data_table']['table']['rows']
36 | for section in table:
37 | for row in section:
38 | if 'term' in row:
39 | cell = row['term'].split(' ')[1]
40 | cells.append(cell)
41 | return cells
42 |
43 |
44 | def get_curated_data(data_id, sweeps=None):
45 | """Download curated data (Igor files) from the microcircuit portal.
46 |
47 | data_id: An ID number like the ones in 'list_curated_data()' that appears
48 | in http://microcircuits.epfl.ch/#/article/article_4_eph.
49 | """
50 | url = "http://microcircuits.epfl.ch/data/released_data/%s.zip" % data_id
51 | data = get_sweeps(url, sweeps=sweeps)
52 | return data
53 |
54 |
55 | def get_uncurated_data(data_id, sweeps=None):
56 | """Download uncurated data (Igor files) from the microcircuit portal."""
57 | url = "http://microcircuits.epfl.ch/data/uncurated/%s_folder.zip" % data_id
58 | data = get_sweeps(url, sweeps=sweeps)
59 | return data
60 |
61 |
62 | def get_sweeps(url, sweeps=None):
63 | """Get sweeps of data from the given URL."""
64 | print("Getting data from %s" % url)
65 | path = find_or_download_data(url) # Base path for this data
66 | assert type(sweeps) in [type(None), list], "Sweeps must be None or a list."
67 | sweep_paths = list_sweeps(path) # Available sweeps
68 | if sweeps is None:
69 | sweeps = sweep_paths
70 | else:
71 | requested = sweeps  # the sweep names asked for by the caller
72 | # keep only the available sweep paths that match a requested name
73 | sweeps = set(sweep_path for sweep_path in sweep_paths
74 | if any(sweep_path.endswith(sweep)
75 | for sweep in requested))
76 | data = {sweep: open_data(sweep) for sweep in sweeps}
77 | return data
78 |
79 |
80 | def find_or_download_data(url):
81 | """Find or download data from the given URL.
82 |
83 | Return a path to a local directory containing the unzipped data found
84 | at the provided url. The zipped file will be downloaded and unzipped if
85 | the directory cannot be found. The path to the directory is returned.
86 | """
87 | zipped = url.split('/')[-1] # Name of zip file
88 | unzipped = zipped.split('.')[0] # Name when unzipped
89 | z = None
90 | if not os.path.isdir(unzipped): # If unzipped version not found
91 | r = requests.get(url)
92 | z = zipfile.ZipFile(BytesIO(r.content))
93 | z.extractall(unzipped)
94 | return unzipped
95 |
96 |
97 | def list_sweeps(url, extension='.ibw'):
98 | """List all sweeps available in the file at the given URL."""
99 | path = find_or_download_data(url) # Base path for this data
100 | sweeps = find_sweeps(path, extension=extension)
101 | return sweeps
102 |
103 |
104 | def find_sweeps(path, extension='.ibw', depth=0):
105 | """Find sweeps available at the given path.
106 |
107 | Starting from 'path', recursively searches subdirectories and returns
108 | full paths to all files ending with 'extension'.
109 | """
110 | sweeps = []
111 | items = os.listdir(path)
112 | for item in items:
113 | new_path = os.path.join(path, item)
114 | if os.path.isdir(new_path):
115 | sweeps += find_sweeps(new_path, extension=extension, depth=depth+1)
116 | if os.path.isfile(new_path) and item.endswith(extension):
117 | sweeps += [new_path]
118 | return sweeps
119 |
120 |
121 | def open_data(path):
122 | """Take a 'path' to an .ibw file and returns a neo.core.AnalogSignal."""
123 | igor_io = IgorIO(filename=path)
124 | analog_signal = igor_io.read_analogsignal()
125 | return analog_signal
126 |
127 |
128 | def plot_data(signal):
129 | """Plot the data in a neo.core.AnalogSignal."""
130 | plt.plot(signal.times, signal)
131 | plt.xlabel(signal.sampling_period.dimensionality)
132 | plt.ylabel(signal.dimensionality)
133 |
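A minimal usage sketch for the module above (assuming the portal is reachable;
'B95' is the dataset ID already probed by `is_bbp_up`, and the sweep chosen
for plotting is arbitrary):

    from neuronunit import bbp

    if bbp.is_bbp_up():
        cells = bbp.list_curated_data()     # e.g. ['B95', ...]
        data = bbp.get_curated_data('B95')  # {sweep_path: AnalogSignal}
        first = sorted(data)[0]
        bbp.plot_data(data[first])          # Plot one sweep's trace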
--------------------------------------------------------------------------------
/neuronunit/capabilities/__init__.py:
--------------------------------------------------------------------------------
1 | """NeuronUnit abstract Capabilities.
2 |
3 | The goal is to enumerate all possible capabilities of a model that would be
4 | tested using NeuronUnit. These capabilities exchange 'neo' objects.
5 | """
6 |
7 | import numpy as np
8 | import quantities as pq
9 | import sciunit
10 | import matplotlib.pyplot as plt
11 | from .spike_functions import spikes2amplitudes, spikes2widths,\
12 | spikes2thresholds
13 |
14 |
15 | class ProducesMembranePotential(sciunit.Capability):
16 | """Indicates that the model produces a somatic membrane potential."""
17 |
18 | def get_membrane_potential(self, **kwargs):
19 | """Must return a neo.core.AnalogSignal."""
20 | raise NotImplementedError()
21 |
22 | def get_mean_vm(self, **kwargs):
23 | """Get the mean membrane potential."""
24 | vm = self.get_membrane_potential(**kwargs)
25 | return np.mean(vm.base)
26 |
27 | def get_median_vm(self, **kwargs):
28 | """Get the median membrane potential."""
29 | vm = self.get_membrane_potential(**kwargs)
30 | return np.median(vm.base)
31 |
32 | def get_std_vm(self, **kwargs):
33 | """Get the standard deviation of the membrane potential."""
34 | vm = self.get_membrane_potential(**kwargs)
35 | return np.std(vm.base)
36 |
37 | def get_iqr_vm(self, **kwargs):
38 | """Get the inter-quartile range of the membrane potential."""
39 | vm = self.get_membrane_potential(**kwargs)
40 | return (np.percentile(vm, 75) - np.percentile(vm, 25))*vm.units
41 |
42 | def get_initial_vm(self, **kwargs):
43 | """Return a quantity corresponding to the starting membrane potential.
44 |
45 | This will in some cases be the resting potential.
46 | """
47 | vm = self.get_membrane_potential(**kwargs)
48 | return vm[0] # A neo.core.AnalogSignal object
49 |
50 | def plot_membrane_potential(self, ax=None, ylim=(None, None), **kwargs):
51 | """Plot the membrane potential."""
52 | vm = self.get_membrane_potential(**kwargs)
53 | if ax is None:
54 | ax = plt.gca()
55 | vm = vm.rescale('mV')
56 | ax.plot(vm.times, vm)
57 | y_min = float(vm.min()-5.0*pq.mV) if ylim[0] is None else ylim[0]
58 | y_max = float(vm.max()+5.0*pq.mV) if ylim[1] is None else ylim[1]
59 | ax.set_xlim(vm.times.min(), vm.times.max())
60 | ax.set_ylim(y_min, y_max)
61 | ax.set_xlabel('Time (s)')
62 | ax.set_ylabel('Vm (mV)')
63 |
64 |
65 | class ProducesSpikes(sciunit.Capability):
66 | """Indicate that the model produces spikes.
67 |
68 | No duration is required for these spikes.
69 | """
70 |
71 | def get_spike_train(self):
72 | """Get computed spike times from the model.
73 |
74 | Arguments: None.
75 | Returns: a neo.core.SpikeTrain object.
76 | """
77 | raise NotImplementedError()
78 |
79 | def get_spike_count(self):
80 | """Get the number of spikes."""
81 | spike_train = self.get_spike_train()
82 | return len(spike_train)
83 |
84 |
85 | class ProducesActionPotentials(ProducesSpikes,
86 | ProducesMembranePotential):
87 | """Indicate the model produces action potential waveforms.
88 |
89 | Waveforms must have a temporal extent.
90 | """
91 |
92 | def get_APs(self):
93 | """Get action potential waveform chunks from the model.
94 |
95 | Returns
96 | -------
97 | Must return a neo.core.AnalogSignal.
98 | Each column of the AnalogSignal should be a spike waveform.
99 | """
100 | raise NotImplementedError()
101 |
102 | def get_AP_widths(self):
103 | """Get widths of action potentials."""
104 | action_potentials = self.get_APs()
105 | widths = spikes2widths(action_potentials)
106 | return widths
107 |
108 | def get_AP_amplitudes(self):
109 | """Get amplitudes of action potentials."""
110 | action_potentials = self.get_APs()
111 | amplitudes = spikes2amplitudes(action_potentials)
112 | return amplitudes
113 |
114 | def get_AP_thresholds(self):
115 | """Get thresholds of action potentials."""
116 | action_potentials = self.get_APs()
117 | thresholds = spikes2thresholds(action_potentials)
118 | return thresholds
119 |
120 |
121 | class ReceivesSquareCurrent(sciunit.Capability):
122 | """Indicate that somatic current can be injected into the model as
123 | a square pulse.
124 | """
125 |
126 |     def inject_square_current(self, current):
127 |         """Inject somatic current into the model.
128 |
129 |         Parameters
130 |         ----------
131 |         current : a dictionary like:
132 |             {'amplitude': -10.0*pq.pA,
133 |              'delay': 100*pq.ms,
134 |              'duration': 500*pq.ms}
135 |             where 'pq' is the quantities package.
136 |             This describes the current to be injected.
137 |         """
138 | raise NotImplementedError()
139 |
140 |
141 | class ReceivesCurrent(ReceivesSquareCurrent):
142 | """Indicate that somatic current can be injected into the model as
143 | either an arbitrary waveform or as a square pulse.
144 | """
145 |
146 | def inject_current(self, current):
147 | """Inject somatic current into the model.
148 |
149 | Parameters
150 | ----------
151 | current : neo.core.AnalogSignal
152 | This is a time series of the current to be injected.
153 | """
154 | raise NotImplementedError()
155 |
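As a sketch of how these capabilities are meant to be mixed into a model class
(the `FlatModel` below, with its constant -65 mV trace, is hypothetical):

    import numpy as np
    import quantities as pq
    import sciunit
    from neo.core import AnalogSignal
    import neuronunit.capabilities as cap

    class FlatModel(sciunit.Model, cap.ProducesMembranePotential):
        """Toy model returning a flat -65 mV trace."""
        def get_membrane_potential(self, **kwargs):
            v = -65.0 * np.ones(10000)
            return AnalogSignal(v, units=pq.mV, sampling_period=0.1*pq.ms)

    model = FlatModel(name='flat')
    print(model.get_mean_vm())  # -65.0, via the capability's default method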
--------------------------------------------------------------------------------
/neuronunit/capabilities/channel.py:
--------------------------------------------------------------------------------
1 | """NeuronUnit abstract Capabilities for channel models"""
2 |
3 | import inspect
4 |
5 | import sciunit
6 |
7 |
8 | class NML2ChannelAnalysis(sciunit.Capability):
9 |     """Capability for models that can be analyzed using functions available
10 |     in pyNeuroML.analysis.NML2ChannelAnalysis."""
11 |
12 |     def ca_make_lems_file(self, **run_params):
13 |         """Make a LEMS file from the provided run parameters,
14 |         using the ChannelAnalysis module."""
15 |         raise NotImplementedError("%s not implemented" %
16 |                                   inspect.stack()[0][3])
17 |
18 |     def ca_run_lems_file(self):
19 |         """Run the LEMS file using the ChannelAnalysis module."""
20 |         raise NotImplementedError("%s not implemented" %
21 |                                   inspect.stack()[0][3])
22 |
23 |     def compute_iv_curve(self, results):
24 |         """Compute an IV curve from the iv data in `results`."""
25 |         raise NotImplementedError("%s not implemented" %
26 |                                   inspect.stack()[0][3])
27 |
28 |     def plot_iv_curve(self, v, i, *plt_args, **plt_kwargs):
29 |         """Plot the IV curve using array-like voltage 'v'
30 |         and array-like current 'i'."""
31 |         raise NotImplementedError("%s not implemented" %
32 |                                   inspect.stack()[0][3])
33 |
--------------------------------------------------------------------------------
/neuronunit/capabilities/morphology.py:
--------------------------------------------------------------------------------
1 | """NeuronUnit abstract Capabilities for multicompartment cell models"""
2 |
3 | import inspect
4 | import sciunit
5 |
6 | class ProducesSWC(sciunit.Capability):
7 |     '''
8 |     The capability to produce a morphology SWC file.
9 |     '''
10 |     def produce_swc(self):
11 |         '''
12 |         Produce a morphology description file in SWC format.
13 |
14 |         :return: absolute path to the produced SWC file
15 |         '''
16 |         raise NotImplementedError("%s not implemented" % inspect.stack()[0][3])
--------------------------------------------------------------------------------
/neuronunit/capabilities/spike_functions.py:
--------------------------------------------------------------------------------
1 | """Auxiliary helper functions for analysis of spiking."""
2 |
3 | import numpy as np
4 | import neo
5 | from elephant.spike_train_generation import threshold_detection
6 | from quantities import mV, ms
7 | from numba import jit
8 | import sciunit
9 | import math
10 |
11 |
12 | def get_spike_train(vm, threshold=0.0*mV):
13 | """
14 | Inputs:
15 | vm: a neo.core.AnalogSignal corresponding to a membrane potential trace.
16 | threshold: the value (in mV) above which vm has to cross for there
17 | to be a spike. Scalar float.
18 |
19 | Returns:
20 | a neo.core.SpikeTrain containing the times of spikes.
21 | """
22 | spike_train = threshold_detection(vm, threshold=threshold)
23 | return spike_train
24 |
25 |
26 | def get_spike_waveforms(vm, threshold=0.0*mV, width=10*ms):
27 | """
28 | Membrane potential trace (1D numpy array) to matrix of
29 | spike snippets (2D numpy array)
30 |
31 | Inputs:
32 | vm: a neo.core.AnalogSignal corresponding to a membrane potential trace.
33 | threshold: the value (in mV) above which vm has to cross for there
34 | to be a spike. Scalar float.
35 | width: the length (in ms) of the snippet extracted,
36 | centered at the spike peak.
37 |
38 | Returns:
39 | a neo.core.AnalogSignal where each column contains a membrane potential
40 | snippets corresponding to one spike.
41 | """
42 | spike_train = threshold_detection(vm, threshold=threshold)
43 |
44 | # Fix for 0-length spike train issue in elephant.
45 | try:
46 | assert len(spike_train) != 0
47 | except TypeError:
48 | spike_train = neo.core.SpikeTrain([], t_start=spike_train.t_start,
49 | t_stop=spike_train.t_stop,
50 | units=spike_train.units)
51 |
52 | too_short = True
53 | too_long = True
54 |
55 |     # Check that the requested snippet window does not index out of
56 |     # bounds at either end of the recording.
57 | t = spike_train[0]
58 | if t-width/2.0 > 0.0*ms:
59 | too_short = False
60 |
61 | t = spike_train[-1]
62 | if t+width/2.0 < vm.times[-1]:
63 | too_long = False
64 | if not too_short and not too_long:
65 | snippets = [vm.time_slice(t-width/2, t+width/2) for t in spike_train]
66 | elif too_long:
67 | snippets = [vm.time_slice(t-width/2, t) for t in spike_train]
68 | elif too_short:
69 | snippets = [vm.time_slice(t, t+width/2) for t in spike_train]
70 |
71 | result = neo.core.AnalogSignal(np.array(snippets).T.squeeze(),
72 | units=vm.units,
73 | sampling_rate=vm.sampling_rate)
74 |
75 | return result
76 |
77 |
78 | def spikes2amplitudes(spike_waveforms):
79 | """
80 | IN:
81 | spike_waveforms: Spike waveforms, e.g. from get_spike_waveforms().
82 | neo.core.AnalogSignal
83 | OUT:
84 | 1D numpy array of spike amplitudes, i.e. the maxima in each waveform.
85 | """
86 |
87 | n_spikes = spike_waveforms.shape[1]
88 | ampls = np.array([spike_waveforms[:, i].max() for i in range(n_spikes)])
89 | if n_spikes:
90 | # Add units.
91 | ampls = ampls * spike_waveforms[:, 0].units
92 | return ampls
93 |
94 |
95 | def spikes2widths(spike_waveforms):
96 | """
97 | IN:
98 | spike_waveforms: Spike waveforms, e.g. from get_spike_waveforms().
99 | neo.core.AnalogSignal
100 | OUT:
101 | 1D numpy array of spike widths, specifically the full width
102 | at half the maximum amplitude.
103 | """
104 | n_spikes = spike_waveforms.shape[1]
105 | widths = []
106 | for i in range(n_spikes):
107 | s = spike_waveforms[:, i].squeeze()
108 |         try:
109 |             x_high = int(np.argmax(s))
110 |             high = s[x_high]
111 |         except Exception:  # Fall back to a manual scan for the peak.
112 |             high = 0
113 |             x_high = 0
114 |             for index, value in enumerate(np.ravel(s)):
115 |                 if value > high:
116 |                     high = value
117 |                     x_high = index
118 |
119 | if x_high > 0:
120 | try: # Use threshold to compute half-max.
121 | y = np.array(s)
122 | dvdt = np.diff(y)
123 | trigger = dvdt.max()/10
124 | x_loc = int(np.where(dvdt >= trigger)[0][0])
125 | thresh = (s[x_loc]+s[x_loc+1])/2
126 | mid = (high+thresh)/2
127 |             except Exception:  # Use minimum value to compute half-max.
128 | sciunit.log(("Could not compute threshold; using pre-spike "
129 | "minimum to compute width"))
130 | low = np.min(s[:x_high])
131 | mid = (high+low)/2
132 | n_samples = sum(s > mid) # Number of samples above the half-max.
133 | widths.append(n_samples)
134 | widths = np.array(widths, dtype='float')
135 | if n_spikes:
136 | # Convert from samples to time.
137 | widths = widths*spike_waveforms.sampling_period
138 | return widths
139 |
140 |
141 | def spikes2thresholds(spike_waveforms):
142 | """
143 | IN:
144 | spike_waveforms: Spike waveforms, e.g. from get_spike_waveforms().
145 | neo.core.AnalogSignal
146 | OUT:
147 | 1D numpy array of spike thresholds, specifically the membrane potential
148 | at which 1/10 the maximum slope is reached.
149 |
150 |     If the derivative contains NaNs (probably because vm contains NaNs),
151 |     an empty list with the appropriate units is returned.
152 |
153 | """
154 |
155 | n_spikes = spike_waveforms.shape[1]
156 | thresholds = []
157 | for i in range(n_spikes):
158 | s = spike_waveforms[:, i].squeeze()
159 | s = np.array(s)
160 | dvdt = np.diff(s)
161 |         if np.isnan(dvdt).any():
162 |             # Bail out early; see the docstring above.
163 |             return thresholds * spike_waveforms.units
164 |
165 | trigger = dvdt.max()/10
166 | x_loc = np.where(dvdt >= trigger)[0][0]
167 | thresh = (s[x_loc]+s[x_loc+1])/2
168 | thresholds.append(thresh)
169 | return thresholds * spike_waveforms.units
170 |
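A small end-to-end sketch of these helpers on a synthetic trace (the square
"spike" is artificial, purely to exercise the functions):

    import numpy as np
    import quantities as pq
    from neo.core import AnalogSignal
    from neuronunit.capabilities.spike_functions import \
        get_spike_waveforms, spikes2amplitudes

    # 1 s of quiet membrane at -65 mV with one brief depolarization to +20 mV.
    v = -65.0 * np.ones(10000)
    v[5000:5050] = 20.0
    vm = AnalogSignal(v, units=pq.mV, sampling_period=0.1*pq.ms)

    waveforms = get_spike_waveforms(vm)  # One column per detected spike
    print(spikes2amplitudes(waveforms))  # ~[20.0] mV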
--------------------------------------------------------------------------------
/neuronunit/examples/geppeto_backend.py:
--------------------------------------------------------------------------------
1 | import quantities as pq
2 | from neuronunit.tests.passive import InputResistanceTest
3 | from neuronunit.models.reduced import ReducedModel
4 | test = InputResistanceTest(observation={'mean': 200.0*pq.MOhm,
5 | 'std': 50.0*pq.MOhm})
6 | model_url = ("https://raw.githubusercontent.com/scidash/neuronunit"
7 | "/dev/neuronunit/models/NeuroML2/LEMS_2007One.xml")
8 | model = ReducedModel(model_url, backend='Geppetto')
9 | test.setup_protocol(model)
10 | print(model.lems_file_path)
--------------------------------------------------------------------------------
/neuronunit/examples/geppetto-prep.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "### Demonstration of using `Test.setup_protocol` to rewrite the NeuroML file before sending it to a remote Geppetto server."
8 | ]
9 | },
10 | {
11 | "cell_type": "code",
12 | "execution_count": 10,
13 | "metadata": {},
14 | "outputs": [],
15 | "source": [
16 | "import quantities as pq\n",
17 | "from neuronunit.tests.passive import InputResistanceTest\n",
18 | "from neuronunit.models.reduced import ReducedModel\n",
19 | "# Don't worry about not being able to load the NEURONBackend"
20 | ]
21 | },
22 | {
23 | "cell_type": "code",
24 | "execution_count": 2,
25 | "metadata": {},
26 | "outputs": [],
27 | "source": [
28 | "test = InputResistanceTest(observation={'mean':200.0*pq.MOhm, \n",
29 | " 'std':50.0*pq.MOhm})\n",
30 | "model_url = (\"https://raw.githubusercontent.com/scidash/neuronunit\"\n",
31 | " \"/dev/neuronunit/models/NeuroML2/LEMS_2007One.xml\")\n",
32 | "\n",
33 | "# A new Backend which is just like the jNeuroMLBackend, in that it writes nml files, \n",
34 | "# but does not actually so simulation\n",
35 | "model = ReducedModel(model_url, backend='Geppetto') "
36 | ]
37 | },
38 | {
39 | "cell_type": "code",
40 | "execution_count": 3,
41 | "metadata": {},
42 | "outputs": [],
43 | "source": [
44 | "# Actually do current injection. In the Geppetto (or jNeuroMLBackend, this will write new nml files)\n",
45 | "# For the InputResistanceTest, it should change the amplitude to -10.0 pA. \n",
46 | "test.setup_protocol(model) "
47 | ]
48 | },
49 | {
50 | "cell_type": "code",
51 | "execution_count": 4,
52 | "metadata": {},
53 | "outputs": [],
54 | "source": [
55 | "# Get paths to the files (same path before and after writing, but these are the ones edited)\n",
56 | "nml_paths = model.get_nml_paths() \n",
57 | "# In this examples there is only one nml file, which is an include of the LEMS file at `model_url`. \n",
58 | "path = nml_paths[0] "
59 | ]
60 | },
61 | {
62 | "cell_type": "code",
63 | "execution_count": 7,
64 | "metadata": {},
65 | "outputs": [
66 | {
67 | "name": "stdout",
68 | "output_type": "stream",
69 | "text": [
70 | "\u001b[0m\u001b[01;32m/tmp/tmpt6muan44/Izh2007One.net.nml\u001b[0m*\n"
71 | ]
72 | }
73 | ],
74 | "source": [
75 | "ls /tmp/tmpt6muan44/Izh2007One.net.nml"
76 | ]
77 | },
78 | {
79 | "cell_type": "code",
80 | "execution_count": 9,
81 | "metadata": {},
82 | "outputs": [
83 | {
84 | "data": {
85 | "text/plain": [
86 | "\n",
87 | "\n",
88 | "\n",
89 | "\n",
90 | "\n",
91 | "\n",
92 | "\n",
93 | "\n",
94 | "\n",
95 | " \n",
96 | " \n",
97 | " \n",
98 | " \n",
99 | "\n",
100 | "\n",
101 | ""
102 | ]
103 | },
104 | "metadata": {},
105 | "output_type": "display_data"
106 | }
107 | ],
108 | "source": [
109 | "# IPython line magic to display the contents of the nml file at this path. \n",
110 | "# It displays the correct, new current amplitude (-10 pA) for me.\n",
111 | "%more $path"
112 | ]
113 | },
114 | {
115 | "cell_type": "code",
116 | "execution_count": null,
117 | "metadata": {},
118 | "outputs": [],
119 | "source": []
120 | }
121 | ],
122 | "metadata": {
123 | "kernelspec": {
124 | "display_name": "Python 3",
125 | "language": "python",
126 | "name": "python3"
127 | },
128 | "language_info": {
129 | "codemirror_mode": {
130 | "name": "ipython",
131 | "version": 3
132 | },
133 | "file_extension": ".py",
134 | "mimetype": "text/x-python",
135 | "name": "python",
136 | "nbconvert_exporter": "python",
137 | "pygments_lexer": "ipython3",
138 | "version": "3.6.7"
139 | }
140 | },
141 | "nbformat": 4,
142 | "nbformat_minor": 2
143 | }
144 |
--------------------------------------------------------------------------------
/neuronunit/examples/nmldb.py:
--------------------------------------------------------------------------------
1 | import os
2 | from urllib import request, parse
3 |
4 | # Example URL including an extra meaningless query key-value pair
5 | example_url = 'https://neuroml-db.org/model_info?model_id=NMLCL000129&stuff=3'
6 |
7 | # Parse the model_id from URL
8 | parsed = parse.urlparse(example_url)
9 | query = parse.parse_qs(parsed.query)
10 | model_id = query['model_id'][0]
11 |
12 | # Build the URL to the zip file and download it
13 | zip_url = "https://neuroml-db.org/GetModelZip?modelID=%s&version=NeuroML" % model_id
14 | location = '/tmp/%s.zip' % model_id
15 | request.urlretrieve(zip_url, location)
16 | assert os.path.isfile(location)
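A natural next step, not part of the original script, is to unpack the archive
with the standard library:

    import zipfile

    extract_dir = '/tmp/%s' % model_id
    with zipfile.ZipFile(location) as z:
        z.extractall(extract_dir)  # The NeuroML/LEMS files land here
    print(os.listdir(extract_dir))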
--------------------------------------------------------------------------------
/neuronunit/models/NeuroML2/Izh2007One.net.nml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/neuronunit/models/NeuroML2/LEMS_2007One.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/neuronunit/models/NeuroML2/fragments/Izh2007One-no-input-1.net.nml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/neuronunit/models/NeuroML2/fragments/Izh2007One-no-input-2.net.nml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/neuronunit/models/NeuroML2/fragments/LEMS_2007One-no-input-1.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/neuronunit/models/NeuroML2/fragments/LEMS_2007One-no-input-2.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/neuronunit/models/NeuroML2/results/.gitkeep:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/neuronunit/models/__init__.py:
--------------------------------------------------------------------------------
1 | """Model classes for NeuronUnit"""
2 |
3 | import warnings
4 | from .static import StaticModel, ExternalModel, RandomVmModel
5 | try:
6 |     from .lems import LEMSModel
7 |     from .channel import ChannelModel
8 | except ImportError:
9 |     warnings.warn("neuroml not installed; LEMS-based models are unavailable")
10 | from .reduced import ReducedModel
11 | from . import backends # Required to register backends
12 |
--------------------------------------------------------------------------------
/neuronunit/models/backends/__init__.py:
--------------------------------------------------------------------------------
1 | """Neuronunit-specific model backends."""
2 |
3 | import contextlib
4 | import io
5 | import importlib
6 | import inspect
7 | import pathlib
8 | import re
9 | import warnings
10 |
11 | import sciunit.models.backends as su_backends
12 | from sciunit.utils import PLATFORM, PYTHON_MAJOR_VERSION
13 | from .base import Backend
14 | available_backends = su_backends.available_backends
15 |
16 | backend_paths = ['static.StaticBackend',
17 | 'geppetto.GeppettoBackend',
18 | 'jNeuroML.jNeuroMLBackend',
19 | #'neuron.NeuronBackend',
20 | 'adexp.JIT_ADEXPBackend',
21 | 'izhikevich.JIT_IZHIBackend']
22 | def check_backend(partial_path):
23 | full_path = 'jithub.models.backends.%s' % partial_path
24 | class_name = full_path.split('.')[-1]
25 | module_path = '.'.join(full_path.split('.')[:-1])
26 | try:
27 | backend_stdout = io.StringIO()
28 | with contextlib.redirect_stdout(backend_stdout):
29 | module = importlib.import_module(module_path)
30 | backend = getattr(module, class_name)
31 | except Exception as e:
32 | msg = "Import of %s failed due to:" % partial_path
33 |         stdout = backend_stdout.getvalue()
34 | if stdout:
35 | msg += '\n%s' % stdout
36 | msg += '\n%s' % e
37 | print(msg)
38 | #warnings.warn(msg)
39 | return (None, None)
40 | else:
41 | return (backend.name, backend)
42 |
43 | def register_backends(backend_paths):
44 | provided_backends = {}
45 | for partial_path in backend_paths:
46 | name, backend = check_backend(partial_path)
47 | if name is not None:
48 | provided_backends[name] = backend
49 | su_backends.register_backends(provided_backends)
50 |
51 |
52 | register_backends(backend_paths)
53 |
54 | #from .adexp import ADEXPBackend
55 | #from .glif import GLIFBackend
56 | #from .l5pcSciUnit import L5PCBackend
57 |
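For a locally defined backend, registration can bypass `check_backend` (which
assumes the jithub package) and go through sciunit directly; a hedged sketch
with a hypothetical do-nothing backend:

    import sciunit.models.backends as su_backends
    from neuronunit.models.backends.base import Backend

    class MyBackend(Backend):
        """Hypothetical backend, for illustration only."""
        name = 'My'

        def _backend_run(self):
            return {}

    su_backends.register_backends({MyBackend.name: MyBackend})
    # Models can now be instantiated with backend='My'.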
--------------------------------------------------------------------------------
/neuronunit/models/backends/base.py:
--------------------------------------------------------------------------------
1 | """Simulator backends for NeuronUnit models"""
2 | import sys
3 | import os
4 | import platform
5 | import re
6 | import copy
7 | import tempfile
8 | import pickle
9 | import importlib
10 | import shelve
11 | import subprocess
12 |
13 | import neuronunit.capabilities as cap
14 | import quantities as pq
15 | from pyneuroml import pynml
16 | from neo.core import AnalogSignal
17 | import neuronunit.capabilities.spike_functions as sf
18 | import sciunit
19 | from sciunit.models.backends import Backend, BackendException
20 | from sciunit.utils import dict_hash, import_module_from_path, \
21 | TemporaryDirectory
22 |
23 | # Test for NEURON support in a separate python process
24 | NEURON_SUPPORT = (os.system("python -c 'import neuron' > /dev/null 2>&1") == 0)
25 | PYNN_SUPPORT = (os.system("python -c 'import pyNN' > /dev/null 2>&1") == 0)
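The two flags above probe for optional simulators in a child process so that a
broken import cannot poison the current interpreter; the same idiom
generalizes (a sketch, not part of the original module):

    import os

    def has_module(name):
        """Return True if `name` imports cleanly in a fresh Python process."""
        return os.system("python -c 'import %s' > /dev/null 2>&1" % name) == 0

    BRIAN2_SUPPORT = has_module('brian2')  # Hypothetical extra simulator check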
--------------------------------------------------------------------------------
/neuronunit/models/backends/geppetto.py:
--------------------------------------------------------------------------------
1 | """jNeuroML Backend."""
2 |
3 | from .jNeuroML import jNeuroMLBackend
4 |
5 | class GeppettoBackend(jNeuroMLBackend):
6 | """Use for simulation with the Geppetto backend for SciDash."""
7 |
8 | backend = 'Geppetto'
9 |
10 | def init_backend(self, *args, **kwargs):
11 | """Initialize the Geppetto backend."""
12 | super(GeppettoBackend, self).init_backend(*args, **kwargs)
13 |
14 |     def _backend_run(self):
15 |         """Send the simulation to Geppetto to run.
16 |         You have two options here. Either:
17 |         (1) run the simulation and return a dictionary of results, as other backends do, or
18 |         (2) implement nothing here and never call it, always writing to the backend's cache instead.
19 |         """
20 | results = None
21 | return results
--------------------------------------------------------------------------------
/neuronunit/models/backends/glif.py:
--------------------------------------------------------------------------------
1 | import pickle
2 | import copy
3 | import re
4 |
5 | import numpy as np
6 | import quantities as pq
7 | from quantities import mV, ms, s, V
8 | from neo import AnalogSignal
9 | import sciunit
10 |
11 | import neuronunit.capabilities as cap
12 | from neuronunit.models.backends import parse_glif
13 | from neuronunit.models.backends.base import Backend
14 |
15 |
16 |
17 | import allensdk.core.json_utilities as json_utilities
18 | from allensdk.model.glif.glif_neuron import GlifNeuron
19 | from allensdk.api.queries.cell_types_api import CellTypesApi
20 | # from neuronunit.models.reduced import ReducedModel
21 |
22 | try:
23 | from allensdk.api.queries.glif_api import GlifApi
24 | from allensdk.core.cell_types_cache import CellTypesCache
25 | import allensdk.core.json_utilities as json_utilities
26 | import sciunit
27 | except ImportError:
28 |     import os
29 |     os.system('pip install allensdk')
30 |     from allensdk.api.queries.glif_api import GlifApi
31 |     from allensdk.core.cell_types_cache import CellTypesCache
32 |     import allensdk.core.json_utilities as json_utilities
33 |     # Development sciunit is fetched here too, rather than on every import.
34 |     os.system('pip install git+https://github.com/scidash/sciunit@dev')
35 |
36 |
37 |
38 | class GLIFBackend(Backend):
39 | def init_backend(self, attrs = None, cell_name = 'alice', current_src_name = 'hannah', DTC = None):
40 | backend = 'GLIF'
41 | super(GLIFBackend,self).init_backend()
42 |
43 | self.model._backend.use_memory_cache = False
44 | self.current_src_name = current_src_name
45 | self.cell_name = cell_name
46 | self.vM = None
47 | self.allen_id = None
48 | self.attrs = attrs
49 | self.nc = None
50 |
51 | self.temp_attrs = None
52 |
53 |
54 |         if self.allen_id is None:
55 | try:
56 | self.nc = pickle.load(open(str('allen_id.p'),'rb'))
57 | except:
58 | self.allen_id = 566302806
59 | glif_api = GlifApi()
60 |
61 | self.nc = glif_api.get_neuron_configs([self.allen_id])[self.allen_id]
62 | pickle.dump(copy.copy(self.nc),open(str('allen_id.p'),'wb'))
63 |
64 |
65 | else:
66 |
67 | try:
68 | self.nc = pickle.load(open(str('allen_id.p'),'rb'))
69 | except:
70 | glif_api = GlifApi()
71 |                 allen_id = self.allen_id
72 |                 self.glif = glif_api.get_neuronal_models_by_id([allen_id])[0]
73 |                 self.nc = glif_api.get_neuron_configs([allen_id])[allen_id]
74 | pickle.dump(self.nc,open(str('allen_id.p'),'wb'))
75 |
76 |
77 | self.glif = GlifNeuron.from_dict(self.nc)
78 |
79 |
80 |         if attrs is not None:
81 |             self.set_attrs(**attrs)
82 |             self.sim_attrs = attrs
83 |
84 |         if DTC is not None:
85 |             if DTC.attrs is not None:
86 |                 self.set_attrs(**DTC.attrs)
87 |
88 |
89 |
90 | if hasattr(DTC,'current_src_name'):
91 | self._current_src_name = DTC.current_src_name
92 |
93 | if hasattr(DTC,'cell_name'):
94 | self.cell_name = DTC.cell_name
95 |
96 | #print(self.internal_params)
97 | def as_lems_model(self, backend=None):
98 | glif_package = []
99 | glif_package.append(self.metad)
100 | glif_package.append(self.nc)
101 | glif_package.append(self.get_sweeps)
102 | lems_file_path = parse_glif.generate_lems(glif_package)
103 | return ReducedModel(lems_file_path, backend=backend)
104 |
105 | def get_sweeps(self,specimen_id = None):
106 |         if specimen_id is None:
107 | self.sweeps = ctc.get_ephys_sweeps(self.glif[self.allen_id], \
108 | file_name='%d_ephys_sweeps.json' % self.allen_id)
109 |
110 | def get_sweep(self, n,specimen_id = None):
111 |         if specimen_id is None:
112 | self.sweeps = ctc.get_ephys_sweeps(self.glif[self.allen_id], \
113 | file_name='%d_ephys_sweeps.json' % self.allen_id)
114 | sweep_info = self.sweeps[n]
115 | sweep_number = sweep_info['sweep_number']
116 | sweep = ds.get_sweep(sweep_number)
117 | return sweep
118 |
119 | def get_stimulus(self, n):
120 | sweep = self.get_sweep(n)
121 | return sweep['stimulus']
122 |
123 | def apply_stimulus(self, n):
124 | self.stimulus = self.get_stimulus(n)
125 |
126 | def get_spike_train(self):
127 | #vms = self.get_membrane_potential()
128 | #from neuronunit.capabilities.spike_functions import get_spike_train
129 | #import numpy as np
130 | spike_times = self.results['interpolated_spike_times']
131 | return np.array(spike_times)
132 |
133 | def get_membrane_potential(self):
134 | """Must return a neo.core.AnalogSignal.
135 | And must destroy the hoc vectors that comprise it.
136 | """
137 | threshold = self.results['threshold']
138 | interpolated_spike_times = self.results['interpolated_spike_times']
139 |
140 | interpolated_spike_thresholds = self.results['interpolated_spike_threshold']
141 | grid_spike_indices = self.results['spike_time_steps']
142 | grid_spike_times = self.results['grid_spike_times']
143 | after_spike_currents = self.results['AScurrents']
144 |
145 | vm = self.results['voltage']
146 | if len(self.results['interpolated_spike_voltage']) > 0:
147 | isv = self.results['interpolated_spike_voltage'].tolist()[0]
148 | vm = list(map(lambda x: isv if np.isnan(x) else x, vm))
149 | dt = self.glif.dt
150 | self.vM = AnalogSignal(vm,units = mV,sampling_period = dt * ms)
151 |         return self.vM
152 |
153 | def _local_run(self):
154 | #self.results = np.array(self.glif.run(self.stim))
155 | results = {}
156 | results['vm'] = self.vM
157 | results['t'] = self.vM.times
158 | results['run_number'] = results.get('run_number',0) + 1
159 | return results
160 |
162 |
163 |
164 | def set_attrs(self, **attrs):
165 | self.model.attrs.update(attrs)
166 | #self.nc.update(attrs)
167 | for k,v in attrs.items():
168 | self.nc[k] = v
169 | self.glif = GlifNeuron.from_dict(self.nc)
170 | return self.glif
171 |
172 |
173 | def set_stop_time(self, stop_time = 650*pq.ms):
174 | """Sets the simulation duration
175 | stopTimeMs: duration in milliseconds
176 | """
177 | self.tstop = float(stop_time.rescale(pq.ms))
178 |
179 | def inject_square_current(self, current):
180 |         if 'injected_square_current' in current:
181 | c = current['injected_square_current']
182 | else:
183 | c = current
184 | stop = float(c['delay'])+float(c['duration'])
185 | start = float(c['delay'])
186 | duration = float(c['duration'])
187 | amplitude = float(c['amplitude'])/1000.0
188 | self.glif.dt = 0.001
189 | dt = self.glif.dt
190 | self.stim = [ 0.0 ] * int(start) + [ amplitude ] * int(duration) + [ 0.0 ] * int(stop)
191 | #self.glif.init_voltage = -0.0065
192 | self.results = self.glif.run(self.stim)
193 | vm = self.results['voltage']
194 | if len(self.results['interpolated_spike_voltage']) > 0:
195 | isv = self.results['interpolated_spike_voltage'].tolist()[0]
196 | vm = list(map(lambda x: isv if np.isnan(x) else x, vm))
197 |
198 | vms = AnalogSignal(vm,units = V,sampling_period = dt * s)
199 | self.vM = vms
200 | return vms
201 |
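A sketch of driving this backend's current injection. The dictionary follows
the convention documented in neuronunit.capabilities; the `backend` object is
assumed to be an initialized GLIFBackend already attached to a model:

    import quantities as pq

    current = {'amplitude': 100.0*pq.pA,
               'delay': 100.0*pq.ms,
               'duration': 500.0*pq.ms}
    vm = backend.inject_square_current(current)  # AnalogSignal of the run
    print(backend.get_spike_train())             # Interpolated spike times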
--------------------------------------------------------------------------------
/neuronunit/models/backends/jNeuroML.py:
--------------------------------------------------------------------------------
1 | """jNeuroML Backend."""
2 |
3 | import os
4 | import io
5 | import tempfile
6 |
7 | from pyneuroml import pynml
8 |
9 | from sciunit.utils import redirect_stdout
10 | from .base import Backend
11 | from elephant.spike_train_generation import threshold_detection
12 |
13 |
14 |
15 | class jNeuroMLBackend(Backend):
16 | """Use for simulation with jNeuroML, a reference simulator for NeuroML."""
17 |
18 | name = 'jNeuroML'
19 |
20 | def init_backend(self, *args, **kwargs):
21 | """Initialize the jNeuroML backend."""
22 | assert hasattr(self.model, 'set_lems_run_params'), \
23 | "A model using %s must implement `set_lems_run_params`" % \
24 | self.backend
25 | self.stdout = io.StringIO()
26 | self.model.create_lems_file_copy() # Create a copy of the LEMS file
27 | super(jNeuroMLBackend, self).init_backend(*args, **kwargs)
28 |
29 | def set_attrs(self, **attrs):
30 | """Set the model attributes, i.e. model parameters."""
31 | self.model.set_lems_attrs()
32 |
33 | def set_run_params(self, **run_params):
34 | """Sey the backend runtime parameters, i.e. simulation parameters."""
35 | self.model.set_lems_run_params()
36 |
37 | def inject_square_current(self, current):
38 | """Inject a square current into the cell."""
39 | self.model.run_params['injected_square_current'] = current
40 | self.set_run_params() # Doesn't work yet.
41 | self._backend_run()
42 |         return self.vm
43 | def set_stop_time(self, t_stop):
44 | """Set the stop time of the simulation."""
45 | self.model.run_params['t_stop'] = t_stop
46 | self.set_run_params()
47 |
48 | def set_time_step(self, dt):
49 | """Set the time step of the simulation."""
50 | self.model.run_params['dt'] = dt
51 | self.set_run_params()
52 | def get_spike_count(self):
53 | thresh = threshold_detection(self.vm)
54 | return len(thresh)
55 |
56 | def _backend_run(self):
57 | """Run the simulation."""
58 | f = pynml.run_lems_with_jneuroml
59 | self.exec_in_dir = tempfile.mkdtemp()
60 | lems_path = os.path.dirname(self.model.orig_lems_file_path)
61 | with redirect_stdout(self.stdout):
62 | results = f(self.model.lems_file_path,
63 | paths_to_include=[lems_path],
64 | skip_run=self.model.skip_run,
65 | nogui=self.model.run_params['nogui'],
66 | load_saved_data=True, plot=False,
67 | exec_in_dir=self.exec_in_dir,
68 | verbose=self.model.run_params['v'])
69 | self.vm = results['vm']
70 | return results
71 |
--------------------------------------------------------------------------------
/neuronunit/models/backends/parse_glif.py:
--------------------------------------------------------------------------------
1 | usage='''
2 | Provenance: This file is originally from
3 | https://github.com/vrhaynes/AllenInstituteNeuroML
4 | https://github.com/OpenSourceBrain/AllenInstituteNeuroML
5 | It is authored by pgleeson@github.com, vrhaynes@github.com and russelljjarvis@github.com
6 |
7 | This file can be used to generate LEMS components for each of a number of GLIF models
8 |
9 | Usage:
10 |
11 | python parse_glif.py -all
12 |
13 | '''
14 |
15 | import sys
16 | import os
17 | import json
18 |
19 | from pyneuroml import pynml
20 |
21 | def generate_lems(glif_package, curr_pA=None, show_plot=False):
22 |     if curr_pA is None:
23 | curr_pA = 10
24 | glif_dir = os.getcwd()
25 |
26 | model_metadata,neuron_config,ephys_sweeps = glif_package
27 |
28 | template_cell = '''
29 |
30 | <%s %s/>
31 |
32 |
33 | '''
34 |
35 | type = '???'
36 | print(model_metadata['name'])
37 | if '(LIF)' in model_metadata['name']:
38 | type = 'glifCell'
39 | if '(LIF-ASC)' in model_metadata['name']:
40 | type = 'glifAscCell'
41 | if '(LIF-R)' in model_metadata['name']:
42 | type = 'glifRCell'
43 | if '(LIF-R-ASC)' in model_metadata['name']:
44 | type = 'glifRAscCell'
45 | if '(LIF-R-ASC-A)' in model_metadata['name']:
46 | type = 'glifRAscATCell'
47 |
48 | cell_id = 'GLIF_%s'%glif_dir
49 |
50 | #model_metadata['name']
51 |
52 | attributes = ""
53 |
54 | attributes +=' id="%s"'%cell_id
55 | attributes +='\n C="%s F"'%neuron_config["C"]
56 | attributes +='\n leakReversal="%s V"'%neuron_config["El"]
57 | attributes +='\n reset="%s V"'%neuron_config["El"]
58 | attributes +='\n thresh="%s V"'%( float(neuron_config["th_inf"]) * float(neuron_config["coeffs"]["th_inf"]))
59 | attributes +='\n leakConductance="%s S"'%(1/float(neuron_config["R_input"]))
60 |
61 | if 'Asc' in type:
62 | attributes +='\n tau1="%s s"'%neuron_config["asc_tau_array"][0]
63 | attributes +='\n tau2="%s s"'%neuron_config["asc_tau_array"][1]
64 | attributes +='\n amp1="%s A"'% ( float(neuron_config["asc_amp_array"][0]) * float(neuron_config["coeffs"]["asc_amp_array"][0]) )
65 | attributes +='\n amp2="%s A"'% ( float(neuron_config["asc_amp_array"][1]) * float(neuron_config["coeffs"]["asc_amp_array"][1]) )
66 |
67 | if 'glifR' in type:
68 | attributes +='\n bs="%s per_s"'%neuron_config["threshold_dynamics_method"]["params"]["b_spike"]
69 | attributes +='\n deltaThresh="%s V"'%neuron_config["threshold_dynamics_method"]["params"]["a_spike"]
70 | attributes +='\n fv="%s"'%neuron_config["voltage_reset_method"]["params"]["a"]
71 | attributes +='\n deltaV="%s V"'%neuron_config["voltage_reset_method"]["params"]["b"]
72 |
73 | if 'glifRAscATCell' in type:
74 | attributes +='\n bv="%s per_s"'%neuron_config["threshold_dynamics_method"]["params"]["b_voltage"]
75 | attributes +='\n a="%s per_s"'%neuron_config["threshold_dynamics_method"]["params"]["a_voltage"]
76 |
77 |
78 | file_contents = template_cell%(type, attributes)
79 |
80 | print(file_contents)
81 |
82 | #cell_file_name = '%s.xml'%(cell_id)
83 | cell_file_name = '{0}{1}.xml'.format(os.getcwd(),str(model_metadata['name']))
84 | cell_file = open(cell_file_name,'w')
85 | cell_file.write(file_contents)
86 | cell_file.close()
87 | return cell_file_name
88 |
89 | '''
90 | import opencortex.build as oc
91 |
92 |
93 | nml_doc, network = oc.generate_network("Test_%s"%glif_dir)
94 |
95 | pop = oc.add_single_cell_population(network,
96 | 'pop_%s'%glif_dir,
97 | cell_id)
98 |
99 |
100 | pg = oc.add_pulse_generator(nml_doc,
101 | id="pg0",
102 | delay="100ms",
103 | duration="1000ms",
104 | amplitude="%s pA"%curr_pA)
105 |
106 |
107 | oc.add_inputs_to_population(network,
108 | "Stim0",
109 | pop,
110 | pg.id,
111 | all_cells=True)
112 |
113 |
114 |
115 | nml_file_name = '%s.net.nml'%network.id
116 | oc.save_network(nml_doc, nml_file_name, validate=True)
117 |
118 |
119 | thresh = 'thresh'
120 | if 'glifR' in type:
121 | thresh = 'threshTotal'
122 |
123 | lems_file_name = oc.generate_lems_simulation(nml_doc,
124 | network,
125 | nml_file_name,
126 | include_extra_lems_files = [cell_file_name,'../GLIFs.xml'],
127 | duration = 1200,
128 | dt = 0.01,
129 | gen_saves_for_quantities = {'thresh.dat':['pop_%s/0/GLIF_%s/%s'%(glif_dir,glif_dir,thresh)]},
130 | gen_plots_for_quantities = {'Threshold':['pop_%s/0/GLIF_%s/%s'%(glif_dir,glif_dir,thresh)]})
131 |
132 | results = pynml.run_lems_with_jneuroml(lems_file_name,
133 | nogui=True,
134 | load_saved_data=True)
135 |
136 | print("Ran simulation; results reloaded for: %s"%results.keys())
137 |
138 | info = "Model %s; %spA stimulation"%(glif_dir,curr_pA)
139 |
140 | times = [results['t']]
141 | vs = [results['pop_%s/0/GLIF_%s/v'%(glif_dir,glif_dir)]]
142 | labels = ['LEMS - jNeuroML']
143 |
144 | original_model_v = 'original.v.dat'
145 | if os.path.isfile(original_model_v):
146 | data, indices = pynml.reload_standard_dat_file(original_model_v)
147 | times.append(data['t'])
148 | vs.append(data[0])
149 | labels.append('Allen SDK')
150 |
151 |
152 | pynml.generate_plot(times,
153 | vs,
154 | "Membrane potential; %s"%info,
155 | xaxis = "Time (s)",
156 | yaxis = "Voltage (V)",
157 | labels = labels,
158 | grid = True,
159 | show_plot_already=False,
160 | save_figure_to='Comparison_%ipA.png'%(curr_pA))
161 |
162 | times = [results['t']]
163 | vs = [results['pop_%s/0/GLIF_%s/%s'%(glif_dir,glif_dir,thresh)]]
164 | labels = ['LEMS - jNeuroML']
165 |
166 | original_model_th = 'original.thresh.dat'
167 | if os.path.isfile(original_model_th):
168 | data, indeces = pynml.reload_standard_dat_file(original_model_th)
169 | times.append(data['t'])
170 | vs.append(data[0])
171 | labels.append('Allen SDK')
172 |
173 |
174 | pynml.generate_plot(times,
175 | vs,
176 | "Threshold; %s"%info,
177 | xaxis = "Time (s)",
178 | yaxis = "Voltage (V)",
179 | labels = labels,
180 | grid = True,
181 | show_plot_already=show_plot,
182 | save_figure_to='Comparison_Threshold_%ipA.png'%(curr_pA))
183 |
184 | readme =
185 | '''
186 |
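A sketch of how `generate_lems` might be driven. The metadata and config
dictionaries below are truncated, hypothetical stand-ins for real Allen GLIF
API output; only the keys read by the active code are supplied:

    from neuronunit.models.backends.parse_glif import generate_lems

    model_metadata = {'name': 'GLIF demo (LIF)'}   # '(LIF)' selects 'glifCell'
    neuron_config = {'C': 6e-11, 'El': -0.07, 'th_inf': 0.02,
                     'R_input': 2e8,
                     'coeffs': {'th_inf': 1.0}}
    ephys_sweeps = None                            # Unpacked but unused here
    lems_file = generate_lems((model_metadata, neuron_config, ephys_sweeps))
    print(lems_file)                               # Path of the written cell file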
--------------------------------------------------------------------------------
/neuronunit/models/backends/static.py:
--------------------------------------------------------------------------------
1 | """Static Backend."""
2 |
3 | from .base import Backend
4 |
5 |
6 | class StaticBackend(Backend):
7 | def _backend_run(self):
8 | pass
9 |
10 | def set_stop_time(self, t_stop):
11 | pass
12 |
--------------------------------------------------------------------------------
/neuronunit/models/channel.py:
--------------------------------------------------------------------------------
1 | """NeuronUnit model class for ion channels models"""
2 |
3 | import os
4 | import re
5 |
6 | import neuronunit.capabilities.channel as cap
7 | from .lems import LEMSModel
8 | from pyneuroml.analysis import NML2ChannelAnalysis as ca
9 | import quantities as pq
10 |
11 |
12 | class ChannelModel(LEMSModel, cap.NML2ChannelAnalysis):
13 | """A model for ion channels"""
14 |
15 | def __init__(self, channel_file_path_or_url, channel_index=0, name=None, backend='jNeuroML'):
16 | """
17 | channel_file_path: Path to NML file.
18 | channel_index: Order of channel in NML file
19 | (usually 0 since most files contain one channel).
20 | name: Optional model name.
21 | """
22 | if name is None:
23 | base, file_name = os.path.split(channel_file_path_or_url)
24 | name = file_name.split('.')[0]
25 | super(ChannelModel, self).__init__(channel_file_path_or_url, name=name,
26 | backend=backend)
27 | channels = ca.get_channels_from_channel_file(self.orig_lems_file_path)
28 | self.channel = channels[channel_index]
29 | self.a = None
30 | # Temperature, clamp parameters, etc.
31 | self.default_params = ca.DEFAULTS.copy()
32 | self.default_params.update({'nogui': True})
33 |
34 | """
35 | DEPRECATED
36 | def NML2_run(self, rerun=False, a=None, verbose=None, **params):
37 | self.params = self.default_params.copy()
38 | self.params.update(params)
39 | # Convert keyword args to a namespace.
40 | a = ca.build_namespace(a=a, **self.params)
41 | if verbose is None:
42 | verbose = a.v
43 | # Only rerun if params have changed.
44 | if self.a is None or a.__dict__ != self.a.__dict__ or rerun:
45 | self.a = a
46 | # Force the Channel Analysis module to write files to the
47 | # temporary directory
48 | ca.OUTPUT_DIR = self.temp_dir.name
49 | # Create a lems file.
50 | self.lems_file_path = ca.make_lems_file(self.channel, self.a)
51 | # Writes data to disk.
52 | self.results = ca.run_lems_file(self.lems_file_path, verbose)
53 | """
54 |
55 |
56 |
57 | def ca_make_lems_file(self, **params):
58 | # Set params in the SciUnit model instance
59 | self.params = params
60 | # ChannelAnalysis only accepts camelCase parameter names
61 | # This converts snake_case to camelCase
62 | params = {snake_to_camel(key): value for key, value in params.items()}
63 | # Build a namespace for use by ChannelAnalysis
64 | self.ca_namespace = ca.build_namespace(**params)
65 | # Make the new LEMS file
66 | self.lems_file_path = ca.make_lems_file(self.channel,
67 | self.ca_namespace)
68 |
69 | def ca_run_lems_file(self, verbose=True):
70 | self.run(verbose=verbose)
71 | return self.results
72 |
73 | def ca_compute_iv_curve(self, results):
74 | iv_data = ca.compute_iv_curve(self.channel, self.ca_namespace, results)
75 | self.iv_data = {}
76 | for kind in ['i_peak', 'i_steady']:
77 | self.iv_data[kind] = {}
78 | for v, i in iv_data[kind].items():
79 | v = float((v * pq.V).rescale(pq.mV))
80 | self.iv_data[kind][v] = (i * pq.A).rescale(pq.pA)
81 | self.iv_data['hold_v'] = (iv_data['hold_v'] * pq.V).rescale(pq.mV)
82 | return self.iv_data
83 |
84 | def plot_iv_curve(self, v, i, *plt_args, **plt_kwargs):
85 |         ca.plot_iv_curve(self.ca_namespace, v, i, *plt_args, **plt_kwargs)
86 |
87 |
88 | def snake_to_camel(string):
89 | return re.sub(r'_([a-z])', lambda x: x.group(1).upper(), string)
90 |
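The workflow these methods implement, sketched end to end with the default
analysis parameters (the channel file path is a hypothetical placeholder):

    from neuronunit.models.channel import ChannelModel

    model = ChannelModel('path/to/Na.channel.nml')  # Hypothetical NML2 file
    model.ca_make_lems_file()                 # Build the LEMS protocol file
    results = model.ca_run_lems_file()        # Run it and collect traces
    iv_data = model.ca_compute_iv_curve(results)
    print(iv_data['i_peak'])                  # Peak current (pA) per holding voltage (mV)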
--------------------------------------------------------------------------------
/neuronunit/models/morphology.py:
--------------------------------------------------------------------------------
1 | """NeuronUnit model class for NEURON HOC defined cell models"""
2 |
3 | import os
4 | import sciunit
5 | import neuronunit.capabilities.morphology as cap
6 | import quantities as pq
7 |
8 | class SwcCellModel(sciunit.Model, cap.ProducesSWC):
9 | """A model for cells defined using SWC files. Requires a path to the SWC file."""
10 |
11 | def __init__(self, swc_path, name=None):
12 | """
13 |         swc_path: Path to SWC file.
14 |
15 | name: Optional model name.
16 | """
17 |
18 | self.swc_path = os.path.abspath(swc_path)
19 |
20 | if name is None:
21 | name = os.path.basename(self.swc_path).replace('.swc','')
22 |
23 | super(SwcCellModel,self).__init__(name=name)
24 |
25 | def produce_swc(self):
26 | return os.path.abspath(self.swc_path)
27 |
--------------------------------------------------------------------------------
/neuronunit/models/reduced.py:
--------------------------------------------------------------------------------
1 | """NeuronUnit model class for reduced neuron models."""
2 |
3 | import numpy as np
4 | from neo.core import AnalogSignal
5 | import quantities as pq
6 |
7 | import neuronunit.capabilities as cap
8 | from .lems import LEMSModel
9 | from .static import ExternalModel
10 | import neuronunit.capabilities.spike_functions as sf
11 | from copy import deepcopy
12 | from sciunit.models import RunnableModel
13 |
14 | class ReducedModel(LEMSModel,
15 | cap.ReceivesSquareCurrent,
16 | cap.ProducesActionPotentials,
17 | ):
18 | """Base class for reduced models, using LEMS"""
19 |
20 | def __init__(self, LEMS_file_path, name=None, backend=None, attrs=None):
21 | """Instantiate a reduced model.
22 |
23 | LEMS_file_path: Path to LEMS file (an xml file).
24 | name: Optional model name.
25 | """
26 |
27 | super(ReducedModel, self).__init__(LEMS_file_path, name=name,
28 | backend=backend, attrs=attrs)
29 | self.run_number = 0
30 | self.tstop = None
31 |
32 | def get_membrane_potential(self, **run_params):
33 | self.run(**run_params)
34 | v = None
35 | for rkey in self.results.keys():
36 | if 'v' in rkey or 'vm' in rkey:
37 | v = np.array(self.results[rkey])
38 | t = np.array(self.results['t'])
39 | dt = (t[1]-t[0])*pq.s # Time per sample in seconds.
40 | vm = AnalogSignal(v, units=pq.V, sampling_rate=1.0/dt)
41 | return vm
42 |
43 | def get_APs(self, **run_params):
44 | vm = self.get_membrane_potential(**run_params)
45 | waveforms = sf.get_spike_waveforms(vm)
46 | return waveforms
47 |
48 | def get_spike_train(self, **run_params):
49 | vm = self.get_membrane_potential(**run_params)
50 | spike_train = sf.get_spike_train(vm)
51 | return spike_train
52 |
53 | def inject_square_current(self, current):
54 | assert isinstance(current, dict)
55 | assert all(x in current for x in
56 | ['amplitude', 'delay', 'duration'])
57 | self.set_run_params(injected_square_current=current)
58 | self._backend.inject_square_current(current)
59 |
60 | class VeryReducedModel(RunnableModel,
61 | cap.ReceivesCurrent,
62 | cap.ProducesActionPotentials,
63 | ):
64 | """Base class for reduced models, using LEMS"""
65 |
66 | def __init__(self, name=None, backend=None, attrs=None):
67 | """Instantiate a reduced model.
68 | LEMS_file_path: Path to LEMS file (an xml file).
69 | name: Optional model name.
70 | """
71 | super(VeryReducedModel,self).__init__(name=name, backend=backend, attrs=attrs)
72 | self.run_number = 0
73 | self.tstop = None
74 |
75 | def run(self, rerun=None, **run_params):
76 | if rerun is None:
77 | rerun = self.rerun
78 | self.set_run_params(**run_params)
79 | for key,value in self.run_defaults.items():
80 | if key not in self.run_params:
81 | self.set_run_params(**{key:value})
82 | #if (not rerun) and hasattr(self,'last_run_params') and \
83 | # self.run_params == self.last_run_params:
84 | # print("Same run_params; skipping...")
85 | # return
86 |
87 | self.results = self._backend.local_run()
88 | self.last_run_params = deepcopy(self.run_params)
89 | #self.rerun = False
90 | # Reset run parameters so the next test has to pass its own
91 | # run parameters and not use the same ones
92 | self.run_params = {}
93 |
94 | def set_run_params(self, **params):
95 | self._backend.set_run_params(**params)
96 |
97 | # Methods to override using inheritance.
98 | def get_membrane_potential(self, **run_params):
99 | pass
100 | def get_APs(self, **run_params):
101 | pass
102 | def get_spike_train(self, **run_params):
103 | pass
104 | def inject_square_current(self, current):
105 | pass
106 |
--------------------------------------------------------------------------------
/neuronunit/models/static.py:
--------------------------------------------------------------------------------
1 | from neo.core import AnalogSignal
2 | import neuronunit.capabilities as cap
3 | import neuronunit.capabilities.spike_functions as sf
4 | import numpy as np
5 | import pickle
6 | import quantities as pq
7 | import sciunit
8 | from sciunit.models import RunnableModel
9 | import sciunit.capabilities as scap
10 |
11 |
12 |
13 | class StaticModel(RunnableModel,
14 | cap.ReceivesSquareCurrent,
15 | cap.ProducesActionPotentials,
16 | cap.ProducesMembranePotential):
17 | """A model which produces a frozen membrane potential waveform."""
18 |
19 | def __init__(self, vm):
20 | """Create an instace of a model that produces a static waveform.
21 |
22 | :param vm: either a neo.core.AnalogSignal or a path to a
23 | pickled neo.core.AnalogSignal
24 | """
25 | if isinstance(vm, str):
26 |             with open(vm, 'rb') as f:  # Pickles must be opened in binary mode.
27 | vm = pickle.load(f)
28 |
29 | if not isinstance(vm, AnalogSignal):
30 | raise TypeError('vm must be a neo.core.AnalogSignal')
31 |
32 | self.vm = vm
33 | self.backend = 'static_model'
34 | def run(self, **kwargs):
35 | pass
36 |
37 | def get_membrane_potential(self, **kwargs):
38 | """Return the Vm passed into the class constructor."""
39 | return self.vm
40 |
41 | def get_APs(self, **run_params):
42 | """Return the APs, if any, contained in the static waveform."""
43 | vm = self.get_membrane_potential(**run_params)
44 | waveforms = sf.get_spike_waveforms(vm)
45 | return waveforms
46 |
47 | def inject_square_current(self, current):
48 | """Static model always returns the same waveform.
49 | This method is for compatibility only."""
50 | pass
51 |
52 |
53 | class ExternalModel(sciunit.models.RunnableModel,
54 | cap.ProducesMembranePotential,
55 | scap.Runnable):
56 | """A model which produces a frozen membrane potential waveform."""
57 |
58 | def __init__(self, *args, **kwargs):
59 | """Create an instace of a model that produces a static waveform."""
60 | super(ExternalModel, self).__init__(*args, **kwargs)
61 |
62 |
63 | def set_membrane_potential(self, vm):
64 | self.vm = vm
65 |
66 | def set_model_attrs(self, attrs):
67 | self.attrs = attrs
68 |
69 | def get_membrane_potential(self):
70 | return self.vm
71 | def get_APs(self, **run_params):
72 | """Return the APs, if any, contained in the static waveform."""
73 | vm = self.get_membrane_potential(**run_params)
74 | waveforms = sf.get_spike_waveforms(vm)
75 | return waveforms
76 |
77 |
78 | class RandomVmModel(RunnableModel, cap.ProducesMembranePotential, cap.ReceivesCurrent):
79 | def get_membrane_potential(self):
80 | # Random membrane potential signal
81 | vm = (np.random.randn(10000)-60)*pq.mV
82 | vm = AnalogSignal(vm, sampling_period=0.1*pq.ms)
83 | return vm
84 |
85 | def inject_square_current(self, current):
86 | pass
87 |
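A short sketch of the intended use of `StaticModel`: wrap a recorded (or, as
here, synthetic) trace so it can be scored like a simulated model:

    import numpy as np
    import quantities as pq
    from neo.core import AnalogSignal
    from neuronunit.models.static import StaticModel

    v = -65.0 * np.ones(5000)
    vm = AnalogSignal(v, units=pq.mV, sampling_period=0.1*pq.ms)

    model = StaticModel(vm)
    print(model.get_membrane_potential() is vm)  # True; the waveform is frozen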
--------------------------------------------------------------------------------
/neuronunit/models/very_reduced.py:
--------------------------------------------------------------------------------
1 | """NeuronUnit model class for reduced neuron models."""
2 |
3 | import numpy as np
4 | from neo.core import AnalogSignal
5 | import quantities as pq
6 |
7 | import neuronunit.capabilities as cap
8 | #import neuronunit.models as mod
9 | import neuronunit.capabilities.spike_functions as sf
10 |
11 | from sciunit.models import RunnableModel
12 |
13 | class VeryReducedModel(RunnableModel,
14 | cap.ReceivesCurrent,
15 | cap.ProducesActionPotentials,
16 | ):
17 | """Base class for reduced models, not using LEMS,
18 | and not requiring file paths this is to wrap pyNN models, Brian models,
19 | and other self contained models+model descriptions"""
20 |
21 | def __init__(self, name=None, backend=None, attrs=None):
22 | """Instantiate a reduced model.
23 |
24 | """
25 | super(VeryReducedModel,self).__init__(name)
26 | self.backend = backend
27 | self.attrs = attrs
28 | self.run_number = 0
29 | self.tstop = None
30 |
31 | def get_membrane_potential(self, **run_params):
32 | self.run(**run_params)
33 | v = None
34 | for rkey in self.results.keys():
35 | if 'v' in rkey or 'vm' in rkey:
36 | v = np.array(self.results[rkey])
37 | t = np.array(self.results['t'])
38 | dt = (t[1]-t[0])*pq.s # Time per sample in seconds.
39 | vm = AnalogSignal(v, units=pq.V, sampling_rate=1.0/dt)
40 | return vm
41 |
42 | def get_APs(self, **run_params):
43 | vm = self.get_membrane_potential(**run_params)
44 | waveforms = sf.get_spike_waveforms(vm)
45 | return waveforms
46 |
47 | def get_spike_train(self, **run_params):
48 | vm = self.get_membrane_potential(**run_params)
49 | spike_train = sf.get_spike_train(vm)
50 | return spike_train
51 |
52 | #def inject_square_current(self, current):
53 | # self.set_run_params(injected_square_current=current)
54 | # self._backend.inject_square_current(current)
55 |
--------------------------------------------------------------------------------
/neuronunit/models/very_reduced_sans_lems.py:
--------------------------------------------------------------------------------
1 | """NeuronUnit model class for reduced neuron models."""
2 |
17 | import numpy as np
18 | from neo.core import AnalogSignal
19 | import quantities as pq
20 | import copy
21 |
22 | import sciunit
23 | from sciunit.models.runnable import RunnableModel
24 |
25 | import neuronunit.capabilities.spike_functions as sf
26 | import neuronunit.capabilities as cap
27 |
29 | class VeryReducedModel(RunnableModel,
30 | cap.ReceivesSquareCurrent,
31 | cap.ProducesActionPotentials,
32 | cap.ProducesMembranePotential):
33 | """Base class for reduced models, not using LEMS,
34 | and not requiring file paths this is to wrap pyNN models, Brian models,
35 | and other self contained models+model descriptions"""
36 |
37 |     def __init__(self, name='', backend=None, attrs=None):
38 |         """Instantiate a very reduced model.
39 |
40 |         name: Optional model name.
41 |         """
42 |         attrs = attrs if attrs is not None else {}
43 | #sciunit.Model()
44 |
45 |         super(VeryReducedModel, self).__init__(name=name, backend=backend, attrs=attrs)
46 |         self.backend = backend
47 |         self.attrs = attrs
48 |         self.run_number = 0
49 |         self.tstop = None
50 |         self.rheobase = None
49 | 
50 |     # The methods below depend on helpers (TSD, OptMan, dtc_to_rheo) that are
51 |     # not imported here, so they stay commented out.
52 | '''
56 | def model_test_eval(self,tests):
57 | """
58 | Take a model and some tests
59 | Evaluate a test suite over them.
60 | """
61 | from sciunit import TestSuite
62 | if type(tests) is TestSuite:
63 | not_suite = TSD({t.name:t for t in tests.tests})
64 | OM = OptMan(tests, backend = self._backend)
65 | dtc = DataTC()
66 | dtc.attrs = self.attrs
67 |         assert set(self._backend.attrs.keys()) <= set(self.attrs.keys())  # subset check
68 | dtc.backend = self._backend
69 | dtc.tests = copy.copy(not_suite)
70 | dtc = dtc_to_rheo(dtc)
71 | if dtc.rheobase is not None:
72 | dtc.tests = dtc.format_test()
73 |             dtc = list(map(OM.elephant_evaluation, [dtc]))[0]
74 |         model = dtc.dtc_to_model()
75 |         model.SM = dtc.SM
76 |         model.obs_preds = dtc.obs_preds
77 |         return dtc, model
78 |
79 | def model_to_dtc(self):
80 | dtc = DataTC()
81 | dtc.attrs = self.attrs
82 | try:
83 | dtc.backend = self.get_backend()
84 | except:
85 | dtc.backend = self.backend
86 | if hasattr(self,'rheobase'):
87 | dtc.rheobase = self.rheobase
88 | return dtc
90 | 
91 | 
92 | '''
94 |     def inject_square_current(self, current):
95 |         vm = self._backend.inject_square_current(current)
96 |         return vm
98 |     '''
99 |     def get_membrane_potential(self, **run_params):
100 |         # If re-enabled, this should delegate to the backend rather than
101 |         # recursing into itself:
102 |         vm = self._backend.get_membrane_potential()
103 |         return vm
104 |     '''
109 |     def get_APs(self, **run_params):
110 |         vm = self.get_membrane_potential(**run_params)
111 |         waveforms = sf.get_spike_waveforms(vm)
112 |         return waveforms
113 | 
114 |     def get_spike_train(self, **run_params):
115 |         vm = self._backend.get_membrane_potential(**run_params)
116 |         spike_train = sf.get_spike_train(vm)
117 |         return spike_train
118 | 
119 |     def get_spike_count(self, **run_params):
120 |         train = self.get_spike_train(**run_params)
121 |         return len(train)
122 | 
123 |     def set_attrs(self, attrs):
124 |         self.attrs.update(attrs)
125 |
--------------------------------------------------------------------------------
/neuronunit/neuroconstruct/__init__.py:
--------------------------------------------------------------------------------
1 | """Neuroconstruct classes for use with neurounit"""
2 |
3 | import os
4 | import sys
5 | import warnings
6 |
7 | NC_HOME_DEFAULT = os.path.join(os.path.expanduser('~'), 'neuroConstruct')
8 |
9 | try:
10 | NC_HOME = os.environ["NC_HOME"]
11 | except KeyError:
12 | warnings.warn(("Please add an NC_HOME environment variable corresponding "
13 | "to the location of the neuroConstruct directory. The location "
14 | "%s is being used as a default") % NC_HOME_DEFAULT)
15 | NC_HOME = NC_HOME_DEFAULT
16 |
17 | if NC_HOME not in sys.path:
18 | sys.path.append(NC_HOME)
19 |
--------------------------------------------------------------------------------
/neuronunit/neuroconstruct/capabilities.py:
--------------------------------------------------------------------------------
1 | # Deprecated
2 |
--------------------------------------------------------------------------------
/neuronunit/neuromldb.py:
--------------------------------------------------------------------------------
1 | from urllib import request
2 | import sys
3 | import json
4 | import zipfile, tempfile
5 | import os
6 | import pathlib
7 |
8 | import quantities as pq
10 | from scipy.interpolate import interp1d
11 | import numpy as np
12 | from neo import AnalogSignal
13 | from neuronunit.models.static import StaticModel
14 |
16 | 
17 | class NeuroMLDBModel:
18 | def __init__(self, model_id = "NMLCL000086"):
19 | self.model_id = model_id
20 | self.api_url = "https://neuroml-db.org/api/" # See docs at: https://neuroml-db.org/api
21 |
22 | self.waveforms = None
23 |
24 | self.waveform_signals = {}
25 | self.url_responses = {}
26 |
27 | def get_files(self):
28 | zip_url = "https://neuroml-db.org/GetModelZip?modelID=%s&version=NeuroML" % self.model_id
29 | location = pathlib.Path(tempfile.mkdtemp())
30 | zip_location = location / ('%s.zip' % self.model_id)
31 | request.urlretrieve(zip_url, zip_location)
32 | assert zip_location.is_file()
33 | with zipfile.ZipFile(zip_location, 'r') as zip_ref:
34 | zip_ref.extractall(location)
35 | return location
36 |
37 | def read_api_url(self, url):
38 | if url not in self.url_responses:
39 |             response = request.urlopen(url).read()
40 | response = response.decode("utf-8")
41 | self.url_responses[url] = json.loads(response)
42 |
43 | return self.url_responses[url]
44 |
45 | def fetch_waveform_list(self):
46 |
47 | # Fetch the list of waveforms from the API and cache the result
48 | if not self.waveforms:
49 | data = self.read_api_url(self.api_url + "model?id=" + str(self.model_id))
50 | self.waveforms = data["waveform_list"]
51 |
52 | return self.waveforms
53 |
54 | def fetch_waveform_as_AnalogSignal(self, waveform_id, resolution_ms = 0.01, units = "mV"):
55 |
56 | # If signal not in cache
57 | if waveform_id not in self.waveform_signals:
58 | # Load api URL into Python
59 | data = self.read_api_url(self.api_url + "waveform?id=" + str(waveform_id))
60 |
61 | # Get time and signal values (from CSV format)
62 | t = np.array(data["Times"].split(','),float)
63 | signal = np.array(data["Variable_Values"].split(','),float)
64 |
65 | # Interpolate to regularly sampled series (API returns irregularly sampled)
66 | sig = interp1d(t,signal,fill_value="extrapolate")
67 | signal = sig(np.arange(min(t),max(t),resolution_ms))
68 |
69 | # Convert to neo.AnalogSignal
70 |             signal = AnalogSignal(signal, units=units, sampling_period=resolution_ms * pq.ms)
71 |
72 | starts_from_ss = next(w for w in self.waveforms if w["ID"] == waveform_id)["Starts_From_Steady_State"] == 1
73 |
74 | if starts_from_ss:
75 | rest_wave = self.get_steady_state_waveform()
76 |
77 |                 # rest_wave and the stimulus share a sampling period, so the
78 |                 # two traces can simply be concatenated.
79 |                 v = np.concatenate((np.array(rest_wave), np.array(signal))) * pq.mV
80 |                 signal = AnalogSignal(v, units=units, sampling_period=resolution_ms * pq.ms)
81 |
82 | self.waveform_signals[waveform_id] = signal
83 |
84 | return self.waveform_signals[waveform_id]
85 |
86 | def get_steady_state_waveform(self):
87 | if not hasattr(self, "steady_state_waveform") or self.steady_state_waveform is None:
88 | for w in self.waveforms:
89 | if w["Protocol_ID"] == "STEADY_STATE" and w["Variable_Name"] == "Voltage":
90 | self.steady_state_waveform = self.fetch_waveform_as_AnalogSignal(w["ID"])
91 | return self.steady_state_waveform
92 |
93 | raise Exception("Did not find the resting waveform." +
94 | " See " + self.api_url + "model?id=" + self.model_id +
95 | " for the list of available model waveforms.")
96 |
97 | return self.steady_state_waveform
98 |
99 | def get_waveform_by_current(self, amplitude_nA):
100 | for w in self.waveforms:
101 | if w["Variable_Name"] == "Voltage":
102 | wave_amp = self.get_waveform_current_amplitude(w)
103 | if ((amplitude_nA < 0 * pq.nA and w["Protocol_ID"] == "SQUARE") or
104 | (amplitude_nA >= 0 * pq.nA and w["Protocol_ID"] == "LONG_SQUARE")) \
105 | and amplitude_nA == wave_amp:
106 | return self.fetch_waveform_as_AnalogSignal(w["ID"])
107 |
108 | raise Exception("Did not find a Voltage waveform with injected " + str(amplitude_nA) +
109 | ". See " + self.api_url + "model?id=" + self.model_id +
110 | " for the list of available model waveforms.")
111 |
112 | def get_druckmann2013_standard_current(self):
113 | currents = []
114 | for w in self.waveforms:
115 | if w["Protocol_ID"] == "LONG_SQUARE" and w["Variable_Name"] == "Voltage":
116 | currents.append(self.get_waveform_current_amplitude(w))
117 |
118 | if len(currents) != 4:
119 | raise Exception("The LONG_SQUARE protocol for the model should have 4 waveforms")
120 |
121 | return [currents[-2]] # 2nd to last one is RBx1.5 waveform
122 |
123 | def get_druckmann2013_strong_current(self):
124 | currents = []
125 |
126 | for w in self.waveforms:
127 | if w["Protocol_ID"] == "LONG_SQUARE" and w["Variable_Name"] == "Voltage":
128 | currents.append(self.get_waveform_current_amplitude(w))
129 |
130 | if len(currents) != 4:
131 | raise Exception("The LONG_SQUARE protocol for the model should have 4 waveforms")
132 |
133 | return [currents[-1]] # The last one is RBx3 waveform
134 |
135 | def get_druckmann2013_input_resistance_currents(self):
136 | currents = []
137 |
138 | # Find and return negative square current injections
139 | for w in self.waveforms:
140 | if w["Protocol_ID"] == "SQUARE" and w["Variable_Name"] == "Voltage":
141 | amp = self.get_waveform_current_amplitude(w)
142 | if amp < 0 * pq.nA:
143 | currents.append(amp)
144 |
145 | return currents
146 |
147 | def get_waveform_current_amplitude(self, waveform):
148 | return float(waveform["Waveform_Label"].replace(" nA", "")) * pq.nA
149 |
150 |
151 | class NeuroMLDBStaticModel(StaticModel):
152 | def __init__(self, model_id, **params):
153 | self.nmldb_model = NeuroMLDBModel(model_id)
154 | self.nmldb_model.fetch_waveform_list()
155 |
156 | def inject_square_current(self, current):
157 | self.vm = self.nmldb_model.get_waveform_by_current(current["amplitude"])
158 |
--------------------------------------------------------------------------------
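A usage sketch for NeuroMLDBModel (requires network access to neuroml-db.org; the amplitude must match one of the model's SQUARE/LONG_SQUARE waveform labels, otherwise get_waveform_by_current raises):

    import quantities as pq
    from neuronunit.neuromldb import NeuroMLDBModel

    model = NeuroMLDBModel(model_id="NMLCL000086")   # the class default ID
    model.fetch_waveform_list()                      # cached after the first call
    vm = model.get_waveform_by_current(0.1 * pq.nA)  # neo.AnalogSignal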
/neuronunit/optimization/__init__.py:
--------------------------------------------------------------------------------
1 | """NeuronUnit Optimization classes"""
2 |
3 | # from .evaluate_as_module import *
4 | # from .nsga_parallel import *
5 |
--------------------------------------------------------------------------------
/neuronunit/optimization/model_parameters.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import os
3 | from collections import OrderedDict
4 | import collections
6 |
7 | # import pickle
8 |
9 |
10 | def to_bpo_param(attrs):
11 | from bluepyopt.parameters import Parameter
12 |
13 | lop = {}
14 | for k, v in attrs.items():
15 | temp = tuple(sorted(v))
16 | p = Parameter(name=k, bounds=temp[:], frozen=False)
17 | lop[k] = p
18 | return lop
19 |
20 |
21 | def check_if_param_stradles_boundary(opt, model_type):
22 | for k, v in MODEL_PARAMS[model_type].items():
23 | print(v, opt.attrs[k], k)
24 |
25 |
26 | MODEL_PARAMS = {}
27 | THIS_DIR = os.path.dirname(os.path.realpath(__file__))
28 | path_params = {}
29 | path_params["model_path"] = os.path.realpath(
30 | os.path.join(THIS_DIR, "..", "models", "NeuroML2", "LEMS_2007One.xml")
31 | )
32 | # Which Parameters
33 | # https://www.izhikevich.org/publications/spikes.htm
34 | type2007 = collections.OrderedDict(
35 | [
36 | # C k vr vt vpeak a b c d celltype
37 | ("RS", (100, 0.7, -60, -40, 35, 0.01, -2, -50, 100, 3)),
38 | ("IB", (150, 1.2, -75, -45, 50, 0.1, 5, -56, 130, 3)),
39 | ("TC", (200, 1.6, -60, -50, 35, 0.1, 15, -60, 10, 6)),
40 | ("TC_burst", (200, 1.6, -60, -50, 35, 0.1, 15, -60, 10, 6)),
41 | ("LTS", (100, 1.0, -56, -42, 40, 0.01, 8, -53, 20, 4)),
42 | ("RTN", (40, 0.25, -65, -45, 0, 0.015, 10, -55, 50, 7)),
43 | ("FS", (20, 1, -55, -40, 25, 0.2, -2, -45, -55, 5)),
44 | ("CH", (50, 1.5, -60, -40, 25, 0.01, 1, -40, 150, 3)),
45 | ]
46 | )
47 | temp = {k: [] for k in ["C", "k", "vr", "vt", "vPeak", "a", "b", "c", "d", "celltype"]}
48 | for i, k in enumerate(temp.keys()):
49 | for v in type2007.values():
50 | temp[k].append(v[i])
51 | explore_param = {k: (np.min(v), np.max(v)) for k, v in temp.items()}
52 | IZHI_PARAMS = {k: sorted(v) for k, v in explore_param.items()}
53 |
54 | IZHI_PARAMS = OrderedDict(IZHI_PARAMS)
55 | MODEL_PARAMS["IZHI"] = IZHI_PARAMS
56 | # Fast spiking cannot be reproduced as it requires modifications to the standard Izhi equation,
57 | # which are expressed in this mod file.
58 | # https://github.com/OpenSourceBrain/IzhikevichModel/blob/master/NEURON/izhi2007b.mod
59 | """
60 | deprecated
61 | trans_dict = OrderedDict([(k,[]) for k in ['C','k','vr','vt','vPeak','a','b','c','d']])
62 | for i,k in enumerate(trans_dict.keys()):
63 | for v in type2007.values():
64 | trans_dict[k].append(v[i])
65 | reduced_cells = OrderedDict([(k,[]) for k in ['RS','IB','LTS','TC','TC_burst']])
66 | for index,key in enumerate(reduced_cells.keys()):
67 | reduced_cells[key] = {}
68 | for k,v in trans_dict.items():
69 | reduced_cells[key][k] = v[index]
70 | """
71 |
72 |
73 | # AdExp model parameters
74 | BAE1 = {}
76 | BAE1["cm"] = 0.281
77 | BAE1["v_spike"] = -40.0
78 | BAE1["v_reset"] = -70.6
79 | BAE1["v_rest"] = -70.6
80 | BAE1["tau_m"] = 9.3667
81 | BAE1["a"] = 4.0
82 | BAE1["b"] = 0.0805
83 | BAE1["delta_T"] = 2.0
84 | BAE1["tau_w"] = 144.0
85 | BAE1["v_thresh"] = -50.4
86 | BAE1["spike_delta"] = 30
87 | # general range rule:
88 | BAE1 = {
89 | k: (np.mean(v) - np.abs(np.mean(v) * 2.5), np.mean(v) + np.mean(v) * 2.5)
90 | for k, v in BAE1.items()
91 | }
92 | BAE1 = {k: sorted(v) for k, v in BAE1.items()}
93 | # specific ad hoc adjustments:
94 | # BAE1['v_spike']=[-70.0,-20]
95 | # BAE1['v_reset'] = [1, 983.5]
96 | # BAE1['v_rest'] = [-100, -35]
97 | BAE1["v_thresh"] = [-65, -15]
98 | BAE1["spike_delta"] = [1.25, 135]
99 | BAE1["b"] = [0.01, 20]
100 | BAE1["a"] = [0.01, 20]
101 | BAE1["tau_w"] = [0.05, 354] # Tau_w 0, means very low adaption
102 | BAE1["cm"] = [1, 983.5]
103 | BAE1["v_spike"] = [-70.0, -20]
104 | # BAE1['v_reset'] = [1, 983.5]
105 | BAE1["v_reset"] = [-100, -25]
106 | BAE1["v_rest"] = [-100, -35]
107 | BAE1["v_thresh"] = [-65, -15]
108 | BAE1["delta_T"] = [1, 10]
109 | BAE1["tau_m"] = [0.01, 62.78345]
110 | for v in BAE1.values():
111 | assert v[1] - v[0] != 0
112 | MODEL_PARAMS["ADEXP"] = BAE1
113 |
114 |
115 | # Multi TimeScale Adaptive Neuron
116 | MATNEURON = {
117 | "vr": -65.0,
118 | "vt": -55.0,
119 | "a1": 10,
120 | "a2": 2,
121 | "b": 0.001,
122 | "w": 5,
123 | "R": 10,
124 | "tm": 10,
125 | "t1": 10,
126 | "t2": 200,
127 | "tv": 5,
128 | "tref": 2,
129 | }
130 | MATNEURON = {
131 | k: (
132 | np.mean(v) - np.abs(np.mean(v) * 0.125),
133 | np.mean(v) + np.abs(np.mean(v)) * 0.125,
134 | )
135 | for k, v in MATNEURON.items()
136 | }
137 | MATNEURON["b"] = [0.0000001, 0.003]
138 | MATNEURON["R"] = [2.5, 200]
139 | MATNEURON["vr"] = [-85, -45]
140 | MATNEURON["vt"] = [-60, -35]
141 | MATNEURON["w"] = [0.125, 25]
142 | MATNEURON["tm"] = [5, 250]
143 |
144 | MATNEURON["tref"] = [0.5, 50]
145 | MATNEURON["a1"] = [9, 55]
146 | MATNEURON["a2"] = [0.5, 4]
147 | MATNEURON["t1"] = [5, 15]
148 | MATNEURON["t2"] = [150, 2089]
149 | MATNEURON["tv"] = [5, 255]
150 |
151 | MATNEURON = {k: sorted(v) for k, v in MATNEURON.items()}
152 | MODEL_PARAMS["MAT"] = MATNEURON
153 |
154 | for k, v in MATNEURON.items():
155 | assert v[1] - v[0] != 0
156 | GLIF_RANGE = {
157 | "El_reference": [-0.08569469261169435, -0.05463626766204832],
158 | "C": [3.5071610042390286e-13, 10 * 7.630189223327981e-10],
159 | "init_threshold": [0.009908733642683513, 0.06939040414685865],
160 | "th_inf": [0.009908733642683513, 0.04939040414685865],
161 | "init_AScurrents": [0.0, 0.0],
162 | "init_voltage": [-0.09, -0.01],
163 | "spike_cut_length": [0.25, 94],
164 | "El": [-0.08569469261169435, -0.05463626766204832],
165 | "asc_tau_array": [[0.01, 0.0033333333333333335], [0.3333333333333333, 0.1]],
166 | "R_input": [17743752.593817078, 10 * 1792774179.3647704],
167 | }
168 | GLIF_RANGE["th_adapt"] = [0.01, 1] # 0.1983063518904063]
169 | GLIF_RANGE["C"] = [0, 10]
170 | GLIF_RANGE.pop("init_AScurrents", None)
171 | GLIF_RANGE.pop("dt", None)
172 | GLIF_RANGE.pop("asc_tau_array", None)
173 | GLIF_RANGE.pop("El", None)
174 | GLIF_RANGE = {k: sorted(v) for k, v in GLIF_RANGE.items()}
175 | MODEL_PARAMS["GLIF"] = GLIF_RANGE
176 | BPO_PARAMS = {}
177 | for k, v in MODEL_PARAMS.items():
178 | BPO_PARAMS[k] = to_bpo_param(v)
179 |
180 |
181 | """
182 | Deprecated
183 | l5_pc_keys = ['gNaTs2_tbar_NaTs2_t.apical', 'gSKv3_1bar_SKv3_1.apical', 'gImbar_Im.apical', 'gNaTa_tbar_NaTa_t.axonal', 'gNap_Et2bar_Nap_Et2.axonal', 'gK_Pstbar_K_Pst.axonal', 'gK_Tstbar_K_Tst.axonal', 'gSK_E2bar_SK_E2.axonal', 'gSKv3_1bar_SKv3_1.axonal', 'gCa_HVAbar_Ca_HVA.axonal', 'gCa_LVAstbar_Ca_LVAst.axonal', 'gamma_CaDynamics_E2.axonal', 'decay_CaDynamics_E2.axonal', 'gNaTs2_tbar_NaTs2_t.somatic', 'gSKv3_1bar_SKv3_1.somatic', 'gSK_E2bar_SK_E2.somatic', 'gCa_HVAbar_Ca_HVA.somatic', 'gCa_LVAstbar_Ca_LVAst.somatic', 'gamma_CaDynamics_E2.somatic', 'decay_CaDynamics_E2.somatic']
184 | l5_pc_values = [0.0009012730575340265, 0.024287352056036934, 0.0008315987398062784, 1.7100532387472567, 0.7671786030824507, 0.47339571930108143, 0.0025715065622581644, 0.024862299158354962, 0.7754822886266044, 0.0005560440082771592, 0.0020639185209852568, 0.013376906273759268, 207.56154268835758, 0.5154365543590191, 0.2565961138691978, 0.0024100296151316754, 0.0007416593834676707, 0.006240529502225737, 0.028595343511797353, 226.7501580822364]
185 |
186 | L5PC = OrderedDict()
187 | for k,v in zip(l5_pc_keys,l5_pc_values):
188 | L5PC[k] = sorted((v-0.1*v,v+0.1*v))
189 |
190 | MODEL_PARAMS['L5PC'] = L5PC
191 | """
192 |
--------------------------------------------------------------------------------
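A short example of consuming the dictionaries defined above (bluepyopt must be installed for to_bpo_param; "a" is one of the Izhikevich keys in MODEL_PARAMS["IZHI"]):

    from neuronunit.optimization.model_parameters import MODEL_PARAMS, to_bpo_param

    lo, hi = MODEL_PARAMS["IZHI"]["a"]             # (min, max) search bounds
    izhi_bpo = to_bpo_param(MODEL_PARAMS["IZHI"])  # name -> bluepyopt Parameter
    print(izhi_bpo["a"].name, izhi_bpo["a"].bounds)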
/neuronunit/optimization/neuronunit_to_bpo.py:
--------------------------------------------------------------------------------
1 | def make_passive_protocol():
2 |     pass
3 | 
4 | 
5 | def make_zero_current_protocol():
6 |     pass
7 | 
8 | 
9 | def make_unknown_rheobase_protocol():
10 |     pass
11 | 
12 | 
13 | def make_unknown_multispike_protocol():
14 |     pass
15 | 
16 | 
17 | def neuronunit_tests_to_bpo_protocols():
18 |     pass
19 | 
--------------------------------------------------------------------------------
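The functions above are unimplemented placeholders. As a hedged sketch, make_passive_protocol could return a stimulus description in the injected_square_current format already used by the waveform tests in this package; the timing values and the -10 pA amplitude below are illustrative, not taken from the source:

    import quantities as pq

    def make_passive_protocol():
        # Hypothetical body: a small hyperpolarizing step as a plain dict.
        return {"injected_square_current": {
            "delay": 100 * pq.ms,
            "duration": 500 * pq.ms,
            "amplitude": -10 * pq.pA,
        }}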
/neuronunit/tests/.gitignore:
--------------------------------------------------------------------------------
1 | *.png
2 | *.p
3 | *.pyc
4 | *.orig
5 | *.pickle
6 | __pycache__
7 |
--------------------------------------------------------------------------------
/neuronunit/tests/__init__.py:
--------------------------------------------------------------------------------
1 | """NeuronUnit Test classes."""
2 |
3 | from .passive import *
4 | from .waveform import *
5 | from .dynamics import FITest
6 |
7 | from .dynamics import *
8 | from .fi import *
9 |
10 | import sciunit
11 | from sciunit import scores, errors
12 | from sciunit.errors import CapabilityError, InvalidScoreError
13 |
14 |
15 | class FakeTest(sciunit.Test):
16 |
17 | # from sciunit.errors import CapabilityError, InvalidScoreError
18 |
19 | # score_type = scores.RatioScore
20 | score_type = sciunit.scores.ZScore
21 |
22 | def generate_prediction(self, model):
23 | self.key_param = self.name.split("_")[1]
24 | self.prediction = model.attrs[self.key_param]
25 | return self.prediction
26 |
27 | def compute_score(self, observation, prediction):
28 | mean = observation[0]
29 | std = observation[1]
30 | z = (prediction - mean) / std
31 | # self.prediction = prediction
32 | # print(scores.ZScore(z))
33 | return scores.ZScore(z)
34 |
--------------------------------------------------------------------------------
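FakeTest compares a model attribute, named by the part of the test name after the first underscore, against a (mean, std) observation. A usage sketch with a hypothetical model class:

    import sciunit
    from neuronunit.tests import FakeTest

    class AttrModel(sciunit.Model):
        attrs = {"a": 1.2}

    test = FakeTest(observation=(1.0, 0.5), name="fake_a")
    score = test.judge(AttrModel(name="m"))  # ZScore((1.2 - 1.0) / 0.5) = 0.4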
/neuronunit/tests/make_allen_tests.py:
--------------------------------------------------------------------------------
1 | from neuronunit.tests.base import VmTest
2 | import pickle
3 | import numpy as np
4 | from allensdk.core.cell_types_cache import CellTypesCache
5 |
6 |
7 | class AllenTest(VmTest):
8 | def __init__(
9 | self,
10 | observation={"mean": None, "std": None},
11 | name="generic_allen",
12 | prediction={"mean": None, "std": None},
13 | **params
14 | ):
15 | super(AllenTest, self).__init__(observation, name, **params)
16 |
17 | name = ""
18 | aliases = ""
19 |
20 | def compute_params(self):
21 | self.params["t_max"] = (
22 | self.params["delay"] + self.params["duration"] + self.params["padding"]
23 | )
24 |
25 | def set_observation(self, observation):
26 | self.observation = {}
27 | self.observation["mean"] = observation
28 | self.observation["std"] = observation
29 |
30 | def set_prediction(self, prediction):
31 | self.prediction = {}
32 | self.prediction["mean"] = prediction
33 | self.prediction["std"] = prediction
34 |
35 |
36 | ctc = CellTypesCache(manifest_file="manifest.json")
37 | features = ctc.get_ephys_features()
38 |
--------------------------------------------------------------------------------
/neuronunit/tests/waveform.py:
--------------------------------------------------------------------------------
1 | """Waveform neuronunit tests, e.g. testing AP waveform properties"""
2 |
3 | from .base import np, pq, ncap, VmTest, scores
4 |
5 |
6 | class InjectedCurrent:
7 |     """Mixin for tests that inject a square current into the model."""
8 |
9 | required_capabilities = (ncap.ReceivesSquareCurrent,)
10 |
11 | default_params = dict(VmTest.default_params)
12 | default_params.update({"amplitude": 100 * pq.pA})
13 |
14 | def compute_params(self):
15 | self.params["injected_square_current"] = self.get_injected_square_current()
16 | self.params["injected_square_current"]["amplitude"] = self.params["amplitude"]
17 |
18 |
19 | class APWidthTest(VmTest):
20 | """Test the full widths of action potentials at their half-maximum."""
21 |
22 | required_capabilities = (ncap.ProducesActionPotentials,)
23 |
24 | name = "AP width test"
25 |
26 | description = (
27 | "A test of the widths of action potentials " "at half of their maximum height."
28 | )
29 |
30 | score_type = scores.RatioScore
31 |
32 | units = pq.ms
33 |
34 | ephysprop_name = "Spike Half-Width"
35 |
36 | def generate_prediction(self, model):
37 | """Implement sciunit.Test.generate_prediction."""
38 | # Method implementation guaranteed by
39 | # ProducesActionPotentials capability.
40 | # if get_spike_count is zero, then widths will be None
41 | # len of None returns an exception that is not handled
42 | model.inject_square_current(**self.params["injected_square_current"])
43 |
44 | widths = model.get_AP_widths()
45 | # Put prediction in a form that compute_score() can use.
46 | prediction = {
47 | "mean": np.mean(widths) if len(widths) else None,
48 | "std": np.std(widths) if len(widths) else None,
49 | "n": len(widths),
50 | }
51 |
52 | return prediction
53 |
54 | def compute_score(self, observation, prediction):
55 | """Implement sciunit.Test.score_prediction."""
56 |         if prediction is None or prediction["n"] == 0:
57 |             score = scores.InsufficientDataScore(None)
60 | else:
61 | score = super(APWidthTest, self).compute_score(observation, prediction)
62 | return score
63 |
64 |
65 | class InjectedCurrentAPWidthTest(InjectedCurrent, APWidthTest):
66 | """Tests the full widths of APs at their half-maximum
67 | under current injection.
68 | """
69 |
70 | score_type = scores.ZScore
71 |
72 | units = pq.ms
73 |
74 | name = "Injected current AP width test"
75 |
76 | description = (
77 | "A test of the widths of action potentials "
78 | "at half of their maximum height when current "
79 | "is injected into cell."
80 | )
81 |
82 |     def generate_prediction(self, model):
84 | model.inject_square_current(**self.params["injected_square_current"])
85 | prediction = super(InjectedCurrentAPWidthTest, self).generate_prediction(model)
86 |
87 | return prediction
88 |
89 |
90 | class APAmplitudeTest(VmTest):
91 | """Test the heights (peak amplitude) of action potentials."""
92 |
93 | required_capabilities = (ncap.ProducesActionPotentials,)
94 |
95 | name = "AP amplitude test"
96 |
97 | description = (
98 | "A test of the amplitude (peak minus threshold) of " "action potentials."
99 | )
100 |
101 | score_type = scores.ZScore
102 |
103 | units = pq.mV
104 |
105 | ephysprop_name = "Spike Amplitude"
106 |
107 | def generate_prediction(self, model):
108 | """Implement sciunit.Test.generate_prediction."""
109 | # Method implementation guaranteed by
110 | # ProducesActionPotentials capability.
111 | model.inject_square_current(**self.params["injected_square_current"])
112 |
113 | heights = model.get_AP_amplitudes() - model.get_AP_thresholds()
114 | # Put prediction in a form that compute_score() can use.
115 | prediction = {
116 | "mean": np.mean(heights) if len(heights) else None,
117 | "std": np.std(heights) if len(heights) else None,
118 | "n": len(heights),
119 | }
120 | return prediction
121 |
122 | def compute_score(self, observation, prediction):
123 |         """Implement sciunit.Test.score_prediction."""
124 | if prediction["n"] == 0:
125 | score = scores.InsufficientDataScore(None)
126 | else:
127 | score = super(APAmplitudeTest, self).compute_score(observation, prediction)
128 | return score
129 |
130 |
131 | class InjectedCurrentAPAmplitudeTest(InjectedCurrent, APAmplitudeTest):
132 | """Test the heights (peak amplitude) of action potentials.
133 |
134 | Uses current injection.
135 | """
136 |
137 | name = "Injected current AP amplitude test"
138 |
139 | description = (
140 | "A test of the heights (peak amplitudes) of "
141 | "action potentials when current "
142 | "is injected into cell."
143 | )
144 |
145 | def generate_prediction(self, model):
146 | model.inject_square_current(**self.params["injected_square_current"])
147 | prediction = super(InjectedCurrentAPAmplitudeTest, self).generate_prediction(
148 | model
149 | )
150 | return prediction
151 |
152 |
153 | class APThresholdTest(VmTest):
154 | """Test the full widths of action potentials at their half-maximum."""
155 |
156 | required_capabilities = (ncap.ProducesActionPotentials,)
157 |
158 | name = "AP threshold test"
159 |
160 | description = (
161 | "A test of the membrane potential threshold at which "
162 | "action potentials are produced."
163 | )
164 |
165 | score_type = scores.ZScore
166 |
167 | units = pq.mV
168 |
169 | ephysprop_name = "Spike Threshold"
170 |
171 | def generate_prediction(self, model):
172 | """Implement sciunit.Test.generate_prediction."""
173 | # Method implementation guaranteed by
174 | # ProducesActionPotentials capability.
175 | model.inject_square_current(**self.params["injected_square_current"])
176 |
177 | threshes = model.get_AP_thresholds()
178 | # Put prediction in a form that compute_score() can use.
179 | prediction = {
180 | "mean": np.mean(threshes) if len(threshes) else None,
181 | "std": np.std(threshes) if len(threshes) else None,
182 | "n": len(threshes),
183 | }
184 | return prediction
185 |
186 | def compute_score(self, observation, prediction):
187 | """Implement sciunit.Test.score_prediction."""
188 | if prediction["n"] == 0:
189 | score = scores.InsufficientDataScore(None)
190 | else:
191 | score = super(APThresholdTest, self).compute_score(observation, prediction)
192 | return score
193 |
194 |
195 | class InjectedCurrentAPThresholdTest(InjectedCurrent, APThresholdTest):
196 | """Test the thresholds of action potentials under current injection."""
197 |
198 | name = "Injected current AP threshold test"
199 |
200 | description = (
201 | "A test of the membrane potential threshold at which "
202 | "action potentials are produced under current injection."
203 | )
204 |
205 | def generate_prediction(self, model):
206 | model.inject_square_current(self.params["injected_square_current"])
207 | return super(InjectedCurrentAPThresholdTest, self).generate_prediction(model)
208 |
--------------------------------------------------------------------------------
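All of the tests above reduce to comparing a {mean, std, n} prediction against a {mean, std} observation. A self-contained sketch of that arithmetic with illustrative numbers:

    import numpy as np
    import quantities as pq
    from sciunit.scores import ZScore

    widths = np.array([1.1, 1.3, 1.2]) * pq.ms  # per-spike AP widths
    prediction = {"mean": np.mean(widths), "std": np.std(widths), "n": len(widths)}
    observation = {"mean": 1.0 * pq.ms, "std": 0.2 * pq.ms}
    z = float((prediction["mean"] - observation["mean"]) / observation["std"])
    score = ZScore(z)  # cf. compute_score() in the tests above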
/neuronunit/unit_test/__init__.py:
--------------------------------------------------------------------------------
1 | """Unit testing module for NeuronUnit"""
2 |
3 | import warnings
4 |
5 | import matplotlib as mpl
6 |
7 | mpl.use("Agg") # Needed for headless testing
8 | warnings.filterwarnings("ignore") # Suppress all warning messages
9 |
10 | from .base import *
11 | from .import_tests import ImportTestCase
12 | from .doc_tests import DocumentationTestCase
13 | from .resource_tests import NeuroElectroTestCase, BlueBrainTestCase, AIBSTestCase
14 | from .model_tests import (
15 | ReducedModelTestCase,
16 | ExtraCapabilitiesTestCase,
17 | HasSegmentTestCase,
18 | GeppettoBackendTestCase,
19 | VeryReducedModelTestCase,
20 | StaticExternalTestCase,
21 | )
22 |
23 | # from .observation_tests import ObservationsTestCase
24 | """
25 | from .test_tests import (
26 | TestsPassiveTestCase,
27 | TestsWaveformTestCase,
28 | TestsFITestCase,
29 | TestsDynamicsTestCase,
30 | TestsChannelTestCase,
31 | FakeTestCase,
32 | )
33 | """
34 | from .misc_tests import EphysPropertiesTestCase
35 |
36 | # from .cache_tests import BackendCacheTestCase
41 | #from .opt_ephys_properties import testOptimizationEphysCase
42 | #from .scores_unit_test import testOptimizationAllenMultiSpike
43 | from .rheobase_model_test import testModelRheobase
45 |
46 | # from .adexp_opt import *
47 | """
48 | from .capabilities_tests import *
49 |
50 | from .test_druckmann2013 import (
51 | Model1TestCase,
52 | Model2TestCase,
53 | Model3TestCase,
54 | Model4TestCase,
55 | Model5TestCase,
56 | Model6TestCase,
57 | Model7TestCase,
58 | Model8TestCase,
59 | Model9TestCase,
60 | Model10TestCase,
61 | Model11TestCase,
62 | OthersTestCase,
63 | )
64 | """
65 | # from .test_morphology import MorphologyTestCase
66 | # from .test_optimization import DataTCTestCase
67 | # from .sciunit_tests import SciUnitTestCase
68 |
69 | # from .adexp_opt import *
70 |
--------------------------------------------------------------------------------
/neuronunit/unit_test/__main__.py:
--------------------------------------------------------------------------------
1 | """All unit tests for NeuronUnit"""
2 |
3 | import sys
4 | import unittest
5 | from . import * # Import all the tests from the unit_test module
6 |
7 |
8 | def main():
9 | buffer = "buffer" in sys.argv
10 | sys.argv = sys.argv[:1] # Args need to be removed for __main__ to work.
11 | unittest.main(buffer=buffer)
12 |
13 |
14 | if __name__ == "__main__":
15 | main()
16 |
--------------------------------------------------------------------------------
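With this entry point the whole suite runs as a module, e.g. python -m neuronunit.unit_test; passing the literal argument buffer (python -m neuronunit.unit_test buffer) makes unittest capture stdout/stderr during the run.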
/neuronunit/unit_test/adexp_opt.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # coding: utf-8
3 | SILENT = True
4 | import warnings
5 |
6 | if SILENT:
7 | warnings.filterwarnings("ignore")
8 |
9 | import unittest
10 | import numpy as np
11 | import efel
12 | import matplotlib
13 | 
14 | matplotlib.use("Agg")  # choose a headless backend before pyplot is imported
15 | import matplotlib.pyplot as plt
16 | import quantities as qt
17 | 
18 | from neuronunit.allenapi.allen_data_efel_features_opt import (
19 | opt_to_model,
20 | opt_setup,
21 | opt_exec,
22 | )
24 | from neuronunit.allenapi.utils import dask_map_function
25 |
26 | from neuronunit.optimization.model_parameters import (
27 | MODEL_PARAMS,
28 | BPO_PARAMS,
29 | to_bpo_param,
30 | )
31 | from neuronunit.optimization.optimization_management import inject_model_soma
36 | from jithub.models import model_classes
37 |
38 | from sciunit.scores import RelativeDifferenceScore
39 |
40 |
41 | class testOptimization(unittest.TestCase):
42 | def setUp(self):
43 | self.ids = [
44 | 324257146,
45 | 325479788,
46 | 476053392,
47 | 623893177,
48 | 623960880,
49 | 482493761,
50 | 471819401,
51 | ]
52 |
53 | def test_opt_1(self):
54 | specimen_id = self.ids[1]
55 | cellmodel = "ADEXP"
56 |
57 | if cellmodel == "IZHI":
58 | model = model_classes.IzhiModel()
59 | if cellmodel == "MAT":
60 | model = model_classes.MATModel()
61 | if cellmodel == "ADEXP":
62 | model = model_classes.ADEXPModel()
63 |
64 | target_num_spikes = 9
65 |
66 | efel_filter_iterable = [
67 | "ISI_log_slope",
68 | "mean_frequency",
69 | "adaptation_index2",
70 | "first_isi",
71 | "ISI_CV",
72 | "median_isi",
73 | "Spikecount",
74 | "all_ISI_values",
75 | "ISI_values",
76 | "time_to_first_spike",
77 | "time_to_last_spike",
78 | "time_to_second_spike",
79 | ]
80 | [suite, target_current, spk_count, cell_evaluator, simple_cell] = opt_setup(
81 | specimen_id,
82 | cellmodel,
83 | target_num_spikes,
84 | template_model=model,
85 | fixed_current=False,
86 | cached=False,
87 | score_type=RelativeDifferenceScore,
88 | efel_filter_iterable=efel_filter_iterable,
89 | )
90 |
91 | NGEN = 55
92 | MU = 35
93 |
94 | mapping_funct = dask_map_function
95 | final_pop, hall_of_fame, logs, hist = opt_exec(
96 | MU, NGEN, mapping_funct, cell_evaluator, cxpb=0.4, mutpb=0.01
97 | )
98 | opt, target, scores, obs_preds, df = opt_to_model(
99 | hall_of_fame, cell_evaluator, suite, target_current, spk_count
100 | )
101 | best_ind = hall_of_fame[0]
102 | fitnesses = cell_evaluator.evaluate_with_lists(best_ind)
104 |         self.assertGreater(10.7, np.sum(fitnesses))
105 |
106 |
107 | if __name__ == "__main__":
108 | unittest.main()
109 |
--------------------------------------------------------------------------------
/neuronunit/unit_test/base.py:
--------------------------------------------------------------------------------
1 | """Common imports for many unit tests in this directory"""
2 |
3 | import unittest
4 | import sys
5 | import os
6 | import warnings
7 | import pdb
8 | from urllib.request import urlretrieve
9 |
10 | import matplotlib as mpl
11 |
12 | OSX = sys.platform == "darwin"
13 | if OSX or "Qt" in mpl.rcParams["backend"]:
14 | mpl.use("Agg") # Avoid any problems with Macs or headless displays.
15 |
16 | from sciunit.utils import NotebookTools, import_all_modules
17 | import neuronunit
18 | from neuronunit.models import ReducedModel
19 | from neuronunit import neuroelectro, bbp, aibs, tests as nu_tests
20 |
21 | NU_BACKEND = os.environ.get("NU_BACKEND", "jNeuroML")
22 | NU_HOME = neuronunit.__path__[0]
23 |
--------------------------------------------------------------------------------
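Because NU_BACKEND is read from the environment with a jNeuroML default, the backend used by model tests can be switched per run, e.g. NU_BACKEND=NEURON python -m neuronunit.unit_test.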
/neuronunit/unit_test/cache_tests.py:
--------------------------------------------------------------------------------
1 | """"Tests of ephys property measurements"""
2 |
3 | from .base import *
4 |
5 |
6 | class BackendCacheTestCase(NotebookTools, unittest.TestCase):
7 | """Testing reading/writing to the backend cache"""
8 |
9 | path = "."
10 |
11 | def test_cache_use(self):
12 | self.do_notebook("cache_use")
13 |
14 | def test_cache_edit(self):
15 | self.do_notebook("cache_edit")
16 |
17 |
18 | if __name__ == "__main__":
19 | unittest.main()
20 |
--------------------------------------------------------------------------------
/neuronunit/unit_test/cache_use.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# Demonstrate the model cache"
8 | ]
9 | },
10 | {
11 | "cell_type": "code",
12 | "execution_count": 1,
13 | "metadata": {},
14 | "outputs": [],
15 | "source": [
16 | "import os\n",
17 | "import neuronunit\n",
18 | "from neuronunit.models.reduced import ReducedModel"
19 | ]
20 | },
21 | {
22 | "cell_type": "code",
23 | "execution_count": 2,
24 | "metadata": {},
25 | "outputs": [],
26 | "source": [
27 | "model_path = os.path.join(neuronunit.DIR,'models/NeuroML2/LEMS_2007One.xml')\n",
28 | "model = ReducedModel(model_path, backend='jNeuroML')"
29 | ]
30 | },
31 | {
32 | "cell_type": "code",
33 | "execution_count": 3,
34 | "metadata": {},
35 | "outputs": [
36 | {
37 | "name": "stdout",
38 | "output_type": "stream",
39 | "text": [
40 | "pyNeuroML >>> Reloading data specified in LEMS file: /tmp/tmpuoilk3ac/LEMS_2007One.xml (/tmp/tmpuoilk3ac/LEMS_2007One.xml), base_dir: /tmp/tmpxmlbpdef, cwd: /mnt/c/Users/Rick Gerkin/Dropbox (ASU)/dev/scidash/neuronunit/neuronunit/unit_test\n",
41 | "CPU times: user 297 ms, sys: 62.5 ms, total: 359 ms\n",
42 | "Wall time: 3.2 s\n"
43 | ]
44 | }
45 | ],
46 | "source": [
47 | "%time model.run() # Runs slow because the model is brand new"
48 | ]
49 | },
50 | {
51 | "cell_type": "code",
52 | "execution_count": 4,
53 | "metadata": {},
54 | "outputs": [
55 | {
56 | "name": "stdout",
57 | "output_type": "stream",
58 | "text": [
59 | "CPU times: user 15.6 ms, sys: 0 ns, total: 15.6 ms\n",
60 | "Wall time: 5.48 ms\n"
61 | ]
62 | }
63 | ],
64 | "source": [
65 | "%time model.run() # Runs fast because it looks up the result in the memory cache"
66 | ]
67 | },
68 | {
69 | "cell_type": "code",
70 | "execution_count": 5,
71 | "metadata": {},
72 | "outputs": [],
73 | "source": [
74 | "model.get_backend().use_memory_cache = False # Turn off the memory cache"
75 | ]
76 | },
77 | {
78 | "cell_type": "code",
79 | "execution_count": 6,
80 | "metadata": {},
81 | "outputs": [
82 | {
83 | "name": "stdout",
84 | "output_type": "stream",
85 | "text": [
86 | "pyNeuroML >>> Reloading data specified in LEMS file: /tmp/tmpuoilk3ac/LEMS_2007One.xml (/tmp/tmpuoilk3ac/LEMS_2007One.xml), base_dir: /tmp/tmpdg0a9a0g, cwd: /mnt/c/Users/Rick Gerkin/Dropbox (ASU)/dev/scidash/neuronunit/neuronunit/unit_test\n",
87 | "CPU times: user 281 ms, sys: 62.5 ms, total: 344 ms\n",
88 | "Wall time: 3.29 s\n"
89 | ]
90 | }
91 | ],
92 | "source": [
93 | "%time model.run() # Runs slow because it isn't allowed to use the memory cache and so has to run from scratch"
94 | ]
95 | },
96 | {
97 | "cell_type": "code",
98 | "execution_count": 7,
99 | "metadata": {},
100 | "outputs": [],
101 | "source": [
102 | "model.get_backend().use_disk_cache = True # Turn on the disk cache"
103 | ]
104 | },
105 | {
106 | "cell_type": "code",
107 | "execution_count": 8,
108 | "metadata": {},
109 | "outputs": [
110 | {
111 | "name": "stdout",
112 | "output_type": "stream",
113 | "text": [
114 | "pyNeuroML >>> Reloading data specified in LEMS file: /tmp/tmpuoilk3ac/LEMS_2007One.xml (/tmp/tmpuoilk3ac/LEMS_2007One.xml), base_dir: /tmp/tmpq43k6r5f, cwd: /mnt/c/Users/Rick Gerkin/Dropbox (ASU)/dev/scidash/neuronunit/neuronunit/unit_test\n",
115 | "CPU times: user 281 ms, sys: 93.8 ms, total: 375 ms\n",
116 | "Wall time: 3.34 s\n"
117 | ]
118 | }
119 | ],
120 | "source": [
121 | "%time model.run() # Still runs slow because it hasn't had a chance to get stored in the disk cache yet"
122 | ]
123 | },
124 | {
125 | "cell_type": "code",
126 | "execution_count": 9,
127 | "metadata": {},
128 | "outputs": [
129 | {
130 | "name": "stdout",
131 | "output_type": "stream",
132 | "text": [
133 | "CPU times: user 78.1 ms, sys: 46.9 ms, total: 125 ms\n",
134 | "Wall time: 136 ms\n"
135 | ]
136 | }
137 | ],
138 | "source": [
139 | "%time model.run() # Runs medium because the disk cache is faster than running from scratch (but slower than the memory cache)"
140 | ]
141 | }
142 | ],
143 | "metadata": {
144 | "kernelspec": {
145 | "display_name": "Python 3",
146 | "language": "python",
147 | "name": "python3"
148 | },
149 | "language_info": {
150 | "codemirror_mode": {
151 | "name": "ipython",
152 | "version": 3
153 | },
154 | "file_extension": ".py",
155 | "mimetype": "text/x-python",
156 | "name": "python",
157 | "nbconvert_exporter": "python",
158 | "pygments_lexer": "ipython3",
159 | "version": "3.6.3"
160 | }
161 | },
162 | "nbformat": 4,
163 | "nbformat_minor": 2
164 | }
165 |
--------------------------------------------------------------------------------
/neuronunit/unit_test/capabilities_tests.py:
--------------------------------------------------------------------------------
1 | import unittest
2 |
3 |
4 | class CapabilitiesTestCases(unittest.TestCase):
5 | def test_produces_swc(self):
6 | from neuronunit.capabilities.morphology import ProducesSWC
7 |
8 | p = ProducesSWC()
9 | self.assertEqual(NotImplementedError, p.produce_swc().__class__)
10 |
11 | def test_produces_membrane_potential(self):
12 | from neuronunit.capabilities import ProducesMembranePotential
13 |
14 | pmp = ProducesMembranePotential()
15 |         # Each accessor should raise until get_membrane_potential is implemented.
16 |         for method in (pmp.get_membrane_potential, pmp.get_mean_vm,
17 |                        pmp.get_median_vm, pmp.get_std_vm, pmp.get_iqr_vm,
18 |                        pmp.get_initial_vm, pmp.plot_membrane_potential):
19 |             with self.assertRaises(NotImplementedError):
20 |                 method()
23 |
24 | from neo.core import AnalogSignal
25 |
26 | class MyPMP(ProducesMembranePotential):
27 | def get_membrane_potential(self, signal=[[1, 2, 3], [4, 5, 6]], units="V"):
28 |
29 | import quantities as pq
30 |
31 | result = AnalogSignal(signal, units, sampling_rate=1 * pq.Hz)
32 | return result
33 |
34 | my_pmp = MyPMP()
35 | self.assertIsInstance(my_pmp.get_membrane_potential(), AnalogSignal)
36 | self.assertEqual(my_pmp.get_mean_vm().item(), 3.5)
37 | self.assertEqual(my_pmp.get_median_vm().item(), 3.5)
38 | self.assertAlmostEqual(my_pmp.get_std_vm().item(), 1.70782, 4)
39 | self.assertEqual(my_pmp.get_iqr_vm().item(), 2.5)
40 | self.assertEqual(my_pmp.get_mean_vm().item(), 3.5)
41 |
42 | import quantities as pq
43 |
44 | self.assertEqual(my_pmp.get_initial_vm()[0], 1 * pq.V)
45 | self.assertEqual(my_pmp.get_initial_vm()[1], 2 * pq.V)
46 | self.assertEqual(my_pmp.get_initial_vm()[2], 3 * pq.V)
47 |
48 | def test_produces_spikes(self):
49 | from neuronunit.capabilities import ProducesSpikes
50 |
51 | ps = ProducesSpikes()
52 |         for method in (ps.get_spike_count, ps.get_spike_train):
53 |             with self.assertRaises(NotImplementedError):
54 |                 method()
55 |
56 | from neo.core import SpikeTrain
57 |
58 | class MyPS(ProducesSpikes):
59 | def get_spike_train(self):
60 | from quantities import s
61 |
62 | return [SpikeTrain([3, 4, 5] * s, t_stop=10.0)]
63 |
64 | ps = MyPS()
65 | self.assertIsInstance(ps.get_spike_train()[0], SpikeTrain)
66 | self.assertEqual(ps.get_spike_count(), 1)
67 |
68 | def test_produces_action_potentials(self):
69 | from neuronunit.capabilities import ProducesActionPotentials
70 |
71 | pap = ProducesActionPotentials()
72 | with self.assertRaises(NotImplementedError):
73 | pap.get_APs()
74 |
75 | def test_receives_square_current(self):
76 | from neuronunit.capabilities import ReceivesSquareCurrent
77 |
78 | rsc = ReceivesSquareCurrent()
79 | with self.assertRaises(NotImplementedError):
80 | rsc.inject_square_current(0)
81 |
82 | def test_receives_current(self):
83 | from neuronunit.capabilities import ReceivesCurrent
84 |
85 | rc = ReceivesCurrent()
86 | with self.assertRaises(NotImplementedError):
87 | rc.inject_current(0)
88 |
89 | def test_nml2_channel_analysis(self):
90 | from neuronunit.capabilities.channel import NML2ChannelAnalysis
91 |
92 | nml2ca = NML2ChannelAnalysis()
93 | self.assertIsInstance(nml2ca.ca_make_lems_file(), NotImplementedError)
94 | self.assertIsInstance(nml2ca.ca_run_lems_file(), NotImplementedError)
95 | self.assertIsInstance(nml2ca.compute_iv_curve(None), NotImplementedError)
96 | self.assertIsInstance(nml2ca.plot_iv_curve(None, None), NotImplementedError)
97 |
98 |
99 | if __name__ == "__main__":
100 | unittest.main()
101 |
--------------------------------------------------------------------------------
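The pattern these tests exercise: each capability method raises NotImplementedError until a model implements it. A minimal hedged sketch (ConstantCell is hypothetical and just records the stimulus instead of simulating anything):

    from neuronunit.capabilities import ReceivesSquareCurrent

    class ConstantCell(ReceivesSquareCurrent):
        def inject_square_current(self, current):
            self.last_current = current  # store, don't simulate

    cell = ConstantCell()
    cell.inject_square_current({"amplitude": 0.1})  # no NotImplementedError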
/neuronunit/unit_test/doc_tests.py:
--------------------------------------------------------------------------------
1 | """Tests of NeuronUnit documentation notebooks"""
2 |
3 | from .base import *
4 |
5 |
6 | class DocumentationTestCase(NotebookTools, unittest.TestCase):
7 | """Testing documentation notebooks"""
8 |
9 | path = "../../docs"
10 |
11 | @unittest.skip("Skipping chapter 1")
12 | def test_chapter1(self):
13 | self.do_notebook("chapter1")
14 |
15 | @unittest.skip("Skipping chapter 2")
16 | def test_chapter2(self):
17 | self.do_notebook("chapter2")
18 |
19 | @unittest.skip("Skipping chapter 3")
20 | def test_chapter3(self):
21 | self.do_notebook("chapter3")
22 |
23 | @unittest.skip("Skipping chapter 4")
24 | def test_chapter4(self):
25 | self.do_notebook("chapter4")
26 |
27 | @unittest.skip("Skipping chapter 5")
28 | def test_chapter5(self):
29 | self.do_notebook("chapter5")
30 |
31 |
32 | if __name__ == "__main__":
33 | unittest.main()
34 |
--------------------------------------------------------------------------------
/neuronunit/unit_test/import_tests.py:
--------------------------------------------------------------------------------
1 | """Tests of imports of neuronunit submodules and other dependencies"""
2 |
3 | from .base import *
4 |
5 |
6 | class ImportTestCase(unittest.TestCase):
7 | """Testing imports of modules and packages"""
8 |
9 | def test_import_everything(self):
10 | import neuronunit
11 |
12 | # Recursively import all submodules
13 | import_all_modules(neuronunit, skip=["neuroconstruct"], verbose=True)
14 |
15 |
16 | if __name__ == "__main__":
17 | unittest.main()
18 |
--------------------------------------------------------------------------------
/neuronunit/unit_test/izhi_opt.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # coding: utf-8
3 | SILENT = True
4 | import warnings
5 |
6 | if SILENT:
7 | warnings.filterwarnings("ignore")
8 |
9 | import unittest
10 | import numpy as np
11 | import efel
12 | import matplotlib.pyplot as plt
13 | import quantities as qt
14 |
15 | from neuronunit.allenapi.allen_data_efel_features_opt import (
16 | opt_to_model,
17 | opt_setup,
18 | opt_exec,
19 | )
21 | from neuronunit.allenapi.utils import dask_map_function
22 |
23 | from neuronunit.optimization.model_parameters import (
24 | MODEL_PARAMS,
25 | BPO_PARAMS,
26 | to_bpo_param,
27 | )
28 | from neuronunit.optimization.optimization_management import inject_model_soma
31 | from jithub.models import model_classes
36 |
37 | from sciunit.scores import RelativeDifferenceScore
38 |
39 |
40 | class testOptimization(unittest.TestCase):
41 | def setUp(self):
42 | self.ids = [
43 | 324257146,
44 | 325479788,
45 | 476053392,
46 | 623893177,
47 | 623960880,
48 | 482493761,
49 | 471819401,
50 | ]
51 |
52 | def test_opt_1(self):
53 | specimen_id = self.ids[1]
54 | cellmodel = "IZHI"
55 |
56 | if cellmodel == "IZHI":
57 | model = model_classes.IzhiModel()
58 | if cellmodel == "MAT":
59 | model = model_classes.MATModel()
60 | if cellmodel == "ADEXP":
61 | model = model_classes.ADEXPModel()
62 |
63 | target_num_spikes = 9
64 |
65 | efel_filter_iterable = [
66 | "ISI_log_slope",
67 | "mean_frequency",
68 | "adaptation_index2",
69 | "first_isi",
70 | "ISI_CV",
71 | "median_isi",
72 | "Spikecount",
73 | "all_ISI_values",
74 | "ISI_values",
75 | "time_to_first_spike",
76 | "time_to_last_spike",
77 | "time_to_second_spike",
78 | ]
79 | [suite, target_current, spk_count, cell_evaluator, simple_cell] = opt_setup(
80 | specimen_id,
81 | cellmodel,
82 | target_num_spikes,
83 | template_model=model,
84 | fixed_current=False,
85 | cached=False,
86 | score_type=RelativeDifferenceScore,
87 | )
88 |
89 | NGEN = 165
90 | MU = 55
91 |
92 | mapping_funct = dask_map_function
93 | final_pop, hall_of_fame, logs, hist = opt_exec(
94 | MU, NGEN, mapping_funct, cell_evaluator, cxpb=0.4, mutpb=0.01
95 | )
96 | opt, target, scores, obs_preds, df = opt_to_model(
97 | hall_of_fame, cell_evaluator, suite, target_current, spk_count
98 | )
99 | best_ind = hall_of_fame[0]
100 | fitnesses = cell_evaluator.evaluate_with_lists(best_ind)
102 |         self.assertGreater(10.5, np.sum(fitnesses))
103 |
104 |
105 | if __name__ == "__main__":
106 | unittest.main()
107 |
--------------------------------------------------------------------------------
/neuronunit/unit_test/misc_nb.py:
--------------------------------------------------------------------------------
1 | """Tests of NeuronUnit documentation notebooks"""
2 |
3 | from .base import *
4 |
5 |
6 | class DocumentationTestCase(NotebookTools, unittest.TestCase):
7 |     """Testing miscellaneous unit-test notebooks"""
8 |
9 |     def test_relative_diff(self):
10 |         self.do_notebook("relative_diff_unit_test")
11 |
12 |
13 | if __name__ == "__main__":
14 | unittest.main()
15 |
--------------------------------------------------------------------------------
/neuronunit/unit_test/misc_tests.py:
--------------------------------------------------------------------------------
1 | """"Tests of ephys property measurements"""
2 |
3 | from .base import *
4 |
5 |
6 | class EphysPropertiesTestCase(NotebookTools, unittest.TestCase):
7 | """Testing sciunit tests of ephys properties"""
8 |
9 | path = "."
10 |
11 | # @unittest.skip("Skipping get_tau test")
12 | def test_get_tau(self):
13 | self.do_notebook("get_tau")
14 |
15 |
16 | if __name__ == "__main__":
17 | unittest.main()
18 |
--------------------------------------------------------------------------------
/neuronunit/unit_test/model_tests.py:
--------------------------------------------------------------------------------
1 | """Tests of NeuronUnit model classes"""
2 |
3 | from .base import *
4 | import quantities as pq
5 | import numpy as np
6 | from neo.core import AnalogSignal
7 |
8 |
9 | class ReducedModelTestCase(unittest.TestCase):
10 | """Test instantiation of the reduced model"""
11 |
12 | def setUp(self):
13 | from neuronunit.models.reduced import ReducedModel
14 |
15 | self.ReducedModel = ReducedModel
16 |
17 | def runTest(self):
18 | pass # Needed so that Python<3 can access the path attribute.
19 |
20 | @property
21 | def path(self):
22 | result = os.path.join(
23 | __file__, "..", "..", "models", "NeuroML2", "LEMS_2007One.xml"
24 | )
25 | result = os.path.realpath(result)
26 | return result
27 | 
28 |     @unittest.skip("Ignoring: unclear why jNeuroML breaks")
30 | def test_reducedmodel_jneuroml(self):
31 | model = self.ReducedModel(self.path, backend="jNeuroML")
32 |
33 | @unittest.skip(
34 | "Ignoring NEURON until we make it an install requirement"
35 | ) # If(OSX,"NEURON unreliable on OSX")
36 | def test_reducedmodel_neuron(self):
37 | model = self.ReducedModel(self.path, backend="NEURON")
38 |
39 |
40 | class ExtraCapabilitiesTestCase(NotebookTools, unittest.TestCase):
41 | """Testing extra capability checks"""
42 |
43 | path = "."
44 | 
45 |     @unittest.skip("Ignoring: unclear why jNeuroML breaks")
51 | def test_receives_current(self):
52 | self.do_notebook("nml_extra_capability_check")
53 |
54 |
55 | class GeppettoBackendTestCase(unittest.TestCase):
56 | """Testing GeppettoBackend"""
57 |
58 | def test_geppetto_backend(self):
59 | from neuronunit.models.backends.geppetto import GeppettoBackend
60 |
61 | gb = GeppettoBackend()
62 | # gb.init_backend()
63 | gb._backend_run()
64 |
65 |
66 | class HasSegmentTestCase(unittest.TestCase):
67 | """Testing HasSegment and SingleCellModel"""
68 |
69 |     @unittest.skip("this test does not really work")
70 |     def test_(self):
72 | from neuronunit.models.section_extension import HasSegment
73 |
74 | hs = HasSegment()
75 |
76 | def section(location):
77 | return location
78 |
79 | hs.setSegment(section)
80 | self.assertEqual(hs.getSegment(), 0.5)
81 |
82 |
83 | class VeryReducedModelTestCase(unittest.TestCase):
84 | def setUp(self):
85 | from sciunit.models.backends import Backend
86 |
87 | class MyBackend(Backend):
88 | def _backend_run(self) -> str:
89 |                 return str(self.run_params)  # dummy result for testing
90 |
91 | def local_run(self):
92 | return
93 |
94 | def set_run_params(self, **params):
95 | self.run_params = params
96 |
97 | def inject_square_current(*args, **kwargs):
98 | pass
99 |
100 | self.backend = MyBackend
101 | from sciunit.models.backends import register_backends
103 | 
104 |         register_backends({"My": self.backend})
105 | 
106 |     @unittest.skip(
110 | "currently coded to inherit from static model"
111 | ) # If(OSX,"NEURON unreliable on OSX")
112 | def test_very_reduced_using_lems(self):
113 | from neuronunit.models.reduced import VeryReducedModel
114 |
116 |         vrm = VeryReducedModel(name="test very reduced model", backend="My", attrs={})
120 |
121 | vrm.rerun = False
122 | vrm.run_defaults = {"param3": 3}
123 | vrm.set_default_run_params(param1=1)
124 |
125 | vrm.set_attrs(a="a")
126 | vrm.get_membrane_potential()
127 | vrm.get_APs()
128 | vrm.get_spike_train()
129 | vrm.inject_square_current(0)
130 |
131 | self.assertIsInstance(vrm.get_backend(), self.backend)
132 | vrm.run(param2=2)
133 | 
134 |     @unittest.skip("Ignoring: fails to inject current")
136 | def test_very_reduced_not_using_lems(self):
137 | from neuronunit.models.very_reduced import VeryReducedModel
138 | import quantities as pq
139 |
140 | class MyVRM(VeryReducedModel):
141 | def run(self):
142 | self.results = {
143 | "vm": [0.01 * pq.V, 0.05 * pq.V, 0 * pq.V],
144 | "t": [0.1 * pq.s, 0.2 * pq.s, 0.3 * pq.s],
145 | }
146 |
147 |         vrm = MyVRM(name="test very reduced model", backend="My", attrs={})
148 | vrm.get_APs()
149 | vrm.get_spike_train()
150 | #vrm.inject_square_current(0.01 * pq.mA)
151 | pass
152 |
153 |
154 | class StaticExternalTestCase(unittest.TestCase):
155 | def setUp(self):
156 | array = np.ones(10000) * -60.0 * pq.mV
157 | dt = 1 * pq.ms
158 | self.vm = AnalogSignal(array, units=pq.mV, sampling_rate=1.0 / dt)
159 |
160 | def test_external_model(self):
161 | from neuronunit.models.static import ExternalModel
162 | import quantities as pq
163 |
164 | em = ExternalModel(name="test external model obj")
165 | em.set_membrane_potential(self.vm)
166 | em.set_model_attrs({"a": 1, "b": 2, "c": 3})
167 | vm = em.get_membrane_potential()
168 | self.assertIsInstance(vm, AnalogSignal)
169 |
170 | def test_static_model(self):
171 | from neuronunit.models.static import StaticModel
172 |
173 | with self.assertRaises(TypeError):
174 | sm = StaticModel(None)
175 | sm = StaticModel(self.vm)
176 | sm.inject_square_current(0.1 * pq.mA)
177 |
178 |
179 | if __name__ == "__main__":
180 | unittest.main()
181 |
--------------------------------------------------------------------------------
/neuronunit/unit_test/nml_extra_capability_check.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# Check for capabilities at the instance level in a LEMSModel"
8 | ]
9 | },
10 | {
11 | "cell_type": "code",
12 | "execution_count": 1,
13 | "metadata": {},
14 | "outputs": [],
15 | "source": [
16 | "import os\n",
17 | "import neuronunit\n",
18 | "NU = neuronunit.__path__[0]"
19 | ]
20 | },
21 | {
22 | "cell_type": "code",
23 | "execution_count": 2,
24 | "metadata": {},
25 | "outputs": [],
26 | "source": [
27 | "from neuronunit.models.reduced import ReducedModel"
28 | ]
29 | },
30 | {
31 | "cell_type": "code",
32 | "execution_count": 3,
33 | "metadata": {},
34 | "outputs": [],
35 | "source": [
36 | "# The regular Izhikevich model\n",
37 | "model = ReducedModel(os.path.join(NU,'models/NeuroML2/LEMS_2007One.xml'))\n",
38 | "\n",
39 | "# Same model with only explicitInput removed. \n",
40 | "model_no_input_1 = ReducedModel(os.path.join(NU,'models/NeuroML2/fragments/LEMS_2007One-no-input-1.xml'))\n",
41 | "\n",
42 | "# Same model with only pulseGenerator removed. \n",
43 | "model_no_input_2 = ReducedModel(os.path.join(NU,'models/NeuroML2/fragments/LEMS_2007One-no-input-2.xml'))"
44 | ]
45 | },
46 | {
47 | "cell_type": "code",
48 | "execution_count": 4,
49 | "metadata": {},
50 | "outputs": [],
51 | "source": [
52 | "assert model.has_pulse_generator() # Because the regular model should have the components it needs\n",
53 | "assert not model_no_input_1.has_pulse_generator() # Because part of the implementation has been removed \n",
54 | "assert not model_no_input_2.has_pulse_generator() # Because another part of the implemntation has been removed"
55 | ]
56 | },
57 | {
58 | "cell_type": "code",
59 | "execution_count": 5,
60 | "metadata": {},
61 | "outputs": [],
62 | "source": [
63 | "from neuronunit.capabilities import ReceivesSquareCurrent, ProducesActionPotentials"
64 | ]
65 | },
66 | {
67 | "cell_type": "code",
68 | "execution_count": 6,
69 | "metadata": {},
70 | "outputs": [
71 | {
72 | "data": {
73 | "text/plain": [
74 | "True"
75 | ]
76 | },
77 | "execution_count": 6,
78 | "metadata": {},
79 | "output_type": "execute_result"
80 | }
81 | ],
82 | "source": [
83 | "ReceivesSquareCurrent.check(model) # Should return True"
84 | ]
85 | },
86 | {
87 | "cell_type": "code",
88 | "execution_count": 7,
89 | "metadata": {},
90 | "outputs": [
91 | {
92 | "data": {
93 | "text/plain": [
94 | "(False, False)"
95 | ]
96 | },
97 | "execution_count": 7,
98 | "metadata": {},
99 | "output_type": "execute_result"
100 | }
101 | ],
102 | "source": [
103 | "ReceivesSquareCurrent.check(model_no_input_1), ReceivesSquareCurrent.check(model_no_input_2) # Should return False, False"
104 | ]
105 | },
106 | {
107 | "cell_type": "code",
108 | "execution_count": 8,
109 | "metadata": {},
110 | "outputs": [
111 | {
112 | "data": {
113 | "text/plain": [
114 | "True"
115 | ]
116 | },
117 | "execution_count": 8,
118 | "metadata": {},
119 | "output_type": "execute_result"
120 | }
121 | ],
122 | "source": [
123 | "ProducesActionPotentials.check(model) # Should return True"
124 | ]
125 | },
126 | {
127 | "cell_type": "code",
128 | "execution_count": 9,
129 | "metadata": {},
130 | "outputs": [
131 | {
132 | "data": {
133 | "text/plain": [
134 | "(True, True)"
135 | ]
136 | },
137 | "execution_count": 9,
138 | "metadata": {},
139 | "output_type": "execute_result"
140 | }
141 | ],
142 | "source": [
143 | "# Should return True, True because the removed components have nothing to do with action potential generating capabilities\n",
144 | "ProducesActionPotentials.check(model_no_input_1), ProducesActionPotentials.check(model_no_input_2)"
145 | ]
146 | },
147 | {
148 | "cell_type": "code",
149 | "execution_count": 10,
150 | "metadata": {},
151 | "outputs": [
152 | {
153 | "data": {
154 | "text/plain": [
155 | "([],\n",
156 | " [neuronunit.capabilities.ReceivesSquareCurrent],\n",
157 | " [neuronunit.capabilities.ReceivesSquareCurrent])"
158 | ]
159 | },
160 | "execution_count": 10,
161 | "metadata": {},
162 | "output_type": "execute_result"
163 | }
164 | ],
165 | "source": [
166 | "# The first list should be empty because this model passes all extra capability checks. This list being empty\n",
167 | "# means that it should be safe to use. The other two models will contain ReceivesSquareCurrent, because this check failed\n",
168 | "# for each of those models. \n",
169 | "model.failed_extra_capabilities, model_no_input_1.failed_extra_capabilities, model_no_input_2.failed_extra_capabilities"
170 | ]
171 | }
172 | ],
173 | "metadata": {
174 | "kernelspec": {
175 | "display_name": "Python 3",
176 | "language": "python",
177 | "name": "python3"
178 | },
179 | "language_info": {
180 | "codemirror_mode": {
181 | "name": "ipython",
182 | "version": 3
183 | },
184 | "file_extension": ".py",
185 | "mimetype": "text/x-python",
186 | "name": "python",
187 | "nbconvert_exporter": "python",
188 | "pygments_lexer": "ipython3",
189 | "version": "3.6.7"
190 | }
191 | },
192 | "nbformat": 4,
193 | "nbformat_minor": 2
194 | }
195 |
--------------------------------------------------------------------------------
/neuronunit/unit_test/observation_tests.py:
--------------------------------------------------------------------------------
1 | """Unit tests for observations."""
2 |
3 | from sciunit.utils import NotebookTools
4 | import unittest
5 |
6 |
7 | class ObservationsTestCase(unittest.TestCase, NotebookTools):
8 | """Unit tests for the sciunit module"""
9 |
10 | path = "."
11 |
12 | def test_observation_validation(self):
13 | """Test validation of observations against the `observation_schema`."""
14 | self.do_notebook("validate_observation_vm")
15 |
--------------------------------------------------------------------------------
/neuronunit/unit_test/opt_ephys_properties.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # coding: utf-8
3 | import pandas as pd
4 | import matplotlib.pyplot as plt
5 | import pickle
6 | import glob
7 | import os
8 | import scipy
9 | import quantities as qt
10 | import numpy as np
11 | import unittest
11 |
12 | from neuronunit.optimization.optimization_management import _opt_
13 | from neuronunit.optimization.optimization_management import TSD
14 | from neuronunit.optimization.model_parameters import MODEL_PARAMS, BPO_PARAMS
15 | from neuronunit.allenapi.allen_data_driven import (
16 | make_allen_hard_coded_limited as make_allen_hard_coded,
17 | )
18 | from neuronunit.allenapi import neuroelectroapi
19 |
20 | from sciunit.scores import RelativeDifferenceScore, ZScore
21 | from sciunit import TestSuite
22 | from sciunit.utils import config_set, config_get
23 |
24 | config_set("PREVALIDATE", False)
25 | import warnings
26 | warnings.filterwarnings("ignore")
28 |
29 |
30 | class testOptimizationEphysCase(unittest.TestCase):
31 | def setUp(self):
32 | _, _, _, a_cells = make_allen_hard_coded()
33 | self.MU = 10
34 | self.NGEN = 10
35 | self.a_cells = a_cells
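36 | # Reuse locally cached constraints when available; otherwise rebuild them
37 | # from NeuroElectro (slow, and requires network access).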
36 | if os.path.exists("processed_multicellular_constraints.p"):
37 | with open("processed_multicellular_constraints.p", "rb") as f:
38 | experimental_constraints = pickle.load(f)
39 | else:
40 | experimental_constraints = neuroelectroapi.process_all_cells()
41 |
42 | NC = TSD(experimental_constraints["Neocortex pyramidal cell layer 5-6"])
43 | NC.pop("InjectedCurrentAPWidthTest", None)
44 | NC.pop("InjectedCurrentAPAmplitudeTest", None)
45 | NC.pop("InjectedCurrentAPThresholdTest", None)
46 | self.NC = NC
47 | CA1 = TSD(experimental_constraints["Hippocampus CA1 pyramidal cell"])
48 | CA1.pop("InjectedCurrentAPWidthTest", None)
49 | CA1.pop("InjectedCurrentAPAmplitudeTest", None)
50 | CA1.pop("InjectedCurrentAPThresholdTest", None)
51 | self.CA1 = CA1
52 |
53 | def test_allen_good_agreement_opt(self):
54 | [
55 | final_pop,
56 | hall_of_fame,
57 | logs,
58 | hist,
59 | best_ind,
60 | best_fit_val,
61 | opt,
62 | obs_preds,
63 | df,
64 | ] = _opt_(
65 | self.a_cells["471819401"],
66 | BPO_PARAMS,
67 | "471819401",
68 | "ADEXP",
69 | self.MU,
70 | self.NGEN,
71 | "IBEA",
72 | use_streamlit=False,
73 | score_type=RelativeDifferenceScore,
74 | )
75 |
76 | def test_allen_fi_curve_opt(self):
77 | fi_test = self.a_cells["fi_curve"]["FITest"]
78 | fi_test.observation["sem"] = fi_test.observation["mean"]
79 | fi_test.observation["std"] = fi_test.observation["mean"]
82 | [
83 | final_pop,
84 | hall_of_fame,
85 | logs,
86 | hist,
87 | best_ind,
88 | best_fit_val,
89 | opt,
90 | obs_preds,
91 | df,
92 | ] = _opt_(
93 | self.a_cells["fi_curve"],
94 | BPO_PARAMS,
95 | "fi_curve",
96 | "ADEXP",
97 | self.MU,
98 | self.NGEN,
99 | "IBEA",
100 | use_streamlit=False,
101 | score_type=RelativeDifferenceScore,
102 | )
103 |
104 | def test_neuro_electro_adexp_opt(self):
105 | self.MU = 10
106 | self.NGEN = 10
107 | [
108 | final_pop,
109 | hall_of_fame,
110 | logs,
111 | hist,
112 | best_ind,
113 | best_fit_val,
114 | opt,
115 | obs_preds,
116 | df,
117 | ] = _opt_(
118 | self.NC,
119 | BPO_PARAMS,
120 | "Neocortex pyramidal cell layer 5-6",
121 | "ADEXP",
122 | self.MU,
123 | self.NGEN,
124 | "IBEA",
125 | use_streamlit=False,
126 | score_type=ZScore,
127 | )
128 |
129 | """
130 | Rick, some of these bellow are unit tests
131 | that cannot pass without changes to sciunit complete
132 | """
133 |
134 | @unittest.skip
135 | def test_neuro_electro_adexp_opt_ca1(self):
136 | self.MU = 35
137 | self.NGEN = 10
138 | final_pop, hall_of_fame, logs, hist, best_ind, best_fit_val, opt = _opt_(
139 | self.CA1,
140 | BPO_PARAMS,
141 | "Hippocampus CA1 pyramidal cell",
142 | "ADEXP",
143 | self.MU,
144 | self.NGEN,
145 | "IBEA",
146 | score_type=ZScore,
147 | )
148 |
149 | @unittest.skip
150 | def test_neuro_electro_izhi_opt_pyr(self):
151 | self.MU = 100
152 | self.NGEN = 1
153 | [
154 | final_pop,
155 | hall_of_fame,
156 | logs,
157 | hist,
158 | best_ind,
159 | best_fit_val,
160 | opt,
161 | obs_preds,
162 | df,
163 | ] = _opt_(
164 | self.NC,
165 | BPO_PARAMS,
166 | "Neocortex pyramidal cell layer 5-6",
167 | "IZHI",
168 | self.MU,
169 | self.NGEN,
170 | "IBEA",
171 | score_type=ZScore,
172 | )
173 | old_result = np.sum(best_fit_val)
174 | self.NGEN = 35
175 |
176 | final_pop, hall_of_fame, logs, hist, best_ind, best_fit_val, opt = _opt_(
177 | self.NC,
178 | BPO_PARAMS,
179 | "Neocortex pyramidal cell layer 5-6",
180 | "IZHI",
181 | self.MU,
182 | self.NGEN,
183 | "IBEA",
184 | use_streamlit=False,
185 | score_type=ZScore,
186 | )
187 | new_result = np.sum(best_fit_val)
188 | assert new_result < old_result
189 |
--------------------------------------------------------------------------------
/neuronunit/unit_test/resource_tests.py:
--------------------------------------------------------------------------------
1 | """"Tests of external resources NeuronUnit may access"""
2 |
3 |
4 | from .base import *
5 |
6 |
7 | class NeuroElectroTestCase(unittest.TestCase):
8 | """Testing retrieval of data from NeuroElectro.org"""
9 |
10 | @unittest.skipUnless(
11 | neuroelectro.is_neuroelectro_up(), "NeuroElectro URL is not responsive"
12 | )
13 | def test_neuroelectro(self):
14 | x = neuroelectro.NeuroElectroDataMap()
15 | x.set_neuron(id=72)
16 | x.set_ephysprop(id=2)
17 | x.set_article(pmid=18667618)
18 | x.get_values()
19 | x.check()
20 |
21 | x = neuroelectro.NeuroElectroSummary()
22 | x.set_neuron(id=72)
23 | x.set_ephysprop(id=2)
24 | x.get_values()
25 | x.check()
26 |
27 |
28 | class BlueBrainTestCase(NotebookTools, unittest.TestCase):
29 |
30 | path = "."
31 |
32 | @unittest.skipUnless(bbp.is_bbp_up(), "Blue Brain URL is not responsive")
33 | def test_bluebrain(self):
34 | self.do_notebook("bbp")
35 |
36 |
37 | class AIBSTestCase(unittest.TestCase):
38 | @unittest.skipUnless(aibs.is_aibs_up(), "AIBS URL is not responsive")
39 | def test_aibs(self):
40 | # Internal ID that AIBS uses for a particular Scnn1a-Tg2-Cre
41 | # primary visual area, layer 5 neuron.
42 | dataset_id = 354190013
44 | observation = aibs.get_observation(dataset_id, "rheobase")
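45 | # A light sanity check (sketch; assumes the observation is a dict of
46 | # summary statistics, as elsewhere in this suite):
47 | # self.assertIn("mean", observation)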
45 |
46 |
47 | if __name__ == "__main__":
48 | unittest.main()
49 |
--------------------------------------------------------------------------------
/neuronunit/unit_test/rheobase_dtc_test.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # coding: utf-8
3 | import unittest
4 |
5 | import matplotlib
6 |
7 | import numpy as np
8 | from neuronunit.optimization.model_parameters import (
9 | MODEL_PARAMS,
10 | BPO_PARAMS,
11 | to_bpo_param,
12 | )
13 | from neuronunit.optimization.optimization_management import (
14 | dtc_to_rheo,
15 | inject_and_plot_model,
16 | )
17 | from neuronunit.optimization.data_transport_container import DataTC
18 | from jithub.models import model_classes
19 | import matplotlib.pyplot as plt
20 | import quantities as qt
21 |
22 |
23 | class testOptimization(unittest.TestCase):
24 | def setUp(self):
25 | pass
26 |
27 | def test_opt_1(self):
28 | cellmodel = "ADEXP"
29 |
30 | if cellmodel == "IZHI":
31 | model = model_classes.IzhiModel()
32 | if cellmodel == "MAT":
33 | model = model_classes.MATModel()
34 | if cellmodel == "ADEXP":
35 | model = model_classes.ADEXPModel()
36 |
37 | dtc = DataTC(backend=cellmodel)
38 | assert dtc.backend == cellmodel
39 | #dtc._backend = model._backend
40 | #dtc.attrs = model.attrs
41 | dtc.params = {k: np.mean(v) for k, v in MODEL_PARAMS[cellmodel].items()}
42 | other_params = BPO_PARAMS[cellmodel]
43 | dtc = dtc_to_rheo(dtc)
44 | print(dtc.rheobase)
45 | assert dtc.rheobase is not None
46 | self.assertIsNotNone(dtc.rheobase)
47 | vm, plt, dtc = inject_and_plot_model(dtc, plotly=False)
48 | self.assertIsNotNone(vm)
49 | model = dtc.dtc_to_model()
50 | self.assertIsNotNone(model)
51 |
52 |
53 | if __name__ == "__main__":
54 | unittest.main()
55 |
--------------------------------------------------------------------------------
/neuronunit/unit_test/rheobase_model_test.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # coding: utf-8
3 | import unittest
4 |
5 | import matplotlib
6 |
7 | import numpy as np
8 | from neuronunit.optimization.model_parameters import (
9 | MODEL_PARAMS,
10 | BPO_PARAMS,
11 | to_bpo_param
12 | )
13 | from neuronunit.optimization.optimization_management import (
14 | model_to_rheo,
15 | inject_and_plot_model
16 | )
17 | #from neuronunit.optimization.data_transport_container import DataTC
18 | from jithub.models import model_classes
19 | import matplotlib.pyplot as plt
20 | import quantities as qt
21 |
22 |
23 | class testModelRheobase(unittest.TestCase):
24 | def setUp(self):
25 | pass
26 |
27 | def test_opt_1(self):
28 | model_type = "ADEXP"
29 |
30 | if model_type == "IZHI":
31 | from jithub.models.model_classes import IzhiModel
32 | cellmodel = IzhiModel()
33 | # model = model_classes.IzhiModel()
34 | #if cellmodel == "MAT":
35 | # model = model_classes.MATModel()
36 | if model_type == "ADEXP":
37 | from jithub.models.model_classes import ADEXPModel
38 | cellmodel = ADEXPModel()
39 |
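40 | # Use the midpoint of each parameter's search range as the starting
41 | # point; MODEL_PARAMS values are assumed here to be (min, max) bounds,
42 | # so np.mean of each pair gives the centroid of the search space.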
40 | cellmodel.params = {k: np.mean(v) for k, v in MODEL_PARAMS[model_type].items()}
41 | cellmodel = model_to_rheo(cellmodel)
42 | print(cellmodel.rheobase)
43 | assert cellmodel.rheobase is not None
44 | self.assertIsNotNone(cellmodel.rheobase)
45 | vm, plt, cellmodel = inject_and_plot_model(cellmodel, plotly=False)
46 | self.assertIsNotNone(vm)
47 |
48 | self.assertIsNotNone(cellmodel)
49 |
50 |
51 | if __name__ == "__main__":
52 | unittest.main()
53 |
--------------------------------------------------------------------------------
/neuronunit/unit_test/sciunit_tests.py:
--------------------------------------------------------------------------------
1 | """Tests of SciUnit integration"""
2 |
3 | from .base import *
4 |
5 |
6 | class SciUnitTestCase(NotebookTools, unittest.TestCase):
7 | """Testing documentation notebooks"""
8 |
9 | path = "."
10 |
11 | def test_serialization(self):
12 | self.do_notebook("serialization_test")
13 |
--------------------------------------------------------------------------------
/neuronunit/unit_test/scores_unit_test.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # coding: utf-8
3 |
4 | import unittest
5 | import matplotlib
6 |
7 | matplotlib.use("Agg")
8 | SILENT = True
9 | import warnings
10 |
11 | if SILENT:
12 | warnings.filterwarnings("ignore")
13 |
14 | from neuronunit.allenapi.allen_data_driven import opt_setup, opt_setup_two, opt_exec
15 | from neuronunit.allenapi.allen_data_driven import opt_to_model, wrap_setups
16 | from neuronunit.allenapi.utils import dask_map_function
17 | from neuronunit.optimization.model_parameters import (
18 | MODEL_PARAMS,
19 | BPO_PARAMS,
20 | to_bpo_param,
21 | )
22 | from neuronunit.optimization.optimization_management import (
23 | inject_and_plot_model,
24 | )
25 | import numpy as np
26 | from neuronunit.models.optimization_model_layer import OptimizationModel
35 | from jithub.models import model_classes
36 | import matplotlib.pyplot as plt
37 | import quantities as qt
38 | import os
39 |
40 | from sciunit.scores import RelativeDifferenceScore, ZScore
41 | from sciunit.utils import config_set, config_get
42 |
43 | config_set("PREVALIDATE", False)
44 | assert config_get("PREVALIDATE") is False
45 |
46 |
47 | class testOptimizationAllenMultiSpike(unittest.TestCase):
48 | def setUp(self):
49 | # In principle any index into the data should work;
50 | # index 1 is chosen here arbitrarily.
52 | self.ids = [
53 | 324257146,
54 | 325479788,
55 | 476053392,
56 | 623893177,
57 | 623960880,
58 | 482493761,
59 | 471819401,
60 | ]
61 | self.specimen_id = self.ids[1]
62 |
63 | def optimize_job(self, model_type, score_type=ZScore):
64 | find_sweep_with_n_spikes = 8
65 | from jithub.models.model_classes import ADEXPModel
66 | model = ADEXPModel()
67 | model.params = BPO_PARAMS[model_type]
68 | fixed_current = 122 * qt.pA
69 | if model_type == "ADEXP":
70 | NGEN = 355
83 | MU = 26
84 | else:
85 | NGEN = 45
86 | MU = 100
87 |
88 | mapping_funct = dask_map_function
89 | cell_evaluator, simple_cell, suite, target_current, spk_count = wrap_setups(
90 | self.specimen_id,
91 | model_type,
92 | find_sweep_with_n_spikes,
93 | template_model=model,
94 | fixed_current=False,
95 | cached=False,
96 | score_type=score_type,
97 | )
98 | final_pop, hall_of_fame, logs, hist = opt_exec(
99 | MU, NGEN, mapping_funct, cell_evaluator
100 | )
101 | opt, target, scores, obs_preds, df = opt_to_model(
102 | hall_of_fame, cell_evaluator, suite, target_current, spk_count
103 | )
104 | best_ind = hall_of_fame[0]
105 | fitnesses = cell_evaluator.evaluate_with_lists(best_ind)
106 | target.vm_soma = suite.traces["vm_soma"]
107 | return np.sum(fitnesses)
108 |
109 | def test_opt_relative_diff(self):
110 | model_type = "ADEXP"
111 | sum_fit = self.optimize_job(model_type, score_type=RelativeDifferenceScore)
113 | assert sum_fit < 100
117 |
118 | # Skipped to keep CI runtimes short and avoid timeouts.
119 | @unittest.skip
120 | def test_opt_ZScore(self):
121 | model_type = "ADEXP"
122 | sum_fit = self.optimize_job(model_type, score_type=ZScore)
123 | assert sum_fit < 2.1
124 |
125 | @unittest.skip
126 | def test_opt_relative_diff_izhi(self):
127 | model_type = "IZHI"
128 | sum_fit = self.optimize_job(model_type, score_type=RelativeDifferenceScore)
129 | assert sum_fit < 32.0
130 |
131 | # Skipped to keep CI runtimes short and avoid timeouts.
132 | @unittest.skip
133 | def test_opt_ZScore_izhi(self):
134 | model_type = "IZHI"
135 | sum_fit = self.optimize_job(model_type, score_type=ZScore)
136 | assert sum_fit < 2.1
137 |
--------------------------------------------------------------------------------
/neuronunit/unit_test/scores_unit_test.py.orig:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # coding: utf-8
3 |
4 | import unittest
5 | import matplotlib
6 | matplotlib.use('Agg')
7 |
11 | from neuronunit.allenapi.allen_data_driven import opt_setup, opt_setup_two, opt_exec
12 | from neuronunit.allenapi.allen_data_driven import opt_to_model,wrap_setups
13 | from neuronunit.allenapi.utils import dask_map_function
14 | from neuronunit.optimization.optimization_management import check_bin_vm_soma
15 | from neuronunit.optimization.model_parameters import MODEL_PARAMS, BPO_PARAMS, to_bpo_param
16 | from neuronunit.optimization.optimization_management import dtc_to_rheo,inject_and_plot_model
17 | import numpy as np
18 | from neuronunit.optimization.data_transport_container import DataTC
19 | import efel
20 | from jithub.models import model_classes
21 | import matplotlib.pyplot as plt
22 | import quantities as qt
23 | import os
24 | from sciunit.scores import RelativeDifferenceScore,ZScore
25 | import copy
26 |
27 |
28 |
29 | class testOptimization(unittest.TestCase):
30 | def setUp(self):
32 | self.ids = [ 324257146,
33 | 325479788,
34 | 476053392,
35 | 623893177,
36 | 623960880,
37 | 482493761,
38 | 471819401
39 | ]
40 | self.specimen_id = self.ids[1]
41 |
42 | def optimize_job(self,model_type,score_type=ZScore):
42 | find_sweep_with_n_spikes = 8
43 | dtc = DataTC()
44 | dtc.backend = model_type
45 | model = dtc.dtc_to_model()
46 | model.params = BPO_PARAMS[model_type]
47 | fixed_current = 122 *qt.pA
48 | if model_type == "ADEXP":
49 | NGEN = 100
50 | MU = 20
51 | else:
52 | NGEN = 100
53 | MU = 100
54 |
55 | mapping_funct = dask_map_function
56 | cell_evaluator,simple_cell,suite,target_current,spk_count = wrap_setups(
57 | self.specimen_id,
58 | model_type,
59 | find_sweep_with_n_spikes,
60 | template_model=copy.copy(model),
61 | fixed_current=False,
62 | cached=False,
63 | score_type=score_type
64 | )
65 | final_pop, hall_of_fame, logs, hist = opt_exec(MU,NGEN,mapping_funct,cell_evaluator)
66 | opt,target = opt_to_model(hall_of_fame,cell_evaluator,suite, target_current, spk_count)
67 | best_ind = hall_of_fame[0]
68 | fitnesses = cell_evaluator.evaluate_with_lists(best_ind)
69 | target.vm_soma = suite.traces['vm15']
70 | check_bin_vm_soma(target,opt)
71 | return np.sum(fitnesses)
72 |
73 | def test_opt_relative_diff(self):
73 | model_type = "ADEXP"
74 | sum_fit = self.optimize_job(model_type,score_type=RelativeDifferenceScore)
75 | assert sum_fit<9.0
76 |
77 | def test_opt_ZScore(self):
77 | model_type = "ADEXP"
78 | sum_fit = self.optimize_job(model_type,score_type=ZScore)
79 | assert sum_fit<0.7
80 |
81 | def test_opt_relative_diff_izhi(self):
82 | model_type = "IZHI"
83 | sum_fit = self.optimize_job(model_type,score_type=RelativeDifferenceScore)
84 | assert sum_fit<9.0
85 |
86 | def test_opt_ZScore_izhi(self):
87 | model_type = "IZHI"
88 | sum_fit = self.optimize_job(model_type,score_type=ZScore)
89 | assert sum_fit<0.7
90 |
--------------------------------------------------------------------------------
/neuronunit/unit_test/test_tests.py:
--------------------------------------------------------------------------------
1 | """Tests of NeuronUnit test classes."""
2 |
3 | import os
4 |
5 | from .base import unittest, nu_tests, ReducedModel, NU_BACKEND, NU_HOME
6 | from neuronunit.neuroelectro import is_neuroelectro_up
7 |
8 |
9 | def quick_test_builder(test_class=None, backend=NU_BACKEND):
10 | if test_class is None:
11 | from neuronunit.tests.fi import RheobaseTestP as test_class
12 | else:
13 | if isinstance(test_class, str):
14 | test_class = nu_tests.__dict__[test_class]
15 | path = os.path.join(NU_HOME, "models/NeuroML2/LEMS_2007One.xml")
16 | model = ReducedModel(path, backend=backend)
17 | observation = test_class.neuroelectro_summary_observation({"nlex_id": "nifext_50"})
18 | test = test_class(observation=observation)
19 | return model, test
20 |
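21 | # Example usage of `quick_test_builder` (a minimal sketch; assumes the
22 | # default backend is available and NeuroElectro is reachable):
23 | #
24 | #     model, test = quick_test_builder("RheobaseTest")
25 | #     score = test.judge(model)
26 | #     score.summarize()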
21 |
22 | class TestsTestCase(object):
23 | """Abstract base class for testing tests."""
24 |
25 | def setUp(self):
26 | from .model_tests import ReducedModelTestCase
27 |
28 | path = ReducedModelTestCase().path
29 | self.model = ReducedModel(path, backend=NU_BACKEND)
30 | if not is_neuroelectro_up():
31 | return self.skipTest("Neuroelectro is down")
32 |
33 | def get_observation(self, cls):
34 | neuron = {"nlex_id": "nifext_50"} # Layer V pyramidal cell
35 | return cls.neuroelectro_summary_observation(neuron)
36 |
37 | def make_test(self, cls):
38 | observation = self.get_observation(cls)
39 | self.test = cls(observation=observation)
40 | return self.test
41 |
42 | def run_test(self, cls):
43 | self.make_test(cls)
44 | score = self.test.judge(self.model)
45 | score.summarize()
46 | return score.score
47 |
48 |
49 | class TestsPassiveTestCase(TestsTestCase, unittest.TestCase):
50 | """Test passive validation tests."""
51 |
52 | def test_inputresistance(self):
53 | from neuronunit.tests.passive import InputResistanceTest as T
54 |
55 | score = self.run_test(T)
56 | self.assertTrue(-0.6 < score < -0.5, "Score was %.2f" % score)
57 |
58 | def test_restingpotential(self):
59 | from neuronunit.tests.passive import RestingPotentialTest as T
60 |
61 | score = self.run_test(T)
62 | self.assertTrue(1.2 < score < 1.3, "Score was %.2f" % score)
63 |
64 | def test_capacitance(self):
65 | from neuronunit.tests.passive import CapacitanceTest as T
66 |
67 | score = self.run_test(T)
68 | self.assertTrue(-0.35 < score < -0.25, "Score was %.2f" % score)
69 |
70 | def test_timeconstant(self):
71 | from neuronunit.tests.passive import TimeConstantTest as T
72 |
73 | score = self.run_test(T)
74 | self.assertTrue(-1.00 < score < -0.90, "Score was %.2f" % score)
75 |
76 |
77 | class TestsWaveformTestCase(TestsTestCase, unittest.TestCase):
78 | """Test passive validation tests"""
79 |
80 | def test_ap_width(self):
81 | from neuronunit.tests.waveform import InjectedCurrentAPWidthTest as T
82 |
83 | score = self.run_test(T)
84 | self.assertTrue(-0.65 < score < -0.55)
85 |
86 | def test_ap_amplitude(self):
87 | from neuronunit.tests.waveform import InjectedCurrentAPAmplitudeTest as T
88 |
89 | score = self.run_test(T)
90 | self.assertTrue(-1.7 < score < -1.6)
91 |
92 | def test_ap_threshold(self):
93 | from neuronunit.tests.waveform import InjectedCurrentAPThresholdTest as T
94 |
95 | score = self.run_test(T)
96 | self.assertTrue(2.25 < score < 2.35)
97 |
98 |
99 | class TestsFITestCase(TestsTestCase, unittest.TestCase):
100 | """Test F/I validation tests"""
101 |
102 | # @unittest.skip("This test takes a long time")
103 | def test_rheobase_parallel(self):
104 | from neuronunit.tests.fi import RheobaseTestP as T
105 |
106 | score = self.run_test(T)
107 | self.assertTrue(0.2 < score < 0.3)
108 |
109 |
110 | class TestsDynamicsTestCase(TestsTestCase, unittest.TestCase):
111 | """Tests dynamical systems properties tests"""
112 |
113 | @unittest.skip("This test is not yet implemented")
114 | def test_threshold_firing(self):
115 | from neuronunit.tests.dynamics import TFRTypeTest as T
116 |
117 | # score = self.run_test(T)
118 | # self.assertTrue(0.2 < score < 0.3)
119 |
120 | @unittest.skip("This test is not yet implemented")
121 | def test_rheobase_parallel(self):
122 | from neuronunit.tests.dynamics import BurstinessTest as T
123 |
124 | # score = self.run_test(T)
125 | # self.assertTrue(0.2 < score < 0.3)
126 |
127 |
128 | class TestsChannelTestCase(unittest.TestCase):
129 | @unittest.skip("This test is not yet implemented")
130 | def test_iv_curve_ss(self):
131 | from neuronunit.tests.channel import IVCurveSSTest as T
132 |
133 | # score = self.run_test(T)
134 | # self.assertTrue(0.2 < score < 0.3)
135 |
136 | @unittest.skip("This test is not yet implemented")
137 | def test_iv_curve_peak(self):
138 | from neuronunit.tests.channel import IVCurvePeakTest as T
139 |
140 | # score = self.run_test(T)
141 | # self.assertTrue(0.2 < score < 0.3)
142 |
143 |
144 | class FakeTestCase(unittest.TestCase):
145 | def test_fake_test(self):
146 | from neuronunit.tests import FakeTest
147 | from sciunit import Model
148 |
149 | observation = {"a": 1, "b": 1}
150 | ft = FakeTest(observation, name="a_b_test")
151 | m = Model(name="a_b_model")
152 | m.attrs = {"a": 1, "b": 1}
153 | prediction = ft.generate_prediction(m)
154 | score = ft.compute_score([0.9, 1.1], prediction).score
155 | self.assertAlmostEqual(score, 0.09, 2)
156 |
157 |
158 | if __name__ == "__main__":
159 | unittest.main()
160 |
--------------------------------------------------------------------------------
/neuronunit/unit_test/validate_observation_vm.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 1,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "import quantities as pq\n",
10 | "import sciunit\n",
11 | "from sciunit.errors import ObservationError"
12 | ]
13 | },
14 | {
15 | "cell_type": "code",
16 | "execution_count": 2,
17 | "metadata": {},
18 | "outputs": [],
19 | "source": [
20 | "from neuronunit.tests import APWidthTest"
21 | ]
22 | },
23 | {
24 | "cell_type": "code",
25 | "execution_count": 3,
26 | "metadata": {},
27 | "outputs": [
28 | {
29 | "data": {
30 | "text/plain": [
31 | "True"
32 | ]
33 | },
34 | "execution_count": 3,
35 | "metadata": {},
36 | "output_type": "execute_result"
37 | }
38 | ],
39 | "source": [
40 | "sciunit.config_set('PREVALIDATE', True) # Must be set otherwise validation of the observation will not occur until the test is executed. "
41 | ]
42 | },
43 | {
44 | "cell_type": "code",
45 | "execution_count": 4,
46 | "metadata": {
47 | "tags": []
48 | },
49 | "outputs": [
50 | {
51 | "name": "stdout",
52 | "output_type": "stream",
53 | "text": [
54 | "{'observation': ['none or more than one rule validate', {'oneof definition 0': [{'std': ['required field']}], 'oneof definition 1': [{'n': ['required field'], 'sem': ['required field']}]}]}\n"
55 | ]
56 | }
57 | ],
58 | "source": [
59 | "try:\n",
60 | " APWidthTest({'mean':3.2*pq.ms}) # Note missing `std` key\n",
61 | "except ObservationError as e:\n",
62 | " print(e)"
63 | ]
64 | },
65 | {
66 | "cell_type": "code",
67 | "execution_count": 5,
68 | "metadata": {
69 | "tags": []
70 | },
71 | "outputs": [
72 | {
73 | "name": "stdout",
74 | "output_type": "stream",
75 | "text": [
76 | "{'observation': ['none or more than one rule validate', {'oneof definition 0': [{'std': [\"Must have units of 'millisecond'\"]}], 'oneof definition 1': [{'n': ['required field'], 'sem': ['required field'], 'std': ['unknown field']}]}]}\n"
77 | ]
78 | }
79 | ],
80 | "source": [
81 | "try:\n",
82 | " APWidthTest({'mean':3.2*pq.ms, 'std':1.4*pq.V}) # Note wrong units\n",
83 | "except ObservationError as e:\n",
84 | " print(e)"
85 | ]
86 | },
87 | {
88 | "cell_type": "code",
89 | "execution_count": 6,
90 | "metadata": {
91 | "tags": []
92 | },
93 | "outputs": [
94 | {
95 | "name": "stdout",
96 | "output_type": "stream",
97 | "text": [
98 | "{'observation': ['none or more than one rule validate', {'oneof definition 0': [{'sem': ['unknown field']}], 'oneof definition 1': [{'n': ['required field'], 'std': ['unknown field']}]}]}\n"
99 | ]
100 | }
101 | ],
102 | "source": [
103 | "try:\n",
104 | " APWidthTest({'mean':3.2*pq.ms, 'std':1.4*pq.ms, 'sem':0.7*pq.ms}) # Note mutually exclusive `std` and `sem` keys\n",
105 | "except ObservationError as e:\n",
106 | " print(e)"
107 | ]
108 | },
109 | {
110 | "cell_type": "code",
111 | "execution_count": 7,
112 | "metadata": {},
113 | "outputs": [
114 | {
115 | "data": {
116 | "text/plain": [
117 | ""
118 | ]
119 | },
120 | "execution_count": 7,
121 | "metadata": {},
122 | "output_type": "execute_result"
123 | }
124 | ],
125 | "source": [
126 | "APWidthTest({'mean':3.2*pq.ms, 'std':1.4*pq.ms}) # Should validate"
127 | ]
128 | }
129 | ],
130 | "metadata": {
131 | "kernelspec": {
132 | "display_name": "Python 3",
133 | "language": "python",
134 | "name": "python3"
135 | },
136 | "language_info": {
137 | "codemirror_mode": {
138 | "name": "ipython",
139 | "version": 3
140 | },
141 | "file_extension": ".py",
142 | "mimetype": "text/x-python",
143 | "name": "python",
144 | "nbconvert_exporter": "python",
145 | "pygments_lexer": "ipython3",
146 | "version": "3.7.4"
147 | }
148 | },
149 | "nbformat": 4,
150 | "nbformat_minor": 2
151 | }
152 |
--------------------------------------------------------------------------------
/neuronunit/unit_test/validate_params_vm.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 1,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "import quantities as pq\n",
10 | "import sciunit\n",
11 | "from sciunit.errors import ObservationError, ParametersError"
12 | ]
13 | },
14 | {
15 | "cell_type": "code",
16 | "execution_count": 2,
17 | "metadata": {},
18 | "outputs": [],
19 | "source": [
20 | "from neuronunit.tests import APWidthTest"
21 | ]
22 | },
23 | {
24 | "cell_type": "code",
25 | "execution_count": 3,
26 | "metadata": {},
27 | "outputs": [
28 | {
29 | "data": {
30 | "text/plain": [
31 | "{'dt': {'type': 'time', 'required': False},\n",
32 | " 'tmax': {'type': 'time', 'min': 0, 'required': False}}"
33 | ]
34 | },
35 | "execution_count": 3,
36 | "metadata": {},
37 | "output_type": "execute_result"
38 | }
39 | ],
40 | "source": [
41 | "# Display the parameters schema for this test\n",
42 | "APWidthTest.params_schema"
43 | ]
44 | },
45 | {
46 | "cell_type": "code",
47 | "execution_count": 4,
48 | "metadata": {},
49 | "outputs": [
50 | {
51 | "name": "stdout",
52 | "output_type": "stream",
53 | "text": [
54 | "{'params': [{'3.0 mV': ['Must have dimensions of time.']}]}\n"
55 | ]
56 | }
57 | ],
58 | "source": [
59 | "# Try to instantiate the test using a parameter (tmax) that has the wrong units. \n",
60 | "# This should show an error message\n",
61 | "try:\n",
62 | " test = APWidthTest({'mean':3.2*pq.ms, 'std':1.4*pq.ms}, name=\"My Test\", tmax=3*pq.mV) # Should fail due to incorrect units\n",
63 | " raise Exception(\"Test should not have been allowed with incorrect parameter units\")\n",
64 | "except ParametersError as e:\n",
65 | " print(e)"
66 | ]
67 | },
68 | {
69 | "cell_type": "code",
70 | "execution_count": 5,
71 | "metadata": {},
72 | "outputs": [],
73 | "source": [
74 | "# Now do it correctly. There should be no error message. \n",
75 | "try:\n",
76 | " test = APWidthTest({'mean':3.2*pq.ms, 'std':1.4*pq.ms}, name=\"My Test\", tmax=3*pq.s) # Should validate successfully\n",
77 | "except ParametersError as e:\n",
78 | " print(e)\n",
79 | " raise Exception(\"Test should have been allowed with correct parameter units\")"
80 | ]
81 | },
82 | {
83 | "cell_type": "code",
84 | "execution_count": 6,
85 | "metadata": {},
86 | "outputs": [],
87 | "source": [
88 | "from sciunit.validators import ParametersValidator"
89 | ]
90 | },
91 | {
92 | "cell_type": "code",
93 | "execution_count": 7,
94 | "metadata": {},
95 | "outputs": [
96 | {
97 | "data": {
98 | "text/plain": [
99 | "'s'"
100 | ]
101 | },
102 | "execution_count": 7,
103 | "metadata": {},
104 | "output_type": "execute_result"
105 | }
106 | ],
107 | "source": [
108 | "# Get the units type (e.g. \"time\") from the parameters schema \n",
109 | "units_type = APWidthTest.params_schema['dt']['type']\n",
110 | "\n",
111 | "# Get the actual units for this units type\n",
112 | "ParametersValidator.units_map[units_type]"
113 | ]
114 | }
115 | ],
116 | "metadata": {
117 | "kernelspec": {
118 | "display_name": "Python 3",
119 | "language": "python",
120 | "name": "python3"
121 | },
122 | "language_info": {
123 | "codemirror_mode": {
124 | "name": "ipython",
125 | "version": 3
126 | },
127 | "file_extension": ".py",
128 | "mimetype": "text/x-python",
129 | "name": "python",
130 | "nbconvert_exporter": "python",
131 | "pygments_lexer": "ipython3",
132 | "version": "3.6.7"
133 | }
134 | },
135 | "nbformat": 4,
136 | "nbformat_minor": 2
137 | }
138 |
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [build-system]
2 | requires = [
3 | "setuptools >= 40.9.0",
4 | "wheel"
5 | ]
6 | build-backend = "setuptools.build_meta"
7 |
--------------------------------------------------------------------------------
/readthedocs.yml:
--------------------------------------------------------------------------------
1 | conda:
2 | file: environment.yml
3 | python:
4 | version: 3.5
5 | pip_install: true
6 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | typing
2 | typing-extensions
3 | asv
7 | dask
8 | lxml==4.4.1
9 | elephant==0.4.1
10 | mock==3.0.5
11 | tqdm==4.48.2
12 | neo==0.5.2
13 | plotly==4.5.2
14 | simplejson==3.17.0
15 | future==0.17.1
16 | streamlit
17 | pandas==1.1.0
18 | matplotlib
19 | seaborn
20 | quantities==0.12.4
21 | Jinja2==2.10.3
22 | Pebble==4.5.3
23 | backports.statistics==0.1.0
24 | python_dateutil
25 | scikit_learn==0.23.2
26 | frozendict==1.2
28 | numba
30 | allensdk==0.16.3
31 | natsort==7.0.1
32 | nbformat==4.4.0
34 | dask[bag]
35 | fsspec>=0.3.3
36 | cython
37 | git+https://github.com/russelljjarvis/sciunit@dev
38 | git+https://github.com/russelljjarvis/BluePyOpt@neuronunit_reduced_cells
42 | efel
43 |
--------------------------------------------------------------------------------
/setup.cfg:
--------------------------------------------------------------------------------
1 | [metadata]
2 | name = neuronunit
3 | version = 0.19
4 | description = A SciUnit library for data-driven testing of single-neuron physiology models.
5 | long_description_content_type = text/markdown
6 | url = http://github.com/russelljjarvis/neuronunit
7 |
8 | author = Russell Jarvis
9 | author_email = russelljarvis@protonmail.com
10 | license = MIT
11 | classifiers =
12 | License :: OSI Approved :: MIT License
13 | Programming Language :: Python :: 3
14 | Programming Language :: Python :: 3.5
15 | Programming Language :: Python :: 3.6
16 | Programming Language :: Python :: 3.7
17 | Programming Language :: Python :: 3.8
18 |
19 |
20 | [options]
21 | zip_safe = False
22 | packages = find:
23 | python_requires = >=3.5
24 | install_requires =
25 | dask
26 | lxml==4.4.1
27 | elephant==0.4.1
28 | mock==3.0.5
29 | tqdm==4.48.2
30 | neo==0.5.2
31 | plotly==4.5.2
32 | simplejson==3.17.0
33 | future==0.17.1
34 | streamlit
35 | pandas==1.1.0
36 | matplotlib
37 | seaborn
38 | quantities==0.12.4
39 | Jinja2==2.10.3
40 | Pebble==4.5.3
41 | backports.statistics==0.1.0
42 | python_dateutil
43 | scikit_learn==0.23.2
44 | frozendict==1.2
46 | numba
48 | allensdk==0.16.3
49 | natsort==7.0.1
50 | nbformat==4.4.0
52 | dask[bag]
53 | fsspec>=0.3.3
54 | cython
55 | efel
56 | pyneuroml
57 | BluePyOpt @ git+ssh://git@github.com/russelljjarvis/BluePyOpt.git@neuronunit_reduced_cells
58 |
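59 | # Note: the git+ssh URL above requires GitHub SSH credentials at install
60 | # time; an https form (git+https://github.com/russelljjarvis/BluePyOpt.git@neuronunit_reduced_cells)
61 | # may be friendlier for anonymous installs.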
59 | [options.extras_require]
60 | morphology = pylmeasure
61 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | import setuptools; setuptools.setup()
2 |
--------------------------------------------------------------------------------
/test.sh:
--------------------------------------------------------------------------------
1 | UNIT_TEST_SUITE="neuronunit.unit_test buffer"
2 | # Fundamental Python bug prevents this other method from allowing
3 | # some notebook tests to pass.
4 | #UNIT_TEST_SUITE="setup.py test"
5 | coverage run -m --source=. --omit=*unit_test*,setup.py,.eggs $UNIT_TEST_SUITE
--------------------------------------------------------------------------------