├── .gitignore
├── LICENSE.txt
├── README.md
├── docs
├── .nojekyll
├── 00_README.txt
├── Makefile
├── build
│ └── html
│ │ ├── .buildinfo
│ │ ├── _images
│ │ └── spec_uses.jpg
│ │ ├── _sources
│ │ ├── api_usage.rst.txt
│ │ ├── index.rst.txt
│ │ └── specification_language.rst.txt
│ │ ├── _static
│ │ ├── ajax-loader.gif
│ │ ├── alabaster.css
│ │ ├── basic.css
│ │ ├── comment-bright.png
│ │ ├── comment-close.png
│ │ ├── comment.png
│ │ ├── doctools.js
│ │ ├── down-pressed.png
│ │ ├── down.png
│ │ ├── file.png
│ │ ├── jquery-3.1.0.js
│ │ ├── jquery.js
│ │ ├── minus.png
│ │ ├── plus.png
│ │ ├── pygments.css
│ │ ├── searchtools.js
│ │ ├── spec_uses.jpg
│ │ ├── underscore-1.3.1.js
│ │ ├── underscore.js
│ │ ├── up-pressed.png
│ │ ├── up.png
│ │ └── websupport.js
│ │ ├── api_usage.html
│ │ ├── genindex.html
│ │ ├── index.html
│ │ ├── objects.inv
│ │ ├── search.html
│ │ ├── searchindex.js
│ │ └── specification_language.html
├── index.html
├── make.bat
└── source
│ ├── _static
│ └── spec_uses.jpg
│ ├── api_usage.rst
│ ├── conf.py
│ ├── index.rst
│ └── specification_language.rst
├── examples
├── 0_README.txt
├── create_scripts
│ ├── 0_README.txt
│ ├── abstract_feature.py
│ ├── analysis_e.py
│ ├── annotations.py
│ ├── behavior-e.py
│ ├── behavior.py
│ ├── closed_interface-e.py
│ ├── crcns_alm-1.py
│ ├── crcns_hc-3.py
│ ├── crcns_ret-1.py
│ ├── crcns_ssc-1.py
│ ├── custom_link.py
│ ├── extensions
│ │ ├── 0_README.txt
│ │ ├── e-analysis.py
│ │ ├── e-behavior.py
│ │ ├── e-closed_interface.py
│ │ ├── e-genJson.py
│ │ ├── e-general.py
│ │ ├── e-interface.py
│ │ ├── e-interval.py
│ │ ├── e-intracellular.py
│ │ ├── e-link_test.py
│ │ ├── e-module.py
│ │ ├── e-timeseries.py
│ │ ├── e-trajectorySeries.py
│ │ └── e-trajectorySeries2.py
│ ├── extracellular_ephys.py
│ ├── extracellular_spikes.py
│ ├── general-e.py
│ ├── interface-e.py
│ ├── interval-e.py
│ ├── interval.py
│ ├── intracellular-e.py
│ ├── link_test-e.py
│ ├── module-e.py
│ ├── motion_correction.py
│ ├── motion_correction2.py
│ ├── run_all.sh
│ ├── timeseries-e.py
│ ├── trajectorySeries-e.py
│ └── trajectorySeries2-e.py
├── created_nwb_files
│ └── 0_README.txt
├── text_output_files
│ ├── 0_README.txt
│ ├── create
│ │ └── 0_README.txt
│ ├── diff
│ │ └── 0_README.txt
│ ├── doc
│ │ └── 0_README.txt
│ └── validate
│ │ └── 0_README.txt
└── utility_scripts
│ ├── 0_README.txt
│ ├── check_schemas.sh
│ ├── cmp_created.sh
│ ├── h5diffsig.sh
│ ├── install_source_data.py
│ ├── make_cv_diff.sh
│ ├── make_docs.py
│ ├── make_docs.sh
│ ├── make_h5sig.py
│ ├── make_pdf_doc.sh
│ ├── validate_all.py
│ ├── validate_behavior.py
│ ├── validate_internal.sh
│ └── validate_others.sh
├── matlab_bridge
├── 0_INSTALL.txt
├── 0_README.txt
├── matlab_bridge_api
│ ├── +h5g8
│ │ ├── dataset.m
│ │ ├── file.m
│ │ ├── group.m
│ │ └── node.m
│ ├── nwb_file.m
│ ├── nwb_utils.m
│ ├── parse_arguments.m
│ └── test_nwb.m
├── matlab_examples
│ ├── 0_README.txt
│ ├── create_scripts
│ │ ├── 0_README.txt
│ │ ├── abstract_feature.m
│ │ ├── analysis_e.m
│ │ ├── crcns_alm_1.m
│ │ └── run_all_examples.m
│ └── created_nwb_files
│ │ └── 0_README.txt
└── matlab_unittest
│ ├── run_all_tests.m
│ ├── t_annotation.m
│ ├── t_append.m
│ ├── t_array_layout.m
│ ├── t_epoch_tag.m
│ ├── t_general_ephys.m
│ ├── t_general_image.m
│ ├── t_general_opto.m
│ ├── t_general_patch.m
│ ├── t_general_species.m
│ ├── t_general_top.m
│ ├── t_if_add_ts.m
│ ├── t_if_isi.m
│ ├── t_modification_time.m
│ ├── t_no_data.m
│ ├── t_no_time.m
│ ├── t_ref_image.m
│ ├── t_softlink.m
│ ├── t_starting_time.m
│ ├── t_timeseries_link.m
│ ├── t_top_datasets.m
│ ├── t_unittimes.m
│ ├── test_utils.m
│ └── test_utils.py
├── nwb
├── __init__.py
├── check_schema.py
├── combine_messages.py
├── display_versions.py
├── doc_tools.py
├── find_links.py
├── h5diffsig.py
├── h5gate.py
├── make_docs.py
├── make_json.py
├── meta_schema.py
├── nwb_core.py
├── nwb_file.py
├── nwb_init.py
├── nwb_utils.py
├── suggest_spellings.py
├── validate.py
└── value_summary.py
├── setup.py
├── test_all
├── 0_README.txt
├── make_curr.sh
├── make_dirsig.py
├── orig
│ └── dirsig.txt
├── set_orig.py
├── show_diff.py
├── test_all.cfg
└── test_all.sh
└── unittest
├── 0_README.txt
├── run_tests.sh
├── t_annotation.py
├── t_append.py
├── t_array_layout.py
├── t_epoch_tag.py
├── t_general_ephys.py
├── t_general_image.py
├── t_general_opto.py
├── t_general_patch.py
├── t_general_species.py
├── t_general_top.py
├── t_if_add_ts.py
├── t_if_isi.py
├── t_modification_time.py
├── t_no_data.py
├── t_no_time.py
├── t_ref_image.py
├── t_region_reference.py
├── t_softlink.py
├── t_starting_time.py
├── t_strings.py
├── t_timeseries_link.py
├── t_top_datasets.py
├── t_unittimes.py
└── test_utils.py
/.gitignore:
--------------------------------------------------------------------------------
1 | # Optional source data downloaded separately
2 | /examples/source_data/crcns_*
3 | /examples/source_data_2/
4 |
5 | # Generated output from tests
6 | examples/text_output_files/create/*txt
7 | examples/text_output_files/validate/*txt
8 | matlab_bridge/matlab_examples/text_output_files/
9 | matlab_bridge/matlab_examples/created_nwb_files/
10 | test_all/curr/
11 | test_all/orig/
12 | test_all/orig_bk*/
13 |
14 | # Created nwb files
15 | *.nwb
16 |
17 |
18 | # Byte-compiled / optimized / DLL files
19 | __pycache__/
20 | *.py[cod]
21 | *$py.class
22 |
23 | # C extensions
24 | *.so
25 |
26 | # Distribution / packaging
27 | .Python
28 | env/
29 | build/
30 | develop-eggs/
31 | dist/
32 | downloads/
33 | eggs/
34 | .eggs/
35 | lib/
36 | lib64/
37 | parts/
38 | sdist/
39 | var/
40 | *.egg-info/
41 | .installed.cfg
42 | *.egg
43 |
44 | # PyInstaller
45 | # Usually these files are written by a python script from a template
46 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
47 | *.manifest
48 | *.spec
49 |
50 | # Installer logs
51 | pip-log.txt
52 | pip-delete-this-directory.txt
53 |
54 | # Unit test / coverage reports
55 | htmlcov/
56 | .tox/
57 | .coverage
58 | .coverage.*
59 | .cache
60 | nosetests.xml
61 | coverage.xml
62 | *,cover
63 |
64 | # Translations
65 | *.mo
66 | *.pot
67 |
68 | # Django stuff:
69 | *.log
70 |
71 | # Sphinx documentation
72 | docs/_build/
73 |
74 | # PyBuilder
75 | target/
76 |
77 |
78 |
--------------------------------------------------------------------------------
/LICENSE.txt:
--------------------------------------------------------------------------------
1 | Copyright (c) 2015 Allen Institute, California Institute of Technology, New York University School
2 | of Medicine, the Howard Hughes Medical Institute, University of California, Berkeley, GE, the Kavli
3 | Foundation and the International Neuroinformatics Coordinating Facility. All rights reserved.
4 |
5 | Redistribution and use in source and binary forms, with or without modification, are permitted
6 | provided that the following conditions are met:
7 |
8 | 1. Redistributions of source code must retain the above copyright notice,
9 | this list of conditions and the following disclaimer.
10 |
11 | 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and
12 | the following disclaimer in the documentation and/or other materials provided with the distribution.
13 |
14 | 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or
15 | promote products derived from this software without specific prior written permission.
16 |
17 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
18 | WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
19 | PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
20 | ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
21 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22 | INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
23 | TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
24 | ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Python (and Matlab) API for Neurodata Without Borders (NWB) format
2 |
3 | HTML formatted documentation about this API (including installation instructions) are at:
4 | http://neurodatawithoutborders.github.io/api-python
5 |
6 | Documentation in other formats (such as PDF) may be generated using the files in the ``docs``
7 | directory. This is described in file ``docs/00_README.txt``.
8 |
9 |
--------------------------------------------------------------------------------
/docs/.nojekyll:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeurodataWithoutBorders/api-python/52e97e7642021913ae6505ab63b7cc77d2622d76/docs/.nojekyll
--------------------------------------------------------------------------------
/docs/00_README.txt:
--------------------------------------------------------------------------------
1 | This directory contains documentation for the NWB Python (and Matlab) API.
2 | The documentation can be built on Unix-like systems by entering:
3 |
4 | "make html" - to make html documentation
5 | "make latexpdf" - to make PDF documentation
6 | "make" - to see the different types of documentation available
7 |
8 | On Windows systems, "make.bat" can be used to generate documentation.
9 |
10 | Documentation will be created in directory "build".
11 |
12 |
13 |
14 |
15 |
--------------------------------------------------------------------------------
/docs/Makefile:
--------------------------------------------------------------------------------
1 | # Minimal makefile for Sphinx documentation
2 | #
3 |
4 | # You can set these variables from the command line.
5 | SPHINXOPTS =
6 | SPHINXBUILD = sphinx-build
7 | SPHINXPROJ = NWBAPI-Python
8 | SOURCEDIR = source
9 | BUILDDIR = build
10 |
11 | # Put it first so that "make" without argument is like "make help".
12 | help:
13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
14 |
15 | .PHONY: help Makefile
16 |
17 | # Catch-all target: route all unknown targets to Sphinx using the new
18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
19 | %: Makefile
20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
--------------------------------------------------------------------------------
/docs/build/html/.buildinfo:
--------------------------------------------------------------------------------
1 | # Sphinx build info version 1
2 | # This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done.
3 | config: f97f3f0445b07afce5a7af1c7a109867
4 | tags: 645f666f9bcd5a90fca523b33c5a78b7
5 |
--------------------------------------------------------------------------------
/docs/build/html/_images/spec_uses.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeurodataWithoutBorders/api-python/52e97e7642021913ae6505ab63b7cc77d2622d76/docs/build/html/_images/spec_uses.jpg
--------------------------------------------------------------------------------
/docs/build/html/_sources/index.rst.txt:
--------------------------------------------------------------------------------
1 | .. NWB API-Python documentation master file, created by
2 | sphinx-quickstart on Mon May 1 15:37:19 2017.
3 | You can adapt this file completely to your liking, but it should at least
4 | contain the root `toctree` directive.
5 |
6 | NWB API-Python documentation
7 | ============================
8 |
9 | | This documentation describes the NWB Python (and Matlab) API available at:
10 | | https://github.com/NeurodataWithoutBorders/api-python
11 |
12 | Contents
13 | --------
14 |
15 | .. toctree::
16 | :maxdepth: 2
17 | :numbered:
18 |
19 | api_usage
20 | specification_language
21 |
22 |
23 |
24 | ..
25 | Below are commented out for now
26 | Indices and tables
27 | ==================
28 |
29 | * :ref:`genindex`
30 | * :ref:`modindex`
31 | * :ref:`search`
32 |
--------------------------------------------------------------------------------
/docs/build/html/_static/ajax-loader.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeurodataWithoutBorders/api-python/52e97e7642021913ae6505ab63b7cc77d2622d76/docs/build/html/_static/ajax-loader.gif
--------------------------------------------------------------------------------
/docs/build/html/_static/comment-bright.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeurodataWithoutBorders/api-python/52e97e7642021913ae6505ab63b7cc77d2622d76/docs/build/html/_static/comment-bright.png
--------------------------------------------------------------------------------
/docs/build/html/_static/comment-close.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeurodataWithoutBorders/api-python/52e97e7642021913ae6505ab63b7cc77d2622d76/docs/build/html/_static/comment-close.png
--------------------------------------------------------------------------------
/docs/build/html/_static/comment.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeurodataWithoutBorders/api-python/52e97e7642021913ae6505ab63b7cc77d2622d76/docs/build/html/_static/comment.png
--------------------------------------------------------------------------------
/docs/build/html/_static/down-pressed.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeurodataWithoutBorders/api-python/52e97e7642021913ae6505ab63b7cc77d2622d76/docs/build/html/_static/down-pressed.png
--------------------------------------------------------------------------------
/docs/build/html/_static/down.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeurodataWithoutBorders/api-python/52e97e7642021913ae6505ab63b7cc77d2622d76/docs/build/html/_static/down.png
--------------------------------------------------------------------------------
/docs/build/html/_static/file.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeurodataWithoutBorders/api-python/52e97e7642021913ae6505ab63b7cc77d2622d76/docs/build/html/_static/file.png
--------------------------------------------------------------------------------
/docs/build/html/_static/minus.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeurodataWithoutBorders/api-python/52e97e7642021913ae6505ab63b7cc77d2622d76/docs/build/html/_static/minus.png
--------------------------------------------------------------------------------
/docs/build/html/_static/plus.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeurodataWithoutBorders/api-python/52e97e7642021913ae6505ab63b7cc77d2622d76/docs/build/html/_static/plus.png
--------------------------------------------------------------------------------
/docs/build/html/_static/spec_uses.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeurodataWithoutBorders/api-python/52e97e7642021913ae6505ab63b7cc77d2622d76/docs/build/html/_static/spec_uses.jpg
--------------------------------------------------------------------------------
/docs/build/html/_static/up-pressed.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeurodataWithoutBorders/api-python/52e97e7642021913ae6505ab63b7cc77d2622d76/docs/build/html/_static/up-pressed.png
--------------------------------------------------------------------------------
/docs/build/html/_static/up.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeurodataWithoutBorders/api-python/52e97e7642021913ae6505ab63b7cc77d2622d76/docs/build/html/_static/up.png
--------------------------------------------------------------------------------
/docs/build/html/genindex.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
5 |
6 |
7 |
8 |
9 |
10 | Index — NWB API-Python 0.8.1 documentation
11 |
12 |
13 |
14 |
15 |
25 |
26 |
27 |
28 |
29 |
30 |
31 |
32 |
33 |
34 |
35 |
36 |
37 |
38 |
39 |
40 |
41 |
42 |
43 |
44 |
45 |
46 |
Index
47 |
48 |
49 |
50 |
51 |
52 |
53 |
54 |
55 |
56 |
79 |
80 |
81 |
89 |
90 |
91 |
92 |
93 |
94 |
--------------------------------------------------------------------------------
/docs/build/html/objects.inv:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeurodataWithoutBorders/api-python/52e97e7642021913ae6505ab63b7cc77d2622d76/docs/build/html/objects.inv
--------------------------------------------------------------------------------
/docs/build/html/search.html:
--------------------------------------------------------------------------------
1 |
2 |
4 |
5 |
6 |
7 |
8 |
9 | Search — NWB API-Python 0.8.1 documentation
10 |
11 |
12 |
13 |
14 |
24 |
25 |
26 |
27 |
28 |
29 |
30 |
33 |
34 |
35 |
36 |
37 |
38 |
39 |
40 |
41 |
42 |
43 |
44 |
45 |
46 |
47 |
48 |
49 |
50 |
51 |
52 |
Search
53 |
54 |
55 |
56 | Please activate JavaScript to enable the search
57 | functionality.
58 |
59 |
60 |
61 | From here you can search these documents. Enter your search
62 | words into the box below and click "search". Note that the search
63 | function will automatically search for all of the words. Pages
64 | containing fewer words won't appear in the result list.
65 |
66 |
71 |
72 |
73 |
74 |
75 |
76 |
77 |
78 |
79 |
89 |
90 |
91 |
99 |
100 |
101 |
102 |
103 |
104 |
--------------------------------------------------------------------------------
/docs/index.html:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/docs/make.bat:
--------------------------------------------------------------------------------
1 | @ECHO OFF
2 |
3 | pushd %~dp0
4 |
5 | REM Command file for Sphinx documentation
6 |
7 | if "%SPHINXBUILD%" == "" (
8 | set SPHINXBUILD=sphinx-build
9 | )
10 | set SOURCEDIR=source
11 | set BUILDDIR=build
12 | set SPHINXPROJ=NWBAPI-Python
13 |
14 | if "%1" == "" goto help
15 |
16 | %SPHINXBUILD% >NUL 2>NUL
17 | if errorlevel 9009 (
18 | echo.
19 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
20 | echo.installed, then set the SPHINXBUILD environment variable to point
21 | echo.to the full path of the 'sphinx-build' executable. Alternatively you
22 | echo.may add the Sphinx directory to PATH.
23 | echo.
24 | echo.If you don't have Sphinx installed, grab it from
25 | echo.http://sphinx-doc.org/
26 | exit /b 1
27 | )
28 |
29 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%
30 | goto end
31 |
32 | :help
33 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%
34 |
35 | :end
36 | popd
37 |
--------------------------------------------------------------------------------
/docs/source/_static/spec_uses.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeurodataWithoutBorders/api-python/52e97e7642021913ae6505ab63b7cc77d2622d76/docs/source/_static/spec_uses.jpg
--------------------------------------------------------------------------------
/docs/source/index.rst:
--------------------------------------------------------------------------------
1 | .. NWB API-Python documentation master file, created by
2 | sphinx-quickstart on Mon May 1 15:37:19 2017.
3 | You can adapt this file completely to your liking, but it should at least
4 | contain the root `toctree` directive.
5 |
6 | NWB API-Python documentation
7 | ============================
8 |
9 | | This documentation describes the NWB Python (and Matlab) API available at:
10 | | https://github.com/NeurodataWithoutBorders/api-python
11 |
12 | Contents
13 | --------
14 |
15 | .. toctree::
16 | :maxdepth: 2
17 | :numbered:
18 |
19 | api_usage
20 | specification_language
21 |
22 |
23 |
24 | ..
25 | Below are commented out for now
26 | Indices and tables
27 | ==================
28 |
29 | * :ref:`genindex`
30 | * :ref:`modindex`
31 | * :ref:`search`
32 |
--------------------------------------------------------------------------------
/examples/0_README.txt:
--------------------------------------------------------------------------------
1 | 0_README.txt for NWB examples
2 |
3 |
4 | Directories are:
5 |
6 | create_scripts - scripts used to create sample NWB files
7 |
8 | created_nwb_files - location of where example NWB files will be created
9 |
10 | utility_scripts - scripts demonstrating validating files and
11 | generating documentation.
12 |
13 | text_output_files - text files created when running create scripts or
14 | utility scripts.
15 |
16 | source_data_2 - This is initially absent. When added, it contains
17 | source data that is needed to run scripts that have prefix "crcns_".
18 | The "source_data_2" directory can be installed by
19 | running "install_source_data.py" in the utility_scripts directory.
20 | To install manually, download file "source_data_2.tar.gz" from:
21 | https://portal.nersc.gov/project/crcns/download/nwb-1
22 | then unpack it, and put it in this directory.
23 | After adding it, the "source_data_2" directory should have subdirectories:
24 | crcns_alm-1 crcns_hc-3 crcns_ret-1 crcns_ssc-1
25 |
26 | To run the create_scripts, or the utility_scripts, cd into the corresponding
27 | directory and follow the instructions in the readme file in that directory.
28 |
29 |
--------------------------------------------------------------------------------
/examples/create_scripts/0_README.txt:
--------------------------------------------------------------------------------
1 | This directory "examples/create_scripts" contains example
2 | scripts to create example NWB files.
3 |
4 | To run scripts individually, type:
5 | python <script_name>.py
6 |
7 | To run all of the scripts, type:
8 | ./run_all.sh
9 |
10 | Several types of scripts require additional files. These are:
11 |
12 |
13 | (1) Scripts that required input data.
14 |
15 | Scripts with names starting with "crcns" require data in the
16 | "../examples/source_data" directory. This data must be downloaded
17 | and placed inside the ../source_data directory. Instructions
18 | for doing this are in the examples/0_README.txt file
19 | (i.e. the readme in the directory above this one).
20 |
21 |
22 | (2) Scripts that require extensions.
23 |
24 | Scripts that have name ending with "-e.py" require one or more "extensions"
25 | (files that define extensions to the NWB format). The extensions are
26 | stored in directory "extensions". Usually the name of the extension
27 | used with the script will be the same as the name of the create script,
28 | except "e-" will be in front of the extension.
29 |
30 | The convention of having "e-" in front of the extension (and "-e" at the
31 | end of the create script name) is only used for these examples. Any name for the
32 | create script and extension(s) can be used as long as the actual name of the
33 | extension(s) are referenced by the create script and passed as parameters to
34 | nwb_validate.py when validating NWB files created using one or more extensions.
--------------------------------------------------------------------------------
/examples/create_scripts/analysis_e.py:
--------------------------------------------------------------------------------
#!/usr/bin/python
import os
import sys
from nwb import nwb_file
from nwb import nwb_utils as ut


"""
This example illustrates using an extension to store data in the /analysis group.
The /analysis group is reserved for results of lab-specific analysis.

The extension definition is in file "extensions/e-analysis.py".

The example is based on the contents of the Allen Institute for Brain Science
Cell Types database NWB files.
"""

OUTPUT_DIR = "../created_nwb_files/"
# Use only the script's base name (not any leading path) so the output file is
# always created inside OUTPUT_DIR, even when this script is invoked with a
# path prefix (e.g. "python create_scripts/analysis_e.py").
file_name = os.path.splitext(os.path.basename(__file__))[0] + ".nwb"
########################################################################
# create a new NWB file
settings = {}
settings["file_name"] = os.path.join(OUTPUT_DIR, file_name)
settings["identifier"] = ut.create_identifier("example /analysis group using extension.")
settings["mode"] = "w"  # "w" mode overwrites an existing file
settings["start_time"] = "Sat Jul 04 2015 3:14:16"
settings["description"] = ("Test file demonstrating storing data in the /analysis group "
                           "that is defined by an extension.")

# specify the extension (could be more than one; only one used now)
settings['extensions'] = ["extensions/e-analysis.py"]

# create the NWB file object. this manages the file
print("Creating " + settings["file_name"])
f = nwb_file.open(**settings)

########################################################################
# This example stores spike times for multiple sweeps.
# Create the group for the spike times.
# The group ("aibs_spike_times") is defined in the extension.
ast = f.make_group("aibs_spike_times")

# some sample data
times = [1.1, 1.2, 1.3, 1.4, 1.5]

# create some sample sweeps
# NOTE(review): the first argument to set_dataset (normally an angle-bracketed
# variable-named dataset id, e.g. "<aibs_sweep>") appears to have been stripped
# from this copy of the file -- confirm against the upstream source.
for i in range(5):
    sweep_name = "sweep_%03i" % (i + 1)
    ast.set_dataset("", times, name=sweep_name)


# all done; close the file
f.close()
--------------------------------------------------------------------------------
/examples/create_scripts/annotations.py:
--------------------------------------------------------------------------------
#!/usr/bin/python
import os
import sys
from nwb import nwb_file
from nwb import nwb_utils as ut


"""
Create and store experiment annotations.

Annotations are text strings that mark specific times in an
experiment, for example "rat placed in enclosure" or "observed
start of seizure activity".
"""

OUTPUT_DIR = "../created_nwb_files/"
# Use only the script's base name (not any leading path) so the output file is
# always created inside OUTPUT_DIR, even when this script is invoked with a
# path prefix (e.g. "python create_scripts/annotations.py").
file_name = os.path.splitext(os.path.basename(__file__))[0] + ".nwb"
########################################################################
# create a new NWB file
# several settings are specified when doing so. these can be supplied within
# the NWB constructor or defined in a dict, as in this example
settings = {}
settings["file_name"] = os.path.join(OUTPUT_DIR, file_name)

# each file should have a descriptive globally unique identifier
# that specifies the lab and this experiment session
# the function nwb_utils.create_identifier() is recommended to use as it takes
# the string and appends the present date and time
settings["identifier"] = ut.create_identifier("annotation example")

# indicate that it's OK to overwrite an existing file
settings["mode"] = "w"

# specify the start time of the experiment. all times in the NWB file
# are relative to experiment start time
# if the start time is not specified the present time will be used
settings["start_time"] = "Sat Jul 04 2015 3:14:16"

# provide one or two sentences that describe the experiment and what
# data is in the file
# (fixed: this previously named the wrong series type, "AbstractFeatureSeries",
# copied from another example -- this script demonstrates an AnnotationSeries)
settings["description"] = "Test file demonstrating use of the AnnotationSeries"

# create the NWB file object. this manages the file
print("Creating " + settings["file_name"])
f = nwb_file.open(**settings)

########################################################################
# create an AnnotationSeries
# this will be stored in 'acquisition' as annotations are an
# observation or a record of something else that happened.
# this means that it will be stored in the following location in the hdf5
# file: acquisition/timeseries
# NOTE(review): the first argument to make_group (normally an angle-bracketed
# type id such as "<AnnotationSeries>") appears to have been stripped from
# this copy of the file -- confirm against the upstream source.
annot = f.make_group("", "notes", path="/acquisition/timeseries")
annot.set_attr("description", "This is an AnnotationSeries with sample data")
annot.set_attr("comments", "The comment and description fields can store arbitrary human-readable data")
annot.set_attr("source", "Observation of Dr. J Doe")

# store pretend data
# all time is stored as seconds
andata = [
    ("Rat in bed, beginning sleep 1", 15.0),
    ("Rat placed in enclosure, start run 1", 933.0),
    ("Rat taken out of enclosure, end run 1", 1456.0),
    ("Rat in bed, start sleep 2", 1461.0),
    ("Rat placed in enclosure, start run 2", 2401.0),
    ("Rat taken out of enclosure, end run 2", 3210.0),
    ("Rat in bed, start sleep 3", 3218.0),
    ("End sleep 3", 4193.0),
]
# split the (annotation, time) pairs into parallel lists
annotations = [x[0] for x in andata]
times = [x[1] for x in andata]
# store the annotations and times in the AnnotationSeries
annot.set_dataset("data", annotations)
annot.set_dataset("timestamps", times)

# Ignore this block. these were used for testing external links
# annot.set_dataset("data", "extlink:unknown_data_file,/path/to/data")
# annot.set_dataset("timestamps", "extlink:unknown file2\t/path/t,o/timestamps")
# num_samples must be explicitly set
# annot.set_dataset("num_samples", 0)

########################################################################
# it can sometimes be useful to import documenting data from a file
# in this case, we'll store this script in the metadata section of the
# file, for a record of how the file was created
script_name = sys.argv[0]
f.set_dataset("source_script", ut.load_file(script_name), attrs={
    "file_name": script_name})

# when all data is entered, close the file
f.close()
--------------------------------------------------------------------------------
/examples/create_scripts/behavior-e.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python
2 | import sys
3 | from nwb import nwb_file
4 | from nwb import nwb_utils as ut
5 |
6 |
7 | """
8 | Store additional information (defined in an extension) in an Interface.
9 |
10 | This is the same as the behavior.py example, but adds a new dataset
11 | to the BehavioralEpochs, using an extension defined in file
12 | extensions/e-behavior.py.
13 |
14 | """
15 |
16 | OUTPUT_DIR = "../created_nwb_files/"
17 | file_name = __file__[0:-3] + ".nwb"
18 | ########################################################################
19 | # create a new NWB file
20 | settings = {}
21 | settings["file_name"] = OUTPUT_DIR + file_name
22 | settings["identifier"] = ut.create_identifier("behavioral interval example, with extension.")
23 | settings["mode"] = "w"
24 | settings["start_time"] = "Sat Jul 04 2015 3:14:16"
25 | settings["description"] = ("Test file demonstrating use of the BehavioralEpochs module "
26 | "interface with an extension")
27 |
28 | # specify the extension (Could be more than one. Only one used now).
29 | settings['extensions'] = ["extensions/e-behavior.py"]
30 |
31 | # create the NWB file object. this manages the file
32 | print("Creating " + settings["file_name"])
33 | f = nwb_file.open(**settings)
34 |
35 | ########################################################################
36 | # processed information is stored in modules, with each module publishing
37 | # one or more 'interfaces'. an interface is like a contract, promising
38 | # that the module will provide a specific and defined set of data.
39 | # this module will publish 'BehavioralEpochs' interface, which promises
40 | # that it will publish IntervalSeries (a type of time series storing
41 | # experimental intervals)
42 | #
43 | # create the module
44 | #- mod = neurodata.create_module("my behavioral module")
45 | mod = f.make_group("<Module>", "my behavioral module")
46 | mod.set_attr("description", "sample module that stores behavioral interval data")
47 |
48 | # add an interface
49 | if1 = mod.make_group("BehavioralEpochs")
50 | if1.set_attr("source", "a description of the original data that these intervals were calculated from ")
51 |
52 | # interval data is stored in an interval time series -- IntervalSeries
53 | # create it
54 | interval = if1.make_group("<IntervalSeries>", "intervals")
55 | interval.set_attr("description", "Sample interval series -- two series are overlaid here, one with a code '1' and another with the code '2'")
56 | interval.set_attr("comments", "For example, '1' represents sound on(+1)/off(-1) and '2' represents light on(+2)/off(-2)")
57 | # create
58 | evts = [ 1, -1, 2, -2, 1, -1, 2, 1, -1, -2, 1, 2, -1, -2 ]
59 | interval.set_dataset("data", evts)
60 |
61 | # note: some timestamps will be duplicated if two different events start
62 | # and/or stop at the same time
63 | t = [ 1.0, 2.0, 2.0, 3.0, 5.0, 6.0, 6.0, 7.0, 8.0, 8.0, 10.0, 10.0, 11.0, 15.0 ]
64 | interval.set_dataset("timestamps", t)
65 |
66 | # Add additional information to the BehavioralEpochs interface. This is defined in
67 | # the extension "extensions/e-behavior.py"
68 | if1.set_dataset("my_extra_info", "extra info added to 'BehavioralEpochs' interface",
69 | attrs={"eia": "attribute for extra info"})
70 |
71 |
72 |
73 | ########################################################################
74 | # it can sometimes be useful to import documenting data from a file
75 | # in this case, we'll store this script in the metadata section of the
76 | # file, for a record of how the file was created
77 | script_name = sys.argv[0]
78 | f.set_dataset("source_script", ut.load_file(script_name), attrs= {
79 | "file_name": script_name})
80 |
81 | # when all data is entered, close the file
82 | f.close()
83 |
84 |
--------------------------------------------------------------------------------
/examples/create_scripts/closed_interface-e.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python
2 | import sys
3 | from nwb import nwb_file
4 | from nwb import nwb_utils as utils
5 |
6 | """
7 | Example extending the format: using new Interface which is specified
8 | to be "closed" (does not allow any additional members).
9 |
10 | The extension is specified in file "extensions/e-closed_interface.py".
11 |
12 | The convention of having "e-" in front of the extension (and "-e" at the
13 | end of the create script name) is only used for these examples. Any name for the
14 | create script and extension(s) can be used as long as the actual name of the
15 | extension(s) are referenced by the create script and passed as parameters to
16 | nwb_validate.py when validating NWB files created using one or more extensions.
17 |
18 |
19 | """
20 | # create a new NWB file
21 | OUTPUT_DIR = "../created_nwb_files/"
22 | file_name = __file__[0:-3] + ".nwb"
23 |
24 | settings = {}
25 | settings["file_name"] = OUTPUT_DIR + file_name
26 | settings["identifier"] = utils.create_identifier("MyClosedInterface example")
27 | settings["mode"] = "w"
28 | settings["start_time"] = "2016-04-07T03:16:03.604121"
29 | settings["description"] = "Test file demonstrating using a new Interface which is closed"
30 |
31 | # specify the extension
32 | settings['extensions'] = ["extensions/e-closed_interface.py"]
33 | f = nwb_file.open(**settings)
34 |
35 |
36 | ########################################################################
37 |
38 | # create a module for the interface
39 |
40 | mod = f.make_group("<Module>", "my_module")
41 |
42 | # create the interface inside the module
43 |
44 | ig = mod.make_group("MyClosedInterface", attrs={"source": "source of data for MyClosedInterface"})
45 |
46 |
47 | # set attribute and dataset in interface
48 | ig.set_attr("foo", "MyClosedInterface - foo attribute")
49 | ig.set_dataset("bar", [1, 2, 3, 4, 5])
50 |
51 | # add an additional data set. This should generate an error on validation
52 | ig.set_custom_dataset("baz", [4, 6, 7, 9])
53 |
54 | # All done. Close the file
55 | f.close()
56 |
57 |
--------------------------------------------------------------------------------
/examples/create_scripts/custom_link.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python
2 | import sys
3 | from nwb import nwb_file
4 | from nwb import nwb_utils as ut
5 |
6 |
7 | """
8 | Test making a custom link to TimeSeries::data
9 | """
10 |
11 |
12 | OUTPUT_DIR = "../created_nwb_files/"
13 | file_name = __file__[0:-3] + ".nwb"
14 | ########################################################################
15 | # create a new NWB file
16 | # several settings are specified when doing so. these can be supplied within
17 | # the NWB constructor or defined in a dict, as in this example
18 | settings = {}
19 | settings["file_name"] = OUTPUT_DIR + file_name
20 | settings["identifier"] = ut.create_identifier("custom link example")
21 | settings["mode"] = "w"
22 | settings["start_time"] = "Aug 24, 2016"
23 | settings["description"] = "Test creating custom link to TimeSeries::data"
24 | f = nwb_file.open(**settings)
25 |
26 | rss = f.make_group("<TimeSeries>", "rat_position", path='/acquisition/timeseries',
27 | attrs={"source": "optical sensor"})
28 | rss_data = rss.set_dataset('data', [[1.1, 3.1], [1.2, 3.2], [1.3, 3.3]], attrs= {
29 | "conversion":1.0, "resolution":1.0, "unit":"meter"} )
30 | rss.set_dataset('timestamps', [0.1, 0.2, 0.3])
31 |
32 | # make custom group
33 | ag = f.make_group("analysis")
34 | cg = ag.make_custom_group("lab_data")
35 | ld = cg.set_custom_dataset("rat_position_data_link", rss_data)
36 |
37 | # Create another TimeSeries
38 | mss = f.make_group("<TimeSeries>", "mouse_position", path='/acquisition/timeseries',
39 | attrs={"source": "Differential GPS"})
40 | mss_data = mss.set_dataset('data', [[7.1, 4.1], [7.2, 4.2], [7.3, 4.3]], attrs= {
41 | "conversion":1.0, "resolution":1.0, "unit":"meter"} )
42 | mss.set_dataset('timestamps', [0.1, 0.2, 0.3])
43 | # add a custom dataset in this timeseries to the other TimeSeries data array
44 | mss.set_custom_dataset("rat_position_data_link", rss_data)
45 |
46 | f.close()
47 |
48 |
--------------------------------------------------------------------------------
/examples/create_scripts/extensions/0_README.txt:
--------------------------------------------------------------------------------
1 | This directory, "examples/create_scripts/extensions" will contain example
2 | extensions. Currently there is only one (e-interval.py) which is
3 | an extension that goes with the script "interval-e.py").
4 |
--------------------------------------------------------------------------------
/examples/create_scripts/extensions/e-analysis.py:
--------------------------------------------------------------------------------
1 |
2 | {"fs": {"aibs_ct_an": {
3 |
4 |
5 | "info": {
6 | "name": "AIBS cell types - analysis",
7 | "version": "0.9.2",
8 | "date": "May 6, 2016",
9 | "author": "Jeff Teeters, based on Allen Institute cell types DB HDF5 file",
10 | "contact": "jteeters@berkeley.edu",
11 | "description": "NWB extension for AIBS cell types data base NWB files /analysis section."
12 | },
13 |
14 | "schema": {
15 | "/analysis/": {
16 | "aibs_spike_times/": {
17 | "description": "Group for storing AIBS specific spike times",
18 | "attributes": {
19 | "comments": {
20 | "data_type": "text",
21 | "value": "Spike times are relative to sweep start. They are NOT absolute times."}
22 | },
23 | "<aibs_sweep>": {
24 | "attributes": {
25 | "comments": {
26 | "data_type": "text",
27 | "value": "Spike times are relative to sweep start. They are NOT absolute times."}
28 | },
29 | "description": "Times associated with a single sweep",
30 | "dimensions": ["numSamples"],
31 | "data_type": "float64!"
32 | }
33 | }
34 | }
35 | }
36 |
37 | }}}
38 |
--------------------------------------------------------------------------------
/examples/create_scripts/extensions/e-behavior.py:
--------------------------------------------------------------------------------
1 | # Definitions of extension to BehavioralEpochs Interface
2 |
3 |
4 | # behei is the schema id (or 'namespace')
5 |
6 | {"fs": {"behei": {
7 |
8 | "info": {
9 | "name": "BehavioralEpochs extra info",
10 | "version": "1.0",
11 | "date": "May 6, 2016",
12 | "author": "Jeff Teeters",
13 | "contact": "jteeters@berkeley.edu",
14 | "description": ("Example extension to NWB BehavioralEpochs interface.")
15 | },
16 |
17 | "schema": {
18 | "BehavioralEpochs/": {
19 | "description": "Extension to BehavioralEpochs interface to include a new dataset.",
20 | "my_extra_info": {
21 | "description": "dataset which contains extra info",
22 | "data_type": "text",
23 | "attributes": {
24 | "eia": {
25 | "description": "attribute for my_extra_info",
26 | "data_type": "text"}}}
27 | }
28 | }
29 |
30 | }}}
--------------------------------------------------------------------------------
/examples/create_scripts/extensions/e-closed_interface.py:
--------------------------------------------------------------------------------
1 | # This defines a new interface, called MyClosedInterface
2 | # which is closed (does not allow new members to be added).
3 |
4 | # "eci" is the schema id for this extension.
5 |
6 | {"fs": { "eci": {
7 |
8 | "info": {
9 | "name": "Example closed Interface extension",
10 | "version": "1.0",
11 | "date": "Sept. 22, 2016",
12 | "author": "Jeff Teeters",
13 | "contact": "jteeters@berkeley.edu",
14 | "description": ("Extension defining a new closed Interface")
15 | },
16 |
17 | "schema": {
18 | "MyClosedInterface/": {
19 | "merge": ["core:<Interface>/"],
20 | "description": ("A new interface defined in extension e-closed_interface.py."
21 | " This is closed (no new members can be added)."),
22 | "_properties": {"closed": True}, # specify that this group is closed (no new members can be added).
23 | "attributes": {
24 | "foo": {
25 | "description": "example text attribute for MyClosedInterface",
26 | "data_type": "text"}},
27 | "bar": {
28 | "description": ("Example dataset included with MyClosedInterface"),
29 | "data_type": "int",
30 | "dimensions": ["num_measurements"]},
31 | "bazc/": {
32 | "description": ("Example closed group in MyClosedInterface"),
33 | # "_closed": True,
34 | "_properties": {"closed": True}},
35 | "bazo/": {
36 | "description": ("Example open group in MyClosedInterface"),
37 | # "_closed": False,
38 | "_properties": {"closed": False}}
39 | }
40 | }
41 |
42 | }}}
43 |
--------------------------------------------------------------------------------
/examples/create_scripts/extensions/e-genJson.py:
--------------------------------------------------------------------------------
1 |
2 | {"fs": {"genJson": {
3 |
4 | "info": {
5 | "name": "json storage extension",
6 | "version": "0.9.0",
7 | "date": "Oct 28, 2016",
8 | "author": "Jeff Teeters",
9 | "contact": "jteeters@berkeley.edu",
10 | "description": ("Extension for specifying storing of JSON formatted metadata in"
11 | " the general/json group.")
12 | },
13 |
14 | "schema": {
15 | "/general/json/": {
16 | "description": "Location for storing JSON encoded metadata as text",
17 | "*": {
18 | "description": "Individual JSON file",
19 | "data_type": "text"
20 | }
21 | }
22 | }
23 | }}}
24 |
--------------------------------------------------------------------------------
/examples/create_scripts/extensions/e-general.py:
--------------------------------------------------------------------------------
1 |
2 | # define an extension to add metadata to the /general section
3 | # 'rctn' is a key used to identify this extension
4 | # rctn stands for "Redwood Center for Theoretical Neuroscience"
5 | # (Any name could be used).
6 |
7 |
8 | {"fs": {"rctn": {
9 |
10 | "info": {
11 | "name": "RCTN extension",
12 | "version": "0.9.0",
13 | "date": "Feb 21, 2016",
14 | "author": "Jeff Teeters",
15 | "contact": "jteeters@berkeley.edu",
16 | "description": ("Redwood Center for Theoretical Neuroscience extension for the NWB"
17 | " format. Also includes some metadata for the AIBS cell types database")
18 | },
19 |
20 | "schema": {
21 | '/general/activity_level': {
22 | 'data_type': 'text',
23 | 'description': "Activity level of animal. 10 - very active; 1-asleep"},
24 | 'time_since_fed': {
25 | 'data_type': 'text',
26 | 'description': "Time animal last fed prior to session"},
27 | "/general/": {
28 | "description": "Extension to core general",
29 | "include": { 'time_since_fed': {}},
30 | "_required": { # Specifies required member combinations",
31 | "test_req" :
32 | ["notes AND experiment_description",
33 | "notes and experiment_description are both required"]},
34 | "experimenter": {
35 | "attributes": {
36 | "orcid_id": {
37 | "description": "machine readable id, added with rctn schema",
38 | "data_type": "text"}}},
39 | "rctn_info/": {
40 | "description": "Custom directory for rctn information",
41 | 'seminars': {
42 | "description": "Names of speakers in some past seminars",
43 | 'data_type': 'text',
44 | 'dimensions': ["num_seminars"]},
45 | 'attendance': {
46 | "description": "Number of people attending corresponding seminar",
47 | 'data_type': 'int',
48 | 'dimensions': ["num_seminars"]}}
49 | },
50 | # added metadata about the subject, from AIBS cell types database
51 | # 'aibs_' prefix was convention used in this instance but no particular naming
52 | # scheme is required
53 | "/general/subject/": {
54 | "aibs_specimen_id": {
55 | "data_type": "int",
56 | "description":"AIBS specific specimen ID"},
57 | "aibs_specimen_name": {
58 | "data_type": "text",
59 | "description": "AIBS specific specimen_name"},
60 | "aibs_dendrite_state": {
61 | "data_type": "text",
62 | "description": "AIBS specific dendrite_state"},
63 | "aibs_dendrite_type": {
64 | "data_type": "text",
65 | "description": "AIBS specific dendrite type"},
66 | "aibs_cre_line": {
67 | "data_type": "text",
68 | "description": "AIBS specific cre_line"}
69 | }
70 | }
71 | }}}
--------------------------------------------------------------------------------
/examples/create_scripts/extensions/e-interface.py:
--------------------------------------------------------------------------------
1 | # Definitions of an extension defining a new interface
2 |
3 | # This defines a new interface, called MyNewInterface
4 | # All format specifications must be stored in dictionary "fs"
5 |
6 | # "eint" is the schema id (or namespace) for this extension.
7 |
8 | {"fs": { "eint": {
9 |
10 | "info": {
11 | "name": "Example Interface extension",
12 | "version": "1.0",
13 | "date": "May 2, 2016",
14 | "author": "Jeff Teeters",
15 | "contact": "jteeters@berkeley.edu",
16 | "description": ("Extension defining a new Interface type, named 'MyNewInterface'")
17 | },
18 |
19 | "schema": {
20 | "MyNewInterface/": {
21 | "merge": ["core:<Interface>/"],
22 | "description": ("A new interface defined in extension e-interface.py. Uses the "
23 | "new timeseries defined in extension e-timeseries.py"),
24 | "attributes": {
25 | "foo": {
26 | "description": "example text attribute for MyNewInterface",
27 | "data_type": "text"}},
28 | "<MyNewTimeSeries>/": {
29 | # use MyNewTimeSeries in the MyNewInterface
30 | "merge": ["mnts:<MyNewTimeSeries>/"]
31 | },
32 | "bar": {
33 | "description": ("Example dataset included with MyNewTimeSeries"),
34 | "data_type": "int",
35 | "dimensions": ["num_measurements"]}
36 | }
37 | }
38 |
39 | }}}
40 |
--------------------------------------------------------------------------------
/examples/create_scripts/extensions/e-interval.py:
--------------------------------------------------------------------------------
1 | # Definitions of extension to IntervalSeries
2 |
3 | # "isc" is the schema id (or 'namespace')
4 | # "fs" must always be the top level key
5 |
6 | {"fs": {"isc": {
7 |
8 | "info": {
9 | "name": "Interval series code descriptions",
10 | "version": "1.0",
11 | "date": "April 7, 2016",
12 | "author": "Jeff Teeters",
13 | "contact": "jteeters@berkeley.edu",
14 | "description": ("Extension to NWB Interval Series to include a code and "
15 | "code_description dataset.")
16 | },
17 |
18 | "schema": {
19 | "<IntervalSeries>/": {
20 | "description": "Extension to IntervalSeries to include code descriptions.",
21 | "codes": {
22 | "description": "Codes that are used in the IntervalSeries",
23 | "data_type": "int",
24 | "dimensions": ["num_codes"] },
25 | "code_descriptions": {
26 | "description": "Description of each code",
27 | "data_type": "text",
28 | "dimensions": ["num_codes"] }}
29 | }
30 |
31 | }}}
32 |
33 |
34 |
--------------------------------------------------------------------------------
/examples/create_scripts/extensions/e-intracellular.py:
--------------------------------------------------------------------------------
1 | # Definitions of AIBS cell types extension to NWB format
2 |
3 | {"fs": {"aibs_ct_intra": {
4 |
5 | "info": {
6 | "name": "AIBS Cell Types - intracellular metadata",
7 | "version": "0.9.3",
8 | "date": "Oct 3, 2016",
9 | "author": "Jeff Teeters, based on HDF5 created by Keith Godfrey using AIBS NWB API",
10 | "contact": "jteeters@berkeley.edu",
11 | "description": "NWB extension for intracellular metadata based on AIBS cell types data base NWB files."
12 | },
13 |
14 | "schema": {
15 | "<VoltageClampSeries>/": {
16 | "description": ("AIBS specific VoltageClampSeries. Includes AIBS specific metadata."),
17 | "aibs_stimulus_amplitude_mv": {
18 | "description": "AIBS specific stimulus_amplitude",
19 | "attributes": {
20 | "unit": { "data_type": "text", "value": "mv"}},
21 | "data_type": "float"},
22 | "aibs_stimulus_description": {
23 | "description": "AIBS specific stimulus description",
24 | "data_type": "text"},
25 | "aibs_stimulus_interval": {
26 | "description": "AIBS specific stimulus interval",
27 | "data_type": "float"},
28 | "aibs_stimulus_name": {
29 | "description": "AIBS specific stimulus name",
30 | "data_type": "text"},
31 | },
32 | "<CurrentClampSeries>/": {
33 | "description": ("AIBS specific CurrentClampSeries. Includes AIBS specific metadata."),
34 | "aibs_stimulus_amplitude_pa": {
35 | "description": "AIBS specific stimulus_amplitude",
36 | "attributes": {
37 | "unit": { "data_type": "text", "value": "pa"}},
38 | "data_type": "float"},
39 | "aibs_stimulus_description": {
40 | "description": "AIBS specific stimulus description",
41 | "data_type": "text"},
42 | "aibs_stimulus_interval": {
43 | "description": "AIBS specific stimulus interval",
44 | "data_type": "float"},
45 | "aibs_stimulus_name": {
46 | "description": "AIBS specific stimulus name",
47 | "data_type": "text"},
48 | },
49 | "/stimulus/presentation/<CurrentClampStimulusSeries>/": {
50 | "description": ("AIBS specific CurrentClampStimulusSeries, only applies to "
51 | "location /stimulus/presentation/, but not to location /stimulus/templates"),
52 | "aibs_stimulus_amplitude_pa": {
53 | "description": "AIBS specific stimulus_amplitude",
54 | "attributes": {
55 | "unit": { "data_type": "text", "value": "mv"}},
56 | "data_type": "float"},
57 | "aibs_stimulus_description": {
58 | "description": "AIBS specific stimulus description",
59 | "data_type": "text"},
60 | "aibs_stimulus_interval": {
61 | "description": "AIBS specific stimulus interval",
62 | "data_type": "float"},
63 | "aibs_stimulus_name": {
64 | "description": "AIBS specific stimulus name",
65 | "data_type": "text"},
66 | },
67 | "/stimulus/presentation/<VoltageClampStimulusSeries>/": {
68 | "description": ("AIBS specific VoltageClampStimulusSeries, only applies to "
69 | "location /stimulus/presentation/, not to location /stimulus/templates"),
70 | "aibs_stimulus_amplitude_mv": {
71 | "description": "AIBS specific stimulus_amplitude",
72 | "attributes": {
73 | "unit": { "data_type": "text", "value": "mv"}},
74 | "data_type": "float"},
75 | "aibs_stimulus_description": {
76 | "description": "AIBS specific stimulus description",
77 | "data_type": "text"},
78 | "aibs_stimulus_interval": {
79 | "description": "AIBS specific stimulus interval",
80 | "data_type": "float"},
81 | "aibs_stimulus_name": {
82 | "description": "AIBS specific stimulus name",
83 | "data_type": "text"},
84 | }
85 | }
86 |
87 | }}}
--------------------------------------------------------------------------------
/examples/create_scripts/extensions/e-link_test.py:
--------------------------------------------------------------------------------
1 |
2 | {"fs": {"link_test": {
3 |
4 |
5 | "info": {
6 | "name": "link_test",
7 | "version": "0.1",
8 | "date": "May 6, 2016",
9 | "author": "Jeff Teeters",
10 | "contact": "jteeters@berkeley.edu",
11 | "description": "Test defining a link in an extension to debug schema_id."
12 | },
13 |
14 | "schema": {
15 | "/analysis/": {
16 | "aibs_spike_times/": {
17 | "description": "Group for storing AIBS specific spike times",
18 | "pto_link?": {
19 | "description": ("The offset from the frame timestamp at which each pixel was acquired."
20 | " Note that the offset is not time-varying, i.e. it is the same for"
21 | " each frame. These offsets are given in the same units as for the"
22 | " timestamps array, i.e. seconds."),
23 | "link": {"target_type": "pixel_time_offsets",
24 | # "allow_subclasses": False # allow_subclasses not allowed in dataset links
25 | },
26 | "data_type": "float64!"
27 | },
28 | "pixel_time_offsets": {
29 | "description": ("The offset from the frame timestamp at which each pixel in this ROI"
30 | " was acquired."
31 | " Note that the offset is not time-varying, i.e. it is the same for"
32 | " each frame. These offsets are given in the same units as for the"
33 | " timestamps array, i.e. seconds."),
34 | "data_type": "float64!",
35 | "dimensions": [["y"], ["y", "x"]]
36 | }
37 | }
38 | }
39 | }
40 |
41 | }}}
42 |
--------------------------------------------------------------------------------
/examples/create_scripts/extensions/e-module.py:
--------------------------------------------------------------------------------
1 | # Definitions of an extension defining a custom module
2 |
3 | # In the NWB core format, each module can contain any number of interfaces
4 | # and no particular interfaces are required.
5 |
6 | # This extension shows how to create a custom module that specifies that
7 | # two interfaces are required. The two interfaces required are a custom
8 | # interface named "MyNewInterface" and the BehavioralTimeSeries interface.
9 |
10 | # The custom module is named "<MyNewModule>"
11 |
12 | # All format specifications must be stored in dictionary "fs"
13 |
14 | # "eint" is the schema id (or namespace) for this extension.
15 |
16 | {"fs": { "new_mod": {
17 |
18 |
19 | "info": {
20 | "name": "Example custom module extension",
21 | "version": "1.0",
22 | "date": "Sept. 22, 2016",
23 | "author": "Jeff Teeters",
24 | "contact": "jteeters@berkeley.edu",
25 | "description": ("Extension defining a new Module type, named 'MyNewModule', that has"
26 | " two required interfaces")
27 | },
28 |
29 | "schema": {
30 | "<MyNewModule>/": {
31 | "merge": ["core:<Module>/"],
32 | "description": ("A new module defined in extension e-module.py. Requires two "
33 | "Interfaces (MyNewInterface and BehavioralTimeSeries)."),
34 | "include": {
35 | "eint:MyNewInterface/": {},
36 | "core:BehavioralTimeSeries/": {} }}
37 | }
38 | }}}
39 |
--------------------------------------------------------------------------------
/examples/create_scripts/extensions/e-timeseries.py:
--------------------------------------------------------------------------------
1 | # Example extension defining a new TimeSeries
2 |
3 | # All format specifications must be stored in dictionary with top level key "fs":
4 | # "mnts" is the "namespace" for this extension.
5 |
6 |
7 | {"fs": {"mnts": {
8 | "info": {
9 | "name": "Example TimeSeries extension",
10 | "version": "1.0",
11 | "date": "May 2, 2016",
12 | "author": "Jeff Teeters",
13 | "contact": "jteeters@berkeley.edu",
14 | "description": ("Extension defining a new TimeSeries type, named 'MyNewTimeSeries'")
15 | },
16 |
17 | "schema": {
18 | "<MyNewTimeSeries>/": {
19 | "description": "A new timeseries, defined in extension e-timeseries.py",
20 | "merge": ["core:<TimeSeries>/"],
21 | "attributes": {
22 | "ancestry": {
23 | "data_type": "text",
24 | "dimensions": ["2"],
25 | "value": ["TimeSeries", "MyNewTimeSeries"],
26 | "const": True},
27 | "help": {
28 | "data_type": "text",
29 | "value": "Short description of MyNewTimeSeries goes here",
30 | "const": True},
31 | "foo": {
32 | "description": "example new text attribute for MyNewTimeSeries",
33 | "data_type": "text"}},
34 | "data": {
35 | "description": ("Multiple measurements are recorded at each point of time."),
36 | "dimensions": ["num_times", "num_measurements"],
37 | "data_type": "float32"},
38 | "bar": {
39 | "description": ("Example dataset included with MyNewTimeSeries"),
40 | "data_type": "int",
41 | "dimensions": ["num_measurements"]}
42 | }
43 | }
44 | }}}
45 |
--------------------------------------------------------------------------------
/examples/create_scripts/extensions/e-trajectorySeries.py:
--------------------------------------------------------------------------------
1 | # Definitions of extension to TimeSeries to store trajectory information
2 | # All format specifications must be stored in dictionary "fs":
3 | # "mnts" is the schema id or "namespace" for this extension.
4 |
5 | {"fs": {"mnts": {
6 |
7 | "info": {
8 | "name": "Sabes lab data trajectory series",
9 | "version": "1.0",
10 | "date": "May 2, 2016",
11 | "author": "Jeff Teeters",
12 | "contact": "jteeters@berkeley.edu",
13 | "description": ("Extension to store timeseries of hand trajectories for Sabes lab data")
14 | },
15 |
16 | "schema": {
17 | "<TrajectorySeries>/": {
18 | "merge": ["core:<TimeSeries>/"],
19 | "attributes": {
20 | "ancestry": {
21 | "data_type": "text",
22 | "dimensions": ["2"],
23 | "value": ["TimeSeries", "TrajectorySeries"],
24 | "const": True},
25 | "help": {
26 | "data_type": "text",
27 | "value": "Trajectory of hand movement positions",
28 | "const": True},
29 | "measurement_names": {
30 | "description": "Names of measurements at each time point",
31 | "data_type": "text",
32 | "dimensions": ["num_measurements"]},
33 | "measurement_units": {
34 | "description": "Units of each measurement",
35 | "data_type": "text",
36 | "dimensions": ["num_measurements"]},
37 | },
38 | "data": {
39 | "description": ("Measurements of hand trajectory, recorded at each point of time."),
40 | "dimensions": ["num_times", "num_measurements"],
41 | "data_type": "float32"},
42 | }
43 | }
44 |
45 | }}}
46 |
--------------------------------------------------------------------------------
/examples/create_scripts/extensions/e-trajectorySeries2.py:
--------------------------------------------------------------------------------
1 | # Definitions of extension to TimeSeries to store trajectory information
2 | # All format specifications must be stored in dictionary "fs"
3 | # "mnts2" is the "namespace" for this extension
4 | # This extension explicitly specifies meaning for each column of dataset data
5 |
6 | {"fs": {"mnts2": {
7 |
8 | "info": {
9 | "name": "Sabes lab data trajectory series",
10 | "version": "1.0",
11 | "date": "Oct 3, 2016",
12 | "author": "Jeff Teeters",
13 | "contact": "jteeters@berkeley.edu",
14 | "description": ("Extension to store timeseries of hand trajectories for Sabes lab data")
15 | },
16 |
17 | "schema": {
18 | "<TrajectorySeries>/": {
19 | "merge": ["core:<SpatialSeries>/"],
20 | "attributes": {
21 | "ancestry": {
22 | "data_type": "text",
23 | "dimensions": ["3"],
24 | "value": ["TimeSeries", "SpatialSeries", "TrajectorySeries"],
25 | "const": True},
26 | "help": {
27 | "data_type": "text",
28 | "value": "Trajectory of hand movement positions",
29 | "const": True},
30 | },
31 | "data": {
32 | "description": ("Measurements of hand trajectory, recorded at each point of time."),
33 | "dimensions": ["num_times", "trajectories"],
34 | "data_type": "float32",
35 | "trajectories": {
36 | "type": "structure",
37 | # define components of trajectories dimension
38 | "components": [
39 | { "alias": "s1_x", "unit": "meter" },
40 | { "alias": "s1_y", "unit": "meter" },
41 | { "alias": "s1_z", "unit": "meter" },
42 | { "alias": "s1_pitch", "unit": "radian" },
43 | { "alias": "s1_roll", "unit": "radian" },
44 | { "alias": "s1_yaw", "unit": "radian" },
45 | { "alias": "s2_x", "unit": "meter" },
46 | { "alias": "s2_y", "unit": "meter" },
47 | { "alias": "s2_z", "unit": "meter" },
48 | { "alias": "s2_pitch", "unit": "radian" },
49 | { "alias": "s2_roll", "unit": "radian" },
50 | { "alias": "s2_yaw", "unit": "radian" },
51 | { "alias": "s3_x", "unit": "meter" },
52 | { "alias": "s3_y", "unit": "meter" },
53 | { "alias": "s3_z", "unit": "meter" },
54 | { "alias": "s3_pitch", "unit": "radian" },
55 | { "alias": "s3_roll", "unit": "radian" },
56 | { "alias": "s3_yaw", "unit": "radian" },
57 | { "alias": "s4_x", "unit": "meter" },
58 | { "alias": "s4_y", "unit": "meter" },
59 | { "alias": "s4_z", "unit": "meter" },
60 | { "alias": "s4_pitch", "unit": "radian" },
61 | { "alias": "s4_roll", "unit": "radian" },
62 | { "alias": "s4_yaw", "unit": "radian" } ] },
63 | }
64 | }
65 | }
66 |
67 | }}}
--------------------------------------------------------------------------------
/examples/create_scripts/general-e.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python
2 | import sys
3 | from nwb import nwb_file
4 | from nwb import nwb_utils as utils
5 |
6 | """
7 | Example using extension to add metadata to group /general
8 |
9 | Group /general contains general metadata, i.e. metadata that
10 | applies to the entire session.
11 |
12 | This example uses the extension defined in extensions/e-general.py
13 | to define and then add new metadata to section
14 | /general
15 |
16 | """
17 | # create a new NWB file
18 | OUTPUT_DIR = "../created_nwb_files/"
19 | file_name = __file__[0:-3] + ".nwb"
20 |
21 | settings = {}
22 | settings["file_name"] = OUTPUT_DIR + file_name
23 | settings["identifier"] = utils.create_identifier("add metadata to general")
24 | settings["mode"] = "w"
25 | settings["start_time"] = "2016-04-07T03:16:03.604121"
26 | settings["description"] = "Test file demonstrating use of an extension for general"
27 |
28 | # specify the extension (Could be more than one. Only one used now).
29 | settings['extensions'] = ["extensions/e-general.py"]
30 | f = nwb_file.open(**settings)
31 |
32 |
33 | ########################################################################
34 | # Specify experimenter (this dataset is part of the core NWB format)
35 | eds = f.set_dataset('experimenter', "Joseline Doe")
36 |
37 | # specify attribute to experimenter, this defined in extension file.
38 | # it is not part of the core NWB format
39 | eds.set_attr("orcid_id", "7012023")
40 |
41 | # Now add metadata that is defined by the extension
42 | gri = f.make_group("rctn_info")
43 | gri.set_dataset("seminars", ["Thom Smith", "Dwight Keenan", "Sue Trimble"])
44 | gri.set_dataset("attendance", [23, 45, 33])
45 | f.set_dataset("rctn:activity_level", '7')
46 | f.set_dataset("rctn:time_since_fed", '6 hours 20 minutes')
47 |
48 | f.set_dataset("notes", "some notes")
49 |
50 | # also set extra metadata about subject
51 | # these datasets are also defined in the extension
52 | # dataset names and values are from a file in the AIBS cell types database
53 | f.set_dataset("aibs_specimen_id",313862134)
54 | f.set_dataset("aibs_specimen_name","Sst-IRES-Cre;Ai14(IVSCC)-167638.03.01.01")
55 | f.set_dataset("aibs_dendrite_state","NA")
56 | f.set_dataset("aibs_dendrite_type","aspiny")
57 | f.set_dataset("aibs_cre_line","Sst-IRES-Cre")
58 |
59 | # All done. Close the file
60 | f.close()
61 |
62 |
--------------------------------------------------------------------------------
/examples/create_scripts/interface-e.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python
2 | import sys
3 | from nwb import nwb_file
4 | from nwb import nwb_utils as utils
5 |
6 | """
7 | Example extending the format: creating a new Interface.
8 |
9 | This example uses two extensions defined in directory "extensions"
10 | e-interface.py - defines Interface extension
11 | e-timeseries.py - defines a new timeseries type (MyNewTimeSeries)
12 |
13 | The convention of having "e-" in front of the extension (and "-e" at the
14 | end of the create script name) is only used for these examples. Any name for the
15 | create script and extension(s) can be used as long as the actual name of the
16 | extension(s) are referenced by the create script and passed as parameters to
17 | nwb_validate.py when validating NWB files created using one or more extensions.
18 |
19 |
20 | """
21 | # create a new NWB file
22 | OUTPUT_DIR = "../created_nwb_files/"
23 | file_name = __file__[0:-3] + ".nwb"
24 |
25 | settings = {}
26 | settings["file_name"] = OUTPUT_DIR + file_name
27 | settings["identifier"] = utils.create_identifier("MyNewInterface example")
28 | settings["mode"] = "w"
29 | settings["start_time"] = "2016-04-07T03:16:03.604121"
30 | settings["description"] = "Test file demonstrating using a new Interface type using an extension"
31 |
32 | # specify the extensions, two are used.
33 | settings['extensions'] = ["extensions/e-timeseries.py", "extensions/e-interface.py"]
34 | f = nwb_file.open(**settings)
35 |
36 |
37 | ########################################################################
38 |
39 | # create a module for the interface
40 |
41 | mod = f.make_group("", "my_module")
42 |
43 | # create the interface inside the module
44 |
45 | ig = mod.make_group("MyNewInterface", attrs={"source": "source of data for MyNewInterface"})
46 |
47 |
48 | # set attribute and dataset in interface
49 | ig.set_attr("foo", "MyNewInterface - foo attribute")
50 | ig.set_dataset("bar", [1, 2, 3, 4, 5])
51 |
52 |
53 | # Make some sample data for the MyNewTimeseries
54 |
55 | data = [[1.2, 1.3, 1.4], [2.2, 2.3, 2.4], [3.2, 3.3, 3.4], [4.2, 4.3, 4.4], [5.2, 5.3, 5.4]]
56 | times = [0.1, 0.2, 0.3, 0.4, 0.5]
57 |
58 | # create the MyNewtimeseries inside the interface
59 | nts = ig.make_group("", "my_new_ts", attrs={"source": "source of data for my_new_ts"})
60 |
61 | nts.set_dataset("data", data, attrs={"conversion": 1.0, "resolution": 0.001, "unit": "--unit goes here--"})
62 | nts.set_dataset("timestamps", times)
63 |
64 | # specify metadata that is part of MyNewTimeSeries type
65 | nts.set_attr("foo", "This added to attribute 'foo'")
66 | nts.set_dataset("bar", [2, 4, 5, 6, 7])
67 |
68 | # All done. Close the file
69 | f.close()
70 |
71 |
--------------------------------------------------------------------------------
/examples/create_scripts/interval-e.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python
2 | import sys
3 | from nwb import nwb_file
4 | from nwb import nwb_utils as utils
5 |
6 | """
7 | Create and store interval series
8 |
9 | This is the same as example: interval.py, except that it uses
10 | an extension (defined in extensions/e-interval.py) to add
11 | two new datasets:
12 | - "codes"
13 | - "code_descriptions"
14 | to the IntervalSeries. The codes data set lists the numeric codes used.
15 | The "code_descriptions" data set, provides the description of each code.
16 | """
17 | # create a new NWB file
18 | OUTPUT_DIR = "../created_nwb_files/"
19 | file_name = __file__[0:-3] + ".nwb"
20 |
21 | settings = {}
22 | settings["file_name"] = OUTPUT_DIR + file_name
23 | settings["identifier"] = utils.create_identifier("extended interval example")
24 | settings["mode"] = "w"
25 | settings["start_time"] = "2016-04-07T03:16:03.604121"
26 | settings["description"] = "Test file demonstrating use of an extended IntervalSeries"
27 |
28 | # specify the extension (Could be more than one. Only one used now).
29 | settings['extensions'] = ["extensions/e-interval.py"]
30 | f = nwb_file.open(**settings)
31 |
32 |
33 | ########################################################################
34 | # Make some fake interval series data
35 | # store in codes and times
36 | event_codes = [
37 | 7, # 300 hz tone turned on at
38 | -7, # 300 hz tone turned off
39 | 3, # blue light turned on
40 | 4, # 200 hz tone turned on
41 | 6, # red light turned on
42 | -3, # blue light turned off
43 | -6, # red light turned off
44 | -4 # 200 hz tone turned off
45 | ]
46 | # times - interval times (in seconds) corresponding to above codes
47 | event_times = [1.1, 2.3, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8]
48 |
49 | # create two arrays, one containing the codes, the other the corresponding
50 | # descriptions
51 | codes = [3, 4, 6, 7]
52 | code_descriptions = ["blue light", "200 hz tone", "red light", "300 hz tone"]
53 |
54 |
55 | ########################################################################
56 | # create an IntervalSeries
57 | # All time series created at the "top level" in the file (e.g. not inside
58 | # a defined structure in the format specification) are stored in
59 | # one of three locations:
60 | # /stimulus/presentation - for presented stimuli data
61 | # /stimulus/templates - for templates for stimuli
62 | # /acquisition/timeseries - for acquired data
63 | #
64 | # Store this interval series in /stimulus/presentation
65 | # that is specified by the 'path' parameter in the call below
66 |
67 | # create the group for the IntervalSeries. Call returns group object
68 | g = f.make_group("", "tone_times", path="/stimulus/presentation")
69 |
70 | # # Inside the group, create the data and timestamps datasets
71 | g.set_dataset("data", event_codes)
72 | g.set_dataset("timestamps", event_times)
73 |
74 | # Now add the data sets defined in the extension
75 | g.set_dataset("codes", codes)
76 | g.set_dataset("code_descriptions", code_descriptions)
77 |
78 |
79 | # All done. Close the file
80 | f.close()
81 |
82 |
--------------------------------------------------------------------------------
/examples/create_scripts/interval.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python
2 | import sys
3 | from nwb import nwb_file
4 | from nwb import nwb_utils as ut
5 |
6 | """
7 | Create and store interval series
8 |
9 | Intervals in the data can be efficiently stored using the NWB IntervalSeries.
10 |
11 | The timestamps field stores the beginning and end of intervals.
12 | The data field stores whether the interval just started (>0 value)
13 | or ended (<0 value). Different interval types can be represented in the
14 | same series by using multiple key values (eg, 1 for feature A,
15 | 2 for feature B, 3 for feature C, etc). The field data stores an 8-bit integer.
16 | This is largely an alias of a standard TimeSeries but that is identifiable as
17 | representing time intervals in a machine-readable way.
18 | """
19 |
20 | # create a new NWB file
21 | OUTPUT_DIR = "../created_nwb_files/"
22 | file_name = __file__[0:-3] + ".nwb"
23 | settings = {}
24 | settings["file_name"] = OUTPUT_DIR + file_name
25 | settings["identifier"] = ut.create_identifier("interval example")
26 | settings["mode"] = "w"
27 | settings["start_time"] = "2016-04-07T03:16:03.604121"
28 | settings["description"] = "Test file demonstrating use of the IntervalSeries"
29 | f = nwb_file.open(**settings)
30 |
31 |
32 | ########################################################################
33 | # Make some fake interval series data
34 | # store in codes and times
35 | event_codes = [
36 | 7, # 300 hz tone turned on
37 | -7, # 300 hz tone turned off
38 | 3, # blue light turned on
39 | 4, # 200 hz tone turned on
40 | 6, # red light turned on
41 | -3, # blue light turned off
42 | -6, # red light turned off
43 | -4 # 200 hz tone turned off
44 | ]
45 | # times - interval times (in seconds) corresponding to above codes
46 | event_times = [1.1, 2.3, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8]
47 |
48 | ########################################################################
49 | # create an IntervalSeries
50 | # All time series created at the "top level" in the file (e.g. not inside
51 | # a defined structure in the format specification) are stored in
52 | # one of three locations:
53 | # /stimulus/presentation - for presented stimuli data
54 | # /stimulus/templates - for templates for stimuli
55 | # /acquisition/timeseries - for acquired data
56 | #
57 | # Store this interval series in /stimulus/presentation
58 | # that is specified by the 'path' parameter in the call below
59 |
60 | # create the group for the IntervalSeries. Call returns group object
61 | g = f.make_group("", "tone_times", path="/stimulus/presentation")
62 | g.set_attr("description", "Times for tone turning on/off. data codes positive for on, negative for off")
63 |
64 | # # Inside the group, create the data and timestamps datasets
65 | g.set_dataset("data", event_codes)
66 | g.set_dataset("timestamps", event_times)
67 |
68 |
69 | # All done. Close the file
70 | f.close()
71 |
72 |
--------------------------------------------------------------------------------
/examples/create_scripts/link_test-e.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python
2 | import sys
3 | from nwb import nwb_file
4 | from nwb import nwb_utils as ut
5 |
6 |
7 | """
8 | Test extension defining a link
9 |
10 | """
11 |
12 | OUTPUT_DIR = "../created_nwb_files/"
13 | file_name = __file__[0:-3] + ".nwb"
14 | ########################################################################
15 | # create a new NWB file
16 | settings = {}
17 | settings["file_name"] = OUTPUT_DIR + file_name
18 | settings["identifier"] = ut.create_identifier("test link extension.")
19 | settings["mode"] = "w"
20 | settings["start_time"] = "Sat Jul 04 2015 3:14:16"
21 | settings["description"] = ("Test making a link in the /analysis group "
22 | "that is defined by an extension.")
23 |
24 | # specify the extension (Could be more than one. Only one used now).
25 | settings['extensions'] = ["extensions/e-link_test.py"]
26 |
27 | # create the NWB file object. this manages the file
28 | print("Creating " + settings["file_name"])
29 | f = nwb_file.open(**settings)
30 |
31 | ########################################################################
32 | # This example, stores spike times for multiple sweeps
33 | # create the group for the spike times
34 | # The group ("aibs_spike_times") is defined in the extension
35 |
36 | ast = f.make_group("aibs_spike_times")
37 |
38 | # some sample data
39 | times = [1.1, 1.2, 1.3, 1.4, 1.5]
40 |
41 | #
42 | pto = ast.set_dataset("pixel_time_offsets", times)
43 |
44 | # now make the link
45 | ptl = ast.set_dataset("pto_link", pto, attrs={"hello": "Natasha"})
46 | ptl.set_attr("Mary", "bendrich")
47 |
48 | # all done; close the file
49 | f.close()
50 |
51 |
--------------------------------------------------------------------------------
/examples/create_scripts/module-e.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python
2 | import sys
3 | from nwb import nwb_file
4 | from nwb import nwb_utils as utils
5 |
6 | """
7 | Example extending the format: creating a module with required interfaces.
8 |
9 | In the NWB core format, each module can contain any number of interfaces
10 | and no particular interfaces are required.
11 |
12 | This example shows using a custom module that specifies that two interfaces
13 | are required, a custom interface named "MyNewInterface" and the
14 | BehavioralTimeSeries interface.
15 |
16 | This example uses three extensions defined in directory "extensions"
17 | e-module.py - defines the custom module
18 | e-interface.py - defines Interface extension
19 | e-timeseries.py - defines a new TimeSeries type (MyNewTimeSeries)
20 |
21 | The convention of having "e-" in front of the extension (and "-e" at the
22 | end of the create script name) is only used for these examples. Any name for the
23 | create script and extension(s) can be used as long as the actual name of the
24 | extension(s) are referenced by the create script and passed as parameters to
25 | nwb_validate.py when validating NWB files created using one or more extensions.
26 |
27 |
28 | """
29 | # create a new NWB file
30 | OUTPUT_DIR = "../created_nwb_files/"
31 | file_name = __file__[0:-3] + ".nwb"
32 |
33 | settings = {}
34 | settings["file_name"] = OUTPUT_DIR + file_name
35 | settings["identifier"] = utils.create_identifier("Custom module example")
36 | settings["mode"] = "w"
37 | settings["start_time"] = "2016-04-07T03:16:03.604121"
38 | settings["description"] = "Test file demonstrating using a custom module using an extension"
39 |
40 | # specify the extensions, three are used.
41 | settings['extensions'] = ["extensions/e-timeseries.py",
42 | "extensions/e-interface.py", "extensions/e-module.py"]
43 | f = nwb_file.open(**settings)
44 |
45 |
46 | ########################################################################
47 |
48 | # create a module for the interface
49 | mod = f.make_group("new_mod:", "custom_module")
50 |
51 | # create the interface inside the module
52 | ig = mod.make_group("MyNewInterface", attrs={"source": "source of data for MyNewInterface"})
53 |
54 | # set attribute and dataset in interface
55 | ig.set_attr("foo", "MyNewInterface - foo attribute")
56 | ig.set_dataset("bar", [1, 2, 3, 4, 5])
57 |
58 |
59 | # Make some sample data for the MyNewTimeseries
60 | data = [[1.2, 1.3, 1.4], [2.2, 2.3, 2.4], [3.2, 3.3, 3.4], [4.2, 4.3, 4.4], [5.2, 5.3, 5.4]]
61 | times = [0.1, 0.2, 0.3, 0.4, 0.5]
62 |
63 | # create the MyNewtimeseries inside the interface
64 | nts = ig.make_group("", "my_new_ts", attrs={"source": "source of data for my_new_ts"})
65 |
66 | nts.set_dataset("data", data, attrs={"conversion": 1.0, "resolution": 0.001, "unit": "--unit goes here--"})
67 | nts.set_dataset("timestamps", times)
68 |
69 | # specify metadata that is part of MyNewTimeSeries type
70 | nts.set_attr("foo", "This added to attribute 'foo'")
71 | nts.set_dataset("bar", [2, 4, 5, 6, 7])
72 |
73 | # add the behavioral timeseries to the module
74 |
75 | ib = mod.make_group("BehavioralTimeSeries", attrs={"source": "source of data for BehavioralTimeSeries"})
76 |
77 | ts1 = ib.make_group("", "rat_position", attrs={"source": "source of data for rat_position"})
78 | ts1.set_dataset("data", data, attrs={"conversion": 1.0, "resolution": 0.001, "unit": "--unit goes here--"})
79 | ts1.set_dataset("timestamps", times)
80 |
81 | # All done. Close the file
82 | f.close()
83 |
84 |
--------------------------------------------------------------------------------
/examples/create_scripts/run_all.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # This script will run all the example create scripts.
4 | #
5 | # It is run by entering the script name, optionally followed by character "s" or "f".
6 | # "s" specifies text output should be displayed on the screen, "f" indicates the
7 | # text output should be sent to a file, in directory "../text_output_files/create".
8 | # Writing to a file is useful for comparing the text output of each
9 | # create script to the output of validating the created nwb file.
10 | # (The output from both should be mostly the same) and to the output generated
11 | # in other runs. The output is used by the scripts in the "test_all" directory
12 | # at the top level.
13 | # The 'validate_all.py' script (in directory '../utility_scripts') can be used
14 | # to validate all the NWB files created by these scripts.
15 |
16 | txt_output_dir="../text_output_files/create"
17 | nwb_output_dir="../created_nwb_files"
18 |
19 | output_mode=$1
20 | if [[ "$output_mode" == "" ]] ; then
21 | output_mode="f"
22 | fi
23 |
24 | if [[ ! "$output_mode" =~ [fs]$ ]] ; then
25 | echo "format is:"
26 | echo "$0 "
27 | echo "where, is one of:"
28 | echo " f - output to file in $txt_output_dir"
29 | echo " s - output to screen."
30 | echo " default is 'f'"
31 | exit 1
32 | fi
33 | if [[ "$output_mode" == 's' ]] ; then
34 | dest="screen"
35 | else
36 | dest="directory $txt_output_dir"
37 | fi
38 |
39 | source_data_dir="source_data_2"
40 | source_data_test_file="../$source_data_dir/crcns_alm-1/data_collection.txt"
41 |
42 | if [[ -f "$source_data_test_file" ]] ; then
43 | have_source_data="1"
44 | source_data_msg="Have ../$source_data_dir directory contents, running scripts that use it."
45 | else
46 | have_source_data="0"
47 | source_data_msg="Did not find ./$source_data_dir directory contents. Skipping scripts that use it."
48 | fi
49 |
50 | echo "sending output to $dest"
51 | echo "$source_data_msg"
52 |
53 |
54 | if [[ "$output_mode" == 'f' ]] ; then
55 | # make sure text output directory exists and is empty
56 | if [[ ! -d "$txt_output_dir" ]]; then
57 | mkdir $txt_output_dir
58 | echo "created $txt_output_dir"
59 | else
60 | rm -f $txt_output_dir/*.txt
61 | echo "cleared $txt_output_dir"
62 | fi
63 | fi
64 |
65 |
66 |
67 | function run_script {
68 | script=$1
69 | if [[ "$output_mode" == "f" ]] ; then
70 | stem=${script%.*}
71 | output_file="$stem.txt"
72 | echo "doing 'python $script > $txt_output_dir/$output_file'"
73 | python $script > $txt_output_dir/$output_file
74 | else
75 | echo "doing 'python $script'"
76 | python $script
77 | fi
78 | }
79 |
80 | # these scripts take longer to run and require source data. Run last.
81 | slow_scripts="crcns_hc-3.py crcns_ret-1.py crcns_ssc-1.py crcns_alm-1.py"
82 | all_scripts=`ls *.py`
83 |
84 | # first, run all scripts that do not take a long time
85 | for script in $all_scripts ; do
86 | if [[ $slow_scripts != *"$script"* ]]
87 | then
88 | run_script $script
89 | fi
90 | done
91 |
92 |
93 | # check if have source data (for scripts that take longer)
94 |
95 | if [[ "$have_source_data" == "0" ]] ; then
96 | echo $source_data_msg
97 | exit 0
98 | fi
99 |
100 | echo ""
101 | echo "Now running scripts that take longer..."
102 |
103 | for script in $slow_scripts ; do
104 | if [[ "$prompt_for_slow" == "y" ]] ; then
105 | yes_no "Run $script ?"
106 | if [[ "$yesno" == "y" ]] ; then
107 | run_script $script
108 | fi
109 | else
110 | run_script $script
111 | fi
112 | done
113 |
114 | echo "All done."
115 |
116 |
117 | exit 0
118 |
--------------------------------------------------------------------------------
/examples/create_scripts/timeseries-e.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python
2 | import sys
3 | from nwb import nwb_file
4 | from nwb import nwb_utils as utils
5 |
6 | """
7 | Example extending the format: using MyNewTimeSeries type.
8 |
9 | This example uses an extension specified in file "examples/e-timeseries.py".
10 | The extension specified in that file defines a new type of TimeSeries
11 | (named "MyNewTimeSeries"), which illustrates how to create a new TimeSeries
12 | type. The code here uses the MyNewTimeSeries type to store data in
13 | /acquisition/timeseries
14 |
15 |
16 | The convention of having "e-" in front of the extension (and "-e" at the
17 | end of the create script name) is only used for these examples. Any name for the
18 | create script and extension(s) can be used as long as the actual name of the
19 | extension(s) are referenced by the create script and passed as parameters to
20 | nwb_validate.py when validating NWB files created using one or more extensions.
21 |
22 |
23 | """
24 | # create a new NWB file
25 | OUTPUT_DIR = "../created_nwb_files/"
26 | file_name = __file__[0:-3] + ".nwb"
27 |
28 | settings = {}
29 | settings["file_name"] = OUTPUT_DIR + file_name
30 | settings["identifier"] = utils.create_identifier("MyNewTimeSeries example")
31 | settings["mode"] = "w"
32 | settings["start_time"] = "2016-04-07T03:16:03.604121"
33 | settings["description"] = "Test file demonstrating using a new TimeSeries type that was defined using an extension"
34 |
35 | # specify the extension (Could be more than one. Only one used now).
36 | settings['extensions'] = ["extensions/e-timeseries.py"]
37 | f = nwb_file.open(**settings)
38 |
39 |
40 | ########################################################################
41 | # Make some sample data for the foo_timeseries
42 |
43 | data = [[1.2, 1.3, 1.4], [2.2, 2.3, 2.4], [3.2, 3.3, 3.4], [4.2, 4.3, 4.4], [5.2, 5.3, 5.4]]
44 | times = [0.1, 0.2, 0.3, 0.4, 0.5]
45 |
46 | # create an instance of MyNewTimeseries. Name of group will be "my_new_timeseries";
47 | # it will be stored in /acquisition/timeseries
48 |
49 | nts = f.make_group("mnts:", "my_new_timeseries", path="/acquisition/timeseries",
50 | attrs={"source": "source of data for my new timeseries"} )
51 | nts.set_dataset("data", data, attrs={"conversion": 1.0, "resolution": 0.001, "unit": "--unit goes here--"})
52 | nts.set_dataset("timestamps", times)
53 |
54 | # specify metadata that is part of MyNewTimeSeries type
55 | nts.set_attr("foo", "This added to attribute 'foo'")
56 | nts.set_dataset("bar", [2, 4, 5, 6, 7])
57 |
58 | # All done. Close the file
59 | f.close()
60 |
61 |
--------------------------------------------------------------------------------
/examples/create_scripts/trajectorySeries-e.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python
2 | import sys
3 | import numpy as np
4 | from nwb import nwb_file
5 | from nwb import nwb_utils as utils
6 |
7 | """
8 | Example of extending the format.
9 |
10 | This example uses an extension specified in file "examples/e-trajectorySeries.py".
11 | The extension specified in that file defines a new type of Timeseries
12 | (named "TrajectorySeries"), which stores the trajectory of a hand
13 | measured during a primate experiment with reaching behavior.
14 |
15 | The trajectories are stored from four sensors and each sensor has
16 | six degrees of freedom.
17 |
18 | TrajectorySeries is an extension of the SpatialSeries. The
19 | definition (i.e. extension) is in file extensions/e-trajectorySeries.py.
20 |
21 | This version uses datasets to specify the measurement names and values.
22 | (These could also be specified as text using the SpatialSeries reference_frame
23 | dataset, but not in a defined, machine-readable way.)
24 |
25 |
26 | """
27 | # create a new NWB file
28 | OUTPUT_DIR = "../created_nwb_files/"
29 | file_name = __file__[0:-3] + ".nwb"
30 |
31 | settings = {}
32 | settings["file_name"] = OUTPUT_DIR + file_name
33 | settings["identifier"] = utils.create_identifier("trajectorySeries example")
34 | settings["mode"] = "w"
35 | settings["start_time"] = "2016-04-07T03:16:03.604121"
36 | settings["description"] = "Test file demonstrating creating a new TimeSeries type using an extension"
37 |
38 | # specify the extension (Could be more than one. Only one used now).
39 | settings['extensions'] = ["extensions/e-trajectorySeries.py"]
40 | f = nwb_file.open(**settings)
41 |
42 |
43 | ########################################################################
44 | # Make some sample data for the foo_timeseries
45 |
46 | data = np.linspace(1., 100.0, 6*4*1000).reshape(24,1000)
47 | times = np.linspace(0.0, 60.*2., 1000)
48 |
49 | # create an instance of TrajectorySeries. Name of group will be "hand_position";
50 | # it will be stored in /acquisition/timeseries
51 |
52 | nts = f.make_group("", "hand_position", path="/acquisition/timeseries",
53 | attrs={"source": "source of data for my new timeseries"} )
54 | nts.set_dataset("data", data, attrs={"conversion": 1.0, "resolution": 0.001, "unit": "see measurements"})
55 | nts.set_dataset("timestamps", times)
56 |
57 | # specify metadata that is part of MyNewTimeSeries type
58 | measurement_names = ["s1_x", "s1_y", "s1_z", "s1_r", "s1_t", "s1_o",
59 | "s2_x", "s2_y", "s2_z", "s2_r", "s2_t", "s2_o",
60 | "s3_x", "s3_y", "s3_z", "s3_r", "s3_t", "s3_o"]
61 |
62 | nts.set_attr("measurement_names", measurement_names)
63 | measurement_units = ["meter", "meter", "meter", "radian", "radian", "radian",
64 | "meter", "meter", "meter", "radian", "radian", "radian",
65 | "meter", "meter", "meter", "radian", "radian", "radian"]
66 |
67 | nts.set_attr("measurement_units", measurement_units)
68 |
69 | # All done. Close the file
70 | f.close()
71 |
72 |
--------------------------------------------------------------------------------
/examples/create_scripts/trajectorySeries2-e.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python
2 | import sys
3 | import numpy as np
4 | from nwb import nwb_file
5 | from nwb import nwb_utils as utils
6 |
7 | """
8 | Example of extending the format.
9 |
10 | This example uses an extension specified in file "examples/e-trajectorySeries.py".
11 | The extension specified in that file defines a new type of Timeseries
12 | (named "TrajectorySeries"), which stores the trajectory of a hand
13 | measured during a primate experiment with reaching behavior.
14 |
15 | The trajectories are stored from four sensors and each sensor has
16 | six degrees of freedom.
17 |
18 | TrajectorySeries is an extension of the SpatialSeries. The
19 | definition (i.e. extension) is in file extensions/e-trajectorySeries2.py.
20 |
21 | This version specifies all the measurement units in advance in the extension.
22 |
23 | """
24 | # create a new NWB file
25 | OUTPUT_DIR = "../created_nwb_files/"
26 | file_name = __file__[0:-3] + ".nwb"
27 |
28 | settings = {}
29 | settings["file_name"] = OUTPUT_DIR + file_name
30 | settings["identifier"] = utils.create_identifier("trajectorySeries example")
31 | settings["mode"] = "w"
32 | settings["start_time"] = "2016-04-07T03:16:03.604121"
33 | settings["description"] = "Test file demonstrating creating a new TimeSeries type using an extension"
34 |
35 | # specify the extension (Could be more than one. Only one used now).
36 | settings['extensions'] = ["extensions/e-trajectorySeries2.py"]
37 | f = nwb_file.open(**settings)
38 |
39 |
40 | ########################################################################
41 | # Make some sample data
42 |
43 | data = np.linspace(1., 100.0, 6*4*1000).reshape(24,1000)
44 | times = np.linspace(0.0, 60.*2., 1000)
45 |
46 | # create an instance of TrajectorySeries. Name of group will be "hand_position";
47 | # it will be stored in /acquisition/timeseries
48 |
49 | nts = f.make_group("", "hand_position", path="/acquisition/timeseries",
50 | attrs={"source": "source of data for my new timeseries"} )
51 | nts.set_dataset("data", data, attrs={"conversion": 1.0, "resolution": 0.001,
52 | "unit": "meter and radian; see definition of dimension trajectories in format specification"})
53 | nts.set_dataset("timestamps", times)
54 |
55 | # specify meaning of variables
56 | reference_frame = ("Meaning of measurement values in array data, (e.g. sensor s1, s2, s3, s4; "
57 | "x, y, z, pitch, roll, yaw) should be described here")
58 | nts.set_dataset("reference_frame", reference_frame)
59 |
60 | # Add in sample epochs to specify the trials
61 | trial_times = [ [0.5, 1.5], [2.5, 3.0], [3.5, 4.0]]
62 |
63 | for trial_num in range(len(trial_times)):
64 | trial_name = "Trial_%03i" % (trial_num+1)
65 | start_time, stop_time = trial_times[trial_num]
66 | ep = utils.create_epoch(f, trial_name, start_time, stop_time)
67 | utils.add_epoch_ts(ep, start_time, stop_time, "hand_position", nts)
68 |
69 |
70 | # All done. Close the file
71 | f.close()
72 |
73 |
--------------------------------------------------------------------------------
/examples/created_nwb_files/0_README.txt:
--------------------------------------------------------------------------------
1 | This directory (examples/created_nwb_files) will contain NWB
2 | files that are created by running the scripts in
3 | directory ../create_scripts
4 |
5 |
--------------------------------------------------------------------------------
/examples/text_output_files/0_README.txt:
--------------------------------------------------------------------------------
1 | Directory examples/text_output_files
2 |
3 | Contains text files generated by scripts. Subdirectories are:
4 |
5 | create - generated by create_scripts
6 |
7 | validate - generated by validate scripts (in ../utility_scripts directory)
8 |
9 | doc - generated documentation files, created by scripts "make_docs.py" and "make_docs.sh"
10 | in ../utility_scripts directory.
11 |
12 |
13 |
14 |
--------------------------------------------------------------------------------
/examples/text_output_files/create/0_README.txt:
--------------------------------------------------------------------------------
1 | Directory examples/text_output_files/create
2 |
3 | Will contain text file output of running script
4 | ../../create_scripts/run_all.sh
5 |
6 |
7 |
--------------------------------------------------------------------------------
/examples/text_output_files/diff/0_README.txt:
--------------------------------------------------------------------------------
1 | Directory examples/text_output_files/diff
2 |
3 | Will contain output of running script ../../utility_scripts/h5diffci.sh
4 | and script ../../utility_scripts/make_cv_diff.sh
5 |
6 |
--------------------------------------------------------------------------------
/examples/text_output_files/doc/0_README.txt:
--------------------------------------------------------------------------------
1 | Directory examples/text_output_files/doc
2 |
3 | Will contain documentation files created by script ../../utility_scripts/make_docs.sh
4 |
5 |
--------------------------------------------------------------------------------
/examples/text_output_files/validate/0_README.txt:
--------------------------------------------------------------------------------
1 | Directory: examples/text_output_files/validate
2 |
3 | Will contain output of running ../../utility_scripts/validate_all.py
4 |
5 |
--------------------------------------------------------------------------------
/examples/utility_scripts/0_README.txt:
--------------------------------------------------------------------------------
1 | Directory examples/utility_scripts
2 |
3 | Contains:
4 |
5 | h5diffci.sh - Demonstrates using h5diffci.py utility which displays difference between HDF5 files.
6 |
7 | make_cv_diff.sh - Makes diff of output of create and validate scripts.
8 | Useful to make sure they mostly match. If not there might be an error in software.
9 |
10 | make_docs.sh - Demonstrate making documentation. (Old version, does not generate docs
11 | for all extension examples).
12 |
13 | make_docs.py - Demonstrate making documentation. (New version). To run: python make_docs.py
14 |
15 | validate_all.py - Validates all NWB files created by create scripts. To run: python validate_all.py
16 |
17 | validate_internal.sh - Demo of validating file using internally stored format specifications.
18 |
19 | check_schemas.sh - Validate schema files (nwb_core.py and example extensions).
20 |
21 | cmp_created.sh - compare (diff) nwb files with the same names in two different directories, save
22 | output in third directory.
23 |
24 | install_source_data.py - download and install example/source_data_2 directory (which is needed
25 | to run some of the scripts in ../create_scripts)
26 |
27 | make_h5sig.py - generate 'h5sig' (hdf5 signature) of nwb files that are in a directory, storing the
28 | generated signatures in an output directory.
29 |
30 | validate_others.sh - validates all nwb files in a directory, saving validation output in an output
31 | directory.
32 |
33 |
--------------------------------------------------------------------------------
/examples/utility_scripts/check_schemas.sh:
--------------------------------------------------------------------------------
1 | # Script to validate nwb_core specification and all example extensions.
2 | # Uses the nwb.check_schema utility (which in turn, uses the json-schema
3 | # specification in file meta_schema.py).
4 |
5 | # nwb.check_schema requires having jsonschema installed.
6 | # available at: https://github.com/Julian/jsonschema
7 |
8 | # to run, type ./check_schemas.sh
9 |
10 | # check core schema
11 | echo "doing: python -m nwb.check_schema N"
12 | python -m nwb.check_schema N
13 |
14 | # get list of extensions to check
15 | extensions=`ls ../create_scripts/extensions/e-*.py`
16 |
17 | # check schema for each extension
18 | for fn in $extensions
19 | do
20 | echo "doing: python -m nwb.check_schema $fn"
21 | python -m nwb.check_schema $fn
22 | done
23 |
24 | echo "All done"
25 |
26 |
--------------------------------------------------------------------------------
/examples/utility_scripts/cmp_created.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # This script compares nwb files in two directories, saving the result
4 | # (showing differences) in a third directory.
5 |
6 |
7 | nwb1=$1
8 | nwb2=$2
9 | dest=$3
10 | if [ ! -d "$nwb1" ]; then
11 | nwb1="../created_nwb_files_py2"
12 | fi
13 | if [ ! -d "$nwb2" ]; then
14 | nwb2="../created_nwb_files_py3"
15 | fi
16 | if [ ! -d "$dest" ]; then
17 | dest="../text_output_files/diff/created_nwb23"
18 | fi
19 | if [ ! -d "$nwb1" ]; then
20 | echo "Directory nwb1, '$nwb1' does not exist"
21 | exit 1
22 | fi
23 | if [ ! -d "$nwb2" ]; then
24 | echo "Directory nwb2, '$nwb2' does not exist"
25 | exit 1
26 | fi
27 | if [ ! -d "$dest" ]; then
28 | echo "Directory dest, '$dest' does not exist"
29 | exit 1
30 | fi
31 |
32 | echo "comparing $nwb1/*.nwb to $nwb2/*.nwb, saving differences in $dest"
33 | # FILES=`find $nwb1 -name "*.nwb" -exec basename {} \;`
34 | FILES=`find -H $nwb1 -name "*.nwb"`
35 | for f in $FILES
36 | do
37 | bn=`basename $f`
38 | # Substring Removal
39 | path=${f#$nwb1}
40 | f_name=${bn%.*}
41 | file2="$nwb2$path"
42 | if [ ! -f "$file2" ]; then
43 | echo "Not found: $file2"
44 | continue
45 | fi
46 | echo "doing python -m nwb.h5diffsig $f $file2 > $dest/$f_name.txt"
47 | python -m nwb.h5diffsig $f $file2 > $dest/$f_name.txt
48 | done
49 |
50 |
--------------------------------------------------------------------------------
/examples/utility_scripts/h5diffsig.sh:
--------------------------------------------------------------------------------
1 |
2 | echo "Demonstrate running HDF5 diff utility 'h5diffsig.py'"
3 |
4 |
5 | echo "This demo finds difference between:"
6 | echo " ../created_nwb_files/motion_correction.nwb, and:"
7 | echo " ../created_nwb_files/motion_correction2.nwb"
8 | echo
9 | echo "The difference is displayed on the screen, and saved"
10 | echo "in file: ../text_output_files/diff/motion_correction1_2_diff.txt"
11 |
12 |
13 | python -m nwb.h5diffsig ../created_nwb_files/motion_correction.nwb ../created_nwb_files/motion_correction2.nwb > \
14 | ../text_output_files/diff/motion_correction1_2_diff.txt
15 |
16 | cat ../text_output_files/diff/motion_correction1_2_diff.txt
17 |
18 |
19 |
20 |
21 |
--------------------------------------------------------------------------------
/examples/utility_scripts/make_cv_diff.sh:
--------------------------------------------------------------------------------
1 | echo "This script creates a file showing the difference between"
2 | echo "the output of creating the NWB files and validating them."
3 | echo
4 | echo "It should only be run after running scripts:"
5 | echo "../create_scripts/run_all.sh and validate_all.py"
6 | echo
7 | echo "Except for some warning messages, and any extra output"
8 | echo "generated by create scripts, the result of the diff should"
9 | echo "show that the outputs are mostly the same. If they are not,"
10 | echo "the difference may indicate there is a bug in the software."
11 |
12 | diff -r ../text_output_files/create -x 0_README.txt ../text_output_files/validate > ../text_output_files/diff/cv_diff.txt
13 |
14 | echo
15 | echo "done creating diff."
16 | echo "It is stored in file '../text_output_files/diff/cv_diff.txt'"
17 |
--------------------------------------------------------------------------------
/examples/utility_scripts/make_docs.py:
--------------------------------------------------------------------------------
1 | # This script demonstrates creating documentation for the NWB format.
2 | #
3 | # Documentation may be created for the core format alone, or the
4 | # core format combined with one or more extensions.
5 | #
6 | # The documentation is generated from the format specifications.
7 | # The format specifications can be in standalone '.py' files or
8 | # can be loaded from a created NWB file. The latter method
9 | # is useful for generating documentation that is guaranteed to
10 | # describe a particular NWB file.
11 |
12 |
13 | import sys
14 | import glob
15 | import os, fnmatch
16 | from subprocess import check_output
17 | from sys import version_info # py3
18 |
19 | # global constants
20 | txt_output_dir="../text_output_files/doc"
21 | nwb_dir="../created_nwb_files"
22 |
23 |
24 | # py3: convert bytes to str (unicode) if Python 3
25 | def make_str3(val):
26 | if isinstance(val, bytes) and version_info[0] > 2:
27 | return val.decode('utf-8')
28 | else:
29 | return val
30 |
31 |
32 | def do_command(cmd, output_file):
33 | """ execute command in cmd, write to output_file"""
34 | global txt_output_dir
35 | output_path = os.path.join(txt_output_dir, output_file)
36 | print ("doing: %s > %s" % (cmd, output_path))
37 | output = check_output(cmd.split(" "))
38 | with open(output_path, "w") as f:
39 | f.write(make_str3(output))
40 |
41 |
42 | print ("documentation for core, e.g. file 'nwb_core.py'")
43 | cmd = "python -m nwb.make_docs"
44 | out = "core_doc.html"
45 | do_command(cmd, out)
46 |
47 | print ("documentation using two external extensions")
48 | cmd = ("python -m nwb.make_docs ../create_scripts/extensions/e-general.py,"
49 | "../create_scripts/extensions/e-intracellular.py")
50 | out = "core_intra_gen.html"
51 | do_command(cmd, out)
52 |
53 | print ("documentation using schema's stored in created NWB files")
54 | nwb_files = glob.glob(os.path.join(nwb_dir, "*-e.nwb"))
55 | for file_path in nwb_files:
56 | base_name = os.path.basename(file_path)[0:-4]
57 | cmd = "python -m nwb.make_docs %s" % file_path
58 | out = "%s.html" % base_name
59 | do_command(cmd,out)
60 |
61 | sys.exit()
62 |
63 |
--------------------------------------------------------------------------------
/examples/utility_scripts/make_docs.sh:
--------------------------------------------------------------------------------
1 |
2 | echo
3 | echo "This script demonstrates creating documentation for the NWB format."
4 | echo
5 | echo "Documentation may be created for the core format alone, or the"
6 | echo "core format combined with one or more extensions."
7 | echo
8 | echo "The documentation is generated from the format specifications."
9 | echo "The format specifications can be in standalone '.py' files or"
10 | echo "can be loaded from a created NWB file. The latter method "
11 | echo "is useful for generating documentation that is guaranteed to"
12 | echo "describe a particular NWB file."
13 |
14 | echo
15 | echo "Making core doc by doing:"
16 | cmd="python -m nwb.make_docs > ../text_output_files/doc/core_doc.html"
17 | echo "$cmd"
18 | python -m nwb.make_docs > ../text_output_files/doc/core_doc.html
19 |
20 | echo
21 | echo "Making core doc with two extensions by doing:"
22 | cmd="python -m nwb.make_docs ../create_scripts/extensions/e-intracellular.py,../create_scripts/extensions/e-general.py > \
23 | ../text_output_files/doc/core_intra_gen.html"
24 | echo "$cmd"
25 | python -m nwb.make_docs ../create_scripts/extensions/e-intracellular.py,../create_scripts/extensions/e-general.py > \
26 | ../text_output_files/doc/core_intra_gen.html
27 |
28 | echo
29 | echo "Making documentation from created NWB file by doing:"
30 | cmd="python -m nwb.make_docs ../created_nwb_files/interface-e.nwb > ../text_output_files/doc/interface-e.html"
31 | echo "$cmd"
32 | python -m nwb.make_docs ../created_nwb_files/interface-e.nwb > ../text_output_files/doc/interface-e.html
33 |
34 |
--------------------------------------------------------------------------------
/examples/utility_scripts/make_pdf_doc.sh:
--------------------------------------------------------------------------------
1 | # this script generates PDF files from html files, adding page numbers
2 | # it requires the "wkhtmltopdf" utility be installed
3 |
4 | if [ "$#" -ne 2 ]; then
5 | script_name=${0##*/}
6 | echo "Requires two command-line arguments:"
7 | echo " <doc_dir> <base_name>"
8 | echo "Example:"
9 | echo "./$script_name ../text_output_files/doc/ nwb_file_format_specification_1.0.3_beta"
10 | exit 1
11 | fi
12 |
13 | docdir=$1
14 | base_name=$2
15 | html="$base_name.html"
16 | pdf="$base_name.pdf"
17 |
18 | echo "doc dir=$docdir"
19 | echo "html file=$html"
20 | echo "pdf file=$pdf"
21 |
22 | wd=`pwd`
23 | cd $docdir
24 | wkhtmltopdf --footer-center [page]/[topage] $html $pdf
25 | cd $wd
26 |
27 |
28 |
--------------------------------------------------------------------------------
/examples/utility_scripts/validate_behavior.py:
--------------------------------------------------------------------------------
1 | # script demonstrating use of validate_file routine called from Python.
2 |
3 | from nwb import nwb_validate as nwbv
4 |
5 | file = "../created_nwb_files/behavior.nwb"
6 | validation_result = nwbv.validate_file(file, verbosity="none")
7 |
8 | print "validated", file
9 | print "validation_result=", validation_result
10 | print "all done"
11 |
12 |
--------------------------------------------------------------------------------
/examples/utility_scripts/validate_internal.sh:
--------------------------------------------------------------------------------
1 | echo
2 | echo "This script demonstrates validating an NWB file using the format"
3 | echo "specifications stored within the file."
4 |
5 | if [[ ! -e "../created_nwb_files/annotations.nwb" ]] ; then
6 | echo "This script requires file ../created_nwb_files/annotations.nwb"
7 | echo "do: python ../create_scripts/annotations.py to create that file"
8 | echo "before running this script"
9 | exit 1
10 | fi
11 |
12 | # Note the two dashes after the file name. First dash is for
13 | # extensions, second is for the core specification (nwb_core.py).
14 |
15 | python -m nwb.nwb_validate ../created_nwb_files/annotations.nwb - -
16 |
17 |
18 |
19 |
20 |
21 |
--------------------------------------------------------------------------------
/examples/utility_scripts/validate_others.sh:
--------------------------------------------------------------------------------
1 | # script to validate all NWB files stored in a directory, sending
2 | # output to another directory.
3 | # Validation is done using the external "nwb_core.py" definition and
4 | # not using any schema's included in the file. So it will not work if
5 | # the nwb files were created using extensions. To validate files
6 | # that use extensions, either the extensions need to be specified on
7 | # the command line, or the specifications stored in the file
8 | # need to be used. Examples of validations using these methods
9 | # are respectively in the scripts "validate_all.py" and
10 | # "validate_internal.sh"
11 |
12 | if [[ ! "$#" -eq 2 ]] ; then
13 | echo "Format is:"
14 | echo "$0 <nwb_dir> <out_dir>"
15 | echo "where:"
16 | echo " <nwb_dir> - directory containing nwb files"
17 | echo " <out_dir> - directory for validation output"
18 | exit 1
19 | fi
20 |
21 | nwb_dir=$1
22 | out_dir=$2
23 |
24 | if [[ ! -d "$nwb_dir" ]] ; then
25 | echo "<nwb_dir> does not exist: $nwb_dir"
26 | exit 1
27 | fi
28 |
29 |
30 | if [[ ! -d "$out_dir" ]] ; then
31 | echo "<out_dir> does not exist: $out_dir"
32 | exit 1
33 | fi
34 |
35 | # get list of nwb files
36 | files=`ls $nwb_dir/*.nwb`
37 |
38 | if [[ "$files" == "" ]] ; then
39 | echo "No files with extension '.nwb' found in : $nwb_dir"
40 | exit 1
41 | fi
42 |
43 | # validate each file (module is nwb.nwb_validate, as used by validate_internal.sh)
44 | for nwb_file in $files
45 | do
46 | fname_and_extension=${nwb_file##*/}
47 | fn=${fname_and_extension%.nwb}
48 | # fn="${nwb_file%.*}"
49 | echo "doing: python -m nwb.nwb_validate $nwb_dir/$fn.nwb > $out_dir/$fn.txt"
50 | python -m nwb.nwb_validate $nwb_dir/$fn.nwb > $out_dir/$fn.txt
51 | # if [ $? -ne 0 ]; then
52 | # echo "python -m nwb.nwb_validate $nwb_dir/$fn.nwb FAILED"
53 | # exit 1
54 | # fi
55 | done
56 |
57 | # echo "All done"
58 |
59 |
--------------------------------------------------------------------------------
/matlab_bridge/0_README.txt:
--------------------------------------------------------------------------------
1 | 0_README.txt for NWB matlab_bridge API
2 |
3 | The matlab_bridge implements a matlab API to the NWB format by interfacing
4 | matlab to the Python NWB API.
5 |
6 | See file 0_INSTALL.txt for instructions about how to install the matlab_bridge
7 | API.
8 |
9 | Directories are:
10 |
11 | matlab_examples - matlab scripts used to create sample NWB files
12 |
13 | matlab_unittest - matlab unittests
14 |
15 | matlab_bridge_api - software implementing the matlab bridge API
16 |
17 |
--------------------------------------------------------------------------------
/matlab_bridge/matlab_bridge_api/+h5g8/dataset.m:
--------------------------------------------------------------------------------
1 | classdef dataset < h5g8.node
2 | %Class for nwb dataset objects
3 | methods
4 | function obj = dataset(ml_f, fg_dataset)
5 | % create a matlab nwb_dataset that wraps the Python fg_dataset
6 | obj = obj@h5g8.node(ml_f, fg_dataset);
7 | end
8 | end
9 | end
10 |
--------------------------------------------------------------------------------
/matlab_bridge/matlab_bridge_api/+h5g8/node.m:
--------------------------------------------------------------------------------
1 | classdef node < handle
2 | % This class (node) is group or dataset object in matlab
3 | % Corresponds to Node in h5gate (Python)
4 |
5 | properties
6 | ml_file % matlab file object
7 | fg_node % file gate (Python, h5gate) node object
8 | end
9 |
10 | methods
11 | function obj = node(ml_file, fg_node)
12 | % save reference matlab file and h5gate (python) node
13 | obj.ml_file = ml_file;
14 | obj.fg_node = fg_node;
15 | end
16 | function [robj] = set_attr(obj, aid, value, varargin)
17 | % Set attribute with key aid to value 'value'
18 | % if custom true, don't give warning about custom attribute
19 | % parameters for h5gate set_attr:
20 | % (self, aid, value, custom=''):
21 | arg_names = { 'custom'};
22 | arg_types = { 'logical' };
23 | arg_default={ false };
24 | arg_vals = obj.ml_file.parse_arguments(varargin, arg_names, arg_types, arg_default);
25 | custom = arg_vals.custom;
26 | % flatten attribute value if necessary
27 | [val1d, shape] = obj.ml_file.flatten(value);
28 | % attrs = {aid, value};
29 | % [atrs1d, attrs_shape] = obj.ml_file.flatten_attrs(attrs); % modifies attrs
30 | % fvalue = atrs1d(2); % flattened value
31 | % call python code in h5g8
32 | % obj.fg_node.set_attr(aid, value, custom);
33 | obj.fg_node.set_attr(aid, val1d, custom, shape);
34 | % this call the matlab group constructor
35 | % obj.ml_file.process_h5commands()
36 | % return original node object to allow stacking calls to this
37 | robj = obj;
38 | end
39 | end
40 |
41 | end
42 |
43 |
--------------------------------------------------------------------------------
/matlab_bridge/matlab_bridge_api/nwb_file.m:
--------------------------------------------------------------------------------
1 | function [f] = nwb_file(varargin)
2 | % parameters to Python nwb_file.open are:
3 | % start_time=None, mode="w-", identifier=None, description=None, core_spec="nwb_core.py", extensions=[], default_ns="core", keep_original=False, auto_compress=True)
4 | arg_names = { 'file_name', 'start_time', 'mode', 'identifier', 'description', 'core_spec', 'extensions', 'default_ns', 'keep_original', 'auto_compress', 'verbosity' };
5 | arg_types = { 'char', 'char', 'char', 'char', 'char', 'char', 'cell', 'char', 'logical', 'logical', 'char'};
6 | arg_default={ '', '', 'w-', '', '', 'nwb_core.py', {}, 'core', false, true, 'all' };
7 | arg_vals = parse_arguments(varargin, arg_names, arg_types, arg_default);
8 | % check for correct parameter values
9 | file_name = arg_vals.file_name;
10 | if isempty(file_name)
11 | error('Must specify file name, e.g. "test.nwb"')
12 | end
13 | % fprintf('in nwb_file, file_name=%s', file_name)
14 | mode = arg_vals.mode;
15 | valid_modes = {'r', 'r+', 'w', 'w-', 'a'};
16 | if ~isa(mode, 'char') || ~(nnz(strcmp(mode, valid_modes)) == 1)
17 | error('Invalid mode (%s). Must be one of: %s', mode, strjoin(valid_modes,', '))
18 | end
19 | file_exists = (exist(file_name, 'file') == 2);
20 | if ~file_exists && (nnz(strcmp(mode, {'r', 'r+'})) == 1)
21 | error('File not found (%s). File must exist to use mode "r" or "r+"', file_name)
22 | end
23 | creating_file = (strcmp(mode, 'w') || (nnz(strcmp(mode, {'a', 'w-'}))==1 && ~file_exists));
24 | identifier = arg_vals.identifier;
25 | description = arg_vals.description;
26 | if creating_file
27 | % creating a new file. identifier and description required.
28 | if ~isa(identifier, 'char') || strcmp(identifier, '')
29 | error('When creating a file, "identifier" must be specified and be a string')
30 | end
31 | if ~isa(description, 'char') || strcmp(description, '') % fix: was testing identifier instead of description
32 | error('When creating a file, "description" must be specified and be a string')
33 | end
34 | end
35 | extensions = arg_vals.extensions;
36 | if ~iscellstr(extensions)
37 | error('extensions must be a cell array of strings')
38 | end
39 | % setup options for h5gate
40 | % previously had: 'custom_node_identifier', {'schema_id', 'Custom'}, ...
41 | options = {'mode', mode, ...
42 | 'keep_original', arg_vals.keep_original, ...
43 | 'auto_compress', arg_vals.auto_compress, ...
44 | 'custom_node_identifier', {'neurodata_type', 'Custom'}, ...
45 | 'verbosity', arg_vals.verbosity};
46 | if strcmp(arg_vals.core_spec, '-')
47 | spec_files = extensions;
48 | else
49 | spec_files = {extensions{:}, arg_vals.core_spec};
50 | end
51 | % open file
52 | f = h5g8.file(file_name, spec_files, arg_vals.default_ns, options);
53 | % set initial metadata
54 | py.nwb.nwb_init.nwb_init(f.fg_file, mode, arg_vals.start_time, identifier, description, creating_file)
55 | end
56 |
57 |
--------------------------------------------------------------------------------
/matlab_bridge/matlab_bridge_api/parse_arguments.m:
--------------------------------------------------------------------------------
1 | function [arg_vals] = parse_arguments(args, arg_names, arg_types, arg_default)
2 | % parse variable arguments passed to function, return
3 | % values for each defined in arg_defs. arg_names has argument
4 | % names, arg_types, the expected type, either 'char' (string) or 'cell'
5 | % 'cell' is cell array used for list of alternate key, values
6 | % set up default values to empty string or empty cell array
7 | arg_vals = struct;
8 | for i=1:numel(arg_names)
9 | arg_vals.(arg_names{i}) = arg_default{i};
10 | end
11 | found_named_arg = '';
12 | i = 1;
13 | while i <= numel(args)
14 | arg = args{i};
15 | if ischar(arg) && ismember(arg, arg_names)
16 | % found named argument
17 | val = args{i+1};
18 | [~, idx] = ismember(arg, arg_names);
19 | if ~strcmp(class(val), arg_types{idx})
20 | error('Unexpected type (%s) for parameter "%s", expecting "%s"', ...
21 | class(val), arg, arg_types{idx})
22 | end
23 | found_named_arg = arg;
24 | arg_vals.(arg) = val;
25 | i = i + 2;
26 | continue
27 | end
28 | if found_named_arg
29 | error('Unnamed argument appears after named argument "%s"', ...
30 | found_named_arg)
31 | end
32 | % maybe found valid un-named argument
33 | if i > numel(arg_names)
34 | error('Too many un-named arguments in function call');
35 | end
36 | if ~strcmp(class(arg), arg_types{i})
37 | error('Unnamed argument "%s" is type "%s"; expected type "%s"', ...
38 | arg_names{i}, class(arg), arg_types{i});
39 | end
40 | % seems to be valid, save it
41 | arg_vals.(arg_names{i}) = arg;
42 | i = i + 1;
43 | end
44 | end
45 |
46 |
47 |
--------------------------------------------------------------------------------
/matlab_bridge/matlab_bridge_api/test_nwb.m:
--------------------------------------------------------------------------------
1 | function [ result ] = test_nwb( )
2 | % - tests calling h5gate matlab interface
3 | OUTPUT_DIR = './'; % '../created_nwb_files/';
4 | file_name = 'test_ml.nwb';
5 | settings = { ...
6 | 'file_name', [OUTPUT_DIR file_name], ...
7 | 'identifier', char(py.nwb.nwb_utils.create_identifier('matlab test')), ...
8 | 'mode', 'w', ...
9 | 'start_time', '2016-04-07T03:16:03.604121', ...
10 | 'description','Test file created using matlab bridge'};
11 | f = nwb_file(settings{:});
12 | % f.set_dataset('experimenter', 'Jeff Teeters');
13 | f.set_dataset('experimenter', 'Jeff Teeters');
14 | g = f.make_group('', 'shank_01','attrs', {'happy', 'day'});
15 | g.set_dataset('description', 'Test shank, with four recording sites');
16 | m = f.make_group('', 'shank_01', 'attrs', {'Its', 'working'} );
17 | lfp = m.make_group('LFP');
18 | ts = lfp.make_group('', 'LFP_Timeseries');
19 | ts.set_attr('source', 'moms backyard');
20 | ts.set_attr('source2', 'moms new backyard with bunnies');
21 | data = [ 1.2 1.3 1.4; 1.5 1.6 1.7 ];
22 | % data = zeros(10000, 1);
23 | times = [ 0.12, 0.13, 0.14, 0.15, 0.16 ];
24 | d = ts.set_dataset('data', data, 'compress', true);
25 | d.set_attr('unit', 'light_years');
26 | ts.set_dataset('timestamps', times);
27 | ts.set_dataset('num_samples', int64(5));
28 | % make some custom datasets
29 | g = f.make_custom_group('custom_subject_info', 'path', '/', 'attrs', {'upc', 'polly45'});
30 | g.set_custom_dataset('dog_breed', 'Saint Bernard');
31 | g.make_custom_group('lab_cats', 'attrs', {'info', 'this for help with cats', 'more_info', 'many more cats'});
32 | f.set_custom_dataset('custom_dataset', 'blue_sky', 'path', '/cust_info', 'attrs', {'info', 'about the blue sky'});
33 | g2 = f.get_node('/general/experimenter');
34 | fprintf('found ml_node, path is %s\n',char(g2.fg_node.full_path));
35 | f.close();
36 | fprintf('done');
37 | result = 0;
38 | end
39 |
40 |
--------------------------------------------------------------------------------
/matlab_bridge/matlab_examples/0_README.txt:
--------------------------------------------------------------------------------
1 | 0_README.txt for NWB MatLab examples
2 |
3 | Directories are:
4 |
5 | create_scripts - scripts used to create sample NWB files
6 |
7 | created_nwb_files - location of where example NWB files will be created
8 |
9 | text_output_files - location for storing output of unittests and examples
10 | This directory is created by scripts that run the
11 | unittests and examples.
12 |
13 | ../../examples/source_data_2 (Not located here, but needs to be installed
14 | in order to run the crcns_alm_1.m example script). This directory is
15 | installed by the "install_source_data.py" script in directory
16 | ../../examples/utility_scripts.
17 |
18 | To run the create_scripts, go into the directory (in Matlab) and run
19 | the script by typing the name into the command line.
20 |
21 | *** These require that the matlab NWB bridge is setup. See the instructions
22 | in the 0_INSTALL.txt file in the directory above this one.
23 |
24 |
--------------------------------------------------------------------------------
/matlab_bridge/matlab_examples/create_scripts/0_README.txt:
--------------------------------------------------------------------------------
1 | This directory "matlab_bridge/matlab_examples/create_scripts" contains example
2 | matlab scripts to create example NWB files.
3 |
4 | To run these scripts first install the matlab bridge as described in
5 | file matlab_bridge/0_INSTALL.txt
6 |
7 | Then to run the scripts, cd to this directory in matlab. Type
8 | the name of each script to run it, or "run_all_scripts" to
9 | run them all.
10 |
11 | Several types of scripts require additional files. These are:
12 |
13 |
14 | (1) Scripts that require input data.
15 |
16 | Scripts with names starting with "crcns" require data in the
17 | "../source_data" directory. This data must be downloaded
18 | and placed inside the ../source_data directory. Instructions
19 | for doing this are in the examples/0_README.txt file
20 | (i.e. the readme in the directory above this one).
21 |
22 |
23 | (2) Scripts that require extensions.
24 |
25 | Scripts that have name ending with "-e.py" require one or more "extensions"
26 | (files that define extensions to the NWB format). The extensions are
27 | stored in directory "extensions". Usually the name of the extension
28 | used with the script will be the same as the name of the create script,
29 | except "e-" will be in front of the extension.
30 |
31 | The convention of having "e-" in front of the extension (and "-e" at the
32 | end of the create script name) is only used for these examples. Any name for the
33 | create script and extension(s) can be used as long as the actual name of the
34 | extension(s) are referenced by the create script and passed as parameters to
35 | nwb_validate.py when validating NWB files created using one or more extensions.
36 |
--------------------------------------------------------------------------------
/matlab_bridge/matlab_examples/create_scripts/analysis_e.m:
--------------------------------------------------------------------------------
1 | function analysis_e()
2 |
3 | % This example illustrates using an extension to store data in the the /analysis group.
4 | % The /analysis group is reserved for results of lab-specific analysis.
5 | %
6 | % The extension definition is in file "extensions/e-analysis.py".
7 | %
8 | % The example is based on the contents of the Allen Institute for Brain Science
9 | % Cell Types database NWB files.
10 |
11 |
12 | OUTPUT_DIR = '../created_nwb_files/';
13 | script_base_name = mfilename();
14 | nwb_file_name = [script_base_name '.nwb'];
15 | nwb_file_path = [OUTPUT_DIR nwb_file_name];
16 |
17 | % create a new NWB file
18 | settings = { ...
19 | 'file_name', nwb_file_path, ...
20 | 'identifier', ...
21 | char(py.nwb.nwb_utils.create_identifier('abstract-feature example')), ...
22 | 'mode', 'w', ...
23 | 'start_time', 'Sat Jul 04 2015 3:14:16', ...
24 | 'description',['Test file demonstrating storing data in the /analysis' ...
25 | ' group that is defined by an extension.'] ...
26 | 'extensions', {'../../../examples/create_scripts/extensions/e-analysis.py'} ...
27 | };
28 |
29 | fprintf('Creating %s', nwb_file_path);
30 | f = nwb_file(settings{:});
31 |
32 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
33 | % This example, stores spike times for multiple sweeps
34 | % create the group for the spike times
35 | % The group ("aibs_spike_times") is defined in the extension
36 |
37 | ast = f.make_group('aibs_spike_times');
38 |
39 | % some sample data
40 | times = [1.1, 1.2, 1.3, 1.4, 1.5];
41 |
42 | % create some sample sweeps
43 | for i = 1:5
44 | sweep_name = sprintf('sweep_%03i', i);
45 | ast.set_dataset('', times, 'name', sweep_name);
46 | end
47 |
48 | % all done; close the file
49 | f.close()
50 |
51 |
--------------------------------------------------------------------------------
/matlab_bridge/matlab_examples/create_scripts/run_all_examples.m:
--------------------------------------------------------------------------------
1 | function run_all_examples()
2 | % runs all examples. Created nwb files are stored
3 | % in directory ../created_nwb_files
4 |
5 | % if nargin < 1
6 | % % default 'none' to display no validation information (other than if the test
7 | % % passes). Other options are: 'all', 'summary'
8 | % verbosity = 'none';
9 | % end
10 |
11 | % directory for text output
12 | create_dir = '../text_output_files/create';
13 | if ~exist(create_dir, 'dir')
14 | mkdir(create_dir);
15 | end
16 |
17 | % find all examples
18 | mfiles = dir('*.m');
19 | script_name = mfilename();
20 | % run each example
21 | for i = 1:length(mfiles)
22 | mfile = mfiles(i).name;
23 | [~, name] = fileparts(mfile); % strip off .m extension
24 | if ~strcmp(name, script_name)
25 | % not this script, so run it
26 | log_file = [create_dir '/' name '.txt' ];
27 | cmd = [ name, '();' ];
28 | fprintf('\n========= Running %s\n', cmd)
29 | [T] = evalc(cmd);
30 | % save output; '%s' so '%' chars in captured output are not treated as format specs
31 | fileID = fopen(log_file,'w');
32 | fprintf(fileID, '%s', T);
33 | fclose(fileID);
34 | % diary(log_file)
35 | % run(cmd)
36 | % diary off
37 | end
38 | end
39 |
40 | % fprintf('\n========= Running abstract_feature()\n')
41 | % abstract_feature()
42 | % fprintf('\n========= Running analysis-e()\n')
43 | % analysis-e()
44 | % fprintf('\n========= Running crcns_alm_1()\n')
45 | % crcns_alm_1()
46 |
47 | end
48 |
49 |
--------------------------------------------------------------------------------
/matlab_bridge/matlab_examples/created_nwb_files/0_README.txt:
--------------------------------------------------------------------------------
1 | This directory, (matlab_bridge/matlab_examples/created_NWB_files) will
2 | contain NWB files that are created by running the scripts in
3 | directory ../create_scripts
4 |
5 |
--------------------------------------------------------------------------------
/matlab_bridge/matlab_unittest/run_all_tests.m:
--------------------------------------------------------------------------------
1 | function run_all_tests( verbosity )
2 | %Run all the tests, Set input verbosity == 'all' to display
3 | % validation output for each NWB file created
4 |
5 | if nargin < 1
6 | % default 'none' to display no validation information (other than if the test
7 | % passes). Other options are: 'all', 'summary'
8 | verbosity = 'none';
9 | end
10 |
11 | fprintf('pyversion is:\n')
12 | pyversion
13 |
14 | % directory for text output
15 | output_dir = '../matlab_examples/text_output_files';
16 | if ~exist(output_dir, 'dir')
17 | mkdir(output_dir);
18 | end
19 |
20 | % write log of running all tests to the following file
21 | log_file='../matlab_examples/text_output_files/unittest_results.txt';
22 | % delete '../matlab_examples/text_output_files/unittest_results.txt'
23 | if exist(log_file, 'file')==2
24 | delete(log_file);
25 | end
26 | diary(log_file)
27 |
28 | % find all test
29 | tests = dir('t_*.m');
30 | script_name = mfilename();
31 | % run each test
32 | for i = 1:length(tests)
33 | test = tests(i).name;
34 | [~, name] = fileparts(test); % strip off .m extension
35 | if ~strcmp(name, script_name)
36 | % file is not this script, run it
37 | feval(name, verbosity);
38 | end
39 | end
40 | diary off
41 | end
42 |
43 |
--------------------------------------------------------------------------------
/matlab_bridge/matlab_unittest/t_annotation.m:
--------------------------------------------------------------------------------
function t_annotation(verbosity)

% Unit test: creation of an annotation time series.
% TESTS AnnotationSeries creation
% TESTS TimeSeries ancestry
%
% verbosity - optional; 'all' (default), 'none' or 'summary'

script_base_name = mfilename();
script_name = [script_base_name '.m'];
% test output file: t_xxx.m writes s_xxx.nwb
fname = [regexprep(script_base_name, '^t_', 's_') '.nwb'];
if nargin < 1
    % default, display all information. Other options are: 'none', 'summary'
    verbosity = 'all';
end


function test_annotation_series()
    % create the series in both standard locations and verify that both
    % the TimeSeries ancestry and the AnnotationSeries type are recorded
    name = 'annot';
    create_annotation_series(fname, name, 'acquisition/timeseries');
    test_utils.verify_timeseries(fname, name, 'acquisition/timeseries', 'TimeSeries');
    test_utils.verify_timeseries(fname, name, 'acquisition/timeseries', 'AnnotationSeries');
    create_annotation_series(fname, name, 'stimulus/presentation');
    test_utils.verify_timeseries(fname, name, 'stimulus/presentation', 'TimeSeries');
    test_utils.verify_timeseries(fname, name, 'stimulus/presentation', 'AnnotationSeries');
end

function create_annotation_series(fname, name, target)
    % write a new NWB file <fname> containing one AnnotationSeries <name>
    % under group path /<target>
    settings = { ...
        'file_name', fname, 'verbosity', verbosity, ...
        'identifier', nwb_utils.create_identifier('annotation example'), ...
        'mode', 'w', ...
        'start_time', 'Sat Jul 04 2015 3:14:16', ...
        'description','Test file with AnnotationSeries' ...
    };
    f = nwb_file(settings{:});
    % MATLAB equivalent of: neurodata.create_timeseries('AnnotationSeries', name, target)
    % NOTE(review): the empty first argument looks like a stripped
    % '<template>' name from transcription -- confirm against original source
    annot = f.make_group('', name, 'path', ['/', target]);

    annot.set_attr('description', 'This is an AnnotationSeries with sample data');
    % typo 'desscription' fixed so the text matches the same attribute in t_append
    annot.set_attr('comments', 'The comment and description fields can store arbitrary human-readable data');
    annot.set_attr('source', 'Observation of Dr. J Doe');

    % pretend annotation data as {text, time} pairs; all time is in seconds
    andata = {};
    andata{end+1} = {'Rat in bed, beginning sleep 1', 15.0};
    andata{end+1} = {'Rat placed in enclosure, start run 1', 933.0};
    andata{end+1} = {'Rat taken out of enclosure, end run 1', 1456.0};
    andata{end+1} = {'Rat in bed, start sleep 2', 1461.0};
    andata{end+1} = {'Rat placed in enclosure, start run 2', 2401.0};
    andata{end+1} = {'Rat taken out of enclosure, end run 2', 3210.0};
    andata{end+1} = {'Rat in bed, start sleep 3', 3218.0};
    andata{end+1} = {'End sleep 3', 4193.0};
    % split pairs into parallel arrays of annotation strings and timestamps
    shape = [1, length(andata)];
    annotations = cell(shape);
    times = zeros(shape);
    for i = 1:length(andata)
        annotations{i} = andata{i}{1};
        times(i) = andata{i}{2};
    end
    annot.set_dataset('data', annotations);
    annot.set_dataset('timestamps', times);
    f.close()
end

test_annotation_series()
fprintf('%s PASSED\n', script_name);
end
81 |
82 |
--------------------------------------------------------------------------------
/matlab_bridge/matlab_unittest/t_append.m:
--------------------------------------------------------------------------------
function t_append(verbosity)

% Unit test: opening a file in append mode.
% TESTS modifying existing file
% TESTS creation of modification_time
% TESTS addition of TimeSeries to existing file
% TESTS preservation of TimeSeries when file modified
%
% verbosity - optional; 'all' (default), 'none' or 'summary'

script_base_name = mfilename();
script_name = [script_base_name '.m'];
fname = [regexprep(script_base_name, '^t_', 's_') '.nwb'];
if nargin < 1
    % default, display all information. Other options are: 'none', 'summary'
    verbosity = 'all';
end

function test_append()
    name1 = 'annot1';
    name2 = 'annot2';
    % first call creates the file; second re-opens it in append ('r+') mode
    create_annotation_series(fname, name1, 'acquisition/timeseries', true);
    create_annotation_series(fname, name2, 'acquisition/timeseries', false);
    % both series must be present after the second (append) write
    test_utils.verify_timeseries(fname, name1, 'acquisition/timeseries', 'TimeSeries');
    test_utils.verify_timeseries(fname, name1, 'acquisition/timeseries', 'AnnotationSeries');
    test_utils.verify_timeseries(fname, name2, 'acquisition/timeseries', 'TimeSeries');
    test_utils.verify_timeseries(fname, name2, 'acquisition/timeseries', 'AnnotationSeries');
    % ut.verify_attribute_present(fname, 'file_create_date', 'modification_time')
end


function create_annotation_series(fname, name, target, newfile)
    % write (newfile == true) or append (newfile == false) an
    % AnnotationSeries <name> at /<target> in file <fname>
    if newfile
        % consistency fix: was char(py.nwb.nwb_utils.create_identifier(...)),
        % a direct Python-bridge call; every other test in this suite uses
        % the nwb_utils wrapper
        settings = {'file_name', fname, 'verbosity', verbosity, ...
            'identifier', ...
            nwb_utils.create_identifier('append example'), ...
            'mode', 'w', ...
            'start_time', 'Sat Jul 04 2015 3:14:16', ...
            'description','Test append file'};
    else
        settings = {'file_name', fname, 'mode', 'r+', 'verbosity', verbosity};
    end
    f = nwb_file(settings{:});

    % MATLAB equivalent of: neurodata.create_timeseries('AnnotationSeries', name, target)
    % NOTE(review): empty first argument appears to be a stripped '<template>'
    % name from transcription -- confirm against original source
    annot = f.make_group('', name, 'path', ['/', target]);
    annot.set_attr('description', sprintf('This is an AnnotationSeries ''%s'' with sample data', name));
    annot.set_attr('comments', 'The comment and description fields can store arbitrary human-readable data');
    annot.set_attr('source', 'Observation of Dr. J Doe');

    % pretend annotation data as {text, time} pairs; all time is in seconds
    andata = {};
    andata{end+1} = {'Rat in bed, beginning sleep 1', 15.0};
    andata{end+1} = {'Rat placed in enclosure, start run 1', 933.0};
    andata{end+1} = {'Rat taken out of enclosure, end run 1', 1456.0};
    andata{end+1} = {'Rat in bed, start sleep 2', 1461.0};
    andata{end+1} = {'Rat placed in enclosure, start run 2', 2401.0};
    andata{end+1} = {'Rat taken out of enclosure, end run 2', 3210.0};
    andata{end+1} = {'Rat in bed, start sleep 3', 3218.0};
    andata{end+1} = {'End sleep 3', 4193.0};
    % split pairs into parallel arrays of annotation strings and timestamps
    shape = [1, length(andata)];
    annotations = cell(shape);
    times = zeros(shape);
    for i = 1:length(andata)
        annotations{i} = andata{i}{1};
        times(i) = andata{i}{2};
    end
    annot.set_dataset('data', annotations);
    annot.set_dataset('timestamps', times);
    f.close()
end

test_append()
fprintf('%s PASSED\n', script_name);
end
88 |
89 |
90 |
--------------------------------------------------------------------------------
/matlab_bridge/matlab_unittest/t_epoch_tag.m:
--------------------------------------------------------------------------------
function t_epoch_tag(verbosity)

% Unit test: epoch tags.
% Create two epochs, add a different subset of tags to each; verify the
% top-level epochs group has a tags attribute containing the union of the
% tags, and that each epoch dataset contains its assigned tags.
%
% verbosity - optional; 'all' (default), 'none' or 'summary'

script_base_name = mfilename();
script_name = [script_base_name '.m'];
fname = [regexprep(script_base_name, '^t_', 's_') '.nwb'];
if nargin < 1
    % default, display all information. Other options are: 'none', 'summary'
    verbosity = 'all';
end

settings = {'file_name', fname, 'mode', 'w', 'verbosity', verbosity, ...
    'identifier', nwb_utils.create_identifier('Epoch tags'), ...
    'description','softlink test'};
f = nwb_file(settings{:});

tags = {'tag-a', 'tag-b', 'tag-c'};

% epoch-1 gets tags 2..end (tag-b, tag-c)
% NOTE(review): empty first make_group argument appears to be a stripped
% '<template>' name from transcription -- confirm against original source
epoch1 = f.make_group('', 'epoch-1');
epoch1.set_dataset('start_time', 0);
epoch1.set_dataset('stop_time', 3);
epoch1.set_dataset('tags', tags(2:end));

% epoch-2 gets tags 1..end-1 (tag-a, tag-b)
epoch2 = f.make_group('', 'epoch-2');
epoch2.set_dataset('start_time', 1);
epoch2.set_dataset('stop_time', 4);
epoch2.set_dataset('tags', tags(1:2));

f.close()

% per-epoch tags are stored as a dataset rather than an attribute,
% so verify_present (not verify_attribute_present) is used below
stored_tags = test_utils.verify_present(fname, 'epochs/epoch-1', 'tags');
for i = 2:length(tags)
    if ~any(strcmp(tags(i), stored_tags))
        test_utils.error('Verifying epoch tag content', 'epoch-1: all tags not present')
    end
end

stored_tags = test_utils.verify_present(fname, 'epochs/epoch-2', 'tags');
for i = 1:length(tags)-1
    if ~any(strcmp(tags(i), stored_tags))
        test_utils.error('Verifying epoch tag content', 'epoch-2: all tags not present')
    end
end

% the top-level /epochs group must accumulate the union of all epoch tags
stored_tags = test_utils.verify_attribute_present(fname, 'epochs', 'tags');
for i = 1:length(tags)
    if ~any(strcmp(tags(i), stored_tags))
        % message fixed: this checks the top-level epochs group attribute;
        % there is no 'epoch-3'
        test_utils.error('Verifying epoch tag content', 'epochs: all tags not present')
    end
end

fprintf('%s PASSED\n', script_name);
end
71 |
72 |
--------------------------------------------------------------------------------
/matlab_bridge/matlab_unittest/t_general_opto.m:
--------------------------------------------------------------------------------
function t_general_opto(verbosity)

% TESTS fields stored in general/optogenetics
%
% verbosity - optional; 'all' (default), 'none' or 'summary'

script_base_name = mfilename();
script_name = [script_base_name '.m'];
fname = [regexprep(script_base_name, '^t_', 's_') '.nwb'];
if nargin < 1
    % default, display all information. Other options are: 'none', 'summary'
    verbosity = 'all';
end


function test_field(fname, name, subdir)
    % dataset general/optogenetics/<subdir>/<lower(name)> must hold <name>
    val = test_utils.verify_present(fname, ['general/optogenetics/', subdir, '/'], lower(name));
    if ~strcmp(val, name)
        test_utils.error('Checking metadata', 'field value incorrect');
    end
end


function test_general_optogen()
    create_general_optogen(fname)
    % custom dataset directly under optogenetics
    val = test_utils.verify_present(fname, 'general/optogenetics/', 'optogen_custom');
    if ~strcmp(val, 'OPTOGEN_CUSTOM')
        test_utils.error('Checking custom', 'Field value incorrect')
    end
    % standard site fields under optogenetics/p1
    test_field(fname, 'DESCRIPTION', 'p1');
    test_field(fname, 'DEVICE', 'p1');
    test_field(fname, 'EXCITATION_LAMBDA', 'p1');
    test_field(fname, 'LOCATION', 'p1');
    % custom dataset on the site group
    val = test_utils.verify_present(fname, 'general/optogenetics/p1/', 'optogen_site_custom');
    if ~strcmp(val, 'OPTOGEN_SITE_CUSTOM')
        test_utils.error('Checking metadata', 'field value incorrect')
    end
end


function create_general_optogen(fname)
    % write a file populating standard and custom optogenetics metadata;
    % each dataset stores the upper-case version of its own name
    settings = {'file_name', fname, 'mode', 'w', 'verbosity', verbosity, ...
        'identifier', nwb_utils.create_identifier('metadata optogenetic test'), ...
        'description','test elements in /general/optogenetics'};  % typo 'optogentics' fixed
    f = nwb_file(settings{:});

    g = f.make_group('optogenetics');
    g.set_custom_dataset('optogen_custom', 'OPTOGEN_CUSTOM');

    % NOTE(review): empty first make_group argument appears to be a stripped
    % '<template>' name from transcription -- confirm against original source
    p1 = g.make_group('', 'p1');
    p1.set_dataset('description','DESCRIPTION');
    p1.set_dataset('device', 'DEVICE');
    p1.set_dataset('excitation_lambda','EXCITATION_LAMBDA');
    p1.set_dataset('location', 'LOCATION');
    p1.set_custom_dataset('optogen_site_custom', 'OPTOGEN_SITE_CUSTOM');

    f.close()
end

test_general_optogen()
fprintf('%s PASSED\n', script_name);
end
77 |
78 |
79 |
--------------------------------------------------------------------------------
/matlab_bridge/matlab_unittest/t_general_species.m:
--------------------------------------------------------------------------------
function t_general_species(verbosity)

% TESTS fields stored in general/subject
%
% verbosity - optional; 'all' (default), 'none' or 'summary'

script_base_name = mfilename();
script_name = [script_base_name '.m'];
fname = [regexprep(script_base_name, '^t_', 's_') '.nwb'];
if nargin < 1
    % default, display all information. Other options are: 'none', 'summary'
    verbosity = 'all';
end

function test_field(fname, name)
    % dataset general/subject/<lower(name)> must hold the string <name>
    val = test_utils.verify_present(fname, 'general/subject/', lower(name));
    if ~strcmp(val, name)
        test_utils.error('Checking metadata', 'field value incorrect');
    end
end

function test_general_subject()
    create_general_subject(fname);
    val = test_utils.verify_present(fname, 'general/subject/', 'description');
    if ~strcmp(val, 'SUBJECT')
        % bug fix: was 'ut.error'; 'ut' is undefined in these MATLAB tests --
        % every other check in this suite uses test_utils.error
        test_utils.error('Checking metadata', 'field value incorrect')
    end
    test_field(fname, 'SUBJECT_ID');
    test_field(fname, 'SPECIES');
    test_field(fname, 'GENOTYPE');
    test_field(fname, 'SEX');
    test_field(fname, 'AGE');
    test_field(fname, 'WEIGHT');
end


function create_general_subject(fname)
    % write a file populating all standard /general/subject datasets;
    % each dataset stores the upper-case version of its own name
    settings = {'file_name', fname, 'mode', 'w', 'verbosity', verbosity, ...
        'identifier', nwb_utils.create_identifier('general subject test'), ...
        'description','test elements in /general/subject'};
    f = nwb_file(settings{:});

    g = f.make_group('subject');
    g.set_dataset('description', 'SUBJECT');
    g.set_dataset('subject_id', 'SUBJECT_ID');
    g.set_dataset('species', 'SPECIES');
    g.set_dataset('genotype', 'GENOTYPE');
    g.set_dataset('sex', 'SEX');
    g.set_dataset('age', 'AGE');
    g.set_dataset('weight', 'WEIGHT');

    f.close()
end

test_general_subject()
fprintf('%s PASSED\n', script_name);
end
67 |
68 |
--------------------------------------------------------------------------------
/matlab_bridge/matlab_unittest/t_general_top.m:
--------------------------------------------------------------------------------
function t_general_top(verbosity)

% TESTS top-level fields stored in general
% TESTS storing metadata from file
% TESTS 'Custom' tagging on custom attributes

script_base_name = mfilename();
script_name = [script_base_name '.m'];
fname = [regexprep(script_base_name, '^t_', 's_') '.nwb'];
if nargin < 1
    verbosity = 'all';  % other options: 'none', 'summary'
end


function test_field(fname, name)
    % dataset general/<lower(name)> must hold the string <name>
    stored = test_utils.verify_present(fname, 'general/', lower(name));
    if ~strcmp(stored, name)
        test_utils.error('Checking metadata', 'field value incorrect');
    end
end

function test_general_top()
    create_general_top(fname)
    % every standard top-level field round-trips as its own upper-case name
    checks = {'DATA_COLLECTION', 'EXPERIMENT_DESCRIPTION', 'EXPERIMENTER', ...
        'INSTITUTION', 'LAB', 'NOTES', 'PROTOCOL', 'PHARMACOLOGY', ...
        'RELATED_PUBLICATIONS', 'SESSION_ID', 'SLICES', 'STIMULUS', ...
        'SURGERY', 'VIRUS'};
    for k = 1:numel(checks)
        test_field(fname, checks{k});
    end
    % source_script holds the text of this script, so it should be sizable
    src = test_utils.verify_present(fname, 'general/', 'source_script');
    if length(src) < 1000
        test_utils.error('Checking metadata_from_file', 'unexpected field size')
    end
end

% (the former check for a 'Custom' neurodata_type attribute on
% general/source_script was removed because that field is no longer custom)

function create_general_top(fname)
    % write a file populating all standard top-level /general datasets;
    % each dataset stores the upper-case version of its own name
    settings = {'file_name', fname, 'mode', 'w', 'verbosity', verbosity, ...
        'identifier', nwb_utils.create_identifier('general top test'), ...
        'description','test top-level elements in /general'};
    f = nwb_file(settings{:});

    top_fields = {'data_collection', 'experiment_description', 'experimenter', ...
        'institution', 'lab', 'notes', 'protocol', 'pharmacology', ...
        'related_publications', 'session_id', 'slices', 'stimulus', ...
        'surgery', 'virus'};
    for k = 1:numel(top_fields)
        f.set_dataset(top_fields{k}, upper(top_fields{k}));
    end

    % MATLAB equivalent of set_metadata_from_file('source_script', __file__)
    f.set_dataset('source_script', nwb_utils.load_file(script_name));

    f.close()
end

test_general_top()
fprintf('%s PASSED\n', script_name);
end
102 |
103 |
--------------------------------------------------------------------------------
/matlab_bridge/matlab_unittest/t_if_add_ts.m:
--------------------------------------------------------------------------------
function t_if_add_ts(verbosity)

% Unit test: adding a time series to an interface.
% TESTS creating a module
% TESTS creating an interface
% TESTS adding a timeseries to an interface

script_base_name = mfilename();
script_name = [script_base_name '.m'];
fname = [regexprep(script_base_name, '^t_', 's_') '.nwb'];
if nargin < 1
    verbosity = 'all';  % other options: 'none', 'summary'
end


function test_file()
    create_iface_series(fname, true);
    % the series must be reachable through the module/interface path
    test_utils.verify_timeseries(fname, 'Ones', ...
        'processing/test module/BehavioralEvents', 'TimeSeries')
end


function create_iface_series(fname, newfile)
    % build module 'test module' -> interface 'BehavioralEvents' -> series 'Ones'
    if newfile
        settings = {'file_name', fname, 'mode', 'w', 'verbosity', verbosity, ...
            'start_time', 'Sat Jul 04 2015 3:14:16' ...
            'identifier', nwb_utils.create_identifier('interface timeseries example'), ...
            'description','Test interface timeseries file'};
    else
        settings = {'file_name', fname, 'mode', 'r+'};
    end
    f = nwb_file(settings{:});

    module_grp = f.make_group('', 'test module');
    events_iface = module_grp.make_group('BehavioralEvents');
    series = events_iface.make_group('', 'Ones');
    % ten samples of 1.0 with one timestamp per sample
    series.set_dataset('data', ones([1, 10]), 'attrs', {'unit', 'Event', ...
        'conversion', 1.0, 'resolution', NaN});
    series.set_dataset('num_samples', 10);
    series.set_dataset('timestamps', 0:9);

    f.close()
end

test_file()
fprintf('%s PASSED\n', script_name);
end
63 |
64 |
--------------------------------------------------------------------------------
/matlab_bridge/matlab_unittest/t_modification_time.m:
--------------------------------------------------------------------------------
function t_modification_time(verbosity)

% Unit test: creates a file and modifies it twice; each open-for-write
% must add an entry to the file_create_date dataset (3 expected in total).
%
% verbosity - optional; 'all' (default), 'none' or 'summary'

script_base_name = mfilename();
script_name = [script_base_name '.m'];
fname = [regexprep(script_base_name, '^t_', 's_') '.nwb'];
if nargin < 1
    % default, display all information. Other options are: 'none', 'summary'
    verbosity = 'all';
end

% create the file
settings = {'file_name', fname, 'mode', 'w', 'verbosity', verbosity, ...
    'start_time', 'Sat Jul 04 2015 3:14:16' ...
    'identifier', nwb_utils.create_identifier('Modification example'), ...
    'description','Modified empty file'};
f = nwb_file(settings{:});
f.close()

% first modification
settings = {'file_name', fname, 'mode', 'r+', 'verbosity', verbosity};
f = nwb_file(settings{:});
% need to change the file for the API to update file_create_date
f.set_dataset('species', 'SPECIES');
f.close()

% second modification
settings = {'file_name', fname, 'mode', 'r+', 'verbosity', verbosity};
f = nwb_file(settings{:});
% need to change the file for the API to update file_create_date
f.set_dataset('genotype', 'GENOTYPE');
f.close()

% read back using the MATLAB hdf5 api; expect one date per open-for-write
dates = hdf5read(fname, 'file_create_date');
if length(dates) ~= 3
    % bug fix: was 'filename', an undefined variable -- the file name here is 'fname'
    test_utils.error(fname, sprintf('Expected 3 entries in file_create_date; found %d', length(dates)))
end

fprintf('%s PASSED\n', script_name);
end
45 |
--------------------------------------------------------------------------------
/matlab_bridge/matlab_unittest/t_no_data.m:
--------------------------------------------------------------------------------
function t_no_data(verbosity)

% Unit test: time series created without a 'data' field.
% TESTS TimeSeries.ignore_data()

script_base_name = mfilename();
script_name = [script_base_name '.m'];
fname = [regexprep(script_base_name, '^t_', 's_') '.nwb'];
if nargin < 1
    verbosity = 'all';  % other options: 'none', 'summary'
end


function test_nodata_series()
    series_name = 'nodata';
    create_nodata_series(fname, series_name, '/acquisition/timeseries');
    % the series must exist as a TimeSeries but have no 'data' dataset
    test_utils.verify_timeseries(fname, series_name, 'acquisition/timeseries', 'TimeSeries');
    test_utils.verify_absent(fname, ['acquisition/timeseries/', series_name], 'data');
end

function create_nodata_series(fname, series_name, target)
    % write a file holding one TimeSeries that has timestamps but no data
    settings = {'file_name', fname, 'mode', 'w', 'verbosity', verbosity ...
        'start_time', 'Sat Jul 04 2015 3:14:16' ...
        'identifier', nwb_utils.create_identifier('nodata example'), ...
        'description','time series no data test'};
    f = nwb_file(settings{:});
    nodata = f.make_group('', series_name, 'path', target);
    % cell array produces a one-element hdf5 array rather than a scalar
    nodata.set_dataset('timestamps', {0});
    f.close()
end

test_nodata_series()
fprintf('%s PASSED\n', script_name);
end
37 |
38 |
--------------------------------------------------------------------------------
/matlab_bridge/matlab_unittest/t_no_time.m:
--------------------------------------------------------------------------------
function t_no_time(verbosity)

% Unit test: time series created without 'timestamps' or 'starting_time'.
% TESTS TimeSeries.ignore_time()
% TESTS timeseries placement in acquisition, stimulus, templates

script_base_name = mfilename();
script_name = [script_base_name '.m'];
fname = [regexprep(script_base_name, '^t_', 's_') '.nwb'];
if nargin < 1
    verbosity = 'all';  % other options: 'none', 'summary'
end

function test_notime_series()
    series_name = 'notime';
    % acquisition: series present, but neither time representation stored
    create_notime_series(fname, series_name, '/acquisition/timeseries')
    test_utils.verify_timeseries(fname, series_name, 'acquisition/timeseries', 'TimeSeries')
    test_utils.verify_absent(fname, ['acquisition/timeseries/', series_name], 'timestamps')
    test_utils.verify_absent(fname, ['acquisition/timeseries/', series_name], 'starting_time')

    % also verify placement under stimulus presentation and templates
    create_notime_series(fname, series_name, '/stimulus/presentation')
    test_utils.verify_timeseries(fname, series_name, 'stimulus/presentation', 'TimeSeries')
    create_notime_series(fname, series_name, '/stimulus/templates')
    test_utils.verify_timeseries(fname, series_name, 'stimulus/templates', 'TimeSeries')
end

function create_notime_series(fname, series_name, target)
    % write a file holding one TimeSeries that has data but no time fields
    settings = {'file_name', fname, 'mode', 'w', 'verbosity', verbosity, ...
        'start_time', 'Sat Jul 04 2015 3:14:16' ...
        'identifier', nwb_utils.create_identifier('notime example'), ...
        'description','Test no time'};
    f = nwb_file(settings{:});

    notime = f.make_group('', series_name, 'path', target);
    % cell array forces a one-element array in the hdf5 file
    notime.set_dataset('data', {0.0}, 'attrs', {'unit', 'n/a', ...
        'conversion', 1.0, 'resolution', 1.0});
    f.close()
end

test_notime_series()
fprintf('%s PASSED\n', script_name);
end
51 |
52 |
--------------------------------------------------------------------------------
/matlab_bridge/matlab_unittest/t_ref_image.m:
--------------------------------------------------------------------------------
function t_ref_image(verbosity)

% TESTS storage of reference image

script_base_name = mfilename();
script_name = [script_base_name '.m'];
fname = [regexprep(script_base_name, '^t_', 's_') '.nwb'];
if nargin < 1
    verbosity = 'all';  % other options: 'none', 'summary'
end


function test_refimage_series()
    img_name = 'refimage';
    create_refimage(fname, img_name)
    % image contents: five stored values
    img = test_utils.verify_present(fname, 'acquisition/images/', img_name);
    if length(img) ~= 5
        test_utils.error('Checking ref image contents', 'wrong dimension')
    end
    % 'format' and 'description' attributes must round-trip
    fmt = test_utils.verify_attribute_present(fname, ['acquisition/images/', img_name], 'format');
    if ~strcmp(fmt, 'raw')
        test_utils.error('Checking ref image format', 'Wrong value')
    end
    descr = test_utils.verify_attribute_present(fname, ['acquisition/images/', img_name], 'description');
    if ~strcmp(descr, 'test')
        test_utils.error('Checking ref image description', 'Wrong value')
    end
end

function create_refimage(fname, img_name)
    % write a file containing one raw reference image of five uint8 values
    settings = {'file_name', fname, 'mode', 'w', 'verbosity', verbosity, ...
        'start_time', 'Sat Jul 04 2015 3:14:16' ...
        'identifier', nwb_utils.create_identifier('reference image test'), ...
        'description','reference image test'};
    f = nwb_file(settings{:});

    % MATLAB equivalent of: create_reference_image([1,2,3,4,5], name, 'raw', 'test')
    f.set_dataset('', [1,2,3,4,5], 'dtype', 'uint8', 'name', img_name, 'attrs', { ...
        'description', 'test', 'format', 'raw'});

    f.close()
end

test_refimage_series()
fprintf('%s PASSED\n', script_name);
end
49 |
50 |
--------------------------------------------------------------------------------
/matlab_bridge/matlab_unittest/t_softlink.m:
--------------------------------------------------------------------------------
function t_softlink(verbosity)

% Unit test: linking TimeSeries.data between two files.
% TESTS softlink of TimeSeries.data

script_base_name = mfilename();
script_name = [script_base_name '.m'];
fname1 = [regexprep(script_base_name, '^t_', 's_') '1.nwb'];
fname2 = [regexprep(script_base_name, '^t_', 's_') '2.nwb'];
if nargin < 1
    verbosity = 'all';  % other options: 'none', 'summary'
end

function test_softlink()
    source_name = 'softlink_source';
    reader_name = 'softlink_reader';
    % file 1 holds the data; file 2 links to it
    create_softlink_source(fname1, source_name, '/acquisition/timeseries');
    create_softlink_reader(fname2, reader_name, fname1, source_name, '/acquisition/timeseries');

    % both series are valid TimeSeries in their own files
    test_utils.verify_timeseries(fname1, source_name, 'acquisition/timeseries', 'TimeSeries');
    test_utils.verify_timeseries(fname2, reader_name, 'acquisition/timeseries', 'TimeSeries');

    % the reader's linked 'data' dataset must resolve through the link
    val = test_utils.verify_present(fname2, ['acquisition/timeseries/', reader_name], 'data');
end


function create_softlink_reader(fname, name, src_fname, src_name, target)
    % write a file whose series 'data' is an external link into src_fname
    settings = {'file_name', fname, 'mode', 'w', 'verbosity', verbosity, ...
        'start_time', 'Sat Jul 04 2015 3:14:16' ...
        'identifier', nwb_utils.create_identifier('softlink reader'), ...
        'description','softlink test'};
    f = nwb_file(settings{:});

    reader = f.make_group('', name, 'path', target);
    % 'extlink:<file>,<path>' asks the api to create an external hdf5 link
    link_path = ['acquisition/timeseries/', src_name, '/data'];
    extlink = sprintf('extlink:%s,%s', src_fname, link_path);
    reader.set_dataset('data', extlink);
    reader.set_dataset('timestamps', {345}); % cell array => array in hdf5 file
    f.close()
end

function create_softlink_source(fname, name, target)
    % write the file that holds the actual data being linked to
    settings = {'file_name', fname, 'mode', 'w', 'verbosity', verbosity, ...
        'start_time', 'Sat Jul 04 2015 3:14:16' ...
        'identifier', nwb_utils.create_identifier('softlink source'), ...
        'description','time series no data test'};
    f = nwb_file(settings{:});

    source = f.make_group('', name, 'path', target);
    source.set_dataset('data', {234.0}, 'attrs', {'unit', 'parsec', ...
        'conversion', 1.0, 'resolution', 1e-3});
    source.set_dataset('timestamps', {123.0});
    f.close()
end

test_softlink()
fprintf('%s PASSED\n', script_name);
end
75 |
76 |
--------------------------------------------------------------------------------
/matlab_bridge/matlab_unittest/t_starting_time.m:
--------------------------------------------------------------------------------
function t_starting_time(verbosity)

% TESTS use of TimeSeries.starting_time
% Creates an NWB file with a TimeSeries that specifies time using
% starting_time + rate rather than explicit timestamps, then verifies
% the stored values.

script_base_name = mfilename();
script_name = [script_base_name '.m'];
% output file: t_xxx.m -> s_xxx.nwb
fname = [regexprep(script_base_name, '^t_', 's_') '.nwb'];
if nargin < 1
    % default, display all information. Other options are: 'none', 'summary'
    verbosity = 'all';
end

function test_nodata_series()
    name = 'starting_time';
    % create_startingtime_series(fname, name, 'acquisition')
    create_startingtime_series(fname, name, '/acquisition/timeseries');
    test_utils.verify_timeseries(fname, name, 'acquisition/timeseries', 'TimeSeries');
    % timestamps must be absent because time is specified by rate
    test_utils.verify_absent(fname, ['acquisition/timeseries/', name], 'timestamps');
    val = test_utils.verify_present(fname, ['acquisition/timeseries/', name], 'starting_time');
    if val ~= 0.125
        test_utils.error('Checking start time', 'Incorrect value')
    end
    % bug fix: path components were in the wrong order
    % ('.../starting_time/<name>' instead of '.../<name>/starting_time');
    % it only worked because name happens to equal 'starting_time'.
    % 'rate' is an attribute of the starting_time dataset.
    val = test_utils.verify_attribute_present(fname, ['acquisition/timeseries/', name, '/starting_time'], 'rate');
    if val ~= 2
        test_utils.error('Checking rate', 'Incorrect value')
    end
end

function create_startingtime_series(fname, name, target)
    % Create NWB file with a TimeSeries using starting_time 0.125 and rate 2 Hz
    settings = {'file_name', fname, 'mode', 'w', 'verbosity', verbosity, ...
        'start_time', 'Sat Jul 04 2015 3:14:16' ...
        'identifier', nwb_utils.create_identifier('starting time test'), ...
        'description','time series starting time test'};
    f = nwb_file(settings{:});

    %
    % Equivalent Python API calls:
    % stime = neurodata.create_timeseries('TimeSeries', name, target)
    % stime.set_data([0, 1, 2, 3], unit='n/a', conversion=1, resolution=1)
    % stime.set_value('num_samples', 4)
    % stime.set_time_by_rate(0.125, 2)
    %

    stime = f.make_group('', name, 'path', target);
    stime.set_dataset('data', [0.0, 1.0, 2.0, 3.0], 'attrs', {'unit', 'n/a', ...
        'conversion', 1.0, 'resolution', 1.0});
    stime.set_dataset('num_samples', 4);

    % stime.set_time_by_rate(0.125, 2)
    stime.set_dataset('starting_time', 0.125, 'attrs', {'rate', 2.0, 'unit', 'Seconds'});
    % stime.finalize()
    % neurodata.close()
    f.close()
end

test_nodata_series()
fprintf('%s PASSED\n', script_name);
end
58 |
59 |
--------------------------------------------------------------------------------
/matlab_bridge/matlab_unittest/t_top_datasets.m:
--------------------------------------------------------------------------------
function t_top_datasets(verbosity)

% TESTS top-level datasets (identifier, file_create_date,
% session_start_time, session_description) written by nwb_file.

script_base_name = mfilename();
script_name = [script_base_name '.m'];
% output file: t_xxx.m -> s_xxx.nwb
fname = [regexprep(script_base_name, '^t_', 's_') '.nwb'];
if nargin < 1
    % default, display all information. Other options are: 'none', 'summary'
    verbosity = 'all';
end

function test_refimage_series()
    name = 'refimage';
    create_refimage(fname, name)
    val = test_utils.verify_present(fname, '/', 'identifier');
    if ~strcmp(val, 'vwx')
        % bug fix: context message had typo 'idenfier'
        test_utils.error('Checking file identifier', 'wrong contents')
    end
    val = test_utils.verify_present(fname, '/', 'file_create_date');
    val = test_utils.verify_present(fname, '/', 'session_start_time');
    if ~strcmp(val, 'xyz')
        test_utils.error('Checking session start time', 'wrong contents')
    end
    val = test_utils.verify_present(fname, '/', 'session_description');
    if ~strcmp(val, 'wxy')
        % bug fix: context message previously said 'session start time'
        % (copy-paste error); this check is for the session description
        test_utils.error('Checking session description', 'wrong contents')
    end
end

function create_refimage(fname, name)
    % Create a minimal NWB file; only top-level metadata is written.
    settings = {'file_name', fname, 'mode', 'w', 'verbosity', verbosity, ...
        'start_time', 'xyz' ...
        'identifier', 'vwx', ...
        'description','wxy'};
    f = nwb_file(settings{:});
    f.close()
end

test_refimage_series()
fprintf('%s PASSED\n', script_name);
end
43 |
44 |
--------------------------------------------------------------------------------
/matlab_bridge/matlab_unittest/t_unittimes.m:
--------------------------------------------------------------------------------
function t_unittimes(verbosity)

% TESTS creation of UnitTimes interface and data stored within
% Builds a processing module holding a UnitTimes interface with three
% units of spike times, then reads one unit back to verify storage.

script_base_name = mfilename();
script_name = [script_base_name '.m'];
% output file: t_xxx.m -> s_xxx.nwb
fname = [regexprep(script_base_name, '^t_', 's_') '.nwb'];
if nargin < 1
    % default, display all information. Other options are: 'none', 'summary'
    verbosity = 'all';
end

function test_unit_times()
    % create the file we're going to use
    ndata = create_empty_file(fname);
    % create a module to store processed data
    % mod = ndata.create_module('my spike times')
    mod = ndata.make_group('', 'my spike times');
    % add a unit times interface to the module
    % iface = mod.create_interface('UnitTimes')
    iface = mod.make_group('UnitTimes');
    % make some data to store
    spikes = create_spikes();
    % Equivalent Python API calls:
    % for i in range(len(spikes)):
    %     iface.add_unit(unit_name = 'unit-%d' % i,
    %         unit_times = spikes[i],
    %         description = '',
    %         source = 'Data spike-sorted by B. Bunny')

    % unit names are zero-based (unit-0, unit-1, ...) to match the Python tests
    for i = 1:length(spikes) % range(len(spikes)):
        unit_name = sprintf('unit-%d', i-1);
        ug = iface.make_group('', unit_name);
        ug.set_dataset('times', spikes{i});
        ug.set_dataset('unit_description', '');
        ug.set_dataset('source', 'Data spike-sorted by B. Bunny');
    end


    % clean up and close objects
    % iface.finalize()
    % mod.finalize()
    ndata.close()

    % test random sample to make sure data was stored correctly
    % (equivalent Python checks:)
    % h5 = h5py.File(fname)
    % times = h5['processing/my spike times/UnitTimes/unit-0/times'].value
    % assert len(times) == len(spikes[0]), 'Spike count for unit-0 wrong'
    % assert abs(times[1] - spikes[0][1]) < 0.001, 'Wrong time found in file'
    % h5.close()
    times = hdf5read(fname,'processing/my spike times/UnitTimes/unit-0/times');
    if length(times) ~= length(spikes{1})
        error('Spike count for unit-0 wrong')
    end
    if abs(times(2) - spikes{1}(2)) >= 0.001
        error('Wrong time found in file')
    end
end


function [spikes] = create_spikes()
    % Return a 1x3 cell array of spike-time vectors (seconds)
    spikes = cell([1, 3]);
    spikes{1} = [1.3, 1.4, 1.9, 2.1, 2.2, 2.3];
    spikes{2} = [2.2, 3.0];
    spikes{3} = [0.3, 0.4, 1.0, 1.1, 1.45, 1.8, 1.81, 2.2];
end


function [f] = create_empty_file(fname)
    % Create and return an open NWB file handle with standard test metadata
    settings = {'file_name', fname, 'mode', 'w', 'verbosity', verbosity, ...
        'start_time', 'Sat Jul 04 2015 3:14:16' ...
        'identifier', nwb_utils.create_identifier('UnitTimes example'), ...
        'description','Test file with spike times in processing module'};
    f = nwb_file(settings{:});
end

test_unit_times()
fprintf('%s PASSED\n', script_name);
end
79 |
80 |
--------------------------------------------------------------------------------
/matlab_bridge/matlab_unittest/test_utils.m:
--------------------------------------------------------------------------------
classdef test_utils
    % Utility functions for matlab unit tests.
    % Each static method is a thin pass-through to the corresponding
    % function in the Python module test_utils (test_utils.py), with
    % Python return values converted to matlab types where needed.

    properties
    end

    methods(Static)
        function error(context, err_string)
            % Report a test failure (delegates to Python)
            py.test_utils.error(context, err_string);
        end
        function [val] = verify_present(hfile, group, field)
            % Check that group/field exists in hfile; return its value
            raw = py.test_utils.verify_present(hfile, group, field);
            val = nwb_utils.convert_py_data(raw);
        end
        function [val] = verify_attribute_present(hfile, obj, field)
            % Check that attribute 'field' exists on obj; return its value
            raw = py.test_utils.verify_attribute_present(hfile, obj, field);
            val = nwb_utils.convert_py_data(raw);
        end
        function verify_timeseries(hfile, name, location, ts_type)
            % Check a time series of class ts_type exists at location
            py.test_utils.verify_timeseries(hfile, name, location, ts_type);
        end
        function verify_absent(hfile, group, field)
            % Check that group/field does NOT exist in hfile
            py.test_utils.verify_absent(hfile, group, field);
        end
        function [match] = search_for_substring(h5_str, value)
            % True if value occurs within h5_str (cell arrays accepted)
            if iscell(h5_str)
                % cell arrays must first be converted to a Python list
                h5_str = py.list(h5_str);
            end
            match = py.test_utils.search_for_substring(h5_str, value);
        end
    end
end
45 |
46 |
--------------------------------------------------------------------------------
/nwb/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeurodataWithoutBorders/api-python/52e97e7642021913ae6505ab63b7cc77d2622d76/nwb/__init__.py
--------------------------------------------------------------------------------
/nwb/check_schema.py:
--------------------------------------------------------------------------------
1 | # script to validate h5gate schema files using json schema
2 |
import os.path
import sys
import ast
import json

import jsonschema
8 |
def load_schema(file_name):
    """Load a Python file that contains JSON formatted as a Python dictionary.

    Files in this format are used to store the schema because, unlike pure
    JSON, they allow comments and long strings formatted for easier reading.

    Parameters: file_name - path to the schema file.
    Returns: the parsed dictionary.
    Exits the process (status 1) if the file is missing or unparsable.
    """
    if not os.path.isfile(file_name):
        print ("Unable to locate file %s" % file_name)
        sys.exit(1)
    # context manager guarantees the handle is closed even on error
    with open(file_name) as f:
        file_contents = f.read()
    try:
        # ast.literal_eval safely parses Python literals (no code execution)
        pydict = ast.literal_eval(file_contents)
    except Exception as e:
        print ("** Unable to parse file '%s' (should be mostly JSON)" % file_name)
        print ("Error is: %s" % e)
        sys.exit(1)
    # bug fix: assert message had unbalanced quotes around %s
    assert isinstance(pydict, dict), "** File '%s' does not contain python dictionary" % file_name
    return pydict
31 |
32 |
def load_meta_schema():
    """Load the JSON meta-schema used to validate h5gate schema files.

    Reads 'meta_schema.json' from the current directory.
    Returns the parsed dict; exits (status 1) if the file is missing.
    """
    meta_schema_file = "meta_schema.json"
    if not os.path.isfile(meta_schema_file):
        # bug fix: message referenced undefined name 'file_name'
        print ("Unable to locate file %s" % meta_schema_file)
        sys.exit(1)
    # bug fix: file() is Python 2 only; use open() via a context manager
    with open(meta_schema_file) as f:
        file_contents = f.read()
    # bug fix: json was never imported at module level before
    meta_schema = json.loads(file_contents)
    return meta_schema
42 |
43 |
if __name__ == "__main__":
    # locate meta_schema.py next to this script
    nwb_dir = os.path.dirname(os.path.realpath(__file__))
    meta_schema_file = os.path.join(nwb_dir, "meta_schema.py")
    # top-level key in the schema file that holds the format specification
    fs_var = "fs"
    if len(sys.argv) != 2:
        # NOTE(review): the usage text appears to have lost its <file>
        # placeholders (e.g. "python %s " prints no argument name) --
        # confirm against the original source
        print ("format is:")
        print ("python %s " % sys.argv[0])
        print ("where is either the name of a schema file, or '-' for the")
        print ("default core specification (nwb_core.py), or 'N' - to process the core specification")
        print ("but not display the full path to it (so output will not depend on the location")
        print ("NWB is installed in).")
        sys.exit(0)
    # '-' or 'N' selects the bundled core spec; 'N' also hides its full path
    using_core = sys.argv[1] in ("-", "N")
    schema_file = os.path.join(nwb_dir, 'nwb_core.py') if using_core else sys.argv[1]
    filter_path = sys.argv[1] == "N"
    meta_schema = load_schema(meta_schema_file)
    schema = load_schema(schema_file)
    if fs_var not in schema:
        print ("** Error, key '%s' not defined in top level of file '%s'" % (fs_var, schema_file))
        sys.exit(1)
    schema = schema[fs_var]
    display_name = ".../nwb/nwb_core.py" if filter_path else schema_file
    print ("checking specification in file '%s'" % display_name)
    # raises jsonschema.ValidationError if the schema does not conform
    jsonschema.validate(schema, meta_schema)
    print ("File is valid.")
69 |
70 |
71 |
72 |
73 |
--------------------------------------------------------------------------------
/nwb/display_versions.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import os
3 |
4 | # Function to display Python and hdf5 version from both Python
5 | # and Python called from matlab
6 |
7 | # To run by calling Python directly, enter the following command into the
8 | # operating system shell:
9 | # python -m nwb.display_versions
10 |
11 | # To run from Matlab enter the following command into the Matlab command window:
12 | # py.nwb.display_versions.matlab()
13 |
14 | # The versions displayed by the above two calls should be the same.
15 | # If the output is different, you need to specify the
16 | # correct version of Python to Matlab as described in the
17 | # matlab_bridge 0_INSTALL.txt file.
18 |
19 |
20 |
def display_versions():
    """Print environment variables plus Python and HDF5 version info.

    Used to compare the environment seen by Python run directly with
    Python embedded in Matlab (see matlab() below).
    """
    print ("** environment variables:")
    for key in sorted(os.environ.keys()):
        print( "%s - %s" % (key, os.environ[key]))
    print ("** Versions:")
    print ("Python: %s" % sys.version)
    print ("Python executable: %s" % sys.executable)
    try:
        import h5py
    except ImportError as e:
        # bug fixes: was a bare 'except' reporting only sys.exc_info()[0],
        # and the message misspelled the module name ('hd5f')
        print ("unable to import h5py: %s" % e)
    else:
        print ("HDF5 API: %s" % h5py.version.api_version)
        print ("HDF5: %s" % h5py.version.hdf5_version)
36 |
def matlab():
    """Entry point invoked from Matlab via py.nwb.display_versions.matlab()."""
    banner = "\n**** called from MatLab"
    print (banner)
    display_versions()
40 |
41 |
if __name__ == "__main__":
    # invoked directly: python -m nwb.display_versions
    print ("\n**** called directly from Python")
    display_versions()
45 |
--------------------------------------------------------------------------------
/nwb/make_docs.py:
--------------------------------------------------------------------------------
# program to generate documentation from specification language definition(s)
#
# Builds the h5gate node tree from the specification files (or from
# specifications saved in an hdf5 file) and prints the generated HTML.

import sys
# h5gate validate script
from . import h5gate as g
from . import doc_tools as dt

# print "sys.argv=%s" % sys.argv

# bug fix: original condition included 'len(sys.argv) < 1' which can never
# be true (argv always holds at least the program name)
if len(sys.argv) > 3:
    print ("format is:")
    # fixed typo: 'pyhton' -> 'python'
    print ("python %s [ [] ]" % sys.argv[0])
    print ("OR")
    # bug fix: this line printed a literal '%s' (no argument was applied)
    print ("python %s " % sys.argv[0])
    print ("where:")
    # fixed typo: 'common separated' -> 'comma separated'
    print (" is a comma separated list of extension files, or '-' for none")
    print (" is the core format specification file. Default is 'nwb_core.py'")
    print (" is an hdf5 (extension not '.py') containing format specifications")
    sys.exit(0)
if len(sys.argv) == 2 and not sys.argv[1].endswith('.py'):
    # assume loading specifications from hdf5 file
    spec_files = []
    file_name = sys.argv[1]
else:
    # default to the bundled core spec; extensions optional
    core_spec = 'nwb_core.py' if len(sys.argv) < 3 else sys.argv[2]
    extensions = [] if len(sys.argv) < 2 or sys.argv[1] == '-' else sys.argv[1].split(',')
    spec_files = [core_spec] + extensions
    file_name = None
options = {'mode': 'no_file'}
f = g.File(file_name, spec_files, options=options)
# doc = dt.generate(f)
dt.build_node_tree(f)
html = dt.make_doc(f)
f.close()
print (html)
36 |
37 |
--------------------------------------------------------------------------------
/nwb/make_json.py:
--------------------------------------------------------------------------------
1 |
# program to convert the nwb_core.py specification language file to JSON
# format, writing the result to nwb_core.json

import nwb_core
import json

# bug fixes: don't shadow the 'json' module with the output string, and
# close the output file deterministically via a context manager
json_text = json.dumps(nwb_core.nwb, indent=4, separators=(',', ': '))
with open('nwb_core.json', 'w') as f1:
    f1.write(json_text)
11 |
--------------------------------------------------------------------------------
/nwb/nwb_init.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import nwb.nwb_utils as ut
3 |
def nwb_init(f, mode, start_time, identifier, description, creating_file):
    """Initialize NWB file metadata. f is the h5gate File object;
    creating_file is True when a new file is being created.

    Creating: write the required top-level datasets (version, identifier,
    description, creation date, session start time).
    Modifying an existing file ('r+'/'a'): register a close callback that
    records the modification time.
    See nwb_file for the meaning of mode, start_time, identifier and
    description.
    """
    if creating_file:
        # brand new file: write the required top-level metadata
        version = f.ddef[f.default_ns]['info']['version']
        f.set_dataset("nwb_version", version)
        f.set_dataset("identifier", identifier)
        f.set_dataset("session_description", description)
        now = ut.current_time()
        f.set_dataset("file_create_date", [now, ])
        # if no session start time supplied, default to the creation time
        f.set_dataset("session_start_time", start_time if start_time else now)
    elif mode in ('r+', 'a'):
        # modifying an existing file: arrange for the modification time
        # to be recorded when the file is closed
        f.set_close_callback(nwb_close_callback)
37 |
38 |
def nwb_close_callback(f):
    """ Executed on close of nwb file. Appends the current time to
    /file_create_date to record the modification time. f is the h5gate
    File object; a message is appended to f.warning when the update
    cannot (or need not) be made.
    """
    if f.file_changed:
        if f.creating_file or f.options['mode'] not in ('r+', 'a'):
            # bug fix: '%' was previously applied to print()'s return value
            # (None), which would raise TypeError on this error path
            print ("Unexpected condition when calling close_callback. "
                "creating_file=%s, mode=%s" % (f.creating_file, f.options['mode']))
            sys.exit(1)
        # file changed. Append current time to modification time
        cd = f.get_node("/file_create_date", abort = False)
        if not cd:
            f.warning.append("Unable to append modification time to /file_create_date. "
                " Dataset does not exist.")
        else:
            cur_time = ut.current_time()
            cd.append(cur_time)
    else:
        f.warning.append("Not updating modification time because file was not changed.")
56 |
57 | # Below code used if modifications_time stored as attribute
58 | # if "modification_time" in cd.h5attrs:
59 | # mod_times = cd.h5attrs["modification_time"]
60 | # if not isinstance(mod_times, (list,)):
61 | # f.warning.append("Did not update modification time, because type "
62 | # "was unexpected: %s") % type(mod_times)
63 | # else:
64 | # cur_time = utils.current_time()
65 | # mod_times = mod_times + [cur_time] # don't use append to avoid changing original
66 | # cd.set_attr("modification_time", mod_times)
67 | # print "Updated modification time."""
68 |
69 |
70 |
71 |
--------------------------------------------------------------------------------
/nwb/suggest_spellings.py:
--------------------------------------------------------------------------------
1 |
2 | # function get_possible_matches - used to return list of possible
3 | # matching words in a list if the word was supposed to be in the
4 | # list but not found, possibly due to a spelling error.
5 | # adapted from:
6 | # http://norvig.com/spell-correct.html
7 |
8 |
alphabet = 'abcdefghijklmnopqrstuvwxyz'

def edits1(word):
    """Return the set of all strings exactly one edit away from word
    (deletion, adjacent transposition, replacement, or insertion)."""
    pieces = [(word[:k], word[k:]) for k in range(len(word) + 1)]
    result = set()
    for left, right in pieces:
        if right:
            result.add(left + right[1:])                          # deletion
            if len(right) > 1:
                result.add(left + right[1] + right[0] + right[2:])  # transposition
            for ch in alphabet:
                result.add(left + ch + right[1:])                 # replacement
        for ch in alphabet:
            result.add(left + ch + right)                         # insertion
    return result

def known_edits2(word, choices):
    """Return the members of choices reachable from word in two edits."""
    found = set()
    for once in edits1(word):
        for twice in edits1(once):
            if twice in choices:
                found.add(twice)
    return found

def known(words, choices):
    """Return the subset of words that appear in choices."""
    return {w for w in words if w in choices}


def get_possible_matches(word, choices):
    """ given word, that is not in list words, find possible matches based on
    edits. This used to generate list of possible matches in case there was a
    misspelling"""
    choice_set = set(choices)
    close = known(edits1(word), choice_set)
    if not close:
        # nothing within one edit; widen the search to two edits
        close = known_edits2(word, choice_set)
    return sorted(close)
32 |
33 |
def test_get_possible_matches():
    """Smoke test: print the suggestions produced for a few misspellings."""
    choices = ["stimulus", "hippocampus", "mouse", "module", "interface"]
    misspelled = ["stimuls", "hipppocampus", "mose", "modlue", "interfaces"]
    for w in misspelled:
        suggestions = get_possible_matches(w, choices)
        print ("%s found %s" % (w, suggestions))
40 |
41 |
if __name__ == '__main__':
    # run the smoke test when executed directly
    test_get_possible_matches()
44 |
--------------------------------------------------------------------------------
/nwb/validate.py:
--------------------------------------------------------------------------------
1 |
2 | # program to validate nwb files using specification language definition
3 |
4 | import sys
5 | import nwb.nwb_file as nwb_file
6 | # import cProfile # for profiling
7 |
def validate_file(name, core_spec="nwb_core.py", extensions=None, verbosity="all"):
    """Validate an NWB file against the format specification.

    Parameters
    ----------
    name: string
        Name (including path) of the file to be validated.

    core_spec: string (default: 'nwb_core.py')
        Name of core specification file, or '-' to load specification(s)
        from the HDF5 file itself.

    extensions: array
        Array of extension specification files.

    verbosity: string (default: 'all')
        How much validation output to display: 'all', 'summary' or 'none'.

    Returns
    -------
    validation_result: dict
        Keys 'errors', 'warnings' and 'added' hold counts of errors,
        warnings and additions (groups, datasets or attributes present in
        the file but not defined by the core specification).
    """
    ext_list = [] if extensions is None else extensions
    # validation happens as a side effect of opening read-only and closing
    handle = nwb_file.open(name, mode="r", core_spec=core_spec,
                           extensions=ext_list, verbosity=verbosity)
    return handle.close()
40 |
41 |
if __name__ == "__main__":

    if len(sys.argv) < 2 or len(sys.argv) > 4:
        # NOTE(review): usage text appears to have lost its <placeholder>
        # tokens (lines start with 'is a ...') -- confirm against original
        print("format is:")
        print("python %s [ [] ]" % sys.argv[0])
        print("where:")
        print(" is a common separated list of extension files, or '-' for none")
        print(" is the core format specification file. Default is 'nwb_core.py'")
        print("Use two dashes, e.g. '- -' to load saved specifications from ")
        sys.exit(0)
    # core spec given as optional third argument
    core_spec = 'nwb_core.py' if len(sys.argv) < 4 else sys.argv[3]
    # extensions: comma separated list in optional second argument, '-' for none
    extensions = [] if len(sys.argv) < 3 or sys.argv[2] == '-' else sys.argv[2].split(',')
    file_name = sys.argv[1]
    if extensions == [] and core_spec == "-":
        # '- -' means read specifications stored inside the HDF5 file
        print("Loading specifications from file '%s'" % file_name)
    validate_file(file_name, core_spec=core_spec, extensions=extensions)
    # replace above call with following to generate execution time profile
    # cProfile.run('validate_file("%s", core_spec="%s")' % (file_name, core_spec))
60 |
61 |
62 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
# Packaging configuration for the NWB Python API.
from setuptools import setup
# from distutils.core import setup

setup(
    name = 'nwb',
    version = '1.0.6',
    url='https://github.com/NeurodataWithoutBorders/api-python',
    author='Jeff Teeters and Keith Godfrey',
    author_email='jteeters@berkeley.edu',
    description = 'Python API for Neurodata Without Borders (NWB) format',
    packages = ['nwb'],
    # runtime dependencies: HDF5 access and schema validation
    install_requires = ['h5py', 'jsonschema']
)
14 |
15 |
--------------------------------------------------------------------------------
/test_all/make_dirsig.py:
--------------------------------------------------------------------------------
1 |
2 | # This script creates a "signature file" for the contents of all
3 | # files in a directory. The signature file, has a list of all the files
4 | # and a hash code for each. This is used to verify that files generated
5 | # by scripts match those that were originally generated.
6 |
7 |
8 | import sys
9 | import glob
10 | import os, fnmatch
11 | import re
12 | from sys import version_info
13 | from nwb import value_summary as vs
14 |
def error_exit(msg):
    """Print msg (if any) followed by the usage text, then exit status 1."""
    if msg:
        print(msg)
    usage = [
        "Format is",
        "%s " % sys.argv[0],
        "where:",
        " - a directory containing files, or a single file.",
        " If a directory is given, a single signature file, named 'dircig.txt'",
        " is created inside it. If a file is given, the name and hash of the",
        " file is written to standard output.",
    ]
    for line in usage:
        print (line)
    sys.exit(1)
26 |
def remove_sig_somments(contents):
    """Strip every <% ... %> span (non-greedy, within a line) from the
    byte string contents. This removes text likely to vary between
    otherwise-identical nwb files (such as file_create_date) so it does
    not affect the hash comparison. (The function name keeps its
    historical spelling because callers reference it.)"""
    marker = re.compile(b"<%.*?%>")
    return marker.sub(b"", contents)
34 |
35 |
def get_file_hash(path):
    """Return the hash string (at least 6 characters) for the file at path.

    Reads the file as bytes, removes volatile <% ... %> spans, hashes the
    remainder with value_summary.hashval, and pads short hashes to 6 chars.
    """
    # context manager replaces the open/read/close sequence so the handle
    # is closed even if read() raises
    with open(path, "rb") as f:
        contents = f.read()
    contents = remove_sig_somments(contents)
    # 'digest' avoids shadowing the builtin 'hash'
    digest = vs.hashval(contents)
    # make sure hash is 6 characters
    if len(digest) < 6:
        # pad with repeats of the second character
        # NOTE(review): assumes hashval always returns >= 2 chars -- confirm
        digest = "%s%s" % (digest, digest[1] * (6 - len(digest)))
    return digest
46 |
def process_files(input_dir_or_file):
    """Hash a single file, or create a signature file for a directory.

    If input_dir_or_file is a file, print 'hash path' to stdout.
    If it is a directory, walk it and write one 'hash relative_path' line
    per file into 'dirsig.txt' inside the directory; the signature file
    itself and 'log.txt' are excluded.
    """
    if os.path.isfile(input_dir_or_file):
        hash = get_file_hash(input_dir_or_file)
        print ("%s %s" % (hash, input_dir_or_file))
    else:
        # input_dir_or_file is a directory, processes files within it
        output = []
        output_file_name = "dirsig.txt"
        ignore_files = [output_file_name, "log.txt"]
        # hoisted out of the loop: the full ignore paths were previously
        # rebuilt for every file visited
        ignore_paths = set(os.path.join(input_dir_or_file, x) for x in ignore_files)
        for dirpath, dirnames, filenames in os.walk(input_dir_or_file):
            for filename in filenames:
                assert dirpath.startswith(input_dir_or_file)
                path = os.path.join(dirpath, filename)
                if path in ignore_paths:
                    # don't include signature for this file
                    continue
                hash = get_file_hash(path)
                output_path = path[len(input_dir_or_file):].lstrip("/")
                output.append("%s %s" % (hash, output_path))
                # print ("dirpath=%s, output_path=%s, filename=%s" % (dirpath, output_path, filename))
        # write output (context manager closes the file; was open/write/close)
        output_file_path = os.path.join(input_dir_or_file, output_file_name)
        with open(output_file_path, "w") as f:
            f.write("\n".join(output))
72 |
if __name__ == '__main__':
    # expect exactly one argument: the directory (or file) to sign
    if len(sys.argv) != 2:
        error_exit("Invalid number of command line arguments: %s" % len(sys.argv))
    input_dir_or_file = sys.argv[1]
    if not os.path.exists(input_dir_or_file):
        error_exit("Input does not exist: %s" % input_dir_or_file)
    process_files(input_dir_or_file)
--------------------------------------------------------------------------------
/test_all/set_orig.py:
--------------------------------------------------------------------------------
# script to set the "orig" (original) directory to the "curr" (current) directory

# Details:
# If orig exists, moves it to orig_bkNN (NN == 01, 02, 03, ...).
# then copies "curr" to "orig".

import shutil
import os
import glob
import re
import sys  # bug fix: sys.exit is used below but sys was never imported

if not os.path.isdir("curr"):
    print ("Directory 'curr' does not exist. Cannot set orig to curr. Aborting.")
    sys.exit(1)

if os.path.isdir("orig"):
    # orig exists, move it to orig_bkNN

    bk_files = sorted(glob.glob("orig_bk[0-9]*"))
    if bk_files:
        # find the highest existing backup number and use the next one
        lastf = bk_files[-1]
        match = re.match("orig_bk([0-9]*)", lastf)
        if not match:
            print("Unable to isolate integer in name: %s" % lastf)
            sys.exit(1)
        last_num = int(match.group(1))
        next_num = last_num + 1
        next_name = "orig_bk%02i" % next_num
        # print ("next_name is %s" % next_name)
    else:
        next_name = "orig_bk01"
    print ("Renaming 'orig' => '%s'" % next_name)
    os.rename("orig", next_name)

# now copy 'curr' to 'orig'
print ("Copying 'curr' to 'orig'")
shutil.copytree("curr", "orig")

print ("all done")
40 |
41 |
42 |
43 |
44 |
45 |
--------------------------------------------------------------------------------
/test_all/test_all.cfg:
--------------------------------------------------------------------------------
1 | # configuration file for tests
2 |
3 | # Set below the path to the matlab executable to include matlab tests
4 | # To prevent testing matlab, use an empty path, e.g.
5 | # matlab=""
6 | matlab="/Applications/MATLAB_R2016b.app/bin/matlab"
7 |
--------------------------------------------------------------------------------
/test_all/test_all.sh:
--------------------------------------------------------------------------------
# Script to perform all tests

# Requires existing "orig" directory containing signature file
# ("dirsig.txt"). Builds "curr" directory containing results of
# all scripts and signature file for results. Compares
# curr/dirsig.txt to orig/dirsig.txt
# If these match, all tests pass.

cwd=`pwd`

orig_sig="orig/dirsig.txt"
source_data_dir="../examples/source_data_2"

if [[ ! -e "$orig_sig" ]] ; then
    # bug fix: message previously expanded undefined variable $orig
    echo "File '$orig_sig' does not exist. Cannot run tests."
    exit 1
fi


if [[ ! -e "$source_data_dir" ]] ; then
    echo ""
    echo "*** Source data directory is not installed. Do you wish to install it?"
    echo "Directory '$source_data_dir' must be installed before running test."
    echo "Do you wish to install $source_data_dir (about 1.2GB; 580MB download)?"
    echo "Type 1 if Yes. 2 if no."
    select yn in "Yes" "No"; do
        if [[ "$yn" == "Yes" ]] ; then
            cd ../examples/utility_scripts
            python install_source_data.py
            status=$?
            # quoted in case the starting directory contains spaces
            cd "$cwd"
            if [[ "$status" -ne "0" ]] ; then
                echo "Tests aborted because $source_data_dir not installed."
                exit 1
            fi
            break ;
        else
            echo "Tests aborted because $source_data_dir not installed."
            exit 1
        fi
    done
fi


# build the "curr" directory with fresh results
./make_curr.sh

curr_sig="curr/dirsig.txt"
if [[ ! -e "$curr_sig" ]] ; then
    echo "File '$curr_sig' does not exist. Cannot complete tests."
    exit 1
fi

# compare the current signatures against the original ones
python show_diff.py

status=$?

echo
if [[ "$status" -eq "0" ]] ; then
    echo "** ALL TESTS PASS **"
else
    echo "** ONE OR MORE TESTS FAILED. **"
fi
63 |
64 |
65 |
--------------------------------------------------------------------------------
/unittest/0_README.txt:
--------------------------------------------------------------------------------
1 | Directory unittests
2 |
This directory contains the unit tests. To run an individual test,
type:
   python <name_of_test_file>
6 |
To run all the tests, run the bash script:
   ./run_tests.sh
9 |
10 |
When run, each test will first output a validation report for
the created NWB file. After that, a message will indicate
whether or not the test PASSED.

Ignore the validation report for the purposes of determining
if the test passes.
17 |
--------------------------------------------------------------------------------
/unittest/run_tests.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Run every unit test (t_*.py) with python; report failures to stderr
# but continue with the remaining tests.
# bug fixes: glob was 't_*py' (also matched names without the '.py'
# extension) and $f was unquoted.
for f in t_*.py
do
    python "$f" || >&2 echo "$f FAILED"
done
7 |
8 |
--------------------------------------------------------------------------------
/unittest/t_annotation.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python
2 | import sys
3 | # import nwb
4 | from nwb import nwb_file
5 | from nwb import nwb_utils as utils
6 |
7 | import test_utils as ut
8 |
9 | # test creation of annotation time series
10 | # TESTS AnnotationSeries creation
11 | # TESTS TimeSeries ancestry
12 |
13 |
def test_annotation_series():
    """Create an AnnotationSeries in acquisition and in stimulus, then
    verify each is reachable with both TimeSeries and AnnotationSeries
    ancestry."""
    # Derive the output file name from this script's name: t_xxx.py -> s_xxx.nwb
    prefix = 3 if __file__.startswith("./") else 1
    fname = "s" + __file__[prefix:-3] + ".nwb"
    name = "annot"
    # Each call overwrites the file (mode "w"), so the two targets are
    # exercised independently.
    for target in ("acquisition/timeseries", "stimulus/presentation"):
        create_annotation_series(fname, name, target)
        ut.verify_timeseries(fname, name, target, "TimeSeries")
        ut.verify_timeseries(fname, name, target, "AnnotationSeries")
28 |
def create_annotation_series(fname, name, target):
    """Write a new NWB file containing one AnnotationSeries.

    fname  -- output NWB file name (file is overwritten: mode "w")
    name   -- name of the created series group
    target -- path under which the series is created,
              e.g. "acquisition/timeseries"
    """
    settings = {}
    settings["file_name"] = fname
    settings["identifier"] = utils.create_identifier("annotation example")
    settings["mode"] = "w"
    settings["start_time"] = "Sat Jul 04 2015 3:14:16"
    settings["description"] = "Test file with AnnotationSeries"
    settings["verbosity"] = "none"
    f = nwb_file.open(**settings)
    # NOTE(review): the empty first argument to make_group looks like it
    # lost an angle-bracketed type name (e.g. "<AnnotationSeries>") in
    # formatting -- confirm against the original source.
    annot = f.make_group("", name, path="/" + target)

    annot.set_attr('description', "This is an AnnotationSeries with sample data")
    # BUG FIX: corrected typo in the stored attribute text
    # ("desscription" -> "description"); the intended wording is shown by
    # the original API call this code was converted from.
    annot.set_attr("comments", "The comment and description fields can store arbitrary human-readable data")
    annot.set_attr("source", "Observation of Dr. J Doe")

    # Pretend annotation data; all times are stored as seconds.
    andata = [
        ("Rat in bed, beginning sleep 1", 15.0),
        ("Rat placed in enclosure, start run 1", 933.0),
        ("Rat taken out of enclosure, end run 1", 1456.0),
        ("Rat in bed, start sleep 2", 1461.0),
        ("Rat placed in enclosure, start run 2", 2401.0),
        ("Rat taken out of enclosure, end run 2", 3210.0),
        ("Rat in bed, start sleep 3", 3218.0),
        ("End sleep 3", 4193.0),
    ]
    annotations = [text for text, _ in andata]
    times = [t for _, t in andata]
    annot.set_dataset("data", annotations)
    annot.set_dataset("timestamps", times)
    f.close()
75 |
# Script entry: run the test and announce success (failures raise/exit).
test_annotation_series()
print(__file__ + " PASSED")
78 |
79 |
--------------------------------------------------------------------------------
/unittest/t_append.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python
2 | import test_utils as ut
3 | # import nwb
4 | from nwb import nwb_file
5 | from nwb import nwb_utils as utils
6 |
7 | # test opening file in append mode
8 | # TESTS modifying existing file
9 | # TESTS creation of modification_time
10 | # TESTS addition of TimeSeries to existing file
11 | # TESTS preservation of TimeSeries when file modified
12 |
def test_append():
    """Create one file holding two AnnotationSeries: the first written in
    "w" mode, the second added later in append ("r+") mode; verify both
    survive with correct ancestry."""
    prefix = 3 if __file__.startswith("./") else 1
    fname = "s" + __file__[prefix:-3] + ".nwb"
    target = "acquisition/timeseries"
    create_annotation_series(fname, "annot1", target, True)
    create_annotation_series(fname, "annot2", target, False)
    for series in ("annot1", "annot2"):
        ut.verify_timeseries(fname, series, target, "TimeSeries")
        ut.verify_timeseries(fname, series, target, "AnnotationSeries")
    # ut.verify_attribute_present(fname, "file_create_date", "modification_time")
29 |
30 |
def create_annotation_series(fname, name, target, newfile):
    """Add an AnnotationSeries named `name` under `target` in `fname`.

    When `newfile` is true the file is created from scratch ("w" mode);
    otherwise an existing file is opened for modification ("r+").
    """
    settings = {"file_name": fname, "verbosity": "none"}
    if newfile:
        settings["identifier"] = utils.create_identifier("append example")
        settings["mode"] = "w"
        settings["start_time"] = "Sat Jul 04 2015 3:14:16"
        settings["description"] = "Test append file"
    else:
        settings["mode"] = "r+"
    f = nwb_file.open(**settings)

    # NOTE(review): the empty first argument to make_group may have lost an
    # angle-bracketed type name (e.g. "<AnnotationSeries>") in formatting.
    annot = f.make_group("", name, path="/" + target)
    annot.set_attr("description", "This is an AnnotationSeries '%s' with sample data" % name)
    annot.set_attr("comments", "The comment and description fields can store arbitrary human-readable data")
    annot.set_attr("source", "Observation of Dr. J Doe")

    # Pretend annotation data; all times are stored as seconds.
    andata = [
        ("Rat in bed, beginning sleep 1", 15.0),
        ("Rat placed in enclosure, start run 1", 933.0),
        ("Rat taken out of enclosure, end run 1", 1456.0),
        ("Rat in bed, start sleep 2", 1461.0),
        ("Rat placed in enclosure, start run 2", 2401.0),
        ("Rat taken out of enclosure, end run 2", 3210.0),
        ("Rat in bed, start sleep 3", 3218.0),
        ("End sleep 3", 4193.0),
    ]
    annot.set_dataset("data", [text for text, _ in andata])
    annot.set_dataset("timestamps", [t for _, t in andata])
    f.close()
81 |
# Script entry: run the test and announce success (failures raise/exit).
test_append()
print(__file__ + " PASSED")
84 |
85 |
--------------------------------------------------------------------------------
/unittest/t_epoch_tag.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python
2 | import sys
3 | # import nwb
4 | from nwb import nwb_file
5 | from nwb import nwb_utils as utils
6 | import test_utils as ut
7 |
8 | # create two epochs, add different subset of tags to each
9 | # verify main epoch folder has tag attribute that contains
10 | # exactly the unique tags of each epoch and that each
11 | # epoch contains the assigned tags
12 |
# Derive the output file name from this script's name: t_xxx.py -> s_xxx.nwb
if __file__.startswith("./"):
    fname = "s" + __file__[3:-3] + ".nwb"
else:
    fname = "s" + __file__[1:-3] + ".nwb"

f = nwb_file.open(fname,
    start_time="2008-09-15T15:53:00-08:00",
    mode="w",
    identifier=utils.create_identifier("Epoch tags"),
    description="softlink test",
    verbosity="none")

# The full tag set; each epoch gets a different overlapping subset.
tags = ["tag-a", "tag-b", "tag-c"]

# NOTE(review): the empty first argument to make_group may have lost an
# angle-bracketed type name (e.g. "<epoch_X>") in formatting -- confirm.
epoch1 = f.make_group("", "epoch-1")
epoch1.set_dataset("start_time", 0)
epoch1.set_dataset("stop_time", 3)
# epoch-1 gets the last two tags
epoch1.set_dataset("tags", tags[1:])

epoch2 = f.make_group("", "epoch-2")
epoch2.set_dataset("start_time", 1)
epoch2.set_dataset("stop_time", 4)
# epoch-2 gets the first two tags
epoch2.set_dataset("tags", tags[0:-1])

f.close()

# Tags are stored as a dataset on each epoch, but as an attribute on the
# top-level epochs group.
# BUG FIX: the original assigned the retrieved values back to `tags` and
# then tested membership of elements of `tags` in `tags` itself, so the
# checks below could never fail.  Retrieve into `found` and compare
# against the expected subsets instead (ut.strcmp handles str/bytes
# differences in values read back from the file).
found = ut.verify_present(fname, "epochs/epoch-1", "tags")
for tag in tags[1:]:
    if not any(ut.strcmp(item, tag) for item in found):
        ut.error("Verifying epoch tag content", "All tags not present")

found = ut.verify_present(fname, "epochs/epoch-2", "tags")
for tag in tags[0:-1]:
    if not any(ut.strcmp(item, tag) for item in found):
        ut.error("Verifying epoch tag content", "All tags not present")

found = ut.verify_attribute_present(fname, "epochs", "tags")
for tag in tags:
    if not any(ut.strcmp(item, tag) for item in found):
        ut.error("Verifying epoch tag content", "All tags not present")


print("%s PASSED" % __file__)
69 |
70 |
--------------------------------------------------------------------------------
/unittest/t_general_opto.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python
2 | import nwb
3 | # from nwb.nwbco import *
4 | import test_utils as ut
5 |
6 | from nwb import nwb_file
7 | from nwb import nwb_utils as utils
8 |
9 | # TESTS fields stored in general/optogenetics
10 |
def test_field(fname, name, subdir):
    """Check that general/optogenetics/<subdir>/<lowercased name> holds
    the marker value `name`."""
    path = "general/optogenetics/" + subdir + "/"
    val = ut.verify_present(fname, path, name.lower())
    if not ut.strcmp(val, name):
        ut.error("Checking metadata", "field value incorrect")
15 |
def test_general_optogen():
    """Create a file with optogenetics metadata and verify every field."""
    prefix = 3 if __file__.startswith("./") else 1
    fname = "s" + __file__[prefix:-3] + ".nwb"
    create_general_optogen(fname)

    # custom field directly under general/optogenetics
    val = ut.verify_present(fname, "general/optogenetics/", "optogen_custom")
    if not ut.strcmp(val, "OPTOGEN_CUSTOM"):
        ut.error("Checking custom", "Field value incorrect")

    # standard fields of site "p1"
    for field in ("DESCRIPTION", "DEVICE", "EXCITATION_LAMBDA", "LOCATION"):
        test_field(fname, field, "p1")

    # custom field of site "p1"
    val = ut.verify_present(fname, "general/optogenetics/p1/", "optogen_site_custom")
    if not ut.strcmp(val, "OPTOGEN_SITE_CUSTOM"):
        ut.error("Checking metadata", "field value incorrect")
38 |
39 |
def create_general_optogen(fname):
    """Write an NWB file populated with /general/optogenetics metadata:
    a custom top-level dataset plus a site group "p1" holding the
    standard fields and one custom dataset."""
    settings = {}
    settings["start_time"] = "2008-09-15T15:53:00-08:00"
    settings["file_name"] = fname
    settings["identifier"] = utils.create_identifier("metadata optogenetic test")
    settings["mode"] = "w"
    # BUG FIX: corrected typo in the stored description
    # ("optogentics" -> "optogenetics").
    settings["description"] = "test elements in /general/optogenetics"
    settings["verbosity"] = "none"
    f = nwb_file.open(**settings)

    g = f.make_group("optogenetics")
    g.set_custom_dataset("optogen_custom", "OPTOGEN_CUSTOM")

    # NOTE(review): the empty first argument to make_group may have lost an
    # angle-bracketed type name (e.g. "<site_X>") in formatting -- confirm.
    p1 = g.make_group("", "p1")
    p1.set_dataset("description", "DESCRIPTION")
    p1.set_dataset("device", "DEVICE")
    p1.set_dataset("excitation_lambda", "EXCITATION_LAMBDA")
    p1.set_dataset("location", "LOCATION")
    p1.set_custom_dataset("optogen_site_custom", "OPTOGEN_SITE_CUSTOM")

    f.close()
72 |
# Script entry: run the test and announce success (failures raise/exit).
test_general_optogen()
print(__file__ + " PASSED")
75 |
76 |
--------------------------------------------------------------------------------
/unittest/t_general_species.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python
2 | # import nwb
3 | # from nwb.nwbco import *
4 | import test_utils as ut
5 |
6 | from nwb import nwb_file
7 | from nwb import nwb_utils as utils
8 |
9 | # TESTS fields stored in general/subject
10 |
def test_field(fname, name):
    """Check that general/subject/<lowercased name> holds the marker
    value `name`."""
    val = ut.verify_present(fname, "general/subject/", name.lower())
    if ut.strcmp(val, name):
        return
    ut.error("Checking metadata", "field value incorrect")
15 |
def test_general_subject():
    """Create a file with /general/subject metadata and verify each field."""
    prefix = 3 if __file__.startswith("./") else 1
    fname = "s" + __file__[prefix:-3] + ".nwb"
    create_general_subject(fname)
    # "description" stores the free-form subject description
    val = ut.verify_present(fname, "general/subject/", "description")
    if not ut.strcmp(val, "SUBJECT"):
        ut.error("Checking metadata", "field value incorrect")
    for field in ("SUBJECT_ID", "SPECIES", "GENOTYPE", "SEX", "AGE", "WEIGHT"):
        test_field(fname, field)
31 |
32 |
def create_general_subject(fname):
    """Write an NWB file whose /general/subject group has every standard
    subject-metadata dataset filled with a marker value."""
    f = nwb_file.open(
        file_name=fname,
        start_time="2008-09-15T15:53:00-08:00",
        identifier=utils.create_identifier("general top test"),
        mode="w",
        description="test elements in /general/subject",
        verbosity="none",
    )

    subject = f.make_group("subject")
    # (key, marker value) pairs, written in the original order
    fields = [
        ("description", "SUBJECT"),
        ("subject_id", "SUBJECT_ID"),
        ("species", "SPECIES"),
        ("genotype", "GENOTYPE"),
        ("sex", "SEX"),
        ("age", "AGE"),
        ("weight", "WEIGHT"),
    ]
    for key, value in fields:
        subject.set_dataset(key, value)

    f.close()
65 |
# Script entry: run the test and announce success (failures raise/exit).
test_general_subject()
print(__file__ + " PASSED")
68 |
69 |
--------------------------------------------------------------------------------
/unittest/t_if_add_ts.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python
2 | import test_utils as ut
3 | import numpy as np
4 | # import nwb
5 |
6 | from nwb import nwb_file
7 | from nwb import nwb_utils as utils
8 |
9 |
10 | # test opening file in append mode
11 | # TESTS creating a module
12 | # TESTS creating an interface
13 | # TESTS adding a timeseries to an interface
14 |
def test_file():
    """Build a file containing a module -> interface -> timeseries chain
    and verify the timeseries is reachable under the interface."""
    prefix = 3 if __file__.startswith("./") else 1
    fname = "s" + __file__[prefix:-3] + ".nwb"
    create_iface_series(fname, True)
    ut.verify_timeseries(fname, "Ones",
                         "processing/test module/BehavioralEvents", "TimeSeries")
23 |
24 |
def create_iface_series(fname, newfile):
    """Create (or append to, when `newfile` is false) `fname`, adding a
    module "test module" with a BehavioralEvents interface that holds a
    small TimeSeries named "Ones"."""
    settings = {"file_name": fname, "verbosity": "none"}
    if newfile:
        settings["identifier"] = utils.create_identifier("interface timeseries example")
        settings["mode"] = "w"
        settings["start_time"] = "Sat Jul 04 2015 3:14:16"
        settings["description"] = "Test interface timeseries file"
    else:
        settings["mode"] = "r+"
    f = nwb_file.open(**settings)

    # module -> interface -> timeseries
    # NOTE(review): the empty first arguments to make_group may have lost
    # angle-bracketed type names (e.g. "<Module>", "<TimeSeries>") in
    # formatting -- confirm against the original source.
    mod = f.make_group("", "test module")
    iface = mod.make_group("BehavioralEvents")
    ts = iface.make_group("", "Ones")
    data_attrs = {"unit": "Event", "conversion": 1.0, "resolution": float("nan")}
    ts.set_dataset("data", np.ones(10), attrs=data_attrs)
    ts.set_dataset("num_samples", 10)
    ts.set_dataset("timestamps", np.arange(10))

    f.close()
60 |
# Script entry: run the test and announce success (failures raise/exit).
test_file()
print(__file__ + " PASSED")
63 |
64 |
--------------------------------------------------------------------------------
/unittest/t_modification_time.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python
2 | import h5py
3 | import sys
4 | # import nwb
5 | import test_utils as ut
6 | import time
7 |
8 | from nwb import nwb_file
9 | from nwb import nwb_utils as utils
10 |
11 | # creates file and modifies it multiple times
12 |
# Derive the output file name from this script's name: t_xxx.py -> s_xxx.nwb
if __file__.startswith("./"):
    fname = "s" + __file__[3:-3] + ".nwb"
else:
    fname = "s" + __file__[1:-3] + ".nwb"

# Create the file, then reopen and modify it twice; each open-for-write
# session should append an entry to file_create_date.
settings = {}
settings["file_name"] = fname
settings["identifier"] = utils.create_identifier("Modification example")
settings["mode"] = "w"
settings["description"] = "Modified empty file"
settings["start_time"] = "Sat Jul 04 2015 3:14:16"
settings["verbosity"] = "none"

f = nwb_file.open(**settings)
f.close()

# First modification: the file must actually change for the API to
# update file_create_date.
settings = {"file_name": fname, "mode": "r+", "verbosity": "none"}
f = nwb_file.open(**settings)
f.set_dataset("species", "SPECIES")
f.close()

# Second modification.
settings = {"file_name": fname, "mode": "r+", "verbosity": "none"}
f = nwb_file.open(**settings)
f.set_dataset("genotype", "GENOTYPE")
f.close()

# Verify one create-date entry per open-for-write session.
# FIX: open read-only with an explicit mode and close the handle via a
# context manager; the original used h5py's deprecated default mode and
# never closed the file.
with h5py.File(fname, "r") as hfile:
    dates = hfile["file_create_date"]
    if len(dates) != 3:
        ut.error(__file__, "Expected 3 entries in file_create_date; found %d" % len(dates))

print("%s PASSED" % __file__)
55 |
56 |
--------------------------------------------------------------------------------
/unittest/t_no_data.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python
2 | import sys
3 | # import nwb
4 | import test_utils as ut
5 |
6 | from nwb import nwb_file
7 | from nwb import nwb_utils as utils
8 |
9 | # creates time series without 'data' field
10 | # TESTS TimeSeries.ignore_data()
11 |
def test_nodata_series():
    """Verify a TimeSeries written deliberately without a 'data' dataset."""
    prefix = 3 if __file__.startswith("./") else 1
    fname = "s" + __file__[prefix:-3] + ".nwb"
    name = "nodata"
    create_nodata_series(fname, name, "/acquisition/timeseries")
    ut.verify_timeseries(fname, name, "acquisition/timeseries", "TimeSeries")
    ut.verify_absent(fname, "acquisition/timeseries/" + name, "data")
22 |
def create_nodata_series(fname, name, target):
    """Write a new NWB file holding a TimeSeries that has timestamps but
    deliberately no 'data' dataset."""
    f = nwb_file.open(
        file_name=fname,
        identifier=utils.create_identifier("nodata example"),
        mode="w",
        description="time series no data test",
        start_time="Sat Jul 04 2015 3:14:16",
        verbosity="none",
    )
    # NOTE(review): the empty first argument to make_group may have lost an
    # angle-bracketed type name (e.g. "<TimeSeries>") in formatting.
    nodata = f.make_group("", name, path=target)
    nodata.set_dataset("timestamps", [0])
    f.close()
45 |
# Script entry: run the test and announce success (failures raise/exit).
test_nodata_series()
print(__file__ + " PASSED")
48 |
49 |
--------------------------------------------------------------------------------
/unittest/t_no_time.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python
2 | import sys
3 | # import nwb
4 | import test_utils as ut
5 |
6 | from nwb import nwb_file
7 | from nwb import nwb_utils as utils
8 |
9 | # creates time series without 'timestamps' or 'starting_time' fields
10 | # TESTS TimeSeries.ignore_time()
11 | # TESTS timeseries placement in acquisition, stimulus, templates
12 |
def test_notime_series():
    """Verify TimeSeries written without timestamps/starting_time, placed
    in acquisition, stimulus presentation and stimulus templates."""
    prefix = 3 if __file__.startswith("./") else 1
    fname = "s" + __file__[prefix:-3] + ".nwb"
    name = "notime"
    # (target path, whether to also check the time fields are absent)
    targets = (
        ("/acquisition/timeseries", True),
        ("/stimulus/presentation", False),
        ("/stimulus/templates", False),
    )
    for target, check_absent in targets:
        create_notime_series(fname, name, target)
        ut.verify_timeseries(fname, name, target[1:], "TimeSeries")
        if check_absent:
            ut.verify_absent(fname, target[1:] + "/" + name, "timestamps")
            ut.verify_absent(fname, target[1:] + "/" + name, "starting_time")
31 |
def create_notime_series(fname, name, target):
    """Write a new NWB file holding a TimeSeries with a 'data' dataset
    but deliberately no timestamps or starting_time."""
    f = nwb_file.open(
        file_name=fname,
        identifier=utils.create_identifier("notime example"),
        mode="w",
        start_time="Sat Jul 04 2015 3:14:16",
        description="Test no time",
        verbosity="none",
    )
    # NOTE(review): the empty first argument to make_group may have lost an
    # angle-bracketed type name (e.g. "<TimeSeries>") in formatting.
    notime = f.make_group("", name, path=target)
    data_attrs = {"unit": "n/a", "conversion": 1.0, "resolution": 1.0}
    notime.set_dataset("data", [0.0], attrs=data_attrs)
    f.close()
58 |
# Script entry: run the test and announce success (failures raise/exit).
test_notime_series()
print(__file__ + " PASSED")
61 |
62 |
--------------------------------------------------------------------------------
/unittest/t_ref_image.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python
2 | # import nwb
3 | import test_utils as ut
4 |
5 | from nwb import nwb_file
6 | from nwb import nwb_utils as utils
7 |
8 |
9 | # TESTS storage of reference image
10 |
def test_refimage_series():
    """Verify a stored reference image: element count plus the 'format'
    and 'description' attributes."""
    prefix = 3 if __file__.startswith("./") else 1
    fname = "s" + __file__[prefix:-3] + ".nwb"
    name = "refimage"
    create_refimage(fname, name)

    val = ut.verify_present(fname, "acquisition/images/", name)
    if len(val) != 5:
        ut.error("Checking ref image contents", "wrong dimension")

    img_path = "acquisition/images/" + name
    val = ut.verify_attribute_present(fname, img_path, "format")
    if not ut.strcmp(val, "raw"):
        ut.error("Checking ref image format", "Wrong value")
    val = ut.verify_attribute_present(fname, img_path, "description")
    if not ut.strcmp(val, "test"):
        ut.error("Checking ref image description", "Wrong value")
28 |
def create_refimage(fname, name):
    """Write a new NWB file containing one 5-byte reference image named
    `name` with "raw" format and "test" description."""
    f = nwb_file.open(
        file_name=fname,
        start_time="2008-09-15T15:53:00-08:00",
        identifier=utils.create_identifier("reference image test"),
        mode="w",
        description="reference image test",
        verbosity="none",
    )
    # NOTE(review): the empty first argument to set_dataset may have lost
    # an angle-bracketed type name (e.g. "<image_X>") in formatting.
    image_attrs = {"description": "test", "format": "raw"}
    f.set_dataset("", [1, 2, 3, 4, 5], dtype="uint8", name=name, attrs=image_attrs)
    f.close()
45 |
# Script entry: run the test and announce success (failures raise/exit).
test_refimage_series()
print(__file__ + " PASSED")
48 |
49 |
--------------------------------------------------------------------------------
/unittest/t_softlink.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python
2 | import sys
3 | # import nwb
4 | import test_utils as ut
5 |
6 | from nwb import nwb_file
7 | from nwb import nwb_utils as utils
8 |
9 | # creates time series without 'data' field
10 | # TESTS softlink of TimeSeries.data
11 |
def test_softlink():
    """Create a source file with data, then a second file whose
    TimeSeries 'data' is an external (soft) link into the source file;
    verify both files."""
    prefix = 3 if __file__.startswith("./") else 1
    base = "s" + __file__[prefix:-3]
    fname1 = base + "1" + ".nwb"
    fname2 = base + "2" + ".nwb"
    name1 = "softlink_source"
    name2 = "softlink_reader"
    target = "/acquisition/timeseries"
    create_softlink_source(fname1, name1, target)
    create_softlink_reader(fname2, name2, fname1, name1, target)

    ut.verify_timeseries(fname1, name1, "acquisition/timeseries", "TimeSeries")
    ut.verify_timeseries(fname2, name2, "acquisition/timeseries", "TimeSeries")
    # the linked 'data' must be visible through the reader file
    ut.verify_present(fname2, "acquisition/timeseries/" + name2, "data")
30 |
def create_softlink_reader(fname, name, src_fname, src_name, target):
    """Write a file whose TimeSeries 'data' is an external link to the
    'data' dataset of `src_name` inside `src_fname`."""
    f = nwb_file.open(
        file_name=fname,
        start_time="2008-09-15T15:53:00-08:00",
        identifier=utils.create_identifier("softlink reader"),
        mode="w",
        description="softlink test",
        verbosity="none",
    )
    # NOTE(review): the empty first argument to make_group may have lost an
    # angle-bracketed type name (e.g. "<TimeSeries>") in formatting.
    reader = f.make_group("", name, path=target)
    # "extlink:<file>,<path>" tells the API to store an external link
    link_path = "acquisition/timeseries/" + src_name + "/data"
    reader.set_dataset("data", "extlink:%s,%s" % (src_fname, link_path))
    reader.set_dataset("timestamps", [345])
    f.close()
55 |
def create_softlink_source(fname, name, target):
    """Write a file holding a TimeSeries with a small 'data' dataset that
    another file can link to."""
    f = nwb_file.open(
        file_name=fname,
        identifier=utils.create_identifier("softlink source"),
        mode="w",
        description="time series no data test",
        start_time="Sat Jul 04 2015 3:14:16",
        verbosity="none",
    )
    source = f.make_group("", name, path=target)
    data_attrs = {"unit": "parsec", "conversion": 1.0, "resolution": 1e-3}
    source.set_dataset("data", [234.0], attrs=data_attrs)
    source.set_dataset("timestamps", [123.0])
    f.close()
75 |
# Script entry: run the test and announce success (failures raise/exit).
test_softlink()
print(__file__ + " PASSED")
78 |
79 |
--------------------------------------------------------------------------------
/unittest/t_starting_time.py:
--------------------------------------------------------------------------------
1 |
2 | #!/usr/bin/python
3 | # import nwb
4 | import test_utils as ut
5 |
6 | from nwb import nwb_file
7 | from nwb import nwb_utils as utils
8 |
9 | # TESTS use of TimeSeries.starting_time
10 |
def test_nodata_series():
    """Verify a TimeSeries that uses starting_time (with a rate
    attribute) in place of timestamps.

    (The function name is kept for backward compatibility with the
    script's entry point; it tests starting_time, not the no-data case.)
    """
    prefix = 3 if __file__.startswith("./") else 1
    fname = "s" + __file__[prefix:-3] + ".nwb"
    name = "starting_time"
    create_startingtime_series(fname, name, "/acquisition/timeseries")
    ut.verify_timeseries(fname, name, "acquisition/timeseries", "TimeSeries")
    ut.verify_absent(fname, "acquisition/timeseries/" + name, "timestamps")
    val = ut.verify_present(fname, "acquisition/timeseries/" + name, "starting_time")
    if val != 0.125:
        ut.error("Checking start time", "Incorrect value")
    # BUG FIX: the 'rate' attribute lives on the series' starting_time
    # dataset, i.e. .../<name>/starting_time.  The original path
    # (".../starting_time/" + name) only resolved correctly because this
    # series happens to be named "starting_time".
    val = ut.verify_attribute_present(
        fname, "acquisition/timeseries/" + name + "/starting_time", "rate")
    if val != 2:
        ut.error("Checking rate", "Incorrect value")
27 |
def create_startingtime_series(fname, name, target):
    """Write a new NWB file holding a TimeSeries whose time base is a
    starting_time dataset (value 0.125, rate 2.0) instead of explicit
    timestamps."""
    f = nwb_file.open(
        file_name=fname,
        identifier=utils.create_identifier("starting time test"),
        mode="w",
        description="time series starting time test",
        start_time="Sat Jul 04 2015 3:14:16",
        verbosity="none",
    )
    # NOTE(review): the empty first argument to make_group may have lost an
    # angle-bracketed type name (e.g. "<TimeSeries>") in formatting.
    stime = f.make_group("", name, path=target)
    data_attrs = {"unit": "n/a", "conversion": 1.0, "resolution": 1.0}
    stime.set_dataset("data", [0.0, 1.0, 2.0, 3.0], attrs=data_attrs)
    stime.set_dataset("num_samples", 4)
    # time base: start at 0.125 s, sampled at 2 Hz
    stime.set_dataset("starting_time", 0.125, attrs={"rate": 2.0, "unit": "Seconds"})
    f.close()
55 |
# Script entry: run the test and announce success (failures raise/exit).
test_nodata_series()
print(__file__ + " PASSED")
58 |
59 |
--------------------------------------------------------------------------------
/unittest/t_top_datasets.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python
2 | # import nwb
3 | import test_utils as ut
4 |
5 | from nwb import nwb_file
6 | from nwb import nwb_utils as utils
7 |
8 | # TESTS top-level datasets
9 |
def test_refimage_series():
    """Verify the top-level datasets written to every NWB file:
    identifier, file_create_date, session_start_time and
    session_description."""
    prefix = 3 if __file__.startswith("./") else 1
    fname = "s" + __file__[prefix:-3] + ".nwb"
    name = "refimage"
    create_refimage(fname, name)
    val = ut.verify_present(fname, "/", "identifier")
    if not ut.strcmp(val, "vwx"):
        # fixed typo in error label ("idenfier" -> "identifier")
        ut.error("Checking file identifier", "wrong contents")
    # file_create_date is only checked for presence, not contents
    val = ut.verify_present(fname, "/", "file_create_date")
    val = ut.verify_present(fname, "/", "session_start_time")
    if not ut.strcmp(val, "xyz"):
        ut.error("Checking session start time", "wrong contents")
    val = ut.verify_present(fname, "/", "session_description")
    if not ut.strcmp(val, "wxy"):
        # BUG FIX: this branch checks session_description; the original
        # copy-pasted the "Checking session start time" label.
        ut.error("Checking session description", "wrong contents")
27 |
def create_refimage(fname, name):
    """Write a minimal NWB file whose top-level metadata holds known
    sentinel values ("vwx"/"wxy"/"xyz") for the caller to verify.

    The *name* argument is currently unused but kept for interface
    compatibility with the caller.
    """
    options = {
        "file_name": fname,
        "identifier": "vwx",
        "mode": "w",
        "description": "wxy",
        "start_time": "xyz",
        "verbosity": "none",
    }
    handle = nwb_file.open(**options)
    handle.close()
39 |
40 | test_refimage_series()
41 | print("%s PASSED" % __file__)
42 |
43 |
--------------------------------------------------------------------------------
/unittest/t_unittimes.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python
2 | import h5py
3 | import sys
4 | # import nwb
5 | import test_utils as ut
6 |
7 | from nwb import nwb_file
8 | from nwb import nwb_utils as utils
9 |
10 | # TESTS creation of UnitTimes interface and data stored within
11 |
12 |
def test_unit_times():
    """Create an NWB file with a UnitTimes interface inside a processing
    module, store spike times for three units, then spot-check the data
    that was written by reading the file back with h5py.

    Aborts via assert on any mismatch.
    """
    # Derive the output file name from this script's name:
    # "./t_x.py" or "/t_x.py" -> "st_x.nwb".
    if __file__.startswith("./"):
        fname = "s" + __file__[3:-3] + ".nwb"
    else:
        fname = "s" + __file__[1:-3] + ".nwb"
    # create the file we're going to use
    ndata = create_empty_file(fname)
    # create a module to store processed data
    # NOTE(review): the empty first argument here (and below for the unit
    # groups) looks like a "<...>" template identifier whose angle brackets
    # were lost in extraction -- confirm against the original source.
    mod = ndata.make_group("", "my spike times")
    # add a unit times interface to the module
    iface = mod.make_group("UnitTimes")
    # make some data to store
    spikes = create_spikes()

    # Store one group per unit, each holding the spike times, an (empty)
    # description, and the data source.
    for i in range(len(spikes)):
        unit_name = "unit-%d" % i
        ug = iface.make_group("", unit_name)
        ug.set_dataset("times", spikes[i])
        ug.set_dataset("unit_description", "")
        ug.set_dataset("source", "Data spike-sorted by B. Bunny")

    # clean up and close objects
    ndata.close()

    # test random sample to make sure data was stored correctly.
    # Open read-only: relying on h5py's positional default mode was
    # deprecated and then removed in h5py >= 3.
    h5 = h5py.File(fname, "r")
    # Dataset.value was removed in h5py 3.0; [...] reads the whole dataset
    # and works on both h5py 2.x and 3.x.
    times = h5["processing/my spike times/UnitTimes/unit-0/times"][...]
    assert len(times) == len(spikes[0]), "Spike count for unit-0 wrong"
    assert abs(times[1] - spikes[0][1]) < 0.001, "Wrong time found in file"
    h5.close()
53 |
54 |
def create_spikes():
    """Return hard-coded spike-time lists for three example units."""
    return [
        [1.3, 1.4, 1.9, 2.1, 2.2, 2.3],
        [2.2, 3.0],
        [0.3, 0.4, 1.0, 1.1, 1.45, 1.8, 1.81, 2.2],
    ]
61 |
def create_empty_file(fname):
    """Create a fresh NWB file named *fname* and return the open handle."""
    options = {
        "file_name": fname,
        "identifier": utils.create_identifier("UnitTimes example"),
        "mode": "w",
        "start_time": "Sat Jul 04 2015 3:14:16",
        "description": "Test file with spike times in processing module",
        "verbosity": "none",
    }
    return nwb_file.open(**options)
72 |
73 | test_unit_times()
74 | print("%s PASSED" % __file__)
75 |
76 |
--------------------------------------------------------------------------------