├── ecephys_spike_sorting
│   ├── __init__.py
│   ├── common
│   │   ├── __init__.py
│   │   ├── schemas.py
│   │   ├── OEFileInfo.py
│   │   └── epoch.py
│   ├── modules
│   │   ├── __init__.py
│   │   ├── automerging
│   │   │   ├── __init__.py
│   │   │   ├── _schemas.py
│   │   │   ├── README.md
│   │   │   ├── __main__.py
│   │   │   ├── automerging.py
│   │   │   ├── merges.py
│   │   │   └── metrics.py
│   │   ├── ks4_helper
│   │   │   ├── __init__.py
│   │   │   ├── README.md
│   │   │   └── _schemas.py
│   │   ├── psth_events
│   │   │   ├── __init__.py
│   │   │   ├── _schemas.py
│   │   │   └── README.md
│   │   ├── catGT_helper
│   │   │   ├── __init__.py
│   │   │   ├── README.md
│   │   │   └── _schemas.py
│   │   ├── depth_estimation
│   │   │   ├── __init__.py
│   │   │   ├── images
│   │   │   │   └── probe_depth.png
│   │   │   ├── README.md
│   │   │   ├── _schemas.py
│   │   │   └── __main__.py
│   │   ├── extract_from_npx
│   │   │   ├── __init__.py
│   │   │   ├── _schemas.py
│   │   │   ├── README.md
│   │   │   ├── create_settings_json.py
│   │   │   └── __main__.py
│   │   ├── kilosort_helper
│   │   │   ├── __init__.py
│   │   │   ├── kilosort2_master_file.m
│   │   │   ├── README.md
│   │   │   ├── main_KS2_KS25.m
│   │   │   ├── main_kilosort_multiversion.m
│   │   │   └── main_KS2_KS25_KS3.m
│   │   ├── mean_waveforms
│   │   │   ├── __init__.py
│   │   │   ├── images
│   │   │   │   ├── 2d_waveform.png
│   │   │   │   ├── 1d_waveform_features.png
│   │   │   │   └── 2d_waveform_features.png
│   │   │   ├── _schemas.py
│   │   │   └── README.md
│   │   ├── noise_templates
│   │   │   ├── __init__.py
│   │   │   ├── rf_classifier.pkl
│   │   │   ├── images
│   │   │   │   ├── type1_noise_waveform.png
│   │   │   │   ├── type2_noise_waveform.png
│   │   │   │   ├── type3_noise_waveform.png
│   │   │   │   └── type4_noise_waveform.png
│   │   │   ├── __main__.py
│   │   │   ├── README.md
│   │   │   ├── _schemas.py
│   │   │   └── train_classifier.py
│   │   ├── quality_metrics
│   │   │   ├── __init__.py
│   │   │   ├── images
│   │   │   │   ├── amp_cut.png
│   │   │   │   ├── d_prime.png
│   │   │   │   ├── isi_viol.png
│   │   │   │   ├── isol_dist.png
│   │   │   │   └── nn_overlap.png
│   │   │   ├── _schemas.py
│   │   │   ├── README.md
│   │   │   └── __main__.py
│   │   ├── tPrime_helper
│   │   │   ├── __init__.py
│   │   │   ├── _schemas.py
│   │   │   └── README.md
│   │   ├── median_subtraction
│   │   │   ├── __init__.py
│   │   │   ├── SpikeBandMedianSubtraction
│   │   │   │   ├── JuceLibraryCode
│   │   │   │   │   ├── juce_core.cpp
│   │   │   │   │   ├── juce_core.mm
│   │   │   │   │   ├── juce_events.mm
│   │   │   │   │   ├── juce_opengl.mm
│   │   │   │   │   ├── juce_video.cpp
│   │   │   │   │   ├── juce_video.mm
│   │   │   │   │   ├── juce_events.cpp
│   │   │   │   │   ├── juce_graphics.mm
│   │   │   │   │   ├── juce_opengl.cpp
│   │   │   │   │   ├── juce_graphics.cpp
│   │   │   │   │   ├── juce_gui_extra.cpp
│   │   │   │   │   ├── juce_gui_extra.mm
│   │   │   │   │   ├── juce_gui_basics.cpp
│   │   │   │   │   ├── juce_gui_basics.mm
│   │   │   │   │   ├── juce_audio_basics.cpp
│   │   │   │   │   ├── juce_audio_basics.mm
│   │   │   │   │   ├── juce_audio_devices.mm
│   │   │   │   │   ├── juce_audio_formats.mm
│   │   │   │   │   ├── juce_cryptography.cpp
│   │   │   │   │   ├── juce_cryptography.mm
│   │   │   │   │   ├── juce_audio_devices.cpp
│   │   │   │   │   ├── juce_audio_formats.cpp
│   │   │   │   │   ├── juce_audio_processors.mm
│   │   │   │   │   ├── juce_data_structures.cpp
│   │   │   │   │   ├── juce_data_structures.mm
│   │   │   │   │   ├── juce_audio_processors.cpp
│   │   │   │   │   ├── ReadMe.txt
│   │   │   │   │   └── JuceHeader.h
│   │   │   │   ├── Builds
│   │   │   │   │   └── VisualStudio2013
│   │   │   │   │       ├── resources.rc
│   │   │   │   │       └── SpikeBandMedianSubtraction.sln
│   │   │   │   └── SpikeBandMedianSubtraction.jucer
│   │   │   ├── _schemas.py
│   │   │   ├── README.md
│   │   │   └── __main__.py
│   │   ├── pykilosort_helper
│   │   │   ├── __init__.py
│   │   │   ├── README.md
│   │   │   └── _schemas.py
│   │   └── kilosort_postprocessing
│   │       ├── __init__.py
│   │       ├── README.md
│   │       ├── _schemas.py
│   │       └── __main__.py
│   └── scripts
│       ├── __init__.py
│       ├── helpers
│       │   ├── __init__.py
│       │   ├── processing.py
│       │   ├── metric_file_fix.py
│       │   ├── run_one_probe.py
│       │   ├── plot_raw_data.py
│       │   └── log_from_json.py
│       └── README.md
├── setup.cfg
├── docs
│   ├── authors.rst
│   ├── history.rst
│   ├── aibs_sphinx
│   │   ├── .gitignore
│   │   ├── theme.conf
│   │   ├── static
│   │   │   ├── style
│   │   │   │   ├── comment.png
│   │   │   │   └── nocomment.png
│   │   │   └── external_assets
│   │   │       ├── images
│   │   │       │   ├── arrow_on.gif
│   │   │       │   ├── tab_blue.gif
│   │   │       │   ├── arrow_off.gif
│   │   │       │   ├── arrow_over.gif
│   │   │       │   ├── logo_AIBS.gif
│   │   │       │   └── progress_indicator.gif
│   │   │       ├── javascript
│   │   │       │   ├── relatedData.js
│   │   │       │   ├── appConfig.js
│   │   │       │   └── browserVersions.js
│   │   │       └── stylesheets
│   │   │           ├── animation.css
│   │   │           └── common_layout.css
│   │   ├── templates
│   │   │   ├── layout.html
│   │   │   ├── portalHeader.html
│   │   │   └── globaltoc.html
│   │   ├── buildPortalAssets.sh
│   │   ├── package.json
│   │   └── README.md
│   ├── readme.rst
│   ├── usage.rst
│   ├── gallery
│   │   ├── README.txt
│   │   └── helloworld.py
│   ├── installation.rst
│   └── index.rst
├── Makefile
├── README.md
├── icon.png
├── HISTORY.rst
├── ece_pipeline_cartoon.png
├── tests
│   ├── __init__.py
│   ├── integration
│   │   └── __init__.py
│   └── unit
│       ├── modules
│       │   ├── extract_from_npx
│       │   │   └── test_extract_from_npx.py
│       │   ├── automerging
│       │   │   └── test_automerging.py
│       │   ├── noise_templates
│       │   │   └── test_noise_templates.py
│       │   ├── quality_metrics
│       │   │   └── test_quality_metrics.py
│       │   ├── mean_waveforms
│       │   │   └── test_mean_waveforms.py
│       │   └── depth_estimation
│       │       └── test_depth_estimation.py
│       └── common
│           └── test_utils.py
├── .editorconfig
├── MANIFEST.in
├── AUTHORS.rst
├── .cookiecutter
│   ├── update.sh
│   ├── update_from_repo.py
│   ├── .cookiecutter.yaml
│   └── .cookiecutter.json
├── integration-tox.ini
├── .bumpversion.cfg
├── appveyor.yml
├── Pipfile
├── tox.ini
├── .gitignore
├── CONTRIBUTING.md
├── setup.py
├── LICENSE.txt
├── .travis.yml
└── cached_data_manifests
    └── internal_manifest.json
/ecephys_spike_sorting/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/ecephys_spike_sorting/common/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/ecephys_spike_sorting/modules/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/ecephys_spike_sorting/scripts/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/setup.cfg:
--------------------------------------------------------------------------------
1 | [flake8]
2 | exclude = docs
3 |
--------------------------------------------------------------------------------
/docs/authors.rst:
--------------------------------------------------------------------------------
1 | .. include:: ../AUTHORS.rst
2 |
--------------------------------------------------------------------------------
/docs/history.rst:
--------------------------------------------------------------------------------
1 | .. include:: ../HISTORY.rst
2 |
--------------------------------------------------------------------------------
/ecephys_spike_sorting/modules/automerging/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/ecephys_spike_sorting/modules/ks4_helper/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/ecephys_spike_sorting/modules/psth_events/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/ecephys_spike_sorting/scripts/helpers/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/ecephys_spike_sorting/scripts/helpers/processing.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/ecephys_spike_sorting/modules/catGT_helper/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/ecephys_spike_sorting/modules/depth_estimation/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/ecephys_spike_sorting/modules/extract_from_npx/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/ecephys_spike_sorting/modules/kilosort_helper/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/ecephys_spike_sorting/modules/mean_waveforms/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/ecephys_spike_sorting/modules/noise_templates/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/ecephys_spike_sorting/modules/quality_metrics/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/ecephys_spike_sorting/modules/tPrime_helper/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/ecephys_spike_sorting/modules/median_subtraction/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/ecephys_spike_sorting/modules/pykilosort_helper/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/ecephys_spike_sorting/modules/kilosort_postprocessing/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 |
2 | clean:
3 | make -C docs clean
4 | rm -f Pipfile.lock
5 |
--------------------------------------------------------------------------------
/docs/aibs_sphinx/.gitignore:
--------------------------------------------------------------------------------
1 | *.DS_Store
2 | node_modules/
3 | package-lock.json
--------------------------------------------------------------------------------
/docs/readme.rst:
--------------------------------------------------------------------------------
1 | ======
2 | README
3 | ======
4 |
5 | .. include:: ../README.md
6 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jenniferColonell/ecephys_spike_sorting/HEAD/README.md
--------------------------------------------------------------------------------
/icon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jenniferColonell/ecephys_spike_sorting/HEAD/icon.png
--------------------------------------------------------------------------------
/HISTORY.rst:
--------------------------------------------------------------------------------
1 | =======
2 | History
3 | =======
4 |
5 | 0.1.0 (2018-05-10)
6 | ------------------
7 |
--------------------------------------------------------------------------------
/ece_pipeline_cartoon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jenniferColonell/ecephys_spike_sorting/HEAD/ece_pipeline_cartoon.png
--------------------------------------------------------------------------------
/docs/aibs_sphinx/theme.conf:
--------------------------------------------------------------------------------
1 | # see http://sphinx-doc.org/templating.html
2 | [theme]
3 | inherit = basic
4 | stylesheet = aibs_sphinx.css
--------------------------------------------------------------------------------
/docs/usage.rst:
--------------------------------------------------------------------------------
1 | =====
2 | Usage
3 | =====
4 |
5 | To use AIBS ecephys spike sorting in a project::
6 |
7 | import ecephys_spike_sorting
8 |
--------------------------------------------------------------------------------
/docs/aibs_sphinx/static/style/comment.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jenniferColonell/ecephys_spike_sorting/HEAD/docs/aibs_sphinx/static/style/comment.png
--------------------------------------------------------------------------------
/docs/aibs_sphinx/static/style/nocomment.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jenniferColonell/ecephys_spike_sorting/HEAD/docs/aibs_sphinx/static/style/nocomment.png
--------------------------------------------------------------------------------
/docs/aibs_sphinx/static/external_assets/images/arrow_on.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jenniferColonell/ecephys_spike_sorting/HEAD/docs/aibs_sphinx/static/external_assets/images/arrow_on.gif
--------------------------------------------------------------------------------
/docs/aibs_sphinx/static/external_assets/images/tab_blue.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jenniferColonell/ecephys_spike_sorting/HEAD/docs/aibs_sphinx/static/external_assets/images/tab_blue.gif
--------------------------------------------------------------------------------
/docs/aibs_sphinx/static/external_assets/images/arrow_off.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jenniferColonell/ecephys_spike_sorting/HEAD/docs/aibs_sphinx/static/external_assets/images/arrow_off.gif
--------------------------------------------------------------------------------
/docs/aibs_sphinx/static/external_assets/images/arrow_over.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jenniferColonell/ecephys_spike_sorting/HEAD/docs/aibs_sphinx/static/external_assets/images/arrow_over.gif
--------------------------------------------------------------------------------
/docs/aibs_sphinx/static/external_assets/images/logo_AIBS.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jenniferColonell/ecephys_spike_sorting/HEAD/docs/aibs_sphinx/static/external_assets/images/logo_AIBS.gif
--------------------------------------------------------------------------------
/ecephys_spike_sorting/modules/kilosort_postprocessing/README.md:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jenniferColonell/ecephys_spike_sorting/HEAD/ecephys_spike_sorting/modules/kilosort_postprocessing/README.md
--------------------------------------------------------------------------------
/ecephys_spike_sorting/modules/noise_templates/rf_classifier.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jenniferColonell/ecephys_spike_sorting/HEAD/ecephys_spike_sorting/modules/noise_templates/rf_classifier.pkl
--------------------------------------------------------------------------------
/ecephys_spike_sorting/modules/quality_metrics/images/amp_cut.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jenniferColonell/ecephys_spike_sorting/HEAD/ecephys_spike_sorting/modules/quality_metrics/images/amp_cut.png
--------------------------------------------------------------------------------
/ecephys_spike_sorting/modules/quality_metrics/images/d_prime.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jenniferColonell/ecephys_spike_sorting/HEAD/ecephys_spike_sorting/modules/quality_metrics/images/d_prime.png
--------------------------------------------------------------------------------
/ecephys_spike_sorting/modules/mean_waveforms/images/2d_waveform.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jenniferColonell/ecephys_spike_sorting/HEAD/ecephys_spike_sorting/modules/mean_waveforms/images/2d_waveform.png
--------------------------------------------------------------------------------
/ecephys_spike_sorting/modules/quality_metrics/images/isi_viol.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jenniferColonell/ecephys_spike_sorting/HEAD/ecephys_spike_sorting/modules/quality_metrics/images/isi_viol.png
--------------------------------------------------------------------------------
/ecephys_spike_sorting/modules/quality_metrics/images/isol_dist.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jenniferColonell/ecephys_spike_sorting/HEAD/ecephys_spike_sorting/modules/quality_metrics/images/isol_dist.png
--------------------------------------------------------------------------------
/ecephys_spike_sorting/modules/quality_metrics/images/nn_overlap.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jenniferColonell/ecephys_spike_sorting/HEAD/ecephys_spike_sorting/modules/quality_metrics/images/nn_overlap.png
--------------------------------------------------------------------------------
/docs/aibs_sphinx/static/external_assets/images/progress_indicator.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jenniferColonell/ecephys_spike_sorting/HEAD/docs/aibs_sphinx/static/external_assets/images/progress_indicator.gif
--------------------------------------------------------------------------------
/ecephys_spike_sorting/modules/depth_estimation/images/probe_depth.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jenniferColonell/ecephys_spike_sorting/HEAD/ecephys_spike_sorting/modules/depth_estimation/images/probe_depth.png
--------------------------------------------------------------------------------
/ecephys_spike_sorting/modules/mean_waveforms/images/1d_waveform_features.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jenniferColonell/ecephys_spike_sorting/HEAD/ecephys_spike_sorting/modules/mean_waveforms/images/1d_waveform_features.png
--------------------------------------------------------------------------------
/ecephys_spike_sorting/modules/mean_waveforms/images/2d_waveform_features.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jenniferColonell/ecephys_spike_sorting/HEAD/ecephys_spike_sorting/modules/mean_waveforms/images/2d_waveform_features.png
--------------------------------------------------------------------------------
/ecephys_spike_sorting/modules/noise_templates/images/type1_noise_waveform.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jenniferColonell/ecephys_spike_sorting/HEAD/ecephys_spike_sorting/modules/noise_templates/images/type1_noise_waveform.png
--------------------------------------------------------------------------------
/ecephys_spike_sorting/modules/noise_templates/images/type2_noise_waveform.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jenniferColonell/ecephys_spike_sorting/HEAD/ecephys_spike_sorting/modules/noise_templates/images/type2_noise_waveform.png
--------------------------------------------------------------------------------
/ecephys_spike_sorting/modules/noise_templates/images/type3_noise_waveform.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jenniferColonell/ecephys_spike_sorting/HEAD/ecephys_spike_sorting/modules/noise_templates/images/type3_noise_waveform.png
--------------------------------------------------------------------------------
/ecephys_spike_sorting/modules/noise_templates/images/type4_noise_waveform.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jenniferColonell/ecephys_spike_sorting/HEAD/ecephys_spike_sorting/modules/noise_templates/images/type4_noise_waveform.png
--------------------------------------------------------------------------------
/docs/gallery/README.txt:
--------------------------------------------------------------------------------
1 | .. _examples-index:
2 |
3 | Gallery of Examples
4 | ===================
5 |
6 |
7 | .. _general_examples:
8 |
9 | General examples
10 | ----------------
11 |
12 | General-purpose and introductory examples from the sphinx-gallery
13 |
--------------------------------------------------------------------------------
/tests/__init__.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | import os
4 | import sys
5 |
6 | def entrypoint_exists(entry_point):
7 |     """Return True if the named console-script entry point exists next to the Python executable."""
8 |     if sys.platform == "win32":
9 |         entry_point += ".exe"
10 |     executable_dir = os.path.dirname(sys.executable)
11 |     return os.path.exists(os.path.join(executable_dir, entry_point))
--------------------------------------------------------------------------------
/docs/installation.rst:
--------------------------------------------------------------------------------
1 | .. highlight:: shell
2 |
3 | ======================================
4 | ecephys spike sorting Installation
5 | ======================================
6 |
7 | .. code-block:: console
8 |
9 | $ git clone https://github.com/AllenInstitute/ecephys_spike_sorting.git
10 | $ cd ecephys_spike_sorting
11 | $ pip install .
12 |
13 |
--------------------------------------------------------------------------------
/tests/integration/__init__.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | import os
4 | import sys
5 |
6 | def entrypoint_exists(entry_point):
7 |     """Return True if the named console-script entry point exists next to the Python executable."""
8 |     if sys.platform == "win32":
9 |         entry_point += ".exe"
10 |     executable_dir = os.path.dirname(sys.executable)
11 |     return os.path.exists(os.path.join(executable_dir, entry_point))
--------------------------------------------------------------------------------
/.editorconfig:
--------------------------------------------------------------------------------
1 | # http://editorconfig.org
2 |
3 | root = true
4 |
5 | [*]
6 | indent_style = space
7 | indent_size = 4
8 | trim_trailing_whitespace = true
9 | insert_final_newline = true
10 | charset = utf-8
11 | end_of_line = lf
12 |
13 | [*.bat]
14 | indent_style = tab
15 | end_of_line = crlf
16 |
17 | [Makefile]
18 | indent_style = tab
19 |
--------------------------------------------------------------------------------
/ecephys_spike_sorting/modules/median_subtraction/SpikeBandMedianSubtraction/JuceLibraryCode/juce_core.cpp:
--------------------------------------------------------------------------------
1 | /*
2 |
3 | IMPORTANT! This file is auto-generated each time you save your
4 | project - if you alter its contents, your changes may be overwritten!
5 |
6 | */
7 |
8 | #include "AppConfig.h"
9 | #include <juce_core/juce_core.cpp>
10 |
--------------------------------------------------------------------------------
/ecephys_spike_sorting/modules/median_subtraction/SpikeBandMedianSubtraction/JuceLibraryCode/juce_core.mm:
--------------------------------------------------------------------------------
1 | /*
2 |
3 | IMPORTANT! This file is auto-generated each time you save your
4 | project - if you alter its contents, your changes may be overwritten!
5 |
6 | */
7 |
8 | #include "AppConfig.h"
9 | #include <juce_core/juce_core.mm>
10 |
--------------------------------------------------------------------------------
/docs/aibs_sphinx/templates/layout.html:
--------------------------------------------------------------------------------
1 | {% extends '!layout.html' %}
2 |
3 | {% block header %}
4 | {% include "portalHeader.html" %}
5 | {{ super() }}
6 | {% endblock %}
7 |
8 | {% block relbar2 %}
9 | {% endblock %}
10 |
11 | {% block relbar1 %}
12 | {% endblock %}
13 |
14 | {% block footer %}
15 | {{ super() }}
16 | {% endblock %}
17 |
--------------------------------------------------------------------------------
/ecephys_spike_sorting/modules/median_subtraction/SpikeBandMedianSubtraction/JuceLibraryCode/juce_events.mm:
--------------------------------------------------------------------------------
1 | /*
2 |
3 | IMPORTANT! This file is auto-generated each time you save your
4 | project - if you alter its contents, your changes may be overwritten!
5 |
6 | */
7 |
8 | #include "AppConfig.h"
9 | #include <juce_events/juce_events.mm>
10 |
--------------------------------------------------------------------------------
/ecephys_spike_sorting/modules/median_subtraction/SpikeBandMedianSubtraction/JuceLibraryCode/juce_opengl.mm:
--------------------------------------------------------------------------------
1 | /*
2 |
3 | IMPORTANT! This file is auto-generated each time you save your
4 | project - if you alter its contents, your changes may be overwritten!
5 |
6 | */
7 |
8 | #include "AppConfig.h"
9 | #include <juce_opengl/juce_opengl.mm>
10 |
--------------------------------------------------------------------------------
/ecephys_spike_sorting/modules/median_subtraction/SpikeBandMedianSubtraction/JuceLibraryCode/juce_video.cpp:
--------------------------------------------------------------------------------
1 | /*
2 |
3 | IMPORTANT! This file is auto-generated each time you save your
4 | project - if you alter its contents, your changes may be overwritten!
5 |
6 | */
7 |
8 | #include "AppConfig.h"
9 | #include <juce_video/juce_video.cpp>
10 |
--------------------------------------------------------------------------------
/ecephys_spike_sorting/modules/median_subtraction/SpikeBandMedianSubtraction/JuceLibraryCode/juce_video.mm:
--------------------------------------------------------------------------------
1 | /*
2 |
3 | IMPORTANT! This file is auto-generated each time you save your
4 | project - if you alter its contents, your changes may be overwritten!
5 |
6 | */
7 |
8 | #include "AppConfig.h"
9 | #include <juce_video/juce_video.mm>
10 |
--------------------------------------------------------------------------------
/MANIFEST.in:
--------------------------------------------------------------------------------
1 | include requirements.txt
2 | include requirements-test.txt
3 | include requirements-dev.txt
4 | include AUTHORS.rst
5 | include HISTORY.rst
6 | include README.md
7 |
8 | recursive-include tests *
9 | recursive-exclude * __pycache__
10 | recursive-exclude * *.py[co]
11 |
12 | recursive-include docs *.rst conf.py Makefile make.bat *.jpg *.png *.gif
13 |
--------------------------------------------------------------------------------
/ecephys_spike_sorting/modules/median_subtraction/SpikeBandMedianSubtraction/JuceLibraryCode/juce_events.cpp:
--------------------------------------------------------------------------------
1 | /*
2 |
3 | IMPORTANT! This file is auto-generated each time you save your
4 | project - if you alter its contents, your changes may be overwritten!
5 |
6 | */
7 |
8 | #include "AppConfig.h"
9 | #include <juce_events/juce_events.cpp>
10 |
--------------------------------------------------------------------------------
/ecephys_spike_sorting/modules/median_subtraction/SpikeBandMedianSubtraction/JuceLibraryCode/juce_graphics.mm:
--------------------------------------------------------------------------------
1 | /*
2 |
3 | IMPORTANT! This file is auto-generated each time you save your
4 | project - if you alter its contents, your changes may be overwritten!
5 |
6 | */
7 |
8 | #include "AppConfig.h"
9 | #include <juce_graphics/juce_graphics.mm>
10 |
--------------------------------------------------------------------------------
/ecephys_spike_sorting/modules/median_subtraction/SpikeBandMedianSubtraction/JuceLibraryCode/juce_opengl.cpp:
--------------------------------------------------------------------------------
1 | /*
2 |
3 | IMPORTANT! This file is auto-generated each time you save your
4 | project - if you alter its contents, your changes may be overwritten!
5 |
6 | */
7 |
8 | #include "AppConfig.h"
9 | #include <juce_opengl/juce_opengl.cpp>
10 |
--------------------------------------------------------------------------------
/ecephys_spike_sorting/modules/median_subtraction/SpikeBandMedianSubtraction/JuceLibraryCode/juce_graphics.cpp:
--------------------------------------------------------------------------------
1 | /*
2 |
3 | IMPORTANT! This file is auto-generated each time you save your
4 | project - if you alter its contents, your changes may be overwritten!
5 |
6 | */
7 |
8 | #include "AppConfig.h"
9 | #include <juce_graphics/juce_graphics.cpp>
10 |
--------------------------------------------------------------------------------
/ecephys_spike_sorting/modules/median_subtraction/SpikeBandMedianSubtraction/JuceLibraryCode/juce_gui_extra.cpp:
--------------------------------------------------------------------------------
1 | /*
2 |
3 | IMPORTANT! This file is auto-generated each time you save your
4 | project - if you alter its contents, your changes may be overwritten!
5 |
6 | */
7 |
8 | #include "AppConfig.h"
9 | #include <juce_gui_extra/juce_gui_extra.cpp>
10 |
--------------------------------------------------------------------------------
/ecephys_spike_sorting/modules/median_subtraction/SpikeBandMedianSubtraction/JuceLibraryCode/juce_gui_extra.mm:
--------------------------------------------------------------------------------
1 | /*
2 |
3 | IMPORTANT! This file is auto-generated each time you save your
4 | project - if you alter its contents, your changes may be overwritten!
5 |
6 | */
7 |
8 | #include "AppConfig.h"
9 | #include <juce_gui_extra/juce_gui_extra.mm>
10 |
--------------------------------------------------------------------------------
/AUTHORS.rst:
--------------------------------------------------------------------------------
1 | =======
2 | Credits
3 | =======
4 |
5 | Contributors
6 | ----------------
7 |
8 | * Josh Siegle
9 | * Nile Graddis
10 | * Xiaoxuan Jia
11 |
--------------------------------------------------------------------------------
/ecephys_spike_sorting/modules/median_subtraction/SpikeBandMedianSubtraction/JuceLibraryCode/juce_gui_basics.mm:
--------------------------------------------------------------------------------
1 | /*
2 |
3 | IMPORTANT! This file is auto-generated each time you save your
4 | project - if you alter its contents, your changes may be overwritten!
5 |
6 | */
7 |
8 | #include "AppConfig.h"
9 | #include <juce_gui_basics/juce_gui_basics.mm>
10 |
--------------------------------------------------------------------------------
/ecephys_spike_sorting/modules/median_subtraction/SpikeBandMedianSubtraction/JuceLibraryCode/juce_audio_basics.cpp:
--------------------------------------------------------------------------------
1 | /*
2 |
3 | IMPORTANT! This file is auto-generated each time you save your
4 | project - if you alter its contents, your changes may be overwritten!
5 |
6 | */
7 |
8 | #include "AppConfig.h"
9 | #include <juce_audio_basics/juce_audio_basics.cpp>
10 |
--------------------------------------------------------------------------------
/ecephys_spike_sorting/modules/median_subtraction/SpikeBandMedianSubtraction/JuceLibraryCode/juce_audio_basics.mm:
--------------------------------------------------------------------------------
1 | /*
2 |
3 | IMPORTANT! This file is auto-generated each time you save your
4 | project - if you alter its contents, your changes may be overwritten!
5 |
6 | */
7 |
8 | #include "AppConfig.h"
9 | #include <juce_audio_basics/juce_audio_basics.mm>
10 |
--------------------------------------------------------------------------------
/ecephys_spike_sorting/modules/median_subtraction/SpikeBandMedianSubtraction/JuceLibraryCode/juce_audio_devices.mm:
--------------------------------------------------------------------------------
1 | /*
2 |
3 | IMPORTANT! This file is auto-generated each time you save your
4 | project - if you alter its contents, your changes may be overwritten!
5 |
6 | */
7 |
8 | #include "AppConfig.h"
9 | #include <juce_audio_devices/juce_audio_devices.mm>
10 |
--------------------------------------------------------------------------------
/ecephys_spike_sorting/modules/median_subtraction/SpikeBandMedianSubtraction/JuceLibraryCode/juce_audio_formats.mm:
--------------------------------------------------------------------------------
1 | /*
2 |
3 | IMPORTANT! This file is auto-generated each time you save your
4 | project - if you alter its contents, your changes may be overwritten!
5 |
6 | */
7 |
8 | #include "AppConfig.h"
9 | #include <juce_audio_formats/juce_audio_formats.mm>
10 |
--------------------------------------------------------------------------------
/ecephys_spike_sorting/modules/median_subtraction/SpikeBandMedianSubtraction/JuceLibraryCode/juce_cryptography.cpp:
--------------------------------------------------------------------------------
1 | /*
2 |
3 | IMPORTANT! This file is auto-generated each time you save your
4 | project - if you alter its contents, your changes may be overwritten!
5 |
6 | */
7 |
8 | #include "AppConfig.h"
9 | #include <juce_cryptography/juce_cryptography.cpp>
10 |
--------------------------------------------------------------------------------
/ecephys_spike_sorting/modules/median_subtraction/SpikeBandMedianSubtraction/JuceLibraryCode/juce_cryptography.mm:
--------------------------------------------------------------------------------
1 | /*
2 |
3 | IMPORTANT! This file is auto-generated each time you save your
4 | project - if you alter its contents, your changes may be overwritten!
5 |
6 | */
7 |
8 | #include "AppConfig.h"
9 | #include <juce_cryptography/juce_cryptography.mm>
10 |
--------------------------------------------------------------------------------
/ecephys_spike_sorting/modules/median_subtraction/SpikeBandMedianSubtraction/JuceLibraryCode/juce_audio_devices.cpp:
--------------------------------------------------------------------------------
1 | /*
2 |
3 | IMPORTANT! This file is auto-generated each time you save your
4 | project - if you alter its contents, your changes may be overwritten!
5 |
6 | */
7 |
8 | #include "AppConfig.h"
9 | #include <juce_audio_devices/juce_audio_devices.cpp>
10 |
--------------------------------------------------------------------------------
/ecephys_spike_sorting/modules/median_subtraction/SpikeBandMedianSubtraction/JuceLibraryCode/juce_audio_formats.cpp:
--------------------------------------------------------------------------------
1 | /*
2 |
3 | IMPORTANT! This file is auto-generated each time you save your
4 | project - if you alter its contents, your changes may be overwritten!
5 |
6 | */
7 |
8 | #include "AppConfig.h"
9 | #include <juce_audio_formats/juce_audio_formats.cpp>
10 |
--------------------------------------------------------------------------------
/ecephys_spike_sorting/modules/median_subtraction/SpikeBandMedianSubtraction/JuceLibraryCode/juce_audio_processors.mm:
--------------------------------------------------------------------------------
1 | /*
2 |
3 | IMPORTANT! This file is auto-generated each time you save your
4 | project - if you alter its contents, your changes may be overwritten!
5 |
6 | */
7 |
8 | #include "AppConfig.h"
9 | #include <juce_audio_processors/juce_audio_processors.mm>
10 |
--------------------------------------------------------------------------------
/ecephys_spike_sorting/modules/median_subtraction/SpikeBandMedianSubtraction/JuceLibraryCode/juce_data_structures.cpp:
--------------------------------------------------------------------------------
1 | /*
2 |
3 | IMPORTANT! This file is auto-generated each time you save your
4 | project - if you alter its contents, your changes may be overwritten!
5 |
6 | */
7 |
8 | #include "AppConfig.h"
9 | #include <juce_data_structures/juce_data_structures.cpp>
10 |
--------------------------------------------------------------------------------
/ecephys_spike_sorting/modules/median_subtraction/SpikeBandMedianSubtraction/JuceLibraryCode/juce_data_structures.mm:
--------------------------------------------------------------------------------
1 | /*
2 |
3 | IMPORTANT! This file is auto-generated each time you save your
4 | project - if you alter its contents, your changes may be overwritten!
5 |
6 | */
7 |
8 | #include "AppConfig.h"
9 | #include <juce_data_structures/juce_data_structures.mm>
10 |
--------------------------------------------------------------------------------
/ecephys_spike_sorting/modules/median_subtraction/SpikeBandMedianSubtraction/JuceLibraryCode/juce_audio_processors.cpp:
--------------------------------------------------------------------------------
1 | /*
2 |
3 | IMPORTANT! This file is auto-generated each time you save your
4 | project - if you alter its contents, your changes may be overwritten!
5 |
6 | */
7 |
8 | #include "AppConfig.h"
9 | #include <juce_audio_processors/juce_audio_processors.cpp>
10 |
--------------------------------------------------------------------------------
/.cookiecutter/update.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -e
3 |
4 | PROJECT_DIR=$(pwd)/..
5 |
6 | python update_from_repo.py
7 |
8 | # Add post-cookiecutter commands that you always want run here:
9 | git checkout -- $PROJECT_DIR/README.md
10 | git checkout -- $PROJECT_DIR/AUTHORS.rst
11 | git checkout -- $PROJECT_DIR/Pipfile
12 | git checkout -- $PROJECT_DIR/.cookiecutter/update.sh
13 |
14 | # Enter patch mode on remaining diffs:
15 | git add -p
16 |
--------------------------------------------------------------------------------
/docs/aibs_sphinx/static/external_assets/javascript/relatedData.js:
--------------------------------------------------------------------------------
1 | document.write("Click the \"Related Data\" button to display a list of Allen Brain Atlas \
2 | resources for the species indicated. Please note that the current project is excluded \
3 | from the list of suggestions. For example, if a \"human\" link is displayed while viewing \
4 | human microarray data, it refers to related ISH data. \
5 | ");
6 |
--------------------------------------------------------------------------------
/docs/gallery/helloworld.py:
--------------------------------------------------------------------------------
1 | """
2 | Example
3 | =================================
4 |
5 | One-line description
6 | """
7 |
8 | import matplotlib.pyplot as plt
9 | import numpy as np
10 |
11 | x = 'hello_world'
12 |
13 | ########################################
14 | # This is rendered text, and will break apart a notebook cell
15 |
16 | y = 'foo'
17 |
18 | print(x)
19 |
20 | plt.plot(np.random.rand(100), np.random.rand(100), 'b.')
21 | plt.show()
--------------------------------------------------------------------------------
/.cookiecutter/update_from_repo.py:
--------------------------------------------------------------------------------
1 | from cookiecutter.main import cookiecutter as cc
2 | import ruamel.yaml as yaml
3 | import os
4 |
5 | settings_fname = os.path.join(os.path.dirname(__file__), '.cookiecutter.yaml')
6 | settings = yaml.safe_load(open(settings_fname, 'r'))['default_context']
7 |
8 | cc(settings['_template'],
9 | output_dir="../..",
10 | config_file=".cookiecutter.yaml",
11 | no_input=True,
12 | overwrite_if_exists=True)
13 |
--------------------------------------------------------------------------------
/docs/index.rst:
--------------------------------------------------------------------------------
1 | ecephys spike sorting documentation
2 | ======================================
3 |
4 | Gallery:
5 | ==================
6 |
7 | :ref:`examples-index`
8 |
9 | Contents:
10 | ==================
11 |
12 | .. toctree::
13 | :maxdepth: 2
14 |
15 | readme
16 | installation
17 | usage
18 | authors
19 | history
20 |
21 |
22 | Indices and tables
23 | ==================
24 |
25 | * :ref:`genindex`
26 | * :ref:`modindex`
27 | * :ref:`search`
28 |
--------------------------------------------------------------------------------
/integration-tox.ini:
--------------------------------------------------------------------------------
1 | [tox]
2 | envlist = py{36}-{test}
3 |
4 | [testenv:py36-test]
5 |
6 | [testenv]
7 |
8 | commands =
9 | # package can install
10 | pip install .
11 | # entrypoints are exposed
12 | automerging -h
13 | depth-estimation -h
14 | extract-from-npx -h
15 | # kilosort-helper -h # TODO: test this, requires more complex test setup due to matlab :(...
16 | mean-waveforms -h
17 | noise-templates -h
18 | quality-metrics -h
19 |
--------------------------------------------------------------------------------
/tests/unit/modules/extract_from_npx/test_extract_from_npx.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | import numpy as np
3 | import os
4 |
5 | from ecephys_spike_sorting.modules.extract_from_npx.create_settings_json import create_settings_json
6 | import ecephys_spike_sorting.common.utils as utils
7 |
8 | DATA_DIR = os.environ.get('ECEPHYS_SPIKE_SORTING_DATA', False)
9 |
10 | def test_extract_from_npx():
11 |
12 | input_xml = os.path.join(DATA_DIR, 'settings.xml')
13 |
14 | oe_json = create_settings_json(input_xml)
--------------------------------------------------------------------------------
/.cookiecutter/.cookiecutter.yaml:
--------------------------------------------------------------------------------
1 | default_context:
2 | _copy_without_render:
3 | - docs/aibs_sphinx
4 | _template: pyproject_template/
5 | email: joshs@alleninstitute.org
6 | full_name: josh siegle
7 | project_name: ecephys spike sorting
8 | project_namespace: ''
9 | project_short_description: Tools for spike-sorting Allen Institute Neuropixels data
10 | project_slug: ecephys_spike_sorting
11 | repo_url: https://github.com/AllenInstitute/ecephys_spike_sorting
12 | user_name: joshs
13 | version: 0.1.0
14 |
--------------------------------------------------------------------------------
/docs/aibs_sphinx/buildPortalAssets.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | echo "\n\n[rebuildPortalAssets.sh] Downloading latest npm package from internal npm registry..\n\n"
3 | rm -rf ./node_modules
4 | rm -f ./static/external_assets/bundled.js
5 | npm install aibs-portal-assets --save --registry http://dev_resource:4873
6 | rm -f ./node_modules/aibs-portal-assets/dist/index.html
7 | cp ./node_modules/aibs-portal-assets/dist/bundled.js ./static/external_assets/bundled.js
8 | echo "[rebuildPortalAssets.sh] Copied the new bundle to /static/external_assets..\n"
--------------------------------------------------------------------------------
/.cookiecutter/.cookiecutter.json:
--------------------------------------------------------------------------------
1 | {
2 | "_copy_without_render": [
3 | "docs/aibs_sphinx"
4 | ],
5 | "_template": "pyproject_template/",
6 | "email": "joshs@alleninstitute.org",
7 | "full_name": "josh siegle",
8 | "project_name": "ecephys spike sorting",
9 | "project_namespace": "",
10 | "project_short_description": "Tools for spike-sorting Allen Insitute Neuropixels data",
11 | "project_slug": "ecephys_spike_sorting",
12 | "repo_url": "https://github.com/AllenInstitute/ecephys_spike_sorting",
13 | "user_name": "joshs",
14 | "version": "0.1.0"
15 | }
--------------------------------------------------------------------------------
/.bumpversion.cfg:
--------------------------------------------------------------------------------
1 | [bumpversion]
2 | current_version = 0.2.0
3 | commit = True
4 | tag = True
5 |
6 | [bumpversion:file:ecephys_spike_sorting/__init__.py]
7 | search = __version__ = '{current_version}'
8 | replace = __version__ = '{new_version}'
9 |
10 | [bumpversion:file:setup.py]
11 | search = version = '{current_version}'
12 | replace = version = '{new_version}'
13 |
14 | [bumpversion:file:.cookiecutter/.cookiecutter.json]
15 | search = version = "{current_version}"
16 | replace = version = "{new_version}"
17 |
18 | [bumpversion:file:.cookiecutter/.cookiecutter.yaml]
19 | search = version = {current_version}
20 | replace = version = {new_version}
21 |
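22 | # Illustrative usage: `bumpversion patch` (or `minor` / `major`) run from the
23 | # repo root bumps current_version, rewrites the files listed above, and then
24 | # commits and tags the result (commit = True, tag = True).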
--------------------------------------------------------------------------------
/docs/aibs_sphinx/templates/portalHeader.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
--------------------------------------------------------------------------------
/docs/aibs_sphinx/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "aibs_sphinx",
3 | "version": "1.0.0",
4 | "description": "theme.conf, static, and template files for aibs-style sphinx documentation",
5 | "main": "index.js",
6 | "scripts": {
7 | "test": "echo \"Error: no test specified\" && exit 1"
8 | },
9 | "repository": {
10 | "type": "git",
11 | "url": "git+https://github.com/AllenInstitute/aibs_sphinx.git"
12 | },
13 | "author": "",
14 | "license": "ISC",
15 | "bugs": {
16 | "url": "https://github.com/AllenInstitute/aibs_sphinx/issues"
17 | },
18 | "homepage": "https://github.com/AllenInstitute/aibs_sphinx#readme",
19 | "dependencies": {
20 | "aibs-portal-assets": "0.0.24"
21 | }
22 | }
23 |
--------------------------------------------------------------------------------
/ecephys_spike_sorting/modules/median_subtraction/SpikeBandMedianSubtraction/JuceLibraryCode/ReadMe.txt:
--------------------------------------------------------------------------------
1 |
2 | Important Note!!
3 | ================
4 |
5 | The purpose of this folder is to contain files that are auto-generated by the Projucer,
6 | and ALL files in this folder will be mercilessly DELETED and completely re-written whenever
7 | the Projucer saves your project.
8 |
9 | Therefore, it's a bad idea to make any manual changes to the files in here, or to
10 | put any of your own files in here if you don't want to lose them. (Of course you may choose
11 | to add the folder's contents to your version-control system so that you can re-merge your own
12 | modifications after the Projucer has saved its changes).
13 |
--------------------------------------------------------------------------------
/appveyor.yml:
--------------------------------------------------------------------------------
1 | build: false
2 |
3 | environment:
4 | PIPENV_VENV_IN_PROJECT: 1
5 | PIPENV_IGNORE_VIRTUALENVS: 1
6 | matrix:
7 | - MINICONDA: "C:\\Miniconda-x64"
8 | PYTHON: 2.7
9 | - MINICONDA: "C:\\Miniconda35-x64"
10 | PYTHON: 3.5
11 | - MINICONDA: "C:\\Miniconda36-x64"
12 | PYTHON: 3.6
13 |
14 | install:
15 | - set PATH=%MINICONDA%;%MINICONDA%\\Scripts;%PATH%
16 | - conda config --set always_yes yes --set changeps1 no
17 | - conda create -q -n test-environment python=%PYTHON%
18 | - activate test-environment
19 | - pip install pipenv
20 | - pipenv --update
21 | - pipenv --python %PYTHON%
22 | - pipenv install --dev
23 | - pipenv run pip install .
24 |
25 | test_script:
26 | - pipenv run coverage run --source ecephys_spike_sorting -m pytest
27 | - pipenv run codecov
28 |
--------------------------------------------------------------------------------
/ecephys_spike_sorting/modules/median_subtraction/SpikeBandMedianSubtraction/Builds/VisualStudio2013/resources.rc:
--------------------------------------------------------------------------------
1 | #ifdef JUCE_USER_DEFINED_RC_FILE
2 | #include JUCE_USER_DEFINED_RC_FILE
3 | #else
4 |
5 | #undef WIN32_LEAN_AND_MEAN
6 | #define WIN32_LEAN_AND_MEAN
7 | #include <windows.h>
8 |
9 | VS_VERSION_INFO VERSIONINFO
10 | FILEVERSION 1,0,0,0
11 | BEGIN
12 | BLOCK "StringFileInfo"
13 | BEGIN
14 | BLOCK "040904E4"
15 | BEGIN
16 | VALUE "FileDescription", "SpikeBandMedianSubtraction\0"
17 | VALUE "FileVersion", "1.0.0\0"
18 | VALUE "ProductName", "SpikeBandMedianSubtraction\0"
19 | VALUE "ProductVersion", "1.0.0\0"
20 | END
21 | END
22 |
23 | BLOCK "VarFileInfo"
24 | BEGIN
25 | VALUE "Translation", 0x409, 1252
26 | END
27 | END
28 |
29 | #endif
30 |
--------------------------------------------------------------------------------
/tests/unit/modules/automerging/test_automerging.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | import numpy as np
3 | import os
4 |
5 | from ecephys_spike_sorting.modules.automerging.automerging import automerging
6 | import ecephys_spike_sorting.common.utils as utils
7 |
8 | DATA_DIR = os.environ.get('ECEPHYS_SPIKE_SORTING_DATA', False)
9 |
10 | def test_automerging():
11 |
12 | sample_rate = 30000.0
13 |
14 | params = {}
15 | params['merge_threshold'] = 2.5
16 | params['distance_to_compare'] = 5
17 |
18 | spike_times, spike_clusters, amplitudes, \
19 | templates, channel_map, cluster_ids, cluster_quality \
20 | = utils.load_kilosort_data(DATA_DIR, sample_rate, convert_to_seconds=True)
21 |
22 | clusters, ids, labels = automerging(spike_times, spike_clusters, cluster_ids, cluster_quality, templates, params)
23 |
24 | assert(len(ids) == len(labels))
--------------------------------------------------------------------------------
/Pipfile:
--------------------------------------------------------------------------------
1 | [[source]]
2 | url = "https://pypi.python.org/simple"
3 | verify_ssl = true
4 | name = "pypi"
5 |
6 | [[source]]
7 | url = "http://aibs-artifactory.corp.alleninstitute.org/artifactory/api/pypi/pypi-local/simple"
8 | verify_ssl = false
9 | name = "local"
10 |
11 | [dev-packages]
12 | bumpversion = "*"
13 | tox = "*"
14 | coverage = "*"
15 | sphinx = "*"
16 | cookiecutter = "*"
17 | sphinx-gallery = "*"
18 | pytest = "*"
19 | "ruamel.yaml" = "*"
20 |
21 | [packages]
22 | matplotlib = "*"
23 | scipy = "*"
24 | numpy = "*"
25 | pandas = "*"
26 | GitPython = "*"
27 | pillow = ">=6.2.0"
28 | psutil = "*"
29 | argschema = "==1.17.5"
30 | xmljson = "*"
31 | xarray = "*"
32 | scikit-learn = "*"
33 | h5py = "*"
34 | urllib3 = ">=1.24.2"
35 | requests = ">=2.20.0"
36 | marshmallow = "==2.19.2"
37 | joblib = "*"
38 | ecephys-spike-sorting = {path = "."}
39 |
--------------------------------------------------------------------------------
/docs/aibs_sphinx/static/external_assets/javascript/appConfig.js:
--------------------------------------------------------------------------------
1 | /*
2 | * Application Configuration JavaScript file
3 | * Contains config data used by all external web apps
4 | *
5 | * Changes made here should also be reflected in corresponding changes to browserVersion.js
6 | */
7 |
8 | // Note that the userAgent reported versions are not always the same as the app-reported versions.
9 | // See http://www.useragentstring.com/pages/Safari/ for 'webkit'
10 | // See http://www.useragentstring.com/pages/Internet%20Explorer/ for 'msie'
11 | // and http://www.useragentstring.com/pages/Firefox/ for 'mozilla'
12 | // and http://www.useragentstring.com/pages/Chrome/ for 'chrome'
13 | // for a mapping of userAgent to browser version numbers.
14 | var _pSUPPORTED_BROWSERS = {webkit:'537.71', msie:'9.0', mozilla:'33.0', chrome:'38.0.2125.101'};
15 |
--------------------------------------------------------------------------------
/tests/unit/modules/noise_templates/test_noise_templates.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | import numpy as np
3 | import os
4 |
5 | from ecephys_spike_sorting.modules.noise_templates.id_noise_templates import id_noise_templates_rf
6 | import ecephys_spike_sorting.common.utils as utils
7 |
8 | DATA_DIR = os.environ.get('ECEPHYS_SPIKE_SORTING_DATA', False)
9 |
10 | def test_noise_templates():
11 |
12 | sample_rate = 30000.0
13 |
14 | params = {}
15 | params['classifier_path'] = os.path.join(DATA_DIR, 'classifier.pkl')
16 |
17 | spike_times, spike_clusters, amplitudes, \
18 | templates, channel_map, cluster_ids, cluster_quality \
19 | = utils.load_kilosort_data(DATA_DIR, sample_rate, convert_to_seconds=True)
20 |
21 | cluster_ids, is_noise = id_noise_templates_rf(spike_times, spike_clusters, cluster_ids, templates, params)
22 |
23 | assert(len(cluster_ids) == len(is_noise))
--------------------------------------------------------------------------------
/docs/aibs_sphinx/templates/globaltoc.html:
--------------------------------------------------------------------------------
1 | {#
2 | basic/globaltoc.html
3 | ~~~~~~~~~~~~~~~~~~~~
4 |
5 | Sphinx sidebar template: global table of contents.
6 |
7 | :copyright: Copyright 2007-2014 by the Sphinx team, see AUTHORS.
8 | :license: BSD, see LICENSE for details.
9 | #}
10 |
11 | {{ toctree(includehidden=True,collapse=False) }}
12 | {{_('Questions') }}
13 |
14 | Send any questions using the Send Us a Message link below,
15 | or submit your question to StackOverflow using the 'allen-sdk' tag.
16 |
17 |
18 |
19 | If you encounter any problems using the AllenSDK, please create an issue on GitHub's issue tracker.
20 |
21 |
22 |
--------------------------------------------------------------------------------
/docs/aibs_sphinx/README.md:
--------------------------------------------------------------------------------
1 | # aibs_sphinx
2 | theme.conf, static, and template files for aibs-style sphinx documentation
3 |
4 |
5 | ## Portal Assets
6 |
7 | ### Automated Install
8 | To re-build the common header/theme..
9 |
10 | `$ cd project-root`
11 |
12 | `$ sh ./buildPortalAssets.sh`.
13 |
14 | This shell script will add our _internal_ npm registry and install the npm package needed. Next, it will copy the bundled javascript file into the static/external_assets folder which sphinx requires in templates/portalHeader.html.
15 |
16 | ### Manual Install
17 | If you want to manually upgrade..
18 |
19 | `$ cd project-root`
20 |
21 | `$ rm -rf node_modules`
22 |
23 | `$ npm set registry http://dev_resource:4873`
24 |
25 | Open package.json and increment the npm package version
26 |
27 | `$ npm install`
28 |
29 | `$ cp node_modules/aibs-portal-assets/dist/bundled.js static/external_assets/bundled.js`.
30 |
31 | ### Notes
32 | Eventually, this bundled javascript file will be served through a web server.
--------------------------------------------------------------------------------
/ecephys_spike_sorting/modules/psth_events/_schemas.py:
--------------------------------------------------------------------------------
1 | from argschema import ArgSchema, ArgSchemaParser
2 | from argschema.schemas import DefaultSchema
3 | from argschema.fields import Nested, InputDir, String, Float, Dict, Int
4 | from ...common.schemas import EphysParams, Directories
5 |
6 |
7 | class psth_params(DefaultSchema):
8 | event_ex_param_str = String(required=True, default='xd=0,0,-1,1,50', help="parameter string in CatGT used for extraction")
9 |
10 |
11 | class InputParameters(ArgSchema):
12 | psth_events = Nested(psth_params)
13 | directories = Nested(Directories)
14 | ephys_params = Nested(EphysParams)
15 |
16 | class OutputSchema(DefaultSchema):
17 | input_parameters = Nested(InputParameters,
18 | description=("Input parameters the module "
19 | "was run with"),
20 | required=True)
21 |
22 | class OutputParameters(OutputSchema):
23 |
24 | execution_time = Float()
25 |
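26 | # Illustrative input-json fragment accepted by this schema (default shown; the
27 | # "directories" and "ephys_params" blocks are defined in common/schemas.py):
28 | #   {"psth_events": {"event_ex_param_str": "xd=0,0,-1,1,50"}, ...}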
--------------------------------------------------------------------------------
/ecephys_spike_sorting/modules/median_subtraction/SpikeBandMedianSubtraction/Builds/VisualStudio2013/SpikeBandMedianSubtraction.sln:
--------------------------------------------------------------------------------
1 | Microsoft Visual Studio Solution File, Format Version 11.00
2 | # Visual Studio 2013
3 | Project("{A218467C-808A-5074-B7B6-4D41CC6B2DB4}") = "SpikeBandMedianSubtraction", "SpikeBandMedianSubtraction.vcxproj", "{740AC1EC-829B-0D4D-6AA0-FB2B59CAA93B}"
4 | EndProject
5 | Global
6 | GlobalSection(SolutionConfigurationPlatforms) = preSolution
7 | Debug|Win32 = Debug|Win32
8 | Release|Win32 = Release|Win32
9 | EndGlobalSection
10 | GlobalSection(ProjectConfigurationPlatforms) = postSolution
11 | {740AC1EC-829B-0D4D-6AA0-FB2B59CAA93B}.Debug|Win32.ActiveCfg = Debug|Win32
12 | {740AC1EC-829B-0D4D-6AA0-FB2B59CAA93B}.Debug|Win32.Build.0 = Debug|Win32
13 | {740AC1EC-829B-0D4D-6AA0-FB2B59CAA93B}.Release|Win32.ActiveCfg = Release|Win32
14 | {740AC1EC-829B-0D4D-6AA0-FB2B59CAA93B}.Release|Win32.Build.0 = Release|Win32
15 | EndGlobalSection
16 | GlobalSection(SolutionProperties) = preSolution
17 | HideSolutionNode = FALSE
18 | EndGlobalSection
19 | EndGlobal
20 |
--------------------------------------------------------------------------------
/ecephys_spike_sorting/modules/automerging/_schemas.py:
--------------------------------------------------------------------------------
1 | from argschema import ArgSchema, ArgSchemaParser
2 | from argschema.schemas import DefaultSchema
3 | from argschema.fields import Nested, InputDir, String, Float, Dict, Int
4 | from ...common.schemas import EphysParams, Directories
5 |
6 |
7 | class AutomergingParams(DefaultSchema):
8 | merge_threshold = Float(required=True, default=2.5, help='Minimum merge score required to perform a merge')
9 | distance_to_compare = Int(required=True, default=5, help='Distance (in channels) to look for potential merges')
10 |
11 | class InputParameters(ArgSchema):
12 |
13 | automerging_params = Nested(AutomergingParams)
14 | ephys_params = Nested(EphysParams)
15 | directories = Nested(Directories)
16 |
17 | class OutputSchema(DefaultSchema):
18 |
19 | input_parameters = Nested(InputParameters,
20 | description=("Input parameters the module "
21 | "was run with"),
22 | required=True)
23 |
24 | class OutputParameters(OutputSchema):
25 |
26 | execution_time = Float()
27 |
--------------------------------------------------------------------------------
/tests/unit/modules/quality_metrics/test_quality_metrics.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | import numpy as np
3 | import os
4 |
5 | from ecephys_spike_sorting.modules.quality_metrics.metrics import calculate_metrics
6 | import ecephys_spike_sorting.common.utils as utils
7 |
8 | DATA_DIR = os.environ.get('ECEPHYS_SPIKE_SORTING_DATA', False)
9 |
10 | def test_quality_metrics():
11 |
12 | sample_rate = 30000.0
13 | numChannels = 384
14 | bit_volts = 0.195
15 |
16 | params = {}
17 | params['samples_per_spike'] = 82
18 | params['pre_samples'] = 20
19 | params['snr_spike_count'] = 500
20 | params['isi_threshold'] = 0.015
21 |
22 | raw_data_file = os.path.join(DATA_DIR, 'continuous_ap_post.dat')
23 | rawData = np.memmap(raw_data_file, dtype='int16', mode='r')
24 | data = np.reshape(rawData, (int(rawData.size/384), 384))
25 |
26 | spike_times, spike_clusters, amplitudes, \
27 | templates, channel_map, cluster_ids, cluster_quality \
28 | = utils.load_kilosort_data(DATA_DIR, sample_rate, False)
29 |
30 | metrics = calculate_metrics(data, spike_times, spike_clusters, amplitudes, sample_rate, params)
31 |
32 | print(metrics)
33 |
34 | if __name__ == "__main__":
35 |     test_quality_metrics()
--------------------------------------------------------------------------------
/ecephys_spike_sorting/modules/automerging/README.md:
--------------------------------------------------------------------------------
1 | Automerging
2 | ==============
3 | Looks for clusters that likely belong to the same cell, and merges them automatically.
4 |
5 | This is not currently part of our pipeline since switching to Kilosort2, but we're keeping the code around in case others find it useful. For example, it could be helpful for matching units across a series of chronic recordings.
6 |
7 |
8 | Running
9 | -------
10 | ```
11 | python -m ecephys_spike_sorting.modules.automerging --input_json --output_json
12 | ```
13 | Two arguments must be included:
14 | 1. The location of an existing file in JSON format containing a list of paths and parameters.
15 | 2. The location to write a file in JSON format containing information generated by the module while it was run.
16 |
17 | See the `_schemas.py` file for detailed information about the contents of the input JSON.
18 |
19 | Input data
20 | ----------
21 | - **Kilosort outputs** : includes spike times, spike clusters, cluster quality, etc.
22 |
23 |
24 | Output data
25 | -----------
26 | - **spike_clusters.npy** : updated with new cluster labels
27 | - **cluster_group.tsv** : updated with new cluster labels
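28 |
29 | Example
30 | -------
31 | A minimal sketch of driving this module from Python. The field names follow `_schemas.py` and `__main__.py`, but the nested schemas may require additional keys, and every path below is a placeholder.
32 |
33 | ```python
34 | import json
35 | import subprocess
36 | import sys
37 |
38 | # Minimal input JSON; see _schemas.py for the full schema.
39 | params = {
40 |     "automerging_params": {"merge_threshold": 2.5, "distance_to_compare": 5},
41 |     "ephys_params": {"sample_rate": 30000.0},
42 |     "directories": {"kilosort_output_directory": "/data/ks_output"},
43 | }
44 | with open("automerging_input.json", "w") as f:
45 |     json.dump(params, f, indent=2)
46 |
47 | subprocess.check_call([sys.executable, "-m",
48 |                        "ecephys_spike_sorting.modules.automerging",
49 |                        "--input_json", "automerging_input.json",
50 |                        "--output_json", "automerging_output.json"])
51 | ```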
--------------------------------------------------------------------------------
/tests/unit/modules/mean_waveforms/test_mean_waveforms.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | import numpy as np
3 | import os
4 |
5 | from ecephys_spike_sorting.modules.mean_waveforms.extract_waveforms import extract_waveforms
6 | import ecephys_spike_sorting.common.utils as utils
7 |
8 | DATA_DIR = os.environ.get('ECEPHYS_SPIKE_SORTING_DATA', False)
9 |
10 | def test_extract_waveforms():
11 |
12 | sample_rate = 30000.0
13 | numChannels = 384
14 | bit_volts = 0.195
15 |
16 | params = {}
17 | params['samples_per_spike'] = 82
18 | params['pre_samples'] = 20
19 | params['num_epochs'] = 4
20 | params['spikes_per_epoch'] = 5
21 |
22 | raw_data_file = os.path.join(DATA_DIR, 'continuous_ap_post.dat')
23 | rawData = np.memmap(raw_data_file, dtype='int16', mode='r')
24 | data = np.reshape(rawData, (int(rawData.size/384), 384))
25 |
26 | spike_times, spike_clusters, amplitudes, \
27 | templates, channel_map, cluster_ids, cluster_quality \
28 | = utils.load_kilosort_data(DATA_DIR, sample_rate, False)
29 |
30 | data, spike_counts, coords, labels = extract_waveforms(data, spike_times, spike_clusters, cluster_ids, cluster_quality, bit_volts, sample_rate, params)
31 |
32 | print(labels)
--------------------------------------------------------------------------------
/docs/aibs_sphinx/static/external_assets/javascript/browserVersions.js:
--------------------------------------------------------------------------------
1 | // ----> NOTE: changes made here should be mirrored to their equivalent place in appConfig.js
2 |
3 | document.write("Supported Platforms \
4 | This application has been tested with the following configurations. \
5 | You may notice irregularities with software that has not been tested. \
6 | There are known issues when viewing heat map data using unsupported browsers. \
7 | \
8 | \
9 | \
10 | \
11 | Microsoft Windows 7 \
12 | \
13 | Chrome 38.0.2125.101 m \
14 | Firefox 33.0 \
15 | Internet Explorer 9.0 \
16 | \
17 | \
18 | \
19 | Apple Macintosh OS X 10.9.2 \
20 | \
21 | Chrome 38.0.2125.101 m \
22 | Firefox 33.0 \
23 | Safari 7 \
24 | \
25 | \
26 | \
27 | \
28 | ");
29 |
--------------------------------------------------------------------------------
/ecephys_spike_sorting/modules/median_subtraction/_schemas.py:
--------------------------------------------------------------------------------
1 | from argschema import ArgSchema, ArgSchemaParser
2 | from argschema.schemas import DefaultSchema
3 | from argschema.fields import Nested, InputDir, String, Float, Dict, Int
4 | from ...common.schemas import EphysParams, Directories, CommonFiles
5 |
6 | class MedianSubtractionParams(ArgSchema):
7 | median_subtraction_executable = String(help='Path to .exe used for median subtraction (Windows only)')
8 | median_subtraction_repo = String(help='Path to local repository for median subtraction executable')
9 |
10 | class InputParameters(ArgSchema):
11 |
12 | median_subtraction_params = Nested(MedianSubtractionParams)
13 | common_files = Nested(CommonFiles)
14 | directories = Nested(Directories)
15 | ephys_params = Nested(EphysParams)
16 |
17 | class OutputSchema(DefaultSchema):
18 | input_parameters = Nested(InputParameters,
19 | description=("Input parameters the module "
20 | "was run with"),
21 | required=True)
22 |
23 | class OutputParameters(OutputSchema):
24 | median_subtraction_execution_time = Float()
25 | median_subtraction_commit_hash = String()
26 | median_subtraction_commit_date = String()
27 |
--------------------------------------------------------------------------------
/tox.ini:
--------------------------------------------------------------------------------
1 | [tox]
2 | envlist = py{36}-{test}
3 |
4 | [testenv:py36-test]
5 |
6 | [testenv]
7 |
8 | passenv=HOME
9 |
10 | setenv =
11 | PYTHONPATH = {toxinidir}
12 | ECEPHYS_SPIKE_SORTING_DATA = {env:ECEPHYS_SPIKE_SORTING_DATA:{toxinidir}/cached_data}
13 | DATA_MANIFEST = {env:DATA_MANIFEST:{toxinidir}/cached_data_manifests/internal_manifest.json}
14 | ECEPHYS_SPIKE_SORTING_INTEGRATION_TESTS = {env:ECEPHYS_SPIKE_SORTING_INTEGRATION_TESTS:0}
15 | HAS_MATLAB_ENGINE = 0
16 | PIPENV_IGNORE_VIRTUALENVS = 1
17 |
18 | commands =
19 | #pip install -q -U pip
20 |
21 | #py{36}-test: pipenv install --dev
22 | #py{36}-test: pipenv run pip install --no-deps --force --upgrade .
23 | #py{36}-test: pipenv run python -m ecephys_cached_data_service.client {env:DATA_MANIFEST} {env:ECEPHYS_SPIKE_SORTING_DATA} --clobber
24 | #py{36}-test: pipenv run coverage run --source ecephys_spike_sorting -m pytest --basetemp={envtmpdir} --junitxml=test-reports/test.xml {posargs}
25 | #py{36}-test: pipenv run coverage run --source ecephys_spike_sorting -m pytest -W error::RuntimeWarning --basetemp={envtmpdir} {posargs}
26 | #py{36}-test: pipenv run coverage report
27 | #py{36}-test: pipenv run coverage html
28 |
29 | deps =
30 | pipenv
31 |
32 | whitelist_externals =
33 | make
34 |
--------------------------------------------------------------------------------
/tests/unit/common/test_utils.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | import numpy as np
3 | import os
4 |
5 | import ecephys_spike_sorting.common.utils as utils
6 |
7 | DATA_DIR = os.environ.get('ECEPHYS_SPIKE_SORTING_DATA', False)
8 |
9 | def test_read_cluster_group_tsv():
10 |
11 | filename = os.path.join(DATA_DIR, 'cluster_group.tsv')
12 |
13 | cluster_ids, cluster_quality = utils.read_cluster_group_tsv(filename)
14 |
15 | assert(len(cluster_ids) == len(cluster_quality))
16 |
17 | def test_load_kilosort_data():
18 |
19 | spike_times, spike_clusters, amplitudes, \
20 | templates, channel_map, cluster_ids, cluster_quality \
21 | = utils.load_kilosort_data(DATA_DIR, 30000.0)
22 |
23 | assert(len(cluster_ids) == len(cluster_quality))
24 | assert(len(amplitudes) == len(spike_times))
25 |
26 | def test_read_probe_json():
27 |
28 | filename = os.path.join(DATA_DIR, 'probe_info.json')
29 |
30 | mask, offset, scaling, surface_channel, air_channel = utils.read_probe_json(filename)
31 |
32 | assert(len(mask) == len(scaling))
33 |
34 | def test_rms():
35 |
36 | data = np.array([1, -1, 1, -1, 1])
37 |
38 | output = utils.rms(data)
39 |
40 | assert(output == 1.0)
41 |
42 | def test_find_range():
43 |
44 | data = np.arange(0,100)
45 |
46 | output = utils.find_range(data, 20, 30)
47 |
48 | assert(np.array_equal(output, np.arange(20,31)))
49 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Sphinx gallery temp dir
2 | docs/auto_examples/
3 |
4 | # Byte-compiled / optimized / DLL files
5 | __pycache__/
6 | *.py[cod]
7 | *$py.class
8 |
9 | # C extensions
10 | *.so
11 |
12 | # Distribution / packaging
13 | .Python
14 | env/
15 | build/
16 | develop-eggs/
17 | dist/
18 | downloads/
19 | eggs/
20 | .eggs/
21 | lib/
22 | lib64/
23 | parts/
24 | sdist/
25 | var/
26 | *.egg-info/
27 | .installed.cfg
28 | *.egg
29 |
30 | # PyInstaller
31 | # Usually these files are written by a python script from a template
32 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
33 | *.manifest
34 | *.spec
35 |
36 | # Installer logs
37 | pip-log.txt
38 | pip-delete-this-directory.txt
39 |
40 | # Unit test / coverage reports
41 | htmlcov/
42 | .tox/
43 | .coverage
44 | .coverage.*
45 | .cache
46 | nosetests.xml
47 | coverage.xml
48 | *,cover
49 | .hypothesis/
50 | test_data/
51 | cached_data/
52 | .pytest_cache/
53 | test-reports/
54 | .venv/
55 |
56 | # Translations
57 | *.mo
58 | *.pot
59 |
60 | # Django stuff:
61 | *.log
62 |
63 | # Sphinx documentation
64 | docs/_build/
65 |
66 | # PyBuilder
67 | target/
68 |
69 | # pyenv python configuration file
70 | .python-version
71 |
72 | # ide specific
73 | .idea/
74 | .history/
75 | .vscode/
76 |
77 | # matlab
78 | *.mat
79 |
80 | # macOS
81 | .DS_Store
82 |
--------------------------------------------------------------------------------
/ecephys_spike_sorting/modules/ks4_helper/README.md:
--------------------------------------------------------------------------------
1 | ks4_helper
2 | ==============
3 | Caller for Kilosort4. See the main readme for installation instructions.
4 |
5 | This module auto-generates the channel map (reading from the SpikeGLX metadata file) and the ks_params structure (reading parameters from the schema; these can be edited in create_input_json.py).
6 |
7 | Dependencies
8 | ------------
9 | [Kilosort4](https://github.com/MouseLand/Kilosort)
10 |
11 | Running
12 | -------
13 | ```
14 | python -m ecephys_spike_sorting.modules.ks4_helper --input_json --output_json
15 | ```
16 | Two arguments must be included:
17 | 1. The location of an existing file in JSON format containing a list of paths and parameters.
18 | 2. The location to write a file in JSON format containing information generated by the module while it was run.
19 |
20 | See the `_schemas.py` file for detailed information about the contents of the input JSON.
21 |
22 | Input data
23 | ----------
24 | - **AP band .bin file** : int16 binary file written by [SpikeGLX](https://github.com/billkarsh/spikeglx)
25 | - **AP band .meta file** : metadata file written by [SpikeGLX](https://github.com/billkarsh/spikeglx)
26 |
27 |
28 | Output data
29 | -----------
30 | - **Kilosort output files** : .npy files containing spike times, cluster labels, templates, etc. These are the input for phy.
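31 |
32 | Example
33 | -------
34 | Since the channel map is read from the SpikeGLX metadata, it may help to know that .meta files are plain-text `tag=value` lines. A minimal reader sketch (the file name is a placeholder; the `imSampRate` tag name comes from the SpikeGLX documentation, not this repo):
35 |
36 | ```python
37 | def read_spikeglx_meta(path):
38 |     """Parse a SpikeGLX .meta file: one 'tag=value' pair per line.
39 |     Some tags carry a leading '~', which is stripped here."""
40 |     meta = {}
41 |     with open(path) as f:
42 |         for line in f:
43 |             if "=" in line:
44 |                 tag, val = line.strip().split("=", 1)
45 |                 meta[tag.lstrip("~")] = val
46 |     return meta
47 |
48 | meta = read_spikeglx_meta("run_g0_t0.imec0.ap.meta")  # placeholder name
49 | sample_rate = float(meta["imSampRate"])
50 | ```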
--------------------------------------------------------------------------------
/ecephys_spike_sorting/modules/catGT_helper/README.md:
--------------------------------------------------------------------------------
1 | CatGT Helper
2 | ==============
3 | Python wrapper for CatGT, a C++ command-line application written by Bill Karsh for preprocessing data collected by SpikeGLX. CatGT can also scan the Neuropixels sync signal and auxiliary data to extract pulse edges for synchronization across data streams. See the README for CatGT for details about parameters.
4 |
5 | Dependencies
6 | ------------
7 | [CatGT](https://billkarsh.github.io/SpikeGLX/#catgt)
8 |
9 | Running
10 | -------
11 | ```
12 | python -m ecephys_spike_sorting.modules.catGT_helper --input_json --output_json
13 | ```
14 | Two arguments must be included:
15 | 1. The location of an existing file in JSON format containing a list of paths and parameters.
16 | 2. The location to write a file in JSON format containing information generated by the module while it was run.
17 |
18 | See the `_schemas.py` file for detailed information about the contents of the input JSON.
19 |
20 | Input data
21 | ----------
22 | - **SpikeGLX .bin files for ap and ni streams** : int16 binary files written by [SpikeGLX](https://github.com/billkarsh/spikeglx)
23 |
24 | Output data
25 | -----------
26 | - **CatGT output files** : .bin files of concatenated, filtered data
27 | - **CatGT edge files** : text files of edges found by CatGT scanning SYNC, XA and XD channels as specified in the CatGT command line.
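28 |
29 | Example
30 | -------
31 | Under the hood, the module assembles a CatGT command line from the fields in `_schemas.py`. A hand-rolled sketch of an equivalent call; all paths and values are placeholders, and the CatGT README remains the authoritative option list.
32 |
33 | ```python
34 | import os
35 | import subprocess
36 |
37 | catGT_path = "/opt/CatGT"  # catGTPath: directory containing the executable
38 | cmd = [
39 |     os.path.join(catGT_path, "CatGT"),
40 |     "-dir=/data/raw",  # raw data directory (placeholder)
41 |     "-run=my_run",     # run_name: undecorated run name
42 |     "-g=0",            # gate_string
43 |     "-t=0,0",          # trigger_string
44 |     "-prb=0",          # probe_string
45 |     "-ap",             # stream_string
46 | ]
47 | # cmdStr: filter, error-correction, and extraction settings
48 | cmd += "-prbfld -aphipass=300 -gbldmx -gfix=0.40,0.10,0.02".split()
49 | subprocess.check_call(cmd)
50 | ```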
--------------------------------------------------------------------------------
/ecephys_spike_sorting/modules/psth_events/README.md:
--------------------------------------------------------------------------------
1 | psth_events
2 | ===========
3 | Reformats a text file of edges extracted by CatGT and writes the result to events.csv inside the phy directory. This file is read by the events.py plugin to generate PSTH plots for use in manual curation.
4 |
5 | Dependencies
6 | ------------
7 | none
8 |
9 | Running
10 | -------
11 | ```
12 | python -m ecephys_spike_sorting.modules.psth_events --input_json --output_json
13 | ```
14 | Two arguments must be included:
15 | 1. The location of an existing file in JSON format containing a list of paths and parameters.
16 | 2. The location to write a file in JSON format containing information generated by the module while it was run.
17 |
18 | See the `_schemas.py` file for detailed information about the contents of the input JSON.
19 |
20 | The only input parameter is event_ex_param_str, e.g. 'XD=4,1,50'. The module parses this string and looks for the corresponding file of edge times, assuming the CatGT output was generated using the options: -prb_fld -out_prb_fld
21 |
22 | Input data
23 | ----------
24 | - **CatGT extraction parameters for the event list** : used to identify which file of extracted edges should be used
25 | - **KS2 output** : folder of phy input files to which the events.csv file will be copied
26 |
27 | Output data
28 | -----------
29 | - **events.csv** : text files of event times in seconds for plotting PSTH
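30 |
31 | Example
32 | -------
33 | A sketch of the reformatting step. It assumes the extracted-edge file is plain text with one event time (in seconds) per line, per the CatGT documentation; the file names and the events.csv format shown here are illustrative, not the module's exact behavior.
34 |
35 | ```python
36 | import numpy as np
37 |
38 | # 'xd=0,0,-1,1,50' -> extractor name plus comma-separated numeric
39 | # arguments, which identify the CatGT edge file to read.
40 | name, arg_str = "xd=0,0,-1,1,50".split("=")
41 | args = [float(v) for v in arg_str.split(",")]
42 |
43 | event_times = np.loadtxt("edge_times.txt", ndmin=1)  # placeholder name
44 | np.savetxt("events.csv", event_times, fmt="%.6f")    # one time per row
45 | ```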
--------------------------------------------------------------------------------
/ecephys_spike_sorting/modules/extract_from_npx/_schemas.py:
--------------------------------------------------------------------------------
1 | from argschema import ArgSchema, ArgSchemaParser
2 | from argschema.schemas import DefaultSchema
3 | from argschema.fields import Nested, InputDir, String, Float, Dict, Int
4 | from ...common.schemas import EphysParams, Directories, CommonFiles
5 |
6 | class ExtractFromNpxParams(DefaultSchema):
7 | npx_directory = String(help='Path to NPX file(s) saved by Open Ephys')
8 | settings_xml = String(help='Path to settings.xml file saved by Open Ephys')
9 | npx_extractor_executable = String(help='Path to .exe file for NPX extraction (Windows only)')
10 | npx_extractor_repo = String(required=False, default='None', help='Path to local repository for NPX extractor')
11 |
12 | class InputParameters(ArgSchema):
13 | extract_from_npx_params = Nested(ExtractFromNpxParams)
14 | directories = Nested(Directories)
15 | common_files = Nested(CommonFiles)
16 |
17 | class OutputSchema(DefaultSchema):
18 | input_parameters = Nested(InputParameters,
19 | description=("Input parameters the module "
20 | "was run with"),
21 | required=True)
22 |
23 | class OutputParameters(OutputSchema):
24 | npx_extractor_execution_time = Float()
25 | settings_json = String()
26 | npx_extractor_commit_hash = String()
27 | npx_extractor_commit_date = String()
28 |
--------------------------------------------------------------------------------
/ecephys_spike_sorting/modules/depth_estimation/README.md:
--------------------------------------------------------------------------------
1 | Depth Estimation
2 | ==============
3 | Creates a JSON file with information about the DC offset of each channel, as well as the channel closest to the brain surface. In the current SpikeGLX pipeline version, this information is not fed forward into Kilosort.
4 |
5 | Implementation
6 | --------------
7 | 
8 |
9 | This module uses the sharp increase in low-frequency LFP band power to estimate the brain surface location.
10 |
11 | Running
12 | -------
13 | ```
14 | python -m ecephys_spike_sorting.modules.depth_estimation --input_json --output_json
15 | ```
16 | Two arguments must be included:
17 | 1. The location of an existing file in JSON format containing a list of paths and parameters.
18 | 2. The location to write a file in JSON format containing information generated by the module while it was run.
19 |
20 | See the `_schemas.py` file for detailed information about the contents of the input JSON.
21 |
22 | Input data
23 | ----------
24 | - LFP band .bin file matching the AP band file currently being processed. With the standard pipeline script, the LFP must have been processed by CatGT.
25 |
26 | Output data
27 | -----------
28 | - **probe_info.json** : contains information about each channel, as well as the surface channel for the probe
29 | - **probe_depth.png** : image showing the estimated surface channel location
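30 |
31 | Example
32 | -------
33 | An illustrative reduction of that idea, not the module's exact algorithm: compute log low-frequency LFP power per channel and take the highest channel whose power still exceeds a threshold. The parameter values mirror defaults used in this repo's unit tests.
34 |
35 | ```python
36 | import numpy as np
37 | from scipy.signal import welch
38 |
39 | def estimate_surface_channel(lfp, fs=2500.0, freq_range=(0, 10),
40 |                              nfft=4096, power_thresh=2.5):
41 |     """lfp: (samples, channels) array; returns the estimated surface channel."""
42 |     f, pxx = welch(lfp.astype("float32"), fs=fs, nfft=nfft, axis=0)
43 |     band = (f >= freq_range[0]) & (f <= freq_range[1])
44 |     log_power = np.log10(pxx[band].mean(axis=0))  # one value per channel
45 |     above = np.where(log_power > power_thresh)[0]
46 |     return int(above.max()) if above.size else None
47 | ```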
--------------------------------------------------------------------------------
/ecephys_spike_sorting/modules/pykilosort_helper/README.md:
--------------------------------------------------------------------------------
1 | pykilosort Helper
2 | ==============
3 | Caller for Jennifer Colonell's fork of the International Brain Laboratory's Python version of Kilosort. Run with the default parameters, this runs a version of Kilosort that is very similar to the KS2.5 release.
4 |
5 | This module auto-generates the channel map (reading from the SpikeGLX metadata file) and the ks_params structure (reading parameters from the schema; these can be edited in create_input_json.py).
6 |
7 | Dependencies
8 | ------------
9 | [pykilosort](https://github.com/jenniferColonell/pykilosort)
10 | Other dependencies as given in ece_pyks2.yml
11 |
12 | Running
13 | -------
14 | ```
15 | python -m ecephys_spike_sorting.modules.pykilosort_helper --input_json --output_json
16 | ```
17 | Two arguments must be included:
18 | 1. The location of an existing file in JSON format containing a list of paths and parameters.
19 | 2. The location to write a file in JSON format containing information generated by the module while it was run.
20 |
21 | See the `_schemas.py` file for detailed information about the contents of the input JSON.
22 |
23 | Input data
24 | ----------
25 | - **AP band .dat or .bin file** : int16 binary files written by [Open Ephys](https://github.com/open-ephys/plugin-GUI), [SpikeGLX](https://github.com/billkarsh/spikeglx), or the `extract_from_npx` module.
26 |
27 | Output data
28 | -----------
29 | - **Kilosort output files** : .npy files containing spike times, cluster labels, templates, etc. These are the input for phy.
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Allen Institute Contribution Agreement
2 |
3 | This document describes the terms under which you may make “Contributions” —
4 | which may include without limitation, software additions, revisions, bug fixes, configuration changes,
5 | documentation, or any other materials — to any of the projects owned or managed by the Allen Institute.
6 | If you have questions about these terms, please contact us at terms@alleninstitute.org.
7 |
8 | You certify that:
9 |
10 | • Your Contributions are either:
11 |
12 | 1. Created in whole or in part by you and you have the right to submit them under the designated license
13 | (described below); or
14 | 2. Based upon previous work that, to the best of your knowledge, is covered under an appropriate
15 | open source license and you have the right under that license to submit that work with modifications,
16 | whether created in whole or in part by you, under the designated license; or
17 |
18 | 3. Provided directly to you by some other person who certified (1) or (2) and you have not modified them.
19 |
20 | • You are granting your Contributions to the Allen Institute under the terms of the [2-Clause BSD license](https://opensource.org/licenses/BSD-2-Clause)
21 | (the “designated license”).
22 |
23 | • You understand and agree that the Allen Institute projects and your Contributions are public and that
24 | a record of the Contributions (including all metadata and personal information you submit with them) is
25 | maintained indefinitely and may be redistributed consistent with the Allen Institute’s mission and the
26 | 2-Clause BSD license.
27 |
--------------------------------------------------------------------------------
/ecephys_spike_sorting/scripts/helpers/metric_file_fix.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Created on Thu Mar 26 16:15:30 2020
4 | ad hoc function to remove waveform_metrics columns from metrics.csv file
5 | to allow rerunning of mean_waveforms module
6 | @author: colonellj
7 | """
8 |
9 | import os
10 | import pandas as pd
11 |
12 |
13 | def DelColumns(kilosort_output_dir):
14 | qmetric_file = os.path.join(kilosort_output_dir, 'metrics.csv')
15 | # If the file exists, load and check for the waveform_metrics columns
16 | if os.path.exists(qmetric_file):
17 | qmetrics = pd.read_csv(qmetric_file)
18 | colList = qmetrics.columns
19 | nCol = len(colList)
20 | if nCol > 15:
21 | # when resaving, eliminate the unnamed column 0
22 | # to delete columns beyond index 14, need to get column labels
23 | # and pass that list to the drop command
24 | dropList = colList[15:]
25 | qmetrics = qmetrics.drop(dropList, axis='columns')
26 | dropList = colList[0]
27 | qmetrics = qmetrics.drop(dropList, axis='columns')
28 |
29 | # rename the epoch_name column label, which will have _quality_metrics appended
30 | colList = qmetrics.columns
31 | ec = [col for col in colList if 'epoch_name_quality_metrics' in col]
32 | if len(ec) > 0:
33 | print('renaming column')
34 | qmetrics = qmetrics.rename(columns={'epoch_name_quality_metrics':'epoch_name'})
35 |
36 | print("Re-saving new cluster metrics file after deleting columns ...")
37 | qmetrics.to_csv(qmetric_file)
38 | return
39 |
--------------------------------------------------------------------------------
/ecephys_spike_sorting/scripts/helpers/run_one_probe.py:
--------------------------------------------------------------------------------
1 | import os
2 | import shutil
3 | import subprocess
4 | import sys
5 | from helpers import log_from_json
6 |
7 | # Given json files for CatGT and the modules, run all of the processing
8 | # unique to this recording session and probe
9 |
10 | def runOne( session_id,
11 | json_directory,
12 | data_directory,
13 | run_CatGT,
14 | catGT_input_json,
15 | catGT_output_json,
16 | modules,
17 | module_input_json,
18 | logFullPath
19 | ):
20 |
21 | if run_CatGT:
22 | command = sys.executable + " -W ignore -m ecephys_spike_sorting.modules." + 'catGT_helper' + " --input_json " + catGT_input_json \
23 | + " --output_json " + catGT_output_json
24 | print(command)
25 | subprocess.check_call(command.split(' '))
26 |
27 | # if we are running any modules
28 | # copy module json file to data directory as record of the input parameters
29 | if len(modules) > 0:
30 | shutil.copy(module_input_json, os.path.join(data_directory, session_id + '-input.json'))
31 |
32 | for module in modules:
33 | output_json = os.path.join(json_directory, session_id + '-' + module + '-output.json')
34 | command = sys.executable + " -W ignore -m ecephys_spike_sorting.modules." + module + " --input_json " + module_input_json \
35 | + " --output_json " + output_json
36 | print(command)
37 | subprocess.check_call(command.split(' '))
38 |
39 | log_from_json.addEntry(modules, json_directory, session_id, logFullPath)
40 |
41 |
--------------------------------------------------------------------------------
/tests/unit/modules/depth_estimation/test_depth_estimation.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | import numpy as np
3 | import os
4 |
5 | from ecephys_spike_sorting.modules.depth_estimation.depth_estimation import compute_offset_and_surface_channel
6 | import ecephys_spike_sorting.common.utils as utils
7 |
8 | DATA_DIR = os.environ.get('ECEPHYS_SPIKE_SORTING_DATA', False)
9 |
10 | def test_depth_estimation():
11 |
12 | ephys_params = {}
13 | ephys_params['sample_rate'] = 30000.0
14 | ephys_params['lfp_sample_rate'] = 2500.0
15 | ephys_params['bit_volts'] = 0.195
16 | ephys_params['num_channels'] = 384
17 | ephys_params['reference_channels'] = np.array([37, 76, 113, 152, 189, 228, 265, 304, 341, 380])
18 |
19 | params = {}
20 | params['hi_noise_thresh'] = 50.0
21 | params['lo_noise_thresh'] = 3.0
22 | params['save_figure'] = False
23 | params['smoothing_amount'] = 5
24 | params['power_thresh'] = 2.5
25 | params['diff_thresh'] = -0.07
26 | params['freq_range'] = [0, 10]
27 | params['max_freq'] = 150
28 | params['channel_range'] = [370, 380]
29 | params['n_passes'] = 1
30 | params['start_sample'] = 0
31 | params['air_gap'] = 100
32 | params['nfft'] = 4096
33 | params['skip_s_per_pass'] = 100
34 |
35 | raw_data_file = os.path.join(DATA_DIR, 'continuous_ap_pre.dat')
36 | rawData = np.memmap(raw_data_file, dtype='int16', mode='r')
37 | data = np.reshape(rawData, (int(rawData.size/384), 384))
38 |
39 | raw_data_file_lfp = os.path.join(DATA_DIR, 'continuous_lfp_pre.dat')
40 | rawDataLfp = np.memmap(raw_data_file_lfp, dtype='int16', mode='r')
41 | data_lfp = np.reshape(rawDataLfp, (int(rawDataLfp.size/384), 384))
42 |
43 | info = compute_offset_and_surface_channel(data, data_lfp, \
44 | ephys_params, params)
45 |
--------------------------------------------------------------------------------
/ecephys_spike_sorting/scripts/helpers/plot_raw_data.py:
--------------------------------------------------------------------------------
1 | import os
2 | import numpy as np
3 | import glob
4 | import matplotlib.pyplot as plt
5 |
6 | def plot_raw_data(dataFolder):
7 |
8 | f1 = os.path.join(dataFolder, os.path.join('continuous','Neuropix*.0'))
9 | f2 = os.path.join(dataFolder, os.path.join('continuous','Neuropix*.1'))
10 |
11 | ap_directory = glob.glob(f1)[0]
12 | lfp_directory = glob.glob(f2)[0]
13 |
14 | print(ap_directory)
15 | print(lfp_directory)
16 |
17 | hi_noise_thresh = 26
18 | lo_noise_thresh = 3
19 |
20 | output_file = os.path.join(dataFolder, 'probe_info.json')
21 |
22 | numChannels = 384
23 |
24 | offsets = np.zeros((numChannels,), dtype = 'int16')
25 | rms_noise = np.zeros((numChannels,), dtype='int16')
26 | lfp_power = np.zeros((numChannels,), dtype = 'float32')
27 |
28 | spikes_file = os.path.join(ap_directory, 'continuous.dat')
29 | lfp_file = os.path.join(lfp_directory, 'continuous.dat')
30 |
31 | # %%
32 |
33 | rawDataAp = np.memmap(spikes_file, dtype='int16', mode='r')
34 | dataAp = np.reshape(rawDataAp, (int(rawDataAp.size/numChannels), numChannels))
35 |
36 | mask_chans = np.array([37, 76, 113, 152, 189, 228, 265, 304, 341, 380]) - 1
37 |
38 | start_time = 30000*2000
39 | recording_time = 30000
40 | median_subtr = np.zeros((recording_time,numChannels))
41 |
42 | medians = np.zeros((384,))
43 | plt.figure(figsize=(10,10))
44 |
45 | for ch in range(0,384,10):
46 |
47 | d = dataAp[start_time:start_time+recording_time,ch].astype('float64') #* 1.2 / 1023 / 500.0 * 1e6
48 | medians[ch] = np.median(d)
49 |
50 | plt.plot(d + ch*100, color = 'gray', linewidth=0.5)
51 | plt.plot([0,5000],[ch*100,ch*100],'k',alpha=0.25)
52 |
53 | #plt.plot(medians)
54 | plt.show()
--------------------------------------------------------------------------------
/ecephys_spike_sorting/modules/kilosort_helper/kilosort2_master_file.m:
--------------------------------------------------------------------------------
1 | %paths will be added to the matlab path using calls in python
2 | %addpath(genpath('Z:\workstation_backup\full_080119\Documents\KS2_current')) % path to kilosort folder
3 | %addpath(genpath('Z:\workstation_backup\full_080119\Documents\npy-matlab-master')) % path to npy-matlab scripts
4 |
5 | %path to the config file will also be added to the matlab path using call in python
6 | %pathToYourConfigFile = 'D:\kilosort_datatemp'; % take from Github folder and put it somewhere else (together with the master_file)
7 | clear; %clear anything in memory
8 | run('kilosort2_config_file.m')
9 |
10 | % find the binary file
11 | rootZ = ops.rootZ
12 | ops.fbinary = fullfile(ops.datafile)
13 |
14 | % preprocess data to create temp_wh.dat
15 | rez = preprocessDataSub(ops);
16 |
17 | % time-reordering as a function of drift
18 | rez = clusterSingleBatches(rez);
19 | save(fullfile(rootZ, 'rez.mat'), 'rez', '-v7.3');
20 |
21 | % main tracking and template matching algorithm
22 | rez = learnAndSolve8b(rez);
23 |
24 | % final merges
25 | rez = find_merges(rez, 1);
26 |
27 | % final splits by SVD
28 | rez = splitAllClusters(rez, 1);
29 |
30 | % final splits by amplitudes
31 | rez = splitAllClusters(rez, 0);
32 |
33 | % decide on cutoff
34 | rez = set_cutoff(rez);
35 |
36 | fprintf('found %d good units \n', sum(rez.good>0))
37 |
38 | % write to Phy
39 | fprintf('Saving results to Phy \n')
40 | rezToPhy(rez, rootZ);
41 |
42 | %% if you want to save the results to a Matlab file...
43 |
44 | % discard features in final rez file (too slow to save)
45 | rez.cProj = [];
46 | rez.cProjPC = [];
47 |
48 | % save final results as rez2
49 | fprintf('Saving final results in rez2 \n')
50 | fname = fullfile(rootZ, 'rez2.mat');
51 | save(fname, 'rez', '-v7.3');
--------------------------------------------------------------------------------
/ecephys_spike_sorting/modules/kilosort_helper/README.md:
--------------------------------------------------------------------------------
1 | Kilosort Helper
2 | ==============
3 | Python wrapper for Matlab-based spike sorting with Kilosort.
4 |
5 | This module auto-generates the channel map (reading from the SpikeGLX metadata file), configuration file (reading parameters from the schema; these can be edited in create_input_json.py), and master file for Kilosort, and runs everything via the Matlab engine.
6 |
7 | Dependencies
8 | ------------
9 | [Kilosort](https://github.com/cortex-lab/KiloSort) or [Kilosort2](https://github.com/MouseLand/Kilosort2) - requires Matlab >=R2016b with Signal Processing and Parallel Computing Toolboxes, Visual Studio Community 2013, and a CUDA-compatible GPU
10 | [Matlab Engine API for Python](https://www.mathworks.com/help/matlab/matlab_external/install-the-matlab-engine-for-python.html) - this may restrict the Python version you're able to use
11 |
12 | Running
13 | -------
14 | ```
15 | python -m ecephys_spike_sorting.modules.kilosort_helper --input_json --output_json
16 | ```
17 | Two arguments must be included:
18 | 1. The location of an existing file in JSON format containing a list of paths and parameters.
19 | 2. The location to write a file in JSON format containing information generated by the module while it was run.
20 |
21 | See the `_schemas.py` file for detailed information about the contents of the input JSON.
22 |
23 | Input data
24 | ----------
25 | - **AP band .dat or .bin file** : int16 binary files written by [Open Ephys](https://github.com/open-ephys/plugin-GUI), [SpikeGLX](https://github.com/billkarsh/spikeglx), or the `extract_from_npx` module.
26 |
27 | Output data
28 | -----------
29 | - **Kilosort output files** : .npy files containing spike times, cluster labels, templates, etc. These are the input for phy.
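30 |
31 | Example
32 | -------
33 | For orientation, a bare sketch of the Matlab engine calls involved. The module generates the config and master files itself; all paths below are placeholders.
34 |
35 | ```python
36 | import matlab.engine
37 |
38 | eng = matlab.engine.start_matlab()
39 | eng.addpath(eng.genpath(r"C:\KS2_current"), nargout=0)        # Kilosort repo
40 | eng.addpath(eng.genpath(r"C:\npy-matlab-master"), nargout=0)  # npy-matlab
41 | eng.addpath(r"D:\kilosort_datatemp", nargout=0)  # config + master file dir
42 | eng.run("kilosort2_master_file.m", nargout=0)
43 | eng.quit()
44 | ```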
--------------------------------------------------------------------------------
/ecephys_spike_sorting/modules/kilosort_postprocessing/_schemas.py:
--------------------------------------------------------------------------------
1 | from argschema import ArgSchema, ArgSchemaParser
2 | from argschema.schemas import DefaultSchema
3 | from argschema.fields import Nested, InputDir, String, Float, Dict, Int, Boolean
4 | from ...common.schemas import EphysParams, Directories
5 |
6 |
7 | class PostprocessingParams(DefaultSchema):
8 | within_unit_overlap_window = Float(required=False, default=0.000166, help='Time window for removing overlapping spikes for one unit.')
9 | between_unit_overlap_window = Float(required=False, default=0.000166, help='Time window for removing overlapping spikes between two units.')
10 | between_unit_dist_um = Int(required=False, default=5, help='Number of channels (above and below peak channel) to search for overlapping spikes')
11 | deletion_mode = String(required=False, default='lowAmpCluster', help='lowAmpCluster or deleteFirst')
12 | remove_duplicates = Boolean(required=False, default=True, help='Set to True for duplicate removal')
13 | align_avg_waveform = Boolean(required=False, default=True, help='Set to true to set spike times for mean waveform min = t0')
14 | cWaves_path = InputDir(required=False, help='directory containing the CWaves executable.')
15 |
16 | class InputParameters(ArgSchema):
17 |
18 | ks_postprocessing_params = Nested(PostprocessingParams)
19 | directories = Nested(Directories)
20 | ephys_params = Nested(EphysParams)
21 |
22 |
23 | class OutputSchema(DefaultSchema):
24 | input_parameters = Nested(InputParameters,
25 | description=("Input parameters the module "
26 | "was run with"),
27 | required=True)
28 |
29 | class OutputParameters(OutputSchema):
30 |
31 | execution_time = Float()
32 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | from setuptools import setup, find_packages
2 |
3 | setup(
4 | name = 'ecephys_spike_sorting',
5 | version = '0.1.0',
6 | description = """Tools for spike-sorting Allen Institute Neuropixels data""",
7 | author = "Josh Siegle, Nile Graddis, Xiaoxuan Jia, Gregg Heller, Chris Mochizuki, Dan Denman",
8 | author_email = "joshs@alleninstitute.org",
9 | url = 'https://github.com/AllenInstitute/ecephys_spike_sorting',
10 | packages = find_packages(),
11 | include_package_data=True,
12 | entry_points={
13 | 'console_scripts': [
14 | 'automerging = ecephys_spike_sorting.modules.automerging.__main__:main',
15 | 'depth-estimation = ecephys_spike_sorting.modules.depth_estimation.__main__:main',
16 | 'extract-from-npx = ecephys_spike_sorting.modules.extract_from_npx.__main__:main',
17 | 'kilosort-helper = ecephys_spike_sorting.modules.kilosort_helper.__main__:main',
18 | 'kilosort-postprocessing = ecephys_spike_sorting.modules.kilosort_postprocessing.__main__:main',
19 | 'mean-waveforms = ecephys_spike_sorting.modules.mean_waveforms.__main__:main',
20 | 'median-subtraction = ecephys_spike_sorting.modules.median_subtraction.__main__:main',
21 | 'noise-templates = ecephys_spike_sorting.modules.noise_templates.__main__:main',
22 | 'quality-metrics = ecephys_spike_sorting.modules.quality_metrics.__main__:main',
23 | ],
24 | },
25 | setup_requires=['pytest-runner'],
26 | install_requires=[
27 | 'matplotlib',
28 | 'scipy',
29 | 'numpy',
30 | 'pandas',
31 | 'GitPython',
32 | 'pillow',
33 | 'argschema',
34 | 'xmljson',
35 | 'xarray',
36 | 'scikit-learn',
37 | 'joblib',
38 | 'psutil'
39 | ],
40 | )
41 |
--------------------------------------------------------------------------------
/ecephys_spike_sorting/modules/automerging/__main__.py:
--------------------------------------------------------------------------------
1 | from argschema import ArgSchemaParser
2 | import os
3 | import logging
4 | import time
5 | import numpy as np
6 |
7 | from .automerging import automerging
8 |
9 | from ...common.utils import write_cluster_group_tsv, load_kilosort_data
10 |
11 |
12 | def run_automerging(args):
13 |
14 | print('ecephys spike sorting: automerging module')
15 |
16 | start = time.time()
17 |
18 | spike_times, spike_clusters, spike_templates, amplitudes, templates, \
19 | channel_map, channel_pos, clusterIDs, cluster_quality, cluster_amplitude, template_features = \
20 | load_kilosort_data(args['directories']['kilosort_output_directory'], \
21 | args['ephys_params']['sample_rate'], \
22 | convert_to_seconds = True)
23 |
24 | spike_clusters, cluster_index, cluster_quality = automerging(spike_times, spike_clusters, clusterIDs, templates, args['automerging_params'])
25 |
26 | write_cluster_group_tsv(cluster_index, cluster_quality)
27 | np.save(os.path.join(args['directories']['kilosort_output_directory'], 'spike_clusters.npy'), spike_clusters)
28 |
29 | execution_time = time.time() - start
30 |
31 | print('total time: ' + str(np.around(execution_time,2)) + ' seconds')
32 | print()
33 |
34 | return {"execution_time" : execution_time} # output manifest
35 |
36 |
37 | def main():
38 |
39 | from ._schemas import InputParameters, OutputParameters
40 |
41 | mod = ArgSchemaParser(schema_type=InputParameters,
42 | output_schema_type=OutputParameters)
43 |
44 | output = run_automerging(mod.args)
45 |
46 | output.update({"input_parameters": mod.args})
47 | if "output_json" in mod.args:
48 | mod.output(output, indent=2)
49 | else:
50 | print(mod.get_output_json(output))
51 |
52 |
53 | if __name__ == "__main__":
54 | main()
55 |
--------------------------------------------------------------------------------
/ecephys_spike_sorting/modules/extract_from_npx/README.md:
--------------------------------------------------------------------------------
1 | Extract from NPX
2 | ==============
3 | Converts continuous data from raw NPX format (75% compression ratio) to .dat files required for spike sorting and other downstream analysis.
4 |
5 | Reads event times from the NPX file and writes them as .npy files.
6 |
7 | Converts the settings.xml file for an experiment into a JSON file with parameters such as sample rate and bit volts for each channel.
8 |
9 | Dependencies
10 | -------------
11 | The NpxExtractor executable (Windows only) can be found [here](https://github.com/open-ephys-GUI-binaries/open-ephys/tree/neuropix/Tools/NpxExtractor).
12 |
13 | Running
14 | -------
15 | ```
16 | python -m ecephys_spike_sorting.modules.extract_from_npx --input_json --output_json
17 | ```
18 | Two arguments must be included:
19 | 1. The location of an existing file in JSON format containing a list of paths and parameters.
20 | 2. The location to write a file in JSON format containing information generated by the module while it was run.
21 |
22 | See the `_schemas.py` file for detailed information about the contents of the input JSON.
23 |
24 | Input data
25 | ----------
26 | - **NPX file** : Written by Open Ephys (https://github.com/open-ephys-gui-binaries/open-ephys/tree/neuropix). Contains all of the data recorded from one or more Neuropixels probes.
27 | - **settings.xml** : Written by Open Ephys. Contains information about the signal chain that was used for the experiment.
28 |
29 |
30 | Output data
31 | -----------
32 | - **continuous.dat** : Continuous data (1 file each for LFP and AP band)
33 | - **lfp_timestamps.npy** : Timestamps for LFP samples
34 | - **ap_timestamps.npy** : Timestamps for AP samples
35 | - **channel_states.npy** : Channels on which each event was recorded
36 | - **event_timestamps.npy** : Timestamps for each event
37 | - **open-ephys.json** : Parameters for data acquisition.
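38 |
39 | Example
40 | -------
41 | To read the resulting continuous.dat with numpy (mirroring the pattern used in this repo's unit tests), take the channel count and bit volts from the open-ephys.json written by this module; for Neuropixels 3a these are 384 channels at 0.195 uV/bit.
42 |
43 | ```python
44 | import numpy as np
45 |
46 | num_channels = 384  # from open-ephys.json
47 | bit_volts = 0.195   # from open-ephys.json
48 |
49 | raw = np.memmap("continuous.dat", dtype="int16", mode="r")
50 | data = np.reshape(raw, (raw.size // num_channels, num_channels))
51 | first_second_uv = data[:30000, :] * bit_volts  # AP band is 30 kHz
52 | ```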
--------------------------------------------------------------------------------
/ecephys_spike_sorting/modules/median_subtraction/README.md:
--------------------------------------------------------------------------------
1 | Median Subtraction
2 | ==============
3 | Calls an executable that removes the DC offset and common-mode noise from a spike-band continuous file.
4 |
5 | Because noise on Neuropixels probes is highly correlated across sites that share an ADC, we compute the median of every 24th channel, rather than using the median across all sites. This ends up creating a residual on the order of a few microvolts for large spikes, which can appear in the mean waveform. However, this is well below the probe's noise floor, and shouldn't affect spike sorting or data analysis.
6 |
7 | Dependencies
8 | ------------
9 | C++ source code for the median subtraction binary is available in the [SpikeBandMedianSubtraction](SpikeBandMedianSubtraction/) folder. This must be compiled prior to running this module.
10 |
11 | Running
12 | -------
13 | ```
14 | python -m ecephys_spike_sorting.modules.median_subtraction --input_json --output_json
15 | ```
16 | Two arguments must be included:
17 | 1. The location of an existing file in JSON format containing a list of paths and parameters.
18 | 2. The location to write a file in JSON format containing information generated by the module while it was run.
19 |
20 | See the `_schemas.py` file for detailed information about the contents of the input JSON.
21 |
22 |
23 | Input data
24 | ----------
25 | - **AP band .dat or .bin file** : int16 binary files written by [Open Ephys](https://github.com/open-ephys/plugin-GUI), [SpikeGLX](https://github.com/billkarsh/spikeglx), or the `extract_from_npx` module.
26 | - **probe_info.json** : file written by `depth_estimation` module.
27 |
28 | Output data
29 | -----------
30 | - **AP band .dat or .bin file** : overwrites the existing file with the median-subtracted data.
31 | - **residuals.dat** : contains the subtracted signals, which makes it possible to reconstruct the original data if necessary.
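32 |
33 | Example
34 | -------
35 | A numpy sketch of the ADC-aware referencing described above. This is illustrative only; the real work is done by the compiled executable, which also removes each channel's DC offset.
36 |
37 | ```python
38 | import numpy as np
39 |
40 | def median_subtract(data):
41 |     """data: (samples, 384) int16 array; returns a median-subtracted copy.
42 |     Channels i, i+24, i+48, ... share an ADC, so the median is taken
43 |     within each of the 24 interleaved groups, not across all sites."""
44 |     out = data.astype("float32")
45 |     for group in range(24):
46 |         chans = np.arange(group, data.shape[1], 24)
47 |         out[:, chans] -= np.median(out[:, chans], axis=1, keepdims=True)
48 |     return out.astype(data.dtype)
49 | ```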
--------------------------------------------------------------------------------
/ecephys_spike_sorting/modules/median_subtraction/SpikeBandMedianSubtraction/JuceLibraryCode/JuceHeader.h:
--------------------------------------------------------------------------------
1 | /*
2 |
3 | IMPORTANT! This file is auto-generated each time you save your
4 | project - if you alter its contents, your changes may be overwritten!
5 |
6 | This is the header file that your files should include in order to get all the
7 | JUCE library headers. You should avoid including the JUCE headers directly in
8 | your own source files, because that wouldn't pick up the correct configuration
9 | options for your app.
10 |
11 | */
12 |
13 | #ifndef __APPHEADERFILE_ENIIAS__
14 | #define __APPHEADERFILE_ENIIAS__
15 |
16 | #include "AppConfig.h"
17 |
18 | #include <juce_audio_basics/juce_audio_basics.h>
19 | #include <juce_audio_devices/juce_audio_devices.h>
20 | #include <juce_audio_formats/juce_audio_formats.h>
21 | #include <juce_audio_processors/juce_audio_processors.h>
22 | #include <juce_core/juce_core.h>
23 | #include <juce_cryptography/juce_cryptography.h>
24 | #include <juce_data_structures/juce_data_structures.h>
25 | #include <juce_events/juce_events.h>
26 | #include <juce_graphics/juce_graphics.h>
27 | #include <juce_gui_basics/juce_gui_basics.h>
28 | #include <juce_gui_extra/juce_gui_extra.h>
29 | #include <juce_opengl/juce_opengl.h>
30 | #include <juce_video/juce_video.h>
31 |
32 |
33 | #if ! DONT_SET_USING_JUCE_NAMESPACE
34 | // If your code uses a lot of JUCE classes, then this will obviously save you
35 | // a lot of typing, but can be disabled by setting DONT_SET_USING_JUCE_NAMESPACE.
36 | using namespace juce;
37 | #endif
38 |
39 | #if ! JUCE_DONT_DECLARE_PROJECTINFO
40 | namespace ProjectInfo
41 | {
42 | const char* const projectName = "SpikeBandMedianSubtraction";
43 | const char* const versionString = "1.0.0";
44 | const int versionNumber = 0x10000;
45 | }
46 | #endif
47 |
48 | #endif // __APPHEADERFILE_ENIIAS__
49 |
--------------------------------------------------------------------------------
/ecephys_spike_sorting/modules/median_subtraction/__main__.py:
--------------------------------------------------------------------------------
1 | from argschema import ArgSchemaParser
2 | import os
3 | import logging
4 | import subprocess
5 | import time
6 | import glob
7 | import numpy as np
8 |
9 | from ...common.utils import read_probe_json, get_repo_commit_date_and_hash
10 |
11 | def run_median_subtraction(args):
12 |
13 | print('ecephys spike sorting: median subtraction module')
14 |
15 | commit_date, commit_hash = get_repo_commit_date_and_hash(args['median_subtraction_params']['median_subtraction_repo'])
16 |
17 | mask, offset, scaling, surface_channel, air_channel = read_probe_json(args['common_files']['probe_json'])
18 |
19 | logging.info('Running median subtraction')
20 |
21 | start = time.time()
22 |
23 | subprocess.check_call([args['median_subtraction_params']['median_subtraction_executable'],
24 | args['common_files']['probe_json'],
25 | args['ephys_params']['ap_band_file'],
26 | str(int(air_channel))])
27 |
28 | execution_time = time.time() - start
29 |
30 | print('total time: ' + str(np.around(execution_time, 2)) + ' seconds')
31 | print()
32 |
33 | return {"median_subtraction_execution_time" : execution_time,
34 | "median_subtraction_commit_date" : commit_date,
35 | "median_subtraction_commit_hash" : commit_hash } # output manifest
36 |
37 | def main():
38 | """Main entry point"""
39 |
40 | from ._schemas import InputParameters, OutputParameters
41 |
42 | mod = ArgSchemaParser(schema_type=InputParameters,
43 | output_schema_type=OutputParameters)
44 |
45 | output = run_median_subtraction(mod.args)
46 |
47 | output.update({"input_parameters": mod.args})
48 | if "output_json" in mod.args:
49 | mod.output(output, indent=2)
50 | else:
51 | print(mod.get_output_json(output))
52 |
53 |
54 | if __name__ == "__main__":
55 | main()
56 |
--------------------------------------------------------------------------------
/LICENSE.txt:
--------------------------------------------------------------------------------
1 | Allen Institute Software License – This software license is the 2-clause BSD
2 | license plus a third clause that prohibits redistribution and use for
3 | commercial purposes without further permission.
4 |
5 | Copyright © 2019. Allen Institute. All rights reserved.
6 |
7 | Redistribution and use in source and binary forms, with or without
8 | modification, are permitted provided that the following conditions are met:
9 |
10 | 1. Redistributions of source code must retain the above copyright notice, this
11 | list of conditions and the following disclaimer.
12 |
13 | 2. Redistributions in binary form must reproduce the above copyright notice,
14 | this list of conditions and the following disclaimer in the documentation
15 | and/or other materials provided with the distribution.
16 |
17 | 3. Redistributions and use for commercial purposes are not permitted without
18 | the Allen Institute’s written permission. For purposes of this license,
19 | commercial purposes are the incorporation of the Allen Institute's software
20 | into anything for which you will charge fees or other compensation or use of
21 | the software to perform a commercial service for a third party. Contact
22 | terms@alleninstitute.org for commercial licensing opportunities.
23 |
24 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
25 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
26 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
27 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
28 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
30 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
31 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
32 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
33 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 |
--------------------------------------------------------------------------------
/ecephys_spike_sorting/modules/tPrime_helper/_schemas.py:
--------------------------------------------------------------------------------
1 | from argschema import ArgSchema, ArgSchemaParser
2 | from argschema.schemas import DefaultSchema
3 | from argschema.fields import Nested, InputDir, String, Float, Dict, Int, List, Boolean
4 | from ...common.schemas import EphysParams, Directories
5 | from ..catGT_helper._schemas import CatGTParams
6 |
7 |
8 | class tPrimeParams(DefaultSchema):
9 | tPrime_path = InputDir(help='directory containing the TPrime executable.')
10 | sync_period = Float(default=1.0, help='Period of sync waveform (sec).')
11 | toStream_sync_params = String(required=False, default='imec0', help='stream identifier for tostream, imec, ni, or obx')
12 | ni_sync_params = String(required=False, default='', help='deprecated, now read from fyi file')
13 | ni_ex_list = String(required=False, default='', help='deprecated, now read from fyi file')
14 | im_ex_list = String(required=False, default='', help='deprecated, now read from fyi file')
15 | tPrime_3A = Boolean(required=False, default=False, help='is this 3A data?')
16 | toStream_path_3A = String(required=False, help='full path to toStream edges file')
17 | fromStream_list_3A = List(String, required=False, help='list of full paths to fromStream edges files')
18 | psth_ex_str = String(required=False, help='extract string for events.csv for phy psth')
19 | sort_out_tag = String(required=False, help='tag for sort output (phy) folder')
20 | catGT_out_tag = String(required=False, help='tag catgt output folder; catgt or supercat')
21 |
22 | class InputParameters(ArgSchema):
23 | tPrime_helper_params = Nested(tPrimeParams)
24 | catGT_helper_params = Nested(CatGTParams)
25 | directories = Nested(Directories)
26 | ephys_params = Nested(EphysParams)
27 |
28 | class OutputSchema(DefaultSchema):
29 | input_parameters = Nested(InputParameters,
30 | description=("Input parameters the module "
31 | "was run with"),
32 | required=True)
33 |
34 | class OutputParameters(OutputSchema):
35 |
36 | execution_time = Float()
37 |
--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
1 | env:
2 | global:
3 | - PIPENV_VENV_IN_PROJECT=1
4 | - PIPENV_IGNORE_VIRTUALENVS=1
5 |
6 | matrix:
7 | include:
8 | - os: linux
9 | sudo: required
10 | python: 2.7
11 | env: TEST_PYTHON_VERSION=2.7
12 | - os: linux
13 | sudo: required
14 | python: 3.5
15 | env: TEST_PYTHON_VERSION=3.5
16 | - os: linux
17 | sudo: required
18 | python: 3.6
19 | env: TEST_PYTHON_VERSION=3.6
20 | - os: osx
21 | language: generic
22 | env: TEST_PYTHON_VERSION=2.7
23 | - os: osx
24 | language: generic
25 | env: TEST_PYTHON_VERSION=3.5
26 | - os: osx
27 | language: generic
28 | env: TEST_PYTHON_VERSION=3.6
29 |
30 | install:
31 | - if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then
32 | brew update;
33 | if [[ "$TEST_PYTHON_VERSION" == "2.7" ]]; then
34 | wget https://repo.continuum.io/miniconda/Miniconda2-latest-MacOSX-x86_64.sh -O miniconda.sh;
35 | else
36 | wget https://repo.continuum.io/miniconda/Miniconda3-latest-MacOSX-x86_64.sh -O miniconda.sh;
37 | fi
38 | else
39 | sudo apt-get update;
40 | if [[ "$TEST_PYTHON_VERSION" == "2.7" ]]; then
41 | wget https://repo.continuum.io/miniconda/Miniconda2-latest-Linux-x86_64.sh -O miniconda.sh;
42 | else
43 | wget https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -O miniconda.sh;
44 | fi
45 | fi
46 | - bash miniconda.sh -b -p $HOME/miniconda
47 | - export PATH="$HOME/miniconda/bin:$PATH"
48 | - hash -r
49 | - conda config --set always_yes yes --set changeps1 no
50 | - conda update -q conda
51 | - conda create -q -n test-environment python=$TEST_PYTHON_VERSION
52 | - source activate test-environment
53 | - if [[ "$TRAVIS_OS_NAME" == "osx" && "$TEST_PYTHON_VERSION" == "2.7" ]]; then
54 | conda install virtualenv;
55 | fi
56 | - pip install pipenv
57 | - pipenv --update
58 | - pipenv --python ${TEST_PYTHON_VERSION}
59 | - pipenv install --dev
60 | - pipenv run pip install .
61 |
62 | script:
63 | - pipenv run coverage run --source ecephys_spike_sorting -m pytest
64 | - pipenv run codecov
65 |
--------------------------------------------------------------------------------
/ecephys_spike_sorting/modules/catGT_helper/_schemas.py:
--------------------------------------------------------------------------------
1 | from argschema import ArgSchema, ArgSchemaParser
2 | from argschema.schemas import DefaultSchema
3 | from argschema.fields import Nested, InputDir, String, Float, Dict, Int, Bool
4 | from ...common.schemas import EphysParams, Directories
5 |
6 |
7 | class CatGTParams(DefaultSchema):
8 | run_name = String(required=True, help='undecorated run name (no g or t indices)')
9 | gate_string = String(required=True, default='0', help='gate string')
10 | trigger_string = String(required=True, default='0,0', help='string specifying trials to concatenate, e.g. 0,200')
11 | probe_string = String(required=True, default='0', help='string specifying probes, e.g. 0:3')
12 | stream_string = String(required=True, default='-ap', help='string specifying which streams to process')
13 | car_mode = String(required=False, default='None', help='Common average reference mode. Must be None, gbldmx, or loccar')
14 | loccar_inner = Int(required=False, default=2, help='Inner radius for loccar in sites')
15 | loccar_outer = Int(required=False, default=8, help='Outer radius for loccar in sites')
16 | loccar_inner_um = Int(required=False, default=40, help='Inner radius for loccar in um')
17 | loccar_outer_um = Int(required=False, default=40, help='Outer radius for loccar in um')
18 | maxZ_um = Float(required=False, default=-1, help='If > -1, maximum z from bottom row to analyze and save')
19 | useGeom = Bool(required=False, default=True, help='use snsGeomMap for loccar and depth')
20 | cmdStr = String(required=True, default='-prbfld -aphipass=300 -gbldmx -gfix=0.40,0.10,0.02', help='input stream filter, error correct and extract settings for CatGT')
21 | catGTPath = InputDir(help='directory containing the CatGT executable.')
22 |
23 | class InputParameters(ArgSchema):
24 |
25 | catGT_helper_params = Nested(CatGTParams)
26 | directories = Nested(Directories)
27 | ephys_params = Nested(EphysParams)
28 |
29 | class OutputSchema(DefaultSchema):
30 | input_parameters = Nested(InputParameters,
31 | description=("Input parameters the module "
32 | "was run with"),
33 | required=True)
34 |
35 | class OutputParameters(OutputSchema):
36 |
37 | execution_time = Float()
38 |
--------------------------------------------------------------------------------
/docs/aibs_sphinx/static/external_assets/stylesheets/animation.css:
--------------------------------------------------------------------------------
1 | /*
2 | Animation example, for spinners
3 | */
4 | .animate-spin {
5 | -moz-animation: spin 2s infinite linear;
6 | -o-animation: spin 2s infinite linear;
7 | -webkit-animation: spin 2s infinite linear;
8 | animation: spin 2s infinite linear;
9 | display: inline-block;
10 | }
11 | @-moz-keyframes spin {
12 | 0% {
13 | -moz-transform: rotate(0deg);
14 | -o-transform: rotate(0deg);
15 | -webkit-transform: rotate(0deg);
16 | transform: rotate(0deg);
17 | }
18 |
19 | 100% {
20 | -moz-transform: rotate(359deg);
21 | -o-transform: rotate(359deg);
22 | -webkit-transform: rotate(359deg);
23 | transform: rotate(359deg);
24 | }
25 | }
26 | @-webkit-keyframes spin {
27 | 0% {
28 | -moz-transform: rotate(0deg);
29 | -o-transform: rotate(0deg);
30 | -webkit-transform: rotate(0deg);
31 | transform: rotate(0deg);
32 | }
33 |
34 | 100% {
35 | -moz-transform: rotate(359deg);
36 | -o-transform: rotate(359deg);
37 | -webkit-transform: rotate(359deg);
38 | transform: rotate(359deg);
39 | }
40 | }
41 | @-o-keyframes spin {
42 | 0% {
43 | -moz-transform: rotate(0deg);
44 | -o-transform: rotate(0deg);
45 | -webkit-transform: rotate(0deg);
46 | transform: rotate(0deg);
47 | }
48 |
49 | 100% {
50 | -moz-transform: rotate(359deg);
51 | -o-transform: rotate(359deg);
52 | -webkit-transform: rotate(359deg);
53 | transform: rotate(359deg);
54 | }
55 | }
56 | @-ms-keyframes spin {
57 | 0% {
58 | -moz-transform: rotate(0deg);
59 | -o-transform: rotate(0deg);
60 | -webkit-transform: rotate(0deg);
61 | transform: rotate(0deg);
62 | }
63 |
64 | 100% {
65 | -moz-transform: rotate(359deg);
66 | -o-transform: rotate(359deg);
67 | -webkit-transform: rotate(359deg);
68 | transform: rotate(359deg);
69 | }
70 | }
71 | @keyframes spin {
72 | 0% {
73 | -moz-transform: rotate(0deg);
74 | -o-transform: rotate(0deg);
75 | -webkit-transform: rotate(0deg);
76 | transform: rotate(0deg);
77 | }
78 |
79 | 100% {
80 | -moz-transform: rotate(359deg);
81 | -o-transform: rotate(359deg);
82 | -webkit-transform: rotate(359deg);
83 | transform: rotate(359deg);
84 | }
85 | }
86 |
--------------------------------------------------------------------------------
/ecephys_spike_sorting/modules/extract_from_npx/create_settings_json.py:
--------------------------------------------------------------------------------
1 | from xmljson import gdata
2 | from xml.etree.ElementTree import fromstring
3 |
4 | def create_settings_json(input_file):
5 |
 6 | with open(input_file, "r") as file:
7 | file_string = file.read()
8 |
9 | a = gdata.data(fromstring(file_string))
10 |
11 | info_dict = { }
12 |
13 | info_dict['software'] = 'Open Ephys GUI'
14 | info_dict['version'] = a['SETTINGS']['INFO']['VERSION']['$t']
15 | info_dict['machine'] = a['SETTINGS']['INFO']['MACHINE']['$t']
16 | info_dict['os'] = a['SETTINGS']['INFO']['OS']['$t']
17 | info_dict['date'] = a['SETTINGS']['INFO']['DATE']['$t']
18 |
19 | neuropix = { }
20 |
21 | for processor in a['SETTINGS']['SIGNALCHAIN'][1]['PROCESSOR']:
22 |
23 | #print(processor)
24 | #print(type(processor))
25 |
26 | if str.find(processor['name'], 'Neuropix') > -1:
27 |
28 | neuropix['phase'] = processor['name'][-2:]
29 |
30 | try:
31 | settings = processor['EDITOR']['NEUROPIXELS']
32 | hardware_info = [info.split(': ') for info in settings['info'].split('\n')[::2]]
33 |
34 | neuropix['ap gain'] = settings['apGainValue']
35 | neuropix['lfp gain'] = settings['lfpGainValue']
36 | neuropix['reference channel'] = settings['referenceChannel']
37 | neuropix['filter cut'] = settings['filterCut']
38 |
39 | for info in hardware_info:
40 | neuropix[str.lower(info[0])] = info[1]
41 | except KeyError:
42 | neuropix['error'] = 'probe info not found'
43 |
44 | sp0 = {}
45 | sp0['name'] = 'Neuropix-3a-100.0'
46 | sp0['type'] = 'AP band'
47 | sp0['num_channels'] = 384
48 | sp0['sample_rate'] = 30000.0
49 | sp0['bit_volts'] = 0.195
50 |
51 | sp1 = {}
52 | sp1['name'] = 'Neuropix-3a-100.1'
53 | sp1['type'] = 'LFP band'
54 | sp1['num_channels'] = 384
55 | sp1['sample_rate'] = 2500.0
56 | sp1['bit_volts'] = 0.195
57 |
58 | neuropix['subprocessors'] = [sp0, sp1]
59 |
60 | oe_json = { }
61 | oe_json['info'] = info_dict
62 | oe_json['neuropix'] = neuropix
63 |
64 | return oe_json
65 |
66 |
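
A usage sketch, mirroring the commented-out call in `extract_from_npx/__main__.py`; the file names here are placeholders:

```python
import io
import json

from ecephys_spike_sorting.modules.extract_from_npx.create_settings_json import create_settings_json

# Parse an Open Ephys settings.xml and persist the result as JSON.
settings_json = create_settings_json('settings.xml')  # placeholder path
with io.open('open-ephys.settings.json', 'w', encoding='utf-8') as f:
    f.write(json.dumps(settings_json, ensure_ascii=False, sort_keys=True, indent=4))
```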
--------------------------------------------------------------------------------
/ecephys_spike_sorting/modules/noise_templates/__main__.py:
--------------------------------------------------------------------------------
1 | from argschema import ArgSchemaParser
2 | import os
3 | import logging
4 | import time
5 |
6 | import numpy as np
7 |
8 | from .id_noise_templates import id_noise_templates, id_noise_templates_rf
9 |
10 | from ...common.utils import write_cluster_group_tsv, load_kilosort_data
11 |
12 |
13 | def classify_noise_templates(args):
14 |
15 | print('ecephys spike sorting: noise templates module')
16 |
17 | start = time.time()
18 |
19 | spike_times, spike_clusters, spike_templates, amplitudes, templates, channel_map, \
20 | channel_pos, cluster_ids, cluster_quality, cluster_amplitude = \
21 | load_kilosort_data(args['directories']['kilosort_output_directory'], \
22 | args['ephys_params']['sample_rate'], \
23 | convert_to_seconds = True)
24 |
25 | if args['noise_waveform_params']['use_random_forest']:
26 | # use random forest classifier
27 | cluster_ids, is_noise = id_noise_templates_rf(spike_times, spike_clusters, \
28 | cluster_ids, templates, args['noise_waveform_params'])
29 | else:
30 | # use heuristics to identify templates that look like noise
31 | cluster_ids, is_noise = id_noise_templates(cluster_ids, templates, \
32 | channel_pos, args['noise_waveform_params'])
33 |
34 | mapping = {False: 'good', True: 'noise'}
35 | labels = [mapping[value] for value in is_noise]
36 |
37 | write_cluster_group_tsv(cluster_ids,
38 | labels,
39 | args['directories']['kilosort_output_directory'],
40 | args['ephys_params']['cluster_group_file_name'])
41 |
42 | execution_time = time.time() - start
43 |
44 | print('total time: ' + str(np.around(execution_time,2)) + ' seconds')
45 | print()
46 |
47 | return {"execution_time" : execution_time} # output manifest
48 |
49 |
50 | def main():
51 |
52 | from ._schemas import InputParameters, OutputParameters
53 |
54 | mod = ArgSchemaParser(schema_type=InputParameters,
55 | output_schema_type=OutputParameters)
56 |
57 | output = classify_noise_templates(mod.args)
58 |
59 | output.update({"input_parameters": mod.args})
60 | if "output_json" in mod.args:
61 | mod.output(output, indent=2)
62 | else:
63 | print(mod.get_output_json(output))
64 |
65 |
66 | if __name__ == "__main__":
67 | main()
68 |
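
The resulting `cluster_group.tsv` follows the Phy convention of a header row plus one `cluster_id<TAB>group` row per cluster. The real helper lives in `common/utils.py`; a minimal stand-in with the same layout might look like:

```python
import os

def write_cluster_group_tsv_sketch(cluster_ids, labels, out_dir, fname='cluster_group.tsv'):
    # Phy-style TSV: header row, then one "id<TAB>label" row per cluster.
    with open(os.path.join(out_dir, fname), 'w') as f:
        f.write('cluster_id\tgroup\n')
        for cid, label in zip(cluster_ids, labels):
            f.write(f'{cid}\t{label}\n')
```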
--------------------------------------------------------------------------------
/ecephys_spike_sorting/modules/quality_metrics/_schemas.py:
--------------------------------------------------------------------------------
1 | from argschema import ArgSchema, ArgSchemaParser
2 | from argschema.schemas import DefaultSchema
3 | from argschema.fields import Nested, InputDir, String, Float, Dict, Int, Boolean
4 | from ...common.schemas import EphysParams, Directories, WaveformMetricsFile, ClusterMetricsFile
5 |
6 |
7 | class QualityMetricsParams(DefaultSchema):
8 | isi_threshold = Float(required=False, default=0.0015, help='Maximum time (in seconds) for ISI violation')
9 | min_isi = Float(required=False, default=0.00, help='Minimum time (in seconds) for ISI violation')
10 | tbin_sec = Float(required=False, default=0.001, help='time bin in seconds for ccg in contam_rate calculation')
11 | max_radius_um = Int(required=False, default=68, help='Maximum radius for computing PC metrics, in um')
12 | max_spikes_for_unit = Int(required=False, default=500, help='Number of spikes to subsample for computing PC metrics')
13 | max_spikes_for_nn = Int(required=False, default=10000, help='Further subsampling for NearestNeighbor calculation')
14 | n_neighbors = Int(required=False, default=4, help='Number of neighbors to use for NearestNeighbor calculation')
15 | n_silhouette = Int(required=False, default=10000, help='Number of spikes to use for calculating silhouette score')
16 |
17 | drift_metrics_min_spikes_per_interval = Int(required=False, default=10, help='Minimum number of spikes for computing depth')
18 | drift_metrics_interval_s = Float(required=False, default=100, help='Interval length in seconds for computing spike depth')
19 | include_pc_metrics = Boolean(required=False, default=True, help='Set to false if features were not saved with Phy output')
20 | include_ibl = Boolean(required=False, default=True, help='Set to false to skip the IBL-style metrics')
21 |
22 | class InputParameters(ArgSchema):
23 |
24 | quality_metrics_params = Nested(QualityMetricsParams)
25 | ephys_params = Nested(EphysParams)
26 | directories = Nested(Directories)
27 | waveform_metrics = Nested(WaveformMetricsFile)
28 | cluster_metrics = Nested(ClusterMetricsFile)
29 |
30 | class OutputSchema(DefaultSchema):
31 | input_parameters = Nested(InputParameters,
32 | description=("Input parameters the module "
33 | "was run with"),
34 | required=True)
35 |
36 | class OutputParameters(OutputSchema):
37 |
38 | execution_time = Float()
39 | quality_metrics_output_file = String()
40 |
--------------------------------------------------------------------------------
/ecephys_spike_sorting/modules/extract_from_npx/__main__.py:
--------------------------------------------------------------------------------
1 | from argschema import ArgSchemaParser
2 | import os
3 | import logging
4 | import subprocess
5 | import time
6 | import shutil
7 |
8 | import numpy as np
9 |
10 | import io, json
11 |
12 | from .create_settings_json import create_settings_json
13 | from ...common.utils import get_repo_commit_date_and_hash
14 |
15 | def run_npx_extractor(args):
16 |
17 | print('ecephys spike sorting: npx extractor module')
18 |
19 | start = time.time()
20 |
21 | commit_date, commit_hash = get_repo_commit_date_and_hash(args['extract_from_npx_params']['npx_extractor_repo'])
22 |
23 | extracted_data_drive, directory = os.path.splitdrive(args['directories']['extracted_data_directory'])
24 |
25 | total, used, free = shutil.disk_usage(extracted_data_drive)
26 |
27 | filesize = os.path.getsize(args['extract_from_npx_params']['npx_directory'])
28 |
29 | assert free > filesize * 2, 'Not enough free disk space to extract the data'
30 |
31 | if not os.path.exists(args['directories']['extracted_data_directory']):
32 | os.mkdir(args['directories']['extracted_data_directory'])
33 |
34 | subprocess.check_call([args['extract_from_npx_params']['npx_extractor_executable'],
35 | args['extract_from_npx_params']['npx_directory'],
36 | args['directories']['extracted_data_directory']])
37 |
38 | execution_time = time.time() - start
39 |
40 | #settings_json = create_settings_json(args['extract_from_npx_params']['settings_xml'])
41 |
42 | #with io.open(args['common_files']['settings_json'], 'w', encoding='utf-8') as f:
43 | # f.write(json.dumps(settings_json, ensure_ascii=False, sort_keys=True, indent=4))
44 |
45 | print('total time: ' + str(np.around(execution_time,2)) + ' seconds')
46 | print()
47 |
48 | return {"execution_time" : execution_time,
49 | "npx_extractor_commit_date" : commit_date,
50 | "npx_extractor_commit_hash" : commit_hash } # output manifest
51 |
52 |
53 | def main():
54 |
55 | from ._schemas import InputParameters, OutputParameters
56 |
57 | """Main entry point:"""
58 | mod = ArgSchemaParser(schema_type=InputParameters,
59 | output_schema_type=OutputParameters)
60 |
61 | output = run_npx_extractor(mod.args)
62 |
63 | output.update({"input_parameters": mod.args})
64 | if "output_json" in mod.args:
65 | mod.output(output, indent=2)
66 | else:
67 | print(mod.get_output_json(output))
68 |
69 |
70 | if __name__ == "__main__":
71 | main()
72 |
--------------------------------------------------------------------------------
/ecephys_spike_sorting/modules/mean_waveforms/_schemas.py:
--------------------------------------------------------------------------------
1 | from argschema import ArgSchema, ArgSchemaParser
2 | from argschema.schemas import DefaultSchema
3 | from argschema.fields import Nested, InputDir, String, Float, Dict, Int, Bool
4 | from ...common.schemas import EphysParams, Directories, WaveformMetricsFile, ClusterMetricsFile
5 |
6 | class MeanWaveformParams(DefaultSchema):
7 | samples_per_spike = Int(required=True, default=82, help='Number of samples to extract for each spike')
8 | pre_samples = Int(required=True, default=20, help='Number of samples between start of spike and the peak')
9 | num_epochs = Int(required=True, default=1, help='Number of epochs to compute mean waveforms')
10 | nAP = Int(required=True, default=384, help='Number AP channels in saved file')
11 | spikes_per_epoch = Int(required=True, default=100, help='Max number of spikes per epoch')
12 | upsampling_factor = Float(required=False, default=200/82, help='Upsampling factor for calculating waveform metrics')
13 | spread_threshold = Float(required=False, default=0.12, help='Threshold for computing channel spread of 2D waveform')
14 | site_range = Int(required=False, default=16, help='Number of sites to use for 2D waveform metrics')
15 | cWaves_path = InputDir(required=False, help='directory containing the C_Waves executable.')
16 | use_C_Waves = Bool(required=False, default=False, help='Use faster C routine to calculate mean waveforms')
17 | snr_radius = Int(required=False, default=8, help='disk radius (chans) about pk-chan for snr calculation in C_waves')
18 | snr_radius_um = Int(required=False, default=8, help='disk radius (um) about pk-chan for snr calculation in C_waves')
19 | mean_waveforms_file = String(required=True, help='Path to mean waveforms file (.npy)')
20 | calc_half_run = Bool(required=False, default=False, help='calculate mean waveforms for 1st + 2nd half of recording')
21 |
22 |
23 | class InputParameters(ArgSchema):
24 |
25 | waveform_metrics = Nested(WaveformMetricsFile)
26 | mean_waveform_params = Nested(MeanWaveformParams)
27 | cluster_metrics = Nested(ClusterMetricsFile)
28 | ephys_params = Nested(EphysParams)
29 | directories = Nested(Directories)
30 |
31 | class OutputSchema(DefaultSchema):
32 | input_parameters = Nested(InputParameters,
33 | description=("Input parameters the module "
34 | "was run with"),
35 | required=True)
36 |
37 | class OutputParameters(OutputSchema):
38 |
39 | execution_time = Float()
40 | mean_waveforms_file = String()
41 |
--------------------------------------------------------------------------------
/ecephys_spike_sorting/common/schemas.py:
--------------------------------------------------------------------------------
1 | from argschema.schemas import DefaultSchema
2 | from argschema.fields import Nested, InputDir, OutputDir, String, Float, Dict, Int, NumpyArray, Bool
3 |
4 | class EphysParams(DefaultSchema):
5 | sample_rate = Float(required=True, default=30000.0, help='Sample rate of Neuropixels AP band continuous data')
 6 | lfp_sample_rate = Float(required=True, default=2500.0, help='Sample rate of Neuropixels LFP band continuous data')
7 | bit_volts = Float(required=True, default=0.195, help='Scalar required to convert int16 values into microvolts')
8 | num_channels = Int(required=True, default=385, help='Total number of channels in binary data files')
9 | num_sync_channels = Int(required=True, default=1, help='Number of sync channels in binary data files')
10 | reference_channels = NumpyArray(required=False, default=[36, 75, 112, 151, 188, 227, 264, 303, 340, 379], help='Reference channels on Neuropixels probe (numbering starts at 0)')
11 | template_zero_padding = Int(required=True, default=21, help='Zero-padding on templates output by Kilosort')
12 | vertical_site_spacing = Float(required=False, default=20e-6, help='Vertical site spacing in meters')
13 | probe_type = String(required=False, default='NP1', help='3A, 3B2, NP1')
14 | lfp_band_file = String(required=False, help='Location of LFP band binary file')
15 | ap_band_file = String(required=False, help='Location of AP band binary file')
16 | reorder_lfp_channels = Bool(required=False, default=True, help='Should we fix the ordering of LFP channels (necessary for 3a probes following extract_from_npx modules)')
17 | cluster_group_file_name = String(required=False, default='cluster_group.tsv')
18 |
19 | class Directories(DefaultSchema):
20 |
21 | ecephys_directory = InputDir(help='Location of the ecephys_spike_sorting directory containing modules directory')
22 | npx_directory = InputDir(help='Location of raw neuropixels binary files')
23 | kilosort_output_directory = OutputDir(help='Location of Kilosort output files')
24 | extracted_data_directory = OutputDir(help='Location for NPX/CatGT processed files')
25 | kilosort_output_tmp = OutputDir(help='Location for temporary KS output')
26 |
27 | class CommonFiles(DefaultSchema):
28 |
29 | probe_json = String(help='Location of probe JSON file')
30 | settings_json = String(help='Location of settings JSON written by extract_from_npx module')
31 |
32 | class WaveformMetricsFile(DefaultSchema):
33 | waveform_metrics_file = String(help='Location of waveform metrics CSV')
34 |
35 | class ClusterMetricsFile(DefaultSchema):
36 | cluster_metrics_file = String(help='Location of cluster metrics CSV')
37 |
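
A minimal sketch of the nested structure these common schemas describe, as it would appear inside a module's input JSON. Paths are placeholders, and omitted fields take the defaults above; the real files are generated by `create_input_json.py`.

```python
# Illustrative fragment only.
input_params = {
    'ephys_params': {
        'sample_rate': 30000.0,
        'bit_volts': 0.195,
        'num_channels': 385,
        'num_sync_channels': 1,
    },
    'directories': {
        'ecephys_directory': '/path/to/ecephys_spike_sorting',
        'npx_directory': '/path/to/raw_data',
        'kilosort_output_directory': '/path/to/ks_output',
        'extracted_data_directory': '/path/to/extracted',
        'kilosort_output_tmp': '/path/to/ks_tmp',
    },
}
```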
--------------------------------------------------------------------------------
/ecephys_spike_sorting/common/OEFileInfo.py:
--------------------------------------------------------------------------------
1 | import json
2 | import os
3 | import numpy as np
4 |
5 | class OEContinuousFile:
6 |
7 | """
8 |
9 | Stores information about an Open Ephys .dat file, including:
10 | - the number of channels
11 | - the sample rate
12 | - the bit volts value for each channel.
13 |
14 | """
15 |
16 | def __init__(self, json_file, file_num = 0):
17 |
18 | info = json.load(open(json_file))
19 |
20 | if os.path.isabs(json_file):
21 | basepath = os.path.dirname(json_file)
22 | else:
23 | basepath = ""
24 |
25 | self.datafile = os.path.join(basepath, 'continuous', info['continuous'][file_num]['folder_name'], 'continuous.dat')
26 | self.tsfile = os.path.join(basepath, 'continuous', info['continuous'][file_num]['folder_name'], 'timestamps.npy')
27 | self.num_channels = info['continuous'][file_num]['num_channels']
28 | self.sample_rate = info['continuous'][file_num]['sample_rate']
29 | self.bit_volts = [0] * self.num_channels
30 |
31 | if info['continuous'][0]['folder_name'].find('3b') > -1:
32 | self.refs = np.array([191])
33 | else:
34 | self.refs = np.array([36, 75, 112, 151, 188, 190, 227, 264, 303, 340, 379])
35 |
36 | for i in range(self.num_channels):
37 | self.bit_volts[i] = info['continuous'][file_num]['channels'][i]['bit_volts']
38 |
39 | def check_size(self):
40 |
41 | num_bytes = os.path.getsize(self.datafile)
42 |
43 | if num_bytes % (self.num_channels * 2) == 0:
44 |
45 | return True
46 | else:
47 | return False
48 |
49 | def load(self):
50 |
51 | rawData = np.memmap(self.datafile, dtype='int16', mode='r')
52 | data = np.reshape(rawData, (int(rawData.size/self.num_channels), self.num_channels)) * self.bit_volts[0]
53 |
54 | return data
55 |
56 |
57 |
58 | def get_lfp_channel_order():
59 |
60 | """
61 | Returns the channel ordering for LFP data extracted from NPX files.
62 |
63 | Parameters:
64 | ----------
65 | None
66 |
67 | Returns:
68 | ---------
69 | channel_order : numpy.ndarray
70 | Contains the actual channel ordering.
71 | """
72 |
73 | remapping_pattern = np.array([0, 12, 1, 13, 2, 14, 3, 15, 4, 16, 5, 17, 6, 18, 7, 19,
74 | 8, 20, 9, 21, 10, 22, 11, 23, 24, 36, 25, 37, 26, 38,
75 | 27, 39, 28, 40, 29, 41, 30, 42, 31, 43, 32, 44, 33, 45, 34, 46, 35, 47])
76 |
77 | channel_order = np.concatenate([remapping_pattern + 48*i for i in range(0,8)])
78 |
79 | return channel_order
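
For example, `get_lfp_channel_order()` can re-order a samples x channels LFP array into probe order; a minimal sketch, with a stand-in array in place of real data:

```python
import numpy as np

from ecephys_spike_sorting.common.OEFileInfo import get_lfp_channel_order

# Re-order the 384 LFP channels of a samples x channels array into probe order.
data = np.zeros((1000, 384), dtype='int16')  # stand-in for real LFP data
ordered = data[:, get_lfp_channel_order()]
```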
--------------------------------------------------------------------------------
/ecephys_spike_sorting/modules/depth_estimation/_schemas.py:
--------------------------------------------------------------------------------
1 | from argschema import ArgSchema, ArgSchemaParser
2 | from argschema.schemas import DefaultSchema
3 | from argschema.fields import Nested, InputDir, NumpyArray, String, Float, Dict, Int, Bool, OutputFile
4 | from ...common.schemas import EphysParams, Directories, CommonFiles
5 |
6 | class DepthEstimationParams(DefaultSchema):
7 | hi_noise_thresh = Float(required=True, default=50.0, help='Max RMS noise for including channels')
8 | lo_noise_thresh = Float(required=True, default=3.0, help='Min RMS noise for including channels')
9 |
10 | save_figure = Bool(required=True, default=True)
11 | figure_location = OutputFile(required=True, default=None)
12 |
13 | smoothing_amount = Int(required=True, default=5, help='Gaussian smoothing parameter to reduce channel-to-channel noise')
14 | power_thresh = Float(required=True, default=2.5, help='Ignore threshold crossings if power is above this level (indicates channels are in the brain)')
15 | diff_thresh = Float(required=True, default=-0.07, help='Threshold to detect large increases in power at brain surface')
16 | freq_range = NumpyArray(required=True, default=[0,10], help='Frequency band for detecting power increases')
17 | max_freq = Int(required=True, default=150, help='Maximum frequency to plot')
18 | saline_range_um = NumpyArray(required=True, default=[3700,3800], help='Y range assumed to be out of the brain, but in saline')
19 | n_passes = Int(required=True, default=10, help='Number of times to compute offset and surface channel')
20 | skip_s_per_pass = Int(required=True, default=5, help='Number of seconds between data chunks used on each pass') #default=100
21 | start_time = Float(required=True, default=0, help='First time (in seconds) for computing median offset')
22 | time_interval = Float(required=True, default=5, help='Number of seconds for computing median offset')
23 |
24 | nfft = Int(required=True, default=4096, help='Length of FFT used for calculations')
25 |
26 | air_gap_um = Int(required=True, default=1000, help='Approximate um between brain surface and air')
27 |
28 | class InputParameters(ArgSchema):
29 |
30 | depth_estimation_params = Nested(DepthEstimationParams)
31 |
32 | ephys_params = Nested(EphysParams)
33 | directories = Nested(Directories)
34 | common_files = Nested(CommonFiles)
35 |
36 | class OutputSchema(DefaultSchema):
37 |
38 | input_parameters = Nested(InputParameters,
39 | description=("Input parameters the module "
40 | "was run with"),
41 | required=True)
42 |
43 | class OutputParameters(OutputSchema):
44 |
45 | surface_channel = Int()
46 | air_channel = Int()
47 | probe_json = String()
48 | execution_time = Float()
--------------------------------------------------------------------------------
/ecephys_spike_sorting/common/epoch.py:
--------------------------------------------------------------------------------
1 | import h5py as h5
2 | import numpy as np
3 |
4 |
5 | class Epoch():
6 |
7 | """
8 | Represents a data epoch with a start time (in seconds), an end time (in seconds), and a name
9 |
10 | Optionally includes a start_index and an end_index
11 |
12 | """
13 |
14 | def __init__(self, name, start_time, end_time):
15 |
16 | """
17 | name : str
18 | Name of epoch
19 | start_time : float
20 | Start time in seconds
21 | end_time : float
22 | End time in seconds (can be Inf to use the full file)
23 | """
24 |
25 | self.start_time = start_time
26 | self.end_time = end_time
27 | self.name = name
28 | self.start_index = None
29 | self.end_index = None
30 |
31 | def convert_to_index(self, timestamps):
32 |
33 | """ Converts start/end times to start/end indices
34 |
35 | Input:
36 | ------
37 | timestamps : numpy.ndarray (float)
38 | Array of timestamps for each sample
39 |
40 | """
41 |
42 | self.start_index = np.argmin(np.abs(timestamps - self.start_time))
43 |
44 | if self.end_time != np.Inf:
45 | self.end_index = np.argmin(np.abs(timestamps - self.end_time))
46 | else:
47 | self.end_index = timestamps.size
48 |
49 |
50 |
51 | def get_epochs_from_nwb_file(filename):
52 |
53 | nwb = h5.File(filename, 'r')
54 |
55 | epochs = []
56 |
57 | stimuli = nwb['stimulus']['presentation'].keys()
58 |
59 | for stim_idx, stimulus in enumerate(stimuli):
60 |
61 | if stimulus != 'optotagging' and stimulus != 'spontaneous':
62 |
63 | trial_times = np.squeeze(nwb['stimulus']['presentation'][stimulus]['timestamps'][:,0])
64 | trial_data = nwb['stimulus']['presentation'][stimulus]['data']
65 | stimulus_features = [i.decode('utf-8') for i in nwb['stimulus']['presentation'][stimulus]['features']]
66 |
67 | if stimulus.find('natural_movie') > -1:
68 | movie_start_inds = np.where(trial_data == 0)[0]
69 | trial_times = trial_times[movie_start_inds]
70 |
71 | if stimulus.find('flash_250') > -1:
72 | epoch1_end = np.max(trial_times)
73 | elif stimulus.find('drifting_gratings_more_repeats') > -1:
74 | gap = np.where(np.diff(trial_times) > 5)[0][0]
75 | epoch3_start = np.mean(trial_times[gap:gap+2])
76 | elif stimulus.find('static_gratings') > -1:
77 | epoch3_start = np.min(trial_times)
78 |
79 | epochs = [Epoch('RF_mapping_and_flashes', 0, epoch1_end),
80 | Epoch('epoch2', epoch1_end, epoch3_start),
81 | Epoch('epoch3', epoch3_start, np.Inf),
82 | Epoch('complete_session', 0, np.Inf)]
83 |
84 | return epochs
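
A short usage sketch of `Epoch`, assuming a timestamps array in seconds (here a synthetic 20-minute LFP-rate array):

```python
import numpy as np

from ecephys_spike_sorting.common.epoch import Epoch

# Define a 10-minute epoch and map its boundaries onto sample indices.
epoch = Epoch('example_epoch', start_time=0.0, end_time=600.0)
timestamps = np.arange(0, 1200, 1 / 2500.)  # 20 minutes at the 2.5 kHz LFP rate
epoch.convert_to_index(timestamps)
print(epoch.start_index, epoch.end_index)
```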
--------------------------------------------------------------------------------
/ecephys_spike_sorting/modules/depth_estimation/__main__.py:
--------------------------------------------------------------------------------
1 | from argschema import ArgSchemaParser
2 | import os
3 | import logging
4 | import time
5 | from pathlib import Path
6 |
7 | import numpy as np
8 |
9 | from ecephys_spike_sorting.modules.depth_estimation.depth_estimation import compute_channel_offsets, find_surface_channel
10 | from ecephys_spike_sorting.common.utils import write_probe_json
11 | from ecephys_spike_sorting.common.SGLXMetaToCoords import MetaToCoords
12 |
13 | def run_depth_estimation(args):
14 |
15 | print('ecephys spike sorting: depth estimation module\n')
16 |
17 | start = time.time()
18 |
19 | numChannels = args['ephys_params']['num_channels']
20 |
21 | rawDataAp = np.memmap(args['ephys_params']['ap_band_file'], dtype='int16', mode='r')
22 | dataAp = np.reshape(rawDataAp, (int(rawDataAp.size/numChannels), numChannels))
23 |
24 | rawDataLfp = np.memmap(args['ephys_params']['lfp_band_file'], dtype='int16', mode='r')
25 | dataLfp = np.reshape(rawDataLfp, (int(rawDataLfp.size/numChannels), numChannels))
26 |
27 | metaName, binExt = os.path.splitext(args['ephys_params']['ap_band_file'])
28 | metaFullPath = Path(metaName + '.meta')
29 |
30 | [xCoord, yCoord, shankInd, connected] = MetaToCoords(metaFullPath, -1, badChan= np.zeros((0), dtype = 'int'), destFullPath = '', showPlot=False)
31 |
32 | print('Computing surface channel...')
33 |
34 | info_lfp = find_surface_channel(dataLfp,
35 | args['ephys_params'],
36 | args['depth_estimation_params'],
37 | xCoord,
38 | yCoord,
39 | shankInd)
40 |
41 | write_probe_json(args['common_files']['probe_json'],
42 | info_lfp['surface_y'],
43 | info_lfp['air_y'],
44 | np.squeeze(yCoord),
45 | np.squeeze(xCoord),
46 | np.squeeze(shankInd))
47 |
48 | execution_time = time.time() - start
49 |
50 | print('total time: ' + str(np.around(execution_time,2)) + ' seconds')
51 | print()
52 |
53 | return {"surface_channel": info_lfp['surface_y'],
54 | "air_channel": info_lfp['air_y'],
55 | "probe_json": args['common_files']['probe_json'],
56 | "execution_time": execution_time} # output manifest
57 |
58 | def main():
59 |
60 | from ecephys_spike_sorting.modules.depth_estimation._schemas import InputParameters, OutputParameters
61 |
62 | mod = ArgSchemaParser(schema_type=InputParameters,
63 | output_schema_type=OutputParameters)
64 |
65 | output = run_depth_estimation(mod.args)
66 |
67 | output.update({"input_parameters": mod.args})
68 | if "output_json" in mod.args:
69 | mod.output(output, indent=2)
70 | else:
71 | print(mod.get_output_json(output))
72 |
73 |
74 | if __name__ == "__main__":
75 | main()
76 |
--------------------------------------------------------------------------------
/ecephys_spike_sorting/modules/noise_templates/README.md:
--------------------------------------------------------------------------------
1 | Noise Templates
2 | ==============
3 | Identifies "noise" units based on template shape
4 |
 5 | [Kilosort2](https://github.com/MouseLand/Kilosort2) generates templates of a fixed length (2 ms) that match the time course of an extracellularly detected spike waveform. However, there are no constraints on template shape, which means that the algorithm often fits templates to voltage fluctuations that could not physically result from the current flow associated with an action potential. The units associated with these templates are considered "noise," and must be filtered out prior to analysis. This is true for other spike sorters as well, but the characteristics of the noise waveforms may be highly algorithm-dependent.
6 |
7 | This module contains code for two different approaches to noise template identification:
8 |
 9 | (1) `id_noise_templates()` uses a variety of heuristics to find units with abnormal spatial spread (single channel or > 300 um), or multiple spatial peaks. These are based on many observations of typical noise template shapes from Neuropixels recordings in cortex, hippocampus, thalamus, and midbrain. The appropriate heuristics will likely need to be updated for different types of electrodes or different brain regions. To tune the parameters, you can adjust values in the `_schemas.py` file and run just this module on your output (e.g. in sglx_multi_run_pipeline, set run_CatGT=False and comment out all modules except noise_templates).
10 |
11 | (2) `id_noise_templates_rf()` uses a [random forest classifier](https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html) trained on manually annotated templates. A pickle file containing the classifier object is included in this repository. A PyQt-based app (`template_classifier_app.py`) is available if you'd like to train your own classifier.
12 |
13 | Because there's so much variation in the shape of noise templates, we've found it hard to get the false negative rate down to zero with either approach (i.e., there are always some obvious noise units that pass through). Therefore, we still need a manual curation step to remove the remaining noise units. Any suggestions for how to improve the classifier's performance are welcome.
14 |
15 | Running
16 | -------
17 | ```
18 | python -m ecephys_spike_sorting.modules.noise_templates --input_json --output_json
19 | ```
20 | Two arguments must be included:
21 | 1. The location of an existing file in JSON format containing a list of paths and parameters.
22 | 2. The location to write a file in JSON format containing information generated by the module while it was run.
23 |
24 | See the `_schemas.py` file for detailed information about the contents of the input JSON.
25 |
26 |
27 | Input data
28 | ----------
29 | - **Kilosort outputs** : includes spike times, spike clusters, templates, etc.
30 |
31 |
32 | Output data
33 | -----------
34 | - **cluster_group.tsv** : labels for each cluster in spike_clusters.npy
--------------------------------------------------------------------------------
/ecephys_spike_sorting/modules/tPrime_helper/README.md:
--------------------------------------------------------------------------------
1 | TPrime Helper
2 | ==============
3 | Python wrapper for TPrime, a C++ command line application written by Bill Karsh, for mapping times between data streams collected by SpikeGLX. Each stream must have a common sync signal, and the edge times from that sync signal must be extracted by CatGT. See the README for TPrime for details about parameters.
4 |
5 | Dependencies
6 | ------------
7 | [TPrime](https://billkarsh.github.io/SpikeGLX/#tprime)
8 |
9 | Running
10 | -------
11 | ```
12 | python -m ecephys_spike_sorting.modules.TPrime_helper --input_json --output_json
13 | ```
14 | Two arguments must be included:
15 | 1. The location of an existing file in JSON format containing a list of paths and parameters.
16 | 2. The location to write a file in JSON format containing information generated by the module while it was run.
17 |
18 | See the `_schemas.py` file for detailed information about the contents of the input JSON.
19 |
20 | TPrime Helper parses toStream_sync_params to find the SYNC file, assuming the data has been saved and processed through CatGT using probe folders. So, a file of sync edges from a probe is expected to be in the probe folder, for example
21 |
22 | \catgt_MyRun_g0\MyRun_g0_imec1
23 |
24 | spike_times.npy files for all probes for which there are sync edge files are read in and translated to seconds using the sample rate stored in params.py. (Note: the sample rate written out by rezToPhy is replaced by a value with more significant figures in the kilosort_helper module).
25 |
26 | The CatGT command line is parsed to find all extracted edge files:
27 |
28 | - All SY extracted (except tostream) are designated fromstreams
29 |
30 | - For all probes not specified as the tostream, the file \imecN_ks2\spike_times_sec.txt is designated an events file
31 |
32 | - If tostream is not XA or XD, niStream_sync is designated a fromstream
33 |
34 | - All XA and XD extracted which are not specified as the tostream or the niStream_sync are designated events files
35 |
36 | These parameters are used to build the TPrime command line.
37 |
38 | TPrime outputs the corrected times in text files. After it is run, these output times are resaved in npy format, as floats.
39 |
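The text-to-npy conversion amounts to something like the following sketch (file names taken from the outputs listed below):

```python
import numpy as np

# Re-save TPrime's adjusted spike times (text, in seconds) as a float npy file.
adjusted = np.loadtxt('spike_times_adj.txt', dtype='float64')
np.save('spike_times_adj.npy', adjusted)
```
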
40 | Notes:
41 | - Since auxiliary data is usually collected at lower sample rates, a probe should be selected as the reference tostream
42 |
43 | - Stated above, but worth repeating: this helper module assumes data stored in the probe folder format, with phy output written to \imecN_ks2, sync edge files for each probe stored in the probe folder, etc. Any other file organization will cause errors.
44 |
45 | Parameters
46 | ----------
47 | - sync_period : in seconds, = measured sync (pulser) period (or 1.0 if you have not measured it)
48 | - toStream_sync_params : specify sync edges for tostream
49 | - niStream_sync_params : specify sync edges for ni stream
50 |
51 | Input data
52 | ----------
53 | - **CatGT extracted edge files for sync and events** : text files generated by CatGT with sync event edge times, in sec.
54 | - **spike_times.npy files** : for each probe
55 |
56 | Output data
57 | -----------
58 | - **spike_times_adj.txt, spike_times_adj.npy** : adjusted spike times for each from stream, saved as both text and npy files
--------------------------------------------------------------------------------
/ecephys_spike_sorting/scripts/README.md:
--------------------------------------------------------------------------------
1 | # Batch Processing Scripts
2 |
3 | Each module can be run on its own using the following syntax:
4 |
5 | ```
6 | python -m ecephys_spike_sorting.modules. --input_json --output_json
7 | ```
8 |
 9 | However, you'll typically want to run several modules in order, iterating over multiple sets of input files. The scripts in this directory provide examples of how to implement batch processing, as well as how to auto-generate the required JSON files containing module parameters.
10 |
11 | ## Getting Started
12 |
13 | The first thing you'll want to do is edit `create_input_json.py`. The input JSON file tells each module where to find the required files, where to save its outputs, and what parameters to use for processing. We've tried to avoid hard-coding any paths or file names within the modules themselves (with the exception of the names of files generated by Kilosort).
14 |
15 | The `createInputJson` function has one required input argument, the location for writing a JSON file. Input data location and output location must also be specified:
16 | 1. If the script is running CatGT, npx_directory must be the parent directory containing the run directory. The input must also include the gate, triggers to concatenate, and which probe to process.
17 | 2. If the script is only running sorting and postprocessing, the npx directory is the directory containing the binary *.imec0.ap.bin files (for probe 0).
18 | 3. `kilosort_output_directory`: where the output phy files will be written, along with results from the metrics calculations.
19 |
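As a sketch, a call might look like the following; the keyword names mirror the schema fields and may differ slightly from your local copy of `create_input_json.py`:

```python
from create_input_json import createInputJson

# Hypothetical call: first argument is the JSON path to write,
# keyword arguments override the default parameters.
info = createInputJson('probe0_input.json',
                       npx_directory='/path/to/run_folder',
                       kilosort_output_directory='/path/to/ks_output')
```
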
20 |
21 | `createInputJson` contains a dictionary entry for each module's parameters, as well as four entries for parameters that span modules. You should browse these to ensure they are a good match to your data.
22 |
23 | Documentation on input parameters can be found in the `_schemas.py` file for each module, as well as in `schemas.py` in the "common" directory.
24 |
25 | Once you've updated the parameters dictionary, you can edit `sglx_multi_run_pipeline.py` or `sglx_filelist_pipeline.py`. Here, you'll want to set the runs or data files to process, the output location, and the location where JSON files can be saved. If running CatGT, make sure to set whether or not there is auxiliary NI data, and edit the 'ni_extract_strings' appropriately. Finally, comment out the names of the modules you don't want to use.
26 |
27 | Then, you can run the script using `pipenv` (assuming you've already created a pipenv virtual environment based on the steps in the main [README](../../README.md) file):
28 |
29 | #### Linux / macOS
30 |
31 | ```shell
32 | $ pipenv shell
33 | (ecephys_spike_sorting) $ python ecephys_spike_sorting/scripts/sglx_multi_run_pipeline.py
34 | (ecephys_spike_sorting) $ exit
35 | ```
36 |
37 | #### Windows
38 |
39 | ```shell
40 | $ pipenv shell
41 | (.venv) $ python ecephys_spike_sorting\scripts\sglx_multi_run_pipeline.py
42 | (.venv) $ exit
43 | ```
44 |
45 | ## Available Scripts
46 |
47 | `sglx_multi_run_pipeline.py` : runs multiple SpikeGLX runs, handles NI data and multiple probes per run, and runs TPrime to generate corrected event times for all streams.
48 |
49 | `sglx_filelist_pipeline.py` : runs sorting and postprocessing for a list of individual files, with no preprocessing. Most useful for collections of single-probe runs that do not include any auxiliary data.
50 |
51 |
52 |
53 |
--------------------------------------------------------------------------------
/ecephys_spike_sorting/modules/kilosort_helper/main_KS2_KS25.m:
--------------------------------------------------------------------------------
1 | function main_KS2_KS25( KSver, remDup, finalSplits, labelGood, saveRez )
2 | % paths will get set by the pipeline
3 | % config file created by pipeline and saved in directory with master
4 | % channel map created by pipeline, path specified in config
5 |
6 | rP.KSver = KSver;
7 | rP.remDup = remDup;
8 | rP.finalSplits = finalSplits;
9 | rP.labelGood = labelGood;
10 | rP.saveRez = saveRez;
11 |
12 | fprintf( 'main_KS2_KS25 params: \n');
13 | disp(rP)
14 |
15 | if ~or(strcmp(rP.KSver, '2.5'), strcmp(rP.KSver, '2.0'))
16 | fprintf('unsupported kilosort version\n');
17 | return;
18 | end
19 |
20 | run('kilosort2_config_file.m')
21 |
22 |
23 |
24 | if strcmp(rP.KSver, '2.5')
25 | % main parameter changes from Kilosort2 to v2.5
26 | ops.sig = 20; % spatial smoothness constant for registration
27 | ops.fshigh = 300; % high-pass more aggressively
28 | ops.nblocks = 5; % blocks for registration. 0 turns it off, 1 does rigid registration. Replaces "datashift" option.
29 | % random number generator is used in datashift and to set order of batches
30 | % set seed and initialize here.
31 | iseed = 1;
32 | rng(iseed);
33 | end
34 |
35 | % find the binary file
36 | rootZ = ops.rootZ;
37 | ops.fbinary = fullfile(ops.datafile);
38 |
39 | % print out ops
40 | ops
41 |
42 | % preprocess data to create temp_wh.dat
43 | rez = preprocessDataSub(ops);
44 |
45 | if strcmp(rP.KSver, '2.5')
46 | % data registration step
47 | rez = datashift2(rez, 1); % last input is for shifting data
48 | % main tracking and template matching algorithm
49 | rez = learnAndSolve8b(rez, iseed);
50 | elseif strcmp(rP.KSver, '2.0')
51 | % time-reordering as a function of drift
52 | rez = clusterSingleBatches(rez);
53 | rez = learnAndSolve8b(rez);
54 | end
55 |
56 | % OPTIONAL: remove double-counted spikes - solves issue in which individual spikes are assigned to multiple templates.
57 | % See issue 29: https://github.com/MouseLand/Kilosort/issues/29
58 | if rP.remDup
59 | rez = remove_ks2_duplicate_spikes(rez);
60 | end
61 |
62 | % final merges
63 | rez = find_merges(rez, 1);
64 |
65 | % final splits by SVD
66 | if rP.finalSplits
67 | rez = splitAllClusters(rez, 1);
68 | end
69 |
70 | % decide on cutoff
71 | rez = set_cutoff(rez);
72 |
73 | % eliminate widely spread waveforms (likely noise)
74 | if rP.labelGood
75 | rez.good = get_good_units(rez);
76 | end
77 |
78 | fprintf('found %d good units \n', sum(rez.good>0))
79 |
80 | % write to Phy
81 | fprintf('Saving results to Phy \n')
82 | rezToPhy(rez, rootZ);
83 |
84 | if rP.saveRez
85 | %% if you want to save the results to a Matlab file...
86 |
87 | % discard features in final rez file (too slow to save)
88 | rez.cProj = [];
89 | rez.cProjPC = [];
90 |
91 | % final time sorting of spikes, for apps that use st3 directly
92 | [~, isort] = sortrows(rez.st3);
93 | rez.st3 = rez.st3(isort, :);
94 |
95 | % Ensure all GPU arrays are transferred to CPU side before saving to .mat
96 | rez_fields = fieldnames(rez);
97 | for i = 1:numel(rez_fields)
98 | field_name = rez_fields{i};
99 | if(isa(rez.(field_name), 'gpuArray'))
100 | rez.(field_name) = gather(rez.(field_name));
101 | end
102 | end
103 |
104 | % save final results as rez2
105 | fprintf('Saving final results in rez2 \n')
106 | fname = fullfile(rootZ, 'rez2.mat');
107 | save(fname, 'rez', '-v7.3');
108 | end
109 |
110 | end
--------------------------------------------------------------------------------
/ecephys_spike_sorting/modules/noise_templates/_schemas.py:
--------------------------------------------------------------------------------
1 | from argschema import ArgSchema, ArgSchemaParser
2 | from argschema.schemas import DefaultSchema
3 | from argschema.fields import Nested, InputDir, String, Float, Dict, Int, Boolean, NumpyArray
4 | from ...common.schemas import EphysParams, Directories
5 |
6 | class NoiseWaveformParams(DefaultSchema):
7 | classifier_path = String(required=True, help='Path to pre-trained waveform classifier')
8 |
9 | smoothed_template_amplitude_threshold = Float(default=0.2, help='Fraction of max amplitude for calculating spread')
10 | template_amplitude_threshold = Float(default=0.2, help='Fraction of max amplitude for calculating spread')
11 | smoothed_template_filter_width = Int(default=2, help='Smoothing window for calculating spread')
12 | min_spread_threshold = Int(default=2, help='Minimum number of channels for a waveform to be considered good')
13 | mid_spread_threshold = Int(default=16, help='Over this channel spread, waveform shape must be considered')
14 | max_spread_threshold = Int(default=25, help='Maximum channel spread for a good unit')
15 | smoothed_template_filter_width_um = Int(default=15, help='Smoothing window for calculating spread')
16 | min_spread_threshold_um = Int(default=10, help='Minimum spatial spread of a waveform to be considered good')
17 | mid_spread_threshold_um = Int(default=50, help='Over this channel spread, waveform shape must be considered')
18 | max_spread_threshold_um = Int(default=300, help='Maximum channel spread for a good unit')
19 |
20 | channel_amplitude_thresh = Float(default=0.3, help='Fraction of max amplitude for considering channels in spatial peak detection')
21 | peak_height_thresh = Float(default=0.2, help='Minimum height for spatial peak detection')
22 | peak_prominence_thresh = Float(default=0.2, help='Minimum prominence for spatial peak detection')
23 | peak_channel_range = Int(default=24, help='Range of channels for detecting spatial peaks')
24 | peak_channel_range_um = Int(default=150, help='Range in um to check for spatial peaks')
25 | peak_locs_std_thresh = Float(default=3.5, help='Maximum standard deviation of peak locations for good units')
26 |
27 | min_temporal_peak_location = Int(default=10, help='Minimum peak index for good unit')
28 | max_temporal_peak_location = Int(default=30, help='Maximum peak index for good unit')
29 |
30 | template_shape_channel_range = Int(default=12, help='Range of channels for checking template shape')
31 | wavelet_index = Int(default=2, help='Wavelet index for noise template shape detection')
32 | min_wavelet_peak_height = Float(default=0.0, help='Minimum wavelet peak height for good units')
33 | min_wavelet_peak_loc = Int(default=15, help='Minimum wavelet peak location for good units')
34 | max_wavelet_peak_loc = Int(default=25, help='Maximum wavelet peak location for good units')
35 |
36 | multiprocessing_worker_count = Int(default=4, help='Number of workers to use for spatial peak calculation')
37 | use_random_forest = Boolean(default=False, help='Set to true to use the random forest classifier; false uses the heuristic noise id')
38 |
39 | class InputParameters(ArgSchema):
40 |
41 | noise_waveform_params = Nested(NoiseWaveformParams)
42 | ephys_params = Nested(EphysParams)
43 | directories = Nested(Directories)
44 |
45 | class OutputSchema(DefaultSchema):
46 |
47 | input_parameters = Nested(InputParameters,
48 | description=("Input parameters the module "
49 | "was run with"),
50 | required=True)
51 |
52 | class OutputParameters(OutputSchema):
53 |
54 | execution_time = Float()
55 |
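
To make the spread parameters concrete, here is a minimal sketch of the kind of heuristic they feed (illustrative only, not the module's exact implementation): count the channels whose peak-to-peak template amplitude exceeds `template_amplitude_threshold` times the maximum.

```python
import numpy as np

def template_spread_sketch(template, amplitude_threshold=0.2):
    """Illustrative channel spread of a (samples x channels) template."""
    ptp = template.max(axis=0) - template.min(axis=0)  # peak-to-peak per channel
    return int(np.sum(ptp > amplitude_threshold * ptp.max()))
```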
--------------------------------------------------------------------------------
/ecephys_spike_sorting/modules/automerging/automerging.py:
--------------------------------------------------------------------------------
1 | import os
2 | import pandas as pd
3 | import numpy as np
4 |
 5 | from .metrics import compare_templates, make_interp_temp, compute_isi_score
6 | from .merges import compute_overall_score, ID_merge_groups, make_merges
7 | from ...common.spike_template_helpers import find_depth
8 |
9 | def automerging(spike_times, spike_clusters, clusterIDs, cluster_quality, templates, params):
10 |
11 | min_t = np.min(spike_times)
12 | max_t = np.max(spike_times)
13 |
14 | depths = np.zeros((clusterIDs.size,))
15 |
16 | is_noise = np.zeros(cluster_quality.shape, dtype=bool)
17 | is_noise[cluster_quality == 'noise'] = True
18 |
19 | for idx, clusterID in enumerate(clusterIDs):
20 |
21 | template = templates[clusterID,:,:]
22 | depths[idx] = find_depth(template)
23 |
24 | sorted_by_depth = np.argsort(depths)
25 | clusterIDs = clusterIDs[sorted_by_depth]
26 | depths = depths[sorted_by_depth]
27 | is_good = np.invert(is_noise[sorted_by_depth])
28 |
29 | comparison_matrix = np.zeros((depths.size, depths.size, 5))
30 |
31 | for i in range(0,depths.size):
32 | for j in range(i+1,depths.size):
33 | if np.abs(depths[i] - depths[j]) <= params['distance_to_compare'] and is_good[i] and is_good[j]:
34 | comparison_matrix[i,j,0] = 1
35 |
36 | print('Total comparisons: ' + str(np.where(comparison_matrix[:,:,0] == 1)[0].size))
37 |
38 | print('Calculating initial metrics...')
39 |
40 | max_time = np.max(spike_times)
41 |
42 | for i in range(0,depths.size):
43 |
44 | if is_good[i]:
45 |
46 | temp1 = make_interp_temp(templates,[clusterIDs[i]]) #
47 | times1 = spike_times[spike_clusters == clusterIDs[i]]
48 |
49 | for j in range(i+1,depths.size):
50 |
51 | if comparison_matrix[i,j,0] == 1:
52 |
53 | temp2 = make_interp_temp(templates, [clusterIDs[j]]) #
54 | times2 = spike_times[spike_clusters == clusterIDs[j]]
55 |
56 | rms, offset_distance = compare_templates(temp1, temp2) #
57 | # overlap = percent_overlap(times1, times2, min_t, max_t, 50) #
58 | cISI_score, score_weight, ISI1, ISI2, cISI, rcISI, another_score = compute_isi_score(times1, times2, max_time)
59 |
60 | comparison_matrix[i,j,1] = np.max(rms)
61 | comparison_matrix[i,j,2] = another_score
62 | comparison_matrix[i,j,3] = cISI_score
63 |
64 | overall_score, i_index, j_index = compute_overall_score(comparison_matrix)
65 |
66 | comparison_matrix[:,:,4] = 0
67 |
68 | for index in np.arange(overall_score.size) :
69 |
70 | if overall_score[index] > params['merge_threshold']:
71 |
72 | comparison_matrix[i_index[index],j_index[index],4] = 1
73 |
74 |
75 | print('Total merges = ' + str(np.where(comparison_matrix[:,:,4] == 1)[0].size))
76 | print(' ')
77 |
78 | groups = ID_merge_groups(comparison_matrix[:,:,4])
79 | clusters = np.copy(spike_clusters)
80 | clusters = make_merges(groups, clusters, spike_clusters, clusterIDs)
81 |
82 | print(' Total clusters = ' + str(np.unique(clusters).size))
83 |
84 | cluster_quality = []
85 | cluster_index = []
86 |
87 | for idx, ID in enumerate(np.unique(clusters)):
88 |
89 | cluster_index.append(ID)
90 |
91 | if ID > np.max(clusterIDs):
92 | cluster_quality.append(1) # good
93 | else:
94 | if is_good[np.where(clusterIDs == ID)[0]]:
95 | cluster_quality.append(1) # good
96 | else:
97 | cluster_quality.append(-1) # noise
98 |
99 | return clusters, cluster_index, cluster_quality
--------------------------------------------------------------------------------
/ecephys_spike_sorting/modules/mean_waveforms/README.md:
--------------------------------------------------------------------------------
1 | Mean Waveforms
2 | ==============
 3 | Extracts mean waveforms and computes waveform metrics from raw data, given spike times and cluster IDs.
4 |
5 | Dependencies
6 | ------------
7 | [C_Waves](http://billkarsh.github.io/SpikeGLX/#post-processing-tools)
8 |
9 | Mean Waveform Calculation
10 | =========================
11 |
12 | **In the original Allen Institute implementation:**
13 | Computes waveforms separately for individual epochs, as well as for the entire experiment. If no epochs are specified, waveforms are selected randomly from the entire recording. Waveform standard deviation is currently computed, but not saved.
14 |
15 | **In the Janelia revised implementation:**
16 | Computes waveforms using Bill Karsh's command line tool C_Waves. This version does not support epochs; spikes are drawn uniformly from the entire recording. The SNR is calculated over a disk of recording sites, and is given by:
17 |
18 | ```
19 | SNR = (Vmax - Vmin on the peak channel) / (2 * sqrt(variance))
20 | variance = (1 / (N - degrees of freedom)) * sum(residuals^2)
21 | residuals = channel x sample array of (raw data - mean)
22 | ```
23 |
24 | The variance and residuals are calculated only over the first 15 points in the waveform -- this gives a measure of noise that is independent of the variation in amplitude and shape of the spikes included in the cluster.
25 |
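In numpy terms, the calculation sketched above could look like the following (illustrative only, restricted to the peak channel for brevity; C_Waves is the authoritative implementation):

```python
import numpy as np

def snr_sketch(raw_snippets, mean_waveform, n_baseline=15):
    """raw_snippets: spikes x samples array from the peak channel."""
    # Residuals over the first n_baseline points only, so the noise estimate
    # is independent of spike amplitude and shape variation.
    residuals = raw_snippets[:, :n_baseline] - mean_waveform[:n_baseline]
    dof = 1  # degrees of freedom lost to the mean
    variance = np.sum(residuals ** 2) / (residuals.size - dof)
    return (mean_waveform.max() - mean_waveform.min()) / (2 * np.sqrt(variance))
```
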
26 | The radius of the disk, given in number of sites, is an input parameter to C_Waves. **create_input_json.py** takes as a parameter the radius specified in um (c_Waves_snr_um) and translates it into sites for the probe type read in the SpikeGLX meta file.
27 |
28 | The C_Waves implementation is very efficient. It is turned on in **create_input_json.py** by setting:
29 |
30 | ```
31 | use_C_Waves : True
32 | ```
33 |
34 | Waveform Metric Calculation
35 | ===========================
36 |
37 | Metrics are computed for every waveform, and include features of the 1D peak-channel waveform and the 2D waveform centered on the soma location.
38 |
39 | 
40 |
41 | **1D waveform features**: Waveform duration, peak-trough ratio, repolarization slope, and recovery slope.
42 |
43 | 
44 |
45 | **Example 2D waveform**: Signals from channels along one column of the probe are smoothed with a moving average to create the 2D waveform. Green dots indicate the location of the waveform trough on each channel.
46 |
47 | 
48 |
49 | **2D waveform features**: Waveform spread, velocity above the soma, and velocity below the soma.
50 |
51 | Source: [Jia et al. (2019) "High-density extracellular probes reveal dendritic backpropagation and facilitate neuron classification." _J Neurophys_ **121**: 1831-1847](https://doi.org/10.1152/jn.00680.2018)
52 |
53 |
54 | Running
55 | -------
56 | ```
57 | python -m ecephys_spike_sorting.modules.mean_waveforms --input_json --output_json
58 | ```
59 | Two arguments must be included:
60 | 1. The location of an existing file in JSON format containing a list of paths and parameters.
61 | 2. The location to write a file in JSON format containing information generated by the module while it was run.
62 |
63 | See the `_schemas.py` file for detailed information about the contents of the input JSON.
64 |
65 | Input data
66 | ----------
67 | - **AP band .dat or .bin file** : int16 binary files written by [Open Ephys](https://github.com/open-ephys/plugin-GUI), [SpikeGLX](https://github.com/billkarsh/spikeglx), or the `extract_from_npx` module.
68 | - **Kilosort outputs** : includes spike times, spike clusters, cluster quality, etc.
69 |
70 |
71 | Output data
72 | -----------
73 | - **mean_waveforms.npy** : numpy file containing mean waveforms for clusters across all epochs
74 | - **waveform_metrics.csv** : CSV file containing metrics for each waveform
--------------------------------------------------------------------------------
/ecephys_spike_sorting/modules/quality_metrics/README.md:
--------------------------------------------------------------------------------
1 | # Quality Metrics
2 |
3 | Computes quality metrics for sorted units. Similar to the `mean_waveforms` module, this module can calculate metrics separately for individual epochs. If no epochs are specified, metrics are computed for the entire recording.
4 |
5 | ## Included Metrics
6 |
 7 | | Metric | Icon | Description | Reference |
 8 | | ------------------ |:------------------------:| -------------------------------------------------- | -----------------|
 9 | | Firing rate | | Mean spike rate in an epoch | |
10 | | Presence ratio | | Fraction of epoch in which spikes are present | |
11 | | ISI violations | ![](images/isi_viol.png) | Rate of refractory-period violations | |
12 | | Amplitude cutoff | ![](images/amp_cut.png) | Estimate of miss rate based on amplitude histogram | |
13 | | Isolation distance | ![](images/isol_dist.png) | Distance to nearest cluster in Mahalanobis space | Schmitzer-Torbert et al. (2005) _Neuroscience_ **131**, 1-11 |
14 | | L-ratio | | | " |
15 | | _d'_ | ![](images/d_prime.png) | Classification accuracy based on LDA | Hill et al. (2011) _J Neurosci_ **31**, 8699-8705 |
16 | | Nearest-neighbors | ![](images/nn_overlap.png) | Non-parametric estimate of unit contamination | Chung et al. (2017) _Neuron_ **95**, 1381-1394 |
17 | | Silhouette score | | Standard metric for cluster overlap | |
18 | | Maximum drift | | Maximum change in spike depth throughout recording | |
19 | | Cumulative drift | | Cumulative change in spike depth throughout recording | |
20 |
21 | ### A Note on Calculations
22 |
23 | For metrics based on waveform principal components (isolation distance, L-ratio, _d'_, and nearest neighbors hit rate and false alarm rate), it is typical to compute the metrics for all pairs of units and report the "worst-case" value. We have found that this tends to under- or over-estimate the degree of contamination when there are large firing rate differences between pairs of units that are being compared. Instead, we compute metrics by sub-selecting spikes from _all_ other units on the same set of channels, which seems to give a more accurate picture of isolation quality. We would appreciate feedback on whether this approach makes sense.
24 |
25 | The regions over which templates are compared and units are considered "close" are set by the parameter 'max_radius_um' in create_input_json. It is set by default to 68 um, which is equivalent to 13 sites on a NP 1.0 probe.
26 |
27 | The %false positive metric derived from ISI violations has been amended from the original to NOT assume that the fraction of false positive spikes << 1. In this case, the fraction of false positives is the root of a quadratic equation; when there is no real root (at a high fraction of false positives), the output fraction of false positives is set to 1.0.
28 |
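A sketch of the quadratic solution (rate definitions follow Hill et al.; the function and variable names are illustrative, not the module's API):

```python
import numpy as np

def isi_false_positive_fraction(spike_times, t_total, isi_threshold=0.0015, min_isi=0.0):
    """Solve C * (1 - C) = r for contamination C instead of assuming C << 1."""
    n_spikes = spike_times.size
    n_violations = np.sum(np.diff(np.sort(spike_times)) < isi_threshold)
    violation_time = 2 * n_spikes * (isi_threshold - min_isi)
    r = (n_violations / violation_time) / (n_spikes / t_total)
    if r > 0.25:  # quadratic has no real root: report full contamination
        return 1.0
    return (1.0 - np.sqrt(1.0 - 4.0 * r)) / 2.0
```
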
29 |
30 | ## Running
31 |
32 | ```
33 | python -m ecephys_spike_sorting.modules.quality_metrics --input_json --output_json
34 | ```
35 | Two arguments must be included:
36 | 1. The location of an existing file in JSON format containing a list of paths and parameters.
37 | 2. The location to write a file in JSON format containing information generated by the module while it was run.
38 |
39 | See the `_schemas.py` file for detailed information about the contents of the input JSON.
40 |
41 |
42 | ## Input data
43 |
44 | - **Kilosort outputs** : includes spike times, spike clusters, cluster quality, etc.
45 |
46 |
47 | ## Output data
48 |
49 | - **metrics.csv** : CSV containing metrics for all units
--------------------------------------------------------------------------------
/ecephys_spike_sorting/modules/pykilosort_helper/_schemas.py:
--------------------------------------------------------------------------------
1 | from argschema import ArgSchema, ArgSchemaParser
2 | from argschema.schemas import DefaultSchema
3 | from argschema.fields import Nested, InputDir, String, Float, Dict, Int, Bool, NumpyArray
4 | from ...common.schemas import EphysParams, Directories, CommonFiles
5 |
6 |
7 | class PyKilosortHelperParameters(DefaultSchema):
8 | preprocessing_function = String(required=True, default='kilosort2', help='Preprocessing function. Valid values: {"kilosort2", "destriping"} ')
9 | alf_location = String(required=False, default='', help='ALF location under the results directory')
10 | copy_fproc = Int(required=False, default=1, help='Copy processed binary to output directory')
11 |     fproc = String(required=False, default=r'D:\kilosort_datatemp\temp_wh.dat',
12 |                    help='Processed data file on a fast SSD')
13 | seed = Int(required=False, default=42, help="seed for deterministic output")
14 | ks2_mode = Bool(required=False, default=False, help='Use ClusterSingleBatches and reorder')
15 | perform_drift_registration = Bool(required=False, default=True, help='Estimate electrode drift and apply registration')
16 | car = Bool(required=False, default=True, help='set to True to perform common average referencing (median subtraction)')
17 | Th = String(required=False, default='[10 4]', help='threshold last pass can be lower')
18 | ThPre = Float(required=False, default=8, help='threshold crossings for pre-clustering (in PCA projection space)')
19 |     lam = Float(required=False, default=10, help='how important is the amplitude penalty (like in Kilosort1: 0 means not used, 10 is average, 50 is a lot)')
20 | AUCsplit = Float(required=False, default=0.9, help='splitting a cluster at the end requires at least this much isolation for each sub-cluster (max=1)')
21 | minFR = Float(required=False, default=1.0/50, help='minimum spike rate (Hz), if a cluster falls below this for too long it gets removed')
22 | momentum = String(required=False, default='[20 400]', help='number of samples to average over (annealed from first to second value)')
23 | sig_datashift = Float(required=True, default=20.0, help='sigma for the Gaussian process smoothing')
24 | sigmaMask = Float(required=False, default=30, help='spatial constant in um for computing residual variance of spike')
25 | fshigh = Float(required=False, allow_none=True, default=300, help='high pass filter frequency')
26 | fslow = Float(required=False, allow_none=True, help='low pass filter frequency')
27 | minfr_goodchannels = Float(required=False, default=0.1, help='minimum firing rate on a "good" channel (0 to skip)')
28 | whiteningRange = Int(required=False, default=32, help='number of channels to use for whitening each channel')
29 |     save_temp_files = Bool(required=False, default=True, help='keep temporary files created while running')
30 |     deterministic_mode = Bool(required=False, default=True, help='make output deterministic by sorting spikes before applying kernels')
31 | output_filename = String(required=False, allow_none=True, help='optionally save registered data to a new binary file')
32 | nblocks = Int(required=False, default=5, help='number of blocks used to segment the probe when tracking drift, 0 == do not track, 1 == rigid, > 1 == non-rigid')
33 | doFilter = Int(required=False, default=0, help='filter if = 1, skip bp filtering if = 0')
34 |
35 |
36 | class InputParameters(ArgSchema):
37 | pykilosort_helper_params = Nested(PyKilosortHelperParameters)
38 | directories = Nested(Directories)
39 | ephys_params = Nested(EphysParams)
40 | common_files = Nested(CommonFiles)
41 |
42 |
43 | class OutputSchema(DefaultSchema):
44 | input_parameters = Nested(InputParameters,
45 | description=("Input parameters the module "
46 | "was run with"),
47 | required=True)
48 |
49 |
50 | class OutputParameters(OutputSchema):
51 | message = String()
52 | execution_time = Float()
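
# Note on the fproc default above: Windows paths in ordinary string literals
# are fragile because sequences like '\t' are escape codes (and '\k' triggers
# an invalid-escape warning), which is why the default uses a raw string.
# Illustrative check only:
if __name__ == '__main__':
    plain = 'D:\kilosort_datatemp\temp_wh.dat'  # '\t' becomes a tab here
    raw = r'D:\kilosort_datatemp\temp_wh.dat'   # backslashes preserved
    assert '\t' in plain and '\t' not in raw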
53 |
--------------------------------------------------------------------------------
/ecephys_spike_sorting/modules/quality_metrics/__main__.py:
--------------------------------------------------------------------------------
1 | from argschema import ArgSchemaParser
2 | import os
3 | import logging
4 | import time
5 | import pathlib
6 |
7 |
8 | import numpy as np
9 | import pandas as pd
10 |
11 | from ...common.utils import load_kilosort_data
12 | from ...common.utils import getFileVersion
13 | from ...common.epoch import get_epochs_from_nwb_file
14 |
15 | from .metrics import calculate_metrics
16 | from .ibl_metrics import calculate_ibl_metrics
17 |
18 |
19 | def calculate_quality_metrics(args):
20 |
21 | print('ecephys spike sorting: quality metrics module')
22 |
23 | start = time.time()
24 |
25 | include_pc_metrics = args['quality_metrics_params']['include_pc_metrics']
26 |
27 |     # make sure we can write an output file
28 |
29 | output_file_args = args['cluster_metrics']['cluster_metrics_file']
30 |
31 | output_file, metrics_version = getFileVersion(output_file_args)
32 |
33 | print("kilosort_output_dir: ")
34 | print(args['directories']['kilosort_output_directory'])
35 | print("Loading data...")
36 |
37 |
38 | try:
39 |
40 |         spike_times, spike_clusters, spike_templates, amplitudes, templates, channel_map, \
41 |             channel_pos, clusterIDs, cluster_quality, cluster_amplitude, pc_features, \
42 |             pc_feature_ind, template_features, spike_positions = \
43 | load_kilosort_data(args['directories']['kilosort_output_directory'], \
44 | args['ephys_params']['sample_rate'], \
45 | use_master_clock = False,
46 | include_pcs = True)
47 |
48 |
49 | metrics = calculate_metrics(spike_times, spike_clusters, spike_templates, amplitudes, channel_map, channel_pos, templates, pc_features, pc_feature_ind, args['quality_metrics_params'])
50 | if args['quality_metrics_params']['include_ibl']:
51 | ibl_metrics = calculate_ibl_metrics(spike_times, spike_clusters, amplitudes, args['quality_metrics_params'], args['ephys_params']['sample_rate'])
52 |
53 | except FileNotFoundError:
54 |
55 | execution_time = time.time() - start
56 |
57 | print(" Files not available.")
58 |
59 | return {"execution_time" : execution_time,
60 | "quality_metrics_output_file" : None}
61 |
62 | if args['quality_metrics_params']['include_ibl']:
63 | # merge allen and ibl metrics
64 | metrics = metrics.merge(ibl_metrics, on='cluster_id', suffixes=('_quality_metrics','_ibl'))
65 |
66 | # build name for waveform_metrics file with matched version
67 | wm_args = args['waveform_metrics']['waveform_metrics_file']
68 | if metrics_version == 0:
69 | wm = wm_args
70 | else:
71 |         # append the version suffix used for the metrics file
72 | wm = os.path.join( pathlib.Path(wm_args).parent, pathlib.Path(wm_args).stem + '_' + repr(metrics_version) + '.csv' )
73 | if os.path.exists(wm):
74 | metrics = metrics.merge(pd.read_csv(wm, index_col=0),
75 | on='cluster_id',
76 | suffixes=('_quality_metrics','_waveform_metrics'))
77 |
78 | print("Saving data...")
79 |
80 | metrics.to_csv(output_file, index=False )
81 |
82 | execution_time = time.time() - start
83 |
84 | print('total time: ' + str(np.around(execution_time,2)) + ' seconds')
85 | print()
86 |
87 | return {"execution_time" : execution_time,
88 | "quality_metrics_output_file" : output_file} # output manifest
89 |
90 |
91 | def main():
92 |
93 | from ._schemas import InputParameters, OutputParameters
94 |
95 | mod = ArgSchemaParser(schema_type=InputParameters,
96 | output_schema_type=OutputParameters)
97 |
98 | output = calculate_quality_metrics(mod.args)
99 |
100 | output.update({"input_parameters": mod.args})
101 | if "output_json" in mod.args:
102 | mod.output(output, indent=2)
103 | else:
104 | print(mod.get_output_json(output))
105 |
106 |
107 | if __name__ == "__main__":
108 | main()
109 |
--------------------------------------------------------------------------------
/ecephys_spike_sorting/scripts/helpers/log_from_json.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | import numpy as np
3 | import os
4 | import json
5 | from datetime import datetime
6 | from pathlib import Path
7 | from tkinter import Tk
8 | from tkinter import filedialog
9 |
10 | # Fill in a predefined row of a log table
11 | # Entries are:
12 | # session name (run_probe); date/time; number of spikes (nTot); number of
13 | # templates (nTemplate); execution times for kilosort, KS postprocessing
14 | # (duplicate removal), noise templates, mean waveforms, and QC calculations
15 | #
16 | def addEntry(modules, jsondir, session_id, logFullPath):
17 |
18 | log_entry = ['None'] * 9
19 | sep = ','
20 |
21 | log_entry[0] = session_id
22 | now = datetime.now()
23 |     log_entry[1] = now.strftime("%m/%d/%Y, %H:%M:%S")  # embedded comma fills both the date_run and time_run columns
24 |
25 | if 'kilosort_helper' in modules:
26 | jsonName = session_id + '-kilosort_helper-output.json'
27 | jsonFile = os.path.join(jsondir, jsonName)
28 | print(jsonFile)
29 | with open(jsonFile) as currJson:
30 | modData = json.load(currJson)
31 | log_entry[2] = repr(modData['nTot'])
32 | log_entry[3] = repr(modData['nTemplate'])
33 | log_entry[4] = '{:.2f}'.format(modData['execution_time'])
34 |
35 | if 'kilosort_postprocessing' in modules:
36 | jsonName = session_id + '-kilosort_postprocessing-output.json'
37 | jsonFile = os.path.join(jsondir, jsonName)
38 | print(jsonFile)
39 | with open(jsonFile) as currJson:
40 | modData = json.load(currJson)
41 | log_entry[5] = '{:.2f}'.format(modData['execution_time'])
42 |
43 | if 'noise_templates' in modules:
44 | jsonName = session_id + '-noise_templates-output.json'
45 | jsonFile = os.path.join(jsondir, jsonName)
46 | print(jsonFile)
47 | with open(jsonFile) as currJson:
48 | modData = json.load(currJson)
49 | log_entry[6] = '{:.2f}'.format(modData['execution_time'])
50 |
51 | if 'mean_waveforms' in modules:
52 | jsonName = session_id + '-mean_waveforms-output.json'
53 | jsonFile = os.path.join(jsondir, jsonName)
54 | print(jsonFile)
55 | with open(jsonFile) as currJson:
56 | modData = json.load(currJson)
57 | log_entry[7] = '{:.2f}'.format(modData['execution_time'])
58 |
59 | if 'quality_metrics' in modules:
60 | jsonName = session_id + '-quality_metrics-output.json'
61 | jsonFile = os.path.join(jsondir, jsonName)
62 | print(jsonFile)
63 | with open(jsonFile) as currJson:
64 | modData = json.load(currJson)
65 | log_entry[8] = '{:.2f}'.format(modData['execution_time'])
66 |
67 | log_entry_str = sep.join(log_entry)
68 | with open(logFullPath, 'a') as log:
69 | log.write(log_entry_str + '\n')
70 |
71 |
72 | # write header to file
73 | def writeHeader(logFullPath):
74 | with open(logFullPath, 'w') as log:
75 | log.write('session_id,date_run,time_run,ntot,nTemplate,KS2_time,KS_postprocess_time,noise_template_time,mean_waveform_time,QC_time\n')
76 |
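# Example of one resulting row (illustrative values only); note how the
# timestamp's embedded comma supplies both the date_run and time_run columns:
# demo_run_g0_imec0,09/27/2019, 14:32:11,8500000,412,1523.10,42.50,12.33,310.75,95.20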
77 |
78 | # For testing: prompt the user for a kilosort_helper-output.json,
79 | # get the directory and session name from it, and write a test log file
80 | #
81 | def main():
82 |
83 | # Get file from user
84 | root = Tk() # create the Tkinter widget
85 | root.withdraw() # hide the Tkinter root window
86 |
87 | # Windows specific; forces the window to appear in front
88 | root.attributes("-topmost", True)
89 |
90 | fullPath = Path(filedialog.askopenfilename(title="Select kilosort_helper-output.json"))
91 | root.destroy() # destroy the Tkinter widget
92 |
93 | jsondir, jsonName = os.path.split(fullPath)
94 | suffix_start = jsonName.find('-kilosort')
95 | session_id = jsonName[0:suffix_start]
96 | testLogPath = os.path.join(jsondir, 'testlog.csv')
97 | writeHeader(testLogPath)
98 |
99 | modules = ['kilosort_helper','kilosort_postprocessing','noise_templates','mean_waveforms','quality_metrics']
100 | addEntry(modules, jsondir, session_id, testLogPath)
101 |
102 |
103 | if __name__ == "__main__":
104 | main()
105 |
--------------------------------------------------------------------------------
/ecephys_spike_sorting/modules/ks4_helper/_schemas.py:
--------------------------------------------------------------------------------
1 | from argschema import ArgSchema, ArgSchemaParser
2 | from argschema.schemas import DefaultSchema
3 | from argschema.fields import Nested, InputDir, String, Float, Dict, Int, Bool, NumpyArray
4 | from ...common.schemas import EphysParams, Directories, CommonFiles
5 |
6 | class ks4_params(DefaultSchema):
7 |     Th_universal = Float(required=False, default=9, help='spike detection threshold for universal templates')
8 |     Th_learned = Float(required=False, default=8, help='spike detection threshold for learned templates')
9 | Th_single_ch = Float(required=False, default=8, help='threshold crossings for pre-clustering (in PCA projection space)')
10 |     duplicate_spike_ms = Float(required=False, default=0.25, help='Time window in ms within which subsequent spikes from the same cluster are assumed to be artifacts. A value of 0 disables this step.')
11 | nblocks = Int(required=False, default=5, help='number of blocks used to segment the probe when tracking drift, 0 == do not track, 1 == rigid, > 1 == non-rigid')
12 |     sig_interp = Float(required=True, default=20.0, help='sigma for the Gaussian interpolation in drift correction (um)')
13 | whitening_range = Int(required=False, default=32, help='number of channels to use for whitening each channel')
14 | min_template_size = Float(required=False, default=10, help='Width in um of Gaussian envelope for template weight')
15 | template_sizes = Int(required=False, default=5, help='number of template sizes, multiples of min_template size')
16 |     templates_from_data = Bool(required=False, default=True, help='set to True to extract templates from data')
17 | tmin = Float(required=False, default=0, help='time in sec to start processing')
18 | tmax = Float(required=False, default=-1, help='time in sec to end processing; if < 0, set to inf ')
19 | nearest_chans = Int(required=False, default=10, help='Number of nearest channels to consider when finding local maxima during spike detection.')
20 | nearest_templates = Int(required=False, default=100, help='Number of nearest spike template locations to consider when finding local maxima during spike detection.')
21 | ccg_threshold = Float(required=False, default=0.25, help='Fraction of refractory period violations that are allowed in the CCG compared to baseline; used to perform splits and merges. ')
22 | acg_threshold = Float(required=False, default=0.20, help='Fraction of refractory period violations that are allowed in the ACG compared to baseline; used to assign "good" units. ')
23 | template_seed = Int(required=False, default=0, help='seed to pick which batches are used for finding universal templates')
24 | cluster_seed = Int(required=False, default=0, help='start seed for clustering')
25 |
26 | class KS4HelperParameters(DefaultSchema):
27 | do_CAR = Bool(required=False, default=True, help='set to True to perform common average referencing (median subtraction)')
28 | save_extra_vars = Bool(required=False, default=False, help='If true, save Wall and pc features in save_to_phy ')
29 | ks_make_copy = Bool(required=False, default=False, help='If true, make a copy of the original KS output')
30 |     save_preprocessed_copy = Bool(required=False, default=False, help='If true, make a copy of the preprocessed data')
31 | doFilter = Int(required=False, default=0, help='filter if = 1, skip bp filtering if = 0')
32 |     fproc = String(required=False, default=r'D:\kilosort_datatemp\temp_wh.dat', help='Processed data file on a fast SSD')
33 | fshigh = Float(required=False, allow_none=True, default=300, help='high pass filter frequency')
34 | fslow = Float(required=False, allow_none=True, default=10000, help='low pass filter frequency')
35 | ks4_params = Nested(ks4_params, required=True, help='Parameters used to auto-generate a Kilosort config file')
36 |
37 |
38 | class InputParameters(ArgSchema):
39 | ks4_helper_params = Nested(KS4HelperParameters)
40 | directories = Nested(Directories)
41 | ephys_params = Nested(EphysParams)
42 | common_files = Nested(CommonFiles)
43 |
44 |
45 | class OutputSchema(DefaultSchema):
46 | input_parameters = Nested(InputParameters,
47 | description=("Input parameters the module "
48 | "was run with"),
49 | required=True)
50 |
51 |
52 | class OutputParameters(OutputSchema):
53 | message = String()
54 | execution_time = Float()
55 |
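# The ks4_params field names mirror keys in a Kilosort4 settings dict, so a
# runner can forward them directly. Minimal illustrative sketch only:
# n_chan_bin, the data path, and the commented-out call are assumptions about
# the Kilosort4 Python API, not part of this module.
if __name__ == '__main__':
    settings = {
        'n_chan_bin': 385,     # channels in the binary file (placeholder)
        'Th_universal': 9.0,   # defaults from the schema above
        'Th_learned': 8.0,
        'nblocks': 5,
        'sig_interp': 20.0,
        'whitening_range': 32,
    }
    # from kilosort import run_kilosort
    # run_kilosort(settings=settings, filename=r'D:\data\run1_g0_t0.imec0.ap.bin')
    print(settings)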
--------------------------------------------------------------------------------
/ecephys_spike_sorting/modules/kilosort_helper/main_kilosort_multiversion.m:
--------------------------------------------------------------------------------
1 | function main_kilosort_multiversion( KSver, remDup, finalSplits, labelGood, saveRez )
2 | % paths will get set by the pipeline
3 | % config file created by pipeline and saved in directory with master
4 | % channel map created by pipeline, path specified in config
5 |
6 | rP.KSver = KSver;
7 | rP.remDup = remDup;
8 | rP.finalSplits = finalSplits;
9 | rP.labelGood = labelGood;
10 | rP.saveRez = saveRez;
11 |
12 | fprintf( 'main_kilosort_multiversion params: \n');
13 | disp(rP)
14 |
15 | if ~(strcmp(rP.KSver, '2.5') || strcmp(rP.KSver, '2.0') || strcmp(rP.KSver, '3.0') )
16 | fprintf('unsupported kilosort version\n');
17 | return;
18 | end
19 |
20 | run('kilosort2_config_file.m')
21 |
22 |
23 |
24 | if strcmp(rP.KSver, '2.5')
25 | % main parameter changes from Kilosort2 to v2.5
26 | ops.sig = 20; % spatial smoothness constant for registration
27 |     ops.fshigh = 300; % high-pass more aggressively
28 | % random number generator is used in datashift and to set order of batches
29 | % set seed and initialize here.
30 | iseed = 1;
31 | rng(iseed);
32 | elseif strcmp(rP.KSver, '3.0')
33 |     % parameter changes inherited from Kilosort 2.5
34 |     ops.sig = 20; % spatial smoothness constant for registration
35 |     ops.fshigh = 300; % high-pass more aggressively
36 | % main parameter changes from Kilosort2.5 to v3.0
37 | % if using KS3, set ops.Th appropriately from the calling program
38 | % ops.Th = [9, 9];
39 |
40 | end
41 |
42 | % find the binary file
43 | rootZ = ops.rootZ;
44 | ops.fbinary = fullfile(ops.datafile);
45 |
46 | % print out ops
47 | ops
48 |
49 | % preprocess data to create temp_wh.dat
50 | rez = preprocessDataSub(ops);
51 |
52 | if ~strcmp(rP.KSver, '3.0')
53 | % That is for KS2.0 and KS 2.5
54 | if strcmp(rP.KSver, '2.5')
55 | % data registration step
56 | rez = datashift2(rez, 1); % last input is for shifting data
57 | % main tracking and template matching algorithm
58 | rez = learnAndSolve8b(rez, iseed);
59 |     elseif strcmp(rP.KSver, '2.0')
60 | % time-reordering as a function of drift
61 | rez = clusterSingleBatches(rez);
62 | rez = learnAndSolve8b(rez);
63 | end
64 |
65 | % OPTIONAL: remove double-counted spikes - solves issue in which individual spikes are assigned to multiple templates.
66 | % See issue 29: https://github.com/MouseLand/Kilosort/issues/29
67 | if rP.remDup
68 | rez = remove_ks2_duplicate_spikes(rez);
69 | end
70 |
71 | % final merges
72 | rez = find_merges(rez, 1);
73 |
74 |
75 | % final splits by SVD
76 | if rP.finalSplits
77 | rez = splitAllClusters(rez, 1);
78 | end
79 |
80 | % decide on cutoff
81 | rez = set_cutoff(rez);
82 |
83 | % eliminate widely spread waveforms (likely noise); only implemented in KS2.5 release
84 |     if ( rP.labelGood && strcmp(rP.KSver, '2.5'))
85 | rez.good = get_good_units(rez);
86 | end
87 |
88 | fprintf('found %d good units \n', sum(rez.good>0))
89 |
90 | else
91 | % For KS 3.0
92 | rez = datashift2(rez, 1);
93 |
94 | [rez, st3, tF] = extract_spikes(rez);
95 |
96 | rez = template_learning(rez, tF, st3);
97 |
98 | [rez, st3, tF] = trackAndSort(rez);
99 |
100 | rez = final_clustering(rez, tF, st3);
101 |
102 | rez = find_merges(rez, 1);
103 |
104 | rezToPhy2(rez, rootZ);
105 | end
106 |
107 | % write to Phy (the KS3 branch has already written its results via rezToPhy2)
108 | if ~strcmp(rP.KSver, '3.0')
109 |     fprintf('Saving results to Phy \n')
110 |     rezToPhy(rez, rootZ);
111 | end
110 |
111 | if rP.saveRez
112 | %% if you want to save the results to a Matlab file...
113 |
114 | % discard features in final rez file (too slow to save)
115 | rez.cProj = [];
116 | rez.cProjPC = [];
117 |
118 | % final time sorting of spikes, for apps that use st3 directly
119 | [~, isort] = sortrows(rez.st3);
120 | rez.st3 = rez.st3(isort, :);
121 |
122 | if isfield(rez,'xy')
123 | fprintf('sorting xy for KS3\n')
124 | rez.xy = rez.xy(isort,:);
125 | end
126 | % Ensure all GPU arrays are transferred to CPU side before saving to .mat
127 | rez_fields = fieldnames(rez);
128 | for i = 1:numel(rez_fields)
129 | field_name = rez_fields{i};
130 | if(isa(rez.(field_name), 'gpuArray'))
131 | rez.(field_name) = gather(rez.(field_name));
132 | end
133 | end
134 |
135 | % save final results as rez2
136 | fprintf('Saving final results in rez2 \n')
137 | fname = fullfile(rootZ, 'rez2.mat');
138 | save(fname, 'rez', '-v7.3');
139 | end
140 |
141 | end
--------------------------------------------------------------------------------
/docs/aibs_sphinx/static/external_assets/stylesheets/common_layout.css:
--------------------------------------------------------------------------------
1 | /* SCSS variables */
2 | $black: #000000;
3 | $white: #FFFFFF;
4 | $bkgndGray: #CCCCCC;
5 | $aiGreen: #639A3C;
6 |
7 | /* needed to prevent IE8 from always showing a vertical scrollbar */
8 | html {
9 | overflow: auto;
10 | }
11 |
12 | body {
13 | font-size: 9pt;
14 | font-family: arial, sans-serif;
15 | background-color: $white;
16 | margin: 0px;
17 | padding: 0px;
18 | height: 97.2%;
19 | }
20 |
21 | .nobr {
22 | white-space: nowrap;
23 | }
24 |
25 | div.siteContent {
26 | margin: 0px;
27 | padding: 0px;
28 | position: relative;
29 | min-height: 100%;
30 | }
31 |
32 | #search_nav {
33 | border: 0px solid $black;
34 | padding: 12px;
35 |
36 | }
37 |
38 | div.separator {
39 | height: 8px;
40 | }
41 |
42 | img.brandLogo {
43 | float: left;
44 | margin: 10px;
45 | }
46 |
47 | div.clear {
48 | clear: both;
49 | }
50 |
51 | div.aboutContent {
52 | padding: 20px;
53 | }
54 |
55 | #searchArea {
56 | background-color: #ccc;
57 | }
58 |
59 | .microarraySearchBar {
60 | height: 100px;
61 | overflow: hidden;
62 | }
63 |
64 | .searchSelect {
65 | width: 130px;
66 | }
67 |
68 | /* autocomplete elements */
69 | div.auto_complete {
70 | width: auto !important;
71 | background: $white;
72 | z-index: 100;
73 | ul {
74 | border: 1px solid #888;
75 | margin: 0;
76 | padding: 0;
77 | width: 100%;
78 | list-style-type: none;
79 | li {
80 | margin: 0;
81 | padding: 3px;
82 | &.selected {
83 | background-color: #ffb;
84 | }
85 | }
86 | strong.highlight {
87 | color: #800;
88 | margin: 0;
89 | padding: 0;
90 | }
91 | }
92 | }
93 |
94 | a {
95 | text-decoration: none;
96 | cursor: pointer;
97 | }
98 |
99 | div.notice {
100 | width: 700px;
101 | background-color: #ddd;
102 | border: 1px solid #ccf;
103 | margin-left: auto;
104 | margin-right: auto;
105 | padding: 12px;
106 | }
107 |
108 | .info {
109 | color: $black;
110 | font-weight: bold;
111 | }
112 |
113 | .label {
114 | color: #667;
115 | }
116 |
117 | /* format for 'This data is also available as XML' */
118 | div#xml_message {
119 | border-top: 0px;
120 | margin-top: 0px;
121 | margin-left: auto;
122 | margin-right: auto;
123 | text-align: center;
124 | width: 100%;
125 | font-size: 10px;
126 | }
127 |
128 | /******************************
129 | portal overrides
130 | ******************************/
131 |
132 | #pFooter {
133 | height: 10px !important;
134 | }
135 |
136 | .contentBlock {
137 | margin-left: 0px;
138 | margin-right: 0px;
139 | margin-bottom: 10px;
140 | width: 99%;
141 | }
142 |
143 | table.contentBlock {
144 | border: 1px solid grey;
145 | width: 100%;
146 | border-collapse: collapse;
147 | }
148 |
149 | .contentBlock th {
150 | background-color: #31506C;
151 | color: $white;
152 | font-family: Arial, Helvetica, sans-serif;
153 | font-weight: bold;
154 | padding: 5px 12px;
155 | text-align: left;
156 | }
157 |
158 | .contentBlock h2 {
159 | color: $black;
160 | font-family: Arial, Helvetica, sans-serif;
161 | font-size: 12pt;
162 | font-weight: bold;
163 | text-align: left;
164 | }
165 |
166 | .contentBlock h3 {
167 | color: $black;
168 | font-family: Arial, Helvetica, sans-serif;
169 | font-size: 9pt;
170 | font-weight: bold;
171 | text-align: left;
172 | }
173 |
174 | .pageContent {
175 | margin-left: auto;
176 | margin-right: auto;
177 | width: 90%;
178 | }
179 |
180 | .color_ramp {
181 | width: 120px;
182 | height: 20px;
183 | }
184 |
185 | div.ontology_container {
186 | height: 400px;
187 | width: 299px;
188 | border: 1px solid #666;
189 | margin-top: 5px;
190 | overflow: hidden;
191 | position: absolute;
192 | background: $white;
193 | z-index: 100;
194 | cursor: pointer;
195 | }
196 |
197 | .hidden {
198 | display: none;
199 | }
200 |
201 | .highlight {
202 | background-color: rgb(240, 240, 240);
203 | }
204 |
205 | .highlight-python {
206 | border: 1px dashed;
207 | border-color: rgb(102, 153, 204);
208 | background-color: rgb(240, 240, 240);
209 | padding: 2px;
210 | font-size: 11px;
211 | font-family: Courier;
212 | margin: 10px;
213 | line-height: 13px;
214 | overflow: auto;
215 | }
216 |
217 | .document {
218 | padding: 5px;
219 | }
220 |
--------------------------------------------------------------------------------
/ecephys_spike_sorting/modules/kilosort_helper/main_KS2_KS25_KS3.m:
--------------------------------------------------------------------------------
1 | function main_KS2_KS25_KS3( KSver, remDup, finalSplits, labelGood, saveRez )
2 | % paths will get set by the pipeline
3 | % config file created by pipeline and saved in directory with master
4 | % channel map created by pipeline, path specified in config
5 |
6 | rP.KSver = KSver;
7 | rP.remDup = remDup;
8 | rP.finalSplits = finalSplits;
9 | rP.labelGood = labelGood;
10 | rP.saveRez = saveRez;
11 |
12 | fprintf( 'main_KS2_KS25_KS3 params: \n');
13 | disp(rP)
14 |
15 | if ~(strcmp(rP.KSver, '2.5') || strcmp(rP.KSver, '2.0') || strcmp(rP.KSver, '3.0') )
16 | fprintf('unsupported kilosort version\n');
17 | return;
18 | end
19 |
20 | run('kilosort2_config_file.m')
21 |
22 |
23 |
24 | if strcmp(rP.KSver, '2.5')
25 | % main parameter changes from Kilosort2 to v2.5
26 | ops.sig = 20; % spatial smoothness constant for registration
27 |     ops.fshigh = 300; % high-pass more aggressively
28 |     ops.nblocks = 5; % blocks for registration. 0 turns it off, 1 does rigid registration. Replaces the "datashift" option.
29 | % random number generator is used in datashift and to set order of batches
30 | % set seed and initialize here.
31 | iseed = 1;
32 | rng(iseed);
33 | elseif strcmp(rP.KSver, '3.0')
34 |     % parameter changes inherited from Kilosort 2.5
35 |     ops.sig = 20; % spatial smoothness constant for registration
36 |     ops.fshigh = 300; % high-pass more aggressively
37 |     ops.nblocks = 5; % blocks for registration. 0 turns it off, 1 does rigid registration. Replaces the "datashift" option.
38 |
39 | % main parameter changes from Kilosort2.5 to v3.0
40 | % if using KS3, set ops.Th appropriately from the calling program
41 | % ops.Th = [9, 9];
42 |
43 | end
44 |
45 | % find the binary file
46 | rootZ = ops.rootZ;
47 | ops.fbinary = fullfile(ops.datafile);
48 |
49 | % print out ops
50 | ops
51 |
52 | % preprocess data to create temp_wh.dat
53 | rez = preprocessDataSub(ops);
54 |
55 | if ~strcmp(rP.KSver, '3.0')
56 | % That is for KS2.0 and KS 2.5
57 | if strcmp(rP.KSver, '2.5')
58 | % data registration step
59 | rez = datashift2(rez, 1); % last input is for shifting data
60 | % main tracking and template matching algorithm
61 | rez = learnAndSolve8b(rez, iseed);
62 |     elseif strcmp(rP.KSver, '2.0')
63 | % time-reordering as a function of drift
64 | rez = clusterSingleBatches(rez);
65 | rez = learnAndSolve8b(rez);
66 | end
67 |
68 | % OPTIONAL: remove double-counted spikes - solves issue in which individual spikes are assigned to multiple templates.
69 | % See issue 29: https://github.com/MouseLand/Kilosort/issues/29
70 | if rP.remDup
71 | rez = remove_ks2_duplicate_spikes(rez);
72 | end
73 |
74 | % final merges
75 | rez = find_merges(rez, 1);
76 |
77 | % final splits by SVD
78 | if rP.finalSplits
79 | rez = splitAllClusters(rez, 1);
80 | end
81 |
82 | % decide on cutoff
83 | rez = set_cutoff(rez);
84 |
85 | % eliminate widely spread waveforms (likely noise); only implemented in KS2.5 release
86 |     if ( rP.labelGood && strcmp(rP.KSver, '2.5'))
87 | rez.good = get_good_units(rez);
88 | end
89 |
90 | fprintf('found %d good units \n', sum(rez.good>0))
91 |
92 | else
93 | % For KS 3.0
94 | rez = datashift2(rez, 1);
95 |
96 | [rez, st3, tF] = extract_spikes(rez);
97 |
98 | rez = template_learning(rez, tF, st3);
99 |
100 | [rez, st3, tF] = trackAndSort(rez);
101 |
102 | rez = final_clustering(rez, tF, st3);
103 |
104 | rez = find_merges(rez, 1);
105 |
106 | rezToPhy2(rez, rootZ);
107 | end
108 |
109 | % write to Phy (the KS3 branch has already written its results via rezToPhy2)
110 | if ~strcmp(rP.KSver, '3.0')
111 |     fprintf('Saving results to Phy \n')
112 |     rezToPhy(rez, rootZ);
113 | end
112 |
113 | if rP.saveRez
114 | %% if you want to save the results to a Matlab file...
115 |
116 | % discard features in final rez file (too slow to save)
117 | rez.cProj = [];
118 | rez.cProjPC = [];
119 |
120 | % final time sorting of spikes, for apps that use st3 directly
121 | [~, isort] = sortrows(rez.st3);
122 | rez.st3 = rez.st3(isort, :);
123 |
124 | % Ensure all GPU arrays are transferred to CPU side before saving to .mat
125 | rez_fields = fieldnames(rez);
126 | for i = 1:numel(rez_fields)
127 | field_name = rez_fields{i};
128 | if(isa(rez.(field_name), 'gpuArray'))
129 | rez.(field_name) = gather(rez.(field_name));
130 | end
131 | end
132 |
133 | % save final results as rez2
134 | fprintf('Saving final results in rez2 \n')
135 | fname = fullfile(rootZ, 'rez2.mat');
136 | save(fname, 'rez', '-v7.3');
137 | end
138 |
139 | end
--------------------------------------------------------------------------------
/cached_data_manifests/internal_manifest.json:
--------------------------------------------------------------------------------
1 | {
2 | "test.npx": {
3 | "local_path": "test.npx",
4 | "hash_uri": "http://axon:8090/hash/ecephys_spike_sorting/test.npx",
5 | "data_uri": "http://axon:8090/file/ecephys_spike_sorting/test.npx"
6 | },
7 | "probe_info.json": {
8 | "local_path": "probe_info.json",
9 | "hash_uri": "http://axon:8090/hash/ecephys_spike_sorting/probe_info.json",
10 | "data_uri": "http://axon:8090/file/ecephys_spike_sorting/probe_info.json"
11 | },
12 | "channel_states.npy": {
13 | "local_path": "channel_states.npy",
14 | "hash_uri": "http://axon:8090/hash/ecephys_spike_sorting/channel_states.npy",
15 | "data_uri": "http://axon:8090/file/ecephys_spike_sorting/channel_states.npy"
16 | },
17 | "event_timestamps.npy": {
18 | "local_path": "event_timestamps.npy",
19 | "hash_uri": "http://axon:8090/hash/ecephys_spike_sorting/event_timestamps.npy",
20 | "data_uri": "http://axon:8090/file/ecephys_spike_sorting/event_timestamps.npy"
21 | },
22 | "ap_timestamps.npy": {
23 | "local_path": "ap_timestamps.npy",
24 | "hash_uri": "http://axon:8090/hash/ecephys_spike_sorting/ap_timestamps.npy",
25 | "data_uri": "http://axon:8090/file/ecephys_spike_sorting/ap_timestamps.npy"
26 | },
27 | "lfp_timestamps.npy": {
28 | "local_path": "lfp_timestamps.npy",
29 | "hash_uri": "http://axon:8090/hash/ecephys_spike_sorting/lfp_timestamps.npy",
30 | "data_uri": "http://axon:8090/file/ecephys_spike_sorting/lfp_timestamps.npy"
31 | },
32 | "continuous_ap_pre.dat": {
33 | "local_path": "continuous_ap_pre.dat",
34 | "hash_uri": "http://axon:8090/hash/ecephys_spike_sorting/continuous_ap_pre.dat",
35 | "data_uri": "http://axon:8090/file/ecephys_spike_sorting/continuous_ap_pre.dat"
36 | },
37 | "continuous_ap_post.dat": {
38 | "local_path": "continuous_ap_post.dat",
39 | "hash_uri": "http://axon:8090/hash/ecephys_spike_sorting/continuous_ap_post.dat",
40 | "data_uri": "http://axon:8090/file/ecephys_spike_sorting/continuous_ap_post.dat"
41 | },
42 | "continuous_lfp_pre.dat": {
43 | "local_path": "continuous_lfp_pre.dat",
44 | "hash_uri": "http://axon:8090/hash/ecephys_spike_sorting/continuous_lfp_pre.dat",
45 | "data_uri": "http://axon:8090/file/ecephys_spike_sorting/continuous_lfp_pre.dat"
46 | },
47 | "spike_times.npy": {
48 | "local_path": "spike_times.npy",
49 | "hash_uri": "http://axon:8090/hash/ecephys_spike_sorting/spike_times.npy",
50 | "data_uri": "http://axon:8090/file/ecephys_spike_sorting/spike_times.npy"
51 | },
52 | "amplitudes.npy": {
53 | "local_path": "amplitudes.npy",
54 | "hash_uri": "http://axon:8090/hash/ecephys_spike_sorting/amplitudes.npy",
55 | "data_uri": "http://axon:8090/file/ecephys_spike_sorting/amplitudes.npy"
56 | },
57 | "spike_clusters.npy": {
58 | "local_path": "spike_clusters.npy",
59 | "hash_uri": "http://axon:8090/hash/ecephys_spike_sorting/spike_clusters.npy",
60 | "data_uri": "http://axon:8090/file/ecephys_spike_sorting/spike_clusters.npy"
61 | },
62 | "templates.npy": {
63 | "local_path": "templates.npy",
64 | "hash_uri": "http://axon:8090/hash/ecephys_spike_sorting/templates.npy",
65 | "data_uri": "http://axon:8090/file/ecephys_spike_sorting/templates.npy"
66 | },
67 | "whitening_mat_inv.npy": {
68 | "local_path": "whitening_mat_inv.npy",
69 | "hash_uri": "http://axon:8090/hash/ecephys_spike_sorting/whitening_mat_inv.npy",
70 | "data_uri": "http://axon:8090/file/ecephys_spike_sorting/whitening_mat_inv.npy"
71 | },
72 | "channel_map.npy": {
73 | "local_path": "channel_map.npy",
74 | "hash_uri": "http://axon:8090/hash/ecephys_spike_sorting/channel_map.npy",
75 | "data_uri": "http://axon:8090/file/ecephys_spike_sorting/channel_map.npy"
76 | },
77 | "channel_positions.npy": {
78 | "local_path": "channel_positions.npy",
79 | "hash_uri": "http://axon:8090/hash/ecephys_spike_sorting/channel_positions.npy",
80 | "data_uri": "http://axon:8090/file/ecephys_spike_sorting/channel_positions.npy"
81 | },
82 | "cluster_group.tsv": {
83 | "local_path": "cluster_group.tsv",
84 | "hash_uri": "http://axon:8090/hash/ecephys_spike_sorting/cluster_group.tsv",
85 | "data_uri": "http://axon:8090/file/ecephys_spike_sorting/cluster_group.tsv"
86 | },
87 | "settings.xml": {
88 | "local_path": "settings.xml",
89 | "hash_uri": "http://axon:8090/hash/ecephys_spike_sorting/settings.xml",
90 | "data_uri": "http://axon:8090/file/ecephys_spike_sorting/settings.xml"
91 | },
92 | "classifier.pkl": {
93 | "local_path": "classifier.pkl",
94 | "hash_uri": "http://axon:8090/hash/ecephys_spike_sorting/classifier.pkl",
95 | "data_uri": "http://axon:8090/file/ecephys_spike_sorting/classifier.pkl"
96 | }
97 | }
98 |
--------------------------------------------------------------------------------
/ecephys_spike_sorting/modules/kilosort_postprocessing/__main__.py:
--------------------------------------------------------------------------------
1 | from argschema import ArgSchemaParser
2 | import os
3 | import logging
4 | import time
5 |
6 | import numpy as np
7 |
8 | from ...common.utils import load_kilosort_data
9 |
10 | from .postprocessing import remove_double_counted_spikes
11 | from .postprocessing import align_spike_times
12 |
13 | def run_postprocessing(args):
14 |
15 | print('ecephys spike sorting: kilosort postprocessing module')
16 |
17 | print("Loading data...")
18 |
22 | start = time.time()
23 |
24 |
25 | spike_times, spike_clusters, spike_templates, amplitudes, templates, channel_map, \
26 | channel_pos, clusterIDs, cluster_quality, cluster_amplitude, pc_features, pc_feature_ind, \
27 | template_features, spike_positions = \
28 | load_kilosort_data(args['directories']['kilosort_output_directory'], \
29 | args['ephys_params']['sample_rate'], \
30 | convert_to_seconds = False, \
31 | use_master_clock = False, \
32 | include_pcs = True )
33 |
34 |
35 | if args['ks_postprocessing_params']['align_avg_waveform']:
36 | spike_times = align_spike_times(spike_times,
37 | spike_clusters,
38 | args['ephys_params']['ap_band_file'],
39 | args['directories']['kilosort_output_directory'],
40 | args['ks_postprocessing_params']['cWaves_path'])
41 |
42 | if args['ks_postprocessing_params']['remove_duplicates']:
43 | spike_times, spike_clusters, spike_templates, amplitudes, pc_features, \
44 | template_features, spike_positions, overlap_matrix, overlap_summary = \
45 | remove_double_counted_spikes(spike_times,
46 | spike_clusters,
47 | spike_templates,
48 | amplitudes,
49 | channel_map,
50 | channel_pos,
51 | templates,
52 | pc_features,
53 | pc_feature_ind,
54 | template_features,
55 | spike_positions,
56 | cluster_amplitude,
57 | args['ephys_params']['sample_rate'],
58 | args['ks_postprocessing_params'])
59 |
60 |
61 | print("Saving data...")
62 |
63 | # save data -- it's fine to overwrite existing files, because the original outputs are stored in rez.mat
64 | output_dir = args['directories']['kilosort_output_directory']
65 | np.save(os.path.join(output_dir, 'spike_times.npy'), spike_times)
66 | np.save(os.path.join(output_dir, 'amplitudes.npy'), amplitudes)
67 | np.save(os.path.join(output_dir, 'spike_clusters.npy'), spike_clusters)
68 | np.save(os.path.join(output_dir, 'spike_templates.npy'), spike_templates)
69 |
70 | if pc_features.size > 0:
71 | np.save(os.path.join(output_dir, 'pc_features.npy'), pc_features)
72 | if template_features.size > 0:
73 | np.save(os.path.join(output_dir, 'template_features.npy'), template_features)
74 | if spike_positions.size > 0:
75 | np.save(os.path.join(output_dir, 'spike_positions.npy'), spike_positions )
76 |
77 |
78 | if args['ks_postprocessing_params']['remove_duplicates']:
79 | np.save(os.path.join(output_dir, 'overlap_matrix.npy'), overlap_matrix)
80 | np.save(os.path.join(output_dir, 'overlap_summary.npy'), overlap_summary)
81 | # save the overlap_summary as a text file -- allows user to easily understand what happened
82 | np.savetxt(os.path.join(output_dir, 'overlap_summary.csv'), overlap_summary, fmt = '%d', delimiter = ',')
83 |
84 | execution_time = time.time() - start
85 |
86 | print('total time: ' + str(np.around(execution_time,2)) + ' seconds')
87 | print( )
88 |
89 | return {"execution_time" : execution_time} # output manifest
90 |
91 |
92 | def main():
93 |     """Main entry point."""
94 | 
95 |     from ._schemas import InputParameters, OutputParameters
96 | 
97 |     mod = ArgSchemaParser(schema_type=InputParameters,
98 | output_schema_type=OutputParameters)
99 |
100 | output = run_postprocessing(mod.args)
101 |
102 | output.update({"input_parameters": mod.args})
103 | if "output_json" in mod.args:
104 | mod.output(output, indent=2)
105 | else:
106 | print(mod.get_output_json(output))
107 |
108 |
109 | if __name__ == "__main__":
110 | main()
111 |
--------------------------------------------------------------------------------
/ecephys_spike_sorting/modules/noise_templates/train_classifier.py:
--------------------------------------------------------------------------------
1 | import os
2 | import glob
3 | import numpy as np
4 | import pandas as pd
5 |
6 | import matplotlib.pyplot as plt
7 |
8 | base_directory = '/mnt/md0/data'
9 |
10 | mice = ['392810', '405755', '448504', '407972', '444384']
11 |
12 | def load(directory, file):
13 | return np.load(os.path.join(directory, file))
14 |
15 | def read_template_ratings_file(filename):
16 |
17 | qualities = ['good','noise1','noise2','noise3','noise4','noise5']
18 |
19 | info = pd.read_csv(filename)
20 | cluster_ids = list(info['cluster_id'].values)
21 | cluster_quality = [qualities.index(x) for x in info['rating'].values]
22 |
23 | return cluster_ids, cluster_quality
24 |
25 | original_features = np.zeros((15000, 61, 32))
26 | labels = np.zeros((15000,))
27 |
28 | unit_idx = 0
29 |
30 | for mouse in mice:
31 |
32 | print(mouse)
33 |
34 | directory = os.path.join(base_directory, 'mouse' + mouse)
35 |
36 | probe_directories = glob.glob(directory + '/*probe*sorted')
37 | probe_directories.sort()
38 |
39 | for folder in probe_directories:
40 |
41 | subfolder = glob.glob(os.path.join(folder, 'continuous', 'Neuropix-*-100.0'))[0]
42 |
43 | templates_raw = load(subfolder,'templates.npy')
44 | unwhitening_mat = load(subfolder,'whitening_mat_inv.npy')
45 | cluster_ids, cluster_quality = read_template_ratings_file(os.path.join(subfolder, 'template_ratings_new.csv'))
46 |
47 | templates = np.zeros(templates_raw.shape)
48 |
49 | for temp_idx in range(templates.shape[0]):
50 |
51 | templates[temp_idx,:,:] = np.dot(np.ascontiguousarray(templates_raw[temp_idx,:,:]),np.ascontiguousarray(unwhitening_mat))
52 |
53 | peak_channels = np.argmin(np.min(templates,1),1)
54 |
55 | for idx, unit in enumerate(cluster_ids):
56 |
57 | peak_channel = peak_channels[unit]
58 |
59 | min_chan = np.max([0,peak_channel-16])
60 | if min_chan == 0:
61 | max_chan = 32
62 | else:
63 | max_chan = np.min([templates.shape[2], peak_channel+16])
64 | if max_chan == templates.shape[2]:
65 | min_chan = max_chan - 32
66 |
67 | sub_template = templates[unit, 21:, min_chan:max_chan]
68 |
69 | original_features[unit_idx,:,:] = sub_template
70 | labels[unit_idx] = cluster_quality[idx]
71 | unit_idx += 1
72 |
73 | print(probe_directories)
74 | # %%
75 | original_features = original_features[:unit_idx,:,:]
76 | features = np.reshape(original_features, (original_features.shape[0], original_features.shape[1] * original_features.shape[2]))
77 | features = features[:,::4]
78 | labels = labels[:unit_idx]
79 |
80 | # %%
81 |
82 | noise_templates = np.where(labels > 0)[0]
83 | good_templates = np.where(labels == 0)[0]
84 |
85 | order_noise = np.random.permutation(noise_templates.size)
86 | order_good = np.random.permutation(good_templates.size)
87 |
88 | # %%
89 |
90 | # # # # # # # # # # # #
91 |
92 | # These numbers are critical. The ratio of good units vs. noise units used in training
93 | # determines the hit rate and false alarm rate.
94 |
95 | n_train_noise = 300
96 | n_train_good = 500
97 | # # # # # # # # # # # #
98 |
99 | x_train = np.concatenate((features[noise_templates[order_noise[:n_train_noise]],:], features[good_templates[order_good[:n_train_good]],:]))
100 | y_train = np.concatenate((labels[noise_templates[order_noise[:n_train_noise]]], labels[good_templates[order_good[:n_train_good]]]))
101 |
102 | x_test = np.concatenate((features[noise_templates[order_noise[n_train_noise:]],:], features[good_templates[order_good[n_train_good:]],:]))
103 | y_test = np.concatenate((labels[noise_templates[order_noise[n_train_noise:]]], labels[good_templates[order_good[n_train_good:]]]))
104 |
105 | # %%
106 |
107 | from sklearn.ensemble import RandomForestClassifier
108 |
109 | clf = RandomForestClassifier(n_estimators=50, max_depth=50, random_state=10, bootstrap = False, warm_start=True, criterion='entropy', class_weight={0 : 0.01, 1: 1})
110 |
111 | clf.n_estimators = 50
112 | clf.fit(x_train, y_train)
113 |
114 | predicted_labels = clf.predict(x_test)
115 |
116 | hits = np.sum((predicted_labels == 0) * (y_test == 0)) / np.sum(y_test == 0)
117 | fp = np.sum((predicted_labels == 0) * (y_test > 0)) / np.sum(y_test > 0)
118 |
119 | confusion_matrix = np.zeros((6,6))  # six rating classes: good + noise1-noise5
120 | 
121 | for i in range(6):
122 |     for j in range(6):
123 |         confusion_matrix[i,j] = np.sum((predicted_labels == i) * (y_test == j)) / np.sum(y_test == j)
124 |
125 | overall = np.sum(((predicted_labels == 0) * (y_test == 0)) + ((predicted_labels > 0) * (y_test > 0))) / len(y_test)
126 |
127 | print('Hit rate: ' + str(hits))
128 | print('FP rate: ' + str(fp))
129 | print('Overall rate: ' + str(overall))
130 |
131 | plt.figure(14111)
132 | plt.clf()
133 |
134 | plt.imshow(confusion_matrix)
135 |
136 | # %%
--------------------------------------------------------------------------------
/ecephys_spike_sorting/modules/automerging/merges.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 |
4 | def constrainValues(input_array):
5 |
6 | output_array = np.copy(input_array)
7 | output_array[np.isinf(input_array)] = 0
8 | output_array[np.isnan(input_array)] = 0
9 | output_array[input_array > 1] = 0
10 | output_array[input_array < 0] = 0
11 |
12 | return output_array
13 |
14 | def getNextMerge(comparison_matrix):
15 |
16 | overall_score, i_index, j_index = compute_overall_score(comparison_matrix)
17 |
18 | nextMerge = np.argmax(overall_score)
19 | mergeScore = np.max(overall_score)
20 |
21 | i = i_index[nextMerge]
22 | j = j_index[nextMerge]
23 |
24 | return mergeScore, i, j, overall_score
25 |
26 | def compute_overall_score(comparison_matrix):
27 |
28 | num_units = comparison_matrix.shape[0]
29 |
30 | index_matrix = np.zeros((num_units,num_units,2),dtype='int')
31 |
32 | for i in range(0,num_units):
33 | for j in range(i+1,num_units):
34 | index_matrix[i,j,0] = i
35 | index_matrix[i,j,1] = j
36 |
37 | selection = comparison_matrix[:,:,0].flatten() == 1
38 | waveform_sim = comparison_matrix[:,:,1].flatten()
39 | isi_sim = comparison_matrix[:,:,3].flatten()
40 | isi_score = 1 - comparison_matrix[:,:,2].flatten()
41 | i_index = index_matrix[:,:,0].flatten()
42 | j_index = index_matrix[:,:,1].flatten()
43 |
44 | overall_score = constrainValues(isi_score) + constrainValues(isi_sim) + constrainValues(waveform_sim)
45 |
46 | return overall_score[selection], i_index[selection], j_index[selection]
47 |
48 | def getTemplateIndsForCluster(spike_templates, spike_clusters, clusterId, templateIDs):
49 |
50 | templatesForCluster = np.unique(spike_templates[spike_clusters == clusterId])
51 |
52 | tempInds = np.zeros((templateIDs.size,))
53 |
54 | for ID in templatesForCluster:
55 |
56 | tempInds = tempInds + (templateIDs == ID)
57 |
58 | return np.squeeze(np.argwhere(tempInds))
59 |
60 | # %%
61 |
62 | # decision for whether or not to merge, based on waveform similarity and isi similarity scores:
63 |
64 | def should_merge(waveform_similarity, isi_similarity, isi_score, t1=0.2, t2=0.75, t3=0.5, t4=0.9, t5=0.9):
65 | 
66 |     # note: only t1 is used below; t2-t5 are currently unused threshold slots
67 |     if not np.isnan(isi_score) and not np.isinf(isi_score) and isi_score < t1 and isi_score > 0.001:
67 |
68 | if not np.isnan(isi_similarity):
69 |
70 | return isi_similarity >= 0.9 - pow(waveform_similarity*1.1, np.e)
71 |
72 | return False
73 |
74 |
75 | # identify the merge groups
76 |
77 | def ID_merge_groups(merges):
78 |
79 | connected_groups = []
80 |
81 | for u1 in range(0,merges.shape[0]):
82 |
83 | for u2 in range(0, merges.shape[0]):
84 |
85 | if merges[u1,u2] == 1 and u1 != u2:
86 |
87 | if len(connected_groups) == 0: # initialize merge groups
88 |
89 | connected_groups.append([u1, u2])
90 |
91 | else:
92 |
93 | foundMatch = False
94 |
95 | for idx, group in enumerate(connected_groups):
96 |
97 | if u1 in group or u2 in group:
98 |
99 | if u1 not in group:
100 | group.extend([u1])
101 | if u2 not in group:
102 | group.extend([u2])
103 |
104 | foundMatch = True
105 |
106 |
107 | if not foundMatch:
108 | connected_groups.append([u1,u2])
109 |
110 |     # check for overlapping groups and merge them
111 |     for idx, group1 in enumerate(connected_groups):
112 | 
113 |         for idx2, group2 in enumerate(connected_groups):
114 | 
115 |             L = len(set(group1).intersection(group2))
116 | 
117 |             if L > 0 and idx != idx2:
118 | 
119 |                 connected_groups[idx] = list(np.sort(group1 + list(set(group2) - set(group1))))
120 |                 connected_groups[idx2] = []
122 |
123 | connected_groups[:] = [item for item in connected_groups if item != []] # remove empty elements
124 |
125 | return connected_groups
126 |
127 |
128 | # make the merges
129 | def make_merges(connected_groups, spike_clusters, spike_templates, templateIDs):
130 |
131 | maxId = np.max(spike_clusters)
132 |
133 | for merge_group in connected_groups:
134 |
135 | maxId += 1
136 |
137 | for unit_idx in merge_group:
138 |
139 | spike_clusters[np.where(spike_templates == templateIDs[unit_idx])[0]] = maxId
140 |
141 | return spike_clusters
142 |
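# Example (illustrative, not part of the pipeline): pairwise merge candidates
# that chain into a single group via ID_merge_groups.
if __name__ == '__main__':
    demo = np.zeros((4, 4), dtype=int)
    demo[0, 1] = 1   # merge candidates 0-1
    demo[1, 2] = 1   # ...and 1-2, chaining all three together
    print(ID_merge_groups(demo))  # -> [[0, 1, 2]]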
--------------------------------------------------------------------------------
/ecephys_spike_sorting/modules/median_subtraction/SpikeBandMedianSubtraction/SpikeBandMedianSubtraction.jucer:
--------------------------------------------------------------------------------
[Projucer project file: XML content not captured in this text dump]
--------------------------------------------------------------------------------
/ecephys_spike_sorting/modules/automerging/metrics.py:
--------------------------------------------------------------------------------
1 | from scipy.interpolate import griddata
2 | from scipy.signal import correlate
3 | import numpy as np
4 | from .spike_ISI import *
5 |
6 | def find_depth(template):
7 |
8 | """
9 | Finds depth based on channel with maximum range.
10 | """
11 |
12 | return np.argmax(np.max(template,0)-np.min(template,0))
13 |
14 | def find_height(template):
15 |
16 | """
17 | Maximum peak-to-trough amplitude of a template
18 | """
19 |
20 | return np.max(np.max(template,0)-np.min(template,0))
21 |
22 | def check_template(template, times):
23 |
24 | """
25 | Detects noise templates based on a set of heuristics
26 | """
27 |
28 | depth = find_depth(template)
29 |
30 | std_thresh = 2.5
31 |
32 | S = np.std(template[:,depth-5:depth+5],1)
33 |
34 | thresh2 = 0.2
35 | wv = template[:,depth]
36 | C = correlate(wv,wv,mode='same')
37 | C = C/np.max(C)
38 |
39 | a = np.where(C > thresh2)[0]
40 | d = np.diff(a)
41 | b = np.where(d > 1)[0]
42 |
43 | h, bins = np.histogram(np.diff(times), bins=np.linspace(0,0.1,100))
44 | h = h/np.max(h)
45 |
46 | H = np.mean(h[:3])/np.max(h)
47 |
48 | if ((np.max(S) < std_thresh or np.argmax(wv) < 10 or np.argmin(wv) < 10) and H > 0.01) or len(b) > 0 or np.min(wv) > -5:
49 | return False
50 | else:
51 | return True
52 |
53 |
54 | def make_actual_channel_locations(min_chan, max_chan):
55 | actual_channel_locations = np.zeros((max_chan-min_chan,2),dtype=int)
56 | xlocations = [16, 48, 0, 32]
57 | for i in range(min_chan,max_chan):
58 | actual_channel_locations[i,0] = xlocations[i%4]
59 | actual_channel_locations[i,1] = np.floor(i/2)*20
60 | return actual_channel_locations
61 |
62 |
63 | def make_interp_channel_locations(min_chan, max_chan):
64 | interp_channel_locations = np.zeros(((max_chan-min_chan)*7,2))
65 | xlocations = [0, 8, 16, 24, 32, 40, 48]
66 | for i in range(min_chan,(max_chan-min_chan)*7+min_chan):
67 | interp_channel_locations[i,0] = xlocations[i%7]
68 | interp_channel_locations[i,1] = np.floor(i/7)*10
69 | return interp_channel_locations
70 |
71 |
72 | def make_interp_temp(templates, indices):
73 |
74 | total_samples = templates.shape[1]
75 | total_channels = templates.shape[2]
76 | refs = np.array([36, 75, 112, 151, 188, 227, 264, 303, 340, 379])
77 | loc_a = make_actual_channel_locations(0, total_channels)
78 | loc_i = make_interp_channel_locations(0, total_channels)
79 |
80 | indices = np.array(indices)
81 | to_include = np.arange(0,total_channels)
82 | to_include = np.delete(to_include, refs)
83 |
84 | interp_temp = np.zeros((templates.shape[1],templates.shape[2]*7,indices.size))
85 |
86 | for i in np.arange(indices.size):
87 |
88 | temp = templates[indices[i], :, :]
89 |
90 | for t in range(0,total_samples):
91 |
92 | interp_temp[t,:,i] = griddata(loc_a[to_include,:], temp[t,to_include], loc_i, method='cubic', fill_value=0, rescale=False)
93 |
94 | return np.reshape(np.mean(interp_temp,2), (total_samples, total_channels, 7)).astype('float')
95 |
96 |
97 | def compare_templates(t1, t2):
98 |
99 | depth1 = find_depth(t1) / 7
100 | depth2 = find_depth(t2) / 7
101 |
102 | total_channels = t1.shape[1]
103 | max_padding = 10
104 | if np.max((depth1,depth2)) < max_padding:
105 | padding_neg = int(np.max((depth1, depth2)))
106 | else:
107 | padding_neg = max_padding
108 |
109 | if np.min((depth1,depth2)) > total_channels - max_padding:
110 | padding_pos = int(total_channels - np.min((depth1, depth2)))
111 | else:
112 | padding_pos = max_padding
113 |
114 | m1 = np.zeros((61, total_channels+padding_neg+padding_pos, 7))
115 | m1[:,padding_neg:total_channels+padding_neg,:] = t1
116 | m2 = np.zeros((61, total_channels+padding_neg+padding_pos, 7))
117 |
118 |     sim = np.zeros((padding_neg + padding_pos,))
119 |     offset_distance = np.zeros((padding_neg + padding_pos,))
120 | 
121 |     for idx, offset in enumerate(range(-padding_neg, padding_pos)):
122 |         m2[:] = 0  # clear the previous offset's window before writing the new one
123 |         m2[:,padding_neg+offset:total_channels+padding_neg+offset,:] = t2
124 |         sim[idx] = np.corrcoef(m1.flatten(), m2.flatten())[0,1]
125 |         offset_distance[idx] = -offset*10
127 |
128 | return sim, offset_distance
129 |
130 |
131 | def compute_isi_score(t1, t2, max_time):
132 |
133 | cISI_score, score_weight, ISI1, ISI2, cISI, rcISI = find_cISI_score(t1, t2, max_time)
134 |
135 | ms = 5
136 | ratio = (cISI[:ms*10] + 0.001) / (rcISI[:ms*10] + 0.001)
137 | weight = 10 - np.linspace(0,9,ms*10)
138 | weighted_ratio = (ratio * weight)/10
139 | another_score = np.mean(weighted_ratio)
140 |
141 | if np.isnan(another_score) or np.isinf(another_score) or another_score > 1 or another_score < 0:
142 | another_score = 1
143 |
144 | return cISI_score, score_weight, ISI1, ISI2, cISI, rcISI, another_score
145 |
146 |
147 | def percent_overlap(t1, t2, min_t, max_t, num_bins = 50):
148 |
149 | h1,b = np.histogram(t1, bins=np.linspace(min_t, max_t, num_bins))
150 | h2,b = np.histogram(t2, bins=np.linspace(min_t, max_t, num_bins))
151 |
152 | overlap = np.intersect1d(np.where(h1 > 0)[0], np.where(h2 > 0)[0]).size/float(num_bins)
153 |
154 | return overlap
155 |
156 | def get_templates_for_cluster(spike_templates, spike_clusters, clusterId):
157 |
158 | templatesForCluster = np.unique(spike_templates[spike_clusters == clusterId])
159 |
160 | return templatesForCluster
--------------------------------------------------------------------------------