├── spikely ├── pipeman │ ├── __init__.py │ ├── piperun.py │ └── pipeman.py ├── resources │ ├── __init__.py │ ├── curator.png │ ├── exporter.png │ ├── sorter.png │ ├── spikely.png │ ├── extractor.png │ ├── preprocessor.png │ └── spikely_gui.png ├── elements │ ├── guiparams │ │ ├── __init__.py │ │ ├── curator │ │ │ ├── __init__.py │ │ │ ├── thresholdnumspikes.py │ │ │ ├── thresholdfiringrates.py │ │ │ ├── thresholdpresenceratios.py │ │ │ ├── thresholdisiviolations.py │ │ │ ├── thresholdamplitudecutoffs.py │ │ │ ├── thresholdsilhouettescores.py │ │ │ ├── thresholddprimes.py │ │ │ ├── thresholdlratios.py │ │ │ ├── thresholdisolationdistances.py │ │ │ ├── thresholddriftmetrics.py │ │ │ ├── thresholdnnmetrics.py │ │ │ └── thresholdsnrs.py │ │ ├── exporter │ │ │ ├── __init__.py │ │ │ └── phyexporter.py │ │ ├── extractor │ │ │ ├── __init__.py │ │ │ ├── nixiorecording.py │ │ │ ├── exdirrecording.py │ │ │ ├── phyrecording.py │ │ │ ├── maxonerecording.py │ │ │ ├── mea1krecordingextractor.py │ │ │ ├── klustarecordingextractor.py │ │ │ ├── neuralynxrecording.py │ │ │ ├── spykingcircusrecordingextractor.py │ │ │ ├── shybridrecording.py │ │ │ ├── intanrecordingextractor.py │ │ │ ├── mearecrecordingextractor.py │ │ │ ├── nwbrecording.py │ │ │ ├── biocamrecording.py │ │ │ ├── mcsh5recordingextractor.py │ │ │ ├── spikeglxrecordingextractor.py │ │ │ ├── openephysrecording.py │ │ │ ├── mdarecordingextractor.py │ │ │ └── bindatrecordingextractor.py │ │ ├── sorter │ │ │ ├── __init__.py │ │ │ ├── kilosort.py │ │ │ ├── klusta.py │ │ │ ├── tridesclous.py │ │ │ ├── spykingcircus.py │ │ │ ├── mountainsort4.py │ │ │ ├── kilosort2.py │ │ │ ├── hdsort.py │ │ │ └── waveclus.py │ │ └── preprocessor │ │ │ ├── __init__.py │ │ │ ├── rectify.py │ │ │ ├── resample.py │ │ │ ├── transform.py │ │ │ ├── clip.py │ │ │ ├── blanksaturation.py │ │ │ ├── removeartifacts.py │ │ │ ├── center.py │ │ │ ├── whiten.py │ │ │ ├── normalizebyquantile.py │ │ │ ├── removebadchannels.py │ │ │ ├── notchfilter.py │ │ │ ├── commonreference.py │ │ │ └── bandpassfilter.py │ ├── __init__.py │ ├── exporterlist.py │ ├── phy_exporter.py │ ├── element_policy.py │ ├── spike_element.py │ ├── std_element_policy.py │ ├── preprocessor.py │ ├── sorter.py │ ├── exporter.py │ ├── curator.py │ └── extractor.py ├── version.py ├── __init__.py ├── parameter_view.py ├── help_menu.py ├── guiparams.py ├── operation_view.py ├── tool_bar.py ├── config.py ├── spikely_main.py ├── file_menu.py ├── pipeline_model.py └── pipeline_view.py ├── MANIFEST.in ├── tests ├── pytest.ini ├── test_param_model.py └── test_config.py ├── docs ├── images │ ├── gui.png │ └── gui_annotated.png ├── source │ ├── installation.rst │ ├── contact.rst │ ├── index.rst │ ├── workflow.rst │ ├── overview.rst │ └── conf.py ├── Makefile └── make.bat ├── .gitignore ├── README.md └── setup.py /spikely/pipeman/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /spikely/resources/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /spikely/elements/guiparams/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include 
spikely/resources/*.png -------------------------------------------------------------------------------- /spikely/elements/guiparams/curator/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /spikely/elements/guiparams/exporter/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /spikely/elements/guiparams/extractor/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /spikely/elements/guiparams/sorter/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /spikely/version.py: -------------------------------------------------------------------------------- 1 | __version__ = '0.7.2' 2 | -------------------------------------------------------------------------------- /spikely/elements/guiparams/preprocessor/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /tests/pytest.ini: -------------------------------------------------------------------------------- 1 | [pytest] 2 | markers = 3 | smoke: Performs smoke test on code. -------------------------------------------------------------------------------- /docs/images/gui.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SpikeInterface/spikely/HEAD/docs/images/gui.png -------------------------------------------------------------------------------- /docs/images/gui_annotated.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SpikeInterface/spikely/HEAD/docs/images/gui_annotated.png -------------------------------------------------------------------------------- /spikely/resources/curator.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SpikeInterface/spikely/HEAD/spikely/resources/curator.png -------------------------------------------------------------------------------- /spikely/resources/exporter.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SpikeInterface/spikely/HEAD/spikely/resources/exporter.png -------------------------------------------------------------------------------- /spikely/resources/sorter.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SpikeInterface/spikely/HEAD/spikely/resources/sorter.png -------------------------------------------------------------------------------- /spikely/resources/spikely.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SpikeInterface/spikely/HEAD/spikely/resources/spikely.png -------------------------------------------------------------------------------- /spikely/resources/extractor.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SpikeInterface/spikely/HEAD/spikely/resources/extractor.png 
-------------------------------------------------------------------------------- /spikely/resources/preprocessor.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SpikeInterface/spikely/HEAD/spikely/resources/preprocessor.png -------------------------------------------------------------------------------- /spikely/resources/spikely_gui.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SpikeInterface/spikely/HEAD/spikely/resources/spikely_gui.png -------------------------------------------------------------------------------- /spikely/elements/guiparams/preprocessor/rectify.py: -------------------------------------------------------------------------------- 1 | from spiketoolkit.preprocessing import rectify 2 | 3 | spif_init_func = rectify 4 | 5 | gui_params = [] 6 | -------------------------------------------------------------------------------- /spikely/elements/guiparams/extractor/nixiorecording.py: -------------------------------------------------------------------------------- 1 | gui_params = [ 2 | { 3 | "name": "file_path", 4 | "type": "file", 5 | "title": "Path to file." 6 | }, 7 | ] 8 | -------------------------------------------------------------------------------- /spikely/elements/guiparams/extractor/exdirrecording.py: -------------------------------------------------------------------------------- 1 | gui_params = [ 2 | { 3 | "name": "folder_path", 4 | "type": "folder", 5 | "title": "Path to folder" 6 | }, 7 | ] 8 | -------------------------------------------------------------------------------- /spikely/elements/guiparams/extractor/phyrecording.py: -------------------------------------------------------------------------------- 1 | gui_params = [ 2 | { 3 | "name": "folder_path", 4 | "type": "folder", 5 | "title": "Path to folder" 6 | }, 7 | ] 8 | -------------------------------------------------------------------------------- /spikely/elements/guiparams/extractor/maxonerecording.py: -------------------------------------------------------------------------------- 1 | gui_params = [ 2 | { 3 | "name": "file_path", 4 | "type": "file", 5 | "title": "Path to recording.", 6 | }, 7 | ] 8 | -------------------------------------------------------------------------------- /spikely/elements/guiparams/extractor/mea1krecordingextractor.py: -------------------------------------------------------------------------------- 1 | gui_params = [ 2 | { 3 | "name": "file_path", 4 | "type": "file", 5 | "title": "Path to recording.", 6 | }, 7 | ] 8 | -------------------------------------------------------------------------------- /spikely/elements/guiparams/extractor/klustarecordingextractor.py: -------------------------------------------------------------------------------- 1 | gui_params = [ 2 | { 3 | "name": "file_path", 4 | "type": "file", 5 | "title": "Path to recording.", 6 | }, 7 | ] 8 | -------------------------------------------------------------------------------- /spikely/elements/guiparams/extractor/neuralynxrecording.py: -------------------------------------------------------------------------------- 1 | gui_params = [ 2 | { 3 | "name": "dirname", 4 | "type": "folder", 5 | "title": "Path to Neuralynx directory." 
6 | }, 7 | ] 8 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.json 2 | *.pyc 3 | *.egg 4 | *.gz 5 | *.whl 6 | tmp_* 7 | *build/ 8 | venv/ 9 | spikely.egg-info/ 10 | .vscode/ 11 | klusta_output/ 12 | mtnsort_data/ 13 | mountainsort4_output/ 14 | phy_output/ 15 | -------------------------------------------------------------------------------- /spikely/elements/guiparams/extractor/spykingcircusrecordingextractor.py: -------------------------------------------------------------------------------- 1 | gui_params = [ 2 | { 3 | "name": "folder_path", 4 | "type": "folder", 5 | "title": "Path to folder" 6 | }, 7 | ] 8 | -------------------------------------------------------------------------------- /spikely/elements/guiparams/extractor/shybridrecording.py: -------------------------------------------------------------------------------- 1 | gui_params = [ 2 | { 3 | "name": "file_path", 4 | "type": "folder", 5 | "title": "Full path to hybrid recording (.bin, .raw, .dat)." 6 | }, 7 | ] 8 | -------------------------------------------------------------------------------- /spikely/elements/__init__.py: -------------------------------------------------------------------------------- 1 | # imports ensure SpikeElement.__subclasses__() works properly 2 | # from . import spike_element # noqa: F401 3 | # from . import extractor # noqa: F401 4 | # from . import sorter # noqa: F401 5 | # from . import preprocessor # noqa: F401 6 | -------------------------------------------------------------------------------- /spikely/elements/guiparams/preprocessor/resample.py: -------------------------------------------------------------------------------- 1 | from spiketoolkit.preprocessing import resample 2 | 3 | spif_init_func = resample 4 | 5 | gui_params = [ 6 | { 7 | "name": "resample_rate", 8 | "type": "float", 9 | "title": "The resampling frequency." 10 | }, 11 | ] 12 | -------------------------------------------------------------------------------- /spikely/elements/exporterlist.py: -------------------------------------------------------------------------------- 1 | import spikeextractors as se 2 | from .phy_exporter import PhyExporter 3 | 4 | exporters_list = [] 5 | exporters_list.extend(se.extractorlist.writable_sorting_extractor_list) 6 | exporters_list.append(PhyExporter) 7 | 8 | se.sorting_exporter_dict 9 | 10 | sorting_exporter_dict = se.sorting_exporter_dict 11 | sorting_exporter_dict[PhyExporter.exporter_name] = PhyExporter 12 | -------------------------------------------------------------------------------- /spikely/elements/guiparams/extractor/intanrecordingextractor.py: -------------------------------------------------------------------------------- 1 | gui_params = [ 2 | { 3 | "name": "file_path", 4 | "type": "file", 5 | "title": "Path to file (.rhs or .rhd)" 6 | }, 7 | { 8 | "name": "verbose", 9 | "type": "bool", 10 | "value": False, 11 | "default": False, 12 | "title": "If true, use detailed messages." 13 | }, 14 | ] 15 | -------------------------------------------------------------------------------- /spikely/elements/guiparams/extractor/mearecrecordingextractor.py: -------------------------------------------------------------------------------- 1 | gui_params = [ 2 | { 3 | "name": "file_path", 4 | "type": "file", 5 | "title": "Path to file (.h5 or .hdf5)." 
6 | }, 7 | { 8 | "name": "locs_2d", 9 | "type": "bool", 10 | "value": True, 11 | "default": True, 12 | "title": "If true, 3d locations are converted to 2d.", 13 | }, 14 | ] 15 | -------------------------------------------------------------------------------- /spikely/elements/guiparams/extractor/nwbrecording.py: -------------------------------------------------------------------------------- 1 | gui_params = [ 2 | { 3 | "name": "file_path", 4 | "type": "file", 5 | "title": "Path to file (.h5 or .hdf5)", 6 | }, 7 | { 8 | "name": "electrical_series_name", 9 | "type": "string", 10 | "value": "ElectricalSeries", 11 | "default": "ElectricalSeries", 12 | "title": "Name of Electrical Series.", 13 | }, 14 | ] 15 | -------------------------------------------------------------------------------- /docs/source/installation.rst: -------------------------------------------------------------------------------- 1 | Installation 2 | ============ 3 | 4 | :code:`spikely` is a Python package. The latest production version can be 5 | installed this way: 6 | 7 | .. code-block:: bash 8 | 9 | pip install spikely 10 | 11 | 12 | If you want to work directly off the master branch of the repository, the 13 | latest pre-production version can be installed this way: 14 | 15 | .. code-block:: bash 16 | 17 | git clone https://github.com/SpikeInterface/spikely 18 | pip install -e spikely 19 | -------------------------------------------------------------------------------- /spikely/elements/guiparams/extractor/biocamrecording.py: -------------------------------------------------------------------------------- 1 | gui_params = [ 2 | { 3 | "name": "file_path", 4 | "type": "file", 5 | "title": "Path to file (.h5 or .hdf5)." 6 | }, 7 | { 8 | "name": "verbose", 9 | "type": "bool", 10 | "value": False, 11 | "default": False, 12 | "title": "If true, provide verbose messages.", 13 | }, 14 | { 15 | "name": "mea_pitch", 16 | "type": "int", 17 | "value": 42, 18 | "default": 42, 19 | "title": "The pitch of the MEA.", 20 | }, 21 | ] 22 | -------------------------------------------------------------------------------- /spikely/elements/guiparams/preprocessor/transform.py: -------------------------------------------------------------------------------- 1 | from spiketoolkit.preprocessing import transform 2 | 3 | spif_init_func = transform 4 | 5 | 6 | gui_params = [ 7 | { 8 | "name": "scalar", 9 | "type": "float", 10 | "value": 1.0, 11 | "default": 1.0, 12 | "title": "Scalar for the traces of the recording extractor.", 13 | }, 14 | { 15 | "name": "offset", 16 | "type": "float", 17 | "value": 0.0, 18 | "default": 0.0, 19 | "title": "Offset for the traces of the recording extractor", 20 | }, 21 | ] 22 | -------------------------------------------------------------------------------- /spikely/elements/guiparams/extractor/mcsh5recordingextractor.py: -------------------------------------------------------------------------------- 1 | gui_params = [ 2 | { 3 | "name": "file_path", 4 | "type": "file", 5 | "title": "Path to file (.h5 or .hdf5)" 6 | }, 7 | { 8 | "name": "stream_id", 9 | "type": "int", 10 | "value": 0, 11 | "default": 0, 12 | "title": "ID of stream that will be loaded" 13 | }, 14 | { 15 | "name": "verbose", 16 | "type": "bool", 17 | "value": False, 18 | "default": False, 19 | "title": "If true, use verbose messages.", 20 | }, 21 | ] 22 | -------------------------------------------------------------------------------- /spikely/elements/guiparams/preprocessor/clip.py: 
-------------------------------------------------------------------------------- 1 | from spiketoolkit.preprocessing import clip 2 | 3 | spif_init_func = clip 4 | 5 | gui_params = [ 6 | { 7 | "name": "a_min", 8 | "type": "float", 9 | "value": None, 10 | "default": None, 11 | "title": "Minimum value. If `None`, clipping is not performed on lower interval edge.", 12 | }, 13 | { 14 | "name": "a_max", 15 | "type": "float", 16 | "value": None, 17 | "default": None, 18 | "title": "Maximum value. If `None`, clipping is not performed on upper interval edge.", 19 | }, 20 | ] 21 | -------------------------------------------------------------------------------- /spikely/elements/guiparams/extractor/spikeglxrecordingextractor.py: -------------------------------------------------------------------------------- 1 | gui_params = [ 2 | { 3 | "name": "file_path", 4 | "type": "file", 5 | "title": "Path to neuropixels ap or lf file", 6 | }, 7 | { 8 | "name": "x_pitch", 9 | "type": "int", 10 | "value": 21, 11 | "default": 21, 12 | "title": "x_pitch for Neuropixels probe (default 21)", 13 | }, 14 | { 15 | "name": "y_pitch", 16 | "type": "int", 17 | "value": 20, 18 | "default": 20, 19 | "title": "y_pitch for Neuropixels probe (default 20)", 20 | }, 21 | ] 22 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 5 | SPHINXOPTS = 6 | SPHINXBUILD = sphinx-build 7 | SOURCEDIR = source 8 | BUILDDIR = build 9 | 10 | # Put it first so that "make" without argument is like "make help". 11 | help: 12 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 13 | 14 | .PHONY: help Makefile 15 | 16 | # Catch-all target: route all unknown targets to Sphinx using the new 17 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 18 | %: Makefile 19 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) -------------------------------------------------------------------------------- /spikely/__init__.py: -------------------------------------------------------------------------------- 1 | # Make version id available at a package level 2 | from .elements.spike_element import SpikeElement # noqa: F401 3 | from .elements.std_element_policy import StdElementPolicy # noqa: F401 4 | from .pipeline_model import PipelineModel # noqa: F401 5 | from .pipeline_view import PipelineView # noqa: F401 6 | from .parameter_view import ParameterView # noqa: F401 7 | from .parameter_model import ParameterModel # noqa: F401 8 | from .operation_view import OperationView # noqa: F401 9 | from .version import __version__ # noqa: F401 10 | -------------------------------------------------------------------------------- /docs/source/contact.rst: -------------------------------------------------------------------------------- 1 | Contact Us 2 | ========== 3 | 4 | Below are the authors of spikely: 5 | 6 | * `Roger Hurwitz `_ [1] 7 | * `Cole Hurwitz `_ [2] 8 | * `Shawn Guo `_ [3] 9 | 10 | For any inquiries, please email rogerhurwitz@gmail.com or just leave an issue! 11 | 12 | 1. Independent Developer, Portland, Oregon, USA 13 | 2. PhD Candidate, The Institute for Adaptive and Neural Computation (ANC), University of Edinburgh, Edinburgh, Scotland. 14 | 3. 
Research Assistant, The Institute for Adaptive and Neural Computation (ANC), University of Edinburgh, Edinburgh, Scotland. 15 | -------------------------------------------------------------------------------- /spikely/elements/guiparams/preprocessor/blanksaturation.py: -------------------------------------------------------------------------------- 1 | from spiketoolkit.preprocessing import blank_saturation 2 | 3 | spif_init_func = blank_saturation 4 | 5 | gui_params = [ 6 | { 7 | "name": "threshold", 8 | "type": "float", 9 | "value": None, 10 | "default": None, 11 | "title": "Threshold value (in absolute units) for saturation artifacts. " 12 | "If None, the threshold will be determined from the 0.1 signal percentile.", 13 | }, 14 | { 15 | "name": "seed", 16 | "type": "int", 17 | "value": 0, 18 | "default": 0, 19 | "title": "Random seed for reproducibility.", 20 | }, 21 | ] 22 | -------------------------------------------------------------------------------- /tests/test_param_model.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from spikely.parameter_model import ParameterModel 4 | 5 | 6 | @pytest.mark.parametrize( 7 | 'type_str, value, expected_cvt_value', 8 | [('', 'None', None), 9 | ('str', 'Test', 'Test'), 10 | ('int_list_list', '[[0,1],[2,3]]', [[0, 1], [2, 3]]), 11 | ('int', '25', 25), 12 | ('int', 'inf', float('inf')), 13 | ('float', '25.5', 25.5), 14 | ('int_list', '[0, 1, 2, 3]', [0, 1, 2, 3]), 15 | ]) 16 | def test_convert_value(type_str, value, expected_cvt_value): 17 | pm = ParameterModel() 18 | success, cvt_value = pm._convert_value(type_str, value) 19 | 20 | assert success 21 | assert cvt_value == expected_cvt_value 22 | -------------------------------------------------------------------------------- /spikely/elements/guiparams/extractor/openephysrecording.py: -------------------------------------------------------------------------------- 1 | gui_params = [ 2 | { 3 | "name": "folder_path", 4 | "type": "folder", 5 | "title": "Folder path name." 
6 | }, 7 | { 8 | "name": "experiment_id", 9 | "type": "int", 10 | "value": 0, 11 | "default": 0, 12 | "title": "Experiment ID", 13 | }, 14 | { 15 | "name": "recording_id", 16 | "type": "int", 17 | "value": 0, 18 | "default": 0, 19 | "title": "Recording ID", 20 | }, 21 | { 22 | "name": "dtype", 23 | "type": "str", 24 | "value": "float", 25 | "default": "float", 26 | "title": "dtype ('float' or 'int')", 27 | }, 28 | ] 29 | -------------------------------------------------------------------------------- /spikely/elements/phy_exporter.py: -------------------------------------------------------------------------------- 1 | from spiketoolkit.postprocessing import export_to_phy 2 | 3 | 4 | class PhyExporter: 5 | 6 | installed = True 7 | mode = "folder" 8 | 9 | @staticmethod 10 | def write_sorting( 11 | recording, 12 | sorting, 13 | save_path, 14 | compute_pc_features, 15 | compute_amplitudes, 16 | max_channels_per_template, 17 | **kwargs 18 | ): 19 | 20 | export_to_phy( 21 | recording, 22 | sorting, 23 | save_path, 24 | compute_pc_features=compute_pc_features, 25 | compute_amplitudes=compute_amplitudes, 26 | max_channels_per_template=max_channels_per_template, 27 | **kwargs 28 | ) 29 | -------------------------------------------------------------------------------- /spikely/elements/guiparams/preprocessor/removeartifacts.py: -------------------------------------------------------------------------------- 1 | from spiketoolkit.preprocessing import remove_artifacts 2 | 3 | spif_init_func = remove_artifacts 4 | 5 | gui_params = [ 6 | { 7 | "name": "triggers", 8 | "type": "int_list", 9 | "title": "List of ints with the stimulation trigger frames.", 10 | }, 11 | { 12 | "name": "ms_before", 13 | "type": "float", 14 | "value": 0.5, 15 | "default": 0.5, 16 | "title": "Time interval in ms to remove before the trigger events.", 17 | }, 18 | { 19 | "name": "ms_after", 20 | "type": "float", 21 | "value": 3.0, 22 | "default": 3.0, 23 | "title": "Time interval in ms to remove after the trigger events.", 24 | }, 25 | ] 26 | -------------------------------------------------------------------------------- /spikely/pipeman/piperun.py: -------------------------------------------------------------------------------- 1 | import json 2 | import os 3 | import sys 4 | 5 | from spikely import config as cfg 6 | 7 | 8 | def run(elem_list_str): 9 | 10 | elem_jdict_list = json.loads(elem_list_str) 11 | elem_list = [cfg.cvt_dict_to_elem(elem_jdict) 12 | for elem_jdict in elem_jdict_list] 13 | 14 | payload = None 15 | last_index = len(elem_list) - 1 16 | for index, elem in enumerate(elem_list): 17 | if index == last_index: 18 | next_elem = None 19 | else: 20 | next_elem = elem_list[index + 1] 21 | payload = elem.run(payload, next_elem) 22 | 23 | 24 | def main(): 25 | run(sys.argv[1]) 26 | 27 | # Turns out that this is a very important call 28 | os._exit(1) 29 | 30 | if __name__ == '__main__': 31 | main() 32 | -------------------------------------------------------------------------------- /spikely/elements/guiparams/preprocessor/center.py: -------------------------------------------------------------------------------- 1 | from spiketoolkit.preprocessing import center 2 | 3 | spif_init_func = center 4 | 5 | gui_params = [ 6 | { 7 | "name": "mode", 8 | "type": "str", 9 | "value": "median", 10 | "default": "median", 11 | "title": "median (default) or mean." 
12 | }, 13 | { 14 | "name": "seconds", 15 | "type": "float", 16 | "value": 10, 17 | "default": 10, 18 | "title": "Number of seconds used to compute center.", 19 | }, 20 | { 21 | "name": "n_snippets", 22 | "type": "int", 23 | "value": 10, 24 | "default": 10, 25 | "title": "Number of snippets in which the total 'seconds' are divided spanning" 26 | " the recording duration." 27 | }, 28 | ] 29 | -------------------------------------------------------------------------------- /spikely/elements/guiparams/preprocessor/whiten.py: -------------------------------------------------------------------------------- 1 | from spiketoolkit.preprocessing import whiten 2 | 3 | spif_init_func = whiten 4 | 5 | 6 | gui_params = [ 7 | { 8 | "name": "chunk_size", 9 | "type": "int", 10 | "value": 30_000, 11 | "default": 30_000, 12 | "title": "The chunk size to be used for the filtering.", 13 | }, 14 | { 15 | "name": "cache_chunks", 16 | "type": "bool", 17 | "value": False, 18 | "default": False, 19 | "title": "If True, filtered traces are computed and" 20 | " cached all at once (default False).", 21 | }, 22 | { 23 | "name": "seed", 24 | "type": "int", 25 | "value": 0, 26 | "default": 0, 27 | "title": "Random seed for reproducibility.", 28 | }, 29 | ] 30 | -------------------------------------------------------------------------------- /spikely/elements/guiparams/extractor/mdarecordingextractor.py: -------------------------------------------------------------------------------- 1 | gui_params = [ 2 | { 3 | "name": "folder_path", 4 | "type": "folder", 5 | "title": "Path to folder.", 6 | }, 7 | { 8 | "name": "raw_fname", 9 | "type": "file", 10 | "value": "raw.mda", 11 | "default": "raw.mda", 12 | "title": "File name of raw file (default raw.mda).", 13 | }, 14 | { 15 | "name": "params_fname", 16 | "type": "file", 17 | "value": "params.json", 18 | "default": "params.json", 19 | "title": "File name of params file (default params.json).", 20 | }, 21 | { 22 | "name": "geom_fname", 23 | "type": "file", 24 | "value": "geom.csv", 25 | "default": "geom.csv", 26 | "title": "File name of geom file (default geom.csv).", 27 | }, 28 | ] 29 | -------------------------------------------------------------------------------- /docs/source/index.rst: -------------------------------------------------------------------------------- 1 | spikely 2 | ======= 3 | 4 | Spike Sorting Made Simple 5 | ------------------------- 6 | 7 | .. _SpikeInterface: https://github.com/SpikeInterface 8 | 9 | Spikely is a Python application built on top of SpikeInterface_ designed to simplify 10 | the process of creating and running spike sorting pipelines. 11 | Spikely supports loading, preprocessing, sorting, curating, and exporting 12 | of extracellular datasets that are stored in `SpikeInterface compatible file 13 | formats `_. 14 | 15 | .. image:: ../images/gui.png 16 | 17 | Contents 18 | -------- 19 | .. toctree:: 20 | :maxdepth: 1 21 | 22 | overview 23 | workflow 24 | installation 25 | contact 26 | 27 | .. Indices and tables 28 | .. ------------------ 29 | .. * :ref:`genindex` 30 | .. * :ref:`modindex` 31 | .. 
* :ref:`search` 32 | -------------------------------------------------------------------------------- /spikely/elements/element_policy.py: -------------------------------------------------------------------------------- 1 | from abc import ABC, abstractmethod 2 | 3 | 4 | class ElementPolicy(ABC): 5 | 6 | def __init__(self, required_cls_list, cls_order_dict, 7 | cls_display_name_dict): 8 | self._required_cls_list = required_cls_list 9 | self._cls_order_dict = cls_order_dict 10 | self._cls_display_name_dict = cls_display_name_dict 11 | 12 | @abstractmethod 13 | def is_cls_available(self, cls): 14 | pass 15 | 16 | @abstractmethod 17 | def is_cls_singleton(self, cls): 18 | pass 19 | 20 | @property 21 | def required_cls_list(self): 22 | return self._required_cls_list 23 | 24 | @property 25 | def cls_order_dict(self): 26 | return self._cls_order_dict 27 | 28 | def get_cls_display_name(self, cls): 29 | return self._cls_display_name_dict.get(cls, str(cls)) 30 | -------------------------------------------------------------------------------- /docs/make.bat: -------------------------------------------------------------------------------- 1 | @ECHO OFF 2 | 3 | pushd %~dp0 4 | 5 | REM Command file for Sphinx documentation 6 | 7 | if "%SPHINXBUILD%" == "" ( 8 | set SPHINXBUILD=sphinx-build 9 | ) 10 | set SOURCEDIR=source 11 | set BUILDDIR=build 12 | 13 | if "%1" == "" goto help 14 | 15 | %SPHINXBUILD% >NUL 2>NUL 16 | if errorlevel 9009 ( 17 | echo. 18 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx 19 | echo.installed, then set the SPHINXBUILD environment variable to point 20 | echo.to the full path of the 'sphinx-build' executable. Alternatively you 21 | echo.may add the Sphinx directory to PATH. 22 | echo. 23 | echo.If you don't have Sphinx installed, grab it from 24 | echo.http://sphinx-doc.org/ 25 | exit /b 1 26 | ) 27 | 28 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% 29 | goto end 30 | 31 | :help 32 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% 33 | 34 | :end 35 | popd 36 | -------------------------------------------------------------------------------- /spikely/parameter_view.py: -------------------------------------------------------------------------------- 1 | from PyQt5 import QtWidgets 2 | 3 | from spikely.config import PARAM_COL, TYPE_COL, VALUE_COL 4 | 5 | 6 | class ParameterView(QtWidgets.QGroupBox): 7 | 8 | def __init__(self, pipeline_model, parameter_model): 9 | super().__init__("Configure Parameters") 10 | self._pipeline_model = pipeline_model 11 | self._parameter_model = parameter_model 12 | 13 | self._init_ui() 14 | 15 | def _init_ui(self): 16 | self.setLayout(QtWidgets.QHBoxLayout()) 17 | 18 | cfg_table = QtWidgets.QTableView(self) 19 | 20 | # Magic happens here: links element model to view 21 | cfg_table.setModel(self._parameter_model) 22 | 23 | cfg_table.verticalHeader().hide() 24 | cfg_table.setColumnWidth(PARAM_COL, 200) 25 | cfg_table.setColumnWidth(TYPE_COL, 100) 26 | cfg_table.setColumnWidth(VALUE_COL, 200) 27 | cfg_table.horizontalHeader().setStretchLastSection(True) 28 | 29 | self.layout().addWidget(cfg_table) 30 | -------------------------------------------------------------------------------- /spikely/help_menu.py: -------------------------------------------------------------------------------- 1 | # Application help menu construction and execution 2 | 3 | import webbrowser 4 | 5 | from PyQt5 import QtWidgets 6 | 7 | 8 | def create_help_menu(main_window: QtWidgets.QMainWindow) -> QtWidgets.QMenu: 9 | help_menu = 
QtWidgets.QMenu('&Help', main_window) 10 | _create_help_actions(help_menu, main_window) 11 | return help_menu 12 | 13 | 14 | def _create_help_actions(menu, win): 15 | file_actions = [ 16 | ('Documentation', 'Ctrl+D', 17 | 'Open spikely documentation link in web browser', 18 | _open_doc_browser) 19 | ] 20 | 21 | for name, shortcut, statustip, signal in file_actions: 22 | action = QtWidgets.QAction(name, win) 23 | action.setShortcut(shortcut) 24 | action.setStatusTip(statustip) 25 | action.triggered.connect(signal) 26 | menu.addAction(action) 27 | 28 | 29 | def _open_doc_browser(): 30 | webbrowser.open('https://spikely.readthedocs.io/en/latest/index.html#a-simple-extracellur-data-processing-application-based-on-spikeinterface)') # noqa: E501 31 | -------------------------------------------------------------------------------- /spikely/elements/spike_element.py: -------------------------------------------------------------------------------- 1 | from abc import ABC, abstractmethod 2 | 3 | 4 | class SpikeElement(ABC): 5 | 6 | # Abstract methods 7 | 8 | @staticmethod 9 | @abstractmethod 10 | def get_installed_spif_cls_list(): 11 | pass 12 | 13 | @staticmethod 14 | @abstractmethod 15 | def get_display_name_from_spif_class(spif_class): 16 | pass 17 | 18 | @abstractmethod 19 | def run(self, payload, next_elem): 20 | pass 21 | 22 | @property 23 | @abstractmethod 24 | def display_name(self): 25 | pass 26 | 27 | @property 28 | @abstractmethod 29 | def display_icon(self): 30 | pass 31 | 32 | # Concrete base class methods 33 | 34 | def __init__(self, spif_class): 35 | self._spif_class = spif_class 36 | self._param_list = None 37 | 38 | @property 39 | def spif_class(self): 40 | return self._spif_class 41 | 42 | @property 43 | def param_list(self): 44 | return self._param_list 45 | 46 | @param_list.setter 47 | def param_list(self, param_list): 48 | self._param_list = param_list 49 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # spikely 2 | An application built on top of SpikeInterface to create and run extracellular 3 | data processing pipelines within a GUI. Spikely currently supports loading, preprocessing, 4 | sorting, and curating extracellular datasets that are stored in SpikeInterface 5 | compatible file formats. 6 | 7 | ## Setup 8 | 9 | To run spikely first you must install it: 10 | 11 | ``` 12 | pip install spikely 13 | ``` 14 | 15 | Once installed, spikely can be launched from the command line: 16 | 17 | ``` 18 | spikely 19 | ``` 20 | 21 | ## Documentation 22 | All documentation for spikely can be found here: https://spikely.readthedocs.io/en/latest/. 
23 | 24 | ## Screenshot 25 | 26 | ![gui](./spikely/resources/spikely_gui.png) 27 | 28 | ## Authors 29 | 30 | [Roger Hurwitz](mailto:rogerhurwitz@gmail.com?subject=Spikely) - Portland, Oregon, USA 31 | 32 | [Cole Hurwitz](https://colehurwitz.github.io/) - The Institute for Adaptive and Neural Computation (ANC), University of Edinburgh, Edinburgh, Scotland 33 | 34 | [Shawn Guo](http://www.shawnguo.cn/) - The Institute for Adaptive and Neural Computation (ANC), University of Edinburgh, Edinburgh, Scotland 35 | -------------------------------------------------------------------------------- /spikely/elements/guiparams/preprocessor/normalizebyquantile.py: -------------------------------------------------------------------------------- 1 | from spiketoolkit.preprocessing import normalize_by_quantile 2 | 3 | spif_init_func = normalize_by_quantile 4 | 5 | gui_params = [ 6 | { 7 | "name": "scale", 8 | "type": "float", 9 | "value": 1.0, 10 | "default": 1.0, 11 | "title": "Scale for the output distribution" 12 | }, 13 | { 14 | "name": "median", 15 | "type": "float", 16 | "value": 0.0, 17 | "default": 0.0, 18 | "title": "Median for the output distribution"}, 19 | { 20 | "name": "q1", 21 | "type": "float", 22 | "value": 0.01, 23 | "default": 0.01, 24 | "title": "Lower quantile used for measuring the scale", 25 | }, 26 | { 27 | "name": "q2", 28 | "type": "float", 29 | "value": 0.99, 30 | "default": 0.99, 31 | "title": "Upper quantile used for measuring the scale", 32 | }, 33 | { 34 | "name": "seed", 35 | "type": "int", 36 | "value": 0, 37 | "default": 0, 38 | "title": "Random seed for reproducibility.", 39 | }, 40 | ] 41 | -------------------------------------------------------------------------------- /spikely/elements/guiparams/preprocessor/removebadchannels.py: -------------------------------------------------------------------------------- 1 | from spiketoolkit.preprocessing import remove_bad_channels 2 | 3 | spif_init_func = remove_bad_channels 4 | 5 | gui_params = [ 6 | { 7 | "name": "bad_channel_ids", 8 | "type": "int_list", 9 | "value": None, 10 | "default": None, 11 | "title": "List of bad channel ids (int)." 
12 | " If None, automatic removal will be done based on standard deviation.", 13 | }, 14 | { 15 | "name": "bad_threshold", 16 | "type": "float", 17 | "value": 2, 18 | "default": 2, 19 | "title": "If automatic is used, the threshold for the standard deviation" 20 | " over which channels are removed", 21 | }, 22 | { 23 | "name": "seconds", 24 | "type": "float", 25 | "value": 10, 26 | "default": 10, 27 | "title": "If automatic is used, the number of seconds used to compute" 28 | " standard deviations.", 29 | }, 30 | { 31 | "name": "verbose", 32 | "type": "bool", 33 | "value": False, 34 | "default": False, 35 | "title": "If True output is verbose"}, 36 | ] 37 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup, find_packages 2 | 3 | version = {} 4 | with open('spikely/version.py') as fp: 5 | exec(fp.read(), version) 6 | 7 | with open('README.md', 'r') as fp: 8 | long_description = fp.read() 9 | 10 | setup( 11 | name='spikely', 12 | version=version['__version__'], 13 | description='Spike sorting made simple', 14 | long_description=long_description, 15 | long_description_content_type='text/markdown', 16 | author='Roger Hurwitz', 17 | author_email='rogerhurwitz@gmail.com', 18 | url='https://github.com/SpikeInterface/spikely', 19 | packages=find_packages(exclude=('tests', 'docs')), 20 | include_package_data=True, 21 | install_requires=[ 22 | 'pyqt5>=5.14.2', 23 | 'spikeextractors>=0.8.4', 24 | 'spikesorters>=0.3.3', 25 | 'spiketoolkit>=0.6.3' 26 | ], 27 | entry_points={ 28 | 'console_scripts': [ 29 | 'spikely=spikely.spikely_main:launch_spikely' 30 | ] 31 | }, 32 | classifiers=[ 33 | 'Programming Language :: Python :: 3', 34 | 'License :: OSI Approved :: GNU General Public License v3 (GPLv3)', 35 | 'Operating System :: OS Independent', 36 | ], 37 | ) 38 | -------------------------------------------------------------------------------- /spikely/elements/guiparams/preprocessor/notchfilter.py: -------------------------------------------------------------------------------- 1 | from spiketoolkit.preprocessing import notch_filter 2 | 3 | spif_init_func = notch_filter 4 | 5 | gui_params = [ 6 | { 7 | "name": "freq", 8 | "type": "float", 9 | "value": 3000.0, 10 | "default": 3000.0, 11 | "title": "The target frequency of the notch filter.", 12 | }, 13 | { 14 | "name": "q", 15 | "type": "int", 16 | "value": 30, 17 | "default": 30, 18 | "title": "The quality factor of the notch filter.", 19 | }, 20 | { 21 | "name": "chunk_size", 22 | "type": "int", 23 | "value": 30_000, 24 | "default": 30_000, 25 | "title": "The chunk size to be used for the filtering.", 26 | }, 27 | { 28 | "name": "cache_to_file", 29 | "type": "bool", 30 | "value": False, 31 | "default": False, 32 | "title": "If True, filtered traces are computed and cached all" 33 | " at once on disk in temp file.", 34 | }, 35 | { 36 | "name": "cache_chunks", 37 | "type": "bool", 38 | "value": False, 39 | "default": False, 40 | "title": "If True then each chunk is cached in memory (in a dict).", 41 | }, 42 | ] 43 | -------------------------------------------------------------------------------- /spikely/elements/guiparams/preprocessor/commonreference.py: -------------------------------------------------------------------------------- 1 | from spiketoolkit.preprocessing import common_reference 2 | 3 | spif_init_func = common_reference 4 | 5 | gui_params = [ 6 | { 7 | "name": "reference", 8 | "type": 
"str", 9 | "value": "median", 10 | "default": "median", 11 | "title": "Reference type (median, average, or single).", 12 | }, 13 | { 14 | "name": "groups", 15 | "type": "int_list_list", 16 | "value": None, 17 | "default": None, 18 | "title": "List of lists containins the channels for splitting the reference.", 19 | }, 20 | { 21 | "name": "ref_channels", 22 | "type": "int_or_int_list", 23 | "value": None, 24 | "default": None, 25 | "title": "If no groups are specified, all channels are referenced to ref_channels.", 26 | }, 27 | { 28 | "name": "dtype", 29 | "type": "str", 30 | "value": None, 31 | "default": None, 32 | "title": "dtype of the returned traces. If None, dtype is maintained.", 33 | }, 34 | { 35 | "name": "verbose", 36 | "type": "bool", 37 | "value": False, 38 | "default": False, 39 | "title": "If True, output from SpikeInterface element is verbose when run.", 40 | }, 41 | ] 42 | -------------------------------------------------------------------------------- /spikely/elements/std_element_policy.py: -------------------------------------------------------------------------------- 1 | from . import curator as sp_cur 2 | from . import element_policy as sp_elp 3 | from . import preprocessor as sp_pre 4 | from . import extractor as sp_ree 5 | from . import sorter as sp_sor 6 | from . import exporter as sp_soe 7 | 8 | 9 | class StdElementPolicy(sp_elp.ElementPolicy): 10 | def __init__(self): 11 | required_cls_list = [ 12 | sp_ree.Extractor, 13 | sp_sor.Sorter, 14 | ] 15 | 16 | cls_order_dict = { 17 | sp_ree.Extractor: 0, 18 | sp_pre.Preprocessor: 1, 19 | sp_sor.Sorter: 2, 20 | sp_cur.Curator: 3, 21 | sp_soe.SortingExporter: 4, 22 | } 23 | 24 | cls_display_name_dict = { 25 | sp_ree.Extractor: "Extractor", 26 | sp_pre.Preprocessor: "Preprocessor", 27 | sp_sor.Sorter: "Sorter", 28 | sp_cur.Curator: "Curator", 29 | sp_soe.SortingExporter: "Exporter", 30 | } 31 | 32 | super().__init__(required_cls_list, cls_order_dict, cls_display_name_dict) 33 | 34 | def is_cls_available(self, cls): 35 | return cls in [ 36 | sp_ree.Extractor, 37 | sp_pre.Preprocessor, 38 | sp_sor.Sorter, 39 | sp_cur.Curator, 40 | sp_soe.SortingExporter, 41 | ] 42 | 43 | def is_cls_singleton(self, cls): 44 | return cls in [sp_ree.Extractor, sp_sor.Sorter, sp_soe.SortingExporter] 45 | -------------------------------------------------------------------------------- /spikely/elements/guiparams/extractor/bindatrecordingextractor.py: -------------------------------------------------------------------------------- 1 | gui_params = [ 2 | { 3 | "name": "file_path", 4 | "type": "file", 5 | "title": "Path to file (.dat)"}, 6 | { 7 | "name": "sampling_frequency", 8 | "type": "float", 9 | "title": "Sampling rate in HZ"}, 10 | { 11 | "name": "numchan", 12 | "type": "int", 13 | "title": "Number of channels"}, 14 | { 15 | "name": "dtype", 16 | "type": "np.dtype", 17 | "title": "The dtype of underlying data (int16, float32, etc.)", 18 | }, 19 | { 20 | "name": "recording_channels", 21 | "type": "int_list", 22 | "value": None, 23 | "default": None, 24 | "title": "List of recording channels", 25 | }, 26 | { 27 | "name": "time_axis", 28 | "type": "int", 29 | "value": 0, 30 | "default": 0, 31 | "title": "If 0 traces are transposed to ensure (nb_sample, nb_channel) in the file" 32 | " If 1, the traces shape (nb_channel, nb_sample) is kept in the file.", 33 | }, 34 | { 35 | "name": "offset", 36 | "type": "int", 37 | "value": 0, 38 | "default": 0, 39 | "title": "Offset in binary file", 40 | }, 41 | { 42 | "name": "gain", 43 | "type": "float", 44 | 
"value": None, 45 | "default": None, 46 | "title": "gain of the recordings", 47 | }, 48 | { 49 | "name": "is_filtered", 50 | "type": "bool", 51 | "value": False, 52 | "default": False, 53 | "title": "If true, assume recording is filtered.", 54 | }, 55 | ] 56 | -------------------------------------------------------------------------------- /spikely/guiparams.py: -------------------------------------------------------------------------------- 1 | import importlib 2 | from copy import deepcopy 3 | 4 | 5 | def get_gui_params(filename, subpathname): 6 | """Load the gui_params list of param dictionaries for caller. 7 | 8 | Args: 9 | filename: gui_params filename maps to spif class display name 10 | subpathname: "curator", "exporter", "preprocessor", or "sorter" 11 | 12 | Returns: 13 | gui_param_list: List of gui_param dicts needed to populate the UI 14 | 15 | """ 16 | module = get_gui_params_module(filename, subpathname) 17 | 18 | gui_params = None if not module else getattr(module, "gui_params", None) 19 | 20 | return deepcopy(gui_params) 21 | 22 | 23 | def get_spif_init_func(filename, subpathname): 24 | """Load the gui_params list of param dictionaries for caller. 25 | 26 | Args: 27 | filename: gui_params filename maps to spif class display name 28 | subpathname: "curator", "exporter", "preprocessor", or "sorter" 29 | 30 | Returns: 31 | spif_init_func: Underlying spif function needed to instantiate spif class. 32 | 33 | """ 34 | module = get_gui_params_module(filename, subpathname) 35 | 36 | spif_init_func = None if not module else getattr(module, "spif_init_func", None) 37 | 38 | return spif_init_func 39 | 40 | 41 | def gui_params_file_exists(filename, subpathname): 42 | 43 | return get_gui_params_module(filename, subpathname) is not None 44 | 45 | 46 | def get_gui_params_module(filename, subpathname): 47 | 48 | module_pathname = "." + subpathname + "." + filename.lower() 49 | 50 | try: 51 | module = importlib.import_module(module_pathname, "spikely.elements.guiparams") 52 | except ModuleNotFoundError: 53 | module = None 54 | 55 | return module 56 | -------------------------------------------------------------------------------- /spikely/elements/guiparams/curator/thresholdnumspikes.py: -------------------------------------------------------------------------------- 1 | from spiketoolkit.validation.quality_metric_classes.parameter_dictionaries import ( 2 | get_validation_params, 3 | ) 4 | from spiketoolkit.curation import threshold_num_spikes 5 | spif_init_func = threshold_num_spikes 6 | class_default = get_validation_params() 7 | 8 | gui_params = [ 9 | { 10 | "name": "threshold", 11 | "type": "int", 12 | "title": "The threshold for the given metric.", 13 | }, 14 | { 15 | "name": "threshold_sign", 16 | "type": "str", 17 | "title": "If 'less', will threshold any metric less than the given threshold. \ 18 | If 'less_or_equal', will threshold any metric less than or equal to the given threshold. \ 19 | If 'greater', will threshold any metric greater than the given threshold. \ 20 | If 'greater_or_equal', will threshold any metric greater than or equal to the given threshold.", 21 | }, 22 | { 23 | "name": "sampling_frequency", 24 | "type": "float", 25 | "value": None, 26 | "default": None, 27 | "title": "The sampling frequency of the result. 
If None, will check to see if sampling frequency is in sorting extractor.", 28 | }, 29 | # kwargs 30 | { 31 | "name": "save_property_or_features", 32 | "type": "bool", 33 | "value": class_default["save_property_or_features"], 34 | "default": class_default["save_property_or_features"], 35 | "title": "If True, it will save features in the sorting extractor.", 36 | }, 37 | { 38 | "name": "verbose", 39 | "type": "bool", 40 | "value": class_default["verbose"], 41 | "default": class_default["verbose"], 42 | "title": "If True, output from SpikeInterface element is verbose when run.", 43 | }, 44 | ] 45 | -------------------------------------------------------------------------------- /spikely/elements/guiparams/curator/thresholdfiringrates.py: -------------------------------------------------------------------------------- 1 | from spiketoolkit.curation import threshold_firing_rates 2 | from spiketoolkit.validation.quality_metric_classes.parameter_dictionaries import ( 3 | get_validation_params, 4 | ) 5 | class_default = get_validation_params() 6 | spif_init_func = threshold_firing_rates 7 | 8 | gui_params = [ 9 | { 10 | "name": "threshold", 11 | "type": "float", 12 | "title": "The threshold for the given metric.", 13 | }, 14 | { 15 | "name": "threshold_sign", 16 | "type": "str", 17 | "title": "If 'less', will threshold any metric less than the given threshold. \ 18 | If 'less_or_equal', will threshold any metric less than or equal to the given threshold. \ 19 | If 'greater', will threshold any metric greater than the given threshold. \ 20 | If 'greater_or_equal', will threshold any metric greater than or equal to the given threshold.", 21 | }, 22 | { 23 | "name": "duration_in_frames", 24 | "type": "int", 25 | "title": "Length of recording (in frames).", 26 | }, 27 | { 28 | "name": "sampling_frequency", 29 | "type": "float", 30 | "value": None, 31 | "default": None, 32 | "title": "The sampling frequency of the result. If None, will check to see if sampling frequency is in sorting extractor.", 33 | }, 34 | #kwargs 35 | { 36 | "name": "save_property_or_features", 37 | "type": "bool", 38 | "value": class_default["save_property_or_features"], 39 | "default": class_default["save_property_or_features"], 40 | "title": "If True, it will save features in the sorting extractor.", 41 | }, 42 | { 43 | "name": "verbose", 44 | "type": "bool", 45 | "value": class_default["verbose"], 46 | "default": class_default["verbose"], 47 | "title": "If True, output from SpikeInterface element is verbose when run.", 48 | }, 49 | ] -------------------------------------------------------------------------------- /spikely/elements/guiparams/curator/thresholdpresenceratios.py: -------------------------------------------------------------------------------- 1 | from spiketoolkit.curation import threshold_presence_ratios 2 | from spiketoolkit.validation.quality_metric_classes.parameter_dictionaries import ( 3 | get_validation_params, 4 | ) 5 | class_default = get_validation_params() 6 | spif_init_func = threshold_presence_ratios 7 | 8 | gui_params = [ 9 | { 10 | "name": "threshold", 11 | "type": "float", 12 | "title": "The threshold for the given metric.", 13 | }, 14 | { 15 | "name": "threshold_sign", 16 | "type": "str", 17 | "title": "If 'less', will threshold any metric less than the given threshold. \ 18 | If 'less_or_equal', will threshold any metric less than or equal to the given threshold. \ 19 | If 'greater', will threshold any metric greater than the given threshold. 
\ 20 | If 'greater_or_equal', will threshold any metric greater than or equal to the given threshold.", 21 | }, 22 | { 23 | "name": "duration_in_frames", 24 | "type": "int", 25 | "title": "Length of recording (in frames).", 26 | }, 27 | { 28 | "name": "sampling_frequency", 29 | "type": "float", 30 | "value": None, 31 | "default": None, 32 | "title": "The sampling frequency of the result. If None, will check to see if sampling frequency is in sorting extractor.", 33 | }, 34 | #kwargs 35 | { 36 | "name": "save_property_or_features", 37 | "type": "bool", 38 | "value": class_default["save_property_or_features"], 39 | "default": class_default["save_property_or_features"], 40 | "title": "If True, it will save features in the sorting extractor.", 41 | }, 42 | { 43 | "name": "verbose", 44 | "type": "bool", 45 | "value": class_default["verbose"], 46 | "default": class_default["verbose"], 47 | "title": "If True, output from SpikeInterface element is verbose when run.", 48 | }, 49 | ] -------------------------------------------------------------------------------- /spikely/elements/guiparams/preprocessor/bandpassfilter.py: -------------------------------------------------------------------------------- 1 | from spiketoolkit.preprocessing import bandpass_filter 2 | 3 | spif_init_func = bandpass_filter 4 | 5 | gui_params = [ 6 | { 7 | "name": "freq_min", 8 | "type": "float", 9 | "value": 300.0, 10 | "default": 300.0, 11 | "title": "High-pass cutoff frequency.", 12 | }, 13 | { 14 | "name": "freq_max", 15 | "type": "float", 16 | "value": 6_000, 17 | "default": 6_000, 18 | "title": "Low-pass cutoff frequency.", 19 | }, 20 | { 21 | "name": "freq_wid", 22 | "type": "float", 23 | "value": 1_000, 24 | "default": 1_000, 25 | "title": "Width of the filter (when type is 'fft')", 26 | }, 27 | { 28 | "name": "filter_type", 29 | "type": "str", 30 | "value": "fft", 31 | "default": "fft", 32 | "title": "fft or butter. The fft filter uses a kernel in the frequency domain." 33 | " The butter filter uses scipy butter and filtfilt functions.", 34 | }, 35 | { 36 | "name": "order", 37 | "type": "int", 38 | "value": 3, 39 | "default": 3, 40 | "title": "Order of the filter (if 'butter').", 41 | }, 42 | { 43 | "name": "chunk_size", 44 | "type": "int", 45 | "value": 3_000, 46 | "default": 3_000, 47 | "title": "If True, filtered traces are computed and cached all at once" 48 | " on disk in temp file.", 49 | }, 50 | { 51 | "name": "cache_to_file", 52 | "type": "bool", 53 | "value": False, 54 | "default": False, 55 | "title": "If True, filtered traces are computed and cached all at once on" 56 | " disk in temp file.", 57 | }, 58 | { 59 | "name": "cache_chunks", 60 | "type": "bool", 61 | "value": False, 62 | "default": False, 63 | "title": "If True then each chunk is cached in memory (in a dict).", 64 | }, 65 | ] 66 | -------------------------------------------------------------------------------- /spikely/elements/preprocessor.py: -------------------------------------------------------------------------------- 1 | # Python 2 | import pkg_resources 3 | 4 | # PyQt 5 | from PyQt5 import QtGui 6 | from PyQt5 import QtWidgets 7 | 8 | # spikely 9 | from . 
import spike_element as sp_spe 10 | import spiketoolkit as st 11 | from spikely.guiparams import get_gui_params, get_spif_init_func, gui_params_file_exists 12 | 13 | 14 | class Preprocessor(sp_spe.SpikeElement): 15 | @staticmethod 16 | def get_installed_spif_cls_list(): 17 | """Returns sorted list of installed spif classes having gui_params files.""" 18 | raw_list = st.preprocessing.preprocessinglist.installed_preprocessers_list 19 | 20 | # To be installed for Spikely purposes spif_class must have gui_params file 21 | cooked_list = [ 22 | spif_class 23 | for spif_class in raw_list 24 | if gui_params_file_exists( 25 | Preprocessor.get_display_name_from_spif_class(spif_class), 26 | "preprocessor", 27 | ) 28 | ] 29 | 30 | return sorted(cooked_list, key=lambda spif_class: spif_class.preprocessor_name) 31 | 32 | @staticmethod 33 | def get_display_name_from_spif_class(spif_class): 34 | return spif_class.preprocessor_name 35 | 36 | def __init__(self, spif_class): 37 | super().__init__(spif_class) 38 | 39 | self._display_name = self.get_display_name_from_spif_class(spif_class) 40 | 41 | if QtWidgets.QApplication.instance(): 42 | self._display_icon = QtGui.QIcon( 43 | pkg_resources.resource_filename("spikely.resources", "preprocessor.png") 44 | ) 45 | else: 46 | self._display_icon = None 47 | 48 | self._param_list = get_gui_params(self._display_name, "preprocessor") 49 | self._preprocessor_func = get_spif_init_func(self._display_name, "preprocessor") 50 | 51 | @property 52 | def display_name(self): 53 | return self._display_name 54 | 55 | @property 56 | def display_icon(self): 57 | return self._display_icon 58 | 59 | def run(self, payload, next_elem): 60 | spif_param_dict = {param["name"]: param["value"] for param in self.param_list} 61 | spif_param_dict["recording"] = payload 62 | pp = self._preprocessor_func(**spif_param_dict) 63 | return pp 64 | -------------------------------------------------------------------------------- /tests/test_config.py: -------------------------------------------------------------------------------- 1 | """Test functionality associated with config.py""" 2 | 3 | import random 4 | import sys 5 | 6 | import pytest 7 | from PyQt5 import QtWidgets 8 | from spikely import config 9 | from spikely.elements.extractor import Extractor 10 | from spikely.spikely_main import SpikelyMainWindow 11 | 12 | 13 | class TestCvt(): 14 | 15 | # @pytest.mark.smoke 16 | def test_cvt_elem_raises(self): 17 | """cvt_elem_to_dict() should raise exception on invalid parameter""" 18 | with pytest.raises(TypeError): 19 | config.cvt_elem_to_dict('not a SpikeElement') 20 | 21 | def test_cvt_dict_raises(self): 22 | """cvt_dict_to_elem() should raise exception on invalid parameter""" 23 | with pytest.raises(TypeError): 24 | config.cvt_dict_to_elem('not a SpikeElement') 25 | 26 | # @pytest.mark.skip(reason='Need clarity from developer') 27 | def test_cvt_methods(self): 28 | """Test the methods used to enable JSON coding/decoding. 29 | 30 | Before JSON encoding, a SpikeElement is converted into a data-based 31 | dictionary representation using cvt_elem_to_dict(). After JSON 32 | decoding, the resultant dictionary is re-instantiated as a SpikeElement 33 | using cvt_dict_to_elem(). 34 | 35 | This test runs through both conversions back-to-back and compares the 36 | resultant SpikeElement with the original SpikeElement to ensure 37 | identity. 
38 | 39 | """ 40 | cls_list = Extractor.get_installed_spif_cls_list() 41 | elem = Extractor( 42 | cls_list[random.randint(0, len(cls_list) - 1)] 43 | ) 44 | elem_dict = config.cvt_elem_to_dict(elem) 45 | new_elem = config.cvt_dict_to_elem(elem_dict) 46 | 47 | print(f'SpikeElement tested: {elem.display_name}') 48 | 49 | assert elem.__class__.__name__ == new_elem.__class__.__name__ 50 | assert elem.__module__ == new_elem.__module__ 51 | assert elem.spif_class.__name__ == new_elem.spif_class.__name__ 52 | assert elem.param_list == new_elem.param_list 53 | 54 | 55 | # @pytest.mark.smoke 56 | def test_get_main_window(): 57 | """Tests function that finds reference to app's main window.""" 58 | app = QtWidgets.QApplication(sys.argv) # noqa: F841 59 | win = SpikelyMainWindow() 60 | found_win = config.get_main_window() 61 | 62 | assert win is found_win 63 | -------------------------------------------------------------------------------- /spikely/operation_view.py: -------------------------------------------------------------------------------- 1 | # Implements MVC view associated with pipeline operations (run,clear, queue) 2 | 3 | from PyQt5 import QtWidgets 4 | 5 | from spikely import config 6 | 7 | 8 | class OperationView(QtWidgets.QGroupBox): 9 | 10 | def __init__(self, pipeline_model, parameter_model): 11 | super().__init__("Command Pipeline") # Group box label 12 | self._pipeline_model = pipeline_model 13 | self._parameter_model = parameter_model 14 | 15 | self._init_ui() 16 | 17 | def _init_ui(self): 18 | 19 | # Disable/Enable command buttons on empty/non-empty pipeline state 20 | self._pipeline_model.rowsInserted.connect(self._pipeline_changed) 21 | self._pipeline_model.rowsRemoved.connect(self._pipeline_changed) 22 | self._pipeline_model.modelReset.connect(self._pipeline_changed) 23 | 24 | self.setLayout(QtWidgets.QHBoxLayout()) 25 | 26 | self._run_btn = QtWidgets.QPushButton("Run") 27 | self._run_btn.setStatusTip('Command the pipeline to run - executes ' 28 | 'from top to bottom') # noqa: E128 29 | self._run_btn.setEnabled(False) 30 | self._run_btn.clicked.connect(self._pipeline_model.run) 31 | self.layout().addWidget(self._run_btn) 32 | 33 | self._clear_btn = QtWidgets.QPushButton("Clear") 34 | self._clear_btn.setStatusTip('Command the pipeline to clear - ' 35 | 'removes all elements') # noqa: E128 36 | self._clear_btn.setEnabled(False) 37 | self._clear_btn.clicked.connect(self._pipeline_model.clear) 38 | self.layout().addWidget(self._clear_btn) 39 | 40 | self._queue_btn = QtWidgets.QPushButton("Queue") 41 | self._queue_btn.setStatusTip('Adds pipeline to queue for ' 42 | 'batch processing - not implemented') # noqa: E128 43 | self._queue_btn.clicked.connect(self._queue_clicked) 44 | self.layout().addWidget(self._queue_btn) 45 | self._queue_btn.setEnabled(False) 46 | 47 | def _queue_clicked(self): 48 | config.get_main_window().statusBar().showMessage( 49 | "Queue not implemented", config.STATUS_MSG_TIMEOUT) 50 | 51 | def _pipeline_changed(self, parent=None, first=None, last=None): 52 | enabled = self._pipeline_model.rowCount(None) > 0 53 | self._run_btn.setEnabled(enabled) 54 | self._clear_btn.setEnabled(enabled) 55 | # self._queue_btn.setEnabled(enabled) 56 | -------------------------------------------------------------------------------- /spikely/elements/sorter.py: -------------------------------------------------------------------------------- 1 | import pkg_resources 2 | import spikesorters as ss 3 | from PyQt5 import QtGui, QtWidgets 4 | 5 | from spikely.guiparams import 
get_gui_params, gui_params_file_exists 6 | from . import spike_element as sp_spe 7 | 8 | 9 | class Sorter(sp_spe.SpikeElement): 10 | @staticmethod 11 | def get_installed_spif_cls_list(): 12 | """Returns sorted list of installed spif classes having gui_params files.""" 13 | raw_list = ss.installed_sorter_list 14 | 15 | # Filter out installed spif classes with no gui_params files 16 | cooked_list = [ 17 | spif_class for spif_class in raw_list 18 | if gui_params_file_exists( 19 | Sorter.get_display_name_from_spif_class(spif_class), "sorter" 20 | ) 21 | ] 22 | return sorted(cooked_list, key=lambda spif_class: spif_class.sorter_name) 23 | 24 | @staticmethod 25 | def get_display_name_from_spif_class(spif_class): 26 | return spif_class.sorter_name 27 | 28 | def __init__(self, spif_class): 29 | super().__init__(spif_class) 30 | 31 | self._display_name = self.get_display_name_from_spif_class(spif_class) 32 | 33 | if QtWidgets.QApplication.instance(): 34 | self._display_icon = QtGui.QIcon( 35 | pkg_resources.resource_filename("spikely.resources", "sorter.png") 36 | ) 37 | else: 38 | self._display_icon = None 39 | 40 | self._param_list = get_gui_params(self._display_name, "sorter") 41 | 42 | @property 43 | def display_name(self): 44 | return self._display_name 45 | 46 | @property 47 | def display_icon(self): 48 | return self._display_icon 49 | 50 | def run(self, payload, next_elem): 51 | 52 | base_param_list = { 53 | param["name"]: param["value"] 54 | for param in self._param_list 55 | if param.get("base_param") and bool(param.get("base_param")) 56 | } 57 | base_param_list["recording"] = payload 58 | sorter = self._spif_class(**base_param_list) 59 | 60 | sub_param_list = { 61 | param["name"]: param["value"] 62 | for param in self._param_list 63 | if not param.get("base_param") 64 | } 65 | sorter.set_params(**sub_param_list) 66 | 67 | sorter.run() 68 | 69 | output_folder_string = sub_param_list.get("output_folder") 70 | if output_folder_string is None: 71 | output_folder_string = "tmp_" + sorter.sorter_name 72 | 73 | return sorter.get_result_list(), output_folder_string, payload 74 | -------------------------------------------------------------------------------- /spikely/elements/guiparams/curator/thresholdisiviolations.py: -------------------------------------------------------------------------------- 1 | from spiketoolkit.validation.quality_metric_classes.parameter_dictionaries import ( 2 | get_validation_params, 3 | ) 4 | from spiketoolkit.curation import threshold_isi_violations 5 | from spiketoolkit.validation.quality_metric_classes.isi_violation import ISIViolation 6 | metric_default = ISIViolation.params 7 | spif_init_func = threshold_isi_violations 8 | class_default = get_validation_params() 9 | 10 | gui_params = [ 11 | { 12 | "name": "threshold", 13 | "type": "float", 14 | "title": "The threshold for the given metric.", 15 | }, 16 | { 17 | "name": "threshold_sign", 18 | "type": "str", 19 | "title": "If 'less', will threshold any metric less than the given threshold. \ 20 | If 'less_or_equal', will threshold any metric less than or equal to the given threshold. \ 21 | If 'greater', will threshold any metric greater than the given threshold. 
\ 22 | If 'greater_or_equal', will threshold any metric greater than or equal to the given threshold.", 23 | }, 24 | { 25 | "name": "duration_in_frames", 26 | "type": "int", 27 | "title": "Length of recording (in frames).", 28 | }, 29 | { 30 | "name": "isi_threshold", 31 | "type": "float", 32 | "value": metric_default["isi_threshold"], 33 | "default": metric_default["isi_threshold"], 34 | "title": "The isi threshold for calculating isi violations.", 35 | }, 36 | { 37 | "name": "min_isi", 38 | "type": "float", 39 | "value": metric_default["min_isi"], 40 | "default": metric_default["min_isi"], 41 | "title": "The minimum expected isi value.", 42 | }, 43 | { 44 | "name": "sampling_frequency", 45 | "type": "float", 46 | "value": None, 47 | "default": None, 48 | "title": "The sampling frequency of the result. If None, will check to see if sampling frequency is in sorting extractor.", 49 | }, 50 | # kwargs 51 | { 52 | "name": "save_property_or_features", 53 | "type": "bool", 54 | "value": class_default["save_property_or_features"], 55 | "default": class_default["save_property_or_features"], 56 | "title": "If True, it will save features in the sorting extractor.", 57 | }, 58 | { 59 | "name": "verbose", 60 | "type": "bool", 61 | "value": class_default["verbose"], 62 | "default": class_default["verbose"], 63 | "title": "If True, output from SpikeInterface element is verbose when run.", 64 | }, 65 | ] 66 | -------------------------------------------------------------------------------- /docs/source/workflow.rst: -------------------------------------------------------------------------------- 1 | Workflow 2 | ======== 3 | 4 | .. _SpikeInterface: https://github.com/SpikeInterface 5 | 6 | With a grounding in SpikeInterface_, and a grasp of spikely's element, 7 | parameter, and pipeline abstractions, the last piece of the puzzle to unlocking 8 | spikely's potential is understanding its workflow and associated user 9 | interface. Spikely was designed for specific use cases, and its workflow is 10 | optimized with those use cases in mind. 11 | 12 | At a high level spikely's workflow consists of creating a pipeline of elements, 13 | configuring the parameters associated with those elements, and finally, running 14 | the pipeline to pull extracellular data into the pipeline transforming it as it 15 | flows through to the end. 16 | 17 | .. image:: ../images/gui_annotated.png 18 | 19 | 1. **Constructing the Pipeline** - The user constructs a pipeline in spikely by 20 | choosing the element category (e.g., *Extractors*), choosing one 21 | of the installed elements within that category (e.g., 22 | *MdaRecordingExtractor*) and then adding that element to the pipeline using 23 | the "Add Element" button. Individual elements added to the pipeline can be 24 | moved up, moved down, or deleted as part of pipeline construction process. 25 | Note, there are pipeline policies enforced by spikely related to ordering 26 | and singularity that limit certain pipeline permutations. 27 | 28 | 2. **Configuring Element Parameters** - When an element is selected in the 29 | *Construct Pipeline* part of the UI that element's parameters are displayed 30 | in the *Configure Elements* part of the UI. Element parameters are specific 31 | to it, so a detailed explanation of an element's parameters will need to be 32 | gleaned from the corresponding SpikeInterface documentation. Clicking on 33 | the *Value* field for a parameter enables the user to edit it. 
Spikely does 34 | rudimentary type checking, but for the most part it is up to the user to 35 | ensure that a parameter value is valid. 36 | 37 | 3. **Commanding the Pipeline** - While the commands available to the user in 38 | the *Construct Pipeline' part of the UI operate on individual elements in 39 | the pipeline, *Command Pipeline* commands act on the pipeline as a whole. 40 | Currently, two operations are supported: *Run*, and *Clear*. *Clear* 41 | deletes all the elements in the pipeline enabling the user to quickly tear 42 | down the current pipeline before building up a new one. *Run* is where the 43 | magic happens, instantiating the pipeline and transforming the extracellular 44 | data as it flows from the source element (Extractor) to the sink 45 | element (Sorter or Exporter). 46 | 47 | .. tip:: 48 | The pipeline creation and parameter configuration steps can be shortened by 49 | saving and loading complete pipelines to/from files using the corresponding 50 | actions from spikely's *File Menu.* 51 | -------------------------------------------------------------------------------- /spikely/tool_bar.py: -------------------------------------------------------------------------------- 1 | from PyQt5 import QtWidgets 2 | 3 | from spikely import config 4 | 5 | # TODO: Implement instance (versus class) version of QFileDialog 6 | 7 | 8 | # Menu and Menu Action construction methods 9 | def create_tool_bar(main_win): 10 | tool_bar = QtWidgets.QToolBar(main_win) 11 | tool_bar.setMovable(False) 12 | tool_bar.setFloatable(False) 13 | 14 | folder_act = QtWidgets.QAction(QtWidgets.QFileIconProvider().icon( 15 | QtWidgets.QFileIconProvider.Folder), 'Select Folder', main_win) 16 | folder_act.setStatusTip('Choose folder and copy path into clipboard ' 17 | 'to enable pasting path into an element parameter field') # noqa: E128 18 | folder_act.triggered.connect(_perform_folder_action) 19 | tool_bar.addAction(folder_act) 20 | 21 | file_act = QtWidgets.QAction(QtWidgets.QFileIconProvider().icon( 22 | QtWidgets.QFileIconProvider.File), 'Select File', main_win) 23 | file_act.setStatusTip('Choose file and copy path into clipboard ' 24 | 'to enable pasting path into an element parameter field') # noqa: E128 25 | file_act.triggered.connect(_perform_file_action) 26 | tool_bar.addAction(file_act) 27 | 28 | return tool_bar 29 | 30 | 31 | def _perform_file_action() -> None: 32 | 33 | options = QtWidgets.QFileDialog.Options() 34 | options |= QtWidgets.QFileDialog.DontUseNativeDialog 35 | file_name, _filter = QtWidgets.QFileDialog.getOpenFileName( 36 | config.get_main_window(), caption='Copy File Name to Clipboard', 37 | options=options) 38 | 39 | if file_name: 40 | QtWidgets.QApplication.clipboard().setText(file_name) 41 | 42 | 43 | def _perform_folder_action() -> None: 44 | 45 | options = QtWidgets.QFileDialog.Options() 46 | options |= QtWidgets.QFileDialog.DontUseNativeDialog 47 | options |= QtWidgets.QFileDialog.ShowDirsOnly 48 | options |= QtWidgets.QFileDialog.DontResolveSymlinks 49 | folder_name = QtWidgets.QFileDialog.getExistingDirectory( 50 | config.get_main_window(), caption='Copy Folder Name to Clipboard', 51 | options=options) 52 | 53 | if folder_name: 54 | QtWidgets.QApplication.clipboard().setText(folder_name) 55 | 56 | 57 | ''' 58 | # tool_menu = menu_bar.addMenu(qw.QMenu('Tools', self)) 59 | # dir_action = qw.QAction('Pick Directory', self) 60 | # dir_action.setShortcut('Ctrl+D') 61 | # dir_action.setStatusTip('Copy directory path to clipboard') 62 | # 
dir_action.triggered.connect(self.do_dir_action) 63 | # tool_menu.addAction(dir_action) 64 | 65 | # def do_dir_action(self): 66 | # dlg = qw.QFileDialog(self) 67 | # dlg.setFileMode(dlg.Directory) 68 | # dlg.setViewMode(dlg.List) 69 | # dlg.setDirectory('.') 70 | # dlg.setOption(dlg.DontUseNativeDialog, True) 71 | # # dlg.setOption(dlg.ShowDirsOnly, True) 72 | # dlg.setOption(dlg.ReadOnly, True) 73 | # dlg.setOption(dlg.HideNameFilterDetails, True) 74 | 75 | # if (dlg.exec_()): 76 | # file_names = dlg.selectedFiles() 77 | # cb = qw.QApplication.clipboard() 78 | # cb.setText(file_names[0]) 79 | ''' 80 | -------------------------------------------------------------------------------- /spikely/elements/exporter.py: -------------------------------------------------------------------------------- 1 | import inspect 2 | import os 3 | 4 | import pkg_resources 5 | from PyQt5 import QtGui, QtWidgets 6 | 7 | from spikely.guiparams import get_gui_params, gui_params_file_exists 8 | from . import spike_element as sp_spe 9 | from spikely.elements.phy_exporter import PhyExporter 10 | 11 | 12 | class SortingExporter(sp_spe.SpikeElement): 13 | @staticmethod 14 | def get_installed_spif_cls_list(): 15 | """Returns sorted list of installed spif classes having gui_params files.""" 16 | raw_list = [PhyExporter] 17 | 18 | # To be installed for Spikely purposes spif_class must have gui_params file 19 | cooked_list = [ 20 | spif_class for spif_class in raw_list 21 | if gui_params_file_exists( 22 | SortingExporter.get_display_name_from_spif_class(spif_class), 23 | "exporter", 24 | ) 25 | ] 26 | return sorted(cooked_list, key=lambda spif_class: spif_class.__name__) 27 | 28 | @staticmethod 29 | def get_display_name_from_spif_class(spif_class): 30 | return spif_class.__name__ 31 | 32 | def __init__(self, spif_class): 33 | super().__init__(spif_class) 34 | 35 | if QtWidgets.QApplication.instance(): 36 | self._display_icon = QtGui.QIcon( 37 | pkg_resources.resource_filename( 38 | 'spikely.resources', 'exporter.png')) 39 | else: 40 | self._display_icon = None 41 | 42 | self._param_list = get_gui_params(self.spif_class.__name__, "exporter") 43 | 44 | @property 45 | def display_name(self): 46 | return self.get_display_name_from_spif_class(self.spif_class) 47 | 48 | @property 49 | def display_icon(self): 50 | return self._display_icon 51 | 52 | def run(self, payload, next_elem): 53 | sorting_list = payload[0] 54 | recording = payload[2] 55 | 56 | for i, sorting in enumerate(sorting_list): 57 | params_dict = {} 58 | params_dict['sorting'] = sorting 59 | 60 | if 'recording' in inspect.signature( 61 | self.spif_class.write_sorting).parameters: 62 | params_dict['recording'] = recording 63 | elif 'sampling_frequency' in inspect.signature( 64 | (self.spif_class.write_sorting)).parameters: 65 | params_dict['sampling_frequency'] = \ 66 | recording.get_sampling_frequency() 67 | 68 | for param in self.param_list: 69 | param_name = param['name'] 70 | param_value = param['value'] 71 | 72 | if param_name == 'save_path': 73 | if(len(sorting_list) == 1): 74 | param_value = param_value 75 | else: 76 | path, file_name = os.path.split(param_value) 77 | param_value = path + str(i) + '_' + file_name 78 | params_dict[param_name] = param_value 79 | 80 | print("Exporting to " + params_dict['save_path']) 81 | self.spif_class.write_sorting(**params_dict) 82 | print("Done exporting") -------------------------------------------------------------------------------- /spikely/elements/guiparams/sorter/kilosort.py: 
-------------------------------------------------------------------------------- 1 | from spikesorters.kilosort import KilosortSorter 2 | class_default = KilosortSorter._default_params 3 | 4 | gui_params = [ 5 | { 6 | "name": "output_folder", 7 | "type": "folder", 8 | "value": None, 9 | "default": None, 10 | "title": "Sorting output folder path.", 11 | "base_param": True, 12 | }, 13 | { 14 | "name": "verbose", 15 | "type": "bool", 16 | "value": False, 17 | "default": False, 18 | "title": "If True, output from SpikeInterface element is verbose when run.", 19 | "base_param": True, 20 | }, 21 | { 22 | "name": "grouping_property", 23 | "type": "str", 24 | "value": None, 25 | "default": None, 26 | "title": "Property name to be used for sorter output grouping.", 27 | "base_param": True, 28 | }, 29 | { 30 | "name": "parallel", 31 | "type": "bool", 32 | "value": False, 33 | "default": False, 34 | "title": "If grouping property specifed, sort property groups in parallel if True.", 35 | "base_param": True, 36 | }, 37 | { 38 | "name": "delete_output_folder", 39 | "type": "bool", 40 | "value": False, 41 | "default": False, 42 | "title": "Delete specified or default output folder on completion if True.", 43 | "base_param": True, 44 | }, 45 | # kilosort specific parameters 46 | { 47 | "name": "detect_threshold", 48 | "type": "float", 49 | "value": class_default["detect_threshold"], 50 | "default": class_default["detect_threshold"], 51 | "title": "Relative detection threshold", 52 | }, 53 | { 54 | "name": "car", 55 | "type": "bool", 56 | "value": class_default["car"], 57 | "default": class_default["car"], 58 | "title": "car" 59 | }, 60 | { 61 | "name": "useGPU", 62 | "type": "bool", 63 | "value": class_default["useGPU"], 64 | "default": class_default["useGPU"], 65 | "title": "If True, will use GPU", 66 | }, 67 | { 68 | "name": "freq_min", 69 | "type": "float", 70 | "value": class_default["freq_min"], 71 | "default": class_default["freq_min"], 72 | "title": "Low-pass frequency", 73 | }, 74 | { 75 | "name": "freq_max", 76 | "type": "float", 77 | "value": class_default["freq_max"], 78 | "default": class_default["freq_max"], 79 | "title": "High-pass frequency", 80 | }, 81 | { 82 | "name": "ntbuff", 83 | "type": "int", 84 | "value": class_default["ntbuff"], 85 | "default": class_default["ntbuff"], 86 | "title": "Samples of symmetrical buffer " "for whitening and spike detection", 87 | }, 88 | { 89 | "name": "Nfilt", 90 | "type": "int", 91 | "value": class_default["Nfilt"], 92 | "default": class_default["Nfilt"], 93 | "title": "Number of clusters to use " 94 | "(2-4 times more than Nchan, " 95 | "should be a multiple of 32)", 96 | }, 97 | { 98 | "name": "NT", 99 | "type": "int", 100 | "value": class_default["NT"], 101 | "default": class_default["NT"], 102 | "title": "Batch size (try decreasing if " 103 | "out of memory) for GPU should be " 104 | "multiple of 32 + ntbuff ", 105 | }, 106 | ] 107 | -------------------------------------------------------------------------------- /spikely/config.py: -------------------------------------------------------------------------------- 1 | # Constants and helper functions used by other spikely modules 2 | import importlib 3 | import sys 4 | 5 | from PyQt5 import QtWidgets 6 | 7 | from spikely import SpikeElement 8 | 9 | # Duration in milliseconds of timeout for temporary status messages 10 | STATUS_MSG_TIMEOUT = 3500 11 | 12 | # Identifier to get elem object from pipeline model data() 13 | ELEMENT_ROLE = 0x100 14 | 15 | # Column IDs used by QTableView to display elem 
parameter data 16 | PARAM_COL, TYPE_COL, VALUE_COL = 0, 1, 2 17 | 18 | 19 | def get_main_window() -> QtWidgets.QMainWindow: 20 | """Returns the app's main window for use as message box parent.""" 21 | for widget in QtWidgets.QApplication.instance().topLevelWidgets(): 22 | if isinstance(widget, QtWidgets.QMainWindow): 23 | return widget 24 | 25 | # It is a dark day if we end up here 26 | print("<>", 27 | file=sys.stderr) 28 | sys.exit() 29 | 30 | 31 | def cvt_elem_to_dict(elem: SpikeElement) -> dict: 32 | """Converts element to dictionary to enable JSON encoding. 33 | 34 | Elements cannot be directly json encoded, so this function stores and 35 | element's class and module names along its instance data in a dictionary. 36 | The dictionary can be encoded, and when decoded a new instance of the 37 | element can be instantiated. 38 | 39 | json encoded elements are saved to files to allow pipeline saves and loads, 40 | and also used to transfer pipelines as strings between processes. 41 | 42 | """ 43 | 44 | if not isinstance(elem, SpikeElement): 45 | raise TypeError("elem must be a SpikeElement object") 46 | 47 | elem_dict = { 48 | "element_cls_name": elem.__class__.__name__, 49 | "element_mod_name": elem.__module__, 50 | "spif_cls_name": elem.spif_class.__name__, 51 | "spif_mod_name": elem.spif_class.__module__, 52 | "param_list": elem.param_list, 53 | } 54 | 55 | return elem_dict 56 | 57 | 58 | def cvt_dict_to_elem(elem_dict: dict) -> SpikeElement: 59 | """ Converts an element dictionary into an element. 60 | 61 | Used as part of the json encode/decode process, this method "reconstitutes" 62 | an element from a dictionary of element instance data that had been json 63 | encoded. 64 | 65 | """ 66 | 67 | if not isinstance(elem_dict, dict): 68 | raise TypeError("elem_dict must be a dict object") 69 | 70 | elem_mod = importlib.import_module(elem_dict["element_mod_name"]) 71 | elem_cls = getattr(elem_mod, elem_dict["element_cls_name"]) 72 | spif_mod = importlib.import_module(elem_dict["spif_mod_name"]) 73 | spif_cls = getattr(spif_mod, elem_dict["spif_cls_name"]) 74 | 75 | if not spif_cls.installed: 76 | # Abort if spif_class is no longer installed on system 77 | raise ValueError( 78 | f"Cannot create {elem_dict['spif_cls_name']} - " 79 | f" not installed on users's system" 80 | ) 81 | 82 | elem = elem_cls(spif_cls) 83 | 84 | elem_param_name_set = {param["name"] for param in elem.param_list} 85 | 86 | dict_param_name_set = {param["name"] for param in elem_dict["param_list"]} 87 | 88 | if not dict_param_name_set.issubset(elem_param_name_set): 89 | # Abort if the old param list is not a subset of new one 90 | raise ValueError( 91 | f"Cannot create {elem_dict['spif_cls_name']} - " 92 | f" saved version incompatible with current version" 93 | ) 94 | 95 | elem.param_list = elem_dict["param_list"] 96 | 97 | return elem 98 | -------------------------------------------------------------------------------- /spikely/spikely_main.py: -------------------------------------------------------------------------------- 1 | """ Main module for spikely application 2 | 3 | This module is home to the entry point for the application, launch_spikely(), 4 | called when the user invokes spikely from the command line. In response, 5 | spikely_main instantiates the PyQt Application class (QApplication) and the 6 | associated widget hierarchy starting w/ SpikelyMainWindow at the top. 
Once 7 | these tasks are performed, execution shifts to the xxx_view.py and xxx_menu.py 8 | modules whose methods are called in response to user interactions with the UI. 9 | 10 | """ 11 | import sys 12 | 13 | import pkg_resources 14 | from PyQt5 import QtWidgets, QtCore, QtGui 15 | 16 | from spikely import ( 17 | PipelineModel, PipelineView, ParameterView, ParameterModel, 18 | OperationView, file_menu, help_menu, tool_bar, __version__) 19 | 20 | 21 | class SpikelyMainWindow(QtWidgets.QMainWindow): 22 | # Parent UI for application delegates to subwindow views/models 23 | def __init__(self): 24 | super().__init__() 25 | 26 | # Subwindows Views need underlying Model references 27 | self._parameter_model = ParameterModel() 28 | self._pipeline_model = PipelineModel(self._parameter_model) 29 | self._init_ui() 30 | 31 | def _init_ui(self): 32 | self.setWindowTitle("spikely") 33 | self.setGeometry(100, 100, 1280, 512) 34 | 35 | # Disable maximize button on title bar 36 | # self.setWindowFlags(self.windowFlags() 37 | # & ~QtCore.Qt.WindowMaximizeButtonHint) 38 | 39 | try: 40 | spikely_png_path = pkg_resources.resource_filename( 41 | "spikely.resources", "spikely.png") 42 | except KeyError: 43 | print( 44 | "<>", file=sys.stderr,) 46 | else: 47 | self.setWindowIcon(QtGui.QIcon(spikely_png_path)) 48 | 49 | self.statusBar().addPermanentWidget( 50 | QtWidgets.QLabel("Version " + __version__)) 51 | 52 | menu_bar = self.menuBar() 53 | menu_bar.addMenu(file_menu.create_file_menu( 54 | self, self._pipeline_model)) 55 | menu_bar.addMenu(help_menu.create_help_menu(self)) 56 | 57 | bar = tool_bar.create_tool_bar(self) 58 | self.addToolBar(QtCore.Qt.RightToolBarArea, bar) 59 | 60 | main_frame = QtWidgets.QFrame() 61 | self.setCentralWidget(main_frame) 62 | main_frame.setLayout(QtWidgets.QVBoxLayout()) 63 | 64 | pipe_param_splitter = QtWidgets.QSplitter() 65 | pipe_param_splitter.setChildrenCollapsible(False) 66 | 67 | # Subwindows for element pipeline and selected element parameters 68 | pipe_param_splitter.addWidget( 69 | PipelineView(self._pipeline_model, self._parameter_model)) 70 | pipe_param_splitter.addWidget( 71 | ParameterView(self._pipeline_model, self._parameter_model)) 72 | pipe_param_splitter.setSizes([256, 1024]) 73 | main_frame.layout().addWidget(pipe_param_splitter) 74 | 75 | # Subwindow at bottom for pipeline operations (run, clear, queue) 76 | main_frame.layout().addWidget( 77 | OperationView(self._pipeline_model, self._parameter_model)) 78 | 79 | # Stretches pipeline and parameter widgets down as window grows 80 | main_frame.layout().setStretch(0, 1) 81 | 82 | 83 | def launch_spikely(): 84 | app = QtWidgets.QApplication(sys.argv) 85 | win = SpikelyMainWindow() 86 | win.show() 87 | sys.exit(app.exec_()) 88 | 89 | 90 | if __name__ == "__main__": 91 | launch_spikely() 92 | -------------------------------------------------------------------------------- /spikely/elements/guiparams/curator/thresholdamplitudecutoffs.py: -------------------------------------------------------------------------------- 1 | from spiketoolkit.validation.quality_metric_classes.parameter_dictionaries import ( 2 | get_validation_params, 3 | ) 4 | from spiketoolkit.curation import threshold_amplitude_cutoffs 5 | class_default = get_validation_params() 6 | spif_init_func = threshold_amplitude_cutoffs 7 | 8 | gui_params = [ 9 | { 10 | "name": "threshold", 11 | "type": "float", 12 | "title": "The threshold for the given metric.", 13 | }, 14 | { 15 | "name": "threshold_sign", 16 | "type": "str", 17 | "title": "If 'less', will 
threshold any metric less than the given threshold. \ 18 | If 'less_or_equal', will threshold any metric less than or equal to the given threshold. \ 19 | If 'greater', will threshold any metric greater than the given threshold. \ 20 | If 'greater_or_equal', will threshold any metric greater than or equal to the given threshold.", 21 | }, 22 | #kwargs 23 | { 24 | "name": "method", 25 | "type": "str", 26 | "value": class_default["method"], 27 | "default": class_default["method"], 28 | "title": "If 'absolute' (default), absolute amplitudes in uV are returned. \ 29 | If 'relative', amplitudes are returned as ratios between waveform amplitudes and template amplitudes.", 30 | }, 31 | { 32 | "name": "peak", 33 | "type": "str", 34 | "value": class_default["peak"], 35 | "default": class_default["peak"], 36 | "title": "Whether the maximum channel is found among negative peaks ('neg'), positive peaks ('pos'), or \ 37 | both ('both' - default).", 38 | }, 39 | { 40 | "name": "frames_before", 41 | "type": "int", 42 | "value": class_default["frames_before"], 43 | "default": class_default["frames_before"], 44 | "title": "Frames before peak to compute amplitude.", 45 | }, 46 | { 47 | "name": "frames_after", 48 | "type": "int", 49 | "value": class_default["frames_after"], 50 | "default": class_default["frames_after"], 51 | "title": "Frames after peak to compute amplitude.", 52 | }, 53 | { 54 | "name": "apply_filter", 55 | "type": "bool", 56 | "value": class_default["apply_filter"], 57 | "default": class_default["apply_filter"], 58 | "title": "If True, recording is bandpass-filtered.", 59 | }, 60 | { 61 | "name": "freq_min", 62 | "type": "float", 63 | "value": class_default["freq_min"], 64 | "default": class_default["freq_min"], 65 | "title": "High-pass frequency for optional filter (default 300 Hz).", 66 | }, 67 | { 68 | "name": "freq_max", 69 | "type": "float", 70 | "value": class_default["freq_max"], 71 | "default": class_default["freq_max"], 72 | "title": "Low-pass frequency for optional filter (default 6000 Hz).", 73 | }, 74 | { 75 | "name": "save_property_or_features", 76 | "type": "bool", 77 | "value": class_default["save_property_or_features"], 78 | "default": class_default["save_property_or_features"], 79 | "title": "If True, it will save features in the sorting extractor.", 80 | }, 81 | { 82 | "name": "recompute_info", 83 | "type": "bool", 84 | "value": class_default["recompute_info"], 85 | "default": class_default["recompute_info"], 86 | "title": "If True, waveforms are recomputed.", 87 | }, 88 | { 89 | "name": "max_spikes_per_unit", 90 | "type": "int", 91 | "value": class_default["max_spikes_per_unit"], 92 | "default": class_default["max_spikes_per_unit"], 93 | "title": "The maximum number of spikes to extract per unit.", 94 | }, 95 | ] -------------------------------------------------------------------------------- /spikely/elements/guiparams/sorter/klusta.py: -------------------------------------------------------------------------------- 1 | from spikesorters.klusta import KlustaSorter 2 | class_default = KlustaSorter._default_params 3 | 4 | gui_params = [ 5 | { 6 | "name": "output_folder", 7 | "type": "folder", 8 | "value": None, 9 | "default": None, 10 | "title": "Sorting output folder path.", 11 | "base_param": True, 12 | }, 13 | { 14 | "name": "verbose", 15 | "type": "bool", 16 | "value": False, 17 | "default": False, 18 | "title": "If True, output from SpikeInterface element is verbose when run.", 19 | "base_param": True, 20 | }, 21 | { 22 | "name": "grouping_property", 23 | "type": 
"str", 24 | "value": None, 25 | "default": None, 26 | "title": "Property name to be used for sorter output grouping.", 27 | "base_param": True, 28 | }, 29 | { 30 | "name": "parallel", 31 | "type": "bool", 32 | "value": False, 33 | "default": False, 34 | "title": "If grouping property specifed, sort property groups in parallel if True.", 35 | "base_param": True, 36 | }, 37 | { 38 | "name": "delete_output_folder", 39 | "type": "bool", 40 | "value": False, 41 | "default": False, 42 | "title": "Delete specified or default output folder on completion if True.", 43 | "base_param": True, 44 | }, 45 | # Klusta specific parameters 46 | { 47 | "name": "adjacency_radius", 48 | "type": "float", 49 | "value": class_default["adjacency_radius"], 50 | "default": class_default["adjacency_radius"], 51 | "title": "Adjacency radius (microns).", 52 | }, 53 | { 54 | "name": "threshold_strong_std_factor", 55 | "type": "int", 56 | "value": class_default["threshold_strong_std_factor"], 57 | "default": class_default["threshold_strong_std_factor"], 58 | "title": "Threshold strong std factor.", 59 | }, 60 | { 61 | "name": "threshold_weak_std_factor", 62 | "type": "int", 63 | "value": class_default["threshold_weak_std_factor"], 64 | "default": class_default["threshold_weak_std_factor"], 65 | "title": "Threshold weak std factor.", 66 | }, 67 | { 68 | "name": "detect_sign", 69 | "type": "int", 70 | "value": class_default["detect_sign"], 71 | "default": class_default["detect_sign"], 72 | "title": "Use -1, 0, or 1, depending on the sign of the spikes in the recording.", 73 | }, 74 | { 75 | "name": "extract_s_before", 76 | "type": "int", 77 | "value": class_default["extract_s_before"], 78 | "default": class_default["extract_s_before"], 79 | "title": "Frames to extract before.", 80 | }, 81 | { 82 | "name": "extract_s_after", 83 | "type": "int", 84 | "value": class_default["extract_s_after"], 85 | "default": class_default["extract_s_after"], 86 | "title": "Frames to extract after.", 87 | }, 88 | { 89 | "name": "n_features_per_channel", 90 | "type": "int", 91 | "value": class_default["n_features_per_channel"], 92 | "default": class_default["n_features_per_channel"], 93 | "title": "Number of features per channel.", 94 | }, 95 | { 96 | "name": "pca_n_waveforms_max", 97 | "type": "int", 98 | "value": class_default["pca_n_waveforms_max"], 99 | "default": class_default["pca_n_waveforms_max"], 100 | "title": "Max number of waveforms for PCA.", 101 | }, 102 | { 103 | "name": "num_starting_clusters", 104 | "type": "int", 105 | "value": class_default["num_starting_clusters"], 106 | "default": class_default["num_starting_clusters"], 107 | "title": "Starting number of clusters.", 108 | }, 109 | ] 110 | -------------------------------------------------------------------------------- /spikely/elements/guiparams/sorter/tridesclous.py: -------------------------------------------------------------------------------- 1 | from spikesorters.tridesclous import TridesclousSorter 2 | class_default = TridesclousSorter._default_params 3 | 4 | gui_params = [ 5 | { 6 | "name": "output_folder", 7 | "type": "folder", 8 | "value": None, 9 | "default": None, 10 | "title": "Sorting output folder path.", 11 | "base_param": True, 12 | }, 13 | { 14 | "name": "verbose", 15 | "type": "bool", 16 | "value": False, 17 | "default": False, 18 | "title": "If True, output from SpikeInterface element is verbose when run.", 19 | "base_param": True, 20 | }, 21 | { 22 | "name": "grouping_property", 23 | "type": "str", 24 | "value": None, 25 | "default": None, 26 | 
"title": "Property name to be used for sorter output grouping.", 27 | "base_param": True, 28 | }, 29 | { 30 | "name": "parallel", 31 | "type": "bool", 32 | "value": False, 33 | "default": False, 34 | "title": "If grouping property specifed, sort property groups in parallel if True.", 35 | "base_param": True, 36 | }, 37 | { 38 | "name": "delete_output_folder", 39 | "type": "bool", 40 | "value": False, 41 | "default": False, 42 | "title": "Delete specified or default output folder on completion if True.", 43 | "base_param": True, 44 | }, 45 | # Tridesclous specific parameters 46 | { 47 | "name": "highpass_freq", 48 | "type": "float", 49 | "value": class_default["highpass_freq"], 50 | "default": class_default["highpass_freq"], 51 | "title": "High-pass frequency", 52 | }, 53 | { 54 | "name": "lowpass_freq", 55 | "type": "float", 56 | "value": class_default["lowpass_freq"], 57 | "default": class_default["lowpass_freq"], 58 | "title": "Low-pass frequency", 59 | }, 60 | { 61 | "name": "peak_sign", 62 | "type": "str", 63 | "value": class_default["peak_sign"], 64 | "default": class_default["peak_sign"], 65 | "title": "Negative or positive peak sign", 66 | }, 67 | { 68 | "name": "relative_threshold", 69 | "type": "float", 70 | "value": class_default["relative_threshold"], 71 | "default": class_default["relative_threshold"], 72 | "title": "Relative threshold for detection", 73 | }, 74 | { 75 | "name": "peak_span_ms", 76 | "type": "float", 77 | "value": class_default["peak_span_ms"], 78 | "default": class_default["peak_span_ms"], 79 | "title": "Time span of peaks for detected events (ms)", 80 | }, 81 | { 82 | "name": "wf_left_ms", 83 | "type": "float", 84 | "value": class_default["wf_left_ms"], 85 | "default": class_default["wf_left_ms"], 86 | "title": "Waveform length before peak (ms)", 87 | }, 88 | { 89 | "name": "wf_right_ms", 90 | "type": "float", 91 | "value": class_default["wf_right_ms"], 92 | "default": class_default["wf_right_ms"], 93 | "title": "Waveform length after peak (ms)", 94 | }, 95 | { 96 | "name": "feature_method", 97 | "type": "str", 98 | "value": class_default["feature_method"], 99 | "default": class_default["feature_method"], 100 | "title": "Feature Extraction Method", 101 | }, 102 | { 103 | "name": "cluster_method", 104 | "type": "str", 105 | "value": class_default["cluster_method"], 106 | "default": class_default["cluster_method"], 107 | "title": "Clustering Method", 108 | }, 109 | { 110 | "name": "clean_catalogue_gui", 111 | "type": "bool", 112 | "value": class_default["clean_catalogue_gui"], 113 | "default": class_default["clean_catalogue_gui"], 114 | "title": "Clean catalogue with an interactive window", 115 | }, 116 | ] 117 | -------------------------------------------------------------------------------- /spikely/pipeman/pipeman.py: -------------------------------------------------------------------------------- 1 | """Standalone application launched by spikely to manage pipeline execution. 2 | 3 | To support parallelism each spikely pipeline is executed in its own process 4 | (QProcess). pipeman is launched by spikely as a detached process passing in 5 | the pipeline in as a json encoded string in sys.argv[1]. In turn, pipeman 6 | creates a child process of its own, that actually executes the pipeline 7 | (piperun). 8 | 9 | Creating a child process of its own allows pipeman to catch and display the 10 | stdout/stderr of piperun, and allows the user to kill piperun without killing 11 | pipeman. 
12 | 13 | """ 14 | import sys 15 | import locale 16 | 17 | import pkg_resources 18 | from PyQt5 import QtCore, QtWidgets, QtGui 19 | 20 | from spikely import version, config 21 | 22 | 23 | class MainWindow(QtWidgets.QMainWindow): 24 | def __init__(self): 25 | super().__init__() 26 | 27 | self.process = QtCore.QProcess(self) 28 | self.process.setProcessChannelMode(QtCore.QProcess.MergedChannels) 29 | self.process.readyReadStandardOutput.connect(self.stdout_ready) 30 | 31 | self._init_ui() 32 | 33 | piperun_path = pkg_resources.resource_filename( 34 | 'spikely.pipeman', 'piperun.py') 35 | self.process.start('python', ['-u', piperun_path, sys.argv[1]]) 36 | 37 | if self.process.state() == QtCore.QProcess.Starting \ 38 | or self.process.state() == QtCore.QProcess.Running: 39 | self.cancel_btn.setDisabled(False) 40 | self.process.finished.connect(self._process_finished) 41 | 42 | def _process_finished(self, exit_status): 43 | self.cancel_btn.setDisabled(True) 44 | 45 | def _init_ui(self): 46 | self.setWindowTitle("spikely pipeline manager") 47 | self.resize(640, 384) 48 | 49 | self.statusBar().addPermanentWidget( 50 | QtWidgets.QLabel("Version " + version.__version__)) 51 | 52 | main_frame = QtWidgets.QFrame() 53 | self.setCentralWidget(main_frame) 54 | main_frame.setLayout(QtWidgets.QVBoxLayout()) 55 | 56 | self.output = QtWidgets.QTextEdit(self) 57 | self.output.setReadOnly(True) 58 | self.output.setAcceptRichText(False) 59 | self.output.setStyleSheet( 60 | "QTextEdit { color: green; background-color: black; }") 61 | self.output.setWordWrapMode(QtGui.QTextOption.NoWrap) 62 | main_frame.layout().addWidget(self.output) 63 | 64 | self.cancel_btn = QtWidgets.QPushButton('Terminate Process') 65 | btn_box = QtWidgets.QHBoxLayout() 66 | btn_box.addStretch(1) 67 | btn_box.addWidget(self.cancel_btn) 68 | btn_box.addStretch(1) 69 | self.cancel_btn.setDisabled(True) 70 | self.cancel_btn.clicked.connect(self.process.kill) 71 | main_frame.layout().addLayout(btn_box) 72 | 73 | def append(self, text): 74 | self.output.append(text) 75 | 76 | def stdout_ready(self): 77 | text = bytearray(self.process.readAllStandardOutput())\ 78 | .decode(locale.getdefaultlocale()[1]) 79 | 80 | self.append(text) 81 | 82 | def closeEvent(self, event): 83 | """Overrides QMainWindow method for confirmation before exiting""" 84 | if self.process.state() == QtCore.QProcess.Running: 85 | reply = QtWidgets.QMessageBox.question( 86 | config.get_main_window(), 'Exiting', 'Exiting will terminate' 87 | ' pipeline execution. 
Are you sure you want to exit?', 88 | QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No) 89 | 90 | if reply == QtWidgets.QMessageBox.No: 91 | event.ignore() 92 | 93 | 94 | def main(): 95 | app = QtWidgets.QApplication(sys.argv) 96 | win = MainWindow() 97 | win.show() 98 | sys.exit(app.exec_()) 99 | 100 | # config.get_main_window().statusBar().showMessage( 101 | # "Error Message", config.STATUS_MSG_TIMEOUT) 102 | 103 | 104 | if __name__ == '__main__': 105 | main() 106 | -------------------------------------------------------------------------------- /spikely/elements/guiparams/sorter/spykingcircus.py: -------------------------------------------------------------------------------- 1 | from spikesorters.spyking_circus import SpykingcircusSorter 2 | class_default = SpykingcircusSorter._default_params 3 | 4 | gui_params = [ 5 | { 6 | "name": "output_folder", 7 | "type": "folder", 8 | "value": None, 9 | "default": None, 10 | "title": "Sorting output folder path.", 11 | "base_param": True, 12 | }, 13 | { 14 | "name": "verbose", 15 | "type": "bool", 16 | "value": False, 17 | "default": False, 18 | "title": "If True, output from SpikeInterface element is verbose when run.", 19 | "base_param": True, 20 | }, 21 | { 22 | "name": "grouping_property", 23 | "type": "str", 24 | "value": None, 25 | "default": None, 26 | "title": "Property name to be used for sorter output grouping.", 27 | "base_param": True, 28 | }, 29 | { 30 | "name": "parallel", 31 | "type": "bool", 32 | "value": False, 33 | "default": False, 34 | "title": "If grouping property specifed, sort property groups in parallel if True.", 35 | "base_param": True, 36 | }, 37 | { 38 | "name": "delete_output_folder", 39 | "type": "bool", 40 | "value": False, 41 | "default": False, 42 | "title": "Delete specified or default output folder on completion if True.", 43 | "base_param": True, 44 | }, 45 | # spyking_circus specific parameters 46 | { 47 | "name": "detect_sign", 48 | "type": "int", 49 | "value": class_default["detect_sign"], 50 | "default": class_default["detect_sign"], 51 | "title": "Use -1, 0, or 1, depending on the sign of the spikes in the recording", 52 | }, 53 | { 54 | "name": "adjacency_radius", 55 | "type": "float", 56 | "value": class_default["adjacency_radius"], 57 | "default": class_default["adjacency_radius"], 58 | "title": "Distance (in microns) of the adjacency radius", 59 | }, 60 | { 61 | "name": "detect_threshold", 62 | "type": "float", 63 | "value": class_default["detect_threshold"], 64 | "default": class_default["detect_threshold"], 65 | "title": "Threshold for detection", 66 | }, 67 | { 68 | "name": "template_width_ms", 69 | "type": "float", 70 | "value": class_default["template_width_ms"], 71 | "default": class_default["template_width_ms"], 72 | "title": "Width of templates (ms)", 73 | }, 74 | { 75 | "name": "filter", 76 | "type": "bool", 77 | "value": class_default["filter"], 78 | "default": class_default["filter"], 79 | "title": "If True, the recording will be filtered", 80 | }, 81 | { 82 | "name": "merge_spikes", 83 | "type": "bool", 84 | "value": class_default["merge_spikes"], 85 | "default": class_default["merge_spikes"], 86 | "title": "If True, spikes will be merged at the end.", 87 | }, 88 | { 89 | "name": "auto_merge", 90 | "type": "float", 91 | "value": class_default["auto_merge"], 92 | "default": class_default["auto_merge"], 93 | "title": "Auto-merge value", 94 | }, 95 | { 96 | "name": "num_workers", 97 | "type": "int", 98 | "value": class_default["num_workers"], 99 | "default": class_default["num_workers"], 
100 | "title": "Number of parallel workers", 101 | }, 102 | { 103 | "name": "whitening_max_elts", 104 | "type": "int", 105 | "value": class_default["whitening_max_elts"], 106 | "default": class_default["whitening_max_elts"], 107 | "title": "Related to subsampling", 108 | }, 109 | { 110 | "name": "clustering_max_elts", 111 | "type": "int", 112 | "value": class_default["clustering_max_elts"], 113 | "default": class_default["clustering_max_elts"], 114 | "title": "Related to subsampling", 115 | }, 116 | ] 117 | -------------------------------------------------------------------------------- /spikely/elements/curator.py: -------------------------------------------------------------------------------- 1 | 2 | import inspect 3 | import shutil 4 | from pathlib import Path 5 | 6 | from PyQt5 import QtGui 7 | from PyQt5 import QtWidgets 8 | import pkg_resources 9 | 10 | from . import spike_element as sp_spe 11 | import spikeextractors as se 12 | import spiketoolkit as st 13 | from spikely.guiparams import get_gui_params, get_spif_init_func, gui_params_file_exists 14 | 15 | 16 | class Curator(sp_spe.SpikeElement): 17 | @staticmethod 18 | def get_installed_spif_cls_list(): 19 | """Returns sorted list of installed spif classes having gui_params files.""" 20 | raw_list = st.validation.curation_list.installed_curation_list 21 | 22 | # To be installed for Spikely purposes spif_class must also have gui_params file 23 | cooked_list = [ 24 | spif_class for spif_class in raw_list 25 | if gui_params_file_exists( 26 | Curator.get_display_name_from_spif_class(spif_class), "curator" 27 | ) 28 | ] 29 | 30 | return sorted(cooked_list, key=lambda spif_class: spif_class.curator_name) 31 | 32 | @staticmethod 33 | def get_display_name_from_spif_class(spif_class): 34 | 35 | display_name = spif_class.curator_name 36 | if not display_name.endswith("s"): 37 | display_name += "s" 38 | 39 | return display_name 40 | 41 | def __init__(self, spif_class): 42 | super().__init__(spif_class) 43 | 44 | self._display_name = self.get_display_name_from_spif_class(spif_class) 45 | 46 | if QtWidgets.QApplication.instance(): 47 | self._display_icon = QtGui.QIcon( 48 | pkg_resources.resource_filename( 49 | 'spikely.resources', 'curator.png')) 50 | else: 51 | self._display_icon = None 52 | 53 | self._param_list = get_gui_params(self._display_name, "curator") 54 | self._curation_func = get_spif_init_func(self._display_name, "curator") 55 | 56 | @property 57 | def display_name(self): 58 | return self._display_name 59 | 60 | @property 61 | def display_icon(self): 62 | return self._display_icon 63 | 64 | def run(self, payload, next_element): 65 | 66 | sorting_list = payload[0] 67 | output_folder_str = payload[1] 68 | recording = payload[2] 69 | 70 | if not next_element: 71 | output_folder_str_new = output_folder_str + '_curated' 72 | output_folder = Path(output_folder_str_new).absolute() 73 | if output_folder.is_dir(): 74 | shutil.rmtree(output_folder) 75 | output_folder.mkdir() 76 | 77 | curated_sorting_list = [] 78 | for i, sorting in enumerate(sorting_list): 79 | params_dict = {} 80 | params_dict['sorting'] = sorting 81 | 82 | if 'recording' in \ 83 | inspect.signature(self._curation_func).parameters: 84 | params_dict['recording'] = recording 85 | elif 'sampling_frequency' in \ 86 | inspect.signature(self._curation_func).parameters: 87 | params_dict['sampling_frequency'] = \ 88 | recording.get_sampling_frequency() 89 | 90 | for param in self.param_list: 91 | param_name = param['name'] 92 | param_value = param['value'] 93 | 
params_dict[param_name] = param_value 94 | 95 | curated_sorting = self._curation_func(**params_dict) 96 | curated_sorting_list.append(curated_sorting) 97 | 98 | if not next_element: 99 | print("No Exporter chosen. Defaulting to " 100 | "the .npz format.") 101 | if len(sorting_list) == 1: 102 | file_name = 'curated_output.npz' 103 | else: 104 | file_name = str(i) + '_curated_output.npz' 105 | 106 | se.NpzSortingExtractor.write_sorting(curated_sorting, 107 | output_folder / file_name) # noqa: E128 108 | print("Saved curated results to " + str(output_folder)) 109 | 110 | return curated_sorting_list, output_folder_str, recording 111 | -------------------------------------------------------------------------------- /spikely/elements/extractor.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import pkg_resources 3 | import spikeextractors as se 4 | from PyQt5 import QtGui, QtWidgets 5 | 6 | from spikely.guiparams import get_gui_params, gui_params_file_exists 7 | from . import spike_element as sp_spe 8 | 9 | 10 | class Extractor(sp_spe.SpikeElement): 11 | @staticmethod 12 | def get_installed_spif_cls_list(): 13 | """Returns sorted list of installed spif classes having gui_params files.""" 14 | raw_list = se.installed_recording_extractor_list 15 | 16 | # To be installed for Spikely purposes spif_class must have gui_params file 17 | cooked_list = [ 18 | spif_class 19 | for spif_class in raw_list 20 | if gui_params_file_exists( 21 | Extractor.get_display_name_from_spif_class(spif_class), 22 | "extractor", 23 | ) 24 | ] 25 | return sorted(cooked_list, key=lambda spif_class: spif_class.extractor_name) 26 | 27 | @staticmethod 28 | def get_display_name_from_spif_class(spif_class): 29 | return spif_class.extractor_name 30 | 31 | def __init__(self, spif_class): 32 | super().__init__(spif_class) 33 | 34 | self._display_name = self.get_display_name_from_spif_class(spif_class) 35 | 36 | if QtWidgets.QApplication.instance(): 37 | self._display_icon = QtGui.QIcon( 38 | pkg_resources.resource_filename("spikely.resources", "extractor.png") 39 | ) 40 | else: 41 | self._display_icon = None 42 | 43 | self._param_list = get_gui_params(self._display_name, "extractor") 44 | 45 | probe_path_dict = { 46 | "name": "probe_path", 47 | "type": "file", 48 | "value": None, 49 | "default": None, 50 | "title": "Path to probe file (.csv or .prb)", 51 | } 52 | self._param_list.append(probe_path_dict) 53 | 54 | self._param_list.append( 55 | { 56 | "name": "channel_map", 57 | "type": "int_list", 58 | "value": None, 59 | "default": None, 60 | "title": "List of channel ids for underlying channels to be be mapped. " 61 | "If None, then uses default ordering.", 62 | } 63 | ) 64 | 65 | self._param_list.append( 66 | { 67 | "name": "channel_groups", 68 | "type": "int_list", 69 | "value": None, 70 | "default": None, 71 | "title": "List of channel groups of the underlying channels. 
" 72 | "If None, then no groups given.", 73 | } 74 | ) 75 | 76 | @property 77 | def display_name(self): 78 | return self._display_name 79 | 80 | @property 81 | def display_icon(self): 82 | return self._display_icon 83 | 84 | def run(self, payload, next_elem): 85 | spif_params_dict = {} 86 | probe_file = None 87 | for param in self.param_list: 88 | if param["name"] == "probe_path": 89 | probe_file = param["value"] 90 | elif param["name"] == "channel_map": 91 | channel_map = param["value"] 92 | elif param["name"] == "channel_groups": 93 | channel_groups = param["value"] 94 | else: 95 | spif_params_dict[param["name"]] = param["value"] 96 | 97 | recording = self._spif_class(**spif_params_dict) 98 | 99 | if probe_file: 100 | recording = recording.load_probe_file( 101 | probe_file, channel_map, channel_groups 102 | ) 103 | else: 104 | if channel_map: 105 | assert np.all( 106 | [chan in channel_map for chan in recording.get_channel_ids()] 107 | ), ( 108 | "all channel_ids in " 109 | "'channel_map' must be in recording channel ids" 110 | ) 111 | recording = se.SubRecordingExtractor(recording, channel_ids=channel_map) 112 | if channel_groups: 113 | recording.set_channel_groups( 114 | recording.get_channel_ids(), channel_groups 115 | ) 116 | 117 | return recording 118 | -------------------------------------------------------------------------------- /spikely/elements/guiparams/sorter/mountainsort4.py: -------------------------------------------------------------------------------- 1 | from spikesorters.mountainsort4 import Mountainsort4Sorter 2 | class_default = Mountainsort4Sorter._default_params 3 | 4 | gui_params = [ 5 | { 6 | "name": "output_folder", 7 | "type": "folder", 8 | "value": None, 9 | "default": None, 10 | "title": "Sorting output folder path.", 11 | "base_param": True, 12 | }, 13 | { 14 | "name": "verbose", 15 | "type": "bool", 16 | "value": False, 17 | "default": False, 18 | "title": "If True, output from SpikeInterface element is verbose when run.", 19 | "base_param": True, 20 | }, 21 | { 22 | "name": "grouping_property", 23 | "type": "str", 24 | "value": None, 25 | "default": None, 26 | "title": "Property name to be used for sorter output grouping.", 27 | "base_param": True, 28 | }, 29 | { 30 | "name": "parallel", 31 | "type": "bool", 32 | "value": False, 33 | "default": False, 34 | "title": "If grouping property specifed, sort property groups in parallel if True.", 35 | "base_param": True, 36 | }, 37 | { 38 | "name": "delete_output_folder", 39 | "type": "bool", 40 | "value": False, 41 | "default": False, 42 | "title": "Delete specified or default output folder on completion if True.", 43 | "base_param": True, 44 | }, 45 | # Ms4 specific parameters 46 | { 47 | "name": "adjacency_radius", 48 | "type": "int", 49 | "value": class_default["adjacency_radius"], 50 | "default": class_default["adjacency_radius"], 51 | "title": "Use -1, 0, or 1, depending on the sign of the spikes in the recording", 52 | }, 53 | { 54 | "name": "detect_sign", 55 | "type": "float", 56 | "value": class_default["detect_sign"], 57 | "default": class_default["detect_sign"], 58 | "title": "Use -1 to include all channels in every neighborhood", 59 | }, 60 | { 61 | "name": "freq_min", 62 | "type": "float", 63 | "value": class_default["freq_min"], 64 | "default": class_default["freq_min"], 65 | "title": "High-pass frequency", 66 | }, 67 | { 68 | "name": "freq_max", 69 | "type": "float", 70 | "value": class_default["freq_max"], 71 | "default": class_default["freq_max"], 72 | "title": "Low-pass frequency", 73 | }, 74 | 
{ 75 | "name": "filter", 76 | "type": "bool", 77 | "value": class_default["filter"], 78 | "default": class_default["filter"], 79 | "title": "Bandpass filters the recording if True", 80 | }, 81 | { 82 | "name": "whiten", 83 | "type": "bool", 84 | "value": class_default["whiten"], 85 | "default": class_default["whiten"], 86 | "title": "Whitens the recording if True", 87 | }, 88 | { 89 | "name": "curation", 90 | "type": "bool", 91 | "value": class_default["curation"], 92 | "default": class_default["curation"], 93 | "title": "Curates the output if True", 94 | }, 95 | { 96 | "name": "num_workers", 97 | "type": "int", 98 | "value": class_default["num_workers"], 99 | "default": class_default["num_workers"], 100 | "title": "Number of parallel workers", 101 | }, 102 | { 103 | "name": "clip_size", 104 | "type": "int", 105 | "value": class_default["clip_size"], 106 | "default": class_default["clip_size"], 107 | "title": "Clip size", 108 | }, 109 | { 110 | "name": "detect_threshold", 111 | "type": "float", 112 | "value": class_default["detect_threshold"], 113 | "default": class_default["detect_threshold"], 114 | "title": "Threshold for detection", 115 | }, 116 | { 117 | "name": "detect_interval", 118 | "type": "int", 119 | "value": class_default["detect_interval"], 120 | "default": class_default["detect_interval"], 121 | "title": "Minimum number of timepoints between events detected on the same channel", 122 | }, 123 | { 124 | "name": "noise_overlap_threshold", 125 | "type": "float", 126 | "value": class_default["noise_overlap_threshold"], 127 | "default": class_default["noise_overlap_threshold"], 128 | "title": "Use None for no automated curation", 129 | }, 130 | ] 131 | -------------------------------------------------------------------------------- /docs/source/overview.rst: -------------------------------------------------------------------------------- 1 | 2 | Overview 3 | ======== 4 | 5 | .. _SpikeInterface: https://github.com/SpikeInterface 6 | 7 | SpikeInterface_ is a powerful Python-based extracellular data processing 8 | framework supporting a broad range of features and functions. Its power and 9 | breadth come at a price, however, and that price is complexity. But for those 10 | well-versed in Python programming and needing full control over the 11 | extracellular data processing process, working directly with SpikeInterface is 12 | the way to go. 13 | 14 | Spikely, on the other hand, is for users who want to take advantage of 15 | SpikeInterface without having to program in Python. Spikely provides a GUI on 16 | top of SpikeInterface optimized for a specific use case: pipelining 17 | extracelluar data from a source to a sink while enabling one or more data 18 | transformations along the way. In exchange for its ease of use and efficiency, 19 | spikely sacrifices some of SpikeInterface's power and breadth. Spikely 20 | therefore complements, rather than replaces, SpikeInterface. 21 | 22 | .. tip:: 23 | One of scenarios we had in mind when making spikely was enabling bulk 24 | extracelluar data processing in the lab. After data collection, running a 25 | standard series of transformations may be the next step prior to a deeper 26 | dive on the data. In that case using spikely may improve overall 27 | throughput. 28 | 29 | Because of the close relationship between spikely and SpikeInterface, it is 30 | important for the spikely user to have a grounding in the concepts behind 31 | SpikeInterface. 
If you are not already familiar with SpikeInterface, a good 32 | place to get started is its `online documentation 33 | `_ 34 | 35 | In addition to being familiar with SpikeInterface, taking full advantage of 36 | spikely requires an understanding of a few key concepts specific to it: 37 | 38 | * **Element** - An element in spikely corresponds to an entity exposed by 39 | a data processing node in SpikeInterface. To be used in spikely, the 40 | underlying SpikeInterface entity must already be installed on the user's 41 | system. For information on installing SpikeInterface entities like Spike 42 | Sorters, check out `this document 43 | `_. 44 | 45 | Elements in spikely consist of: 46 | 47 | * *Extractors* - Extractors read raw extracellular data 48 | from files, and make those data available to downstream elements in the 49 | pipeline. Extractor names correspond to the raw extracellular 50 | data format they support. Spikely requires one, and only one, Recording 51 | Extractor per pipeline. 52 | 53 | * *Pre-Processors* - Pre-Processors transform data sourced into the 54 | pipeline by the Extractor before it is sent to the Sorter. 55 | Pre-Processors are optional. Spikely supports multiple Pre-Processors 56 | per pipeline between the Extractor and the Sorter. 57 | 58 | * *Sorters* - Spike sorting is a big part of SpikeInterface, and spikely's 59 | Sorters correspond closely to spike sorters in SpikeInterface. Spikely 60 | requires the presence of one, and only one, Sorter in the pipeline. 61 | Sorters write their results out to a file (unless specified not to), 62 | allowing a Sorter to act as a terminating sink in a spikely pipeline. 63 | 64 | * *Curators* - Curators, also known as post-processors, automatically 65 | curate sorted data produced by the Sorter and output them downstream to 66 | either another Curator or to a pipeline-terminating Exporter. Curators 67 | are optional. Spikely supports multiple Curators per pipeline. 68 | 69 | * *Exporters* - Exporters act as data sinks, transforming 70 | sorted datasets into different formats. Exporters are optional, and 71 | spikely only supports a single Exporter per pipeline. 72 | 73 | * **Parameter** - Most elements have one or more parameters associated with 74 | them that can be edited by the user in spikely to customize the behavior of 75 | that element during the execution of a pipeline. Parameters are element 76 | specific, and some familiarity with the proxied node in SpikeInterface is 77 | required to correctly configure an element. 78 | 79 | * **Pipeline** - The user organizes elements in spikely in a series where 80 | extracellular data "flows" from the first element in the Pipeline to the last 81 | when the pipeline is run. Pipelines, and their associated parameterized 82 | elements, can be saved for future use, thereby enabling greater efficiency and 83 | repeatability. 
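To ground the Element and Pipeline concepts above, here is a rough sketch of the plain SpikeInterface code that a minimal spikely pipeline (Extractor -> Pre-Processor -> Sorter -> Curator -> Exporter) corresponds to. It is illustrative only: it assumes the split spikeextractors/spiketoolkit/spikesorters packages that the modules in this repository import, the file path, layout, and parameter values are placeholders, and exact call signatures may differ across SpikeInterface versions. Spikely assembles and runs an equivalent sequence from the elements configured in its GUI.

import spikeextractors as se
import spiketoolkit as st
import spikesorters as ss

# Extractor: read raw extracellular data (placeholder path and layout)
recording = se.BinDatRecordingExtractor(
    'recording.dat', sampling_frequency=30000, numchan=64, dtype='int16')

# Pre-Processor: bandpass filter the recording before sorting
recording_f = st.preprocessing.bandpass_filter(
    recording, freq_min=300, freq_max=6000)

# Sorter: run MountainSort4 (the sorter itself must be installed)
sorting = ss.run_mountainsort4(recording_f, output_folder='ms4_output')

# Curator: drop units whose SNR falls below a threshold
sorting_c = st.curation.threshold_snrs(
    sorting, recording_f, threshold=5.0, threshold_sign='less')

# Exporter: write the curated results in Phy format
st.postprocessing.export_to_phy(
    recording_f, sorting_c, output_folder='phy_output')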
84 | -------------------------------------------------------------------------------- /spikely/elements/guiparams/sorter/kilosort2.py: -------------------------------------------------------------------------------- 1 | from spikesorters.kilosort2 import Kilosort2Sorter 2 | class_default = Kilosort2Sorter._default_params 3 | 4 | gui_params = [ 5 | { 6 | "name": "output_folder", 7 | "type": "folder", 8 | "value": None, 9 | "default": None, 10 | "title": "Sorting output folder path.", 11 | "base_param": True, 12 | }, 13 | { 14 | "name": "verbose", 15 | "type": "bool", 16 | "value": False, 17 | "default": False, 18 | "title": "If True, output from SpikeInterface element is verbose when run.", 19 | "base_param": True, 20 | }, 21 | { 22 | "name": "grouping_property", 23 | "type": "str", 24 | "value": None, 25 | "default": None, 26 | "title": "Property name to be used for sorter output grouping.", 27 | "base_param": True, 28 | }, 29 | { 30 | "name": "parallel", 31 | "type": "bool", 32 | "value": False, 33 | "default": False, 34 | "title": "If grouping property specifed, sort property groups in parallel if True.", 35 | "base_param": True, 36 | }, 37 | { 38 | "name": "delete_output_folder", 39 | "type": "bool", 40 | "value": False, 41 | "default": False, 42 | "title": "Delete specified or default output folder on completion if True.", 43 | "base_param": True, 44 | }, 45 | # kilosort2 specific parameters 46 | { 47 | "name": "detect_threshold", 48 | "type": "float", 49 | "value": class_default["detect_threshold"], 50 | "default": class_default["detect_threshold"], 51 | "title": "Relative detection threshold", 52 | }, 53 | { 54 | "name": "projection_threshold", 55 | "type": "list of float", 56 | "value": class_default["projection_threshold"], 57 | "default": class_default["projection_threshold"], 58 | "title": "Threshold on projections", 59 | }, 60 | { 61 | "name": "preclust_threshold", 62 | "type": "float", 63 | "value": class_default["preclust_threshold"], 64 | "default": class_default["preclust_threshold"], 65 | "title": "Threshold crossings for pre-clustering", 66 | }, 67 | { 68 | "name": "car", 69 | "type": "bool", 70 | "value": class_default["car"], 71 | "default": class_default["car"], 72 | "title": "car"}, 73 | { 74 | "name": "minFR", 75 | "type": "float", 76 | "value": class_default["minFR"], 77 | "default": class_default["minFR"], 78 | "title": "Minimum FR to keep templates", 79 | }, 80 | { 81 | "name": "minfr_goodchannels", 82 | "type": "float", 83 | "value": class_default["minfr_goodchannels"], 84 | "default": class_default["minfr_goodchannels"], 85 | "title": "Minimum FR to consider " "a channel 'good'", 86 | }, 87 | { 88 | "name": "freq_min", 89 | "type": "float", 90 | "value": class_default["freq_min"], 91 | "default": class_default["freq_min"], 92 | "title": "Low-pass frequency", 93 | }, 94 | { 95 | "name": "sigmaMask", 96 | "type": "int", 97 | "value": class_default["sigmaMask"], 98 | "default": class_default["sigmaMask"], 99 | "title": "Sigma mask", 100 | }, 101 | { 102 | "name": "nPCs", 103 | "type": "int", 104 | "value": class_default["nPCs"], 105 | "default": class_default["nPCs"], 106 | "title": "Number of principal components", 107 | }, 108 | { 109 | "name": "ntbuff", 110 | "type": "int", 111 | "value": class_default["ntbuff"], 112 | "default": class_default["ntbuff"], 113 | "title": "Samples of symmetrical buffer for whitening and spike detection", 114 | }, 115 | { 116 | "name": "nfilt_factor", 117 | "type": "int", 118 | "value": class_default["nfilt_factor"], 119 | "default": 
class_default["nfilt_factor"], 120 | "title": "Max number of clusters per good channel (even temporary ones)", 121 | }, 122 | { 123 | "name": "NT", 124 | "type": "int", 125 | "value": class_default["NT"], 126 | "default": class_default["NT"], 127 | "title": "Batch size (try decreasing if out of memory) for GPU should be" 128 | " multiple of 32 + ntbuff.", 129 | }, 130 | { 131 | "name": "keep_good_only", 132 | "type": "bool", 133 | "value": class_default["keep_good_only"], 134 | "default": class_default["keep_good_only"], 135 | "title": "If true, will attempt to bad units.", 136 | }, 137 | ] 138 | -------------------------------------------------------------------------------- /spikely/elements/guiparams/sorter/hdsort.py: -------------------------------------------------------------------------------- 1 | from spikesorters.hdsort import HDSortSorter 2 | class_default = HDSortSorter._default_params 3 | 4 | gui_params = [ 5 | { 6 | "name": "output_folder", 7 | "type": "folder", 8 | "value": None, 9 | "default": None, 10 | "title": "Sorting output folder path.", 11 | "base_param": True, 12 | }, 13 | { 14 | "name": "verbose", 15 | "type": "bool", 16 | "value": False, 17 | "default": False, 18 | "title": "If True, output from SpikeInterface element is verbose when run.", 19 | "base_param": True, 20 | }, 21 | { 22 | "name": "grouping_property", 23 | "type": "str", 24 | "value": None, 25 | "default": None, 26 | "title": "Property name to be used for sorter output grouping.", 27 | "base_param": True, 28 | }, 29 | { 30 | "name": "parallel", 31 | "type": "bool", 32 | "value": False, 33 | "default": False, 34 | "title": "If grouping property specifed, sort property groups in parallel if True.", 35 | "base_param": True, 36 | }, 37 | { 38 | "name": "delete_output_folder", 39 | "type": "bool", 40 | "value": False, 41 | "default": False, 42 | "title": "Delete specified or default output folder on completion if True.", 43 | "base_param": True, 44 | }, 45 | # hdsort specific parameters 46 | { 47 | "name": "detect_threshold", 48 | "type": "float", 49 | "value": class_default["detect_threshold"], 50 | "default": class_default["detect_threshold"], 51 | "title": "Relative detection threshold.", 52 | }, 53 | { 54 | "name": "detect_sign", 55 | "type": "int", 56 | "value": class_default["detect_sign"], 57 | "default": class_default["detect_sign"], 58 | "title": "Use -1, or 1, depending on the sign of the spikes in the recording.", 59 | }, 60 | { 61 | "name": "filter", 62 | "type": "bool", 63 | "value": class_default["filter"], 64 | "default": class_default["filter"], 65 | "title": "If True, the recordings are filtered.", 66 | }, 67 | { 68 | "name": "parfor", 69 | "type": "bool", 70 | "value": class_default["parfor"], 71 | "default": class_default["parfor"], 72 | "title": "Use parallel processing.", 73 | }, 74 | { 75 | "name": "hpf", 76 | "type": "float", 77 | "value": class_default["hpf"], 78 | "default": class_default["hpf"], 79 | "title": "high-pass cutoff frequency.", 80 | }, 81 | { 82 | "name": "lpf", 83 | "type": "float", 84 | "value": class_default["lpf"], 85 | "default": class_default["lpf"], 86 | "title": "low-pass cutoff frequency.", 87 | }, 88 | { 89 | "name": "max_el_per_group", 90 | "type": "int", 91 | "value": class_default["max_el_per_group"], 92 | "default": class_default["max_el_per_group"], 93 | "title": "Maximum number of electrodes per local electrode group.", 94 | }, 95 | { 96 | "name": "min_el_per_group", 97 | "type": "int", 98 | "value": class_default["min_el_per_group"], 99 | "default": 
class_default["min_el_per_group"], 100 | "title": "Minimum number of electrodes per local electrode group.", 101 | }, 102 | { 103 | "name": "add_if_nearer_than", 104 | "type": "float", 105 | "value": class_default["add_if_nearer_than"], 106 | "default": class_default["add_if_nearer_than"], 107 | "title": "Add to electrode group if distance is closer than this value.", 108 | }, 109 | { 110 | "name": "max_distance_within_group", 111 | "type": "float", 112 | "value": class_default["max_distance_within_group"], 113 | "default": class_default["max_distance_within_group"], 114 | "title": "Maximum distance within group.", 115 | }, 116 | { 117 | "name": "n_pc_dims", 118 | "type": "int", 119 | "value": class_default["n_pc_dims"], 120 | "default": class_default["n_pc_dims"], 121 | "title": "Number of pc dimensions.", 122 | }, 123 | { 124 | "name": "chunk_size", 125 | "type": "int", 126 | "value": class_default["chunk_size"], 127 | "default": class_default["chunk_size"], 128 | "title": "Chunk size in number of samples.", 129 | }, 130 | { 131 | "name": "loop_mode", 132 | "type": "str", 133 | "value": class_default["loop_mode"], 134 | "default": class_default["loop_mode"], 135 | "title": "Loop mode for sorting ('local_parfor', 'loop', 'grid').", 136 | }, 137 | ] 138 | -------------------------------------------------------------------------------- /spikely/elements/guiparams/sorter/waveclus.py: -------------------------------------------------------------------------------- 1 | from spikesorters.waveclus import WaveClusSorter 2 | class_default = WaveClusSorter._default_params 3 | 4 | gui_params = [ 5 | { 6 | "name": "output_folder", 7 | "type": "folder", 8 | "value": None, 9 | "default": None, 10 | "title": "Sorting output folder path.", 11 | "base_param": True, 12 | }, 13 | { 14 | "name": "verbose", 15 | "type": "bool", 16 | "value": False, 17 | "default": False, 18 | "title": "If True, output from SpikeInterface element is verbose when run.", 19 | "base_param": True, 20 | }, 21 | { 22 | "name": "grouping_property", 23 | "type": "str", 24 | "value": None, 25 | "default": None, 26 | "title": "Property name to be used for sorter output grouping.", 27 | "base_param": True, 28 | }, 29 | { 30 | "name": "parallel", 31 | "type": "bool", 32 | "value": False, 33 | "default": False, 34 | "title": "If grouping property specifed, sort property groups in parallel if True.", 35 | "base_param": True, 36 | }, 37 | { 38 | "name": "delete_output_folder", 39 | "type": "bool", 40 | "value": False, 41 | "default": False, 42 | "title": "Delete specified or default output folder on completion if True.", 43 | "base_param": True, 44 | }, 45 | # waveclus specific parameters 46 | { 47 | "name": "detect_threshold", 48 | "type": "float", 49 | "value": 5.0, 50 | "default": 5.0, 51 | "title": "Relative detection threshold", 52 | }, 53 | { 54 | "name": "detect_sign", 55 | "type": "int", 56 | "value": -1, 57 | "default": -1, 58 | "title": "Use -1, 0, or 1, depending on the sign of the spikes in the recording", 59 | }, 60 | { 61 | "name": "feature_type", 62 | "type": "str", 63 | "value": "wav", 64 | "default": "wav", 65 | "title": "Feature type ('wav', 'pca')", 66 | }, 67 | { 68 | "name": "scales", 69 | "type": "int", 70 | "value": 4, 71 | "default": 4, 72 | "title": "Number of wavelet scales", 73 | }, 74 | { 75 | "name": "min_clus", 76 | "type": "int", 77 | "value": 20, 78 | "default": 20, 79 | "title": "Minimum size of a cluster", 80 | }, 81 | { 82 | "name": "maxtemp", 83 | "type": "float", 84 | "value": 0.251, 85 | "default": 
0.251, 86 | "title": "Maximum temperature for SPC", 87 | }, 88 | { 89 | "name": "template_sdnum", 90 | "type": "int", 91 | "value": 3, 92 | "default": 3, 93 | "title": "Max radius of cluster in std devs", 94 | }, 95 | { 96 | "name": "enable_detect_filter", 97 | "type": "bool", 98 | "value": class_default["enable_detect_filter"], 99 | "default": class_default["enable_detect_filter"], 100 | "title": "If true, enable detect filter.", 101 | }, 102 | { 103 | "name": "enable_sort_filter", 104 | "type": "bool", 105 | "value": class_default["enable_sort_filter"], 106 | "default": class_default["enable_sort_filter"], 107 | "title": "If true, enable sort filter.", 108 | }, 109 | { 110 | "name": "detect_filter_fmin", 111 | "type": "float", 112 | "value": class_default["detect_filter_fmin"], 113 | "default": class_default["detect_filter_fmin"], 114 | "title": "Minimum detection filter frequency.", 115 | }, 116 | { 117 | "name": "detect_filter_fmax", 118 | "type": "float", 119 | "value": class_default["detect_filter_fmax"], 120 | "default": class_default["detect_filter_fmax"], 121 | "title": "Maximum detection filter frequency.", 122 | }, 123 | { 124 | "name": "detect_filter_order", 125 | "type": "int", 126 | "value": class_default["detect_filter_order"], 127 | "default": class_default["detect_filter_order"], 128 | "title": "Order of the filter.", 129 | }, 130 | { 131 | "name": "sort_filter_fmin", 132 | "type": "float", 133 | "value": class_default["sort_filter_fmin"], 134 | "default": class_default["sort_filter_fmin"], 135 | "title": "Sort filter frequency minimum.", 136 | }, 137 | { 138 | "name": "sort_filter_fmax", 139 | "type": "float", 140 | "value": class_default["sort_filter_fmax"], 141 | "default": class_default["sort_filter_fmax"], 142 | "title": "Sort filter frequency maximum.", 143 | }, 144 | { 145 | "name": "sort_filter_order", 146 | "type": "int", 147 | "value": class_default["sort_filter_order"], 148 | "default": class_default["sort_filter_order"], 149 | "title": "Order of the filter.", 150 | }, 151 | ] 152 | -------------------------------------------------------------------------------- /spikely/file_menu.py: -------------------------------------------------------------------------------- 1 | """Constructs File menu/actions, and executes user selected actions 2 | 3 | During application initialization in spikely_main a menu bar is created as part 4 | of the main window. create_file_menu is called as part of that process to 5 | populate the menu bar with a drop down menu of file related actions. 6 | 7 | The core functionality within this module is support for saving and loading the 8 | elements in the pipeline as JSON files. In collaboration with config.py, which 9 | has methods to convert individual elements to/from serializable dictionary 10 | objects, the _perform load/save methods in this module operate on the pipeline 11 | as a whole. 
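As a point of reference for the save/load behavior described in this docstring: a saved pipeline is a JSON array with one dictionary per element. The exact keys in each dictionary are produced by config.cvt_elem_to_dict() and are not shown in this module, so the sketch below only inspects the file generically; 'pipeline.json' is a placeholder for whatever name the user chose in the save dialog.

import json

# Inspect a pipeline previously saved via spikely's File > Save Pipeline action
with open('pipeline.json') as json_file:
    elem_dict_list = json.load(json_file)  # one dict per pipeline element

print(f'{len(elem_dict_list)} element(s) in the saved pipeline')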
12 | 13 | """ 14 | 15 | import json 16 | 17 | from PyQt5 import QtWidgets 18 | 19 | from spikely import PipelineModel, config 20 | 21 | # Enables access to element list for both input and output 22 | _pipeline_model = None 23 | 24 | 25 | # Menu and Menu Action construction methods 26 | def create_file_menu(main_window: QtWidgets.QMainWindow, 27 | pipeline_model: PipelineModel) -> QtWidgets.QMenu: 28 | global _pipeline_model 29 | _pipeline_model = pipeline_model 30 | 31 | file_menu = QtWidgets.QMenu('&File', main_window) 32 | 33 | file_menu.addAction(_action( 34 | 'Load Pipeline', 'Load pipeline from JSON file', _perform_load_action)) 35 | file_menu.addAction(_action( 36 | 'Save Pipeline', 'Save pipeline to JSON file', _perform_save_action)) 37 | file_menu.addSeparator() 38 | file_menu.addAction(_action( 39 | 'Share Output', 'Use terminal for all pipeline output', 40 | _toggle_share_state, checkable=True, checked=True)) 41 | file_menu.addSeparator() 42 | file_menu.addAction(_action( 43 | 'Exit', 'Terminate the application.', 44 | QtWidgets.QApplication.closeAllWindows)) 45 | 46 | return file_menu 47 | 48 | 49 | def _action(name, tip, slot, shortcut=None, checkable=False, checked=None): 50 | action = QtWidgets.QAction(name, config.get_main_window(), 51 | checkable=checkable) 52 | action.setStatusTip(tip) 53 | action.triggered.connect(slot) 54 | if shortcut is not None: 55 | action.setShortcut(shortcut) 56 | if checkable and checked is not None: 57 | action.setChecked(checked) 58 | 59 | return action 60 | 61 | 62 | # Menu Action execution methods 63 | 64 | def _toggle_share_state(checked): 65 | _pipeline_model.share_output = checked 66 | 67 | 68 | def _perform_load_action() -> None: 69 | """Loads current pipeline with elements from a previously saved JSON file 70 | 71 | Launches a file dialog box that allows the user to select a previously 72 | saved JSON file, attempts to decode it, and if successful adds the elements 73 | to the current pipeline replacing any elements extant in the pipeline. 74 | 75 | config.cvt_dict_to_elem() does most of the hard work, and throws exceptions 76 | if the element is no longer installed, or is no longer compatible with the 77 | version saved previously. 
78 | 79 | """ 80 | global _pipeline_model 81 | 82 | options = QtWidgets.QFileDialog.Options() 83 | options |= QtWidgets.QFileDialog.DontUseNativeDialog 84 | file_name, _filter = QtWidgets.QFileDialog.getOpenFileName( 85 | config.get_main_window(), caption='Open File', 86 | filter='JSON (*.json)', options=options) 87 | 88 | if file_name: 89 | _pipeline_model.clear() 90 | try: 91 | with open(file_name, 'r') as json_file: 92 | elem_dict_list = json.load(json_file) 93 | 94 | for elem_dict in elem_dict_list: 95 | elem = config.cvt_dict_to_elem(elem_dict) 96 | _pipeline_model.add_element(elem) 97 | 98 | except (json.decoder.JSONDecodeError, ValueError) as e: 99 | QtWidgets.QMessageBox.warning( 100 | config.get_main_window(), 'JSON File Load Failure', 101 | f'Failed to load {file_name}: {str(e)}') 102 | _pipeline_model.clear() 103 | 104 | except Exception as e: 105 | QtWidgets.QMessageBox.warning( 106 | config.get_main_window(), 'JSON File Load Failure', 107 | f'Unspecified exception: {str(e)}') 108 | _pipeline_model.clear() 109 | 110 | 111 | def _perform_save_action() -> None: 112 | """Saves current pipeline of elements to a user specified JSON file 113 | 114 | Launches a file dialog box that allows the user to specifiy a JSON file, 115 | attempts to encode the element pipeline, and if successful writes out the 116 | decoded element pipelin in JSON format to file. 117 | 118 | config.cvt_elem_to_dict() does most of the hard work, by extracting class 119 | and parameter data from the element that allows the element to be JSON 120 | encoded and reinstantiated later when the filed is read back in by 121 | _perform_load_action(). 122 | 123 | """ 124 | global _pipeline_model 125 | 126 | # TODO: _element_list is supposed to be private - use data() instead? 127 | element_list = _pipeline_model._element_list 128 | 129 | if element_list: 130 | options = QtWidgets.QFileDialog.Options() 131 | options |= QtWidgets.QFileDialog.DontUseNativeDialog 132 | file_name, _filter = QtWidgets.QFileDialog.getSaveFileName( 133 | config.get_main_window(), caption='Save File', 134 | filter='JSON (*.json)', options=options) 135 | 136 | if file_name: 137 | if not file_name.lower().endswith('.json'): 138 | file_name = file_name + '.json' 139 | elem_dict_list = [config.cvt_elem_to_dict(element) 140 | for element in element_list] 141 | 142 | with open(file_name, 'w') as json_file: 143 | json.dump(elem_dict_list, json_file) 144 | -------------------------------------------------------------------------------- /docs/source/conf.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # 3 | # Configuration file for the Sphinx documentation builder. 4 | # 5 | # This file does only contain a selection of the most common options. For a 6 | # full list see the documentation: 7 | # http://www.sphinx-doc.org/en/master/config 8 | 9 | # -- Path setup -------------------------------------------------------------- 10 | 11 | # If extensions (or modules to document with autodoc) are in another directory, 12 | # add these directories to sys.path here. If the directory is relative to the 13 | # documentation root, use os.path.abspath to make it absolute, like shown here. 
14 | # 15 | # import os 16 | # import sys 17 | # sys.path.insert(0, os.path.abspath('.')) 18 | 19 | 20 | # -- Project information ----------------------------------------------------- 21 | 22 | project = 'spikely' 23 | copyright = '2019, Roger Hurwitz, Cole Hurwitz' 24 | author = 'Roger Hurwitz, Cole Hurwitz' 25 | 26 | # The short X.Y version 27 | version = '' 28 | # The full version, including alpha/beta/rc tags 29 | release = '0.0.1' 30 | 31 | 32 | # -- General configuration --------------------------------------------------- 33 | 34 | # If your documentation needs a minimal Sphinx version, state it here. 35 | # 36 | # needs_sphinx = '1.0' 37 | 38 | # Add any Sphinx extension module names here, as strings. They can be 39 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 40 | # ones. 41 | extensions = [ 42 | 'sphinx.ext.autodoc', 43 | 'sphinx.ext.doctest', 44 | ] 45 | 46 | # Add any paths that contain templates here, relative to this directory. 47 | templates_path = ['_templates'] 48 | 49 | # The suffix(es) of source filenames. 50 | # You can specify multiple suffix as a list of string: 51 | # 52 | # source_suffix = ['.rst', '.md'] 53 | source_suffix = '.rst' 54 | 55 | # The master toctree document. 56 | master_doc = 'index' 57 | 58 | # The language for content autogenerated by Sphinx. Refer to documentation 59 | # for a list of supported languages. 60 | # 61 | # This is also used if you do content translation via gettext catalogs. 62 | # Usually you set "language" from the command line for these cases. 63 | language = None 64 | 65 | # List of patterns, relative to source directory, that match files and 66 | # directories to ignore when looking for source files. 67 | # This pattern also affects html_static_path and html_extra_path. 68 | exclude_patterns = [] 69 | 70 | # The name of the Pygments (syntax highlighting) style to use. 71 | pygments_style = None 72 | 73 | 74 | # -- Options for HTML output ------------------------------------------------- 75 | 76 | # The theme to use for HTML and HTML Help pages. See the documentation for 77 | # a list of builtin themes. 78 | # 79 | try: 80 | import sphinx_rtd_theme 81 | 82 | html_theme = "sphinx_rtd_theme" 83 | html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] 84 | except ImportError: 85 | print("RTD theme not installed, using default") 86 | html_theme = 'alabaster' 87 | # Theme options are theme-specific and customize the look and feel of a theme 88 | # further. For a list of options available for each theme, see the 89 | # documentation. 90 | # 91 | # html_theme_options = {} 92 | 93 | # Add any paths that contain custom static files (such as style sheets) here, 94 | # relative to this directory. They are copied after the builtin static files, 95 | # so a file named "default.css" will overwrite the builtin "default.css". 96 | html_static_path = ['_static'] 97 | 98 | # Custom sidebar templates, must be a dictionary that maps document names 99 | # to template names. 100 | # 101 | # The default sidebars (for documents that don't match any pattern) are 102 | # defined by theme itself. Builtin themes are using these templates by 103 | # default: ``['localtoc.html', 'relations.html', 'sourcelink.html', 104 | # 'searchbox.html']``. 105 | # 106 | # html_sidebars = {} 107 | 108 | 109 | # -- Options for HTMLHelp output --------------------------------------------- 110 | 111 | # Output file base name for HTML help builder. 
112 | htmlhelp_basename = 'spikelydoc' 113 | 114 | 115 | # -- Options for LaTeX output ------------------------------------------------ 116 | 117 | latex_elements = { 118 | # The paper size ('letterpaper' or 'a4paper'). 119 | # 120 | # 'papersize': 'letterpaper', 121 | 122 | # The font size ('10pt', '11pt' or '12pt'). 123 | # 124 | # 'pointsize': '10pt', 125 | 126 | # Additional stuff for the LaTeX preamble. 127 | # 128 | # 'preamble': '', 129 | 130 | # Latex figure (float) alignment 131 | # 132 | # 'figure_align': 'htbp', 133 | } 134 | 135 | # Grouping the document tree into LaTeX files. List of tuples 136 | # (source start file, target name, title, 137 | # author, documentclass [howto, manual, or own class]). 138 | latex_documents = [ 139 | (master_doc, 'spikely.tex', 'spikely Documentation', 140 | 'Roger Hurwitz, Cole Hurwitz', 'manual'), 141 | ] 142 | 143 | 144 | # -- Options for manual page output ------------------------------------------ 145 | 146 | # One entry per manual page. List of tuples 147 | # (source start file, name, description, authors, manual section). 148 | man_pages = [ 149 | (master_doc, 'spikely', 'spikely Documentation', 150 | [author], 1) 151 | ] 152 | 153 | 154 | # -- Options for Texinfo output ---------------------------------------------- 155 | 156 | # Grouping the document tree into Texinfo files. List of tuples 157 | # (source start file, target name, title, author, 158 | # dir menu entry, description, category) 159 | texinfo_documents = [ 160 | (master_doc, 'spikely', 'spikely Documentation', 161 | author, 'spikely', 'One line description of project.', 162 | 'Miscellaneous'), 163 | ] 164 | 165 | 166 | # -- Options for Epub output ------------------------------------------------- 167 | 168 | # Bibliographic Dublin Core info. 169 | epub_title = project 170 | 171 | # The unique identifier of the text. This can be a ISBN number 172 | # or the project homepage. 173 | # 174 | # epub_identifier = '' 175 | 176 | # A unique identification for the text. 177 | # 178 | # epub_uid = '' 179 | 180 | # A list of files that should not be packed into the epub file. 181 | epub_exclude_files = ['search.html'] 182 | 183 | 184 | # -- Extension configuration ------------------------------------------------- 185 | -------------------------------------------------------------------------------- /spikely/elements/guiparams/exporter/phyexporter.py: -------------------------------------------------------------------------------- 1 | from spiketoolkit.validation.quality_metric_classes.parameter_dictionaries import ( 2 | get_validation_params, 3 | ) 4 | 5 | class_default = get_validation_params() 6 | 7 | 8 | gui_params = [ 9 | {"name": "save_path", "type": "folder", "title": "Save path"}, 10 | { 11 | "name": "compute_pc_features", 12 | "type": "bool", 13 | "value": True, 14 | "default": True, 15 | "title": "If True (default), pc features are computed.", 16 | }, 17 | { 18 | "name": "compute_amplitudes", 19 | "type": "bool", 20 | "value": True, 21 | "default": True, 22 | "title": "If True (default), waveforms amplitudes are compute.", 23 | }, 24 | { 25 | "name": "max_channels_per_template", 26 | "type": "int", 27 | "value": 3, 28 | "default": 3, 29 | "title": "Maximum channels per unit to return." 
30 | " If None, all channels are returned.", 31 | }, 32 | { 33 | "name": "n_comp", 34 | "type": "int", 35 | "value": class_default["n_comp"], 36 | "default": class_default["n_comp"], 37 | "title": "Number of PCA components (default 3).", 38 | }, 39 | { 40 | "name": "max_spikes_for_pca", 41 | "type": "int", 42 | "value": class_default["max_spikes_for_pca"], 43 | "default": class_default["max_spikes_for_pca"], 44 | "title": "The maximum number of spikes to use to compute PCA.", 45 | }, 46 | { 47 | "name": "grouping_property", 48 | "type": "str", 49 | "value": class_default["grouping_property"], 50 | "default": class_default["grouping_property"], 51 | "title": "Property to group channels. E.g. if the recording extractor has the" 52 | " 'group' property and 'grouping_property' is 'group', then waveforms" 53 | " are computed group-wise.", 54 | }, 55 | { 56 | "name": "ms_before", 57 | "type": "float", 58 | "value": class_default["ms_before"], 59 | "default": class_default["ms_before"], 60 | "title": "Time period in ms to cut waveforms before the spike events.", 61 | }, 62 | { 63 | "name": "ms_after", 64 | "type": "float", 65 | "value": class_default["ms_after"], 66 | "default": class_default["ms_after"], 67 | "title": "Time period in ms to cut waveforms after the spike events.", 68 | }, 69 | { 70 | "name": "dtype", 71 | "type": "dtype", 72 | "value": class_default["dtype"], 73 | "default": class_default["dtype"], 74 | "title": "The dtype of underlying data (int16, float32, etc.)", 75 | }, 76 | { 77 | "name": "max_spikes_per_unit", 78 | "type": "int", 79 | "value": class_default["max_spikes_per_unit"], 80 | "default": class_default["max_spikes_per_unit"], 81 | "title": "The maximum number of spikes to extract per unit.", 82 | }, 83 | { 84 | "name": "compute_property_from_recording", 85 | "type": "bool", 86 | "value": class_default["compute_property_from_recording"], 87 | "default": class_default["compute_property_from_recording"], 88 | "title": "If True and 'grouping_property' is given, the property of each unit" 89 | " is assigned as the corresponding property of the recording extractor" 90 | " channel on which the average waveform is the largest", 91 | }, 92 | { 93 | "name": "n_jobs", 94 | "type": "int", 95 | "value": class_default["n_jobs"], 96 | "default": class_default["n_jobs"], 97 | "title": "Number of parallel jobs (default 1).", 98 | }, 99 | { 100 | "name": "method", 101 | "type": "str", 102 | "value": class_default["method"], 103 | "default": class_default["method"], 104 | "title": "If 'absolute' (default), amplitudes are absolute amplitudes" 105 | " in uV are returned. 
If 'relative', amplitudes are returned" 106 | " as ratios between waveform amplitudes and template amplitudes.", 107 | }, 108 | { 109 | "name": "peak", 110 | "type": "str", 111 | "value": class_default["peak"], 112 | "default": class_default["peak"], 113 | "title": "If maximum channel has to be found among negative peaks ('neg')," 114 | " positive ('pos') or both ('both' - default)", 115 | }, 116 | { 117 | "name": "frames_before", 118 | "type": "int", 119 | "value": class_default["frames_before"], 120 | "default": class_default["frames_before"], 121 | "title": "Frames before peak to compute amplitude.", 122 | }, 123 | { 124 | "name": "frames_after", 125 | "type": "int", 126 | "value": class_default["frames_after"], 127 | "default": class_default["frames_after"], 128 | "title": "Frames after peak to compute amplitude.", 129 | }, 130 | { 131 | "name": "recompute_info", 132 | "type": "bool", 133 | "value": class_default["recompute_info"], 134 | "default": class_default["recompute_info"], 135 | "title": "If True, will always re-extract waveforms and templates.", 136 | }, 137 | { 138 | "name": "save_property_or_features", 139 | "type": "bool", 140 | "value": class_default["save_property_or_features"], 141 | "default": class_default["save_property_or_features"], 142 | "title": "If True, will store all calculated features and properties.", 143 | }, 144 | { 145 | "name": "verbose", 146 | "type": "bool", 147 | "value": class_default["verbose"], 148 | "default": class_default["verbose"], 149 | "title": "If True output is verbose.", 150 | }, 151 | { 152 | "name": "memmap", 153 | "type": "dtype", 154 | "value": class_default["memmap"], 155 | "default": class_default["memmap"], 156 | "title": "If True, waveforms are saved as memmap object (recommended" 157 | " for long recordings with many channels.", 158 | }, 159 | { 160 | "name": "joblib_backend", 161 | "type": "str", 162 | "value": class_default["joblib_backend"], 163 | "default": class_default["joblib_backend"], 164 | "title": "The backend for joblib (default is 'loky')", 165 | }, 166 | { 167 | "name": "seed", 168 | "type": "int", 169 | "value": class_default["seed"], 170 | "default": class_default["seed"], 171 | "title": "Random seed for extracting waveforms and pcs.", 172 | }, 173 | ] 174 | -------------------------------------------------------------------------------- /spikely/pipeline_model.py: -------------------------------------------------------------------------------- 1 | import json 2 | 3 | import pkg_resources 4 | from PyQt5 import QtCore, QtWidgets 5 | 6 | from spikely import SpikeElement, StdElementPolicy, config 7 | 8 | 9 | class PipelineModel(QtCore.QAbstractListModel): 10 | 11 | def __init__(self, parameter_model): 12 | super().__init__() 13 | 14 | self._element_list = [] 15 | self._element_policy = StdElementPolicy() 16 | self._parameter_model = parameter_model 17 | self._share_output = True 18 | 19 | # TODO: Put back in when ready to support terminal output 20 | @property 21 | def share_output(self): 22 | return self._share_output 23 | 24 | @share_output.setter 25 | def share_output(self, state): 26 | self._share_output = state 27 | 28 | def _elem_cls_count(self, target_cls): 29 | elem_cls_list = [type(elem) for elem in self._element_list] 30 | return elem_cls_list.count(target_cls) 31 | 32 | def rowCount(self, parent=None): 33 | # Overrides base class: provides count of elements in pipeline 34 | return len(self._element_list) 35 | 36 | def data(self, mod_index, role=QtCore.Qt.DisplayRole): 37 | # Overrides base class: returns 
data for element in pipeline for role 38 | if mod_index.isValid() and mod_index.row() < len(self._element_list): 39 | element = self._element_list[mod_index.row()] 40 | data_dict = { 41 | QtCore.Qt.DisplayRole: element.display_name, 42 | QtCore.Qt.EditRole: element.display_name, 43 | QtCore.Qt.DecorationRole: element.display_icon, 44 | config.ELEMENT_ROLE: element} 45 | return data_dict.get(role) 46 | 47 | def run(self): 48 | # Called in response to user pressing Run button in UI 49 | missing_param_count = self._missing_param_count() 50 | if missing_param_count: 51 | QtWidgets.QMessageBox.warning( 52 | config.get_main_window(), 'Run Failure', 53 | f'Missing mandatory element parameters. Missing parameter ' 54 | f'count: {missing_param_count}') 55 | return 56 | 57 | for cls in self._element_policy.required_cls_list: 58 | if not self._elem_cls_count(cls): 59 | QtWidgets.QMessageBox.warning( 60 | config.get_main_window(), 'Run Failure', 61 | f'Missing required element: {cls.__name__}') 62 | return 63 | 64 | config.get_main_window().statusBar().showMessage( 65 | 'Running pipeline', config.STATUS_MSG_TIMEOUT) 66 | 67 | elem_jdict_list = [config.cvt_elem_to_dict(element) 68 | for element in self._element_list] 69 | 70 | elem_list_str = json.dumps(elem_jdict_list) 71 | 72 | # TODO: Add plumbing for shared output support 73 | if self.share_output: 74 | run_path = pkg_resources.resource_filename( 75 | 'spikely.pipeman', 'piperun.py') 76 | else: 77 | run_path = pkg_resources.resource_filename( 78 | 'spikely.pipeman', 'pipeman.py') 79 | 80 | run_process = QtCore.QProcess(self) 81 | success = run_process.startDetached( 82 | 'python', [f'{run_path}', elem_list_str]) 83 | if not success: 84 | QtWidgets.QMessageBox.warning( 85 | config.get_main_window(), 'Failed to Start Python Process', 86 | f'Command line: python {run_path}, elem_list_str') 87 | 88 | def clear(self): 89 | self.beginResetModel() 90 | self._element_list.clear() 91 | self.endResetModel() 92 | # Synchronize parameter model and view 93 | self._parameter_model.element = None 94 | 95 | def add_element(self, new_elem: SpikeElement) -> None: 96 | # Inserts new element into pipeline in proper order 97 | 98 | new_elem_cls_count = self._elem_cls_count(new_elem.__class__) 99 | new_elem_is_singleton = self._element_policy.\ 100 | is_cls_singleton(new_elem.__class__) 101 | if new_elem_is_singleton and new_elem_cls_count: 102 | config.get_main_window().statusBar().showMessage( 103 | 'Only one element of this type allowed in pipeline', 104 | config.STATUS_MSG_TIMEOUT) 105 | return 106 | 107 | target_positions = self._element_policy.cls_order_dict 108 | new_elem_target_pos = target_positions[new_elem.__class__] 109 | new_elem_insert_pos = 0 110 | for pipe_elem in self._element_list: 111 | pipe_elem_target_pos = target_positions[pipe_elem.__class__] 112 | if new_elem_target_pos >= pipe_elem_target_pos: 113 | new_elem_insert_pos += 1 114 | else: 115 | break 116 | 117 | self.beginInsertRows(QtCore.QModelIndex(), new_elem_insert_pos, 118 | new_elem_insert_pos) 119 | self._element_list.insert(new_elem_insert_pos, new_elem) 120 | self.endInsertRows() 121 | 122 | # TODO: Clean this up in line w/ add_element method 123 | def move_up(self, elem: SpikeElement) -> None: 124 | rank = self._element_policy.cls_order_dict 125 | row = self._element_list.index(elem) 126 | 127 | if row > 0 and rank[type(elem)] == \ 128 | rank[type(self._element_list[row - 1])]: 129 | self.beginMoveRows(QtCore.QModelIndex(), row, 130 | row, QtCore.QModelIndex(), row - 1) # noqa: E128 131 
| self._swap(self._element_list, row, row - 1) 132 | self.endMoveRows() 133 | else: 134 | config.get_main_window().statusBar().showMessage( 135 | "Cannot move element any higher", config.STATUS_MSG_TIMEOUT) 136 | 137 | # TODO: Clean this up in line w/ add_element method 138 | def move_down(self, elem: SpikeElement) -> None: 139 | rank = self._element_policy.cls_order_dict 140 | row = self._element_list.index(elem) 141 | 142 | if row < len(self._element_list) - 1 and \ 143 | rank[type(elem)] == rank[type(self._element_list[row + 1])]: 144 | self.beginMoveRows(QtCore.QModelIndex(), row + 1, 145 | row + 1, QtCore.QModelIndex(), row) # noqa: E128 146 | self._swap(self._element_list, row, row + 1) 147 | self.endMoveRows() 148 | else: 149 | config.get_main_window().statusBar().showMessage( 150 | "Cannot move element any lower", config.STATUS_MSG_TIMEOUT) 151 | 152 | def delete(self, element: SpikeElement) -> None: 153 | index = self._element_list.index(element) 154 | self.beginRemoveRows(QtCore.QModelIndex(), index, index) 155 | self._element_list.pop(index) 156 | self.endRemoveRows() 157 | 158 | # Helper methods 159 | 160 | def _swap(self, list, pos1, pos2): 161 | list[pos1], list[pos2] = list[pos2], list[pos1] 162 | 163 | def _missing_param_count(self) -> int: 164 | missing_param_list = [param for elem in self._element_list for param in 165 | elem.param_list if 'value' not in param.keys()] 166 | return len(missing_param_list) 167 | -------------------------------------------------------------------------------- /spikely/elements/guiparams/curator/thresholdsilhouettescores.py: -------------------------------------------------------------------------------- 1 | from spiketoolkit.curation import threshold_silhouette_scores 2 | from spiketoolkit.validation.quality_metric_classes.parameter_dictionaries import ( 3 | get_validation_params, 4 | ) 5 | from spiketoolkit.validation.quality_metric_classes.silhouette_score import SilhouetteScore 6 | class_default = get_validation_params() 7 | spif_init_func = threshold_silhouette_scores 8 | metric_default = SilhouetteScore.params 9 | 10 | gui_params = [ 11 | { 12 | "name": "threshold", 13 | "type": "float", 14 | "title": "The threshold for the given metric.", 15 | }, 16 | { 17 | "name": "threshold_sign", 18 | "type": "str", 19 | "title": "If 'less', will threshold any metric less than the given threshold. \ 20 | If 'less_or_equal', will threshold any metric less than or equal to the given threshold. \ 21 | If 'greater', will threshold any metric greater than the given threshold. \ 22 | If 'greater_or_equal', will threshold any metric greater than or equal to the given threshold.", 23 | }, 24 | { 25 | "name": "max_spikes_for_silhouette", 26 | "type": "int", 27 | "value": metric_default["max_spikes_for_silhouette"], 28 | "default": metric_default["max_spikes_for_silhouette"], 29 | "title": "Max spikes to be used for silhouette metric.", 30 | }, 31 | #kwargs 32 | { 33 | "name": "method", 34 | "type": "str", 35 | "value": class_default["method"], 36 | "default": class_default["method"], 37 | "title": "If 'absolute' (default), amplitudes are absolute amplitudes in uV are returned. 
\ 38 | If 'relative', amplitudes are returned as ratios between waveform amplitudes and template amplitudes.", 39 | }, 40 | { 41 | "name": "peak", 42 | "type": "str", 43 | "value": class_default["peak"], 44 | "default": class_default["peak"], 45 | "title": "If maximum channel has to be found among negative peaks ('neg'), positive ('pos') or \ 46 | both ('both' - default).", 47 | }, 48 | { 49 | "name": "frames_before", 50 | "type": "int", 51 | "value": class_default["frames_before"], 52 | "default": class_default["frames_before"], 53 | "title": "Frames before peak to compute amplitude.", 54 | }, 55 | { 56 | "name": "frames_after", 57 | "type": "int", 58 | "value": class_default["frames_after"], 59 | "default": class_default["frames_after"], 60 | "title": "Frames after peak to compute amplitude.", 61 | }, 62 | { 63 | "name": "apply_filter", 64 | "type": "bool", 65 | "value": class_default["apply_filter"], 66 | "default": class_default["apply_filter"], 67 | "title": "If True, recording is bandpass-filtered", 68 | }, 69 | { 70 | "name": "freq_min", 71 | "type": "float", 72 | "value": class_default["freq_min"], 73 | "default": class_default["freq_min"], 74 | "title": "High-pass frequency for optional filter (default 300 Hz).", 75 | }, 76 | { 77 | "name": "freq_max", 78 | "type": "float", 79 | "value": class_default["freq_max"], 80 | "default": class_default["freq_max"], 81 | "title": "Low-pass frequency for optional filter (default 6000 Hz).", 82 | }, 83 | { 84 | "name": "grouping_property", 85 | "type": "str", 86 | "value": class_default["grouping_property"], 87 | "default": class_default["grouping_property"], 88 | "title": "Property to group channels. E.g. if the recording extractor has the 'group' property and \ 89 | 'grouping_property' is 'group', then waveforms are computed group-wise.", 90 | }, 91 | { 92 | "name": "ms_before", 93 | "type": "float", 94 | "value": class_default["ms_before"], 95 | "default": class_default["ms_before"], 96 | "title": "Time period in ms to cut waveforms before the spike events.", 97 | }, 98 | { 99 | "name": "ms_after", 100 | "type": "float", 101 | "value": class_default["ms_after"], 102 | "default": class_default["ms_after"], 103 | "title": "Time period in ms to cut waveforms after the spike events.", 104 | }, 105 | { 106 | "name": "dtype", 107 | "type": "dtype", 108 | "value": class_default["dtype"], 109 | "default": class_default["dtype"], 110 | "title": "The numpy dtype of the waveforms.", 111 | }, 112 | { 113 | "name": "compute_property_from_recording", 114 | "type": "bool", 115 | "value": class_default["compute_property_from_recording"], 116 | "default": class_default["compute_property_from_recording"], 117 | "title": "If True and 'grouping_property' is given, the property of each unit is assigned as the corresponding \ 118 | property of the recording extractor channel on which the average waveform is the largest.", 119 | }, 120 | { 121 | "name": "max_channels_per_waveforms", 122 | "type": "int", 123 | "value": class_default["max_channels_per_waveforms"], 124 | "default": class_default["max_channels_per_waveforms"], 125 | "title": " Maximum channels per waveforms to return. 
If None, all channels are returned.", 126 | }, 127 | { 128 | "name": "n_jobs", 129 | "type": "int", 130 | "value": class_default["n_jobs"], 131 | "default": class_default["n_jobs"], 132 | "title": "Number of parallel jobs (default None).", 133 | }, 134 | { 135 | "name": "memmap", 136 | "type": "bool", 137 | "value": class_default["memmap"], 138 | "default": class_default["memmap"], 139 | "title": "If True, waveforms are saved as memmap object (recommended for long recordings with many channels).", 140 | }, 141 | { 142 | "name": "save_property_or_features", 143 | "type": "bool", 144 | "value": class_default["save_property_or_features"], 145 | "default": class_default["save_property_or_features"], 146 | "title": "If True, it will save features in the sorting extractor.", 147 | }, 148 | { 149 | "name": "recompute_info", 150 | "type": "bool", 151 | "value": class_default["recompute_info"], 152 | "default": class_default["recompute_info"], 153 | "title": "If True, waveforms are recomputed.", 154 | }, 155 | { 156 | "name": "max_spikes_per_unit", 157 | "type": "int", 158 | "value": class_default["max_spikes_per_unit"], 159 | "default": class_default["max_spikes_per_unit"], 160 | "title": "The maximum number of spikes to extract per unit.", 161 | }, 162 | { 163 | "name": "seed", 164 | "type": "int", 165 | "value": class_default["seed"], 166 | "default": class_default["seed"], 167 | "title": "Random seed for reproducibility.", 168 | }, 169 | { 170 | "name": "verbose", 171 | "type": "bool", 172 | "value": class_default["verbose"], 173 | "default": class_default["verbose"], 174 | "title": "If True, output from SpikeInterface element is verbose when run.", 175 | }, 176 | ] 177 | -------------------------------------------------------------------------------- /spikely/elements/guiparams/curator/thresholddprimes.py: -------------------------------------------------------------------------------- 1 | from spiketoolkit.validation.quality_metric_classes.parameter_dictionaries import ( 2 | get_validation_params, 3 | ) 4 | from spiketoolkit.curation import threshold_d_primes 5 | from spiketoolkit.validation.quality_metric_classes.d_prime import DPrime 6 | metric_default = DPrime.params 7 | spif_init_func = threshold_d_primes 8 | class_default = get_validation_params() 9 | 10 | gui_params = [ 11 | { 12 | "name": "threshold", 13 | "type": "float", 14 | "title": "The threshold for the given metric.", 15 | }, 16 | { 17 | "name": "threshold_sign", 18 | "type": "str", 19 | "title": "If 'less', will threshold any metric less than the given threshold. \ 20 | If 'less_or_equal', will threshold any metric less than or equal to the given threshold. \ 21 | If 'greater', will threshold any metric greater than the given threshold. 
\ 22 | If 'greater_or_equal', will threshold any metric greater than or equal to the given threshold.", 23 | }, 24 | { 25 | "name": "num_channels_to_compare", 26 | "type": "int", 27 | "value": metric_default["num_channels_to_compare"], 28 | "default": metric_default["num_channels_to_compare"], 29 | "title": "The number of channels to be used for the PC extraction and comparison.", 30 | }, 31 | { 32 | "name": "max_spikes_per_cluster", 33 | "type": "int", 34 | "value": metric_default["max_spikes_per_cluster"], 35 | "default": metric_default["max_spikes_per_cluster"], 36 | "title": "Max spikes to be used from each unit.", 37 | }, 38 | # kwargs 39 | { 40 | "name": "method", 41 | "type": "str", 42 | "value": class_default["method"], 43 | "default": class_default["method"], 44 | "title": "If 'absolute' (default), amplitudes are absolute amplitudes in uV are returned. \ 45 | If 'relative', amplitudes are returned as ratios between waveform amplitudes and template amplitudes.", 46 | }, 47 | { 48 | "name": "peak", 49 | "type": "str", 50 | "value": class_default["peak"], 51 | "default": class_default["peak"], 52 | "title": "If maximum channel has to be found among negative peaks ('neg'), positive ('pos') or \ 53 | both ('both' - default).", 54 | }, 55 | { 56 | "name": "frames_before", 57 | "type": "int", 58 | "value": class_default["frames_before"], 59 | "default": class_default["frames_before"], 60 | "title": "Frames before peak to compute amplitude.", 61 | }, 62 | { 63 | "name": "frames_after", 64 | "type": "int", 65 | "value": class_default["frames_after"], 66 | "default": class_default["frames_after"], 67 | "title": "Frames after peak to compute amplitude.", 68 | }, 69 | { 70 | "name": "apply_filter", 71 | "type": "bool", 72 | "value": class_default["apply_filter"], 73 | "default": class_default["apply_filter"], 74 | "title": "If True, recording is bandpass-filtered", 75 | }, 76 | { 77 | "name": "freq_min", 78 | "type": "float", 79 | "value": class_default["freq_min"], 80 | "default": class_default["freq_min"], 81 | "title": "High-pass frequency for optional filter (default 300 Hz).", 82 | }, 83 | { 84 | "name": "freq_max", 85 | "type": "float", 86 | "value": class_default["freq_max"], 87 | "default": class_default["freq_max"], 88 | "title": "Low-pass frequency for optional filter (default 6000 Hz).", 89 | }, 90 | { 91 | "name": "grouping_property", 92 | "type": "str", 93 | "value": class_default["grouping_property"], 94 | "default": class_default["grouping_property"], 95 | "title": "Property to group channels. E.g. 
if the recording extractor has the 'group' property and \ 96 | 'grouping_property' is 'group', then waveforms are computed group-wise.", 97 | }, 98 | { 99 | "name": "ms_before", 100 | "type": "float", 101 | "value": class_default["ms_before"], 102 | "default": class_default["ms_before"], 103 | "title": "Time period in ms to cut waveforms before the spike events.", 104 | }, 105 | { 106 | "name": "ms_after", 107 | "type": "float", 108 | "value": class_default["ms_after"], 109 | "default": class_default["ms_after"], 110 | "title": "Time period in ms to cut waveforms after the spike events.", 111 | }, 112 | { 113 | "name": "dtype", 114 | "type": "dtype", 115 | "value": class_default["dtype"], 116 | "default": class_default["dtype"], 117 | "title": "The numpy dtype of the waveforms.", 118 | }, 119 | { 120 | "name": "compute_property_from_recording", 121 | "type": "bool", 122 | "value": class_default["compute_property_from_recording"], 123 | "default": class_default["compute_property_from_recording"], 124 | "title": "If True and 'grouping_property' is given, the property of each unit is assigned as the corresponding \ 125 | property of the recording extractor channel on which the average waveform is the largest.", 126 | }, 127 | { 128 | "name": "max_channels_per_waveforms", 129 | "type": "int", 130 | "value": class_default["max_channels_per_waveforms"], 131 | "default": class_default["max_channels_per_waveforms"], 132 | "title": " Maximum channels per waveforms to return. If None, all channels are returned.", 133 | }, 134 | { 135 | "name": "n_jobs", 136 | "type": "int", 137 | "value": class_default["n_jobs"], 138 | "default": class_default["n_jobs"], 139 | "title": "Number of parallel jobs (default None).", 140 | }, 141 | { 142 | "name": "memmap", 143 | "type": "bool", 144 | "value": class_default["memmap"], 145 | "default": class_default["memmap"], 146 | "title": "If True, waveforms are saved as memmap object (recommended for long recordings with many channels).", 147 | }, 148 | { 149 | "name": "save_property_or_features", 150 | "type": "bool", 151 | "value": class_default["save_property_or_features"], 152 | "default": class_default["save_property_or_features"], 153 | "title": "If True, it will save features in the sorting extractor.", 154 | }, 155 | { 156 | "name": "recompute_info", 157 | "type": "bool", 158 | "value": class_default["recompute_info"], 159 | "default": class_default["recompute_info"], 160 | "title": "If True, waveforms are recomputed.", 161 | }, 162 | { 163 | "name": "max_spikes_per_unit", 164 | "type": "int", 165 | "value": class_default["max_spikes_per_unit"], 166 | "default": class_default["max_spikes_per_unit"], 167 | "title": "The maximum number of spikes to extract per unit.", 168 | }, 169 | { 170 | "name": "seed", 171 | "type": "int", 172 | "value": class_default["seed"], 173 | "default": class_default["seed"], 174 | "title": "Random seed for reproducibility.", 175 | }, 176 | { 177 | "name": "verbose", 178 | "type": "bool", 179 | "value": class_default["verbose"], 180 | "default": class_default["verbose"], 181 | "title": "If True, output from SpikeInterface element is verbose when run.", 182 | }, 183 | ] 184 | -------------------------------------------------------------------------------- /spikely/elements/guiparams/curator/thresholdlratios.py: -------------------------------------------------------------------------------- 1 | from spiketoolkit.validation.quality_metric_classes.parameter_dictionaries import ( 2 | get_validation_params, 3 | ) 4 | from 
spiketoolkit.curation import threshold_l_ratios 5 | from spiketoolkit.validation.quality_metric_classes.l_ratio import LRatio 6 | metric_default = LRatio.params 7 | spif_init_func = threshold_l_ratios 8 | class_default = get_validation_params() 9 | 10 | gui_params = [ 11 | { 12 | "name": "threshold", 13 | "type": "float", 14 | "title": "The threshold for the given metric.", 15 | }, 16 | { 17 | "name": "threshold_sign", 18 | "type": "str", 19 | "title": "If 'less', will threshold any metric less than the given threshold. \ 20 | If 'less_or_equal', will threshold any metric less than or equal to the given threshold. \ 21 | If 'greater', will threshold any metric greater than the given threshold. \ 22 | If 'greater_or_equal', will threshold any metric greater than or equal to the given threshold.", 23 | }, 24 | { 25 | "name": "num_channels_to_compare", 26 | "type": "int", 27 | "value": metric_default["num_channels_to_compare"], 28 | "default": metric_default["num_channels_to_compare"], 29 | "title": "The number of channels to be used for the PC extraction and comparison.", 30 | }, 31 | { 32 | "name": "max_spikes_per_cluster", 33 | "type": "int", 34 | "value": metric_default["max_spikes_per_cluster"], 35 | "default": metric_default["max_spikes_per_cluster"], 36 | "title": "Max spikes to be used from each unit.", 37 | }, 38 | # kwargs 39 | { 40 | "name": "method", 41 | "type": "str", 42 | "value": class_default["method"], 43 | "default": class_default["method"], 44 | "title": "If 'absolute' (default), amplitudes are absolute amplitudes in uV are returned. \ 45 | If 'relative', amplitudes are returned as ratios between waveform amplitudes and template amplitudes.", 46 | }, 47 | { 48 | "name": "peak", 49 | "type": "str", 50 | "value": class_default["peak"], 51 | "default": class_default["peak"], 52 | "title": "If maximum channel has to be found among negative peaks ('neg'), positive ('pos') or \ 53 | both ('both' - default).", 54 | }, 55 | { 56 | "name": "frames_before", 57 | "type": "int", 58 | "value": class_default["frames_before"], 59 | "default": class_default["frames_before"], 60 | "title": "Frames before peak to compute amplitude.", 61 | }, 62 | { 63 | "name": "frames_after", 64 | "type": "int", 65 | "value": class_default["frames_after"], 66 | "default": class_default["frames_after"], 67 | "title": "Frames after peak to compute amplitude.", 68 | }, 69 | { 70 | "name": "apply_filter", 71 | "type": "bool", 72 | "value": class_default["apply_filter"], 73 | "default": class_default["apply_filter"], 74 | "title": "If True, recording is bandpass-filtered", 75 | }, 76 | { 77 | "name": "freq_min", 78 | "type": "float", 79 | "value": class_default["freq_min"], 80 | "default": class_default["freq_min"], 81 | "title": "High-pass frequency for optional filter (default 300 Hz).", 82 | }, 83 | { 84 | "name": "freq_max", 85 | "type": "float", 86 | "value": class_default["freq_max"], 87 | "default": class_default["freq_max"], 88 | "title": "Low-pass frequency for optional filter (default 6000 Hz).", 89 | }, 90 | { 91 | "name": "grouping_property", 92 | "type": "str", 93 | "value": class_default["grouping_property"], 94 | "default": class_default["grouping_property"], 95 | "title": "Property to group channels. E.g. 
if the recording extractor has the 'group' property and \ 96 | 'grouping_property' is 'group', then waveforms are computed group-wise.", 97 | }, 98 | { 99 | "name": "ms_before", 100 | "type": "float", 101 | "value": class_default["ms_before"], 102 | "default": class_default["ms_before"], 103 | "title": "Time period in ms to cut waveforms before the spike events.", 104 | }, 105 | { 106 | "name": "ms_after", 107 | "type": "float", 108 | "value": class_default["ms_after"], 109 | "default": class_default["ms_after"], 110 | "title": "Time period in ms to cut waveforms after the spike events.", 111 | }, 112 | { 113 | "name": "dtype", 114 | "type": "dtype", 115 | "value": class_default["dtype"], 116 | "default": class_default["dtype"], 117 | "title": "The numpy dtype of the waveforms.", 118 | }, 119 | { 120 | "name": "compute_property_from_recording", 121 | "type": "bool", 122 | "value": class_default["compute_property_from_recording"], 123 | "default": class_default["compute_property_from_recording"], 124 | "title": "If True and 'grouping_property' is given, the property of each unit is assigned as the corresponding \ 125 | property of the recording extractor channel on which the average waveform is the largest.", 126 | }, 127 | { 128 | "name": "max_channels_per_waveforms", 129 | "type": "int", 130 | "value": class_default["max_channels_per_waveforms"], 131 | "default": class_default["max_channels_per_waveforms"], 132 | "title": " Maximum channels per waveforms to return. If None, all channels are returned.", 133 | }, 134 | { 135 | "name": "n_jobs", 136 | "type": "int", 137 | "value": class_default["n_jobs"], 138 | "default": class_default["n_jobs"], 139 | "title": "Number of parallel jobs (default None).", 140 | }, 141 | { 142 | "name": "memmap", 143 | "type": "bool", 144 | "value": class_default["memmap"], 145 | "default": class_default["memmap"], 146 | "title": "If True, waveforms are saved as memmap object (recommended for long recordings with many channels).", 147 | }, 148 | { 149 | "name": "save_property_or_features", 150 | "type": "bool", 151 | "value": class_default["save_property_or_features"], 152 | "default": class_default["save_property_or_features"], 153 | "title": "If True, it will save features in the sorting extractor.", 154 | }, 155 | { 156 | "name": "recompute_info", 157 | "type": "bool", 158 | "value": class_default["recompute_info"], 159 | "default": class_default["recompute_info"], 160 | "title": "If True, waveforms are recomputed.", 161 | }, 162 | { 163 | "name": "max_spikes_per_unit", 164 | "type": "int", 165 | "value": class_default["max_spikes_per_unit"], 166 | "default": class_default["max_spikes_per_unit"], 167 | "title": "The maximum number of spikes to extract per unit.", 168 | }, 169 | { 170 | "name": "seed", 171 | "type": "int", 172 | "value": class_default["seed"], 173 | "default": class_default["seed"], 174 | "title": "Random seed for reproducibility.", 175 | }, 176 | { 177 | "name": "verbose", 178 | "type": "bool", 179 | "value": class_default["verbose"], 180 | "default": class_default["verbose"], 181 | "title": "If True, output from SpikeInterface element is verbose when run.", 182 | }, 183 | ] 184 | -------------------------------------------------------------------------------- /spikely/elements/guiparams/curator/thresholdisolationdistances.py: -------------------------------------------------------------------------------- 1 | from spiketoolkit.curation import threshold_isolation_distances 2 | from 
spiketoolkit.validation.quality_metric_classes.parameter_dictionaries import ( 3 | get_validation_params, 4 | ) 5 | from spiketoolkit.validation.quality_metric_classes.isolation_distance import IsolationDistance 6 | metric_default = IsolationDistance.params 7 | class_default = get_validation_params() 8 | spif_init_func = threshold_isolation_distances 9 | 10 | 11 | gui_params = [ 12 | { 13 | "name": "threshold", 14 | "type": "float", 15 | "title": "The threshold for the given metric.", 16 | }, 17 | { 18 | "name": "threshold_sign", 19 | "type": "str", 20 | "title": "If 'less', will threshold any metric less than the given threshold. \ 21 | If 'less_or_equal', will threshold any metric less than or equal to the given threshold. \ 22 | If 'greater', will threshold any metric greater than the given threshold. \ 23 | If 'greater_or_equal', will threshold any metric greater than or equal to the given threshold.", 24 | }, 25 | { 26 | "name": "num_channels_to_compare", 27 | "type": "int", 28 | "value": metric_default["num_channels_to_compare"], 29 | "default": metric_default["num_channels_to_compare"], 30 | "title": "The number of channels to be used for the PC extraction and comparison.", 31 | }, 32 | { 33 | "name": "max_spikes_per_cluster", 34 | "type": "int", 35 | "value": metric_default["max_spikes_per_cluster"], 36 | "default": metric_default["max_spikes_per_cluster"], 37 | "title": "Max spikes to be used from each unit.", 38 | }, 39 | #kwargs 40 | { 41 | "name": "method", 42 | "type": "str", 43 | "value": class_default["method"], 44 | "default": class_default["method"], 45 | "title": "If 'absolute' (default), amplitudes are absolute amplitudes in uV are returned. \ 46 | If 'relative', amplitudes are returned as ratios between waveform amplitudes and template amplitudes.", 47 | }, 48 | { 49 | "name": "peak", 50 | "type": "str", 51 | "value": class_default["peak"], 52 | "default": class_default["peak"], 53 | "title": "If maximum channel has to be found among negative peaks ('neg'), positive ('pos') or \ 54 | both ('both' - default).", 55 | }, 56 | { 57 | "name": "frames_before", 58 | "type": "int", 59 | "value": class_default["frames_before"], 60 | "default": class_default["frames_before"], 61 | "title": "Frames before peak to compute amplitude.", 62 | }, 63 | { 64 | "name": "frames_after", 65 | "type": "int", 66 | "value": class_default["frames_after"], 67 | "default": class_default["frames_after"], 68 | "title": "Frames after peak to compute amplitude.", 69 | }, 70 | { 71 | "name": "apply_filter", 72 | "type": "bool", 73 | "value": class_default["apply_filter"], 74 | "default": class_default["apply_filter"], 75 | "title": "If True, recording is bandpass-filtered", 76 | }, 77 | { 78 | "name": "freq_min", 79 | "type": "float", 80 | "value": class_default["freq_min"], 81 | "default": class_default["freq_min"], 82 | "title": "High-pass frequency for optional filter (default 300 Hz).", 83 | }, 84 | { 85 | "name": "freq_max", 86 | "type": "float", 87 | "value": class_default["freq_max"], 88 | "default": class_default["freq_max"], 89 | "title": "Low-pass frequency for optional filter (default 6000 Hz).", 90 | }, 91 | { 92 | "name": "grouping_property", 93 | "type": "str", 94 | "value": class_default["grouping_property"], 95 | "default": class_default["grouping_property"], 96 | "title": "Property to group channels. E.g. 
if the recording extractor has the 'group' property and \ 97 | 'grouping_property' is 'group', then waveforms are computed group-wise.", 98 | }, 99 | { 100 | "name": "ms_before", 101 | "type": "float", 102 | "value": class_default["ms_before"], 103 | "default": class_default["ms_before"], 104 | "title": "Time period in ms to cut waveforms before the spike events.", 105 | }, 106 | { 107 | "name": "ms_after", 108 | "type": "float", 109 | "value": class_default["ms_after"], 110 | "default": class_default["ms_after"], 111 | "title": "Time period in ms to cut waveforms after the spike events.", 112 | }, 113 | { 114 | "name": "dtype", 115 | "type": "dtype", 116 | "value": class_default["dtype"], 117 | "default": class_default["dtype"], 118 | "title": "The numpy dtype of the waveforms.", 119 | }, 120 | { 121 | "name": "compute_property_from_recording", 122 | "type": "bool", 123 | "value": class_default["compute_property_from_recording"], 124 | "default": class_default["compute_property_from_recording"], 125 | "title": "If True and 'grouping_property' is given, the property of each unit is assigned as the corresponding \ 126 | property of the recording extractor channel on which the average waveform is the largest.", 127 | }, 128 | { 129 | "name": "max_channels_per_waveforms", 130 | "type": "int", 131 | "value": class_default["max_channels_per_waveforms"], 132 | "default": class_default["max_channels_per_waveforms"], 133 | "title": " Maximum channels per waveforms to return. If None, all channels are returned.", 134 | }, 135 | { 136 | "name": "n_jobs", 137 | "type": "int", 138 | "value": class_default["n_jobs"], 139 | "default": class_default["n_jobs"], 140 | "title": "Number of parallel jobs (default None).", 141 | }, 142 | { 143 | "name": "memmap", 144 | "type": "bool", 145 | "value": class_default["memmap"], 146 | "default": class_default["memmap"], 147 | "title": "If True, waveforms are saved as memmap object (recommended for long recordings with many channels).", 148 | }, 149 | { 150 | "name": "save_property_or_features", 151 | "type": "bool", 152 | "value": class_default["save_property_or_features"], 153 | "default": class_default["save_property_or_features"], 154 | "title": "If True, it will save features in the sorting extractor.", 155 | }, 156 | { 157 | "name": "recompute_info", 158 | "type": "bool", 159 | "value": class_default["recompute_info"], 160 | "default": class_default["recompute_info"], 161 | "title": "If True, waveforms are recomputed.", 162 | }, 163 | { 164 | "name": "max_spikes_per_unit", 165 | "type": "int", 166 | "value": class_default["max_spikes_per_unit"], 167 | "default": class_default["max_spikes_per_unit"], 168 | "title": "The maximum number of spikes to extract per unit.", 169 | }, 170 | { 171 | "name": "seed", 172 | "type": "int", 173 | "value": class_default["seed"], 174 | "default": class_default["seed"], 175 | "title": "Random seed for reproducibility.", 176 | }, 177 | { 178 | "name": "verbose", 179 | "type": "bool", 180 | "value": class_default["verbose"], 181 | "default": class_default["verbose"], 182 | "title": "If True, output from SpikeInterface element is verbose when run.", 183 | }, 184 | ] 185 | -------------------------------------------------------------------------------- /spikely/elements/guiparams/curator/thresholddriftmetrics.py: -------------------------------------------------------------------------------- 1 | from spiketoolkit.curation import threshold_drift_metrics 2 | from spiketoolkit.validation.quality_metric_classes.parameter_dictionaries 
import ( 3 | get_validation_params, 4 | ) 5 | from spiketoolkit.validation.quality_metric_classes.drift_metric import DriftMetric 6 | metric_default = DriftMetric.params 7 | spif_init_func = threshold_drift_metrics 8 | class_default = get_validation_params() 9 | 10 | gui_params = [ 11 | { 12 | "name": "threshold", 13 | "type": "float", 14 | "title": "The threshold for the given metric.", 15 | }, 16 | { 17 | "name": "threshold_sign", 18 | "type": "str", 19 | "title": "If 'less', will threshold any metric less than the given threshold. \ 20 | If 'less_or_equal', will threshold any metric less than or equal to the given threshold. \ 21 | If 'greater', will threshold any metric greater than the given threshold. \ 22 | If 'greater_or_equal', will threshold any metric greater than or equal to the given threshold.", 23 | }, 24 | { 25 | "name": "metric_name", 26 | "type": "str", 27 | "value": "max_drift", 28 | "default": "max_drift", 29 | "title": "The name of the drift metric to be thresholded (either 'max_drift' or 'cumulative_drift').", 30 | }, 31 | { 32 | "name": "drift_metrics_interval_s", 33 | "type": "float", 34 | "value": metric_default["drift_metrics_interval_s"], 35 | "default": metric_default["drift_metrics_interval_s"], 36 | "title": "Time period for evaluating drift.", 37 | }, 38 | { 39 | "name": "drift_metrics_min_spikes_per_interval", 40 | "type": "int", 41 | "value": metric_default["drift_metrics_min_spikes_per_interval"], 42 | "default": metric_default["drift_metrics_min_spikes_per_interval"], 43 | "title": "Minimum number of spikes for evaluating drift metrics per interval.", 44 | }, 45 | # kwargs 46 | { 47 | "name": "method", 48 | "type": "str", 49 | "value": class_default["method"], 50 | "default": class_default["method"], 51 | "title": "If 'absolute' (default), amplitudes are absolute amplitudes in uV are returned. \ 52 | If 'relative', amplitudes are returned as ratios between waveform amplitudes and template amplitudes.", 53 | }, 54 | { 55 | "name": "peak", 56 | "type": "str", 57 | "value": class_default["peak"], 58 | "default": class_default["peak"], 59 | "title": "If maximum channel has to be found among negative peaks ('neg'), positive ('pos') or \ 60 | both ('both' - default).", 61 | }, 62 | { 63 | "name": "frames_before", 64 | "type": "int", 65 | "value": class_default["frames_before"], 66 | "default": class_default["frames_before"], 67 | "title": "Frames before peak to compute amplitude.", 68 | }, 69 | { 70 | "name": "frames_after", 71 | "type": "int", 72 | "value": class_default["frames_after"], 73 | "default": class_default["frames_after"], 74 | "title": "Frames after peak to compute amplitude.", 75 | }, 76 | { 77 | "name": "apply_filter", 78 | "type": "bool", 79 | "value": class_default["apply_filter"], 80 | "default": class_default["apply_filter"], 81 | "title": "If True, recording is bandpass-filtered", 82 | }, 83 | { 84 | "name": "freq_min", 85 | "type": "float", 86 | "value": class_default["freq_min"], 87 | "default": class_default["freq_min"], 88 | "title": "High-pass frequency for optional filter (default 300 Hz).", 89 | }, 90 | { 91 | "name": "freq_max", 92 | "type": "float", 93 | "value": class_default["freq_max"], 94 | "default": class_default["freq_max"], 95 | "title": "Low-pass frequency for optional filter (default 6000 Hz).", 96 | }, 97 | { 98 | "name": "grouping_property", 99 | "type": "str", 100 | "value": class_default["grouping_property"], 101 | "default": class_default["grouping_property"], 102 | "title": "Property to group channels. E.g. 
if the recording extractor has the 'group' property and \ 103 | 'grouping_property' is 'group', then waveforms are computed group-wise.", 104 | }, 105 | { 106 | "name": "ms_before", 107 | "type": "float", 108 | "value": class_default["ms_before"], 109 | "default": class_default["ms_before"], 110 | "title": "Time period in ms to cut waveforms before the spike events.", 111 | }, 112 | { 113 | "name": "ms_after", 114 | "type": "float", 115 | "value": class_default["ms_after"], 116 | "default": class_default["ms_after"], 117 | "title": "Time period in ms to cut waveforms after the spike events.", 118 | }, 119 | { 120 | "name": "dtype", 121 | "type": "dtype", 122 | "value": class_default["dtype"], 123 | "default": class_default["dtype"], 124 | "title": "The numpy dtype of the waveforms.", 125 | }, 126 | { 127 | "name": "compute_property_from_recording", 128 | "type": "bool", 129 | "value": class_default["compute_property_from_recording"], 130 | "default": class_default["compute_property_from_recording"], 131 | "title": "If True and 'grouping_property' is given, the property of each unit is assigned as the corresponding \ 132 | property of the recording extractor channel on which the average waveform is the largest.", 133 | }, 134 | { 135 | "name": "max_channels_per_waveforms", 136 | "type": "int", 137 | "value": class_default["max_channels_per_waveforms"], 138 | "default": class_default["max_channels_per_waveforms"], 139 | "title": " Maximum channels per waveforms to return. If None, all channels are returned.", 140 | }, 141 | { 142 | "name": "n_jobs", 143 | "type": "int", 144 | "value": class_default["n_jobs"], 145 | "default": class_default["n_jobs"], 146 | "title": "Number of parallel jobs (default None).", 147 | }, 148 | { 149 | "name": "memmap", 150 | "type": "bool", 151 | "value": class_default["memmap"], 152 | "default": class_default["memmap"], 153 | "title": "If True, waveforms are saved as memmap object (recommended for long recordings with many channels).", 154 | }, 155 | { 156 | "name": "save_property_or_features", 157 | "type": "bool", 158 | "value": class_default["save_property_or_features"], 159 | "default": class_default["save_property_or_features"], 160 | "title": "If True, it will save features in the sorting extractor.", 161 | }, 162 | { 163 | "name": "recompute_info", 164 | "type": "bool", 165 | "value": class_default["recompute_info"], 166 | "default": class_default["recompute_info"], 167 | "title": "If True, waveforms are recomputed.", 168 | }, 169 | { 170 | "name": "max_spikes_per_unit", 171 | "type": "int", 172 | "value": class_default["max_spikes_per_unit"], 173 | "default": class_default["max_spikes_per_unit"], 174 | "title": "The maximum number of spikes to extract per unit.", 175 | }, 176 | { 177 | "name": "seed", 178 | "type": "int", 179 | "value": class_default["seed"], 180 | "default": class_default["seed"], 181 | "title": "Random seed for reproducibility.", 182 | }, 183 | { 184 | "name": "verbose", 185 | "type": "bool", 186 | "value": class_default["verbose"], 187 | "default": class_default["verbose"], 188 | "title": "If True, output from SpikeInterface element is verbose when run.", 189 | }, 190 | ] 191 | -------------------------------------------------------------------------------- /spikely/pipeline_view.py: -------------------------------------------------------------------------------- 1 | # The view-control widget set for constructing the active pipeline. 
2 | 3 | from PyQt5 import QtWidgets 4 | 5 | from spikely import StdElementPolicy, SpikeElement, config 6 | 7 | 8 | class PipelineView(QtWidgets.QGroupBox): 9 | 10 | def __init__(self, pipeline_model, parameter_model): 11 | super().__init__("Construct Pipeline") 12 | 13 | self._pipeline_model = pipeline_model 14 | self._pipeline_view = QtWidgets.QListView(self) 15 | self._parameter_model = parameter_model 16 | self._element_policy = StdElementPolicy() 17 | 18 | self._init_ui() 19 | 20 | def _init_ui(self): 21 | # Assembles the individual widgets into the widget-set. 22 | 23 | # The PipelineView consists of three separate UI assemblies 24 | # stacked top to bottom: element selection, pipeline element list, 25 | # and pipeline element commands (move up, delete, move down) 26 | 27 | # Lay out view from top to bottom of group box 28 | self.setLayout(QtWidgets.QVBoxLayout()) 29 | 30 | self.layout().addWidget(self._element_selection()) 31 | self.layout().addWidget(self._pipeline_list()) 32 | self.layout().addWidget(self._pipeline_commands()) 33 | 34 | def _element_selection(self): 35 | # UI to select for and add elements to pipeline. 36 | 37 | # The UI for element selection combines a combo box for the stages 38 | # (e.g., Extractor) and one for the corresponding 39 | # SpikeInterface classes (e.g., MEArecRecordingExtractor) - the two 40 | # pieces of information required to instantiate the SpikeElement 41 | # inserted into the pipeline. 42 | 43 | ui_frame = QtWidgets.QFrame() 44 | ui_frame.setLayout(QtWidgets.QHBoxLayout()) 45 | 46 | # Out of order declaration needed as forward reference 47 | spif_cbx = QtWidgets.QComboBox(self) 48 | spif_cbx.setStatusTip('Choose an element to be added to the ' 49 | 'pipeline - listed for current element category') # noqa: E128 50 | 51 | elem_cbx = QtWidgets.QComboBox() 52 | elem_cbx.setStatusTip('Choose an element category to list the ' 53 | 'specific elements available within that category') # noqa: E128 54 | 55 | ui_frame.layout().addWidget(elem_cbx) 56 | 57 | # Change spif_cbx contents when user makes elem_cbx selection 58 | def _elem_cbx_changed(index): 59 | spif_cbx.clear() 60 | element_cls = elem_cbx.itemData(index) 61 | # SpikeElement subclasses tasked w/ generating spif class lists 62 | 63 | for spif_cls in element_cls.get_installed_spif_cls_list(): 64 | spif_cbx.addItem( 65 | element_cls.get_display_name_from_spif_class( 66 | spif_cls), spif_cls) 67 | 68 | elem_cbx.currentIndexChanged.connect(_elem_cbx_changed) 69 | 70 | # Note: cbx instantiation order matters for initial m/v signalling 71 | 72 | # All elements are subclasses of SpikeElement, but element policy 73 | # determines which ones are available to the user 74 | elem_classes = [cls for cls in SpikeElement.__subclasses__() 75 | if self._element_policy.is_cls_available(cls)] 76 | # Element (subclass) order is arbitrary, so sort by policy order 77 | elem_classes.sort(key=lambda e: self._element_policy.cls_order_dict[e]) 78 | # Now that elements are sorted and filtered, set the combo box 79 | 80 | for cls in elem_classes: 81 | display_name = self._element_policy.get_cls_display_name(cls) 82 | elem_cbx.addItem(display_name + 's', cls) 83 | elem_cbx.setCurrentIndex(0) 84 | 85 | ui_frame.layout().addWidget(spif_cbx) 86 | 87 | add_button = QtWidgets.QPushButton("Add Element") 88 | add_button.setStatusTip('Add selected element to the pipeline - ' 89 | 'element will be inserted in category order') # noqa: E128 90 | 91 | def _add_element_clicked(): 92 | if spif_cbx.currentIndex() > -1: 93 | # Classes 
stored as cbx user data enables object creation 94 | spif_class = spif_cbx.currentData() 95 | element_class = elem_cbx.currentData() 96 | element = element_class(spif_class) 97 | self._pipeline_model.add_element(element) 98 | add_button.clicked.connect(_add_element_clicked) 99 | 100 | ui_frame.layout().addWidget(add_button) 101 | 102 | return ui_frame 103 | 104 | def _pipeline_list(self): 105 | # MVC in action - connect View (widget) to Model 106 | self._pipeline_view.setModel(self._pipeline_model) 107 | 108 | self._pipeline_view.setSelectionMode( 109 | QtWidgets.QAbstractItemView.SingleSelection) 110 | 111 | # Links element (parameter_view) and pipeline (pipeline_view) views 112 | def list_selection_changed(selected, deselected): 113 | if selected.indexes(): 114 | # Retrieve selected element from pipeline model 115 | element = self._get_selected_element() 116 | # Link selected element to element property editor 117 | self._parameter_model.element = element 118 | else: 119 | self._parameter_model.element = None 120 | self._pipeline_view.selectionModel().selectionChanged.connect( 121 | list_selection_changed) 122 | 123 | return self._pipeline_view 124 | 125 | def _pipeline_commands(self): 126 | # Operations applied to the pipeline as a whole (Run, Clear, Queue) 127 | ui_frame = QtWidgets.QFrame() 128 | ui_frame.setLayout(QtWidgets.QHBoxLayout()) 129 | 130 | # Move Up element button and associated action 131 | mu_btn = QtWidgets.QPushButton("Move Up") 132 | mu_btn.setStatusTip('Move selected element up one step in the ' 133 | 'pipeline - cross element category moves barred') # noqa: E128 134 | ui_frame.layout().addWidget(mu_btn) 135 | 136 | def move_up_clicked(): 137 | element = self._get_selected_element() 138 | if element is None: 139 | config.get_main_window().statusBar().showMessage( 140 | "Nothing to move up", config.STATUS_MSG_TIMEOUT) 141 | else: 142 | self._pipeline_model.move_up(element) 143 | mu_btn.clicked.connect(move_up_clicked) 144 | 145 | # Move Down element button and associated action 146 | md_btn = QtWidgets.QPushButton("Move Down") 147 | md_btn.setStatusTip('Move selected element down one step in the ' 148 | 'pipeline - cross element category moves barred') # noqa: E128 149 | ui_frame.layout().addWidget(md_btn) 150 | 151 | def move_down_clicked(): 152 | element = self._get_selected_element() 153 | if element is None: 154 | config.get_main_window().statusBar().showMessage( 155 | "Nothing to move down", config.STATUS_MSG_TIMEOUT) 156 | else: 157 | self._pipeline_model.move_down(element) 158 | md_btn.clicked.connect(move_down_clicked) 159 | 160 | # Delete element button and associated action 161 | de_btn = QtWidgets.QPushButton("Delete") 162 | de_btn.setStatusTip('Delete the selected element in the pipeline') 163 | ui_frame.layout().addWidget(de_btn) 164 | 165 | def delete_clicked(): 166 | element = self._get_selected_element() 167 | if element is None: 168 | config.get_main_window().statusBar().showMessage( 169 | "Nothing to delete", config.STATUS_MSG_TIMEOUT) 170 | else: 171 | self._pipeline_model.delete(element) 172 | de_btn.clicked.connect(delete_clicked) 173 | 174 | return ui_frame 175 | 176 | def _get_selected_element(self): 177 | # Convenience function to retrieve selected element in pipe view 178 | element = None 179 | model = self._pipeline_view.selectionModel() 180 | if model.hasSelection(): 181 | element = self._pipeline_model.data( 182 | model.selectedIndexes()[0], config.ELEMENT_ROLE) 183 | return element 184 | 
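Editor's note on the element-selection code above: `_element_selection()` leans on one small PyQt5 idiom worth calling out — each combo-box item stores a class object as its user data (`addItem(text, cls)`), and the click handlers later recover that class with `currentData()` and instantiate it directly. The snippet below is a standalone illustration of just that pattern; `Extractor` and `Sorter` are made-up placeholder classes, not spikely's element classes.

# Standalone sketch of the QComboBox user-data pattern used in PipelineView.
# Extractor/Sorter are hypothetical stand-ins, not spikely code.
import sys

from PyQt5 import QtWidgets


class Extractor:
    pass


class Sorter:
    pass


app = QtWidgets.QApplication(sys.argv)

cbx = QtWidgets.QComboBox()
# The second argument to addItem() is arbitrary user data -- here, the class itself.
cbx.addItem("Extractors", Extractor)
cbx.addItem("Sorters", Sorter)

cbx.setCurrentIndex(1)
selected_cls = cbx.currentData()   # -> Sorter
instance = selected_cls()          # object creation driven directly by the selection
print(type(instance).__name__)     # prints "Sorter"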
-------------------------------------------------------------------------------- /spikely/elements/guiparams/curator/thresholdnnmetrics.py: -------------------------------------------------------------------------------- 1 | from spiketoolkit.curation import threshold_nn_metrics 2 | from spiketoolkit.validation.quality_metric_classes.parameter_dictionaries import ( 3 | get_validation_params, 4 | ) 5 | from spiketoolkit.validation.quality_metric_classes.nearest_neighbor import NearestNeighbor 6 | metric_default = NearestNeighbor.params 7 | class_default = get_validation_params() 8 | spif_init_func = threshold_nn_metrics 9 | 10 | gui_params = [ 11 | { 12 | "name": "threshold", 13 | "type": "float", 14 | "title": "The threshold for the given metric.", 15 | }, 16 | { 17 | "name": "threshold_sign", 18 | "type": "str", 19 | "title": "If 'less', will threshold any metric less than the given threshold. \ 20 | If 'less_or_equal', will threshold any metric less than or equal to the given threshold. \ 21 | If 'greater', will threshold any metric greater than the given threshold. \ 22 | If 'greater_or_equal', will threshold any metric greater than or equal to the given threshold.", 23 | }, 24 | { 25 | "name": "metric_name", 26 | "type": "str", 27 | "value": "nn_hit_rate", 28 | "default": "nn_hit_rate", 29 | "title": "The name of the nearest neighbor metric to be thresholded (either 'nn_hit_rate' or 'nn_miss_rate').", 30 | }, 31 | { 32 | "name": "num_channels_to_compare", 33 | "type": "int", 34 | "value": metric_default["num_channels_to_compare"], 35 | "default": metric_default["num_channels_to_compare"], 36 | "title": "The number of channels to be used for the PC extraction and comparison.", 37 | }, 38 | { 39 | "name": "max_spikes_per_cluster", 40 | "type": "int", 41 | "value": metric_default["max_spikes_per_cluster"], 42 | "default": metric_default["max_spikes_per_cluster"], 43 | "title": "Max spikes to be used from each unit.", 44 | }, 45 | { 46 | "name": "max_spikes_for_nn", 47 | "type": "int", 48 | "value": metric_default["max_spikes_for_nn"], 49 | "default": metric_default["max_spikes_for_nn"], 50 | "title": "Max spikes to be used for nearest-neighbors calculation.", 51 | }, 52 | { 53 | "name": "n_neighbors", 54 | "type": "int", 55 | "value": metric_default["n_neighbors"], 56 | "default": metric_default["n_neighbors"], 57 | "title": "Number of neighbors to compare.", 58 | }, 59 | #kwargs 60 | { 61 | "name": "method", 62 | "type": "str", 63 | "value": class_default["method"], 64 | "default": class_default["method"], 65 | "title": "If 'absolute' (default), amplitudes are absolute amplitudes in uV are returned. 
\ 66 | If 'relative', amplitudes are returned as ratios between waveform amplitudes and template amplitudes.", 67 | }, 68 | { 69 | "name": "peak", 70 | "type": "str", 71 | "value": class_default["peak"], 72 | "default": class_default["peak"], 73 | "title": "If maximum channel has to be found among negative peaks ('neg'), positive ('pos') or \ 74 | both ('both' - default).", 75 | }, 76 | { 77 | "name": "frames_before", 78 | "type": "int", 79 | "value": class_default["frames_before"], 80 | "default": class_default["frames_before"], 81 | "title": "Frames before peak to compute amplitude.", 82 | }, 83 | { 84 | "name": "frames_after", 85 | "type": "int", 86 | "value": class_default["frames_after"], 87 | "default": class_default["frames_after"], 88 | "title": "Frames after peak to compute amplitude.", 89 | }, 90 | { 91 | "name": "apply_filter", 92 | "type": "bool", 93 | "value": class_default["apply_filter"], 94 | "default": class_default["apply_filter"], 95 | "title": "If True, recording is bandpass-filtered", 96 | }, 97 | { 98 | "name": "freq_min", 99 | "type": "float", 100 | "value": class_default["freq_min"], 101 | "default": class_default["freq_min"], 102 | "title": "High-pass frequency for optional filter (default 300 Hz).", 103 | }, 104 | { 105 | "name": "freq_max", 106 | "type": "float", 107 | "value": class_default["freq_max"], 108 | "default": class_default["freq_max"], 109 | "title": "Low-pass frequency for optional filter (default 6000 Hz).", 110 | }, 111 | { 112 | "name": "grouping_property", 113 | "type": "str", 114 | "value": class_default["grouping_property"], 115 | "default": class_default["grouping_property"], 116 | "title": "Property to group channels. E.g. if the recording extractor has the 'group' property and \ 117 | 'grouping_property' is 'group', then waveforms are computed group-wise.", 118 | }, 119 | { 120 | "name": "ms_before", 121 | "type": "float", 122 | "value": class_default["ms_before"], 123 | "default": class_default["ms_before"], 124 | "title": "Time period in ms to cut waveforms before the spike events.", 125 | }, 126 | { 127 | "name": "ms_after", 128 | "type": "float", 129 | "value": class_default["ms_after"], 130 | "default": class_default["ms_after"], 131 | "title": "Time period in ms to cut waveforms after the spike events.", 132 | }, 133 | { 134 | "name": "dtype", 135 | "type": "dtype", 136 | "value": class_default["dtype"], 137 | "default": class_default["dtype"], 138 | "title": "The numpy dtype of the waveforms.", 139 | }, 140 | { 141 | "name": "compute_property_from_recording", 142 | "type": "bool", 143 | "value": class_default["compute_property_from_recording"], 144 | "default": class_default["compute_property_from_recording"], 145 | "title": "If True and 'grouping_property' is given, the property of each unit is assigned as the corresponding \ 146 | property of the recording extractor channel on which the average waveform is the largest.", 147 | }, 148 | { 149 | "name": "max_channels_per_waveforms", 150 | "type": "int", 151 | "value": class_default["max_channels_per_waveforms"], 152 | "default": class_default["max_channels_per_waveforms"], 153 | "title": " Maximum channels per waveforms to return. 
If None, all channels are returned.", 154 | }, 155 | { 156 | "name": "n_jobs", 157 | "type": "int", 158 | "value": class_default["n_jobs"], 159 | "default": class_default["n_jobs"], 160 | "title": "Number of parallel jobs (default None).", 161 | }, 162 | { 163 | "name": "memmap", 164 | "type": "bool", 165 | "value": class_default["memmap"], 166 | "default": class_default["memmap"], 167 | "title": "If True, waveforms are saved as memmap object (recommended for long recordings with many channels).", 168 | }, 169 | { 170 | "name": "save_property_or_features", 171 | "type": "bool", 172 | "value": class_default["save_property_or_features"], 173 | "default": class_default["save_property_or_features"], 174 | "title": "If True, it will save features in the sorting extractor.", 175 | }, 176 | { 177 | "name": "recompute_info", 178 | "type": "bool", 179 | "value": class_default["recompute_info"], 180 | "default": class_default["recompute_info"], 181 | "title": "If True, waveforms are recomputed.", 182 | }, 183 | { 184 | "name": "max_spikes_per_unit", 185 | "type": "int", 186 | "value": class_default["max_spikes_per_unit"], 187 | "default": class_default["max_spikes_per_unit"], 188 | "title": "The maximum number of spikes to extract per unit.", 189 | }, 190 | { 191 | "name": "seed", 192 | "type": "int", 193 | "value": class_default["seed"], 194 | "default": class_default["seed"], 195 | "title": "Random seed for reproducibility.", 196 | }, 197 | { 198 | "name": "verbose", 199 | "type": "bool", 200 | "value": class_default["verbose"], 201 | "default": class_default["verbose"], 202 | "title": "If True, output from SpikeInterface element is verbose when run.", 203 | }, 204 | ] 205 | -------------------------------------------------------------------------------- /spikely/elements/guiparams/curator/thresholdsnrs.py: -------------------------------------------------------------------------------- 1 | from spiketoolkit.validation.quality_metric_classes.parameter_dictionaries import ( 2 | get_validation_params, 3 | ) 4 | from spiketoolkit.curation import threshold_snrs 5 | from spiketoolkit.validation.quality_metric_classes.snr import SNR 6 | metric_default = SNR.params 7 | spif_init_func = threshold_snrs 8 | class_default = get_validation_params() 9 | 10 | gui_params = [ 11 | { 12 | "name": "threshold", 13 | "type": "float", 14 | "title": "The threshold for the given metric.", 15 | }, 16 | { 17 | "name": "threshold_sign", 18 | "type": "str", 19 | "title": "If 'less', will threshold any metric less than the given threshold. \ 20 | If 'less_or_equal', will threshold any metric less than or equal to the given threshold. \ 21 | If 'greater', will threshold any metric greater than the given threshold. 
\ 22 | If 'greater_or_equal', will threshold any metric greater than or equal to the given threshold.", 23 | }, 24 | { 25 | "name": "snr_mode", 26 | "type": "str", 27 | "value": metric_default["snr_mode"], 28 | "default": metric_default["snr_mode"], 29 | "title": "Mode to compute noise SNR ('mad' | 'std' - default 'mad').", 30 | }, 31 | { 32 | "name": "snr_noise_duration", 33 | "type": "float", 34 | "value": metric_default["snr_noise_duration"], 35 | "default": metric_default["snr_noise_duration"], 36 | "title": "Number of seconds to compute noise level from (default 10.0).", 37 | }, 38 | { 39 | "name": "max_spikes_per_unit_for_snr", 40 | "type": "int", 41 | "value": metric_default["max_spikes_per_unit_for_snr"], 42 | "default": metric_default["max_spikes_per_unit_for_snr"], 43 | "title": "Maximum number of spikes to compute templates from (default 1000).", 44 | }, 45 | { 46 | "name": "template_mode", 47 | "type": "str", 48 | "value": metric_default["template_mode"], 49 | "default": metric_default["template_mode"], 50 | "title": "Use 'mean' or 'median' to compute templates.", 51 | }, 52 | { 53 | "name": "max_channel_peak", 54 | "type": "float", 55 | "value": metric_default["max_channel_peak"], 56 | "default": metric_default["max_channel_peak"], 57 | "title": " If maximum channel has to be found among negative peaks ('neg'), positive ('pos') or both ('both' - default).", 58 | }, 59 | # kwargs 60 | { 61 | "name": "method", 62 | "type": "str", 63 | "value": class_default["method"], 64 | "default": class_default["method"], 65 | "title": "If 'absolute' (default), amplitudes are absolute amplitudes in uV are returned. \ 66 | If 'relative', amplitudes are returned as ratios between waveform amplitudes and template amplitudes.", 67 | }, 68 | { 69 | "name": "peak", 70 | "type": "str", 71 | "value": class_default["peak"], 72 | "default": class_default["peak"], 73 | "title": "If maximum channel has to be found among negative peaks ('neg'), positive ('pos') or \ 74 | both ('both' - default).", 75 | }, 76 | { 77 | "name": "frames_before", 78 | "type": "int", 79 | "value": class_default["frames_before"], 80 | "default": class_default["frames_before"], 81 | "title": "Frames before peak to compute amplitude.", 82 | }, 83 | { 84 | "name": "frames_after", 85 | "type": "int", 86 | "value": class_default["frames_after"], 87 | "default": class_default["frames_after"], 88 | "title": "Frames after peak to compute amplitude.", 89 | }, 90 | { 91 | "name": "apply_filter", 92 | "type": "bool", 93 | "value": class_default["apply_filter"], 94 | "default": class_default["apply_filter"], 95 | "title": "If True, recording is bandpass-filtered", 96 | }, 97 | { 98 | "name": "freq_min", 99 | "type": "float", 100 | "value": class_default["freq_min"], 101 | "default": class_default["freq_min"], 102 | "title": "High-pass frequency for optional filter (default 300 Hz).", 103 | }, 104 | { 105 | "name": "freq_max", 106 | "type": "float", 107 | "value": class_default["freq_max"], 108 | "default": class_default["freq_max"], 109 | "title": "Low-pass frequency for optional filter (default 6000 Hz).", 110 | }, 111 | { 112 | "name": "grouping_property", 113 | "type": "str", 114 | "value": class_default["grouping_property"], 115 | "default": class_default["grouping_property"], 116 | "title": "Property to group channels. E.g. 
if the recording extractor has the 'group' property and \ 117 | 'grouping_property' is 'group', then waveforms are computed group-wise.", 118 | }, 119 | { 120 | "name": "ms_before", 121 | "type": "float", 122 | "value": class_default["ms_before"], 123 | "default": class_default["ms_before"], 124 | "title": "Time period in ms to cut waveforms before the spike events.", 125 | }, 126 | { 127 | "name": "ms_after", 128 | "type": "float", 129 | "value": class_default["ms_after"], 130 | "default": class_default["ms_after"], 131 | "title": "Time period in ms to cut waveforms after the spike events.", 132 | }, 133 | { 134 | "name": "dtype", 135 | "type": "dtype", 136 | "value": class_default["dtype"], 137 | "default": class_default["dtype"], 138 | "title": "The numpy dtype of the waveforms.", 139 | }, 140 | { 141 | "name": "compute_property_from_recording", 142 | "type": "bool", 143 | "value": class_default["compute_property_from_recording"], 144 | "default": class_default["compute_property_from_recording"], 145 | "title": "If True and 'grouping_property' is given, the property of each unit is assigned as the corresponding \ 146 | property of the recording extractor channel on which the average waveform is the largest.", 147 | }, 148 | { 149 | "name": "max_channels_per_waveforms", 150 | "type": "int", 151 | "value": class_default["max_channels_per_waveforms"], 152 | "default": class_default["max_channels_per_waveforms"], 153 | "title": " Maximum channels per waveforms to return. If None, all channels are returned.", 154 | }, 155 | { 156 | "name": "n_jobs", 157 | "type": "int", 158 | "value": class_default["n_jobs"], 159 | "default": class_default["n_jobs"], 160 | "title": "Number of parallel jobs (default None).", 161 | }, 162 | { 163 | "name": "memmap", 164 | "type": "bool", 165 | "value": class_default["memmap"], 166 | "default": class_default["memmap"], 167 | "title": "If True, waveforms are saved as memmap object (recommended for long recordings with many channels).", 168 | }, 169 | { 170 | "name": "save_property_or_features", 171 | "type": "bool", 172 | "value": class_default["save_property_or_features"], 173 | "default": class_default["save_property_or_features"], 174 | "title": "If True, it will save features in the sorting extractor.", 175 | }, 176 | { 177 | "name": "recompute_info", 178 | "type": "bool", 179 | "value": class_default["recompute_info"], 180 | "default": class_default["recompute_info"], 181 | "title": "If True, waveforms are recomputed.", 182 | }, 183 | { 184 | "name": "max_spikes_per_unit", 185 | "type": "int", 186 | "value": class_default["max_spikes_per_unit"], 187 | "default": class_default["max_spikes_per_unit"], 188 | "title": "The maximum number of spikes to extract per unit.", 189 | }, 190 | { 191 | "name": "seed", 192 | "type": "int", 193 | "value": class_default["seed"], 194 | "default": class_default["seed"], 195 | "title": "Random seed for reproducibility.", 196 | }, 197 | { 198 | "name": "verbose", 199 | "type": "bool", 200 | "value": class_default["verbose"], 201 | "default": class_default["verbose"], 202 | "title": "If True, output from SpikeInterface element is verbose when run.", 203 | }, 204 | ] 205 | --------------------------------------------------------------------------------
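Editor's note on the curator guiparams modules above: each one follows the same contract — it exposes `spif_init_func` (the spiketoolkit curation function to call) alongside a `gui_params` list whose entries carry a parameter name, GUI type, optional value/default, and a descriptive title. As a rough sketch of how such a module could be consumed — this is not spikely's actual dispatch code; the `sorting`/`recording` extractors and the exact `threshold_snrs` keyword handling are assumptions to be checked against the installed spiketoolkit version — the defaulted entries can be folded into keyword arguments for the thresholding call:

# Hypothetical consumer of a guiparams curator module (illustration only, not spikely code).
from spikely.elements.guiparams.curator import thresholdsnrs as gp


def apply_snr_threshold(sorting, recording, threshold, threshold_sign="less"):
    # Collect every entry that carries a default; 'threshold' and
    # 'threshold_sign' have no defaults and must come from the user.
    kwargs = {
        p["name"]: p.get("value", p["default"])
        for p in gp.gui_params
        if "default" in p
    }
    # Assumes a signature of threshold_snrs(sorting, recording, threshold,
    # threshold_sign, **kwargs); verify against your spiketoolkit version.
    return gp.spif_init_func(
        sorting,
        recording,
        threshold=threshold,
        threshold_sign=threshold_sign,
        **kwargs,
    )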