├── .codepod.yml
├── .gitignore
├── .travis.yml
├── .vscode
└── snippets
│ └── python.json
├── LICENSE
├── README.md
├── README_old.md
├── codepod_run.sh
├── devel
├── codepod_init.sh
├── codepod_test.sh
├── docker
│ └── codepod_spikeforest2
│ │ ├── Dockerfile
│ │ └── readme.txt
├── format_python.sh
├── requirements.txt
├── setup_colab.sh
├── setup_jp_proxy_widget.sh
├── setup_nbstripout.sh
└── setup_python.sh
├── docs
├── basic_flow_chart_spikeforest.jpg
├── blurbs
│ ├── auto-install-algs.md
│ ├── making-a-recording-public.md
│ ├── prepare-recordings-update.md
│ ├── separation-of-derivatives-pipeline.md
│ ├── spikeforest-readme-misc.md
│ ├── spikeforest-readme.md
│ └── spikeforest-sfn-abstract.md
├── example_notebooks
│ ├── example_multi_recording.ipynb
│ ├── example_multi_recording_old.ipynb
│ ├── example_single_recording.ipynb
│ ├── multi_recordings_parallel_local.ipynb
│ ├── multi_recordings_parallel_processing.ipynb
│ ├── multi_recordings_parallel_remote.ipynb
│ ├── multi_sorters_parallel_local.ipynb
│ ├── multi_sorters_parallel_processing.ipynb
│ ├── multi_sorters_serial_local.ipynb
│ └── multi_sorters_serial_processing.ipynb
├── full_analysis.md
├── index.md
├── news
│ ├── 2019-06-11-analysis-archive.md
│ ├── 2019-06-25-edinburgh-workshop.md
│ └── 2019-07-01-spikeforum.md
├── spike_sorting.md
├── tutorials
│ ├── j2run.sh
│ ├── j2templates
│ │ ├── compare_with_truth.py
│ │ ├── explain_force_run.md
│ │ ├── install_spikeforest_pypi.md
│ │ ├── install_spikeforest_pypi.sh
│ │ ├── load_spikeforest_tetrode_recording.py
│ │ ├── prepare_toy_recording.py
│ │ ├── prerequisites.md
│ │ └── run_spike_sorting.py
│ ├── readme.txt
│ ├── spike_sorting_single_recording.md
│ ├── spike_sorting_single_recording.md.j2
│ ├── spike_sorting_single_recording.py
│ ├── spike_sorting_single_recording.py.j2
│ ├── spike_sorting_spikeforest_recording.md
│ ├── spike_sorting_spikeforest_recording.md.j2
│ ├── spike_sorting_spikeforest_recording.py
│ └── spike_sorting_spikeforest_recording.py.j2
└── updates
│ ├── 2019-04-11.md
│ ├── 2019-04-12.md
│ ├── 2019-04-13.md
│ ├── 2019-04-17.md
│ ├── 2019-04-18.md
│ ├── 2019-04-19.md
│ ├── 2019-04-23.md
│ ├── 2019-04-24.md
│ ├── 2019-04-25.md
│ ├── 2019-04-26.md
│ ├── 2019-04-29.md
│ ├── 2019-04-30.md
│ ├── 2019-05-02.md
│ ├── 2019-05-03.md
│ ├── 2019-05-06.md
│ ├── 2019-05-07.md
│ ├── 2019-05-08.md
│ ├── 2019-05-09.md
│ ├── 2019-05-10.md
│ ├── 2019-05-13.md
│ ├── 2019-05-14.md
│ ├── 2019-05-15.md
│ ├── 2019-05-17.md
│ └── 2019-05-20.md
├── mountaintools
├── .bumpversion.cfg
├── .gitignore
├── .travis.yml
├── LICENSE
├── README.md
├── bin
│ ├── kachery-token
│ ├── mt-cat
│ ├── mt-download
│ ├── mt-execute-job
│ ├── mt-find
│ ├── mt-ls
│ ├── mt-resolve-key-path
│ └── mt-snapshot
├── containers
│ ├── build_simg_using_docker.sh
│ └── mountaintools_basic
│ │ ├── Dockerfile
│ │ ├── build_simg.sh
│ │ ├── readme.txt
│ │ └── upload_singularity_container.py
├── docs
│ ├── .gitignore
│ ├── .nojekyll
│ ├── Makefile
│ ├── _static
│ │ └── dummy.txt
│ ├── conf.py
│ ├── hold
│ │ └── false_theorems.tex
│ ├── index.html
│ ├── index.rst
│ ├── modules.rst
│ └── mountainclient.rst
├── examples
│ ├── .gitignore
│ ├── example_mandelbrot
│ │ ├── __init__.py
│ │ ├── mandelbrot
│ │ │ ├── __init__.py
│ │ │ └── mandelbrot.py
│ │ └── test_mandelbrot.py
│ └── example_primes
│ │ └── example_primes.py
├── kachery
│ ├── Dockerfile
│ ├── how_to_run_in_docker.txt
│ ├── how_to_test.txt
│ ├── package.json
│ └── src
│ │ └── kacheryserver.js
├── mlprocessors
│ ├── README.md
│ ├── __init__.py
│ ├── consolecapture.py
│ ├── core.py
│ ├── createjobs.py
│ ├── defaultjobhandler.py
│ ├── execute.py
│ ├── jobhandler.py
│ ├── jobqueue.py
│ ├── mountainjob.py
│ ├── mountainjobresult.py
│ ├── paralleljobhandler.py
│ ├── registry.py
│ ├── shellscript.py
│ ├── slurmjobhandler.py
│ ├── temporarydirectory.py
│ └── validators.py
├── mountainclient
│ ├── README.md
│ ├── __init__.py
│ ├── aux.py
│ ├── filelock.py
│ ├── kachery_tokens.py
│ ├── mountainclient.py
│ ├── mountainclientlocal.py
│ ├── mountainremoteclient.py
│ ├── mttyping.py
│ ├── sha1cache.py
│ ├── steady_download_and_compute_sha1.py
│ └── unittests
│ │ └── try_local_db_multiprocess.py
├── mountaintools
│ ├── __init__.py
│ └── reactcomponentpythoncompanion.py
├── mtlogging
│ ├── __init__.py
│ └── mtlogging.py
├── pairioserver
│ ├── Dockerfile
│ ├── LICENSE
│ ├── README.md
│ ├── docker_entry.sh
│ ├── examples
│ │ └── basic_usage.py
│ ├── package.json
│ └── pairioserver
│ │ └── pairioserver.js
├── pytest.ini
├── requirements.txt
├── setup.py
├── tests
│ └── test_skip_failing.py
└── vdomr
│ ├── README.md
│ ├── __init__.py
│ ├── component.py
│ ├── components
│ ├── __init__.py
│ └── components.py
│ ├── devel
│ ├── __init__.py
│ └── devel.py
│ ├── google
│ └── colab
│ │ ├── __init__.py
│ │ └── output
│ │ ├── __init__.py
│ │ ├── _area.py
│ │ ├── _js.py
│ │ ├── _js_builder.py
│ │ ├── _publish.py
│ │ ├── _tags.py
│ │ └── _util.py
│ ├── helpers.py
│ ├── server_example
│ └── server_example.py
│ ├── vdom.py
│ ├── vdomr.py
│ └── vdomrserver.py
├── old
├── gui
│ ├── batchmonitor.sh
│ ├── batchmonitor
│ │ ├── batchmonitor.py
│ │ ├── batchmonitormainwindow.py
│ │ └── start_batchmonitor.py
│ ├── browse_recordings.sh
│ ├── browse_recordings
│ │ ├── browse_recordings.py
│ │ ├── sfrecordingwidget.py
│ │ └── start_browse_recordings.py
│ ├── sfbrowser.sh
│ ├── sfbrowser
│ │ ├── sfbrowser.py
│ │ ├── sfbrowsermainwindow.py
│ │ └── start_sfbrowser.py
│ ├── sfbrowser_snr.sh
│ ├── sfbrowser_snr
│ │ ├── sfbrowser_snr.py
│ │ ├── sfbrowser_snr_mainwindow.py
│ │ └── start_sfbrowser_snr.py
│ ├── sorting_result_explorer.sh
│ ├── sorting_result_explorer
│ │ ├── sortingresultexplorer.py
│ │ ├── sortingresultsexplorermainwindow.py
│ │ └── start_sorting_result_explorer.py
│ ├── test_view_timeseries.sh
│ └── view_timeseries.py
└── misc
│ └── simplot
│ ├── d3.min.js
│ ├── index.html
│ ├── jquery-3.3.1.min.js
│ ├── simplot.js
│ └── spikeforestwidgets.js
├── pytest.ini
├── run_pytest.sh
├── spikeforest
├── .bumpversion.cfg
├── .gitignore
├── .travis.yml
├── MANIFEST.in
├── bin
│ └── forestview
├── containers
│ ├── build_simg_using_docker.sh
│ ├── pysing
│ │ ├── Dockerfile
│ │ └── readme.txt
│ ├── spikeforest_basic
│ │ ├── Dockerfile
│ │ ├── build_simg.sh
│ │ ├── readme.txt
│ │ └── upload_singularity_container.py
│ └── spikeforest_basic_jjun
│ │ ├── Dockerfile
│ │ ├── build_simg.sh
│ │ ├── readme.txt
│ │ └── upload_singularity_container.py
├── forestview
│ ├── __init__.py
│ ├── analysis_view_launchers.py
│ ├── analysis_views
│ │ ├── __init__.py
│ │ ├── analysissummaryview.py
│ │ ├── sorterdefinitionsview.py
│ │ └── testview.py
│ ├── analysiscontext.py
│ ├── bandpass_filter.py
│ ├── core
│ │ ├── .gitignore
│ │ ├── __init__.py
│ │ ├── chrome-tabs
│ │ │ ├── LICENSE.txt
│ │ │ ├── css
│ │ │ │ ├── chrome-tabs-dark-theme.css
│ │ │ │ ├── chrome-tabs-dark-theme.styl
│ │ │ │ ├── chrome-tabs.css
│ │ │ │ └── chrome-tabs.styl
│ │ │ └── js
│ │ │ │ └── chrome-tabs.js
│ │ ├── draggabilly.js
│ │ ├── forestviewcontrolpanel.py
│ │ ├── forestviewmainwindow.py
│ │ ├── tabbar.py
│ │ └── viewcontainer.py
│ ├── filterrecording.py
│ ├── forestview.py
│ ├── recording_views
│ │ ├── __init__.py
│ │ ├── clusterview.py
│ │ ├── currentstateview.py
│ │ ├── electrodegeometryview.py
│ │ ├── electrodegeometrywidget.js
│ │ ├── featurespaceview.py
│ │ ├── recording_view_launchers.py
│ │ ├── recordingsummaryview.py
│ │ ├── sortingresultdetailview.py
│ │ ├── sortingresultstableview.py
│ │ ├── stdoutsender.py
│ │ ├── tablewidget.js
│ │ ├── tablewidget.py
│ │ ├── templatesview.py
│ │ ├── testplotlyview.py
│ │ ├── timeseriesview.py
│ │ ├── unitdetailview.py
│ │ ├── unitstableview.py
│ │ └── unittableview.py
│ ├── recordingcontext.py
│ ├── sortingresultcontext.py
│ ├── spikeforest_view_launchers.py
│ ├── spikeforest_views
│ │ ├── __init__.py
│ │ ├── aggregatedsortingresultstableview.py
│ │ ├── currentstateview.py
│ │ ├── recordingtableview.py
│ │ ├── tablewidget.js
│ │ └── tablewidget.py
│ ├── spikeforestcontext.py
│ ├── spikefront_view_launchers.py
│ ├── spikefront_views
│ │ ├── __init__.py
│ │ ├── mainresulttableview.py
│ │ ├── tablewidget.js
│ │ └── tablewidget.py
│ └── spikefrontcontext.py
├── pytest.ini
├── setup.py
├── sfdata
│ ├── __init__.py
│ └── sfdata.py
├── spikeforest
│ ├── __init__.py
│ ├── example_datasets
│ │ ├── __init__.py
│ │ ├── real.py
│ │ ├── synthesize_random_firings.py
│ │ ├── synthesize_random_waveforms.py
│ │ ├── synthesize_single_waveform.py
│ │ ├── synthesize_timeseries.py
│ │ ├── toy_example1.py
│ │ └── yass_example.py
│ ├── extractors
│ │ ├── __init__.py
│ │ ├── efficientaccess
│ │ │ ├── __init__.py
│ │ │ └── efficientaccessrecordingextractor.py
│ │ ├── klustasortingextractor
│ │ │ ├── __init__.py
│ │ │ └── klustasortingextractor.py
│ │ ├── neuroscopesortingextractor
│ │ │ ├── __init__.py
│ │ │ └── neuroscopesortingextractor.py
│ │ ├── sfmdaextractors
│ │ │ ├── __init__.py
│ │ │ ├── mdaio.py
│ │ │ └── sfmdaextractors.py
│ │ └── tools.py
│ └── spikewidgets
│ │ ├── __init__.py
│ │ ├── devel
│ │ ├── __init__.py
│ │ ├── compute_unit_snrs.py
│ │ └── saveplot.py
│ │ ├── example_datasets
│ │ ├── __init__.py
│ │ ├── real.py
│ │ ├── synthesize_random_firings.py
│ │ ├── synthesize_random_waveforms.py
│ │ ├── synthesize_single_waveform.py
│ │ ├── synthesize_timeseries.py
│ │ └── toy_example1.py
│ │ ├── lazyfilters
│ │ ├── __init__.py
│ │ ├── bandpass_filter.py
│ │ ├── lazyfilterrecording.py
│ │ └── whiten.py
│ │ ├── tables
│ │ ├── __init__.py
│ │ └── sortingcomparisontable
│ │ │ ├── __init__.py
│ │ │ └── sortingcomparisontable.py
│ │ ├── validation
│ │ ├── __init__.py
│ │ └── sortingcomparison
│ │ │ ├── __init__.py
│ │ │ └── sortingcomparison.py
│ │ └── widgets
│ │ ├── __init__.py
│ │ ├── crosscorrelogramswidget
│ │ ├── __init__.py
│ │ └── crosscorrelogramswidget.py
│ │ ├── electrodegeometrywidget
│ │ ├── __init__.py
│ │ └── electrodegeometrywidget.py
│ │ ├── featurespacewidget
│ │ ├── __init__.py
│ │ └── featurespacewidget.py
│ │ ├── sortingaccuracywidget
│ │ ├── __init__.py
│ │ └── sortingaccuracywidget.py
│ │ ├── timeserieswidget
│ │ ├── __init__.py
│ │ └── timeserieswidget.py
│ │ └── unitwaveformswidget
│ │ ├── __init__.py
│ │ └── unitwaveformswidget.py
├── spikeforest_analysis
│ ├── __init__.py
│ ├── aggregate_sorting_results.py
│ ├── bandpass_filter.py
│ ├── compare_sortings_with_truth.py
│ ├── compute_units_info.py
│ ├── computerecordinginfo.py
│ ├── filterrecording.py
│ ├── impl.py
│ ├── sfmdaextractors
│ │ ├── __init__.py
│ │ ├── mdaio.py
│ │ └── sfmdaextractors.py
│ ├── sort_recordings.py
│ ├── sortingcomparison.py
│ ├── summarize_recordings.py
│ ├── summarize_sortings.py
│ └── whiten.py
├── spikeforest_common
│ ├── __init__.py
│ ├── autoscale_recording.py
│ ├── bandpass_filter.py
│ └── filterrecording.py
├── spikeforestsorters
│ ├── __init__.py
│ ├── build_simg_using_docker.sh
│ ├── containers
│ │ └── yass
│ │ │ ├── Dockerfile
│ │ │ ├── build_simg.sh
│ │ │ └── readme.txt
│ ├── descriptions
│ │ ├── alg_herding_spikes_2.md
│ │ ├── alg_ironclust.md
│ │ ├── alg_jrclust.md
│ │ ├── alg_jrclust.png
│ │ ├── alg_kilosort.md
│ │ ├── alg_kilosort.png
│ │ ├── alg_kilosort2.md
│ │ ├── alg_klusta.md
│ │ ├── alg_mountainsort4.md
│ │ ├── alg_mountainsort4.png
│ │ ├── alg_spyking_circus.md
│ │ ├── alg_spyking_circus.png
│ │ ├── alg_tridesclous.md
│ │ ├── alg_waveclus.md
│ │ ├── alg_waveclus.png
│ │ └── alg_yass.md
│ ├── herdingspikes2
│ │ ├── __init__.py
│ │ ├── container
│ │ │ ├── Dockerfile
│ │ │ ├── build_simg.sh
│ │ │ ├── readme.txt
│ │ │ └── upload_singularity_container.py
│ │ └── herdingspikes2.py
│ ├── ironclust
│ │ ├── __init__.py
│ │ ├── install_ironclust.py
│ │ ├── ironclust.py
│ │ └── test0_ironclust.py
│ ├── jrclust
│ │ ├── __init__.py
│ │ ├── default.prm
│ │ ├── install_jrclust.py
│ │ ├── jrclust.py
│ │ ├── mdaio
│ │ │ ├── arrayify.m
│ │ │ ├── pathify32.m
│ │ │ ├── pathify64.m
│ │ │ ├── readmda.m
│ │ │ ├── readmda_block.m
│ │ │ ├── readmdadims.m
│ │ │ ├── writemda.m
│ │ │ ├── writemda16i.m
│ │ │ ├── writemda16ui.m
│ │ │ ├── writemda32.m
│ │ │ ├── writemda32ui.m
│ │ │ └── writemda64.m
│ │ ├── p_jrclust.m
│ │ └── template.prm
│ ├── kilosort
│ │ ├── __init__.py
│ │ ├── install_kilosort.py
│ │ ├── kilosort.py
│ │ ├── mdaio
│ │ │ ├── arrayify.m
│ │ │ ├── pathify32.m
│ │ │ ├── pathify64.m
│ │ │ ├── readmda.m
│ │ │ ├── readmda_block.m
│ │ │ ├── readmdadims.m
│ │ │ ├── writemda.m
│ │ │ ├── writemda16i.m
│ │ │ ├── writemda16ui.m
│ │ │ ├── writemda32.m
│ │ │ ├── writemda32ui.m
│ │ │ └── writemda64.m
│ │ └── p_kilosort.m
│ ├── kilosort2
│ │ ├── __init__.py
│ │ ├── install_kilosort2.py
│ │ ├── kilosort2.py
│ │ ├── kilosort2_channelmap.txt
│ │ ├── mdaio
│ │ │ ├── arrayify.m
│ │ │ ├── pathify32.m
│ │ │ ├── pathify64.m
│ │ │ ├── readmda.m
│ │ │ ├── readmda_block.m
│ │ │ ├── readmdadims.m
│ │ │ ├── writemda.m
│ │ │ ├── writemda16i.m
│ │ │ ├── writemda16ui.m
│ │ │ ├── writemda32.m
│ │ │ ├── writemda32ui.m
│ │ │ └── writemda64.m
│ │ └── p_kilosort2.m
│ ├── klusta
│ │ ├── __init__.py
│ │ ├── config_default.prm
│ │ ├── container
│ │ │ ├── Dockerfile
│ │ │ ├── build_simg.sh
│ │ │ ├── readme.txt
│ │ │ └── upload_singularity_container.py
│ │ └── klusta.py
│ ├── mountainsort4
│ │ ├── __init__.py
│ │ ├── bandpass_filter.py
│ │ ├── container
│ │ │ ├── Dockerfile
│ │ │ ├── build_simg.sh
│ │ │ ├── readme.txt
│ │ │ └── upload_singularity_container.py
│ │ ├── filterrecording.py
│ │ ├── mountainsort4.py
│ │ └── whiten.py
│ ├── spyking_circus
│ │ ├── __init__.py
│ │ ├── container
│ │ │ ├── Dockerfile
│ │ │ ├── build_simg.sh
│ │ │ ├── readme.txt
│ │ │ └── upload_singularity_container.py
│ │ └── spyking_circus.py
│ ├── tridesclous
│ │ ├── __init__.py
│ │ ├── container
│ │ │ ├── Dockerfile
│ │ │ ├── build_simg.sh
│ │ │ ├── readme.txt
│ │ │ └── upload_singularity_container.py
│ │ └── tridesclous.py
│ ├── waveclus
│ │ ├── __init__.py
│ │ ├── install_waveclus.py
│ │ ├── mdaio
│ │ │ ├── arrayify.m
│ │ │ ├── pathify32.m
│ │ │ ├── pathify64.m
│ │ │ ├── readmda.m
│ │ │ ├── readmda_block.m
│ │ │ ├── readmdadims.m
│ │ │ ├── writemda.m
│ │ │ ├── writemda16i.m
│ │ │ ├── writemda16ui.m
│ │ │ ├── writemda32.m
│ │ │ ├── writemda32ui.m
│ │ │ └── writemda64.m
│ │ ├── p_waveclus.m
│ │ ├── set_parameters_spf.m
│ │ └── waveclus.py
│ ├── yass
│ │ ├── __init__.py
│ │ ├── config_default.yaml
│ │ ├── container
│ │ │ ├── Dockerfile
│ │ │ ├── build_simg.sh
│ │ │ ├── build_simg_magland.sh
│ │ │ ├── readme.txt
│ │ │ └── upload_singularity_container.py
│ │ ├── test0_yass1.py
│ │ ├── tools.py
│ │ ├── yass.py
│ │ └── yasssortingextractor.py
│ └── yass1
│ │ ├── __init__.py
│ │ ├── config_default.yaml
│ │ ├── container
│ │ ├── Dockerfile
│ │ ├── build_simg.sh
│ │ ├── build_simg_magland.sh
│ │ ├── readme.txt
│ │ └── upload_singularity_container.py
│ │ ├── test0_yass1.py
│ │ ├── tools.py
│ │ ├── yass1.py
│ │ └── yasssortingextractor1.py
└── spikeforestwidgets
│ ├── __init__.py
│ ├── correlogramswidget.py
│ ├── dist
│ ├── d3.v5.min.js
│ └── jquery-3.3.1.min.js
│ ├── electrodegeometrywidget.py
│ ├── featurespacewidget
│ ├── __init__.py
│ ├── canvaswidget.js
│ ├── featurespacemodel.js
│ ├── featurespacewidget.js
│ ├── featurespacewidget.py
│ ├── featurespacewidget_plotly.py
│ ├── mda.js
│ ├── test_featurespacewidget.html
│ └── test_featurespacewidget.ipynb
│ ├── package.json
│ ├── src
│ ├── datasetselectwidget
│ │ └── datasetselectwidget.js
│ ├── datasetwidget
│ │ └── datasetwidget.js
│ ├── ephys_viz
│ │ ├── clustermetricswidget.js
│ │ ├── evdatasetwidget.js
│ │ ├── firingsmodel.js
│ │ ├── geomwidget.js
│ │ ├── index.js
│ │ ├── jsqcanvaswidget.js
│ │ ├── jsqcore.js
│ │ ├── load_binary_file_part.js
│ │ ├── load_resources.js
│ │ ├── load_text_file.js
│ │ ├── mda.js
│ │ ├── ml-layout.css
│ │ ├── sortcomparisonwidget.js
│ │ ├── templateswidget.js
│ │ ├── timedisplayclass.js
│ │ ├── timeseriesmodel.js
│ │ ├── timeserieswidget.js
│ │ ├── view_cluster_metrics.js
│ │ ├── view_dataset.js
│ │ ├── view_geometry.js
│ │ ├── view_sort_comparison.js
│ │ ├── view_templates.js
│ │ └── view_timeseries.js
│ ├── index.js
│ └── lariloginwidget
│ │ └── lariloginwidget.js
│ ├── templatewidget
│ ├── __init__.py
│ ├── canvaswidget.js
│ ├── mda.js
│ ├── templatewidget.js
│ └── templatewidget.py
│ ├── timeserieswidget
│ ├── __init__.py
│ ├── canvaswidget.js
│ ├── mda.js
│ ├── test_timeserieswidget.html
│ ├── test_timeserieswidget.ipynb
│ ├── timeseriesmodel.js
│ ├── timeserieswidget.js
│ └── timeserieswidget.py
│ ├── unitwaveformswidget.py
│ └── webpack.config.js
├── webapps
└── mountainbrowser
│ ├── .gitignore
│ ├── README.md
│ ├── electron
│ ├── package.json
│ └── src
│ │ ├── index.js
│ │ └── preload.js
│ ├── gallery
│ └── gallery_snapshot.sh
│ ├── open_as_chrome_app.sh
│ ├── package.json
│ ├── server
│ └── index.js
│ ├── src
│ ├── components
│ │ ├── AppBar.js
│ │ ├── BrowserTree.js
│ │ ├── ConfigView.js
│ │ ├── ElectronConfigView.js
│ │ ├── ItemView.js
│ │ ├── KacheryConfigView.js
│ │ ├── KacheryManager.js
│ │ ├── KacheryStatusIndicator.js
│ │ ├── LocalStorageConfigView.js
│ │ ├── MainWindow.js
│ │ ├── PairioConfigView.js
│ │ ├── PathBar.js
│ │ ├── SpacerComponent.js
│ │ ├── Tree.js
│ │ ├── TreeNode.js
│ │ └── developmentTest.js
│ ├── index.js
│ ├── index_template.html
│ ├── itemviewplugins
│ │ ├── DevTestView
│ │ │ ├── ComputeRecordingInfo.json
│ │ │ ├── DevTestViewPlugin.js
│ │ │ ├── gen_jobs.py
│ │ │ └── repeat_text.json
│ │ ├── ElectrodeGeometryView
│ │ │ └── ElectrodeGeometryViewPlugin.js
│ │ ├── FileContentView
│ │ │ ├── FileContentView.js
│ │ │ └── FileContentViewPlugin.js
│ │ ├── ReactComponentPythonCompanion.js
│ │ ├── RecordingSummaryView
│ │ │ ├── ComputeRecordingInfo.json
│ │ │ ├── ComputeUnitDetail.json
│ │ │ ├── ComputeUnitsInfo.json
│ │ │ ├── FilterTimeseries.json
│ │ │ ├── RecordingSummaryViewPlugin.js
│ │ │ ├── UnitDetailWidget.js
│ │ │ ├── UnitsTable.js
│ │ │ ├── computeunitdetail.py
│ │ │ └── genjobs.py
│ │ ├── UnitWaveformsView
│ │ │ └── UnitWaveformsViewPlugin.js
│ │ └── index.js
│ ├── mountainclient-js
│ │ ├── impl
│ │ │ └── mountainclientimpl.js
│ │ ├── index.js
│ │ └── package.json
│ ├── spikeforestwidgets-js
│ │ ├── ElectrodeGeometryWidget
│ │ │ └── ElectrodeGeometryWidget.js
│ │ ├── common
│ │ │ └── CanvasPainter.js
│ │ ├── index.js
│ │ └── package.json
│ └── viewplugins
│ │ └── ElectrodeGeometryView
│ │ └── ElectrodeGeometryViewPlugin.js
│ └── webpack.config.js
└── working
├── admin
├── bumpversion_mountaintools.sh
└── bumpversion_spikeforest.sh
├── configuration
├── admin_config.py
└── old
│ └── admin_config.ipynb
├── gen_synth_datasets_mearec
├── driver.py
├── gen_synth_datasets.ipynb
├── gen_synth_datasets.py
├── gen_synth_datasets_neuronexus.ipynb
├── gen_synth_datasets_sqmea.ipynb
├── outputvisualizer.py
├── select_templates.py
└── synthesize_timeseries.py
├── main_analysis
├── main_analysis
├── main_analysis.json
├── main_analysis_irc.json
├── run_all.sh
└── run_all_irc.sh
├── notebooks
├── view_main_analysis.ipynb
├── view_main_analysis_irc.ipynb
└── view_main_analysis_jjj.ipynb
├── old_compute_resources
├── ccmlin008-80
│ └── start.sh
├── ccmlin008-default
│ └── start.sh
├── ccmlin008-gpu
│ └── start.sh
├── ccmlin008-parallel
│ ├── job_index--batch_1552580755.7813327_RmyAq8.txt
│ └── start.sh
├── ccmlin008-sc
│ └── start.sh
├── ccmlin008-test
│ └── start.sh
├── jfm-laptop
│ └── start.sh
└── local-computer
│ └── start.sh
├── old_main_analysis
├── analysis.hybrid_janelia.json
├── analysis.magland_synth_param_search.json
├── analysis.manual_buzsaki_petersen.json
├── analysis.manual_franklab.json
├── analysis.paired_boyden32c.json
├── analysis.paired_crcns.json
├── analysis.paired_kampff.json
├── analysis.paired_mea64c.json
├── analysis.paired_monotrode.json
├── analysis.synth_bionet.json
├── analysis.synth_magland.json
├── analysis.synth_mearec_neuronexus.json
├── analysis.synth_mearec_tetrode.json
├── analysis.synth_monotrode.json
├── analysis.synth_visapy.json
├── analysis.test_range.json
├── analysis.test_reliability.json
├── analysis.test_samplerate.json
├── apply_sorters_to_recordings
│ ├── __init__.py
│ └── apply_sorters_to_recordings.py
├── compute_resources_ccmlin000.json
├── compute_resources_ccmlin008.json
├── run_all.sh
├── run_some_ccmlin000.sh
├── spikeforest_analysis
├── start_continuous_processing.sh
├── test_irc.analysis.visapy_mea.json
└── test_kilosort.sh
├── prepare_recordings
├── descriptions
│ ├── spf_hybrid_janelia.md
│ ├── spf_manual_buzsakilab.md
│ ├── spf_manual_franklab.md
│ ├── spf_paired_boyden32c.md
│ ├── spf_paired_crcns.md
│ ├── spf_paired_kampff.md
│ ├── spf_paired_mea64c.md
│ ├── spf_paired_monotrode.md
│ ├── spf_synth_bionet.md
│ ├── spf_synth_magland.md
│ ├── spf_synth_mearec_neuronexus.md
│ ├── spf_synth_mearec_sqmea64c.md
│ ├── spf_synth_mearec_tetrode.md
│ ├── spf_synth_monotrode.md
│ ├── spf_synth_visapy.md
│ ├── spf_test_reliability.md
│ └── spf_test_samplerate.md
├── load_study_set_from_md.py
├── prepare_all.sh
├── prepare_hybrid_janelia_recordings.py
├── prepare_manual_buzsaki_petersen_recordings.py
├── prepare_manual_buzsakilab_recordings.py
├── prepare_manual_franklab_recordings.py
├── prepare_paired_monotrode_recordings.py
├── prepare_paired_recordings.py
├── prepare_synth_bionet_recordings.py
├── prepare_synth_magland_recordings.py
├── prepare_synth_mearec_neuronexus_recordings.py
├── prepare_synth_mearec_sqmea64c_recordings.py
├── prepare_synth_mearec_tetrode_recordings.py
├── prepare_synth_monotrode_recordings.py
├── prepare_synth_visapy_recordings.py
├── prepare_test_reliability_recordings.py
├── prepare_test_samplerate_recordings.py
├── prepare_toy_recordings_local.py
└── show_public_recordings.py
├── scratch
└── troubleshoot_compare_gt.py
├── simulations
└── mearec
│ ├── create_MEArec_recordings_spikeforest.py
│ ├── create_MEArec_templates_spikeforest.py
│ ├── register_recordings_in_spikeforest.py
│ ├── spf_synth_mearec.md
│ └── view_info.py
├── test_analysis
├── analysis.test_irc_synth_magland.json
├── analysis.test_ks2_synth_magland.json
├── analysis.test_ks_synth_magland.json
├── analysis.test_ms4_synth_magland.json
├── analysis.test_sc_paired_mea64c.json
├── analysis.test_sc_synth_magland.json
├── analysis.test_yass_synth_magland.json
├── prepare_test_recordings.py
└── run_test.sh
├── test_sorters
├── test0_tridesclous.py
├── test_sorters.py
└── troubleshoot_ms4.py
├── testing
└── plotlyplot
│ └── plotlyplot-example.ipynb
├── tests
└── cairio_benchmarking
│ └── cairio_benchmarking.ipynb
└── website
├── 001_prepare_recordings.sh
├── 002_run_analysis.sh
├── 003_assemble_website_data.sh
├── 004_load_data_into_local_database.sh
├── 004b_load_news_posts_into_local_database.sh
├── 005_load_data_into_remote_database_prod.sh
├── 005_load_data_into_remote_database_staging.sh
├── 005b_load_news_posts_into_remote_database_prod.sh
├── 005b_load_news_posts_into_remote_database_staging.sh
├── 006_upload_public_files.sh
├── 007_generate_unit_details.sh
├── WIP_assemble_website_data.ipynb
├── assemble_website_data.py
├── do_assemble_website_data.sh
├── do_assemble_website_data_irc.sh
├── explore_results.ipynb
├── generate_spikesprays.ipynb
├── generate_unit_details.py
├── load_data_into_local_database.sh
├── load_data_into_remote_database.sh
├── load_data_into_remote_database_prod.sh
├── load_news_posts_into_local_database.sh
├── load_news_posts_into_remote_database.sh
├── load_news_posts_into_remote_database_prod.sh
├── make_news_posts.py
├── make_website_data.sh
├── make_website_data_directory.py
├── update_analysis_history.py
└── upload_public_files.py
/.codepod.yml:
--------------------------------------------------------------------------------
1 | image: "spikeforest/codepod_spikeforest2"
2 | tasks:
3 | - command: ./devel/codepod_init.sh
4 |
--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
1 | language: python
2 | python:
3 | - "3.6"
4 |
5 | services:
6 | - docker
7 |
8 | before_install:
9 | - docker pull spikeforest/codepod_spikeforest2
10 |
11 | install:
12 | - pip install pytest
13 | - pip install codepod
14 | - pip install ./mountaintools
15 | - pip install ./spikeforest
16 |
17 | script:
18 | - codepod --command='devel/codepod_test.sh' --no-pull --docker_opts="-e EXTRA_CMD='./devel/codepod_test.sh'" .
19 | # - PYTHONDONTWRITEBYTECODE=1 python -m pytest -p no:cacheprovider
20 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | ## SpikeForest
2 |
3 | Please use the [new version of SpikeForest](https://github.com/flatironinstitute/spikeforest2).
4 |
5 | You can also [view the old README](./README_old.md).
6 |
--------------------------------------------------------------------------------
/codepod_run.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -ex
3 |
4 | # Run this script to open the project in codepod
5 | # To install codepod: pip install --upgrade codepod
6 | # You must also have docker installed
7 | # Once in codepod, you can, for example, open vscode: code .
8 |
9 | OPTS=""
10 |
11 | # SHA-1 cache directory
12 | if [ ! -z "$SHA1_CACHE_DIR" ]; then
13 |     OPTS="$OPTS -v $SHA1_CACHE_DIR:/tmp/sha1-cache -v /dev/shm:/dev/shm"
14 | fi
15 |
16 | if [ -d "$HOME/.mountaintools" ]; then
17 |     OPTS="$OPTS -v $HOME/.mountaintools:/home/user/.mountaintools"
18 | fi
19 |
20 | eval "codepod -g $PWD $OPTS $@"
21 |
--------------------------------------------------------------------------------
/devel/codepod_init.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -ex
3 |
4 | # This script is called when the codepod container starts.
5 | # It is called from the root directory of the project
6 |
7 | pip install -e ./mountaintools
8 | pip install -e ./spikeforest
9 | pip install six
10 |
11 | # vscode extensions
12 | code --install-extension ms-python.python
13 | code --install-extension eamodio.gitlens
14 | code --install-extension bierner.markdown-preview-github-styles
15 | # code --install-extension rduldulao.py-coverage-view
16 |
--------------------------------------------------------------------------------
/devel/codepod_test.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -ex
3 |
4 | PYTHONDONTWRITEBYTECODE=1 python -m pytest -p no:cacheprovider
5 |
--------------------------------------------------------------------------------
/devel/docker/codepod_spikeforest2/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM spikeforest/codepod:latest
2 |
3 | # jp_proxy_widget for vdomr in jupyterlab
4 | RUN . /venv/bin/activate \
5 | && pip install jupyterlab jp_proxy_widget \
6 | && jupyter nbextension enable --py --sys-prefix jp_proxy_widget \
7 | && jupyter labextension install jp_proxy_widget \
8 | && jupyter labextension install @jupyter-widgets/jupyterlab-manager
9 |
10 | # python packages
11 | RUN . /venv/bin/activate \
12 | && pip install jupyterlab numpy matplotlib \
13 | requests pillow pandas h5py scipy \
14 | ipython ipywidgets six
15 |
16 | RUN apt-get update && apt-get install -y tmux vim
17 |
18 | RUN . /venv/bin/activate \
19 | && pip install pytest pytest-cov idna autopep8
20 |
--------------------------------------------------------------------------------
/devel/docker/codepod_spikeforest2/readme.txt:
--------------------------------------------------------------------------------
1 | # This builds the docker image used by codepod
2 |
3 | # How to build
4 | docker build -t magland/codepod_spikeforest2 .
5 |
6 | # How to push to dockerhub
7 | docker push magland/codepod_spikeforest2
8 |
--------------------------------------------------------------------------------
/devel/format_python.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -ex
3 |
4 | # you must first pip install autopep8
5 | autopep8 -ir mountaintools spikesorters working gui
6 |
7 |
--------------------------------------------------------------------------------
/devel/requirements.txt:
--------------------------------------------------------------------------------
1 | pep8
2 | autopep8
3 | pytest
4 | codepod
5 | ml_ms4alg
6 | jupyterlab
7 |
--------------------------------------------------------------------------------
/devel/setup_colab.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # colab
4 | pip install jupyter_http_over_ws
5 | jupyter serverextension enable --py jupyter_http_over_ws
--------------------------------------------------------------------------------
/devel/setup_jp_proxy_widget.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # jpwidget
4 | pip install jupyterlab
5 | pip install jp_proxy_widget
6 | jupyter nbextension enable --py --sys-prefix jp_proxy_widget
7 | jupyter labextension install jp_proxy_widget
8 | jupyter labextension install @jupyter-widgets/jupyterlab-manager
--------------------------------------------------------------------------------
/devel/setup_nbstripout.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -ex
3 |
4 | pip install --upgrade nbstripout
5 | nbstripout --install --attributes .gitattributes
6 |
--------------------------------------------------------------------------------
/devel/setup_python.sh:
--------------------------------------------------------------------------------
1 | # Usage: . setup_python.sh [env_name]
2 | # This script must be sourced (not executed directly), since it activates a conda environment.
3 | # #!/bin/bash
4 | # set -e
5 |
6 | CONDA_ENV=${1:-spikeforest}
7 | conda deactivate
8 | conda env remove -n $CONDA_ENV -y || echo "$CONDA_ENV conda environment not removed. Try closing other terminals using $CONDA_ENV"
9 |
10 | conda create -n $CONDA_ENV python=3.6 jupyterlab -y
11 | conda activate $CONDA_ENV
12 |
13 | ## Install the python requirements and the project packages
14 | ## (run from the root directory of the project)
15 | pip install -r devel/requirements.txt
16 | pip install -e mountaintools/
17 | pip install -e spikeforest/
18 |
19 | conda install -c anaconda pyqt -y
20 | ## for issues relating to gui/sf_main/start_sf_main.py
21 | ## follow the instructions here: https://github.com/Ultimaker/Cura/pull/131#issuecomment-176088664
22 |
23 | ## install jupyter extension
24 | conda install -c conda-forge ipywidgets -y
25 | jupyter labextension install @jupyter-widgets/jupyterlab-manager
26 |
27 |
28 | ## run test
29 | pytest
30 |
--------------------------------------------------------------------------------
/docs/basic_flow_chart_spikeforest.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/flatironinstitute/spikeforest_old/d9470194dc906b949178b9c44d14aea57a1f6c27/docs/basic_flow_chart_spikeforest.jpg
--------------------------------------------------------------------------------
/docs/blurbs/auto-install-algs.md:
--------------------------------------------------------------------------------
1 | ## Auto installation of sorting algorithms
2 |
3 | 20 May 2019
4 |
5 | The MATLAB sorting algorithms are not as rigorously reproducible, since they do not run inside singularity containers. To remedy this, I am building in auto-install scripts for these processors. For IronClust, the install script just clones the IronClust repo at a particular commit. For KiloSort and KiloSort2, a GPU/CUDA compilation step is also part of the auto-install.
6 |
7 | Thus far I have implemented this auto-installer for the following algs:
8 |
9 | * IronClust
10 |
11 | * Kilosort2
12 |
13 | The following environment variables will no longer have an effect: `IRONCLUST_PATH`, `KILOSORT2_PATH`
14 |
15 | For development and testing purposes, if you want to use the old method (not auto installing) you can set the following environment variables instead: `IRONCLUST_PATH_DEV`, `KILOSORT2_PATH_DEV`
16 |
17 | Otherwise, the source code for these projects will (by default) automatically be cloned to `~/spikeforest_algs/`
18 |
19 | For example,
20 |
21 | ```
22 | ~/spikeforest_algs/ironclust_042b600b014de13f6d11d3b4e50e849caafb4709
23 | ```
24 |
25 | The wrappers include hard-coded commits for the repos.
26 |
27 | In this way, it is possible to update the commit (e.g., bug fix) without incrementing the wrapper version.
28 |
29 |
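30 | As a sketch of the mechanism (the helper below is hypothetical, not the actual wrapper code), the auto-install amounts to cloning the repo at its pinned commit into a commit-suffixed directory:
31 | 
32 | ```
33 | import os
34 | import subprocess
35 | 
36 | 
37 | def auto_install_alg(repo_url, commit, name, base_dir='~/spikeforest_algs'):
38 |     # destination is commit-suffixed, e.g. ~/spikeforest_algs/ironclust_<commit>
39 |     dest = os.path.join(os.path.expanduser(base_dir), '{}_{}'.format(name, commit))
40 |     if os.path.exists(dest):
41 |         return dest  # already installed at this commit -- reuse the cached clone
42 |     subprocess.check_call(['git', 'clone', repo_url, dest])
43 |     subprocess.check_call(['git', 'checkout', commit], cwd=dest)
44 |     return dest
45 | ```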
--------------------------------------------------------------------------------
/docs/news/2019-07-01-spikeforum.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: SpikeForum
3 | date: 2019-07-01
4 | author: Jeremy Magland
5 | ---
6 |
7 | We have created a new user forum called [spikeforum.org](https://spikeforum.org) that supports the SpikeForest and SpikeInterface projects. You can browse the posts there, or log in using Google or GitHub to participate in the discussion. It's a great place to ask questions or make suggestions.
--------------------------------------------------------------------------------
/docs/spike_sorting.md:
--------------------------------------------------------------------------------
1 | # Spike sorting
2 |
3 | To run spike sorting on a recording featured on the SpikeForest website, follow this [tutorial](tutorials/spike_sorting_spikeforest_recording.md).
4 |
5 |
--------------------------------------------------------------------------------
/docs/tutorials/j2run.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -e
3 |
4 | # you need pip install j2cli
5 |
6 | j2 spike_sorting_spikeforest_recording.md.j2 -o spike_sorting_spikeforest_recording.md
7 | j2 spike_sorting_spikeforest_recording.py.j2 -o spike_sorting_spikeforest_recording.py
8 | chmod a+x spike_sorting_spikeforest_recording.py
9 |
10 | j2 spike_sorting_single_recording.md.j2 -o spike_sorting_single_recording.md
11 | j2 spike_sorting_single_recording.py.j2 -o spike_sorting_single_recording.py
12 | chmod a+x spike_sorting_single_recording.py
13 |
--------------------------------------------------------------------------------
/docs/tutorials/j2templates/explain_force_run.md:
--------------------------------------------------------------------------------
1 | When using MountainTools processors (via `execute()`), results are
2 | automatically cached. To force a rerun, use the `_force_run=True` option.
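3 | 
4 | For example, to force the sorting step used in these tutorials to rerun, add the option to the same call as in the tutorial code (`recdir` is defined earlier in the tutorial):
5 | 
6 | ```
7 | MountainSort4.execute(
8 |     recording_dir=recdir,
9 |     firings_out='test_outputs/ms4_firings.mda',
10 |     detect_sign=-1,
11 |     adjacency_radius=50,
12 |     _container='default',
13 |     _force_run=True  # rerun even if a cached result exists
14 | )
15 | ```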
--------------------------------------------------------------------------------
/docs/tutorials/j2templates/install_spikeforest_pypi.md:
--------------------------------------------------------------------------------
1 | The first step is to install spikeforest and mountaintools. The easiest way is to use
2 | the PyPI packages as follows.
3 |
4 | ```
5 | {% include './j2templates/install_spikeforest_pypi.sh' %}
6 | ```
7 |
8 | To use the containerized versions of the spike sorters (recommended), you should
9 | [install singularity 2.6.1](https://www.sylabs.io/guides/2.6/user-guide/quick_start.html#quick-installation-steps).
10 | This will work for all of the non-Matlab spike sorters (in the future we will
11 | also containerize the Matlab packages).
--------------------------------------------------------------------------------
/docs/tutorials/j2templates/install_spikeforest_pypi.sh:
--------------------------------------------------------------------------------
1 | pip install --upgrade spikeforest==0.11.0
2 | pip install --upgrade mountaintools==0.7.1
--------------------------------------------------------------------------------
/docs/tutorials/j2templates/load_spikeforest_tetrode_recording.py:
--------------------------------------------------------------------------------
1 | from spikeforest import SFMdaRecordingExtractor, SFMdaSortingExtractor
2 | from mountaintools import client as mt
3 |
4 | # Configure to download from the public spikeforest kachery node
5 | mt.configDownloadFrom('spikeforest.public')
6 |
7 | # Load an example tetrode recording with its ground truth
8 | # You can also substitute any of the other available recordings
9 | recdir = 'sha1dir://fb52d510d2543634e247e0d2d1d4390be9ed9e20.synth_magland/datasets_noise10_K10_C4/001_synth'
10 |
11 | print('loading recording...')
12 | recording = SFMdaRecordingExtractor(dataset_directory=recdir, download=True)
13 | sorting_true = SFMdaSortingExtractor(firings_file=recdir + '/firings_true.mda')
--------------------------------------------------------------------------------
/docs/tutorials/j2templates/prepare_toy_recording.py:
--------------------------------------------------------------------------------
1 | import os
2 | import shutil
3 | from spikeforest import example_datasets
4 | from spikeforest import SFMdaRecordingExtractor, SFMdaSortingExtractor
5 |
6 | recording, sorting_true = example_datasets.toy_example1()
7 |
8 | recdir = 'toy_example1'
9 |
10 | # remove the toy recording directory if it exists
11 | if os.path.exists(recdir):
12 |     shutil.rmtree(recdir)
13 |
14 | print('Preparing toy recording...')
15 | SFMdaRecordingExtractor.write_recording(recording=recording, save_path=recdir)
16 | SFMdaSortingExtractor.write_sorting(sorting=sorting_true, save_path=recdir + '/firings_true.mda')
17 |
18 |
--------------------------------------------------------------------------------
/docs/tutorials/j2templates/prerequisites.md:
--------------------------------------------------------------------------------
1 | At this point, SpikeForest has only been tested on Linux. In the future we will support macOS. It is also possible to use Linux within Windows (e.g., via the Windows Subsystem for Linux).
2 |
--------------------------------------------------------------------------------
/docs/tutorials/j2templates/run_spike_sorting.py:
--------------------------------------------------------------------------------
1 | # import a spike sorter from the spikesorters module of spikeforest
2 | from spikeforestsorters import MountainSort4
3 | import os
4 | import shutil
5 |
6 | # In place of MountainSort4 you could use any of the following:
7 | #
8 | # MountainSort4, SpykingCircus, KiloSort, KiloSort2, YASS
9 | # IronClust, HerdingSpikes2, JRClust, Tridesclous, Klusta
10 | # although the Matlab sorters require further setup.
11 |
12 | # clear and create an empty output directory (keep things tidy)
13 | if os.path.exists('test_outputs'):
14 |     shutil.rmtree('test_outputs')
15 | os.makedirs('test_outputs', exist_ok=True)
16 |
17 | # Run spike sorting in the default singularity container
18 | print('Spike sorting...')
19 | MountainSort4.execute(
20 |     recording_dir=recdir,
21 |     firings_out='test_outputs/ms4_firings.mda',
22 |     detect_sign=-1,
23 |     adjacency_radius=50,
24 |     _container='default'
25 | )
26 |
27 | # Load the result into a sorting extractor
28 | sorting = SFMdaSortingExtractor(firings_file='test_outputs/ms4_firings.mda')
--------------------------------------------------------------------------------
/docs/tutorials/readme.txt:
--------------------------------------------------------------------------------
1 | Many of these files are generated from Jinja2 templates. Therefore you should
2 | edit the .j2 files. To regenerate, use
3 |
4 | pip install j2cli
5 |
6 | ./j2run.sh
7 |
8 |
--------------------------------------------------------------------------------
/docs/tutorials/spike_sorting_single_recording.py.j2:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | {% include 'j2templates/prepare_toy_recording.py' %}
4 |
5 | {% include 'j2templates/run_spike_sorting.py' %}
6 |
7 | {% include 'j2templates/compare_with_truth.py' %}
8 |
9 | print('Done. See test_outputs/')
--------------------------------------------------------------------------------
/docs/tutorials/spike_sorting_spikeforest_recording.py.j2:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | {% include 'j2templates/load_spikeforest_tetrode_recording.py' %}
4 |
5 | {% include 'j2templates/run_spike_sorting.py' %}
6 |
7 | {% include 'j2templates/compare_with_truth.py' %}
8 |
9 | print('Done. See test_outputs/')
--------------------------------------------------------------------------------
/docs/updates/2019-04-11.md:
--------------------------------------------------------------------------------
1 | ## 2019-04-11
2 |
3 | #kachery #kbucket #forestview
4 |
5 | To configure kachery URLs:
6 |
7 | For example, add the following entry to `~/.mountaintools/kacheries`:
8 |
9 | ```
10 | kbucket http://kbucket.flatironinstitute.org:8080
11 | ```
12 |
13 | Then, for example, you can auto-download from kbucket when running forestview:
14 |
15 | ```
16 | forestview sha1dir://03e12d2f1af3e038886f25a94871f3723a66502a.toy_recordings --download-from kbucket
17 | ```
--------------------------------------------------------------------------------
/docs/updates/2019-04-12.md:
--------------------------------------------------------------------------------
1 | ## 2019-04-12
2 |
3 | #spikeforest #analysis
4 |
5 | You can now run an analysis from anywhere via something like:
6 | ```
7 | cd working/main_analysis
8 | ./spikeforest_analysis key://pairio/spikeforest/analysis.visapy_mea.json --login
9 | ```
10 |
11 | and then view the results via:
12 |
13 | ```
14 | forestview key://pairio/spikeforest/spikeforest_analysis_results.visapy_mea.json
15 | ```
16 |
17 | The analysis itself is now provided in the above .json file which can be inspected via:
18 |
19 | ```
20 | mt-cat key://pairio/spikeforest/analysis.visapy_mea.json
21 | ```
--------------------------------------------------------------------------------
/docs/updates/2019-04-13.md:
--------------------------------------------------------------------------------
1 | ## 2019-04-13
2 |
3 | #forestview #analysis
4 |
5 | I added some prelim views for opening an analysis in forestview. So you can do:
6 |
7 | ```
8 | cd working/main_analysis
9 | forestview --mode analysis analysis.visapy_mea.json
10 | ```
11 |
12 | It's a read-only view of that analysis file. In the future we can enable editing, and even launching and viewing the results.
13 |
14 | The second update is that I checked in a file called .vscode/snippets/python.json. If you install the "Project Snippets" vscode extension then you can make use of these. The first one I added creates a new forestview view class. So you just type "forestview-view" and vscode will pop up a nice snippet interface where you can type in the name of the class, etc.
--------------------------------------------------------------------------------
/docs/updates/2019-04-17.md:
--------------------------------------------------------------------------------
1 | ## 2019-04-17
2 |
3 | #forestview
4 |
5 | I have made the following improvements to forestview.
6 |
7 | * Moveable tabs
8 | * Closeable tabs
9 | * Option to always_open_new -- which defaults to False, so we don't open, e.g., the settings tab more than once. (suggested by Alex M)
10 | * Tab labels and titles for the views. Implement tabLabel() and title() methods on the view (can be dynamic)
11 |
12 | It is in a new branch called "forestview2". I'll merge it in once it has been tested.
13 |
14 | Also, I solved the race conditions for executing javascript. And now the right way to execute javascript in a vdomr component is to use postRenderScript as well as self.executeJavascript()
15 |
16 | Both are guaranteed to happen AFTER the component element gets added to the DOM. And the latter is guaranteed to happen after the postRenderScript. This is important because often you'll try to execute javascript before the element is actually rendered, so it's important to establish this execution order without jumping through crazy hoops of setTimeout calls and readiness checks.
17 |
18 |
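19 | A minimal sketch of the pattern (the exact signatures are my assumption -- see vdomr/component.py for the real API):
20 | 
21 | ```
22 | import vdomr as vd
23 | 
24 | class MyView(vd.Component):
25 |     def __init__(self):
26 |         vd.Component.__init__(self)
27 |         # deferred until the element is in the DOM, and runs after postRenderScript
28 |         self.executeJavascript("console.log('runs second');")
29 | 
30 |     def postRenderScript(self):
31 |         # runs once the component element has been added to the DOM
32 |         return "console.log('runs first');"
33 | 
34 |     def render(self):
35 |         return vd.div('hello')
36 | ```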
--------------------------------------------------------------------------------
/docs/updates/2019-04-18.md:
--------------------------------------------------------------------------------
1 | ## 2019-04-18
2 |
3 | #spikeforest #cleanup
4 |
5 | SpikeExtractors and SpikeToolkit dependencies have continued to cause some problems, so to avoid future pitfalls, I have deleted spikeforest.spikeextractors and spikeforest.spiketoolkit (we had mostly moved away from those anyway). I have updated / cleaned up the whole repo... and the unit tests passed. I have also updated the notebooks.
6 |
7 | Made vdomr components more robust and updated elec geom widget
8 |
9 | The vdomr plotly component is now much smoother (e.g., the cluster view). The resizing works much better.
--------------------------------------------------------------------------------
/docs/updates/2019-04-19.md:
--------------------------------------------------------------------------------
1 | ## 2019-04-19
2 |
3 | #spikeforest #cleanup
4 |
5 | SpikeExtractors and SpikeToolkit dependencies have continued to cause some problems, so to avoid future pitfalls, I have deleted spikeforest.spikeextractors and spikeforest.spiketoolkit (we had mostly moved away from those anyway). I have updated / cleaned up the whole repo... and the unit tests passed. I have also updated the notebooks.
6 |
7 | Made vdomr components more robust and updated elec geom widget
8 |
9 | The vdomr plotly component is now much smoother (e.g., the cluster view). The resizing works much better.
--------------------------------------------------------------------------------
/docs/updates/2019-04-25.md:
--------------------------------------------------------------------------------
1 | ## 2019-04-25
2 |
3 | #spikeforest
4 |
5 | All sorters now run in a single batch (one batch per compute resource). This is important for James' multi-parameter comparison.
6 |
7 | Created script for continuous spikeforest processing
8 |
9 | Split paired recordings analysis into multiple pieces
10 |
11 | Created prelim .md files for descriptions of study sets
12 |
13 | Worked with James to create .md files for all the study sets, with front-matter.
14 |
15 |
16 |
17 |
--------------------------------------------------------------------------------
/docs/updates/2019-04-26.md:
--------------------------------------------------------------------------------
1 | ## 2019-04-26
2 |
3 | #spikeforest #spike-front
4 |
5 | Created entrypoint script to deploy to remote database for website (working/website/load_data_into_remote_database.sh)
6 |
7 | Worked on spike-front gui -- heatmap on home page to display results for study sets, expandable to studies
8 |
9 | systematized the prepare_*.py scripts
--------------------------------------------------------------------------------
/docs/updates/2019-04-29.md:
--------------------------------------------------------------------------------
1 | ## 2019-04-29
2 |
3 | #spikeforest
4 |
5 | Systematizing the prepare/analyze/assemble scripts for website
6 |
7 | Creating test analyses: working/test_analysis
8 |
9 | Debugging kilosort2 wrapper with James
10 |
11 | Ran all processing from scratch (using MLPROCESSORS_FORCE_RUN=TRUE)
12 | The total took ~6 hours. Below is a breakdown of the timing:
13 |
14 | Clock time:
15 | * 13:20 started
16 | * 13:41 finished paired_boyden32c (21 minutes)
17 | * 14:06 finished paired_crcns (25 minutes)
18 | * 14:34 finished paired_kampff (28 minutes)
19 | * 14:48 finished paired_mea64c (14 minutes)
20 | * 14:55 finished synth_visapy (7 minutes)
21 | * 15:23 finished synth_magland (28 minutes)
22 | * 15:48 finished synth_mearec_tetrode (25 minutes)
23 | * 16:02 finished manual_franklab (14 minutes)
24 | * 17:37 finished synth_bionet (95 minutes)
25 | * 19:10 finished synth_mearec_neuronexus (93 minutes)
26 |
27 | This was excluding KiloSort2 (as well as SC for some studies).
28 | Exception: I accidentally did not exclude ks2 from synth_mearec_neuronexus -- maybe that's why it took especially long.
29 | Note: ks2 is not working at present.
30 |
31 | Subsequent (cached) run of all processing takes ~3 minutes.
--------------------------------------------------------------------------------
/docs/updates/2019-04-30.md:
--------------------------------------------------------------------------------
1 | ## 2019-04-30
2 |
3 | #spikeforest #spike-front
4 |
5 | assemble_website_data.py: add ability to upload website data also to pairio/kachery
6 |
7 | include study sets in analysis output
8 |
9 | created expandable heatmap table -- organized by study sets, expandable to studies
--------------------------------------------------------------------------------
/docs/updates/2019-05-02.md:
--------------------------------------------------------------------------------
1 | ## 2019-05-02
2 |
3 | #spikeforest #mountaintools
4 |
5 | Spoke to Witold about maintaining mountaintools -- writing some clear docs, tests, etc.
6 |
7 | Add ks2 to visapy analysis
8 |
9 | Better handling when a comparison is missing in analysis -- fill with zeros
10 |
11 | Bug fix -- do not create snapshot of firings file if it does not exist
12 |
13 | Began list of TODO items on github in prep for the May 15th prelim release
--------------------------------------------------------------------------------
/docs/updates/2019-05-03.md:
--------------------------------------------------------------------------------
1 | ## 2019-05-03
2 |
3 | #spikeforest #mountaintools
4 |
5 | Did pep8 formatting of .py files throughout
6 |
7 | Going forward, I am using the following options for pep8 and autopep8 in my .vscode/settings.json:
8 |
9 | ```
10 | {
11 |     "python.pythonPath": "/home/magland/miniconda3/envs/devel/bin/python",
12 |     "python.linting.pep8Enabled": true,
13 |     "python.formatting.provider": "autopep8",
14 |     "python.formatting.autopep8Args": ["--select=E111,E124,E126,E128,E131,E225,E226,E231,E251,E265,E502,E261,E265,E301,E302,E303,W291,W293,W391"],
15 |     "python.linting.pep8Args": [
16 |         "--ignore=E501,W293"
17 |     ],
18 |     "python.linting.pylintEnabled": true,
19 |     "python.analysis.openFilesOnly": false
20 | }
21 | ```
22 |
23 | Moved location of compute-resource-start.
24 |
25 | So @jamesjun, in your compute resource scripts (start.sh) you now need to change `../../../bin/compute-resource-start` to `compute-resource-start`. It also involves reinstalling mountaintools so that this script becomes available.
26 |
27 | Running ks2 on all (non-tetrode) studies
28 |
29 | Fixed bug in readme introduced by accident in pep8 formatting
30 |
31 |
--------------------------------------------------------------------------------
/docs/updates/2019-05-06.md:
--------------------------------------------------------------------------------
1 | ## 2019-05-06
2 |
3 | #spikeforest #kachery
4 |
5 | New functionality allows restricting access to kachery nodes for both upload and download. For example, put the following in
6 | `~/.mountaintools/kachery_tokens`
7 | ```
8 | spikeforest.kbucket download **********
9 | spikeforest.kbucket upload **********
10 | ```
11 | and the following can be removed as it is deprecated:
12 | `~/.mountaintools/kachery_upload_tokens`
13 |
14 | Implemented cpu heatmap chart for website
15 |
16 | Refactoring to depend on 0.4.2 of SpikeExtractors
17 |
18 | New spike sorting containers - to conform with SE 0.4.2
19 |
20 |
--------------------------------------------------------------------------------
/docs/updates/2019-05-07.md:
--------------------------------------------------------------------------------
1 | ## 2019-05-07
2 |
3 | #spikeforest #mountaintools #spike-front
4 |
5 | Implemented skip_failing option for mountaintools jobs
6 | (uses the cached failing result when a previous run has failed)
7 |
8 | Created generate_spikesprays.py
9 |
10 | spike-front: Added spikespray model, controller, api call
11 |
12 | loaded spikespray info (including the spikesprayUrl field) into the spike-front db
13 |
14 |
--------------------------------------------------------------------------------
/docs/updates/2019-05-08.md:
--------------------------------------------------------------------------------
1 | ## 2019-05-08
2 |
3 | #spikeforest #mountaintools #spike-front #forestview
4 |
5 | Updated code to generate spike sprays to
6 | * run in batches
7 | * use a compute resource
8 | * use singularity container
9 |
10 | Added configComputeResources, which takes a dict.
11 | This allows configuring multiple compute resources from a file.
12 |
13 | Created spike-front mode in forestview
14 |
15 | Created StudyAnalysisResult objects in assemble data step
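16 | 
17 | A hypothetical sketch of the configComputeResources call mentioned above (the exact dict schema is an assumption; the resource names are from working/old_compute_resources):
18 | 
19 | ```
20 | from mountaintools import client as mt
21 | 
22 | # one entry per named compute resource (schema illustrative only)
23 | mt.configComputeResources(dict(
24 |     default=dict(resource_name='ccmlin008-default', collection='spikeforest'),
25 |     gpu=dict(resource_name='ccmlin008-gpu', collection='spikeforest')
26 | ))
27 | ```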
--------------------------------------------------------------------------------
/docs/updates/2019-05-09.md:
--------------------------------------------------------------------------------
1 | ## 2019-05-09
2 |
3 | #spikeforest
4 |
5 | StudyAnalysisResult -> spike-front website
6 |
7 |
--------------------------------------------------------------------------------
/docs/updates/2019-05-10.md:
--------------------------------------------------------------------------------
1 | ## 2019-05-10
2 |
3 | #spike-front
4 |
5 | Website updates:
6 | * Introduce StudyAnalysisResult model
7 | * Reuse components (e.g., HeatmapCount is now used for COUNT, AVG, CPU -- will eventually be renamed)
8 | * Adjust route functionality
9 | * Change /sorters to /algorithms, /study/:studyName -> /studyresult/:studyName
10 | * Adjust data model to have cpu times on StudyAnalysisResults
11 | * Highlight selected marker on scatterplot
12 | * UnitDetail component (which will hold the spike spray)
13 | * Update some copy here and there
14 | * Disabled fancy scrolling for now -- may restore later.
15 | * Explicit passing of props in various places.
16 |
17 |
--------------------------------------------------------------------------------
/docs/updates/2019-05-13.md:
--------------------------------------------------------------------------------
1 | ## 2019-05-13
2 |
3 | #spikeforest #spike-front
4 |
5 | spike-front: Merging with Liz's modifications
6 |
7 | Fixed Klusta sorter wrapper
8 |
9 | Created singularity containers for klusta and tridesclous sorters
10 |
11 | Added locale env variables for mountaintools processors running in singularity containers (important for Click library in python)
12 |
13 | update copy of website -- explain that these are prelim results, etc.
--------------------------------------------------------------------------------
/docs/updates/2019-05-14.md:
--------------------------------------------------------------------------------
1 | ## 2019-05-14
2 |
3 | #spikeforest #spike-front
4 |
5 | preparing for prelim spikeforest release
6 |
7 | deployed to heroku
8 |
9 | working on spikespray data retrieval
10 |
11 | ran processing with updated sorters (incl. Klusta)
12 |
13 | Integrated Alex's modifications to copy
14 |
15 | Consolidated About/Info copy on website
16 |
17 | worked with Dylan and Paul to resolve the kbucket server outage
--------------------------------------------------------------------------------
/docs/updates/2019-05-15.md:
--------------------------------------------------------------------------------
1 | ## 2019-05-15
2 |
3 | #spikeforest #spike-front
4 |
5 | working with Witold on MountainTools docs
6 |
7 | fixing spikespray data retrieval
8 |
9 | spike-front scatterplot: fixed selected unit issue reported by Alex
10 | - semi-fixed (the selected unit does not persist when changing sorters)
11 |
12 | troubleshooting the kbucket outage
13 |
14 | environment=MATLAB for select algorithms
15 |
16 | spike-front: fix compile warnings
17 |
18 |
--------------------------------------------------------------------------------
/docs/updates/2019-05-17.md:
--------------------------------------------------------------------------------
1 | ## 2019-05-17
2 |
3 | #spikeforest #spike-front
4 |
5 | Released the website preview! Sent email to spike sorting authors.
6 |
7 | Fixed log svg (thanks A. Morley)
8 |
9 | Created staging -> deploy pipeline
10 |
11 | Include study set info in a couple places and provide links to study set descriptions
12 |
13 | Page for showing markdown of study set descriptions
14 |
15 | Formatted copy on website
16 |
17 | Ran ironclust on gpu for better cpu performance
18 |
19 | Fixed avg. cpu time bug
20 |
21 | Adjusted slider bar performance (smaller increments and only updates on mouse release)
22 |
23 | Split study sets visually into groups (paired, synth, hybrid, manual)
24 |
25 | Created full tutorials (used jinja2 templates!) for sorting spikeforest data
26 |
27 | Fixed MountainTools bug when downloading sha1dir://... files
28 |
29 | Uploaded singularity containers to spikeforest.public kachery node
30 |
31 | Uploaded test recording to spikeforest.public
32 |
33 |
34 |
35 |
36 |
37 |
38 |
--------------------------------------------------------------------------------
/docs/updates/2019-05-20.md:
--------------------------------------------------------------------------------
1 | ## 2019-05-20
2 |
3 | #spikeforest
4 |
5 | Auto-install sorting algorithms for IronClust and Kilosort2
6 |
7 |
--------------------------------------------------------------------------------
/mountaintools/.bumpversion.cfg:
--------------------------------------------------------------------------------
1 | [bumpversion]
2 | current_version = 0.7.0
3 | commit = True
4 | tag = True
5 |
6 | [bumpversion:file:setup.py]
7 |
8 |
--------------------------------------------------------------------------------
/mountaintools/.gitignore:
--------------------------------------------------------------------------------
1 | __pycache__
2 | *.egg-info
3 | node_modules
4 | .env
5 | package-lock.json
6 | .mypy_cache
--------------------------------------------------------------------------------
/mountaintools/.travis.yml:
--------------------------------------------------------------------------------
1 | language: python
2 | python:
3 | - '3.6'
4 | install:
5 | - pip install pytest
6 | - pip install .
7 | script:
8 | - pytest -s
9 | deploy:
10 | provider: pypi
11 | user: magland
12 | on:
13 | tags: true
14 | password:
15 | secure: TCUvFkYhvTbw39DSaQtZUK5Px94aV//foon+5YOLtXWcdeone91JIBoKPKmX+ifMygDGowJawI2ARp2UV7wHfMecwS/iHZQ8lI4KPskSxuTRjDbQGUD3adXksegKj3LIYPSJAeONx9T7em9xG09n64cIR70kthxIk2+kxlShIU7ivrBSxPRAuT/xD+L96oXSaIzaqi+hwQypy8ZTyN8ls7jowcjL0Yi6MXDKs1baSIuQqI4GDDU7iPivYGWbi8VzC6d533vP9/ZwNDAcK017akjhmJYagvd/LTh6rkkCDmS7381h8QoR0GnYhJDYReXRrhfSz60tqZFRNBraSP3spM/reITRXBO+yCeo1tI66BhtlbBOoBj69TB2wNLUU3ybggTD5vZ351eK+8rFkfxFZYdXlbC32oDcnKej1YdUUmzpxZOEKMFSSmWgC0fsWEibUcvsa/t216+uhqBlNfzxq1Bhv1g/nx6yYLMXmE88C62DD9deSRGtCvMt/KfwmEsEN+otjH61EIdNGeD16On85W00e4r9rX5TYAHQ4f9o4Skf8fWTcrBOBZJBkfOWZbEB0KHTlCR54p50wuFKnRO0ibJVtng4Oy7FTvG94umICdkWwvPLZtysKDjlMVIHi4+TAimZ5H++rgUYL5qb83FadIxl+XicjarNKtRA4d8mxjE=
16 |
--------------------------------------------------------------------------------
/mountaintools/bin/mt-cat:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | import os
4 | import sys
5 | import argparse
6 | from mountaintools import client as mt
7 |
8 | # @mtlogging.log(root=True)
9 | def main():
10 | """main routine
11 | """
12 |
13 | parser = argparse.ArgumentParser(
14 | description='Write the contents of a local or remote file to stdout')
15 | parser.add_argument('path', help='remote or local path of file')
16 | parser.add_argument('--download-from', required=False, default=None)
17 |
18 | args = parser.parse_args()
19 |
20 | if args.download_from:
21 | mt.configDownloadFrom([args.download_from])
22 |
23 | path2 = mt.realizeFile(path=args.path)
24 |
25 | if not path2:
26 | print('Unable to realize file.', file=sys.stderr)
27 |         sys.exit(-1)
28 |
29 | with open(path2, 'rb') as f:
30 | while True:
31 | data = os.read(f.fileno(), 4096)
32 | if len(data) == 0:
33 | break
34 | os.write(sys.stdout.fileno(), data)
35 |
36 |
37 | if __name__ == "__main__":
38 | main()
39 |
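For reference, the same realize-then-read flow can be driven from Python directly; a minimal sketch using the client calls that appear in this script (the sha1:// path below is a hypothetical placeholder):

```python
# Hypothetical usage sketch of the client API used by mt-cat above;
# the sha1:// path is a placeholder, not a real file.
from mountaintools import client as mt

# Optionally configure a remote source, as --download-from does above
mt.configDownloadFrom(['spikeforest.public'])

# Realize the (possibly remote) file to a local path, then read it
local_path = mt.realizeFile(path='sha1://0000000000000000000000000000000000000000/example.txt')
if local_path:
    with open(local_path, 'rb') as f:
        contents = f.read()
    print(len(contents), 'bytes')
else:
    print('Unable to realize file.')
```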
--------------------------------------------------------------------------------
/mountaintools/bin/mt-resolve-key-path:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | import os
4 | import sys
5 | import argparse
6 | import time
7 | from mountaintools import client as mt
8 | import mtlogging
9 |
10 | # @mtlogging.log(root=True)
11 | def main():
12 |     parser = argparse.ArgumentParser(description='Display the resolved path associated with a key://... path.')
13 |     parser.add_argument('key_path', help='key://... path to resolve')
14 |
15 | args = parser.parse_args()
16 | key_path = args.key_path
17 |
18 | address = mt.resolveKeyPath(key_path=key_path)
19 |
20 | print(address)
21 |
22 | if __name__ == "__main__":
23 | main()
24 |
25 |
--------------------------------------------------------------------------------
/mountaintools/containers/build_simg_using_docker.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -e
3 |
4 | DEST=$1
5 | RECIPE=$2
6 |
7 | ## This will be the command we run inside the docker container
8 | cmd="singularity build /tmp/out.simg $RECIPE"
9 |
10 | ## Run the command inside the docker container
11 | docker rm build_sing || echo "."
12 | docker run --privileged --userns=host --name build_sing -v $PWD:/working magland/singularity:2.6.0 \
13 | bash -c "$cmd"
14 |
15 | echo "Copying file out of container"
16 | docker cp build_sing:/tmp/out.simg $DEST
17 |
--------------------------------------------------------------------------------
/mountaintools/containers/mountaintools_basic/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM ubuntu:18.04
2 |
3 | #########################################
4 | ### Python, etc
5 | RUN apt-get update && apt-get -y install git wget build-essential
6 | RUN apt-get install -y python3 python3-pip
7 | RUN ln -s python3 /usr/bin/python
8 | RUN ln -s pip3 /usr/bin/pip
9 | RUN DEBIAN_FRONTEND=noninteractive apt-get install -y python3-tk
10 |
11 | RUN echo "20 March 2019"
12 |
13 | #########################################
14 | RUN mkdir /src
15 | RUN git clone https://github.com/flatironinstitute/spikeforest /src/spikeforest && cd /src/spikeforest
16 | WORKDIR /src/spikeforest
17 | RUN pip install -e ./mountaintools
18 | RUN pip install -e ./spikeforest
19 |
--------------------------------------------------------------------------------
/mountaintools/containers/mountaintools_basic/build_simg.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -e
3 |
4 | ../build_simg_using_docker.sh mountaintools_basic.simg docker://magland/mountaintools_basic
5 |
--------------------------------------------------------------------------------
/mountaintools/containers/mountaintools_basic/readme.txt:
--------------------------------------------------------------------------------
1 | # first build (if needed)
2 | docker build -t magland/mountaintools_basic .
3 |
4 | # then push to docker hub (if needed)
5 | docker push magland/mountaintools_basic
6 |
7 | # then create singularity image
8 | ./build_simg.sh
9 |
--------------------------------------------------------------------------------
/mountaintools/containers/mountaintools_basic/upload_singularity_container.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | from mountaintools import client as mt
4 |
5 | mt.login()
6 | sha1_path = mt.saveFile('mountaintools_basic.simg', upload_to='spikeforest.spikeforest2')
7 | print(sha1_path)
8 |
--------------------------------------------------------------------------------
/mountaintools/docs/.gitignore:
--------------------------------------------------------------------------------
1 | doctrees
2 | .buildinfo
3 | *.aux
4 | *.fdb_latexmk
5 | *.fls
6 | *.log
7 | *.pdf
8 | *.gz
9 |
--------------------------------------------------------------------------------
/mountaintools/docs/.nojekyll:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/flatironinstitute/spikeforest_old/d9470194dc906b949178b9c44d14aea57a1f6c27/mountaintools/docs/.nojekyll
--------------------------------------------------------------------------------
/mountaintools/docs/Makefile:
--------------------------------------------------------------------------------
1 | # Minimal makefile for Sphinx documentation
2 | #
3 |
4 | # You can set these variables from the command line.
5 | SPHINXOPTS =
6 | SPHINXBUILD = sphinx-build
7 | SOURCEDIR = .
8 | BUILDDIR = build
9 |
10 | # Put it first so that "make" without argument is like "make help".
11 | help:
12 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
13 |
14 | .PHONY: help Makefile
15 |
16 | # Catch-all target: route all unknown targets to Sphinx using the new
17 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
18 | %: Makefile
19 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
--------------------------------------------------------------------------------
/mountaintools/docs/_static/dummy.txt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/flatironinstitute/spikeforest_old/d9470194dc906b949178b9c44d14aea57a1f6c27/mountaintools/docs/_static/dummy.txt
--------------------------------------------------------------------------------
/mountaintools/docs/index.html:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/mountaintools/docs/index.rst:
--------------------------------------------------------------------------------
1 | MountainTools
2 | =============
3 |
4 | MountainTools API
5 | =================
6 |
7 | :doc:`mountainclient`
8 |
9 | .. :ref:`search`
10 |
--------------------------------------------------------------------------------
/mountaintools/docs/modules.rst:
--------------------------------------------------------------------------------
1 | mountainclient
2 | ==============
3 |
4 | .. toctree::
5 | :maxdepth: 4
6 |
7 | mountainclient
8 |
--------------------------------------------------------------------------------
/mountaintools/docs/mountainclient.rst:
--------------------------------------------------------------------------------
1 | MountainClient
2 | ==============
3 |
4 | .. automodule:: mountainclient.mountainclient
5 | :members:
6 |
7 |
--------------------------------------------------------------------------------
/mountaintools/examples/.gitignore:
--------------------------------------------------------------------------------
1 | *.npy
2 |
--------------------------------------------------------------------------------
/mountaintools/examples/example_mandelbrot/__init__.py:
--------------------------------------------------------------------------------
1 | # dummy
2 |
--------------------------------------------------------------------------------
/mountaintools/examples/example_mandelbrot/mandelbrot/__init__.py:
--------------------------------------------------------------------------------
1 | from .mandelbrot import *
2 |
--------------------------------------------------------------------------------
/mountaintools/kachery/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM node
2 | EXPOSE 25481
3 | VOLUME /share
4 |
5 | ADD . /kachery
6 | RUN cd /kachery && npm install .
7 | WORKDIR /share
8 |
--------------------------------------------------------------------------------
/mountaintools/kachery/how_to_run_in_docker.txt:
--------------------------------------------------------------------------------
1 | An example:
2 |
3 | docker run -v $PWD/data:/share -p 25481:25481 -it magland/kachery /bin/bash -c "KACHERY_UPLOAD_DIR=/share KACHERY_UPLOAD_TOKEN=token1 /kachery/src/kacheryserver.js"
4 |
--------------------------------------------------------------------------------
/mountaintools/kachery/how_to_test.txt:
--------------------------------------------------------------------------------
1 | You define the following in a test .env file:
2 |
3 | KACHERY_UPLOAD_DIR=kachery
4 | KACHERY_TEST_SIGNATURE=test1
5 | KACHERY_UPLOAD_TOKEN=token1
6 |
7 | Now start the server by running: src/kacheryserver.js
8 |
9 | Suppose we have a file file.txt. We can check the hash via:
10 |
11 | > sha1sum file.txt
12 |
13 | Suppose we find the hash to be d4a4fd472647f5ad8cc564048ed17ff6bf4a16f8 and that
14 | the kachery server is listening at http://localhost:25481. Also, assume that the
15 | KACHERY_TEST_SIGNATURE variable is set to test1 for the server.
16 |
17 | To add a file:
18 | > curl --request POST --data-binary "@file.txt" http://localhost:25481/set/sha1/d4a4fd472647f5ad8cc564048ed17ff6bf4a16f8?signature=test1
19 |
20 | To check the file:
21 | > curl http://localhost:25481/check/sha1/d4a4fd472647f5ad8cc564048ed17ff6bf4a16f8
22 |
23 | To download the file:
24 | > curl http://localhost:25481/get/sha1/d4a4fd472647f5ad8cc564048ed17ff6bf4a16f8
25 |
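The same three endpoints can be exercised from Python; a minimal sketch using the requests library, assuming a local server at localhost:25481 configured with KACHERY_TEST_SIGNATURE=test1 as above:

```python
# Sketch of the set/check/get protocol described above, assuming a
# kachery server at localhost:25481 with test signature 'test1'.
import hashlib
import requests

base = 'http://localhost:25481'
with open('file.txt', 'rb') as f:
    data = f.read()
sha1 = hashlib.sha1(data).hexdigest()

# Add the file (uploads require the signature)
r = requests.post('{}/set/sha1/{}?signature=test1'.format(base, sha1), data=data)
print('set:', r.status_code)

# Check that the server has the file
print('check:', requests.get('{}/check/sha1/{}'.format(base, sha1)).status_code)

# Download it back and verify the contents round-trip
print('get ok:', requests.get('{}/get/sha1/{}'.format(base, sha1)).content == data)
```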
--------------------------------------------------------------------------------
/mountaintools/kachery/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "@magland/kachery",
3 | "version": "0.1.0",
4 | "description": "Share data for scientific research",
5 | "scripts": {
6 | "test": "echo \"No tests\" && exit 0"
7 | },
8 | "keywords": [
9 | "mountaintools"
10 | ],
11 | "author": "Jeremy Magland",
12 | "license": "Apache-2.0",
13 | "homepage": "https://github.com/flatironinstitute/spikeforest",
14 | "main": "src/kacheryserver.js",
15 | "dependencies": {
16 | "cors": "^2.8.4",
17 | "dotenv": "latest",
18 | "express": "^4.16.3",
19 | "fs-extra": ""
20 | },
21 | "bin": {}
22 | }
23 |
--------------------------------------------------------------------------------
/mountaintools/mlprocessors/__init__.py:
--------------------------------------------------------------------------------
1 | from .core import *
2 | from .registry import *
3 | from .validators import *
4 | from .mountainjob import MountainJob
5 | from .mountainjobresult import MountainJobResult
6 | from .shellscript import ShellScript
7 | from .temporarydirectory import TemporaryDirectory
8 | from .jobqueue import JobQueue
9 | from .paralleljobhandler import ParallelJobHandler
10 | from .slurmjobhandler import SlurmJobHandler
11 |
12 | PLACEHOLDER = ''
13 |
14 | __all__ = [
15 | "Input", "Output",
16 | "Parameter", "StringParameter", "IntegerParameter", "FloatParameter",
17 | "Processor",
18 | "registry", "register_processor", "ProcessorRegistry",
19 | "Validator", "ValueValidator", "RegexValidator", "FileExtensionValidator", "FileExistsValidator",
20 | "MountainJob"
21 | ]
22 |
--------------------------------------------------------------------------------
/mountaintools/mlprocessors/defaultjobhandler.py:
--------------------------------------------------------------------------------
1 | from .jobhandler import JobHandler
2 | from .mountainjob import MountainJob
3 | from .mountainjobresult import MountainJobResult
4 |
5 |
6 | class DefaultJobHandler(JobHandler):
7 | def __init__(self):
8 | super().__init__()
9 |
10 | def executeJob(self, job: MountainJob) -> MountainJobResult:
11 | job.result._status = 'running'
12 | result = job._execute()
13 | job.result._status = 'finished'
14 | return result
15 |
16 | def iterate(self) -> None:
17 | pass
18 |
19 | def isFinished(self) -> bool:
20 | return True
21 |
22 | def halt(self) -> None:
23 | pass
24 |
25 | def cleanup(self) -> None:
26 | pass
27 |
--------------------------------------------------------------------------------
/mountaintools/mlprocessors/execute.py:
--------------------------------------------------------------------------------
1 | from .createjobs import createJob
2 | from typing import Optional
3 |
4 |
5 | def execute(
6 | proc,
7 | _container: Optional[str]=None,
8 | _use_cache: bool=True,
9 | _skip_failing: Optional[bool]=None,
10 | _skip_timed_out: Optional[bool]=None,
11 | _force_run: Optional[bool]=None,
12 | _keep_temp_files: Optional[bool]=None,
13 | _label: Optional[str]=None,
14 | **kwargs
15 | ):
16 | job = createJob(proc, _container=_container, _use_cache=_use_cache, _skip_failing=_skip_failing, _skip_timed_out=_skip_timed_out, _force_run=_force_run, _keep_temp_files=_keep_temp_files, _label=_label, _verbose=False, **kwargs)
17 | result = job.execute()
18 | return result
19 |
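As a usage illustration, here is a minimal, hypothetical processor run through execute(). The conventions shown (NAME/VERSION attributes, Input/Output/Parameter class attributes, a run() method) are inferred from the names exported by mlprocessors/__init__.py, and DoubleText itself is invented for this sketch; the real API may differ in details:

```python
# Hypothetical sketch only: DoubleText is not a real processor, and the
# Processor conventions shown here are inferred from the package exports.
from mlprocessors import Processor, Input, Output, IntegerParameter
from mlprocessors.execute import execute

class DoubleText(Processor):
    NAME = 'DoubleText'
    VERSION = '0.1.0'
    textfile = Input('Input text file')
    textfile_out = Output('Output text file')
    repeats = IntegerParameter('Number of times to repeat the input')

    def run(self):
        # At run time, inputs/outputs are local file paths
        with open(self.textfile, 'r') as f:
            txt = f.read()
        with open(self.textfile_out, 'w') as f:
            f.write(txt * self.repeats)

result = execute(DoubleText, textfile='in.txt', textfile_out='out.txt',
                 repeats=2, _use_cache=False)
```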
--------------------------------------------------------------------------------
/mountaintools/mlprocessors/jobhandler.py:
--------------------------------------------------------------------------------
1 | import abc
2 | import time
3 | from .mountainjob import MountainJob
4 | from .mountainjobresult import MountainJobResult
5 |
6 |
7 | class JobHandler(abc.ABC):
8 | def __init__(self):
9 | super().__init__()
10 | self._parent_job_handler = None
11 |
12 | @abc.abstractmethod
13 | def executeJob(self, job: MountainJob) -> MountainJobResult:
14 | pass
15 |
16 | @abc.abstractmethod
17 | def iterate(self) -> None:
18 | pass
19 |
20 | @abc.abstractmethod
21 | def isFinished(self) -> bool:
22 | pass
23 |
24 | @abc.abstractmethod
25 | def halt(self) -> None:
26 | pass
27 |
28 | @abc.abstractmethod
29 | def cleanup(self) -> None:
30 | pass
31 |
32 | def wait(self, timeout: float=-1):
33 | timer = time.time()
34 | while not self.isFinished():
35 | self.iterate()
36 | elapsed = time.time() - timer
37 | if (timeout >= 0) and (elapsed > timeout):
38 | return False
39 | if not self.isFinished():
40 | time.sleep(0.2)
41 | return True
42 |
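A concrete handler only needs to fill in the five abstract methods; wait() then supplies the polling loop. A minimal synchronous sketch modeled on DefaultJobHandler above (the timing printout is added purely for illustration and is not part of the package):

```python
# Minimal synchronous handler sketch, mirroring DefaultJobHandler but
# with a timing printout for illustration.
import time
from mlprocessors import MountainJob
from mlprocessors.jobhandler import JobHandler
from mlprocessors.mountainjobresult import MountainJobResult

class TimingJobHandler(JobHandler):
    def executeJob(self, job: MountainJob) -> MountainJobResult:
        t0 = time.time()
        job.result._status = 'running'
        result = job._execute()
        job.result._status = 'finished'
        print('Job finished in {:.2f} sec'.format(time.time() - t0))
        return result

    def iterate(self) -> None:
        pass  # jobs run synchronously; nothing is queued

    def isFinished(self) -> bool:
        return True  # so wait() returns immediately

    def halt(self) -> None:
        pass

    def cleanup(self) -> None:
        pass
```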
--------------------------------------------------------------------------------
/mountaintools/mountainclient/__init__.py:
--------------------------------------------------------------------------------
1 | from .mountainclient import MountainClient, client
2 | from .kachery_tokens import KacheryTokens
3 | from .filelock import FileLock
4 |
--------------------------------------------------------------------------------
/mountaintools/mountainclient/mttyping.py:
--------------------------------------------------------------------------------
1 | from typing import Union, List
2 |
3 | StrOrStrList = Union[str, List[str]]
4 | StrOrDict = Union[str, dict]
5 |
--------------------------------------------------------------------------------
/mountaintools/mountainclient/unittests/try_local_db_multiprocess.py:
--------------------------------------------------------------------------------
1 | from mountaintools import client as mt
2 | import multiprocessing
3 |
4 | # key=dict(test='key')
5 | # for ii in range(20):
6 | # val0='{}'.format(ii)
7 | # mt.setValue(key=key,value=val0)
8 | # val1=mt.getValue(key=key)
9 | # print(val0,val1)
10 | # assert val0==val1
11 |
12 | # for ii in range(20):
13 | # subkey='{}'.format(ii)
14 | # val0='{}'.format(ii)
15 | # mt.setValue(key=key,subkey=subkey,value=val0)
16 | # val1=mt.getValue(key=key,subkey=subkey)
17 | # print(val0,val1)
18 | # assert val0==val1
19 |
20 |
21 | def _test1(ii):
22 | key = dict(test='key3')
23 | val0 = '{}'.format(ii)
24 | mt.setValue(key=key, value=val0)
25 | val1 = mt.getValue(key=key)
26 | return val1
27 |
28 | if __name__ == '__main__':
29 |     pool = multiprocessing.Pool(100)
30 |     pool.map(_test1, list(range(100)))
31 |     pool.close()
32 |     pool.join()
33 |
--------------------------------------------------------------------------------
/mountaintools/mountaintools/__init__.py:
--------------------------------------------------------------------------------
1 | from mountainclient import client
2 | from mountainclient import MountainClient
3 | from mountainclient import FileLock
4 | from .reactcomponentpythoncompanion import ReactComponentPythonCompanion
--------------------------------------------------------------------------------
/mountaintools/mtlogging/__init__.py:
--------------------------------------------------------------------------------
1 | from .mtlogging import log, sublog
2 |
--------------------------------------------------------------------------------
/mountaintools/pairioserver/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM node
2 | MAINTAINER Jeremy Magland
3 | EXPOSE 24341
4 | VOLUME /pairioserver
5 |
6 | ADD . /src
7 | RUN cd /src && \
8 | npm install .
9 | WORKDIR /pairioserver
10 | ENTRYPOINT [ "/src/docker_entry.sh" ]
11 |
--------------------------------------------------------------------------------
/mountaintools/pairioserver/docker_entry.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -e
4 |
5 | export CAIRIO_ADMIN_TOKEN=$1
6 | export MONGODB_URL=$2
7 | export PORT=$3
8 |
9 | node /src/pairioserver/pairioserver.js
10 |
--------------------------------------------------------------------------------
/mountaintools/pairioserver/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "pairio",
3 | "version": "0.1.0",
4 | "description": "Pairio server and python client",
5 | "main": "javascript/index.js",
6 | "scripts": {
7 | "test": "echo \"Error: no test specified\" && exit 1",
8 | "server": "node pairioserver/pairioserver.js"
9 | },
10 | "repository": {
11 | "type": "git",
12 | "url": "git+https://github.com/magland/pairio.git"
13 | },
14 | "author": "Jeremy Magland",
15 | "license": "Apache-2.0",
16 | "bugs": {
17 | "url": "https://github.com/magland/pairio/issues"
18 | },
19 | "homepage": "https://github.com/magland/pairio#readme",
20 | "dependencies": {
21 | "axios": "^0.18.0",
22 | "cors": "^2.8.4",
23 | "dotenv": "^6.1.0",
24 | "express": "^4.16.4",
25 | "json-stable-stringify": "^1.0.1",
26 | "mongodb": "^3.1.8",
27 | "request": "^2.88.0"
28 | }
29 | }
--------------------------------------------------------------------------------
/mountaintools/pytest.ini:
--------------------------------------------------------------------------------
1 | [pytest]
2 | addopts = --ignore=old --ignore=tmp -m "not slow and not exclude"
3 |
--------------------------------------------------------------------------------
/mountaintools/requirements.txt:
--------------------------------------------------------------------------------
1 | # mountaintools dependencies
2 | # you can install these via: pip install -r requirements.txt
3 | numpy
4 | matplotlib
5 | requests
6 | pillow
7 | pandas
8 | ipython
9 | h5py
--------------------------------------------------------------------------------
/mountaintools/setup.py:
--------------------------------------------------------------------------------
1 | import setuptools
2 |
3 | pkg_name = "mountaintools"
4 |
5 | # NOTE: you should install this project in development mode
6 | # > python setup.py develop
7 |
8 | setuptools.setup(
9 | name=pkg_name,
10 | version="0.7.1",
11 | author="Jeremy Magland",
12 | author_email="jmagland@flatironinstitute.org",
13 | description="Tools for reproducible scientific research",
14 | packages=setuptools.find_packages(),
15 | scripts=[
16 | 'bin/mt-cat',
17 | 'bin/mt-ls',
18 | 'bin/mt-download',
19 | 'bin/mt-snapshot',
20 | 'bin/mt-resolve-key-path',
21 | 'bin/mt-find',
22 | 'bin/kachery-token',
23 | 'bin/mt-execute-job'
24 | ],
25 | install_requires=[
26 | 'matplotlib', 'requests', 'ipython', 'simple-crypt', 'python-dotenv', 'simplejson'
27 | # 'matplotlib','requests','ipython','simple-crypt','python-dotenv', 'asyncio', 'nest_asyncio', 'aiohttp'
28 | ],
29 | classifiers=(
30 | "Programming Language :: Python :: 3",
31 | "License :: OSI Approved :: Apache Software License",
32 | "Operating System :: OS Independent",
33 | )
34 | )
35 |
--------------------------------------------------------------------------------
/mountaintools/vdomr/README.md:
--------------------------------------------------------------------------------
1 | # vdomr
2 |
3 | TODO
--------------------------------------------------------------------------------
/mountaintools/vdomr/__init__.py:
--------------------------------------------------------------------------------
1 | from .helpers import *
2 | from .component import Component
3 | from . import devel
4 | from . import components
5 | from .vdomr import register_callback, create_callback, invoke_callback, exec_javascript, set_timeout, _take_javascript_to_execute, _set_server_session
6 | from .vdomr import config_jupyter, config_colab, config_server, config_pyqt5, mode, init_colab
7 | from .vdomr import pyqt5_start
8 |
9 | from .vdomrserver import VDOMRServer
10 |
--------------------------------------------------------------------------------
/mountaintools/vdomr/components/__init__.py:
--------------------------------------------------------------------------------
1 | from .components import SelectBox, Button, LineEdit, Pyplot, ScrollArea, LazyDiv, PlotlyPlot, RadioButton
2 |
--------------------------------------------------------------------------------
/mountaintools/vdomr/devel/__init__.py:
--------------------------------------------------------------------------------
1 | from .devel import loadBootstrap, loadCss, loadJavascript
2 |
--------------------------------------------------------------------------------
/mountaintools/vdomr/google/colab/__init__.py:
--------------------------------------------------------------------------------
1 | from . import output
2 |
--------------------------------------------------------------------------------
/mountaintools/vdomr/google/colab/output/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright 2017 Google Inc.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | """Colabs output package."""
15 | # pylint: disable=g-multiple-import
16 | from google.colab.output._js import register_callback
17 |
--------------------------------------------------------------------------------
/mountaintools/vdomr/server_example/server_example.py:
--------------------------------------------------------------------------------
1 | import vdomr as vd
2 | import os
3 |
4 |
5 | class Status(vd.Component):
6 | def __init__(self):
7 | vd.Component.__init__(self)
8 | self._status = ''
9 |
10 | def setStatus(self, status):
11 | self._status = status
12 | self.refresh()
13 |
14 | def render(self):
15 | return vd.div('STATUS: ' + self._status)
16 |
17 |
18 | class MyApp():
19 | def __init__(self):
20 | pass
21 |
22 | def createSession(self):
23 | status = Status()
24 | status.setStatus('test1')
25 |
26 | def on_click():
27 | print('clicked')
28 | status.setStatus('clicked...')
29 | return 'return_string'
30 | root = vd.div(vd.h3('testing'), vd.h2('testing2'),
31 | vd.button('push me', onclick=on_click), status)
32 | return root
33 |
34 |
35 | if __name__ == "__main__":
36 | APP = MyApp()
37 | server = vd.VDOMRServer(APP)
38 | server.start()
39 |
--------------------------------------------------------------------------------
/old/gui/batchmonitor.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | gui/batchmonitor/start_batchmonitor.py --collection=spikeforest --share_id=spikeforest.spikeforest2
4 |
--------------------------------------------------------------------------------
/old/gui/browse_recordings.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | gui/browse_recordings/start_browse_recordings.py --collection=spikeforest --share_id=spikeforest.spikeforest2
4 |
--------------------------------------------------------------------------------
/old/gui/sfbrowser.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | gui/sfbrowser/start_sfbrowser.py --collection=spikeforest --share_id=spikeforest.spikeforest2
4 |
--------------------------------------------------------------------------------
/old/gui/sfbrowser_snr.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | gui/sfbrowser/start_sfbrowser.py --collection=spikeforest --share_id=spikeforest.spikeforest2
4 |
--------------------------------------------------------------------------------
/old/gui/sorting_result_explorer.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | gui/sorting_result_explorer/start_sorting_result_explorer.py --collection=spikeforest --share_id=spikeforest.spikeforest2
4 |
--------------------------------------------------------------------------------
/old/gui/sorting_result_explorer/start_sorting_result_explorer.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | import os
4 | import vdomr as vd
5 | from mountaintools import client as mt
6 | from sortingresultsexplorermainwindow import SortingResultsExplorerMainWindow
7 | os.environ['SIMPLOT_SRC_DIR'] = '../../simplot'
8 |
9 |
10 | class TheApp():
11 | def __init__(self):
12 | pass
13 |
14 | def createSession(self):
15 | print('creating main window')
16 | W = SortingResultsExplorerMainWindow()
17 | print('done creating main window')
18 | return W
19 |
20 |
21 | def main():
22 | # Configure readonly access to kbucket
23 | mt.autoConfig(collection='spikeforest', key='spikeforest2-readonly')
24 |
25 | APP = TheApp()
26 | server = vd.VDOMRServer(APP)
27 | server.start()
28 |
29 |
30 | if __name__ == "__main__":
31 | main()
32 |
--------------------------------------------------------------------------------
/old/gui/test_view_timeseries.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -e
3 |
4 | ./view_timeseries.py kbucket://15734439d8cf/groundtruth/visapy_mea/set1 "$@"
5 |
--------------------------------------------------------------------------------
/old/misc/simplot/index.html:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/flatironinstitute/spikeforest_old/d9470194dc906b949178b9c44d14aea57a1f6c27/old/misc/simplot/index.html
--------------------------------------------------------------------------------
/pytest.ini:
--------------------------------------------------------------------------------
1 | [pytest]
2 | addopts = --ignore=old --ignore=tmp -m "not slow and not exclude"
3 |
4 | # Ignore warnings such as DeprecationWarning and pytest.PytestUnknownMarkWarning
5 | filterwarnings = ignore::pytest.PytestWarning
6 |
--------------------------------------------------------------------------------
/run_pytest.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Run pytest with a clean temporary directory for MOUNTAIN_DIR and KBUCKET_CACHE_DIR
4 | # You can still send command-line arguments
5 |
6 | TEST_DIR=$PWD/tmp_pytest_working
7 |
8 | if [ -d "$TEST_DIR" ]; then
9 | rm -rf "$TEST_DIR";
10 | fi
11 |
12 | mkdir -p $TEST_DIR/.mountain
13 | mkdir -p $TEST_DIR/sha1-cache
14 | export MOUNTAIN_DIR=$TEST_DIR/.mountain
15 | export KBUCKET_CACHE_DIR=$TEST_DIR/sha1-cache
16 |
17 | pytest "$@"
18 |
19 |
--------------------------------------------------------------------------------
/spikeforest/.bumpversion.cfg:
--------------------------------------------------------------------------------
1 | [bumpversion]
2 | current_version = 0.10.3
3 | commit = True
4 | tag = True
5 |
6 | [bumpversion:file:setup.py]
7 |
8 |
--------------------------------------------------------------------------------
/spikeforest/.travis.yml:
--------------------------------------------------------------------------------
1 | language: python
2 | python:
3 | - '3.6'
4 | install:
5 | - pip install pytest
6 | - pip install mountaintools==0.7.1
7 | - pip install .
8 | script:
9 | - echo "no test"
10 | # - pytest -s
11 | deploy:
12 | provider: pypi
13 | user: magland
14 | on:
15 | tags: true
16 | password:
17 | secure: qE/jQlSwRcAX75u+Pg5KQqRKcKbbaEDIPcRgeMuJHhuPe+ed93NgVbOAaRcrxmmN1RAATBczPy/Iu/oJKfvqTMzGzD2urctllFZH4B7L9ToGyqS90cQFiBKpPa0qM3DediF7WbenHXzuupoo5txBr6UmDVnucGleVAksXxyw3HsmC8f0Xf50cdJyxQFtswvTiIloJubrm2DjX9GBtgHvZDLkiY1yyu9sOEScX+spZ17hIOQMeR0q15GBtqbg7J9QyzB6vJUumWj5/DLy1sZxHVAREEFb+KB9xjs2nncZPCz/ysZowg1Wg8aRFW7l772Z8j5Do/1shVtj6jQ8NyZ7uNH6HN7LOxmUmvQWPvKSdpHStj9y/Tgo/IrWfKTsNJq2aK0MgFDCD2BfQZ0cyQeaMnP5rzuOlVL7W3m7sEXp01raJfusp5vxEfgTqYZvPC2553/0gzbq3E0nkM+sswe/eDDb4F3qdWs4bSjKjNTZEpZhRZcZmyaJDq9K0eNi8YHD/+YDzdR+daKQkNREIHXcOdQFKLOIKuEGp7/mDMvT7I1lU4y2CuXq+DIObFw6uoX1SDucrH05uMQzghqeMnXjZqjoy60a3d9hvpgDf6I5Wmbxed0v2DHif/Skm1YI3FynY7Hk2QvzmRnZSjnLx5UoR2/VD57ElWYvTUWH745EpFo=
18 |
--------------------------------------------------------------------------------
/spikeforest/MANIFEST.in:
--------------------------------------------------------------------------------
1 | recursive-include spikesorters *.params
2 | recursive-include spikesorters config*
3 | recursive-include spikeforestwidgets *.js
4 | recursive-include spikeforestwidgets *.css
5 | recursive-include forestview *.js
6 | recursive-include forestview *.css
7 |
--------------------------------------------------------------------------------
/spikeforest/containers/build_simg_using_docker.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -e
3 |
4 | DEST=$1
5 | RECIPE=$2
6 |
7 | ## This will be the command we run inside the docker container
8 | cmd="singularity build /tmp/out.simg $RECIPE"
9 |
10 | ## Run the command inside the docker container
11 | docker rm build_sing || echo "."
12 | docker run --privileged --userns=host --name build_sing -v $PWD:/working magland/singularity:2.6.0 \
13 | bash -c "$cmd"
14 |
15 | echo "Copying file out of container"
16 | docker cp build_sing:/tmp/out.simg $DEST
17 |
--------------------------------------------------------------------------------
/spikeforest/containers/pysing/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM ubuntu:18.04
2 |
3 | #########################################
4 | ### Python, etc
5 | RUN apt-get update && apt-get -y install git wget build-essential
6 | RUN apt-get install -y python3 python3-pip
7 | RUN ln -s python3 /usr/bin/python
8 | RUN ln -s pip3 /usr/bin/pip
9 | RUN DEBIAN_FRONTEND=noninteractive apt-get install -y python3-tk
10 |
11 | RUN echo "30 May 2019"
12 |
13 | #########################################
14 | # Singularity 2.6.1
15 | # See: https://www.sylabs.io/guides/2.6/user-guide/quick_start.html#quick-installation-steps
16 | RUN git clone https://github.com/sylabs/singularity.git /singularity
17 | WORKDIR /singularity
18 | RUN git fetch --all && git checkout 2.6.1
19 | RUN apt-get update && apt-get install -y libtool m4 automake libarchive-dev
20 | RUN ./autogen.sh
21 | RUN ./configure --prefix=/usr/local
22 | RUN make
23 | RUN make install
24 |
25 | #########################################
26 | # For convenience
27 | RUN apt-get update && apt-get install -y nano curl
--------------------------------------------------------------------------------
/spikeforest/containers/pysing/readme.txt:
--------------------------------------------------------------------------------
1 | # build the docker container and push to docker hub
2 | docker build -t magland/pysing .
3 | docker push magland/pysing
4 |
5 | # Enter shell within the container
6 | docker run -it magland/pysing bash
7 |
8 | # you may want to mount the kbucket cache directory
9 | -v $KBUCKET_CACHE_DIR:/tmp/sha1-cache
10 |
--------------------------------------------------------------------------------
/spikeforest/containers/spikeforest_basic/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM ubuntu:18.04
2 |
3 | #########################################
4 | ### Python, etc
5 | RUN apt-get update && apt-get -y install git wget build-essential
6 | RUN apt-get install -y python3 python3-pip
7 | RUN ln -s python3 /usr/bin/python
8 | RUN ln -s pip3 /usr/bin/pip
9 | RUN DEBIAN_FRONTEND=noninteractive apt-get install -y python3-tk
10 |
11 | RUN echo "29 March 2019"
12 |
13 | #########################################
14 | # spikeforest dependencies
15 | RUN pip install numpy scipy matplotlib requests pillow pandas ipython h5py setuptools-git scikit-learn python-frontmatter
16 | RUN pip install spikeextractors==0.4.2
17 | RUN pip install spiketoolkit==0.3.4
18 |
--------------------------------------------------------------------------------
/spikeforest/containers/spikeforest_basic/build_simg.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -e
3 |
4 | ../build_simg_using_docker.sh spikeforest_basic.simg docker://magland/spikeforest_basic
5 |
--------------------------------------------------------------------------------
/spikeforest/containers/spikeforest_basic/readme.txt:
--------------------------------------------------------------------------------
1 | # first build (if needed)
2 | docker build -t magland/spikeforest_basic .
3 |
4 | # then push to docker hub (if needed)
5 | docker push magland/spikeforest_basic
6 |
7 | # then create singularity image
8 | ./build_simg.sh
9 |
--------------------------------------------------------------------------------
/spikeforest/containers/spikeforest_basic/upload_singularity_container.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | from mountaintools import client as mt
4 |
5 | sha1_path = mt.saveFile('spikeforest_basic.simg')
6 | print(sha1_path)
7 | sha1_path = mt.saveFile('spikeforest_basic.simg', upload_to='spikeforest.public')
8 | print(sha1_path)
9 |
--------------------------------------------------------------------------------
/spikeforest/containers/spikeforest_basic_jjun/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM ubuntu:18.04
2 |
3 | #########################################
4 | ### Python, etc
5 | RUN apt-get update && apt-get -y install git wget build-essential
6 | RUN apt-get install -y python3 python3-pip
7 | RUN ln -s python3 /usr/bin/python
8 | RUN ln -s pip3 /usr/bin/pip
9 | RUN DEBIAN_FRONTEND=noninteractive apt-get install -y python3-tk
10 |
11 | RUN echo "26 May 2019"
12 |
13 | #########################################
14 | # spikeforest dependencies
15 | RUN pip install numpy scipy matplotlib requests pillow pandas ipython h5py setuptools-git scikit-learn python-frontmatter
16 | RUN pip install spikeextractors==0.4.2
17 |
--------------------------------------------------------------------------------
/spikeforest/containers/spikeforest_basic_jjun/build_simg.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -e
3 |
4 | ../build_simg_using_docker.sh spikeforest_basic.simg docker://jamesjun/spikeforest_basic
5 |
--------------------------------------------------------------------------------
/spikeforest/containers/spikeforest_basic_jjun/readme.txt:
--------------------------------------------------------------------------------
1 | # first build (if needed)
2 | docker build -t jamesjun/spikeforest_basic .
3 |
4 | # then push to docker hub (if needed)
5 | docker push jamesjun/spikeforest_basic
6 |
7 | # then create singularity image
8 | ./build_simg.sh
9 |
--------------------------------------------------------------------------------
/spikeforest/containers/spikeforest_basic_jjun/upload_singularity_container.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | from mountaintools import client as mt
4 |
5 | mt.login()
6 | sha1_path = mt.saveFile('spikeforest_basic.simg', upload_to='spikeforest.kbucket')
7 | print(sha1_path)
8 |
--------------------------------------------------------------------------------
/spikeforest/forestview/__init__.py:
--------------------------------------------------------------------------------
1 | from .core import ForestViewMainWindow
2 | from .spikeforestcontext import SpikeForestContext
3 | from .analysiscontext import AnalysisContext
4 | from .forestview import forestview
5 |
--------------------------------------------------------------------------------
/spikeforest/forestview/analysis_views/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/flatironinstitute/spikeforest_old/d9470194dc906b949178b9c44d14aea57a1f6c27/spikeforest/forestview/analysis_views/__init__.py
--------------------------------------------------------------------------------
/spikeforest/forestview/analysis_views/testview.py:
--------------------------------------------------------------------------------
1 | import vdomr as vd
2 |
3 |
4 | class TestView(vd.Component):
5 | def __init__(self, context, opts=None, prepare_result=None):
6 | vd.Component.__init__(self)
7 | self._context = context
8 | self._opts = opts
9 | self._size = (100, 100)
10 | self._widget = TestWidget(context)
11 |
12 | @staticmethod
13 | def prepareView(context, opts):
14 | # prepare code goes here
15 | # Or, you can remove this function altogether
16 | pass
17 |
18 | def setSize(self, size):
19 | self._size = size
20 | self._widget.setSize(size)
21 |
22 | def size(self):
23 | return self._size
24 |
25 | def tabLabel(self):
26 | return 'Test view'
27 |
28 | def render(self):
29 | return self._widget
30 |
31 |
32 | class TestWidget(vd.Component):
33 | def __init__(self, context):
34 | vd.Component.__init__(self)
35 | self._context = context
36 | self._size = (100, 100)
37 |
38 | def setSize(self, size):
39 | self._size = size
40 |
41 | def size(self):
42 | return self._size
43 |
44 | def render(self):
45 | return vd.div('Test created via snippet.')
46 |
--------------------------------------------------------------------------------
/spikeforest/forestview/core/.gitignore:
--------------------------------------------------------------------------------
1 | chrome-tabs/.gitignore
2 | chrome-tabs/*.json
3 | chrome-tabs/*.html
4 | chrome-tabs/*.gif
5 | chrome-tabs/*.mov
6 | chrome-tabs/*.md
7 | chrome-tabs/demo
8 | chrome-tabs/svg
9 |
--------------------------------------------------------------------------------
/spikeforest/forestview/core/__init__.py:
--------------------------------------------------------------------------------
1 | from .forestviewmainwindow import ForestViewMainWindow
2 |
--------------------------------------------------------------------------------
/spikeforest/forestview/core/chrome-tabs/LICENSE.txt:
--------------------------------------------------------------------------------
1 | The MIT License (MIT)
2 | Copyright (c) 2013 Adam Schwartz
3 |
4 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
5 |
6 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
7 |
8 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
9 |
--------------------------------------------------------------------------------
/spikeforest/forestview/recording_views/__init__.py:
--------------------------------------------------------------------------------
1 | from .recording_view_launchers import get_recording_view_launchers
2 |
--------------------------------------------------------------------------------
/spikeforest/forestview/recording_views/currentstateview.py:
--------------------------------------------------------------------------------
1 | import vdomr as vd
2 | import json
3 |
4 |
5 | class CurrentStateView(vd.Component):
6 | def __init__(self, context):
7 | vd.Component.__init__(self)
8 | self._context = context
9 | self._context.onAnyStateChanged(self.refresh)
10 | self._size = (100, 100)
11 |
12 | def tabLabel(self):
13 | return 'Current state'
14 |
15 | def setSize(self, size):
16 | if self._size == size:
17 | return
18 | self._size = size
19 | self.refresh()
20 |
21 | def size(self):
22 | return self._size
23 |
24 | def render(self):
25 | state0 = self._context.stateObject()
26 | return vd.div(
27 | vd.pre(
28 | json.dumps(state0, indent=4)
29 | )
30 | )
31 |
--------------------------------------------------------------------------------
/spikeforest/forestview/spikeforest_views/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/flatironinstitute/spikeforest_old/d9470194dc906b949178b9c44d14aea57a1f6c27/spikeforest/forestview/spikeforest_views/__init__.py
--------------------------------------------------------------------------------
/spikeforest/forestview/spikeforest_views/currentstateview.py:
--------------------------------------------------------------------------------
1 | import vdomr as vd
2 | import json
3 |
4 |
5 | class CurrentStateView(vd.Component):
6 | def __init__(self, context, opts=None):
7 | vd.Component.__init__(self)
8 | self._context = context
9 | self._context.onAnyStateChanged(self.refresh)
10 | self._size = (100, 100)
11 |
12 | def tabLabel(self):
13 | return 'Current state'
14 |
15 | def setSize(self, size):
16 | if self._size == size:
17 | return
18 | self._size = size
19 | self.refresh()
20 |
21 | def size(self):
22 | return self._size
23 |
24 | def render(self):
25 | state0 = self._context.stateObject()
26 | return vd.div(
27 | vd.pre(
28 | json.dumps(state0, indent=4)
29 | )
30 | )
31 |
--------------------------------------------------------------------------------
/spikeforest/forestview/spikefront_view_launchers.py:
--------------------------------------------------------------------------------
1 | from .spikeforest_views.currentstateview import CurrentStateView
2 | from .spikefront_views.mainresulttableview import MainResultTableView
3 |
4 | import vdomr as vd
5 | from mountaintools import client as mt
6 | import json
7 |
8 |
9 | def get_spikefront_view_launchers(context):
10 | launchers = []
11 | groups = []
12 | ret = dict(
13 | groups=groups,
14 | launchers=launchers
15 | )
16 |
17 | # General
18 | groups.append(dict(name='general', label=''))
19 |
20 | launchers.append(dict(
21 | group='general', name='current-state', label='Current state',
22 | view_class=CurrentStateView,
23 | context=context, opts=dict(),
24 | enabled=True
25 | ))
26 |
27 | launchers.append(dict(
28 | group='general', name='main-result-table', label='Main result table',
29 | view_class=MainResultTableView,
30 | context=context, opts=dict(),
31 | always_open_new=False,
32 | enabled=True
33 | ))
34 |
35 | return ret
36 |
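Since each launcher is a plain dict consumed by the forestview framework, registering an additional view is a matter of appending another entry; a hypothetical sketch reusing the TestView class from forestview/analysis_views/testview.py (the group/name/label strings here are made up):

```python
# Hypothetical sketch: a registry for one extra view, following the
# dict pattern of get_spikefront_view_launchers above.
from forestview.analysis_views.testview import TestView

def get_custom_view_launchers(context):
    launchers = []
    groups = [dict(name='general', label='')]
    launchers.append(dict(
        group='general', name='test-view', label='Test view',
        view_class=TestView,
        context=context, opts=dict(),
        enabled=True
    ))
    return dict(groups=groups, launchers=launchers)
```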
--------------------------------------------------------------------------------
/spikeforest/forestview/spikefront_views/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/flatironinstitute/spikeforest_old/d9470194dc906b949178b9c44d14aea57a1f6c27/spikeforest/forestview/spikefront_views/__init__.py
--------------------------------------------------------------------------------
/spikeforest/pytest.ini:
--------------------------------------------------------------------------------
1 | [pytest]
2 | addopts = --ignore=old --ignore=tmp -m "not slow and not exclude"
3 |
--------------------------------------------------------------------------------
/spikeforest/setup.py:
--------------------------------------------------------------------------------
1 | import setuptools
2 |
3 | pkg_name = "spikeforest"
4 |
5 | # NOTE: you should install this project in development mode
6 | # > python setup.py develop
7 |
8 | setuptools.setup(
9 | name=pkg_name,
10 | version="0.11.0",
11 | author="Jeremy Magland",
12 | author_email="jmagland@flatironinstitute.org",
13 | description="Spike sorting",
14 | packages=setuptools.find_packages(),
15 | package_dir={
16 | 'spikeforest': 'spikeforest',
17 | 'spikeforestsorters': 'spikeforestsorters',
18 | 'forestview': 'forestview',
19 | 'spikeforest_analysis': 'spikeforest_analysis',
20 | 'spikeforest_common': 'spikeforest_common'
21 | },
22 | include_package_data=True,
23 | install_requires=[
24 | 'numpy', 'scipy', 'matplotlib',
25 | 'requests', 'pillow', 'pandas',
26 | 'ipython', 'h5py', 'setuptools-git',
27 | 'scikit-learn', 'python-frontmatter',
28 | 'spikeextractors==0.5.4',
29 | 'spiketoolkit==0.3.6',
30 | 'spikesorters==0.1.3'
31 | ],
32 | scripts=['bin/forestview'],
33 | classifiers=(
34 | "Programming Language :: Python :: 3",
35 | "License :: OSI Approved :: Apache Software License",
36 | "Operating System :: OS Independent",
37 | )
38 | )
39 |
--------------------------------------------------------------------------------
/spikeforest/sfdata/__init__.py:
--------------------------------------------------------------------------------
1 | from .sfdata import SFData
2 |
--------------------------------------------------------------------------------
/spikeforest/spikeforest/__init__.py:
--------------------------------------------------------------------------------
1 | from .extractors import SFMdaRecordingExtractor, SFMdaSortingExtractor, mdaio
2 | from .extractors import EfficientAccessRecordingExtractor
3 | from .extractors import loadProbeFile, saveProbeFile, writeBinaryDatFormat, getSubExtractorsByProperty
4 | from .extractors import NeuroscopeSortingExtractor
5 | from . import example_datasets
6 |
--------------------------------------------------------------------------------
/spikeforest/spikeforest/example_datasets/__init__.py:
--------------------------------------------------------------------------------
1 | from .toy_example1 import toy_example1
2 | from .real import real
3 | from .yass_example import yass_example
4 |
--------------------------------------------------------------------------------
/spikeforest/spikeforest/example_datasets/real.py:
--------------------------------------------------------------------------------
1 | from spikeforest import SFMdaRecordingExtractor
2 | import numpy as np
3 |
4 |
5 | def real(name='franklab_tetrode', download=True):
6 | if name == 'franklab_tetrode':
7 | dsdir = 'kbucket://b5ecdf1474c5/datasets/neuron_paper/franklab_tetrode'
8 | IX = SFMdaRecordingExtractor(dataset_directory=dsdir, download=download)
9 | return (IX, None)
10 | else:
11 | raise Exception('Unrecognized name for real dataset: ' + name)
12 |
--------------------------------------------------------------------------------
/spikeforest/spikeforest/example_datasets/synthesize_timeseries.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 |
4 | def synthesize_timeseries(*, sorting, waveforms, noise_level=1, samplerate=30000, duration=60, waveform_upsamplefac=13):
5 | num_timepoints = np.int64(samplerate * duration)
6 | waveform_upsamplefac = int(waveform_upsamplefac)
7 | W = waveforms
8 |
9 |     M, TT, _ = W.shape
10 | T = int(TT / waveform_upsamplefac)
11 | Tmid = int(np.ceil((T + 1) / 2 - 1))
12 |
13 | N = num_timepoints
14 |
15 | X = np.random.randn(M, N) * noise_level
16 |
17 | unit_ids = sorting.get_unit_ids()
18 | for k0 in unit_ids:
19 | waveform0 = waveforms[:, :, k0 - 1]
20 | times0 = sorting.get_unit_spike_train(unit_id=k0)
21 | for t0 in times0:
22 | amp0 = 1
23 | frac_offset = int(np.floor((t0 - np.floor(t0)) * waveform_upsamplefac))
24 | tstart = np.int64(np.floor(t0)) - Tmid
25 | if (0 <= tstart) and (tstart + T <= N):
26 |                 X[:, tstart:tstart + T] = X[:, tstart:tstart + T] + waveform0[:, frac_offset::waveform_upsamplefac] * amp0
27 |
28 |     return X
29 |
--------------------------------------------------------------------------------
/spikeforest/spikeforest/example_datasets/toy_example1.py:
--------------------------------------------------------------------------------
1 | import spikeextractors as se
2 | import numpy as np
3 | from .synthesize_random_waveforms import synthesize_random_waveforms
4 | from .synthesize_random_firings import synthesize_random_firings
5 | from .synthesize_timeseries import synthesize_timeseries
6 |
7 |
8 | def toy_example1(duration=10, num_channels=4, samplerate=30000, K=10, firing_rates=None, noise_level=10):
9 | upsamplefac = 13
10 |
11 | waveforms, geom = synthesize_random_waveforms(K=K, M=num_channels, average_peak_amplitude=-100,
12 | upsamplefac=upsamplefac)
13 | times, labels = synthesize_random_firings(K=K, duration=duration, samplerate=samplerate, firing_rates=firing_rates)
14 | labels = labels.astype(np.int64)
15 | OX = se.NumpySortingExtractor()
16 | OX.set_times_labels(times, labels)
17 | X = synthesize_timeseries(sorting=OX, waveforms=waveforms, noise_level=noise_level, samplerate=samplerate, duration=duration,
18 | waveform_upsamplefac=upsamplefac)
19 |
20 | IX = se.NumpyRecordingExtractor(timeseries=X, samplerate=samplerate, geom=geom)
21 | return (IX, OX)
22 |
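A quick usage sketch: generate a short toy recording and inspect it through the extractor interfaces, assuming the snake_case spikeextractors API used elsewhere in this package:

```python
# Usage sketch for toy_example1; method names assume the snake_case
# spikeextractors API pinned in setup.py.
from spikeforest import example_datasets

recording, sorting_true = example_datasets.toy_example1(duration=10, num_channels=4, K=10)

print(recording.get_num_channels())        # e.g. 4
print(recording.get_sampling_frequency())  # e.g. 30000
for unit_id in sorting_true.get_unit_ids():
    n = len(sorting_true.get_unit_spike_train(unit_id=unit_id))
    print('Unit {}: {} events'.format(unit_id, n))
```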
--------------------------------------------------------------------------------
/spikeforest/spikeforest/example_datasets/yass_example.py:
--------------------------------------------------------------------------------
1 | from spikeforest import SFMdaRecordingExtractor, SFMdaSortingExtractor, example_datasets
2 | import numpy as np
3 | import os
4 |
5 |
6 | def yass_example(download=True, set_id=1):
7 | if set_id in range(1, 7):
8 | dsdir = 'kbucket://15734439d8cf/groundtruth/visapy_mea/set{}'.format(set_id)
9 | IX = SFMdaRecordingExtractor(dataset_directory=dsdir, download=download)
10 | path1 = os.path.join(dsdir, 'firings_true.mda')
11 | print(path1)
12 | OX = SFMdaSortingExtractor(path1)
13 | return (IX, OX)
14 | else:
15 |         raise Exception('Invalid ID for yass_example: {} is not between 1 and 6'.format(set_id))
16 |
--------------------------------------------------------------------------------
/spikeforest/spikeforest/extractors/__init__.py:
--------------------------------------------------------------------------------
1 | from .sfmdaextractors import SFMdaRecordingExtractor, SFMdaSortingExtractor, mdaio
2 | from .efficientaccess import EfficientAccessRecordingExtractor
3 | from .neuroscopesortingextractor import NeuroscopeSortingExtractor
4 | from .tools import loadProbeFile, saveProbeFile, writeBinaryDatFormat, getSubExtractorsByProperty
5 |
--------------------------------------------------------------------------------
/spikeforest/spikeforest/extractors/efficientaccess/__init__.py:
--------------------------------------------------------------------------------
1 | from .efficientaccessrecordingextractor import EfficientAccessRecordingExtractor
2 |
--------------------------------------------------------------------------------
/spikeforest/spikeforest/extractors/klustasortingextractor/__init__.py:
--------------------------------------------------------------------------------
1 | from .klustasortingextractor import KlustaSortingExtractor
2 |
--------------------------------------------------------------------------------
/spikeforest/spikeforest/extractors/neuroscopesortingextractor/__init__.py:
--------------------------------------------------------------------------------
1 | from .neuroscopesortingextractor import NeuroscopeSortingExtractor
2 |
--------------------------------------------------------------------------------
/spikeforest/spikeforest/extractors/sfmdaextractors/__init__.py:
--------------------------------------------------------------------------------
1 | from .sfmdaextractors import SFMdaRecordingExtractor, SFMdaSortingExtractor
2 | from . import mdaio
3 |
--------------------------------------------------------------------------------
/spikeforest/spikeforest/spikewidgets/__init__.py:
--------------------------------------------------------------------------------
1 | from .widgets.timeserieswidget import TimeseriesWidget
2 | from .widgets.featurespacewidget import FeatureSpaceWidget
3 | from .widgets.unitwaveformswidget import UnitWaveformsWidget
4 | from .widgets.sortingaccuracywidget import SortingAccuracyWidget
5 | from .widgets.crosscorrelogramswidget import CrossCorrelogramsWidget
6 | from .widgets.electrodegeometrywidget import ElectrodeGeometryWidget
7 |
8 | from .tables.sortingcomparisontable import SortingComparisonTable
9 |
10 | from .validation.sortingcomparison import SortingComparison
11 |
12 | from . import example_datasets
13 | from . import lazyfilters
14 | from . import devel
15 |
--------------------------------------------------------------------------------
/spikeforest/spikeforest/spikewidgets/devel/__init__.py:
--------------------------------------------------------------------------------
1 | from .compute_unit_snrs import compute_unit_snrs
2 | from .saveplot import savePlot
3 |
--------------------------------------------------------------------------------
/spikeforest/spikeforest/spikewidgets/devel/saveplot.py:
--------------------------------------------------------------------------------
1 | from matplotlib import pyplot as plt
2 | from PIL import Image
3 | import os
4 |
5 | def savePlot(fname, quality=40, close_figure=True):
6 |     plt.savefig(fname + '.png')  # write the current figure to a temporary PNG
7 |     if close_figure:
8 |         plt.close()
9 |     im = Image.open(fname + '.png').convert('RGB')
10 |     os.remove(fname + '.png')  # remove the temporary PNG
11 |     im.save(fname, quality=quality)  # re-encode at the given quality; format inferred from fname
--------------------------------------------------------------------------------
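A hedged usage sketch for savePlot above: it writes the current matplotlib figure to a temporary PNG, re-encodes it through PIL at the given quality, and saves the result to fname, so fname should carry an extension PIL can recognize. The import path below follows the package layout shown in this listing:

    import numpy as np
    from matplotlib import pyplot as plt
    from spikeforest.spikewidgets.devel import savePlot

    plt.plot(np.sin(np.linspace(0, 10, 1000)))
    savePlot('sine.jpg', quality=40)  # figure is closed; sine.jpg is a re-encoded JPEG

--------------------------------------------------------------------------------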
/spikeforest/spikeforest/spikewidgets/example_datasets/__init__.py:
--------------------------------------------------------------------------------
1 | from .toy_example1 import toy_example1
2 | from .real import real
3 |
--------------------------------------------------------------------------------
/spikeforest/spikeforest/spikewidgets/example_datasets/real.py:
--------------------------------------------------------------------------------
1 | from spikeforest import SFMdaRecordingExtractor
2 | import numpy as np
3 |
4 |
5 | def real(name='franklab_tetrode', download=True):
6 | if name == 'franklab_tetrode':
7 | dsdir = 'kbucket://b5ecdf1474c5/datasets/neuron_paper/franklab_tetrode'
8 | IX = SFMdaRecordingExtractor(dataset_directory=dsdir, download=download)
9 | return (IX, None)
10 | else:
11 | raise Exception('Unrecognized name for real dataset: ' + name)
12 |
--------------------------------------------------------------------------------
/spikeforest/spikeforest/spikewidgets/example_datasets/synthesize_timeseries.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 |
4 | def synthesize_timeseries(*, sorting, waveforms, noise_level=1, samplerate=30000, duration=60, waveform_upsamplefac=13):
5 | num_timepoints = np.int64(samplerate * duration)
6 | waveform_upsamplefac = int(waveform_upsamplefac)
7 | W = waveforms
8 |
9 |     M, TT, _ = W.shape  # channels, upsampled waveform length, number of units
10 |     T = int(TT / waveform_upsamplefac)  # waveform length at the output sample rate
11 |     Tmid = int(np.ceil((T + 1) / 2 - 1))  # index of the center sample of each waveform
12 |
13 |     N = num_timepoints
14 |
15 |     X = np.random.randn(M, N) * noise_level  # Gaussian background noise
16 |
17 |     unit_ids = sorting.get_unit_ids()
18 |     for k0 in unit_ids:
19 |         waveform0 = waveforms[:, :, k0 - 1]  # unit ids are assumed to be 1..K
20 |         times0 = sorting.get_unit_spike_train(unit_id=k0)
21 |         for t0 in times0:
22 |             amp0 = 1  # per-spike amplitude scale (fixed here)
23 |             frac_offset = int(np.floor((t0 - np.floor(t0)) * waveform_upsamplefac))  # sub-sample offset
24 |             tstart = np.int64(np.floor(t0)) - Tmid
25 |             if (0 <= tstart) and (tstart + T <= N):
26 |                 X[:, tstart:tstart + T] += waveform0[:, frac_offset::waveform_upsamplefac] * amp0
27 |
28 | return X
29 |
--------------------------------------------------------------------------------
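A note on the indexing in synthesize_timeseries above: each stored waveform has T * waveform_upsamplefac samples, and the slice waveform0[:, frac_offset::waveform_upsamplefac] takes every waveform_upsamplefac-th sample starting at frac_offset, yielding exactly T samples shifted by a sub-sample fraction frac_offset / waveform_upsamplefac. A tiny self-contained illustration:

    import numpy as np

    up = 13                    # waveform_upsamplefac
    T = 4                      # waveform length at the output rate
    w_up = np.arange(T * up)   # stand-in for one channel of an upsampled waveform

    # Each strided slice has exactly T samples; the starting offset selects
    # the sub-sample alignment of the waveform.
    print(w_up[0::up])   # [ 0 13 26 39]
    print(w_up[6::up])   # [ 6 19 32 45]

--------------------------------------------------------------------------------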
/spikeforest/spikeforest/spikewidgets/example_datasets/toy_example1.py:
--------------------------------------------------------------------------------
1 | import spikeextractors as se
2 | import numpy as np
3 | from .synthesize_random_waveforms import synthesize_random_waveforms
4 | from .synthesize_random_firings import synthesize_random_firings
5 | from .synthesize_timeseries import synthesize_timeseries
6 |
7 |
8 | def toy_example1(duration=10, num_channels=4, samplerate=30000, K=10):
9 | upsamplefac = 13
10 |
11 | waveforms, geom = synthesize_random_waveforms(K=K, M=num_channels, average_peak_amplitude=-100,
12 | upsamplefac=upsamplefac)
13 | times, labels = synthesize_random_firings(K=K, duration=duration, samplerate=samplerate)
14 | labels = labels.astype(np.int64)
15 | OX = se.NumpySortingExtractor()
16 | OX.set_times_labels(times, labels)
17 | X = synthesize_timeseries(sorting=OX, waveforms=waveforms, noise_level=10, samplerate=samplerate, duration=duration,
18 | waveform_upsamplefac=upsamplefac)
19 |
20 | IX = se.NumpyRecordingExtractor(timeseries=X, samplerate=samplerate, geom=geom)
21 | return (IX, OX)
22 |
--------------------------------------------------------------------------------
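A usage sketch for toy_example1 above (illustration only; the import path follows the package layout shown in this listing, and the accessors are the spikeextractors methods this codebase already relies on):

    from spikeforest.spikewidgets.example_datasets import toy_example1

    # 10 s of synthetic 4-channel data with 10 ground-truth units.
    recording, sorting_true = toy_example1(duration=10, num_channels=4, samplerate=30000, K=10)
    print(recording.get_num_channels())   # 4
    print(sorting_true.get_unit_ids())    # ids of the 10 ground-truth units

--------------------------------------------------------------------------------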
/spikeforest/spikeforest/spikewidgets/lazyfilters/__init__.py:
--------------------------------------------------------------------------------
1 | from .bandpass_filter import bandpass_filter
2 | from .whiten import whiten
3 |
--------------------------------------------------------------------------------
/spikeforest/spikeforest/spikewidgets/tables/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/flatironinstitute/spikeforest_old/d9470194dc906b949178b9c44d14aea57a1f6c27/spikeforest/spikeforest/spikewidgets/tables/__init__.py
--------------------------------------------------------------------------------
/spikeforest/spikeforest/spikewidgets/tables/sortingcomparisontable/__init__.py:
--------------------------------------------------------------------------------
1 | from .sortingcomparisontable import SortingComparisonTable
2 |
--------------------------------------------------------------------------------
/spikeforest/spikeforest/spikewidgets/validation/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/flatironinstitute/spikeforest_old/d9470194dc906b949178b9c44d14aea57a1f6c27/spikeforest/spikeforest/spikewidgets/validation/__init__.py
--------------------------------------------------------------------------------
/spikeforest/spikeforest/spikewidgets/validation/sortingcomparison/__init__.py:
--------------------------------------------------------------------------------
1 | from .sortingcomparison import SortingComparison
2 |
--------------------------------------------------------------------------------
/spikeforest/spikeforest/spikewidgets/widgets/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/flatironinstitute/spikeforest_old/d9470194dc906b949178b9c44d14aea57a1f6c27/spikeforest/spikeforest/spikewidgets/widgets/__init__.py
--------------------------------------------------------------------------------
/spikeforest/spikeforest/spikewidgets/widgets/crosscorrelogramswidget/__init__.py:
--------------------------------------------------------------------------------
1 | from .crosscorrelogramswidget import CrossCorrelogramsWidget
2 | from .crosscorrelogramswidget import compute_crosscorrelogram
3 | from .crosscorrelogramswidget import compute_autocorrelogram
4 |
--------------------------------------------------------------------------------
/spikeforest/spikeforest/spikewidgets/widgets/electrodegeometrywidget/__init__.py:
--------------------------------------------------------------------------------
1 | from .electrodegeometrywidget import ElectrodeGeometryWidget
2 |
--------------------------------------------------------------------------------
/spikeforest/spikeforest/spikewidgets/widgets/featurespacewidget/__init__.py:
--------------------------------------------------------------------------------
1 | from .featurespacewidget import FeatureSpaceWidget
2 |
--------------------------------------------------------------------------------
/spikeforest/spikeforest/spikewidgets/widgets/sortingaccuracywidget/__init__.py:
--------------------------------------------------------------------------------
1 | from .sortingaccuracywidget import SortingAccuracyWidget
2 |
--------------------------------------------------------------------------------
/spikeforest/spikeforest/spikewidgets/widgets/timeserieswidget/__init__.py:
--------------------------------------------------------------------------------
1 | from .timeserieswidget import TimeseriesWidget
2 |
--------------------------------------------------------------------------------
/spikeforest/spikeforest/spikewidgets/widgets/unitwaveformswidget/__init__.py:
--------------------------------------------------------------------------------
1 | from .unitwaveformswidget import UnitWaveformsWidget
2 |
--------------------------------------------------------------------------------
/spikeforest/spikeforest_analysis/__init__.py:
--------------------------------------------------------------------------------
1 | from .compare_sortings_with_truth import GenSortingComparisonTable, GenSortingComparisonTableNew
2 | from .aggregate_sorting_results import aggregate_sorting_results
3 | from .compute_units_info import compute_units_info, ComputeUnitsInfo
4 | from .computerecordinginfo import ComputeRecordingInfo
5 | from .bandpass_filter import bandpass_filter
6 | from .whiten import whiten
7 | from .sortingcomparison import SortingComparison
8 | from .sort_recordings import find_sorter_processor_and_container
9 | # from .sort_recordings import IronClust
10 | # from .sort_recordings import KiloSort
11 |
--------------------------------------------------------------------------------
/spikeforest/spikeforest_analysis/sfmdaextractors/__init__.py:
--------------------------------------------------------------------------------
1 | from .sfmdaextractors import SFMdaRecordingExtractor, SFMdaSortingExtractor
2 | from . import mdaio
3 |
--------------------------------------------------------------------------------
/spikeforest/spikeforest_analysis/sort_recordings.py:
--------------------------------------------------------------------------------
1 | from mountaintools import client as mt
2 | # from . import sorters as sorters
3 |
4 | from spikeforestsorters import MountainSort4, SpykingCircus, YASS, YASS1, IronClust, KiloSort, KiloSort2, MountainSort4TestError, HerdingSpikes2, JRClust, Klusta, Tridesclous, Waveclus
5 |
6 | Processors = dict(
7 | MountainSort4=(MountainSort4, 'default'),
8 | IronClust=(IronClust, None),
9 | SpykingCircus=(SpykingCircus, 'default'),
10 | KiloSort=(KiloSort, None),
11 | KiloSort2=(KiloSort2, None),
12 | Yass=(YASS, 'default'),
13 | Yass1=(YASS1, 'default'),
14 | MountainSort4TestError=(MountainSort4TestError, 'default'),
15 | HerdingSpikes2=(HerdingSpikes2, 'default'),
16 | JRClust=(JRClust, None),
17 | Klusta=(Klusta, 'default'),
18 | Tridesclous=(Tridesclous, 'default'),
19 | Waveclus=(Waveclus, None),
20 | )
21 |
22 |
23 | def find_sorter_processor_and_container(processor_name):
24 | if processor_name not in Processors:
25 | raise Exception('No such sorter: ' + processor_name)
26 | SS = Processors[processor_name][0]
27 | SS_container = Processors[processor_name][1]
28 | if SS_container:
29 | if SS_container == 'default':
30 | SS_container = SS.CONTAINER
31 | return SS, SS_container
32 |
--------------------------------------------------------------------------------
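A short sketch of how the Processors registry above is consulted (the import follows spikeforest_analysis/__init__.py shown earlier): the second element of each registry entry is either None (run without a container), a specific container, or 'default', which find_sorter_processor_and_container resolves to the processor class's CONTAINER attribute.

    from spikeforest_analysis import find_sorter_processor_and_container

    # 'default' in the registry resolves to MountainSort4.CONTAINER:
    SS, container = find_sorter_processor_and_container('MountainSort4')
    print(SS.__name__, container)

    # Unknown names raise: Exception('No such sorter: ...')
    # find_sorter_processor_and_container('NoSuchSorter')

--------------------------------------------------------------------------------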
/spikeforest/spikeforest_analysis/summarize_recordings.py:
--------------------------------------------------------------------------------
1 | # from matplotlib import pyplot as plt
2 | from .compute_units_info import ComputeUnitsInfo
3 | from .computerecordinginfo import ComputeRecordingInfo
4 |
--------------------------------------------------------------------------------
/spikeforest/spikeforest_analysis/summarize_sortings.py:
--------------------------------------------------------------------------------
1 | import mlprocessors as mlpr
2 | from mountaintools import client as mt
3 | import os
4 | from copy import deepcopy
5 | import mtlogging
6 |
7 |
8 | @mtlogging.log()
9 | def summarize_sortings(sortings, compute_resource, label=None):
10 | print('')
11 | print('>>>>>> {}'.format(label or 'summarize sortings'))
12 |
13 | print('Gathering summarized sortings...')
14 | summarized_sortings = []
15 | for sorting in sortings:
16 | summary = dict()
17 |         summary['plots'] = dict()  # placeholder: no summary plots are attached at this stage
18 |
19 | sorting2 = deepcopy(sorting)
20 | sorting2['summary'] = summary
21 | summarized_sortings.append(sorting2)
22 |
23 | return summarized_sortings
24 |
--------------------------------------------------------------------------------
/spikeforest/spikeforest_common/__init__.py:
--------------------------------------------------------------------------------
1 | from .autoscale_recording import autoScaleRecordingToNoiseLevel, estimateRecordingNoiseLevel
2 |
--------------------------------------------------------------------------------
/spikeforest/spikeforestsorters/__init__.py:
--------------------------------------------------------------------------------
1 | from .mountainsort4 import MountainSort4, MountainSort4TestError, MountainSort4Old
2 | from .spyking_circus import SpykingCircus
3 | from .yass import YASS
4 | from .yass1 import YASS1
5 | from .ironclust import IronClust
6 | from .kilosort import KiloSort, KiloSortOld
7 | from .kilosort2 import KiloSort2
8 | from .herdingspikes2 import HerdingSpikes2
9 | from .jrclust import JRClust
10 | from .klusta import Klusta
11 | from .tridesclous import Tridesclous
12 | #from .tridesclous_old import TridesclousOld
13 | from .waveclus import Waveclus
14 |
--------------------------------------------------------------------------------
/spikeforest/spikeforestsorters/build_simg_using_docker.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -e
3 |
4 | DEST=$1
5 | RECIPE=$2
6 |
7 | ## This will be the command we run inside docker container
8 | cmd="singularity build /tmp/out.simg $RECIPE"
9 |
10 | ## Run the command inside the docker container
11 | docker rm build_sing || echo "."
12 | docker run --privileged --userns=host --name build_sing -v $PWD:/working magland/singularity:2.6.0 \
13 | bash -c "$cmd"
14 |
15 | echo "Copying file out of container"
16 | docker cp build_sing:/tmp/out.simg $DEST
17 |
--------------------------------------------------------------------------------
/spikeforest/spikeforestsorters/containers/yass/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM ubuntu:18.04
2 |
3 | RUN echo "7 February 2019"
4 |
5 | #########################################
6 | ### Python, etc
7 | RUN apt-get update && apt-get -y install git wget build-essential
8 | RUN apt-get install -y python3 python3-pip
9 | RUN ln -s python3 /usr/bin/python
10 | RUN ln -s pip3 /usr/bin/pip
11 | RUN DEBIAN_FRONTEND=noninteractive apt-get install -y python3-tk
12 |
13 | #########################################
14 | ### spikeforest2
15 | RUN mkdir /src
16 | RUN git clone https://github.com/flatironinstitute/spikeforest2 /src/spikeforest2
17 | WORKDIR /src/spikeforest2
18 | RUN pip install -r requirements.txt
19 | RUN python setup.py develop
20 |
21 | #########################################
22 | ### Install yass, a previous version
23 | RUN mkdir -p /src
24 | RUN git clone https://github.com/paninski-lab/yass.git /src/yass
25 | WORKDIR /src/yass
26 | RUN git checkout tags/0.5
27 | RUN python setup.py develop
28 |
29 | ## call yass via python /path/to/binary/yass
30 |
--------------------------------------------------------------------------------
/spikeforest/spikeforestsorters/containers/yass/build_simg.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -e
3 |
4 | ../build_simg_using_docker.sh yass.simg docker://jamesjun/yass
5 |
--------------------------------------------------------------------------------
/spikeforest/spikeforestsorters/containers/yass/readme.txt:
--------------------------------------------------------------------------------
1 | # first build (if needed)
2 | docker build -t jamesjun/yass .
3 |
4 | # then push to docker hub (if needed)
5 | docker push jamesjun/yass
6 |
7 | # then create singularity image
8 | ./build_simg.sh
9 |
--------------------------------------------------------------------------------
/spikeforest/spikeforestsorters/descriptions/alg_ironclust.md:
--------------------------------------------------------------------------------
1 | ---
2 | label: IRONCLUST
3 | dockerfile:
4 | environment: MATLAB
5 | wrapper: https://github.com/flatironinstitute/spikeforest/blob/master/spikeforest/spikesorters/ironclust/ironclust.py
6 | website: https://github.com/jamesjun/ironclust
7 | source_code: https://github.com/jamesjun/ironclust
8 | authors: James J. Jun
9 | processor_name: IronClust
10 | doi:
11 | ---
12 |
13 | # IRONCLUST
14 |
15 | ## Description
16 |
17 | Spike sorting software developed at the Flatiron Institute, based on JRCLUST (Janelia Rocket Clust).
18 |
19 | ## Installation notes
20 |
21 | **Prerequisites:**
22 |
23 | * MATLAB with the following
24 | - Statistics and Machine Learning Toolbox
25 | - Parallel Computing Toolbox
26 |
27 | ## References
28 |
29 |
--------------------------------------------------------------------------------
/spikeforest/spikeforestsorters/descriptions/alg_jrclust.md:
--------------------------------------------------------------------------------
1 | ---
2 | label: JRCLUST
3 | dockerfile:
4 | environment: MATLAB
5 | wrapper: https://github.com/flatironinstitute/spikeforest/blob/master/spikeforest/spikesorters/jrclust/jrclust.py
6 | notes:
7 | website: https://github.com/JaneliaSciComp/JRCLUST
8 | source_code: https://github.com/JaneliaSciComp/JRCLUST
9 | authors: James Jun and Alan Liddell
10 | processor_name: JRClust
11 | doi: 10.1101/101030
12 | ---
13 |
14 | # JRCLUST
15 |
16 | ## Description
17 |
18 | From the website: JRCLUST is a scalable and customizable package for spike sorting on high-density silicon probes. It is written in MATLAB and CUDA. JRCLUST was originally developed by James Jun and is currently maintained by Vidrio Technologies.
19 |
20 | ## References
21 | [1] Jun, JJ et al., Real-time spike sorting platform for high-density extracellular probes with ground-truth validation and drift correction. bioRxiv, 101030; doi: https://doi.org/10.1101/101030
--------------------------------------------------------------------------------
/spikeforest/spikeforestsorters/descriptions/alg_jrclust.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/flatironinstitute/spikeforest_old/d9470194dc906b949178b9c44d14aea57a1f6c27/spikeforest/spikeforestsorters/descriptions/alg_jrclust.png
--------------------------------------------------------------------------------
/spikeforest/spikeforestsorters/descriptions/alg_kilosort.md:
--------------------------------------------------------------------------------
1 | ---
2 | label: KILOSORT
3 | dockerfile:
4 | environment: MATLAB
5 | wrapper: https://github.com/flatironinstitute/spikeforest/blob/master/spikeforest/spikesorters/kilosort/kilosort.py
6 | website: https://github.com/cortex-lab/KiloSort
7 | source_code: https://github.com/cortex-lab/KiloSort
8 | authors: Marius Pachitariu
9 | processor_name: KiloSort
10 | doi: 10.1101/061481
11 | ---
12 |
13 | # KILOSORT
14 |
15 | ## Description
16 |
17 | *From the KiloSort website*: Fast spike sorting for hundreds of channels. Implements an integrated template matching framework for detecting and clustering spikes from multi-channel electrophysiological recordings.
18 |
19 | ## References
20 |
21 | [1] Pachitariu, Marius, et al. "Fast and accurate spike sorting of high-channel count probes with KiloSort." Advances in Neural Information Processing Systems. 2016.
22 |
23 | [2] Pachitariu, Marius, et al. "Kilosort: realtime spike-sorting for extracellular electrophysiology with hundreds of channels." BioRxiv (2016): 061481.
--------------------------------------------------------------------------------
/spikeforest/spikeforestsorters/descriptions/alg_kilosort.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/flatironinstitute/spikeforest_old/d9470194dc906b949178b9c44d14aea57a1f6c27/spikeforest/spikeforestsorters/descriptions/alg_kilosort.png
--------------------------------------------------------------------------------
/spikeforest/spikeforestsorters/descriptions/alg_kilosort2.md:
--------------------------------------------------------------------------------
1 | ---
2 | label: KILOSORT2
3 | dockerfile:
4 | environment: MATLAB
5 | wrapper: https://github.com/flatironinstitute/spikeforest/blob/master/spikeforest/spikesorters/kilosort2/kilosort2.py
6 | website: https://github.com/MouseLand/Kilosort2
7 | source_code: https://github.com/MouseLand/Kilosort2
8 | authors: Marius Pachitariu
9 | processor_name: KiloSort2
10 | doi:
11 | ---
12 |
13 | # KILOSORT2
14 |
15 | ## Description
16 |
17 | *From the KiloSort2 website*: Kilosort2: automated spike sorting with drift tracking and template matching on GPUs
18 |
19 | A Matlab package for spike sorting electrophysiological data up to 1024 channels. In many cases, and especially for Neuropixels probes, the automated output of Kilosort2 requires minimal manual curation.
20 |
21 | ## Installation notes
22 |
23 | Requires the CUDA toolkit to be installed.
24 |
25 | ## References
26 |
--------------------------------------------------------------------------------
/spikeforest/spikeforestsorters/descriptions/alg_mountainsort4.md:
--------------------------------------------------------------------------------
1 | ---
2 | label: MOUNTAINSORT4
3 | dockerfile: https://github.com/flatironinstitute/spikeforest/blob/master/spikeforest/spikesorters/mountainsort4/container/Dockerfile
4 | wrapper: https://github.com/flatironinstitute/spikeforest/blob/master/spikeforest/spikesorters/mountainsort4/mountainsort4.py
5 | website: https://github.com/flatironinstitute/mountainsort_examples/blob/master/README.md
6 | source_code: https://github.com/magland/ml_ephys
7 | authors: Jeremy Magland, Alex Barnett, Jason Chung, Loren Frank, and Leslie Greengard
8 | processor_name: MountainSort4
9 | doi: 10.1016/j.neuron.2017.08.030
10 | ---
11 |
12 | # MOUNTAINSORT4
13 |
14 | ## Description
15 |
16 | MountainSort is a spike sorting algorithm that uses a novel density-based clustering method called ISO-SPLIT.
17 |
18 | ## References
19 |
20 | Chung, J. E., Magland, J. F., Barnett, A. H., Tolosa, V. M., Tooker, A. C., Lee, K. Y., ... & Greengard, L. F. (2017). A fully automated approach to spike sorting. Neuron, 95(6), 1381-1394.
--------------------------------------------------------------------------------
/spikeforest/spikeforestsorters/descriptions/alg_mountainsort4.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/flatironinstitute/spikeforest_old/d9470194dc906b949178b9c44d14aea57a1f6c27/spikeforest/spikeforestsorters/descriptions/alg_mountainsort4.png
--------------------------------------------------------------------------------
/spikeforest/spikeforestsorters/descriptions/alg_spyking_circus.md:
--------------------------------------------------------------------------------
1 | ---
2 | label: SPYKING_CIRCUS
3 |
4 | dockerfile: https://github.com/flatironinstitute/spikeforest/blob/master/spikeforest/spikesorters/spyking_circus/container/Dockerfile
5 | wrapper: https://github.com/flatironinstitute/spikeforest/blob/master/spikeforest/spikesorters/spyking_circus/spyking_circus.py
6 | website: https://spyking-circus.readthedocs.io/en/latest/
7 | authors: Pierre Yger and Olivier Marre
8 | processor_name: SpykingCircus
9 | doi: 10.7554/eLife.34518
10 | ---
11 |
12 | # SPYKING_CIRCUS
13 |
14 | ## Description
15 |
16 | From the SpyKING CIRCUS website: The SpyKING CIRCUS is a massively parallel code to perform semi automatic spike sorting on large extra-cellular recordings. Using a smart clustering and a greedy template matching approach, the code can solve the problem of overlapping spikes, and has been tested both for in vitro and in vivo data, from tens of channels to up to 4225 channels.
17 |
18 | ## References
19 |
20 | Yger P., Spampinato, G.L.B, Esposito E., Lefebvre B., Deny S., Gardella C., Stimberg M., Jetter F., Zeck G. Picaud S., Duebel J., Marre O., A spike sorting toolbox for up to thousands of electrodes validated with ground truth recordings in vitro and in vivo, eLife 2018;7:e34518
--------------------------------------------------------------------------------
/spikeforest/spikeforestsorters/descriptions/alg_spyking_circus.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/flatironinstitute/spikeforest_old/d9470194dc906b949178b9c44d14aea57a1f6c27/spikeforest/spikeforestsorters/descriptions/alg_spyking_circus.png
--------------------------------------------------------------------------------
/spikeforest/spikeforestsorters/descriptions/alg_tridesclous.md:
--------------------------------------------------------------------------------
1 | ---
2 | label: TRIDESCLOUS
3 | dockerfile: https://github.com/flatironinstitute/spikeforest/blob/master/spikeforest/spikesorters/container/Dockerfile
4 | wrapper: https://github.com/flatironinstitute/spikeforest/blob/master/spikeforest/spikesorters/tridesclous/tridesclous.py
5 | notes:
6 | website: https://github.com/tridesclous/tridesclous
7 | source_code:
8 | authors: Samuel Garcia and Christophe Pouzat
9 | processor_name: Tridesclous
10 | doi:
11 | ---
12 |
13 | # TRIDESCLOUS
14 |
15 | ## Description
16 |
17 | From the Tridesclous website: The primary goal of tridesclous is to provide a toolkit to teach good practices in spike sorting techniques.
18 | This tool is now mature and can be used for experimental data.
19 |
20 | ## References
21 |
22 |
--------------------------------------------------------------------------------
/spikeforest/spikeforestsorters/descriptions/alg_waveclus.md:
--------------------------------------------------------------------------------
1 | ---
2 | label: Waveclus
3 | dockerfile:
4 | wrapper: https://github.com/flatironinstitute/spikeforest/blob/master/spikeforest/spikesorters/waveclus/waveclus.py
5 | website: https://github.com/csn-le/wave_clus/wiki
6 | source_code: https://github.com/csn-le/wave_clus
7 | authors: F. J. Chaure, H. G. Rey and R. Quian Quiroga
8 | processor_name: Waveclus
9 | doi: 10.1152/jn.00339.2018
10 | ---
11 |
12 | # Waveclus
13 |
14 | ## Description
15 |
16 | Wave_clus is a fast and unsupervised algorithm for spike detection and sorting that runs under Windows, Mac or Linux operating systems.
17 |
18 | To install, download this repository into a folder. In MATLAB (R2009b or higher) go to Set Path and add the directory wave_clus with subfolders to the MATLAB path.
19 |
20 |
21 | ## References
22 |
23 | [1] A novel and fully automatic spike sorting implementation with variable number of features. F. J. Chaure, H. G. Rey and R. Quian Quiroga. Journal of Neurophysiology; 2018. https://doi.org/10.1152/jn.00339.2018
24 |
25 | [2] Quian Quiroga R, Nadasdy Z, Ben-Shaul Y. Unsupervised spike detection and sorting with wavelets and superparamagnetic clustering. Neural Comput 16: 1661–1687, 2004. doi:10.1162/089976604774201631.
--------------------------------------------------------------------------------
/spikeforest/spikeforestsorters/descriptions/alg_waveclus.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/flatironinstitute/spikeforest_old/d9470194dc906b949178b9c44d14aea57a1f6c27/spikeforest/spikeforestsorters/descriptions/alg_waveclus.png
--------------------------------------------------------------------------------
/spikeforest/spikeforestsorters/herdingspikes2/__init__.py:
--------------------------------------------------------------------------------
1 | from .herdingspikes2 import HerdingSpikes2
2 |
--------------------------------------------------------------------------------
/spikeforest/spikeforestsorters/herdingspikes2/container/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM ubuntu:18.04
2 |
3 | #########################################
4 | ### Python, etc
5 | RUN apt-get update && apt-get -y install git wget build-essential
6 | RUN apt-get install -y python3 python3-pip
7 | RUN ln -s python3 /usr/bin/python
8 | RUN ln -s pip3 /usr/bin/pip
9 | RUN DEBIAN_FRONTEND=noninteractive apt-get install -y python3-tk
10 |
11 | RUN echo "21 June 2019"
12 |
13 | ### spikeextractors
14 | RUN pip install spikeextractors==0.5.2
15 |
16 | ### spiketoolkit
17 | RUN pip install spiketoolkit==0.3.4
18 |
19 | #########################################
20 | ### Herding spikes 2
21 | RUN pip install joblib
22 | RUN pip install numpy
23 | RUN pip install herdingspikes==0.3.2
24 |
25 | ### Other dependencies
26 | RUN pip install requests
27 |
--------------------------------------------------------------------------------
/spikeforest/spikeforestsorters/herdingspikes2/container/build_simg.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -e
3 |
4 | ../../build_simg_using_docker.sh herdingspikes2.simg docker://magland/herdingspikes2
5 |
--------------------------------------------------------------------------------
/spikeforest/spikeforestsorters/herdingspikes2/container/readme.txt:
--------------------------------------------------------------------------------
1 | # first build (if needed)
2 | docker build -t magland/herdingspikes2 .
3 |
4 | # then push to docker hub (if needed)
5 | docker push magland/herdingspikes2
6 |
7 | # then create singularity image
8 | ./build_simg.sh
9 |
--------------------------------------------------------------------------------
/spikeforest/spikeforestsorters/herdingspikes2/container/upload_singularity_container.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | from mountaintools import client as mt
4 | sha1_path = mt.saveFile('herdingspikes2.simg', upload_to=['spikeforest.kbucket', 'spikeforest.public'])
5 | print(sha1_path)
6 |
--------------------------------------------------------------------------------
/spikeforest/spikeforestsorters/ironclust/__init__.py:
--------------------------------------------------------------------------------
1 | from .ironclust import IronClust
2 |
--------------------------------------------------------------------------------
/spikeforest/spikeforestsorters/jrclust/__init__.py:
--------------------------------------------------------------------------------
1 | from .jrclust import JRClust
2 |
--------------------------------------------------------------------------------
/spikeforest/spikeforestsorters/jrclust/mdaio/arrayify.m:
--------------------------------------------------------------------------------
1 | function X = arrayify(X)
2 | % ARRAYIFY - if a string, read in from file, otherwise leave as an array
3 | %
4 | % X = arrayify(X)
5 |
6 | % Barnett 6/16/16
7 |
8 | if ischar(X), X = readmda(X); end
9 |
10 |
--------------------------------------------------------------------------------
/spikeforest/spikeforestsorters/jrclust/mdaio/pathify32.m:
--------------------------------------------------------------------------------
1 | function X = pathify32(X)
2 | % PATHIFY32 if array, write to an MDA & give path, otherwise leave as path.
3 | %
4 | % X = pathify32(X) uses single-precision float MDA files.
5 |
6 | % Barnett 6/17/16
7 |
8 | if isnumeric(X)
9 | dir = [tempdir,'/mountainlab/tmp_short_term'];
10 | if ~exist(dir,'dir'), mkdir(dir); end % note can handle creation of parents
11 | fname = [dir,'/',num2str(randi(1e15)),'.mda']; % random filename
12 | writemda(X,fname,'float32');
13 | X = fname;
14 | end
15 |
--------------------------------------------------------------------------------
/spikeforest/spikeforestsorters/jrclust/mdaio/pathify64.m:
--------------------------------------------------------------------------------
1 | function X = pathify64(X)
2 | % PATHIFY64 if array, write to an MDA & give path, otherwise leave as path.
3 | %
4 | % X = pathify64(X) uses double-precision float MDA files.
5 |
6 | % Barnett 6/17/16
7 |
8 | if isnumeric(X)
9 | dir = [tempdir,'/mountainlab/tmp_short_term'];
10 | if ~exist(dir,'dir'), mkdir(dir); end % note can handle creation of parents
11 | fname = [dir,'/',num2str(randi(1e15)),'.mda']; % random filename
12 | writemda(X,fname,'float64');
13 | X = fname;
14 | end
15 |
--------------------------------------------------------------------------------
/spikeforest/spikeforestsorters/jrclust/mdaio/readmdadims.m:
--------------------------------------------------------------------------------
1 | function S=readmdadims(fname)
2 | %READMDADIMS - read only dimensions of a .mda file. MDA stands for multi-dimensional array.
3 | %
4 | % See http://magland.github.io//articles/mda-format/
5 | %
6 | % Syntax: dims=readmdadims(fname)
7 | %
8 | % Inputs:
9 | % fname - path to the .mda file
10 | %
11 | % Outputs:
12 | % dims - row vector of dimension sizes of multi-dimensional array
13 | %
14 | % Other m-files required: none
15 | %
16 | % See also: writemda
17 |
18 | % Author: Alex Barnett 7/22/16
19 |
20 | F=fopen(fname,'rb');
21 |
22 | try
23 | code=fread(F,1,'int32');
24 | catch
25 | error('Problem reading file: %s',fname);
26 | end
27 | if (code>0)
28 | num_dims=code;
29 | code=-1;
30 | else
31 | fread(F,1,'int32');
32 | num_dims=fread(F,1,'int32');
33 | end;
34 | dim_type_str='int32';
35 | if (num_dims<0)
36 | num_dims=-num_dims;
37 | dim_type_str='int64';
38 | end;
39 |
40 | S=zeros(1,num_dims);
41 | for j=1:num_dims
42 | S(j)=fread(F,1,dim_type_str);
43 | end;
44 |
45 | fclose(F);
--------------------------------------------------------------------------------
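For readers not using MATLAB, here is a minimal Python transliteration of the header-parsing logic in readmdadims.m above (a sketch, not a file in the repository; it assumes little-endian byte order, matching MATLAB's default fopen behavior on the platforms this code targets):

    import struct

    def read_mda_dims(fname):
        # Mirror of readmdadims.m: parse only the header of an .mda file.
        with open(fname, 'rb') as f:
            code = struct.unpack('<i', f.read(4))[0]
            if code > 0:
                num_dims = code            # oldest variant: first int32 is ndims
            else:
                f.read(4)                  # skip num_bytes_per_entry
                num_dims = struct.unpack('<i', f.read(4))[0]
            fmt, size = '<i', 4
            if num_dims < 0:
                num_dims = -num_dims       # negative ndims => int64 dimension entries
                fmt, size = '<q', 8
            return [struct.unpack(fmt, f.read(size))[0] for _ in range(num_dims)]

--------------------------------------------------------------------------------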
/spikeforest/spikeforestsorters/jrclust/mdaio/writemda16i.m:
--------------------------------------------------------------------------------
1 | function writemda16i(X,fname)
2 | writemda(X,fname,'int16');
3 | end
--------------------------------------------------------------------------------
/spikeforest/spikeforestsorters/jrclust/mdaio/writemda16ui.m:
--------------------------------------------------------------------------------
1 | function writemda16ui(X,fname)
2 | writemda(X,fname,'uint16');
3 | end
--------------------------------------------------------------------------------
/spikeforest/spikeforestsorters/jrclust/mdaio/writemda32.m:
--------------------------------------------------------------------------------
1 | function writemda32(X,fname)
2 | writemda(X,fname,'float32');
3 | end
--------------------------------------------------------------------------------
/spikeforest/spikeforestsorters/jrclust/mdaio/writemda32ui.m:
--------------------------------------------------------------------------------
1 | function writemda32ui(X,fname)
2 | writemda(X,fname,'uint32');
3 | end
--------------------------------------------------------------------------------
/spikeforest/spikeforestsorters/jrclust/mdaio/writemda64.m:
--------------------------------------------------------------------------------
1 | function writemda64(X,fname)
2 | writemda(X,fname,'float64');
3 | end
--------------------------------------------------------------------------------
/spikeforest/spikeforestsorters/kilosort/__init__.py:
--------------------------------------------------------------------------------
1 | from .kilosort import KiloSort, KiloSortOld
2 |
--------------------------------------------------------------------------------
/spikeforest/spikeforestsorters/kilosort/mdaio/arrayify.m:
--------------------------------------------------------------------------------
1 | function X = arrayify(X)
2 | % ARRAYIFY - if a string, read in from file, otherwise leave as an array
3 | %
4 | % X = arrayify(X)
5 |
6 | % Barnett 6/16/16
7 |
8 | if ischar(X), X = readmda(X); end
9 |
10 |
--------------------------------------------------------------------------------
/spikeforest/spikeforestsorters/kilosort/mdaio/pathify32.m:
--------------------------------------------------------------------------------
1 | function X = pathify32(X)
2 | % PATHIFY32 if array, write to an MDA & give path, otherwise leave as path.
3 | %
4 | % X = pathify32(X) uses single-precision float MDA files.
5 |
6 | % Barnett 6/17/16
7 |
8 | if isnumeric(X)
9 | dir = [tempdir,'/mountainlab/tmp_short_term'];
10 | if ~exist(dir,'dir'), mkdir(dir); end % note can handle creation of parents
11 | fname = [dir,'/',num2str(randi(1e15)),'.mda']; % random filename
12 | writemda(X,fname,'float32');
13 | X = fname;
14 | end
15 |
--------------------------------------------------------------------------------
/spikeforest/spikeforestsorters/kilosort/mdaio/pathify64.m:
--------------------------------------------------------------------------------
1 | function X = pathify64(X)
2 | % PATHIFY64 if array, write to an MDA & give path, otherwise leave as path.
3 | %
4 | % X = pathify64(X) uses double-precision float MDA files.
5 |
6 | % Barnett 6/17/16
7 |
8 | if isnumeric(X)
9 | dir = [tempdir,'/mountainlab/tmp_short_term'];
10 | if ~exist(dir,'dir'), mkdir(dir); end % note can handle creation of parents
11 | fname = [dir,'/',num2str(randi(1e15)),'.mda']; % random filename
12 | writemda(X,fname,'float64');
13 | X = fname;
14 | end
15 |
--------------------------------------------------------------------------------
/spikeforest/spikeforestsorters/kilosort/mdaio/readmdadims.m:
--------------------------------------------------------------------------------
1 | function S=readmdadims(fname)
2 | %READMDADIMS - read only dimensions of a .mda file. MDA stands for multi-dimensional array.
3 | %
4 | % See http://magland.github.io//articles/mda-format/
5 | %
6 | % Syntax: dims=readmdadims(fname)
7 | %
8 | % Inputs:
9 | % fname - path to the .mda file
10 | %
11 | % Outputs:
12 | % dims - row vector of dimension sizes of multi-dimensional array
13 | %
14 | % Other m-files required: none
15 | %
16 | % See also: writemda
17 |
18 | % Author: Alex Barnett 7/22/16
19 |
20 | F=fopen(fname,'rb');
21 |
22 | try
23 | code=fread(F,1,'int32');
24 | catch
25 | error('Problem reading file: %s',fname);
26 | end
27 | if (code>0)
28 | num_dims=code;
29 | code=-1;
30 | else
31 | fread(F,1,'int32');
32 | num_dims=fread(F,1,'int32');
33 | end;
34 | dim_type_str='int32';
35 | if (num_dims<0)
36 | num_dims=-num_dims;
37 | dim_type_str='int64';
38 | end;
39 |
40 | S=zeros(1,num_dims);
41 | for j=1:num_dims
42 | S(j)=fread(F,1,dim_type_str);
43 | end;
44 |
45 | fclose(F);
--------------------------------------------------------------------------------
/spikeforest/spikeforestsorters/kilosort/mdaio/writemda16i.m:
--------------------------------------------------------------------------------
1 | function writemda16i(X,fname)
2 | writemda(X,fname,'int16');
3 | end
--------------------------------------------------------------------------------
/spikeforest/spikeforestsorters/kilosort/mdaio/writemda16ui.m:
--------------------------------------------------------------------------------
1 | function writemda16ui(X,fname)
2 | writemda(X,fname,'uint16');
3 | end
--------------------------------------------------------------------------------
/spikeforest/spikeforestsorters/kilosort/mdaio/writemda32.m:
--------------------------------------------------------------------------------
1 | function writemda32(X,fname)
2 | writemda(X,fname,'float32');
3 | end
--------------------------------------------------------------------------------
/spikeforest/spikeforestsorters/kilosort/mdaio/writemda32ui.m:
--------------------------------------------------------------------------------
1 | function writemda32ui(X,fname)
2 | writemda(X,fname,'uint32');
3 | end
--------------------------------------------------------------------------------
/spikeforest/spikeforestsorters/kilosort/mdaio/writemda64.m:
--------------------------------------------------------------------------------
1 | function writemda64(X,fname)
2 | writemda(X,fname,'float64');
3 | end
--------------------------------------------------------------------------------
/spikeforest/spikeforestsorters/kilosort2/__init__.py:
--------------------------------------------------------------------------------
1 | from .kilosort2 import KiloSort2
2 |
--------------------------------------------------------------------------------
/spikeforest/spikeforestsorters/kilosort2/kilosort2_channelmap.txt:
--------------------------------------------------------------------------------
1 | % create a channel map file
2 |
3 | Nchannels = {}; % number of channels
4 | connected = true(Nchannels, 1);
5 | chanMap = 1:Nchannels;
6 | chanMap0ind = chanMap - 1;
7 |
8 | xcoords = {};
9 | ycoords = {};
10 | kcoords = {};
11 |
12 | fs = {}; % sampling frequency
13 | save(fullfile('chanMap.mat'), ...
14 | 'chanMap','connected', 'xcoords', 'ycoords', 'kcoords', 'chanMap0ind', 'fs')
15 |
--------------------------------------------------------------------------------
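The {} markers in kilosort2_channelmap.txt above are placeholders, presumably filled positionally by the KiloSort2 wrapper before the script is handed to MATLAB. A hedged Python sketch of that substitution (the placeholders appear in the order Nchannels, xcoords, ycoords, kcoords, fs; all values here are made up for illustration):

    with open('kilosort2_channelmap.txt') as f:
        template = f.read()

    script = template.format(
        4,                  # Nchannels
        '[0; 0; 0; 0]',     # xcoords, as a MATLAB column-vector literal
        '[0; 25; 50; 75]',  # ycoords
        'ones(4, 1)',       # kcoords
        30000,              # fs
    )
    with open('kilosort2_channelmap.m', 'w') as f:
        f.write(script)

--------------------------------------------------------------------------------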
/spikeforest/spikeforestsorters/kilosort2/mdaio/arrayify.m:
--------------------------------------------------------------------------------
1 | function X = arrayify(X)
2 | % ARRAYIFY - if a string, read in from file, otherwise leave as an array
3 | %
4 | % X = arrayify(X)
5 |
6 | % Barnett 6/16/16
7 |
8 | if ischar(X), X = readmda(X); end
9 |
10 |
--------------------------------------------------------------------------------
/spikeforest/spikeforestsorters/kilosort2/mdaio/pathify32.m:
--------------------------------------------------------------------------------
1 | function X = pathify32(X)
2 | % PATHIFY32 if array, write to an MDA & give path, otherwise leave as path.
3 | %
4 | % X = pathify32(X) uses single-precision float MDA files.
5 |
6 | % Barnett 6/17/16
7 |
8 | if isnumeric(X)
9 | dir = [tempdir,'/mountainlab/tmp_short_term'];
10 | if ~exist(dir,'dir'), mkdir(dir); end % note can handle creation of parents
11 | fname = [dir,'/',num2str(randi(1e15)),'.mda']; % random filename
12 | writemda(X,fname,'float32');
13 | X = fname;
14 | end
15 |
--------------------------------------------------------------------------------
/spikeforest/spikeforestsorters/kilosort2/mdaio/pathify64.m:
--------------------------------------------------------------------------------
1 | function X = pathify64(X)
2 | % PATHIFY64 if array, write to an MDA & give path, otherwise leave as path.
3 | %
4 | % X = pathify64(X) uses double-precision float MDA files.
5 |
6 | % Barnett 6/17/16
7 |
8 | if isnumeric(X)
9 | dir = [tempdir,'/mountainlab/tmp_short_term'];
10 | if ~exist(dir,'dir'), mkdir(dir); end % note can handle creation of parents
11 | fname = [dir,'/',num2str(randi(1e15)),'.mda']; % random filename
12 | writemda(X,fname,'float64');
13 | X = fname;
14 | end
15 |
--------------------------------------------------------------------------------
/spikeforest/spikeforestsorters/kilosort2/mdaio/readmdadims.m:
--------------------------------------------------------------------------------
1 | function S=readmdadims(fname)
2 | %READMDADIMS - read only dimensions of a .mda file. MDA stands for multi-dimensional array.
3 | %
4 | % See http://magland.github.io//articles/mda-format/
5 | %
6 | % Syntax: dims=readmdadims(fname)
7 | %
8 | % Inputs:
9 | % fname - path to the .mda file
10 | %
11 | % Outputs:
12 | % dims - row vector of dimension sizes of multi-dimensional array
13 | %
14 | % Other m-files required: none
15 | %
16 | % See also: writemda
17 |
18 | % Author: Alex Barnett 7/22/16
19 |
20 | F=fopen(fname,'rb');
21 |
22 | try
23 | code=fread(F,1,'int32');
24 | catch
25 | error('Problem reading file: %s',fname);
26 | end
27 | if (code>0)
28 | num_dims=code;
29 | code=-1;
30 | else
31 | fread(F,1,'int32');
32 | num_dims=fread(F,1,'int32');
33 | end;
34 | dim_type_str='int32';
35 | if (num_dims<0)
36 | num_dims=-num_dims;
37 | dim_type_str='int64';
38 | end;
39 |
40 | S=zeros(1,num_dims);
41 | for j=1:num_dims
42 | S(j)=fread(F,1,dim_type_str);
43 | end;
44 |
45 | fclose(F);
--------------------------------------------------------------------------------
/spikeforest/spikeforestsorters/kilosort2/mdaio/writemda16i.m:
--------------------------------------------------------------------------------
1 | function writemda16i(X,fname)
2 | writemda(X,fname,'int16');
3 | end
--------------------------------------------------------------------------------
/spikeforest/spikeforestsorters/kilosort2/mdaio/writemda16ui.m:
--------------------------------------------------------------------------------
1 | function writemda16ui(X,fname)
2 | writemda(X,fname,'uint16');
3 | end
--------------------------------------------------------------------------------
/spikeforest/spikeforestsorters/kilosort2/mdaio/writemda32.m:
--------------------------------------------------------------------------------
1 | function writemda32(X,fname)
2 | writemda(X,fname,'float32');
3 | end
--------------------------------------------------------------------------------
/spikeforest/spikeforestsorters/kilosort2/mdaio/writemda32ui.m:
--------------------------------------------------------------------------------
1 | function writemda32ui(X,fname)
2 | writemda(X,fname,'uint32');
3 | end
--------------------------------------------------------------------------------
/spikeforest/spikeforestsorters/kilosort2/mdaio/writemda64.m:
--------------------------------------------------------------------------------
1 | function writemda64(X,fname)
2 | writemda(X,fname,'float64');
3 | end
--------------------------------------------------------------------------------
/spikeforest/spikeforestsorters/klusta/__init__.py:
--------------------------------------------------------------------------------
1 | from .klusta import Klusta
2 |
--------------------------------------------------------------------------------
/spikeforest/spikeforestsorters/klusta/config_default.prm:
--------------------------------------------------------------------------------
1 |
2 | experiment_name = r"{}"
3 | prb_file = r"{}"
4 |
5 | traces = dict(
6 | raw_data_files=[experiment_name + '.dat'],
7 | voltage_gain=1.,
8 | sample_rate={},
9 | n_channels={},
10 | dtype={},
11 | )
12 |
13 | spikedetekt = dict(
14 | chunk_size_seconds=3,
15 | chunk_overlap_seconds=.015,
16 |
17 | n_excerpts=50,
18 | excerpt_size_seconds=1,
19 | threshold_strong_std_factor={},
20 | threshold_weak_std_factor={},
21 | detect_spikes={},
22 |
23 | connected_component_join_size=1,
24 |
25 | extract_s_before={},
26 | extract_s_after={},
27 |
28 | n_features_per_channel={},
29 | pca_n_waveforms_max=10000,
30 | )
31 |
32 | klustakwik2 = dict(
33 | num_starting_clusters=50,
34 | )
35 |
--------------------------------------------------------------------------------
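config_default.prm above is likewise a template whose {} slots are presumably filled positionally by the Klusta wrapper. A sketch under that assumption (eleven placeholders, in order of appearance; all values are made up for illustration, and string-valued fields must be passed pre-quoted since the .prm file uses Python syntax):

    with open('config_default.prm') as f:
        template = f.read()

    prm_text = template.format(
        'myexperiment',  # experiment_name
        'probe.prb',     # prb_file
        30000,           # sample_rate
        4,               # n_channels
        "'int16'",       # dtype
        5,               # threshold_strong_std_factor
        2,               # threshold_weak_std_factor
        "'negative'",    # detect_spikes
        16,              # extract_s_before
        32,              # extract_s_after
        3,               # n_features_per_channel
    )
    with open('config.prm', 'w') as f:
        f.write(prm_text)

--------------------------------------------------------------------------------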
/spikeforest/spikeforestsorters/klusta/container/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM ubuntu:18.04
2 |
3 | #########################################
4 | ### Python, etc
5 | RUN apt-get update && apt-get -y install git wget build-essential
6 | RUN apt-get install -y python3 python3-pip
7 | RUN ln -s python3 /usr/bin/python
8 | RUN ln -s pip3 /usr/bin/pip
9 | RUN DEBIAN_FRONTEND=noninteractive apt-get install -y python3-tk
10 |
11 | RUN echo "26 June 2019"
12 |
13 | #########################################
14 | ### Klusta
15 | RUN pip install Cython h5py tqdm
16 | RUN pip install scipy
17 | RUN pip install click klusta klustakwik2
18 |
19 | ### spikeextractors
20 | RUN pip install spikeextractors==0.4.2
21 | RUN pip install spiketoolkit==0.3.4
22 | RUN pip install spikesorters==0.1.1
23 |
24 | ### Other dependencies
25 | RUN pip install requests
26 |
27 | ENV LANG C.UTF-8
28 | ENV LC_ALL C.UTF-8
--------------------------------------------------------------------------------
/spikeforest/spikeforestsorters/klusta/container/build_simg.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -e
3 |
4 | ../../build_simg_using_docker.sh klusta.simg docker://magland/klusta
5 |
--------------------------------------------------------------------------------
/spikeforest/spikeforestsorters/klusta/container/readme.txt:
--------------------------------------------------------------------------------
1 | # first build (if needed)
2 | docker build -t magland/klusta .
3 |
4 | # then push to docker hub (if needed)
5 | docker push magland/klusta
6 |
7 | # then create singularity image
8 | ./build_simg.sh
9 |
--------------------------------------------------------------------------------
/spikeforest/spikeforestsorters/klusta/container/upload_singularity_container.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | from mountaintools import client as mt
4 | sha1_path = mt.saveFile('klusta.simg')
5 | print(sha1_path)
6 | sha1_path = mt.saveFile('klusta.simg', upload_to='spikeforest.public')
7 | print(sha1_path)
8 |
--------------------------------------------------------------------------------
/spikeforest/spikeforestsorters/mountainsort4/__init__.py:
--------------------------------------------------------------------------------
1 | from .mountainsort4 import MountainSort4, MountainSort4TestError, MountainSort4Old
2 |
--------------------------------------------------------------------------------
/spikeforest/spikeforestsorters/mountainsort4/container/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM ubuntu:18.04
2 |
3 | #########################################
4 | ### Python, etc
5 | RUN apt-get update && apt-get -y install git wget build-essential
6 | RUN apt-get install -y python3 python3-pip
7 | RUN ln -s python3 /usr/bin/python
8 | RUN ln -s pip3 /usr/bin/pip
9 | RUN DEBIAN_FRONTEND=noninteractive apt-get install -y python3-tk
10 |
11 | RUN echo "26 June 2019"
12 |
13 | RUN mkdir /src
14 |
15 | #########################################
16 | ### MountainSort
17 | RUN pip install pybind11
18 | RUN pip install isosplit5
19 | RUN git clone https://github.com/magland/ml_ms4alg /src/ml_ms4alg
20 | WORKDIR /src/ml_ms4alg
21 | RUN python setup.py develop
22 |
23 | ### spikeextractors
24 | RUN pip install spikeextractors==0.4.2
25 | RUN pip install spiketoolkit==0.3.4
26 | RUN pip install git+https://github.com/spikeinterface/spikesorters
27 |
28 | ### Other dependencies
29 | RUN pip install requests
--------------------------------------------------------------------------------
/spikeforest/spikeforestsorters/mountainsort4/container/build_simg.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -e
3 |
4 | ../../build_simg_using_docker.sh mountainsort4.simg docker://magland/mountainsort4
5 |
--------------------------------------------------------------------------------
/spikeforest/spikeforestsorters/mountainsort4/container/readme.txt:
--------------------------------------------------------------------------------
1 | # first build (if needed)
2 | docker build -t magland/mountainsort4 .
3 |
4 | # then push to docker hub (if needed)
5 | docker push magland/mountainsort4
6 |
7 | # then create singularity image
8 | ./build_simg.sh
9 |
--------------------------------------------------------------------------------
/spikeforest/spikeforestsorters/mountainsort4/container/upload_singularity_container.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | from mountaintools import client as mt
4 |
5 | mt.login()
6 | sha1_path = mt.saveFile('mountainsort4.simg', upload_to='spikeforest.kbucket')
7 | print(sha1_path)
8 |
--------------------------------------------------------------------------------
/spikeforest/spikeforestsorters/spyking_circus/__init__.py:
--------------------------------------------------------------------------------
1 | from .spyking_circus import SpykingCircus
2 |
--------------------------------------------------------------------------------
/spikeforest/spikeforestsorters/spyking_circus/container/build_simg.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -e
3 |
4 | ../../build_simg_using_docker.sh spyking_circus.simg docker://magland/spyking_circus
5 |
--------------------------------------------------------------------------------
/spikeforest/spikeforestsorters/spyking_circus/container/readme.txt:
--------------------------------------------------------------------------------
1 | # first build (if needed)
2 | docker build -t magland/spyking_circus .
3 |
4 | # then push to docker hub (if needed)
5 | docker push magland/spyking_circus
6 |
7 | # then create singularity image
8 | ./build_simg.sh
9 |
--------------------------------------------------------------------------------
/spikeforest/spikeforestsorters/spyking_circus/container/upload_singularity_container.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | from mountaintools import client as mt
4 |
5 | sha1_path = mt.saveFile('spyking_circus.simg')
6 | print(sha1_path)
7 | sha1_path = mt.saveFile('spyking_circus.simg', upload_to='spikeforest.public')
8 | print(sha1_path)
9 |
--------------------------------------------------------------------------------
/spikeforest/spikeforestsorters/tridesclous/__init__.py:
--------------------------------------------------------------------------------
1 | from .tridesclous import Tridesclous
2 |
--------------------------------------------------------------------------------
/spikeforest/spikeforestsorters/tridesclous/container/build_simg.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -e
3 |
4 | ../../build_simg_using_docker.sh tridesclous.simg docker://magland/tridesclous
5 |
--------------------------------------------------------------------------------
/spikeforest/spikeforestsorters/tridesclous/container/readme.txt:
--------------------------------------------------------------------------------
1 | # first build (if needed)
2 | docker build -t magland/tridesclous .
3 |
4 | # then push to docker hub (if needed)
5 | docker push magland/tridesclous
6 |
7 | # then create singularity image
8 | ./build_simg.sh
9 |
--------------------------------------------------------------------------------
/spikeforest/spikeforestsorters/tridesclous/container/upload_singularity_container.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | from mountaintools import client as mt
4 | sha1_path = mt.saveFile('tridesclous.simg', upload_to='spikeforest.public')
5 | print(sha1_path)
6 |
--------------------------------------------------------------------------------
/spikeforest/spikeforestsorters/waveclus/__init__.py:
--------------------------------------------------------------------------------
1 | from .waveclus import Waveclus
2 |
--------------------------------------------------------------------------------
/spikeforest/spikeforestsorters/waveclus/mdaio/arrayify.m:
--------------------------------------------------------------------------------
1 | function X = arrayify(X)
2 | % ARRAYIFY - if a string, read in from file, otherwise leave as an array
3 | %
4 | % X = arrayify(X)
5 |
6 | % Barnett 6/16/16
7 |
8 | if ischar(X), X = readmda(X); end
9 |
10 |
--------------------------------------------------------------------------------
/spikeforest/spikeforestsorters/waveclus/mdaio/pathify32.m:
--------------------------------------------------------------------------------
1 | function X = pathify32(X)
2 | % PATHIFY32 if array, write to an MDA & give path, otherwise leave as path.
3 | %
4 | % X = pathify32(X) uses single-precision float MDA files.
5 |
6 | % Barnett 6/17/16
7 |
8 | if isnumeric(X)
9 | dir = [tempdir,'/mountainlab/tmp_short_term'];
10 | if ~exist(dir,'dir'), mkdir(dir); end % note can handle creation of parents
11 | fname = [dir,'/',num2str(randi(1e15)),'.mda']; % random filename
12 | writemda(X,fname,'float32');
13 | X = fname;
14 | end
15 |
--------------------------------------------------------------------------------
/spikeforest/spikeforestsorters/waveclus/mdaio/pathify64.m:
--------------------------------------------------------------------------------
1 | function X = pathify64(X)
2 | % PATHIFY64 if array, write to an MDA & give path, otherwise leave as path.
3 | %
4 | % X = pathify64(X) uses double-precision float MDA files.
5 |
6 | % Barnett 6/17/16
7 |
8 | if isnumeric(X)
9 | dir = [tempdir,'/mountainlab/tmp_short_term'];
10 | if ~exist(dir,'dir'), mkdir(dir); end % note can handle creation of parents
11 | fname = [dir,'/',num2str(randi(1e15)),'.mda']; % random filename
12 | writemda(X,fname,'float64');
13 | X = fname;
14 | end
15 |
--------------------------------------------------------------------------------
/spikeforest/spikeforestsorters/waveclus/mdaio/readmdadims.m:
--------------------------------------------------------------------------------
1 | function S=readmdadims(fname)
2 | %READMDADIMS - read only dimensions of a .mda file. MDA stands for multi-dimensional array.
3 | %
4 | % See http://magland.github.io//articles/mda-format/
5 | %
6 | % Syntax: dims=readmdadims(fname)
7 | %
8 | % Inputs:
9 | % fname - path to the .mda file
10 | %
11 | % Outputs:
12 | % dims - row vector of dimension sizes of multi-dimensional array
13 | %
14 | % Other m-files required: none
15 | %
16 | % See also: writemda
17 |
18 | % Author: Alex Barnett 7/22/16
19 |
20 | F=fopen(fname,'rb');
21 |
22 | try
23 | code=fread(F,1,'int32');
24 | catch
25 | error('Problem reading file: %s',fname);
26 | end
27 | if (code>0)
28 | num_dims=code;
29 | code=-1;
30 | else
31 | fread(F,1,'int32');
32 | num_dims=fread(F,1,'int32');
33 | end;
34 | dim_type_str='int32';
35 | if (num_dims<0)
36 | num_dims=-num_dims;
37 | dim_type_str='int64';
38 | end;
39 |
40 | S=zeros(1,num_dims);
41 | for j=1:num_dims
42 | S(j)=fread(F,1,dim_type_str);
43 | end;
44 |
45 | fclose(F);
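
The header layout that readmdadims.m parses (type code, optional bytes-per-entry field, dimension count, then one integer per dimension) translates directly to Python. A sketch of the same logic; read_mda_dims is an illustrative name, not a function in this repo:

    import struct

    def read_mda_dims(fname):
        # Read only the .mda header and return the dimension sizes.
        with open(fname, 'rb') as f:
            code = struct.unpack('<i', f.read(4))[0]
            if code > 0:
                num_dims = code      # legacy header: first int32 is the dim count
            else:
                f.read(4)            # skip the bytes-per-entry field
                num_dims = struct.unpack('<i', f.read(4))[0]
            dim_fmt = '<i'
            if num_dims < 0:         # negative count signals int64 dimension fields
                num_dims = -num_dims
                dim_fmt = '<q'
            n = struct.calcsize(dim_fmt)
            return [struct.unpack(dim_fmt, f.read(n))[0] for _ in range(num_dims)]
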
--------------------------------------------------------------------------------
/spikeforest/spikeforestsorters/waveclus/mdaio/writemda16i.m:
--------------------------------------------------------------------------------
1 | function writemda16i(X,fname)
2 | writemda(X,fname,'int16');
3 | end
--------------------------------------------------------------------------------
/spikeforest/spikeforestsorters/waveclus/mdaio/writemda16ui.m:
--------------------------------------------------------------------------------
1 | function writemda16ui(X,fname)
2 | writemda(X,fname,'uint16');
3 | end
--------------------------------------------------------------------------------
/spikeforest/spikeforestsorters/waveclus/mdaio/writemda32.m:
--------------------------------------------------------------------------------
1 | function writemda32(X,fname)
2 | writemda(X,fname,'float32');
3 | end
--------------------------------------------------------------------------------
/spikeforest/spikeforestsorters/waveclus/mdaio/writemda32ui.m:
--------------------------------------------------------------------------------
1 | function writemda32ui(X,fname)
2 | writemda(X,fname,'uint32');
3 | end
--------------------------------------------------------------------------------
/spikeforest/spikeforestsorters/waveclus/mdaio/writemda64.m:
--------------------------------------------------------------------------------
1 | function writemda64(X,fname)
2 | writemda(X,fname,'float64');
3 | end
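
The five writemda*.m files above are one-line wrappers that pin the dtype argument of writemda. For illustration only, a hedged Python sketch of a minimal writer for the header layout described above, plus the same wrapper pattern via partial application; the negative dtype codes follow the mountainlab mdaio convention and should be checked against the real writemda.m before use:

    import struct
    from functools import partial
    import numpy as np

    # Assumed dtype codes (mountainlab mdaio convention; verify before relying on them).
    _MDA_CODES = {'float32': -3, 'int16': -4, 'int32': -5,
                  'uint16': -6, 'float64': -7, 'uint32': -8}

    def writemda(X, fname, dtype):
        X = np.asarray(X, dtype=dtype)
        with open(fname, 'wb') as f:
            # Header: dtype code, bytes per entry, dim count, then each dimension.
            f.write(struct.pack('<3i', _MDA_CODES[dtype], X.itemsize, X.ndim))
            for d in X.shape:
                f.write(struct.pack('<i', d))
            X.flatten(order='F').tofile(f)  # column-major, matching MATLAB's layout

    # The MATLAB wrappers then collapse to partial application:
    writemda32 = partial(writemda, dtype='float32')
    writemda64 = partial(writemda, dtype='float64')
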
--------------------------------------------------------------------------------
/spikeforest/spikeforestsorters/yass/__init__.py:
--------------------------------------------------------------------------------
1 | from .yass import YASS
2 |
--------------------------------------------------------------------------------
/spikeforest/spikeforestsorters/yass/config_default.yaml:
--------------------------------------------------------------------------------
1 | # Sample configuration file; for a complete reference see
2 | # examples/config_sample_complete.yaml
3 |
4 | # Placeholder fields (the {} values below), with example values:
5 | # root_folder=data/, recordings=neuropixel.bin, geometry=neuropixel_channels.npy, dtype=int16, sampling_rate=30000, n_channels=10, spatial_radius=70, spike_size_ms=1, filter=True
6 |
7 | data:
8 | root_folder: {}
9 | recordings: {}
10 | geometry: {}
11 |
12 | resources:
13 | max_memory: 1000000000
14 |
15 | recordings:
16 | dtype: {}
17 | sampling_rate: {}
18 | n_channels: {}
19 | spatial_radius: {}
20 | spike_size_ms: {}
21 |
22 | preprocess:
23 | filter: {}
24 | templates_partial_data: 1
25 | whiten_batchwise: False
26 |
27 | spikes:
28 | detection: threshold
29 | temporal_features: 3
30 |
31 | deconvolution:
32 | rank: 3
33 | threshold: 4
34 | lam: 20
35 |
36 | neural_network_detector:
37 | filename: detect_nn1.ckpt
38 | threshold_spike: 0.5
39 |
40 | neural_network_triage:
41 | filename: triage_nn1.ckpt
42 | threshold_collision: 0.9
43 |
44 | neural_network_autoencoder:
45 | filename: ae_nn1.ckpt
46 |
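
The empty {} values in this template are per-run placeholders. Because {} parses as an empty mapping in YAML, one way a wrapper could populate them (a hypothetical sketch; fill_yass_config is not part of this repo) is to load the template, overwrite the placeholder fields, and dump a concrete config:

    import yaml

    def fill_yass_config(template_path, out_path, **v):
        with open(template_path) as f:
            cfg = yaml.safe_load(f)   # each {} loads as an empty dict
        cfg['data'].update(root_folder=v['root_folder'],
                           recordings=v['recordings'],
                           geometry=v['geometry'])
        cfg['recordings'].update(dtype=v['dtype'],
                                 sampling_rate=v['sampling_rate'],
                                 n_channels=v['n_channels'],
                                 spatial_radius=v['spatial_radius'],
                                 spike_size_ms=v['spike_size_ms'])
        cfg['preprocess']['filter'] = v['filter']
        with open(out_path, 'w') as f:
            yaml.safe_dump(cfg, f)
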
--------------------------------------------------------------------------------
/spikeforest/spikeforestsorters/yass/container/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM ubuntu:18.04
2 |
3 | #########################################
4 | ### Python, etc
5 | RUN apt-get update && apt-get -y install git wget build-essential
6 | RUN apt-get install -y python3 python3-pip
7 | RUN ln -s python3 /usr/bin/python
8 | RUN ln -s pip3 /usr/bin/pip
9 | RUN DEBIAN_FRONTEND=noninteractive apt-get install -y python3-tk
10 |
11 | RUN echo "6 May 2019"  # bump this date to invalidate the Docker cache for the layers below
12 |
13 | #########################################
14 | ### Install yass, pinned to release tag 0.5
15 | RUN pip install tensorflow==1.13.0rc1
16 | RUN git clone https://github.com/paninski-lab/yass.git /src/yass && cd /src/yass && git checkout tags/0.5
17 | WORKDIR /src/yass
18 | RUN pip install -r requirements.txt
19 | RUN python setup.py install
20 |
21 | ### spikeextractors
22 | RUN pip install spikeextractors==0.4.2
23 |
24 | ### Other dependencies
25 | RUN pip install requests
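
Once built, the image can be smoke-tested before pushing; a minimal check (not part of the repo) that the pinned yass and spikeextractors installs import cleanly inside the container:

    import subprocess

    # Run a throwaway container from the image built above and verify imports.
    subprocess.run(
        ['docker', 'run', '--rm', 'magland/yass',
         'python', '-c', 'import yass, spikeextractors; print("ok")'],
        check=True)
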
--------------------------------------------------------------------------------
/spikeforest/spikeforestsorters/yass/container/build_simg.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -e
3 |
4 | ../../build_simg_using_docker.sh yass.simg docker://magland/yass
5 |
--------------------------------------------------------------------------------
/spikeforest/spikeforestsorters/yass/container/build_simg_magland.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -e
3 |
4 | ../../build_simg_using_docker.sh yass.simg docker://magland/yass
5 |
--------------------------------------------------------------------------------
/spikeforest/spikeforestsorters/yass/container/readme.txt:
--------------------------------------------------------------------------------
1 | # first build (if needed)
2 | docker build -t magland/yass .
3 |
4 | # then push to docker hub (if needed)
5 | docker push magland/yass
6 |
7 | # then create singularity image
8 | ./build_simg.sh
9 |
--------------------------------------------------------------------------------
/spikeforest/spikeforestsorters/yass/container/upload_singularity_container.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | from mountaintools import client as mt
4 | mt.login()
5 | sha1_path = mt.saveFile('yass.simg', upload_to='spikeforest.kbucket')
6 | print(sha1_path)
7 |
--------------------------------------------------------------------------------
/spikeforest/spikeforestsorters/yass1/__init__.py:
--------------------------------------------------------------------------------
1 | from .yass1 import YASS1
2 |
--------------------------------------------------------------------------------
/spikeforest/spikeforestsorters/yass1/container/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM ubuntu:18.04
2 |
3 | #########################################
4 | ### Python, etc
5 | RUN apt-get update && apt-get -y install git wget build-essential
6 | RUN apt-get install -y python3 python3-pip
7 | RUN ln -s python3 /usr/bin/python
8 | RUN ln -s pip3 /usr/bin/pip
9 | RUN DEBIAN_FRONTEND=noninteractive apt-get install -y python3-tk
10 |
11 | RUN echo "21 May 2019"  # bump this date to invalidate the Docker cache for the layers below
12 |
13 | #########################################
14 | ### Install yass, pinned to release tag 0.1
15 | RUN pip install tensorflow==1.13.0rc1
16 | RUN git clone https://github.com/paninski-lab/yass.git /src/yass && cd /src/yass && git checkout tags/0.1
17 | WORKDIR /src/yass
18 | RUN pip install -r requirements.txt
19 | RUN python setup.py install
20 |
21 | ### spikeextractors
22 | RUN pip install spikeextractors==0.4.2
23 |
24 | ### Other dependencies
25 | RUN pip install requests
--------------------------------------------------------------------------------
/spikeforest/spikeforestsorters/yass1/container/build_simg.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -e
3 |
4 | ../../build_simg_using_docker.sh yass1.simg docker://jamesjun/yass1
5 |
--------------------------------------------------------------------------------
/spikeforest/spikeforestsorters/yass1/container/build_simg_magland.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -e
3 |
4 | ../../build_simg_using_docker.sh yass.simg docker://magland/yass
5 |
--------------------------------------------------------------------------------
/spikeforest/spikeforestsorters/yass1/container/readme.txt:
--------------------------------------------------------------------------------
1 | # first build (if needed)
2 | docker build -t jamesjun/yass1 .
3 |
4 | # then push to docker hub (if needed)
5 | docker push jamesjun/yass1
6 |
7 | # then create singularity image
8 | ./build_simg.sh
9 |
--------------------------------------------------------------------------------
/spikeforest/spikeforestsorters/yass1/container/upload_singularity_container.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | from mountaintools import client as mt
4 | mt.login()
5 | sha1_path = mt.saveFile('yass1.simg', upload_to='spikeforest.kbucket')
6 | print(sha1_path)
7 |
--------------------------------------------------------------------------------
/spikeforest/spikeforestwidgets/__init__.py:
--------------------------------------------------------------------------------
1 | from .timeserieswidget import TimeseriesWidget, precomputeMultiscaleRecordings
2 | from .templatewidget import TemplateWidget
3 | from .featurespacewidget import FeatureSpaceWidget, FeatureSpaceWidgetPlotly
4 | from .electrodegeometrywidget import ElectrodeGeometryWidget
5 | from .unitwaveformswidget import UnitWaveformWidget, UnitWaveformsWidget
6 | from .correlogramswidget import CorrelogramsWidget
7 |
--------------------------------------------------------------------------------
/spikeforest/spikeforestwidgets/featurespacewidget/__init__.py:
--------------------------------------------------------------------------------
1 | from .featurespacewidget import FeatureSpaceWidget
2 | from .featurespacewidget_plotly import FeatureSpaceWidgetPlotly
3 |
--------------------------------------------------------------------------------
/spikeforest/spikeforestwidgets/featurespacewidget/featurespacemodel.js:
--------------------------------------------------------------------------------
1 | window.FeatureSpaceModel=FeatureSpaceModel;
2 |
3 | const Mda = window.Mda;
4 |
5 | function FeatureSpaceModel(X, params) {
6 | var that = this;
7 |
8 | if (!params) params={samplerate:0};
9 |
10 | this.getChannelData = function(ch,t1,t2) {
11 | let ret=[];
12 |     // NOTE: the remainder of this file was truncated in the snapshot;
13 |     // the lines below are a minimal reconstruction of the evident intent
14 |     // (collect the samples of channel ch over the range [t1, t2)).
15 |     for (let t = t1; t < t2; t++) {
16 |       ret.push(X.value(ch, t));  // assumes the Mda value(i1, i2) accessor
17 |     }
18 |     return ret;
19 |   };
20 | }
--------------------------------------------------------------------------------