├── .coveragerc
├── .gitattributes
├── .github
│   └── workflows
│       ├── build.yml
│       ├── keep-alive.yml
│       ├── mirror-ebrains.yml
│       └── test.yml
├── .gitignore
├── .readthedocs.yml
├── .upload_docs.py
├── .zenodo.json
├── ACKNOWLEDGMENTS.md
├── AUTHORS.txt
├── CHANGELOG.rst
├── CITATION.cff
├── CONTRIBUTING.md
├── LGPL.txt
├── LICENSE.txt
├── MANIFEST.in
├── Makefile
├── README.rst
├── bluepyefe
│   ├── __init__.py
│   ├── auto_targets.py
│   ├── cell.py
│   ├── ecode
│   │   ├── DeHyperPol.py
│   │   ├── HyperDePol.py
│   │   ├── SpikeRec.py
│   │   ├── __init__.py
│   │   ├── negCheops.py
│   │   ├── posCheops.py
│   │   ├── ramp.py
│   │   ├── sAHP.py
│   │   ├── sineSpec.py
│   │   ├── step.py
│   │   └── tools.py
│   ├── extract.py
│   ├── igorpy
│   │   └── __init__.py
│   ├── nwbreader.py
│   ├── plotting.py
│   ├── protocol.py
│   ├── reader.py
│   ├── recording.py
│   ├── rheobase.py
│   ├── target.py
│   ├── tools.py
│   └── translate_legacy_config.py
├── docs
│   ├── Makefile
│   └── source
│       ├── _static
│       │   └── bbp.jpg
│       ├── _templates
│       │   └── module.rst
│       ├── api.rst
│       ├── conf.py
│       ├── index.rst
│       └── logo
│           └── BluePyEfeBanner.jpg
├── examples
│   ├── How_to_use_efel_settings.ipynb
│   ├── __init__.py
│   └── example_of_extraction.ipynb
├── pyproject.toml
├── requirements.txt
├── requirements_docs.txt
├── tests
│   ├── __init__.py
│   ├── ecode
│   │   ├── __init__.py
│   │   ├── test_apthresh.py
│   │   └── test_sahp.py
│   ├── exp_data
│   │   ├── B6
│   │   │   ├── B6_Ch0_IDRest_181.ibw
│   │   │   ├── B6_Ch0_IDRest_182.ibw
│   │   │   ├── B6_Ch0_IDRest_183.ibw
│   │   │   ├── B6_Ch0_IDRest_184.ibw
│   │   │   ├── B6_Ch0_IDRest_185.ibw
│   │   │   ├── B6_Ch3_IDRest_181.ibw
│   │   │   ├── B6_Ch3_IDRest_182.ibw
│   │   │   ├── B6_Ch3_IDRest_183.ibw
│   │   │   ├── B6_Ch3_IDRest_184.ibw
│   │   │   └── B6_Ch3_IDRest_185.ibw
│   │   ├── B8
│   │   │   ├── B8_Ch0_IDRest_145.ibw
│   │   │   ├── B8_Ch0_IDRest_146.ibw
│   │   │   ├── B8_Ch0_IDRest_147.ibw
│   │   │   ├── B8_Ch0_IDRest_148.ibw
│   │   │   ├── B8_Ch0_IDRest_149.ibw
│   │   │   ├── B8_Ch3_IDRest_145.ibw
│   │   │   ├── B8_Ch3_IDRest_146.ibw
│   │   │   ├── B8_Ch3_IDRest_147.ibw
│   │   │   ├── B8_Ch3_IDRest_148.ibw
│   │   │   └── B8_Ch3_IDRest_149.ibw
│   │   ├── B95_Ch0_IDRest_107.ibw
│   │   ├── B95_Ch3_IDRest_107.ibw
│   │   ├── csv_lccr
│   │   │   └── dummy
│   │   │       ├── dummy_ch1_cols.txt
│   │   │       └── protocol.txt
│   │   └── hippocampus-portal
│   │       ├── 99111002.nwb
│   │       └── data-provenance.txt
│   ├── test_cell.py
│   ├── test_ecode_tools.py
│   ├── test_efel_settings.py
│   ├── test_extractor.py
│   ├── test_lccr_csv_reader.py
│   ├── test_legacy_config.py
│   ├── test_nwbreader.py
│   ├── test_protocol.py
│   ├── test_recording.py
│   ├── test_target.py
│   └── utils.py
└── tox.ini
/.coveragerc:
--------------------------------------------------------------------------------
1 | [run]
2 | omit = bluepyefe/_version.py
3 |
--------------------------------------------------------------------------------
/.gitattributes:
--------------------------------------------------------------------------------
1 | bluepyefe/_version.py export-subst
2 |
--------------------------------------------------------------------------------
/.github/workflows/build.yml:
--------------------------------------------------------------------------------
1 | name: Build
2 |
3 | on:
4 | push:
5 | branches:
6 | - master
7 | tags:
8 | - '[0-9]+.[0-9]+.[0-9]+'
9 |
10 | jobs:
11 | call-test-workflow:
12 | uses: BlueBrain/BluePyEfe/.github/workflows/test.yml@master
13 |
14 | build-tag-n-publish:
15 | name: Build, tag and publish on PyPI
16 | runs-on: ubuntu-latest
17 | needs: call-test-workflow
18 | permissions:
19 | contents: write
20 | steps:
21 | - uses: actions/checkout@v3
22 | - name: Set up Python 3.10
23 | uses: actions/setup-python@v4
24 | with:
25 | python-version: "3.10"
26 |
27 | - name: Bump version and push tag
28 | uses: anothrNick/github-tag-action@1.64.0
29 | if: ${{ !startsWith(github.ref, 'refs/tags/') }}
30 | id: tag
31 | env:
32 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
33 | WITH_V: false
34 | DEFAULT_BUMP: patch
35 |
36 | - name: Build a source tarball and wheel
37 | run: |
38 | pip install build
39 | python -m build
40 |
41 | - name: Get and store tag from 'Bump version and push tag' step
42 | if: ${{ !startsWith(github.ref, 'refs/tags/') }}
43 | run: echo "TAG_NAME=${{ steps.tag.outputs.new_tag }}" >> $GITHUB_ENV
44 | - name: Get and store tag from triggered tag push
45 | if: ${{ startsWith(github.ref, 'refs/tags/') }}
46 | run: echo "TAG_NAME=${{ github.ref_name }}" >> $GITHUB_ENV
47 |
48 | - name: Release
49 | uses: softprops/action-gh-release@v1
50 | with:
51 | tag_name: ${{ env.TAG_NAME }}
52 | name: ${{ env.TAG_NAME }}
53 | generate_release_notes: true
54 |
55 | - name: Publish package to PyPI
56 | uses: pypa/gh-action-pypi-publish@release/v1
57 | with:
58 | user: __token__
59 | password: ${{ secrets.PYPI_PASSWORD }}
60 |
--------------------------------------------------------------------------------
/.github/workflows/keep-alive.yml:
--------------------------------------------------------------------------------
1 | name: Keep-alive
2 |
3 | on:
4 | schedule:
5 | # Runs every sunday at 3 a.m.
6 | - cron: '0 3 * * SUN'
7 |
8 | jobs:
9 | call-test-workflow:
10 | uses: BlueBrain/BluePyEfe/.github/workflows/test.yml@master
11 |
12 | keep-workflow-alive:
13 | name: Keep workflow alive
14 | runs-on: ubuntu-latest
15 | steps:
16 | - uses: actions/checkout@v2
17 | with:
18 | ref: master
19 |
20 | - name: Get date from 50 days ago
21 | run: |
22 | datethen=`date -d "-50 days" --utc +%FT%TZ`
23 | echo "datelimit=$datethen" >> $GITHUB_ENV
24 | - name: setup git config
25 | if: github.event.repository.pushed_at <= env.datelimit
26 | run: |
27 | # setup the username and email.
28 | git config user.name "Github Actions Keepalive Bot"
29 | git config user.email "<>"
30 | - name: commit IF last commit is older than 50 days
31 | if: github.event.repository.pushed_at <= env.datelimit
32 | run: |
33 | git commit -m "Empty commit to keep the gihub workflows alive" --allow-empty
34 | git push origin master
35 |
--------------------------------------------------------------------------------
/.github/workflows/mirror-ebrains.yml:
--------------------------------------------------------------------------------
1 | name: Mirror to Ebrains
2 |
3 | on:
4 | push:
5 | branches: [ master ]
6 |
7 | jobs:
8 | to_ebrains:
9 | runs-on: ubuntu-latest
10 | steps:
11 | - name: syncmaster
12 | uses: wei/git-sync@v3
13 | with:
14 | source_repo: "BlueBrain/BluePyEfe"
15 | source_branch: "master"
16 | destination_repo: "https://ghpusher:${{ secrets.EBRAINS_GITLAB_ACCESS_TOKEN }}@gitlab.ebrains.eu/BlueBrain/bluepyefe.git"
17 | destination_branch: "master"
18 | - name: synctags
19 | uses: wei/git-sync@v3
20 | with:
21 | source_repo: "BlueBrain/BluePyEfe"
22 | source_branch: "refs/tags/*"
23 | destination_repo: "https://ghpusher:${{ secrets.EBRAINS_GITLAB_ACCESS_TOKEN }}@gitlab.ebrains.eu/BlueBrain/bluepyefe.git"
24 | destination_branch: "refs/tags/*"
--------------------------------------------------------------------------------
/.github/workflows/test.yml:
--------------------------------------------------------------------------------
1 | name: Test
2 |
3 | on:
4 | pull_request:
5 | # allows this workflow to be reusable (e.g. by the build workflow)
6 | workflow_call:
7 |
8 | jobs:
9 | test:
10 | runs-on: ubuntu-latest
11 | strategy:
12 | matrix:
13 | python-version: ["3.9", "3.10", "3.11", "3.12"]
14 |
15 | steps:
16 | - uses: actions/checkout@v2
17 | - name: Set up Python ${{ matrix.python-version }}
18 | uses: actions/setup-python@v2
19 | with:
20 | python-version: ${{ matrix.python-version }}
21 | - name: Install dependencies
22 | run: |
23 | python -m pip install --upgrade pip setuptools
24 | pip install tox tox-gh-actions
25 | - name: Run tox
26 | run: tox
27 | - name: "Upload coverage to Codecov"
28 | uses: codecov/codecov-action@v2
29 | with:
30 | fail_ci_if_error: false
31 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | *.pyc
2 | *.swp
3 | /bluepyefe.egg-info/
4 | load_module
5 | .DS_Store
6 | .tox/
7 | .coverage
8 | .python-version
9 | dist
10 | BluePyEfe.egg-info
11 | /testrun
12 | test_run
13 | .idea/
14 | MouseCells/
15 | .ipynb_checkpoints/
16 | coverage.xml
17 | MouseCells_sAHP/
18 | MouseCells_APThreshold/
19 | tests/exp_data/X/
--------------------------------------------------------------------------------
/.readthedocs.yml:
--------------------------------------------------------------------------------
1 | # .readthedocs.yml
2 | # Read the Docs configuration file
3 | # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details
4 |
5 | # Required
6 | version: 2
7 |
8 | sphinx:
9 | configuration: docs/source/conf.py
10 | fail_on_warning: true
11 |
12 | build:
13 | os: "ubuntu-20.04"
14 | tools:
15 | python: "3.10"
16 |
17 | python:
18 | install:
19 | - method: pip
20 | path: .
21 | - requirements: requirements_docs.txt
22 |
--------------------------------------------------------------------------------
/.upload_docs.py:
--------------------------------------------------------------------------------
1 | #!/bin/env python
2 |
3 | from __future__ import print_function
4 |
5 | import sys
6 | import os
7 | import contextlib
8 | import datetime
9 |
10 | metadata_template = \
11 | """---
12 | packageurl: https://bbpteam.epfl.ch/repository/devpi/bbprelman/dev/bluepyefe
13 | major: {major_version}
14 | description: Get efeatures from experimental data
15 | repository: https://bbpcode.epfl.ch/code/#/admin/projects/sim/BluePyEfe
16 | externaldoc: https://bbpcode.epfl.ch/documentation/#BluePyEfe
17 | updated: {date}
18 | maintainers: Werner Van Geit
19 | name: BluePyEfe
20 | license: BBP-internal-confidential
21 | version: {version}
22 | contributors: Christian Roessert, Werner Van Geit, BBP
23 | minor: {minor_version}
24 | ---
25 | """
26 |
27 |
28 | @contextlib.contextmanager
29 | def cd(dir_name):
30 | """Change directory"""
31 | old_cwd = os.getcwd()
32 | os.chdir(dir_name)
33 | try:
34 | yield
35 | finally:
36 | os.chdir(old_cwd)
37 |
38 |
39 | def main():
40 | """Main"""
41 | doc_dir = sys.argv[1]
42 |
43 | doc_dir = os.path.abspath(doc_dir)
44 |
45 | with cd(doc_dir):
46 | print('Reading BluePyEfe version ...')
47 | import bluepyefe
48 | bluepyefe_version = bluepyefe.__version__
49 | bluepyefe_major_version = bluepyefe_version.split('.')[0]
50 | bluepyefe_minor_version = bluepyefe_version.split('.')[1]
51 | print('BluePyEfe version is: %s' % bluepyefe_version)
52 |
53 | finished_filename = '.doc_version'
54 |
55 | if os.path.exists(finished_filename):
56 | os.remove(finished_filename)
57 |
58 | metadata_filename = 'metadata.md'
59 |
60 | metadata_content = metadata_template.format(
61 | major_version=bluepyefe_major_version,
62 | minor_version=bluepyefe_minor_version,
63 | date=datetime.datetime.now().strftime("%d/%m/%y"),
64 | version=bluepyefe_version)
65 |
66 | print('Created metadata: %s' % metadata_content)
67 |
68 | with open(metadata_filename, 'w') as metadata_file:
69 | metadata_file.write(metadata_content)
70 |
71 | print('Wrote metadata to: %s' % metadata_filename)
72 |
73 | with open(finished_filename, 'w') as finished_file:
74 | finished_file.write(bluepyefe_version)
75 |
76 | print('Wrote doc version info to: %s' % finished_filename)
77 |
78 |
79 | if __name__ == '__main__':
80 | main()
81 |
--------------------------------------------------------------------------------
/.zenodo.json:
--------------------------------------------------------------------------------
1 | {
2 | "title" : "BluePyEfe",
3 | "license": "LGPL-3.0",
4 | "upload_type": "software",
5 | "description": "BluePyEfe aims at easing the process of reading experimental recordings and extracting batches of electrical features from these recordings. To do so, it combines trace reading functions and features extraction functions from the eFel library. BluePyEfe outputs protocols and features files in the format used by BluePyOpt for neuron electrical model building.",
6 | "creators": [
7 | {
8 | "affiliation": "Blue Brain Project, EPFL",
9 | "name": "Rössert, Christian",
10 | "orcid": "0000-0002-4839-2424"
11 | },
12 | {
13 | "affiliation": "Blue Brain Project, EPFL",
14 | "name": "Van Geit, Werner",
15 | "orcid": "0000-0002-2915-720X"
16 | },
17 | {
18 | "affiliation": "Blue Brain Project, EPFL",
19 | "name": "Iavarone, Elisabetta",
20 | "orcid": "0000-0001-5157-247X"
21 | },
22 | {
23 | "affiliation": "Institute of Biophysics (IBF), CNR",
24 | "name": "Bologna, Luca Leonardo",
25 | "orcid": "0000-0002-7280-9285"
26 | },
27 | {
28 | "affiliation": "Blue Brain Project, EPFL",
29 | "name": "Damart, Tanguy",
30 | "orcid": "0000-0003-2175-7304"
31 | },
32 | {
33 | "affiliation": "Blue Brain Project, EPFL",
34 | "name": "Jaquier, Aurélien",
35 | "orcid": "0000-0001-6202-6175"
36 | },
37 | {
38 | "affiliation": "Blue Brain Project, EPFL",
39 | "name": "Mandge, Darshan",
40 | "orcid": "0000-0002-7104-4604"
41 | },
42 | {
43 | "affiliation": "Blue Brain Project, EPFL",
44 | "name": "Tuncel, Anil",
45 | "orcid": "0000-0003-0317-2556"
46 | },
47 | {
48 | "affiliation": "Blue Brain Project, EPFL",
49 | "name": "Kilic, Ilkan",
50 | "orcid": "0009-0004-4234-8766"
51 | },
52 | {
53 | "affiliation": "Blue Brain Project, EPFL",
54 | "name": "Sanin, Aleksei"
55 | },
56 | {
57 | "affiliation": "Centre National de la Recherche Scientifique (CNRS)",
58 | "name": "Davison, Andrew",
59 | "orcid": "0000-0002-4793-7541"
60 | }
61 | ]
62 | }
--------------------------------------------------------------------------------
/ACKNOWLEDGMENTS.md:
--------------------------------------------------------------------------------
1 | This work has been partially funded by the European Union Seventh Framework Program (FP7/2007-2013) under grant agreement no. 604102 (HBP), and by the European Union’s Horizon 2020 Framework Programme for Research and Innovation under the Specific Grant Agreements No. 720270 and 785907 (Human Brain Project SGA1/SGA2).
2 |
--------------------------------------------------------------------------------
/AUTHORS.txt:
--------------------------------------------------------------------------------
1 | Christian Rössert @ BBP
2 | Werner Van Geit @ BBP
3 | Elisabetta Iavarone @ BBP
4 | Luca Leonardo Bologna @ IBF
5 | Tanguy Damart @ BBP
6 | Aurélien Jaquier @ BBP
7 | Darshan Mandge @ BBP
8 | Anil Tuncel @ BBP
9 | Ilkan Kilic @ BBP
10 | Aleksei Sanin @ BBP
11 | Andrew Davison @ CNRS
12 |
--------------------------------------------------------------------------------
/CHANGELOG.rst:
--------------------------------------------------------------------------------
1 |
2 | BluePyEfe Change Log
3 | =====================
4 |
5 |
6 | v2.0.0
7 | -------
8 |
9 | **Architectural changes**:
10 |
11 | * Restructuring of the code into class-based code that can be used as an API
12 | * Implementation of a hierarchy for the handling of the metadata associated with each trace
13 | * Implementation of the extraction of features for non-step protocols
14 | * BluePyEfe can now output the protocols as time series that can be read by BluePyOpt
15 | * Changes in the structure of the input config dictionary
16 | * Changes in the plotting functions for both features and traces
17 |
18 | **Changes in the way the mean and standard deviations are computed**:
19 |
20 | * The mean and std for a feature are saved if the number of data points for this target +- tolerance is above the threshold_nvalue_save option. NaNs are not taken into account when comparing the number of points with threshold_nvalue_save.
21 | * Any cell for which the rheobase could not be computed is not used when computing the mean or std of the features. In this case, it is however possible to set this rheobase value by hand before calling the mean_efeature function.
22 | * Instead of saving protocols and efeatures having NaNs as mean or std, BPE2 removes them and issues a warning. If, for a given protocol/target, there are no non-NaN values, then the protocol is not saved at all (a warning is issued as well).
23 |
24 | **Implementation of an automatic step detection when ton/toff/tend/amp is not known**:
25 |
26 | * An automatic step detection has been implemented for some of the simple eCodes (see bluepyefe/ecode/);
27 |   it outputs ton, toff, tend, amp and hypamp.
28 |   This automatic detection only works when the signal-to-noise ratio of the stimuli is good enough.
29 |   Therefore, before exploiting the efeatures, the user should check that the automatic step detection has indeed found the correct step.
30 |   This can be checked by plotting the real current data on top of the reconstruction of the current resulting from the step detection, as in the sketch below.
31 |   For the non-step protocols, the timing information needs to be provided by the user.
32 |
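As an illustration of that check, a minimal sketch is given below. It assumes ``rec`` is one of the eCode recording objects produced by the extraction (hypothetical variable name) and that matplotlib is available; it relies only on the ``t`` and ``current`` attributes and the ``generate()`` method of the eCode classes in bluepyefe/ecode/.

.. code-block:: python

    import matplotlib.pyplot as plt

    # Rebuild the stimulus from the parameters found by the automatic step detection
    t_rebuilt, current_rebuilt = rec.generate()

    # Overlay the experimental current and its reconstruction to validate the detection
    plt.plot(rec.t, rec.current, label="experimental current")
    plt.plot(t_rebuilt, current_rebuilt, "--", label="reconstructed stimulus")
    plt.legend()
    plt.show()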
--------------------------------------------------------------------------------
/CITATION.cff:
--------------------------------------------------------------------------------
1 | cff-version: 1.2.0
2 | message: "If you use this software, please cite it as below."
3 | title: "BluePyEfe"
4 | doi: 10.5281/zenodo.3728191
5 | url: https://doi.org/10.5281/zenodo.3728191
6 | abstract: "BluePyEfe aims at easing the process of reading experimental recordings and extracting batches of electrical features from these recordings. To do so, it combines trace reading functions and features extraction functions from the eFel library. BluePyEfe outputs protocols and features files in the format used by BluePyOpt for neuron electrical model building."
7 | authors:
8 | - family-names: "Rössert"
9 | given-names: "Christian"
10 | - family-names: "Van Geit"
11 | given-names: "Werner"
12 | - family-names: "Iavarone"
13 | given-names: "Elisabetta"
14 | - family-names: "Bologna"
15 | given-names: "Luca Leonardo"
16 | - family-names: "Damart"
17 | given-names: "Tanguy"
18 | - family-names: "Jaquier"
19 | given-names: "Aurélien"
20 | - family-names: "Mandge"
21 | given-names: "Darshan"
22 | - family-names: "Tuncel"
23 | given-names: "Anil"
24 | - family-names: "Kilic"
25 | given-names: "Ilkan"
26 | - family-names: "Sanin"
27 | given-names: "Aleksei"
28 | - family-names: "Davison"
29 | given-names: "Andrew"
30 | date-released: 2020-03-01
31 | publisher: "Zenodo"
32 |
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Contributing to BluePyEfe
2 |
3 | We would love for you to contribute to BluePyEfe and help make it even better than it is today! As a
4 | contributor, here are the guidelines we would like you to follow:
5 | - [Question or Problem?](#question)
6 | - [Issues and Bugs](#issue)
7 | - [Feature Requests](#feature)
8 | - [Submission Guidelines](#submit)
9 | - [Signing the CLA](#cla)
10 |
11 | ## Got a Question or Problem?
12 |
13 | Please do not hesitate to contact us on [Gitter](https://gitter.im/bluebrain/bluepyefe).
14 |
15 | ## Found a Bug?
16 |
17 | If you find a bug in the source code, you can help us by [submitting an issue](#submit-issue) to our
18 | [GitHub Repository][github]. Even better, you can [submit a Pull Request](#submit-pr) with a fix.
19 |
20 | ## Missing a Feature?
21 |
22 | You can *request* a new feature by [submitting an issue](#submit-issue) to our GitHub Repository. If you would like to
23 | *implement* a new feature, please submit an issue with a proposal for your work first, to be sure that we can use it.
24 |
25 | ## Submission Guidelines
26 |
27 | ### Submitting an Issue
28 |
29 | Before you submit an issue, please search the issue tracker: an issue for your problem may already exist and the
30 | discussion might inform you of readily available workarounds.
31 |
32 | We want to fix all the issues as soon as possible, but before fixing a bug we need to reproduce and confirm it. In order
33 | to reproduce bugs we will need as much information as possible, and preferably be in touch with you to gather
34 | information.
35 |
36 | ### Submitting a Pull Request (PR)
37 |
38 | When you wish to contribute to the code base, please consider the following guidelines:
39 | * Make a [fork](https://guides.github.com/activities/forking/) of this repository.
40 | * Make your changes in your fork, in a new git branch:
41 | ```shell
42 | git checkout -b my-fix-branch master
43 | ```
44 | * Create your patch, ideally including appropriate test cases.
45 | * Run the full test suite, and ensure that all tests pass.
46 | * Commit your changes using a descriptive commit message.
47 | ```shell
48 | git commit -a
49 | ```
50 | Note: the optional commit `-a` command line option will automatically “add” and “rm” edited files.
51 | * Push your branch to GitHub:
52 | ```shell
53 | git push origin my-fix-branch
54 | ```
55 | * In GitHub, send a Pull Request to the `master` branch of the upstream repository of the relevant component.
56 | * If we suggest changes then:
57 | * Make the required updates.
58 | * Re-run the test suites to ensure tests are still passing.
59 | * Rebase your branch and force push to your GitHub repository (this will update your Pull Request):
60 | ```shell
61 | git rebase master -i
62 | git push -f
63 | ```
64 | That’s it! Thank you for your contribution!
65 |
66 | #### After your pull request is merged
67 |
68 | After your pull request is merged, you can safely delete your branch and pull the changes from the main (upstream)
69 | repository:
70 | * Delete the remote branch on GitHub either through the GitHub web UI or your local shell as follows:
71 | ```shell
72 | git push origin --delete my-fix-branch
73 | ```
74 | * Check out the master branch:
75 | ```shell
76 | git checkout master -f
77 | ```
78 | * Delete the local branch:
79 | ```shell
80 | git branch -D my-fix-branch
81 | ```
82 | * Update your master with the latest upstream version:
83 | ```shell
84 | git pull --ff upstream master
85 | ```
86 | [github]: https://github.com/BlueBrain/BluePyEfe
87 |
--------------------------------------------------------------------------------
/LGPL.txt:
--------------------------------------------------------------------------------
1 | GNU LESSER GENERAL PUBLIC LICENSE
2 | Version 3, 29 June 2007
3 |
4 | Copyright (C) 2007 Free Software Foundation, Inc.
5 | Everyone is permitted to copy and distribute verbatim copies
6 | of this license document, but changing it is not allowed.
7 |
8 |
9 | This version of the GNU Lesser General Public License incorporates
10 | the terms and conditions of version 3 of the GNU General Public
11 | License, supplemented by the additional permissions listed below.
12 |
13 | 0. Additional Definitions.
14 |
15 | As used herein, "this License" refers to version 3 of the GNU Lesser
16 | General Public License, and the "GNU GPL" refers to version 3 of the GNU
17 | General Public License.
18 |
19 | "The Library" refers to a covered work governed by this License,
20 | other than an Application or a Combined Work as defined below.
21 |
22 | An "Application" is any work that makes use of an interface provided
23 | by the Library, but which is not otherwise based on the Library.
24 | Defining a subclass of a class defined by the Library is deemed a mode
25 | of using an interface provided by the Library.
26 |
27 | A "Combined Work" is a work produced by combining or linking an
28 | Application with the Library. The particular version of the Library
29 | with which the Combined Work was made is also called the "Linked
30 | Version".
31 |
32 | The "Minimal Corresponding Source" for a Combined Work means the
33 | Corresponding Source for the Combined Work, excluding any source code
34 | for portions of the Combined Work that, considered in isolation, are
35 | based on the Application, and not on the Linked Version.
36 |
37 | The "Corresponding Application Code" for a Combined Work means the
38 | object code and/or source code for the Application, including any data
39 | and utility programs needed for reproducing the Combined Work from the
40 | Application, but excluding the System Libraries of the Combined Work.
41 |
42 | 1. Exception to Section 3 of the GNU GPL.
43 |
44 | You may convey a covered work under sections 3 and 4 of this License
45 | without being bound by section 3 of the GNU GPL.
46 |
47 | 2. Conveying Modified Versions.
48 |
49 | If you modify a copy of the Library, and, in your modifications, a
50 | facility refers to a function or data to be supplied by an Application
51 | that uses the facility (other than as an argument passed when the
52 | facility is invoked), then you may convey a copy of the modified
53 | version:
54 |
55 | a) under this License, provided that you make a good faith effort to
56 | ensure that, in the event an Application does not supply the
57 | function or data, the facility still operates, and performs
58 | whatever part of its purpose remains meaningful, or
59 |
60 | b) under the GNU GPL, with none of the additional permissions of
61 | this License applicable to that copy.
62 |
63 | 3. Object Code Incorporating Material from Library Header Files.
64 |
65 | The object code form of an Application may incorporate material from
66 | a header file that is part of the Library. You may convey such object
67 | code under terms of your choice, provided that, if the incorporated
68 | material is not limited to numerical parameters, data structure
69 | layouts and accessors, or small macros, inline functions and templates
70 | (ten or fewer lines in length), you do both of the following:
71 |
72 | a) Give prominent notice with each copy of the object code that the
73 | Library is used in it and that the Library and its use are
74 | covered by this License.
75 |
76 | b) Accompany the object code with a copy of the GNU GPL and this license
77 | document.
78 |
79 | 4. Combined Works.
80 |
81 | You may convey a Combined Work under terms of your choice that,
82 | taken together, effectively do not restrict modification of the
83 | portions of the Library contained in the Combined Work and reverse
84 | engineering for debugging such modifications, if you also do each of
85 | the following:
86 |
87 | a) Give prominent notice with each copy of the Combined Work that
88 | the Library is used in it and that the Library and its use are
89 | covered by this License.
90 |
91 | b) Accompany the Combined Work with a copy of the GNU GPL and this license
92 | document.
93 |
94 | c) For a Combined Work that displays copyright notices during
95 | execution, include the copyright notice for the Library among
96 | these notices, as well as a reference directing the user to the
97 | copies of the GNU GPL and this license document.
98 |
99 | d) Do one of the following:
100 |
101 | 0) Convey the Minimal Corresponding Source under the terms of this
102 | License, and the Corresponding Application Code in a form
103 | suitable for, and under terms that permit, the user to
104 | recombine or relink the Application with a modified version of
105 | the Linked Version to produce a modified Combined Work, in the
106 | manner specified by section 6 of the GNU GPL for conveying
107 | Corresponding Source.
108 |
109 | 1) Use a suitable shared library mechanism for linking with the
110 | Library. A suitable mechanism is one that (a) uses at run time
111 | a copy of the Library already present on the user's computer
112 | system, and (b) will operate properly with a modified version
113 | of the Library that is interface-compatible with the Linked
114 | Version.
115 |
116 | e) Provide Installation Information, but only if you would otherwise
117 | be required to provide such information under section 6 of the
118 | GNU GPL, and only to the extent that such information is
119 | necessary to install and execute a modified version of the
120 | Combined Work produced by recombining or relinking the
121 | Application with a modified version of the Linked Version. (If
122 | you use option 4d0, the Installation Information must accompany
123 | the Minimal Corresponding Source and Corresponding Application
124 | Code. If you use option 4d1, you must provide the Installation
125 | Information in the manner specified by section 6 of the GNU GPL
126 | for conveying Corresponding Source.)
127 |
128 | 5. Combined Libraries.
129 |
130 | You may place library facilities that are a work based on the
131 | Library side by side in a single library together with other library
132 | facilities that are not Applications and are not covered by this
133 | License, and convey such a combined library under terms of your
134 | choice, if you do both of the following:
135 |
136 | a) Accompany the combined library with a copy of the same work based
137 | on the Library, uncombined with any other library facilities,
138 | conveyed under the terms of this License.
139 |
140 | b) Give prominent notice with the combined library that part of it
141 | is a work based on the Library, and explaining where to find the
142 | accompanying uncombined form of the same work.
143 |
144 | 6. Revised Versions of the GNU Lesser General Public License.
145 |
146 | The Free Software Foundation may publish revised and/or new versions
147 | of the GNU Lesser General Public License from time to time. Such new
148 | versions will be similar in spirit to the present version, but may
149 | differ in detail to address new problems or concerns.
150 |
151 | Each version is given a distinguishing version number. If the
152 | Library as you received it specifies that a certain numbered version
153 | of the GNU Lesser General Public License "or any later version"
154 | applies to it, you have the option of following the terms and
155 | conditions either of that published version or of any later version
156 | published by the Free Software Foundation. If the Library as you
157 | received it does not specify a version number of the GNU Lesser
158 | General Public License, you may choose any version of the GNU Lesser
159 | General Public License ever published by the Free Software Foundation.
160 |
161 | If the Library as you received it specifies that a proxy can decide
162 | whether future versions of the GNU Lesser General Public License shall
163 | apply, that proxy's public statement of acceptance of any version is
164 | permanent authorization for you to choose that version for the
165 | Library.
166 |
--------------------------------------------------------------------------------
/LICENSE.txt:
--------------------------------------------------------------------------------
1 | BluePyEfe - Bluebrain Python E-feature extraction Library
2 |
3 | BluePyEfe is licensed under the LGPL, unless noted otherwise, e.g., for external
4 | dependencies. See file LGPL.txt for the full license.
5 | Examples and tests are BSD-licensed.
6 | External dependencies are either LGPL or BSD-licensed.
7 | See file AUTHORS.txt for further details.
8 |
9 | Copyright (C) 2005-2024, Blue Brain Project/EPFL.
10 |
11 | This program is free software: you can redistribute it and/or modify it under
12 | the terms of the GNU Lesser General Public License as published by the
13 | Free Software Foundation, either version 3 of the License, or (at your option)
14 | any later version.
15 |
16 | This program is distributed in the hope that it will be useful,
17 | but WITHOUT ANY WARRANTY;
18 | without even the implied warranty of
19 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
20 |
21 | See the GNU Lesser General Public License for more details.
22 |
23 | You should have received a copy of the GNU Lesser General Public License
24 | along with this program. If not, see <https://www.gnu.org/licenses/>.
25 |
--------------------------------------------------------------------------------
/MANIFEST.in:
--------------------------------------------------------------------------------
1 | include versioneer.py
2 | include bluepyefe/_version.py
3 | include bluepyefe/tools/configs/*.json
4 | include bluepyefe/formats/configs/*.json
5 |
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | test: clean install_tox
2 | tox -v
3 | install_tox:
4 | pip install tox
5 | clean:
6 | @find . -name "*.pyc" -exec rm -rf {} \;
7 | rm -rf BluePyEfe.egg-info
8 | rm -rf dist
9 | rm -rf testtype*
10 | rm -rf temptype*
11 | doc: clean install_tox
12 | tox -v -e py3-docs
13 |
--------------------------------------------------------------------------------
/README.rst:
--------------------------------------------------------------------------------
1 | .. warning::
2 | The Blue Brain Project concluded in December 2024, so development has ceased under the BlueBrain GitHub organization.
3 | Future development will take place at: https://github.com/openbraininstitute/BluePyEfe
4 |
5 | |banner|
6 |
7 | -----------------
8 |
9 | BluePyEfe: Blue Brain Python E-feature extraction
10 | =================================================
11 |
12 | +----------------+------------+
13 | | Latest Release | |pypi| |
14 | +----------------+------------+
15 | | Documentation | |docs| |
16 | +----------------+------------+
17 | | License | |license| |
18 | +----------------+------------+
19 | | Build Status | |tests| |
20 | +----------------+------------+
21 | | Coverage | |coverage| |
22 | +----------------+------------+
23 | | Citation | |zenodo| |
24 | +----------------+------------+
25 | | Gitter | |gitter| |
26 | +----------------+------------+
27 |
28 |
29 | Introduction
30 | ============
31 |
32 | BluePyEfe aims at easing the process of reading experimental recordings and extracting
33 | batches of electrical features from these recordings. To do so, it combines trace reading
34 | functions and features extraction functions from the `eFel library `_.
35 |
36 | BluePyEfe outputs protocols and features files in the format used
37 | by `BluePyOpt `_ for neuron electrical model building.
38 |
39 | How to cite
40 | ===========
41 | If you use this repository in your work, please refer to the "Cite this repository" button at the top of the repository page to get various citation formats, including APA and BibTeX.
42 |
43 | For detailed citation information, please refer to the `CITATION.cff <./CITATION.cff>`_ file.
44 |
45 | Requirements
46 | ============
47 |
48 | * `Python 3.9+ `_
49 | * `eFEL eFeature Extraction Library `_ (automatically installed by pip)
50 | * `Numpy `_ (automatically installed by pip)
51 | * `Scipy `_ (automatically installed by pip)
52 | * `Neo `_ (automatically installed by pip)
53 | * The instructions below are written assuming you have access to a command shell on Linux / UNIX / MacOSX / Cygwin
54 |
55 | Installation
56 | ============
57 |
58 | To install BluePyEfe, run:
59 |
60 | .. code-block:: bash
61 |
62 | pip install bluepyefe
63 |
64 |
65 | Quick Start and Operating Principle
66 | ===================================
67 |
68 | For a hands-on introduction to BluePyEfe, have a look at the notebook `examples/example_of_extraction.ipynb `_
69 |
70 | The goal of the present package is to extract meaningful electrophysiological features (e-features) from voltage time series.
71 | The e-features considered in the present package are the ones implemented in the `eFEL python library `_. See `this pdf `_ for a list of available e-features.
72 |
73 | The present package makes one major assumption: e-features are more meaningful if they come from a set of traces rather than from a single trace, and they are even more meaningful if these traces come from different cells of the same cellular type.
74 | This assumption dictates the organisation of the package and has several consequences:
75 |
76 | The efeatures extracted through the package will always be averaged over the trace considered. For example, the AP_amplitude will be an average over all the action potentials present in a trace. If you wish to work on an AP by AP basis, please consider using the eFEL library directly.
77 |
78 | A large part of the present software is therefore dedicated to averaging the features across sets of "equivalent" recordings. To be able to average e-features across different cells in a meaningful way, an equivalence must be established between the traces coming from these different cells. It would not make sense to average the mean firing frequency obtained for cell A on a 1s long step protocol with the one obtained for cell B on a ramp protocol that lasts for 500ms. We chose to define recordings as equivalent based on two criteria: (1) They have the same name and (2) they are of the same amplitude when the amplitude is expressed as a percentage of the rheobase of the cell.
79 |
80 | A pseudo-code for the main function of the package (bluepyefe.extract.extract_efeatures) could look as follows:
81 |
82 | #. Load the data to memory by reading all the files containing the traces
83 | #. Extract the required e-features for all the traces
84 | #. Compute the rheobases of the cells based on one or several protocols
85 | Use these rheobases to associate with each protocol an amplitude expressed in % of the rheobase
86 | #. Compute the mean and standard deviations for the e-features across traces having the same amplitude
87 | #. Save the results and plot the traces and e-features
88 |
89 | Each of these steps is parametrized by a number of settings; we therefore recommend that you read the docstring of the function carefully. A minimal call is sketched below.
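The following sketch is illustrative only: the argument names (``output_directory``, ``files_metadata``, ``threshold_nvalue_save``, ``protocols_rheobase``) and the nested structure of the file descriptions are assumptions to be checked against the docstring of ``bluepyefe.extract.extract_efeatures`` and the example notebook, and the file paths are hypothetical.

.. code-block:: python

    from bluepyefe.extract import extract_efeatures

    # One entry per cell and per protocol, listing the recording files and their units
    # (hypothetical file and layout; see the example notebook for a real configuration).
    files_metadata = {
        "cell_A": {
            "IDRest": [
                {"filepath": "./cell_A_IDRest_1.ibw",
                 "i_unit": "pA", "t_unit": "s", "v_unit": "mV"}
            ]
        }
    }

    # Each target: an e-feature, the protocol it is computed on, the amplitude
    # (in % of the rheobase) and the tolerance used to bin recordings around it.
    targets = [
        {"efeature": "mean_frequency", "protocol": "IDRest",
         "amplitude": 150, "tolerance": 10},
        {"efeature": "AP_amplitude", "protocol": "IDRest",
         "amplitude": 150, "tolerance": 10},
    ]

    extract_efeatures(
        output_directory="./efeatures_output",  # where the protocols and features files are written
        files_metadata=files_metadata,
        targets=targets,
        threshold_nvalue_save=1,                # minimum number of values required to save a feature
        protocols_rheobase=["IDRest"],          # protocol(s) used to compute the rheobase
    )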
90 |
91 | Coming from the legacy version
92 | ==============================
93 | The legacy version (v0.4*) has been moved to the legacy branch.
94 | Changes introduced in v2.0.0 are listed in the `CHANGELOG.rst `_.
95 | That is the only file you need to check for the changes, as future changes will also be noted there.
96 |
97 | Funding
98 | =======
99 | This work has been partially funded by the European Union Seventh Framework Program (FP7/2007-2013) under grant agreement no. 604102 (HBP), and by the European Union’s Horizon 2020 Framework Programme for Research and Innovation under the Specific Grant Agreements No. 720270 (Human Brain Project SGA1) and No. 785907 (Human Brain Project SGA2), and by the EBRAINS research infrastructure, funded from the European Union’s Horizon 2020 Framework Programme for Research and Innovation under the Specific Grant Agreement No. 945539 (Human Brain Project SGA3).
100 |
101 | Copyright (c) 2016-2024 Blue Brain Project/EPFL
102 |
103 | .. |pypi| image:: https://img.shields.io/pypi/v/bluepyefe.svg
104 | :target: https://pypi.org/project/bluepyefe/
105 | :alt: latest release
106 | .. |docs| image:: https://readthedocs.org/projects/bluepyefe/badge/?version=latest
107 | :target: https://bluepyefe.readthedocs.io/
108 | :alt: latest documentation
109 | .. |license| image:: https://img.shields.io/pypi/l/bluepyefe.svg
110 | :target: https://github.com/BlueBrain/bluepyefe/blob/master/LICENSE.txt
111 | :alt: license
112 | .. |tests| image:: https://github.com/BlueBrain/BluePyEfe/workflows/Build/badge.svg?branch=master
113 | :target: https://github.com/BlueBrain/BluePyEfe/actions
114 | :alt: Actions build status
115 | .. |coverage| image:: https://codecov.io/github/BlueBrain/BluePyEfe/coverage.svg?branch=master
116 | :target: https://codecov.io/gh/BlueBrain/BluePyEfe
117 | :alt: coverage
118 | .. |gitter| image:: https://badges.gitter.im/Join%20Chat.svg
119 | :target: https://gitter.im/bluebrain/bluepyefe
120 | :alt: gitter
121 | .. |zenodo| image:: https://zenodo.org/badge/DOI/10.5281/zenodo.3728191.svg
122 | :target: https://doi.org/10.5281/zenodo.3728191
123 | :alt: DOI
124 |
125 | ..
126 | The following image is also defined in the index.rst file, as the relative path is
127 | different, depending from where it is sourced.
128 | The following location is used for the github README
129 | The index.rst location is used for the docs README; index.rst also defined an end-marker,
130 | to skip content after the marker 'substitutions'.
131 |
132 | .. substitutions
133 | .. |banner| image:: https://raw.githubusercontent.com/BlueBrain/BluePyEfe/master/docs/source/logo/BluePyEfeBanner.jpg
134 |
135 |
--------------------------------------------------------------------------------
/bluepyefe/__init__.py:
--------------------------------------------------------------------------------
1 | """Init script"""
2 |
3 | """
4 | Copyright (c) 2022, EPFL/Blue Brain Project
5 |
6 | This file is part of BluePyEfe
7 |
8 | This library is free software; you can redistribute it and/or modify it under
9 | the terms of the GNU Lesser General Public License version 3.0 as published
10 | by the Free Software Foundation.
11 |
12 | This library is distributed in the hope that it will be useful, but WITHOUT
13 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
14 | FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
15 | details.
16 |
17 | You should have received a copy of the GNU Lesser General Public License
18 | along with this library; if not, write to the Free Software Foundation, Inc.,
19 | 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
20 | """
21 |
--------------------------------------------------------------------------------
/bluepyefe/auto_targets.py:
--------------------------------------------------------------------------------
1 | """class AutoTarget"""
2 |
3 | """
4 | Copyright (c) 2021, EPFL/Blue Brain Project
5 |
6 | This file is part of BluePyEfe
7 |
8 | This library is free software; you can redistribute it and/or modify it under
9 | the terms of the GNU Lesser General Public License version 3.0 as published
10 | by the Free Software Foundation.
11 |
12 | This library is distributed in the hope that it will be useful, but WITHOUT
13 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
14 | FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
15 | details.
16 |
17 | You should have received a copy of the GNU Lesser General Public License
18 | along with this library; if not, write to the Free Software Foundation, Inc.,
19 | 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
20 | """
21 |
22 |
23 | class AutoTarget():
24 |
25 | """Fuzzy targets defined by lists of ecodes, amplitudes and efeatures.
26 | The AutoTarget will try to find the best combination of ecodes, amplitudes
27 | and efeatures that match the data available in the recordings."""
28 |
29 | def __init__(
30 | self,
31 | protocols,
32 | amplitudes,
33 | efeatures,
34 | min_recordings_per_amplitude=10,
35 | preferred_number_protocols=1,
36 | tolerance=10,
37 | ):
38 | """
39 | Extract efeatures.
40 |
41 | Args:
42 | protocols (list of str): list of eCodes, by order of priority.
43 | amplitudes (list of int): list of amplitudes, expressed as
44 | percentages of the rheobase, by order of priority
45 |             efeatures (list of str): list of efeatures.
46 | min_recordings_per_amplitude (int): minimum number of recordings
47 |                 an amplitude should have to be considered as a target.
48 | preferred_number_protocols (int): out of all the available
49 | amplitudes, how many should be used.
50 | tolerance (float): tolerance used to bin the efeatures around the
51 | target amplitudes.
52 | """
53 |
54 | self.efeatures = efeatures
55 | self.protocols = protocols
56 | self.amplitudes = amplitudes
57 |
58 | self.min_recordings_per_amplitude = min_recordings_per_amplitude
59 | self.preferred_number_protocols = preferred_number_protocols
60 | self.tolerance = tolerance
61 |
62 | self.active_amplitudes = []
63 | self.active_ecode = []
64 |
65 | def select_ecode_and_amplitude(self, recordings):
66 | """Based on what ephys data is available, builds an effective (active)
67 | set of targets."""
68 |
69 | for ecode in self.protocols:
70 |
71 | available_amps = []
72 | for r in recordings:
73 | if r.protocol_name == ecode and r.amp_rel is not None:
74 | available_amps.append(r.amp_rel)
75 |
76 | for amp in self.amplitudes:
77 | n_available_rec = sum(
78 | abs(a - amp) < self.tolerance for a in available_amps
79 | )
80 | if n_available_rec >= self.min_recordings_per_amplitude:
81 | self.active_ecode.append(ecode)
82 | self.active_amplitudes.append(amp)
83 |
84 | if (
85 | self.active_ecode and
86 | len(self.active_ecode) > self.preferred_number_protocols
87 | ):
88 | self.active_ecode = self.active_ecode[
89 | :self.preferred_number_protocols]
90 | self.active_amplitudes = self.active_amplitudes[
91 | :self.preferred_number_protocols]
92 |
93 | def is_valid(self):
94 | """Check if the present AutoTarget has active targets (if matching
95 | ephys data were found)"""
96 |
97 | return bool(self.active_amplitudes) and bool(self.active_ecode)
98 |
99 | def generate_targets(self):
100 | """Build a list of targets in the format expected by the main
101 | extraction function of BluePyEfe using the targets presently
102 | active."""
103 |
104 | targets = []
105 |
106 | for amp, protocol_name in zip(
107 | self.active_amplitudes, self.active_ecode):
108 | for efeature in self.efeatures:
109 | targets.append({
110 | "efeature": efeature,
111 | "protocol": protocol_name,
112 | "amplitude": amp,
113 | "tolerance": self.tolerance,
114 | })
115 |
116 | return targets
117 |
118 |
119 | def default_auto_targets():
120 | """Define a set of 3 generic AutoTarget for firing pattern properties,
121 | AP waveform properties and hyperpolarizing step properties."""
122 |
123 | auto_firing_pattern = AutoTarget(
124 | protocols=["Step", "FirePattern", "IDrest", "IDRest", "IDthresh",
125 | "IDThresh", "IDThres", "IDthres", "IV"],
126 | amplitudes=[200, 150, 250, 300],
127 | efeatures=['voltage_base',
128 | 'adaptation_index2',
129 | 'mean_frequency',
130 | 'time_to_first_spike',
131 | 'time_to_last_spike',
132 | 'inv_first_ISI',
133 | 'inv_second_ISI',
134 | 'inv_third_ISI',
135 | 'inv_fourth_ISI',
136 | 'inv_fifth_ISI',
137 | 'inv_last_ISI',
138 | 'ISI_CV',
139 | 'ISI_log_slope',
140 | 'doublet_ISI',
141 | 'AP_amplitude',
142 | 'AP1_amp',
143 | 'APlast_amp',
144 | 'AHP_depth',
145 | 'AHP_time_from_peak'],
146 | min_recordings_per_amplitude=1,
147 | preferred_number_protocols=2,
148 | tolerance=25.,
149 | )
150 |
151 | auto_ap_waveform = AutoTarget(
152 | protocols=["APWaveform", "APwaveform", "Step", "FirePattern",
153 | "IDrest", "IDRest", "IDthresh", "IDThresh", "IDThres",
154 | "IDthres", "IV"],
155 | amplitudes=[300, 350, 250, 400, 200],
156 | efeatures=["AP_amplitude",
157 | "AP1_amp",
158 | "AP2_amp",
159 | 'AP_width',
160 | "AP_duration_half_width",
161 | "AP_rise_time",
162 | "AP_fall_time",
163 | 'AHP_depth_abs',
164 | 'AHP_time_from_peak',
165 | "AHP_depth"],
166 | min_recordings_per_amplitude=1,
167 | preferred_number_protocols=1,
168 | tolerance=25.,
169 | )
170 |
171 | auto_iv = AutoTarget(
172 | protocols=["IV", "Step"],
173 | amplitudes=[-50, -100],
174 | efeatures=['voltage_base',
175 | 'steady_state_voltage_stimend',
176 | 'ohmic_input_resistance_vb_ssse',
177 | 'voltage_deflection',
178 | 'voltage_deflection_begin',
179 | 'decay_time_constant_after_stim',
180 | 'Spikecount',
181 | 'sag_ratio1',
182 | 'sag_ratio2',
183 | 'sag_amplitude',
184 | 'sag_time_constant'],
185 | min_recordings_per_amplitude=1,
186 | preferred_number_protocols=1,
187 | tolerance=10.,
188 | )
189 |
190 | return [auto_firing_pattern, auto_ap_waveform, auto_iv]
191 |
--------------------------------------------------------------------------------
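A brief usage sketch for the AutoTarget class above (illustrative only: "recordings" stands for a list of recording objects produced elsewhere by the extraction pipeline, each exposing protocol_name and amp_rel as assumed by select_ecode_and_amplitude; the protocol, amplitude and e-feature values are arbitrary examples):

    from bluepyefe.auto_targets import AutoTarget

    auto_target = AutoTarget(
        protocols=["IDRest", "Step"],                  # eCodes, by order of priority
        amplitudes=[150, 200],                         # in % of rheobase, by order of priority
        efeatures=["mean_frequency", "AP_amplitude"],
        min_recordings_per_amplitude=1,
        preferred_number_protocols=1,
        tolerance=20.0,
    )

    # Keep only the ecode/amplitude pairs for which enough recordings are available
    auto_target.select_ecode_and_amplitude(recordings)

    if auto_target.is_valid():
        # List of {"efeature", "protocol", "amplitude", "tolerance"} dicts, in the
        # format expected by the main extraction function of BluePyEfe
        targets = auto_target.generate_targets()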
/bluepyefe/ecode/DeHyperPol.py:
--------------------------------------------------------------------------------
1 | """DeHyperPol eCode"""
2 |
3 | """
4 | Copyright (c) 2022, EPFL/Blue Brain Project
5 |
6 | This file is part of BluePyEfe
7 |
8 | This library is free software; you can redistribute it and/or modify it under
9 | the terms of the GNU Lesser General Public License version 3.0 as published
10 | by the Free Software Foundation.
11 |
12 | This library is distributed in the hope that it will be useful, but WITHOUT
13 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
14 | FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
15 | details.
16 |
17 | You should have received a copy of the GNU Lesser General Public License
18 | along with this library; if not, write to the Free Software Foundation, Inc.,
19 | 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
20 | """
21 | import logging
22 | import numpy
23 |
24 | from ..recording import Recording
25 | from .tools import scipy_signal2d
26 |
27 | logger = logging.getLogger(__name__)
28 |
29 |
30 | class DeHyperPol(Recording):
31 |
32 | """DeHyperpol current stimulus
33 |
34 |     The depolarizing step is usually fixed at 150% of rheobase, and the hyperpolarizing step
35 | can usually vary from -40% to -160% of rheobase.
36 |
37 | .. code-block:: none
38 |
39 | hypamp hypamp+amp hypamp+amp2 hypamp
40 | : : : :
41 | : _________________ : :
42 | : | | : :
43 | : | | : :
44 | |_______________| | : ___________
45 | ^ ^ | : | ^
46 | : : | : | :
47 | : : | : | :
48 | : : |____________| :
49 | : : ^ ^ :
50 | : : : : :
51 | : : : : :
52 | t=0 ton tmid toff tend
53 | """
54 |
55 | def __init__(
56 | self,
57 | config_data,
58 | reader_data,
59 | protocol_name="DeHyperPol",
60 | efel_settings=None
61 | ):
62 |
63 | super(DeHyperPol, self).__init__(
64 | config_data, reader_data, protocol_name
65 | )
66 |
67 | self.ton = None
68 | self.tmid = None
69 | self.toff = None
70 | self.tend = None
71 | self.amp = None
72 | self.amp2 = None
73 | self.hypamp = None
74 | self.dt = None
75 |
76 | self.amp_rel = None
77 | self.amp2_rel = None
78 | self.hypamp_rel = None
79 |
80 | if self.t is not None and self.current is not None:
81 | self.interpret(
82 | self.t, self.current, self.config_data, self.reader_data
83 | )
84 |
85 | if self.voltage is not None:
86 | self.set_autothreshold()
87 | self.compute_spikecount(efel_settings)
88 |
89 | self.export_attr = ["ton", "tmid", "toff", "tend", "amp", "amp2", "hypamp",
90 | "dt", "amp_rel", "amp2_rel", "hypamp_rel"]
91 |
92 | def get_stimulus_parameters(self):
93 | """Returns the eCode parameters"""
94 | ecode_params = {
95 | "delay": self.ton,
96 | "tmid": self.tmid,
97 | "toff": self.toff,
98 | "amp": self.amp,
99 | "amp2": self.amp2,
100 | "thresh_perc": self.amp2_rel,
101 | "duration": self.toff - self.ton,
102 | "totduration": self.tend,
103 | }
104 | return ecode_params
105 |
106 | def interpret(self, t, current, config_data, reader_data):
107 | """Analyse a current array and extract from it the parameters
108 | needed to reconstruct the array"""
109 | self.dt = t[1]
110 |
111 | # Smooth the current
112 | smooth_current = scipy_signal2d(current, 85)
113 |
114 | self.set_timing_ecode(["ton", "tmid", "toff"], config_data)
115 |
116 | hypamp_value = numpy.median(
117 | numpy.concatenate(
118 | (smooth_current[: self.ton], smooth_current[self.toff :])
119 | )
120 | )
121 | self.set_amplitudes_ecode("hypamp", config_data, reader_data, hypamp_value)
122 |
123 | amp_value = numpy.median(smooth_current[self.ton : self.tmid]) - self.hypamp
124 | self.set_amplitudes_ecode("amp", config_data, reader_data, amp_value)
125 |
126 | amp2_value = numpy.median(smooth_current[self.tmid : self.toff]) - self.hypamp
127 | self.set_amplitudes_ecode("amp2", config_data, reader_data, amp2_value)
128 |
129 | # Converting back to ms
130 | for name_timing in ["ton", "tmid", "toff"]:
131 | self.index_to_ms(name_timing, t)
132 | self.tend = len(t) * self.dt
133 |
134 | def generate(self):
135 | """Generate the current array from the parameters of the ecode"""
136 |
137 | ton = int(self.ton / self.dt)
138 | tmid = int(self.tmid / self.dt)
139 | toff = int(self.toff / self.dt)
140 |
141 | time = numpy.arange(0.0, self.tend, self.dt)
142 | current = numpy.full(time.shape, numpy.float64(self.hypamp))
143 | current[ton:tmid] += numpy.float64(self.amp)
144 | current[tmid:toff] += numpy.float64(self.amp2)
145 |
146 | return time, current
147 |
148 | def compute_relative_amp(self, amp_threshold):
149 | self.amp_rel = 100.0 * self.amp / amp_threshold
150 | self.amp2_rel = 100.0 * self.amp2 / amp_threshold
151 | self.hypamp_rel = 100.0 * self.hypamp / amp_threshold
152 |
153 | def get_plot_amplitude_title(self):
154 | return " ({:.01f}%/{:.01f}%)".format(self.amp_rel, self.amp2_rel)
155 |
--------------------------------------------------------------------------------
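A short sketch of how the parameters detected by DeHyperPol.interpret can be used (illustrative only: "rec" stands for an already-built DeHyperPol recording and "rheobase" for the cell's rheobase in the same current unit, both coming from the wider extraction pipeline):

    # Express amp, amp2 and hypamp as percentages of the rheobase
    rec.compute_relative_amp(rheobase)

    # Dictionary with delay, tmid, toff, amp, amp2, thresh_perc, duration, totduration
    params = rec.get_stimulus_parameters()

    # Rebuild the two-step current from ton/tmid/toff, amp, amp2 and hypamp,
    # e.g. to compare it against the experimental current
    time, current = rec.generate()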
/bluepyefe/ecode/HyperDePol.py:
--------------------------------------------------------------------------------
1 | """HyperDePol eCode"""
2 |
3 | """
4 | Copyright (c) 2022, EPFL/Blue Brain Project
5 |
6 | This file is part of BluePyEfe
7 |
8 | This library is free software; you can redistribute it and/or modify it under
9 | the terms of the GNU Lesser General Public License version 3.0 as published
10 | by the Free Software Foundation.
11 |
12 | This library is distributed in the hope that it will be useful, but WITHOUT
13 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
14 | FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
15 | details.
16 |
17 | You should have received a copy of the GNU Lesser General Public License
18 | along with this library; if not, write to the Free Software Foundation, Inc.,
19 | 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
20 | """
21 | import logging
22 | import numpy
23 |
24 | from ..recording import Recording
25 | from .tools import scipy_signal2d
26 |
27 | logger = logging.getLogger(__name__)
28 |
29 |
30 | class HyperDePol(Recording):
31 |
32 | """HyperDepol current stimulus
33 |
34 | The hyperpolarizing step is usually fixed at 100% of rheobase, and the hyperpolarizing step
35 | can usually vary from -40% to -160% of rheobase.
36 |
37 | .. code-block:: none
38 |
39 | hypamp hypamp+amp hypamp+amp2 hypamp
40 | : : : :
41 | : : _____________________ :
42 | : : | | :
43 | : : | | :
44 | : : | | :
45 | : : | | :
46 | : : | | :
47 | |_______________ : | |___________
48 | ^ | : | ^ ^
49 | : |___________________| : :
50 | : ^ ^ : :
51 | : : : : :
52 | : : : : :
53 | t=0 ton tmid toff tend
54 | """
55 |
56 | def __init__(
57 | self,
58 | config_data,
59 | reader_data,
60 | protocol_name="HyperDePol",
61 | efel_settings=None
62 | ):
63 |
64 | super(HyperDePol, self).__init__(
65 | config_data, reader_data, protocol_name
66 | )
67 |
68 | self.ton = None
69 | self.tmid = None
70 | self.toff = None
71 | self.tend = None
72 | self.amp = None
73 | self.amp2 = None
74 | self.hypamp = None
75 | self.dt = None
76 |
77 | self.amp_rel = None
78 | self.amp2_rel = None
79 | self.hypamp_rel = None
80 |
81 | if self.t is not None and self.current is not None:
82 | self.interpret(
83 | self.t, self.current, self.config_data, self.reader_data
84 | )
85 |
86 | if self.voltage is not None:
87 | self.set_autothreshold()
88 | self.compute_spikecount(efel_settings)
89 |
90 | self.export_attr = ["ton", "tmid", "toff", "tend", "amp", "amp2",
91 | "hypamp", "dt", "amp_rel", "amp2_rel",
92 | "hypamp_rel"]
93 |
94 | def get_stimulus_parameters(self):
95 | """Returns the eCode parameters"""
96 | ecode_params = {
97 | "delay": self.ton,
98 | "tmid": self.tmid,
99 | "toff": self.toff,
100 | "amp": self.amp,
101 | "amp2": self.amp2,
102 | "thresh_perc": self.amp_rel,
103 | "duration": self.toff - self.ton,
104 | "totduration": self.tend,
105 | }
106 | return ecode_params
107 |
108 | def interpret(self, t, current, config_data, reader_data):
109 | """Analyse a current array and extract from it the parameters
110 | needed to reconstruct the array"""
111 | self.dt = t[1]
112 |
113 | # Smooth the current
114 | smooth_current = scipy_signal2d(current, 85)
115 |
116 | self.set_timing_ecode(["ton", "tmid", "toff"], config_data)
117 |
118 | hypamp_value = numpy.median(
119 | numpy.concatenate(
120 | (smooth_current[: self.ton], smooth_current[self.toff :])
121 | )
122 | )
123 | self.set_amplitudes_ecode("hypamp", config_data, reader_data, hypamp_value)
124 |
125 | amp_value = numpy.median(smooth_current[self.ton : self.tmid]) - self.hypamp
126 | self.set_amplitudes_ecode("amp", config_data, reader_data, amp_value)
127 |
128 | amp2_value = numpy.median(smooth_current[self.tmid : self.toff]) - self.hypamp
129 | self.set_amplitudes_ecode("amp2", config_data, reader_data, amp2_value)
130 |
131 | # Converting back to ms
132 | for name_timing in ["ton", "tmid", "toff"]:
133 | self.index_to_ms(name_timing, t)
134 | self.tend = len(t) * self.dt
135 |
136 | def generate(self):
137 | """Generate the current array from the parameters of the ecode"""
138 |
139 | ton = int(self.ton / self.dt)
140 | tmid = int(self.tmid / self.dt)
141 | toff = int(self.toff / self.dt)
142 |
143 | time = numpy.arange(0.0, self.tend, self.dt)
144 | current = numpy.full(time.shape, numpy.float64(self.hypamp))
145 | current[ton:tmid] += numpy.float64(self.amp)
146 | current[tmid:toff] += numpy.float64(self.amp2)
147 |
148 | return time, current
149 |
150 | def compute_relative_amp(self, amp_threshold):
151 | self.amp_rel = 100.0 * self.amp / amp_threshold
152 | self.amp2_rel = 100.0 * self.amp2 / amp_threshold
153 | self.hypamp_rel = 100.0 * self.hypamp / amp_threshold
154 |
155 | def get_plot_amplitude_title(self):
156 | return " ({:.01f}%/{:.01f}%)".format(self.amp_rel, self.amp2_rel)
157 |
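For illustration, the two-step waveform sketched in the docstring above can be rebuilt with plain numpy from the exported parameters, mirroring the logic of `generate()`. All values below are made-up examples, not defaults of the class.

.. code-block:: python

    import numpy

    # Hypothetical HyperDePol parameters (times in ms, currents in nA)
    dt, ton, tmid, toff, tend = 0.1, 250.0, 700.0, 970.0, 1220.0
    hypamp, amp, amp2 = -0.05, -0.1, 0.12  # holding, hyperpolarizing and depolarizing amplitudes

    time = numpy.arange(0.0, tend, dt)
    current = numpy.full(time.shape, hypamp)
    current[int(ton / dt):int(tmid / dt)] += amp    # hyperpolarizing phase
    current[int(tmid / dt):int(toff / dt)] += amp2  # depolarizing phase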
--------------------------------------------------------------------------------
/bluepyefe/ecode/SpikeRec.py:
--------------------------------------------------------------------------------
1 | """Step eCode class"""
2 |
3 | """
4 | Copyright (c) 2022, EPFL/Blue Brain Project
5 |
6 | This file is part of BluePyEfe
7 |
8 | This library is free software; you can redistribute it and/or modify it under
9 | the terms of the GNU Lesser General Public License version 3.0 as published
10 | by the Free Software Foundation.
11 |
12 | This library is distributed in the hope that it will be useful, but WITHOUT
13 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
14 | FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
15 | details.
16 |
17 | You should have received a copy of the GNU Lesser General Public License
18 | along with this library; if not, write to the Free Software Foundation, Inc.,
19 | 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
20 | """
21 | import logging
22 | import numpy
23 |
24 | from ..recording import Recording
25 | from .tools import base_current
26 | from .tools import scipy_signal2d
27 |
28 | logger = logging.getLogger(__name__)
29 |
30 |
31 | def group_indexes(values, gap=10):
32 | """Return a list of clusters from a list where consecutive
33 | values follow each other forming clusters
34 | eg: [12, 14, 15, 20, 56, 60, 61, 62, 63] -> [[12, 14, 15, 20], [56, 60,
35 | 61, 62, 63]]"""
36 |
37 | clusters = []
38 |
39 | for v in values:
40 | if not (len(clusters)) or clusters[-1][-1] + gap < v:
41 | clusters.append([v])
42 | else:
43 | clusters[-1].append(v)
44 |
45 | return clusters
46 |
47 |
48 | def detect_spike(amp, hypamp, smooth_current, dt):
49 |
50 | tspike = []
51 | duration = []
52 | delta = []
53 |
54 | threshold = hypamp + (0.1 * amp)
55 | candidate_spikes = numpy.argwhere(smooth_current > threshold).flatten()
56 | candidate_spikes = group_indexes(candidate_spikes, gap=10)
57 |
58 | for spike in candidate_spikes:
59 | tspike.append(spike[0] - 1)
60 | duration.append(spike[-1] - spike[0] + 1)
61 |
62 | if len(tspike) > 1:
63 | for i in range(1, len(tspike)):
64 | end = tspike[i - 1] + duration[i - 1]
65 | start = tspike[i]
66 | delta.append(dt * (start - end))
67 |
68 | tspike = numpy.asarray(tspike) * dt
69 | duration = numpy.mean(numpy.asarray(duration) * dt)
70 | delta = numpy.mean(delta)
71 |
72 | return tspike, duration, delta
73 |
74 |
75 | class SpikeRec(Recording):
76 |
77 | """SpikeRec current stimulus
78 |
79 | .. code-block:: none
80 |
81 | hypamp hypamp+amp hypamp hypamp+amp . . .
82 | : : : :
83 | : _________________ : _________________ _________________
84 | : | | : | | | |
85 | : | | : | | * len(tspike) | |
86 | : | | : | | . . . | |
87 | : | | : | | | |
88 | |_______________| |__________| |__ __| |___
89 | : : : : : ^
90 | : : : : : :
91 | : : : : : :
92 | <--tspike[0] --><-spike_duration-><- delta -><-spike_duration-> . . . tend
93 |
94 | """
95 |
96 | def __init__(
97 | self,
98 | config_data,
99 | reader_data,
100 | protocol_name="SpikeRec",
101 | efel_settings=None
102 | ):
103 |
104 | super(SpikeRec, self).__init__(config_data, reader_data, protocol_name)
105 |
106 | self.tend = None
107 | self.tspike = []
108 | self.spike_duration = None # in ms
109 | self.amp = None
110 | self.hypamp = None
111 | self.dt = None
112 | self.delta = None # Time difference between two spikes
113 |
114 | self.amp_rel = None
115 | self.hypamp_rel = None
116 |
117 | if self.t is not None and self.current is not None:
118 | self.interpret(
119 | self.t, self.current, self.config_data, self.reader_data
120 | )
121 |
122 | if self.voltage is not None:
123 | self.set_autothreshold()
124 | self.compute_spikecount(efel_settings)
125 |
126 | self.export_attr = ["tend", "tspike", "spike_duration", "delta",
127 | "amp", "hypamp", "dt", "amp_rel", "hypamp_rel"]
128 |
129 | @property
130 | def ton(self):
131 | return 0.0
132 |
133 | @property
134 | def toff(self):
135 | return self.tend
136 |
137 | @property
138 | def multi_stim_start(self):
139 | return list(self.tspike)
140 |
141 | @property
142 | def multi_stim_end(self):
143 | return [t + self.spike_duration for t in self.tspike]
144 |
145 | def get_stimulus_parameters(self):
146 | """Returns the eCode parameters"""
147 |
148 | ecode_params = {
149 | "delay": self.tspike[0],
150 | "n_spikes": len(self.tspike),
151 | "delta": self.delta,
152 | "amp": self.amp,
153 | "thresh_perc": self.amp_rel,
154 | "spike_duration": self.spike_duration,
155 | "totduration": self.tend,
156 | }
157 |
158 | return ecode_params
159 |
160 | def interpret(self, t, current, config_data, reader_data):
161 | """Analyse a current with a step and extract from it the parameters
162 | needed to reconstruct the array"""
163 | self.dt = t[1]
164 |
165 | # Smooth the current
166 | smooth_current = scipy_signal2d(current, 15)
167 |
168 | hypamp_value = base_current(current)
169 | self.set_amplitudes_ecode("hypamp", config_data, reader_data, hypamp_value)
170 |
171 | amp_value = numpy.max(smooth_current)
172 | self.set_amplitudes_ecode("amp", config_data, reader_data, amp_value)
173 |
174 | # Get the beginning and end of the spikes
175 | if (
176 | not len(self.tspike)
177 | or self.spike_duration is None
178 | or self.delta is None
179 | ):
180 | self.tspike, self.spike_duration, self.delta = detect_spike(
181 | self.amp, self.hypamp, smooth_current, self.dt
182 | )
183 |
184 | self.tend = len(t) * self.dt
185 |
186 | def generate(self):
187 | """Generate the step current array from the parameters of the ecode"""
188 |
189 | t = numpy.arange(0.0, self.tend, self.dt)
190 | current = numpy.full(t.shape, numpy.float64(self.hypamp))
191 |
192 | spike_start = int(self.tspike[0] / self.dt)
193 | spike_end = int((self.tspike[0] + self.spike_duration) / self.dt)
194 | current[spike_start:spike_end] += numpy.float64(self.amp)
195 |
196 | for i in range(1, len(self.tspike)):
197 | spike_start = int(spike_end + (self.delta / self.dt))
198 | spike_end = spike_start + int(self.spike_duration / self.dt)
199 | current[spike_start:spike_end] += numpy.float64(self.amp)
200 |
201 | return t, current
202 |
203 | def in_target(self, target, tolerance, absolute_amplitude):
204 | """Returns a boolean. True if the delta of the eCode is close to
205 | target and False otherwise."""
206 | logger.warning(
207 | "The eCode SpikeRec uses delta between current spikes "
208 | "in ms as target, not amplitude"
209 | )
210 | if numpy.abs(target - self.delta) < tolerance:
211 | return True
212 | else:
213 | return False
214 |
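As a quick sanity check of the clustering helper used by `detect_spike`, the example from the `group_indexes` docstring can be reproduced directly (assuming bluepyefe is installed):

.. code-block:: python

    from bluepyefe.ecode.SpikeRec import group_indexes

    indexes = [12, 14, 15, 20, 56, 60, 61, 62, 63]
    print(group_indexes(indexes, gap=10))
    # [[12, 14, 15, 20], [56, 60, 61, 62, 63]]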
--------------------------------------------------------------------------------
/bluepyefe/ecode/__init__.py:
--------------------------------------------------------------------------------
1 | """eCode init script"""
2 |
3 | """
4 | Copyright (c) 2020, EPFL/Blue Brain Project
5 | This file is part of BluePyEfe
6 | This library is free software; you can redistribute it and/or modify it under
7 | the terms of the GNU Lesser General Public License version 3.0 as published
8 | by the Free Software Foundation.
9 | This library is distributed in the hope that it will be useful, but WITHOUT
10 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
11 | FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
12 | details.
13 | You should have received a copy of the GNU Lesser General Public License
14 | along with this library; if not, write to the Free Software Foundation, Inc.,
15 | 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
16 | """
17 |
18 | from . import DeHyperPol
19 | from . import HyperDePol
20 | from . import SpikeRec
21 | from . import negCheops
22 | from . import posCheops
23 | from . import ramp
24 | from . import sAHP
25 | from . import sineSpec
26 | from . import step
27 |
28 | # The ecode names have to be lower case only to avoid having to
29 | # define duplicates for upper and lower cases.
30 | eCodes = {
31 | "spontaneous": step.Step,
32 | "idrest": step.Step,
33 | "idthresh": step.Step,
34 | "idthres": step.Step,
35 | "apwaveform": step.Step,
36 | "iv": step.Step,
37 | "step": step.Step,
38 | "spontaps": step.Step,
39 | "firepattern": step.Step,
40 | "sponnohold30": step.Step,
41 | "sponhold30": step.Step,
42 | "spontnohold30": step.Step,
43 | "sponthold30": step.Step,
44 | "starthold": step.Step,
45 | "startnohold": step.Step,
46 | "delta": step.Step,
47 | "sahp": sAHP.SAHP,
48 | "idhyperpol": sAHP.SAHP,
49 | "irdepol": sAHP.SAHP,
50 | "irhyperpol": sAHP.SAHP,
51 | "iddepol": sAHP.SAHP,
52 | "ramp": ramp.Ramp,
53 | "ap_thresh": ramp.Ramp,
54 | "apthresh": ramp.Ramp,
55 | "hyperdepol": HyperDePol.HyperDePol,
56 | "dehyperpol": DeHyperPol.DeHyperPol,
57 | "negcheops": negCheops.NegCheops,
58 | "poscheops": posCheops.PosCheops,
59 | "spikerec": SpikeRec.SpikeRec,
60 | "sinespec": sineSpec.SineSpec,
61 | }
62 |
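The `eCodes` dictionary is keyed by lower-case stimulus names, and callers such as `Protocol.ecode` resolve a protocol to its stimulus class by substring matching against these keys. A minimal sketch of that lookup, using an arbitrary protocol name:

.. code-block:: python

    from bluepyefe.ecode import eCodes

    protocol_name = "IDRest_150"  # example; matching is done on the lower-case name
    for key, ecode_class in eCodes.items():
        if key in protocol_name.lower():
            print(key, "->", ecode_class.__name__)  # idrest -> Step
            break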
--------------------------------------------------------------------------------
/bluepyefe/ecode/negCheops.py:
--------------------------------------------------------------------------------
1 | """Cheops eCode"""
2 |
3 | """
4 | Copyright (c) 2022, EPFL/Blue Brain Project
5 |
6 | This file is part of BluePyEfe
7 |
8 | This library is free software; you can redistribute it and/or modify it under
9 | the terms of the GNU Lesser General Public License version 3.0 as published
10 | by the Free Software Foundation.
11 |
12 | This library is distributed in the hope that it will be useful, but WITHOUT
13 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
14 | FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
15 | details.
16 |
17 | You should have received a copy of the GNU Lesser General Public License
18 | along with this library; if not, write to the Free Software Foundation, Inc.,
19 | 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
20 | """
21 | import logging
22 | import numpy
23 |
24 | from ..recording import Recording
25 | from .tools import scipy_signal2d
26 |
27 | logger = logging.getLogger(__name__)
28 |
29 |
30 | class NegCheops(Recording):
31 |
32 | # pylint: disable=line-too-long,anomalous-backslash-in-string
33 |
34 | """NegCheops current stimulus
35 |
36 | .. code-block:: none
37 |
38 | hypamp hypamp+amp hypamp hypamp+amp hypamp hypamp+amp hypamp
39 | : : : : : : :
40 | : : : : : : :
41 | |__________ : ____________ : ____________ : ____________
42 | : :\ : /: :\ : /: :\ : /: ^
43 | : : \ : / : : \ : / : : \ : / : :
44 | : : \ : / : : \ : / : : \ : / : :
45 | : : \ : / : : \ : / : : \ : / : :
46 | : : \ : / : : \ : / : : \ : / : :
47 | : : \ : / : : \ : / : : \ : / : :
48 | : : \ : / : : \ : / : : \ : / : :
49 | : : \ : / : : \ : / : : \ : / : :
50 | : : \ : / : : \ : / : : \ : / : :
51 | : : \ : / : : \ : / : : \ : / : :
52 | : : \ : / : : \ : / : : \ : / : :
53 | : : \ / : : \ / : : \ / : :
54 | : : ' : : ' : : ' : :
55 | : : : : : : : :
56 | t=0 ton t1 t2 t3 t4 toff tend
57 | """
58 |
59 | def __init__(
60 | self,
61 | config_data,
62 | reader_data,
63 | protocol_name="NegCheops",
64 | efel_settings=None
65 | ):
66 |
67 | super(NegCheops, self).__init__(
68 | config_data, reader_data, protocol_name
69 | )
70 |
71 | self.ton = None
72 | self.t1 = None
73 | self.t2 = None
74 | self.t3 = None
75 | self.t4 = None
76 | self.tend = None
77 | self.amp = None
78 | self.hypamp = None
79 | self.dt = None
80 |
81 | self.amp_rel = None
82 | self.hypamp_rel = None
83 |
84 | if self.t is not None and self.current is not None:
85 | self.interpret(
86 | self.t, self.current, self.config_data, self.reader_data
87 | )
88 |
89 | if self.voltage is not None:
90 | self.set_autothreshold()
91 | self.compute_spikecount(efel_settings)
92 |
93 | self.export_attr = ["ton", "t1", "t2", "t3", "t4", "toff", "tend",
94 | "amp", "hypamp", "dt", "amp_rel", "hypamp_rel"]
95 |
96 | def get_stimulus_parameters(self):
97 | """Returns the eCode parameters"""
98 | ecode_params = {
99 | "delay": self.ton,
100 | "t1": self.t1,
101 | "t2": self.t2,
102 | "t3": self.t3,
103 | "t4": self.t4,
104 | "toff": self.toff,
105 | "amp": self.amp,
106 | "thresh_perc": self.amp_rel,
107 | "duration": self.toff - self.ton,
108 | "totduration": self.tend,
109 | }
110 | return ecode_params
111 |
112 | def interpret(self, t, current, config_data, reader_data):
113 | """Analyse a current array and extract from it the parameters
114 | needed to reconstruct the array"""
115 | self.dt = t[1]
116 |
117 | # Smooth the current
118 | smooth_current = scipy_signal2d(current, 85)
119 |
120 | self.set_timing_ecode(
121 | ["ton", "t1", "t2", "t3", "t4", "toff"], config_data)
122 |
123 | hypamp_value = numpy.median(smooth_current[: self.ton])
124 | self.set_amplitudes_ecode("hypamp", config_data, reader_data, hypamp_value)
125 |
126 | amp_value = numpy.min(smooth_current[:]) - self.hypamp
127 | self.set_amplitudes_ecode("amp", config_data, reader_data, amp_value)
128 |
129 | # Converting back to ms
130 | for name_timing in ["ton", "t1", "t2", "t3", "t4", "toff"]:
131 | self.index_to_ms(name_timing, t)
132 | self.tend = len(t) * self.dt
133 |
134 | def generate(self):
135 | """Generate the current array from the parameters of the ecode"""
136 |
137 | ton = int(self.ton / self.dt)
138 | t1 = int(self.t1 / self.dt)
139 | t2 = int(self.t2 / self.dt)
140 | t3 = int(self.t3 / self.dt)
141 | t4 = int(self.t4 / self.dt)
142 | toff = int(self.toff / self.dt)
143 |
144 | time = numpy.arange(0.0, self.tend, self.dt)
145 | current = numpy.full(time.shape, numpy.float64(self.hypamp))
146 |
147 | # First peak
148 | mid = int(0.5 * (ton + t1))
149 | current[ton:mid] += numpy.linspace(0.0, self.amp, mid - ton)
150 | current[mid:t1] += numpy.linspace(self.amp, 0.0, t1 - mid)
151 |
152 | # Second peak
153 | mid = int(0.5 * (t2 + t3))
154 | current[t2:mid] += numpy.linspace(0.0, self.amp, mid - t2)
155 | current[mid:t3] += numpy.linspace(self.amp, 0.0, t3 - mid)
156 |
157 | # Third peak
158 | mid = int(0.5 * (t4 + toff))
159 | current[t4:mid] += numpy.linspace(0.0, self.amp, mid - t4)
160 | current[mid:toff] += numpy.linspace(self.amp, 0.0, toff - mid)
161 |
162 | return time, current
163 |
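Each of the three negative deflections built in `generate()` is a pair of `numpy.linspace` segments meeting at the midpoint of the interval. A standalone sketch of a single triangular ramp, with made-up amplitude and length:

.. code-block:: python

    import numpy

    amp, n = -0.25, 1000  # example amplitude (nA) and number of samples
    mid = n // 2
    ramp = numpy.zeros(n)
    ramp[:mid] = numpy.linspace(0.0, amp, mid)      # descending edge
    ramp[mid:] = numpy.linspace(amp, 0.0, n - mid)  # ascending edge back to baseline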
--------------------------------------------------------------------------------
/bluepyefe/ecode/posCheops.py:
--------------------------------------------------------------------------------
1 | """Cheops eCode"""
2 |
3 | """
4 | Copyright (c) 2022, EPFL/Blue Brain Project
5 |
6 | This file is part of BluePyEfe
7 |
8 | This library is free software; you can redistribute it and/or modify it under
9 | the terms of the GNU Lesser General Public License version 3.0 as published
10 | by the Free Software Foundation.
11 |
12 | This library is distributed in the hope that it will be useful, but WITHOUT
13 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
14 | FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
15 | details.
16 |
17 | You should have received a copy of the GNU Lesser General Public License
18 | along with this library; if not, write to the Free Software Foundation, Inc.,
19 | 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
20 | """
21 | import logging
22 | import numpy
23 |
24 | from ..recording import Recording
25 | from .tools import scipy_signal2d
26 |
27 | logger = logging.getLogger(__name__)
28 |
29 |
30 | class PosCheops(Recording):
31 |
32 | # pylint: disable=line-too-long,anomalous-backslash-in-string
33 |
34 | """PosCheops current stimulus
35 |
36 | .. code-block:: none
37 |
38 | hypamp hypamp+amp hypamp hypamp+amp hypamp hypamp+amp hypamp
39 | : : : : : : :
40 | : . : . : . :
41 | : / \ : / \ : / \ :
42 | : / \ : / \ : / \ :
43 | : / \ : / \ : / \ :
44 | : / \ : / \ : / \ :
45 | : / \ : / \ : / \ :
46 | : / \ : / \ : / \ :
47 | : / \ : / \ : / \ :
48 | : / \ : / \ : / \ :
49 | : / \ : / \ : / \ :
50 | : / \ : / \ : / \ :
51 | : / \ : / \ : / \ :
52 | |__________ / \ __________ / \ __________ / \ __________
53 | : : : : : : : ^
54 | : : : : : : : :
55 | : : : : : : : :
56 | t=0 ton t1 t2 t3 t4 toff tend
57 | """
58 |
59 | def __init__(
60 | self,
61 | config_data,
62 | reader_data,
63 | protocol_name="PosCheops",
64 | efel_settings=None
65 | ):
66 |
67 | super(PosCheops, self).__init__(
68 | config_data, reader_data, protocol_name
69 | )
70 |
71 | self.ton = None
72 | self.t1 = None
73 | self.t2 = None
74 | self.t3 = None
75 | self.t4 = None
76 | self.tend = None
77 | self.amp = None
78 | self.hypamp = None
79 | self.dt = None
80 |
81 | self.amp_rel = None
82 | self.hypamp_rel = None
83 |
84 | if self.t is not None and self.current is not None:
85 | self.interpret(
86 | self.t, self.current, self.config_data, self.reader_data
87 | )
88 |
89 | if self.voltage is not None:
90 | self.set_autothreshold()
91 | self.compute_spikecount(efel_settings)
92 |
93 | self.export_attr = ["ton", "t1", "t2", "t3", "t4", "toff", "tend",
94 | "amp", "hypamp", "dt", "amp_rel", "hypamp_rel"]
95 |
96 | def get_stimulus_parameters(self):
97 | """Returns the eCode parameters"""
98 | ecode_params = {
99 | "delay": self.ton,
100 | "t1": self.t1,
101 | "t2": self.t2,
102 | "t3": self.t3,
103 | "t4": self.t4,
104 | "toff": self.toff,
105 | "amp": self.amp,
106 | "thresh_perc": self.amp_rel,
107 | "duration": self.toff - self.ton,
108 | "totduration": self.tend
109 | }
110 | return ecode_params
111 |
112 | def interpret(self, t, current, config_data, reader_data):
113 | """Analyse a current array and extract from it the parameters
114 | needed to reconstruct the array"""
115 | self.dt = t[1]
116 |
117 | # Smooth the current
118 | smooth_current = scipy_signal2d(current, 85)
119 |
120 | self.set_timing_ecode(
121 | ["ton", "t1", "t2", "t3", "t4", "toff"], config_data)
122 |
123 | hypamp_value = numpy.median(smooth_current[: self.ton])
124 | self.set_amplitudes_ecode("hypamp", config_data, reader_data, hypamp_value)
125 |
126 | amp_value = numpy.max(smooth_current[:]) - self.hypamp
127 | self.set_amplitudes_ecode("amp", config_data, reader_data, amp_value)
128 |
129 | # Converting back to ms
130 | for name_timing in ["ton", "t1", "t2", "t3", "t4", "toff"]:
131 | self.index_to_ms(name_timing, t)
132 | self.tend = len(t) * self.dt
133 |
134 | def generate(self):
135 | """Generate the current array from the parameters of the ecode"""
136 |
137 | ton = int(self.ton / self.dt)
138 | t1 = int(self.t1 / self.dt)
139 | t2 = int(self.t2 / self.dt)
140 | t3 = int(self.t3 / self.dt)
141 | t4 = int(self.t4 / self.dt)
142 | toff = int(self.toff / self.dt)
143 |
144 | time = numpy.arange(0.0, self.tend, self.dt)
145 | current = numpy.full(time.shape, numpy.float64(self.hypamp))
146 |
147 | # First peak
148 | mid = int(0.5 * (self.ton + self.t1) / self.dt)
149 | current[ton:mid] += numpy.linspace(0.0, self.amp, mid - ton)
150 | current[mid:t1] += numpy.linspace(self.amp, 0.0, t1 - mid)
151 |
152 | # Second peak
153 | mid = int(0.5 * (self.t2 + self.t3) / self.dt)
154 | current[t2:mid] += numpy.linspace(0.0, self.amp, mid - t2)
155 | current[mid:t3] += numpy.linspace(self.amp, 0.0, t3 - mid)
156 |
157 | # Third peak
158 | mid = int(0.5 * (self.t4 + self.toff) / self.dt)
159 | current[t4:mid] += numpy.linspace(0.0, self.amp, mid - t4)
160 | current[mid:toff] += numpy.linspace(self.amp, 0.0, toff - mid)
161 |
162 | return time, current
163 |
--------------------------------------------------------------------------------
/bluepyefe/ecode/ramp.py:
--------------------------------------------------------------------------------
1 | """Step eCode class"""
2 |
3 | """
4 | Copyright (c) 2022, EPFL/Blue Brain Project
5 |
6 | This file is part of BluePyEfe
7 |
8 | This library is free software; you can redistribute it and/or modify it under
9 | the terms of the GNU Lesser General Public License version 3.0 as published
10 | by the Free Software Foundation.
11 |
12 | This library is distributed in the hope that it will be useful, but WITHOUT
13 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
14 | FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
15 | details.
16 |
17 | You should have received a copy of the GNU Lesser General Public License
18 | along with this library; if not, write to the Free Software Foundation, Inc.,
19 | 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
20 | """
21 | import logging
22 | import numpy
23 |
24 | from ..recording import Recording
25 | from .tools import base_current
26 | from .tools import scipy_signal2d
27 |
28 | logger = logging.getLogger(__name__)
29 |
30 |
31 | class Ramp(Recording):
32 |
33 | """Ramp current stimulus
34 |
35 | .. code-block:: none
36 |
37 | hypamp hypamp+amp hypamp
38 | : : :
39 | : : :
40 | : /| :
41 | : / | :
42 | : / | :
43 | : / | :
44 | : / | :
45 | : / | :
46 | : / | :
47 | : / | :
48 | : / | :
49 | |___________ / |__________________________
50 | ^ ^ ^ ^
51 | : : : :
52 | : : : :
53 | t=0 ton toff tend
54 | """
55 |
56 | def __init__(
57 | self,
58 | config_data,
59 | reader_data,
60 | protocol_name="ramp",
61 | efel_settings=None
62 | ):
63 |
64 | super(Ramp, self).__init__(config_data, reader_data, protocol_name)
65 |
66 | self.ton = None
67 | self.toff = None
68 | self.tend = None
69 | self.amp = None
70 | self.hypamp = None
71 | self.dt = None
72 |
73 | self.amp_rel = None
74 | self.hypamp_rel = None
75 |
76 | if self.t is not None and self.current is not None:
77 | self.interpret(
78 | self.t, self.current, self.config_data, self.reader_data
79 | )
80 |
81 | if self.voltage is not None:
82 | self.set_autothreshold()
83 | self.compute_spikecount(efel_settings)
84 |
85 | self.export_attr = ["ton", "toff", "tend", "amp", "hypamp", "dt",
86 | "amp_rel", "hypamp_rel"]
87 |
88 | def get_stimulus_parameters(self):
89 | """Returns the eCode parameters"""
90 | ecode_params = {
91 | "delay": self.ton,
92 | "amp": self.amp,
93 | "thresh_perc": self.amp_rel,
94 | "duration": self.toff - self.ton,
95 | "totduration": self.tend,
96 | }
97 | return ecode_params
98 |
99 | def interpret(self, t, current, config_data, reader_data):
100 | """Analyse a current with a step and extract from it the parameters
101 | needed to reconstruct the array"""
102 | self.dt = t[1]
103 |
104 | # Smooth the current
105 | smooth_current = scipy_signal2d(current, 85)
106 |
107 | self.set_timing_ecode(["ton"], config_data)
108 |
109 | if "toff" in config_data and config_data["toff"] is not None:
110 | self.toff = int(round(config_data["toff"] / self.dt))
111 | else:
112 | self.toff = numpy.argmax(smooth_current[self.ton:]) + self.ton
113 |
114 | hypamp_value = base_current(current)
115 | self.set_amplitudes_ecode("hypamp", config_data, reader_data, hypamp_value)
116 |
117 | amp_value = numpy.median(current[self.toff - 10 : self.toff]) - self.hypamp
118 | self.set_amplitudes_ecode("amp", config_data, reader_data, amp_value)
119 |
120 | # Converting back to ms
121 | for name_timing in ["ton", "toff"]:
122 | self.index_to_ms(name_timing, t)
123 | self.tend = len(t) * self.dt
124 |
125 | def generate(self):
126 | """Generate the step current array from the parameters of the ecode"""
127 |
128 | ton_idx = int(self.ton / self.dt)
129 | toff_idx = int(self.toff / self.dt)
130 |
131 | t = numpy.arange(0.0, self.tend, self.dt)
132 | current = numpy.full(t.shape, numpy.float64(self.hypamp))
133 | current[ton_idx:toff_idx] += numpy.linspace(
134 | 0.0, self.amp, toff_idx - ton_idx
135 | )
136 |
137 | return t, current
138 |
--------------------------------------------------------------------------------
/bluepyefe/ecode/sineSpec.py:
--------------------------------------------------------------------------------
1 | """Resonance eCode class"""
2 |
3 | """
4 | Copyright (c) 2022, EPFL/Blue Brain Project
5 |
6 | This file is part of BluePyEfe
7 |
8 | This library is free software; you can redistribute it and/or modify it under
9 | the terms of the GNU Lesser General Public License version 3.0 as published
10 | by the Free Software Foundation.
11 |
12 | This library is distributed in the hope that it will be useful, but WITHOUT
13 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
14 | FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
15 | details.
16 |
17 | You should have received a copy of the GNU Lesser General Public License
18 | along with this library; if not, write to the Free Software Foundation, Inc.,
19 | 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
20 | """
21 | import logging
22 | import numpy
23 |
24 | from ..recording import Recording
25 | from .tools import base_current
26 | from .tools import scipy_signal2d
27 |
28 | logger = logging.getLogger(__name__)
29 |
30 |
31 | class SineSpec(Recording):
32 |
33 | """SineSpec current stimulus"""
34 |
35 | def __init__(
36 | self,
37 | config_data,
38 | reader_data,
39 | protocol_name="SineSpec",
40 | efel_settings=None
41 | ):
42 |
43 | super(SineSpec, self).__init__(config_data, reader_data, protocol_name)
44 |
45 | self.ton = None
46 | self.toff = None
47 | self.tend = None
48 | self.amp = None
49 | self.hypamp = None
50 | self.dt = None
51 |
52 | self.amp_rel = None
53 | self.hypamp_rel = None
54 |
55 | if self.t is not None and self.current is not None:
56 | self.interpret(
57 | self.t, self.current, self.config_data, self.reader_data
58 | )
59 |
60 | if self.voltage is not None:
61 | self.set_autothreshold()
62 | self.compute_spikecount(efel_settings)
63 |
64 | self.export_attr = ["ton", "toff", "tend", "amp", "hypamp", "dt",
65 | "amp_rel", "hypamp_rel"]
66 |
67 | def get_stimulus_parameters(self):
68 | """Returns the eCode parameters"""
69 | ecode_params = {
70 | "delay": self.ton,
71 | "amp": self.amp,
72 | "thresh_perc": self.amp_rel,
73 | "duration": self.toff - self.ton,
74 | "totduration": self.tend,
75 | }
76 | return ecode_params
77 |
78 | def interpret(self, t, current, config_data, reader_data):
79 | """Analyse a current with a step and extract from it the parameters
80 | needed to reconstruct the array"""
81 | self.dt = t[1]
82 |
83 | # Smooth the current
84 | smooth_current = scipy_signal2d(current, 85)
85 |
86 | if "ton" in config_data and config_data["ton"] is not None:
87 | self.ton = int(round(config_data["ton"] / self.dt))
88 | else:
89 | self.ton = 150
90 | logger.warning(
91 | "As ton was not specified for protocol {}, it will "
92 | "be set to 150ms.".format(self.protocol_name)
93 | )
94 |
95 | if "toff" in config_data and config_data["toff"] is not None:
96 | self.toff = int(round(config_data["toff"] / self.dt))
97 | else:
98 | self.toff = 5100
99 | logger.warning(
100 | "As toff was not specified for protocol {}, it will "
101 | "be set to 5100ms.".format(self.protocol_name)
102 | )
103 |
104 | hypamp_value = base_current(current)
105 | self.set_amplitudes_ecode("hypamp", config_data, reader_data, hypamp_value)
106 |
107 | amp_value = numpy.max(smooth_current) - self.hypamp
108 | self.set_amplitudes_ecode("amp", config_data, reader_data, amp_value)
109 |
110 | # Converting back to ms
111 | for name_timing in ["ton", "toff"]:
112 | self.index_to_ms(name_timing, t)
113 | self.tend = len(t) * self.dt
114 |
115 | def generate(self):
116 | """Generate the SineSpec current array from the parameters of the
117 | ecode"""
118 | ton_idx = int(self.ton / self.dt)
119 | toff_idx = int(self.toff / self.dt)
120 |
121 | t = numpy.arange(0.0, self.tend, self.dt)
122 | t_sine = numpy.arange(0.0, self.tend / 1e3, self.dt / 1e3)
123 |
124 | current = self.amp * numpy.sin(
125 | 2.0
126 | * numpy.pi
127 | * (1.0 + (1.0 / (5.15 - (t_sine - 0.1))))
128 | * (t_sine - 0.1)
129 | )
130 |
131 | current[:ton_idx] = 0.0
132 | current[toff_idx:] = 0.0
133 | current += self.hypamp
134 |
135 | return t, current
136 |
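The waveform assembled in `generate()` is a sine whose frequency increases along the sweep; with time expressed in seconds, its argument is 2*pi*(1 + 1/(5.15 - (t - 0.1)))*(t - 0.1). A short sketch evaluating the same expression outside the class, with made-up amplitude and timing:

.. code-block:: python

    import numpy

    amp, hypamp, dt, tend = 0.1, -0.02, 0.1, 5100.0  # example values, ms and nA
    t = numpy.arange(0.0, tend, dt)
    t_sine = t / 1e3  # the chirp formula expects seconds

    current = hypamp + amp * numpy.sin(
        2.0 * numpy.pi * (1.0 + (1.0 / (5.15 - (t_sine - 0.1)))) * (t_sine - 0.1)
    )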
--------------------------------------------------------------------------------
/bluepyefe/ecode/step.py:
--------------------------------------------------------------------------------
1 | """Step eCode class"""
2 |
3 | """
4 | Copyright (c) 2022, EPFL/Blue Brain Project
5 |
6 | This file is part of BluePyEfe
7 |
8 | This library is free software; you can redistribute it and/or modify it under
9 | the terms of the GNU Lesser General Public License version 3.0 as published
10 | by the Free Software Foundation.
11 |
12 | This library is distributed in the hope that it will be useful, but WITHOUT
13 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
14 | FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
15 | details.
16 |
17 | You should have received a copy of the GNU Lesser General Public License
18 | along with this library; if not, write to the Free Software Foundation, Inc.,
19 | 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
20 | """
21 | import logging
22 | import numpy
23 |
24 | from ..recording import Recording
25 | from .tools import base_current
26 | from .tools import scipy_signal2d
27 |
28 | logger = logging.getLogger(__name__)
29 |
30 |
31 | class Step(Recording):
32 |
33 | """Step current stimulus
34 |
35 | .. code-block:: none
36 |
37 | hypamp hypamp+amp hypamp
38 | : : :
39 | : : :
40 | : ______________________ :
41 | : | | :
42 | : | | :
43 | : | | :
44 | : | | :
45 | |__________________| |______________________
46 | ^ ^ ^ ^
47 | : : : :
48 | : : : :
49 | t=0 ton toff tend
50 | """
51 |
52 | def __init__(
53 | self,
54 | config_data,
55 | reader_data,
56 | protocol_name="step",
57 | efel_settings=None
58 | ):
59 |
60 | super(Step, self).__init__(config_data, reader_data, protocol_name)
61 |
62 | self.ton = None
63 | self.toff = None
64 | self.tend = None
65 | self.dt = None
66 |
67 | self.amp_rel = None
68 | self.hypamp_rel = None
69 |
70 | if self.t is not None and self.current is not None:
71 | self.interpret(
72 | self.t, self.current, self.config_data, self.reader_data
73 | )
74 |
75 | if self.voltage is not None:
76 | self.set_autothreshold()
77 | self.compute_spikecount(efel_settings)
78 |
79 | self.export_attr = ["ton", "toff", "tend", "amp", "hypamp", "dt",
80 | "amp_rel", "hypamp_rel"]
81 |
82 | def get_stimulus_parameters(self):
83 | """Returns the eCode parameters"""
84 | ecode_params = {
85 | "delay": self.ton,
86 | "amp": self.amp,
87 | "thresh_perc": self.amp_rel,
88 | "duration": self.toff - self.ton,
89 | "totduration": self.tend,
90 | }
91 | return ecode_params
92 |
93 | def interpret(self, t, current, config_data, reader_data):
94 | """Analyse a current with a step and extract from it the parameters
95 | needed to reconstruct the array"""
96 | self.dt = t[1]
97 |
98 | # Smooth the current
99 | smooth_current = None
100 |
101 | # Set the threshold to detect the step
102 | noise_level = numpy.std(numpy.concatenate((self.current[:50], self.current[-50:])))
103 | step_threshold = numpy.max([4.5 * noise_level, 1e-5])
104 |
105 |         # The buffer prevents mis-detection of the step when artifacts are
106 | # present at the very start or very end of the current trace
107 | buffer_detect = 2.0
108 | idx_buffer = int(buffer_detect / self.dt)
109 | idx_buffer = max(1, idx_buffer)
110 |
111 | if "ton" in config_data and config_data["ton"] is not None:
112 | self.ton = int(round(config_data["ton"] / self.dt))
113 | elif "ton" in reader_data and reader_data["ton"] is not None:
114 | self.ton = int(round(reader_data["ton"]))
115 | else:
116 | self.ton = None
117 |
118 | # toff (index, not ms)
119 | if "toff" in config_data and config_data["toff"] is not None:
120 | self.toff = int(round(config_data["toff"] / self.dt))
121 | elif "toff" in reader_data and reader_data["toff"] is not None:
122 | self.toff = int(round(reader_data["toff"]))
123 | else:
124 | self.toff = None
125 |
126 | # Infer the begin and end of the step current
127 | if self.ton is None:
128 | if self.hypamp is None:
129 | self.hypamp = base_current(current)
130 | if smooth_current is None:
131 | smooth_current = scipy_signal2d(current, 85)
132 | _ = numpy.abs(smooth_current[idx_buffer:] - self.hypamp)
133 | self.ton = idx_buffer + numpy.argmax(_ > step_threshold)
134 | elif self.hypamp is None:
135 | # Infer the base current hypamp
136 | self.hypamp = base_current(current, idx_ton=self.ton)
137 |
138 | if self.toff is None:
139 | if smooth_current is None:
140 | smooth_current = scipy_signal2d(current, 85)
141 | _ = numpy.flip(
142 | numpy.abs(smooth_current[:-idx_buffer] - self.hypamp)
143 | )
144 | self.toff = (
145 | (len(current) - numpy.argmax(_ > step_threshold)) - 1 - idx_buffer
146 | )
147 |
148 | # Get the amplitude of the step current (relative to hypamp)
149 | if self.amp is None:
150 | self.amp = (
151 | numpy.median(current[self.ton : self.toff]) - self.hypamp
152 | )
153 |
154 | # Converting back ton and toff to ms
155 | self.ton = t[int(round(self.ton))]
156 | self.toff = t[int(round(self.toff))]
157 |
158 | self.tend = len(t) * self.dt
159 |
160 | # Check for some common step detection failures when the current
161 | # is constant.
162 | if self.ton >= self.toff or self.ton >= self.tend or \
163 | self.toff > self.tend:
164 |
165 | self.ton = 0.
166 | self.toff = self.tend
167 |
168 | logger.warning(
169 | "The automatic step detection failed for the recording "
170 | f"{self.protocol_name} in files {self.files}. You should "
171 | "specify ton and toff by hand in your files_metadata "
172 | "for this file."
173 | )
174 |
175 | def generate(self):
176 | """Generate the step current array from the parameters of the ecode"""
177 | ton_idx = int(self.ton / self.dt)
178 | toff_idx = int(self.toff / self.dt)
179 |
180 | t = numpy.arange(0.0, self.tend, self.dt)
181 | current = numpy.full(t.shape, numpy.float64(self.hypamp))
182 | current[ton_idx:toff_idx] += numpy.float64(self.amp)
183 |
184 | return t, current
185 |
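The automatic step detection in `interpret()` estimates the noise level from the first and last 50 samples, sets a detection threshold at max(4.5 * noise, 1e-5), and takes the first sample at which the smoothed current deviates from the holding current by more than that threshold. A self-contained sketch of the same idea on a fabricated trace:

.. code-block:: python

    import numpy
    from scipy.ndimage import median_filter

    dt = 0.1  # ms, example sampling step
    current = numpy.zeros(10000)
    current[2500:7000] = 0.2  # synthetic 0.2 nA step starting at 250 ms
    current += numpy.random.normal(0.0, 0.005, current.size)  # measurement noise

    noise_level = numpy.std(numpy.concatenate((current[:50], current[-50:])))
    step_threshold = max(4.5 * noise_level, 1e-5)
    hypamp = numpy.median(current[:300])  # holding current estimate
    smooth_current = median_filter(current, size=85)
    ton_idx = numpy.argmax(numpy.abs(smooth_current - hypamp) > step_threshold)
    print("step detected around t =", ton_idx * dt, "ms")  # close to 250 ms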
--------------------------------------------------------------------------------
/bluepyefe/ecode/tools.py:
--------------------------------------------------------------------------------
1 | """Tools shared by all eCodes"""
2 |
3 | """
4 | Copyright (c) 2022, EPFL/Blue Brain Project
5 |
6 | This file is part of BluePyEfe
7 |
8 | This library is free software; you can redistribute it and/or modify it under
9 | the terms of the GNU Lesser General Public License version 3.0 as published
10 | by the Free Software Foundation.
11 |
12 | This library is distributed in the hope that it will be useful, but WITHOUT
13 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
14 | FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
15 | details.
16 |
17 | You should have received a copy of the GNU Lesser General Public License
18 | along with this library; if not, write to the Free Software Foundation, Inc.,
19 | 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
20 | """
21 | import numpy
22 | from scipy.ndimage import median_filter
23 |
24 |
25 | def scipy_signal2d(data, width):
26 | return median_filter(data, size=width).tolist()
27 |
28 |
29 | def base_current(current, idx_ton=300):
30 | """Compute the base current from the first few points of the current
31 | array"""
32 |
33 | # Get the base current hypamp
34 | upper_lim = min(idx_ton, len(current))
35 | smooth_current = scipy_signal2d(current[:upper_lim], 85)
36 | return numpy.median(smooth_current)
37 |
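A short usage sketch of the two helpers on a synthetic trace (assuming bluepyefe is installed; the trace values are made up):

.. code-block:: python

    import numpy
    from bluepyefe.ecode.tools import base_current, scipy_signal2d

    current = numpy.full(5000, -0.05)  # holding current of -0.05 nA
    current[1000:4000] += 0.15         # step starting after the baseline window

    print(base_current(current))            # median of the smoothed first 300 samples: -0.05
    print(scipy_signal2d(current, 85)[:3])  # median-filtered copy, returned as a list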
--------------------------------------------------------------------------------
/bluepyefe/igorpy/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 | Copyright (c) 2020, EPFL/Blue Brain Project
3 |
4 | This file is part of BluePyEfe
5 |
6 | This library is free software; you can redistribute it and/or modify it under
7 | the terms of the GNU Lesser General Public License version 3.0 as published
8 | by the Free Software Foundation.
9 |
10 | This library is distributed in the hope that it will be useful, but WITHOUT
11 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
12 | FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
13 | details.
14 |
15 | You should have received a copy of the GNU Lesser General Public License
16 | along with this library; if not, write to the Free Software Foundation, Inc.,
17 | 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
18 | """
19 |
20 |
21 | import re
22 |
23 | import numpy as np
24 | from igor2 import binarywave
25 |
26 | from io import StringIO
27 |
28 |
29 | def _bytes_to_str(bytes_):
30 | """concatenates an iterable of bytes to a str
31 |
32 | Args:
33 | bytes_(numpy.ndarray): array of bytes
34 |
35 | Returns:
36 | str of concatenated bytes with omitted `x00` bytes
37 | """
38 | return "".join(bytes_[bytes_ != b""].astype(str))
39 |
40 |
41 | class IgorHeader(object):
42 | """Header metaclass information."""
43 |
44 | def __init__(self, version, content):
45 | header = content["wave_header"]
46 | self.bname = header["bname"].decode("utf-8")
47 | self.dUnits = _bytes_to_str(header["dataUnits"])
48 | self.npnts = header["npnts"]
49 | self.wavenotes = content["note"].decode("utf-8")
50 |
51 | if version == 5:
52 | self.xUnits = _bytes_to_str(header["dimUnits"])
53 | self.dx = header["sfA"][0]
54 | self.next = header["next"]
55 | self.creationDate = header["creationDate"]
56 | self.modDate = header["modDate"]
57 | self.sfA = header["sfA"].astype(np.double)
58 | self.dimUnits = header["dimUnits"].astype(str)
59 | self.fsValid = header["fsValid"]
60 | self.topFullScale = header["topFullScale"]
61 | self.botFullScale = header["botFullScale"]
62 | self.dataEUnits = header["dataEUnits"]
63 | self.dimEUnits = header["dimEUnits"]
64 | self.dimLabels = header["dimLabels"]
65 | self.waveNoteH = header["waveNoteH"]
66 | # self.platform = header['platform']
67 | else:
68 | self.xUnits = _bytes_to_str(header["xUnits"])
69 | self.dx = header["hsA"]
70 |
71 |
72 | def read_wave_notes(wavenotes):
73 | """parse wavenotes to collect them into a dict
74 |
75 | Args:
76 | wavenotes(str): string of wavenotes
77 |
78 | Returns:
79 | dict of wavenotes
80 | """
81 | wavenotes = dict(re.findall("(.+?):(.+?);", wavenotes))
82 | return wavenotes
83 |
84 |
85 | def read_from_binary(content):
86 | """Reads Igor's (Wavemetric) binary wave format represented as
87 | `content` string. Basically it applies `read_from_handle` to `content`
88 | wrapped into a file handler.
89 |
90 | Args:
91 | content(str): string
92 |
93 | Returns:
94 | see `read_from_handle` output
95 | """
96 | file_handle = StringIO(content)
97 | return read_from_handle(file_handle)
98 |
99 |
100 | def read(filename):
101 | """Reads Igor's (Wavemetric) binary wave format from a file under
102 | `filename` path.
103 |
104 | Args:
105 | filename(str):
106 |
107 | Returns:
108 | see `read_from_handle` output
109 | """
110 |
111 | with open(filename, "rb") as f:
112 | return read_from_handle(f)
113 |
114 |
115 | def read_from_handle(f):
116 | """Reads Igor's (Wavemetric) binary wave format, .ibw or .bwav, files.
117 |
118 | Args:
119 | f(file): file handle
120 |
121 | Returns:
122 | A tuple of (headerType instance, numpy vector) where `headerType
123 | instance` contains a meta info about the wave and `numpy vector`
124 | contains wave data. `numpy vector` is writeable.
125 | """
126 |
127 | data = binarywave.load(f)
128 | version = data["version"]
129 | assert version in (2, 3, 5), (
130 | "Fileversion is '" + str(version) + "', not supported"
131 | )
132 |
133 | content = data["wave"]
134 | wdata = np.copy(content["wData"])
135 | header = IgorHeader(version, content)
136 | return header, wdata
137 |
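A minimal reading example using one of the repository's test traces (run from the repository root; the exact header content depends on the file):

.. code-block:: python

    from bluepyefe import igorpy

    header, data = igorpy.read("tests/exp_data/B95_Ch0_IDRest_107.ibw")
    print(header.bname, header.dUnits, header.dx)  # wave name, data unit, sampling step
    print(data.shape)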
--------------------------------------------------------------------------------
/bluepyefe/protocol.py:
--------------------------------------------------------------------------------
1 | """Protocol class"""
2 |
3 | """
4 | Copyright (c) 2022, EPFL/Blue Brain Project
5 |
6 | This file is part of BluePyEfe
7 |
8 | This library is free software; you can redistribute it and/or modify it under
9 | the terms of the GNU Lesser General Public License version 3.0 as published
10 | by the Free Software Foundation.
11 |
12 | This library is distributed in the hope that it will be useful, but WITHOUT
13 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
14 | FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
15 | details.
16 |
17 | You should have received a copy of the GNU Lesser General Public License
18 | along with this library; if not, write to the Free Software Foundation, Inc.,
19 | 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
20 | """
21 |
22 | import numpy
23 | import logging
24 |
25 | from bluepyefe.ecode import eCodes
26 |
27 | logger = logging.getLogger(__name__)
28 |
29 |
30 | class Protocol():
31 |
32 | """Protocol informs about the current stimulus that was used to obtain
33 | efeatures at a given amplitude for a given protocol name. This class
34 | is mainly used to produce a description of the experimental protocol
35 | that can be used in BluePyOpt"""
36 |
37 | def __init__(
38 | self,
39 | name,
40 | amplitude,
41 | tolerance,
42 | feature_targets=None,
43 | global_rheobase=None,
44 | mode="mean",
45 | ):
46 | """Constructor
47 |
48 | Args:
49 | name (str): name of the protocol (ex: 'APWaveform')
50 | amplitude (float): amplitude of the current stimuli for the
51 | present protocol (expressed as a percentage of the
52 | threshold amplitude or in absolute current depending on the
53 | setting absolute_amplitude)
54 | tolerance (float): tolerance around the target amplitude in which
55 | an experimental recording will be seen as a hit during
56 | efeatures extraction (expressed as a percentage of the
57 | threshold amplitude or in absolute current depending on the
58 | setting absolute_amplitude)
59 | feature_targets (list): list of EFeatureTarget associated to the
60 | protocol
61 | global_rheobase (float): average rheobase across all cells
62 |             mode (str): if the protocol matches several recordings, the mode
63 |                 sets the logic of how the output will be generated. Must be
64 |                 'mean', 'median', 'min' or 'max'
65 | """
66 |
67 | self.name = name
68 | self.amplitude = amplitude
69 | self.tolerance = tolerance
70 |
71 | self.feature_targets = feature_targets
72 | if self.feature_targets is None:
73 | self.feature_targets = []
74 |
75 | self.global_rheobase = global_rheobase
76 | self.mode = mode
77 |
78 | self.recordings = []
79 |
80 | @property
81 | def stimulus_name(self):
82 | """Name of the stimulus associated to the protocol"""
83 |
84 | return f"{self.name}_{self.amplitude}"
85 |
86 | @property
87 | def n_match(self):
88 | """Number of recordings whose amplitude matched the present protocol"""
89 |
90 | return sum([f.sample_size for f in self.feature_targets])
91 |
92 | @property
93 | def ecode(self):
94 | """Create a temporary eCode that matches all the recordings for the
95 | present protocol. The eCode's parameters are computed differently
96 | depending on the mode of the protocol"""
97 |
98 | if not self.recordings:
99 | return None
100 |
101 | for ecode in eCodes.keys():
102 | if ecode.lower() in self.name.lower():
103 | ecode = eCodes[ecode]({}, {}, self.name)
104 | break
105 | else:
106 | raise KeyError(
107 | "There is no eCode linked to the stimulus name {}. See "
108 | "ecode/__init__.py for the available stimuli names"
109 | "".format(self.name.lower())
110 | )
111 |
112 | if self.mode == "mean":
113 | self.reduce_ecode(ecode, operator=numpy.nanmean)
114 | elif self.mode == "median":
115 | self.reduce_ecode(ecode, operator=numpy.nanmedian)
116 | elif self.mode == "min":
117 | self.reduce_ecode(ecode, operator=numpy.nanmin)
118 | elif self.mode == "max":
119 | self.reduce_ecode(ecode, operator=numpy.nanmax)
120 | else:
121 |             raise ValueError("'mode' should be 'mean', 'median', 'min' or 'max'")
122 |
123 | return ecode
124 |
125 | def append(self, recording):
126 | """Append a Recording to the present protocol"""
127 |
128 | for i, target in enumerate(self.feature_targets):
129 | if target.efeature_name in recording.efeatures:
130 | self.feature_targets[i].append(
131 | recording.efeatures[target.efeature_name],
132 | recording.files
133 | )
134 |
135 | if (
136 | recording.auto_threshold is not None and
137 | "Threshold" not in self.feature_targets[i].efel_settings
138 | ):
139 | self.feature_targets[i]._auto_thresholds.append(
140 | recording.auto_threshold)
141 |
142 | self.recordings.append(recording)
143 |
144 | def as_dict(self):
145 | """Returns a dictionary that defines the present protocol. This
146 | definition is computed differently depending on the mode of the
147 | protocol
148 | """
149 |
150 | return {
151 | "holding": {
152 | "delay": 0.0,
153 | "amp": self.ecode.hypamp,
154 | "duration": self.ecode.tend,
155 | "totduration": self.ecode.tend,
156 | },
157 | "step": self.ecode.get_stimulus_parameters(),
158 | }
159 |
160 | def reduce_ecode(self, ecode, operator):
161 | """Creates an eCode defined from the parameters of all the recordings
162 | matching the present protocol"""
163 |
164 | if not self.recordings:
165 | logger.warning(
166 | "Could not compute average ecode for protocol {} target {} "
167 | "because it didn't match any recordings".format(
168 | self.name, self.amplitude
169 | )
170 | )
171 | return None
172 |
173 | params = [r.get_params() for r in self.recordings]
174 |
175 | if self.global_rheobase is None and "amp" in params[0]:
176 | logger.warning(
177 | "No global threshold amplitude passed. This can result"
178 | " in inconsistencies in-between protocols if some cells "
179 | "only matched a subset of the targets."
180 | )
181 |
182 | for key in params[0]:
183 |
184 | if isinstance(params[0][key], (list, numpy.ndarray)):
185 | logger.warning(
186 | "Parameter {} for protocol {} is a list and cannot be "
187 | "averaged across recordings".format(key, self.name)
188 | )
189 | setattr(ecode, key, params[0][key])
190 | continue
191 |
192 | if key == "amp" and self.global_rheobase:
193 | amp_rel = operator([c["amp_rel"] for c in params])
194 | mean_param = float(amp_rel) * self.global_rheobase / 100.
195 | elif key == "amp2" and self.global_rheobase:
196 | amp_rel2 = operator([c["amp2_rel"] for c in params])
197 | mean_param = float(amp_rel2) * self.global_rheobase / 100.
198 | else:
199 | mean_param = operator([numpy.nan if c[key] is None else c[key] for c in params])
200 |
201 | if numpy.isnan(mean_param):
202 | mean_param = None
203 |
204 | setattr(ecode, key, mean_param)
205 |
206 | return ecode
207 |
208 | def __str__(self):
209 | """String representation"""
210 |
211 | str_form = "Protocol {} {:.1f}%:\n".format(
212 | self.name, self.amplitude
213 | )
214 |
215 | str_form += "Number of matching recordings: {}".format(self.n_match)
216 |
217 | if self.n_match:
218 |             str_form += "\neCode: {}\n".format(self.as_dict())
219 |
220 | return str_form
221 |
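A small construction example; the protocol name, amplitude and tolerance below are arbitrary values, and `as_dict()` only becomes meaningful once recordings have been appended:

.. code-block:: python

    from bluepyefe.protocol import Protocol

    protocol = Protocol(name="IDRest", amplitude=150.0, tolerance=10.0)
    print(protocol.stimulus_name)  # IDRest_150.0
    print(protocol.n_match)        # 0: no feature targets or recordings yet
    print(protocol)                # short string summary of the protocol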
--------------------------------------------------------------------------------
/bluepyefe/reader.py:
--------------------------------------------------------------------------------
1 | """Trace reader functions"""
2 |
3 | """
4 | Copyright (c) 2022, EPFL/Blue Brain Project
5 |
6 | This file is part of BluePyEfe
7 |
8 | This library is free software; you can redistribute it and/or modify it under
9 | the terms of the GNU Lesser General Public License version 3.0 as published
10 | by the Free Software Foundation.
11 |
12 | This library is distributed in the hope that it will be useful, but WITHOUT
13 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
14 | FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
15 | details.
16 |
17 | You should have received a copy of the GNU Lesser General Public License
18 | along with this library; if not, write to the Free Software Foundation, Inc.,
19 | 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
20 | """
21 | import logging
22 | import h5py
23 | import numpy
24 | import scipy.io
25 | from neo import io
26 | import os
27 |
28 | from . import igorpy
29 | from .nwbreader import BBPNWBReader, ScalaNWBReader, AIBSNWBReader
30 |
31 | logger = logging.getLogger(__name__)
32 |
33 |
34 | def _check_metadata(metadata, reader_name, required_entries=[]):
35 |
36 | for entry in required_entries:
37 |
38 | if entry not in metadata:
39 |
40 | raise KeyError(
41 | "The trace reader {} expects the metadata {}. The "
42 | "entry {} was not provided.".format(
43 | reader_name, ", ".join(e for e in required_entries), entry
44 | )
45 | )
46 |
47 |
48 | def axon_reader(in_data):
49 | """Reader to read .abf
50 |
51 | Args:
52 | in_data (dict): of the format
53 |
54 | .. code-block:: python
55 |
56 | {
57 | "filepath": "./XXX.abf",
58 | "i_unit": "pA",
59 | "t_unit": "s",
60 | "v_unit": "mV"
61 | }
62 | """
63 |
64 | fp = in_data["filepath"]
65 | r = io.AxonIO(filename=fp)
66 | bl = r.read_block(lazy=False)
67 |
68 | data = []
69 | for trace in bl.segments:
70 |
71 | dt = 1.0 / int(trace.analogsignals[0].sampling_rate)
72 | np_trace = numpy.asarray(trace.analogsignals)
73 |
74 | if np_trace.shape[0] == 2:
75 | v = np_trace[0, :]
76 | c = np_trace[1, :]
77 | elif np_trace.shape[-1] == 2:
78 | v = np_trace[:, :, 0]
79 | c = np_trace[:, :, 1]
80 | else:
81 | raise Exception(f"Unknown .abf format for file {fp}. Maybe "
82 | "it does not have current data?")
83 |
84 | data.append({
85 | "voltage": numpy.asarray(v).flatten(),
86 | "current": numpy.asarray(c).flatten(),
87 | "dt": dt
88 | })
89 |
90 | return data
91 |
92 |
93 | def igor_reader(in_data):
94 | """Reader to read old .ibw
95 |
96 | Args:
97 | in_data (dict): of the format
98 |
99 | .. code-block:: python
100 |
101 | {
102 | 'i_file': './XXX.ibw',
103 | 'v_file': './XXX.ibw',
104 | 'v_unit': 'V',
105 | 't_unit': 's',
106 | 'i_unit': 'A'
107 | }
108 | """
109 |
110 | _check_metadata(
111 | in_data, igor_reader.__name__, ["v_file", "i_file", "t_unit"]
112 | )
113 |
114 | # Read file
115 | notes_v, voltage = igorpy.read(in_data["v_file"])
116 | notes_i, current = igorpy.read(in_data["i_file"])
117 |
118 | if "A" in notes_v.dUnits and "V" in notes_i.dUnits:
119 |
120 | logger.warning(
121 | "It seems that the i_file and v_file are reversed for file: "
122 | "{}".format(in_data["v_file"])
123 | )
124 |
125 | voltage, current = current, voltage
126 | notes_v, notes_i = notes_i, notes_v
127 |
128 | # Extract data
129 | trace_data = {}
130 | trace_data["voltage"] = numpy.asarray(voltage)
131 | trace_data["v_unit"] = str(notes_v.dUnits).replace(" ", "")
132 | trace_data["dt"] = notes_v.dx
133 | trace_data["current"] = numpy.asarray(current)
134 | trace_data["i_unit"] = str(notes_i.dUnits).replace(" ", "")
135 |
136 | return [trace_data]
137 |
138 |
139 | def read_matlab(in_data):
140 | """To read .mat from http://gigadb.org/dataset/100535
141 |
142 | Args:
143 | in_data (dict): of the format
144 |
145 | .. code-block:: python
146 |
147 | {
148 | 'filepath': './161214_AL_113_CC.mat',
149 | 'ton': 2000,
150 | 'toff': 2500,
151 | 'v_unit': 'V',
152 | 't_unit': 's',
153 | 'i_unit': 'A'
154 | }
155 | """
156 |
157 | _check_metadata(
158 | in_data,
159 | read_matlab.__name__,
160 | ["filepath", "i_unit", "v_unit", "t_unit"],
161 | )
162 |
163 | r = scipy.io.loadmat(in_data["filepath"])
164 |
165 | data = []
166 | for k, v in r.items():
167 |
168 | if "Trace" in k and k[-1] == "1":
169 |
170 | trace_data = {
171 | "current": v[:, 1],
172 | "voltage": r[k[:-1] + "2"][:, 1],
173 | "dt": v[1, 0],
174 | }
175 |
176 | data.append(trace_data)
177 |
178 | return data
179 |
180 |
181 | def nwb_reader(in_data):
182 | """Reader for .nwb
183 |
184 | Args:
185 | in_data (dict): of the format
186 |
187 | .. code-block:: python
188 |
189 | {
190 | 'filepath': './XXX.nwb',
191 | "protocol_name": "IV",
192 | "repetition": 1 (or [1, 3, ...]) # Optional
193 | }
194 | """
195 |
196 | _check_metadata(
197 | in_data,
198 | nwb_reader.__name__,
199 | ["filepath", "protocol_name"],
200 | )
201 |
202 | target_protocols = in_data['protocol_name']
203 | if isinstance(target_protocols, str):
204 | target_protocols = [target_protocols]
205 |
206 | with h5py.File(in_data["filepath"], "r") as content:
207 | if "data_organization" in content:
208 | reader = BBPNWBReader(
209 | content=content,
210 | target_protocols=target_protocols,
211 | v_file=in_data.get("v_file", None),
212 | repetition=in_data.get("repetition", None),
213 | )
214 | elif "timeseries" in content["acquisition"].keys():
215 | reader = AIBSNWBReader(content, target_protocols)
216 | else:
217 | reader = ScalaNWBReader(content, target_protocols, repetition=in_data.get("repetition", None))
218 |
219 | data = reader.read()
220 |
221 | return data
222 |
223 |
224 | def csv_lccr_reader(in_data):
225 | """Reader to read .txt (csv_lccr)
226 |
227 | Args:
228 | in_data (dict): of the format:
229 |
230 | .. code-block:: python
231 |
232 | {
233 | 'filepath': "./XXX.txt",
234 | 'dt': 0.1,
235 | 'ton': 2000,
236 | 'toff': 2500,
237 | 'ljp': 14.0,
238 |                 'amplitudes': [10, -10, 20, -20, 30, -30, 40, -40, 50, -50],
239 | 'hypamp': -20 # (units should match 'amplitudes'),
240 | 'remove_last_100ms': True,
241 | 'v_unit': 'mV',
242 | 't_unit': 'ms',
243 | 'i_unit': 'pA' # current unit for 'amplitudes' and 'hypamp'
244 | }
245 | """
246 | _check_metadata(
247 | in_data,
248 | csv_lccr_reader.__name__,
249 | ["filepath", "dt", "amplitudes", "v_unit", "t_unit", "i_unit", "ton", "toff", "hypamp"],
250 | )
251 |
252 | data = []
253 |
254 | fln = os.path.join(in_data['filepath'])
255 | if not os.path.isfile(fln):
256 | raise FileNotFoundError(
257 | "Please provide a string with the filename of the txt file; "
258 | f"current path not found: {fln}"
259 | )
260 |
261 | dt = in_data['dt']
262 | ton = in_data['ton']
263 | toff = in_data['toff']
264 | amplitudes = in_data['amplitudes']
265 | hypamp = in_data['hypamp']
266 |
267 | import csv
268 | with open(fln, 'rt') as f:
269 | reader = csv.reader(f, delimiter='\t')
270 | columns = list(zip(*reader))
271 | length = numpy.shape(columns)[1]
272 |
273 | voltages = numpy.array([
274 | [
275 | float(string) if string not in ["-", ""] else 0
276 | for string in column
277 | ]
278 | for column in columns
279 | ])
280 | t = numpy.arange(length) * dt
281 |
282 | # Remove last 100 ms if needed
283 | if in_data.get('remove_last_100ms', False):
284 | slice_end = int(-100. / dt)
285 | voltages = voltages[:, :slice_end]
286 | t = t[:slice_end]
287 |
288 | for amplitude, voltage in zip(amplitudes, voltages):
289 | current = numpy.zeros_like(voltage)
290 | ion, ioff = int(ton / dt), int(toff / dt)
291 | current[:] = hypamp
292 | current[ion:ioff] = amplitude + hypamp
293 | trace_data = {
294 | "filename": os.path.basename(in_data['filepath']),
295 | "current": current,
296 | "voltage": voltage,
297 | "t": t,
298 | "dt": numpy.float64(dt),
299 | "ton": numpy.float64(ton),
300 | "toff": numpy.float64(toff),
301 | "amp": numpy.float64(amplitude),
302 | "hypamp": numpy.float64(hypamp),
303 | "ljp": in_data.get('ljp', 0),
304 | "i_unit": in_data['i_unit'],
305 | "v_unit": in_data['v_unit'],
306 | "t_unit": in_data['t_unit'],
307 | }
308 |
309 | data.append(trace_data)
310 |
311 | return data
312 |
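An example call to `igor_reader`, following the metadata format shown in its docstring and pointing at a pair of the repository's test files. Which of the two channels holds voltage and which holds current is assumed here from the test data layout; the reader warns and swaps them if they appear reversed:

.. code-block:: python

    from bluepyefe.reader import igor_reader

    in_data = {
        "v_file": "tests/exp_data/B95_Ch0_IDRest_107.ibw",  # assumed voltage channel
        "i_file": "tests/exp_data/B95_Ch3_IDRest_107.ibw",  # assumed current channel
        "v_unit": "V",
        "i_unit": "A",
        "t_unit": "s",
    }
    traces = igor_reader(in_data)
    print(traces[0]["dt"], traces[0]["voltage"].shape)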
--------------------------------------------------------------------------------
/bluepyefe/rheobase.py:
--------------------------------------------------------------------------------
1 | """Functions related to the computation of the rheobase"""
2 |
3 | """
4 | Copyright (c) 2022, EPFL/Blue Brain Project
5 |
6 | This file is part of BluePyEfe
7 |
8 | This library is free software; you can redistribute it and/or modify it under
9 | the terms of the GNU Lesser General Public License version 3.0 as published
10 | by the Free Software Foundation.
11 |
12 | This library is distributed in the hope that it will be useful, but WITHOUT
13 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
14 | FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
15 | details.
16 |
17 | You should have received a copy of the GNU Lesser General Public License
18 | along with this library; if not, write to the Free Software Foundation, Inc.,
19 | 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
20 | """
21 | import logging
22 | import numpy
23 |
24 | logger = logging.getLogger(__name__)
25 |
26 |
27 | def _get_list_spiking_amplitude(cell, protocols_rheobase):
28 | """Return the list of sorted list of amplitude that triggered at least
29 | one spike"""
30 |
31 | amps = []
32 | spike_counts = []
33 |
34 | for i, rec in enumerate(cell.recordings):
35 | if rec.protocol_name in protocols_rheobase:
36 | if rec.spikecount is not None:
37 |
38 | amps.append(rec.amp)
39 | spike_counts.append(rec.spikecount)
40 |
41 | if rec.amp < 0.01 and rec.spikecount >= 1:
42 | logger.warning(
43 | f"A recording of cell {cell.name} protocol "
44 | f"{rec.protocol_name} shows spikes at a "
45 | "suspiciously low current in a trace from file"
46 |                         f" {rec.files}. Check that ton and toff are"
47 |                         " correct, or check for unwanted spikes."
48 | )
49 |
50 | if amps:
51 | amps, spike_counts = zip(*sorted(zip(amps, spike_counts)))
52 |
53 | return amps, spike_counts
54 |
55 |
56 | def compute_rheobase_absolute(cell, protocols_rheobase, spike_threshold=1):
57 | """ Compute the rheobase by finding the smallest current amplitude
58 |     triggering at least spike_threshold spikes.
59 |
60 | Args:
61 | cell (Cell): cell for which to compute the rheobase
62 | protocols_rheobase (list): names of the protocols that will be
63 | used to compute the rheobase of the cells. E.g: ['IDthresh'].
64 |         spike_threshold (int): minimum number of spikes a recording must
65 |             show to be considered when computing the rheobase.
66 | """
67 |
68 | amps, spike_counts = _get_list_spiking_amplitude(cell, protocols_rheobase)
69 |
70 | for amp, spike_count in zip(amps, spike_counts):
71 | if spike_count >= spike_threshold:
72 | cell.rheobase = amp
73 | break
74 |
75 |
76 | def compute_rheobase_flush(cell, protocols_rheobase, flush_length=1, upper_bound_spikecount=None):
77 | """ Compute the rheobase by finding the smallest current amplitude that::
78 |
79 |         1. Triggers at least one spike
80 | 2. Is followed by flush_length other traces that also trigger spikes.
81 |
82 | The advantage of this strategy is that it ignores traces showing spurious
83 | spikes at low amplitudes.
84 |
85 | Args:
86 | cell (Cell): cell for which to compute the rheobase
87 | protocols_rheobase (list): names of the protocols that will be
88 | used to compute the rheobase of the cells. E.g: ['IDthresh'].
89 |         flush_length (int): number of traces that need to show spikes for
90 | the candidate trace to be considered the rheobase.
91 | upper_bound_spikecount (int): if the spikecount of a recording is higher
92 |             than this number, the recording will not trigger the start of a flush.
93 | """
94 |
95 | amps, spike_counts = _get_list_spiking_amplitude(cell, protocols_rheobase)
96 |
97 | for i, amp in enumerate(amps):
98 |
99 | # We missed the threshold
100 | if upper_bound_spikecount is not None:
101 | if spike_counts[i] > upper_bound_spikecount:
102 | break
103 |
104 | if spike_counts[i]:
105 |
106 | end_flush = min(i + 1 + flush_length, len(amps))
107 |
108 | if (
109 | numpy.count_nonzero(spike_counts[i + 1:end_flush]) == flush_length
110 | ):
111 | cell.rheobase = amp
112 | break
113 |
114 |
115 | def compute_rheobase_majority_bin(
116 | cell, protocols_rheobase, min_step=0.01, majority=0.5
117 | ):
118 | """ Compute the rheobase by finding the smallest current amplitude
119 |     triggering at least one spike in the majority (default 50%) of the
120 | recordings.
121 |
122 | Args:
123 | cell (Cell): cell for which to compute the rheobase
124 | protocols_rheobase (list): names of the protocols that will be
125 | used to compute the rheobase of the cells. E.g: ['IDthresh'].
126 | min_step (float): minimum step above which amplitudes can be
127 | considered as separate steps
128 |         majority (float): proportion of spiking sweeps within a bin required
129 |             to consider the bin's amplitude as the rheobase.
130 | """
131 |
132 | amps, spike_counts = _get_list_spiking_amplitude(cell, protocols_rheobase)
133 |
134 | bins = numpy.arange(min(amps), max(amps), min_step)
135 | bins_of_amps = numpy.digitize(amps, bins, right=False)
136 |
137 | for i, bin in enumerate(bins):
138 |
139 | spikes = [
140 | spike_counts[j] for j, idx in enumerate(bins_of_amps) if idx == i
141 | ]
142 | perc_spiking = numpy.mean([bool(s) for s in spikes])
143 |
144 | if perc_spiking >= majority:
145 | cell.rheobase = bin + (min_step / 2.)
146 | break
147 |
148 |
149 | def compute_rheobase_interpolation(cell, protocols_rheobase):
150 |     """ Compute the rheobase by fitting the reverse f-I curve (amplitude as
151 |     a function of spike count) and evaluating the fit at a spike count of one.
152 |
153 | Args:
154 | cell (Cell): cell for which to compute the rheobase
155 | protocols_rheobase (list): names of the protocols that will be
156 | used to compute the rheobase of the cells. E.g: ['IDthresh'].
157 | """
158 |
159 | amps, spike_counts = _get_list_spiking_amplitude(cell, protocols_rheobase)
160 |
161 |     # Trim leading non-spiking amplitudes, keeping the last one before spiking starts
162 | idx = next(
163 | (i for i in range(len(amps) - 1) if not spike_counts[i] and spike_counts[i + 1]),
164 | None
165 | )
166 | if idx is None:
167 | return
168 | amps = amps[idx:]
169 | spike_counts = spike_counts[idx:]
170 |
171 | if amps:
172 |
173 | try:
174 | fit = numpy.poly1d(numpy.polyfit(spike_counts, amps, deg=1))
175 |         except Exception:
176 | logger.error(
177 | f"Rheobase interpolation did not converge on cell {cell.name}"
178 | )
179 | return
180 |
181 | cell.rheobase = fit(1)
182 |
--------------------------------------------------------------------------------
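A short, self-contained sketch of the rheobase strategies defined above. The cell and its recordings are stand-in objects exposing only the attributes these helpers read (name, recordings, rheobase, amp, spikecount, protocol_name, files); a real workflow would pass bluepyefe.cell.Cell instances whose spike counts were computed during feature extraction, and "IDthresh" is just one of the preset rheobase protocol names.

from types import SimpleNamespace
from bluepyefe.rheobase import compute_rheobase_absolute, compute_rheobase_flush

def make_rec(amp, spikecount):
    # Minimal recording stand-in; 'files' and the protocol name are placeholders.
    return SimpleNamespace(protocol_name="IDthresh", amp=amp,
                           spikecount=spikecount, files=["fake.ibw"])

cell = SimpleNamespace(
    name="demo_cell",
    rheobase=None,
    recordings=[make_rec(a, s) for a, s in
                [(0.05, 0), (0.10, 0), (0.15, 1), (0.20, 0), (0.25, 3), (0.30, 5)]],
)

# Smallest amplitude with at least one spike: 0.15 here, even though the
# 0.20 trace above it does not spike.
compute_rheobase_absolute(cell, protocols_rheobase=["IDthresh"])
print("absolute:", cell.rheobase)

# The flush strategy skips 0.15 because the next trace does not spike,
# and settles on 0.25 instead.
compute_rheobase_flush(cell, protocols_rheobase=["IDthresh"], flush_length=1)
print("flush:", cell.rheobase)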
/bluepyefe/target.py:
--------------------------------------------------------------------------------
1 | """EFeatureTarget class"""
2 |
3 | """
4 | Copyright (c) 2022, EPFL/Blue Brain Project
5 |
6 | This file is part of BluePyEfe
7 |
8 | This library is free software; you can redistribute it and/or modify it under
9 | the terms of the GNU Lesser General Public License version 3.0 as published
10 | by the Free Software Foundation.
11 |
12 | This library is distributed in the hope that it will be useful, but WITHOUT
13 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
14 | FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
15 | details.
16 |
17 | You should have received a copy of the GNU Lesser General Public License
18 | along with this library; if not, write to the Free Software Foundation, Inc.,
19 | 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
20 | """
21 | import numpy
22 | import logging
23 |
24 | logger = logging.getLogger(__name__)
25 |
26 |
27 | class EFeatureTarget():
28 |
29 | """E-feature target defined by an efeature to compute for a given protocol
30 | and amplitude. Contains the resulting values"""
31 |
32 | def __init__(
33 | self,
34 | efeature_name,
35 | efel_feature_name,
36 | protocol_name,
37 | amplitude,
38 | tolerance,
39 | efel_settings=None,
40 | ):
41 | """Constructor.
42 |
43 | Args:
44 |             efeature_name (str): name of the feature (can be different from
45 | the efel_feature_name - e.g. Spikecount_phase1)
46 | efel_feature_name (str): name of the eFeature in the eFEL library
47 | (ex: 'AP1_peak')
48 | protocol_name (str): name of the recording from which the efeature
49 | should be computed
50 | amplitude (float): amplitude of the current stimuli for which the
51 | efeature should be computed (expressed as a percentage of the
52 | threshold amplitude (rheobase))
53 | tolerance (float): tolerance around the target amplitude in which
54 | an experimental recording will be seen as a hit during
55 | efeatures extraction (expressed as a percentage of the
56 | threshold amplitude (rheobase))
57 | efel_settings (dict): target specific efel settings.
58 | """
59 |
60 | self.efeature_name = efeature_name
61 | if self.efeature_name is None:
62 | self.efeature_name = efel_feature_name
63 | self.efel_feature_name = efel_feature_name
64 | self.protocol_name = protocol_name
65 |
66 | self.amplitude = amplitude
67 | self.tolerance = tolerance
68 |
69 | self.efel_settings = efel_settings
70 | if self.efel_settings is None:
71 | self.efel_settings = {}
72 |
73 | self._values = []
74 | self._files = []
75 | self._auto_thresholds = []
76 |
77 | @property
78 | def values(self):
79 | """Return all values."""
80 | return self._values
81 |
82 | @property
83 | def mean(self):
84 | """Average of the e-feature value at target"""
85 |
86 | return numpy.nanmean(self._values)
87 |
88 | @property
89 | def std(self):
90 | """Standard deviation of the e-feature value at target"""
91 |
92 | return numpy.nanstd(self._values)
93 |
94 | @property
95 | def sample_size(self):
96 |         """Number of values that matched the target"""
97 |
98 | return len(self._values)
99 |
100 | def append(self, value, files=None):
101 | """Append a feature value to the present target"""
102 |
103 | if not isinstance(value, (int, float)):
104 | raise TypeError("Expected value of type int or float")
105 |
106 | if numpy.isnan(value) or value is None:
107 | logger.info(
108 | "Trying to append {} to efeature {} for protocol {} {}. Value "
109 | "will be ignored".format(
110 | value,
111 | self.efel_feature_name,
112 | self.protocol_name,
113 | self.amplitude
114 | )
115 | )
116 | return
117 |
118 | self._values.append(value)
119 | if files:
120 | self._files += files
121 |
122 | def clear(self):
123 | """Clear the list of feature values"""
124 |
125 | self._values = []
126 | self._files = []
127 |
128 | def add_effective_threshold(self):
129 | """If auto threshold detection was used during feature extraction,
130 | update the efel settings with the Threshold that was actually used"""
131 |
132 | if self._auto_thresholds:
133 | self.efel_settings["Threshold"] = numpy.median(self._auto_thresholds)
134 |
135 | def as_dict(self, save_files_used=False, default_std_value=1e-3):
136 | """Returns the target in the form of a dictionary in a legacy format"""
137 |
138 | self.add_effective_threshold()
139 |
140 | std = self.std
141 | if std == 0.0:
142 | logger.warning(
143 |                 "Standard deviation for efeature {} stimulus {} is 0 and "
144 | "will be set to {}".format(
145 | self.efel_feature_name, self.protocol_name, default_std_value
146 | )
147 | )
148 | std = default_std_value
149 |
150 | feature_dict = {
151 | "feature": self.efel_feature_name,
152 | "val": [self.mean, std],
153 | "n": self.sample_size,
154 | "efel_settings": self.efel_settings
155 | }
156 |
157 | if self.efeature_name:
158 | feature_dict['efeature_name'] = self.efeature_name
159 | if save_files_used:
160 |             feature_dict['files'] = self._files
161 |
162 | return feature_dict
163 |
164 | def __str__(self):
165 | """String representation"""
166 |
167 | str_form = "Target E-Feature {} for protocol {} {:.1f}%:\n".format(
168 | self.efel_feature_name, self.protocol_name, self.amplitude
169 | )
170 |
171 | str_form += "Sample size (n): {}".format(self.sample_size)
172 |
173 | if self.sample_size:
174 | str_form += "\nMean: {:.5f}\nStandard deviation: {:.5f}".format(
175 | self.mean, self.std
176 | )
177 |
178 | return str_form
179 |
--------------------------------------------------------------------------------
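A sketch of how an EFeatureTarget could be populated by hand. During a real extraction run the values are appended automatically from eFEL results; the feature and protocol names, amplitudes and values below are only examples.

from bluepyefe.target import EFeatureTarget

# Target the number of spikes of an IDRest-like protocol at 150% of rheobase,
# accepting recordings within +/- 10% of that amplitude.
target = EFeatureTarget(
    efeature_name="Spikecount_150",
    efel_feature_name="Spikecount",
    protocol_name="IDRest",
    amplitude=150.0,
    tolerance=10.0,
    efel_settings={"Threshold": -20.0},
)

# Made-up feature values; extraction would normally provide these.
for value in (5.0, 6.0, 7.0):
    target.append(value)

print(target)            # mean, standard deviation and sample size
print(target.as_dict())  # legacy-format dictionary used in the output files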
/bluepyefe/tools.py:
--------------------------------------------------------------------------------
1 | """Tool and miscellaneous functions"""
2 |
3 | """
4 | Copyright (c) 2022, EPFL/Blue Brain Project
5 |
6 | This file is part of BluePyEfe
7 |
8 | This library is free software; you can redistribute it and/or modify it under
9 | the terms of the GNU Lesser General Public License version 3.0 as published
10 | by the Free Software Foundation.
11 |
12 | This library is distributed in the hope that it will be useful, but WITHOUT
13 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
14 | FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
15 | details.
16 |
17 | You should have received a copy of the GNU Lesser General Public License
18 | along with this library; if not, write to the Free Software Foundation, Inc.,
19 | 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
20 | """
21 | import json
22 | import numpy
23 | import efel
24 |
25 |
26 | DEFAULT_EFEL_SETTINGS = {
27 | 'strict_stiminterval': True,
28 | 'Threshold': -20.,
29 | 'interp_step': 0.025
30 | }
31 |
32 |
33 | PRESET_PROTOCOLS_RHEOBASE = [
34 | "IV", "Step", "FirePattern", "IDrest", "IDRest", "IDthresh", "IDThresh", "IDThres", "IDthres"
35 | ]
36 |
37 |
38 | def to_ms(t, t_unit):
39 | """Converts a time series to ms.
40 |
41 | Args:
42 | t (array): time series.
43 | t_unit (str): unit of the time series. Has to be "s", "sec",
44 | "seconds", "ms" or "10th_ms".
45 | """
46 |
47 | if t_unit.lower() in ["s", "sec", "seconds"]:
48 | return t * 1e3
49 | elif t_unit == "ms":
50 | return t
51 | elif t_unit == "10th_ms":
52 | return t * 0.1
53 | else:
54 | raise Exception("Time unit '{}' is unknown.".format(t_unit))
55 |
56 |
57 | def to_nA(current, i_unit):
58 | """Converts a current series to nA.
59 |
60 | Args:
61 | current (array): current series.
62 | i_unit (str): unit of the current series. Has to be "a", "amperes",
63 | "amps", "mA", "uA", "pA" or "nA".
64 | """
65 |
66 | if i_unit.lower() in ["a", "amperes", "amps"]:
67 | return current * 1e9
68 | elif i_unit == "mA":
69 | return current * 1e6
70 | elif i_unit == "uA":
71 | return current * 1e3
72 | elif i_unit == "pA":
73 | return current * 1e-3
74 | elif i_unit == "nA":
75 | return current
76 | else:
77 | raise Exception("Current unit '{}' is unknown.".format(i_unit))
78 |
79 |
80 | def to_mV(voltage, v_unit):
81 | """Converts a voltage series to mV.
82 |
83 | Args:
84 | voltage (array): voltage series.
85 | v_unit (str): unit of the voltage series. Has to be "v", "volts",
86 | "uV" or "mV".
87 | """
88 |
89 | if v_unit.lower() in ["v", "volts"]:
90 | return voltage * 1e3
91 | elif v_unit == "uV":
92 | return voltage * 1e-3
93 | elif v_unit == "mV":
94 | return voltage
95 | else:
96 | raise Exception("Voltage unit '{}' is unknown.".format(v_unit))
97 |
98 |
99 | def set_efel_settings(efeature_settings):
100 |     """Reset the eFEL settings and set them as requested by the user (uses
101 |     default values otherwise).
102 |
103 | Args:
104 | efeature_settings (dict): eFEL settings in the form
105 | {setting_name: setting_value}.
106 | """
107 |
108 | efel.reset()
109 |
110 | for setting, value in efeature_settings.items():
111 |
112 | if setting in ['stim_start', 'stim_end']:
113 | value = float(value)
114 |
115 | efel.set_setting(setting, value)
116 |
117 |
118 | def dict_to_json(data, path):
119 | """Save some data in a json file."""
120 |
121 | s = json.dumps(data, indent=2, cls=NumpyEncoder)
122 | with open(path, "w") as f:
123 | f.write(s)
124 |
125 |
126 | class NumpyEncoder(json.JSONEncoder):
127 |
128 | """To make Numpy arrays JSON serializable"""
129 |
130 | def default(self, obj):
131 | if isinstance(
132 | obj,
133 | (
134 | numpy.int_,
135 | numpy.intc,
136 | numpy.intp,
137 | numpy.int8,
138 | numpy.int16,
139 | numpy.int32,
140 | numpy.int64,
141 | numpy.uint8,
142 | numpy.uint16,
143 | numpy.uint32,
144 | numpy.uint64,
145 | ),
146 | ):
147 | return int(obj)
148 | elif isinstance(
149 | obj, (numpy.float16, numpy.float32, numpy.float64)
150 | ):
151 | return float(obj)
152 | elif isinstance(obj, numpy.ndarray):
153 | return obj.tolist()
154 | return json.JSONEncoder.default(self, obj)
155 |
--------------------------------------------------------------------------------
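A small sketch exercising the unit helpers and JSON utilities above. The input arrays are made up and the output file name is a placeholder.

import numpy
from bluepyefe.tools import (
    DEFAULT_EFEL_SETTINGS, dict_to_json, set_efel_settings, to_mV, to_ms, to_nA
)

t = to_ms(numpy.array([0.0, 0.1, 0.2]), "s")    # seconds -> ms
i = to_nA(numpy.array([50.0, 100.0]), "pA")     # pA -> nA
v = to_mV(numpy.array([-0.07, -0.05]), "V")     # V -> mV

# dict_to_json relies on NumpyEncoder, so numpy arrays and scalars can be
# written directly without manual conversion.
dict_to_json({"t": t, "i": i, "v": v}, "converted_traces.json")

# Reset eFEL and apply the module's default settings.
set_efel_settings(DEFAULT_EFEL_SETTINGS)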
/bluepyefe/translate_legacy_config.py:
--------------------------------------------------------------------------------
1 | """To translate config dictionaries from BluePyEfe 1 to input needed by BluePyEfe 2"""
2 |
3 | """
4 | Copyright (c) 2022, EPFL/Blue Brain Project
5 |
6 | This file is part of BluePyEfe
7 |
8 | This library is free software; you can redistribute it and/or modify it under
9 | the terms of the GNU Lesser General Public License version 3.0 as published
10 | by the Free Software Foundation.
11 |
12 | This library is distributed in the hope that it will be useful, but WITHOUT
13 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
14 | FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
15 | details.
16 |
17 | You should have received a copy of the GNU Lesser General Public License
18 | along with this library; if not, write to the Free Software Foundation, Inc.,
19 | 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
20 | """
21 |
22 | import pathlib
23 |
24 |
25 | def translate_legacy_files_metadata(config):
26 | """Translate the legacy field "cells" into the new files_metadata"""
27 |
28 | if "path" in config:
29 | ephys_path = pathlib.Path(config["path"])
30 | else:
31 | ephys_path = pathlib.Path("./")
32 |
33 | files_metadata = {}
34 | for cell_name in config["cells"]:
35 | for protocol_name in config["cells"][cell_name]["experiments"]:
36 | for file_metadata in config["cells"][cell_name]["experiments"][protocol_name]["files"]:
37 |
38 | if cell_name not in files_metadata:
39 | files_metadata[cell_name] = {}
40 | if protocol_name not in files_metadata[cell_name]:
41 | files_metadata[cell_name][protocol_name] = []
42 |
43 | if "i_file" in file_metadata:
44 | filepaths = {
45 | "i_file": str(ephys_path / file_metadata["i_file"]),
46 | "v_file": str(ephys_path / file_metadata["v_file"]),
47 | }
48 | else:
49 | filepaths = {"filepath": str(ephys_path / file_metadata["filepath"])}
50 |
51 | files_metadata[cell_name][protocol_name].append(file_metadata)
52 | files_metadata[cell_name][protocol_name][-1].update(filepaths)
53 |
54 | if protocol_name in config["options"]["onoff"]:
55 | files_metadata[cell_name][protocol_name][-1]["ton"] = config[
56 | "options"]["onoff"][protocol_name][0]
57 | files_metadata[cell_name][protocol_name][-1]["toff"] = config[
58 | "options"]["onoff"][protocol_name][1]
59 |
60 | return files_metadata
61 |
62 |
63 | def translate_legacy_targets(config):
64 | """Translate the legacy field "targets" into the new targets"""
65 |
66 | targets = []
67 |
68 | for protocol in config["features"]:
69 | for feature in config["features"][protocol]:
70 | if "spikerate" in feature:
71 | continue
72 | for amp, tol in zip(config["options"]["target"], config["options"]["tolerance"]):
73 |
74 | if amp == "all":
75 | continue
76 | if amp == "noinput":
77 | effective_amp = 0
78 | effective_tolerance = 10
79 | else:
80 | effective_amp = amp
81 | effective_tolerance = tol
82 |
83 | efel_settings = {}
84 | if "strict_stiminterval" in config["options"]:
85 | if protocol in config["options"]["strict_stiminterval"]:
86 | efel_settings["strict_stiminterval"] = config["options"][
87 | "strict_stiminterval"][protocol]
88 | elif "base" in config["options"]["strict_stiminterval"]:
89 | efel_settings["strict_stiminterval"] = config["options"][
90 | "strict_stiminterval"]["base"]
91 |
92 | targets.append(
93 | {
94 | "efeature": feature,
95 | "protocol": protocol,
96 | "amplitude": effective_amp,
97 | "tolerance": effective_tolerance,
98 | "efel_settings": efel_settings
99 | }
100 | )
101 |
102 | return targets
103 |
104 |
105 | def translate_legacy_config(config):
106 | """Translate a legacy config from BluePyEfe 1 to BluePyEfe 2"""
107 |
108 | files_metadata = translate_legacy_files_metadata(config)
109 | targets = translate_legacy_targets(config)
110 | protocols_rheobase = config["options"]["expthreshold"]
111 |
112 | rheobase_strategy = "absolute"
113 | rheobase_settings = {}
114 | if "spike_threshold" in config["options"]:
115 | rheobase_settings["spike_threshold"] = config["options"]["spike_threshold"]
116 |
117 | return {
118 | "files_metadata": files_metadata,
119 | "targets": targets,
120 | "protocols_rheobase": protocols_rheobase,
121 | "rheobase_strategy": rheobase_strategy,
122 | "rheobase_settings": rheobase_settings
123 | }
124 |
--------------------------------------------------------------------------------
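For orientation, a sketch of a BluePyEfe 1 style configuration being translated by translate_legacy_config above. The cell, file and protocol names and all numeric values are made up; only the dictionary structure matters.

from bluepyefe.translate_legacy_config import translate_legacy_config

legacy_config = {
    "path": "./exp_data",
    "cells": {
        "cell1": {
            "experiments": {
                "IDRest": {
                    "files": [
                        {"i_file": "cell1_ch0.ibw", "v_file": "cell1_ch3.ibw",
                         "i_unit": "A", "v_unit": "V", "t_unit": "s", "dt": 0.00025},
                    ]
                }
            }
        }
    },
    "features": {"IDRest": ["Spikecount", "mean_frequency"]},
    "options": {
        "target": [150, 200],
        "tolerance": [10, 10],
        "onoff": {"IDRest": [700, 2700]},
        "expthreshold": ["IDRest"],
        "strict_stiminterval": {"base": True},
        "spike_threshold": 1,
    },
}

bpe2_config = translate_legacy_config(legacy_config)
print(bpe2_config["files_metadata"]["cell1"]["IDRest"][0]["ton"])  # 700, from "onoff"
print(len(bpe2_config["targets"]))  # 4 targets: 2 features x 2 amplitudes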
/docs/Makefile:
--------------------------------------------------------------------------------
1 | # Makefile for Sphinx documentation
2 | #
3 |
4 | # You can set these variables from the command line.
5 | SPHINXOPTS =
6 | SPHINXBUILD = sphinx-build
7 | PAPER =
8 | BUILDDIR = build
9 |
10 | # User-friendly check for sphinx-build
11 | ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1)
12 | $(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/)
13 | endif
14 |
15 | # Internal variables.
16 | PAPEROPT_a4 = -D latex_paper_size=a4
17 | PAPEROPT_letter = -D latex_paper_size=letter
18 | ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source
19 | # the i18n builder cannot share the environment and doctrees with the others
20 | I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source
21 |
22 | .PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest coverage gettext
23 |
24 | help:
25 | 	@echo "Please use \`make <target>' where <target> is one of"
26 | @echo " html to make standalone HTML files"
27 | @echo " dirhtml to make HTML files named index.html in directories"
28 | @echo " singlehtml to make a single large HTML file"
29 | @echo " pickle to make pickle files"
30 | @echo " json to make JSON files"
31 | @echo " htmlhelp to make HTML files and a HTML help project"
32 | @echo " qthelp to make HTML files and a qthelp project"
33 | @echo " applehelp to make an Apple Help Book"
34 | @echo " devhelp to make HTML files and a Devhelp project"
35 | @echo " epub to make an epub"
36 | @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
37 | @echo " latexpdf to make LaTeX files and run them through pdflatex"
38 | @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx"
39 | @echo " text to make text files"
40 | @echo " man to make manual pages"
41 | @echo " texinfo to make Texinfo files"
42 | @echo " info to make Texinfo files and run them through makeinfo"
43 | @echo " gettext to make PO message catalogs"
44 | @echo " changes to make an overview of all changed/added/deprecated items"
45 | @echo " xml to make Docutils-native XML files"
46 | @echo " pseudoxml to make pseudoxml-XML files for display purposes"
47 | @echo " linkcheck to check all external links for integrity"
48 | @echo " doctest to run all doctests embedded in the documentation (if enabled)"
49 | @echo " coverage to run coverage check of the documentation (if enabled)"
50 |
51 | clean:
52 | rm -rf $(BUILDDIR)/*
53 |
54 | html:
55 | $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
56 | @echo
57 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
58 |
59 | dirhtml:
60 | $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
61 | @echo
62 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
63 |
64 | singlehtml:
65 | $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
66 | @echo
67 | @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
68 |
69 | pickle:
70 | $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
71 | @echo
72 | @echo "Build finished; now you can process the pickle files."
73 |
74 | json:
75 | $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
76 | @echo
77 | @echo "Build finished; now you can process the JSON files."
78 |
79 | htmlhelp:
80 | $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
81 | @echo
82 | @echo "Build finished; now you can run HTML Help Workshop with the" \
83 | ".hhp project file in $(BUILDDIR)/htmlhelp."
84 |
85 | qthelp:
86 | $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
87 | @echo
88 | @echo "Build finished; now you can run "qcollectiongenerator" with the" \
89 | ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
90 | @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/eFEL.qhcp"
91 | @echo "To view the help file:"
92 | @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/eFEL.qhc"
93 |
94 | applehelp:
95 | $(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) $(BUILDDIR)/applehelp
96 | @echo
97 | @echo "Build finished. The help book is in $(BUILDDIR)/applehelp."
98 | @echo "N.B. You won't be able to view it unless you put it in" \
99 | "~/Library/Documentation/Help or install it in your application" \
100 | "bundle."
101 |
102 | devhelp:
103 | $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
104 | @echo
105 | @echo "Build finished."
106 | @echo "To view the help file:"
107 | @echo "# mkdir -p $$HOME/.local/share/devhelp/eFEL"
108 | @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/eFEL"
109 | @echo "# devhelp"
110 |
111 | epub:
112 | $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
113 | @echo
114 | @echo "Build finished. The epub file is in $(BUILDDIR)/epub."
115 |
116 | latex:
117 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
118 | @echo
119 | @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
120 | @echo "Run \`make' in that directory to run these through (pdf)latex" \
121 | "(use \`make latexpdf' here to do that automatically)."
122 |
123 | latexpdf:
124 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
125 | @echo "Running LaTeX files through pdflatex..."
126 | $(MAKE) -C $(BUILDDIR)/latex all-pdf
127 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
128 |
129 | latexpdfja:
130 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
131 | @echo "Running LaTeX files through platex and dvipdfmx..."
132 | $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja
133 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
134 |
135 | text:
136 | $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
137 | @echo
138 | @echo "Build finished. The text files are in $(BUILDDIR)/text."
139 |
140 | man:
141 | $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
142 | @echo
143 | @echo "Build finished. The manual pages are in $(BUILDDIR)/man."
144 |
145 | texinfo:
146 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
147 | @echo
148 | @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
149 | @echo "Run \`make' in that directory to run these through makeinfo" \
150 | "(use \`make info' here to do that automatically)."
151 |
152 | info:
153 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
154 | @echo "Running Texinfo files through makeinfo..."
155 | make -C $(BUILDDIR)/texinfo info
156 | @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
157 |
158 | gettext:
159 | $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
160 | @echo
161 | @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
162 |
163 | changes:
164 | $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
165 | @echo
166 | @echo "The overview file is in $(BUILDDIR)/changes."
167 |
168 | linkcheck:
169 | $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
170 | @echo
171 | @echo "Link check complete; look for any errors in the above output " \
172 | "or in $(BUILDDIR)/linkcheck/output.txt."
173 |
174 | doctest:
175 | $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
176 | @echo "Testing of doctests in the sources finished, look at the " \
177 | "results in $(BUILDDIR)/doctest/output.txt."
178 |
179 | coverage:
180 | $(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage
181 | @echo "Testing of coverage in the sources finished, look at the " \
182 | "results in $(BUILDDIR)/coverage/python.txt."
183 |
184 | xml:
185 | $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml
186 | @echo
187 | @echo "Build finished. The XML files are in $(BUILDDIR)/xml."
188 |
189 | pseudoxml:
190 | $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml
191 | @echo
192 | @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml."
193 |
--------------------------------------------------------------------------------
/docs/source/_static/bbp.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BlueBrain/BluePyEfe/11a1e0c59e2541362e22279f6c78edbff44db975/docs/source/_static/bbp.jpg
--------------------------------------------------------------------------------
/docs/source/_templates/module.rst:
--------------------------------------------------------------------------------
1 | {{ fullname }}
2 | {{ underline }}
3 |
4 | .. automodule:: {{ fullname }}
5 | :members:
6 |
--------------------------------------------------------------------------------
/docs/source/api.rst:
--------------------------------------------------------------------------------
1 | .. BluePyEfe documentation master file.
2 | You can adapt this file completely to your liking, but it should at least
3 | contain the root `toctree` directive.
4 |
5 | Python API
6 | ==========
7 |
8 | .. autosummary::
9 | :nosignatures:
10 | :toctree: _autosummary
11 | :recursive:
12 |
13 | bluepyefe
14 |
--------------------------------------------------------------------------------
/docs/source/conf.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | #
3 | # BluePyEfe documentation build configuration file, created by
4 | # sphinx-quickstart on Mon Aug 26 16:33:43 2019.
5 | #
6 | # This file is execfile()d with the current directory set to its containing dir.
7 | #
8 | # Note that not all possible configuration values are present in this
9 | # autogenerated file.
10 | #
11 | # All configuration values have a default; values that are commented out
12 | # serve to show the default.
13 |
14 | import sys
15 | import os
16 | import bluepyefe
17 |
18 | # If extensions (or modules to document with autodoc) are in another directory,
19 | # add these directories to sys.path here. If the directory is relative to the
20 | # documentation root, use os.path.abspath to make it absolute, like shown here.
21 | sys.path.insert(0, os.path.abspath('.'))
22 | sys.path.insert(0, os.path.abspath('bluepyefe'))
23 |
24 | # -- General configuration -----------------------------------------------
25 |
26 | # If your documentation needs a minimal Sphinx version, state it here.
27 | #needs_sphinx = '1.0'
28 |
29 | # Add any Sphinx extension module names here, as strings. They can be extensions
30 | # coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
31 | extensions = [
32 | 'sphinx.ext.autodoc',
33 | "sphinx.ext.autosummary",
34 | 'sphinx.ext.doctest',
35 | 'sphinx.ext.viewcode',
36 | "sphinx.ext.napoleon",
37 | ]
38 |
39 | # Add any paths that contain templates here, relative to this directory.
40 | templates_path = ['_templates']
41 |
42 | # The suffix of source filenames.
43 | source_suffix = '.rst'
44 |
45 | # The encoding of source files.
46 | #source_encoding = 'utf-8-sig'
47 |
48 | # The master toctree document.
49 | master_doc = 'index'
50 |
51 | # General information about the project.
52 | project = u'BluePyEfe'
53 |
54 | # The version info for the project you're documenting, acts as replacement for
55 | # |version| and |release|, also used in various other places throughout the
56 | # built documents.
57 | #
58 |
59 | # The short X.Y version.
60 | version = bluepyefe.__version__
61 | # The full version, including alpha/beta/rc tags.
62 | release = bluepyefe.__version__
63 |
64 | # The language for content autogenerated by Sphinx. Refer to documentation
65 | # for a list of supported languages.
66 | #language = None
67 |
68 | # There are two options for replacing |today|: either, you set today to some
69 | # non-false value, then it is used:
70 | #today = ''
71 | # Else, today_fmt is used as the format for a strftime call.
72 | #today_fmt = '%B %d, %Y'
73 |
74 | # List of patterns, relative to source directory, that match files and
75 | # directories to ignore when looking for source files.
76 | exclude_patterns = []
77 |
78 | # The reST default role (used for this markup: `text`) to use for all documents.
79 | #default_role = None
80 |
81 | # If true, '()' will be appended to :func: etc. cross-reference text.
82 | #add_function_parentheses = True
83 |
84 | # If true, the current module name will be prepended to all description
85 | # unit titles (such as .. function::).
86 | #add_module_names = True
87 |
88 | # If true, sectionauthor and moduleauthor directives will be shown in the
89 | # output. They are ignored by default.
90 | #show_authors = False
91 |
92 | # The name of the Pygments (syntax highlighting) style to use.
93 | pygments_style = 'sphinx'
94 |
95 | # A list of ignored prefixes for module index sorting.
96 | #modindex_common_prefix = []
97 |
98 | # autosummary settings
99 | autosummary_generate = True
100 |
101 | # autodoc settings
102 | autodoc_typehints = "signature"
103 | autodoc_default_options = {
104 | "members": True,
105 | "show-inheritance": True,
106 | }
107 | autoclass_content = "both"
108 |
109 | add_module_names = False
110 |
111 | suppress_warnings = [
112 | 'autosummary.import_cycle',
113 | ]
114 |
115 | # -- Options for HTML output ---------------------------------------------
116 |
117 | # The theme to use for HTML and HTML Help pages. See the documentation for
118 | # a list of builtin themes.
119 | html_theme = 'sphinx-bluebrain-theme'
120 | html_title = 'BluePyEfe'
121 | html_show_sourcelink = False
122 | html_theme_options = {
123 | "repo_url": "https://github.com/BlueBrain/BluePyEfe/",
124 | "repo_name": "BlueBrain/BluePyEfe"
125 | }
126 |
127 | # Theme options are theme-specific and customize the look and feel of a theme
128 | # further. For a list of options available for each theme, see the
129 | # documentation.
130 | #html_theme_options = {}
131 |
132 | # Add any paths that contain custom themes here, relative to this directory.
133 | # html_theme_path = ['./']
134 |
135 | # The name for this set of Sphinx documents. If None, it defaults to
136 | # "<project> v<release> documentation".
137 | #html_title = None
138 |
139 | # A shorter title for the navigation bar. Default is the same as html_title.
140 | #html_short_title = None
141 |
142 | # The name of an image file (relative to this directory) to place at the top
143 | # of the sidebar.
144 | # html_logo = "_static/bbp.png"
145 |
146 | # The name of an image file (within the static path) to use as favicon of the
147 | # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
148 | # pixels large.
149 | #html_favicon = None
150 |
151 | # Add any paths that contain custom static files (such as style sheets) here,
152 | # relative to this directory. They are copied after the builtin static files,
153 | # so a file named "default.css" will overwrite the builtin "default.css".
154 | html_static_path = ['_static']
155 |
156 | # If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
157 | # using the given strftime format.
158 | #html_last_updated_fmt = '%b %d, %Y'
159 |
160 | # If true, SmartyPants will be used to convert quotes and dashes to
161 | # typographically correct entities.
162 | #html_use_smartypants = True
163 |
164 | # Custom sidebar templates, maps document names to template names.
165 | #html_sidebars = {}
166 |
167 | # Additional templates that should be rendered to pages, maps page names to
168 | # template names.
169 | #html_additional_pages = {}
170 |
171 | # If false, no module index is generated.
172 | #html_domain_indices = True
173 |
174 | # If false, no index is generated.
175 | #html_use_index = True
176 |
177 | # If true, the index is split into individual pages for each letter.
178 | #html_split_index = False
179 |
180 | # If true, links to the reST sources are added to the pages.
181 | #html_show_sourcelink = True
182 |
183 | # If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
184 | #html_show_sphinx = True
185 |
186 | # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
187 | #html_show_copyright = True
188 |
189 | # If true, an OpenSearch description file will be output, and all pages will
190 | # contain a <link> tag referring to it. The value of this option must be the
191 | # base URL from which the finished HTML is served.
192 | #html_use_opensearch = ''
193 |
194 | # This is the file name suffix for HTML files (e.g. ".xhtml").
195 | #html_file_suffix = None
196 |
197 | # Output file base name for HTML help builder.
198 | htmlhelp_basename = 'BluePyEfedoc'
199 |
200 |
201 | # -- Options for LaTeX output --------------------------------------------
202 |
203 | latex_elements = {
204 | # The paper size ('letterpaper' or 'a4paper').
205 | # 'papersize': 'letterpaper',
206 |
207 | # The font size ('10pt', '11pt' or '12pt').
208 | # 'pointsize': '10pt',
209 |
210 | # Additional stuff for the LaTeX preamble.
211 | # 'preamble': '',
212 | }
213 |
214 | # Grouping the document tree into LaTeX files. List of tuples
215 | # (source start file, target name, title, author, documentclass [howto/manual]).
216 | latex_documents = [
217 | ('index', 'BluePyEfe.tex', u'BluePyEfe Documentation',
218 | u'Blue Brain Project', 'manual'),
219 | ]
220 |
221 | # The name of an image file (relative to this directory) to place at the top of
222 | # the title page.
223 | #latex_logo = None
224 |
225 | # For "manual" documents, if this is true, then toplevel headings are parts,
226 | # not chapters.
227 | #latex_use_parts = False
228 |
229 | # If true, show page references after internal links.
230 | #latex_show_pagerefs = False
231 |
232 | # If true, show URL addresses after external links.
233 | #latex_show_urls = False
234 |
235 | # Documents to append as an appendix to all manuals.
236 | #latex_appendices = []
237 |
238 | # If false, no module index is generated.
239 | #latex_domain_indices = True
240 |
241 |
242 | # -- Options for manual page output --------------------------------------
243 |
244 | # One entry per manual page. List of tuples
245 | # (source start file, name, description, authors, manual section).
246 | man_pages = [
247 | ('index', 'bluepyefe', u'BluePyEfe Documentation',
248 | [u'Blue Brain Project'], 1)
249 | ]
250 |
251 | # If true, show URL addresses after external links.
252 | #man_show_urls = False
253 |
254 |
255 | # -- Options for Texinfo output ------------------------------------------
256 |
257 | # Grouping the document tree into Texinfo files. List of tuples
258 | # (source start file, target name, title, author,
259 | # dir menu entry, description, category)
260 | texinfo_documents = [
261 | ('index', 'BluePyEfe', u'BluePyEfe Documentation',
262 |      u'Blue Brain Project', 'BluePyEfe', 'Blue Brain Python E-feature extraction',
263 | 'Miscellaneous'),
264 | ]
265 |
266 | # Documents to append as an appendix to all manuals.
267 | #texinfo_appendices = []
268 |
269 | # If false, no module index is generated.
270 | #texinfo_domain_indices = True
271 |
272 | # How to display URL addresses: 'footnote', 'no', or 'inline'.
273 | #texinfo_show_urls = 'footnote'
274 |
--------------------------------------------------------------------------------
/docs/source/index.rst:
--------------------------------------------------------------------------------
1 |
2 | .. include:: ../../README.rst
3 | :end-before: .. substitutions
4 |
5 | .. toctree::
6 | :hidden:
7 | :maxdepth: 3
8 |
9 | Home
10 | api.rst
11 |
12 | .. |banner| image:: /logo/BluePyEfeBanner.jpg
13 |
--------------------------------------------------------------------------------
/docs/source/logo/BluePyEfeBanner.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BlueBrain/BluePyEfe/11a1e0c59e2541362e22279f6c78edbff44db975/docs/source/logo/BluePyEfeBanner.jpg
--------------------------------------------------------------------------------
/examples/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 | Copyright (c) 2016, EPFL/Blue Brain Project
3 |
4 | This file is part of BluePyOpt
5 |
6 | This library is free software; you can redistribute it and/or modify it under
7 | the terms of the GNU Lesser General Public License version 3.0 as published
8 | by the Free Software Foundation.
9 |
10 | This library is distributed in the hope that it will be useful, but WITHOUT
11 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
12 | FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
13 | details.
14 |
15 | You should have received a copy of the GNU Lesser General Public License
16 | along with this library; if not, write to the Free Software Foundation, Inc.,
17 | 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
18 | """
19 |
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [build-system]
2 | requires = ["setuptools >= 64", "setuptools-scm>=8.0"]
3 | build-backend = "setuptools.build_meta"
4 |
5 | [project]
6 | name = "bluepyefe"
7 | authors = [
8 | {name = "Blue Brain Project, EPFL", email = "werner.vangeit@epfl.ch"},
9 | ]
10 | description="Blue Brain Python E-feature extraction"
11 | readme = "README.rst"
12 | license = {file = "LICENSE.txt"}
13 | requires-python = ">= 3.9"
14 | dynamic = ["version"]
15 | dependencies = [
16 | "numpy",
17 | "neo",
18 | "matplotlib",
19 | "efel",
20 | "scipy",
21 | "h5py",
22 | "igor2",
23 | ]
24 | classifiers = [
25 | "Development Status :: 4 - Beta",
26 | "Environment :: Console",
27 | "License :: OSI Approved :: GNU Lesser General Public License v3 (LGPLv3)",
28 | "Programming Language :: Python :: 3",
29 | "Operating System :: POSIX",
30 | "Topic :: Scientific/Engineering",
31 | "Topic :: Utilities",
32 | ]
33 | keywords = [
34 | "neuroscience",
35 | "BlueBrainProject",
36 | ]
37 |
38 | [project.urls]
39 | Homepage = "https://github.com/BlueBrain/BluePyEfe"
40 | Source = "https://github.com/BlueBrain/BluePyEfe"
41 | Repository = "https://github.com/BlueBrain/BluePyEfe.git"
42 | Tracker = "https://github.com/BlueBrain/BluePyEfe/issues"
43 | Documentation = "https://bluepyefe.readthedocs.io/en/latest"
44 |
45 | [tool.setuptools_scm]
46 | version_scheme = "python-simplified-semver"
47 | local_scheme = "no-local-version"
48 |
49 | [tool.setuptools.packages.find]
50 | include = ["bluepyefe"]
51 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | -e .
2 |
--------------------------------------------------------------------------------
/requirements_docs.txt:
--------------------------------------------------------------------------------
1 | # Copyright 2020-2023 Blue Brain Project / EPFL
2 |
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 |
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | sphinx>=2.0.0
16 | sphinx-bluebrain-theme
17 |
--------------------------------------------------------------------------------
/tests/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BlueBrain/BluePyEfe/11a1e0c59e2541362e22279f6c78edbff44db975/tests/__init__.py
--------------------------------------------------------------------------------
/tests/ecode/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BlueBrain/BluePyEfe/11a1e0c59e2541362e22279f6c78edbff44db975/tests/ecode/__init__.py
--------------------------------------------------------------------------------
/tests/ecode/test_apthresh.py:
--------------------------------------------------------------------------------
1 | """bluepyefe.ecode.APThreshold tests"""
2 |
3 | import unittest
4 | import pytest
5 | import glob
6 | import json
7 |
8 | import bluepyefe.extract
9 | import bluepyefe.tools
10 | from tests.utils import download_apthresh_datafiles
11 |
12 |
13 | def get_apthresh_config(absolute_amplitude=False):
14 | download_apthresh_datafiles()
15 |
16 | interesting_efeatures = {
17 | "Spikecount": {},
18 | "mean_frequency": {},
19 | "ISI_CV": {},
20 | "AP1_amp": {},
21 | "AP_width": {},
22 | }
23 |
24 | files_metadata1 = []
25 | for file in glob.glob("./tests/exp_data/X/X_APThreshold_ch0_*.ibw"):
26 | files_metadata1.append(
27 | {
28 | "i_file": file,
29 | "v_file": file.replace("ch0", "ch1"),
30 | "i_unit": "A",
31 | "v_unit": "V",
32 | "t_unit": "ms",
33 | "dt": 0.25,
34 | "ljp": 14,
35 | "ton": 10, # in ms
36 | "tmid": 260, # in ms
37 | "tmid2": 360, # in ms
38 | "toff": 1360, # in ms
39 | }
40 | )
41 | files_metadata2 = []
42 | for file in glob.glob("./tests/exp_data/X/X_IDthresh_ch0_*.ibw"):
43 | files_metadata2.append(
44 | {
45 | "i_file": file,
46 | "v_file": file.replace("ch0", "ch1"),
47 | "i_unit": "A",
48 | "v_unit": "V",
49 | "t_unit": "ms",
50 | "dt": 0.25,
51 | "ljp": 14,
52 | }
53 | )
54 |
55 | files_metadata = {
56 | "MouseNeuron1": {"APThreshold": files_metadata1, "IDthresh": files_metadata2},
57 | }
58 |
59 | if absolute_amplitude:
60 | targets = {
61 | "APThreshold": {
62 | "amplitudes": [0.0, 0.225, 0.5, 0.69, 0.41, 0.595],
63 | "tolerances": [0.01],
64 | "efeatures": interesting_efeatures,
65 | "location": "soma",
66 | }
67 | }
68 |
69 | else:
70 | targets = {
71 | "APThreshold": {
72 | "amplitudes": [150],
73 | "tolerances": [10.0],
74 | "efeatures": interesting_efeatures,
75 | "location": "soma",
76 | }
77 | }
78 |
79 | return files_metadata, bluepyefe.extract.convert_legacy_targets(targets)
80 |
81 | class APThreshTest(unittest.TestCase):
82 | def test_extract_apthresh(self):
83 | for absolute_amplitude in [True, False]:
84 | with self.subTest(absolute_amplitude=absolute_amplitude):
85 | self.run_test_with_absolute_amplitude(absolute_amplitude)
86 |
87 | def run_test_with_absolute_amplitude(self, absolute_amplitude):
88 | files_metadata, targets = get_apthresh_config(absolute_amplitude)
89 |
90 | cells = bluepyefe.extract.read_recordings(files_metadata=files_metadata)
91 |
92 | cells = bluepyefe.extract.extract_efeatures_at_targets(
93 | cells=cells, targets=targets
94 | )
95 |
96 | bluepyefe.extract.compute_rheobase(cells, protocols_rheobase=["IDthresh"])
97 |
98 | self.assertEqual(len(cells), 1)
99 | self.assertEqual(len(cells[0].recordings), 21)
100 | self.assertLess(abs(cells[0].rheobase - 0.1103), 0.01)
101 |
102 | # amplitude test for one recording
103 | # sort the recordings because they can be in any order,
104 | # and we want to select the same one each time we test
105 | apthresh_recs = [rec for rec in cells[0].recordings if rec.protocol_name == "APThreshold"]
106 | rec1 = sorted(apthresh_recs, key=lambda x: x.amp)[1]
107 | self.assertLess(abs(rec1.amp - 0.1740), 0.01)
108 | self.assertLess(abs(rec1.amp_rel - 157.7), 0.1)
109 |
110 |
111 | protocols = bluepyefe.extract.group_efeatures(
112 | cells,
113 | targets,
114 | use_global_rheobase=True,
115 | protocol_mode="mean",
116 | absolute_amplitude=absolute_amplitude
117 | )
118 |
119 | _ = bluepyefe.extract.create_feature_protocol_files(
120 | cells=cells, protocols=protocols, output_directory="MouseCells_APThreshold"
121 | )
122 |
123 | for protocol in protocols:
124 | if protocol.name == "APThreshold" and protocol.amplitude == 150:
125 | for target in protocol.feature_targets:
126 | if target.efel_feature_name == "Spikecount":
127 | self.assertEqual(target.mean, 14)
128 | break
129 |
130 | bluepyefe.extract.plot_all_recordings_efeatures(
131 | cells, protocols, output_dir="MouseCells_APThreshold/"
132 | )
133 |
134 | with open("MouseCells_APThreshold/features.json") as fp:
135 | features = json.load(fp)
136 | with open("MouseCells_APThreshold/protocols.json") as fp:
137 | protocols = json.load(fp)
138 |
139 | self.assertEqual(len(features), len(protocols))
140 |
141 | if __name__ == "__main__":
142 | unittest.main()
--------------------------------------------------------------------------------
/tests/ecode/test_sahp.py:
--------------------------------------------------------------------------------
1 | """bluepyefe.ecode.sAHP tests"""
2 |
3 | import unittest
4 | import glob
5 | import json
6 |
7 | import bluepyefe.extract
8 | import bluepyefe.tools
9 | from tests.utils import download_sahp_datafiles
10 |
11 |
12 | def get_sahp_config(absolute_amplitude=False):
13 | download_sahp_datafiles()
14 |
15 | interesting_efeatures = {
16 | "Spikecount": {},
17 | "mean_frequency": {},
18 | "ISI_CV": {},
19 | "AP1_amp": {},
20 | "AP_width": {},
21 | }
22 |
23 | files_metadata1 = []
24 | for file in glob.glob("./tests/exp_data/X/X_sAHP_ch0_*.ibw"):
25 | files_metadata1.append(
26 | {
27 | "i_file": file,
28 | "v_file": file.replace("ch0", "ch1"),
29 | "i_unit": "A",
30 | "v_unit": "V",
31 | "t_unit": "ms",
32 | "dt": 0.25,
33 | "ljp": 14,
34 | "ton": 10, # in ms
35 | "tmid": 260, # in ms
36 | "tmid2": 360, # in ms
37 | "toff": 1360, # in ms
38 | }
39 | )
40 | files_metadata2 = []
41 | for file in glob.glob("./tests/exp_data/X/X_IDthresh_ch0_*.ibw"):
42 | files_metadata2.append(
43 | {
44 | "i_file": file,
45 | "v_file": file.replace("ch0", "ch1"),
46 | "i_unit": "A",
47 | "v_unit": "V",
48 | "t_unit": "ms",
49 | "dt": 0.25,
50 | "ljp": 14,
51 | }
52 | )
53 |
54 | files_metadata = {
55 | "MouseNeuron1": {"sAHP": files_metadata1, "IDthresh": files_metadata2},
56 | }
57 |
58 | if absolute_amplitude:
59 | targets = {
60 | "sAHP": {
61 | "amplitudes": [0.315, 0.225, 0.5, 0.69, 0.41, 0.595],
62 | "tolerances": [0.1],
63 | "efeatures": interesting_efeatures,
64 | "location": "soma",
65 | }
66 | }
67 |
68 | else:
69 | targets = {
70 | "sAHP": {
71 | "amplitudes": [285, 200, 450, 625, 370, 540],
72 | "tolerances": [10.0],
73 | "efeatures": interesting_efeatures,
74 | "location": "soma",
75 | }
76 | }
77 |
78 | return files_metadata, bluepyefe.extract.convert_legacy_targets(targets)
79 |
80 |
81 | class SAHPTest(unittest.TestCase):
82 | def test_extract_sahp(self):
83 | for absolute_amplitude in [True, False]:
84 | with self.subTest(absolute_amplitude=absolute_amplitude):
85 | self.run_test_with_absolute_amplitude(absolute_amplitude)
86 |
87 | def run_test_with_absolute_amplitude(self, absolute_amplitude):
88 | files_metadata, targets = get_sahp_config(absolute_amplitude)
89 |
90 | cells = bluepyefe.extract.read_recordings(files_metadata=files_metadata)
91 |
92 | cells = bluepyefe.extract.extract_efeatures_at_targets(
93 | cells=cells, targets=targets
94 | )
95 |
96 | bluepyefe.extract.compute_rheobase(cells, protocols_rheobase=["IDthresh"])
97 |
98 | self.assertEqual(len(cells), 1)
99 | self.assertEqual(len(cells[0].recordings), 24)
100 | self.assertLess(abs(cells[0].rheobase - 0.1103), 0.01)
101 |
102 | # amplitude test for one recording
103 | # sort the recordings because they can be in any order,
104 | # and we want to select the same one each time we test
105 | sahp_recs = [rec for rec in cells[0].recordings if rec.protocol_name == "sAHP"]
106 | rec1 = sorted(sahp_recs, key=lambda x: x.amp2)[1]
107 | self.assertLess(abs(rec1.amp - 0.0953), 0.01)
108 | self.assertLess(abs(rec1.amp2 - 0.3153), 0.01)
109 | self.assertLess(abs(rec1.amp_rel - 86.4), 0.1)
110 | self.assertLess(abs(rec1.amp2_rel - 285.8), 0.1)
111 |
112 |
113 | protocols = bluepyefe.extract.group_efeatures(
114 | cells,
115 | targets,
116 | use_global_rheobase=True,
117 | protocol_mode="mean",
118 | absolute_amplitude=absolute_amplitude
119 | )
120 |
121 | _ = bluepyefe.extract.create_feature_protocol_files(
122 | cells=cells, protocols=protocols, output_directory="MouseCells_sAHP"
123 | )
124 |
125 | for protocol in protocols:
126 | if protocol.name == "sAHP" and protocol.amplitude == 625:
127 | for target in protocol.feature_targets:
128 | if target.efel_feature_name == "Spikecount":
129 | self.assertEqual(target.mean, 6)
130 | break
131 |
132 | bluepyefe.extract.plot_all_recordings_efeatures(
133 | cells, protocols, output_dir="MouseCells_sAHP/"
134 | )
135 |
136 | with open("MouseCells_sAHP/features.json") as fp:
137 | features = json.load(fp)
138 | with open("MouseCells_sAHP/protocols.json") as fp:
139 | protocols = json.load(fp)
140 |
141 | self.assertEqual(len(features), len(protocols))
142 |
143 | if __name__ == "__main__":
144 | unittest.main()
--------------------------------------------------------------------------------
/tests/exp_data/B6/B6_Ch0_IDRest_181.ibw:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BlueBrain/BluePyEfe/11a1e0c59e2541362e22279f6c78edbff44db975/tests/exp_data/B6/B6_Ch0_IDRest_181.ibw
--------------------------------------------------------------------------------
/tests/exp_data/B6/B6_Ch0_IDRest_182.ibw:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BlueBrain/BluePyEfe/11a1e0c59e2541362e22279f6c78edbff44db975/tests/exp_data/B6/B6_Ch0_IDRest_182.ibw
--------------------------------------------------------------------------------
/tests/exp_data/B6/B6_Ch0_IDRest_183.ibw:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BlueBrain/BluePyEfe/11a1e0c59e2541362e22279f6c78edbff44db975/tests/exp_data/B6/B6_Ch0_IDRest_183.ibw
--------------------------------------------------------------------------------
/tests/exp_data/B6/B6_Ch0_IDRest_184.ibw:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BlueBrain/BluePyEfe/11a1e0c59e2541362e22279f6c78edbff44db975/tests/exp_data/B6/B6_Ch0_IDRest_184.ibw
--------------------------------------------------------------------------------
/tests/exp_data/B6/B6_Ch0_IDRest_185.ibw:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BlueBrain/BluePyEfe/11a1e0c59e2541362e22279f6c78edbff44db975/tests/exp_data/B6/B6_Ch0_IDRest_185.ibw
--------------------------------------------------------------------------------
/tests/exp_data/B6/B6_Ch3_IDRest_181.ibw:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BlueBrain/BluePyEfe/11a1e0c59e2541362e22279f6c78edbff44db975/tests/exp_data/B6/B6_Ch3_IDRest_181.ibw
--------------------------------------------------------------------------------
/tests/exp_data/B6/B6_Ch3_IDRest_182.ibw:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BlueBrain/BluePyEfe/11a1e0c59e2541362e22279f6c78edbff44db975/tests/exp_data/B6/B6_Ch3_IDRest_182.ibw
--------------------------------------------------------------------------------
/tests/exp_data/B6/B6_Ch3_IDRest_183.ibw:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BlueBrain/BluePyEfe/11a1e0c59e2541362e22279f6c78edbff44db975/tests/exp_data/B6/B6_Ch3_IDRest_183.ibw
--------------------------------------------------------------------------------
/tests/exp_data/B6/B6_Ch3_IDRest_184.ibw:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BlueBrain/BluePyEfe/11a1e0c59e2541362e22279f6c78edbff44db975/tests/exp_data/B6/B6_Ch3_IDRest_184.ibw
--------------------------------------------------------------------------------
/tests/exp_data/B6/B6_Ch3_IDRest_185.ibw:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BlueBrain/BluePyEfe/11a1e0c59e2541362e22279f6c78edbff44db975/tests/exp_data/B6/B6_Ch3_IDRest_185.ibw
--------------------------------------------------------------------------------
/tests/exp_data/B8/B8_Ch0_IDRest_145.ibw:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BlueBrain/BluePyEfe/11a1e0c59e2541362e22279f6c78edbff44db975/tests/exp_data/B8/B8_Ch0_IDRest_145.ibw
--------------------------------------------------------------------------------
/tests/exp_data/B8/B8_Ch0_IDRest_146.ibw:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BlueBrain/BluePyEfe/11a1e0c59e2541362e22279f6c78edbff44db975/tests/exp_data/B8/B8_Ch0_IDRest_146.ibw
--------------------------------------------------------------------------------
/tests/exp_data/B8/B8_Ch0_IDRest_147.ibw:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BlueBrain/BluePyEfe/11a1e0c59e2541362e22279f6c78edbff44db975/tests/exp_data/B8/B8_Ch0_IDRest_147.ibw
--------------------------------------------------------------------------------
/tests/exp_data/B8/B8_Ch0_IDRest_148.ibw:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BlueBrain/BluePyEfe/11a1e0c59e2541362e22279f6c78edbff44db975/tests/exp_data/B8/B8_Ch0_IDRest_148.ibw
--------------------------------------------------------------------------------
/tests/exp_data/B8/B8_Ch0_IDRest_149.ibw:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BlueBrain/BluePyEfe/11a1e0c59e2541362e22279f6c78edbff44db975/tests/exp_data/B8/B8_Ch0_IDRest_149.ibw
--------------------------------------------------------------------------------
/tests/exp_data/B8/B8_Ch3_IDRest_145.ibw:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BlueBrain/BluePyEfe/11a1e0c59e2541362e22279f6c78edbff44db975/tests/exp_data/B8/B8_Ch3_IDRest_145.ibw
--------------------------------------------------------------------------------
/tests/exp_data/B8/B8_Ch3_IDRest_146.ibw:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BlueBrain/BluePyEfe/11a1e0c59e2541362e22279f6c78edbff44db975/tests/exp_data/B8/B8_Ch3_IDRest_146.ibw
--------------------------------------------------------------------------------
/tests/exp_data/B8/B8_Ch3_IDRest_147.ibw:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BlueBrain/BluePyEfe/11a1e0c59e2541362e22279f6c78edbff44db975/tests/exp_data/B8/B8_Ch3_IDRest_147.ibw
--------------------------------------------------------------------------------
/tests/exp_data/B8/B8_Ch3_IDRest_148.ibw:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BlueBrain/BluePyEfe/11a1e0c59e2541362e22279f6c78edbff44db975/tests/exp_data/B8/B8_Ch3_IDRest_148.ibw
--------------------------------------------------------------------------------
/tests/exp_data/B8/B8_Ch3_IDRest_149.ibw:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BlueBrain/BluePyEfe/11a1e0c59e2541362e22279f6c78edbff44db975/tests/exp_data/B8/B8_Ch3_IDRest_149.ibw
--------------------------------------------------------------------------------
/tests/exp_data/B95_Ch0_IDRest_107.ibw:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BlueBrain/BluePyEfe/11a1e0c59e2541362e22279f6c78edbff44db975/tests/exp_data/B95_Ch0_IDRest_107.ibw
--------------------------------------------------------------------------------
/tests/exp_data/B95_Ch3_IDRest_107.ibw:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BlueBrain/BluePyEfe/11a1e0c59e2541362e22279f6c78edbff44db975/tests/exp_data/B95_Ch3_IDRest_107.ibw
--------------------------------------------------------------------------------
/tests/exp_data/csv_lccr/dummy/protocol.txt:
--------------------------------------------------------------------------------
1 | STEP_LONG
2 | ch1
3 | 10000
4 | 200 800 400
5 | 10 -10 20 -20 30 -30 40 -40 50 -50 60 -60 70 -70 80 -80 90 -90 100 -100 150 200 250 300 400 500 600
6 |
--------------------------------------------------------------------------------
/tests/exp_data/hippocampus-portal/99111002.nwb:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BlueBrain/BluePyEfe/11a1e0c59e2541362e22279f6c78edbff44db975/tests/exp_data/hippocampus-portal/99111002.nwb
--------------------------------------------------------------------------------
/tests/exp_data/hippocampus-portal/data-provenance.txt:
--------------------------------------------------------------------------------
1 | Hippocampus Hub (2021), a website and data portal developed and operated jointly by the Institute of Biophysics, National Research Council (Consiglio Nazionale delle Ricerche [CNR]), Italy,
2 | and the Blue Brain Project (BBP), École polytechnique fédérale de Lausanne (EPFL), Switzerland.
3 | https://www.hippocampushub.eu
4 |
5 | Applicable terms and conditions:
6 | Consiglio Nazionale delle Ricerche, Istituto di Biofisica — https://www.hippocampushub.eu/build/about#terms_and_conditions
7 | EPFL/ Blue Brain Project (BBP) — https://www.hippocampushub.eu/model/terms-of-use/
8 |
--------------------------------------------------------------------------------
/tests/test_cell.py:
--------------------------------------------------------------------------------
1 | """bluepyefe.cell tests"""
2 |
3 | import unittest
4 |
5 | import bluepyefe.cell
6 | import bluepyefe.recording
7 | from bluepyefe.rheobase import compute_rheobase_absolute
8 |
9 |
10 | class CellTest(unittest.TestCase):
11 | def setUp(self):
12 |
13 | self.cell = bluepyefe.cell.Cell(name="MouseNeuron")
14 |
15 | file_metadata = {
16 | "i_file": "./tests/exp_data/B95_Ch0_IDRest_107.ibw",
17 | "v_file": "./tests/exp_data/B95_Ch3_IDRest_107.ibw",
18 | "i_unit": "pA",
19 | "v_unit": "mV",
20 | "t_unit": "s",
21 | "dt": 0.00025,
22 | "ljp": 14.0,
23 | }
24 |
25 | self.cell.read_recordings(protocol_data=[file_metadata], protocol_name="IDRest")
26 |
27 | self.cell.extract_efeatures(
28 | protocol_name="IDRest", efeatures=["Spikecount", "AP1_amp"]
29 | )
30 |
31 | def test_efeature_extraction(self):
32 | recording = self.cell.recordings[0]
33 | self.assertEqual(2, len(recording.efeatures))
34 | self.assertEqual(recording.efeatures["Spikecount"], 9.0)
35 | self.assertLess(abs(recording.efeatures["AP1_amp"] - 66.4), 2.0)
36 |
37 | def test_amp_threshold(self):
38 | recording = self.cell.recordings[0]
39 | compute_rheobase_absolute(self.cell, ["IDRest"])
40 | self.cell.compute_relative_amp()
41 | self.assertEqual(recording.amp, self.cell.rheobase)
42 | self.assertEqual(recording.amp_rel, 100.0)
43 |
44 |
45 | if __name__ == "__main__":
46 | unittest.main()
47 |
--------------------------------------------------------------------------------
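A minimal standalone sketch of the Cell workflow exercised by CellTest above. The calls, file paths and feature names all mirror CellTest.setUp and test_amp_threshold; nothing here goes beyond what the test already uses, so treat it as an illustration rather than additional API documentation.

import bluepyefe.cell
from bluepyefe.rheobase import compute_rheobase_absolute

cell = bluepyefe.cell.Cell(name="MouseNeuron")

# Read one IDRest recording from the Igor .ibw test traces.
cell.read_recordings(
    protocol_data=[{
        "i_file": "./tests/exp_data/B95_Ch0_IDRest_107.ibw",
        "v_file": "./tests/exp_data/B95_Ch3_IDRest_107.ibw",
        "i_unit": "pA", "v_unit": "mV", "t_unit": "s",
        "dt": 0.00025, "ljp": 14.0,
    }],
    protocol_name="IDRest",
)

# Extract eFEL features, then compute the rheobase and relative amplitudes.
cell.extract_efeatures(protocol_name="IDRest", efeatures=["Spikecount", "AP1_amp"])
compute_rheobase_absolute(cell, ["IDRest"])
cell.compute_relative_amp()

--------------------------------------------------------------------------------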
/tests/test_ecode_tools.py:
--------------------------------------------------------------------------------
1 | from bluepyefe.ecode import tools
2 | import numpy as np
3 | from numpy.testing import assert_array_almost_equal, assert_almost_equal
4 |
5 |
6 | def test_scipy_signal2d():
7 | np.random.seed(42)
8 | data = np.random.uniform(-1, 1, 10)
9 | res = tools.scipy_signal2d(data, 85)
10 | assert_array_almost_equal(
11 | res,
12 | [
13 | 0.2022300234864176,
14 | 0.1973169683940732,
15 | 0.1973169683940732,
16 | 0.1973169683940732,
17 | 0.1973169683940732,
18 | 0.1973169683940732,
19 | 0.1973169683940732,
20 | 0.2022300234864176,
21 | 0.2022300234864176,
22 | 0.2022300234864176,
23 | ],
24 | )
25 |
26 |
27 | def test_base_current():
28 | np.random.seed(42)
29 | data = np.random.uniform(-1, 1, 10)
30 | base = tools.base_current(data)
31 | assert_almost_equal(base, 0.1973169683940732)
32 |
--------------------------------------------------------------------------------
/tests/test_efel_settings.py:
--------------------------------------------------------------------------------
1 | """bluepyefe.cell tests"""
2 |
3 | import unittest
4 | import pytest
5 |
6 | import bluepyefe.cell
7 | import bluepyefe.recording
8 |
9 |
10 | class EfelSettingTest(unittest.TestCase):
11 | def setUp(self):
12 |
13 | self.cell = bluepyefe.cell.Cell(name="MouseNeuron")
14 |
15 | file_metadata = {
16 | "i_file": "./tests/exp_data/B95_Ch0_IDRest_107.ibw",
17 | "v_file": "./tests/exp_data/B95_Ch3_IDRest_107.ibw",
18 | "i_unit": "pA",
19 | "v_unit": "mV",
20 | "t_unit": "s",
21 | "dt": 0.00025,
22 | "ljp": 14.0,
23 | }
24 |
25 | self.cell.read_recordings(
26 | protocol_data=[file_metadata],
27 | protocol_name="IDRest"
28 | )
29 |
30 | def test_efel_threshold(self):
31 |
32 | self.cell.recordings[0].efeatures = {}
33 |
34 | self.cell.extract_efeatures(
35 | protocol_name="IDRest",
36 | efeatures=["Spikecount", "AP1_amp"],
37 | efel_settings={'Threshold': 40.}
38 | )
39 |
40 | recording = self.cell.recordings[0]
41 | self.assertEqual(recording.efeatures["Spikecount"], 0.)
42 | self.assertLess(abs(recording.efeatures["AP1_amp"] - 66.68), 0.01)
43 |
44 | def test_efel_strictstim(self):
45 |
46 | self.cell.recordings[0].efeatures = {}
47 |
48 | self.cell.extract_efeatures(
49 | protocol_name="IDRest",
50 | efeatures=["Spikecount"],
51 | efel_settings={
52 | 'stim_start': 0,
53 | 'stim_end': 100,
54 | 'strict_stiminterval': True
55 | }
56 | )
57 |
58 | self.assertEqual(self.cell.recordings[0].efeatures["Spikecount"], 0.)
59 |
60 | def test_efel_int_threshold(self):
61 |
62 | self.cell.recordings[0].efeatures = {}
63 |
64 | self.cell.extract_efeatures(
65 | protocol_name="IDRest",
66 | efeatures=["Spikecount"],
67 | efel_settings={'Threshold': 40}
68 | )
69 |
70 | recording = self.cell.recordings[0]
71 | self.assertEqual(recording.efeatures["Spikecount"], 0.)
72 |
73 | def test_efel_incorrect_threshold(self):
74 |
75 | self.cell.recordings[0].efeatures = {}
76 |
77 | with pytest.raises(ValueError):
78 | self.cell.extract_efeatures(
79 | protocol_name="IDRest",
80 | efeatures=["Spikecount"],
81 | efel_settings={'Threshold': ["40."]}
82 | )
83 |
84 | if __name__ == "__main__":
85 | unittest.main()
86 |
--------------------------------------------------------------------------------
/tests/test_extractor.py:
--------------------------------------------------------------------------------
1 | """bluepyefe.extractor tests"""
2 |
3 | import unittest
4 | import glob
5 | import json
6 |
7 | import bluepyefe.extract
8 | import bluepyefe.tools
9 | import bluepyefe.auto_targets
10 | from tests.utils import download_sahp_datafiles
11 |
12 | def get_config(absolute_amplitude=False):
13 |
14 | interesting_efeatures = {
15 | "Spikecount": {},
16 | "mean_frequency": {},
17 | "ISI_CV": {},
18 | "AP1_amp": {},
19 | "AP_width": {},
20 | }
21 |
22 | files_metadata1 = []
23 | for file in glob.glob("./tests/exp_data/B6/B6_Ch0_IDRest_*.ibw"):
24 | files_metadata1.append(
25 | {
26 | "i_file": file,
27 | "v_file": file.replace("Ch0", "Ch3"),
28 | "i_unit": "pA",
29 | "v_unit": "mV",
30 | "t_unit": "s",
31 | "dt": 0.00025,
32 | "ljp": 14.0,
33 | }
34 | )
35 |
36 | # Do the same for the second cell
37 | files_metadata2 = []
38 | for file in glob.glob("./tests/exp_data/B8/B8_Ch0_IDRest_*.ibw"):
39 | files_metadata2.append(
40 | {
41 | "i_file": file,
42 | "v_file": file.replace("Ch0", "Ch3"),
43 | "i_unit": "pA",
44 | "v_unit": "mV",
45 | "t_unit": "s",
46 | "dt": 0.00025,
47 | "ljp": 14.0,
48 | }
49 | )
50 |
51 | files_metadata = {
52 | "MouseNeuron1": {"IDRest": files_metadata1},
53 | "MouseNeuron2": {"IDRest": files_metadata2},
54 | }
55 |
56 | if absolute_amplitude:
57 | targets = {
58 | "IDRest": {
59 | "amplitudes": [0.15, 0.25],
60 | "tolerances": [0.05],
61 | "efeatures": interesting_efeatures,
62 | "location": "soma",
63 | }
64 | }
65 |
66 | else:
67 | targets = {
68 | "IDRest": {
69 | "amplitudes": [150, 200, 250],
70 | "tolerances": [20.0],
71 | "efeatures": interesting_efeatures,
72 | "location": "soma",
73 | }
74 | }
75 |
76 | return files_metadata, bluepyefe.extract.convert_legacy_targets(targets)
77 |
78 |
79 | class ExtractorTest(unittest.TestCase):
80 | def test_extract(self):
81 |
82 | files_metadata, targets = get_config()
83 |
84 | cells = bluepyefe.extract.read_recordings(files_metadata=files_metadata)
85 |
86 | cells = bluepyefe.extract.extract_efeatures_at_targets(
87 | cells=cells, targets=targets
88 | )
89 |
90 | bluepyefe.extract.compute_rheobase(cells, protocols_rheobase=["IDRest"])
91 |
92 | self.assertEqual(len(cells), 2)
93 | self.assertEqual(len(cells[0].recordings), 5)
94 | self.assertEqual(len(cells[1].recordings), 5)
95 |
96 | self.assertLess(abs(cells[0].rheobase - 0.119), 0.01)
97 | self.assertLess(abs(cells[1].rheobase - 0.0923), 0.01)
98 |
99 | protocols = bluepyefe.extract.group_efeatures(
100 | cells,
101 | targets,
102 | use_global_rheobase=True,
103 | protocol_mode="mean"
104 | )
105 |
106 | _ = bluepyefe.extract.create_feature_protocol_files(
107 | cells=cells, protocols=protocols, output_directory="MouseCells"
108 | )
109 |
110 | for protocol in protocols:
111 | if protocol.name == "IDRest" and protocol.amplitude == 250.:
112 | for target in protocol.feature_targets:
113 | if target.efel_feature_name == "Spikecount":
114 | self.assertEqual(target.mean, 78.5)
115 | self.assertEqual(target.std, 3.5)
116 | break
117 |
118 | bluepyefe.extract.plot_all_recordings_efeatures(
119 | cells, protocols, output_dir="MouseCells/"
120 | )
121 |
122 | with open("MouseCells/features.json") as fp:
123 | features = json.load(fp)
124 | with open("MouseCells/protocols.json") as fp:
125 | protocols = json.load(fp)
126 |
127 | self.assertEqual(len(features), len(protocols))
128 |
129 | def test_extract_auto_fail_rheobase(self):
130 |
131 | files_metadata, _ = get_config()
132 |
133 | efeatures, protocol_definitions, current = bluepyefe.extract.extract_efeatures(
134 | output_directory="./",
135 | files_metadata=files_metadata,
136 | rheobase_strategy="flush",
137 | rheobase_settings={"upper_bound_spikecount": 4}
138 | )
139 |
140 | self.assertEqual(len(efeatures), 0)
141 |
142 | def test_extract_auto(self):
143 |
144 | files_metadata, _ = get_config()
145 |
146 | auto_targets = bluepyefe.auto_targets.default_auto_targets()
147 |
148 | cells = bluepyefe.extract.read_recordings(
149 | files_metadata,
150 | recording_reader=None,
151 | map_function=map,
152 | efel_settings=bluepyefe.tools.DEFAULT_EFEL_SETTINGS
153 | )
154 |
155 | for cell in cells:
156 | cell.rheobase = 0.07
157 | cell.compute_relative_amp()
158 |
159 | recordings = []
160 | for c in cells:
161 | recordings += c.recordings
162 |
163 | for i in range(len(auto_targets)):
164 | auto_targets[i].select_ecode_and_amplitude(recordings)
165 |
166 | # Extract the efeatures and group them around the preset of targets.
167 | targets = []
168 | for at in auto_targets:
169 | targets += at.generate_targets()
170 |
171 | self.assertEqual(len(targets), 48)
172 |
173 | def test_extract_absolute(self):
174 |
175 | files_metadata, targets = get_config(True)
176 |
177 | cells = bluepyefe.extract.read_recordings(files_metadata=files_metadata)
178 |
179 | cells = bluepyefe.extract.extract_efeatures_at_targets(
180 | cells=cells, targets=targets
181 | )
182 |
183 | self.assertEqual(len(cells), 2)
184 | self.assertEqual(len(cells[0].recordings), 5)
185 | self.assertEqual(len(cells[1].recordings), 5)
186 |
187 | self.assertEqual(cells[0].rheobase, None)
188 | self.assertEqual(cells[1].rheobase, None)
189 |
190 | protocols = bluepyefe.extract.group_efeatures(
191 | cells,
192 | targets,
193 | absolute_amplitude=True,
194 | protocol_mode="mean"
195 | )
196 |
197 | _ = bluepyefe.extract.create_feature_protocol_files(
198 | cells=cells, protocols=protocols, output_directory="MouseCells"
199 | )
200 |
201 | for cell in cells:
202 | for r in cell.recordings:
203 | print(r.amp, r.efeatures)
204 |
205 | for protocol in protocols:
206 | if protocol.name == "IDRest" and protocol.amplitude == 0.25:
207 | for target in protocol.feature_targets:
208 | if target.efel_feature_name == "Spikecount":
209 | self.assertEqual(target.mean, 76.5)
210 | self.assertAlmostEqual(target.std, 5.590169, 4)
211 | break
212 |
213 | bluepyefe.extract.plot_all_recordings_efeatures(
214 | cells, protocols, output_dir="MouseCells/"
215 | )
216 |
217 | with open("MouseCells/features.json") as fp:
218 | features = json.load(fp)
219 | with open("MouseCells/protocols.json") as fp:
220 | protocols = json.load(fp)
221 |
222 | self.assertEqual(len(features), len(protocols))
223 |
224 |
225 |
226 |
227 | if __name__ == "__main__":
228 | unittest.main()
229 |
--------------------------------------------------------------------------------
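A condensed view of the extraction pipeline driven by ExtractorTest.test_extract above. files_metadata and targets come from get_config() in the same module, and every call is one already made by the test; this is a reading aid, not extra API surface.

import bluepyefe.extract
from tests.test_extractor import get_config

files_metadata, targets = get_config()

# Read the .ibw recordings, extract features at the requested targets,
# then compute each cell's rheobase from the IDRest recordings.
cells = bluepyefe.extract.read_recordings(files_metadata=files_metadata)
cells = bluepyefe.extract.extract_efeatures_at_targets(cells=cells, targets=targets)
bluepyefe.extract.compute_rheobase(cells, protocols_rheobase=["IDRest"])

# Group the per-recording features into protocols and write the output files.
protocols = bluepyefe.extract.group_efeatures(
    cells, targets, use_global_rheobase=True, protocol_mode="mean"
)
bluepyefe.extract.create_feature_protocol_files(
    cells=cells, protocols=protocols, output_directory="MouseCells"
)

--------------------------------------------------------------------------------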
/tests/test_lccr_csv_reader.py:
--------------------------------------------------------------------------------
1 | """bluepyefe.nwbreader tests"""
2 | import unittest
3 | import h5py
4 | from pathlib import Path
5 | from bluepyefe.reader import csv_lccr_reader
6 |
7 |
8 | class TestCSVLCCRReaders(unittest.TestCase):
9 | def setUp(self):
10 | self.test_data = {
11 | 'filepath': './tests/exp_data/csv_lccr/dummy/dummy_ch1_cols.txt',
12 | 'protocol_name': 'Step',
13 | "dt": 0.1,
14 | "amplitudes": [10, -10, 20, -20, 30, -30, 40, -40, 50, -50, 60, -60, 70, -70, 80, -80, 90, -90, 100, -100, 150, 200, 250, 300, 400, 500, 600],
15 | "ljp": 14.0,
16 | "v_file": "dummy",
17 | "i_unit": "pA",
18 | "v_unit": "mV",
19 | "t_unit": "ms",
20 | "ton": 200,
21 | "toff": 800,
22 | "hypamp": -20,
23 | "remove_last_100ms": True,
24 | }
25 |
26 | def test_csv_lccr_reader(self):
27 | filepath = Path(self.test_data['filepath'])
28 | self.assertTrue(filepath.is_file(), f"{filepath} is not a valid file")
29 |
30 | result = csv_lccr_reader(self.test_data)
31 | self.assertIsInstance(result, list, f"Result for {filepath} should be a list")
32 | self.assertEqual(len(result), 27, f"Result for {filepath} should have 27 entries")
33 |
34 | for entry in result:
35 | self.assertIn('filename', entry)
36 | self.assertIn('voltage', entry)
37 | self.assertIn('current', entry)
38 | self.assertIn('t', entry)
39 | self.assertIn('dt', entry)
40 | self.assertIn('ton', entry)
41 | self.assertIn('toff', entry)
42 | self.assertIn('amp', entry)
43 | self.assertIn('hypamp', entry)
44 | self.assertIn('ljp', entry)
45 | self.assertIn('i_unit', entry)
46 | self.assertIn('v_unit', entry)
47 | self.assertIn('t_unit', entry)
48 |
49 | def test_csv_lccr_reader_empty_amplitudes(self):
50 | test_data = self.test_data.copy()
51 | test_data['amplitudes'] = []
52 | result = csv_lccr_reader(test_data)
53 | self.assertEqual(len(result), 0, "Result should be an empty list when amplitudes are empty")
54 |
55 | def test_csv_lccr_reader_file_not_found(self):
56 | test_data = self.test_data.copy()
57 | test_data['filepath'] = './non_existent_file.txt'
58 | with self.assertRaises(FileNotFoundError):
59 | csv_lccr_reader(test_data)
60 |
61 | def test_csv_lccr_reader_remove_last_100ms(self):
62 | test_data = self.test_data.copy()
63 | test_data['remove_last_100ms'] = True
64 |
65 | result = csv_lccr_reader(test_data)
66 |
67 | original_length = 14000
68 | expected_length = original_length - int(100 / test_data['dt'])
69 |
70 | for entry in result:
71 | self.assertEqual(len(entry['t']), expected_length, "Time array length should be reduced by 100 ms")
72 | self.assertEqual(len(entry['voltage']), expected_length, "Voltage array length should be reduced by 100 ms")
73 | self.assertEqual(len(entry['current']), expected_length, "Current array length should be reduced by 100 ms")
74 |
75 |
76 | if __name__ == '__main__':
77 | unittest.main()
--------------------------------------------------------------------------------
/tests/test_legacy_config.py:
--------------------------------------------------------------------------------
1 | """bluepyefe.protocol tests"""
2 |
3 | import unittest
4 | import glob
5 |
6 | from bluepyefe.translate_legacy_config import translate_legacy_config
7 | import bluepyefe.extract
8 | import bluepyefe.tools
9 |
10 |
11 | class TestTranslateLegacyConfig(unittest.TestCase):
12 |
13 | def setUp(self):
14 |
15 | files_metadata = {}
16 | for cell_name in ["B6", "B8"]:
17 | for path in glob.glob(f"./tests/exp_data/{cell_name}/{cell_name}_Ch0_IDRest_*.ibw"):
18 |
19 | if cell_name not in files_metadata:
20 | files_metadata[cell_name] = {"experiments": {"IDRest": {"files": []}}}
21 |
22 | files_metadata[cell_name]["experiments"]["IDRest"]["files"].append(
23 | {
24 | "i_file": path,
25 | "v_file": path.replace("Ch0", "Ch3"),
26 | "i_unit": "pA",
27 | "v_unit": "mV",
28 | "t_unit": "s",
29 | "dt": 0.00025,
30 | "ljp": 14.0,
31 | }
32 | )
33 |
34 | self.config = {
35 | "cells": files_metadata,
36 | "features": {"IDRest": ["AP_amplitude", "Spikecount"]},
37 | "options": {
38 | "target": [150, 200, 250],
39 | "tolerance": [20, 20, 20],
40 | "onoff": {"IDRest": [700, 2700]},
41 | "expthreshold": ["IDRest"]
42 | },
43 | "path": "./",
44 | }
45 |
46 | def test_translate(self):
47 | translated_config = translate_legacy_config(self.config)
48 |
49 | cells = bluepyefe.extract.read_recordings(
50 | files_metadata=translated_config["files_metadata"])
51 |
52 | cells = bluepyefe.extract.extract_efeatures_at_targets(
53 | cells=cells, targets=translated_config["targets"]
54 | )
55 |
56 | bluepyefe.extract.compute_rheobase(cells, protocols_rheobase=["IDRest"])
57 |
58 | self.assertEqual(len(cells), 2)
59 | self.assertEqual(len(cells[0].recordings), 5)
60 | self.assertEqual(len(cells[1].recordings), 5)
61 |
62 | self.assertLess(abs(cells[0].rheobase - 0.119), 0.01)
63 | self.assertLess(abs(cells[1].rheobase - 0.0923), 0.01)
64 |
65 |
66 | if __name__ == "__main__":
67 | unittest.main()
68 |
--------------------------------------------------------------------------------
/tests/test_nwbreader.py:
--------------------------------------------------------------------------------
1 | """bluepyefe.nwbreader tests"""
2 | import unittest
3 | import h5py
4 | from pathlib import Path
5 | from bluepyefe.reader import nwb_reader
6 |
7 |
8 | class TestNWBReaders(unittest.TestCase):
9 | def setUp(self):
10 | self.test_data = {
11 | 'filepath': './tests/exp_data/hippocampus-portal/99111002.nwb',
12 | 'protocol_name': 'Step',
13 | }
14 |
15 | def test_nwb_reader(self):
16 | filepath = Path(self.test_data['filepath'])
17 | self.assertTrue(filepath.is_file(), f"{filepath} is not a valid file")
18 |
19 | result = nwb_reader(self.test_data)
20 |
21 | self.assertIsInstance(result, list, f"Result for {filepath} should be a list")
22 | self.assertEqual(len(result), 16, f"Result for {filepath} should have 16 entries")
23 |
24 | for entry in result:
25 | self.assertIn('voltage', entry)
26 | self.assertIn('current', entry)
27 | self.assertIn('dt', entry)
28 | self.assertIn('id', entry)
29 | self.assertIn('i_unit', entry)
30 | self.assertIn('v_unit', entry)
31 | self.assertIn('t_unit', entry)
32 |
33 |
34 | if __name__ == '__main__':
35 | unittest.main()
--------------------------------------------------------------------------------
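A minimal sketch of calling nwb_reader directly, outside of unittest. The metadata keys and the per-sweep fields printed below are exactly those asserted in TestNWBReaders above; the print loop itself is illustrative only.

from bluepyefe.reader import nwb_reader

sweeps = nwb_reader({
    "filepath": "./tests/exp_data/hippocampus-portal/99111002.nwb",
    "protocol_name": "Step",
})

# Each entry describes one sweep: traces, sampling step and units.
for sweep in sweeps:
    print(sweep["id"], sweep["dt"], sweep["i_unit"], sweep["v_unit"], len(sweep["voltage"]))

--------------------------------------------------------------------------------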
/tests/test_protocol.py:
--------------------------------------------------------------------------------
1 | """bluepyefe.protocol tests"""
2 |
3 | import unittest
4 |
5 | from bluepyefe.ecode.step import Step
6 | from bluepyefe.protocol import Protocol
7 | from bluepyefe.target import EFeatureTarget
8 |
9 |
10 | class TestProtocol(unittest.TestCase):
11 |
12 | def setUp(self):
13 | target = EFeatureTarget(
14 | efeature_name='test_spikecount',
15 | efel_feature_name='Spikecount',
16 | protocol_name='IDRest',
17 | amplitude=150.,
18 | tolerance=10.
19 | )
20 |
21 | self.protocol = Protocol(
22 | name='IDRest',
23 | feature_targets=[target],
24 | amplitude=150.,
25 | tolerance=10.,
26 | mode="mean"
27 | )
28 |
29 | def test_append_clear(self):
30 | rec = Step(config_data={}, reader_data={})
31 | rec.efeatures = {"test_spikecount": 10.}
32 |
33 | self.protocol.append(rec)
34 | self.protocol.append(rec)
35 | self.assertEqual(self.protocol.n_match, 2)
36 |
37 | def test_str(self):
38 | print(self.protocol)
39 |
40 |
41 | if __name__ == "__main__":
42 | unittest.main()
43 |
--------------------------------------------------------------------------------
/tests/test_recording.py:
--------------------------------------------------------------------------------
1 | """bluepyefe.cell tests"""
2 |
3 | import unittest
4 |
5 | from numpy.testing import assert_array_almost_equal
6 | from pytest import approx
7 |
8 | import bluepyefe.cell
9 | import bluepyefe.recording
10 | from bluepyefe.reader import igor_reader
11 | from bluepyefe.ecode.step import Step
12 |
13 |
14 | class RecordingTest(unittest.TestCase):
15 | def setUp(self):
16 |
17 | config_data = {
18 | "i_file": "./tests/exp_data/B95_Ch0_IDRest_107.ibw",
19 | "v_file": "./tests/exp_data/B95_Ch3_IDRest_107.ibw",
20 | "i_unit": "pA",
21 | "v_unit": "mV",
22 | "t_unit": "s",
23 | "dt": 0.00025,
24 | "ljp": 14.0,
25 | }
26 |
27 | self.recording = Step(
28 | config_data,
29 | igor_reader(config_data)[0],
30 | protocol_name="Step",
31 | efel_settings={}
32 | )
33 |
34 | def test_step_ecode(self):
35 | self.assertTrue(isinstance(self.recording, bluepyefe.recording.Recording))
36 | self.assertEqual(len(self.recording.voltage), len(self.recording.current))
37 | self.assertEqual(len(self.recording.voltage), len(self.recording.t))
38 | self.assertLess(abs(self.recording.ton - 700.0), 10.0)
39 | self.assertLess(abs(self.recording.toff - 2700.0), 10.0)
40 | self.assertLess(abs(self.recording.hypamp + 0.03), 0.005)
41 | self.assertLess(abs(self.recording.amp - 0.033), 0.005)
42 |
43 | def test_get_params(self):
44 | params = self.recording.get_params()
45 | self.assertEqual(len(params), len(self.recording.export_attr))
46 |
47 | def test_generate(self):
48 | t, c = self.recording.generate()
49 | self.assertEqual(len(t), len(c))
50 | self.assertEqual(max(c), self.recording.amp + self.recording.hypamp)
51 |
52 | def test_in_target(self):
53 | self.recording.amp_rel = 100.
54 | self.assertTrue(self.recording.in_target(101, 2))
55 | self.assertFalse(self.recording.in_target(-100, 50))
56 | self.assertFalse(self.recording.in_target(90, 2))
57 |
58 |
59 | class RecordingTestNWB(unittest.TestCase):
60 |
61 | def setUp(self):
62 | cell = bluepyefe.cell.Cell(name="MouseNeuron")
63 | file_metadata = {
64 | "filepath": "./tests/exp_data/hippocampus-portal/99111002.nwb",
65 | "i_unit": "A",
66 | "v_unit": "V",
67 | "t_unit": "s",
68 | "ljp": 0.0,
69 | "protocol_name": "Step",
70 | }
71 | cell.read_recordings(protocol_data=[file_metadata], protocol_name="Step")
72 | self.cell = cell
73 |
74 | def test_set_autothreshold(self):
75 | """Test the auto_threshold detection in Recording."""
76 | assert self.cell.recordings[0].auto_threshold == approx(4.999999)
77 | assert self.cell.recordings[15].auto_threshold == approx(26.5)
78 |
79 | def test_compute_spikecount(self):
80 | """Test Recording.compute_spikecount()."""
81 | assert self.cell.recordings[1].spikecount == 2
82 | assert_array_almost_equal(self.cell.recordings[1].peak_time, [85.4, 346.1])
83 |
84 |
85 | if __name__ == "__main__":
86 | unittest.main()
87 |
--------------------------------------------------------------------------------
/tests/test_target.py:
--------------------------------------------------------------------------------
1 | """bluepyefe.target tests"""
2 |
3 | import unittest
4 | import numpy
5 | import math
6 |
7 | from bluepyefe.target import EFeatureTarget
8 |
9 |
10 | class TestEFeatureTarget(unittest.TestCase):
11 |
12 | def setUp(self):
13 | self.target = EFeatureTarget(
14 | efeature_name='test_spikecount',
15 | efel_feature_name='Spikecount',
16 | protocol_name='IDRest',
17 | amplitude=150.,
18 | tolerance=10.
19 | )
20 |
21 | def test_init(self):
22 | self.assertEqual(self.target.sample_size, 0)
23 |
24 | def test_nan(self):
25 | self.target.append(numpy.nan)
26 | self.target.append(math.nan)
27 | self.assertEqual(self.target.sample_size, 0)
28 |
29 | def test_append_clear(self):
30 | self.target.append(1.)
31 | self.target.append(2.)
32 | with self.assertRaises(TypeError) as context:
33 | self.target.append([1.])
34 | self.assertTrue('Expected value' in str(context.exception))
35 | self.assertEqual(self.target.sample_size, 2)
36 | self.target.clear()
37 | self.assertEqual(self.target.sample_size, 0)
38 |
39 | def test_mean_std(self):
40 | self.assertTrue(numpy.isnan(self.target.mean))
41 | self.assertTrue(numpy.isnan(self.target.std))
42 | self.target.append(1.)
43 | self.target.append(2.)
44 | self.assertEqual(self.target.mean, 1.5)
45 | self.assertEqual(self.target.std, 0.5)
46 |
47 | def test_dict(self):
48 | self.target.append(1.)
49 | self.target.append(2.)
50 | dict_form = self.target.as_dict()
51 | self.assertEqual(len(dict_form), 5)
52 | self.assertEqual(len(dict_form['val']), 2)
53 | self.assertEqual(len(dict_form['efel_settings']), 0)
54 |
55 | def test_str(self):
56 | print(self.target)
57 | self.target.append(1.)
58 | self.target.append(2.)
59 | print(self.target)
60 |
61 |
62 | if __name__ == "__main__":
63 | unittest.main()
64 |
--------------------------------------------------------------------------------
/tests/utils.py:
--------------------------------------------------------------------------------
1 | """Utils"""
2 |
3 | import urllib.request
4 | import shutil
5 | from pathlib import Path
6 |
7 | def download_datafiles(pathname, channels, numbers, output_dir, gb_url):
8 | paths = [f"{pathname}_{ch}_{n}.ibw" for ch in channels for n in numbers]
9 |
10 | Path(output_dir).mkdir(exist_ok=True, parents=True)
11 | for path in paths:
12 | output_path = f"{output_dir}{path}"
13 | if not Path(output_path).is_file():
14 | with urllib.request.urlopen(f"{gb_url}{path}") as response, open(output_path, "wb") as out_file:
15 | shutil.copyfileobj(response, out_file)
16 |
17 | def download_sahp_datafiles():
18 | """Download data files for sAHP and IDthresh traces."""
19 | output_dir = "./tests/exp_data/X/"
20 | gb_url = "https://raw.githubusercontent.com/BlueBrain/SSCxEModelExamples/main/feature_extraction/input-traces/C060109A1-SR-C1/"
21 | sahp_pathname = "X_sAHP"
22 | sahp_ch = ["ch0", "ch1"]
23 | sahp_numbers = list(range(320, 326))
24 | idthresh_pathname = "X_IDthresh"
25 | idthresh_ch = ["ch0", "ch1"]
26 | idthresh_numbers = list(range(349, 358)) + list(range(362, 371))
27 |
28 | download_datafiles(sahp_pathname, sahp_ch, sahp_numbers, output_dir, gb_url)
29 | download_datafiles(idthresh_pathname, idthresh_ch, idthresh_numbers, output_dir, gb_url)
30 |
31 | def download_apthresh_datafiles():
32 | """Download data files for APThreshold and IDthresh traces."""
33 | output_dir = "./tests/exp_data/X/"
34 | gb_url = "https://raw.githubusercontent.com/BlueBrain/SSCxEModelExamples/main/feature_extraction/input-traces/C060109A1-SR-C1/"
35 | apthresh_pathname = "X_APThreshold"
36 | apthresh_ch = ["ch0", "ch1"]
37 | apthresh_numbers = list(range(254, 257))
38 | idthresh_pathname = "X_IDthresh"
39 | idthresh_ch = ["ch0", "ch1"]
40 | idthresh_numbers = list(range(349, 358)) + list(range(362, 371))
41 |
42 | download_datafiles(apthresh_pathname, apthresh_ch, apthresh_numbers, output_dir, gb_url)
43 | download_datafiles(idthresh_pathname, idthresh_ch, idthresh_numbers, output_dir, gb_url)
44 |
--------------------------------------------------------------------------------
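One way the download helpers above might be wired into a test module. The setUpClass pattern and the class name are assumptions for illustration; only download_sahp_datafiles and the ./tests/exp_data/X/ output directory come from tests/utils.py.

import unittest
from pathlib import Path

from tests.utils import download_sahp_datafiles


class SAHPTracesTest(unittest.TestCase):

    @classmethod
    def setUpClass(cls):
        # Fetch the sAHP and IDthresh .ibw traces into ./tests/exp_data/X/;
        # files that are already present are not downloaded again.
        download_sahp_datafiles()

    def test_traces_present(self):
        self.assertTrue(any(Path("./tests/exp_data/X/").glob("*.ibw")))

--------------------------------------------------------------------------------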
/tox.ini:
--------------------------------------------------------------------------------
1 | [tox]
2 | envlist = py3-{functional,style,syntax}
3 | minversion = 4
4 |
5 | [gh-actions]
6 | python =
7 | 3.9: py3
8 | 3.10: py3
9 | 3.11: py3
10 | 3.12: py3
11 |
12 | [testenv]
13 | envdir =
14 | py3{8,9,10,11,}{-functional,-notebooks,-style,-syntax}: {toxworkdir}/py3
15 | docs: {toxworkdir}/docs
16 | usedevelop = true
17 | deps =
18 | coverage
19 | pytest
20 | pytest-cov
21 | pytest-xdist
22 | nbmake
23 | flake8
24 | allowlist_externals =
25 | make
26 | passenv = https_proxy, USER, KRB5CCNAME
27 | commands =
28 | make clean
29 |
30 | style: pycodestyle --ignore=E402,W503,W504,E203,E501,E722,W605 bluepyefe
31 | syntax: flake8 . --count --select=E9,F63,F72,F82 --show-source --statistics
32 |
33 | functional: pytest --cov-report=xml --cov=bluepyefe --cov=tests --cov-config=.coveragerc -vx tests
34 | functional: coverage report --show-missing
35 | functional: coverage xml
36 |
37 | notebooks: pytest -n=auto --nbmake "examples"
38 |
39 | [testenv:docs]
40 | basepython = python3.9
41 | changedir = docs
42 | deps =
43 | sphinx
44 | sphinx-bluebrain-theme
45 | commands = make html SPHINXOPTS=-W
46 | allowlist_externals = make
--------------------------------------------------------------------------------