├── .dockerignore ├── .git_archival.txt ├── .gitattributes ├── .github └── workflows │ ├── python-publish.yml │ └── pythonpackage.yml ├── .gitignore ├── .mailmap ├── .maint ├── contributors.json ├── developers.json ├── former.json ├── paper_author_list.py ├── update_changes.sh └── update_zenodo.py ├── .zenodo.json ├── CHANGES.rst ├── Dockerfile ├── LICENSE ├── MANIFEST.in ├── README.rst ├── docker └── files │ ├── neurodebian.gpg │ └── nipype.cfg ├── docs └── notebooks │ └── brainextraction_tutorial.ipynb ├── nirodents ├── __init__.py ├── cli │ ├── __init__.py │ ├── brainextraction.py │ └── plotmask.py ├── data │ ├── artsBrainExtraction_precise_T2w.json │ ├── artsBrainExtraction_testing_T2w.json │ ├── brainextraction_2stage_T1w.json │ ├── brainextraction_2stage_T2w.json │ ├── brainextraction_3stage_T1w.json │ ├── brainextraction_3stage_T2w.json │ ├── brainextraction_precise_T1w.json │ ├── brainextraction_precise_T2w.json │ └── testdata │ │ └── sub-15 │ │ └── anat │ │ └── sub-15_T2w.nii.gz ├── interfaces.py ├── utils │ ├── __init__.py │ └── filtering.py ├── viz.py └── workflows │ ├── __init__.py │ └── brainextraction.py ├── pyproject.toml ├── setup.cfg └── setup.py /.dockerignore: -------------------------------------------------------------------------------- 1 | .circleci/ 2 | .maint/ 3 | docs/ 4 | .gitignore 5 | .mailmap 6 | .travis.yml 7 | -------------------------------------------------------------------------------- /.git_archival.txt: -------------------------------------------------------------------------------- 1 | node: d1dcf984ada244fc0e5724f792feeaabe68efb09 2 | node-date: 2024-07-02T13:25:55+01:00 3 | describe-name: 0.2.8-2-gd1dcf98 4 | ref-names: HEAD -> master 5 | 6 | -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | .git_archival.txt export-subst 2 | 3 | 
-------------------------------------------------------------------------------- /.github/workflows/python-publish.yml: -------------------------------------------------------------------------------- 1 | # This workflow will upload a Python Package using Twine when a release is created 2 | # For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python#publishing-to-package-registries 3 | 4 | # This workflow uses actions that are not certified by GitHub. 5 | # They are provided by a third-party and are governed by 6 | # separate terms of service, privacy policy, and support 7 | # documentation. 8 | 9 | name: Upload Python Package 10 | 11 | on: 12 | release: 13 | types: [published] 14 | 15 | permissions: 16 | contents: read 17 | 18 | jobs: 19 | build: 20 | uses: ./.github/workflows/pythonpackage.yml 21 | 22 | deploy: 23 | needs: [build] 24 | runs-on: ubuntu-latest 25 | 26 | steps: 27 | - uses: actions/checkout@v3 28 | - name: Set up Python 29 | uses: actions/setup-python@v3 30 | with: 31 | python-version: '3.x' 32 | - name: Install dependencies 33 | run: | 34 | python -m pip install --upgrade pip 35 | python -m pip install -U build "setuptools >= 45" wheel "setuptools_scm >= 6.2" \ 36 | setuptools_scm_git_archive twine docutils 37 | - name: Build package 38 | run: python -m build -s -w 39 | - name: Publish package 40 | uses: pypa/gh-action-pypi-publish@27b31702a0e7fc50959f5ad993c78deac1bdfc29 41 | with: 42 | user: __token__ 43 | password: ${{ secrets.PYPI_API_TOKEN }} 44 | -------------------------------------------------------------------------------- /.github/workflows/pythonpackage.yml: -------------------------------------------------------------------------------- 1 | # This workflow will install Python dependencies, run tests and lint with a variety of Python versions 2 | # For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions 3 | 4 | 
name: Python package 5 | 6 | on: 7 | push: 8 | branches: [ master ] 9 | tags: [ '*' ] 10 | pull_request: 11 | branches: [ master ] 12 | workflow_call: 13 | 14 | jobs: 15 | build: 16 | if: "!contains(github.event.head_commit.message, '[skip ci]')" 17 | runs-on: ubuntu-latest 18 | strategy: 19 | matrix: 20 | python-version: ['3.8', '3.9', '3.10'] 21 | pip: ["pip==21.2", "pip~=22.0"] 22 | 23 | steps: 24 | - uses: actions/checkout@v2 25 | - name: Fetch all tags (for setuptools_scm to work) 26 | run: | 27 | /usr/bin/git -c protocol.version=2 fetch --tags --prune --unshallow origin 28 | - name: Set up Python ${{ matrix.python-version }} 29 | uses: actions/setup-python@v1 30 | with: 31 | python-version: ${{ matrix.python-version }} 32 | - uses: actions/cache@v1 33 | with: 34 | path: $HOME/.cache/pip 35 | key: pip-cache-v1 36 | restore-keys: | 37 | pip-cache- 38 | 39 | - name: Build in confined environment and interpolate version 40 | run: | 41 | python -m venv /tmp/buildenv 42 | source /tmp/buildenv/bin/activate 43 | python -m pip install -U build "setuptools >= 45" wheel "setuptools_scm >= 6.2" \ 44 | setuptools_scm_git_archive pip twine docutils 45 | 46 | python -m build -s -w 47 | python -m twine check dist/nirodents-* 48 | 49 | mv dist /tmp/package 50 | rm -rf nirodents.egg-info/ 51 | 52 | # Interpolate version 53 | if [[ "$GITHUB_REF" == refs/tags/* ]]; then 54 | TAG=${GITHUB_REF##*/} 55 | fi 56 | THISVERSION=$( python -m setuptools_scm ) 57 | THISVERSION=${TAG:-$THISVERSION} 58 | echo "Expected VERSION: \"${THISVERSION}\"" 59 | echo "THISVERSION=${THISVERSION}" >> $GITHUB_ENV 60 | 61 | - name: Install in confined environment [pip] 62 | run: | 63 | python -m venv /tmp/pip 64 | source /tmp/pip/bin/activate 65 | python -m pip install -U "setuptools >= 45" "setuptools_scm >= 6.2" "${{ matrix.pip }}" 66 | python -m pip install . 
67 | INSTALLED_VERSION=$(python -c 'import nirodents as nr; print(nr.__version__, end="")') 68 | echo "VERSION: \"${THISVERSION}\"" 69 | echo "INSTALLED: \"${INSTALLED_VERSION}\"" 70 | test "${INSTALLED_VERSION}" = "${THISVERSION}" 71 | 72 | - name: Install in confined environment [sdist] 73 | run: | 74 | python -m venv /tmp/install_sdist 75 | source /tmp/install_sdist/bin/activate 76 | python -m pip install -U "setuptools >= 45" "${{ matrix.pip }}" 77 | python -m pip install /tmp/package/nirodents*.tar.gz 78 | INSTALLED_VERSION=$(python -c 'import nirodents as nr; print(nr.__version__, end="")') 79 | echo "VERSION: \"${THISVERSION}\"" 80 | echo "INSTALLED: \"${INSTALLED_VERSION}\"" 81 | test "${INSTALLED_VERSION}" = "${THISVERSION}" 82 | 83 | - name: Install in confined environment [wheel] 84 | run: | 85 | python -m venv /tmp/install_wheel 86 | source /tmp/install_wheel/bin/activate 87 | python -m pip install -U "setuptools >= 45" "${{ matrix.pip }}" 88 | python -m pip install /tmp/package/nirodents*.whl 89 | INSTALLED_VERSION=$(python -c 'import nirodents as nr; print(nr.__version__, end="")') 90 | echo "INSTALLED: \"${INSTALLED_VERSION}\"" 91 | test "${INSTALLED_VERSION}" = "${THISVERSION}" 92 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | pip-wheel-metadata/ 24 | share/python-wheels/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | MANIFEST 29 | 30 | # PyInstaller 31 | # Usually these files are written by a python script from a template 32 | # before PyInstaller builds the exe, so as to inject date/other infos 
into it. 33 | *.manifest 34 | *.spec 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .nox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *.cover 50 | *.py,cover 51 | .hypothesis/ 52 | .pytest_cache/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | target/ 76 | 77 | # Jupyter Notebook 78 | .ipynb_checkpoints 79 | 80 | # IPython 81 | profile_default/ 82 | ipython_config.py 83 | 84 | # pyenv 85 | .python-version 86 | 87 | # pipenv 88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 91 | # install all needed dependencies. 92 | #Pipfile.lock 93 | 94 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow 95 | __pypackages__/ 96 | 97 | # Celery stuff 98 | celerybeat-schedule 99 | celerybeat.pid 100 | 101 | # SageMath parsed files 102 | *.sage.py 103 | 104 | # Environments 105 | .env 106 | .venv 107 | env/ 108 | venv/ 109 | ENV/ 110 | env.bak/ 111 | venv.bak/ 112 | 113 | # Spyder project settings 114 | .spyderproject 115 | .spyproject 116 | 117 | # Rope project settings 118 | .ropeproject 119 | 120 | # mkdocs documentation 121 | /site 122 | 123 | # mypy 124 | .mypy_cache/ 125 | .dmypy.json 126 | dmypy.json 127 | 128 | # Pyre type checker 129 | .pyre/ 130 | .DS_Store 131 | docker/files/rats.tar.gz 132 | 133 | nirodents/_version.py 134 | -------------------------------------------------------------------------------- /.mailmap: -------------------------------------------------------------------------------- 1 | Oscar Esteban 2 | Oscar Esteban 3 | -------------------------------------------------------------------------------- /.maint/contributors.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "affiliation": "Department of Psychology, Stanford University", 4 | "name": "Poldrack, Russell A.", 5 | "orcid": "0000-0001-6755-0259", 6 | "type": "Researcher" 7 | }, 8 | { 9 | "affiliation": "Department of Neuroimaging, King's College London", 10 | "name": "Cash, Diana", 11 | "orcid": "0000-0001-5021-1234", 12 | "type": "Researcher" 13 | }, 14 | { 15 | "affiliation": "Department of Neuroimaging, King's College London", 16 | "name": "Kim, Eugene", 17 | "orcid": "0000-0003-0066-7051", 18 | "type": "Researcher" 19 | } 20 | ] 21 | -------------------------------------------------------------------------------- /.maint/developers.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "affiliation": "Department of Neuroimaging, King's College London", 4 | "name": "MacNicol, Eilidh", 5 | "orcid": "0000-0003-3715-7012" 6 | }, 7 | { 8 | "affiliation": 
"Department of Psychology, Stanford University", 9 | "name": "Esteban, Oscar", 10 | "orcid": "0000-0001-8435-6191" 11 | } 12 | ] 13 | -------------------------------------------------------------------------------- /.maint/former.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "name": "Example, Author" 4 | } 5 | ] -------------------------------------------------------------------------------- /.maint/paper_author_list.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """Generate an author list for a new paper or abstract.""" 3 | import sys 4 | from pathlib import Path 5 | import json 6 | from update_zenodo import get_git_lines, sort_contributors 7 | 8 | 9 | # These authors should go last 10 | AUTHORS_LAST = ["Esteban, Oscar"] 11 | 12 | 13 | def _aslist(inlist): 14 | if not isinstance(inlist, list): 15 | return [inlist] 16 | return inlist 17 | 18 | 19 | if __name__ == "__main__": 20 | devs = json.loads(Path(".maint/developers.json").read_text()) 21 | contribs = json.loads(Path(".maint/contributors.json").read_text()) 22 | 23 | author_matches, unmatched = sort_contributors( 24 | devs + contribs, 25 | get_git_lines(), 26 | exclude=json.loads(Path(".maint/former.json").read_text()), 27 | last=AUTHORS_LAST, 28 | ) 29 | # Remove position 30 | affiliations = [] 31 | for item in author_matches: 32 | del item["position"] 33 | for a in _aslist(item.get("affiliation", "Unaffiliated")): 34 | if a not in affiliations: 35 | affiliations.append(a) 36 | 37 | aff_indexes = [ 38 | ", ".join( 39 | [ 40 | "%d" % (affiliations.index(a) + 1) 41 | for a in _aslist(author.get("affiliation", "Unaffiliated")) 42 | ] 43 | ) 44 | for author in author_matches 45 | ] 46 | 47 | print( 48 | "Some people made commits, but are missing in .maint/ " 49 | "files: %s." 
% ", ".join(unmatched), 50 | file=sys.stderr, 51 | ) 52 | 53 | print("Authors (%d):" % len(author_matches)) 54 | print( 55 | "; ".join( 56 | [ 57 | "%s (%s)" % (i["name"], idx) 58 | for i, idx in zip(author_matches, aff_indexes) 59 | ] 60 | ) 61 | ) 62 | 63 | print( 64 | "\n\nAffiliations:\n%s" 65 | % "\n".join( 66 | ["{0: >2}. {1}".format(i + 1, a) for i, a in enumerate(affiliations)] 67 | ) 68 | ) 69 | -------------------------------------------------------------------------------- /.maint/update_changes.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Collects the pull-requests since the latest release and 4 | # aranges them in the CHANGES.rst.txt file. 5 | # 6 | # This is a script to be run before releasing a new version. 7 | # 8 | # Usage /bin/bash update_changes.sh 1.0.1 9 | # 10 | 11 | # Setting # $ help set 12 | set -u # Treat unset variables as an error when substituting. 13 | set -x # Print command traces before executing command. 14 | 15 | # Check whether the Upcoming release header is present 16 | head -1 CHANGES.rst | grep -q Upcoming 17 | UPCOMING=$? 
18 | if [[ "$UPCOMING" == "0" ]]; then 19 | head -n3 CHANGES.rst >> newchanges 20 | fi 21 | 22 | # Elaborate today's release header 23 | HEADER="$1 ($(date '+%B %d, %Y'))" 24 | echo $HEADER >> newchanges 25 | echo $( printf "%${#HEADER}s" | tr " " "=" ) >> newchanges 26 | echo "" >> newchanges 27 | 28 | # Search for PRs since previous release 29 | git log --grep="Merge pull request" `git describe --tags --abbrev=0`..HEAD --pretty='format: * %b %s' | sed 's/Merge pull request \#\([^\d]*\)\ from\ .*/(\#\1)/' >> newchanges 30 | echo "" >> newchanges 31 | echo "" >> newchanges 32 | 33 | # Add back the Upcoming header if it was present 34 | if [[ "$UPCOMING" == "0" ]]; then 35 | tail -n+4 CHANGES.rst >> newchanges 36 | else 37 | cat CHANGES.rst >> newchanges 38 | fi 39 | 40 | # Replace old CHANGES.rst with new file 41 | mv newchanges CHANGES.rst 42 | 43 | -------------------------------------------------------------------------------- /.maint/update_zenodo.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """Update and sort the creators list of the zenodo record.""" 3 | import sys 4 | from pathlib import Path 5 | import json 6 | from fuzzywuzzy import fuzz, process 7 | 8 | # These ORCIDs should go last 9 | CREATORS_LAST = ["Esteban, Oscar"] 10 | CONTRIBUTORS_LAST = ["Poldrack, Russell A."] 11 | 12 | 13 | def sort_contributors(entries, git_lines, exclude=None, last=None): 14 | """Return a list of author dictionaries, ordered by contribution.""" 15 | last = last or [] 16 | sorted_authors = sorted(entries, key=lambda i: i["name"]) 17 | 18 | first_last = [ 19 | " ".join(val["name"].split(",")[::-1]).strip() for val in sorted_authors 20 | ] 21 | first_last_excl = [ 22 | " ".join(val["name"].split(",")[::-1]).strip() for val in exclude or [] 23 | ] 24 | 25 | unmatched = [] 26 | author_matches = [] 27 | position = 1 28 | for ele in git_lines: 29 | matches = process.extract( 30 | ele, first_last, 
scorer=fuzz.token_sort_ratio, limit=2 31 | ) 32 | # matches is a list [('First match', % Match), ('Second match', % Match)] 33 | if matches[0][1] > 80: 34 | val = sorted_authors[first_last.index(matches[0][0])] 35 | else: 36 | # skip unmatched names 37 | if ele not in first_last_excl: 38 | unmatched.append(ele) 39 | continue 40 | 41 | if val not in author_matches: 42 | val["position"] = position 43 | author_matches.append(val) 44 | position += 1 45 | 46 | names = {" ".join(val["name"].split(",")[::-1]).strip() for val in author_matches} 47 | for missing_name in first_last: 48 | if missing_name not in names: 49 | missing = sorted_authors[first_last.index(missing_name)] 50 | missing["position"] = position 51 | author_matches.append(missing) 52 | position += 1 53 | 54 | all_names = [val["name"] for val in author_matches] 55 | for last_author in last: 56 | author_matches[all_names.index(last_author)]["position"] = position 57 | position += 1 58 | 59 | author_matches = sorted(author_matches, key=lambda k: k["position"]) 60 | 61 | return author_matches, unmatched 62 | 63 | 64 | def get_git_lines(fname="line-contributors.txt"): 65 | """Run git-line-summary.""" 66 | import shutil 67 | import subprocess as sp 68 | 69 | contrib_file = Path(fname) 70 | 71 | lines = [] 72 | if contrib_file.exists(): 73 | print("WARNING: Reusing existing line-contributors.txt file.", file=sys.stderr) 74 | lines = contrib_file.read_text().splitlines() 75 | 76 | git_line_summary_path = shutil.which("git-line-summary") 77 | if not lines and git_line_summary_path: 78 | print("Running git-line-summary on repo") 79 | lines = sp.check_output([git_line_summary_path]).decode().splitlines() 80 | lines = [l for l in lines if "Not Committed Yet" not in l] 81 | contrib_file.write_text("\n".join(lines)) 82 | 83 | if not lines: 84 | raise RuntimeError( 85 | """\ 86 | Could not find line-contributors from git repository.%s""" 87 | % """ \ 88 | git-line-summary not found, please install git-extras. 
""" 89 | * (git_line_summary_path is None) 90 | ) 91 | return [" ".join(line.strip().split()[1:-1]) for line in lines if "%" in line] 92 | 93 | 94 | if __name__ == "__main__": 95 | data = get_git_lines() 96 | 97 | zenodo_file = Path(".zenodo.json") 98 | zenodo = json.loads(zenodo_file.read_text()) 99 | 100 | creators = json.loads(Path(".maint/developers.json").read_text()) 101 | zen_creators, miss_creators = sort_contributors( 102 | creators, 103 | data, 104 | exclude=json.loads(Path(".maint/former.json").read_text()), 105 | last=CREATORS_LAST, 106 | ) 107 | contributors = json.loads(Path(".maint/contributors.json").read_text()) 108 | zen_contributors, miss_contributors = sort_contributors( 109 | contributors, 110 | data, 111 | exclude=json.loads(Path(".maint/former.json").read_text()), 112 | last=CONTRIBUTORS_LAST, 113 | ) 114 | zenodo["creators"] = zen_creators 115 | zenodo["contributors"] = zen_contributors 116 | 117 | print( 118 | "Some people made commits, but are missing in .maint/ " 119 | "files: %s." % ", ".join(set(miss_creators).intersection(miss_contributors)), 120 | file=sys.stderr, 121 | ) 122 | 123 | # Remove position 124 | for creator in zenodo["creators"]: 125 | del creator["position"] 126 | if isinstance(creator["affiliation"], list): 127 | creator["affiliation"] = creator["affiliation"][0] 128 | 129 | for creator in zenodo["contributors"]: 130 | creator["type"] = "Researcher" 131 | del creator["position"] 132 | if isinstance(creator["affiliation"], list): 133 | creator["affiliation"] = creator["affiliation"][0] 134 | 135 | zenodo_file.write_text("%s\n" % json.dumps(zenodo, indent=2)) 136 | -------------------------------------------------------------------------------- /.zenodo.json: -------------------------------------------------------------------------------- 1 | { 2 | "title": "NeuroImaging Rodents (NiRodents): processing tools for magnetic resonance images of the rodent brain", 3 | "description": "

A collection of Nipype interfaces, patches and workflows for reuse in NiPreps (NeuroImaging PREProcessing tools) such as fMRIPrep.

", 4 | "creators": [ 5 | { 6 | "affiliation": "", 7 | "name": "MacNicol, Eilidh", 8 | "orcid": "" 9 | }, 10 | { 11 | "affiliation": "Department of Psychology, Stanford University", 12 | "name": "Esteban, Oscar", 13 | "orcid": "0000-0001-8435-6191" 14 | } 15 | ], 16 | "contributors": [ 17 | { 18 | "affiliation": "Department of Psychology, Stanford University", 19 | "name": "Poldrack, Russell A.", 20 | "orcid": "0000-0001-6755-0259", 21 | "type": "Researcher" 22 | } 23 | ], 24 | "keywords": [ 25 | "neuroimaging", 26 | "workflow", 27 | "pipeline", 28 | "preprocessing", 29 | "MRI", 30 | "BIDS" 31 | ], 32 | "license": "Apache-2.0", 33 | "upload_type": "software" 34 | } 35 | -------------------------------------------------------------------------------- /CHANGES.rst: -------------------------------------------------------------------------------- 1 | 0.2.8 (February 27, 2023) 2 | ========================= 3 | 4 | * MAINT: Rotate CircleCI secrets and setup up org-level context by @oesteban in `#58 `__ 5 | * MAINT: Replace versioneer with setuptools-scm & adapt packaging to PEP517 by @oesteban in `#60 `__ 6 | * MAINT: Flexibilize dependency pinnings by @oesteban in `#59 `__ 7 | 8 | **Full Changelog**: https://github.com/nipreps/nirodents/compare/0.2.7...0.2.8 9 | 10 | 0.2.7 (May 10, 2022) 11 | ==================== 12 | 13 | * MAINT: update docker (#57) 14 | 15 | 0.2.6 (March 25, 2022) 16 | ====================== 17 | 18 | * MAINT: docker deployment (#56) 19 | * MAINT: bump niworkflows to 1.5.x (#55) 20 | 21 | 0.2.5 (March 15, 2022) 22 | ====================== 23 | 24 | * FIX: N4 updates (#54) 25 | * FIX: input image absolute path (#53) 26 | * FIX: update brain extraction notebook (#51) 27 | * FIX: reorder pre-brain extraction massaging 28 | * PIN: pandas and scipy versions in docker file (#49) 29 | * PIN: niworkflows 1.4.x (#48) 30 | * DOC: update changes.rst (#47) 31 | 32 | 0.2.3 (September 15, 2021) 33 | ========================== 34 | 35 | * ENH: improved clip function 
(#45) 36 | * ENH: improved brain extraction parameters (#44) 37 | * FIX: cli updated so antsai no longer default (#42) 38 | * ENH: improve aniso bspline (#41) 39 | * ENH: scale Laplacian smoothing with voxel size (#40) 40 | * ENH: Continue refactoring the workflow (#38) 41 | * REL: Preparing a 1.0 release (#37) 42 | * ENH: adapt antsai paramaters from cli (#36) 43 | * ENH: Deep revision of the workflow (#35) 44 | * ENH: Add RATS (commented out) and PCNN to dockerfile (#34) 45 | * ENH: Second refactor of workflow - make ``antsAI`` optional (#33) 46 | * ENH: Add an entrypoint in container images (#32) 47 | * ENH: Several improvements over the overhaul (#31) 48 | * ENH: Workflow overhaul (#30) 49 | * MAINT: Run black on the full repo, address pep8 errors (#27) 50 | * MAINT: tidy workflow (#23) 51 | * ENH: Setup a smoke test on CircleCI + minor improvements to CLI (#26) 52 | * FIX: Correctly pin niworkflows branch and use new interface (#25) 53 | * FIX: resampling bug (#22) 54 | * MAINT: Update version pinning of nipype and niworkflows to dev versions (#20) 55 | * ENH: Add AFNI to docker image (#19) 56 | * FIX: Data init file (#18) 57 | * FIX: Set correct binary path for ANTS (#17) 58 | * ENH: add mosaic plots and wrapper (#16) 59 | 60 | 0.2.0 (October 06, 2020) 61 | ======================== 62 | First usable release, still in alpha status. 
63 | 64 | * FIX: Correctly pin niworkflows branch and use new interface (#25) 65 | * FIX: Bug in resampling interface (#22) 66 | * FIX: Data init file (#18) 67 | * FIX: Set correct binary path for ANTS (#17) 68 | * ENH: Improve anisotropic B-Splines for INU correction (#41) 69 | * ENH: Scale Laplacian smoothing with voxel size (#40) 70 | * ENH: Continue refactoring the workflow (#38) 71 | * ENH: Adapt ``antsAI`` paramaters from CLI (#36) 72 | * ENH: Deep revision of the workflow (#35) 73 | * ENH: Add RATS (commented out) and PCNN to ``Dockerfile`` (#34) 74 | * ENH: Second refactor of workflow - make ``antsAI`` optional (#33) 75 | * ENH: Add an entrypoint in container images (#32) 76 | * ENH: Several improvements over the overhaul (#31) 77 | * ENH: Workflow overhaul (#30) 78 | * ENH: Setup a smoke test on CircleCI + minor improvements to CLI (#26) 79 | * ENH: Add AFNI to docker image (#19) 80 | * ENH: add mosaic plots and wrapper (#16) 81 | * MAINT: Run black on the full repo, address pep8 errors (#27) 82 | * MAINT: Tidy-up workflow (#23) 83 | * MAINT: Update version pinning of nipype and niworkflows to dev versions (#20) 84 | 85 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM nipreps/miniconda:py38_1.4.2 2 | 3 | # Prepare environment 4 | RUN apt-get update && \ 5 | apt-get install -y --no-install-recommends \ 6 | autoconf \ 7 | build-essential \ 8 | bzip2 \ 9 | ca-certificates \ 10 | curl \ 11 | git \ 12 | libtool \ 13 | lsb-release \ 14 | pkg-config \ 15 | unzip \ 16 | xvfb && \ 17 | apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* 18 | 19 | # Installing ANTs 2.3.0 (NeuroDocker build) 20 | ENV ANTSPATH=/usr/lib/ants 21 | RUN mkdir -p $ANTSPATH && \ 22 | curl -sSL "https://dl.dropbox.com/s/hrm530kcqe3zo68/ants-Linux-centos6_x86_64-v2.3.2.tar.gz" \ 23 | | tar -xzC $ANTSPATH --strip-components 1 24 | ENV 
PATH=$ANTSPATH/bin:$PATH 25 | 26 | # WORKDIR /opt/pcnn3d 27 | # RUN curl -sSL "https://f495cb51-a-62cb3a1a-s-sites.googlegroups.com/site/chuanglab/software/3d-pcnn/PCNN3D%20binary.zip" -o "pcnn3d.zip" && \ 28 | # unzip pcnn3d.zip && \ 29 | # rm pcnn3d.zip && \ 30 | # chmod a+rx PCNNBrainExtract 31 | # ENV PATH="/opt/pcnn3d:$PATH" 32 | 33 | # Uncomment these lines for RATS (requires the software bundle) 34 | # WORKDIR /opt/RATS 35 | # COPY docker/files/rats.tar.gz /tmp/ 36 | # RUN tar xzf /tmp/rats.tar.gz 37 | # ENV PATH="/opt/RATS/distribution:$PATH" 38 | 39 | # Create a shared $HOME directory 40 | RUN useradd -m -s /bin/bash -G users nirodents 41 | WORKDIR /home/nirodents 42 | ENV HOME="/home/nirodents" 43 | 44 | # Unless otherwise specified each process should only use one thread - nipype 45 | # will handle parallelization 46 | ENV MKL_NUM_THREADS=1 \ 47 | OMP_NUM_THREADS=1 \ 48 | TEMPLATEFLOW_AUTOUPDATE=0 49 | 50 | # Installing dev requirements (packages that are not in pypi) 51 | WORKDIR /src/ 52 | COPY . nirodents/ 53 | WORKDIR /src/nirodents/ 54 | RUN pip install --no-cache-dir -e .[all] && \ 55 | rm -rf $HOME/.cache/pip 56 | 57 | COPY docker/files/nipype.cfg /home/nirodents/.nipype/nipype.cfg 58 | 59 | # Cleanup and ensure perms. 
60 | RUN rm -rf $HOME/.npm $HOME/.conda $HOME/.empty && \ 61 | find $HOME -type d -exec chmod go=u {} + && \ 62 | find $HOME -type f -exec chmod go=u {} + 63 | 64 | # Final settings 65 | WORKDIR /tmp 66 | ARG BUILD_DATE 67 | ARG VCS_REF 68 | LABEL org.label-schema.build-date=$BUILD_DATE \ 69 | org.label-schema.name="nirodents" \ 70 | org.label-schema.description="nirodents - NeuroImaging workflows" \ 71 | org.label-schema.url="https://github.com/nipreps/nirodents" \ 72 | org.label-schema.vcs-ref=$VCS_REF \ 73 | org.label-schema.vcs-url="https://github.com/nipreps/nirodents" \ 74 | org.label-schema.version=$VERSION \ 75 | org.label-schema.schema-version="1.0" 76 | 77 | ENTRYPOINT ["/opt/conda/bin/artsBrainExtraction"] 78 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 
25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. 
For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. 
If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. 
You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. 
(Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | #Misc 2 | include CHANGES.rst 3 | include LICENSE 4 | 5 | # versioneer 6 | include versioneer.py 7 | include nirodents/_version.py 8 | -------------------------------------------------------------------------------- /README.rst: -------------------------------------------------------------------------------- 1 | NiRodents 2 | ========= 3 | A toolbox of rodent MRI processing components. 
-------------------------------------------------------------------------------- /docker/files/neurodebian.gpg: -------------------------------------------------------------------------------- 1 | -----BEGIN PGP PUBLIC KEY BLOCK----- 2 | Version: GnuPG v1 3 | 4 | mQGiBEQ7TOgRBADvaRsIZ3VZ6Qy7PlDpdMm97m0OfvouOj/HhjOM4M3ECbGn4cYh 5 | vN1gK586s3sUsUcNQ8LuWvNsYhxYsVTZymCReJMEDxod0U6/z/oIbpWv5svF3kpl 6 | ogA66Ju/6cZx62RiCSOkskI6A3Waj6xHyEo8AGOPfzbMoOOQ1TS1u9s2FwCgxziL 7 | wADvKYlDZnWM03QtqIJVD8UEAOks9Q2OqFoqKarj6xTRdOYIBVEp2jhozZUZmLmz 8 | pKL9E4NKGfixqxdVimFcRUGM5h7R2w7ORqXjCzpiPmgdv3jJLWDnmHLmMYRYQc8p 9 | 5nqo8mxuO3zJugxBemWoacBDd1MJaH7nK20Hsk9L/jvU/qLxPJotMStTnwO+EpsK 10 | HlihA/9ZpvzR1QWNUd9nSuNR3byJhaXvxqQltsM7tLqAT4qAOJIcMjxr+qESdEbx 11 | NHM5M1Y21ZynrsQw+Fb1WHXNbP79vzOxHoZR0+OXe8uUpkri2d9iOocre3NUdpOO 12 | JHtl6cGGTFILt8tSuOVxMT/+nlo038JQB2jARe4B85O0tkPIPbQybmV1cm8uZGVi 13 | aWFuLm5ldCBhcmNoaXZlIDxtaWNoYWVsLmhhbmtlQGdtYWlsLmNvbT6IRgQQEQgA 14 | BgUCTVHJKwAKCRCNEUVjdcAkyOvzAJ0abJz+f2a6VZG1c9T8NHMTYh1atwCgt0EE 15 | 3ZZd/2in64jSzu0miqhXbOKISgQQEQIACgUCSotRlwMFAXgACgkQ93+NsjFEvg8n 16 | JgCfWcdJbILBtpLZCocvOzlLPqJ0Fn0AoI4EpJRxoUnrtzBGUC1MqecU7WsDiGAE 17 | ExECACAFAkqLUWcCGwMGCwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAKCRCl0y8BJkml 18 | qVklAJ4h2V6MdQkSAThF5c2Gkq6eSoIQYQCeM0DWyB9Bl+tTPSTYXwwZi2uoif20 19 | QmFwc3kuZ3NlLnVuaS1tYWdkZWJ1cmcuZGUgRGViaWFuIEFyY2hpdmUgPG1pY2hh 20 | ZWwuaGFua2VAZ21haWwuY29tPohGBBARAgAGBQJEO03FAAoJEPd/jbIxRL4PU18A 21 | n3tn7i4qdlMi8kHbYWFoabsKc9beAJ9sl/leZNCYNMGhz+u6BQgyeLKw94heBBMR 22 | AgAeBQJEO0zoAhsDBgsJCAcDAgMVAgMDFgIBAh4BAheAAAoJEKXTLwEmSaWpVdoA 23 | n27DvtZizNEbhz3wRUPQMiQjtqdvAJ9rS9YdPe5h5o5gHx3mw3BSkOttdYheBBMR 24 | AgAeBQJEO0zoAhsDBgsJCAcDAgMVAgMDFgIBAh4BAheAAAoJEKXTLwEmSaWpVdoA 25 | oLhwWL+E+2I9lrUf4Lf26quOK9vLAKC9ZpIF2tUirFFkBWnQvu13/TA0SokCHAQQ 26 | AQIABgUCTSNBgQAKCRDAc9Iof/uem4NpEACQ8jxmaCaS/qk/Y4GiwLA5bvKosG3B 27 | iARZ2v5UWqCZQ1tS56yKse/lCIzXQqU9BnYW6wOI2rvFf9meLfd8h96peG6oKscs 28 | 
fbclLDIf68bBvGBQaD0VYFi/Fk/rxmTQBOCQ3AJZs8O5rIM4gPGE0QGvSZ1h7VRw 29 | 3Uyeg4jKXLIeJn2xEmOJgt3auAR2FyKbzHaX9JCoByJZ/eU23akNl9hgt7ePlpXo 30 | 74KNYC58auuMUhCq3BQDB+II4ERYMcmFp1N5ZG05Cl6jcaRRHDXz+Ax6DWprRI1+ 31 | RH/Yyae6LmKpeJNwd+vM14aawnNO9h8IAQ+aJ3oYZdRhGyybbin3giJ10hmWveg/ 32 | Pey91Nh9vBCHdDkdPU0s9zE7z/PHT0c5ccZRukxfZfkrlWQ5iqu3V064ku5f4PBy 33 | 8UPSkETcjYgDnrdnwqIAO+oVg/SFlfsOzftnwUrvwIcZlXAgtP6MEEAs/38e/JIN 34 | g4VrpdAy7HMGEUsh6Ah6lvGQr+zBnG44XwKfl7e0uCYkrAzUJRGM5vx9iXvFMcMu 35 | jv9EBNNBOU8/Y6MBDzGZhgaoeI27nrUvaveJXjAiDKAQWBLjtQjINZ8I9uaSGOul 36 | 8kpbFavE4eS3+KhISrSHe4DuAa3dk9zI+FiPvXY1ZyfQBtNpR+gYFY6VxMbHhY1U 37 | lSLHO2eUIQLdYbRITmV1cm9EZWJpYW4gQXJjaGl2ZSBLZXkgPHBrZy1leHBwc3kt 38 | bWFpbnRhaW5lcnNAbGlzdHMuYWxpb3RoLmRlYmlhbi5vcmc+iEYEEBEIAAYFAk1R 39 | yQYACgkQjRFFY3XAJMgEWwCggx4Gqlcrt76TSMlbU94cESo55AEAoJ3asQEMpe8t 40 | QUX+5aikw3z1AUoCiEoEEBECAAoFAkqf/3cDBQF4AAoJEPd/jbIxRL4PxyMAoKUI 41 | RPWlHCj/+HSFfwhos68wcSwmAKChuC00qutDro+AOo+uuq6YoHXj+ohgBBMRAgAg 42 | BQJKn/8bAhsDBgsJCAcDAgQVAggDBBYCAwECHgECF4AACgkQpdMvASZJpalDggCe 43 | KF9KOgOPdQbFnKXl8KtHory4EEwAnA7jxgorE6kk2QHEXFSF8LzOOH4GiGMEExEC 44 | ACMCGwMGCwkIBwMCBBUCCAMEFgIDAQIeAQIXgAUCSp//RgIZAQAKCRCl0y8BJkml 45 | qekFAKCRyt4+FoCzmBbRUUP3Cr8PzH++IgCgkno4vdjsWdyAey8e0KpITTXMFrmJ 46 | AhwEEAECAAYFAk0jQYEACgkQwHPSKH/7npsFfw/+P8B8hpM3+T1fgboBa4R32deu 47 | n8m6b8vZMXwuo/awQtMpzjem8JGXSUQm8iiX4hDtjq6ZoPrlN8T4jNmviBt/F5jI 48 | Jji/PYmhq+Zn9s++mfx+aF4IJrcHJWFkg/6kJzn4oSdl/YlvKf4VRCcQNtj4xV87 49 | GsdamnzU17XapLVMbSaVKh+6Af7ZLDerEH+iAq733HsYaTK+1xKmN7EFVXgS7bZ1 50 | 9C4LTzc97bVHSywpT9yIrg9QQs/1kshfVIHDKyhjF6IwzSVbeGAIL3Oqo5zOMkWv 51 | 7JlEIkkhTyl+FETxNMTMYjAk+Uei3kRodneq3YBF2uFYSEzrXQgHAyn37geiaMYj 52 | h8wu6a85nG1NS0SdxiZDIePmbvD9vWxFZUWYJ/h9ifsLivWcVXlvHoQ0emd+n2ai 53 | FhAck2xsuyHgnGIZMHww5IkQdu/TMqvbcR6d8Xulh+C4Tq7ppy+oTLADSBKII++p 54 | JQioYydRD529EUJgVlhyH27X6YAk3FuRD3zYZRYS2QECiKXvS665o3JRJ0ZSqNgv 55 | YOom8M0zz6bI9grnUoivMI4o7ISpE4ZwffEd37HVzmraaUHDXRhkulFSf1ImtXoj 56 | 
V9nNSM5p/+9eP7OioTZhSote6Vj6Ja1SZeRkXZK7BwqPbdO0VsYOb7G//ZiOlqs+ 57 | paRr92G/pwBfj5Dq8EK5Ag0ERDtM9RAIAN0EJqBPvLN0tEin/y4Fe0R4n+E+zNXg 58 | bBsq4WidwyUFy3h/6u86FYvegXwUqVS2OsEs5MwPcCVJOfaEthF7I89QJnP9Nfx7 59 | V5I9yFB53o9ii38BN7X+9gSjpfwXOvf/wIDfggxX8/wRFel37GRB7TiiABRArBez 60 | s5x+zTXvT++WPhElySj0uY8bjVR6tso+d65K0UesvAa7PPWeRS+3nhqABSFLuTTT 61 | MMbnVXCGesBrYHlFVXClAYrSIOX8Ub/UnuEYs9+hIV7U4jKzRF9WJhIC1cXHPmOh 62 | vleAf/I9h/0KahD7HLYud40pNBo5tW8jSfp2/Q8TIE0xxshd51/xy4MAAwUH+wWn 63 | zsYVk981OKUEXul8JPyPxbw05fOd6gF4MJ3YodO+6dfoyIl3bewk+11KXZQALKaO 64 | 1xmkAEO1RqizPeetoadBVkQBp5xPudsVElUTOX0pTYhkUd3iBilsCYKK1/KQ9KzD 65 | I+O/lRsm6L9lc6rV0IgPU00P4BAwR+x8Rw7TJFbuS0miR3lP1NSguz+/kpjxzmGP 66 | LyHJ+LVDYFkk6t0jPXhqFdUY6McUTBDEvavTGlVO062l9APTmmSMVFDsPN/rBes2 67 | rYhuuT+lDp+gcaS1UoaYCIm9kKOteQBnowX9V74Z+HKEYLtwILaSnNe6/fNSTvyj 68 | g0z+R+sPCY4nHewbVC+ISQQYEQIACQUCRDtM9QIbDAAKCRCl0y8BJkmlqbecAJ9B 69 | UdSKVg9H+fQNyP5sbOjj4RDtdACfXHrRHa2+XjJP0dhpvJ8IfvYnQsU= 70 | =fAJZ 71 | -----END PGP PUBLIC KEY BLOCK----- 72 | -------------------------------------------------------------------------------- /docker/files/nipype.cfg: -------------------------------------------------------------------------------- 1 | [execution] 2 | hash_method = content 3 | poll_sleep_duration = 0.01 4 | remove_unnecessary_outputs = true 5 | crashfile_format = txt 6 | profile_runtime = false 7 | use_relative_paths = false 8 | -------------------------------------------------------------------------------- /docs/notebooks/brainextraction_tutorial.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": null, 6 | "metadata": {}, 7 | "outputs": [], 8 | "source": [ 9 | "# imports\n", 10 | "from pathlib import Path\n", 11 | "from nirodents.workflows.brainextraction import init_rodent_brain_extraction_wf" 12 | ] 13 | }, 14 | { 15 | "cell_type": "code", 16 | "execution_count": null, 17 | "metadata": {}, 
18 | "outputs": [], 19 | "source": [ 20 | "# Define inputs\n", 21 | "wd = Path.cwd()\n", 22 | "home_dir = wd.parents[1]\n", 23 | "in_dir = Path(home_dir, 'nirodents', 'data', 'testdata', 'sub-15', 'anat')\n", 24 | "tar_img = Path(in_dir, 'sub-15_T2w.nii.gz')" 25 | ] 26 | }, 27 | { 28 | "cell_type": "code", 29 | "execution_count": null, 30 | "metadata": {}, 31 | "outputs": [], 32 | "source": [ 33 | "# Minimal set up of workflow\n", 34 | "be_wf = init_rodent_brain_extraction_wf(output_dir=wd)\n", 35 | "be_wf.inputs.inputnode.in_files = [tar_img]" 36 | ] 37 | }, 38 | { 39 | "cell_type": "code", 40 | "execution_count": null, 41 | "metadata": {}, 42 | "outputs": [], 43 | "source": [ 44 | "# Run\n", 45 | "be_wf.run()" 46 | ] 47 | }, 48 | { 49 | "cell_type": "code", 50 | "execution_count": null, 51 | "metadata": {}, 52 | "outputs": [], 53 | "source": [ 54 | "# display graph\n", 55 | "be_wf.write_graph(graph2use='flat', dotfilename='./graph_flat.dot')\n", 56 | "\n", 57 | "from IPython.display import Image\n", 58 | "Image(filename=\"graph_flat.png\")" 59 | ] 60 | } 61 | ], 62 | "metadata": { 63 | "kernelspec": { 64 | "display_name": "Python 3", 65 | "language": "python", 66 | "name": "python3" 67 | }, 68 | "language_info": { 69 | "codemirror_mode": { 70 | "name": "ipython", 71 | "version": 3 72 | }, 73 | "file_extension": ".py", 74 | "mimetype": "text/x-python", 75 | "name": "python", 76 | "nbconvert_exporter": "python", 77 | "pygments_lexer": "ipython3", 78 | "version": "3.9.7" 79 | } 80 | }, 81 | "nbformat": 4, 82 | "nbformat_minor": 4 83 | } 84 | -------------------------------------------------------------------------------- /nirodents/__init__.py: -------------------------------------------------------------------------------- 1 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- 2 | # vi: set ft=python sts=4 ts=4 sw=4 et: 3 | # 4 | # Copyright 2022 The NiPreps Developers 5 | # 6 | # Licensed under the Apache License, Version 2.0 (the 
"License"); 7 | # you may not use this file except in compliance with the License. 8 | # You may obtain a copy of the License at 9 | # 10 | # http://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | # Unless required by applicable law or agreed to in writing, software 13 | # distributed under the License is distributed on an "AS IS" BASIS, 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | # See the License for the specific language governing permissions and 16 | # limitations under the License. 17 | # 18 | # We support and encourage derived works from this project, please read 19 | # about our expectations at 20 | # 21 | # https://www.nipreps.org/community/licensing/ 22 | # 23 | """Top-level package for nirodents.""" 24 | from nirodents._version import __version__ 25 | 26 | __packagename__ = "nirodents" 27 | __copyright__ = "Copyright 2023, The NiPreps Developers" 28 | __url__ = "https://github.com/nipreps/nirodents" 29 | 30 | 31 | DOWNLOAD_URL = ( 32 | f"https://github.com/nipreps/{__packagename__}/archive/{__version__}.tar.gz" 33 | ) 34 | 35 | -------------------------------------------------------------------------------- /nirodents/cli/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nipreps/nirodents/d1dcf984ada244fc0e5724f792feeaabe68efb09/nirodents/cli/__init__.py -------------------------------------------------------------------------------- /nirodents/cli/brainextraction.py: -------------------------------------------------------------------------------- 1 | """Command Line Interface.""" 2 | from pathlib import Path 3 | 4 | 5 | def get_parser(): 6 | """Build parser object.""" 7 | from os import cpu_count 8 | from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter 9 | from nirodents import __version__ 10 | 11 | parser = ArgumentParser( 12 | description="""\ 13 | artsBrainExtraction -- Atlas-based brain extraction tool of the \ 14 | 
ANTs-based Rodents ToolS (ARTs) package.\ 15 | """, 16 | formatter_class=ArgumentDefaultsHelpFormatter, 17 | ) 18 | 19 | parser.add_argument( 20 | "input_image", 21 | action="store", 22 | type=Path, 23 | help="The target image for brain extraction.", 24 | ) 25 | parser.add_argument( 26 | "--version", 27 | action="version", 28 | version="artsBrainExtraction v{}".format(__version__), 29 | ) 30 | parser.add_argument( 31 | "--template", 32 | action="store", 33 | default="Fischer344", 34 | help="The TemplateFlow ID of the reference template.", 35 | ) 36 | parser.add_argument( 37 | "--omp-nthreads", 38 | action="store", 39 | type=int, 40 | default=cpu_count(), 41 | help="Number of CPUs available for multithreading processes.", 42 | ) 43 | parser.add_argument( 44 | "--nprocs", 45 | action="store", 46 | type=int, 47 | default=cpu_count(), 48 | help="Number of processes that can be run in parallel.", 49 | ) 50 | parser.add_argument( 51 | "-m", 52 | "--mri-scheme", 53 | action="store", 54 | type=str, 55 | default="T2w", 56 | help="select a particular MRI scheme", 57 | ) 58 | parser.add_argument( 59 | "-o", 60 | "--output-dir", 61 | action="store", 62 | type=Path, 63 | default=Path("results").absolute(), 64 | help="path where intermediate results should be stored", 65 | ) 66 | parser.add_argument( 67 | "-w", 68 | "--work-dir", 69 | action="store", 70 | type=Path, 71 | default=Path("work").absolute(), 72 | help="path where intermediate results should be stored", 73 | ) 74 | parser.add_argument( 75 | "--sloppy", 76 | dest="debug", 77 | action="store_true", 78 | default=False, 79 | help="Use low-quality tools for speed - TESTING ONLY", 80 | ) 81 | parser.add_argument( 82 | "--antsAI", 83 | dest="antsai_init", 84 | action="store_true", 85 | default=False, 86 | help="Include antsAI initialization step", 87 | ) 88 | 89 | parser.add_argument( 90 | "--antsAI-factor", 91 | dest="antsai_factor", 92 | action="store", 93 | type=int, 94 | default=20, 95 | help="Specify ants-ai search 
factor parameter", 96 | ) 97 | 98 | parser.add_argument( 99 | "--antsAI-arcfrac", 100 | dest="antsai_arcfrac", 101 | type=float, 102 | action="store", 103 | default=0.12, 104 | help="Specify ants-ai arc fraction parameter", 105 | ) 106 | 107 | parser.add_argument( 108 | "--antsAI-step", 109 | dest="antsai_step", 110 | action="store", 111 | type=float, 112 | default=4, 113 | help="Specify ants-ai step size parameter", 114 | ) 115 | 116 | parser.add_argument( 117 | "--antsAI-grid", 118 | dest="antsai_grid", 119 | action="store", 120 | nargs=3, 121 | type=float, 122 | default=(0, 4, 4), 123 | help="Specify ants-ai Search Grid parameter", 124 | ) 125 | return parser 126 | 127 | 128 | def main(): 129 | """Entry point.""" 130 | from templateflow import update as update_templateflow 131 | from ..workflows.brainextraction import init_rodent_brain_extraction_wf 132 | 133 | opts = get_parser().parse_args() 134 | update_templateflow(overwrite=False) 135 | be = init_rodent_brain_extraction_wf( 136 | ants_affine_init=opts.antsai_init, 137 | factor=opts.antsai_factor, 138 | arc=opts.antsai_arcfrac, 139 | step=opts.antsai_step, 140 | grid=tuple(opts.antsai_grid), 141 | debug=opts.debug, 142 | mri_scheme=opts.mri_scheme, 143 | omp_nthreads=opts.omp_nthreads, 144 | output_dir=opts.output_dir, 145 | template_id=opts.template, 146 | ) 147 | be.base_dir = opts.work_dir 148 | be.inputs.inputnode.in_files = Path(opts.input_image).absolute() 149 | 150 | nipype_plugin = {"plugin": "Linear"} 151 | if opts.nprocs > 1: 152 | nipype_plugin["plugin"] = "MultiProc" 153 | nipype_plugin["plugin_args"] = { 154 | "n_procs ": opts.nprocs, 155 | "raise_insufficient": False, 156 | "maxtasksperchild": 1, 157 | } 158 | be.run(**nipype_plugin) 159 | 160 | 161 | if __name__ == "__main__": 162 | raise RuntimeError( 163 | """\ 164 | nirodents/cli/brainextraction.py should not be run directly; 165 | Please `pip install` nirodents and use the `artsBrainExtraction` command.""" 166 | ) 167 | 
-------------------------------------------------------------------------------- /nirodents/cli/plotmask.py: -------------------------------------------------------------------------------- 1 | """Command Line Interface to print mask overlay mosaic plots""" 2 | 3 | import os 4 | from pathlib import Path 5 | 6 | 7 | def get_parser(): 8 | """Build parser object.""" 9 | from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter 10 | 11 | parser = ArgumentParser( 12 | prog="plot_mask", 13 | description="""plot_mask -- Create mosaic plot of mask on a base.""", 14 | formatter_class=ArgumentDefaultsHelpFormatter, 15 | ) 16 | 17 | parser.add_argument( 18 | "-b", 19 | "--base-image", 20 | type=Path, 21 | required=True, 22 | help="The base image for overlay.", 23 | ) 24 | parser.add_argument( 25 | "-m", 26 | "--mask-image", 27 | type=Path, 28 | required=True, 29 | help="The mask image for overlay.", 30 | ) 31 | parser.add_argument( 32 | "-o", "--output", type=Path, help="The location of output file." 
33 | ) 34 | parser.add_argument( 35 | "-s", 36 | "--plot-sagittal", 37 | type=int, 38 | default=0, 39 | choices=[0, 1], 40 | help="Boolean to also print sagittal plane.", 41 | ) 42 | parser.add_argument( 43 | "-c", 44 | "--columns", 45 | type=int, 46 | default=7, 47 | help="Integer describing number of columns in plot.", 48 | ) 49 | 50 | return parser 51 | 52 | 53 | def main(): 54 | """Entry point.""" 55 | from nirodents import viz 56 | 57 | # set output if one is not defined 58 | opts = get_parser().parse_args() 59 | if opts.output is None: 60 | mask_dir = os.path.dirname(opts.mask_image) 61 | output_path = os.path.join(mask_dir, "mask_plot.svg") 62 | else: 63 | if os.path.basename(opts.output)[-4:] != ".svg": 64 | raise ValueError("Output must be .svg file") 65 | else: 66 | output_path = opts.output 67 | 68 | # call function 69 | viz.plot_mosaic( 70 | img=opts.base_image, 71 | overlay_mask=opts.mask_image, 72 | plot_sagittal=bool(opts.plot_sagittal), 73 | ncols=opts.columns, 74 | out_file=output_path, 75 | ) 76 | 77 | 78 | if __name__ == "__main__": 79 | raise RuntimeError( 80 | """\ 81 | nirodents/cli/plotmask.py should not be run directly; 82 | Please `pip install` nirodents and use the `plot_mask` command.""" 83 | ) 84 | -------------------------------------------------------------------------------- /nirodents/data/artsBrainExtraction_precise_T2w.json: -------------------------------------------------------------------------------- 1 | { 2 | "collapse_output_transforms": true, 3 | "convergence_threshold": [1E-8, 1E-8, 1E-9], 4 | "convergence_window_size": [10, 10, 15], 5 | "dimension": 3, 6 | "interpolation": "LanczosWindowedSinc", 7 | "metric": [ 8 | "MI", 9 | "MI", 10 | ["CC", "CC"] 11 | ], 12 | "metric_weight": [ 13 | 1, 14 | 1, 15 | [0.4, 0.6] 16 | ], 17 | "number_of_iterations": [ 18 | [1000, 500, 250, 100], 19 | [1000, 500, 250, 100], 20 | [50, 10, 0] 21 | ], 22 | "output_transform_prefix": "anat_to_template", 23 | "output_warped_image": true, 24 | 
"radius_or_number_of_bins": [ 25 | 32, 26 | 32, 27 | [4, 4] 28 | ], 29 | "sampling_percentage": [ 30 | 0.25, 31 | 0.25, 32 | [1, 1] 33 | ], 34 | "sampling_strategy": [ 35 | "Regular", 36 | "Regular", 37 | ["None", "None"] 38 | ], 39 | "shrink_factors": [ 40 | [8, 4, 2, 1], 41 | [8, 4, 2, 1], 42 | [4, 2, 1] 43 | ], 44 | "sigma_units": ["vox", "vox", "vox"], 45 | "smoothing_sigmas": [ 46 | [4, 2, 1, 0], 47 | [4, 2, 1, 0], 48 | [2, 1, 0] 49 | ], 50 | "transform_parameters": [ 51 | [0.1], 52 | [0.1], 53 | [0.1, 3.0, 0.0] 54 | ], 55 | "transforms": ["Rigid", "Affine", "SyN"], 56 | "use_histogram_matching": true, 57 | "verbose": true, 58 | "winsorize_lower_quantile": 0.01, 59 | "winsorize_upper_quantile": 0.975, 60 | "write_composite_transform": false 61 | } -------------------------------------------------------------------------------- /nirodents/data/artsBrainExtraction_testing_T2w.json: -------------------------------------------------------------------------------- 1 | 2 | { 3 | "collapse_output_transforms": true, 4 | "convergence_threshold": [1E-8, 1E-8, 1E-9], 5 | "convergence_window_size": [10, 10, 15], 6 | "dimension": 3, 7 | "interpolation": "LanczosWindowedSinc", 8 | "metric": [ 9 | "MI", 10 | "MI", 11 | ["CC", "CC"] 12 | ], 13 | "metric_weight": [ 14 | 1, 15 | 1, 16 | [0.5, 0.5] 17 | ], 18 | "number_of_iterations": [ 19 | [100, 100, 50, 10], 20 | [100, 100, 50, 10], 21 | [5, 0] 22 | ], 23 | "output_transform_prefix": "anat_to_template", 24 | "output_warped_image": true, 25 | "radius_or_number_of_bins": [ 26 | 32, 27 | 32, 28 | [4, 4] 29 | ], 30 | "sampling_percentage": [ 31 | 0.25, 32 | 0.25, 33 | [1, 1] 34 | ], 35 | "sampling_strategy": [ 36 | "Regular", 37 | "Regular", 38 | ["None", "None"] 39 | ], 40 | "shrink_factors": [ 41 | [8, 4, 2, 1], 42 | [8, 4, 2, 1], 43 | [2, 1] 44 | ], 45 | "sigma_units": ["vox", "vox", "vox"], 46 | "smoothing_sigmas": [ 47 | [4, 2, 1, 0], 48 | [4, 2, 1, 0], 49 | [1, 0] 50 | ], 51 | "transform_parameters": [ 52 | [0.1], 53 | 
[0.1], 54 | [0.1, 3.0, 0.0] 55 | ], 56 | "transforms": ["Rigid", "Affine", "SyN"], 57 | "use_histogram_matching": true, 58 | "verbose": true, 59 | "winsorize_lower_quantile": 0.01, 60 | "winsorize_upper_quantile": 0.975, 61 | "write_composite_transform": false 62 | } -------------------------------------------------------------------------------- /nirodents/data/brainextraction_2stage_T1w.json: -------------------------------------------------------------------------------- 1 | { 2 | "collapse_output_transforms": true, 3 | "convergence_threshold": [1E-8, 1E-8], 4 | "convergence_window_size": [10, 10], 5 | "dimension": 3, 6 | "interpolation": "LanczosWindowedSinc", 7 | "metric": [ 8 | ["MI", "MI"], 9 | ["MI", "MI"] 10 | ], 11 | "metric_weight": [ 12 | [0.5, 0.5], 13 | [0.5, 0.5] 14 | ], 15 | "number_of_iterations": [ 16 | [1000, 500, 250, 100], 17 | [1000, 500, 250, 100] 18 | ], 19 | "output_transform_prefix": "anat_to_template", 20 | "output_warped_image": true, 21 | "radius_or_number_of_bins": [ 22 | [32, 32], 23 | [32, 32] 24 | ], 25 | "sampling_percentage": [ 26 | [0.25, 0.25], 27 | [0.25, 0.25] 28 | ], 29 | "sampling_strategy": [ 30 | ["Regular", "Regular"], 31 | ["Regular", "Regular"] 32 | ], 33 | "shrink_factors": [ 34 | [8, 4, 2, 1], 35 | [8, 4, 2, 1] 36 | ], 37 | "sigma_units": ["vox", "vox", "vox"], 38 | "smoothing_sigmas": [ 39 | [4, 2, 1, 0], 40 | [4, 2, 1, 0] 41 | ], 42 | "transform_parameters": [ 43 | [0.1], 44 | [0.1] 45 | ], 46 | "transforms": ["Rigid", "Affine"], 47 | "use_histogram_matching": false, 48 | "verbose": true, 49 | "winsorize_lower_quantile": 0.005, 50 | "winsorize_upper_quantile": 0.995, 51 | "write_composite_transform": false 52 | } -------------------------------------------------------------------------------- /nirodents/data/brainextraction_2stage_T2w.json: -------------------------------------------------------------------------------- 1 | { 2 | "collapse_output_transforms": true, 3 | "convergence_threshold": [1E-8, 1E-8], 4 | 
"convergence_window_size": [10, 10], 5 | "dimension": 3, 6 | "interpolation": "LanczosWindowedSinc", 7 | "metric": [ 8 | ["MI", "MI"], 9 | ["MI", "MI"] 10 | ], 11 | "metric_weight": [ 12 | [0.5, 0.5], 13 | [0.5, 0.5] 14 | ], 15 | "number_of_iterations": [ 16 | [1000, 500, 250, 100], 17 | [1000, 500, 250, 100] 18 | ], 19 | "output_transform_prefix": "anat_to_template", 20 | "output_warped_image": true, 21 | "radius_or_number_of_bins": [ 22 | [32, 32], 23 | [32, 32] 24 | ], 25 | "sampling_percentage": [ 26 | [0.25, 0.25], 27 | [0.25, 0.25] 28 | ], 29 | "sampling_strategy": [ 30 | ["Regular", "Regular"], 31 | ["Regular", "Regular"] 32 | ], 33 | "shrink_factors": [ 34 | [8, 4, 2, 1], 35 | [8, 4, 2, 1] 36 | ], 37 | "sigma_units": ["vox", "vox"], 38 | "smoothing_sigmas": [ 39 | [4, 2, 1, 0], 40 | [4, 2, 1, 0] 41 | ], 42 | "transform_parameters": [ 43 | [0.1], 44 | [0.2] 45 | ], 46 | "transforms": ["Rigid", "Affine"], 47 | "use_histogram_matching": true, 48 | "verbose": true, 49 | "winsorize_lower_quantile": 0.025, 50 | "winsorize_upper_quantile": 0.975, 51 | "write_composite_transform": false 52 | } -------------------------------------------------------------------------------- /nirodents/data/brainextraction_3stage_T1w.json: -------------------------------------------------------------------------------- 1 | { 2 | "collapse_output_transforms": true, 3 | "convergence_threshold": [1E-8, 1E-8, 1E-6], 4 | "convergence_window_size": [10, 10, 10], 5 | "dimension": 3, 6 | "interpolation": "LanczosWindowedSinc", 7 | "metric": [ 8 | ["MI", "MI"], 9 | ["MI", "MI"], 10 | ["CC", "CC"] 11 | ], 12 | "metric_weight": [ 13 | [0.5, 0.5], 14 | [0.35, 0.65], 15 | [0.35, 0.65] 16 | ], 17 | "number_of_iterations": [ 18 | [1000, 500, 250, 100], 19 | [1000, 500, 250, 100], 20 | [10] 21 | ], 22 | "output_transform_prefix": "anat_to_template", 23 | "output_warped_image": true, 24 | "radius_or_number_of_bins": [ 25 | [32, 32], 26 | [32, 32], 27 | [4, 4] 28 | ], 29 | "sampling_percentage": [ 30 
| [0.25, 0.25], 31 | [0.25, 0.25], 32 | [0.25, 0.25] 33 | ], 34 | "sampling_strategy": [ 35 | ["Regular", "Regular"], 36 | ["Regular", "Regular"], 37 | ["None", "None"] 38 | ], 39 | "shrink_factors": [ 40 | [8, 4, 2, 1], 41 | [8, 4, 2, 1], 42 | [2] 43 | ], 44 | "sigma_units": ["vox", "vox", "vox"], 45 | "smoothing_sigmas": [ 46 | [4, 2, 1, 0], 47 | [4, 2, 1, 0], 48 | [1] 49 | ], 50 | "transform_parameters": [ 51 | [0.1], 52 | [0.1], 53 | [0.1, 3.0, 0.0] 54 | ], 55 | "transforms": ["Rigid", "Affine", "SyN"], 56 | "use_histogram_matching": false, 57 | "verbose": true, 58 | "winsorize_lower_quantile": 0.005, 59 | "winsorize_upper_quantile": 0.995, 60 | "write_composite_transform": false 61 | } -------------------------------------------------------------------------------- /nirodents/data/brainextraction_3stage_T2w.json: -------------------------------------------------------------------------------- 1 | { 2 | "collapse_output_transforms": true, 3 | "convergence_threshold": [1E-8, 1E-8, 1E-6], 4 | "convergence_window_size": [10, 10, 10], 5 | "dimension": 3, 6 | "interpolation": "LanczosWindowedSinc", 7 | "metric": [ 8 | ["MI", "MI"], 9 | ["MI", "MI"], 10 | ["CC", "CC"] 11 | ], 12 | "metric_weight": [ 13 | [0.5, 0.5], 14 | [0.5, 0.5], 15 | [0.5, 0.5] 16 | ], 17 | "number_of_iterations": [ 18 | [1000, 500, 250, 100], 19 | [1000, 500, 250, 100], 20 | [50, 10, 0] 21 | ], 22 | "output_transform_prefix": "anat_to_template", 23 | "output_warped_image": true, 24 | "radius_or_number_of_bins": [ 25 | [32, 32], 26 | [32, 32], 27 | [4, 4] 28 | ], 29 | "sampling_percentage": [ 30 | [0.25, 0.25], 31 | [0.25, 0.25], 32 | [0.25, 0.25] 33 | ], 34 | "sampling_strategy": [ 35 | ["Regular", "Regular"], 36 | ["Regular", "Regular"], 37 | ["None", "None"] 38 | ], 39 | "shrink_factors": [ 40 | [8, 4, 2, 1], 41 | [8, 4, 2, 1], 42 | [4, 2, 1] 43 | ], 44 | "sigma_units": ["vox", "vox", "vox"], 45 | "smoothing_sigmas": [ 46 | [4, 2, 1, 0], 47 | [4, 2, 1, 0], 48 | [2, 1, 0] 49 | ], 50 | 
"transform_parameters": [ 51 | [0.1], 52 | [0.1], 53 | [0.1, 3.0, 0.0] 54 | ], 55 | "transforms": ["Rigid", "Affine", "SyN"], 56 | "use_histogram_matching": true, 57 | "verbose": true, 58 | "winsorize_lower_quantile": 0.025, 59 | "winsorize_upper_quantile": 0.975, 60 | "write_composite_transform": false 61 | } -------------------------------------------------------------------------------- /nirodents/data/brainextraction_precise_T1w.json: -------------------------------------------------------------------------------- 1 | { 2 | "collapse_output_transforms": true, 3 | "convergence_threshold": [1E-6, 1E-6, 1E-8], 4 | "convergence_window_size": [10, 10, 10], 5 | "dimension": 3, 6 | "interpolation": "LanczosWindowedSinc", 7 | "initial_moving_transform_com": 1, 8 | "metric": [ 9 | ["MI"], 10 | ["MI"], 11 | ["CC"] 12 | ], 13 | "metric_weight": [ 14 | [1], 15 | [1], 16 | [1] 17 | ], 18 | "number_of_iterations": [ 19 | [1000, 500, 250, 100], 20 | [1000, 500, 250, 100], 21 | [30] 22 | ], 23 | "output_transform_prefix": "braintotemp", 24 | "output_warped_image": true, 25 | "radius_or_number_of_bins": [ 26 | [32], 27 | [32], 28 | [4] 29 | ], 30 | "sampling_percentage": [ 31 | [0.25], 32 | [0.25], 33 | [1] 34 | ], 35 | "sampling_strategy": [ 36 | ["Regular"], 37 | ["Regular"], 38 | ["None"] 39 | ], 40 | "shrink_factors": [ 41 | [8, 4, 2, 1], 42 | [8, 4, 2, 1], 43 | [2] 44 | ], 45 | "sigma_units": ["vox", "vox", "vox"], 46 | "smoothing_sigmas": [ 47 | [3, 2, 1, 0], 48 | [3, 2, 1, 0], 49 | [1] 50 | ], 51 | "transform_parameters": [ 52 | [0.1], 53 | [0.1], 54 | [0.1, 3.0, 0.0] 55 | ], 56 | "transforms": ["Rigid", "Affine", "SyN"], 57 | "use_histogram_matching": false, 58 | "verbose": true, 59 | "winsorize_lower_quantile": 0.025, 60 | "winsorize_upper_quantile": 0.975, 61 | "write_composite_transform": false 62 | } -------------------------------------------------------------------------------- /nirodents/data/brainextraction_precise_T2w.json: 
-------------------------------------------------------------------------------- 1 | { 2 | "collapse_output_transforms": true, 3 | "convergence_threshold": [1E-6, 1E-6, 1E-6], 4 | "convergence_window_size": [10, 10, 10], 5 | "dimension": 3, 6 | "interpolation": "LanczosWindowedSinc", 7 | "initial_moving_transform_com": 1, 8 | "metric": [ 9 | ["MI"], 10 | ["MI"], 11 | ["CC"] 12 | ], 13 | "metric_weight": [ 14 | [1], 15 | [1], 16 | [1] 17 | ], 18 | "number_of_iterations": [ 19 | [1000, 500, 250, 100], 20 | [1000, 500, 250, 100], 21 | [30] 22 | ], 23 | "output_transform_prefix": "braintotemp", 24 | "output_warped_image": true, 25 | "radius_or_number_of_bins": [ 26 | [32], 27 | [32], 28 | [4] 29 | ], 30 | "sampling_percentage": [ 31 | [0.25], 32 | [0.25], 33 | [1] 34 | ], 35 | "sampling_strategy": [ 36 | ["Regular"], 37 | ["Regular"], 38 | ["None"] 39 | ], 40 | "shrink_factors": [ 41 | [8, 4, 2, 1], 42 | [8, 4, 2, 1], 43 | [2] 44 | ], 45 | "sigma_units": ["vox", "vox", "vox"], 46 | "smoothing_sigmas": [ 47 | [3, 2, 1, 0], 48 | [3, 2, 1, 0], 49 | [1] 50 | ], 51 | "transform_parameters": [ 52 | [0.1], 53 | [0.1], 54 | [0.1, 3.0, 0.0] 55 | ], 56 | "transforms": ["Rigid", "Affine", "SyN"], 57 | "use_histogram_matching": true, 58 | "verbose": true, 59 | "winsorize_lower_quantile": 0.025, 60 | "winsorize_upper_quantile": 0.975, 61 | "write_composite_transform": false 62 | } -------------------------------------------------------------------------------- /nirodents/data/testdata/sub-15/anat/sub-15_T2w.nii.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nipreps/nirodents/d1dcf984ada244fc0e5724f792feeaabe68efb09/nirodents/data/testdata/sub-15/anat/sub-15_T2w.nii.gz -------------------------------------------------------------------------------- /nirodents/interfaces.py: -------------------------------------------------------------------------------- 1 | """Interfaces.""" 2 | from nipype.interfaces.base 
import traits 3 | from nipype.interfaces.mixins import CopyHeaderInterface 4 | from nipype.interfaces.ants.segmentation import ( 5 | DenoiseImageInputSpec, DenoiseImage as _DI 6 | ) 7 | 8 | 9 | class _DenoiseImageInputSpec(DenoiseImageInputSpec): 10 | copy_header = traits.Bool( 11 | True, 12 | usedefault=True, 13 | desc="copy headers of the original image into the output (corrected) file", 14 | ) 15 | 16 | 17 | class DenoiseImage(_DI, CopyHeaderInterface): 18 | """Extends DenoiseImage to auto copy the header.""" 19 | 20 | input_spec = _DenoiseImageInputSpec 21 | _copy_header_map = {"output_image": "input_image"} 22 | -------------------------------------------------------------------------------- /nirodents/utils/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nipreps/nirodents/d1dcf984ada244fc0e5724f792feeaabe68efb09/nirodents/utils/__init__.py -------------------------------------------------------------------------------- /nirodents/utils/filtering.py: -------------------------------------------------------------------------------- 1 | """Signal processing filters.""" 2 | 3 | 4 | def truncation( 5 | in_file, 6 | clip_max=99.9, 7 | out_file=None, 8 | out_max=1000, 9 | out_min=0, 10 | percentiles=(0.1, 95), 11 | ): 12 | """Truncate and clip the input image intensities.""" 13 | from pathlib import Path 14 | import numpy as np 15 | import nibabel as nb 16 | from nipype.utils.filemanip import fname_presuffix 17 | 18 | img = nb.load(in_file) 19 | data = img.get_fdata() 20 | 21 | if percentiles[0] is not None: 22 | a_min = np.percentile(data.reshape(-1), percentiles[0]) 23 | else: 24 | hist, edges = np.histogram(data.reshape(-1), bins="auto") 25 | a_min = edges[np.argmax(hist)] 26 | 27 | data -= a_min 28 | data[data < out_min] = out_min 29 | a_max = np.percentile(data.reshape(-1), percentiles[1]) 30 | data *= out_max / a_max 31 | 32 | if clip_max is not None: 33 | data = np.clip(data, 
out_min, np.percentile(data.reshape(-1), clip_max)) 34 | 35 | if out_file is None: 36 | out_file = fname_presuffix(Path(in_file).name, suffix="_trunc") 37 | 38 | out_file = str(Path(out_file).absolute()) 39 | img.__class__(data, img.affine, img.header).to_filename(out_file) 40 | return out_file 41 | 42 | 43 | def gaussian_filter(in_file, sigma, out_file=None): 44 | """Filter input image by convolving with a Gaussian kernel.""" 45 | from pathlib import Path 46 | import nibabel as nb 47 | from scipy.ndimage import gaussian_filter 48 | from nipype.utils.filemanip import fname_presuffix 49 | 50 | if out_file is None: 51 | out_file = fname_presuffix(Path(in_file).name, suffix="_gauss") 52 | out_file = str(Path(out_file).absolute()) 53 | 54 | img = nb.load(in_file) 55 | img.__class__( 56 | gaussian_filter(img.dataobj, sigma), img.affine, img.header 57 | ).to_filename(out_file) 58 | return out_file 59 | -------------------------------------------------------------------------------- /nirodents/viz.py: -------------------------------------------------------------------------------- 1 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- 2 | # vi: set ft=python sts=4 ts=4 sw=4 et: 3 | """Create Mosaic Plot to showcase masking""" 4 | 5 | import math 6 | import os.path as op 7 | import numpy as np 8 | import nibabel as nb 9 | 10 | import matplotlib.pyplot as plt 11 | 12 | DEFAULT_DPI = 300 13 | DINA4_LANDSCAPE = (11.69, 8.27) 14 | DINA4_PORTRAIT = (8.27, 11.69) 15 | 16 | 17 | def plot_slice( 18 | dslice, 19 | spacing=None, 20 | cmap="Greys_r", 21 | label=None, 22 | ax=None, 23 | vmax=None, 24 | vmin=None, 25 | annotate=False, 26 | ): 27 | from matplotlib.cm import get_cmap 28 | 29 | if isinstance(cmap, (str, bytes)): 30 | cmap = get_cmap(cmap) 31 | 32 | est_vmin, est_vmax = _get_limits(dslice) 33 | if not vmin: 34 | vmin = est_vmin 35 | if not vmax: 36 | vmax = est_vmax 37 | 38 | if ax is None: 39 | ax = plt.gca() 40 | 41 | if spacing is None: 42 | 
spacing = [1.0, 1.0] 43 | 44 | phys_sp = np.array(spacing) * dslice.shape 45 | ax.imshow( 46 | np.swapaxes(dslice, 0, 1), 47 | vmin=vmin, 48 | vmax=vmax, 49 | cmap=cmap, 50 | interpolation="nearest", 51 | origin="lower", 52 | extent=[0, phys_sp[0], 0, phys_sp[1]], 53 | ) 54 | ax.set_xticklabels([]) 55 | ax.set_yticklabels([]) 56 | ax.grid(False) 57 | ax.axis("off") 58 | 59 | bgcolor = cmap(min(vmin, 0.0)) 60 | fgcolor = cmap(vmax) 61 | 62 | if annotate: 63 | ax.text( 64 | 0.95, 65 | 0.95, 66 | "R", 67 | color=fgcolor, 68 | transform=ax.transAxes, 69 | horizontalalignment="center", 70 | verticalalignment="top", 71 | size=18, 72 | bbox=dict(boxstyle="square,pad=0", ec=bgcolor, fc=bgcolor), 73 | ) 74 | ax.text( 75 | 0.05, 76 | 0.95, 77 | "L", 78 | color=fgcolor, 79 | transform=ax.transAxes, 80 | horizontalalignment="center", 81 | verticalalignment="top", 82 | size=18, 83 | bbox=dict(boxstyle="square,pad=0", ec=bgcolor, fc=bgcolor), 84 | ) 85 | 86 | if label is not None: 87 | ax.text( 88 | 0.98, 89 | 0.01, 90 | label, 91 | color=fgcolor, 92 | transform=ax.transAxes, 93 | horizontalalignment="right", 94 | verticalalignment="bottom", 95 | size=18, 96 | bbox=dict(boxstyle="square,pad=0", ec=bgcolor, fc=bgcolor), 97 | ) 98 | 99 | return ax 100 | 101 | 102 | def plot_mosaic( 103 | img, 104 | out_file=None, 105 | ncols=8, 106 | title=None, 107 | overlay_mask=None, 108 | bbox_mask_file=None, 109 | only_plot_noise=False, 110 | annotate=True, 111 | vmin=None, 112 | vmax=None, 113 | cmap="Greys_r", 114 | plot_sagittal=True, 115 | fig=None, 116 | zmax=128, 117 | ): 118 | 119 | if isinstance(img, (str, bytes)): 120 | nii = nb.as_closest_canonical(nb.load(img)) 121 | img_data = nii.get_data() 122 | zooms = nii.header.get_zooms() 123 | else: 124 | img_data = img 125 | zooms = [1.0, 1.0, 1.0] 126 | out_file = "mosaic.svg" 127 | 128 | # Remove extra dimensions 129 | img_data = np.squeeze(img_data) 130 | 131 | if img_data.shape[2] > zmax and bbox_mask_file is None: 132 | lowthres = 
np.percentile(img_data, 5) 133 | mask_file = np.ones_like(img_data) 134 | mask_file[img_data <= lowthres] = 0 135 | img_data = _bbox(img_data, mask_file) 136 | 137 | if bbox_mask_file is not None: 138 | bbox_data = nb.as_closest_canonical(nb.load(bbox_mask_file)).get_data() 139 | img_data = _bbox(img_data, bbox_data) 140 | 141 | z_vals = np.array(list(range(0, img_data.shape[2]))) 142 | 143 | # Reduce the number of slices shown 144 | if len(z_vals) > zmax: 145 | rem = 15 146 | # Crop inferior and posterior 147 | if not bbox_mask_file: 148 | # img_data = img_data[..., rem:-rem] 149 | z_vals = z_vals[rem:-rem] 150 | else: 151 | # img_data = img_data[..., 2 * rem:] 152 | z_vals = z_vals[2 * rem :] 153 | 154 | while len(z_vals) > zmax: 155 | # Discard one every two slices 156 | # img_data = img_data[..., ::2] 157 | z_vals = z_vals[::2] 158 | 159 | n_images = len(z_vals) * 0.7 160 | nrows = math.ceil(n_images / ncols) 161 | if plot_sagittal: 162 | nrows += 1 163 | 164 | if overlay_mask: 165 | overlay_data = nb.as_closest_canonical(nb.load(overlay_mask)).get_data() 166 | 167 | # create figures 168 | if fig is None: 169 | fig = plt.figure(figsize=(22, nrows * 3)) 170 | 171 | est_vmin, est_vmax = _get_limits(img_data, only_plot_noise=only_plot_noise) 172 | if not vmin: 173 | vmin = est_vmin 174 | if not vmax: 175 | vmax = est_vmax 176 | 177 | naxis = 1 178 | new_lims = int(len(z_vals) * 0.15) 179 | for z_val in z_vals[new_lims:-new_lims]: 180 | ax = fig.add_subplot(nrows, ncols, naxis) 181 | 182 | if overlay_mask: 183 | ax.set_rasterized(True) 184 | plot_slice( 185 | img_data[:, :, z_val], 186 | vmin=vmin, 187 | vmax=vmax, 188 | cmap=cmap, 189 | ax=ax, 190 | spacing=zooms[:2], 191 | label="%d" % z_val, 192 | annotate=annotate, 193 | ) 194 | 195 | if overlay_mask: 196 | from matplotlib import cm 197 | 198 | msk_cmap = cm.Reds # @UndefinedVariable 199 | msk_cmap._init() 200 | alphas = np.linspace(0, 0.75, msk_cmap.N + 3) 201 | msk_cmap._lut[:, -1] = alphas 202 | plot_slice( 
203 | overlay_data[:, :, z_val], 204 | vmin=0, 205 | vmax=1, 206 | cmap=msk_cmap, 207 | ax=ax, 208 | spacing=zooms[:2], 209 | ) 210 | naxis += 1 211 | 212 | if plot_sagittal: 213 | naxis = ncols * (nrows - 1) + 1 214 | 215 | step = int(img_data.shape[0] / (ncols + 1)) 216 | start = int(step * 3.5) 217 | stop = int(img_data.shape[0] - (step * 3.5)) 218 | 219 | if step == 0: 220 | step = 1 221 | 222 | for x_val in list(range(start, stop, step))[:ncols]: 223 | ax = fig.add_subplot(nrows - 1, ncols, naxis) 224 | 225 | plot_slice( 226 | img_data[x_val, ...], 227 | vmin=vmin, 228 | vmax=vmax, 229 | cmap=cmap, 230 | ax=ax, 231 | label="%d" % x_val, 232 | spacing=[zooms[0], zooms[2]], 233 | ) 234 | naxis += 1 235 | 236 | fig.subplots_adjust( 237 | left=0.05, right=0.95, bottom=0.05, top=0.95, wspace=0.05, hspace=0.05 238 | ) 239 | 240 | if title: 241 | fig.suptitle(title, fontsize="10") 242 | fig.subplots_adjust(wspace=0.002, hspace=0.002) 243 | 244 | if out_file is None: 245 | fname, ext = op.splitext(op.basename(img)) 246 | if ext == ".gz": 247 | fname, _ = op.splitext(fname) 248 | out_file = op.abspath(fname + "_mosaic.svg") 249 | 250 | fig.savefig(out_file, format="svg", dpi=300, bbox_inches="tight") 251 | return out_file 252 | 253 | 254 | def _bbox(img_data, bbox_data): 255 | B = np.argwhere(bbox_data) 256 | (ystart, xstart, zstart), (ystop, xstop, zstop) = B.min(0), B.max(0) + 1 257 | return img_data[ystart:ystop, xstart:xstop, zstart:zstop] 258 | 259 | 260 | def _get_limits(nifti_file, only_plot_noise=False): 261 | if isinstance(nifti_file, str): 262 | nii = nb.as_closest_canonical(nb.load(nifti_file)) 263 | data = nii.get_data() 264 | else: 265 | data = nifti_file 266 | 267 | data_mask = np.logical_not(np.isnan(data)) 268 | 269 | if only_plot_noise: 270 | data_mask = np.logical_and(data_mask, data != 0) 271 | vmin = np.percentile(data[data_mask], 0) 272 | vmax = np.percentile(data[data_mask], 61) 273 | else: 274 | vmin = np.percentile(data[data_mask], 0.5) 275 | 
vmax = np.percentile(data[data_mask], 99.5) 276 | return vmin, vmax 277 | -------------------------------------------------------------------------------- /nirodents/workflows/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nipreps/nirodents/d1dcf984ada244fc0e5724f792feeaabe68efb09/nirodents/workflows/__init__.py -------------------------------------------------------------------------------- /nirodents/workflows/brainextraction.py: -------------------------------------------------------------------------------- 1 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- 2 | # vi: set ft=python sts=4 ts=4 sw=4 et: 3 | """Nipype translation of ANTs' workflows.""" 4 | # general purpose 5 | from pkg_resources import resource_filename as pkgr_fn 6 | 7 | # nipype 8 | from nipype.pipeline import engine as pe 9 | from nipype.interfaces import utility as niu 10 | from nipype.interfaces.ants import ( 11 | AI, 12 | ImageMath, 13 | N4BiasFieldCorrection, 14 | ) 15 | 16 | # niworkflows 17 | from niworkflows.interfaces.bids import DerivativesDataSink as _DDS 18 | from niworkflows.interfaces.images import RegridToZooms 19 | from niworkflows.interfaces.nibabel import ApplyMask, Binarize, IntensityClip 20 | from niworkflows.interfaces.fixes import ( 21 | FixHeaderRegistration as Registration, 22 | FixHeaderApplyTransforms as ApplyTransforms, 23 | ) 24 | from niworkflows.interfaces.reportlets.registration import ( 25 | SimpleBeforeAfterRPT as SimpleBeforeAfter, 26 | ) 27 | 28 | from templateflow.api import get as get_template 29 | from nirodents.interfaces import DenoiseImage 30 | 31 | from nirodents import __version__ 32 | 33 | 34 | class DerivativesDataSink(_DDS): 35 | """Generate a BIDS-Derivatives-compatible output folder.""" 36 | 37 | out_path_base = f"nirodents-{__version__}" 38 | 39 | 40 | LOWRES_ZOOMS = (0.4, 0.4, 0.4) 41 | HIRES_ZOOMS = (0.1, 0.1, 0.1) 42 | 43 | 44 | def 
init_rodent_brain_extraction_wf( 45 | ants_affine_init=False, 46 | factor=20, 47 | arc=0.12, 48 | step=4, 49 | grid=(0, 4, 4), 50 | debug=False, 51 | interim_checkpoints=True, 52 | mem_gb=3.0, 53 | mri_scheme="T2w", 54 | name="rodent_brain_extraction_wf", 55 | omp_nthreads=None, 56 | output_dir=None, 57 | template_id="Fischer344", 58 | template_specs=None, 59 | use_float=True, 60 | ): 61 | """ 62 | Build an atlas-based brain extraction pipeline for rodent T1w and T2w MRI data. 63 | 64 | Parameters 65 | ---------- 66 | ants_affine_init : :obj:`bool`, optional 67 | Set-up a pre-initialization step with ``antsAI`` to account for mis-oriented images. 68 | 69 | """ 70 | inputnode = pe.Node( 71 | niu.IdentityInterface(fields=["in_files", "in_mask"]), name="inputnode" 72 | ) 73 | outputnode = pe.Node( 74 | niu.IdentityInterface(fields=["out_corrected", "out_brain", "out_mask"]), 75 | name="outputnode", 76 | ) 77 | 78 | template_specs = template_specs or {} 79 | if template_id == "WHS" and "resolution" not in template_specs: 80 | template_specs["resolution"] = 2 81 | 82 | # Find a suitable target template in TemplateFlow 83 | tpl_target_path = get_template(template_id, suffix=mri_scheme, **template_specs,) 84 | if not tpl_target_path: 85 | raise RuntimeError( 86 | f"An instance of template with MR scheme '{mri_scheme}'" 87 | " could not be found." 
88 | ) 89 | 90 | tpl_brainmask_path = get_template( 91 | template_id, 92 | atlas=None, 93 | hemi=None, 94 | desc="brain", 95 | suffix="probseg", 96 | **template_specs, 97 | ) or get_template( 98 | template_id, 99 | atlas=None, 100 | hemi=None, 101 | desc="brain", 102 | suffix="mask", 103 | **template_specs, 104 | ) 105 | 106 | tpl_regmask_path = get_template( 107 | template_id, 108 | atlas=None, 109 | desc="BrainCerebellumExtraction", 110 | suffix="mask", 111 | **template_specs, 112 | ) 113 | 114 | denoise = pe.Node(DenoiseImage(dimension=3, copy_header=True), 115 | name="denoise", n_procs=omp_nthreads) 116 | 117 | # Resample template to a controlled, isotropic resolution 118 | res_tmpl = pe.Node(RegridToZooms(zooms=HIRES_ZOOMS, smooth=True), name="res_tmpl") 119 | 120 | # Create Laplacian images 121 | lap_tmpl = pe.Node(ImageMath(operation="Laplacian", copy_header=True), name="lap_tmpl") 122 | tmpl_sigma = pe.Node(niu.Function(function=_lap_sigma), 123 | name="tmpl_sigma", run_without_submitting=True) 124 | norm_lap_tmpl = pe.Node(niu.Function(function=_norm_lap), name="norm_lap_tmpl") 125 | 126 | lap_target = pe.Node(ImageMath(operation="Laplacian", copy_header=True), name="lap_target") 127 | target_sigma = pe.Node(niu.Function(function=_lap_sigma), 128 | name="target_sigma", run_without_submitting=True) 129 | norm_lap_target = pe.Node(niu.Function(function=_norm_lap), name="norm_lap_target") 130 | 131 | # Set up initial spatial normalization 132 | ants_params = "testing" if debug else "precise" 133 | norm = pe.Node( 134 | Registration( 135 | from_file=pkgr_fn( 136 | "nirodents", f"data/artsBrainExtraction_{ants_params}_{mri_scheme}.json" 137 | ) 138 | ), 139 | name="norm", 140 | n_procs=omp_nthreads, 141 | mem_gb=mem_gb, 142 | ) 143 | norm.inputs.float = use_float 144 | 145 | # main workflow 146 | wf = pe.Workflow(name) 147 | 148 | # truncate target intensity for N4 correction 149 | clip_target = pe.Node(IntensityClip(p_min=15, p_max=99.9), name="clip_target") 
150 | 151 | # truncate template intensity to match target 152 | clip_tmpl = pe.Node(IntensityClip(p_min=5, p_max=98), name="clip_tmpl") 153 | clip_tmpl.inputs.in_file = _pop(tpl_target_path) 154 | 155 | # set INU bspline grid based on voxel size 156 | bspline_grid = pe.Node(niu.Function(function=_bspline_grid), name="bspline_grid") 157 | 158 | # INU correction of the target image 159 | init_n4 = pe.Node( 160 | N4BiasFieldCorrection( 161 | dimension=3, 162 | save_bias=False, 163 | copy_header=True, 164 | n_iterations=[50] * (4 - debug), 165 | convergence_threshold=1e-7, 166 | shrink_factor=1, 167 | rescale_intensities=True, 168 | ), 169 | n_procs=omp_nthreads, 170 | name="init_n4", 171 | ) 172 | clip_inu = pe.Node(IntensityClip(p_min=1, p_max=99.8), name="clip_inu") 173 | 174 | # Create a buffer interface as a cache for the actual inputs to registration 175 | buffernode = pe.Node( 176 | niu.IdentityInterface(fields=["hires_target"]), name="buffernode" 177 | ) 178 | 179 | # Merge image nodes 180 | mrg_target = pe.Node(niu.Merge(2), name="mrg_target") 181 | mrg_tmpl = pe.Node(niu.Merge(2), name="mrg_tmpl") 182 | 183 | # fmt: off 184 | wf.connect([ 185 | # Target image massaging 186 | (inputnode, denoise, [(("in_files", _pop), "input_image")]), 187 | (inputnode, bspline_grid, [(("in_files", _pop), "in_file")]), 188 | (bspline_grid, init_n4, [("out", "args")]), 189 | (denoise, clip_target, [("output_image", "in_file")]), 190 | (clip_target, init_n4, [("out_file", "input_image")]), 191 | (init_n4, clip_inu, [("output_image", "in_file")]), 192 | (clip_inu, target_sigma, [("out_file", "in_file")]), 193 | (clip_inu, buffernode, [("out_file", "hires_target")]), 194 | (buffernode, lap_target, [("hires_target", "op1")]), 195 | (target_sigma, lap_target, [("out", "op2")]), 196 | (lap_target, norm_lap_target, [("output_image", "in_file")]), 197 | (buffernode, mrg_target, [("hires_target", "in1")]), 198 | (norm_lap_target, mrg_target, [("out", "in2")]), 199 | # Template massaging 
200 | (clip_tmpl, res_tmpl, [("out_file", "in_file")]), 201 | (res_tmpl, tmpl_sigma, [("out_file", "in_file")]), 202 | (res_tmpl, lap_tmpl, [("out_file", "op1")]), 203 | (tmpl_sigma, lap_tmpl, [("out", "op2")]), 204 | (lap_tmpl, norm_lap_tmpl, [("output_image", "in_file")]), 205 | (res_tmpl, mrg_tmpl, [("out_file", "in1")]), 206 | (norm_lap_tmpl, mrg_tmpl, [("out", "in2")]), 207 | # Setup inputs to spatial normalization 208 | (mrg_target, norm, [("out", "moving_image")]), 209 | (mrg_tmpl, norm, [("out", "fixed_image")]), 210 | ]) 211 | # fmt: on 212 | 213 | # Graft a template registration-mask if present 214 | if tpl_regmask_path: 215 | hires_mask = pe.Node( 216 | ApplyTransforms( 217 | input_image=_pop(tpl_regmask_path), 218 | transforms="identity", 219 | interpolation="Gaussian", 220 | float=True, 221 | ), 222 | name="hires_mask", 223 | mem_gb=1, 224 | ) 225 | 226 | # fmt: off 227 | wf.connect([ 228 | (res_tmpl, hires_mask, [("out_file", "reference_image")]), 229 | (hires_mask, norm, [("output_image", "fixed_image_masks")]), 230 | ]) 231 | # fmt: on 232 | 233 | # Finally project brain mask and refine INU correction 234 | map_brainmask = pe.Node( 235 | ApplyTransforms(interpolation="Gaussian", float=True), 236 | name="map_brainmask", 237 | mem_gb=1, 238 | ) 239 | map_brainmask.inputs.input_image = str(tpl_brainmask_path) 240 | 241 | thr_brainmask = pe.Node(Binarize(thresh_low=0.50), name="thr_brainmask") 242 | 243 | final_n4 = pe.Node( 244 | N4BiasFieldCorrection( 245 | dimension=3, 246 | save_bias=True, 247 | copy_header=True, 248 | n_iterations=[50] * 4, 249 | convergence_threshold=1e-7, 250 | rescale_intensities=True, 251 | shrink_factor=1, 252 | ), 253 | n_procs=omp_nthreads, 254 | name="final_n4", 255 | ) 256 | final_mask = pe.Node(ApplyMask(), name="final_mask") 257 | 258 | # fmt: off 259 | wf.connect([ 260 | (inputnode, map_brainmask, [(("in_files", _pop), "reference_image")]), 261 | (bspline_grid, final_n4, [("out", "args")]), 262 | (clip_target, final_n4, 
[("out_file", "input_image")]), 263 | # Project template's brainmask into subject space 264 | (norm, map_brainmask, [("reverse_transforms", "transforms"), 265 | ("reverse_invert_flags", "invert_transform_flags")]), 266 | (map_brainmask, thr_brainmask, [("output_image", "in_file")]), 267 | # take a second pass of N4 268 | (map_brainmask, final_n4, [("output_image", "mask_image")]), 269 | (final_n4, final_mask, [("output_image", "in_file")]), 270 | (thr_brainmask, final_mask, [("out_mask", "in_mask")]), 271 | (final_n4, outputnode, [("output_image", "out_corrected")]), 272 | (thr_brainmask, outputnode, [("out_mask", "out_mask")]), 273 | (final_mask, outputnode, [("out_file", "out_brain")]), 274 | ]) 275 | # fmt: on 276 | 277 | if interim_checkpoints: 278 | final_apply = pe.Node( 279 | ApplyTransforms(interpolation="BSpline", float=True), 280 | name="final_apply", 281 | mem_gb=1, 282 | ) 283 | final_report = pe.Node( 284 | SimpleBeforeAfter(after_label="target", before_label=f"tpl-{template_id}"), 285 | name="final_report", 286 | ) 287 | # fmt: off 288 | wf.connect([ 289 | (inputnode, final_apply, [(("in_files", _pop), "reference_image")]), 290 | (res_tmpl, final_apply, [("out_file", "input_image")]), 291 | (norm, final_apply, [("reverse_transforms", "transforms"), 292 | ("reverse_invert_flags", "invert_transform_flags")]), 293 | (final_apply, final_report, [("output_image", "before")]), 294 | (outputnode, final_report, [("out_corrected", "after"), ("out_mask", "wm_seg")]), 295 | ]) 296 | # fmt: on 297 | 298 | if ants_affine_init: 299 | # Initialize transforms with antsAI 300 | lowres_tmpl = pe.Node( 301 | RegridToZooms(zooms=LOWRES_ZOOMS, smooth=True), name="lowres_tmpl" 302 | ) 303 | lowres_trgt = pe.Node( 304 | RegridToZooms(zooms=LOWRES_ZOOMS, smooth=True), name="lowres_trgt" 305 | ) 306 | 307 | init_aff = pe.Node( 308 | AI( 309 | convergence=(100, 1e-6, 10), 310 | metric=("Mattes", 32, "Random", 0.25), 311 | principal_axes=False, 312 | search_factor=(factor, 
arc), 313 | search_grid=(step, grid), 314 | transform=("Affine", 0.1), 315 | verbose=True, 316 | ), 317 | name="init_aff", 318 | n_procs=omp_nthreads, 319 | ) 320 | # fmt: off 321 | wf.connect([ 322 | (clip_inu, lowres_trgt, [("out_file", "in_file")]), 323 | (lowres_trgt, init_aff, [("out_file", "moving_image")]), 324 | (clip_tmpl, lowres_tmpl, [("out_file", "in_file")]), 325 | (lowres_tmpl, init_aff, [("out_file", "fixed_image")]), 326 | (init_aff, norm, [("output_transform", "initial_moving_transform")]), 327 | ]) 328 | # fmt: on 329 | 330 | if tpl_regmask_path: 331 | lowres_mask = pe.Node( 332 | ApplyTransforms( 333 | input_image=_pop(tpl_regmask_path), 334 | transforms="identity", 335 | interpolation="MultiLabel", 336 | ), 337 | name="lowres_mask", 338 | mem_gb=1, 339 | ) 340 | # fmt: off 341 | wf.connect([ 342 | (lowres_tmpl, lowres_mask, [("out_file", "reference_image")]), 343 | (lowres_mask, init_aff, [("output_image", "fixed_image_mask")]), 344 | ]) 345 | # fmt: on 346 | 347 | if interim_checkpoints: 348 | init_apply = pe.Node( 349 | ApplyTransforms(interpolation="BSpline", invert_transform_flags=[True]), 350 | name="init_apply", 351 | mem_gb=1, 352 | ) 353 | init_mask = pe.Node( 354 | ApplyTransforms(interpolation="Gaussian", invert_transform_flags=[True]), 355 | name="init_mask", 356 | mem_gb=1, 357 | ) 358 | init_mask.inputs.input_image = str(tpl_brainmask_path) 359 | init_report = pe.Node( 360 | SimpleBeforeAfter( 361 | out_report="init_report.svg", 362 | before_label="target", 363 | after_label="template", 364 | ), 365 | name="init_report", 366 | ) 367 | # fmt: off 368 | wf.connect([ 369 | (lowres_trgt, init_apply, [("out_file", "reference_image")]), 370 | (lowres_tmpl, init_apply, [("out_file", "input_image")]), 371 | (init_aff, init_apply, [("output_transform", "transforms")]), 372 | (lowres_trgt, init_report, [("out_file", "before")]), 373 | (init_apply, init_report, [("output_image", "after")]), 374 | 375 | (lowres_trgt, init_mask, [("out_file", 
"reference_image")]), 376 | (init_aff, init_mask, [("output_transform", "transforms")]), 377 | (init_mask, init_report, [("output_image", "wm_seg")]), 378 | ]) 379 | # fmt: on 380 | else: 381 | norm.inputs.initial_moving_transform_com = 1 382 | 383 | if output_dir: 384 | ds_final_inu = pe.Node( 385 | DerivativesDataSink( 386 | base_directory=str(output_dir), desc="preproc", compress=True, 387 | ), name="ds_final_inu", run_without_submitting=True 388 | ) 389 | ds_final_msk = pe.Node( 390 | DerivativesDataSink( 391 | base_directory=str(output_dir), desc="brain", suffix="mask", compress=True, 392 | ), name="ds_final_msk", run_without_submitting=True 393 | ) 394 | 395 | # fmt: off 396 | wf.connect([ 397 | (inputnode, ds_final_inu, [("in_files", "source_file")]), 398 | (inputnode, ds_final_msk, [("in_files", "source_file")]), 399 | (outputnode, ds_final_inu, [("out_corrected", "in_file")]), 400 | (outputnode, ds_final_msk, [("out_mask", "in_file")]), 401 | ]) 402 | # fmt: on 403 | 404 | if interim_checkpoints: 405 | ds_report = pe.Node( 406 | DerivativesDataSink( 407 | base_directory=str(output_dir), desc="brain", 408 | suffix="mask", datatype="figures" 409 | ), name="ds_report", run_without_submitting=True 410 | ) 411 | # fmt: off 412 | wf.connect([ 413 | (inputnode, ds_report, [("in_files", "source_file")]), 414 | (final_report, ds_report, [("out_report", "in_file")]), 415 | ]) 416 | # fmt: on 417 | 418 | if ants_affine_init and interim_checkpoints: 419 | ds_report_init = pe.Node( 420 | DerivativesDataSink( 421 | base_directory=str(output_dir), desc="init", 422 | suffix="mask", datatype="figures" 423 | ), name="ds_report_init", run_without_submitting=True 424 | ) 425 | # fmt: off 426 | wf.connect([ 427 | (inputnode, ds_report_init, [("in_files", "source_file")]), 428 | (init_report, ds_report_init, [("out_report", "in_file")]), 429 | ]) 430 | # fmt: on 431 | 432 | return wf 433 | 434 | 435 | def _pop(in_files): 436 | if isinstance(in_files, (list, tuple)): 437 | 
return in_files[0] 438 | return in_files 439 | 440 | 441 | def _bspline_grid(in_file): 442 | import nibabel as nb 443 | import numpy as np 444 | import math 445 | 446 | img = nb.load(in_file) 447 | zooms = img.header.get_zooms()[:3] 448 | extent = (np.array(img.shape[:3]) - 1) * zooms 449 | # get mesh resolution ratio 450 | retval = [f"{math.ceil(i / extent[np.argmin(extent)])}" for i in extent] 451 | return f"-b [{'x'.join(retval)}]" 452 | 453 | 454 | def _lap_sigma(in_file): 455 | import numpy as np 456 | import nibabel as nb 457 | 458 | img = nb.load(in_file) 459 | min_vox = np.amin(img.header.get_zooms()) 460 | return str(1.5 * min_vox ** 0.75) 461 | 462 | 463 | def _norm_lap(in_file): 464 | from pathlib import Path 465 | import numpy as np 466 | import nibabel as nb 467 | from nipype.utils.filemanip import fname_presuffix 468 | 469 | img = nb.load(in_file) 470 | data = img.get_fdata() 471 | data -= np.median(data) 472 | l_max = np.percentile(data[data > 0], 99.8) 473 | l_min = np.percentile(data[data < 0], 0.2) 474 | data[data < 0] *= -1.0 / l_min 475 | data[data > 0] *= 1.0 / l_max 476 | data = np.clip(data, a_min=-1.0, a_max=1.0) 477 | 478 | out_file = fname_presuffix( 479 | Path(in_file).name, suffix="_norm", newpath=str(Path.cwd().absolute()) 480 | ) 481 | hdr = img.header.copy() 482 | hdr.set_data_dtype("float32") 483 | img.__class__(data.astype("float32"), img.affine, hdr).to_filename(out_file) 484 | return out_file 485 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = [ 3 | "setuptools >= 45", 4 | "setuptools_scm[toml] >= 6.2", 5 | "wheel" 6 | ] 7 | build-backend = "setuptools.build_meta" 8 | 9 | [tool.setuptools_scm] 10 | write_to = "nirodents/_version.py" 11 | write_to_template = """\ 12 | \"\"\"Version file, automatically generated by setuptools_scm.\"\"\" 13 | __version__ = "{version}" 
14 | """ 15 | fallback_version = "0.0" 16 | version_scheme = "python-simplified-semver" 17 | 18 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [metadata] 2 | url = https://github.com/nipreps/nirodents 3 | author = The NiPreps Developers 4 | author_email = nipreps@gmail.com 5 | maintainer = Eilidh MacNicol 6 | maintainer_email = nipreps@gmail.com 7 | description = NeuroImaging Rodents (NiRodents) provides processing tools for magnetic resonance images of the rodent brain. 8 | long_description = file:README.rst 9 | long_description_content_type = text/x-rst; charset=UTF-8 10 | license = 3-clause BSD 11 | classifiers = 12 | Development Status :: 4 - Beta 13 | Intended Audience :: Science/Research 14 | Topic :: Scientific/Engineering :: Image Recognition 15 | License :: OSI Approved :: BSD License 16 | Programming Language :: Python :: 3.6 17 | Programming Language :: Python :: 3.7 18 | Programming Language :: Python :: 3.8 19 | 20 | [options] 21 | python_requires = >=3.8 22 | setup_requires = 23 | setuptools >= 45 24 | setuptools_scm >= 6.2 25 | wheel 26 | 27 | install_requires = 28 | attrs 29 | networkx 30 | nibabel >= 3.0.1 31 | nipype >= 1.7.1 32 | niworkflows >= 1.5.2 33 | scipy >= 1.8 34 | templateflow >= 0.7.1 35 | 36 | test_requires = 37 | coverage < 5 38 | pytest >= 4.4 39 | pytest-cov 40 | pytest-xdist >= 1.28, <2.0 41 | packages = find: 42 | 43 | [options.entry_points] 44 | console_scripts = 45 | artsBrainExtraction=nirodents.cli.brainextraction:main 46 | plot_mask=nirodents.cli.plotmask:main 47 | 48 | [options.package_data] 49 | nirodents = 50 | data/*.json 51 | 52 | [options.packages.find] 53 | exclude = 54 | *.tests 55 | 56 | [options.extras_require] 57 | doc = 58 | pydot >= 1.2.3 59 | pydotplus 60 | sphinx >= 2.1.2 61 | sphinx_rtd_theme 62 | sphinxcontrib-apidoc ~= 0.3.0 63 | sphinxcontrib-napoleon 64 | 
sphinxcontrib-versioning 65 | docs = 66 | %(doc)s 67 | duecredit = 68 | duecredit 69 | style = 70 | flake8 >= 3.7.0 71 | test = 72 | coverage < 5 73 | pytest >= 4.4 74 | pytest-cov 75 | pytest-xdist >= 1.28 76 | tests = 77 | %(test)s 78 | all = 79 | %(doc)s 80 | %(duecredit)s 81 | %(style)s 82 | %(test)s 83 | 84 | [flake8] 85 | max-line-length = 99 86 | doctests = False 87 | ignore = 88 | W503 89 | E203 90 | exclude=*build/ 91 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """nirodents' PEP518 setup.py shim.""" 3 | from setuptools import setup 4 | 5 | if __name__ == "__main__": 6 | """ Install entry-point """ 7 | setup() 8 | --------------------------------------------------------------------------------