├── .github
├── ISSUE_TEMPLATE
│ ├── bug_report.md
│ ├── feature_request.md
│ └── question.md
└── workflows
│ ├── autoformat.yml
│ ├── pr-lint.yml
│ ├── release.yml
│ └── tests.yml
├── .gitignore
├── .readthedocs.yaml
├── LICENSE
├── README.md
├── brainles_preprocessing
├── __init__.py
├── brain_extraction
│ ├── __init__.py
│ └── brain_extractor.py
├── brats
│ ├── __init__.py
│ ├── t1_centric.py
│ └── t1c_centric.py
├── cli.py
├── constants.py
├── defacing
│ ├── __init__.py
│ ├── defacer.py
│ └── quickshear
│ │ ├── __init__.py
│ │ ├── nipy_quickshear.py
│ │ └── quickshear.py
├── modality.py
├── normalization
│ ├── __init__.py
│ ├── normalizer_base.py
│ ├── percentile_normalizer.py
│ └── windowing_normalizer.py
├── preprocessor.py
├── registration
│ ├── ANTs
│ │ ├── ANTs.py
│ │ ├── TODO_ANTs_parameters.py
│ │ └── __init__.py
│ ├── __init__.py
│ ├── elastix
│ │ ├── __init__.py
│ │ └── elastix.py
│ ├── greedy
│ │ ├── __init__.py
│ │ └── greedy.py
│ ├── niftyreg
│ │ ├── __init__.py
│ │ ├── niftyreg.py
│ │ └── niftyreg_scripts
│ │ │ ├── niftyreg_1.5.68
│ │ │ ├── bin
│ │ │ │ ├── groupwise_niftyreg_params.sh
│ │ │ │ ├── groupwise_niftyreg_run.sh
│ │ │ │ ├── reg_aladin
│ │ │ │ ├── reg_average
│ │ │ │ ├── reg_f3d
│ │ │ │ ├── reg_jacobian
│ │ │ │ ├── reg_measure
│ │ │ │ ├── reg_resample
│ │ │ │ ├── reg_tools
│ │ │ │ └── reg_transform
│ │ │ └── include
│ │ │ │ ├── AffineDeformationFieldKernel.h
│ │ │ │ ├── AladinContent.h
│ │ │ │ ├── BlockMatchingKernel.h
│ │ │ │ ├── CPUAffineDeformationFieldKernel.h
│ │ │ │ ├── CPUBlockMatchingKernel.h
│ │ │ │ ├── CPUConvolutionKernel.h
│ │ │ │ ├── CPUKernelFactory.h
│ │ │ │ ├── CPUOptimiseKernel.h
│ │ │ │ ├── CPUResampleImageKernel.h
│ │ │ │ ├── ConvolutionKernel.h
│ │ │ │ ├── Kernel.h
│ │ │ │ ├── KernelFactory.h
│ │ │ │ ├── OptimiseKernel.h
│ │ │ │ ├── Platform.h
│ │ │ │ ├── ResampleImageKernel.h
│ │ │ │ ├── _reg_ReadWriteImage.h
│ │ │ │ ├── _reg_ReadWriteMatrix.h
│ │ │ │ ├── _reg_aladin.cpp
│ │ │ │ ├── _reg_aladin.h
│ │ │ │ ├── _reg_aladin_sym.cpp
│ │ │ │ ├── _reg_aladin_sym.h
│ │ │ │ ├── _reg_base.h
│ │ │ │ ├── _reg_blockMatching.h
│ │ │ │ ├── _reg_dti.h
│ │ │ │ ├── _reg_f3d.h
│ │ │ │ ├── _reg_f3d2.h
│ │ │ │ ├── _reg_f3d_sym.h
│ │ │ │ ├── _reg_femTrans.h
│ │ │ │ ├── _reg_globalTrans.h
│ │ │ │ ├── _reg_kld.h
│ │ │ │ ├── _reg_lncc.h
│ │ │ │ ├── _reg_localTrans.h
│ │ │ │ ├── _reg_localTrans_jac.h
│ │ │ │ ├── _reg_localTrans_regul.h
│ │ │ │ ├── _reg_macros.h
│ │ │ │ ├── _reg_maths.h
│ │ │ │ ├── _reg_maths_eigen.h
│ │ │ │ ├── _reg_measure.h
│ │ │ │ ├── _reg_mind.h
│ │ │ │ ├── _reg_nmi.h
│ │ │ │ ├── _reg_optimiser.cpp
│ │ │ │ ├── _reg_optimiser.h
│ │ │ │ ├── _reg_resampling.h
│ │ │ │ ├── _reg_splineBasis.h
│ │ │ │ ├── _reg_ssd.h
│ │ │ │ ├── _reg_stringFormat.h
│ │ │ │ ├── _reg_tools.h
│ │ │ │ ├── nifti1.h
│ │ │ │ ├── nifti1_io.h
│ │ │ │ ├── png.h
│ │ │ │ ├── pngconf.h
│ │ │ │ ├── pngdebug.h
│ │ │ │ ├── pnginfo.h
│ │ │ │ ├── pnglibconf.h
│ │ │ │ ├── pngpriv.h
│ │ │ │ ├── pngstruct.h
│ │ │ │ ├── readpng.h
│ │ │ │ ├── reg_png.h
│ │ │ │ ├── zlib.h
│ │ │ │ ├── znzlib.h
│ │ │ │ └── zutil.h
│ │ │ ├── reg_aladin
│ │ │ ├── reg_resample
│ │ │ ├── rigid_reg.sh
│ │ │ └── transform.sh
│ └── registrator.py
└── utils
│ ├── __init__.py
│ ├── generic.py
│ ├── logging_utils.py
│ └── zenodo.py
├── docs
├── Makefile
├── make.bat
└── source
│ ├── brain-extraction.rst
│ ├── brainles-preprocessing.rst
│ ├── conf.py
│ ├── defacing.rst
│ ├── index.rst
│ └── registration.rst
├── example
├── example_data
│ ├── OtherEXampleFromTCIA
│ │ ├── MRHR_FLAIR_AX_OtherEXampleTCIA_TCGA-FG-6692_Si_TCGA-FG-6692_MRHR_FLAIR_AX_SE_IR_5_tir2d1_21_fla.nii.gz
│ │ ├── MRHR_T1_AX_POST_GAD_OtherEXampleTCIA_TCGA-FG-6692_Si_TCGA-FG-6692_MRHR_T1_AX_POST_GAD_SE_13_se2d1r_t1c.nii.gz
│ │ ├── MRHR_T2_AX_OtherEXampleTCIA_TCGA-FG-6692_Si_TCGA-FG-6692_MRHR_T2_AX_SE_2_tse2d1_11_t2.nii.gz
│ │ └── T1_AX_OtherEXampleTCIA_TCGA-FG-6692_Si_TCGA-FG-6692_T1_AX_SE_10_se2d1_t1.nii.gz
│ └── TCGA-DU-7294
│ │ ├── AXIAL_FLAIR_RF2_150_TCGA-DU-7294_TCGA-DU-7294_GE_TCGA-DU-7294_AXIAL_FLAIR_RF2_150_IR_7_fla.nii.gz
│ │ ├── AX_T1_POST_GD_FLAIR_TCGA-DU-7294_TCGA-DU-7294_GE_TCGA-DU-7294_AX_T1_POST_GD_FLAIR_RM_13_t1c.nii.gz
│ │ ├── AX_T1_pre_gd_TCGA-DU-7294_TCGA-DU-7294_GE_TCGA-DU-7294_AX_T1_pre_gd_RM_8_t1.nii.gz
│ │ └── AX_T2_FR-FSE_RF2_150_TCGA-DU-7294_TCGA-DU-7294_GE_TCGA-DU-7294_AX_T2_FR-FSE_RF2_150_RM_4_t2.nii.gz
└── example_modality_centric_preprocessor.py
├── pyproject.toml
└── tests
├── registrator_base.py
├── test_data
└── input
│ ├── ants_matrix.mat
│ ├── bet_tcia_example_t1c_mask.nii.gz
│ ├── ereg_matrix.mat
│ ├── niftyreg_matrix.txt
│ ├── tcia_example_t1.nii.gz
│ └── tcia_example_t1c.nii.gz
├── test_hdbet_brain_extractor.py
├── test_registrators.py
└── test_zenodo.py
/.github/ISSUE_TEMPLATE/bug_report.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Bug report
3 | about: Create a report to help us improve
4 | title: '[BUG] '
5 | labels: 'bug'
6 | assignees: ''
7 |
8 | ---
9 |
10 | **Describe the bug**
11 | A clear and concise description of what the bug is.
12 |
13 | **To Reproduce**
14 | Steps to reproduce the behavior:
15 | 1. Go to '...'
16 | 2. Install '....'
17 | 3. Run commands '....'
18 |
19 | **Expected behavior**
20 | A clear and concise description of what you expected to happen.
21 |
22 | **Screenshots**
23 | If applicable, add screenshots to help explain your problem.
24 |
25 | **Environment**
26 |
27 | ### Operating system and version?
28 | e.g. Ubuntu 23.10
29 | 
30 | 
31 | ### NVIDIA drivers and GPUs
32 | please paste the output of the following command (or an equivalent for your system):
33 | ```sh
34 | nvidia-smi
35 | ```
36 |
37 | You should see something like:
38 | ```
39 | +---------------------------------------------------------------------------------------+
40 | | NVIDIA-SMI 530.30.02 Driver Version: 530.30.02 CUDA Version: 12.1 |
41 | |-----------------------------------------+----------------------+----------------------+
42 | | GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC |
43 | | Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |
44 | | | | MIG M. |
45 | |=========================================+======================+======================|
46 | | 0 NVIDIA RTX A5000 Off| 00000000:01:00.0 Off | Off |
47 | | 30% 17C P8 12W / 230W| 6MiB / 24564MiB | 0% Default |
48 | | | | N/A |
49 | +-----------------------------------------+----------------------+----------------------+
50 | | 1 Quadro RTX 8000 Off| 00000000:C1:00.0 Off | Off |
51 | | 33% 17C P8 9W / 260W| 6MiB / 49152MiB | 0% Default |
52 | | | | N/A |
53 | +-----------------------------------------+----------------------+----------------------+
54 | ```
55 |
56 |
57 | ### Python environment and version?
58 | e.g. Conda environment with Python 3.10. Check your Python version with:
59 | ```sh
60 | python --version
61 | ```
62 |
63 | ### Version of brainles_preprocessing?
64 | Please specify your version of brainles_preprocessing (and make sure you are running the latest version):
65 | ```sh
66 | pip freeze | grep brainles_preprocessing
67 | ```
68 |
69 | **Additional context**
70 | Add any other context about the problem here.
71 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/feature_request.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Feature request
3 | about: Suggest an idea for this project
4 | title: '[FEATURE] '
5 | labels: 'enhancement'
6 | assignees: ''
7 |
8 | ---
9 |
10 | **Is your feature request related to a problem? Please describe.**
11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
12 |
13 | **Describe the solution you'd like**
14 | A clear and concise description of what you want to happen.
15 |
16 | **Describe alternatives you've considered**
17 | A clear and concise description of any alternative solutions or features you've considered.
18 |
19 | **Additional context**
20 | Add any other context or screenshots about the feature request here.
21 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/question.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Question
3 | about: Ask your question (please make sure to read the FAQ first)
4 | title: '[QUESTION] '
5 | labels: 'question'
6 | assignees: ''
7 |
8 | ---
9 |
10 | **Your question**
11 | A clear and concise description of your question.
12 |
--------------------------------------------------------------------------------
/.github/workflows/autoformat.yml:
--------------------------------------------------------------------------------
1 | name: autoformat
2 |
3 | on:
4 | issue_comment:
5 | types: [created, edited]
6 | jobs:
7 | run_autoformat:
8 | uses: BrainLesion/BrainLes/.github/workflows/autoformat.yml@main
9 | secrets: inherit
10 |
--------------------------------------------------------------------------------
/.github/workflows/pr-lint.yml:
--------------------------------------------------------------------------------
1 | name: lint
2 |
3 | on:
4 | pull_request:
5 | branches:
6 | - main
7 | jobs:
8 | run_lint:
9 | uses: BrainLesion/BrainLes/.github/workflows/pr_lint.yml@main
10 | secrets: inherit
11 |
--------------------------------------------------------------------------------
/.github/workflows/release.yml:
--------------------------------------------------------------------------------
1 | name: release
2 |
3 | on:
4 | release:
5 | types: [created]
6 | workflow_dispatch:
7 |
8 | jobs:
9 | publish:
10 | name: Publish to PyPI
11 | runs-on: ubuntu-latest
12 | steps:
13 | - name: Checkout Repository
14 | uses: actions/checkout@v3
15 | - name: Set up Python
16 | uses: actions/setup-python@v4
17 | with:
18 | python-version: "3.x"
19 | - name: Install dependencies
20 | run: |
21 | pip install build
22 | pip install twine
23 | - name: Build package
24 | run: python -m build
25 | - name: Upload to PyPI
26 | env:
27 | TWINE_USERNAME: __token__
28 | TWINE_PASSWORD: ${{ secrets.PYPI_API_TOKEN }}
29 | run: |
30 | twine upload dist/*
31 |
--------------------------------------------------------------------------------
/.github/workflows/tests.yml:
--------------------------------------------------------------------------------
1 | # This workflow will install Python dependencies, run tests and lint with a variety of Python versions
2 | # For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python
3 |
4 | name: tests
5 |
6 | on:
7 | push:
8 | branches: [ "main" ]
9 | pull_request:
10 | branches: [ "main" ]
11 |
12 | jobs:
13 | build:
14 |
15 | runs-on: ubuntu-latest
16 | strategy:
17 | fail-fast: false
18 | matrix:
19 | python-version: ["3.10", "3.11", "3.12"]
20 |
21 | steps:
22 | - uses: actions/checkout@v3
23 | - name: Set up Python ${{ matrix.python-version }}
24 | uses: actions/setup-python@v3
25 | with:
26 | python-version: ${{ matrix.python-version }}
27 | - name: Install dependencies
28 | run: |
29 | python -m pip install --upgrade pip
30 | python -m pip install uv
31 | uv pip install flake8 pytest --system
32 | uv pip install -e .[all] --system
33 | - name: Lint with flake8
34 | run: |
35 | # stop the build if there are Python syntax errors or undefined names
36 | flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
37 | # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
38 | flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
39 | - name: Test with pytest
40 | run: |
41 | pytest
42 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | pip-wheel-metadata/
24 | share/python-wheels/
25 | *.egg-info/
26 | .installed.cfg
27 | *.egg
28 | MANIFEST
29 |
30 | # PyInstaller
31 | # Usually these files are written by a python script from a template
32 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
33 | *.manifest
34 | *.spec
35 |
36 | # Installer logs
37 | pip-log.txt
38 | pip-delete-this-directory.txt
39 |
40 | # Unit test / coverage reports
41 | htmlcov/
42 | .tox/
43 | .nox/
44 | .coverage
45 | .coverage.*
46 | .cache
47 | nosetests.xml
48 | coverage.xml
49 | *.cover
50 | *.py,cover
51 | .hypothesis/
52 | .pytest_cache/
53 |
54 | # Translations
55 | *.mo
56 | *.pot
57 |
58 | # Django stuff:
59 | *.log
60 | local_settings.py
61 | db.sqlite3
62 | db.sqlite3-journal
63 |
64 | # Flask stuff:
65 | instance/
66 | .webassets-cache
67 |
68 | # Scrapy stuff:
69 | .scrapy
70 |
71 | # Sphinx documentation
72 | docs/_build/
73 |
74 | # PyBuilder
75 | target/
76 |
77 | # Jupyter Notebook
78 | .ipynb_checkpoints
79 |
80 | # IPython
81 | profile_default/
82 | ipython_config.py
83 |
84 | # pyenv
85 | .python-version
86 |
87 | # pipenv
88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
91 | # install all needed dependencies.
92 | #Pipfile.lock
93 |
94 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow
95 | __pypackages__/
96 |
97 | # Celery stuff
98 | celerybeat-schedule
99 | celerybeat.pid
100 |
101 | # SageMath parsed files
102 | *.sage.py
103 |
104 | # Environments
105 | .env
106 | .venv
107 | env/
108 | venv/
109 | ENV/
110 | env.bak/
111 | venv.bak/
112 |
113 | # Spyder project settings
114 | .spyderproject
115 | .spyproject
116 |
117 | # Rope project settings
118 | .ropeproject
119 |
120 | # mkdocs documentation
121 | /site
122 |
123 | # mypy
124 | .mypy_cache/
125 | .dmypy.json
126 | dmypy.json
127 |
128 | # Pyre type checker
129 | .pyre/
130 |
131 | .vscode
132 |
133 | .DS_Store
134 |
135 | brainles_preprocessing/registration/atlases
136 |
--------------------------------------------------------------------------------
/.readthedocs.yaml:
--------------------------------------------------------------------------------
1 | version: 2
2 |
3 | build:
4 | os: "ubuntu-22.04"
5 | tools:
6 | python: "3.10"
7 | jobs:
8 | post_create_environment:
9 | # Install poetry
10 | # https://python-poetry.org/docs/#installing-manually
11 | - pip install poetry
12 | post_install:
13 | # Install dependencies with 'docs' dependency group
14 | # https://python-poetry.org/docs/managing-dependencies/#dependency-groups
15 | # VIRTUAL_ENV needs to be set manually for now.
16 | # See https://github.com/readthedocs/readthedocs.org/pull/11152/
17 | - VIRTUAL_ENV=$READTHEDOCS_VIRTUALENV_PATH poetry install --with docs --all-extras
18 |
19 | sphinx:
20 | configuration: docs/source/conf.py
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 | # BrainLes-Preprocessing
4 | [](https://pypi.org/project/brainles-preprocessing/)
5 | [](https://pypi.python.org/pypi/brainles-preprocessing/)
6 | [](http://brainles-preprocessing.readthedocs.io/?badge=latest)
7 | [](https://github.com/BrainLesion/preprocessing/actions/workflows/tests.yml)
8 | [](https://opensource.org/licenses/Apache-2.0)
9 |
10 |
11 | `BrainLes preprocessing` is a comprehensive tool for preprocessing tasks in biomedical imaging, with a focus on (but not limited to) multi-modal brain MRI. It can be used to build modular preprocessing pipelines:
12 | 
13 | This includes **normalization**, **co-registration**, **atlas registration** and **skull stripping / brain extraction**.
14 | 
15 | BrainLes is written to be `backend-agnostic`, meaning the registration, brain extraction and defacing tools can be swapped.
16 |
17 |
18 |
19 |
20 | ## Installation
21 |
22 | With a Python 3.10+ environment you can install directly from [pypi.org](https://pypi.org/project/brainles-preprocessing/):
23 |
24 | ```
25 | pip install brainles-preprocessing
26 | ```
27 |
28 | We recommend using Python `3.10 / 3.11 / 3.12`.
29 |
30 | > [!NOTE]
31 | > For Python `3.13` the installation can currently fail with the error `Failed to build antspyx`.
32 | > This usually means that there is no pre-built wheel for the package and it has to be built locally.
33 | > This requires cmake (install it e.g. with `pip install cmake`) and quite some time.
34 | > Rerunning the installation with cmake installed should fix the error.
35 |
36 |
37 | ## Usage
38 | A minimal example that registers a t1c image (center modality) and one moving modality (flair) to the standard atlas using ANTs and skull-strips them using HD-BET could look like this:
39 | ```python
40 | from pathlib import Path
41 | from brainles_preprocessing.modality import Modality, CenterModality
42 | from brainles_preprocessing.normalization.percentile_normalizer import (
43 | PercentileNormalizer,
44 | )
45 | from brainles_preprocessing.preprocessor import Preprocessor
46 |
47 | patient_folder = Path("/home/marcelrosier/preprocessing/patient")
48 |
49 | # specify a normalizer
50 | percentile_normalizer = PercentileNormalizer(
51 | lower_percentile=0.1,
52 | upper_percentile=99.9,
53 | lower_limit=0,
54 | upper_limit=1,
55 | )
56 |
57 | # define center and moving modalities
58 | center = CenterModality(
59 | modality_name="t1c",
60 | input_path=patient_folder / "t1c.nii.gz",
61 | normalizer=percentile_normalizer,
62 | # specify the output paths for the raw and normalized images of selected steps - here atlas registration, brain extraction and defacing
63 | raw_skull_output_path="patient/raw_skull_dir/t1c_skull_raw.nii.gz",
64 | raw_bet_output_path="patient/raw_bet_dir/t1c_bet_raw.nii.gz",
65 | raw_defaced_output_path="patient/raw_defaced_dir/t1c_defaced_raw.nii.gz",
66 | normalized_skull_output_path="patient/norm_skull_dir/t1c_skull_normalized.nii.gz",
67 | normalized_bet_output_path="patient/norm_bet_dir/t1c_bet_normalized.nii.gz",
68 | normalized_defaced_output_path="patient/norm_defaced_dir/t1c_defaced_normalized.nii.gz",
69 | # specify output paths for the brain extraction and defacing masks
70 | bet_mask_output_path="patient/masks/t1c_bet_mask.nii.gz",
71 | defacing_mask_output_path="patient/masks/t1c_defacing_mask.nii.gz",
72 | )
73 |
74 | moving_modalities = [
75 | Modality(
76 | modality_name="flair",
77 | input_path=patient_folder / "flair.nii.gz",
78 | normalizer=percentile_normalizer,
79 | # specify the output paths for the raw and normalized images of selected steps - here atlas registration, brain extraction and defacing
80 | raw_skull_output_path="patient/raw_skull_dir/fla_skull_raw.nii.gz",
81 | raw_bet_output_path="patient/raw_bet_dir/fla_bet_raw.nii.gz",
82 | raw_defaced_output_path="patient/raw_defaced_dir/fla_defaced_raw.nii.gz",
83 | normalized_skull_output_path="patient/norm_skull_dir/fla_skull_normalized.nii.gz",
84 | normalized_bet_output_path="patient/norm_bet_dir/fla_bet_normalized.nii.gz",
85 | normalized_defaced_output_path="patient/norm_defaced_dir/fla_defaced_normalized.nii.gz",
86 | )
87 | ]
88 |
89 | # instantiate and run the preprocessor using defaults for registration/ brain extraction/ defacing backends
90 | preprocessor = Preprocessor(
91 | center_modality=center,
92 | moving_modalities=moving_modalities,
93 | )
94 |
95 | preprocessor.run()
96 |
97 | ```
98 |
99 |
100 | The package lets you choose the registration backend, brain extraction tool and defacing method.
101 | An example notebook with 4 modalities and further outputs and customizations can be found via the following badge:
102 |
103 | [](https://nbviewer.org/github/BrainLesion/tutorials/blob/main/preprocessing/preprocessing_tutorial.ipynb)
104 |
105 |
106 |
107 |
108 | For further information please have a look at our [Jupyter Notebook tutorials](https://github.com/BrainLesion/tutorials/tree/main/preprocessing) in our tutorials repo (WIP).
109 |
110 |
111 |
112 |
113 |
114 |
115 |
116 |
117 | ## Documentation
118 | We provide (work-in-progress) documentation. Have a look [here](https://brainles-preprocessing.readthedocs.io/en/latest/?badge=latest).
119 |
120 | ## FAQ
121 | If you use the tools wrapped by this package, please credit their authors by citing their work.
122 |
123 | ### Registration
124 | We currently provide support for [ANTs](https://github.com/ANTsX/ANTs) (default) and [Niftyreg](https://github.com/KCL-BMEIS/niftyreg) (Linux).
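
A minimal sketch of swapping the backend, reusing the `center` and `moving_modalities` objects from the usage example above (the `registrator` keyword is an assumption based on the backend-agnostic design; check the tutorials for the exact parameter name):

```python
from brainles_preprocessing.preprocessor import Preprocessor
from brainles_preprocessing.registration import NiftyRegRegistrator

# assumption: Preprocessor accepts a registrator instance to override the default ANTs backend
preprocessor = Preprocessor(
    center_modality=center,
    moving_modalities=moving_modalities,
    registrator=NiftyRegRegistrator(),
)
preprocessor.run()
```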
125 |
126 | ### Atlas Reference
127 | We provide the SRI-24 atlas from this [publication](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC2915788/).
128 | However, custom atlases in NIfTI format are supported.
129 |
130 | ### Brain extraction
131 | We currently provide support for [HD-BET](https://github.com/MIC-DKFZ/HD-BET).
132 |
133 | ### Defacing
134 | We currently provide support for [Quickshear](https://github.com/nipy/quickshear).
135 |
--------------------------------------------------------------------------------
/brainles_preprocessing/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BrainLesion/preprocessing/960edb333f9f28d14e583f9a1ffbfaff8785b288/brainles_preprocessing/__init__.py
--------------------------------------------------------------------------------
/brainles_preprocessing/brain_extraction/__init__.py:
--------------------------------------------------------------------------------
1 | from .brain_extractor import HDBetExtractor
2 |
--------------------------------------------------------------------------------
/brainles_preprocessing/brain_extraction/brain_extractor.py:
--------------------------------------------------------------------------------
1 | # TODO add typing and docs
2 | import shutil
3 | from abc import ABC, abstractmethod
4 | from pathlib import Path
5 | from typing import Optional, Union
6 | from enum import Enum
7 |
8 | from auxiliary.nifti.io import read_nifti, write_nifti
9 | from brainles_hd_bet import run_hd_bet
10 |
11 |
12 | class Mode(Enum):
13 | FAST = "fast"
14 | ACCURATE = "accurate"
15 |
16 |
17 | class BrainExtractor(ABC):
18 | @abstractmethod
19 | def extract(
20 | self,
21 | input_image_path: Union[str, Path],
22 | masked_image_path: Union[str, Path],
23 | brain_mask_path: Union[str, Path],
24 | log_file_path: Optional[Union[str, Path]],
25 | mode: Union[str, Mode],
26 | **kwargs,
27 | ) -> None:
28 | """
29 | Abstract method to extract the brain from an input image.
30 |
31 | Args:
32 | input_image_path (str or Path): Path to the input image.
33 | masked_image_path (str or Path): Path where the brain-extracted image will be saved.
34 | brain_mask_path (str or Path): Path where the brain mask will be saved.
35 | log_file_path (str or Path, Optional): Path to the log file.
36 | mode (str or Mode): Extraction mode.
37 | **kwargs: Additional keyword arguments.
38 | """
39 | pass
40 |
41 | def apply_mask(
42 | self,
43 | input_image_path: Union[str, Path],
44 | mask_path: Union[str, Path],
45 | bet_image_path: Union[str, Path],
46 | ) -> None:
47 | """
48 | Apply a brain mask to an input image.
49 |
50 | Args:
51 | input_image_path (str or Path): Path to the input image (NIfTI format).
52 | mask_path (str or Path): Path to the brain mask image (NIfTI format).
53 | bet_image_path (str or Path): Path to save the resulting masked image (NIfTI format).
54 | """
55 |
56 | try:
57 | # Read data
58 | input_data = read_nifti(str(input_image_path))
59 | mask_data = read_nifti(str(mask_path))
60 | except FileNotFoundError as e:
61 | raise FileNotFoundError(f"File not found: {e.filename}") from e
62 | except Exception as e:
63 | raise RuntimeError(f"Error reading files: {e}") from e
64 |
65 | # Check that the input and mask have the same shape
66 | if input_data.shape != mask_data.shape:
67 | raise ValueError("Input image and mask must have the same dimensions.")
68 |
69 | # Mask and save it
70 | masked_data = input_data * mask_data
71 |
72 | try:
73 | write_nifti(
74 | input_array=masked_data,
75 | output_nifti_path=str(bet_image_path),
76 | reference_nifti_path=str(input_image_path),
77 | create_parent_directory=True,
78 | )
79 | except Exception as e:
80 | raise RuntimeError(f"Error writing output file: {e}") from e
81 |
82 |
83 | class HDBetExtractor(BrainExtractor):
84 | def extract(
85 | self,
86 | input_image_path: Union[str, Path],
87 | masked_image_path: Union[str, Path],
88 | brain_mask_path: Union[str, Path],
89 | log_file_path: Optional[Union[str, Path]] = None,
90 | # TODO convert mode to enum
91 | mode: Union[str, Mode] = Mode.ACCURATE,
92 | device: Optional[Union[int, str]] = 0,
93 | do_tta: Optional[bool] = True,
94 | ) -> None:
95 | # GPU + accurate + TTA
96 | """
97 | Skull-strips images with HD-BET and generates a skull-stripped file and mask.
98 |
99 | Args:
100 | input_image_path (str or Path): Path to the input image.
101 | masked_image_path (str or Path): Path where the brain-extracted image will be saved.
102 | brain_mask_path (str or Path): Path where the brain mask will be saved.
103 | log_file_path (str or Path, Optional): Path to the log file.
104 | mode (str or Mode): Extraction mode ('fast' or 'accurate').
105 | device (str or int): Device to use for computation (e.g., 0 for GPU 0, 'cpu' for CPU).
106 | do_tta (bool): whether to do test time data augmentation by mirroring along all axes.
107 | """
108 |
109 | # Ensure mode is a Mode enum instance
110 | if isinstance(mode, str):
111 | try:
112 | mode_enum = Mode(mode.lower())
113 | except ValueError:
114 | raise ValueError(f"'{mode}' is not a valid Mode.")
115 | elif isinstance(mode, Mode):
116 | mode_enum = mode
117 | else:
118 | raise TypeError("Mode must be a string or a Mode enum instance.")
119 |
120 | # Run HD-BET
121 | run_hd_bet(
122 | mri_fnames=[str(input_image_path)],
123 | output_fnames=[str(masked_image_path)],
124 | mode=mode_enum.value,
125 | device=device,
126 | # TODO consider postprocessing
127 | postprocess=False,
128 | do_tta=do_tta,
129 | keep_mask=True,
130 | overwrite=True,
131 | )
132 |
133 | # Construct the path to the generated mask
134 | masked_image_path = Path(masked_image_path)
135 | hdbet_mask_path = masked_image_path.with_name(
136 | masked_image_path.name.replace(".nii.gz", "_mask.nii.gz")
137 | )
138 |
139 | if hdbet_mask_path.resolve() != Path(brain_mask_path).resolve():
140 | try:
141 | shutil.copyfile(
142 | src=str(hdbet_mask_path),
143 | dst=str(brain_mask_path),
144 | )
145 | except Exception as e:
146 | raise RuntimeError(f"Error copying mask file: {e}") from e
147 |
--------------------------------------------------------------------------------
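A minimal usage sketch for the `HDBetExtractor` defined in `brain_extractor.py` above, based on the `extract` signature; the file names are placeholders:

```python
from brainles_preprocessing.brain_extraction import HDBetExtractor

extractor = HDBetExtractor()
# skull-strip a (placeholder) T1c image; device may be a GPU index such as 0 or "cpu"
extractor.extract(
    input_image_path="t1c.nii.gz",
    masked_image_path="t1c_bet.nii.gz",
    brain_mask_path="t1c_bet_mask.nii.gz",
    mode="accurate",
    device="cpu",
    do_tta=False,
)
```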
/brainles_preprocessing/brats/__init__.py:
--------------------------------------------------------------------------------
1 | from .t1_centric import preprocess_brats_style_t1_centric
2 | from .t1c_centric import preprocess_brats_style_t1c_centric
3 |
--------------------------------------------------------------------------------
/brainles_preprocessing/brats/t1_centric.py:
--------------------------------------------------------------------------------
1 | # # TODO currently broken, make this work again
2 |
3 | # from brainles_preprocessing.core import (
4 | # Modality,
5 | # preprocess_modality_centric_to_atlas_space,
6 | # )
7 |
8 |
9 | # def preprocess_brats_style_t1_centric(
10 | # input_t1: str,
11 | # output_t1: str,
12 | # input_t1c: str,
13 | # output_t1c: str,
14 | # input_t2: str,
15 | # output_t2: str,
16 | # input_flair: str,
17 | # output_flair: str,
18 | # bet_mode: str = "gpu",
19 | # limit_cuda_visible_devices: str | None = None,
20 | # temporary_directory: str | None = None,
21 | # keep_coregistration: str | None = None,
22 | # keep_atlas_registration: str | None = None,
23 | # keep_brainextraction: str | None = None,
24 | # ) -> None:
25 | # """
26 | # Preprocesses multiple modalities in a BRATS-style dataset to atlas space.
27 |
28 | # Args:
29 | # input_t1 (str): Path to the input T1 modality data.
30 | # output_t1 (str): Path to save the preprocessed T1 modality data.
31 | # input_t1c (str): Path to the input T1c modality data.
32 | # output_t1c (str): Path to save the preprocessed T1c modality data.
33 | # input_t2 (str): Path to the input T2 modality data.
34 | # output_t2 (str): Path to save the preprocessed T2 modality data.
35 | # input_flair (str): Path to the input FLAIR modality data.
36 | # output_flair (str): Path to save the preprocessed FLAIR modality data.
37 | # bet_mode (str, optional): The mode for brain extraction, e.g., "gpu".
38 | # limit_cuda_visible_devices (str | None, optional): Specify CUDA devices to use.
39 | # temporary_directory (str | None, optional): Path to a custom temporary directory.
40 | # keep_coregistration (str | None, optional): Specify if coregistration should be retained.
41 | # keep_atlas_registration (str | None, optional): Specify if atlas registration should be retained.
42 | # keep_brainextraction (str | None, optional): Specify if brain extraction should be retained.
43 |
44 | # Description:
45 | # This function preprocesses multiple medical image modalities from a BRATS-style dataset to align them to the
46 | # atlas space. It provides options for various preprocessing steps such as brain extraction, registration,
47 | # and intensity normalization.
48 |
49 | # If a custom temporary directory is not provided using the `temporary_directory` parameter, a temporary
50 | # directory is created to store intermediate results. You can use a custom temporary directory path to
51 | # facilitate debugging or control the location of temporary files.
52 |
53 | # Example:
54 | # >>> preprocess_brats_style_t1_centric(
55 | # ... input_t1="/path/to/t1.nii",
56 | # ... output_t1="/path/to/preprocessed_t1.nii",
57 | # ... input_t1c="/path/to/t1c.nii",
58 | # ... output_t1c="/path/to/preprocessed_t1c.nii",
59 | # ... input_t2="/path/to/t2.nii",
60 | # ... output_t2="/path/to/preprocessed_t2.nii",
61 | # ... input_flair="/path/to/flair.nii",
62 | # ... output_flair="/path/to/preprocessed_flair.nii",
63 | # ... bet_mode="gpu",
64 | # ... limit_cuda_visible_devices="0",
65 | # ... temporary_directory="/path/to/custom_temp_dir",
66 | # ... keep_coregistration="True",
67 | # ... keep_atlas_registration="False",
68 | # ... keep_brainextraction=None
69 | # ... )
70 | # """
71 | # # Create a Modality object for the primary T1 modality
72 | # primary = Modality(
73 | # modality_name="t1",
74 | # input_path=input_t1,
75 | # output_path=output_t1,
76 | # bet=True,
77 | # )
78 |
79 | # # Create Modality objects for other moving modalities
80 | # moving_modalities = [
81 | # Modality(
82 | # modality_name="t1c",
83 | # input_path=input_t1c,
84 | # output_path=output_t1c,
85 | # bet=True,
86 | # ),
87 | # Modality(
88 | # modality_name="t2",
89 | # input_path=input_t2,
90 | # output_path=output_t2,
91 | # bet=True,
92 | # ),
93 | # Modality(
94 | # modality_name="flair",
95 | # input_path=input_flair,
96 | # output_path=output_flair,
97 | # bet=True,
98 | # ),
99 | # ]
100 |
101 | # # Perform preprocessing to align modalities to the atlas space
102 | # preprocess_modality_centric_to_atlas_space(
103 | # center_modality=primary,
104 | # moving_modalities=moving_modalities,
105 | # bet_mode=bet_mode,
106 | # limit_cuda_visible_devices=limit_cuda_visible_devices,
107 | # temporary_directory=temporary_directory,
108 | # keep_coregistration=keep_coregistration,
109 | # keep_atlas_registration=keep_atlas_registration,
110 | # keep_brainextraction=keep_brainextraction,
111 | # )
112 |
--------------------------------------------------------------------------------
/brainles_preprocessing/brats/t1c_centric.py:
--------------------------------------------------------------------------------
1 | # # TODO currently broken, make this work again
2 |
3 | # from brainles_preprocessing.core import (
4 | # Modality,
5 | # preprocess_modality_centric_to_atlas_space,
6 | # )
7 |
8 |
9 | # def preprocess_brats_style_t1c_centric(
10 | # input_t1c: str,
11 | # output_t1c: str,
12 | # input_t1: str,
13 | # output_t1: str,
14 | # input_t2: str,
15 | # output_t2: str,
16 | # input_flair: str,
17 | # output_flair: str,
18 | # bet_mode: str = "gpu",
19 | # limit_cuda_visible_devices: str | None = None,
20 | # temporary_directory: str | None = None,
21 | # keep_coregistration: str | None = None,
22 | # keep_atlas_registration: str | None = None,
23 | # keep_brainextraction: str | None = None,
24 | # ) -> None:
25 | # """
26 | # Preprocesses multiple modalities in a BRATS-style dataset to atlas space.
27 |
28 | # Args:
29 | # input_t1c (str): Path to the input T1c modality data.
30 | # output_t1c (str): Path to save the preprocessed T1c modality data.
31 | # input_t1 (str): Path to the input T1 modality data.
32 | # output_t1 (str): Path to save the preprocessed T1 modality data.
33 | # input_t2 (str): Path to the input T2 modality data.
34 | # output_t2 (str): Path to save the preprocessed T2 modality data.
35 | # input_flair (str): Path to the input FLAIR modality data.
36 | # output_flair (str): Path to save the preprocessed FLAIR modality data.
37 | # bet_mode (str, optional): The mode for brain extraction, e.g., "gpu".
38 | # limit_cuda_visible_devices (str | None, optional): Specify CUDA devices to use.
39 | # temporary_directory (str | None, optional): Path to a custom temporary directory.
40 | # keep_coregistration (str | None, optional): Specify if coregistration should be retained.
41 | # keep_atlas_registration (str | None, optional): Specify if atlas registration should be retained.
42 | # keep_brainextraction (str | None, optional): Specify if brain extraction should be retained.
43 |
44 | # Description:
45 | # This function preprocesses multiple medical image modalities from a BRATS-style dataset to align them to the
46 | # atlas space. It provides options for various preprocessing steps such as brain extraction, registration,
47 | # and intensity normalization.
48 |
49 | # If a custom temporary directory is not provided using the `temporary_directory` parameter, a temporary
50 | # directory is created to store intermediate results. You can use a custom temporary directory path to
51 | # facilitate debugging or control the location of temporary files.
52 |
53 | # Example:
54 | # >>> preprocess_brats_style_t1c_centric(
55 | # ... input_t1c="/path/to/t1c.nii",
56 | # ... output_t1c="/path/to/preprocessed_t1c.nii",
57 | # ... input_t1="/path/to/t1.nii",
58 | # ... output_t1="/path/to/preprocessed_t1.nii",
59 | # ... input_t2="/path/to/t2.nii",
60 | # ... output_t2="/path/to/preprocessed_t2.nii",
61 | # ... input_flair="/path/to/flair.nii",
62 | # ... output_flair="/path/to/preprocessed_flair.nii",
63 | # ... bet_mode="gpu",
64 | # ... limit_cuda_visible_devices="0",
65 | # ... temporary_directory="/path/to/custom_temp_dir",
66 | # ... keep_coregistration="True",
67 | # ... keep_atlas_registration="False",
68 | # ... keep_brainextraction=None
69 | # ... )
70 | # """
71 | # # Create a Modality object for the primary T1c modality
72 | # primary = Modality(
73 | # modality_name="t1c",
74 | # input_path=input_t1c,
75 | # output_path=output_t1c,
76 | # bet=True,
77 | # )
78 |
79 | # # Create Modality objects for other moving modalities
80 | # moving_modalities = [
81 | # Modality(
82 | # modality_name="t1",
83 | # input_path=input_t1,
84 | # output_path=output_t1,
85 | # bet=True,
86 | # ),
87 | # Modality(
88 | # modality_name="t2",
89 | # input_path=input_t2,
90 | # output_path=output_t2,
91 | # bet=True,
92 | # ),
93 | # Modality(
94 | # modality_name="flair",
95 | # input_path=input_flair,
96 | # output_path=output_flair,
97 | # bet=True,
98 | # ),
99 | # ]
100 |
101 | # # Perform preprocessing to align modalities to the atlas space
102 | # preprocess_modality_centric_to_atlas_space(
103 | # center_modality=primary,
104 | # moving_modalities=moving_modalities,
105 | # bet_mode=bet_mode,
106 | # limit_cuda_visible_devices=limit_cuda_visible_devices,
107 | # temporary_directory=temporary_directory,
108 | # keep_coregistration=keep_coregistration,
109 | # keep_atlas_registration=keep_atlas_registration,
110 | # keep_brainextraction=keep_brainextraction,
111 | # )
112 |
--------------------------------------------------------------------------------
/brainles_preprocessing/cli.py:
--------------------------------------------------------------------------------
1 | from typing import Optional
2 | from pathlib import Path
3 | import typer
4 | from typing_extensions import Annotated
5 | from importlib.metadata import version
6 |
7 |
8 | from brainles_preprocessing.modality import Modality, CenterModality
9 | from brainles_preprocessing.normalization.percentile_normalizer import (
10 | PercentileNormalizer,
11 | )
12 | from brainles_preprocessing.preprocessor import Preprocessor
13 |
14 |
15 | def version_callback(value: bool):
16 | __version__ = version("brainles_preprocessing")
17 | if value:
18 | typer.echo(f"Preprocessor CLI v{__version__}")
19 | raise typer.Exit()
20 |
21 |
22 | app = typer.Typer(
23 | context_settings={"help_option_names": ["-h", "--help"]}, add_completion=False
24 | )
25 |
26 |
27 | @app.command()
28 | def main(
29 | input_t1c: Annotated[
30 | str,
31 | typer.Option(
32 | "-t1c",
33 | "--input_t1c",
34 | help="The path to the T1c image",
35 | ),
36 | ],
37 | input_t1: Annotated[
38 | str,
39 | typer.Option(
40 | "-t1",
41 | "--input_t1",
42 | help="The path to the T1 image",
43 | ),
44 | ],
45 | input_t2: Annotated[
46 | str,
47 | typer.Option(
48 | "-t2",
49 | "--input_t2",
50 | help="The path to the T2 image",
51 | ),
52 | ],
53 | input_fla: Annotated[
54 | str,
55 | typer.Option(
56 | "-fl",
57 | "--input_fla",
58 | help="The path to the FLAIR image",
59 | ),
60 | ],
61 | output_dir: Annotated[
62 | str,
63 | typer.Option(
64 | "-o",
65 | "--output_dir",
66 | help="The path to the output directory",
67 | ),
68 | ],
69 | input_atlas: Annotated[
70 | Optional[str],
71 | typer.Option(
72 | "-a",
73 | "--input_atlas",
74 | help="The path to the atlas image",
75 | ),
76 | ] = "SRI24 BraTS atlas",
77 | version: Annotated[
78 | Optional[bool],
79 | typer.Option(
80 | "-v",
81 | "--version",
82 | callback=version_callback,
83 | is_eager=True,
84 | help="Print the version and exit.",
85 | ),
86 | ] = None,
87 | ):
88 | """
89 | Preprocess the input images according to the BraTS protocol.
90 | """
91 |
92 | output_dir = Path(output_dir)
93 | output_dir.mkdir(parents=True, exist_ok=True)
94 |
95 | # specify a normalizer
96 | percentile_normalizer = PercentileNormalizer(
97 | lower_percentile=0.1,
98 | upper_percentile=99.9,
99 | lower_limit=0,
100 | upper_limit=1,
101 | )
102 |
103 | # define center and moving modalities
104 | center = CenterModality(
105 | modality_name="t1c",
106 | input_path=input_t1c,
107 | normalizer=percentile_normalizer,
108 | # specify the output paths for the raw and normalized images of selected steps - here atlas registration, brain extraction and defacing
109 | raw_skull_output_path=output_dir / "t1c_skull_raw.nii.gz",
110 | raw_bet_output_path=output_dir / "t1c_bet_raw.nii.gz",
111 | raw_defaced_output_path=output_dir / "t1c_defaced_raw.nii.gz",
112 | normalized_skull_output_path=output_dir / "t1c_skull_normalized.nii.gz",
113 | normalized_bet_output_path=output_dir / "t1c_bet_normalized.nii.gz",
114 | normalized_defaced_output_path=output_dir / "t1c_defaced_normalized.nii.gz",
115 | # specify output paths for the brain extraction and defacing masks
116 | bet_mask_output_path=output_dir / "t1c_bet_mask.nii.gz",
117 | defacing_mask_output_path=output_dir / "t1c_defacing_mask.nii.gz",
118 | )
119 |
120 |     # build one moving Modality per remaining input (t1, t2, flair)
121 |     moving_inputs = {"t1": input_t1, "t2": input_t2, "fla": input_fla}
122 |     moving_modalities = [
123 |         Modality(
124 |             modality_name=modality,
125 |             input_path=input_path,
126 |             normalizer=percentile_normalizer,
127 |             # specify the output paths for the raw and normalized images of selected steps - here atlas registration, brain extraction and defacing
128 |             raw_skull_output_path=output_dir / f"{modality}_skull_raw.nii.gz",
129 |             raw_bet_output_path=output_dir / f"{modality}_bet_raw.nii.gz",
130 |             raw_defaced_output_path=output_dir / f"{modality}_defaced_raw.nii.gz",
131 |             normalized_skull_output_path=output_dir / f"{modality}_skull_normalized.nii.gz",
132 |             normalized_bet_output_path=output_dir / f"{modality}_bet_normalized.nii.gz",
133 |             normalized_defaced_output_path=output_dir / f"{modality}_defaced_normalized.nii.gz",
134 |         )
135 |         for modality, input_path in moving_inputs.items()
136 |     ]
137 | 
138 |
139 | # if the input atlas is the SRI24 BraTS atlas, set it to None, because it will be picked up through the package
140 | if input_atlas == "SRI24 BraTS atlas":
141 | input_atlas = None
142 |
143 | # instantiate and run the preprocessor using defaults for registration/ brain extraction/ defacing backends
144 | preprocessor = Preprocessor(
145 | center_modality=center,
146 | moving_modalities=moving_modalities,
147 | temp_folder=output_dir / "temp",
148 | input_atlas=input_atlas,
149 | )
150 |
151 | preprocessor.run()
152 |
153 |
154 | if __name__ == "__main__":
155 | app()
156 |
--------------------------------------------------------------------------------
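The Typer app above can also be invoked programmatically, e.g. from a test; a small sketch with placeholder paths (the input files are assumed to exist):

```python
from typer.testing import CliRunner

from brainles_preprocessing.cli import app

runner = CliRunner()
# mirrors the CLI options defined above; paths are placeholders
result = runner.invoke(
    app,
    [
        "-t1c", "t1c.nii.gz",
        "-t1", "t1.nii.gz",
        "-t2", "t2.nii.gz",
        "-fl", "flair.nii.gz",
        "-o", "output_dir",
    ],
)
print(result.exit_code)
```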
/brainles_preprocessing/constants.py:
--------------------------------------------------------------------------------
1 | from enum import Enum, IntEnum
2 |
3 |
4 | class PreprocessorSteps(IntEnum):
5 | INPUT = 0
6 | COREGISTERED = 1
7 | ATLAS_REGISTERED = 2
8 | ATLAS_CORRECTED = 3
9 | BET = 4
10 | DEFACED = 5
11 |
12 |
13 | class Atlas(str, Enum):
14 | BRATS_SRI24 = "brats_sri24.nii"
15 | """Slightly modified SRI24 atlas as found in the BraTS challenges"""
16 | BRATS_SRI24_SKULLSTRIPPED = "brats_sri24_skullstripped.nii"
17 | """Slightly modified SRI24 skull stripped atlas as found in the BraTS challenges"""
18 |
19 | SRI24 = "sri24.nii"
20 | """SRI24 atlas from https://www.nitrc.org/frs/download.php/4502/sri24_anatomy_unstripped_nifti.zip"""
21 | SRI24_SKULLSTRIPPED = "sri24_skullstripped.nii"
22 | """SRI24 skull stripped atlas from https://www.nitrc.org/frs/download.php/4499/sri24_anatomy_nifti.zip"""
23 |
24 | BRATS_MNI152 = "brats_MNI152lin_T1_1mm.nii.gz"
25 | """Slightly modified MNI152 atlas as found in the BraTS challenges"""
26 |
--------------------------------------------------------------------------------
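The `Atlas` members above map to the bundled atlas file names; a short sketch (whether an `Atlas` member or a path to a custom NIfTI atlas is passed to the `Preprocessor`'s atlas argument is an assumption, see the README FAQ):

```python
from brainles_preprocessing.constants import Atlas, PreprocessorSteps

# each Atlas member's value is the file name of a bundled atlas
print(Atlas.BRATS_SRI24.value)           # brats_sri24.nii
print(Atlas.SRI24_SKULLSTRIPPED.value)   # sri24_skullstripped.nii

# PreprocessorSteps is an ordered IntEnum of the pipeline stages
print(list(PreprocessorSteps))
```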
/brainles_preprocessing/defacing/__init__.py:
--------------------------------------------------------------------------------
1 | from .defacer import Defacer
2 | from .quickshear.quickshear import QuickshearDefacer
3 |
--------------------------------------------------------------------------------
/brainles_preprocessing/defacing/defacer.py:
--------------------------------------------------------------------------------
1 | from abc import ABC, abstractmethod
2 | from pathlib import Path
3 | from typing import Union
4 |
5 | from auxiliary.nifti.io import read_nifti, write_nifti
6 |
7 |
8 | class Defacer(ABC):
9 | """
10 | Base class for defacing medical images using brain masks.
11 |
12 | Subclasses should implement the `deface` method to generate a defaced image
13 | based on the provided input image and mask.
14 | """
15 |
16 | @abstractmethod
17 | def deface(
18 | self,
19 | input_image_path: Union[str, Path],
20 | mask_image_path: Union[str, Path],
21 | ) -> None:
22 | """
23 | Generate a defacing mask for a given input image.
24 |
25 | Args:
26 | input_image_path (str or Path): Path to the input image (NIfTI format).
27 | mask_image_path (str or Path): Path to the output mask image (NIfTI format).
28 | """
29 | pass
30 |
31 | def apply_mask(
32 | self,
33 | input_image_path: Union[str, Path],
34 | mask_path: Union[str, Path],
35 | defaced_image_path: Union[str, Path],
36 | ) -> None:
37 | """
38 | Apply a brain mask to an input image.
39 |
40 | Args:
41 | input_image_path (str or Path): Path to the input image (NIfTI format).
42 | mask_path (str or Path): Path to the brain mask image (NIfTI format).
43 | defaced_image_path (str or Path): Path to save the resulting defaced image (NIfTI format).
44 | """
45 |
46 |         if not Path(input_image_path).is_file():
47 |             raise FileNotFoundError(
48 |                 f"Input image file does not exist: {input_image_path}"
49 |             )
50 |         if not Path(mask_path).is_file():
51 |             raise FileNotFoundError(f"Mask file does not exist: {mask_path}")
52 |
53 | try:
54 | # Read data
55 | input_data = read_nifti(str(input_image_path))
56 | mask_data = read_nifti(str(mask_path))
57 | except Exception as e:
58 | raise RuntimeError(
59 | f"An error occurred while reading input files: {e}"
60 | ) from e
61 |
62 | # Check that the input and mask have the same shape
63 | if input_data.shape != mask_data.shape:
64 | raise ValueError("Input image and mask must have the same dimensions.")
65 |
66 | # Apply mask (element-wise multiplication)
67 | masked_data = input_data * mask_data
68 |
69 | # Save the defaced image
70 | write_nifti(
71 | input_array=masked_data,
72 | output_nifti_path=str(defaced_image_path),
73 | reference_nifti_path=str(input_image_path),
74 | create_parent_directory=True,
75 | )
76 |
--------------------------------------------------------------------------------
/brainles_preprocessing/defacing/quickshear/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BrainLesion/preprocessing/960edb333f9f28d14e583f9a1ffbfaff8785b288/brainles_preprocessing/defacing/quickshear/__init__.py
--------------------------------------------------------------------------------
/brainles_preprocessing/defacing/quickshear/quickshear.py:
--------------------------------------------------------------------------------
1 | from pathlib import Path
2 | from typing import Union
3 |
4 | import nibabel as nib
5 |
6 | from brainles_preprocessing.defacing.defacer import Defacer
7 | from brainles_preprocessing.defacing.quickshear.nipy_quickshear import run_quickshear
8 | from auxiliary.nifti.io import write_nifti
9 |
10 |
11 | class QuickshearDefacer(Defacer):
12 | """
13 | Defacer using Quickshear algorithm.
14 |
15 | Quickshear uses a skull-stripped version of an anatomical image as a reference to deface the unaltered anatomical image.
16 |
17 | Base publication:
18 | - PDF: https://www.researchgate.net/profile/J-Hale/publication/262319696_Quickshear_defacing_for_neuroimages/links/570b97ee08aed09e917516b1/Quickshear-defacing-for-neuroimages.pdf
19 | - Bibtex:
20 | ```
21 | @article{schimke2011quickshear,
22 | title={Quickshear Defacing for Neuroimages.},
23 | author={Schimke, Nakeisha and Hale, John},
24 | journal={HealthSec},
25 | volume={11},
26 | pages={11},
27 | year={2011}
28 | }
29 | ```
30 | """
31 |
32 | def __init__(self, buffer: float = 10.0):
33 | """Initialize Quickshear defacer
34 |
35 | Args:
36 | buffer (float, optional): buffer parameter from quickshear algorithm. Defaults to 10.0.
37 | """
38 | super().__init__()
39 | self.buffer = buffer
40 |
41 | def deface(
42 | self,
43 | input_image_path: Union[str, Path],
44 | mask_image_path: Union[str, Path],
45 | ) -> None:
46 | """
47 | Generate a defacing mask using Quickshear algorithm.
48 |
49 | Note:
50 | The input image must be a brain-extracted (skull-stripped) image.
51 |
52 | Args:
53 | input_image_path (str or Path): Path to the brain-extracted input image.
54 | mask_image_path (str or Path): Path to save the generated mask image.
55 | """
56 |
57 | bet_img = nib.load(str(input_image_path))
58 | mask = run_quickshear(bet_img=bet_img, buffer=self.buffer)
59 | write_nifti(
60 | input_array=mask,
61 | output_nifti_path=str(mask_image_path),
62 | reference_nifti_path=str(input_image_path),
63 | )
64 |
--------------------------------------------------------------------------------
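A usage sketch for `QuickshearDefacer`, chaining `deface` (mask generation from a skull-stripped image) with the inherited `apply_mask`; all paths are placeholders:

```python
from pathlib import Path

from brainles_preprocessing.defacing import QuickshearDefacer

defacer = QuickshearDefacer(buffer=10.0)

# 1) derive a defacing mask from a brain-extracted (skull-stripped) image
defacer.deface(
    input_image_path=Path("t1c_bet.nii.gz"),
    mask_image_path=Path("t1c_defacing_mask.nii.gz"),
)

# 2) apply the mask to the original, non-skull-stripped image
defacer.apply_mask(
    input_image_path=Path("t1c.nii.gz"),
    mask_path=Path("t1c_defacing_mask.nii.gz"),
    defaced_image_path=Path("t1c_defaced.nii.gz"),
)
```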
/brainles_preprocessing/normalization/__init__.py:
--------------------------------------------------------------------------------
1 | from .normalizer_base import Normalizer
2 | from .percentile_normalizer import PercentileNormalizer
3 | from .windowing_normalizer import WindowingNormalizer
4 |
--------------------------------------------------------------------------------
/brainles_preprocessing/normalization/normalizer_base.py:
--------------------------------------------------------------------------------
1 | from abc import ABC, abstractmethod
2 |
3 |
4 | class Normalizer(ABC):
5 | """
6 | Abstract base class for image normalization methods.
7 | """
8 |
9 | def __init__(self):
10 | super().__init__()
11 |
12 | @abstractmethod
13 | def normalize(self, image):
14 | """
15 | Normalize the input image based on the chosen method.
16 |
17 | Parameters:
18 | image (numpy.ndarray): The input image.
19 |
20 | Returns:
21 | numpy.ndarray: The normalized image.
22 | """
23 | pass
24 |
--------------------------------------------------------------------------------
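To illustrate the interface, a hypothetical z-score normalizer (not part of the package) would subclass `Normalizer` like this:

```python
import numpy as np

from brainles_preprocessing.normalization import Normalizer


class ZScoreNormalizer(Normalizer):
    """Hypothetical example: zero-mean / unit-variance normalization."""

    def normalize(self, image: np.ndarray) -> np.ndarray:
        std = image.std()
        if std == 0:
            # constant image: only center it to avoid division by zero
            return image - image.mean()
        return (image - image.mean()) / std
```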
/brainles_preprocessing/normalization/percentile_normalizer.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from .normalizer_base import Normalizer
3 |
4 |
5 | class PercentileNormalizer(Normalizer):
6 | """
7 | Normalizer subclass for percentile-based image normalization.
8 | """
9 |
10 | def __init__(
11 | self,
12 | lower_percentile: float = 0.0,
13 | upper_percentile: float = 100.0,
14 | lower_limit: float = 0,
15 | upper_limit: float = 1,
16 | ):
17 | """
18 | Initialize the PercentileNormalizer.
19 |
20 | Parameters:
21 | lower_percentile (float): The lower percentile for mapping.
22 | upper_percentile (float): The upper percentile for mapping.
23 | lower_limit (float): The lower limit for normalized values.
24 | upper_limit (float): The upper limit for normalized values.
25 | """
26 | super().__init__()
27 | self.lower_percentile = lower_percentile
28 | self.upper_percentile = upper_percentile
29 | self.lower_limit = lower_limit
30 | self.upper_limit = upper_limit
31 |
32 | def normalize(self, image: np.ndarray):
33 | """
34 | Normalize the input image using percentile-based mapping.
35 |
36 | Parameters:
37 | image (numpy.ndarray): The input image.
38 |
39 | Returns:
40 | numpy.ndarray: The percentile-normalized image.
41 | """
42 | lower_value = np.percentile(image, self.lower_percentile)
43 | upper_value = np.percentile(image, self.upper_percentile)
44 | normalized_image = np.clip(
45 | (image - lower_value) / (upper_value - lower_value), 0, 1
46 | )
47 | normalized_image = (
48 | normalized_image * (self.upper_limit - self.lower_limit) + self.lower_limit
49 | )
50 | return normalized_image
51 |
--------------------------------------------------------------------------------
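A quick sketch of the percentile mapping above on synthetic data:

```python
import numpy as np

from brainles_preprocessing.normalization import PercentileNormalizer

normalizer = PercentileNormalizer(
    lower_percentile=0.1,
    upper_percentile=99.9,
    lower_limit=0,
    upper_limit=1,
)

# synthetic stand-in for an MRI volume
image = np.random.default_rng(seed=0).normal(loc=100.0, scale=20.0, size=(64, 64, 64))
normalized = normalizer.normalize(image)
print(normalized.min(), normalized.max())  # clipped to approximately [0, 1]
```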
/brainles_preprocessing/normalization/windowing_normalizer.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from .normalizer_base import Normalizer
3 |
4 |
5 | class WindowingNormalizer(Normalizer):
6 | """
7 | Normalizer subclass for windowing-based image normalization.
8 | """
9 |
10 | def __init__(self, center, width):
11 | """
12 | Initialize the WindowingNormalizer.
13 |
14 | Parameters:
15 | center (float): The window center.
16 | width (float): The window width.
17 | """
18 | super().__init__()
19 | self.center = center
20 | self.width = width
21 |
22 | def normalize(self, image):
23 | """
24 | Normalize the input image using windowing.
25 |
26 | Parameters:
27 | image (numpy.ndarray): The input image.
28 |
29 | Returns:
30 | numpy.ndarray: The windowed normalized image.
31 | """
32 | min_value = self.center - self.width / 2
33 | max_value = self.center + self.width / 2
34 | windowed_image = np.clip(image, min_value, max_value)
35 | return windowed_image
36 |
--------------------------------------------------------------------------------
/brainles_preprocessing/registration/ANTs/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BrainLesion/preprocessing/960edb333f9f28d14e583f9a1ffbfaff8785b288/brainles_preprocessing/registration/ANTs/__init__.py
--------------------------------------------------------------------------------
/brainles_preprocessing/registration/__init__.py:
--------------------------------------------------------------------------------
1 | import warnings
2 |
3 |
4 | try:
5 | from .ANTs.ANTs import ANTsRegistrator
6 | except ImportError:
7 | warnings.warn(
8 | "ANTS package not found. If you want to use it, please install it using 'pip install antspyx'"
9 | )
10 |
11 |
12 | from .niftyreg.niftyreg import NiftyRegRegistrator
13 |
14 |
15 | try:
16 | from .elastix.elastix import ElastixRegistrator
17 | except ImportError:
18 | warnings.warn(
19 | "itk-elastix package not found. If you want to use it, please install it using 'pip install brainles_preprocessing[itk-elastix]'"
20 | )
21 |
22 | try:
23 | from .greedy.greedy import GreedyRegistrator
24 | except ImportError:
25 | warnings.warn(
26 | "picsl_greedy package not found. If you want to use it, please install it using 'pip install brainles_preprocessing[picsl_greedy]'"
27 | )
28 |
--------------------------------------------------------------------------------
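Because ANTs, elastix and greedy are optional dependencies, the corresponding registrator names may be missing from the package namespace; a small sketch that prefers ANTs and falls back to the always-available NiftyReg backend:

```python
# prefer ANTs if antspyx is installed, otherwise fall back to NiftyReg
try:
    from brainles_preprocessing.registration import ANTsRegistrator as DefaultRegistrator
except ImportError:
    from brainles_preprocessing.registration import NiftyRegRegistrator as DefaultRegistrator

registrator = DefaultRegistrator()
```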
/brainles_preprocessing/registration/elastix/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BrainLesion/preprocessing/960edb333f9f28d14e583f9a1ffbfaff8785b288/brainles_preprocessing/registration/elastix/__init__.py
--------------------------------------------------------------------------------
/brainles_preprocessing/registration/elastix/elastix.py:
--------------------------------------------------------------------------------
1 | # TODO add typing and docs
2 | from typing import Optional
3 | import os
4 |
5 | import itk
6 |
7 | from brainles_preprocessing.registration.registrator import Registrator
8 | from brainles_preprocessing.utils import check_and_add_suffix
9 |
10 |
11 | class ElastixRegistrator(Registrator):
12 | def __init__(
13 | self,
14 | ):
15 | pass
16 |
17 | def register(
18 | self,
19 | fixed_image_path: str,
20 | moving_image_path: str,
21 | transformed_image_path: str,
22 | matrix_path: str,
23 | log_file_path: Optional[str] = None,
24 | parameter_object: Optional[itk.ParameterObject] = None,
25 | ) -> None:
26 | """
27 | Register images using elastix.
28 |
29 | Args:
30 | fixed_image_path (str): Path to the fixed image.
31 | moving_image_path (str): Path to the moving image.
32 | transformed_image_path (str): Path to the transformed image (output).
33 | matrix_path (str): Path to the transformation matrix (output). This gets overwritten if it already exists.
34 | log_file_path (Optional[str]): Path to the log file.
35 | parameter_object (Optional[itk.ParameterObject]): The parameter object for elastix registration.
36 | """
37 | # initialize parameter object
38 | if parameter_object is None:
39 | parameter_object = self.__initialize_parameter_object()
40 | # add .txt suffix to the matrix path if it doesn't have any extension
41 | matrix_path = check_and_add_suffix(matrix_path, ".txt")
42 |
43 | # read images as itk images
44 | fixed_image = itk.imread(fixed_image_path)
45 | moving_image = itk.imread(moving_image_path)
46 |
47 | if log_file_path is not None:
48 | # split log_file_path
49 | log_path, log_file = os.path.split(log_file_path)
50 | result_image, result_transform_params = itk.elastix_registration_method(
51 | fixed_image,
52 | moving_image,
53 | parameter_object=parameter_object,
54 | log_to_file=True,
55 | log_file_name=log_file,
56 | output_directory=log_path,
57 | )
58 | else:
59 | result_image, result_transform_params = itk.elastix_registration_method(
60 | fixed_image,
61 | moving_image,
62 | parameter_object=parameter_object,
63 | log_to_console=True,
64 | )
65 |
66 | itk.imwrite(result_image, transformed_image_path)
67 |
68 | if not os.path.exists(matrix_path):
69 | result_transform_params.WriteParameterFile(
70 | result_transform_params.GetParameterMap(0),
71 | matrix_path,
72 | )
73 |
74 | def transform(
75 | self,
76 | fixed_image_path: str,
77 | moving_image_path: str,
78 | transformed_image_path: str,
79 | matrix_path: str,
80 | log_file_path: Optional[str] = None,
81 | ) -> None:
82 | """
83 | Apply a transformation using elastix.
84 |
85 | Args:
86 | fixed_image_path (str): Path to the fixed image.
87 | moving_image_path (str): Path to the moving image.
88 | transformed_image_path (str): Path to the transformed image (output).
89 | matrix_path (str): Path to the transformation matrix (output). This gets overwritten if it already exists.
90 | log_file_path (Optional[str]): Path to the log file.
91 | """
92 | parameter_object = self.__initialize_parameter_object()
93 |
94 | # check if the matrix file exists
95 | if os.path.exists(matrix_path):
96 | parameter_object.SetParameter(
97 | 0, "InitialTransformParametersFileName", matrix_path
98 | )
99 |
100 | self.register(
101 | fixed_image_path,
102 | moving_image_path,
103 | transformed_image_path,
104 | matrix_path,
105 | log_file_path,
106 | parameter_object,
107 | )
108 |
109 | def __initialize_parameter_object(self) -> itk.ParameterObject:
110 | """
111 | Initialize the parameter object for elastix registration.
112 |
113 | Returns:
114 | itk.ParameterObject: The parameter object for registration.
115 | """
116 | parameter_object = itk.ParameterObject.New()
117 | default_rigid_parameter_map = parameter_object.GetDefaultParameterMap("rigid")
118 | parameter_object.AddParameterMap(default_rigid_parameter_map)
119 | return parameter_object
120 |
--------------------------------------------------------------------------------
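Example usage (a minimal sketch, not part of the repository): the ElastixRegistrator above runs a rigid registration with the default parameter map. The snippet assumes the itk-elastix Python bindings are installed and imports the class from the module file directly (the package __init__ may also re-export it); all file names are hypothetical placeholders.

from brainles_preprocessing.registration.elastix.elastix import ElastixRegistrator

registrator = ElastixRegistrator()
registrator.register(
    fixed_image_path="t1c.nii.gz",
    moving_image_path="t1.nii.gz",
    transformed_image_path="t1_registered.nii.gz",
    matrix_path="t1_to_t1c",  # ".txt" is appended if the path has no extension
    log_file_path="./elastix.log",  # log file is written into the current directory
)
--------------------------------------------------------------------------------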
/brainles_preprocessing/registration/greedy/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BrainLesion/preprocessing/960edb333f9f28d14e583f9a1ffbfaff8785b288/brainles_preprocessing/registration/greedy/__init__.py
--------------------------------------------------------------------------------
/brainles_preprocessing/registration/greedy/greedy.py:
--------------------------------------------------------------------------------
1 | # TODO add typing and docs
2 | from typing import Optional
3 | import contextlib
4 | import os
5 |
6 | from picsl_greedy import Greedy3D
7 |
8 | from brainles_preprocessing.registration.registrator import Registrator
9 | from brainles_preprocessing.utils import check_and_add_suffix
10 |
11 |
12 | class GreedyRegistrator(Registrator):
13 | def __init__(
14 | self,
15 | ):
16 | pass
17 |
18 | def register(
19 | self,
20 | fixed_image_path: str,
21 | moving_image_path: str,
22 | transformed_image_path: str,
23 | matrix_path: str,
24 | log_file_path: Optional[str] = None,
25 | ) -> None:
26 | """
27 | Register images using greedy. Ref: https://pypi.org/project/picsl-greedy/ and https://greedy.readthedocs.io/en/latest/reference.html#greedy-usage
28 |
29 | Args:
30 | fixed_image_path (str): Path to the fixed image.
31 | moving_image_path (str): Path to the moving image.
32 | transformed_image_path (str): Path to the transformed image (output).
33 | matrix_path (str): Path to the transformation matrix (output). This gets overwritten if it already exists.
34 | log_file_path (Optional[str]): Path to the log file. If provided, greedy's console output is appended to it.
35 | """
36 | # add .mat suffix to the matrix path if it doesn't have any extension
37 | matrix_path = check_and_add_suffix(matrix_path, ".mat")
38 |
39 | registrator = Greedy3D()
40 | # these parameters are taken from the original BraTS Pipeline [https://github.com/CBICA/CaPTk/blob/master/src/applications/BraTSPipeline.cxx]
41 | command_to_run = f"-i {fixed_image_path} {moving_image_path} -o {matrix_path} -a -dof 6 -m NMI -n 100x50x5 -ia-image-centers"
42 |
43 | if log_file_path is not None:
44 | with open(log_file_path, "a+") as f:
45 | with contextlib.redirect_stdout(f):
46 | registrator.execute(command_to_run)
47 | else:
48 | registrator.execute(command_to_run)
49 |
50 | self.transform(
51 | fixed_image_path, moving_image_path, transformed_image_path, matrix_path
52 | )
53 |
54 | def transform(
55 | self,
56 | fixed_image_path: str,
57 | moving_image_path: str,
58 | transformed_image_path: str,
59 | matrix_path: str,
60 | interpolator: Optional[str] = "LINEAR",
61 | log_file_path: Optional[str] = None,
62 | ) -> None:
63 | """
64 | Apply a transformation using greedy.
65 |
66 | Args:
67 | fixed_image_path (str): Path to the fixed image.
68 | moving_image_path (str): Path to the moving image.
69 | transformed_image_path (str): Path to the transformed image (output).
70 | matrix_path (str): Path to the transformation matrix. If it does not exist yet, register() is called first to create it.
71 | interpolator (Optional[str]): The interpolator to use; one of NN, LINEAR or LABEL.
72 | log_file_path (Optional[str]): Path to the log file. If provided, greedy's console output is appended to it.
73 | """
74 | registrator = Greedy3D()
75 | interpolator_upper = interpolator.upper()
76 | if "LABEL" in interpolator_upper:
77 | interpolator_upper += " 0.3vox"
78 |
79 | matrix_path = check_and_add_suffix(matrix_path, ".mat")
80 |
81 | if not os.path.exists(matrix_path):
82 | self.register(
83 | fixed_image_path,
84 | moving_image_path,
85 | transformed_image_path,
86 | matrix_path,
87 | log_file_path,
88 | )
89 |
90 | command_to_run = f"-rf {fixed_image_path} -rm {moving_image_path} {transformed_image_path} -r {matrix_path} -ri {interpolator_upper}"
91 | if log_file_path is not None:
92 | with open(log_file_path, "a+") as f:
93 | with contextlib.redirect_stdout(f):
94 | registrator.execute(command_to_run)
95 | else:
96 | registrator.execute(command_to_run)
97 |
--------------------------------------------------------------------------------
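Example usage (a minimal sketch, not part of the repository): the GreedyRegistrator above first estimates a rigid (6 DOF) transform and then re-applies the stored matrix, e.g. to a segmentation mask with label interpolation. The snippet assumes picsl_greedy is installed (see the [picsl_greedy] extra mentioned earlier); all file names are hypothetical placeholders.

from brainles_preprocessing.registration.greedy.greedy import GreedyRegistrator

registrator = GreedyRegistrator()
registrator.register(
    fixed_image_path="atlas_t1.nii.gz",
    moving_image_path="subject_t1.nii.gz",
    transformed_image_path="subject_t1_reg.nii.gz",
    matrix_path="subject_to_atlas",  # ".mat" is appended if the path has no extension
    log_file_path="greedy.log",
)
# reuse the matrix for a mask; transform() adds the label-smoothing kernel itself
registrator.transform(
    fixed_image_path="atlas_t1.nii.gz",
    moving_image_path="subject_mask.nii.gz",
    transformed_image_path="subject_mask_reg.nii.gz",
    matrix_path="subject_to_atlas",
    interpolator="LABEL",
)
--------------------------------------------------------------------------------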
/brainles_preprocessing/registration/niftyreg/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BrainLesion/preprocessing/960edb333f9f28d14e583f9a1ffbfaff8785b288/brainles_preprocessing/registration/niftyreg/__init__.py
--------------------------------------------------------------------------------
/brainles_preprocessing/registration/niftyreg/niftyreg.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | from auxiliary.runscript import ScriptRunner
4 | from auxiliary.turbopath import turbopath
5 |
6 | from brainles_preprocessing.registration.registrator import Registrator
7 |
8 | # from auxiliary import ScriptRunner
9 |
10 |
11 | class NiftyRegRegistrator(Registrator):
12 | def __init__(
13 | self,
14 | registration_abspath: str = os.path.dirname(os.path.abspath(__file__)),
15 | registration_script: str | None = None,
16 | transformation_script: str | None = None,
17 | ):
18 | """
19 | Initialize the NiftyRegRegistrator.
20 |
21 | Args:
22 | registration_abspath (str): Absolute path to the registration directory.
23 | registration_script (str, optional): Path to the registration script. If None, a default script will be used.
24 | transformation_script (str, optional): Path to the transformation script. If None, a default script will be used.
25 | """
26 | # Set default registration script
27 | if registration_script is None:
28 | self.registration_script = os.path.join(
29 | registration_abspath, "niftyreg_scripts", "rigid_reg.sh"
30 | )
31 | else:
32 | self.registration_script = registration_script
33 |
34 | # Set default transformation script
35 | if transformation_script is None:
36 | self.transformation_script = os.path.join(
37 | registration_abspath, "niftyreg_scripts", "transform.sh"
38 | )
39 | else:
40 | self.transformation_script = transformation_script
41 |
42 | def register(
43 | self,
44 | fixed_image_path: str,
45 | moving_image_path: str,
46 | transformed_image_path: str,
47 | matrix_path: str,
48 | log_file_path: str,
49 | ) -> None:
50 | """
51 | Register images using NiftyReg.
52 |
53 | Args:
54 | fixed_image_path (str): Path to the fixed image.
55 | moving_image_path (str): Path to the moving image.
56 | transformed_image_path (str): Path to the transformed image (output).
57 | matrix_path (str): Path to the transformation matrix (output).
58 | log_file_path (str): Path to the log file.
59 | """
60 | runner = ScriptRunner(
61 | script_path=self.registration_script,
62 | log_path=log_file_path,
63 | )
64 |
65 | niftyreg_executable = str(
66 | turbopath(__file__).parent + "/niftyreg_scripts/reg_aladin",
67 | )
68 |
69 | matrix_path = turbopath(matrix_path)
70 | if matrix_path.suffix != ".txt":
71 | matrix_path = matrix_path.with_suffix(".txt")
72 |
73 | input_params = [
74 | turbopath(niftyreg_executable),
75 | turbopath(fixed_image_path),
76 | turbopath(moving_image_path),
77 | turbopath(transformed_image_path),
78 | turbopath(matrix_path),
79 | ]
80 |
81 | # Call the run method to execute the script and capture the output in the log file
82 | success, error = runner.run(input_params)
83 |
84 | # if success:
85 | # print("Script executed successfully. Check the log file for details.")
86 | # else:
87 | # print("Script execution failed:", error)
88 |
89 | def transform(
90 | self,
91 | fixed_image_path: str,
92 | moving_image_path: str,
93 | transformed_image_path: str,
94 | matrix_path: str,
95 | log_file_path: str,
96 | ) -> None:
97 | """
98 | Apply a transformation using NiftyReg.
99 |
100 | Args:
101 | fixed_image_path (str): Path to the fixed image.
102 | moving_image_path (str): Path to the moving image.
103 | transformed_image_path (str): Path to the transformed image (output).
104 | matrix_path (str): Path to the transformation matrix.
105 | log_file_path (str): Path to the log file.
106 | """
107 | runner = ScriptRunner(
108 | script_path=self.transformation_script,
109 | log_path=log_file_path,
110 | )
111 |
112 | niftyreg_executable = str(
113 | turbopath(__file__).parent + "/niftyreg_scripts/reg_resample",
114 | )
115 |
116 | matrix_path = turbopath(matrix_path)
117 | if matrix_path.suffix != ".txt":
118 | matrix_path = matrix_path.with_suffix(".txt")
119 |
120 | input_params = [
121 | turbopath(niftyreg_executable),
122 | turbopath(fixed_image_path),
123 | turbopath(moving_image_path),
124 | turbopath(transformed_image_path),
125 | turbopath(matrix_path),
126 | # the matrix path carries a .txt extension, the format NiftyReg expects for matrices
127 | ]
128 |
129 | # Call the run method to execute the script and capture the output in the log file
130 | success, error = runner.run(input_params)
131 |
132 | # if success:
133 | # print("Script executed successfully. Check the log file for details.")
134 | # else:
135 | # print("Script execution failed:", error)
136 |
--------------------------------------------------------------------------------
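Example usage (a minimal sketch, not part of the repository): the NiftyRegRegistrator above shells out to the bundled rigid_reg.sh / transform.sh scripts and the prebuilt reg_aladin / reg_resample executables, so it only runs on platforms those binaries were built for. All file names are hypothetical placeholders.

from brainles_preprocessing.registration.niftyreg.niftyreg import NiftyRegRegistrator

registrator = NiftyRegRegistrator()
registrator.register(
    fixed_image_path="t1c.nii.gz",
    moving_image_path="flair.nii.gz",
    transformed_image_path="flair_registered.nii.gz",
    matrix_path="flair_to_t1c.txt",  # coerced to ".txt" if a different suffix is given
    log_file_path="niftyreg_register.log",
)
# the same matrix can be re-applied to another image in the moving image's space
registrator.transform(
    fixed_image_path="t1c.nii.gz",
    moving_image_path="flair_mask.nii.gz",
    transformed_image_path="flair_mask_registered.nii.gz",
    matrix_path="flair_to_t1c.txt",
    log_file_path="niftyreg_transform.log",
)
--------------------------------------------------------------------------------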
/brainles_preprocessing/registration/niftyreg/niftyreg_scripts/niftyreg_1.5.68/bin/groupwise_niftyreg_params.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 |
3 | ############################################################################
4 | ###################### PARAMETERS THAT CAN BE CHANGED ######################
5 | ############################################################################
6 | # Array that contains the input images to create the atlas
7 | export IMG_INPUT=(`ls /path/to/all/your/images_*.nii`)
8 | export IMG_INPUT_MASK= # leave empty to not use floating masks
9 |
10 | # template image to use to initialise the atlas creation
11 | export TEMPLATE=`ls ${IMG_INPUT[0]}`
12 | export TEMPLATE_MASK= # leave empty to not use a reference mask
13 |
14 | # folder where the result images will be saved
15 | export RES_FOLDER=`pwd`/groupwise_result
16 |
17 | # argument to use for the affine (reg_aladin)
18 | export AFFINE_args="-omp 4"
19 | # argument to use for the non-rigid registration (reg_f3d)
20 | export NRR_args="-omp 4"
21 |
22 | # number of affine loop to perform - Note that the first step is always rigid
23 | export AFF_IT_NUM=5
24 | # number of non-rigid loop to perform
25 | export NRR_IT_NUM=10
26 |
27 | # grid engine arguments
28 | export QSUB_CMD="qsub -l h_rt=05:00:00 -l tmem=0.9G -l h_vmem=0.9G -l vf=0.9G -l s_stack=10240 -j y -S /bin/csh -b y -cwd -V -R y -pe smp 4"
29 | ############################################################################
30 |
--------------------------------------------------------------------------------
/brainles_preprocessing/registration/niftyreg/niftyreg_scripts/niftyreg_1.5.68/bin/reg_aladin:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BrainLesion/preprocessing/960edb333f9f28d14e583f9a1ffbfaff8785b288/brainles_preprocessing/registration/niftyreg/niftyreg_scripts/niftyreg_1.5.68/bin/reg_aladin
--------------------------------------------------------------------------------
/brainles_preprocessing/registration/niftyreg/niftyreg_scripts/niftyreg_1.5.68/bin/reg_average:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BrainLesion/preprocessing/960edb333f9f28d14e583f9a1ffbfaff8785b288/brainles_preprocessing/registration/niftyreg/niftyreg_scripts/niftyreg_1.5.68/bin/reg_average
--------------------------------------------------------------------------------
/brainles_preprocessing/registration/niftyreg/niftyreg_scripts/niftyreg_1.5.68/bin/reg_f3d:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BrainLesion/preprocessing/960edb333f9f28d14e583f9a1ffbfaff8785b288/brainles_preprocessing/registration/niftyreg/niftyreg_scripts/niftyreg_1.5.68/bin/reg_f3d
--------------------------------------------------------------------------------
/brainles_preprocessing/registration/niftyreg/niftyreg_scripts/niftyreg_1.5.68/bin/reg_jacobian:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BrainLesion/preprocessing/960edb333f9f28d14e583f9a1ffbfaff8785b288/brainles_preprocessing/registration/niftyreg/niftyreg_scripts/niftyreg_1.5.68/bin/reg_jacobian
--------------------------------------------------------------------------------
/brainles_preprocessing/registration/niftyreg/niftyreg_scripts/niftyreg_1.5.68/bin/reg_measure:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BrainLesion/preprocessing/960edb333f9f28d14e583f9a1ffbfaff8785b288/brainles_preprocessing/registration/niftyreg/niftyreg_scripts/niftyreg_1.5.68/bin/reg_measure
--------------------------------------------------------------------------------
/brainles_preprocessing/registration/niftyreg/niftyreg_scripts/niftyreg_1.5.68/bin/reg_resample:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BrainLesion/preprocessing/960edb333f9f28d14e583f9a1ffbfaff8785b288/brainles_preprocessing/registration/niftyreg/niftyreg_scripts/niftyreg_1.5.68/bin/reg_resample
--------------------------------------------------------------------------------
/brainles_preprocessing/registration/niftyreg/niftyreg_scripts/niftyreg_1.5.68/bin/reg_tools:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BrainLesion/preprocessing/960edb333f9f28d14e583f9a1ffbfaff8785b288/brainles_preprocessing/registration/niftyreg/niftyreg_scripts/niftyreg_1.5.68/bin/reg_tools
--------------------------------------------------------------------------------
/brainles_preprocessing/registration/niftyreg/niftyreg_scripts/niftyreg_1.5.68/bin/reg_transform:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BrainLesion/preprocessing/960edb333f9f28d14e583f9a1ffbfaff8785b288/brainles_preprocessing/registration/niftyreg/niftyreg_scripts/niftyreg_1.5.68/bin/reg_transform
--------------------------------------------------------------------------------
/brainles_preprocessing/registration/niftyreg/niftyreg_scripts/niftyreg_1.5.68/include/AffineDeformationFieldKernel.h:
--------------------------------------------------------------------------------
1 | #ifndef AFFINEDEFORMATIONFIELDKERNEL_H
2 | #define AFFINEDEFORMATIONFIELDKERNEL_H
3 |
4 | #include "Kernel.h"
5 |
6 | class AffineDeformationFieldKernel : public Kernel {
7 | public:
8 | static std::string getName() {
9 | return "AffineDeformationFieldKernel";
10 | }
11 |
12 | AffineDeformationFieldKernel( std::string name) : Kernel(name) {
13 | }
14 |
15 | virtual ~AffineDeformationFieldKernel(){}
16 | virtual void calculate(bool compose = false) = 0;
17 | };
18 |
19 | #endif // AFFINEDEFORMATIONFIELDKERNEL_H
20 |
--------------------------------------------------------------------------------
/brainles_preprocessing/registration/niftyreg/niftyreg_scripts/niftyreg_1.5.68/include/AladinContent.h:
--------------------------------------------------------------------------------
1 | #ifndef ALADINCONTENT_H_
2 | #define ALADINCONTENT_H_
3 |
4 | #include
5 | #include
6 | #include