├── .gitignore
├── Dockerfile
├── Dockerfile.neuro
├── LICENSE
├── README.md
├── data
└── .gitkeep
├── dev-bash.sh
├── dev-clean-crash.sh
├── docs
├── CSF@map.png
├── GM@map.png
├── WM@map.png
├── graph_detailed.png
└── img.png
├── output
└── .gitkeep
├── requirements.txt
├── src
├── node
│ ├── __init__.py
│ ├── acpc_detect
│ │ ├── __init__.py
│ │ ├── interface.py
│ │ └── utils.py
│ ├── bias_field_correction
│ │ ├── __init__.py
│ │ └── interface.py
│ ├── draw_segmentation
│ │ ├── __init__.py
│ │ └── interface.py
│ ├── enhancement
│ │ ├── __init__.py
│ │ ├── interface.py
│ │ └── utils.py
│ ├── final_output
│ │ ├── __init__.py
│ │ └── interface.py
│ ├── orient2std
│ │ ├── __init__.py
│ │ └── interface.py
│ ├── registration
│ │ ├── __init__.py
│ │ └── interface.py
│ ├── segmentation
│ │ ├── __init__.py
│ │ ├── interface.py
│ │ └── utils.py
│ ├── skull_stripping
│ │ ├── __init__.py
│ │ └── interface.py
│ └── zip
│ │ ├── __init__.py
│ │ └── interface.py
├── utils
│ ├── __init__.py
│ ├── convert_dot_to_png.py
│ ├── draw_segmentation.py
│ ├── load_nii.py
│ ├── save_nii.py
│ └── save_nii_as_png.py
└── workflow.py
└── utils
├── acpcdetect_v2.1_LinuxCentOS6.7
├── PILbrain.nii
├── T1acpc.mdl
├── bin
│ └── acpcdetect
├── doc
│ └── acpcdetect_documentation.pdf
└── orion.mdl
└── atra1.0_LinuxCentOS6.7
├── PILbrain.nii
├── T1acpc.mdl
├── bin
└── atra
└── orion.mdl
/.gitignore:
--------------------------------------------------------------------------------
1 | src/graph*
2 | src/preprocess-workflow
3 | src/preprocess_workflow
4 |
5 | src/crash*
6 | src/.Trash*
7 | src/.ipynb_checkpoints
8 |
9 | .vscode
10 |
11 | __pycache__
12 |
13 | .DS_Store
14 |
15 | desktop.ini
16 |
17 | data/ADNI
18 | data/ADNI-*
19 | data/*ADNI-*.*
20 | data/099_S_4205*
21 |
22 | output
23 |
24 | utils/*/example*/
25 |
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
FROM neuro:latest

# Graphviz is required by nipype to render the workflow graphs.
# Remove the apt lists afterwards to keep the image small.
RUN apt-get update && apt-get install -y -qq graphviz && rm -rf /var/lib/apt/lists/*

# COPY is preferred over ADD for plain local files (no archive/URL handling).
COPY ./requirements.txt /requirements.txt
RUN pip install --no-cache-dir -r /requirements.txt
8 |
--------------------------------------------------------------------------------
/Dockerfile.neuro:
--------------------------------------------------------------------------------
1 | # Generated by Neurodocker and Reproenv.
2 |
3 | FROM neurodebian:bullseye
4 | ENV FSLDIR="/opt/fsl-6.0.5.1" \
5 | PATH="/opt/fsl-6.0.5.1/bin:$PATH" \
6 | FSLOUTPUTTYPE="NIFTI_GZ" \
7 | FSLMULTIFILEQUIT="TRUE" \
8 | FSLTCLSH="/opt/fsl-6.0.5.1/bin/fsltclsh" \
9 | FSLWISH="/opt/fsl-6.0.5.1/bin/fslwish" \
10 | FSLLOCKDIR="" \
11 | FSLMACHINELIST="" \
12 | FSLREMOTECALL="" \
13 | FSLGECUDAQ="cuda.q"
14 | RUN apt-get update -qq \
15 | && apt-get install -y -q --no-install-recommends \
16 | bc \
17 | ca-certificates \
18 | curl \
19 | dc \
20 | file \
21 | libfontconfig1 \
22 | libfreetype6 \
23 | libgl1-mesa-dev \
24 | libgl1-mesa-dri \
25 | libglu1-mesa-dev \
26 | libgomp1 \
27 | libice6 \
28 | libopenblas-base \
29 | libxcursor1 \
30 | libxft2 \
31 | libxinerama1 \
32 | libxrandr2 \
33 | libxrender1 \
34 | libxt6 \
35 | nano \
36 | sudo \
37 | wget \
38 | && rm -rf /var/lib/apt/lists/* \
39 | && echo "Downloading FSL ..." \
40 | && mkdir -p /opt/fsl-6.0.5.1 \
41 | && curl -fL https://fsl.fmrib.ox.ac.uk/fsldownloads/fsl-6.0.5.1-centos7_64.tar.gz \
42 | | tar -xz -C /opt/fsl-6.0.5.1 --strip-components 1 \
43 | && echo "Installing FSL conda environment ..." \
44 | && bash /opt/fsl-6.0.5.1/etc/fslconf/fslpython_install.sh -f /opt/fsl-6.0.5.1
45 | ENV ANTSPATH="/opt/ants-2.4.1/" \
46 | PATH="/opt/ants-2.4.1:$PATH"
47 | RUN apt-get update -qq \
48 | && apt-get install -y -q --no-install-recommends \
49 | ca-certificates \
50 | curl \
51 | unzip \
52 | && rm -rf /var/lib/apt/lists/* \
53 | && echo "Downloading ANTs ..." \
54 | && curl -fsSL -o ants.zip https://github.com/ANTsX/ANTs/releases/download/v2.4.1/ants-2.4.1-centos7-X64-gcc.zip \
55 | && unzip ants.zip -d /opt \
56 | && mv /opt/ants-2.4.1/bin/* /opt/ants-2.4.1 \
57 | && rm ants.zip
58 | ENV CONDA_DIR="/opt/miniconda-latest" \
59 | PATH="/opt/miniconda-latest/bin:$PATH"
60 | RUN apt-get update -qq \
61 | && apt-get install -y -q --no-install-recommends \
62 | bzip2 \
63 | ca-certificates \
64 | curl \
65 | && rm -rf /var/lib/apt/lists/* \
66 | # Install dependencies.
67 | && export PATH="/opt/miniconda-latest/bin:$PATH" \
68 | && echo "Downloading Miniconda installer ..." \
69 | && conda_installer="/tmp/miniconda.sh" \
70 | && curl -fsSL -o "$conda_installer" https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh \
71 | && bash "$conda_installer" -b -p /opt/miniconda-latest \
72 | && rm -f "$conda_installer" \
73 | && conda update -yq -nbase conda \
74 | # Prefer packages in conda-forge
75 | && conda config --system --prepend channels conda-forge \
76 | # Packages in lower-priority channels not considered if a package with the same
77 | # name exists in a higher priority channel. Can dramatically speed up installations.
78 | # Conda recommends this as a default
79 | # https://docs.conda.io/projects/conda/en/latest/user-guide/tasks/manage-channels.html
80 | && conda config --set channel_priority strict \
81 | && conda config --system --set auto_update_conda false \
82 | && conda config --system --set show_channel_urls true \
83 | # Enable `conda activate`
84 | && conda init bash \
85 | && conda install -y --name base \
86 | "nipype" \
87 | "notebook" \
88 | # Clean up
89 | && sync && conda clean --all --yes && sync \
90 | && rm -rf ~/.cache/pip/*
91 |
92 | # Save specification to JSON.
93 | RUN printf '{ \
94 | "pkg_manager": "apt", \
95 | "existing_users": [ \
96 | "root" \
97 | ], \
98 | "instructions": [ \
99 | { \
100 | "name": "from_", \
101 | "kwds": { \
102 | "base_image": "neurodebian:bullseye" \
103 | } \
104 | }, \
105 | { \
106 | "name": "env", \
107 | "kwds": { \
108 | "FSLDIR": "/opt/fsl-6.0.5.1", \
109 | "PATH": "/opt/fsl-6.0.5.1/bin:$PATH", \
110 | "FSLOUTPUTTYPE": "NIFTI_GZ", \
111 | "FSLMULTIFILEQUIT": "TRUE", \
112 | "FSLTCLSH": "/opt/fsl-6.0.5.1/bin/fsltclsh", \
113 | "FSLWISH": "/opt/fsl-6.0.5.1/bin/fslwish", \
114 | "FSLLOCKDIR": "", \
115 | "FSLMACHINELIST": "", \
116 | "FSLREMOTECALL": "", \
117 | "FSLGECUDAQ": "cuda.q" \
118 | } \
119 | }, \
120 | { \
121 | "name": "run", \
122 | "kwds": { \
123 | "command": "apt-get update -qq\\napt-get install -y -q --no-install-recommends \\\\\\n bc \\\\\\n ca-certificates \\\\\\n curl \\\\\\n dc \\\\\\n file \\\\\\n libfontconfig1 \\\\\\n libfreetype6 \\\\\\n libgl1-mesa-dev \\\\\\n libgl1-mesa-dri \\\\\\n libglu1-mesa-dev \\\\\\n libgomp1 \\\\\\n libice6 \\\\\\n libopenblas-base \\\\\\n libxcursor1 \\\\\\n libxft2 \\\\\\n libxinerama1 \\\\\\n libxrandr2 \\\\\\n libxrender1 \\\\\\n libxt6 \\\\\\n nano \\\\\\n sudo \\\\\\n wget\\nrm -rf /var/lib/apt/lists/*\\necho \\"Downloading FSL ...\\"\\nmkdir -p /opt/fsl-6.0.5.1\\ncurl -fL https://fsl.fmrib.ox.ac.uk/fsldownloads/fsl-6.0.5.1-centos7_64.tar.gz \\\\\\n| tar -xz -C /opt/fsl-6.0.5.1 --strip-components 1 \\necho \\"Installing FSL conda environment ...\\"\\nbash /opt/fsl-6.0.5.1/etc/fslconf/fslpython_install.sh -f /opt/fsl-6.0.5.1" \
124 | } \
125 | }, \
126 | { \
127 | "name": "env", \
128 | "kwds": { \
129 | "ANTSPATH": "/opt/ants-2.4.1/", \
130 | "PATH": "/opt/ants-2.4.1:$PATH" \
131 | } \
132 | }, \
133 | { \
134 | "name": "run", \
135 | "kwds": { \
136 | "command": "apt-get update -qq\\napt-get install -y -q --no-install-recommends \\\\\\n ca-certificates \\\\\\n curl \\\\\\n unzip\\nrm -rf /var/lib/apt/lists/*\\necho \\"Downloading ANTs ...\\"\\ncurl -fsSL -o ants.zip https://github.com/ANTsX/ANTs/releases/download/v2.4.1/ants-2.4.1-centos7-X64-gcc.zip\\nunzip ants.zip -d /opt\\nmv /opt/ants-2.4.1/bin/* /opt/ants-2.4.1\\nrm ants.zip" \
137 | } \
138 | }, \
139 | { \
140 | "name": "env", \
141 | "kwds": { \
142 | "CONDA_DIR": "/opt/miniconda-latest", \
143 | "PATH": "/opt/miniconda-latest/bin:$PATH" \
144 | } \
145 | }, \
146 | { \
147 | "name": "run", \
148 | "kwds": { \
149 | "command": "apt-get update -qq\\napt-get install -y -q --no-install-recommends \\\\\\n bzip2 \\\\\\n ca-certificates \\\\\\n curl\\nrm -rf /var/lib/apt/lists/*\\n# Install dependencies.\\nexport PATH=\\"/opt/miniconda-latest/bin:$PATH\\"\\necho \\"Downloading Miniconda installer ...\\"\\nconda_installer=\\"/tmp/miniconda.sh\\"\\ncurl -fsSL -o \\"$conda_installer\\" https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh\\nbash \\"$conda_installer\\" -b -p /opt/miniconda-latest\\nrm -f \\"$conda_installer\\"\\nconda update -yq -nbase conda\\n# Prefer packages in conda-forge\\nconda config --system --prepend channels conda-forge\\n# Packages in lower-priority channels not considered if a package with the same\\n# name exists in a higher priority channel. Can dramatically speed up installations.\\n# Conda recommends this as a default\\n# https://docs.conda.io/projects/conda/en/latest/user-guide/tasks/manage-channels.html\\nconda config --set channel_priority strict\\nconda config --system --set auto_update_conda false\\nconda config --system --set show_channel_urls true\\n# Enable `conda activate`\\nconda init bash\\nconda install -y --name base \\\\\\n \\"nipype\\" \\\\\\n \\"notebook\\"\\n# Clean up\\nsync && conda clean --all --yes && sync\\nrm -rf ~/.cache/pip/*" \
150 | } \
151 | } \
152 | ] \
153 | }' > /.reproenv.json
154 | # End saving to specification to JSON.
155 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2020 Nat
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 | # **Nipype: Preprocessing Pipeline on Brain MR Images**
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 | > Tested on x86/64 Linux-based system.
13 |
14 | The project is used to do preprocessing on brain MR images (`.nii` files).
15 |
16 | The pipeline includes the following features:
17 |
18 | - Dockerize the environment
19 | - Auto AC-PC detection & alignment
20 | - Registration
21 | - Skull Stripping
22 | - Segmentation (using `K-means` to split GM, WM and CSF)
23 | - Visualize the results (using `nilearn`)
24 |
25 | All the features are implemented as `nipype` interfaces and connected in a workflow.
26 |
27 | ## Prerequisites
28 |
29 | In this repo, the dataset is downloaded from [LONI Image Data Archive (IDA)](https://ida.loni.usc.edu/login.jsp).
30 | Collect and download the AD and NC screening samples of ADNI1 and ADNI2, and extract them into this folder.
31 |
32 | You can just place the `.nii` samples in `./data`.
33 |
34 | > If you only have DICOM files, you can use [DICOM to NIfTI Online Converter](https://www.onlineconverter.com/dicom-to-nifti) to convert them into NIfTI format.
35 |
36 | For example, folder `./data` structure is like this:
37 |
38 | ```
39 | ./data
40 | ├── 099_S_4206.nii
41 | └── 099_S_4205.nii
42 |
43 | 0 directories, 2 files
44 | ```
45 |
46 | ## Usage
47 |
48 | 1. Build Neuro Docker image
49 |
50 | ```bash
51 | docker build --tag neuro:latest --file Dockerfile.neuro .
52 | ```
53 |
54 | 2. Build custom Docker image
55 |
56 | > We need to install custom tools in the Docker image.
57 |
58 | ```bash
59 | docker build --tag neuro_custom:latest --file Dockerfile .
60 | ```
61 |
62 | 3. Run the workflow
63 |
64 | ```bash
65 | docker run --rm -it \
66 | --workdir /src \
67 | --volume ./src:/src \
68 | --volume ./utils:/utils \
69 | --volume ./data:/data \
70 | --volume ./output:/output \
71 | --name neuro_workflow \
72 | neuro_custom python workflow.py
73 | ```
74 |
75 | ### Results
76 |
77 | Segmentation results are shown as cover.
78 |
79 | When the workflow runs successfully, the results of each step will be saved in `./output`.
80 |
81 | And the workflow graph will be saved in `./src/graph_detailed.png`.
82 |
83 | 
84 |
85 | ## Utilities
86 |
87 | The `Automatic Registration Toolbox` tools we used are listed below:
88 |
89 | - acpcdetect v2.1 Linux
90 | - ATRA v1.0 Linux
91 |
92 | They are downloaded from [NITRC](https://www.nitrc.org/projects/art) and put in `./utils`.
93 |
94 | ## Reference
95 |
96 | - [quqixun/BrainPrep](https://github.com/quqixun/BrainPrep)
97 | - [nipype](https://nipype.readthedocs.io/en/latest/)
98 | - [nilearn](https://nilearn.github.io/)
99 | - [neurodocker](https://github.com/ReproNim/neurodocker)
100 |
101 | ## Misc
102 |
103 | ### Generate Neuro Dockerfile and Build Docker Image
104 |
105 | 1. Install `neurodocker`
106 | ```bash
107 | pip install neurodocker
108 | ```
109 |
110 | 2. Generate Dockerfile using `neurodocker`
111 | ```bash
112 | neurodocker generate docker \
113 | --pkg-manager apt \
114 | --base-image neurodebian:bullseye \
115 | --fsl version=6.0.5.1 \
116 | --ants version=2.4.1 \
117 | --miniconda version=latest conda_install="nipype" \
118 | > Dockerfile.neuro
119 | ```
120 |
121 | 3. Build the docker image with the generated Dockerfile
122 | ```bash
123 | docker build --tag neuro:latest --file Dockerfile.neuro .
124 | ```
125 |
126 | ## Contributor
127 |
128 |
129 |
130 |
131 |
132 |
133 |
134 |  Nat Lee |
135 |
136 |
137 |
138 |
139 |
140 |
141 |
142 |
143 |
--------------------------------------------------------------------------------
/data/.gitkeep:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NatLee/nipype-preprocess-on-brain-MR/19faf1499f37ca885c18a1d1d4e4eb523f6e9869/data/.gitkeep
--------------------------------------------------------------------------------
/dev-bash.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | docker exec -it neuro bash
3 |
--------------------------------------------------------------------------------
/dev-clean-crash.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Clean files starting with "crash" in ./src directory
4 | find ./src -name "crash-*.pklz" -exec rm -rf {} \;
5 |
--------------------------------------------------------------------------------
/docs/CSF@map.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NatLee/nipype-preprocess-on-brain-MR/19faf1499f37ca885c18a1d1d4e4eb523f6e9869/docs/CSF@map.png
--------------------------------------------------------------------------------
/docs/GM@map.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NatLee/nipype-preprocess-on-brain-MR/19faf1499f37ca885c18a1d1d4e4eb523f6e9869/docs/GM@map.png
--------------------------------------------------------------------------------
/docs/WM@map.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NatLee/nipype-preprocess-on-brain-MR/19faf1499f37ca885c18a1d1d4e4eb523f6e9869/docs/WM@map.png
--------------------------------------------------------------------------------
/docs/graph_detailed.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NatLee/nipype-preprocess-on-brain-MR/19faf1499f37ca885c18a1d1d4e4eb523f6e9869/docs/graph_detailed.png
--------------------------------------------------------------------------------
/docs/img.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NatLee/nipype-preprocess-on-brain-MR/19faf1499f37ca885c18a1d1d4e4eb523f6e9869/docs/img.png
--------------------------------------------------------------------------------
/output/.gitkeep:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NatLee/nipype-preprocess-on-brain-MR/19faf1499f37ca885c18a1d1d4e4eb523f6e9869/output/.gitkeep
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | loguru
2 |
3 | scikit-learn==1.4.0
4 | scikit-image==0.22.0
5 |
6 | nilearn==0.10.3
7 |
--------------------------------------------------------------------------------
/src/node/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NatLee/nipype-preprocess-on-brain-MR/19faf1499f37ca885c18a1d1d4e4eb523f6e9869/src/node/__init__.py
--------------------------------------------------------------------------------
/src/node/acpc_detect/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NatLee/nipype-preprocess-on-brain-MR/19faf1499f37ca885c18a1d1d4e4eb523f6e9869/src/node/acpc_detect/__init__.py
--------------------------------------------------------------------------------
/src/node/acpc_detect/interface.py:
--------------------------------------------------------------------------------
1 | from pathlib import Path
2 |
3 | from nipype.interfaces.base import SimpleInterface
4 | from nipype.interfaces.base import BaseInterfaceInputSpec
5 | from nipype.interfaces.base import TraitedSpec
6 | from nipype.interfaces.base import File
7 | from nipype.interfaces.base import Directory
8 |
9 | from node.acpc_detect.utils import acpc_detect
10 |
11 | from utils.save_nii_as_png import save_nii_as_png
12 |
class ACPCDetectInputSpec(BaseInterfaceInputSpec):
    """Inputs for ACPCDetectInterface."""
    # The NIfTI volume to run AC-PC detection on.
    input_file = File(exists=True, desc='Path to the NIfTI file', mandatory=True)
    # Folder under which the 'acpc' result folder is created.
    # NOTE(review): declared mandatory=False but _run_interface uses it
    # unconditionally — confirm whether this should be mandatory=True.
    output_folder = Directory(exists=True, desc='Path to the output folder', mandatory=False)
16 |
class ACPCDetectOutputSpec(TraitedSpec):
    """Outputs of ACPCDetectInterface."""
    # Fixed descriptions: both traits are files, not directories
    # (the interface sets them to '<stem>_RAS.nii' / '<stem>_RAS.png').
    output_file = File(exists=True, desc='Path to the ACPC-corrected NIfTI file (*_RAS.nii)', mandatory=True)
    output_png_file = File(exists=True, desc='Path to the PNG preview of the ACPC-corrected volume (*_RAS.png)', mandatory=True)
20 |
class ACPCDetectInterface(SimpleInterface):
    """Nipype interface around the `acpcdetect` command-line tool.

    Runs AC-PC detection on the input NIfTI file and exposes the
    RAS-oriented result both as a NIfTI file and as a PNG preview.
    """

    input_spec = ACPCDetectInputSpec
    output_spec = ACPCDetectOutputSpec

    def _run_interface(self, runtime):
        source = Path(self.inputs.input_file)
        # NOTE(review): output_folder is mandatory=False in the input spec
        # but required here — confirm the spec should say mandatory=True.
        destination = Path(self.inputs.output_folder)

        result_folder = acpc_detect(source, destination)

        # acpcdetect produces exactly one '<stem>_RAS.nii' result file.
        nii_result = result_folder / f'{source.stem}_RAS.nii'
        png_result = result_folder / f'{source.stem}_RAS.png'
        save_nii_as_png(
            nii_result,
            png_result
        )

        self._results['output_file'] = nii_result.as_posix()
        self._results['output_png_file'] = png_result.as_posix()
        return runtime

    def _list_outputs(self):
        # SimpleInterface keeps results in self._results.
        return self._results
--------------------------------------------------------------------------------
/src/node/acpc_detect/utils.py:
--------------------------------------------------------------------------------
1 | from pathlib import Path
2 |
3 | import tempfile
4 | import shutil
5 | import subprocess
6 | import os
7 | from loguru import logger
8 |
9 |
10 | # Set ART location
11 | os.environ['ARTHOME'] = '/utils/atra1.0_LinuxCentOS6.7/'
12 |
13 | # ACPC detection executable path
14 | ACPC_DETECT_BIN_PATH = '/utils/acpcdetect_v2.1_LinuxCentOS6.7/bin/acpcdetect'
15 |
16 |
def acpc_detect(nii_file_path: Path, output_path: Path) -> Path:
    """Run the `acpcdetect` tool on a NIfTI file.

    The tool is executed against a copy of the input inside a temporary
    directory (it writes its results next to its input), and everything it
    produces is then moved into ``<output_path>/acpc``, which is emptied
    first.

    :param nii_file_path: Path to the input ``.nii`` file.
    :param output_path: Folder under which the ``acpc`` result folder is created.
    :return: Path to the ``acpc`` folder containing the detection results.
    """
    logger.info('ACPC detection on: {}'.format(nii_file_path))

    # Specify the output folder
    output_folder = output_path / 'acpc'
    logger.info('ACPC Output path: {}'.format(output_folder))

    # Use a temporary directory
    with tempfile.TemporaryDirectory() as temp_dir:
        temp_dir_path = Path(temp_dir)

        # Copy the .nii file to the temporary directory
        temp_nii_file_path = temp_dir_path / nii_file_path.name
        shutil.copy(nii_file_path, temp_nii_file_path)

        # Build the command against the copied .nii file
        command = [
            ACPC_DETECT_BIN_PATH,
            "-no-tilt-correction",
            "-center-AC",
            "-nopng",
            "-noppm",
            "-i", str(temp_nii_file_path),
        ]

        # Discard the tool's output. subprocess.DEVNULL avoids the file
        # descriptor leak of open(os.devnull, "w") that is never closed.
        return_code = subprocess.call(
            command,
            stdout=subprocess.DEVNULL,
            stderr=subprocess.STDOUT,
        )
        if return_code != 0:
            # Surface failures instead of silently producing no output files.
            logger.warning('acpcdetect exited with code {}'.format(return_code))

        # Create the output folder (and empty it if it already exists)
        Path(output_folder).mkdir(parents=True, exist_ok=True)
        for file in output_folder.glob('*'):
            if file.is_file():
                file.unlink()
            if file.is_dir():
                shutil.rmtree(file)

        # Move the output files to the folder
        for file in temp_dir_path.glob('*'):
            shutil.move(file, output_folder)

        # Make the parent folder world-writable. (Note: this only changes the
        # parent directory's mode, not the files inside.)
        (output_folder.parent).chmod(0o777)

    return output_folder
57 |
--------------------------------------------------------------------------------
/src/node/bias_field_correction/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NatLee/nipype-preprocess-on-brain-MR/19faf1499f37ca885c18a1d1d4e4eb523f6e9869/src/node/bias_field_correction/__init__.py
--------------------------------------------------------------------------------
/src/node/bias_field_correction/interface.py:
--------------------------------------------------------------------------------
1 | import shutil
2 | from pathlib import Path
3 |
4 | from loguru import logger
5 | from nipype.interfaces.base import (SimpleInterface, BaseInterfaceInputSpec, TraitedSpec, File, Directory)
6 | from nipype.interfaces.ants import N4BiasFieldCorrection
7 |
class BiasFieldCorrectionInputSpec(BaseInterfaceInputSpec):
    """Inputs for BiasFieldCorrectionInterface."""
    # Image to correct with ANTs N4.
    input_file = File(exists=True, desc='Source image path (.nii.gz)', mandatory=True)
    # Folder under which a 'bias_correction' subfolder is created.
    output_folder = Directory(exists=False, desc='Output folder for the bias corrected image', mandatory=True)
11 |
class BiasFieldCorrectionOutputSpec(TraitedSpec):
    """Outputs of BiasFieldCorrectionInterface."""
    # NOTE(review): this stays unset when N4 raises (the error is only logged).
    output_file = File(exists=True, desc='Path to the bias corrected image')
14 |
class BiasFieldCorrectionInterface(SimpleInterface):
    """Run ANTs N4 bias-field correction (N4ITK) on a NIfTI image."""

    input_spec = BiasFieldCorrectionInputSpec
    output_spec = BiasFieldCorrectionOutputSpec

    def _run_interface(self, runtime):
        source = self.inputs.input_file
        target_folder = Path(self.inputs.output_folder) / 'bias_correction'
        target = target_folder / Path(source).name

        logger.info(f'N4ITK on: {source}')
        logger.info(f'Output: {target}')

        # Start from an existing, empty output directory.
        target_folder.mkdir(parents=True, exist_ok=True)
        for entry in target_folder.glob('*'):
            if entry.is_dir():
                shutil.rmtree(entry)
            elif entry.is_file():
                entry.unlink()

        try:
            correction = N4BiasFieldCorrection()
            correction.inputs.input_image = str(source)
            correction.inputs.output_image = str(target)
            correction.inputs.dimension = 3
            correction.inputs.n_iterations = [100, 100, 60, 40]
            correction.inputs.shrink_factor = 3
            correction.inputs.convergence_threshold = 1e-4
            correction.inputs.bspline_fitting_distance = 300
            correction.run()

            self._results['output_file'] = str(target)

        except RuntimeError as e:
            # Best-effort: log and continue; 'output_file' stays unset.
            logger.warning(f'Failed on: {source} with error: {e}')

        return runtime

    def _list_outputs(self):
        return self._results
58 |
--------------------------------------------------------------------------------
/src/node/draw_segmentation/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NatLee/nipype-preprocess-on-brain-MR/19faf1499f37ca885c18a1d1d4e4eb523f6e9869/src/node/draw_segmentation/__init__.py
--------------------------------------------------------------------------------
/src/node/draw_segmentation/interface.py:
--------------------------------------------------------------------------------
1 | import shutil
2 | from pathlib import Path
3 |
4 | from nipype.interfaces.base import (SimpleInterface, BaseInterfaceInputSpec, TraitedSpec, File, Directory, traits)
5 | from loguru import logger
6 |
7 | from utils.draw_segmentation import draw_segmentation
8 |
class DrawSegmentationInputSpec(BaseInterfaceInputSpec):
    """Inputs for DrawSegmentationInterface."""
    # Anatomical background image for the overlay.
    acpc_input_file = File(exists=True, desc='ACPC aligned Source image path (.nii.gz or .nii)', mandatory=True)
    # Probability map to draw on top of the anatomical image.
    segmented_input_file = File(exists=True, desc='Segmented Source image path (.nii.gz or .nii)', mandatory=True)
    output_folder = Directory(exists=False, desc='Output folder for the segmented image', mandatory=True)
    # Passed through to draw_segmentation as a cut-off — presumably values
    # below it are hidden; confirm in utils/draw_segmentation.py.
    threshold = traits.Float(0.68, usedefault=True, desc='Threshold for the segmentation image (0-1)')
    # Also used as the output PNG file name ('<title>.png').
    title = traits.String('prob-map', usedefault=True, desc='Title for the segmentation image')
15 |
class DrawSegmentationOutputSpec(TraitedSpec):
    """Outputs of DrawSegmentationInterface."""
    output_file = File(exists=True, desc='Path to png image')
18 |
class DrawSegmentationInterface(SimpleInterface):
    """Render a segmentation probability map over an anatomical image as a PNG."""

    input_spec = DrawSegmentationInputSpec
    output_spec = DrawSegmentationOutputSpec

    def _run_interface(self, runtime):
        plot_title = self.inputs.title
        anatomical = self.inputs.acpc_input_file
        segmentation = self.inputs.segmented_input_file

        target_folder = Path(self.inputs.output_folder) / 'draw_segmentation'
        # The title doubles as the PNG file name.
        target_png = target_folder / f'{plot_title}.png'

        logger.info(f'Running draw segmentation for {segmentation} to {target_png}')

        # Make sure the directory exists and the PNG gets regenerated.
        target_folder.mkdir(parents=True, exist_ok=True)
        if target_png.exists():
            target_png.unlink()

        draw_segmentation(
            input_nii_path=anatomical,
            input_seg_nii_path=segmentation,
            output_png_path=target_png.absolute().as_posix(),
            threshold=self.inputs.threshold,
            title=plot_title,
        )

        self._results['output_file'] = target_png.as_posix()

        return runtime

    def _list_outputs(self):
        return self._results
58 |
--------------------------------------------------------------------------------
/src/node/enhancement/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NatLee/nipype-preprocess-on-brain-MR/19faf1499f37ca885c18a1d1d4e4eb523f6e9869/src/node/enhancement/__init__.py
--------------------------------------------------------------------------------
/src/node/enhancement/interface.py:
--------------------------------------------------------------------------------
1 | from pathlib import Path
2 | import shutil
3 |
4 | from nipype.interfaces.base import (SimpleInterface, BaseInterfaceInputSpec, TraitedSpec, File, traits)
5 | from loguru import logger
6 |
7 | from utils.load_nii import load_nii
8 | from utils.save_nii import save_nii
9 |
10 | from node.enhancement.utils import denoise
11 | from node.enhancement.utils import rescale_intensity
12 | from node.enhancement.utils import equalize_hist
13 |
class EnhancementInputSpec(BaseInterfaceInputSpec):
    """Inputs for EnhancementInterface."""
    input_file = File(exists=True, desc='Source image path (.nii.gz)', mandatory=True)
    # Folder under which an 'enhancement' subfolder is created.
    output_folder = traits.Directory(exists=False, desc='Output folder for the enhanced image', mandatory=True)
    kernel_size = traits.Int(1, usedefault=True, desc='Kernel size for denoising. If the size is too large, the image will be blurred')
    # Lower/upper percentiles used when rescaling intensities.
    percentiles = traits.List([0.5, 99.5], usedefault=True, desc='Percentiles for intensity rescaling')
    bins_num = traits.Int(256, usedefault=True, desc='Number of bins for histogram equalization')
    eh = traits.Bool(True, usedefault=True, desc='Enable histogram equalization')
21 |
class EnhancementOutputSpec(TraitedSpec):
    """Outputs of EnhancementInterface."""
    # NOTE(review): this stays unset when enhancement raises (error is only logged).
    output_file = File(exists=True, desc='Path to the enhanced image')
24 |
class EnhancementInterface(SimpleInterface):
    """Denoise, intensity-rescale and (optionally) histogram-equalize a NIfTI image."""

    input_spec = EnhancementInputSpec
    output_spec = EnhancementOutputSpec

    def _run_interface(self, runtime):
        source = self.inputs.input_file
        target_folder = Path(self.inputs.output_folder) / 'enhancement'
        target = target_folder / Path(source).name

        logger.info(f'Preprocess on: {source}')
        logger.info(f'Output: {target}')

        # Start from an existing, empty output directory.
        target_folder.mkdir(parents=True, exist_ok=True)
        for entry in target_folder.glob('*'):
            if entry.is_dir():
                shutil.rmtree(entry)
            elif entry.is_file():
                entry.unlink()

        try:
            # Enhancement chain: denoise -> rescale -> (optional) equalize.
            volume, affine = load_nii(source)
            volume = denoise(volume, self.inputs.kernel_size)
            volume = rescale_intensity(volume, self.inputs.percentiles, self.inputs.bins_num)
            if self.inputs.eh:
                volume = equalize_hist(volume, self.inputs.bins_num)

            save_nii(volume, str(target), affine)
            self._results['output_file'] = str(target)

        except RuntimeError as e:
            # Best-effort: log and continue; 'output_file' stays unset.
            logger.warning(f'Failed on: {source} with error: {e}')

        return runtime

    def _list_outputs(self):
        return self._results
71 |
--------------------------------------------------------------------------------
/src/node/enhancement/utils.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 | from scipy.signal import medfilt
4 |
def denoise(volume: np.ndarray, kernel_size=3):
    """Median-filter the volume to suppress salt-and-pepper noise.

    :param volume: Input array.
    :param kernel_size: Odd window size for scipy's median filter.
    :return: The filtered array, same shape as the input.
    """
    filtered = medfilt(volume, kernel_size)
    return filtered
7 |
def rescale_intensity(volume: np.ndarray, percentils=[0.5, 99.5], bins_num=256) -> np.ndarray:
    """Rescale the intensities of the non-zero (foreground) voxels.

    Intensities are clipped to the given percentiles and mapped to
    [1, bins_num - 1] (zero stays reserved for background), or to [0, 1]
    when ``bins_num`` is 0.

    :param volume: Input volume; zero voxels are treated as background.
    :param percentils: [lower, upper] percentiles used as the clipping range.
    :param bins_num: Number of intensity bins; 0 means normalize to [0, 1].
    :return: A new array with rescaled foreground intensities.
    """
    obj_volume = volume[np.where(volume > 0)]
    # Degenerate case: all-background volume — np.percentile would raise.
    if obj_volume.size == 0:
        return volume

    min_value = np.percentile(obj_volume, percentils[0])
    max_value = np.percentile(obj_volume, percentils[1])
    value_range = max_value - min_value
    # Guard against a flat intensity distribution (division by zero -> NaN).
    if value_range == 0:
        return volume

    if bins_num == 0:
        # Normalize to [0, 1]. (Fixed: astype(np.float32) used to apply to
        # the scalar denominator only, not to the rescaled values.)
        obj_volume = ((obj_volume - min_value) / value_range).astype(np.float32)
    else:
        obj_volume = np.round((obj_volume - min_value) / value_range * (bins_num - 1))
        # Clamp into [1, bins_num - 1]; 0 stays reserved for background.
        obj_volume[np.where(obj_volume < 1)] = 1
        obj_volume[np.where(obj_volume > (bins_num - 1))] = bins_num - 1

    volume = volume.astype(obj_volume.dtype)
    volume[np.where(volume > 0)] = obj_volume

    return volume
24 |
def equalize_hist(volume:np.ndarray, bins_num=256) -> np.ndarray:
    """Histogram-equalize the foreground (positive) voxels of a volume.

    Builds a bins_num-bin histogram of the foreground intensities, maps each
    foreground voxel through the scaled cumulative distribution, and writes the
    result back over the foreground, leaving background voxels untouched.

    Args:
        volume: Input image data; modified in place and also returned.
        bins_num: Number of histogram bins / output grey levels.

    Returns:
        The volume with equalized foreground intensities.
    """
    foreground_mask = np.where(volume > 0)
    foreground = volume[foreground_mask]

    counts, bin_edges = np.histogram(foreground, bins_num)
    cdf = np.cumsum(counts)
    cdf = (bins_num - 1) * cdf / cdf[-1]

    equalized = np.round(np.interp(foreground, bin_edges[:-1], cdf)).astype(foreground.dtype)
    volume[foreground_mask] = equalized
    return volume
35 |
--------------------------------------------------------------------------------
/src/node/final_output/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NatLee/nipype-preprocess-on-brain-MR/19faf1499f37ca885c18a1d1d4e4eb523f6e9869/src/node/final_output/__init__.py
--------------------------------------------------------------------------------
/src/node/final_output/interface.py:
--------------------------------------------------------------------------------
1 | import shutil
2 | from pathlib import Path
3 |
4 | from nipype.interfaces.base import (SimpleInterface, BaseInterfaceInputSpec, TraitedSpec, File, Directory, traits)
5 |
6 | from loguru import logger
7 |
class FinalOutputInputSpec(BaseInterfaceInputSpec):
    # Files produced by upstream nodes; all must already exist on disk.
    acpc_output_file = File(exists=True, desc="ACPC corrected NIfTI file (.nii)", mandatory=True)
    acpc_output_png_file = File(exists=True, desc="ACPC corrected PNG file", mandatory=True)
    gm_png_file = File(exists=True, desc="Gray matter segmentation PNG file", mandatory=True)
    wm_png_file = File(exists=True, desc="White matter segmentation PNG file", mandatory=True)
    csf_png_file = File(exists=True, desc="CSF segmentation PNG file", mandatory=True)
    # Destination root; the node creates a `final_output` subfolder inside it.
    output_folder = Directory(desc="Final output directory", mandatory=True)

class FinalOutputOutputSpec(TraitedSpec):
    # Folder that ends up holding the copied NIfTI and PNG files.
    output_folder = Directory(exists=True, desc="Directory containing organized final outputs")
18 |
class OrganizeFinalOutputInterface(SimpleInterface):
    """Collect the pipeline's end products into a single `final_output` folder."""

    input_spec = FinalOutputInputSpec
    output_spec = FinalOutputOutputSpec

    def _run_interface(self, runtime):
        # Everything is gathered under <output_folder>/final_output.
        output_dir = Path(self.inputs.output_folder) / "final_output"
        output_dir.mkdir(parents=True, exist_ok=True)

        logger.info(f"Organizing final output in {output_dir}")

        # Empty the directory first so stale results never survive a re-run.
        for entry in output_dir.glob("*"):
            if entry.is_dir():
                shutil.rmtree(entry)
            elif entry.is_file():
                entry.unlink()

        # (source path, destination name) for each artifact to publish.
        copies = (
            (self.inputs.acpc_output_file, Path(self.inputs.acpc_output_file).name),
            (self.inputs.acpc_output_png_file, Path(self.inputs.acpc_output_png_file).name),
            (self.inputs.gm_png_file, "GM_Segmentation.png"),
            (self.inputs.wm_png_file, "WM_Segmentation.png"),
            (self.inputs.csf_png_file, "CSF_Segmentation.png"),
        )
        for source, target_name in copies:
            shutil.copy(source, output_dir / target_name)

        self._results['output_folder'] = str(output_dir)
        return runtime

    def _list_outputs(self):
        # Results are whatever _run_interface stored.
        return self._results
--------------------------------------------------------------------------------
/src/node/orient2std/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NatLee/nipype-preprocess-on-brain-MR/19faf1499f37ca885c18a1d1d4e4eb523f6e9869/src/node/orient2std/__init__.py
--------------------------------------------------------------------------------
/src/node/orient2std/interface.py:
--------------------------------------------------------------------------------
1 | import shutil
2 | from pathlib import Path
3 |
4 | from loguru import logger
5 |
6 | from nipype.interfaces.base import SimpleInterface, BaseInterfaceInputSpec, TraitedSpec, File, Directory
7 | from nipype.interfaces.fsl import Reorient2Std
8 |
class Reorient2StdInputSpec(BaseInterfaceInputSpec):
    input_file = File(exists=True, desc='Input file to reorient', mandatory=True)
    # Root output folder; the result is written into an `orient2std` subfolder.
    output_folder = Directory(exists=False, desc='Path to the output folder', mandatory=True)

class Reorient2StdOutputSpec(TraitedSpec):
    output_file = File(exists=True, desc='Reoriented output file')
15 |
class Reorient2StdInterface(SimpleInterface):
    """Wrap FSL's `fslreorient2std` within this pipeline's folder layout.

    The reoriented image is written to `<output_folder>/orient2std/<input name>`.
    """

    input_spec = Reorient2StdInputSpec
    output_spec = Reorient2StdOutputSpec

    def _run_interface(self, runtime):
        input_file = self.inputs.input_file
        output_folder = Path(self.inputs.output_folder)
        output_orient_folder = output_folder / 'orient2std'
        output_file = output_orient_folder / Path(input_file).name

        logger.info(f'Reorienting {input_file} to standard space')
        logger.info(f'Output file: {output_file}')

        # Ensure the output directory exists
        output_orient_folder.mkdir(parents=True, exist_ok=True)

        # Clean the folder so a re-run never mixes old and new results
        for file in output_orient_folder.glob('*'):
            if file.is_file():
                file.unlink()
            if file.is_dir():
                shutil.rmtree(file)

        # Perform the reorientation with FSL
        reorient = Reorient2Std(in_file=input_file, out_file=str(output_file))
        reorient.run()

        self._results['output_file'] = str(output_file)

        return runtime

    def _list_outputs(self):
        # Match every other interface in this package: return the results
        # stored by _run_interface. (The previous version also called the
        # parent implementation and discarded its return value.)
        return self._results
50 |
--------------------------------------------------------------------------------
/src/node/registration/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NatLee/nipype-preprocess-on-brain-MR/19faf1499f37ca885c18a1d1d4e4eb523f6e9869/src/node/registration/__init__.py
--------------------------------------------------------------------------------
/src/node/registration/interface.py:
--------------------------------------------------------------------------------
1 | import os
2 | import shutil
3 | from pathlib import Path
4 |
5 | from nipype.interfaces.base import (SimpleInterface, BaseInterfaceInputSpec, TraitedSpec, File, Directory)
6 | from nipype.interfaces.fsl import FLIRT
7 |
8 | from loguru import logger
9 |
# FSL ships the MNI152 1 mm template under $FSLDIR. Using a default of '' for
# `os.getenv` avoids an opaque `TypeError: unsupported operand ... NoneType`
# at import time when FSLDIR is unset; if FSL is missing, FLIRT will instead
# fail later with a clear missing-file error for the (then relative) path.
REF_NII_TEMPLATE = os.getenv('FSLDIR', '') + '/data/standard/MNI152_T1_1mm.nii.gz'
11 |
class FLIRTInputSpec(BaseInterfaceInputSpec):
    input_file = File(exists=True, desc='Source image path (.nii.gz)', mandatory=True)
    # Optional; when omitted the module-level MNI152 template path is used.
    ref_file = File(exists=True, desc='Reference image path', mandatory=False)
    output_folder = Directory(exists=False, desc='Output folder for the registered image', mandatory=True)

class FLIRTOutputSpec(TraitedSpec):
    output_file = File(exists=True, desc='Path to the registered image')
19 |
class FLIRTInterface(SimpleInterface):
    """Affine-register an image to a reference via FSL FLIRT.

    The registered image is written to `<output_folder>/registration/<input name>`;
    when no reference is supplied, the module-level MNI152 template is used.
    """

    input_spec = FLIRTInputSpec
    output_spec = FLIRTOutputSpec

    def _run_interface(self, runtime):
        input_file = self.inputs.input_file
        output_reg_folder = Path(self.inputs.output_folder) / 'registration'
        output_file = output_reg_folder / Path(input_file).name

        # Fall back to the default FSL template when no reference was given
        ref_file = self.inputs.ref_file
        if not ref_file:
            ref_file = REF_NII_TEMPLATE

        logger.info(f'Registering {input_file} to {ref_file}')
        logger.info(f'Reference file: {ref_file}')
        logger.info(f'Output file: {output_file}')

        # Ensure the output directory exists
        output_reg_folder.mkdir(parents=True, exist_ok=True)

        # Clean up the output directory before writing new results
        for entry in output_reg_folder.glob('*'):
            if entry.is_file():
                entry.unlink()
            if entry.is_dir():
                shutil.rmtree(entry)

        # Configure FLIRT: 12-DOF affine, correlation-ratio cost, no rotation
        # search, spline interpolation.
        flirt = FLIRT(
            in_file=input_file,
            reference=Path(ref_file).as_posix(),
            out_file=str(output_file),
            bins=256,
            cost_func='corratio',
            searchr_x=[0, 0],
            searchr_y=[0, 0],
            searchr_z=[0, 0],
            dof=12,
            interp='spline'
        )
        flirt.run()

        self._results['output_file'] = str(output_file)

        return runtime

    def _list_outputs(self):
        return self._results
72 |
--------------------------------------------------------------------------------
/src/node/segmentation/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NatLee/nipype-preprocess-on-brain-MR/19faf1499f37ca885c18a1d1d4e4eb523f6e9869/src/node/segmentation/__init__.py
--------------------------------------------------------------------------------
/src/node/segmentation/interface.py:
--------------------------------------------------------------------------------
1 | import shutil
2 | from pathlib import Path
3 |
4 | from nipype.interfaces.base import (SimpleInterface, BaseInterfaceInputSpec, TraitedSpec, File, Directory)
5 | from loguru import logger
6 |
7 | from utils.load_nii import load_nii
8 | from utils.save_nii import save_nii
9 |
10 | from node.segmentation.utils import kmeans_cluster
11 | from node.segmentation.utils import get_target_labels
12 | from node.segmentation.utils import segment
13 |
class SegmentationInputSpec(BaseInterfaceInputSpec):
    input_file = File(exists=True, desc='Source image path (.nii.gz)', mandatory=True)
    # Root output folder; results are written into a `segmentation` subfolder.
    output_folder = Directory(exists=False, desc='Output folder for the segmented image', mandatory=True)

class SegmentationOutputSpec(TraitedSpec):
    # Per-tissue intensity images (target tissue kept at full intensity).
    gm_segmented_output_file = File(exists=True, desc='Path to GM segmented image')
    wm_segmented_output_file = File(exists=True, desc='Path to WM segmented image')
    csf_segmented_output_file = File(exists=True, desc='Path to CSF segmented image')

    # K-means label maps saved alongside each segmented image.
    gm_labels_output_file = File(exists=True, desc='Path to GM labels image')
    wm_labels_output_file = File(exists=True, desc='Path to WM labels image')
    csf_labels_output_file = File(exists=True, desc='Path to CSF labels image')
26 |
class SegmentationInterface(SimpleInterface):
    """K-means (k=3) tissue segmentation producing GM/WM/CSF label and intensity maps."""

    input_spec = SegmentationInputSpec
    output_spec = SegmentationOutputSpec

    # Fixed processing order; also matches the order of get_target_labels().
    _TISSUES = ('gm', 'wm', 'csf')

    def _run_interface(self, runtime):
        input_file = self.inputs.input_file
        output_segmentation_folder = Path(self.inputs.output_folder) / 'segmentation'
        stem = Path(input_file).stem

        # One (labels path, segmented path) pair per tissue type.
        paths = {
            tissue: (
                output_segmentation_folder / f'{stem}_{tissue}_labels.nii.gz',
                output_segmentation_folder / f'{stem}_{tissue}_segmented.nii.gz',
            )
            for tissue in self._TISSUES
        }

        logger.info(f'Segment on: {input_file}')
        logger.info(f'Output folder: {output_segmentation_folder}')

        # Ensure the output directory exists
        output_segmentation_folder.mkdir(parents=True, exist_ok=True)

        # Clean up the output directory
        for entry in output_segmentation_folder.glob('*'):
            if entry.is_file():
                entry.unlink()
            if entry.is_dir():
                shutil.rmtree(entry)

        try:
            data, affine = load_nii(input_file)

            # Cluster the foreground intensities into 3 groups (GM, WM, CSF)
            labels = kmeans_cluster(data, 3)
            targets = dict(zip(self._TISSUES, get_target_labels(labels, data)))

            results = {}
            for tissue in self._TISSUES:
                labels_path, segmented_path = paths[tissue]
                matter = segment(labels, data, targets[tissue])
                # NOTE(review): each *_labels file stores the same full label
                # map (as in the original implementation) — confirm whether
                # per-tissue masks were intended instead.
                save_nii(labels, str(labels_path), affine)
                save_nii(matter, str(segmented_path), affine)
                results[f'{tissue}_labels_output_file'] = str(labels_path)
                results[f'{tissue}_segmented_output_file'] = str(segmented_path)

            # Publish results only after every save succeeded.
            self._results.update(results)

        except RuntimeError as e:
            logger.warning(f'Failed on: {input_file} with error: {e}')

        return runtime

    def _list_outputs(self):
        return self._results
95 |
--------------------------------------------------------------------------------
/src/node/segmentation/utils.py:
--------------------------------------------------------------------------------
1 | from typing import List
2 |
3 | import numpy as np
4 | from sklearn.cluster import KMeans
5 |
def extract_features(data:np.ndarray) -> np.ndarray:
    """Build a (n_voxels, 4) feature matrix [intensity, x, y, z] for every
    strictly-positive voxel of a 3-D volume.

    Vectorized replacement for the former per-voxel Python loop (same values
    and row order, as np.where enumerates indices in row-major order). Also
    returns a well-shaped (0, 4) array — instead of a shapeless empty one —
    when the volume has no positive voxels.
    """
    x_idx, y_idx, z_idx = np.where(data > 0)
    intensities = data[x_idx, y_idx, z_idx]
    return np.column_stack((intensities, x_idx, y_idx, z_idx))
12 |
13 |
def kmeans_cluster(data:np.ndarray, n_clusters:int) -> np.ndarray:
    """Cluster the positive voxels of `data` by intensity with K-means.

    Returns a label volume shaped like `data`: background voxels stay 0 and
    clustered voxels receive labels 1..n_clusters (cluster index + 1).
    """
    features = extract_features(data)
    intensities = features[..., 0].reshape((-1, 1))
    model = KMeans(
        n_clusters=n_clusters,
        init="k-means++",
        verbose=0,
        random_state=42,  # fixed seed keeps the segmentation reproducible
        max_iter=100,
        tol=1e-6
    ).fit(intensities)

    # Scatter the 1-based cluster ids back into a volume; 0 marks background.
    labels = np.zeros(data.shape)
    for cluster_id, feature in zip(model.labels_, features):
        x, y, z = int(feature[1]), int(feature[2]), int(feature[3])
        labels[x, y, z] = cluster_id + 1

    return labels
31 |
def get_target_labels(labels:np.ndarray, data:np.ndarray) -> List[int]:
    """Identify which cluster labels correspond to GM, WM and CSF.

    The heuristic keys on mean intensity per cluster: CSF is the darkest
    cluster, WM the brightest, and GM the one with the median mean intensity.

    Args:
        labels: Label volume from `kmeans_cluster` (0 = background, then 1..k).
        data: The intensity volume the labels were computed from.

    Returns:
        [gm_label, wm_label, csf_label] as plain ints (labels are 1-based).
    """
    labels_set = np.unique(labels)
    # Skip labels_set[0], the 0 background label.
    mean_intensities = [np.mean(data[np.where(labels == label)]) for label in labels_set[1:]]

    # Sorting by mean intensity is robust where the previous
    # `list.index(np.median(...))` lookup could raise ValueError: with an even
    # number of clusters the median is an average that is not in the list.
    order = np.argsort(mean_intensities)
    gm_idx = order[len(order) // 2]  # median intensity -> GM
    wm_idx = order[-1]               # brightest -> WM
    csf_idx = order[0]               # darkest -> CSF
    # +1 because cluster labels start at 1 (0 is background).
    return [int(gm_idx) + 1, int(wm_idx) + 1, int(csf_idx) + 1]
47 |
48 |
def segment(labels:np.ndarray, data:np.ndarray, target:int):
    """Extract one tissue class as a float32 intensity image.

    Voxels whose label equals `target` keep their full intensity; every other
    voxel (background included) is dimmed to 33.3% of its value so the
    surrounding anatomy remains faintly visible.
    """
    weights = np.where(labels == target, 1.0, 0.333).astype(np.float32)
    return data.astype(np.float32) * weights
--------------------------------------------------------------------------------
/src/node/skull_stripping/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NatLee/nipype-preprocess-on-brain-MR/19faf1499f37ca885c18a1d1d4e4eb523f6e9869/src/node/skull_stripping/__init__.py
--------------------------------------------------------------------------------
/src/node/skull_stripping/interface.py:
--------------------------------------------------------------------------------
1 | import shutil
2 | from pathlib import Path
3 |
4 | from nipype.interfaces.base import (SimpleInterface, BaseInterfaceInputSpec, TraitedSpec, File, traits)
5 | from nipype.interfaces.fsl import BET
6 |
class SkullStrippingInputSpec(BaseInterfaceInputSpec):
    input_file = File(exists=True, desc='Source image path (.nii.gz)', mandatory=True)
    # Root output folder; results go into a `skull_stripping` subfolder.
    output_folder = traits.Directory(exists=False, desc='Output folder for the extracted brain image', mandatory=True)
    frac = traits.Float(default_value=0.5, usedefault=True, desc='Fractional intensity threshold', mandatory=False)

class SkullStrippingOutputSpec(TraitedSpec):
    output_file = File(exists=True, desc='Path to the extracted brain image')
14 |
class SkullStrippingInterface(SimpleInterface):
    """Strip the skull with FSL BET (robust mode).

    Writes the brain-extracted image to
    `<output_folder>/skull_stripping/<input name>`.
    """

    input_spec = SkullStrippingInputSpec
    output_spec = SkullStrippingOutputSpec

    def _run_interface(self, runtime):
        input_file = self.inputs.input_file

        output_folder = Path(self.inputs.output_folder)
        output_skull_stripping_folder = output_folder / 'skull_stripping'
        output_file = output_skull_stripping_folder / Path(input_file).name

        # BET fractional intensity threshold supplied by the input spec.
        frac = self.inputs.frac

        # Ensure the output directory exists
        output_skull_stripping_folder.mkdir(parents=True, exist_ok=True)

        # Clean up the output directory
        for file in output_skull_stripping_folder.glob('*'):
            if file.is_file():
                file.unlink()
            if file.is_dir():
                shutil.rmtree(file)

        # Pass out_file as str, matching every other interface in this
        # package (previously a pathlib.Path object was handed to BET).
        bet = BET(in_file=input_file, out_file=str(output_file), frac=frac, robust=True)
        res = bet.run()

        # Report BET's own `out_file` output, which reflects the path FSL
        # actually wrote.
        self._results['output_file'] = res.outputs.out_file

        return runtime

    def _list_outputs(self):
        return self._results
47 |
--------------------------------------------------------------------------------
/src/node/zip/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NatLee/nipype-preprocess-on-brain-MR/19faf1499f37ca885c18a1d1d4e4eb523f6e9869/src/node/zip/__init__.py
--------------------------------------------------------------------------------
/src/node/zip/interface.py:
--------------------------------------------------------------------------------
1 | from pathlib import Path
2 |
3 | import gzip
4 | import shutil
5 |
6 | from loguru import logger
7 |
8 | from nipype.interfaces.base import SimpleInterface
9 | from nipype.interfaces.base import BaseInterfaceInputSpec
10 | from nipype.interfaces.base import TraitedSpec
11 | from nipype.interfaces.base import File
12 | from nipype.interfaces.base import Directory
13 |
14 |
class ZipOutputInputSpec(BaseInterfaceInputSpec):
    input_file = File(exists=True, desc='Input file to compress', mandatory=True)
    # Root output folder; the archive goes into a `zip` subfolder.
    output_folder = Directory(exists=True, desc='Path to the output folder', mandatory=False)

class ZipOutputOutputSpec(TraitedSpec):
    # NOTE(review): `mandatory` on an output trait looks like a no-op — confirm intent.
    output_file = File(exists=True, desc='Output compressed file', mandatory=True)
21 |
class ZipOutputInterface(SimpleInterface):
    """Gzip-compress a single file into `<output_folder>/zip/<name>.gz`."""

    input_spec = ZipOutputInputSpec
    output_spec = ZipOutputOutputSpec

    def _run_interface(self, runtime):
        input_file = self.inputs.input_file
        output_folder = self.inputs.output_folder
        output_zip_folder = Path(output_folder) / 'zip'
        output_file = output_zip_folder / (Path(input_file).name + '.gz')

        logger.info('Compressing {} to {}'.format(input_file, output_file))

        # Create the output directory if it doesn't exist
        output_zip_folder.mkdir(parents=True, exist_ok=True)

        # Clean the folder
        for file in output_zip_folder.glob('*'):
            if file.is_file():
                file.unlink()
            if file.is_dir():
                shutil.rmtree(file)

        # Stream-compress the input file (the previous comment claimed a
        # directory was compressed; it is a single file).
        with open(input_file, 'rb') as f_in, gzip.open(output_file, 'wb') as f_out:
            shutil.copyfileobj(f_in, f_out)

        # Store as str: every other node reports string paths, whereas a
        # pathlib.Path was previously leaked into the File output trait.
        self._results['output_file'] = str(output_file)
        return runtime

    def _list_outputs(self):
        return self._results
--------------------------------------------------------------------------------
/src/utils/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NatLee/nipype-preprocess-on-brain-MR/19faf1499f37ca885c18a1d1d4e4eb523f6e9869/src/utils/__init__.py
--------------------------------------------------------------------------------
/src/utils/convert_dot_to_png.py:
--------------------------------------------------------------------------------
1 | import pydot
2 |
3 | from loguru import logger
4 |
def convert_dot_to_png(dot_file_path, png_file_path):
    """
    Render a Graphviz DOT file to a PNG image.

    Args:
        dot_file_path (str): The path to the DOT file.
        png_file_path (str): The path to save the PNG file.
    """
    # Read the DOT source; stripping newlines avoids a blank rectangle
    # appearing in the rendered PNG.
    with open(dot_file_path) as f:
        dot_source = f.read().replace('\n', '')

    # pydot returns a list of graphs; this expects exactly one.
    (graph,) = pydot.graph_from_dot_data(dot_source)

    # Render the graph to disk as a PNG
    graph.write_png(png_file_path)

    logger.info(f"Saved PNG file to {png_file_path}")
--------------------------------------------------------------------------------
/src/utils/draw_segmentation.py:
--------------------------------------------------------------------------------
1 | from matplotlib import pyplot as plt
2 | import numpy as np
3 |
4 | from nilearn import image
5 | from nilearn.plotting import plot_stat_map
6 | import nibabel as nib
7 |
8 |
def draw_segmentation(input_nii_path:str, input_seg_nii_path:str, output_png_path:str, threshold:float=0.6, title='Prob. map'):
    """Overlay a segmentation map on an anatomical image and save it as a PNG.

    The segmentation volume is min-max normalized to [0, 1] before plotting so
    the threshold applies on a comparable scale across inputs.
    """
    # fwhm=None loads the images without applying any smoothing
    background_img = image.smooth_img(input_nii_path, fwhm=None)
    seg_img = image.smooth_img(input_seg_nii_path, fwhm=None)

    # Min-max normalize the segmentation data to [0, 1]
    seg_data = seg_img.get_fdata()
    seg_min, seg_max = np.min(seg_data), np.max(seg_data)
    normalized_data = (seg_data - seg_min) / (seg_max - seg_min)

    # Wrap the normalized data back into a NIfTI image with the same geometry
    normalized_seg_img = nib.Nifti1Image(normalized_data, seg_img.affine, seg_img.header)

    plot_stat_map(
        stat_map_img=normalized_seg_img,
        title=title,
        cmap=plt.cm.magma,
        threshold=threshold,
        bg_img=background_img,  # anatomical image drawn underneath the stat map
        display_mode='z',
        cut_coords=range(-50, 50, 20),
        dim=-1,
        output_file=output_png_path
    )
--------------------------------------------------------------------------------
/src/utils/load_nii.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import nibabel as nib
3 |
def load_nii(path:str):
    """Load a NIfTI file; return its voxel data array and affine matrix."""
    nii_image = nib.load(path)
    data = np.asanyarray(nii_image.dataobj)
    return data, nii_image.affine
7 |
--------------------------------------------------------------------------------
/src/utils/save_nii.py:
--------------------------------------------------------------------------------
1 | import nibabel as nib
2 |
def save_nii(data, path, affine):
    """Write `data` to `path` as a NIfTI image with the given affine."""
    nii_image = nib.Nifti1Image(data, affine)
    nib.save(nii_image, path)
    return
--------------------------------------------------------------------------------
/src/utils/save_nii_as_png.py:
--------------------------------------------------------------------------------
1 | import nibabel as nib
2 | import matplotlib.pyplot as plt
3 |
4 | from loguru import logger
5 |
6 | from utils.load_nii import load_nii
7 |
def save_nii_as_png(nii_path, png_path):
    """
    Save the middle slice (along the first axis) of a NIfTI file as a PNG image.

    Args:
        nii_path (str): Path to the NIfTI file.
        png_path (str): Path where the PNG file will be saved.
    """

    # Load the NIfTI file
    data, _ = load_nii(nii_path)

    # NOTE(review): assumes axis 0 indexes slices, i.e. roughly
    # (slice_index, w, h[, channels]) — confirm for each input orientation.
    middle_index = data.shape[0] // 2
    selected_slice = data[middle_index, :, :].squeeze()

    # Render the transposed slice in greyscale with the origin at the
    # lower-left, no axes, and a tight bounding box around the image.
    plt.imshow(selected_slice.T, cmap="gray", origin="lower")
    plt.axis('off')
    plt.savefig(png_path, bbox_inches='tight', pad_inches=0)
    plt.close()

    logger.info(f"Saved PNG file to {png_path}")
--------------------------------------------------------------------------------
/src/workflow.py:
--------------------------------------------------------------------------------
1 | from pathlib import Path
2 | import shutil
3 |
4 | from nipype import Function
5 | from nipype.pipeline import Node, Workflow
6 |
7 | from node.acpc_detect.interface import ACPCDetectInterface
8 | from node.zip.interface import ZipOutputInterface
9 | from node.orient2std.interface import Reorient2StdInterface
10 | from node.registration.interface import FLIRTInterface
11 | from node.skull_stripping.interface import SkullStrippingInterface
12 | from node.bias_field_correction.interface import BiasFieldCorrectionInterface
13 | from node.enhancement.interface import EnhancementInterface
14 | from node.segmentation.interface import SegmentationInterface
15 | from node.draw_segmentation.interface import DrawSegmentationInterface
16 | from node.final_output.interface import OrganizeFinalOutputInterface
17 |
18 | from utils.convert_dot_to_png import convert_dot_to_png
19 |
20 | # ==========================================
21 |
22 | # ==========================================
23 | # Pepare the data
24 | # ==========================================
25 |
26 | # Define the input file and output folder
27 | # List of input NIfTI files to process
28 | data_folder = Path('/data')
29 | output_folder = Path('/output')
30 |
31 | input_files = []
32 | for file in data_folder.glob('*.nii'):
33 | input_files.append(file)
34 |
35 | output_folders = []
36 | for input_file in input_files:
37 | folder = output_folder / input_file.stem
38 | folder.mkdir(parents=True, exist_ok=True)
39 | output_folders.append(folder)
40 | # Clean the folder
41 | for file in folder.glob('*'):
42 | if file.is_file():
43 | file.unlink()
44 | if file.is_dir():
45 | shutil.rmtree(file)
46 |
47 | pairs = list(zip(input_files, output_folders))
48 |
49 | # ==========================================
50 |
51 | # ==========================================
52 | # Define the workflow
53 | # ==========================================
54 |
55 | # Define the process_pair function
def process_pair(pair):
    # Split one (input_file, output_folder) pair into two named outputs so a
    # nipype Function node can fan them out to downstream nodes.
    first, second = pair
    return first, second
59 |
# Function node exposing each pair as `input_file` / `output_folder` outputs.
process_pair_node = Node(
    Function(
        input_names=["pair"],
        output_names=["input_file", "output_folder"],
        function=process_pair
    ),
    name="process_pair"
)
# Iterate the whole workflow once per (input_file, output_folder) pair.
process_pair_node.iterables = [('pair', pairs)]
69 |
# Create the acpc node
acpc_node = Node(ACPCDetectInterface(), name='t1_acpc_detect')

# Create the zip node
zip_node = Node(ZipOutputInterface(), name='zip_output')

# Create the orient2std node
orient2std_node = Node(Reorient2StdInterface(), name='orient2std')

# Create the registration node
registration_node = Node(FLIRTInterface(), name='registration')

# Create the skull stripping node
skull_stripping_node = Node(SkullStrippingInterface(), name='skull_stripping')

# Create the bias field correction node
bias_field_correction_node = Node(BiasFieldCorrectionInterface(), name='bias_field_correction')

# Create the enhancement node
enhancement_node = Node(EnhancementInterface(), name='enhancement')

# Create the segmentation node
segmentation_node = Node(SegmentationInterface(), name='segmentation')

# Create one draw-segmentation node per tissue; `title` labels the plot
draw_gm_segmentation_node = Node(DrawSegmentationInterface(), name='draw_gm_segmentation')
draw_gm_segmentation_node.inputs.title = 'GM@map'
draw_wm_segmentation_node = Node(DrawSegmentationInterface(), name='draw_wm_segmentation')
draw_wm_segmentation_node.inputs.title = 'WM@map'
draw_csf_segmentation_node = Node(DrawSegmentationInterface(), name='draw_csf_segmentation')
draw_csf_segmentation_node.inputs.title = 'CSF@map'

# Create the final output node that gathers the end products
final_output_node = Node(OrganizeFinalOutputInterface(), name='final_output')
104 |
105 | # ==========================================
106 |
107 | # ==========================================
108 | # Connect the nodes
109 | # ==========================================
110 |
111 | # Create a workflow
112 | workflow = Workflow(name='preprocess_workflow')
113 | nodes = [
114 | process_pair_node,
115 | acpc_node,
116 | zip_node,
117 | orient2std_node,
118 | registration_node,
119 | skull_stripping_node,
120 | bias_field_correction_node,
121 | enhancement_node,
122 | segmentation_node,
123 | draw_gm_segmentation_node,
124 | draw_wm_segmentation_node,
125 | draw_csf_segmentation_node,
126 | final_output_node,
127 | ]
128 | workflow.add_nodes(nodes)
129 |
130 | # Connect the input_file_node to the acpc_node (input_file & output_folder)
131 | workflow.connect(process_pair_node, 'input_file', acpc_node, 'input_file')
132 | workflow.connect(process_pair_node, 'output_folder', acpc_node, 'output_folder')
133 |
134 | # Connect the acpc_node to the zip_node (input_file & output_folder)
135 | workflow.connect(acpc_node, 'output_file', zip_node, 'input_file')
136 | workflow.connect(process_pair_node, 'output_folder', zip_node, 'output_folder')
137 |
138 | # Connect the zip_node to the orient2std_node (input_file & output_folder)
139 | workflow.connect(zip_node, 'output_file', orient2std_node, 'input_file')
140 | workflow.connect(process_pair_node, 'output_folder', orient2std_node, 'output_folder')
141 |
142 | # Connect the orient2std_node to the registration_node (input_file & output_folder)
143 | workflow.connect(orient2std_node, 'output_file', registration_node, 'input_file')
144 | workflow.connect(process_pair_node, 'output_folder', registration_node, 'output_folder')
145 |
146 | # Connect the registration_node to the skull_stripping_node (input_file & output_folder)
147 | workflow.connect(registration_node, 'output_file', skull_stripping_node, 'input_file')
148 | workflow.connect(process_pair_node, 'output_folder', skull_stripping_node, 'output_folder')
149 |
150 | # Connect the skull_stripping_node to the bias_field_correction_node (input_file & output_folder)
151 | workflow.connect(skull_stripping_node, 'output_file', bias_field_correction_node, 'input_file')
152 | workflow.connect(process_pair_node, 'output_folder', bias_field_correction_node, 'output_folder')
153 |
154 | # Connect the bias_field_correction_node to the enhancement_node (input_file & output_folder)
155 | workflow.connect(bias_field_correction_node, 'output_file', enhancement_node, 'input_file')
156 | workflow.connect(process_pair_node, 'output_folder', enhancement_node, 'output_folder')
157 |
158 | # Connect the enhancement_node to the segmentation_node (input_file & output_folder)
159 | workflow.connect(enhancement_node, 'output_file', segmentation_node, 'input_file')
160 | workflow.connect(process_pair_node, 'output_folder', segmentation_node, 'output_folder')
161 |
162 | # Connect acpc_node to draw_gm_segmentation_node (input_file & output_folder)
163 | workflow.connect(acpc_node, 'output_file', draw_gm_segmentation_node, 'acpc_input_file')
164 | workflow.connect(segmentation_node, 'gm_segmented_output_file', draw_gm_segmentation_node, 'segmented_input_file')
165 | workflow.connect(process_pair_node, 'output_folder', draw_gm_segmentation_node, 'output_folder')
166 | # Connect acpc_node to draw_wm_segmentation_node (input_file & output_folder)
167 | workflow.connect(acpc_node, 'output_file', draw_wm_segmentation_node, 'acpc_input_file')
168 | workflow.connect(segmentation_node, 'wm_segmented_output_file', draw_wm_segmentation_node, 'segmented_input_file')
169 | workflow.connect(process_pair_node, 'output_folder', draw_wm_segmentation_node, 'output_folder')
170 | # Connect acpc_node, segmentation_node & process_pair_node to draw_csf_segmentation_node (acpc_input_file, segmented_input_file & output_folder)
171 | workflow.connect(acpc_node, 'output_file', draw_csf_segmentation_node, 'acpc_input_file')
172 | workflow.connect(segmentation_node, 'csf_segmented_output_file', draw_csf_segmentation_node, 'segmented_input_file')
173 | workflow.connect(process_pair_node, 'output_folder', draw_csf_segmentation_node, 'output_folder')
174 |
175 | # Connect the process_pair_node to the final_output_node (output_folder)
176 | workflow.connect(process_pair_node, 'output_folder', final_output_node, 'output_folder')
177 | # Connect the acpc_node to the final_output_node (acpc_output_file & acpc_output_png_file)
178 | workflow.connect(acpc_node, 'output_file', final_output_node, 'acpc_output_file')
179 | workflow.connect(acpc_node, 'output_png_file', final_output_node, 'acpc_output_png_file')
180 | # Connect the draw_gm_segmentation_node to the final_output_node (gm_png_file)
181 | workflow.connect(draw_gm_segmentation_node, 'output_file', final_output_node, 'gm_png_file')
182 | # Connect the draw_wm_segmentation_node to the final_output_node (wm_png_file)
183 | workflow.connect(draw_wm_segmentation_node, 'output_file', final_output_node, 'wm_png_file')
184 | # Connect the draw_csf_segmentation_node to the final_output_node (csf_png_file)
185 | workflow.connect(draw_csf_segmentation_node, 'output_file', final_output_node, 'csf_png_file')
186 |
187 | # ==========================================
188 |
189 | # ==========================================
190 | # Draw the workflow and run it
191 | # ==========================================
192 |
193 | # Draw the workflow
194 | workflow.write_graph(graph2use='flat', format='png', simple_form=True)
195 |
196 | # Convert the detailed dot file to a png file
197 | convert_dot_to_png('./graph_detailed.dot', './graph_detailed.png')
198 |
199 | # Run the workflow
200 | workflow.run()
--------------------------------------------------------------------------------
/utils/acpcdetect_v2.1_LinuxCentOS6.7/PILbrain.nii:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NatLee/nipype-preprocess-on-brain-MR/19faf1499f37ca885c18a1d1d4e4eb523f6e9869/utils/acpcdetect_v2.1_LinuxCentOS6.7/PILbrain.nii
--------------------------------------------------------------------------------
/utils/acpcdetect_v2.1_LinuxCentOS6.7/T1acpc.mdl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NatLee/nipype-preprocess-on-brain-MR/19faf1499f37ca885c18a1d1d4e4eb523f6e9869/utils/acpcdetect_v2.1_LinuxCentOS6.7/T1acpc.mdl
--------------------------------------------------------------------------------
/utils/acpcdetect_v2.1_LinuxCentOS6.7/bin/acpcdetect:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NatLee/nipype-preprocess-on-brain-MR/19faf1499f37ca885c18a1d1d4e4eb523f6e9869/utils/acpcdetect_v2.1_LinuxCentOS6.7/bin/acpcdetect
--------------------------------------------------------------------------------
/utils/acpcdetect_v2.1_LinuxCentOS6.7/doc/acpcdetect_documentation.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NatLee/nipype-preprocess-on-brain-MR/19faf1499f37ca885c18a1d1d4e4eb523f6e9869/utils/acpcdetect_v2.1_LinuxCentOS6.7/doc/acpcdetect_documentation.pdf
--------------------------------------------------------------------------------
/utils/acpcdetect_v2.1_LinuxCentOS6.7/orion.mdl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NatLee/nipype-preprocess-on-brain-MR/19faf1499f37ca885c18a1d1d4e4eb523f6e9869/utils/acpcdetect_v2.1_LinuxCentOS6.7/orion.mdl
--------------------------------------------------------------------------------
/utils/atra1.0_LinuxCentOS6.7/PILbrain.nii:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NatLee/nipype-preprocess-on-brain-MR/19faf1499f37ca885c18a1d1d4e4eb523f6e9869/utils/atra1.0_LinuxCentOS6.7/PILbrain.nii
--------------------------------------------------------------------------------
/utils/atra1.0_LinuxCentOS6.7/T1acpc.mdl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NatLee/nipype-preprocess-on-brain-MR/19faf1499f37ca885c18a1d1d4e4eb523f6e9869/utils/atra1.0_LinuxCentOS6.7/T1acpc.mdl
--------------------------------------------------------------------------------
/utils/atra1.0_LinuxCentOS6.7/bin/atra:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NatLee/nipype-preprocess-on-brain-MR/19faf1499f37ca885c18a1d1d4e4eb523f6e9869/utils/atra1.0_LinuxCentOS6.7/bin/atra
--------------------------------------------------------------------------------
/utils/atra1.0_LinuxCentOS6.7/orion.mdl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NatLee/nipype-preprocess-on-brain-MR/19faf1499f37ca885c18a1d1d4e4eb523f6e9869/utils/atra1.0_LinuxCentOS6.7/orion.mdl
--------------------------------------------------------------------------------