├── .gitignore ├── Docker_tutorial ├── SegRap2023_task1_OARs_nnUNet_Example │ ├── Dockerfile │ ├── README.md │ ├── build.sh │ ├── export.sh │ ├── images │ │ └── images │ │ │ ├── head-neck-contrast-enhanced-ct │ │ │ └── data.txt │ │ │ └── head-neck-ct │ │ │ └── data.txt │ ├── inference_code.py │ ├── post_processing.py │ ├── process.py │ ├── requirements.in │ ├── requirements.txt │ ├── test.sh │ └── weight │ │ └── fold_0 │ │ ├── plans.pkl │ │ └── weight.txt ├── SegRap2023_task2_GTVs_nnUNet_Example │ ├── Dockerfile │ ├── README.md │ ├── build.sh │ ├── export.sh │ ├── images │ │ └── images │ │ │ ├── head-neck-contrast-enhanced-ct │ │ │ └── data.txt │ │ │ └── head-neck-ct │ │ │ └── data.txt │ ├── inference_code.py │ ├── post_processing.py │ ├── process.py │ ├── requirements.in │ ├── requirements.txt │ ├── test.sh │ └── weight │ │ └── fold_0 │ │ ├── plans.pkl │ │ └── weight.txt ├── gtvs_output_example.mha ├── gtvs_output_example.zip ├── oars_output_example.mha ├── oars_output_example.zip ├── outputs.png └── stacked_results_to_4d_mha.py ├── Eval ├── SegRap_Task001_DSC_NSD_Eval.py ├── SegRap_Task002_DSC_NSD_Eval.py └── two_evaluation_metrics.py ├── Poster_Top5_Team ├── Task01 │ ├── Chan Woong Lee.pdf │ ├── Kaixiang Yang.pdf │ ├── Yanzhou Su.pdf │ ├── Yiwen Ye.pdf │ └── Yunxin Zhong.pdf └── Task02 │ ├── Constantin Ulrich.pdf │ ├── Kaixiang Yang.pdf │ ├── Mehdi Astaraki.pdf │ ├── Yiwen Ye.pdf │ └── Zhaohu Xing.pdf ├── README.md ├── Tutorial ├── config │ ├── unet3d_GTVs.cfg │ └── unet3d_OARs.cfg ├── nnunet_baseline.ipynb ├── postprocessing.py ├── preprocessing.py ├── utils.py └── write_csv_file.py └── ethics.pdf /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | share/python-wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | MANIFEST 28 | 29 | # PyInstaller 30 | # Usually these files are written by a python script from a template 31 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 32 | *.manifest 33 | *.spec 34 | 35 | # Installer logs 36 | pip-log.txt 37 | pip-delete-this-directory.txt 38 | 39 | # Unit test / coverage reports 40 | htmlcov/ 41 | .tox/ 42 | .nox/ 43 | .coverage 44 | .coverage.* 45 | .cache 46 | nosetests.xml 47 | coverage.xml 48 | *.cover 49 | *.py,cover 50 | .hypothesis/ 51 | .pytest_cache/ 52 | cover/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | .pybuilder/ 76 | target/ 77 | 78 | # Jupyter Notebook 79 | .ipynb_checkpoints 80 | 81 | # IPython 82 | profile_default/ 83 | ipython_config.py 84 | 85 | # pyenv 86 | # For a library or package, you might want to ignore these files since the code is 87 | # intended to run in multiple environments; otherwise, check them in: 88 | # .python-version 89 | 90 | # pipenv 91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 
92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 94 | # install all needed dependencies. 95 | #Pipfile.lock 96 | 97 | # poetry 98 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 99 | # This is especially recommended for binary packages to ensure reproducibility, and is more 100 | # commonly ignored for libraries. 101 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 102 | #poetry.lock 103 | 104 | # pdm 105 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 106 | #pdm.lock 107 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 108 | # in version control. 109 | # https://pdm.fming.dev/#use-with-ide 110 | .pdm.toml 111 | 112 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 113 | __pypackages__/ 114 | 115 | # Celery stuff 116 | celerybeat-schedule 117 | celerybeat.pid 118 | 119 | # SageMath parsed files 120 | *.sage.py 121 | 122 | # Environments 123 | .env 124 | .venv 125 | env/ 126 | venv/ 127 | ENV/ 128 | env.bak/ 129 | venv.bak/ 130 | 131 | # Spyder project settings 132 | .spyderproject 133 | .spyproject 134 | 135 | # Rope project settings 136 | .ropeproject 137 | 138 | # mkdocs documentation 139 | /site 140 | 141 | # mypy 142 | .mypy_cache/ 143 | .dmypy.json 144 | dmypy.json 145 | 146 | # Pyre type checker 147 | .pyre/ 148 | 149 | # pytype static type analyzer 150 | .pytype/ 151 | 152 | # Cython debug symbols 153 | cython_debug/ 154 | 155 | # PyCharm 156 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 157 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 158 | # and can be added to the global gitignore or merged into this file. For a more nuclear 159 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 
160 | #.idea/ 161 | -------------------------------------------------------------------------------- /Docker_tutorial/SegRap2023_task1_OARs_nnUNet_Example/Dockerfile: -------------------------------------------------------------------------------- 1 | # Pull the docker image 2 | # FROM pytorch/pytorch:1.13.0-cuda11.6-cudnn8-devel 3 | 4 | FROM python:3.10-slim 5 | 6 | RUN groupadd -r user && useradd -m --no-log-init -r -g user user 7 | 8 | RUN mkdir -p /opt/app /input /output \ 9 | && chown user:user /opt/app /input /output 10 | 11 | USER user 12 | WORKDIR /opt/app 13 | 14 | ENV PATH="/home/user/.local/bin:${PATH}" 15 | COPY --chown=user:user requirements.txt /opt/app/ 16 | 17 | RUN python -m pip install --user -U pip && python -m pip install --user pip-tools 18 | RUN python -m piptools sync requirements.txt 19 | 20 | # RUN python -m pip install --user -r requirements.txt 21 | RUN python -m pip install --user torch==1.12.0+cu116 torchvision==0.13.0+cu116 torchaudio==0.12.0 --extra-index-url https://download.pytorch.org/whl/cu116 22 | RUN python -m pip install --no-deps --user nnunet==1.7.1 23 | 24 | COPY --chown=user:user process.py /opt/app/ 25 | COPY --chown=user:user inference_code.py /opt/app/ 26 | COPY --chown=user:user post_processing.py /opt/app/ 27 | # COPY --chown=user:user images/ /opt/app/images/ 28 | COPY --chown=user:user weight/ /opt/app/weight/ 29 | # COPY --chown=user:user output/ /opt/app/ 30 | 31 | 32 | ENTRYPOINT [ "python", "-m", "process" ] 33 | -------------------------------------------------------------------------------- /Docker_tutorial/SegRap2023_task1_OARs_nnUNet_Example/README.md: -------------------------------------------------------------------------------- 1 | # SegRap2023_SegmentationContainer Algorithm 2 | 3 | The source code for the algorithm container for 4 | SegRap2023_SegmentationContainer, generated with 5 | evalutils version 0.4.2 6 | using Python 3.10. 7 | -------------------------------------------------------------------------------- /Docker_tutorial/SegRap2023_task1_OARs_nnUNet_Example/build.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | SCRIPTPATH="$( cd "$(dirname "$0")" ; pwd -P )" 3 | 4 | docker build -t segrap2023_oar_segmentationcontainer "$SCRIPTPATH" 5 | -------------------------------------------------------------------------------- /Docker_tutorial/SegRap2023_task1_OARs_nnUNet_Example/export.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | ./build.sh 4 | 5 | docker save segrap2023_oar_segmentationcontainer | gzip -c > SegRap2023_SegmentationContainer.tar.gz 6 | -------------------------------------------------------------------------------- /Docker_tutorial/SegRap2023_task1_OARs_nnUNet_Example/images/images/head-neck-contrast-enhanced-ct/data.txt: -------------------------------------------------------------------------------- 1 | Download a contrast-enhanced image from the provided links for local testing on your machine: [GoogleDrive](https://drive.google.com/file/d/17hJz9hQ1sajsW0aEgmiydvL9bVchqipr/view), [BaiduNetDisk](https://pan.baidu.com/s/1lwGENM9R7z3791FxQoy7fQ?pwd=2023).
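Before building and testing the container, it can help to confirm the downloaded test image is readable. A minimal sketch, assuming the image was saved into the images/images/head-neck-contrast-enhanced-ct/ folder that test.sh later mounts as /input/ (the filename test_image.mha is a placeholder for whatever the downloaded file is named):

import SimpleITK as sitk

# Read the downloaded volume and print its geometry; process.py relies on
# SimpleITK being able to read this file when it converts .mha to .nii.gz.
img = sitk.ReadImage("images/images/head-neck-contrast-enhanced-ct/test_image.mha")
print("size:", img.GetSize())        # (width, height, depth) in voxels
print("spacing:", img.GetSpacing())  # voxel spacing in mm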
-------------------------------------------------------------------------------- /Docker_tutorial/SegRap2023_task1_OARs_nnUNet_Example/images/images/head-neck-ct/data.txt: -------------------------------------------------------------------------------- 1 | Download a non-contrast image from the provided links for local testing on your machine: [GoogleDrive](https://drive.google.com/file/d/17hJz9hQ1sajsW0aEgmiydvL9bVchqipr/view), [BaiduNetDisk](https://pan.baidu.com/s/1lwGENM9R7z3791FxQoy7fQ?pwd=2023). -------------------------------------------------------------------------------- /Docker_tutorial/SegRap2023_task1_OARs_nnUNet_Example/inference_code.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | o_path = os.getcwd() 4 | sys.path.append(o_path) 5 | import shutil 6 | from multiprocessing import Pool 7 | 8 | import numpy as np 9 | import torch 10 | from copy import deepcopy 11 | from batchgenerators.utilities.file_and_folder_operations import * 12 | from nnunet.inference.predict import preprocess_multithreaded 13 | from nnunet.inference.segmentation_export import save_segmentation_nifti_from_softmax 14 | from nnunet.postprocessing.connected_components import load_postprocessing, load_remove_save 15 | from nnunet.training.model_restore import load_model_and_checkpoint_files 16 | from torch import cuda 17 | from torch.nn import functional as F 18 | 19 | 20 | def predict_cases_segrap2023(model, list_of_lists, output_filenames, folds, save_npz=False, num_threads_preprocessing=6, 21 | num_threads_nifti_save=2, segs_from_prev_stage=None, do_tta=False, 22 | overwrite_existing=False, step_size=0.5, checkpoint_name="model_final_checkpoint", 23 | disable_postprocessing: bool = False): 24 | assert len(list_of_lists) == len(output_filenames) 25 | if segs_from_prev_stage is not None: 26 | assert len(segs_from_prev_stage) == len(output_filenames) 27 | 28 | pool = Pool(num_threads_nifti_save) 29 | results = [] 30 | 31 | cleaned_output_files = [] 32 | for o in output_filenames: 33 | dr, f = os.path.split(o) 34 | if len(dr) > 0: 35 | maybe_mkdir_p(dr) 36 | if not f.endswith(".nii.gz"): 37 | f, _ = os.path.splitext(f) 38 | f = f + ".nii.gz" 39 | cleaned_output_files.append(join(dr, f)) 40 | 41 | if not overwrite_existing: 42 | print("number of cases:", len(list_of_lists)) 43 | # if save_npz=True then we should also check for missing npz files 44 | not_done_idx = [i for i, j in enumerate(cleaned_output_files) if 45 | (not isfile(j)) or (save_npz and not isfile(j[:-7] + '.npz'))] 46 | 47 | cleaned_output_files = [cleaned_output_files[i] for i in not_done_idx] 48 | print(list_of_lists) 49 | list_of_lists = [list_of_lists[i] for i in not_done_idx] 50 | if segs_from_prev_stage is not None: 51 | segs_from_prev_stage = [segs_from_prev_stage[i] 52 | for i in not_done_idx] 53 | 54 | print("number of cases that still need to be predicted:", 55 | len(cleaned_output_files)) 56 | 57 | print("emptying cuda cache") 58 | torch.cuda.empty_cache() 59 | 60 | print("loading parameters for folds,", folds) 61 | trainer, params = load_model_and_checkpoint_files(model, folds, mixed_precision=True, 62 | checkpoint_name=checkpoint_name) 63 | 64 | print("starting preprocessing generator") 65 | preprocessing = preprocess_multithreaded(trainer, list_of_lists, cleaned_output_files, num_threads_preprocessing, 66 | segs_from_prev_stage) 67 | print("starting prediction...") 68 | all_output_files = [] 69 | with torch.no_grad(): 70 | for preprocessed in preprocessing: 71 |
output_filename, (d, dct) = preprocessed 72 | all_output_files.append(output_filename) 73 | if isinstance(d, str): 74 | data = np.load(d) 75 | os.remove(d) 76 | d = data 77 | 78 | # we need to be able to del it if things fail (just in case) 79 | softmax = None 80 | 81 | try: 82 | print("predicting", output_filename) 83 | print(f"attempting all_in_gpu {True}") 84 | trainer.load_checkpoint_ram(params[0], False) 85 | softmax = trainer.predict_preprocessed_data_return_seg_and_softmax( 86 | d, do_mirroring=do_tta, mirror_axes=trainer.data_aug_params[ 87 | 'mirror_axes'], use_sliding_window=True, 88 | step_size=step_size, use_gaussian=True, all_in_gpu=True, 89 | mixed_precision=True)[1] 90 | for p in params[1:]: 91 | trainer.load_checkpoint_ram(p, False) 92 | softmax += trainer.predict_preprocessed_data_return_seg_and_softmax( 93 | d, do_mirroring=do_tta, mirror_axes=trainer.data_aug_params[ 94 | 'mirror_axes'], use_sliding_window=True, 95 | step_size=step_size, use_gaussian=True, all_in_gpu=True, 96 | mixed_precision=True)[1] 97 | except RuntimeError: # out of gpu memory 98 | del softmax 99 | cuda.empty_cache() 100 | print( 101 | f"\nGPU AGGREGATION FAILED FOR CASE {os.path.basename(output_filename)} DUE TO OUT OF MEMORY, falling back to all_in_gpu False\n") 102 | trainer.load_checkpoint_ram(params[0], False) 103 | softmax = trainer.predict_preprocessed_data_return_seg_and_softmax( 104 | d, do_mirroring=do_tta, mirror_axes=trainer.data_aug_params[ 105 | 'mirror_axes'], use_sliding_window=True, 106 | step_size=step_size, use_gaussian=True, all_in_gpu=False, 107 | mixed_precision=True)[1] 108 | 109 | for p in params[1:]: 110 | trainer.load_checkpoint_ram(p, False) 111 | softmax += trainer.predict_preprocessed_data_return_seg_and_softmax( 112 | d, do_mirroring=do_tta, mirror_axes=trainer.data_aug_params[ 113 | 'mirror_axes'], use_sliding_window=True, 114 | step_size=step_size, use_gaussian=True, all_in_gpu=False, 115 | mixed_precision=True)[1] 116 | cuda.empty_cache() 117 | 118 | if len(params) > 1: 119 | softmax /= len(params) 120 | 121 | transpose_forward = trainer.plans.get('transpose_forward') 122 | if transpose_forward is not None: 123 | transpose_backward = trainer.plans.get('transpose_backward') 124 | # softmax = softmax.transpose([0] + [i + 1 for i in transpose_backward]) 125 | 126 | # resampling linearly on GPU 127 | torch.cuda.empty_cache() 128 | target_shape = dct.get('size_after_cropping') 129 | target_shape = [target_shape[i] for i in transpose_forward] 130 | if not isinstance(softmax, torch.Tensor): 131 | softmax = torch.from_numpy(softmax) 132 | try: 133 | with torch.no_grad(): 134 | softmax_resampled = torch.zeros((softmax.shape[0], *target_shape), dtype=torch.half, 135 | device='cuda:0') 136 | if not softmax.device == torch.device('cuda:0'): 137 | softmax_gpu = softmax.to(torch.device('cuda:0')) 138 | else: 139 | softmax_gpu = softmax 140 | for c in range(len(softmax)): 141 | softmax_resampled[c] = \ 142 | F.interpolate( 143 | softmax_gpu[c][None, None], size=target_shape, mode='trilinear')[0, 0] 144 | del softmax, softmax_gpu 145 | softmax_resampled = softmax_resampled.cpu().numpy() 146 | except RuntimeError: 147 | # gpu failed, try CPU 148 | print( 149 | f"\nGPU RESAMPLING FAILED FOR CASE {os.path.basename(output_filename)} DUE TO OUT OF MEMORY, falling back to CPU\n") 150 | 151 | if not softmax.device == torch.device('cpu'): 152 | softmax_cpu = softmax.to(torch.device('cpu')).float() 153 | else: 154 | softmax_cpu = softmax 155 | 156 | torch.cuda.empty_cache() 157 | with
torch.no_grad(): 158 | softmax_resampled = torch.zeros( 159 | (softmax.shape[0], *target_shape)) 160 | # depending on where we crash this has already been converted or not 161 | if not isinstance(softmax, torch.Tensor): 162 | softmax = torch.from_numpy(softmax) 163 | for c in range(len(softmax)): 164 | softmax_resampled[c] = \ 165 | F.interpolate( 166 | softmax_cpu[c][None, None], size=target_shape, mode='trilinear')[0, 0] 167 | del softmax, softmax_cpu 168 | softmax_resampled = softmax_resampled.half().numpy() 169 | torch.cuda.empty_cache() 170 | ##################################### 171 | softmax_resampled = softmax_resampled.transpose( 172 | [0] + [i + 1 for i in transpose_backward]) 173 | 174 | if save_npz: 175 | npz_file = output_filename[:-7] + ".npz" 176 | else: 177 | npz_file = None 178 | 179 | if hasattr(trainer, 'regions_class_order'): 180 | region_class_order = trainer.regions_class_order 181 | else: 182 | region_class_order = None 183 | 184 | """There is a problem with python process communication that prevents us from communicating objects 185 | larger than 2 GB between processes (basically when the length of the pickle string that will be sent is 186 | communicated by the multiprocessing.Pipe object then the placeholder (I think) does not allow for long 187 | enough strings (lol). This could be fixed by changing i to l (for long) but that would require manually 188 | patching system python code. We circumvent that problem here by saving softmax_pred to a npy file that will 189 | then be read (and finally deleted) by the Process. save_segmentation_nifti_from_softmax can take either 190 | filename or np.ndarray and will handle this automatically""" 191 | bytes_per_voxel = 4 192 | print( 193 | f'softmax shape {softmax_resampled.shape}, softmax dtype {softmax_resampled.dtype}') 194 | if True: 195 | # if all_in_gpu then the return value is half (float16) 196 | bytes_per_voxel = 2 197 | # * 0.85 just to be save 198 | if np.prod(softmax_resampled.shape) > (2e9 / bytes_per_voxel * 0.85): 199 | print( 200 | "This output is too large for python process-process communication. Saving output temporarily to disk") 201 | np.save(output_filename[:-7] + ".npy", softmax_resampled) 202 | softmax_resampled = output_filename[:-7] + ".npy" 203 | 204 | results.append(pool.starmap_async(save_segmentation_nifti_from_softmax, 205 | ((softmax_resampled, output_filename, dct, 1, region_class_order, 206 | None, None, 207 | npz_file, None, False, 1),) 208 | )) 209 | 210 | print("inference done. Now waiting for the segmentation export to finish...") 211 | _ = [i.get() for i in results] 212 | # now apply postprocessing 213 | # first load the postprocessing properties if they are present. Else raise a well visible warning 214 | if not disable_postprocessing: 215 | results = [] 216 | pp_file = join(model, "postprocessing.json") 217 | if isfile(pp_file): 218 | print("postprocessing...") 219 | shutil.copy(pp_file, os.path.abspath( 220 | os.path.dirname(output_filenames[0]))) 221 | # for_which_classes stores for which of the classes everything but the largest connected component needs to be 222 | # removed 223 | for_which_classes, min_valid_obj_size = load_postprocessing( 224 | pp_file) 225 | results.append(pool.starmap_async(load_remove_save, 226 | zip(output_filenames, output_filenames, 227 | [for_which_classes] * 228 | len(output_filenames), 229 | [min_valid_obj_size] * len(output_filenames)))) 230 | _ = [i.get() for i in results] 231 | else: 232 | print("WARNING! 
Cannot run postprocessing because the postprocessing file is missing. Make sure to run " 233 | "consolidate_folds in the output folder of the model first!\nThe folder you need to run this in is " 234 | "%s" % model) 235 | 236 | pool.close() 237 | pool.join() 238 | 239 | 240 | def check_input_folder_and_return_caseIDs(input_folder, expected_num_modalities): 241 | print("This model expects %d input modalities for each image" % 242 | expected_num_modalities) 243 | files = subfiles(input_folder, suffix=".nii.gz", join=False, sort=True) 244 | 245 | maybe_case_ids = np.unique([i[:-12] for i in files]) 246 | 247 | remaining = deepcopy(files) 248 | missing = [] 249 | 250 | assert len( 251 | files) > 0, "input folder did not contain any images (expected to find .nii.gz file endings)" 252 | 253 | # now check if all required files are present and that no unexpected files are remaining 254 | for c in maybe_case_ids: 255 | for n in range(expected_num_modalities): 256 | expected_output_file = c + "_%04.0d.nii.gz" % n 257 | if not isfile(join(input_folder, expected_output_file)): 258 | missing.append(expected_output_file) 259 | else: 260 | remaining.remove(expected_output_file) 261 | 262 | print("Found %d unique case ids, here are some examples:" % len(maybe_case_ids), 263 | np.random.choice(maybe_case_ids, min(len(maybe_case_ids), 10))) 264 | print("If they don't look right, make sure to double check your filenames. They must end with _0000.nii.gz etc") 265 | 266 | if len(remaining) > 0: 267 | print("found %d unexpected remaining files in the folder. Here are some examples:" % len(remaining), 268 | np.random.choice(remaining, min(len(remaining), 10))) 269 | 270 | if len(missing) > 0: 271 | print("Some files are missing:") 272 | print(missing) 273 | raise RuntimeError("missing files in input_folder") 274 | 275 | return maybe_case_ids 276 | 277 | 278 | def predict_from_folder_segrap2023(model: str, input_folder: str, output_folder: str, folds: 0, part_id:0, num_parts:1): 279 | """ 280 | here we use the standard naming scheme to generate list_of_lists and output_files needed by predict_cases 281 | 282 | :param model: 283 | :param input_folder: 284 | :param output_folder: 285 | :param folds: 286 | :param save_npz: 287 | :param num_threads_preprocessing: 288 | :param num_threads_nifti_save: 289 | :param lowres_segmentations: 290 | :param part_id: 291 | :param num_parts: 292 | :param tta: 293 | :param mixed_precision: 294 | :param overwrite_existing: if not None then it will be overwritten with whatever is in there. 
None is default (no overwrite) 295 | :return: 296 | """ 297 | maybe_mkdir_p(output_folder) 298 | # shutil.copy(join(model, 'plans.pkl'), output_folder) 299 | 300 | # assert isfile(join(model, "plans.pkl") 301 | # ), "Folder with saved model weights must contain a plans.pkl file" 302 | expected_num_modalities = load_pickle( 303 | join(model+"/fold_{}".format(folds), "plans.pkl"))['num_modalities'] 304 | 305 | # check input folder integrity 306 | case_ids = check_input_folder_and_return_caseIDs( 307 | input_folder, expected_num_modalities) 308 | 309 | output_files = [join(output_folder, i + ".nii.gz") for i in case_ids] 310 | all_files = subfiles(input_folder, suffix=".nii.gz", join=False, sort=True) 311 | list_of_lists = [[join(input_folder, i) for i in all_files if i[:len(j)].startswith(j) and 312 | len(i) == (len(j) + 12)] for j in case_ids] 313 | 314 | return predict_cases_segrap2023(model, list_of_lists[part_id::num_parts], output_files[part_id::num_parts], folds=0) 315 | 316 | 317 | # predict_from_folder_segrap2023("weight/", "images/", "test/", 0, 0, 1) 318 | -------------------------------------------------------------------------------- /Docker_tutorial/SegRap2023_task1_OARs_nnUNet_Example/post_processing.py: -------------------------------------------------------------------------------- 1 | import SimpleITK as sitk 2 | import numpy as np 3 | import shutil 4 | import glob 5 | import os 6 | import sys 7 | o_path = os.getcwd() 8 | sys.path.append(o_path) 9 | 10 | 11 | segrap_subset_task001 = { 12 | 'Brain': [1, 2, 3, 4, 5, 6, 7, 8, 9], 13 | "BrainStem": 2, 14 | "Chiasm": 3, 15 | "TemporalLobe_L": [4, 6], 16 | "TemporalLobe_R": [5, 7], 17 | "Hippocampus_L": [8, 6], 18 | "Hippocampus_R": [9, 7], 19 | 'Eye_L': [10, 12], 20 | 'Eye_R': [11, 13], 21 | "Lens_L": 12, 22 | "Lens_R": 13, 23 | "OpticNerve_L": 14, 24 | "OpticNerve_R": 15, 25 | "MiddleEar_L": [18, 16, 20, 24, 28, 30], 26 | "MiddleEar_R": [19, 17, 21, 25, 29, 31], 27 | "IAC_L": 18, 28 | "IAC_R": 19, 29 | "TympanicCavity_L": [22, 20], 30 | "TympanicCavity_R": [23, 21], 31 | "VestibulSemi_L": [26, 24], 32 | "VestibulSemi_R": [27, 25], 33 | "Cochlea_L": 28, 34 | "Cochlea_R": 29, 35 | "ETbone_L": [32, 30], 36 | "ETbone_R": [33, 31], 37 | "Pituitary": 34, 38 | "OralCavity": 35, 39 | "Mandible_L": 36, 40 | "Mandible_R": 37, 41 | "Submandibular_L": 38, 42 | "Submandibular_R": 39, 43 | "Parotid_L": 40, 44 | "Parotid_R": 41, 45 | "Mastoid_L": 42, 46 | "Mastoid_R": 43, 47 | "TMjoint_L": 44, 48 | "TMjoint_R": 45, 49 | "SpinalCord": 46, 50 | "Esophagus": 47, 51 | "Larynx": [48, 49, 50, 51], 52 | "Larynx_Glottic": 49, 53 | "Larynx_Supraglot": 50, 54 | "PharynxConst": [51, 52], 55 | "Thyroid": 53, 56 | "Trachea": 54} 57 | 58 | 59 | segrap_subset_task002 = { 60 | "GTVp": 1, 61 | "GTVnd": 2} 62 | 63 | 64 | def nii2array(path): 65 | mask_itk_ref = sitk.ReadImage(path) 66 | mask_arr_ref = sitk.GetArrayFromImage(mask_itk_ref) 67 | return mask_arr_ref 68 | 69 | 70 | def merge_multi_class_to_one(input_arr, classes_index=None): 71 | new_arr = np.zeros_like(input_arr) 72 | for cls_ind in classes_index: 73 | new_arr[input_arr == cls_ind] = 1 74 | return new_arr 75 | 76 | 77 | def convert_one_hot_label_to_multi_organs(ont_hot_label_path, save_path): 78 | patient_results = [] 79 | spacing = None 80 | for organ in segrap_subset_task001.keys(): 81 | ont_hot_label_arr = nii2array(ont_hot_label_path) 82 | ont_hot_label_itk = sitk.ReadImage(ont_hot_label_path) 83 | spacing = ont_hot_label_itk.GetSpacing() 84 | 85 | if type(segrap_subset_task001[organ]) is list: 
86 | new_arr = merge_multi_class_to_one( 87 | ont_hot_label_arr, segrap_subset_task001[organ]) 88 | else: 89 | new_arr = np.zeros_like(ont_hot_label_arr) 90 | new_arr[ont_hot_label_arr == segrap_subset_task001[organ]] = 1 91 | patient_results.append(new_arr) 92 | 93 | oars = [] 94 | for t in patient_results: 95 | oars.append(sitk.GetImageFromArray(t, False)) 96 | output_itk = sitk.JoinSeries(oars) 97 | new_spacing = (spacing[0], spacing[1], spacing[2], 1) 98 | output_itk.SetSpacing(new_spacing) 99 | print(output_itk.GetSize()) 100 | sitk.WriteImage(output_itk, save_path, True) 101 | print("Conversion Finished !") 102 | 103 | 104 | def convert_one_hot_label_to_multi_lesions(ont_hot_label_path, save_fold): 105 | patient_results = [] 106 | spacing = None 107 | for lesion in segrap_subset_task002.keys(): 108 | ont_hot_label_arr = nii2array(ont_hot_label_path) 109 | ont_hot_label_itk = sitk.ReadImage(ont_hot_label_path) 110 | spacing = ont_hot_label_itk.GetSpacing() 111 | new_arr = np.zeros_like(ont_hot_label_arr) 112 | new_arr[ont_hot_label_arr == segrap_subset_task002[lesion]] = 1 113 | patient_results.append(new_arr) 114 | new_itk = sitk.GetImageFromArray( 115 | np.array(patient_results).transpose(1, 2, 3, 0)) 116 | new_itk.SetSpacing(spacing) 117 | sitk.WriteImage(new_itk, "{}.nii.gz".format(save_fold)) 118 | return "Conversion Finished" 119 | 120 | 121 | # if __name__ == "__main__": 122 | # for patient in glob.glob("test/*"): 123 | # new_path = "test/{}".format( 124 | # patient.split("/")[-1].replace("_cropped.nii.gz", "")) 125 | # if os.path.exists(new_path): 126 | # pass 127 | # convert_one_hot_label_to_multi_organs(patient, new_path) 128 | # else: 129 | # os.mkdir(new_path) 130 | # convert_one_hot_label_to_multi_organs(patient, new_path) 131 | # print("Convert all predictions to single organ files") 132 | -------------------------------------------------------------------------------- /Docker_tutorial/SegRap2023_task1_OARs_nnUNet_Example/process.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from post_processing import convert_one_hot_label_to_multi_organs 3 | from inference_code import predict_from_folder_segrap2023 4 | from evalutils.validators import ( 5 | UniquePathIndicesValidator, 6 | UniqueImagesValidator, 7 | ) 8 | from evalutils import SegmentationAlgorithm 9 | import numpy as np 10 | import SimpleITK 11 | import os 12 | import sys 13 | o_path = os.getcwd() 14 | print(o_path) 15 | sys.path.append(o_path) 16 | 17 | 18 | class Customalgorithm(): # SegmentationAlgorithm is not inherited in this class anymore 19 | def __init__(self): 20 | """ 21 | Do not modify the `self.input_dir` and `self.output_dir`. 22 | (Check https://grand-challenge.org/algorithms/interfaces/) 23 | """ 24 | self.input_dir = "/input/" 25 | self.output_dir = "/output/images/head-neck-segmentation/" 26 | 27 | """ 28 | Store the validation/test data and predictions into the `self.nii_path` and `self.result_path`, respectively. 29 | Put your model and pkl files into the `self.weight`. 
30 | """ 31 | self.nii_path = '/opt/app/nnUNet_raw_data_base/nnUNet_raw_data/Task001_SegRap2023/imagesTs' 32 | self.result_path = '/opt/app/nnUNet_raw_data_base/nnUNet_raw_data/Task001_SegRap2023/result' 33 | self.nii_seg_file = 'SegRap2023_001.nii.gz' 34 | self.weight = "./weight/" 35 | if not os.path.exists(self.nii_path): 36 | os.makedirs(self.nii_path, exist_ok=True) 37 | if not os.path.exists(self.result_path): 38 | os.makedirs(self.result_path, exist_ok=True) 39 | pass 40 | 41 | def convert_mha_to_nii(self, mha_input_path, nii_out_path): # nnUNet specific 42 | img = SimpleITK.ReadImage(mha_input_path) 43 | print(img.GetSize()) 44 | SimpleITK.WriteImage(img, nii_out_path, True) 45 | 46 | def convert_nii_to_mha(self, nii_input_path, mha_out_path): # nnUNet specific 47 | img = SimpleITK.ReadImage(nii_input_path) 48 | SimpleITK.WriteImage(img, mha_out_path, True) 49 | 50 | def check_gpu(self): 51 | """ 52 | Check if GPU is available. Note that the Grand Challenge only has one available GPU. 53 | """ 54 | print('Checking GPU availability') 55 | is_available = torch.cuda.is_available() 56 | print('Available: ' + str(is_available)) 57 | print(f'Device count: {torch.cuda.device_count()}') 58 | if is_available: 59 | print(f'Current device: {torch.cuda.current_device()}') 60 | print('Device name: ' + torch.cuda.get_device_name(0)) 61 | print('Device memory: ' + 62 | str(torch.cuda.get_device_properties(0).total_memory)) 63 | 64 | def load_inputs(self): # use two modalities input data 65 | """ 66 | Read input data (two modalities) from `self.input_dir` (/input/). 67 | Please do not modify the path for CT and contrast-CT images. 68 | """ 69 | ct_mha = os.listdir(os.path.join(self.input_dir, 'images/head-neck-ct/'))[0] 70 | ctc_mha = os.listdir(os.path.join(self.input_dir, 'images/head-neck-contrast-enhanced-ct/'))[0] 71 | uuid = os.path.splitext(ct_mha)[0] 72 | 73 | """ 74 | if your model was based on nnUNet baseline and used two modalities as inputs, 75 | please convert the input data into '_0000.nii.gz' and '_0001.nii.gz' using following code. 76 | """ 77 | self.convert_mha_to_nii(os.path.join(self.input_dir, 'images/head-neck-ct/', ct_mha), 78 | os.path.join(self.nii_path, 'SegRap2023_001_0000.nii.gz')) 79 | self.convert_mha_to_nii(os.path.join(self.input_dir, 'images/head-neck-contrast-enhanced-ct/', ctc_mha), 80 | os.path.join(self.nii_path, 'SegRap2023_001_0001.nii.gz')) 81 | 82 | # Check the validation/test data exist. 83 | print(os.listdir('/opt/app/nnUNet_raw_data_base/nnUNet_raw_data/Task001_SegRap2023/imagesTs')) 84 | 85 | return uuid 86 | 87 | 88 | # def load_inputs(self): # only use non-contrast-CT images as input 89 | # """ 90 | # Read input data (non-contrast-CT images) from `self.input_dir` (/input/). 91 | # Please do not modify the path for non-contrast-CT images. 92 | # """ 93 | # ct = os.listdir(os.path.join(self.input_dir, 'images/head-neck-ct/'))[0] 94 | # uuid = os.path.splitext(ct)[0] 95 | 96 | # """ 97 | # if your model was based on nnUNet baseline and only used non-contrast-CT images as inputs, 98 | # please convert the input data into '_0000.nii.gz' using following code. 99 | # """ 100 | # self.convert_mha_to_nii(os.path.join(self.input_dir, 'images/head-neck-ct/', ct), 101 | # os.path.join(self.nii_path, 'SegRap2023_001_0000.nii.gz')) 102 | # # Check the validation/test data exist. 
103 | # print(os.listdir('/opt/app/nnUNet_raw_data_base/nnUNet_raw_data/Task001_SegRap2023/imagesTs')) 104 | 105 | # return uuid 106 | 107 | 108 | # def load_inputs(self): # only use contrast-CT images as input 109 | # """ 110 | # Read input data (single contrast-CT images) from `self.input_dir` (/input/). 111 | # Please do not modify the path for contrast-CT images. 112 | # """ 113 | # ct = os.listdir(os.path.join(self.input_dir, 'images/head-neck-contrast-enhanced-ct/'))[0] 114 | # uuid = os.path.splitext(ct)[0] 115 | 116 | # """ 117 | # if your model was based on nnUNet baseline and only used contrast-CT images as inputs, 118 | # please convert the input data into '_0000.nii.gz' using following code. 119 | # """ 120 | # self.convert_mha_to_nii(os.path.join(self.input_dir, 'images/head-neck-contrast-enhanced-ct/', ct), 121 | # os.path.join(self.nii_path, 'SegRap2023_001_0000.nii.gz')) 122 | 123 | # # Check the validation/test data exist. 124 | # print(os.listdir('/opt/app/nnUNet_raw_data_base/nnUNet_raw_data/Task001_SegRap2023/imagesTs')) 125 | 126 | # return uuid 127 | 128 | 129 | def write_outputs(self, uuid): 130 | """ 131 | If you used one-hot label (54 classes) for training, please convert the 54 classes prediction to 45 oars prediction using function `convert_one_hot_label_to_multi_organs`. 132 | Otherwise, stack your 45 predictions for oars in the first channel, the corresponding mapping between the channel index and the organ names is: 133 | {0: 'Brain', 134 | 1: 'BrainStem', 135 | 2: 'Chiasm', 136 | 3: 'TemporalLobe_L', 137 | 4: 'TemporalLobe_R', 138 | 5: 'Hippocampus_L', 139 | 6: 'Hippocampus_R', 140 | 7: 'Eye_L', 141 | 8: 'Eye_R', 142 | 9: 'Lens_L', 143 | 10: 'Lens_R', 144 | 11: 'OpticNerve_L', 145 | 12: 'OpticNerve_R', 146 | 13: 'MiddleEar_L', 147 | 14: 'MiddleEar_R', 148 | 15: 'IAC_L', 149 | 16: 'IAC_R', 150 | 17: 'TympanicCavity_L', 151 | 18: 'TympanicCavity_R', 152 | 19: 'VestibulSemi_L', 153 | 20: 'VestibulSemi_R', 154 | 21: 'Cochlea_L', 155 | 22: 'Cochlea_R', 156 | 23: 'ETbone_L', 157 | 24: 'ETbone_R', 158 | 25: 'Pituitary', 159 | 26: 'OralCavity', 160 | 27: 'Mandible_L', 161 | 28: 'Mandible_R', 162 | 29: 'Submandibular_L', 163 | 30: 'Submandibular_R', 164 | 31: 'Parotid_L', 165 | 32: 'Parotid_R', 166 | 33: 'Mastoid_L', 167 | 34: 'Mastoid_R', 168 | 35: 'TMjoint_L', 169 | 36: 'TMjoint_R', 170 | 37: 'SpinalCord', 171 | 38: 'Esophagus', 172 | 39: 'Larynx', 173 | 40: 'Larynx_Glottic', 174 | 41: 'Larynx_Supraglot', 175 | 42: 'PharynxConst', 176 | 43: 'Thyroid', 177 | 44: 'Trachea'} 178 | Please ensure the 0 channel is the prediction of Brain, the 1 channel is the prediction of BrainStem, ......, the 44 channel is the prediction of Trachea. 179 | and also ensure the shape of final prediction array is [45, *image_shape]. 180 | The predictions should be saved in the `self.output_dir` (/output/). Please do not modify the path and the suffix (.mha) for saving the prediction. 181 | """ 182 | os.makedirs(os.path.dirname(self.output_dir), exist_ok=True) 183 | convert_one_hot_label_to_multi_organs(os.path.join( 184 | self.result_path, self.nii_seg_file), os.path.join(self.output_dir, uuid + ".mha")) 185 | print('Output written to: ', os.path.join(self.output_dir, uuid + ".mha")) 186 | 187 | def predict(self): 188 | """ 189 | load the model and checkpoint, and generate the predictions. You can replace this part with your own model. 
190 | """ 191 | predict_from_folder_segrap2023(self.weight, self.nii_path, self.result_path, 0, 0, 1) 192 | print("nnUNet segmentation done!") 193 | if not os.path.exists(os.path.join(self.result_path, self.nii_seg_file)): 194 | print('waiting for nnUNet segmentation to be created') 195 | 196 | while not os.path.exists(os.path.join(self.result_path, self.nii_seg_file)): 197 | import time 198 | print('.', end='') 199 | time.sleep(5) 200 | # print(cproc) # since nnUNet_predict call is split into prediction and postprocess, a pre-mature exit code is received but segmentation file not yet written. This hack ensures that all spawned subprocesses are finished before being printed. 201 | print('Prediction finished !') 202 | 203 | def post_process(self): 204 | self.check_gpu() 205 | print('Start processing') 206 | uuid = self.load_inputs() 207 | print('Start prediction') 208 | self.predict() 209 | print('Start output writing') 210 | self.write_outputs(uuid) 211 | 212 | def process(self): 213 | """ 214 | Read inputs from /input, process with your algorithm and write to /output 215 | """ 216 | print(self.weight, self.nii_path, self.result_path) 217 | self.post_process() 218 | 219 | 220 | if __name__ == "__main__": 221 | Customalgorithm().process() 222 | -------------------------------------------------------------------------------- /Docker_tutorial/SegRap2023_task1_OARs_nnUNet_Example/requirements.in: -------------------------------------------------------------------------------- 1 | 2 | evalutils==0.4.2 3 | -------------------------------------------------------------------------------- /Docker_tutorial/SegRap2023_task1_OARs_nnUNet_Example/requirements.txt: -------------------------------------------------------------------------------- 1 | # 2 | # This file is autogenerated by pip-compile with Python 3.10 3 | # by the following command: 4 | # 5 | # pip-compile --resolver=backtracking 6 | # 7 | arrow==1.2.3 8 | # via jinja2-time 9 | binaryornot==0.4.4 10 | # via cookiecutter 11 | build==0.10.0 12 | # via pip-tools 13 | certifi==2023.5.7 14 | # via requests 15 | chardet==5.1.0 16 | # via binaryornot 17 | charset-normalizer==3.1.0 18 | # via requests 19 | click==8.1.3 20 | # via 21 | # cookiecutter 22 | # evalutils 23 | # pip-tools 24 | cookiecutter==2.1.1 25 | # via evalutils 26 | evalutils==0.4.2 27 | # via -r requirements.in 28 | idna==3.4 29 | # via requests 30 | imageio[tifffile]==2.31.1 31 | # via evalutils 32 | jinja2==3.1.2 33 | # via 34 | # cookiecutter 35 | # jinja2-time 36 | jinja2-time==0.2.0 37 | # via cookiecutter 38 | joblib==1.3.1 39 | # via scikit-learn 40 | markupsafe==2.1.3 41 | # via jinja2 42 | numpy==1.25.0 43 | # via 44 | # evalutils 45 | # imageio 46 | # pandas 47 | # scikit-learn 48 | # scipy 49 | # tifffile 50 | packaging==23.1 51 | # via build 52 | pandas==2.0.3 53 | # via evalutils 54 | pillow==10.0.0 55 | # via imageio 56 | pip-tools==6.14.0 57 | # via evalutils 58 | pyproject-hooks==1.0.0 59 | # via build 60 | python-dateutil==2.8.2 61 | # via 62 | # arrow 63 | # pandas 64 | python-slugify==8.0.1 65 | # via cookiecutter 66 | pytz==2023.3 67 | # via pandas 68 | pyyaml==6.0 69 | # via cookiecutter 70 | requests==2.31.0 71 | # via cookiecutter 72 | scikit-learn==1.3.0 73 | # via evalutils 74 | scipy==1.11.1 75 | # via 76 | # evalutils 77 | # scikit-learn 78 | simpleitk==2.2.1 79 | # via evalutils 80 | six==1.16.0 81 | # via python-dateutil 82 | text-unidecode==1.3 83 | # via python-slugify 84 | threadpoolctl==3.1.0 85 | # via scikit-learn 86 | tifffile==2023.4.12 87 | # 
via imageio 88 | tomli==2.0.1 89 | # via 90 | # build 91 | # pip-tools 92 | # pyproject-hooks 93 | tzdata==2023.3 94 | # via pandas 95 | urllib3==2.0.3 96 | # via requests 97 | wheel==0.40.0 98 | # via pip-tools 99 | batchgenerators==0.25 100 | matplotlib 101 | tqdm 102 | dicom2nifti 103 | scikit-image 104 | medpy 105 | nibabel 106 | # The following packages are considered to be unsafe in a requirements file: 107 | # pip 108 | # setuptools 109 | -------------------------------------------------------------------------------- /Docker_tutorial/SegRap2023_task1_OARs_nnUNet_Example/test.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | SCRIPTPATH="$( cd "$(dirname "$0")" ; pwd -P )" 4 | 5 | ./build.sh 6 | 7 | VOLUME_SUFFIX=$(dd if=/dev/urandom bs=32 count=1 | md5sum | cut --delimiter=' ' --fields=1) 8 | # Maximum is currently 30g, configurable in your algorithm image settings on grand challenge 9 | MEM_LIMIT="32g" 10 | 11 | docker volume create segrap2023_oar_segmentationcontainer-output-$VOLUME_SUFFIX 12 | 13 | echo $VOLUME_SUFFIX 14 | # Do not change any of the parameters to docker run, these are fixed 15 | docker run --rm --gpus all \ 16 | --memory="${MEM_LIMIT}" \ 17 | --memory-swap="${MEM_LIMIT}" \ 18 | --network="none" \ 19 | --cap-drop="ALL" \ 20 | --security-opt="no-new-privileges" \ 21 | --shm-size="128m" \ 22 | --pids-limit="256" \ 23 | -v $SCRIPTPATH/images/:/input/ \ 24 | -v segrap2023_oar_segmentationcontainer-output-$VOLUME_SUFFIX:/output/ \ 25 | segrap2023_oar_segmentationcontainer 26 | 27 | docker run --rm \ 28 | -v segrap2023_oar_segmentationcontainer-output-$VOLUME_SUFFIX:/output/ \ 29 | python:3.10-slim ls -al /output/images/head-neck-segmentation 30 | 31 | docker run --rm \ 32 | -v segrap2023_oar_segmentationcontainer-output-$VOLUME_SUFFIX:/output/ \ 33 | python:3.10-slim cat /output/results.json | python -m json.tool 34 | 35 | docker run --rm \ 36 | -v segrap2023_oar_segmentationcontainer-output-$VOLUME_SUFFIX:/output/ \ 37 | -v $SCRIPTPATH/test/:/input/ \ 38 | python:3.10-slim python -c "import json, sys; f1 = json.load(open('/output/results.json')); f2 = json.load(open('/input/expected_output.json')); sys.exit(f1 != f2);" 39 | 40 | if [ $? -eq 0 ]; then 41 | echo "Tests successfully passed..." 42 | else 43 | echo "Expected output was not found..." 44 | fi 45 | 46 | docker volume rm segrap2023_oar_segmentationcontainer-output-$VOLUME_SUFFIX 47 | -------------------------------------------------------------------------------- /Docker_tutorial/SegRap2023_task1_OARs_nnUNet_Example/weight/fold_0/plans.pkl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HiLab-git/SegRap2023/de87c551571a1ee4c7e6dc17e9824c19ddec6f30/Docker_tutorial/SegRap2023_task1_OARs_nnUNet_Example/weight/fold_0/plans.pkl -------------------------------------------------------------------------------- /Docker_tutorial/SegRap2023_task1_OARs_nnUNet_Example/weight/fold_0/weight.txt: -------------------------------------------------------------------------------- 1 | Download a trained nnUNet model for your local machine testing [GoogleDrive](https://drive.google.com/file/d/17hJz9hQ1sajsW0aEgmiydvL9bVchqipr/view), [BaiduNetDisk](https://pan.baidu.com/s/1lwGENM9R7z3791FxQoy7fQ?pwd=2023). 
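To verify that the downloaded weights landed in the layout inference_code.py expects (the checkpoint and plans.pkl inside weight/fold_0/), a minimal sketch that loads the plans file with the standard pickle module and prints the number of input modalities the model was trained with:

import pickle

# predict_from_folder_segrap2023 reads this same file to decide how many
# _000X.nii.gz inputs each case must provide.
with open("weight/fold_0/plans.pkl", "rb") as f:
    plans = pickle.load(f)
print("num_modalities:", plans["num_modalities"])  # 2 when both CT modalities are used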
2 | -------------------------------------------------------------------------------- /Docker_tutorial/SegRap2023_task2_GTVs_nnUNet_Example/Dockerfile: -------------------------------------------------------------------------------- 1 | # Pull the docker image 2 | # FROM pytorch/pytorch:1.13.0-cuda11.6-cudnn8-devel 3 | 4 | FROM python:3.10-slim 5 | 6 | RUN groupadd -r user && useradd -m --no-log-init -r -g user user 7 | 8 | RUN mkdir -p /opt/app /input /output \ 9 | && chown user:user /opt/app /input /output 10 | 11 | USER user 12 | WORKDIR /opt/app 13 | 14 | ENV PATH="/home/user/.local/bin:${PATH}" 15 | COPY --chown=user:user requirements.txt /opt/app/ 16 | 17 | RUN python -m pip install --user -U pip && python -m pip install --user pip-tools 18 | RUN python -m piptools sync requirements.txt 19 | 20 | # RUN python -m pip install --user -r requirements.txt 21 | RUN python -m pip install --user torch==1.12.0+cu116 torchvision==0.13.0+cu116 torchaudio==0.12.0 --extra-index-url https://download.pytorch.org/whl/cu116 22 | RUN python -m pip install --no-deps --user nnunet==1.7.1 23 | 24 | COPY --chown=user:user process.py /opt/app/ 25 | COPY --chown=user:user inference_code.py /opt/app/ 26 | COPY --chown=user:user post_processing.py /opt/app/ 27 | # COPY --chown=user:user images/ /opt/app/images/ 28 | COPY --chown=user:user weight/ /opt/app/weight/ 29 | # COPY --chown=user:user output/ /opt/app/ 30 | 31 | 32 | ENTRYPOINT [ "python", "-m", "process" ] 33 | -------------------------------------------------------------------------------- /Docker_tutorial/SegRap2023_task2_GTVs_nnUNet_Example/README.md: -------------------------------------------------------------------------------- 1 | # SegRap2023_GTVs_SegmentationContainer Algorithm 2 | 3 | The source code for the algorithm container for 4 | SegRap2023_GTVs_SegmentationContainer, generated with 5 | evalutils version 0.4.2 6 | using Python 3.10. 7 | -------------------------------------------------------------------------------- /Docker_tutorial/SegRap2023_task2_GTVs_nnUNet_Example/build.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | SCRIPTPATH="$( cd "$(dirname "$0")" ; pwd -P )" 3 | 4 | docker build -t segrap2023_gtv_segmentationcontainer "$SCRIPTPATH" 5 | -------------------------------------------------------------------------------- /Docker_tutorial/SegRap2023_task2_GTVs_nnUNet_Example/export.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | ./build.sh 4 | 5 | docker save segrap2023_gtv_segmentationcontainer | gzip -c > SegRap2023_GTV_SegmentationContainer.tar.gz 6 | -------------------------------------------------------------------------------- /Docker_tutorial/SegRap2023_task2_GTVs_nnUNet_Example/images/images/head-neck-contrast-enhanced-ct/data.txt: -------------------------------------------------------------------------------- 1 | Download a contrast-enhanced image from the provided links for local testing on your machine: [GoogleDrive](https://drive.google.com/file/d/17hJz9hQ1sajsW0aEgmiydvL9bVchqipr/view), [BaiduNetDisk](https://pan.baidu.com/s/1lwGENM9R7z3791FxQoy7fQ?pwd=2023).
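Because load_inputs() in process.py simply takes the first file it finds in each modality folder, a short pre-flight check that each folder holds exactly one .mha can save a failed container run. A minimal sketch, assuming the test images were placed in the images/images/ tree shown above:

import os

# process.py reads os.listdir(...)[0], so each folder must contain exactly one image.
for sub in ("images/images/head-neck-ct", "images/images/head-neck-contrast-enhanced-ct"):
    files = [f for f in os.listdir(sub) if f.endswith(".mha")]
    print(sub, "->", files)
    assert len(files) == 1, f"expected exactly one .mha in {sub}"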
-------------------------------------------------------------------------------- /Docker_tutorial/SegRap2023_task2_GTVs_nnUNet_Example/images/images/head-neck-ct/data.txt: -------------------------------------------------------------------------------- 1 | Download a non-contrast image from the provided links for local testing on your machine: [GoogleDrive](https://drive.google.com/file/d/17hJz9hQ1sajsW0aEgmiydvL9bVchqipr/view), [BaiduNetDisk](https://pan.baidu.com/s/1lwGENM9R7z3791FxQoy7fQ?pwd=2023). -------------------------------------------------------------------------------- /Docker_tutorial/SegRap2023_task2_GTVs_nnUNet_Example/inference_code.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | o_path = os.getcwd() 4 | sys.path.append(o_path) 5 | import shutil 6 | from multiprocessing import Pool 7 | 8 | import numpy as np 9 | import torch 10 | from copy import deepcopy 11 | from batchgenerators.utilities.file_and_folder_operations import * 12 | from nnunet.inference.predict import preprocess_multithreaded 13 | from nnunet.inference.segmentation_export import save_segmentation_nifti_from_softmax 14 | from nnunet.postprocessing.connected_components import load_postprocessing, load_remove_save 15 | from nnunet.training.model_restore import load_model_and_checkpoint_files 16 | from torch import cuda 17 | from torch.nn import functional as F 18 | 19 | 20 | def predict_cases_segrap2023(model, list_of_lists, output_filenames, folds, save_npz=False, num_threads_preprocessing=6, 21 | num_threads_nifti_save=2, segs_from_prev_stage=None, do_tta=False, 22 | overwrite_existing=False, step_size=0.5, checkpoint_name="model_final_checkpoint", 23 | disable_postprocessing: bool = False): 24 | assert len(list_of_lists) == len(output_filenames) 25 | if segs_from_prev_stage is not None: 26 | assert len(segs_from_prev_stage) == len(output_filenames) 27 | 28 | pool = Pool(num_threads_nifti_save) 29 | results = [] 30 | 31 | cleaned_output_files = [] 32 | for o in output_filenames: 33 | dr, f = os.path.split(o) 34 | if len(dr) > 0: 35 | maybe_mkdir_p(dr) 36 | if not f.endswith(".nii.gz"): 37 | f, _ = os.path.splitext(f) 38 | f = f + ".nii.gz" 39 | cleaned_output_files.append(join(dr, f)) 40 | 41 | if not overwrite_existing: 42 | print("number of cases:", len(list_of_lists)) 43 | # if save_npz=True then we should also check for missing npz files 44 | not_done_idx = [i for i, j in enumerate(cleaned_output_files) if 45 | (not isfile(j)) or (save_npz and not isfile(j[:-7] + '.npz'))] 46 | 47 | cleaned_output_files = [cleaned_output_files[i] for i in not_done_idx] 48 | print(list_of_lists) 49 | list_of_lists = [list_of_lists[i] for i in not_done_idx] 50 | if segs_from_prev_stage is not None: 51 | segs_from_prev_stage = [segs_from_prev_stage[i] 52 | for i in not_done_idx] 53 | 54 | print("number of cases that still need to be predicted:", 55 | len(cleaned_output_files)) 56 | 57 | print("emptying cuda cache") 58 | torch.cuda.empty_cache() 59 | 60 | print("loading parameters for folds,", folds) 61 | trainer, params = load_model_and_checkpoint_files(model, folds, mixed_precision=True, 62 | checkpoint_name=checkpoint_name) 63 | 64 | print("starting preprocessing generator") 65 | preprocessing = preprocess_multithreaded(trainer, list_of_lists, cleaned_output_files, num_threads_preprocessing, 66 | segs_from_prev_stage) 67 | print("starting prediction...") 68 | all_output_files = [] 69 | with torch.no_grad(): 70 | for preprocessed in preprocessing: 71 |
output_filename, (d, dct) = preprocessed 72 | all_output_files.append(output_filename) 73 | if isinstance(d, str): 74 | data = np.load(d) 75 | os.remove(d) 76 | d = data 77 | 78 | # we need to be able to del it if things fail (just in case) 79 | softmax = None 80 | 81 | try: 82 | print("predicting", output_filename) 83 | print(f"attempting all_in_gpu {True}") 84 | trainer.load_checkpoint_ram(params[0], False) 85 | softmax = trainer.predict_preprocessed_data_return_seg_and_softmax( 86 | d, do_mirroring=do_tta, mirror_axes=trainer.data_aug_params[ 87 | 'mirror_axes'], use_sliding_window=True, 88 | step_size=step_size, use_gaussian=True, all_in_gpu=True, 89 | mixed_precision=True)[1] 90 | for p in params[1:]: 91 | trainer.load_checkpoint_ram(p, False) 92 | softmax += trainer.predict_preprocessed_data_return_seg_and_softmax( 93 | d, do_mirroring=do_tta, mirror_axes=trainer.data_aug_params[ 94 | 'mirror_axes'], use_sliding_window=True, 95 | step_size=step_size, use_gaussian=True, all_in_gpu=True, 96 | mixed_precision=True)[1] 97 | except RuntimeError: # out of gpu memory 98 | del softmax 99 | cuda.empty_cache() 100 | print( 101 | f"\nGPU AGGREGATION FAILED FOR CASE {os.path.basename(output_filename)} DUE TO OUT OF MEMORY, falling back to all_in_gpu False\n") 102 | trainer.load_checkpoint_ram(params[0], False) 103 | softmax = trainer.predict_preprocessed_data_return_seg_and_softmax( 104 | d, do_mirroring=do_tta, mirror_axes=trainer.data_aug_params[ 105 | 'mirror_axes'], use_sliding_window=True, 106 | step_size=step_size, use_gaussian=True, all_in_gpu=False, 107 | mixed_precision=True)[1] 108 | 109 | for p in params[1:]: 110 | trainer.load_checkpoint_ram(p, False) 111 | softmax += trainer.predict_preprocessed_data_return_seg_and_softmax( 112 | d, do_mirroring=do_tta, mirror_axes=trainer.data_aug_params[ 113 | 'mirror_axes'], use_sliding_window=True, 114 | step_size=step_size, use_gaussian=True, all_in_gpu=False, 115 | mixed_precision=True)[1] 116 | cuda.empty_cache() 117 | 118 | if len(params) > 1: 119 | softmax /= len(params) 120 | 121 | transpose_forward = trainer.plans.get('transpose_forward') 122 | if transpose_forward is not None: 123 | transpose_backward = trainer.plans.get('transpose_backward') 124 | # softmax = softmax.transpose([0] + [i + 1 for i in transpose_backward]) 125 | 126 | # resampling linearly on GPU 127 | torch.cuda.empty_cache() 128 | target_shape = dct.get('size_after_cropping') 129 | target_shape = [target_shape[i] for i in transpose_forward] 130 | if not isinstance(softmax, torch.Tensor): 131 | softmax = torch.from_numpy(softmax) 132 | try: 133 | with torch.no_grad(): 134 | softmax_resampled = torch.zeros((softmax.shape[0], *target_shape), dtype=torch.half, 135 | device='cuda:0') 136 | if not softmax.device == torch.device('cuda:0'): 137 | softmax_gpu = softmax.to(torch.device('cuda:0')) 138 | else: 139 | softmax_gpu = softmax 140 | for c in range(len(softmax)): 141 | softmax_resampled[c] = \ 142 | F.interpolate( 143 | softmax_gpu[c][None, None], size=target_shape, mode='trilinear')[0, 0] 144 | del softmax, softmax_gpu 145 | softmax_resampled = softmax_resampled.cpu().numpy() 146 | except RuntimeError: 147 | # gpu failed, try CPU 148 | print( 149 | f"\nGPU RESAMPLING FAILED FOR CASE {os.path.basename(output_filename)} DUE TO OUT OF MEMORY, falling back to CPU\n") 150 | 151 | if not softmax.device == torch.device('cpu'): 152 | softmax_cpu = softmax.to(torch.device('cpu')).float() 153 | else: 154 | softmax_cpu = softmax 155 | 156 | torch.cuda.empty_cache() 157 | with
torch.no_grad(): 158 | softmax_resampled = torch.zeros( 159 | (softmax.shape[0], *target_shape)) 160 | # depending on where we crash this has already been converted or not 161 | if not isinstance(softmax, torch.Tensor): 162 | softmax = torch.from_numpy(softmax) 163 | for c in range(len(softmax)): 164 | softmax_resampled[c] = \ 165 | F.interpolate( 166 | softmax_cpu[c][None, None], size=target_shape, mode='trilinear')[0, 0] 167 | del softmax, softmax_cpu 168 | softmax_resampled = softmax_resampled.half().numpy() 169 | torch.cuda.empty_cache() 170 | ##################################### 171 | softmax_resampled = softmax_resampled.transpose( 172 | [0] + [i + 1 for i in transpose_backward]) 173 | 174 | if save_npz: 175 | npz_file = output_filename[:-7] + ".npz" 176 | else: 177 | npz_file = None 178 | 179 | if hasattr(trainer, 'regions_class_order'): 180 | region_class_order = trainer.regions_class_order 181 | else: 182 | region_class_order = None 183 | 184 | """There is a problem with python process communication that prevents us from communicating objects 185 | larger than 2 GB between processes (basically when the length of the pickle string that will be sent is 186 | communicated by the multiprocessing.Pipe object then the placeholder (I think) does not allow for long 187 | enough strings (lol). This could be fixed by changing i to l (for long) but that would require manually 188 | patching system python code. We circumvent that problem here by saving softmax_pred to a npy file that will 189 | then be read (and finally deleted) by the Process. save_segmentation_nifti_from_softmax can take either 190 | filename or np.ndarray and will handle this automatically""" 191 | bytes_per_voxel = 4 192 | print( 193 | f'softmax shape {softmax_resampled.shape}, softmax dtype {softmax_resampled.dtype}') 194 | if True: 195 | # if all_in_gpu then the return value is half (float16) 196 | bytes_per_voxel = 2 197 | # * 0.85 just to be save 198 | if np.prod(softmax_resampled.shape) > (2e9 / bytes_per_voxel * 0.85): 199 | print( 200 | "This output is too large for python process-process communication. Saving output temporarily to disk") 201 | np.save(output_filename[:-7] + ".npy", softmax_resampled) 202 | softmax_resampled = output_filename[:-7] + ".npy" 203 | 204 | results.append(pool.starmap_async(save_segmentation_nifti_from_softmax, 205 | ((softmax_resampled, output_filename, dct, 1, region_class_order, 206 | None, None, 207 | npz_file, None, False, 1),) 208 | )) 209 | 210 | print("inference done. Now waiting for the segmentation export to finish...") 211 | _ = [i.get() for i in results] 212 | # now apply postprocessing 213 | # first load the postprocessing properties if they are present. Else raise a well visible warning 214 | if not disable_postprocessing: 215 | results = [] 216 | pp_file = join(model, "postprocessing.json") 217 | if isfile(pp_file): 218 | print("postprocessing...") 219 | shutil.copy(pp_file, os.path.abspath( 220 | os.path.dirname(output_filenames[0]))) 221 | # for_which_classes stores for which of the classes everything but the largest connected component needs to be 222 | # removed 223 | for_which_classes, min_valid_obj_size = load_postprocessing( 224 | pp_file) 225 | results.append(pool.starmap_async(load_remove_save, 226 | zip(output_filenames, output_filenames, 227 | [for_which_classes] * 228 | len(output_filenames), 229 | [min_valid_obj_size] * len(output_filenames)))) 230 | _ = [i.get() for i in results] 231 | else: 232 | print("WARNING! 
Cannot run postprocessing because the postprocessing file is missing. Make sure to run " 233 | "consolidate_folds in the output folder of the model first!\nThe folder you need to run this in is " 234 | "%s" % model) 235 | 236 | pool.close() 237 | pool.join() 238 | 239 | 240 | def check_input_folder_and_return_caseIDs(input_folder, expected_num_modalities): 241 | print("This model expects %d input modalities for each image" % 242 | expected_num_modalities) 243 | files = subfiles(input_folder, suffix=".nii.gz", join=False, sort=True) 244 | 245 | maybe_case_ids = np.unique([i[:-12] for i in files]) 246 | 247 | remaining = deepcopy(files) 248 | missing = [] 249 | 250 | assert len( 251 | files) > 0, "input folder did not contain any images (expected to find .nii.gz file endings)" 252 | 253 | # now check if all required files are present and that no unexpected files are remaining 254 | for c in maybe_case_ids: 255 | for n in range(expected_num_modalities): 256 | expected_output_file = c + "_%04.0d.nii.gz" % n 257 | if not isfile(join(input_folder, expected_output_file)): 258 | missing.append(expected_output_file) 259 | else: 260 | remaining.remove(expected_output_file) 261 | 262 | print("Found %d unique case ids, here are some examples:" % len(maybe_case_ids), 263 | np.random.choice(maybe_case_ids, min(len(maybe_case_ids), 10))) 264 | print("If they don't look right, make sure to double check your filenames. They must end with _0000.nii.gz etc") 265 | 266 | if len(remaining) > 0: 267 | print("found %d unexpected remaining files in the folder. Here are some examples:" % len(remaining), 268 | np.random.choice(remaining, min(len(remaining), 10))) 269 | 270 | if len(missing) > 0: 271 | print("Some files are missing:") 272 | print(missing) 273 | raise RuntimeError("missing files in input_folder") 274 | 275 | return maybe_case_ids 276 | 277 | 278 | def predict_from_folder_segrap2023(model: str, input_folder: str, output_folder: str, folds: 0, part_id:0, num_parts:1): 279 | """ 280 | here we use the standard naming scheme to generate list_of_lists and output_files needed by predict_cases 281 | 282 | :param model: 283 | :param input_folder: 284 | :param output_folder: 285 | :param folds: 286 | :param save_npz: 287 | :param num_threads_preprocessing: 288 | :param num_threads_nifti_save: 289 | :param lowres_segmentations: 290 | :param part_id: 291 | :param num_parts: 292 | :param tta: 293 | :param mixed_precision: 294 | :param overwrite_existing: if not None then it will be overwritten with whatever is in there. 
278 | def predict_from_folder_segrap2023(model: str, input_folder: str, output_folder: str, folds: int = 0, part_id: int = 0, num_parts: int = 1):
279 |     """
280 |     here we use the standard naming scheme to generate list_of_lists and output_files needed by predict_cases
281 | 
282 |     :param model: folder with the trained model; must contain fold_{folds}/plans.pkl
283 |     :param input_folder: folder with the input images, named <case>_XXXX.nii.gz
284 |     :param output_folder: folder the predicted segmentations are written to
285 |     :param folds: which fold of the trained model to use
286 |     :param part_id: index of this worker when the cases are split over num_parts workers
287 |     :param num_parts: total number of parallel workers
288 |     :return:
289 |     """
290 |     maybe_mkdir_p(output_folder)
291 |     # shutil.copy(join(model, 'plans.pkl'), output_folder)
292 | 
293 |     # assert isfile(join(model, "plans.pkl")
294 |     # ), "Folder with saved model weights must contain a plans.pkl file"
295 |     expected_num_modalities = load_pickle(
296 |         join(model+"/fold_{}".format(folds), "plans.pkl"))['num_modalities']
297 | 
298 |     # check input folder integrity
299 |     case_ids = check_input_folder_and_return_caseIDs(
300 |         input_folder, expected_num_modalities)
301 | 
302 |     output_files = [join(output_folder, i + ".nii.gz") for i in case_ids]
303 |     all_files = subfiles(input_folder, suffix=".nii.gz", join=False, sort=True)
304 |     list_of_lists = [[join(input_folder, i) for i in all_files if i.startswith(j) and
305 |                       len(i) == (len(j) + 12)] for j in case_ids]
306 | 
307 |     return predict_cases_segrap2023(model, list_of_lists[part_id::num_parts], output_files[part_id::num_parts], folds=folds)
308 | 
309 | 
310 | # predict_from_folder_segrap2023("weight/", "images/", "test/", 0, 0, 1)
311 | 
--------------------------------------------------------------------------------
/Docker_tutorial/SegRap2023_task2_GTVs_nnUNet_Example/post_processing.py:
--------------------------------------------------------------------------------
1 | import SimpleITK as sitk
2 | import numpy as np
3 | import shutil
4 | import glob
5 | import os
6 | import sys
7 | o_path = os.getcwd()
8 | sys.path.append(o_path)
9 | 
10 | 
11 | segrap_subset_task002 = {
12 |     "GTVp": 1,
13 |     "GTVnd": 2}
14 | 
15 | 
16 | def nii2array(path):
17 |     mask_itk_ref = sitk.ReadImage(path)
18 |     mask_arr_ref = sitk.GetArrayFromImage(mask_itk_ref)
19 |     return mask_arr_ref
20 | 
21 | 
22 | def merge_multi_class_to_one(input_arr, classes_index=None):
23 |     new_arr = np.zeros_like(input_arr)
24 |     for cls_ind in classes_index:
25 |         new_arr[input_arr == cls_ind] = 1
26 |     return new_arr
27 | 
28 | 
29 | def convert_one_hot_label_to_multi_lesions(one_hot_label_path, save_path):
30 |     patient_results = []
31 |     # read the one-hot prediction once; it is the same file for every lesion
32 |     one_hot_label_arr = nii2array(one_hot_label_path)
33 |     one_hot_label_itk = sitk.ReadImage(one_hot_label_path)
34 |     spacing = one_hot_label_itk.GetSpacing()
35 |     for lesion in segrap_subset_task002.keys():
36 |         new_arr = np.zeros_like(one_hot_label_arr)
37 |         new_arr[one_hot_label_arr == segrap_subset_task002[lesion]] = 1
38 |         patient_results.append(new_arr)
39 |     lesions = []
40 |     for t in patient_results:
41 |         lesions.append(sitk.GetImageFromArray(t, False))
42 |     output_itk = sitk.JoinSeries(lesions)
43 |     new_spacing = (spacing[0], spacing[1], spacing[2], 1)
44 |     output_itk.SetSpacing(new_spacing)
45 |     print(output_itk.GetSize())
46 |     sitk.WriteImage(output_itk, save_path, True)
47 |     print("Conversion Finished")
48 | 
49 | 
50 | # if __name__ == "__main__":
51 | #     for patient in glob.glob("test/*"):
52 | #         new_path = "test/{}".format(
53 | #             patient.split("/")[-1].replace("_cropped.nii.gz", ""))
54 | #         if not os.path.exists(new_path):
55 | #             os.mkdir(new_path)
56 | #         convert_one_hot_label_to_multi_lesions(patient, new_path)
57 | #     print("Converted all predictions to single lesion files")
--------------------------------------------------------------------------------
/Docker_tutorial/SegRap2023_task2_GTVs_nnUNet_Example/process.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from post_processing import 
convert_one_hot_label_to_multi_lesions
3 | from inference_code import predict_from_folder_segrap2023
4 | from evalutils.validators import (
5 |     UniquePathIndicesValidator,
6 |     UniqueImagesValidator,
7 | )
8 | from evalutils import SegmentationAlgorithm
9 | import numpy as np
10 | import SimpleITK
11 | import os
12 | import sys
13 | o_path = os.getcwd()
14 | print(o_path)
15 | sys.path.append(o_path)
16 | 
17 | 
18 | class Customalgorithm():  # SegmentationAlgorithm is not inherited in this class anymore
19 |     def __init__(self):
20 |         """
21 |         Do not modify the `self.input_dir` and `self.output_dir`.
22 |         (Check https://grand-challenge.org/algorithms/interfaces/)
23 |         """
24 |         self.input_dir = "/input/"
25 |         self.output_dir = "/output/images/gross-tumor-volume-segmentation/"
26 | 
27 |         """
28 |         Store the validation/test data and predictions into the `self.nii_path` and `self.result_path`, respectively.
29 |         Put your model and pkl files into the `self.weight`.
30 |         """
31 |         self.nii_path = '/opt/app/nnUNet_raw_data_base/nnUNet_raw_data/Task002_SegRap2023/imagesTs'
32 |         self.result_path = '/opt/app/nnUNet_raw_data_base/nnUNet_raw_data/Task002_SegRap2023/result'
33 |         self.nii_seg_file = 'SegRap2023_002.nii.gz'
34 |         self.weight = "./weight/"
35 |         if not os.path.exists(self.nii_path):
36 |             os.makedirs(self.nii_path, exist_ok=True)
37 |         if not os.path.exists(self.result_path):
38 |             os.makedirs(self.result_path, exist_ok=True)
39 |         pass
40 | 
41 |     def convert_mha_to_nii(self, mha_input_path, nii_out_path):  # nnUNet specific
42 |         img = SimpleITK.ReadImage(mha_input_path)
43 |         print(img.GetSize())
44 |         print(img.GetSpacing())
45 |         SimpleITK.WriteImage(img, nii_out_path, True)
46 | 
47 |     def convert_nii_to_mha(self, nii_input_path, mha_out_path):  # nnUNet specific
48 |         img = SimpleITK.ReadImage(nii_input_path)
49 |         print(img.GetSize())
50 |         print(img.GetSpacing())
51 |         SimpleITK.WriteImage(img, mha_out_path, True)
52 | 
53 |     def check_gpu(self):
54 |         """
55 |         Check if a GPU is available. Note that Grand Challenge only provides one GPU.
56 |         """
57 |         print('Checking GPU availability')
58 |         is_available = torch.cuda.is_available()
59 |         print('Available: ' + str(is_available))
60 |         print(f'Device count: {torch.cuda.device_count()}')
61 |         if is_available:
62 |             print(f'Current device: {torch.cuda.current_device()}')
63 |             print('Device name: ' + torch.cuda.get_device_name(0))
64 |             print('Device memory: ' +
65 |                   str(torch.cuda.get_device_properties(0).total_memory))
66 | 
67 |     def load_inputs(self):  # use two modalities input data
68 |         """
69 |         Read input data (two modalities) from `self.input_dir` (/input/).
70 |         Please do not modify the path for CT and contrast-CT images.
71 |         """
72 |         ct_mha = os.listdir(os.path.join(
73 |             self.input_dir, 'images/head-neck-ct/'))[0]
74 |         ctc_mha = os.listdir(os.path.join(
75 |             self.input_dir, 'images/head-neck-contrast-enhanced-ct/'))[0]
76 |         uuid = os.path.splitext(ct_mha)[0]
77 | 
78 |         """
79 |         if your model was based on the nnUNet baseline and used two modalities as inputs,
80 |         please convert the input data into '_0000.nii.gz' and '_0001.nii.gz' using the following code.
81 |         """
82 |         self.convert_mha_to_nii(os.path.join(self.input_dir, 'images/head-neck-ct/', ct_mha),
83 |                                 os.path.join(self.nii_path, 'SegRap2023_002_0000.nii.gz'))
84 |         self.convert_mha_to_nii(os.path.join(self.input_dir, 'images/head-neck-contrast-enhanced-ct/', ctc_mha),
85 |                                 os.path.join(self.nii_path, 'SegRap2023_002_0001.nii.gz'))
86 | 
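As an aside on the conversion step above: `convert_mha_to_nii` works because SimpleITK carries size, spacing, origin and direction through a plain read/write round trip. A standalone sketch of the same round trip (paths are placeholders):

```python
import SimpleITK as sitk

def convert_image(src_path, dst_path):
    # SimpleITK picks the reader/writer from the file extension (.mha, .nii.gz, ...)
    img = sitk.ReadImage(src_path)
    print(img.GetSize(), img.GetSpacing())
    sitk.WriteImage(img, dst_path, True)  # True = write compressed

# convert_image("input_scan.mha", "SegRap2023_002_0000.nii.gz")
```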
87 |         # Check the validation/test data exist.
88 |         print(os.listdir('/opt/app/nnUNet_raw_data_base/nnUNet_raw_data/Task002_SegRap2023/imagesTs'))
89 |         return uuid
90 | 
91 |     # def load_inputs(self):  # only use non-contrast-CT images as input
92 |     #     """
93 |     #     Read input data (non-contrast-CT images) from `self.input_dir` (/input/).
94 |     #     Please do not modify the path for non-contrast-CT images.
95 |     #     """
96 |     #     ct = os.listdir(os.path.join(self.input_dir, 'images/head-neck-ct/'))[0]
97 |     #     uuid = os.path.splitext(ct)[0]
98 | 
99 |     #     """
100 |     #     if your model was based on the nnUNet baseline and only used non-contrast-CT images as inputs,
101 |     #     please convert the input data into '_0000.nii.gz' using the following code.
102 |     #     """
103 |     #     self.convert_mha_to_nii(os.path.join(self.input_dir, 'images/head-neck-ct/', ct),
104 |     #                             os.path.join(self.nii_path, 'SegRap2023_002_0000.nii.gz'))
105 |     #     # Check the validation/test data exist.
106 |     #     print(os.listdir('/opt/app/nnUNet_raw_data_base/nnUNet_raw_data/Task002_SegRap2023/imagesTs'))
107 | 
108 |     #     return uuid
109 | 
110 | 
111 |     # def load_inputs(self):  # only use contrast-CT images as input
112 |     #     """
113 |     #     Read input data (single contrast-CT images) from `self.input_dir` (/input/).
114 |     #     Please do not modify the path for contrast-CT images.
115 |     #     """
116 |     #     ct = os.listdir(os.path.join(self.input_dir, 'images/head-neck-contrast-enhanced-ct/'))[0]
117 |     #     uuid = os.path.splitext(ct)[0]
118 | 
119 |     #     """
120 |     #     if your model was based on the nnUNet baseline and only used contrast-CT images as inputs,
121 |     #     please convert the input data into '_0000.nii.gz' using the following code.
122 |     #     """
123 |     #     self.convert_mha_to_nii(os.path.join(self.input_dir, 'images/head-neck-contrast-enhanced-ct/', ct),
124 |     #                             os.path.join(self.nii_path, 'SegRap2023_002_0000.nii.gz'))
125 | 
126 |     #     # Check the validation/test data exist.
127 |     #     print(os.listdir('/opt/app/nnUNet_raw_data_base/nnUNet_raw_data/Task002_SegRap2023/imagesTs'))
128 | 
129 |     #     return uuid
130 | 
131 | 
132 |     def write_outputs(self, uuid):
133 |         """
134 |         If you trained with a one-hot label (2 classes), convert the 2-class prediction into the 2 GTV predictions with `convert_one_hot_label_to_multi_lesions`.
135 |         Otherwise, stack your 2 GTV predictions along the first channel; the mapping between channel index and GTV name is:
136 |         {0: 'GTVp',
137 |          1: 'GTVnd'}
138 |         Please ensure channel 0 is the GTVp prediction and channel 1 is the GTVnd prediction,
139 |         and that the final prediction array has shape [2, *image_shape].
140 |         The predictions must be saved in `self.output_dir` (/output/). Please do not modify the path or the suffix (.mha) used for saving the prediction.
141 |         """
142 |         os.makedirs(os.path.dirname(self.output_dir), exist_ok=True)
143 |         convert_one_hot_label_to_multi_lesions(os.path.join(
144 |             self.result_path, self.nii_seg_file), os.path.join(self.output_dir, uuid + ".mha"))
145 |         print('Output written to: ' +
146 |               os.path.join(self.output_dir, uuid + ".mha"))
147 | 
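If a model produces two separate binary masks instead of a one-hot label map, the contract described in `write_outputs` can also be met directly. A hedged sketch (the array names are hypothetical; it assumes the masks are numpy arrays in (z, y, x) order and `spacing` is the SimpleITK (x, y, z) spacing):

```python
import numpy as np
import SimpleITK as sitk

def write_gtv_stack(gtvp_arr, gtvnd_arr, spacing, save_path):
    # channel 0 must be GTVp, channel 1 must be GTVnd
    channels = [sitk.GetImageFromArray(a.astype(np.uint8), isVector=False)
                for a in (gtvp_arr, gtvnd_arr)]
    out = sitk.JoinSeries(channels)  # adds the 4th (channel) dimension
    out.SetSpacing((spacing[0], spacing[1], spacing[2], 1))
    sitk.WriteImage(out, save_path, True)  # compressed .mha, as required
```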
148 |     def predict(self):
149 |         """
150 |         Load the model and checkpoint, and generate the predictions. You can replace this part with your own model.
151 |         """
152 |         predict_from_folder_segrap2023(
153 |             self.weight, self.nii_path, self.result_path, 0, 0, 1)
154 |         print("nnUNet segmentation done!")
155 |         if not os.path.exists(os.path.join(self.result_path, self.nii_seg_file)):
156 |             print('waiting for nnUNet segmentation to be created')
157 | 
158 |         import time
159 |         while not os.path.exists(os.path.join(self.result_path, self.nii_seg_file)):
160 |             print('.', end='')
161 |             time.sleep(5)  # sleep instead of busy-waiting while the file is written
162 |         # since the nnUNet_predict call is split into prediction and postprocessing, a premature exit code can be received while the segmentation file is not yet written. This loop ensures that all spawned subprocesses are finished before we continue.
163 |         print('Prediction finished')
164 | 
165 |     def post_process(self):
166 |         self.check_gpu()
167 |         print('Start processing')
168 |         uuid = self.load_inputs()
169 |         print('Start prediction')
170 |         self.predict()
171 |         print('Start output writing')
172 |         self.write_outputs(uuid)
173 | 
174 |     def process(self):
175 |         """
176 |         Read inputs from /input, process with your algorithm and write to /output.
177 |         """
178 |         self.post_process()
179 | 
180 | 
181 | if __name__ == "__main__":
182 |     Customalgorithm().process()
--------------------------------------------------------------------------------
/Docker_tutorial/SegRap2023_task2_GTVs_nnUNet_Example/requirements.in:
--------------------------------------------------------------------------------
1 | 
2 | evalutils==0.4.2
--------------------------------------------------------------------------------
/Docker_tutorial/SegRap2023_task2_GTVs_nnUNet_Example/requirements.txt:
--------------------------------------------------------------------------------
1 | #
2 | # This file is autogenerated by pip-compile with Python 3.10
3 | # by the following command:
4 | #
5 | #    pip-compile --resolver=backtracking
6 | #
7 | arrow==1.2.3
8 |     # via jinja2-time
9 | binaryornot==0.4.4
10 |     # via cookiecutter
11 | build==0.10.0
12 |     # via pip-tools
13 | certifi==2023.5.7
14 |     # via requests
15 | chardet==5.1.0
16 |     # via binaryornot
17 | charset-normalizer==3.1.0
18 |     # via requests
19 | click==8.1.3
20 |     # via
21 |     #   cookiecutter
22 |     #   evalutils
23 |     #   pip-tools
24 | cookiecutter==2.1.1
25 |     # via evalutils
26 | evalutils==0.4.2
27 |     # via -r requirements.in
28 | idna==3.4
29 |     # via requests
30 | imageio[tifffile]==2.31.1
31 |     # via evalutils
32 | jinja2==3.1.2
33 |     # via
34 |     #   cookiecutter
35 |     #   jinja2-time
36 | jinja2-time==0.2.0
37 |     # via cookiecutter
38 | joblib==1.3.1
39 |     # via scikit-learn
40 | markupsafe==2.1.3
41 |     # via jinja2
42 | numpy==1.25.0
43 |     # via
44 |     #   evalutils
45 |     #   imageio
46 |     #   pandas
47 |     #   scikit-learn
48 |     #   scipy
49 |     #   tifffile
50 | packaging==23.1
51 |     # via build
52 | pandas==2.0.3
53 |     # via evalutils
54 | pillow==10.0.0
55 |     # via imageio
56 | pip-tools==6.14.0
57 |     # via evalutils
58 | pyproject-hooks==1.0.0
59 |     # via build
60 | python-dateutil==2.8.2
61 |     # via
62 |     #   arrow
63 |     #   pandas
64 | python-slugify==8.0.1
65 |     # via cookiecutter
66 | pytz==2023.3
67 |     # via pandas
68 | pyyaml==6.0
69 |     # via cookiecutter
70 | requests==2.31.0
71 |     # via cookiecutter
72 | scikit-learn==1.3.0
73 |     # via evalutils
74 | scipy==1.11.1
75 |     # via
76 |     #   evalutils
77 |     #   scikit-learn
78 | simpleitk==2.2.1
79 |     # via evalutils
80 | six==1.16.0
81 |     # via python-dateutil
82 | text-unidecode==1.3
83 |     # via python-slugify
84 | threadpoolctl==3.1.0
85 |     # via scikit-learn
86 | tifffile==2023.4.12
87 |     # via imageio
88 | tomli==2.0.1
89 |     # via
90 |     #   build
91 |     #   pip-tools
92 
| # pyproject-hooks 93 | tzdata==2023.3 94 | # via pandas 95 | urllib3==2.0.3 96 | # via requests 97 | wheel==0.40.0 98 | # via pip-tools 99 | batchgenerators==0.25 100 | matplotlib 101 | tqdm 102 | dicom2nifti 103 | scikit-image 104 | medpy 105 | nibabel 106 | # The following packages are considered to be unsafe in a requirements file: 107 | # pip 108 | # setuptools 109 | -------------------------------------------------------------------------------- /Docker_tutorial/SegRap2023_task2_GTVs_nnUNet_Example/test.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | SCRIPTPATH="$( cd "$(dirname "$0")" ; pwd -P )" 4 | 5 | ./build.sh 6 | 7 | VOLUME_SUFFIX=$(dd if=/dev/urandom bs=32 count=1 | md5sum | cut --delimiter=' ' --fields=1) 8 | # Maximum is currently 30g, configurable in your algorithm image settings on grand challenge 9 | MEM_LIMIT="32g" 10 | 11 | docker volume create segrap2023_gtv_segmentationcontainer-output-$VOLUME_SUFFIX 12 | 13 | echo $VOLUME_SUFFIX 14 | # Do not change any of the parameters to docker run, these are fixed 15 | docker run --rm --gpus all \ 16 | --memory="${MEM_LIMIT}" \ 17 | --memory-swap="${MEM_LIMIT}" \ 18 | --network="none" \ 19 | --cap-drop="ALL" \ 20 | --security-opt="no-new-privileges" \ 21 | --shm-size="128m" \ 22 | --pids-limit="256" \ 23 | -v $SCRIPTPATH/images/:/input/ \ 24 | -v segrap2023_gtv_segmentationcontainer-output-$VOLUME_SUFFIX:/output/ \ 25 | segrap2023_gtv_segmentationcontainer 26 | 27 | docker run --rm \ 28 | -v segrap2023_gtv_segmentationcontainer-output-$VOLUME_SUFFIX:/output/ \ 29 | python:3.10-slim ls -al /output/images/gross-tumor-volume-segmentation 30 | 31 | docker run --rm \ 32 | -v segrap2023_gtv_segmentationcontainer-output-$VOLUME_SUFFIX:/output/ \ 33 | python:3.10-slim cat /output/results.json | python -m json.tool 34 | 35 | docker run --rm \ 36 | -v segrap2023_gtv_segmentationcontainer-output-$VOLUME_SUFFIX:/output/ \ 37 | -v $SCRIPTPATH/test/:/input/ \ 38 | python:3.10-slim python -c "import json, sys; f1 = json.load(open('/output/results.json')); f2 = json.load(open('/input/expected_output.json')); sys.exit(f1 != f2);" 39 | 40 | if [ $? -eq 0 ]; then 41 | echo "Tests successfully passed..." 42 | else 43 | echo "Expected output was not found..." 44 | fi 45 | 46 | docker volume rm segrap2023_gtv_segmentationcontainer-output-$VOLUME_SUFFIX 47 | -------------------------------------------------------------------------------- /Docker_tutorial/SegRap2023_task2_GTVs_nnUNet_Example/weight/fold_0/plans.pkl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HiLab-git/SegRap2023/de87c551571a1ee4c7e6dc17e9824c19ddec6f30/Docker_tutorial/SegRap2023_task2_GTVs_nnUNet_Example/weight/fold_0/plans.pkl -------------------------------------------------------------------------------- /Docker_tutorial/SegRap2023_task2_GTVs_nnUNet_Example/weight/fold_0/weight.txt: -------------------------------------------------------------------------------- 1 | Download a trained nnUNet model for your local machine testing [GoogleDrive](https://drive.google.com/file/d/17hJz9hQ1sajsW0aEgmiydvL9bVchqipr/view), [BaiduNetDisk](https://pan.baidu.com/s/1lwGENM9R7z3791FxQoy7fQ?pwd=2023). 
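The example outputs linked below (gtvs_output_example.mha, oars_output_example.mha) are 4-D images built with exactly the stacking convention used throughout this tutorial. A short, purely illustrative sketch of reading one back and splitting it into per-structure 3-D masks:

```python
import SimpleITK as sitk

def split_4d_mha(path):
    img = sitk.ReadImage(path)         # SimpleITK size: (x, y, z, n_structures)
    arr = sitk.GetArrayFromImage(img)  # numpy order is reversed: (n_structures, z, y, x)
    return [arr[c] for c in range(arr.shape[0])]

# channels = split_4d_mha("gtvs_output_example.mha")  # [GTVp mask, GTVnd mask]
```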
2 | 
--------------------------------------------------------------------------------
/Docker_tutorial/gtvs_output_example.mha:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/HiLab-git/SegRap2023/de87c551571a1ee4c7e6dc17e9824c19ddec6f30/Docker_tutorial/gtvs_output_example.mha
--------------------------------------------------------------------------------
/Docker_tutorial/gtvs_output_example.zip:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/HiLab-git/SegRap2023/de87c551571a1ee4c7e6dc17e9824c19ddec6f30/Docker_tutorial/gtvs_output_example.zip
--------------------------------------------------------------------------------
/Docker_tutorial/oars_output_example.mha:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/HiLab-git/SegRap2023/de87c551571a1ee4c7e6dc17e9824c19ddec6f30/Docker_tutorial/oars_output_example.mha
--------------------------------------------------------------------------------
/Docker_tutorial/oars_output_example.zip:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/HiLab-git/SegRap2023/de87c551571a1ee4c7e6dc17e9824c19ddec6f30/Docker_tutorial/oars_output_example.zip
--------------------------------------------------------------------------------
/Docker_tutorial/outputs.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/HiLab-git/SegRap2023/de87c551571a1ee4c7e6dc17e9824c19ddec6f30/Docker_tutorial/outputs.png
--------------------------------------------------------------------------------
/Docker_tutorial/stacked_results_to_4d_mha.py:
--------------------------------------------------------------------------------
1 | import os
2 | import numpy as np
3 | import SimpleITK as sitk
4 | 
5 | # Download and unzip "Docker_tutorial/gtvs_output_example.zip" and "Docker_tutorial/oars_output_example.zip" to run this example.
6 | # The mapping between the structures (OARs or GTVs) and the index of the 4D file (mha); a quick sanity check for such files is sketched below.
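The sanity check referenced in the comment above, as a short illustrative sketch (the function name is hypothetical): it verifies that a produced 4-D file carries one channel per structure before the per-structure mapping dictionaries that follow are applied.

```python
import SimpleITK as sitk

def check_output_channels(path, expected_structures):
    img = sitk.ReadImage(path)
    # a valid submission volume is 4-D with one channel per structure
    assert img.GetDimension() == 4, img.GetDimension()
    assert img.GetSize()[3] == expected_structures, img.GetSize()
    print(path, "OK, size:", img.GetSize())

# check_output_channels("oars_output_example.mha", 45)  # Task 1: 45 OARs
# check_output_channels("gtvs_output_example.mha", 2)   # Task 2: GTVp + GTVnd
```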
7 | oars_mapping_dict = {0: 'Brain', 8 | 1: 'BrainStem', 9 | 2: 'Chiasm', 10 | 3: 'TemporalLobe_L', 11 | 4: 'TemporalLobe_R', 12 | 5: 'Hippocampus_L', 13 | 6: 'Hippocampus_R', 14 | 7: 'Eye_L', 15 | 8: 'Eye_R', 16 | 9: 'Lens_L', 17 | 10: 'Lens_R', 18 | 11: 'OpticNerve_L', 19 | 12: 'OpticNerve_R', 20 | 13: 'MiddleEar_L', 21 | 14: 'MiddleEar_R', 22 | 15: 'IAC_L', 23 | 16: 'IAC_R', 24 | 17: 'TympanicCavity_L', 25 | 18: 'TympanicCavity_R', 26 | 19: 'VestibulSemi_L', 27 | 20: 'VestibulSemi_R', 28 | 21: 'Cochlea_L', 29 | 22: 'Cochlea_R', 30 | 23: 'ETbone_L', 31 | 24: 'ETbone_R', 32 | 25: 'Pituitary', 33 | 26: 'OralCavity', 34 | 27: 'Mandible_L', 35 | 28: 'Mandible_R', 36 | 29: 'Submandibular_L', 37 | 30: 'Submandibular_R', 38 | 31: 'Parotid_L', 39 | 32: 'Parotid_R', 40 | 33: 'Mastoid_L', 41 | 34: 'Mastoid_R', 42 | 35: 'TMjoint_L', 43 | 36: 'TMjoint_R', 44 | 37: 'SpinalCord', 45 | 38: 'Esophagus', 46 | 39: 'Larynx', 47 | 40: 'Larynx_Glottic', 48 | 41: 'Larynx_Supraglot', 49 | 42: 'PharynxConst', 50 | 43: 'Thyroid', 51 | 44: 'Trachea'} 52 | 53 | gtvs_mapping_dict = {0: 'GTVp', 54 | 1: 'GTVnd'} 55 | 56 | 57 | def convert_individual_organs_to_4d_mha(input_dir="/path/of/45organs/folder", output_path="/path/of/4dmha/"): 58 | patient_results = [] 59 | spacing = None 60 | for index in oars_mapping_dict.keys(): 61 | organ_itk = sitk.ReadImage(os.path.join( 62 | input_dir, "{}.nii.gz".format(oars_mapping_dict[index]))) 63 | organ_arr = sitk.GetArrayFromImage(organ_itk) 64 | spacing = organ_itk.GetSpacing() 65 | patient_results.append(organ_arr) 66 | 67 | # the following part is very important, please save your results as follows. 68 | stacked_oars = [] 69 | for each_organ in patient_results: 70 | # isVector must be set to False!!! 71 | stacked_oars.append(sitk.GetImageFromArray(each_organ, isVector=False)) 72 | output_itk = sitk.JoinSeries(stacked_oars) 73 | new_spacing = (spacing[0], spacing[1], spacing[2], 1) 74 | output_itk.SetSpacing(new_spacing) 75 | print(output_itk.GetSize()) 76 | # The last parameter must be True 77 | sitk.WriteImage(output_itk, output_path, True) 78 | print("Conversion Finished") 79 | 80 | 81 | def convert_individual_gtvs_to_4d_mha(input_dir="/path/of/45organs/folder", output_path="/path/of/4dmha/"): 82 | patient_results = [] 83 | spacing = None 84 | for index in gtvs_mapping_dict.keys(): 85 | gtv_itk = sitk.ReadImage(os.path.join( 86 | input_dir, "{}.nii.gz".format(gtvs_mapping_dict[index]))) 87 | gtv_arr = sitk.GetArrayFromImage(gtv_itk) 88 | spacing = gtv_itk.GetSpacing() 89 | patient_results.append(gtv_arr) 90 | # the following part is very important, please save your results as follows. 91 | stacked_gtvs = [] 92 | for each_gtv in patient_results: 93 | # isVector must be set to False!!! 
94 | stacked_gtvs.append(sitk.GetImageFromArray(each_gtv, isVector=False)) 95 | output_itk = sitk.JoinSeries(stacked_gtvs) 96 | new_spacing = (spacing[0], spacing[1], spacing[2], 1) 97 | output_itk.SetSpacing(new_spacing) 98 | print(output_itk.GetSize()) 99 | # The last parameter must be True 100 | sitk.WriteImage(output_itk, output_path, True) 101 | print("Conversion Finished") 102 | 103 | 104 | convert_individual_gtvs_to_4d_mha( 105 | "./gtvs_output_example/", "gtvs_output_example.mha") 106 | convert_individual_organs_to_4d_mha( 107 | "./oars_output_example/", "oars_output_example.mha") 108 | -------------------------------------------------------------------------------- /Eval/SegRap_Task001_DSC_NSD_Eval.py: -------------------------------------------------------------------------------- 1 | import os 2 | from collections import OrderedDict 3 | 4 | import numpy as np 5 | import pandas as pd 6 | import SimpleITK as sitk 7 | from two_evaluation_metrics import dsc, nsd 8 | 9 | 10 | submission_path = '/data_8t/radiology_images/processed/SegRap2023/nnUNetV2_infersVal/task001' 11 | gt_path = '/data_8t/radiology_images/processed/SegRap2023/SegRap2023_Validation_Set_Labels_Cropped' 12 | save_path = '/data_8t/radiology_images/processed/SegRap2023/nnUNetV2_infersVal' 13 | patientnames = os.listdir(submission_path) 14 | task01_submission_result = OrderedDict() 15 | 16 | task01_submission_result['Name'] = list() 17 | 18 | task01_label_tolerance = OrderedDict({ 19 | "Brain": 1, 20 | "BrainStem": 1, 21 | "Chiasm": 1, 22 | "Cochlea_L": 1, 23 | "Cochlea_R": 1, 24 | "Esophagus": 1, 25 | "ETbone_L": 1, 26 | "ETbone_R": 1, 27 | "Eye_L": 1, 28 | "Eye_R": 1, 29 | "Hippocampus_L": 1, 30 | "Hippocampus_R": 1, 31 | "IAC_L": 1, 32 | "IAC_R": 1, 33 | "Larynx": 2, 34 | "Larynx_Glottic": 1, 35 | "Larynx_Supraglot": 1, 36 | "Lens_L": 1, 37 | "Lens_R": 1, 38 | "Mandible_L": 1, 39 | "Mandible_R": 1, 40 | "Mastoid_L": 1, 41 | "Mastoid_R": 1, 42 | "MiddleEar_L": 1, 43 | "MiddleEar_R": 1, 44 | "OpticNerve_L": 1, 45 | "OpticNerve_R": 1, 46 | "OralCavity": 3, 47 | "Parotid_L": 1, 48 | "Parotid_R": 1, 49 | "PharynxConst": 1, 50 | "Pituitary": 1, 51 | "SpinalCord": 1, 52 | "Submandibular_L": 1, 53 | "Submandibular_R": 1, 54 | "TemporalLobe_L": 1, 55 | "TemporalLobe_R": 1, 56 | "Thyroid": 1, 57 | "Trachea": 1, 58 | "TympanicCavity_L": 1, 59 | "TMjoint_L": 1, 60 | "TMjoint_R": 1, 61 | "TympanicCavity_R": 1, 62 | "VestibulSemi_L": 1, 63 | "VestibulSemi_R": 1 64 | }) 65 | 66 | for organ in task01_label_tolerance.keys(): 67 | task01_submission_result['{}_DSC'.format(organ)] = list() 68 | for organ in task01_label_tolerance.keys(): 69 | task01_submission_result['{}_NSD'.format(organ)] = list() 70 | 71 | 72 | def compute_each_organ_lesion_performace(result, reference, voxel_spacing, tolerance_mm): 73 | if np.sum(reference) == 0 and np.sum(result) == 0: 74 | DSC = 1 75 | NSD = 1 76 | elif np.sum(reference) == 0 and np.sum(result) > 0: 77 | DSC = 0 78 | NSD = 0 79 | else: 80 | DSC = dsc(result, reference) 81 | NSD = nsd(result, reference, voxel_spacing, tolerance_mm) 82 | return round(DSC, 4), round(NSD, 4) 83 | 84 | 85 | def nii2arr(path): 86 | itk_data = sitk.ReadImage(path) 87 | arr_data = sitk.GetArrayFromImage(itk_data) 88 | spacing = itk_data.GetSpacing()[::-1] 89 | return arr_data, spacing 90 | 91 | 92 | for patient in os.listdir(submission_path): 93 | print(patient) 94 | task01_submission_result["Name"].append(patient) 95 | 96 | for organ in sorted(task01_label_tolerance.keys()): 97 | result_organ, spacing = nii2arr( 98 | 
"{}/{}/{}.nii.gz".format(submission_path, patient, organ)) 99 | reference_organ, spacing = nii2arr( 100 | "{}/{}/{}.nii.gz".format(gt_path, patient, organ)) 101 | DSC_organ, NSD_organ = compute_each_organ_lesion_performace( 102 | result_organ > 0, reference_organ > 0, spacing, task01_label_tolerance[organ]) 103 | task01_submission_result['{}_DSC'.format(organ)].append(DSC_organ) 104 | task01_submission_result['{}_NSD'.format(organ)].append(NSD_organ) 105 | 106 | 107 | task01_df = pd.DataFrame(task01_submission_result) 108 | task01_df.to_csv(os.path.join( 109 | save_path, 'DSC_NSD_Task001_Admin_nnUNetV2.csv'), index=False) 110 | -------------------------------------------------------------------------------- /Eval/SegRap_Task002_DSC_NSD_Eval.py: -------------------------------------------------------------------------------- 1 | import os 2 | from collections import OrderedDict 3 | 4 | import numpy as np 5 | import pandas as pd 6 | import SimpleITK as sitk 7 | from two_evaluation_metrics import dsc, nsd 8 | 9 | 10 | submission_path = '/data_8t/radiology_images/processed/SegRap2023/SegRap2023_Validation_Set(with labels)' 11 | gt_path = '/data_8t/radiology_images/processed/SegRap2023/SegRap2023_Validation_Set(with labels)' 12 | save_path = '/data_8t/radiology_images/processed/SegRap2023/SegRap2023_Validation_Set(with labels)' 13 | patientnames = os.listdir(submission_path) 14 | task02_submission_result = OrderedDict() 15 | 16 | task02_submission_result['Name'] = list() 17 | 18 | task02_label_tolerance = OrderedDict({"GTVp": 1, "GTVnd": 1}) 19 | 20 | 21 | for lesion in task02_label_tolerance.keys(): 22 | task02_submission_result['{}_DSC'.format(lesion)] = list() 23 | for lesion in task02_label_tolerance.keys(): 24 | task02_submission_result['{}_NSD'.format(lesion)] = list() 25 | 26 | 27 | def compute_each_organ_lesion_performace(result, reference, voxel_spacing, tolerance_mm): 28 | if np.sum(reference) == 0 and np.sum(result) == 0: 29 | DSC = 1 30 | NSD = 1 31 | elif np.sum(reference) == 0 and np.sum(result) > 0: 32 | DSC = 0 33 | NSD = 0 34 | else: 35 | DSC = dsc(result, reference) 36 | NSD = nsd(result, reference, voxel_spacing, tolerance_mm) 37 | return round(DSC, 4), round(NSD, 4) 38 | 39 | 40 | def nii2arr(path): 41 | itk_data = sitk.ReadImage(path) 42 | arr_data = sitk.GetArrayFromImage(itk_data) 43 | spacing = itk_data.GetSpacing()[::-1] 44 | return arr_data, spacing 45 | 46 | 47 | for patient in os.listdir(submission_path): 48 | print(patient) 49 | task02_submission_result["Name"].append(patient) 50 | 51 | for lesion in sorted(task02_label_tolerance.keys()): 52 | result_lesion, spacing = nii2arr( 53 | "{}/{}/{}.nii.gz".format(submission_path, patient, lesion)) 54 | reference_lesion, spacing = nii2arr( 55 | "{}/{}/{}.nii.gz".format(gt_path, patient, lesion)) 56 | DSC_lesion, NSD_lesion = compute_each_organ_lesion_performace( 57 | result_lesion > 0, reference_lesion > 0, spacing, task02_label_tolerance[lesion]) 58 | task02_submission_result['{}_DSC'.format(lesion)].append(DSC_lesion) 59 | task02_submission_result['{}_NSD'.format(lesion)].append(NSD_lesion) 60 | 61 | task02_df = pd.DataFrame(task02_submission_result) 62 | task02_df.to_csv(os.path.join( 63 | save_path, 'DSC_NSD_Task02_Admin_nnUNetV2.csv'), index=False) 64 | -------------------------------------------------------------------------------- /Eval/two_evaluation_metrics.py: -------------------------------------------------------------------------------- 1 | """ 2 | The code was borrowed from 
https://github.com/loli/medpy/blob/master/medpy/metric/binary.py. 3 | """ 4 | import time 5 | from collections.abc import Iterable, Iterator 6 | 7 | import numpy 8 | import numpy as np 9 | import scipy 10 | from scipy.ndimage import _ni_support 11 | from scipy.ndimage.morphology import (binary_erosion, distance_transform_edt, 12 | generate_binary_structure) 13 | from medpy import metric 14 | 15 | 16 | neighbour_code_to_normals = [ 17 | [[0, 0, 0]], 18 | [[0.125, 0.125, 0.125]], 19 | [[-0.125, -0.125, 0.125]], 20 | [[-0.25, -0.25, 0.0], [0.25, 0.25, -0.0]], 21 | [[0.125, -0.125, 0.125]], 22 | [[-0.25, -0.0, -0.25], [0.25, 0.0, 0.25]], 23 | [[0.125, -0.125, 0.125], [-0.125, -0.125, 0.125]], 24 | [[0.5, 0.0, -0.0], [0.25, 0.25, 0.25], [0.125, 0.125, 0.125]], 25 | [[-0.125, 0.125, 0.125]], 26 | [[0.125, 0.125, 0.125], [-0.125, 0.125, 0.125]], 27 | [[-0.25, 0.0, 0.25], [-0.25, 0.0, 0.25]], 28 | [[0.5, 0.0, 0.0], [-0.25, -0.25, 0.25], [-0.125, -0.125, 0.125]], 29 | [[0.25, -0.25, 0.0], [0.25, -0.25, 0.0]], 30 | [[0.5, 0.0, 0.0], [0.25, -0.25, 0.25], [-0.125, 0.125, -0.125]], 31 | [[-0.5, 0.0, 0.0], [-0.25, 0.25, 0.25], [-0.125, 0.125, 0.125]], 32 | [[0.5, 0.0, 0.0], [0.5, 0.0, 0.0]], 33 | [[0.125, -0.125, -0.125]], 34 | [[0.0, -0.25, -0.25], [0.0, 0.25, 0.25]], 35 | [[-0.125, -0.125, 0.125], [0.125, -0.125, -0.125]], 36 | [[0.0, -0.5, 0.0], [0.25, 0.25, 0.25], [0.125, 0.125, 0.125]], 37 | [[0.125, -0.125, 0.125], [0.125, -0.125, -0.125]], 38 | [[0.0, 0.0, -0.5], [0.25, 0.25, 0.25], [-0.125, -0.125, -0.125]], 39 | [[-0.125, -0.125, 0.125], [0.125, -0.125, 0.125], [0.125, -0.125, -0.125]], 40 | [[-0.125, -0.125, -0.125], [-0.25, -0.25, -0.25], 41 | [0.25, 0.25, 0.25], [0.125, 0.125, 0.125]], 42 | [[-0.125, 0.125, 0.125], [0.125, -0.125, -0.125]], 43 | [[0.0, -0.25, -0.25], [0.0, 0.25, 0.25], [-0.125, 0.125, 0.125]], 44 | [[-0.25, 0.0, 0.25], [-0.25, 0.0, 0.25], [0.125, -0.125, -0.125]], 45 | [[0.125, 0.125, 0.125], [0.375, 0.375, 0.375], 46 | [0.0, -0.25, 0.25], [-0.25, 0.0, 0.25]], 47 | [[0.125, -0.125, -0.125], [0.25, -0.25, 0.0], [0.25, -0.25, 0.0]], 48 | [[0.375, 0.375, 0.375], [0.0, 0.25, -0.25], 49 | [-0.125, -0.125, -0.125], [-0.25, 0.25, 0.0]], 50 | [[-0.5, 0.0, 0.0], [-0.125, -0.125, -0.125], 51 | [-0.25, -0.25, -0.25], [0.125, 0.125, 0.125]], 52 | [[-0.5, 0.0, 0.0], [-0.125, -0.125, -0.125], [-0.25, -0.25, -0.25]], 53 | [[0.125, -0.125, 0.125]], 54 | [[0.125, 0.125, 0.125], [0.125, -0.125, 0.125]], 55 | [[0.0, -0.25, 0.25], [0.0, 0.25, -0.25]], 56 | [[0.0, -0.5, 0.0], [0.125, 0.125, -0.125], [0.25, 0.25, -0.25]], 57 | [[0.125, -0.125, 0.125], [0.125, -0.125, 0.125]], 58 | [[0.125, -0.125, 0.125], [-0.25, -0.0, -0.25], [0.25, 0.0, 0.25]], 59 | [[0.0, -0.25, 0.25], [0.0, 0.25, -0.25], [0.125, -0.125, 0.125]], 60 | [[-0.375, -0.375, 0.375], [-0.0, 0.25, 0.25], 61 | [0.125, 0.125, -0.125], [-0.25, -0.0, -0.25]], 62 | [[-0.125, 0.125, 0.125], [0.125, -0.125, 0.125]], 63 | [[0.125, 0.125, 0.125], [0.125, -0.125, 0.125], [-0.125, 0.125, 0.125]], 64 | [[-0.0, 0.0, 0.5], [-0.25, -0.25, 0.25], [-0.125, -0.125, 0.125]], 65 | [[0.25, 0.25, -0.25], [0.25, 0.25, -0.25], 66 | [0.125, 0.125, -0.125], [-0.125, -0.125, 0.125]], 67 | [[0.125, -0.125, 0.125], [0.25, -0.25, 0.0], [0.25, -0.25, 0.0]], 68 | [[0.5, 0.0, 0.0], [0.25, -0.25, 0.25], 69 | [-0.125, 0.125, -0.125], [0.125, -0.125, 0.125]], 70 | [[0.0, 0.25, -0.25], [0.375, -0.375, -0.375], 71 | [-0.125, 0.125, 0.125], [0.25, 0.25, 0.0]], 72 | [[-0.5, 0.0, 0.0], [-0.25, -0.25, 0.25], [-0.125, -0.125, 0.125]], 73 | [[0.25, -0.25, 0.0], [-0.25, 0.25, 
0.0]], 74 | [[0.0, 0.5, 0.0], [-0.25, 0.25, 0.25], [0.125, -0.125, -0.125]], 75 | [[0.0, 0.5, 0.0], [0.125, -0.125, 0.125], [-0.25, 0.25, -0.25]], 76 | [[0.0, 0.5, 0.0], [0.0, -0.5, 0.0]], 77 | [[0.25, -0.25, 0.0], [-0.25, 0.25, 0.0], [0.125, -0.125, 0.125]], 78 | [[-0.375, -0.375, -0.375], [-0.25, 0.0, 0.25], 79 | [-0.125, -0.125, -0.125], [-0.25, 0.25, 0.0]], 80 | [[0.125, 0.125, 0.125], [0.0, -0.5, 0.0], 81 | [-0.25, -0.25, -0.25], [-0.125, -0.125, -0.125]], 82 | [[0.0, -0.5, 0.0], [-0.25, -0.25, -0.25], [-0.125, -0.125, -0.125]], 83 | [[-0.125, 0.125, 0.125], [0.25, -0.25, 0.0], [-0.25, 0.25, 0.0]], 84 | [[0.0, 0.5, 0.0], [0.25, 0.25, -0.25], 85 | [-0.125, -0.125, 0.125], [-0.125, -0.125, 0.125]], 86 | [[-0.375, 0.375, -0.375], [-0.25, -0.25, 0.0], 87 | [-0.125, 0.125, -0.125], [-0.25, 0.0, 0.25]], 88 | [[0.0, 0.5, 0.0], [0.25, 0.25, -0.25], [-0.125, -0.125, 0.125]], 89 | [[0.25, -0.25, 0.0], [-0.25, 0.25, 0.0], 90 | [0.25, -0.25, 0.0], [0.25, -0.25, 0.0]], 91 | [[-0.25, -0.25, 0.0], [-0.25, -0.25, 0.0], [-0.125, -0.125, 0.125]], 92 | [[0.125, 0.125, 0.125], [-0.25, -0.25, 0.0], [-0.25, -0.25, 0.0]], 93 | [[-0.25, -0.25, 0.0], [-0.25, -0.25, 0.0]], 94 | [[-0.125, -0.125, 0.125]], 95 | [[0.125, 0.125, 0.125], [-0.125, -0.125, 0.125]], 96 | [[-0.125, -0.125, 0.125], [-0.125, -0.125, 0.125]], 97 | [[-0.125, -0.125, 0.125], [-0.25, -0.25, 0.0], [0.25, 0.25, -0.0]], 98 | [[0.0, -0.25, 0.25], [0.0, -0.25, 0.25]], 99 | [[0.0, 0.0, 0.5], [0.25, -0.25, 0.25], [0.125, -0.125, 0.125]], 100 | [[0.0, -0.25, 0.25], [0.0, -0.25, 0.25], [-0.125, -0.125, 0.125]], 101 | [[0.375, -0.375, 0.375], [0.0, -0.25, -0.25], 102 | [-0.125, 0.125, -0.125], [0.25, 0.25, 0.0]], 103 | [[-0.125, -0.125, 0.125], [-0.125, 0.125, 0.125]], 104 | [[0.125, 0.125, 0.125], [-0.125, -0.125, 0.125], [-0.125, 0.125, 0.125]], 105 | [[-0.125, -0.125, 0.125], [-0.25, 0.0, 0.25], [-0.25, 0.0, 0.25]], 106 | [[0.5, 0.0, 0.0], [-0.25, -0.25, 0.25], 107 | [-0.125, -0.125, 0.125], [-0.125, -0.125, 0.125]], 108 | [[-0.0, 0.5, 0.0], [-0.25, 0.25, -0.25], [0.125, -0.125, 0.125]], 109 | [[-0.25, 0.25, -0.25], [-0.25, 0.25, -0.25], 110 | [-0.125, 0.125, -0.125], [-0.125, 0.125, -0.125]], 111 | [[-0.25, 0.0, -0.25], [0.375, -0.375, -0.375], 112 | [0.0, 0.25, -0.25], [-0.125, 0.125, 0.125]], 113 | [[0.5, 0.0, 0.0], [-0.25, 0.25, -0.25], [0.125, -0.125, 0.125]], 114 | [[-0.25, 0.0, 0.25], [0.25, 0.0, -0.25]], 115 | [[-0.0, 0.0, 0.5], [-0.25, 0.25, 0.25], [-0.125, 0.125, 0.125]], 116 | [[-0.125, -0.125, 0.125], [-0.25, 0.0, 0.25], [0.25, 0.0, -0.25]], 117 | [[-0.25, -0.0, -0.25], [-0.375, 0.375, 0.375], 118 | [-0.25, -0.25, 0.0], [-0.125, 0.125, 0.125]], 119 | [[0.0, 0.0, -0.5], [0.25, 0.25, -0.25], [-0.125, -0.125, 0.125]], 120 | [[-0.0, 0.0, 0.5], [0.0, 0.0, 0.5]], 121 | [[0.125, 0.125, 0.125], [0.125, 0.125, 0.125], 122 | [0.25, 0.25, 0.25], [0.0, 0.0, 0.5]], 123 | [[0.125, 0.125, 0.125], [0.25, 0.25, 0.25], [0.0, 0.0, 0.5]], 124 | [[-0.25, 0.0, 0.25], [0.25, 0.0, -0.25], [-0.125, 0.125, 0.125]], 125 | [[-0.0, 0.0, 0.5], [0.25, -0.25, 0.25], 126 | [0.125, -0.125, 0.125], [0.125, -0.125, 0.125]], 127 | [[-0.25, 0.0, 0.25], [-0.25, 0.0, 0.25], 128 | [-0.25, 0.0, 0.25], [0.25, 0.0, -0.25]], 129 | [[0.125, -0.125, 0.125], [0.25, 0.0, 0.25], [0.25, 0.0, 0.25]], 130 | [[0.25, 0.0, 0.25], [-0.375, -0.375, 0.375], 131 | [-0.25, 0.25, 0.0], [-0.125, -0.125, 0.125]], 132 | [[-0.0, 0.0, 0.5], [0.25, -0.25, 0.25], [0.125, -0.125, 0.125]], 133 | [[0.125, 0.125, 0.125], [0.25, 0.0, 0.25], [0.25, 0.0, 0.25]], 134 | [[0.25, 0.0, 0.25], [0.25, 0.0, 0.25]], 135 
| [[-0.125, -0.125, 0.125], [0.125, -0.125, 0.125]], 136 | [[0.125, 0.125, 0.125], [-0.125, -0.125, 0.125], [0.125, -0.125, 0.125]], 137 | [[-0.125, -0.125, 0.125], [0.0, -0.25, 0.25], [0.0, 0.25, -0.25]], 138 | [[0.0, -0.5, 0.0], [0.125, 0.125, -0.125], 139 | [0.25, 0.25, -0.25], [-0.125, -0.125, 0.125]], 140 | [[0.0, -0.25, 0.25], [0.0, -0.25, 0.25], [0.125, -0.125, 0.125]], 141 | [[0.0, 0.0, 0.5], [0.25, -0.25, 0.25], 142 | [0.125, -0.125, 0.125], [0.125, -0.125, 0.125]], 143 | [[0.0, -0.25, 0.25], [0.0, -0.25, 0.25], 144 | [0.0, -0.25, 0.25], [0.0, 0.25, -0.25]], 145 | [[0.0, 0.25, 0.25], [0.0, 0.25, 0.25], [0.125, -0.125, -0.125]], 146 | [[-0.125, 0.125, 0.125], [0.125, -0.125, 0.125], [-0.125, -0.125, 0.125]], 147 | [[-0.125, 0.125, 0.125], [0.125, -0.125, 0.125], 148 | [-0.125, -0.125, 0.125], [0.125, 0.125, 0.125]], 149 | [[-0.0, 0.0, 0.5], [-0.25, -0.25, 0.25], 150 | [-0.125, -0.125, 0.125], [-0.125, -0.125, 0.125]], 151 | [[0.125, 0.125, 0.125], [0.125, -0.125, 0.125], [0.125, -0.125, -0.125]], 152 | [[-0.0, 0.5, 0.0], [-0.25, 0.25, -0.25], 153 | [0.125, -0.125, 0.125], [0.125, -0.125, 0.125]], 154 | [[0.125, 0.125, 0.125], [-0.125, -0.125, 0.125], [0.125, -0.125, -0.125]], 155 | [[0.0, -0.25, -0.25], [0.0, 0.25, 0.25], [0.125, 0.125, 0.125]], 156 | [[0.125, 0.125, 0.125], [0.125, -0.125, -0.125]], 157 | [[0.5, 0.0, -0.0], [0.25, -0.25, -0.25], [0.125, -0.125, -0.125]], 158 | [[-0.25, 0.25, 0.25], [-0.125, 0.125, 0.125], 159 | [-0.25, 0.25, 0.25], [0.125, -0.125, -0.125]], 160 | [[0.375, -0.375, 0.375], [0.0, 0.25, 0.25], 161 | [-0.125, 0.125, -0.125], [-0.25, 0.0, 0.25]], 162 | [[0.0, -0.5, 0.0], [-0.25, 0.25, 0.25], [-0.125, 0.125, 0.125]], 163 | [[-0.375, -0.375, 0.375], [0.25, -0.25, 0.0], 164 | [0.0, 0.25, 0.25], [-0.125, -0.125, 0.125]], 165 | [[-0.125, 0.125, 0.125], [-0.25, 0.25, 0.25], [0.0, 0.0, 0.5]], 166 | [[0.125, 0.125, 0.125], [0.0, 0.25, 0.25], [0.0, 0.25, 0.25]], 167 | [[0.0, 0.25, 0.25], [0.0, 0.25, 0.25]], 168 | [[0.5, 0.0, -0.0], [0.25, 0.25, 0.25], 169 | [0.125, 0.125, 0.125], [0.125, 0.125, 0.125]], 170 | [[0.125, -0.125, 0.125], [-0.125, -0.125, 0.125], [0.125, 0.125, 0.125]], 171 | [[-0.25, -0.0, -0.25], [0.25, 0.0, 0.25], [0.125, 0.125, 0.125]], 172 | [[0.125, 0.125, 0.125], [0.125, -0.125, 0.125]], 173 | [[-0.25, -0.25, 0.0], [0.25, 0.25, -0.0], [0.125, 0.125, 0.125]], 174 | [[0.125, 0.125, 0.125], [-0.125, -0.125, 0.125]], 175 | [[0.125, 0.125, 0.125], [0.125, 0.125, 0.125]], 176 | [[0.125, 0.125, 0.125]], 177 | [[0.125, 0.125, 0.125]], 178 | [[0.125, 0.125, 0.125], [0.125, 0.125, 0.125]], 179 | [[0.125, 0.125, 0.125], [-0.125, -0.125, 0.125]], 180 | [[-0.25, -0.25, 0.0], [0.25, 0.25, -0.0], [0.125, 0.125, 0.125]], 181 | [[0.125, 0.125, 0.125], [0.125, -0.125, 0.125]], 182 | [[-0.25, -0.0, -0.25], [0.25, 0.0, 0.25], [0.125, 0.125, 0.125]], 183 | [[0.125, -0.125, 0.125], [-0.125, -0.125, 0.125], [0.125, 0.125, 0.125]], 184 | [[0.5, 0.0, -0.0], [0.25, 0.25, 0.25], 185 | [0.125, 0.125, 0.125], [0.125, 0.125, 0.125]], 186 | [[0.0, 0.25, 0.25], [0.0, 0.25, 0.25]], 187 | [[0.125, 0.125, 0.125], [0.0, 0.25, 0.25], [0.0, 0.25, 0.25]], 188 | [[-0.125, 0.125, 0.125], [-0.25, 0.25, 0.25], [0.0, 0.0, 0.5]], 189 | [[-0.375, -0.375, 0.375], [0.25, -0.25, 0.0], 190 | [0.0, 0.25, 0.25], [-0.125, -0.125, 0.125]], 191 | [[0.0, -0.5, 0.0], [-0.25, 0.25, 0.25], [-0.125, 0.125, 0.125]], 192 | [[0.375, -0.375, 0.375], [0.0, 0.25, 0.25], 193 | [-0.125, 0.125, -0.125], [-0.25, 0.0, 0.25]], 194 | [[-0.25, 0.25, 0.25], [-0.125, 0.125, 0.125], 195 | [-0.25, 0.25, 0.25], [0.125, 
-0.125, -0.125]], 196 | [[0.5, 0.0, -0.0], [0.25, -0.25, -0.25], [0.125, -0.125, -0.125]], 197 | [[0.125, 0.125, 0.125], [0.125, -0.125, -0.125]], 198 | [[0.0, -0.25, -0.25], [0.0, 0.25, 0.25], [0.125, 0.125, 0.125]], 199 | [[0.125, 0.125, 0.125], [-0.125, -0.125, 0.125], [0.125, -0.125, -0.125]], 200 | [[-0.0, 0.5, 0.0], [-0.25, 0.25, -0.25], 201 | [0.125, -0.125, 0.125], [0.125, -0.125, 0.125]], 202 | [[0.125, 0.125, 0.125], [0.125, -0.125, 0.125], [0.125, -0.125, -0.125]], 203 | [[-0.0, 0.0, 0.5], [-0.25, -0.25, 0.25], 204 | [-0.125, -0.125, 0.125], [-0.125, -0.125, 0.125]], 205 | [[-0.125, 0.125, 0.125], [0.125, -0.125, 0.125], 206 | [-0.125, -0.125, 0.125], [0.125, 0.125, 0.125]], 207 | [[-0.125, 0.125, 0.125], [0.125, -0.125, 0.125], [-0.125, -0.125, 0.125]], 208 | [[0.0, 0.25, 0.25], [0.0, 0.25, 0.25], [0.125, -0.125, -0.125]], 209 | [[0.0, -0.25, -0.25], [0.0, 0.25, 0.25], [0.0, 0.25, 0.25], [0.0, 0.25, 0.25]], 210 | [[0.0, 0.0, 0.5], [0.25, -0.25, 0.25], 211 | [0.125, -0.125, 0.125], [0.125, -0.125, 0.125]], 212 | [[0.0, -0.25, 0.25], [0.0, -0.25, 0.25], [0.125, -0.125, 0.125]], 213 | [[0.0, -0.5, 0.0], [0.125, 0.125, -0.125], 214 | [0.25, 0.25, -0.25], [-0.125, -0.125, 0.125]], 215 | [[-0.125, -0.125, 0.125], [0.0, -0.25, 0.25], [0.0, 0.25, -0.25]], 216 | [[0.125, 0.125, 0.125], [-0.125, -0.125, 0.125], [0.125, -0.125, 0.125]], 217 | [[-0.125, -0.125, 0.125], [0.125, -0.125, 0.125]], 218 | [[0.25, 0.0, 0.25], [0.25, 0.0, 0.25]], 219 | [[0.125, 0.125, 0.125], [0.25, 0.0, 0.25], [0.25, 0.0, 0.25]], 220 | [[-0.0, 0.0, 0.5], [0.25, -0.25, 0.25], [0.125, -0.125, 0.125]], 221 | [[0.25, 0.0, 0.25], [-0.375, -0.375, 0.375], 222 | [-0.25, 0.25, 0.0], [-0.125, -0.125, 0.125]], 223 | [[0.125, -0.125, 0.125], [0.25, 0.0, 0.25], [0.25, 0.0, 0.25]], 224 | [[-0.25, -0.0, -0.25], [0.25, 0.0, 0.25], 225 | [0.25, 0.0, 0.25], [0.25, 0.0, 0.25]], 226 | [[-0.0, 0.0, 0.5], [0.25, -0.25, 0.25], 227 | [0.125, -0.125, 0.125], [0.125, -0.125, 0.125]], 228 | [[-0.25, 0.0, 0.25], [0.25, 0.0, -0.25], [-0.125, 0.125, 0.125]], 229 | [[0.125, 0.125, 0.125], [0.25, 0.25, 0.25], [0.0, 0.0, 0.5]], 230 | [[0.125, 0.125, 0.125], [0.125, 0.125, 0.125], 231 | [0.25, 0.25, 0.25], [0.0, 0.0, 0.5]], 232 | [[-0.0, 0.0, 0.5], [0.0, 0.0, 0.5]], 233 | [[0.0, 0.0, -0.5], [0.25, 0.25, -0.25], [-0.125, -0.125, 0.125]], 234 | [[-0.25, -0.0, -0.25], [-0.375, 0.375, 0.375], 235 | [-0.25, -0.25, 0.0], [-0.125, 0.125, 0.125]], 236 | [[-0.125, -0.125, 0.125], [-0.25, 0.0, 0.25], [0.25, 0.0, -0.25]], 237 | [[-0.0, 0.0, 0.5], [-0.25, 0.25, 0.25], [-0.125, 0.125, 0.125]], 238 | [[-0.25, 0.0, 0.25], [0.25, 0.0, -0.25]], 239 | [[0.5, 0.0, 0.0], [-0.25, 0.25, -0.25], [0.125, -0.125, 0.125]], 240 | [[-0.25, 0.0, -0.25], [0.375, -0.375, -0.375], 241 | [0.0, 0.25, -0.25], [-0.125, 0.125, 0.125]], 242 | [[-0.25, 0.25, -0.25], [-0.25, 0.25, -0.25], 243 | [-0.125, 0.125, -0.125], [-0.125, 0.125, -0.125]], 244 | [[-0.0, 0.5, 0.0], [-0.25, 0.25, -0.25], [0.125, -0.125, 0.125]], 245 | [[0.5, 0.0, 0.0], [-0.25, -0.25, 0.25], 246 | [-0.125, -0.125, 0.125], [-0.125, -0.125, 0.125]], 247 | [[-0.125, -0.125, 0.125], [-0.25, 0.0, 0.25], [-0.25, 0.0, 0.25]], 248 | [[0.125, 0.125, 0.125], [-0.125, -0.125, 0.125], [-0.125, 0.125, 0.125]], 249 | [[-0.125, -0.125, 0.125], [-0.125, 0.125, 0.125]], 250 | [[0.375, -0.375, 0.375], [0.0, -0.25, -0.25], 251 | [-0.125, 0.125, -0.125], [0.25, 0.25, 0.0]], 252 | [[0.0, -0.25, 0.25], [0.0, -0.25, 0.25], [-0.125, -0.125, 0.125]], 253 | [[0.0, 0.0, 0.5], [0.25, -0.25, 0.25], [0.125, -0.125, 0.125]], 254 | [[0.0, -0.25, 
0.25], [0.0, -0.25, 0.25]], 255 | [[-0.125, -0.125, 0.125], [-0.25, -0.25, 0.0], [0.25, 0.25, -0.0]], 256 | [[-0.125, -0.125, 0.125], [-0.125, -0.125, 0.125]], 257 | [[0.125, 0.125, 0.125], [-0.125, -0.125, 0.125]], 258 | [[-0.125, -0.125, 0.125]], 259 | [[-0.25, -0.25, 0.0], [-0.25, -0.25, 0.0]], 260 | [[0.125, 0.125, 0.125], [-0.25, -0.25, 0.0], [-0.25, -0.25, 0.0]], 261 | [[-0.25, -0.25, 0.0], [-0.25, -0.25, 0.0], [-0.125, -0.125, 0.125]], 262 | [[-0.25, -0.25, 0.0], [-0.25, -0.25, 0.0], 263 | [-0.25, -0.25, 0.0], [0.25, 0.25, -0.0]], 264 | [[0.0, 0.5, 0.0], [0.25, 0.25, -0.25], [-0.125, -0.125, 0.125]], 265 | [[-0.375, 0.375, -0.375], [-0.25, -0.25, 0.0], 266 | [-0.125, 0.125, -0.125], [-0.25, 0.0, 0.25]], 267 | [[0.0, 0.5, 0.0], [0.25, 0.25, -0.25], 268 | [-0.125, -0.125, 0.125], [-0.125, -0.125, 0.125]], 269 | [[-0.125, 0.125, 0.125], [0.25, -0.25, 0.0], [-0.25, 0.25, 0.0]], 270 | [[0.0, -0.5, 0.0], [-0.25, -0.25, -0.25], [-0.125, -0.125, -0.125]], 271 | [[0.125, 0.125, 0.125], [0.0, -0.5, 0.0], 272 | [-0.25, -0.25, -0.25], [-0.125, -0.125, -0.125]], 273 | [[-0.375, -0.375, -0.375], [-0.25, 0.0, 0.25], 274 | [-0.125, -0.125, -0.125], [-0.25, 0.25, 0.0]], 275 | [[0.25, -0.25, 0.0], [-0.25, 0.25, 0.0], [0.125, -0.125, 0.125]], 276 | [[0.0, 0.5, 0.0], [0.0, -0.5, 0.0]], 277 | [[0.0, 0.5, 0.0], [0.125, -0.125, 0.125], [-0.25, 0.25, -0.25]], 278 | [[0.0, 0.5, 0.0], [-0.25, 0.25, 0.25], [0.125, -0.125, -0.125]], 279 | [[0.25, -0.25, 0.0], [-0.25, 0.25, 0.0]], 280 | [[-0.5, 0.0, 0.0], [-0.25, -0.25, 0.25], [-0.125, -0.125, 0.125]], 281 | [[0.0, 0.25, -0.25], [0.375, -0.375, -0.375], 282 | [-0.125, 0.125, 0.125], [0.25, 0.25, 0.0]], 283 | [[0.5, 0.0, 0.0], [0.25, -0.25, 0.25], 284 | [-0.125, 0.125, -0.125], [0.125, -0.125, 0.125]], 285 | [[0.125, -0.125, 0.125], [0.25, -0.25, 0.0], [0.25, -0.25, 0.0]], 286 | [[0.25, 0.25, -0.25], [0.25, 0.25, -0.25], 287 | [0.125, 0.125, -0.125], [-0.125, -0.125, 0.125]], 288 | [[-0.0, 0.0, 0.5], [-0.25, -0.25, 0.25], [-0.125, -0.125, 0.125]], 289 | [[0.125, 0.125, 0.125], [0.125, -0.125, 0.125], [-0.125, 0.125, 0.125]], 290 | [[-0.125, 0.125, 0.125], [0.125, -0.125, 0.125]], 291 | [[-0.375, -0.375, 0.375], [-0.0, 0.25, 0.25], 292 | [0.125, 0.125, -0.125], [-0.25, -0.0, -0.25]], 293 | [[0.0, -0.25, 0.25], [0.0, 0.25, -0.25], [0.125, -0.125, 0.125]], 294 | [[0.125, -0.125, 0.125], [-0.25, -0.0, -0.25], [0.25, 0.0, 0.25]], 295 | [[0.125, -0.125, 0.125], [0.125, -0.125, 0.125]], 296 | [[0.0, -0.5, 0.0], [0.125, 0.125, -0.125], [0.25, 0.25, -0.25]], 297 | [[0.0, -0.25, 0.25], [0.0, 0.25, -0.25]], 298 | [[0.125, 0.125, 0.125], [0.125, -0.125, 0.125]], 299 | [[0.125, -0.125, 0.125]], 300 | [[-0.5, 0.0, 0.0], [-0.125, -0.125, -0.125], [-0.25, -0.25, -0.25]], 301 | [[-0.5, 0.0, 0.0], [-0.125, -0.125, -0.125], 302 | [-0.25, -0.25, -0.25], [0.125, 0.125, 0.125]], 303 | [[0.375, 0.375, 0.375], [0.0, 0.25, -0.25], 304 | [-0.125, -0.125, -0.125], [-0.25, 0.25, 0.0]], 305 | [[0.125, -0.125, -0.125], [0.25, -0.25, 0.0], [0.25, -0.25, 0.0]], 306 | [[0.125, 0.125, 0.125], [0.375, 0.375, 0.375], 307 | [0.0, -0.25, 0.25], [-0.25, 0.0, 0.25]], 308 | [[-0.25, 0.0, 0.25], [-0.25, 0.0, 0.25], [0.125, -0.125, -0.125]], 309 | [[0.0, -0.25, -0.25], [0.0, 0.25, 0.25], [-0.125, 0.125, 0.125]], 310 | [[-0.125, 0.125, 0.125], [0.125, -0.125, -0.125]], 311 | [[-0.125, -0.125, -0.125], [-0.25, -0.25, -0.25], 312 | [0.25, 0.25, 0.25], [0.125, 0.125, 0.125]], 313 | [[-0.125, -0.125, 0.125], [0.125, -0.125, 0.125], [0.125, -0.125, -0.125]], 314 | [[0.0, 0.0, -0.5], [0.25, 0.25, 0.25], 
[-0.125, -0.125, -0.125]], 315 | [[0.125, -0.125, 0.125], [0.125, -0.125, -0.125]], 316 | [[0.0, -0.5, 0.0], [0.25, 0.25, 0.25], [0.125, 0.125, 0.125]], 317 | [[-0.125, -0.125, 0.125], [0.125, -0.125, -0.125]], 318 | [[0.0, -0.25, -0.25], [0.0, 0.25, 0.25]], 319 | [[0.125, -0.125, -0.125]], 320 | [[0.5, 0.0, 0.0], [0.5, 0.0, 0.0]], 321 | [[-0.5, 0.0, 0.0], [-0.25, 0.25, 0.25], [-0.125, 0.125, 0.125]], 322 | [[0.5, 0.0, 0.0], [0.25, -0.25, 0.25], [-0.125, 0.125, -0.125]], 323 | [[0.25, -0.25, 0.0], [0.25, -0.25, 0.0]], 324 | [[0.5, 0.0, 0.0], [-0.25, -0.25, 0.25], [-0.125, -0.125, 0.125]], 325 | [[-0.25, 0.0, 0.25], [-0.25, 0.0, 0.25]], 326 | [[0.125, 0.125, 0.125], [-0.125, 0.125, 0.125]], 327 | [[-0.125, 0.125, 0.125]], 328 | [[0.5, 0.0, -0.0], [0.25, 0.25, 0.25], [0.125, 0.125, 0.125]], 329 | [[0.125, -0.125, 0.125], [-0.125, -0.125, 0.125]], 330 | [[-0.25, -0.0, -0.25], [0.25, 0.0, 0.25]], 331 | [[0.125, -0.125, 0.125]], 332 | [[-0.25, -0.25, 0.0], [0.25, 0.25, -0.0]], 333 | [[-0.125, -0.125, 0.125]], 334 | [[0.125, 0.125, 0.125]], 335 | [[0, 0, 0]]] 336 | 337 | 338 | def _normalize_sequence(input, rank): 339 | """If input is a scalar, create a sequence of length equal to the 340 | rank by duplicating the input. If input is a sequence, 341 | check if its length is equal to the length of array. 342 | """ 343 | is_str = isinstance(input, str) 344 | if not is_str and isinstance(input, Iterable): 345 | normalized = list(input) 346 | if len(normalized) != rank: 347 | err = "sequence argument must have length equal to input rank" 348 | raise RuntimeError(err) 349 | else: 350 | normalized = [input] * rank 351 | return normalized 352 | 353 | 354 | def __surface_distances(result, reference, voxelspacing=None, connectivity=1): 355 | """ 356 | The distances between the surface voxel of binary objects in result and their 357 | nearest partner surface voxel of a binary object in reference. 
358 |     """
359 |     result = numpy.atleast_1d(result.astype(bool))
360 |     reference = numpy.atleast_1d(reference.astype(bool))
361 |     if voxelspacing is not None:
362 |         voxelspacing = _normalize_sequence(
363 |             voxelspacing, result.ndim)
364 |         voxelspacing = numpy.asarray(voxelspacing, dtype=numpy.float64)
365 |         if not voxelspacing.flags.contiguous:
366 |             voxelspacing = voxelspacing.copy()
367 | 
368 |     # binary structure
369 |     footprint = generate_binary_structure(result.ndim, connectivity)
370 | 
371 |     # test for emptiness
372 |     if 0 == numpy.count_nonzero(result):
373 |         raise RuntimeError(
374 |             'The first supplied array does not contain any binary object.')
375 |     if 0 == numpy.count_nonzero(reference):
376 |         raise RuntimeError(
377 |             'The second supplied array does not contain any binary object.')
378 | 
379 |     # extract only the 1-pixel border line of the objects
380 |     result_border = result ^ binary_erosion(
381 |         result, structure=footprint, iterations=1)
382 |     reference_border = reference ^ binary_erosion(
383 |         reference, structure=footprint, iterations=1)
384 | 
385 |     # compute average surface distance
386 |     # Note: scipy's distance transform is calculated only inside the borders of the
387 |     # foreground objects, therefore the input has to be reversed
388 |     dt = distance_transform_edt(~reference_border, sampling=voxelspacing)
389 |     sds = dt[result_border]
390 | 
391 |     return sds
392 | 
393 | 
394 | def hd(result, reference, voxelspacing=None, connectivity=1):
395 |     hd1 = __surface_distances(
396 |         result, reference, voxelspacing, connectivity).max()
397 |     hd2 = __surface_distances(
398 |         reference, result, voxelspacing, connectivity).max()
399 |     hd = max(hd1, hd2)
400 |     return hd
401 | 
402 | 
403 | def hd_fast(result, reference, voxelspacing=None, connectivity=1):
404 |     hd1 = __surface_distances(
405 |         result, reference, voxelspacing, connectivity).max()
406 |     hd2 = __surface_distances(
407 |         reference, result, voxelspacing, connectivity).max()
408 |     hd = max(hd1, hd2)
409 |     return hd
410 | 
411 | 
412 | def hd95(result, reference, voxelspacing=None, connectivity=1):
413 |     hd1 = __surface_distances(result, reference, voxelspacing, connectivity)
414 |     hd2 = __surface_distances(reference, result, voxelspacing, connectivity)
415 |     hd95 = numpy.percentile(numpy.hstack((hd1, hd2)), 95)
416 |     return hd95
417 | 
418 | 
419 | def assd(result, reference, voxelspacing=None, connectivity=1):
420 |     assd = numpy.mean((__surface_distances(result, reference, voxelspacing, connectivity),
421 |                        __surface_distances(reference, result, voxelspacing, connectivity)))
422 |     return assd
423 | 
424 | 
425 | def asd(result, reference, voxelspacing=None, connectivity=1):
426 |     sds = __surface_distances(result, reference, voxelspacing, connectivity)
427 |     asd = sds.mean()
428 |     return asd
429 | 
430 | 
431 | def compute_surface_distances(mask_gt, mask_pred, spacing_mm):
432 |     """Compute closest distances from all surface points to the other surface.
433 | 
434 |     Finds all surface elements "surfels" in the ground truth mask `mask_gt` and
435 |     the predicted mask `mask_pred`, computes their area in mm^2 and the distance
436 |     to the closest point on the other surface. It returns two sorted lists of
437 |     distances together with the corresponding surfel areas. If one of the masks
438 |     is empty, the corresponding lists are empty and all distances in the other
439 |     list are `inf`.
440 | 
441 |     Args:
442 |       mask_gt: 3-dim Numpy array of type bool. The ground truth mask.
443 |       mask_pred: 3-dim Numpy array of type bool. 
The predicted mask. 444 | spacing_mm: 3-element list-like structure. Voxel spacing in x0, x1 and x2 445 | direction 446 | 447 | Returns: 448 | A dict with 449 | "distances_gt_to_pred": 1-dim numpy array of type float. The distances in mm 450 | from all ground truth surface elements to the predicted surface, 451 | sorted from smallest to largest 452 | "distances_pred_to_gt": 1-dim numpy array of type float. The distances in mm 453 | from all predicted surface elements to the ground truth surface, 454 | sorted from smallest to largest 455 | "surfel_areas_gt": 1-dim numpy array of type float. The area in mm^2 of 456 | the ground truth surface elements in the same order as 457 | distances_gt_to_pred 458 | "surfel_areas_pred": 1-dim numpy array of type float. The area in mm^2 of 459 | the predicted surface elements in the same order as 460 | distances_pred_to_gt 461 | 462 | """ 463 | 464 | # compute the area for all 256 possible surface elements 465 | # (given a 2x2x2 neighbourhood) according to the spacing_mm 466 | neighbour_code_to_surface_area = np.zeros([256]) 467 | for code in range(256): 468 | normals = np.array(neighbour_code_to_normals[code]) 469 | sum_area = 0 470 | for normal_idx in range(normals.shape[0]): 471 | # normal vector 472 | n = np.zeros([3]) 473 | n[0] = normals[normal_idx, 0] * spacing_mm[1] * spacing_mm[2] 474 | n[1] = normals[normal_idx, 1] * spacing_mm[0] * spacing_mm[2] 475 | n[2] = normals[normal_idx, 2] * spacing_mm[0] * spacing_mm[1] 476 | area = np.linalg.norm(n) 477 | sum_area += area 478 | neighbour_code_to_surface_area[code] = sum_area 479 | 480 | # compute the bounding box of the masks to trim 481 | # the volume to the smallest possible processing subvolume 482 | mask_all = mask_gt | mask_pred 483 | bbox_min = np.zeros(3, np.int64) 484 | bbox_max = np.zeros(3, np.int64) 485 | 486 | # max projection to the x0-axis 487 | proj_0 = np.max(np.max(mask_all, axis=2), axis=1) 488 | idx_nonzero_0 = np.nonzero(proj_0)[0] 489 | if len(idx_nonzero_0) == 0: 490 | return {"distances_gt_to_pred": np.array([]), 491 | "distances_pred_to_gt": np.array([]), 492 | "surfel_areas_gt": np.array([]), 493 | "surfel_areas_pred": np.array([])} 494 | 495 | bbox_min[0] = np.min(idx_nonzero_0) 496 | bbox_max[0] = np.max(idx_nonzero_0) 497 | 498 | # max projection to the x1-axis 499 | proj_1 = np.max(np.max(mask_all, axis=2), axis=0) 500 | idx_nonzero_1 = np.nonzero(proj_1)[0] 501 | bbox_min[1] = np.min(idx_nonzero_1) 502 | bbox_max[1] = np.max(idx_nonzero_1) 503 | 504 | # max projection to the x2-axis 505 | proj_2 = np.max(np.max(mask_all, axis=1), axis=0) 506 | idx_nonzero_2 = np.nonzero(proj_2)[0] 507 | bbox_min[2] = np.min(idx_nonzero_2) 508 | bbox_max[2] = np.max(idx_nonzero_2) 509 | 510 | # print("bounding box min = {}".format(bbox_min)) 511 | # print("bounding box max = {}".format(bbox_max)) 512 | 513 | # crop the processing subvolume. 514 | # we need to zeropad the cropped region with 1 voxel at the lower, 515 | # the right and the back side. 
This is required to obtain the "full"
516 |     # convolution result with the 2x2x2 kernel
517 |     cropmask_gt = np.zeros((bbox_max - bbox_min) + 2, np.uint8)
518 |     cropmask_pred = np.zeros((bbox_max - bbox_min) + 2, np.uint8)
519 | 
520 |     cropmask_gt[0:-1, 0:-1, 0:-1] = mask_gt[bbox_min[0]:bbox_max[0] + 1,
521 |                                             bbox_min[1]:bbox_max[1] + 1,
522 |                                             bbox_min[2]:bbox_max[2] + 1]
523 | 
524 |     cropmask_pred[0:-1, 0:-1, 0:-1] = mask_pred[bbox_min[0]:bbox_max[0] + 1,
525 |                                                 bbox_min[1]:bbox_max[1] + 1,
526 |                                                 bbox_min[2]:bbox_max[2] + 1]
527 | 
528 |     # compute the neighbour code (local binary pattern) for each voxel
529 |     # the resulting arrays are spatially shifted by minus half a voxel in each axis,
530 |     # i.e. the points are located at the corners of the original voxels
531 |     kernel = np.array([[[128, 64],
532 |                         [32, 16]],
533 |                        [[8, 4],
534 |                         [2, 1]]])
535 |     neighbour_code_map_gt = scipy.ndimage.correlate(
536 |         cropmask_gt.astype(np.uint8), kernel, mode="constant", cval=0)
537 |     neighbour_code_map_pred = scipy.ndimage.correlate(cropmask_pred.astype(np.uint8), kernel, mode="constant",
538 |                                                       cval=0)
539 | 
540 |     # create masks with the surface voxels
541 |     borders_gt = ((neighbour_code_map_gt != 0) &
542 |                   (neighbour_code_map_gt != 255))
543 |     borders_pred = ((neighbour_code_map_pred != 0) &
544 |                     (neighbour_code_map_pred != 255))
545 | 
546 |     # compute the distance transform (closest distance of each voxel to the surface voxels)
547 |     if borders_gt.any():
548 |         distmap_gt = scipy.ndimage.distance_transform_edt(
549 |             ~borders_gt, sampling=spacing_mm)
550 |     else:
551 |         distmap_gt = np.inf * np.ones(borders_gt.shape)
552 | 
553 |     if borders_pred.any():
554 |         distmap_pred = scipy.ndimage.distance_transform_edt(
555 |             ~borders_pred, sampling=spacing_mm)
556 |     else:
557 |         distmap_pred = np.inf * np.ones(borders_pred.shape)
558 | 
559 |     # compute the area of each surface element
560 |     surface_area_map_gt = neighbour_code_to_surface_area[neighbour_code_map_gt]
561 |     surface_area_map_pred = neighbour_code_to_surface_area[neighbour_code_map_pred]
562 | 
563 |     # create a list of all surface elements with distance and area
564 |     distances_gt_to_pred = distmap_pred[borders_gt]
565 |     distances_pred_to_gt = distmap_gt[borders_pred]
566 |     surfel_areas_gt = surface_area_map_gt[borders_gt]
567 |     surfel_areas_pred = surface_area_map_pred[borders_pred]
568 | 
569 |     # sort them by distance
570 |     if distances_gt_to_pred.shape != (0,):
571 |         sorted_surfels_gt = np.array(
572 |             sorted(zip(distances_gt_to_pred, surfel_areas_gt)))
573 |         distances_gt_to_pred = sorted_surfels_gt[:, 0]
574 |         surfel_areas_gt = sorted_surfels_gt[:, 1]
575 | 
576 |     if distances_pred_to_gt.shape != (0,):
577 |         sorted_surfels_pred = np.array(
578 |             sorted(zip(distances_pred_to_gt, surfel_areas_pred)))
579 |         distances_pred_to_gt = sorted_surfels_pred[:, 0]
580 |         surfel_areas_pred = sorted_surfels_pred[:, 1]
581 | 
582 |     return {"distances_gt_to_pred": distances_gt_to_pred,
583 |             "distances_pred_to_gt": distances_pred_to_gt,
584 |             "surfel_areas_gt": surfel_areas_gt,
585 |             "surfel_areas_pred": surfel_areas_pred}
586 | 
587 | 
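A toy call, assuming `two_evaluation_metrics.py` is importable from the working directory, showing the dictionary `compute_surface_distances` returns and how it feeds the tolerance-based metrics defined next (the masks are synthetic, for illustration only):

```python
import numpy as np
from two_evaluation_metrics import (compute_surface_distances,
                                    compute_surface_dice_at_tolerance)

gt = np.zeros((24, 24, 24), dtype=bool)
gt[6:18, 6:18, 6:18] = True       # a 12-voxel cube ...
pred = np.roll(gt, 1, axis=1)     # ... and the same cube shifted by one voxel

sd = compute_surface_distances(gt, pred, spacing_mm=(1.0, 1.0, 1.0))
print(sorted(sd.keys()))
print(compute_surface_dice_at_tolerance(sd, tolerance_mm=1.0))  # value in [0, 1]
```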
586 | 
587 | 
588 | def compute_average_surface_distance(surface_distances):
589 |     distances_gt_to_pred = surface_distances["distances_gt_to_pred"]
590 |     distances_pred_to_gt = surface_distances["distances_pred_to_gt"]
591 |     surfel_areas_gt = surface_distances["surfel_areas_gt"]
592 |     surfel_areas_pred = surface_distances["surfel_areas_pred"]
593 |     average_distance_gt_to_pred = np.sum(
594 |         distances_gt_to_pred * surfel_areas_gt) / np.sum(surfel_areas_gt)
595 |     average_distance_pred_to_gt = np.sum(
596 |         distances_pred_to_gt * surfel_areas_pred) / np.sum(surfel_areas_pred)
597 |     return (average_distance_gt_to_pred, average_distance_pred_to_gt)
598 | 
599 | 
600 | def compute_robust_hausdorff(surface_distances, percent):
601 |     distances_gt_to_pred = surface_distances["distances_gt_to_pred"]
602 |     distances_pred_to_gt = surface_distances["distances_pred_to_gt"]
603 |     surfel_areas_gt = surface_distances["surfel_areas_gt"]
604 |     surfel_areas_pred = surface_distances["surfel_areas_pred"]
605 |     if len(distances_gt_to_pred) > 0:
606 |         surfel_areas_cum_gt = np.cumsum(
607 |             surfel_areas_gt) / np.sum(surfel_areas_gt)
608 |         idx = np.searchsorted(surfel_areas_cum_gt, percent / 100.0)
609 |         perc_distance_gt_to_pred = distances_gt_to_pred[min(
610 |             idx, len(distances_gt_to_pred) - 1)]
611 |     else:
612 |         perc_distance_gt_to_pred = np.inf
613 | 
614 |     if len(distances_pred_to_gt) > 0:
615 |         surfel_areas_cum_pred = np.cumsum(
616 |             surfel_areas_pred) / np.sum(surfel_areas_pred)
617 |         idx = np.searchsorted(surfel_areas_cum_pred, percent / 100.0)
618 |         perc_distance_pred_to_gt = distances_pred_to_gt[min(
619 |             idx, len(distances_pred_to_gt) - 1)]
620 |     else:
621 |         perc_distance_pred_to_gt = np.inf
622 | 
623 |     return max(perc_distance_gt_to_pred, perc_distance_pred_to_gt)
624 | 
625 | 
626 | def compute_surface_overlap_at_tolerance(surface_distances, tolerance_mm):
627 |     distances_gt_to_pred = surface_distances["distances_gt_to_pred"]
628 |     distances_pred_to_gt = surface_distances["distances_pred_to_gt"]
629 |     surfel_areas_gt = surface_distances["surfel_areas_gt"]
630 |     surfel_areas_pred = surface_distances["surfel_areas_pred"]
631 |     rel_overlap_gt = np.sum(
632 |         surfel_areas_gt[distances_gt_to_pred <= tolerance_mm]) / np.sum(surfel_areas_gt)
633 |     rel_overlap_pred = np.sum(
634 |         surfel_areas_pred[distances_pred_to_gt <= tolerance_mm]) / np.sum(surfel_areas_pred)
635 |     return (rel_overlap_gt, rel_overlap_pred)
636 | 
637 | 
638 | def compute_surface_dice_at_tolerance(surface_distances, tolerance_mm):
639 |     distances_gt_to_pred = surface_distances["distances_gt_to_pred"]
640 |     distances_pred_to_gt = surface_distances["distances_pred_to_gt"]
641 |     surfel_areas_gt = surface_distances["surfel_areas_gt"]
642 |     surfel_areas_pred = surface_distances["surfel_areas_pred"]
643 |     overlap_gt = np.sum(surfel_areas_gt[distances_gt_to_pred <= tolerance_mm])
644 |     overlap_pred = np.sum(
645 |         surfel_areas_pred[distances_pred_to_gt <= tolerance_mm])
646 |     surface_dice = (overlap_gt + overlap_pred) / (
647 |         np.sum(surfel_areas_gt) + np.sum(surfel_areas_pred))
648 |     return surface_dice
649 | 
650 | 
651 | def nsd(result, reference, voxelspacing=None, tolerance_mm=1):  # normalized surface dice (NSD) at the given tolerance in mm
652 |     nsd = compute_surface_dice_at_tolerance(
653 |         compute_surface_distances(reference, result, voxelspacing), tolerance_mm)
654 |     return nsd
655 | 
656 | 
657 | def dsc(result, reference):  # Dice similarity coefficient, computed with medpy
658 |     dsc = metric.binary.dc(result, reference)
659 |     return dsc
660 | 
661 | 
662 | # if __name__ == "__main__":
663 | #     import SimpleITK as sitk
664 | #     gt = sitk.ReadImage("gt.nii.gz")
665 | #     pred = sitk.ReadImage("pred.nii.gz")
666 | 
667 | #     gt_array = sitk.GetArrayFromImage(gt)
668 | #     pred_array = sitk.GetArrayFromImage(pred)
669 | #     time1 = time.time()
670 | #     for i in range(1, 17):
671 | #         print(hd95(pred_array == i, gt_array == i, gt.GetSpacing()[::-1]))
672 | #     print("Original total time: ", time.time() - time1)
673 | #     time2 = time.time()
674 | #     for i in range(1, 17):
675 | # 
print(nsd(pred_array == i, gt_array == 676 | # i, gt.GetSpacing()[::-1])) 677 | # print("Fast version total time: ", time.time() - time2) 678 | -------------------------------------------------------------------------------- /Poster_Top5_Team/Task01/Chan Woong Lee.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HiLab-git/SegRap2023/de87c551571a1ee4c7e6dc17e9824c19ddec6f30/Poster_Top5_Team/Task01/Chan Woong Lee.pdf -------------------------------------------------------------------------------- /Poster_Top5_Team/Task01/Kaixiang Yang.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HiLab-git/SegRap2023/de87c551571a1ee4c7e6dc17e9824c19ddec6f30/Poster_Top5_Team/Task01/Kaixiang Yang.pdf -------------------------------------------------------------------------------- /Poster_Top5_Team/Task01/Yanzhou Su.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HiLab-git/SegRap2023/de87c551571a1ee4c7e6dc17e9824c19ddec6f30/Poster_Top5_Team/Task01/Yanzhou Su.pdf -------------------------------------------------------------------------------- /Poster_Top5_Team/Task01/Yiwen Ye.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HiLab-git/SegRap2023/de87c551571a1ee4c7e6dc17e9824c19ddec6f30/Poster_Top5_Team/Task01/Yiwen Ye.pdf -------------------------------------------------------------------------------- /Poster_Top5_Team/Task01/Yunxin Zhong.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HiLab-git/SegRap2023/de87c551571a1ee4c7e6dc17e9824c19ddec6f30/Poster_Top5_Team/Task01/Yunxin Zhong.pdf -------------------------------------------------------------------------------- /Poster_Top5_Team/Task02/Constantin Ulrich.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HiLab-git/SegRap2023/de87c551571a1ee4c7e6dc17e9824c19ddec6f30/Poster_Top5_Team/Task02/Constantin Ulrich.pdf -------------------------------------------------------------------------------- /Poster_Top5_Team/Task02/Kaixiang Yang.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HiLab-git/SegRap2023/de87c551571a1ee4c7e6dc17e9824c19ddec6f30/Poster_Top5_Team/Task02/Kaixiang Yang.pdf -------------------------------------------------------------------------------- /Poster_Top5_Team/Task02/Mehdi Astaraki.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HiLab-git/SegRap2023/de87c551571a1ee4c7e6dc17e9824c19ddec6f30/Poster_Top5_Team/Task02/Mehdi Astaraki.pdf -------------------------------------------------------------------------------- /Poster_Top5_Team/Task02/Yiwen Ye.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HiLab-git/SegRap2023/de87c551571a1ee4c7e6dc17e9824c19ddec6f30/Poster_Top5_Team/Task02/Yiwen Ye.pdf -------------------------------------------------------------------------------- /Poster_Top5_Team/Task02/Zhaohu Xing.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HiLab-git/SegRap2023/de87c551571a1ee4c7e6dc17e9824c19ddec6f30/Poster_Top5_Team/Task02/Zhaohu 
Xing.pdf -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | #
[SegRap2023](https://segrap2023.grand-challenge.org/)
2 | 
3 | ## 1. Tutorial for Algorithm Docker Image ([official guideline of grand challenge](https://grand-challenge.org/documentation/test-and-deploy-your-container/))
4 | ### 1.1 Important input and output
5 | - For task1, the input dir is `/input/images/head-neck-ct/` (non-contrast CT images) and `/input/images/head-neck-contrast-enhanced-ct/` (contrast CT images). The output dir is `/output/images/head-neck-segmentation/`. Note that the final prediction has to be a 4D .mha file whose array shape is [45, *image_shape]. Example code and an example output are provided in `Docker_tutorial/stacked_results_to_4d_mha.py` and `Docker_tutorial/oars_output_example.mha`.
6 | 
7 | - For task2, the input dir is `/input/images/head-neck-ct/` (non-contrast CT images) and `/input/images/head-neck-contrast-enhanced-ct/` (contrast CT images). The output dir is `/output/images/gross-tumor-volume-segmentation/`. Note that the final prediction has to be a 4D .mha file whose array shape is [2, *image_shape]. Example code and an example output are provided in `Docker_tutorial/stacked_results_to_4d_mha.py` and `Docker_tutorial/gtvs_output_example.mha`.
8 | 
9 | ### 1.2 Algorithm examples based on nnUNet
10 | We provide two algorithm examples based on nnUNet, which serve only as baselines for the two tasks. If your method is based on nnUNet, you can follow the examples to generate predictions and run `sh export.sh` to generate an Algorithm Container Image in tar.gz format. The details about loading the input, generating predictions, and saving the output can be seen in `process.py`.
11 | 
12 | In addition, you can download the example data and model weights from [GoogleDrive](https://drive.google.com/file/d/17hJz9hQ1sajsW0aEgmiydvL9bVchqipr/view?usp=sharing) or [BaiduNetDisk](https://pan.baidu.com/s/1lwGENM9R7z3791FxQoy7fQ?pwd=2023) into the `images` and `weight` folders, respectively. Before submitting, you can test the docker image on your local machine by running `sh test.sh` or `sudo sh test.sh`; we show an [example output](https://github.com/HiLab-git/SegRap2023/blob/main/Docker_tutorial/outputs.png) on our Ubuntu 22.04 machine (one 3090 GPU).
13 | 
14 | #### 1.2.1 How to test the container locally?
15 | 1. Prepare your images and weights in the following format:
16 |     - SegRap2023_task1_OARs_nnUNet_Example
17 |         - images
18 |             - images
19 |                 - head-neck-contrast-enhanced-ct
20 |                     - segrap_0001.mha
21 |                 - head-neck-ct
22 |                     - segrap_0001.mha
23 |         - weight
24 |             - fold_0
25 |                 - model_final_checkpoint.model
26 |                 - model_final_checkpoint.model.pkl
27 |                 - plans.pkl
28 | 
29 | 2. How do I check the output?
30 |     - Check whether the output folder `/output/images/head-neck-segmentation` or `/output/images/gross-tumor-volume-segmentation` contains predictions that correspond to the input images. Running the following command will list the files in the output folder.
31 | ```bash
32 | docker run --rm \
33 |     -v segrap2023_segmentationcontainer-output-$VOLUME_SUFFIX:/output/ \
34 |     python:3.10-slim ls -al /output/images/head-neck-segmentation
35 | ```
36 |     - The `test` folder is just an empty folder that is not used in the docker image, so you can ignore or remove it.
37 | 
38 |     - You can ignore the error `No such file or directory: '/output/results.json'` when you run the docker image locally.
39 | 
40 | ### 1.3 Algorithm examples based on other methods
41 | If your method is not based on nnUNet, you can modify the `predict()` function in `process.py` and the other corresponding parts for inference. The code is easy to read and modify, but please ensure that the output file has the correct format (a 4D .mha with the right mapping between the 4D indices and the OARs or GTVs). We provide an example (`Docker_tutorial/stacked_results_to_4d_mha.py`) that stacks the individual OAR/GTV predictions of a patient into the required 4D .mha file; a minimal sketch of the idea is shown below.
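The following is a hypothetical, stripped-down illustration of that stacking step (the function name is ours and not part of the repository; `Docker_tutorial/stacked_results_to_4d_mha.py` is the authoritative reference):

```python
import numpy as np
import SimpleITK as sitk

def stack_masks_to_4d_mha(mask_paths, output_path):
    # One binary mask per structure, in the label order the challenge expects
    # (45 OARs for task 1, 2 GTVs for task 2), all on the same image grid.
    arrays = [sitk.GetArrayFromImage(sitk.ReadImage(p)) for p in mask_paths]
    stacked = np.stack(arrays, axis=0).astype(np.uint8)  # [N, *image_shape]
    # isVector=False keeps the first array axis as a true fourth image dimension.
    sitk.WriteImage(sitk.GetImageFromArray(stacked, isVector=False), output_path)
```

The order of `mask_paths` determines the channel-to-structure mapping of the 4D file, so it must follow the label order defined by the challenge.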
42 | 
43 | ### 1.4 Q&A
44 | If you run into any problems when submitting your docker images, you can email us (`luoxd1996@gmail.com` or `fujia98914@gmail.com`), post [an issue](https://github.com/HiLab-git/SegRap2023/issues), or discuss it in [the forum](https://grand-challenge.org/forums/forum/segmentation-of-organs-at-risk-and-gross-tumor-volume-of-npc-699/) at any time.
45 | 
46 | ## 2. How to submit the algorithm?
47 | 1. If you have not created your algorithm yet, go to https://segrap2023.grand-challenge.org/evaluation/challenge/algorithms/create/ to create an algorithm with 30GB memory.
48 | 
49 | 2. Upload your Algorithm Container Image, then wait for the container to become active.
50 | 
51 | 3. Go to the [SegRap2023 submit website](https://segrap2023.grand-challenge.org/evaluation/challenge/submissions/create/), choose the task, and submit your Algorithm Image.
52 | 
53 | 4. After submitting, wait for the [Leaderboards](https://segrap2023.grand-challenge.org/evaluation/challenge/leaderboard/) to update.
54 | 
55 | 
56 | ## 3. Tutorial for SegRap2023 Challenge
57 | 
58 | This repository provides tutorial code for the Segmentation of Organs-at-Risk and Gross Tumor Volume of NPC for Radiotherapy Planning (SegRap2023) challenge. Our code is based on [PyMIC](https://github.com/HiLab-git/PyMIC), a lightweight and easy-to-use PyTorch-based toolkit for medical image computing with deep learning.
59 | 
60 | ### Requirements
61 | This code depends on [Pytorch](https://pytorch.org) and [PyMIC](https://github.com/HiLab-git/PyMIC).
62 | To install PyMIC, run:
63 | ```
64 | pip install PYMIC
65 | ```
66 | 
67 | 
68 | ### Segmentation model based on PyMIC
69 | 
70 | 
71 | #### Dataset and Preprocessing
72 | - Download the dataset from [SegRap2023](https://segrap2023.grand-challenge.org/) and put it in `data_dir/raw_data`.
73 | 
74 | - For data preprocessing, run:
75 | ```bash
76 | python Tutorial/preprocessing.py
77 | ```
78 | This crops each image to its maximal nonzero bounding box and normalizes the cropped result based on the intensity properties of all training images (a simplified sketch of these two steps is given after the Training commands below). By setting `args.task` to `OARs` or `GTVs`, you get the preprocessed images and labels for the two tasks, which are saved in `data_dir/Task001_OARs_preprocess` and `data_dir/Task002_GTVs_preprocess`, respectively.
79 | 
80 | #### Training
81 | - Run the following command to create csv files for training, validation, and testing. The csv files will be saved to `config/data_OARs` and `config/data_GTVs`.
82 | ```bash
83 | python Tutorial/write_csv_file.py
84 | ```
85 | 
86 | - Run the following commands for training and validation. The segmentation models will be saved in `model/unet3d_OARs` and `model/unet3d_GTVs`, respectively.
87 | ```bash
88 | pymic_train Tutorial/config/unet3d_OARs.cfg
89 | pymic_train Tutorial/config/unet3d_GTVs.cfg
90 | ```
91 | Note that you can modify the settings in the .cfg files to get better segmentation results, such as `RandomCrop_output_size`, `loss_class_weight`, etc.
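To make the preprocessing step above concrete, here is a heavily simplified, hypothetical sketch of the two operations (`Tutorial/preprocessing.py` is the real implementation; the threshold and the intensity statistics below are placeholders):

```python
import numpy as np

def crop_and_normalize(image, thresh=-500, lower=-100.0, upper=300.0,
                       mean=50.0, std=60.0):
    # 1) Maximal nonzero bounding box: keep the region where the CT intensity
    #    is above a background threshold (the real code additionally keeps
    #    only the largest connected component of this mask).
    coords = np.where(image > thresh)
    zmin, zmax = coords[0].min(), coords[0].max() + 1
    ymin, ymax = coords[1].min(), coords[1].max() + 1
    xmin, xmax = coords[2].min(), coords[2].max() + 1
    cropped = image[zmin:zmax, ymin:ymax, xmin:xmax]
    # 2) Intensity normalization: clip to the 0.5 / 99.5 foreground percentiles
    #    collected over all training images, then standardize with the global
    #    foreground mean and std.
    cropped = np.clip(cropped, lower, upper)
    return (cropped - mean) / std
```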
92 | 
93 | #### Testing
94 | - After training, run the following commands to get the performance on the testing set; the predictions for the testing data will be saved in `result/unet3d_OARs` and `result/unet3d_GTVs`.
95 | ```bash
96 | pymic_test Tutorial/config/unet3d_OARs.cfg
97 | pymic_test Tutorial/config/unet3d_GTVs.cfg
98 | ```
99 | 
100 | #### Postprocessing
101 | - Run the following command to obtain the final predictions, which are saved in `result/unet3d_OARs_post` and `result/unet3d_GTVs_post`.
102 | ```bash
103 | python Tutorial/postprocessing.py
104 | ```
105 | 
106 | ### Segmentation model based on nnUNet
107 | #### Postprocessing
108 | - Following `Tutorial/nnunet_baseline.ipynb`, you can obtain the final predictions based on the outputs from [nnUNet](https://github.com/MIC-DKFZ/nnUNet). In addition, you can also use `Tutorial/preprocessing.py` for preprocessing first (details as described in the tutorial above) and then train networks using `Tutorial/nnunet_baseline.ipynb`.
109 | 
110 | 
111 | ## 4. Evaluation for SegRap2023 Challenge
112 | Run the following commands to get the quantitative evaluation results (DSC and NSD).
113 | ```bash
114 | python Eval/SegRap_Task001_DSC_NSD_Eval.py
115 | python Eval/SegRap_Task002_DSC_NSD_Eval.py
116 | ```
117 | ## 5. Code of Top Teams for SegRap2023 Challenge
118 | The code of the top teams is available at [SegRap2023_code_from_winning_teams](https://drive.google.com/file/d/1uI7idYOF87G6Bjx1tes_VSteXpbrT3kD/view?usp=drive_link).
119 | 
120 | ### Reference
121 | 
122 | ```
123 | @article{luo2025segrap2023,
124 |   title={Segrap2023: A benchmark of organs-at-risk and gross tumor volume segmentation for radiotherapy planning of nasopharyngeal carcinoma},
125 |   author={Luo, Xiangde and Fu, Jia and Zhong, Yunxin and Liu, Shuolin and Han, Bing and Astaraki, Mehdi and Bendazzoli, Simone and Toma-Dasu, Iuliana and Ye, Yiwen and Chen, Ziyang and others},
126 |   journal={Medical Image Analysis},
127 |   volume={101},
128 |   pages={103447},
129 |   year={2025},
130 |   publisher={Elsevier}
131 | }
132 | ```
133 | 
--------------------------------------------------------------------------------
/Tutorial/config/unet3d_GTVs.cfg:
--------------------------------------------------------------------------------
1 | [dataset]
2 | # tensor type (float or double)
3 | tensor_type = float
4 | modal_num = 2
5 | 
6 | task_type = seg
7 | root_dir = data_dir/Task002_GTVs_preprocess
8 | train_csv = Tutorial/config/data_GTVs/image_train.csv
9 | valid_csv = Tutorial/config/data_GTVs/image_valid.csv
10 | test_csv = Tutorial/config/data_GTVs/image_test.csv
11 | 
12 | train_batch_size = 4
13 | valid_batch_size = 1
14 | 
15 | # data transforms
16 | train_transform = [RandomCrop, RandomFlip, NormalizeWithMeanStd, GammaCorrection, GaussianNoise, LabelToProbability]
17 | valid_transform = [NormalizeWithMeanStd, Pad, LabelToProbability]
18 | test_transform = [NormalizeWithMeanStd, Pad]
19 | 
20 | RandomCrop_output_size = [32, 128, 320]
21 | RandomCrop_foreground_focus = True
22 | RandomCrop_foreground_ratio = 0.5
23 | 
24 | RandomFlip_flip_depth = True
25 | RandomFlip_flip_height = True
26 | RandomFlip_flip_width = True
27 | 
28 | NormalizeWithMeanStd_channels = [0, 1]
29 | 
30 | GammaCorrection_channels = [0, 1]
31 | GammaCorrection_gamma_min = 0.7
32 | GammaCorrection_gamma_max = 1.5
33 | 
34 | GaussianNoise_channels = [0, 1]
35 | GaussianNoise_mean = 0
36 | GaussianNoise_std = 0.05
37 | GaussianNoise_probability = 0.5
38 | 
39 | Pad_output_size = [8, 8, 8]
40 | Pad_ceil_mode = True
41 | 
42 | [network]
43 | # this section gives parameters for network
44 | # the keys may be different for different networks
45 | 
46 | # type of network
47 | net_type = UNet3D
48 | 
49 | # number of classes, required for segmentation task
50 | class_num = 3
51 | in_chns = 2
52 | feature_chns = [16, 32, 64, 128]
53 | dropout = [0.0, 0.0, 0.1, 0.2]
54 | trilinear = True
55 | multiscale_pred = True
56 | 
57 | [training]
58 | # list of gpus
59 | gpus = [0, 1]
60 | 
61 | mixup_probability = 0.5
62 | loss_type = [DiceLoss, CrossEntropyLoss]
63 | loss_weight = [1.0, 1.0]
64 | deep_supervise = True
65 | 
66 | # for optimizers
67 | optimizer = Adam
68 | learning_rate = 1e-4
69 | momentum = 0.9
70 | weight_decay = 1e-5
71 | 
72 | # for lr scheduler (StepLR)
73 | lr_scheduler = StepLR
74 | lr_gamma = 0.5
75 | lr_step = 3000
76 | 
77 | ckpt_save_dir = Tutorial/model/unet3d_GTVs
78 | 
79 | # start iter
80 | iter_start = 0
81 | iter_max = 128000
82 | iter_valid = 500
83 | iter_save = [2000, 4000, 8000, 16000, 32000, 64000, 128000]
84 | # early_stop_patience = 5000
85 | 
86 | [testing]
87 | # list of gpus
88 | gpus = [0, 1]
89 | 
90 | # checkpoint mode can be [0-latest, 1-best, 2-specified]
91 | ckpt_mode = 1
92 | output_dir = Tutorial/result/unet3d_GTVs
93 | # post_process = KeepLargestComponent
94 | 
95 | sliding_window_enable = True
96 | sliding_window_size = [32, 128, 320]
97 | sliding_window_stride = [32, 128, 320]
98 | 
--------------------------------------------------------------------------------
/Tutorial/config/unet3d_OARs.cfg:
--------------------------------------------------------------------------------
1 | [dataset]
2 | # tensor type (float or double)
3 | tensor_type = float
4 | modal_num = 2
5 | 
6 | task_type = seg
7 | root_dir = data_dir/Task001_OARs_preprocess
8 | train_csv = Tutorial/config/data_OARs/image_train.csv
9 | valid_csv = Tutorial/config/data_OARs/image_valid.csv
10 | test_csv = Tutorial/config/data_OARs/image_test.csv
11 | 
12 | train_batch_size = 4
13 | valid_batch_size = 1
14 | 
15 | # data transforms
16 | train_transform = [RandomCrop, NormalizeWithMeanStd, GammaCorrection, GaussianNoise, LabelToProbability]
17 | valid_transform = [NormalizeWithMeanStd, Pad, LabelToProbability]
18 | test_transform = [NormalizeWithMeanStd, Pad]
19 | 
20 | RandomCrop_output_size = [32, 128, 320]
21 | RandomCrop_foreground_focus = True
22 | RandomCrop_foreground_ratio = 0.2
23 | 
24 | RandomFlip_flip_depth = True
25 | RandomFlip_flip_height = True
26 | RandomFlip_flip_width = True
27 | 
28 | NormalizeWithMeanStd_channels = [0, 1]
29 | 
30 | GammaCorrection_channels = [0, 1]
31 | GammaCorrection_gamma_min = 0.7
32 | GammaCorrection_gamma_max = 1.5
33 | 
34 | GaussianNoise_channels = [0, 1]
35 | GaussianNoise_mean = 0
36 | GaussianNoise_std = 0.05
37 | GaussianNoise_probability = 0.5
38 | 
39 | Pad_output_size = [8, 8, 8]
40 | Pad_ceil_mode = True
41 | 
42 | [network]
43 | # this section gives parameters for network
44 | # the keys may be different for different networks
45 | 
46 | # type of network
47 | net_type = UNet3D
48 | 
49 | # number of classes, required for segmentation task
50 | class_num = 55
51 | in_chns = 2
52 | feature_chns = [16, 32, 64, 128]
53 | dropout = [0.0, 0.0, 0.1, 0.2]
54 | trilinear = True
55 | multiscale_pred = False
56 | 
57 | [training]
58 | # list of gpus
59 | gpus = [0, 1]
60 | 
61 | mixup_probability = 0.5
62 | loss_type = [DiceLoss, CrossEntropyLoss]
63 | loss_weight = [1.0, 1.0]
64 | deep_supervise = False
65 | loss_class_weight = [1, 1, 1, 10, 1, 1, 1, 1, 10, 10, 1, 1, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 
10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] 66 | 67 | 68 | # for optimizers 69 | optimizer = Adam 70 | learning_rate = 1e-3 71 | momentum = 0.9 72 | weight_decay = 1e-5 73 | 74 | # for lr schedular (StepLR) 75 | lr_scheduler = StepLR 76 | lr_gamma = 0.5 77 | lr_step = 3000 78 | 79 | ckpt_save_dir = Tutorial/model/unet3d_OARs 80 | 81 | # start iter 82 | iter_start = 0 83 | iter_max = 128000 84 | iter_valid = 500 85 | iter_save = [2000, 4000, 8000, 16000, 32000, 64000, 128000] 86 | # early_stop_patience = 5000 87 | 88 | [testing] 89 | # list of gpus 90 | gpus = [0, 1] 91 | 92 | # checkpoint mode can be [0-latest, 1-best, 2-specified] 93 | ckpt_mode = 1 94 | output_dir = Tutorial/result/unet3d_OARs 95 | # post_process = KeepLargestComponent 96 | 97 | sliding_window_enable = True 98 | sliding_window_batch = 8 99 | sliding_window_size = [32, 128, 320] 100 | sliding_window_stride = [32, 128, 320] 101 | -------------------------------------------------------------------------------- /Tutorial/nnunet_baseline.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": null, 6 | "id": "5159d53e", 7 | "metadata": {}, 8 | "outputs": [], 9 | "source": [ 10 | "# copy and rename the downloaded dataset to train a nnUNet model.\n", 11 | "# nnUNet: https://github.com/MIC-DKFZ/nnUNet\n", 12 | "\n", 13 | "import glob\n", 14 | "import os\n", 15 | "import shutil\n", 16 | "\n", 17 | "import numpy as np\n", 18 | "import SimpleITK as sitk\n", 19 | "\n", 20 | "# segrap_0000_0000 means the non_contrast_CT of patient segrap_0000\n", 21 | "# segrap_0000_0001 means the contrast_CT of patient segrap_0000\n", 22 | "\n", 23 | "# First, copy and rename the images (imagesTr folder) to obey the nnunet requirement.\n", 24 | "download_raw_data_path = \"../SegRap2023/SegRap2023_Training_Set_120cases\"\n", 25 | "nnunet_raw_data_path = \"../nnUNetFrame/DATASET/nnUNet_raw/nnUNet_raw_data/Task070_SegRap2023_OARs\" # OARs and GTVs are two different tasks.\n", 26 | "for patient in os.listdir(download_raw_data_path):\n", 27 | " shutil.copy2(\"{}/{}/image.nii.gz\".format(download_raw_data_path, patient),\n", 28 | " \"{}/imagesTr/{}_0000.nii.gz\".format(nnunet_raw_data_path, patient))\n", 29 | " shutil.copy2(\"{}/{}/image_contrast.nii.gz\".format(download_raw_data_path, patient),\n", 30 | " \"{}/imagesTr/{}_0001.nii.gz\".format(nnunet_raw_data_path, patient))\n", 31 | "\n", 32 | "# Second, download the one-hot labels to labelsTr folder from https://drive.google.com/file/d/1kvX8qvc15oDQ0hrhoE1Ar5kJuEkfelAL/view?usp=sharing\n", 33 | "\n", 34 | "# Third, download the dataset_task001.json or dataset_task002.json from https://drive.google.com/drive/folders/115mzmNlZRIewnSR2QFDwW_-RkNM0LC9D?usp=sharing, please rename them to data.json\n", 35 | "\n", 36 | "# Then, you can train the models (task001 or task002) following the nnUNet training and testing guideline (https://github.com/MIC-DKFZ/nnUNet)\n", 37 | "\n", 38 | "# Finally, use the following functions to merge these subparts into individual organs.\n", 39 | "\n", 40 | "\n", 41 | "segrap_subset_task001 = {\n", 42 | " 'Brain': [1, 2, 3, 4, 5, 6, 7, 8, 9],\n", 43 | " \"BrainStem\": 2,\n", 44 | " \"Chiasm\": 3,\n", 45 | " \"TemporalLobe_L\": [4, 6],\n", 46 | " \"TemporalLobe_R\": [5, 7],\n", 47 | " \"Hippocampus_L\": [8, 6],\n", 48 | " \"Hippocampus_R\": [9, 7],\n", 49 | " 'Eye_L': [10, 12],\n", 50 | " 'Eye_R': [11, 13],\n", 51 
| " \"Lens_L\": 12,\n", 52 | " \"Lens_R\": 13,\n", 53 | " \"OpticNerve_L\": 14,\n", 54 | " \"OpticNerve_R\": 15,\n", 55 | " \"MiddleEar_L\": [18, 16, 20, 24, 28, 30],\n", 56 | " \"MiddleEar_R\": [19, 17, 21, 25, 29, 31],\n", 57 | " \"IAC_L\": 18,\n", 58 | " \"IAC_R\": 19,\n", 59 | " \"TympanicCavity_L\": [22, 20],\n", 60 | " \"TympanicCavity_R\": [23, 21],\n", 61 | " \"VestibulSemi_L\": [26, 24],\n", 62 | " \"VestibulSemi_R\": [27, 25],\n", 63 | " \"Cochlea_L\": 28,\n", 64 | " \"Cochlea_R\": 29,\n", 65 | " \"ETbone_L\": [32, 30],\n", 66 | " \"ETbone_R\": [33, 31],\n", 67 | " \"Pituitary\": 34,\n", 68 | " \"OralCavity\": 35,\n", 69 | " \"Mandible_L\": 36,\n", 70 | " \"Mandible_R\": 37,\n", 71 | " \"Submandibular_L\": 38,\n", 72 | " \"Submandibular_R\": 39,\n", 73 | " \"Parotid_L\": 40,\n", 74 | " \"Parotid_R\": 41,\n", 75 | " \"Mastoid_L\": 42,\n", 76 | " \"Mastoid_R\": 43,\n", 77 | " \"TMjoint_L\": 44,\n", 78 | " \"TMjoint_R\": 45,\n", 79 | " \"SpinalCord\": 46,\n", 80 | " \"Esophagus\": 47,\n", 81 | " \"Larynx\": [48, 49, 50, 51],\n", 82 | " \"Larynx_Glottic\": 49,\n", 83 | " \"Larynx_Supraglot\": 50,\n", 84 | " \"PharynxConst\": [51, 52],\n", 85 | " \"Thyroid\": 53,\n", 86 | " \"Trachea\": 54}\n", 87 | "\n", 88 | "\n", 89 | "segrap_subset_task002 = {\n", 90 | " \"GTVp\": 1,\n", 91 | " \"GTVnd\": 2}\n", 92 | "\n", 93 | "\n", 94 | "def nii2array(path):\n", 95 | " mask_itk_ref = sitk.ReadImage(path)\n", 96 | " mask_arr_ref = sitk.GetArrayFromImage(mask_itk_ref)\n", 97 | " return mask_arr_ref\n", 98 | "\n", 99 | "\n", 100 | "def merge_multi_class_to_one(input_arr, classes_index=None):\n", 101 | " new_arr = np.zeros_like(input_arr)\n", 102 | " for cls_ind in classes_index:\n", 103 | " new_arr[input_arr == cls_ind] = 1\n", 104 | " return new_arr\n", 105 | "\n", 106 | "\n", 107 | "def convert_one_hot_label_to_multi_organs(ont_hot_label_path, save_fold):\n", 108 | " for organ in segrap_subset_task001.keys():\n", 109 | " ont_hot_label_arr = nii2array(ont_hot_label_path)\n", 110 | " ont_hot_label_itk = sitk.ReadImage(ont_hot_label_path)\n", 111 | " if type(segrap_subset_task001[organ]) is list:\n", 112 | " new_arr = merge_multi_class_to_one(\n", 113 | " ont_hot_label_arr, segrap_subset_task001[organ])\n", 114 | " else:\n", 115 | " new_arr = np.zeros_like(ont_hot_label_arr)\n", 116 | " new_arr[ont_hot_label_arr == segrap_subset_task001[organ]] = 1\n", 117 | " new_itk = sitk.GetImageFromArray(new_arr)\n", 118 | " new_itk.CopyInformation(ont_hot_label_itk)\n", 119 | " sitk.WriteImage(new_itk, \"{}/{}.nii.gz\".format(save_fold, organ))\n", 120 | " return \"Conversion Finished\"\n", 121 | "\n", 122 | "\n", 123 | "def convert_one_hot_label_to_multi_lesions(ont_hot_label_path, save_fold):\n", 124 | " for lesion in segrap_subset_task002.keys():\n", 125 | " ont_hot_label_arr = nii2array(ont_hot_label_path)\n", 126 | " ont_hot_label_itk = sitk.ReadImage(ont_hot_label_path)\n", 127 | " new_arr = np.zeros_like(ont_hot_label_arr)\n", 128 | " new_arr[ont_hot_label_arr == segrap_subset_task002[lesion]] = 1\n", 129 | " new_itk = sitk.GetImageFromArray(new_arr)\n", 130 | " new_itk.CopyInformation(ont_hot_label_itk)\n", 131 | " sitk.WriteImage(new_itk, \"{}/{}.nii.gz\".format(save_fold, lesion))\n", 132 | " return \"Conversion Finished\"\n", 133 | "\n", 134 | "\n", 135 | "if __name__ == \"__main__\":\n", 136 | " for patient in glob.glob(\"/data_8t/radiology_images/processed/SegRap2023/nnUNetV2_infersVal/one_hot/*\"):\n", 137 | " new_path = 
\"/data_8t/radiology_images/processed/SegRap2023/nnUNetV2_infersVal/task001/{}\".format(\n", 138 | " patient.split(\"/\")[-1].replace(\"_cropped.nii.gz\", \"\"))\n", 139 | " if os.path.exists(new_path):\n", 140 | " pass\n", 141 | " convert_one_hot_label_to_multi_organs(patient, new_path)\n", 142 | " else:\n", 143 | " os.mkdir(new_path)\n", 144 | " convert_one_hot_label_to_multi_organs(patient, new_path)\n" 145 | ] 146 | } 147 | ], 148 | "metadata": { 149 | "kernelspec": { 150 | "display_name": "Python [conda env:py12]", 151 | "language": "python", 152 | "name": "conda-env-py12-py" 153 | }, 154 | "language_info": { 155 | "codemirror_mode": { 156 | "name": "ipython", 157 | "version": 3 158 | }, 159 | "file_extension": ".py", 160 | "mimetype": "text/x-python", 161 | "name": "python", 162 | "nbconvert_exporter": "python", 163 | "pygments_lexer": "ipython3", 164 | "version": "3.10.9" 165 | } 166 | }, 167 | "nbformat": 4, 168 | "nbformat_minor": 5 169 | } 170 | -------------------------------------------------------------------------------- /Tutorial/postprocessing.py: -------------------------------------------------------------------------------- 1 | import os 2 | import glob 3 | import numpy as np 4 | import SimpleITK as sitk 5 | from batchgenerators.utilities.file_and_folder_operations import * 6 | from utils import * 7 | from scipy import ndimage 8 | 9 | segrap_subset_task001 = { 10 | 'Brain': [1, 2, 3, 4, 5, 6, 7, 8, 9], 11 | "BrainStem": 2, 12 | "Chiasm": 3, 13 | "TemporalLobe_L": [4, 6], 14 | "TemporalLobe_R": [5, 7], 15 | "Hippocampus_L": [8, 6], 16 | "Hippocampus_R": [9, 7], 17 | 'Eye_L': [10, 12], 18 | 'Eye_R': [11, 13], 19 | "Lens_L": 12, 20 | "Lens_R": 13, 21 | "OpticNerve_L": 14, 22 | "OpticNerve_R": 15, 23 | "MiddleEar_L": [18, 16, 20, 24, 28, 30], 24 | "MiddleEar_R": [19, 17, 21, 25, 29, 31], 25 | "IAC_L": 18, 26 | "IAC_R": 19, 27 | "TympanicCavity_L": [22, 20], 28 | "TympanicCavity_R": [23, 21], 29 | "VestibulSemi_L": [26, 24], 30 | "VestibulSemi_R": [27, 25], 31 | "Cochlea_L": 28, 32 | "Cochlea_R": 29, 33 | "ETbone_L": [32, 30], 34 | "ETbone_R": [33, 31], 35 | "Pituitary": 34, 36 | "OralCavity": 35, 37 | "Mandible_L": 36, 38 | "Mandible_R": 37, 39 | "Submandibular_L": 38, 40 | "Submandibular_R": 39, 41 | "Parotid_L": 40, 42 | "Parotid_R": 41, 43 | "Mastoid_L": 42, 44 | "Mastoid_R": 43, 45 | "TMjoint_L": 44, 46 | "TMjoint_R": 45, 47 | "SpinalCord": 46, 48 | "Esophagus": 47, 49 | "Larynx": [48, 49, 50, 51], 50 | "Larynx_Glottic": 49, 51 | "Larynx_Supraglot": 50, 52 | "PharynxConst": [51, 52], 53 | "Thyroid": 53, 54 | "Trachea": 54} 55 | 56 | 57 | segrap_subset_task002 = { 58 | "GTVp": 1, 59 | "GTVnd": 2} 60 | 61 | def merge_multi_class_to_one(input_arr, classes_index=None): 62 | new_arr = np.zeros_like(input_arr) 63 | for cls_ind in classes_index: 64 | new_arr[input_arr == cls_ind] = 1 65 | return new_arr 66 | 67 | 68 | def convert_one_hot_label_to_multi_organs(ont_hot_label_arr, save_fold, spacing=None, origin=None, direction=None): 69 | for organ in segrap_subset_task001.keys(): 70 | if type(segrap_subset_task001[organ]) is list: 71 | new_arr = merge_multi_class_to_one(ont_hot_label_arr, segrap_subset_task001[organ]) 72 | else: 73 | new_arr = np.zeros_like(ont_hot_label_arr) 74 | new_arr[ont_hot_label_arr == segrap_subset_task001[organ]] = 1 75 | save_nii(new_arr, "{}/{}.nii.gz".format(save_fold, organ), spacing=spacing, origin=origin, direction=direction) 76 | return "Conversion Finished" 77 | 78 | 79 | def convert_one_hot_label_to_multi_lesions(ont_hot_label_arr, save_fold, 
spacing=None, origin=None, direction=None):
80 |     for lesion in segrap_subset_task002.keys():
81 |         new_arr = np.zeros_like(ont_hot_label_arr)
82 |         new_arr[ont_hot_label_arr == segrap_subset_task002[lesion]] = 1
83 |         save_nii(new_arr, "{}/{}.nii.gz".format(save_fold, lesion), spacing=spacing, origin=origin, direction=direction)
84 |     return "Conversion Finished"
85 | 
86 | 
87 | def get_raw_data(seg, params, name):
88 |     """get preprocessing parameters"""
89 |     raw_spacing, target_spacing, origin, direction, raw_shape, resample_shape, bbox = params[name]
90 | 
91 |     """cropped --> resampled"""
92 |     seg_full = np.zeros(resample_shape, dtype=seg.dtype)
93 |     seg_full[bbox[0][0]: bbox[0][1], bbox[1][0]: bbox[1][1], bbox[2][0]: bbox[2][1]] = seg
94 | 
95 |     """resampled --> raw"""
96 |     scale = np.array(target_spacing) / np.array(raw_spacing)
97 |     seg_raw = ndimage.zoom(seg_full, scale, order=0)
98 | 
99 |     assert list(seg_raw.shape) == list(raw_shape)  # raw_shape is the 3D shape recorded before preprocessing
100 |     spacing = (raw_spacing[2], raw_spacing[1], raw_spacing[0])
101 | 
102 |     return seg_raw, spacing, origin, direction
103 | 
104 | 
105 | if __name__ == "__main__":
106 |     seg_dir = 'result/unet3d_OARs'
107 |     seg_dir_post = 'result/unet3d_OARs_post'
108 | 
109 |     json_file = 'data_dir/SegRap2023_dataset.json'
110 |     with open(json_file, "r", encoding="utf-8") as f:
111 |         data = json.load(f)
112 | 
113 |     for patient in glob.glob(seg_dir + '/*.nii.gz'):
114 |         patient_name = patient.split('/')[-1].replace('.nii.gz', '')
115 |         seg = nii2array(patient)
116 |         seg_itk = sitk.ReadImage(patient)
117 | 
118 |         seg_raw, spacing, origin, direction = get_raw_data(seg, data, patient_name)
119 | 
120 |         new_path = '{}/{}'.format(seg_dir_post, patient_name)  # use the case name, not the full input path
121 |         maybe_mkdir_p(new_path)
122 |         convert_one_hot_label_to_multi_organs(seg_raw, new_path, spacing=spacing, origin=origin, direction=direction)
--------------------------------------------------------------------------------
/Tutorial/preprocessing.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import numpy as np
3 | from skimage import measure
4 | import SimpleITK as sitk
5 | from collections import OrderedDict
6 | 
7 | from batchgenerators.augmentations.utils import resize_segmentation
8 | from skimage.transform import resize
9 | from scipy.ndimage import map_coordinates
10 | from batchgenerators.utilities.file_and_folder_operations import *
11 | from collections import OrderedDict
12 | from pymic.io.image_read_write import *
13 | from utils import *
14 | 
15 | def largestConnectComponent(binaryimg):
16 |     label_image, num = measure.label(binaryimg, background=0, return_num=True)
17 |     areas = [r.area for r in measure.regionprops(label_image)]
18 |     areas.sort()
19 |     if num > 1:
20 |         for region in measure.regionprops(label_image):
21 |             if (region.area < areas[-1]):
22 |                 # print(region.area)
23 |                 for coordinates in region.coords:
24 |                     label_image[coordinates[0], coordinates[1], coordinates[2]] = 0
25 |     label_image = label_image.astype(np.int8)
26 |     label_image[np.where(label_image > 0)] = 1
27 | 
28 |     return label_image
29 | 
30 | def create_nonzero_mask(data, thresh=-500):
31 |     mask = np.zeros_like(data)
32 |     mask[data > thresh] = 1
33 |     nonzero_mask = largestConnectComponent(mask)
34 |     return nonzero_mask
35 | 
36 | 
37 | def get_bbox_from_mask(mask, outside_value=0):
38 |     mask_voxel_coords = np.where(mask != outside_value)
39 |     minzidx = int(np.min(mask_voxel_coords[0]))
40 |     maxzidx = int(np.max(mask_voxel_coords[0])) + 1
41 |     minxidx = int(np.min(mask_voxel_coords[1]))
42 |     maxxidx = int(np.max(mask_voxel_coords[1])) + 1
43 |     minyidx = int(np.min(mask_voxel_coords[2]))
44 |     maxyidx = int(np.max(mask_voxel_coords[2])) + 1
45 |     return [[minzidx, maxzidx], [minxidx, maxxidx], [minyidx, maxyidx]]
46 | 
47 | 
48 | def crop_to_bbox(image, bbox):
49 |     assert len(image.shape) == 3, "only supports 3d images"
50 |     resizer = (slice(bbox[0][0], bbox[0][1]), slice(bbox[1][0], bbox[1][1]), slice(bbox[2][0], bbox[2][1]))
51 |     return image[resizer]
52 | 
53 | 
54 | def crop_to_nonzero(data, seg=None, nonzero_label=-1):
55 |     """
56 | 
57 |     :param data:
58 |     :param seg:
59 |     :param nonzero_label: this will be written into the segmentation map
60 |     :return:
61 |     """
62 |     nonzero_mask = create_nonzero_mask(data)
63 |     bbox = get_bbox_from_mask(nonzero_mask, 0)
64 | 
65 |     data = crop_to_bbox(data, bbox)
66 | 
67 |     return data, bbox
68 | 
69 | def get_do_separate_z(spacing, anisotropy_threshold=4):
70 |     do_separate_z = (np.max(spacing) / np.min(spacing)) > anisotropy_threshold
71 |     return do_separate_z
72 | 
73 | 
74 | def get_lowres_axis(new_spacing):
75 |     axis = np.where(max(new_spacing) / np.array(new_spacing) == 1)[0]  # find which axis is anisotropic
76 |     return axis
77 | 
78 | 
79 | def resample_patient(data, data_contrast, seg, original_spacing, target_spacing, order_data=3, order_seg=0, force_separate_z=False,
80 |                      order_z_data=0, order_z_seg=0, separate_z_anisotropy_threshold=4):
81 |     """
82 |     :param data:
83 |     :param seg:
84 |     :param original_spacing:
85 |     :param target_spacing:
86 |     :param order_data:
87 |     :param order_seg:
88 |     :param force_separate_z: if None then we dynamically decide how to resample along z, if True/False then always
89 |     /never resample along z separately
90 |     :param order_z_seg: only applies if do_separate_z is True
91 |     :param order_z_data: only applies if do_separate_z is True
92 |     :param separate_z_anisotropy_threshold: if max_spacing > separate_z_anisotropy_threshold * min_spacing (per axis)
93 |     then resample along lowres axis with order_z_data/order_z_seg instead of order_data/order_seg
94 | 
95 |     :return:
96 |     """
97 |     assert not ((data is None) and (data_contrast is None) and (seg is None))  # at least one input volume is required
98 |     if data is not None:
99 |         assert len(data.shape) == 4, "data must be c x y z"
100 |     if data_contrast is not None:
101 |         assert len(data_contrast.shape) == 4, "data_contrast must be c x y z"
102 |     if seg is not None:
103 |         assert len(seg.shape) == 4, "seg must be c x y z"
104 | 
105 | 
106 |     if data is not None:
107 |         shape = np.array(data[0].shape)
108 |     elif data_contrast is not None:
109 |         shape = np.array(data_contrast[0].shape)
110 |     else:
111 |         shape = np.array(seg[0].shape)
112 | 
113 |     new_shape = np.round(((np.array(original_spacing) / np.array(target_spacing)).astype(float) * shape)).astype(int)
114 | 
115 |     if force_separate_z is not None:
116 |         do_separate_z = force_separate_z
117 |         if force_separate_z:
118 |             axis = get_lowres_axis(original_spacing)
119 |         else:
120 |             axis = None
121 |     else:
122 |         if get_do_separate_z(original_spacing, separate_z_anisotropy_threshold):
123 |             do_separate_z = True
124 |             axis = get_lowres_axis(original_spacing)
125 |         elif get_do_separate_z(target_spacing, separate_z_anisotropy_threshold):
126 |             do_separate_z = True
127 |             axis = get_lowres_axis(target_spacing)
128 |         else:
129 |             do_separate_z = False
130 |             axis = None
131 | 
132 |     if axis is not None:
133 |         if len(axis) == 3:
134 |             # every axis has the same spacing, so there is no single out-of-plane axis; do not resample separately
135 | do_separate_z = False 136 | elif len(axis) == 2: 137 | # this happens for spacings like (0.24, 1.25, 1.25) for example. In that case we do not want to resample 138 | # separately in the out of plane axis 139 | do_separate_z = False 140 | else: 141 | pass 142 | 143 | if data is not None: 144 | data_reshaped = resample_data_or_seg(data, new_shape, False, axis, order_data, do_separate_z, order_z=order_z_data) 145 | else: 146 | data_reshaped = None 147 | 148 | if data_contrast is not None: 149 | data_contrast_reshaped = resample_data_or_seg(data_contrast, new_shape, False, axis, order_data, do_separate_z, order_z=order_z_data) 150 | else: 151 | data_contrast_reshaped = None 152 | 153 | if seg is not None: 154 | seg_reshaped = resample_data_or_seg(seg, new_shape, True, axis, order_seg, do_separate_z, order_z=order_z_seg) 155 | else: 156 | seg_reshaped = None 157 | 158 | return data_reshaped.squeeze(), data_contrast_reshaped.squeeze(), seg_reshaped.squeeze() 159 | 160 | 161 | def resample_data_or_seg(data, new_shape, is_seg, axis=None, order=3, do_separate_z=False, order_z=0): 162 | """ 163 | separate_z=True will resample with order 0 along z 164 | :param data: 165 | :param new_shape: 166 | :param is_seg: 167 | :param axis: 168 | :param order: 169 | :param do_separate_z: 170 | :param cval: 171 | :param order_z: only applies if do_separate_z is True 172 | :return: 173 | """ 174 | assert len(data.shape) == 4, "data must be (c, x, y, z)" 175 | if is_seg: 176 | resize_fn = resize_segmentation 177 | kwargs = OrderedDict() 178 | else: 179 | resize_fn = resize 180 | kwargs = {'mode': 'edge', 'anti_aliasing': False} 181 | dtype_data = data.dtype 182 | shape = np.array(data[0].shape) 183 | new_shape = np.array(new_shape) 184 | if np.any(shape != new_shape): 185 | data = data.astype(float) 186 | if do_separate_z: 187 | print("separate z, order in z is", 188 | order_z, "order inplane is", order) 189 | assert len(axis) == 1, "only one anisotropic axis supported" 190 | axis = axis[0] 191 | if axis == 0: 192 | new_shape_2d = new_shape[1:] 193 | elif axis == 1: 194 | new_shape_2d = new_shape[[0, 2]] 195 | else: 196 | new_shape_2d = new_shape[:-1] 197 | 198 | reshaped_final_data = [] 199 | for c in range(data.shape[0]): 200 | reshaped_data = [] 201 | for slice_id in range(shape[axis]): 202 | if axis == 0: 203 | reshaped_data.append( 204 | resize_fn(data[c, slice_id], new_shape_2d, order, **kwargs)) 205 | elif axis == 1: 206 | reshaped_data.append( 207 | resize_fn(data[c, :, slice_id], new_shape_2d, order, **kwargs)) 208 | else: 209 | reshaped_data.append(resize_fn(data[c, :, :, slice_id], new_shape_2d, order, 210 | **kwargs)) 211 | reshaped_data = np.stack(reshaped_data, axis) 212 | if shape[axis] != new_shape[axis]: 213 | 214 | # The following few lines are blatantly copied and modified from sklearn's resize() 215 | rows, cols, dim = new_shape[0], new_shape[1], new_shape[2] 216 | orig_rows, orig_cols, orig_dim = reshaped_data.shape 217 | 218 | row_scale = float(orig_rows) / rows 219 | col_scale = float(orig_cols) / cols 220 | dim_scale = float(orig_dim) / dim 221 | 222 | map_rows, map_cols, map_dims = np.mgrid[:rows, :cols, :dim] 223 | map_rows = row_scale * (map_rows + 0.5) - 0.5 224 | map_cols = col_scale * (map_cols + 0.5) - 0.5 225 | map_dims = dim_scale * (map_dims + 0.5) - 0.5 226 | 227 | coord_map = np.array([map_rows, map_cols, map_dims]) 228 | if not is_seg or order_z == 0: 229 | reshaped_final_data.append(map_coordinates(reshaped_data, coord_map, order=order_z, 230 | mode='nearest')[None]) 231 
| else: 232 | unique_labels = np.unique(reshaped_data) 233 | reshaped = np.zeros(new_shape, dtype=dtype_data) 234 | 235 | for i, cl in enumerate(unique_labels): 236 | reshaped_multihot = np.round( 237 | map_coordinates((reshaped_data == cl).astype(float), coord_map, order=order_z, 238 | mode='nearest')) 239 | reshaped[reshaped_multihot > 0.5] = cl 240 | reshaped_final_data.append(reshaped[None]) 241 | else: 242 | reshaped_final_data.append(reshaped_data[None]) 243 | reshaped_final_data = np.vstack(reshaped_final_data) 244 | else: 245 | # print("no separate z, order", order) 246 | reshaped = [] 247 | for c in range(data.shape[0]): 248 | reshaped.append(resize_fn(data[c], new_shape, order, **kwargs)[None]) 249 | reshaped_final_data = np.vstack(reshaped) 250 | return reshaped_final_data.astype(dtype_data) 251 | else: 252 | print("no resampling necessary") 253 | return data 254 | 255 | 256 | def normalize_intensity(data, intensity_properties): 257 | """ 258 | mean_intensity: mean intensity 259 | std_intensity: std intensity 260 | lower_bound: percentile_00_5 261 | upper_bound: percentile_99_5 262 | """ 263 | mean_intensity = intensity_properties[1] 264 | std_intensity = intensity_properties[2] 265 | lower_bound = intensity_properties[6] 266 | upper_bound = intensity_properties[5] 267 | 268 | data_norm = np.clip(data, lower_bound, upper_bound) 269 | data_norm = (data_norm - mean_intensity) / std_intensity 270 | 271 | return data_norm 272 | 273 | 274 | class collect_intensity_properties(): 275 | def __init__(self, root_dir): 276 | super().__init__() 277 | self.root_dir = root_dir 278 | 279 | def intensity_properties_stat(self, dir_mask): 280 | patient_folders_name = os.listdir(self.root_dir) 281 | w_img, w_img_contrast = [], [] 282 | for patient_folder_name in patient_folders_name: 283 | patient_folder_path = os.path.join(self.root_dir, patient_folder_name) 284 | mask_name = dir_mask + '/' + patient_folder_name 285 | intensity_img, intensity_img_contrast = self.get_intensity_folder(patient_folder_path, mask_name) 286 | w_img += intensity_img 287 | w_img_contrast += intensity_img_contrast 288 | intensity_properties_img = self.compute_stats(w_img) 289 | intensity_properties_img_contrast = self.compute_stats(w_img_contrast) 290 | 291 | return intensity_properties_img, intensity_properties_img_contrast 292 | 293 | def compute_stats(self, voxels): 294 | if len(voxels) == 0: 295 | return np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan 296 | median = np.median(voxels) 297 | mean = np.mean(voxels) 298 | sd = np.std(voxels) 299 | mn = np.min(voxels) 300 | mx = np.max(voxels) 301 | percentile_99_5 = np.percentile(voxels, 99.5) 302 | percentile_00_5 = np.percentile(voxels, 00.5) 303 | return median, mean, sd, mn, mx, percentile_99_5, percentile_00_5 304 | 305 | def get_voxels_in_foreground(self, data, mask): 306 | assert data.shape == mask.shape 307 | data_intensity = data[mask > 0] 308 | 309 | return list(data_intensity) 310 | 311 | def get_intensity_folder(self, patient_folder_path, seg_name): 312 | img = nii2array(patient_folder_path + '/image.nii.gz') 313 | img_contrast = nii2array(patient_folder_path + '/image_contrast.nii.gz') 314 | seg = nii2array(seg_name) 315 | intensity_img = self.get_voxels_in_foreground(img, seg) 316 | intensity_img_contrast = self.get_voxels_in_foreground(img_contrast, seg) 317 | 318 | return intensity_img, intensity_img_contrast 319 | 320 | if __name__ == "__main__": 321 | parser = argparse.ArgumentParser(description='SegRap2023 preprocessing') 322 | 
parser.add_argument("--root_path", type=str, default='data_dir/raw_data') 323 | parser.add_argument("--root_path_onehot", type=str, default='data_dir/one_hot_label') 324 | parser.add_argument("--task", type=str, default='OARs', choices=['OARs', 'GTVs']) 325 | parser.add_argument("--target_spacing", type=list, default=[1.0, 1.0, 3.0]) 326 | args = parser.parse_args() 327 | root_path, target_spacing = args.root_path, args.target_spacing 328 | 329 | base = os.path.dirname(root_path) 330 | """fuse multi-organs into a one-hot label""" 331 | if args.task == "OARs": 332 | dir_one_hot_label = args.root_path_onehot + '/Task001_OARs' 333 | save_path = base + '/Task001_OARs_preprocess' 334 | elif args.task == "GTVs": 335 | dir_one_hot_label = args.root_path_onehot + '/Task002_GTVs' 336 | save_path = base + '/Task002_GTVs_preprocess' 337 | 338 | path_preprocessed_image = save_path + '/image' 339 | path_preprocessed_image_contrast = save_path + '/image_contrast' 340 | path_preprocessed_label = save_path + '/label' 341 | maybe_mkdir_p(path_preprocessed_image) 342 | maybe_mkdir_p(path_preprocessed_image_contrast) 343 | maybe_mkdir_p(path_preprocessed_label) 344 | 345 | """get data intensity properties""" 346 | json_dict = {} 347 | get_intensity_properties = collect_intensity_properties(root_path) 348 | targets_intensity_properties_image, targets_intensity_properties_image_contrast = get_intensity_properties.intensity_properties_stat(dir_one_hot_label) 349 | json_dict['image_' + args.task] = np.array(targets_intensity_properties_image).tolist() 350 | json_dict['image_contrast_' + args.task] = np.array(targets_intensity_properties_image_contrast).tolist() 351 | save_json(json_dict, os.path.join(base, "SegRap2023_intensity_" + args.task + ".json")) 352 | 353 | 354 | """target class for fuse label""" 355 | json_dict_shape = {} 356 | patient_names = os.listdir(root_path) 357 | for patient_name in patient_names: 358 | """load image and one-hot label""" 359 | img_obj = sitk.ReadImage("{}/{}/image.nii.gz".format(root_path, patient_name)) 360 | image = sitk.GetArrayFromImage(img_obj) 361 | origin, spacing, direction = img_obj.GetOrigin(), img_obj.GetSpacing(), img_obj.GetDirection() 362 | raw_shape = image.shape 363 | image_contrast = nii2array("{}/{}/image_contrast.nii.gz".format(root_path, patient_name)) 364 | seg = nii2array("{}/{}.nii.gz".format(dir_one_hot_label, patient_name)) 365 | 366 | 367 | """resample data""" 368 | image, image_contrast, seg = np.expand_dims(image, 0), np.expand_dims(image_contrast, 0), np.expand_dims(seg, 0) 369 | spacing_transpose = (spacing[2], spacing[1], spacing[0]) 370 | target_spacing_transpose = (target_spacing[2], target_spacing[1], target_spacing[0]) 371 | image, image_contrast, seg = resample_patient(image, image_contrast, seg, spacing_transpose, target_spacing_transpose) 372 | 373 | 374 | """crop data""" 375 | image, bbox = crop_to_nonzero(image) 376 | image_contrast = crop_to_bbox(image_contrast, bbox) 377 | seg = crop_to_bbox(seg, bbox) 378 | cropped_shape = image.shape 379 | 380 | 381 | """normalize data based on intensity properties""" 382 | image = normalize_intensity(image, targets_intensity_properties_image) 383 | image_contrast = normalize_intensity(image_contrast, targets_intensity_properties_image_contrast) 384 | 385 | target_origin = [origin[0] + target_spacing[0] * bbox[2][0], origin[1] + target_spacing[1] * bbox[1][0], origin[2] + target_spacing[2] * bbox[0][0]] 386 | 387 | save_nii(image, '{}/{}.nii.gz'.format(path_preprocessed_image, patient_name), 
387 |         save_nii(image, '{}/{}.nii.gz'.format(path_preprocessed_image, patient_name), spacing=target_spacing, origin=target_origin, direction=direction)
388 |         save_nii(image_contrast, '{}/{}.nii.gz'.format(path_preprocessed_image_contrast, patient_name), spacing=target_spacing, origin=target_origin, direction=direction)
389 |         save_nii(seg, '{}/{}.nii.gz'.format(path_preprocessed_label, patient_name), spacing=target_spacing, origin=target_origin, direction=direction)
390 | 
391 |         """save preprocessing parameters for each case"""
392 |         json_dict_shape[patient_name] = [spacing_transpose, target_spacing_transpose, origin, direction, raw_shape, cropped_shape, bbox]
393 | 
394 |     save_json(json_dict_shape, os.path.join(base, "SegRap2023_dataset.json"))
--------------------------------------------------------------------------------
/Tutorial/utils.py:
--------------------------------------------------------------------------------
1 | import SimpleITK as sitk
2 | 
3 | def nii2array(path):
4 |     mask_itk_ref = sitk.ReadImage(path)
5 |     mask_arr_ref = sitk.GetArrayFromImage(mask_itk_ref)
6 |     return mask_arr_ref
7 | 
8 | def save_nii(data, save_name, spacing=None, origin=None, direction=None):
9 |     img = sitk.GetImageFromArray(data)
10 |     if (spacing is not None):
11 |         img.SetSpacing(spacing)
12 |     if (origin is not None):
13 |         img.SetOrigin(origin)
14 |     if (direction is not None):  # was checking origin again, a copy-paste slip
15 |         img.SetDirection(direction)
16 |     sitk.WriteImage(img, save_name)
17 | 
18 | 
--------------------------------------------------------------------------------
/Tutorial/write_csv_file.py:
--------------------------------------------------------------------------------
1 | """Script for writing csv files
2 | """
3 | 
4 | import os
5 | import csv
6 | import pandas as pd
7 | import random
8 | from random import shuffle
9 | 
10 | 
11 | def create_csv_file(data_root, output_file, fields):
12 |     """
13 |     create a csv file to store the paths of files for each patient
14 |     """
15 |     filenames = []
16 |     patient_names = os.listdir(data_root + '/' + fields[1])
17 |     patient_names.sort()
18 |     print('total number of images {0:}'.format(len(patient_names)))
19 |     for patient_name in patient_names:
20 |         patient_image_names = []
21 |         for field in fields:
22 |             image_name = field + '/' + patient_name
23 |             # if(field == 'image'):
24 |             #     image_name = image_name.replace('_seg.', '.')
25 |             #     #image_name = image_name[:-4]
26 |             patient_image_names.append(image_name)
27 |         filenames.append(patient_image_names)
28 | 
29 |     with open(output_file, mode='w') as csv_file:
30 |         csv_writer = csv.writer(csv_file, delimiter=',',
31 |                                 quotechar='"', quoting=csv.QUOTE_MINIMAL)
32 |         csv_writer.writerow(fields)
33 |         for item in filenames:
34 |             csv_writer.writerow(item)
35 | 
36 | def random_split_dataset(base, input_file):
37 |     random.seed(2023)
38 |     input_file = '{}/{}'.format(base, input_file)  # input_file is expected to be a bare file name
39 |     train_names_file = base + '/image_train.csv'
40 |     valid_names_file = base + '/image_valid.csv'
41 |     test_names_file = base + '/image_test.csv'
42 |     with open(input_file, 'r') as f:
43 |         lines = f.readlines()
44 |     data_lines = lines[1:]
45 |     shuffle(data_lines)
46 |     N = len(data_lines)
47 |     n1 = int(N * 0.7)
48 |     n2 = int(N * 0.8)
49 |     print('image number', N)
50 |     print('training number', n1)
51 |     print('validation number', n2 - n1)
52 |     print('testing number', N - n2)
53 |     train_lines = data_lines[:n1]
54 |     valid_lines = data_lines[n1:n2]
55 |     test_lines = data_lines[n2:]
56 |     with open(train_names_file, 'w') as f:
57 |         f.writelines(lines[:1] + train_lines)
58 |     with open(valid_names_file, 'w') as f:
59 |         f.writelines(lines[:1] + valid_lines)
60 |     with open(test_names_file, 'w') as f:
61 |         f.writelines(lines[:1] + test_lines)
62 | 
63 | def get_evaluation_image_pairs(test_csv, gt_seg_csv):
64 |     with open(test_csv, 'r') as f:
65 |         input_lines = f.readlines()[1:]
66 |     output_lines = []
67 |     for item in input_lines:
68 |         gt_name = item.split(',')[-1]
69 |         gt_name = gt_name.rstrip()
70 |         seg_name = gt_name.split('/')[-1]
71 |         output_lines.append([gt_name, seg_name])
72 |     with open(gt_seg_csv, mode='w') as csv_file:
73 |         csv_writer = csv.writer(csv_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
74 |         csv_writer.writerow(["ground_truth", "segmentation"])
75 |         for item in output_lines:
76 |             csv_writer.writerow(item)
77 | 
78 | 
79 | if __name__ == "__main__":
80 |     # create csv file for SegRap2023
81 |     # set data_dir based on your own system
82 |     data_dir = 'data_dir/Task001_OARs_preprocess'
83 |     fields = ['image', 'image_contrast', 'label']
84 | 
85 |     base = 'config/data_OARs'
86 |     if not os.path.exists(base):
87 |         os.makedirs(base)
88 | 
89 |     output_file = base + '/image_all.csv'
90 |     create_csv_file(data_dir, output_file, fields)
91 | 
92 |     # split the data into training, validation and testing
93 |     random_split_dataset(base, 'image_all.csv')  # pass the bare file name; the function joins it with base itself
94 | 
95 |     # obtain ground truth and segmentation pairs for evaluation
96 |     test_csv = base + "/image_test.csv"
97 |     gt_seg_csv = base + "/image_test_gt_seg.csv"
98 |     get_evaluation_image_pairs(test_csv, gt_seg_csv)
99 | 
--------------------------------------------------------------------------------
/ethics.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/HiLab-git/SegRap2023/de87c551571a1ee4c7e6dc17e9824c19ddec6f30/ethics.pdf
--------------------------------------------------------------------------------