├── .gitignore ├── Docker_tutorial ├── SegRap2023_task1_OARs_nnUNet_Example │ ├── Dockerfile │ ├── README.md │ ├── build.sh │ ├── export.sh │ ├── images │ │ └── images │ │ │ ├── head-neck-contrast-enhanced-ct │ │ │ └── data.txt │ │ │ └── head-neck-ct │ │ │ └── data.txt │ ├── inference_code.py │ ├── post_processing.py │ ├── process.py │ ├── requirements.in │ ├── requirements.txt │ ├── test.sh │ └── weight │ │ └── fold_0 │ │ ├── plans.pkl │ │ └── weight.txt ├── SegRap2023_task2_GTVs_nnUNet_Example │ ├── Dockerfile │ ├── README.md │ ├── build.sh │ ├── export.sh │ ├── images │ │ └── images │ │ │ ├── head-neck-contrast-enhanced-ct │ │ │ └── data.txt │ │ │ └── head-neck-ct │ │ │ └── data.txt │ ├── inference_code.py │ ├── post_processing.py │ ├── process.py │ ├── requirements.in │ ├── requirements.txt │ ├── test.sh │ └── weight │ │ └── fold_0 │ │ ├── plans.pkl │ │ └── weight.txt ├── gtvs_output_example.mha ├── gtvs_output_example.zip ├── oars_output_example.mha ├── oars_output_example.zip ├── outputs.png └── stacked_results_to_4d_mha.py ├── Eval ├── SegRap_Task001_DSC_NSD_Eval.py ├── SegRap_Task002_DSC_NSD_Eval.py └── two_evaluation_metrics.py ├── Poster_Top5_Team ├── Task01 │ ├── Chan Woong Lee.pdf │ ├── Kaixiang Yang.pdf │ ├── Yanzhou Su.pdf │ ├── Yiwen Ye.pdf │ └── Yunxin Zhong.pdf └── Task02 │ ├── Constantin Ulrich.pdf │ ├── Kaixiang Yang.pdf │ ├── Mehdi Astaraki.pdf │ ├── Yiwen Ye.pdf │ └── Zhaohu Xing.pdf ├── README.md ├── Tutorial ├── config │ ├── unet3d_GTVs.cfg │ └── unet3d_OARs.cfg ├── nnunet_baseline.ipynb ├── postprocessing.py ├── preprocessing.py ├── utils.py └── write_csv_file.py └── ethics.pdf /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | share/python-wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | MANIFEST 28 | 29 | # PyInstaller 30 | # Usually these files are written by a python script from a template 31 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 32 | *.manifest 33 | *.spec 34 | 35 | # Installer logs 36 | pip-log.txt 37 | pip-delete-this-directory.txt 38 | 39 | # Unit test / coverage reports 40 | htmlcov/ 41 | .tox/ 42 | .nox/ 43 | .coverage 44 | .coverage.* 45 | .cache 46 | nosetests.xml 47 | coverage.xml 48 | *.cover 49 | *.py,cover 50 | .hypothesis/ 51 | .pytest_cache/ 52 | cover/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | .pybuilder/ 76 | target/ 77 | 78 | # Jupyter Notebook 79 | .ipynb_checkpoints 80 | 81 | # IPython 82 | profile_default/ 83 | ipython_config.py 84 | 85 | # pyenv 86 | # For a library or package, you might want to ignore these files since the code is 87 | # intended to run in multiple environments; otherwise, check them in: 88 | # .python-version 89 | 90 | # pipenv 91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 
92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 94 | # install all needed dependencies. 95 | #Pipfile.lock 96 | 97 | # poetry 98 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 99 | # This is especially recommended for binary packages to ensure reproducibility, and is more 100 | # commonly ignored for libraries. 101 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 102 | #poetry.lock 103 | 104 | # pdm 105 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 106 | #pdm.lock 107 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 108 | # in version control. 109 | # https://pdm.fming.dev/#use-with-ide 110 | .pdm.toml 111 | 112 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 113 | __pypackages__/ 114 | 115 | # Celery stuff 116 | celerybeat-schedule 117 | celerybeat.pid 118 | 119 | # SageMath parsed files 120 | *.sage.py 121 | 122 | # Environments 123 | .env 124 | .venv 125 | env/ 126 | venv/ 127 | ENV/ 128 | env.bak/ 129 | venv.bak/ 130 | 131 | # Spyder project settings 132 | .spyderproject 133 | .spyproject 134 | 135 | # Rope project settings 136 | .ropeproject 137 | 138 | # mkdocs documentation 139 | /site 140 | 141 | # mypy 142 | .mypy_cache/ 143 | .dmypy.json 144 | dmypy.json 145 | 146 | # Pyre type checker 147 | .pyre/ 148 | 149 | # pytype static type analyzer 150 | .pytype/ 151 | 152 | # Cython debug symbols 153 | cython_debug/ 154 | 155 | # PyCharm 156 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 157 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 158 | # and can be added to the global gitignore or merged into this file. For a more nuclear 159 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 
160 | #.idea/ 161 | -------------------------------------------------------------------------------- /Docker_tutorial/SegRap2023_task1_OARs_nnUNet_Example/Dockerfile: -------------------------------------------------------------------------------- 1 | # Pull the base docker image 2 | # FROM pytorch/pytorch:1.13.0-cuda11.6-cudnn8-devel 3 | 4 | FROM python:3.10-slim 5 | 6 | RUN groupadd -r user && useradd -m --no-log-init -r -g user user 7 | 8 | RUN mkdir -p /opt/app /input /output \ 9 | && chown user:user /opt/app /input /output 10 | 11 | USER user 12 | WORKDIR /opt/app 13 | 14 | ENV PATH="/home/user/.local/bin:${PATH}" 15 | COPY --chown=user:user requirements.txt /opt/app/ 16 | 17 | RUN python -m pip install --user -U pip && python -m pip install --user pip-tools 18 | RUN python -m piptools sync requirements.txt 19 | 20 | # RUN python -m pip install --user -r requirements.txt 21 | RUN python -m pip install --user torch==1.12.0+cu116 torchvision==0.13.0+cu116 torchaudio==0.12.0 --extra-index-url https://download.pytorch.org/whl/cu116 22 | RUN python -m pip install --no-deps --user nnunet==1.7.1 23 | 24 | COPY --chown=user:user process.py /opt/app/ 25 | COPY --chown=user:user inference_code.py /opt/app/ 26 | COPY --chown=user:user post_processing.py /opt/app/ 27 | # COPY --chown=user:user images/ /opt/app/images/ 28 | COPY --chown=user:user weight/ /opt/app/weight/ 29 | # COPY --chown=user:user output/ /opt/app/ 30 | 31 | 32 | ENTRYPOINT [ "python", "-m", "process" ] 33 | -------------------------------------------------------------------------------- /Docker_tutorial/SegRap2023_task1_OARs_nnUNet_Example/README.md: -------------------------------------------------------------------------------- 1 | # SegRap2023_SegmentationContainer Algorithm 2 | 3 | The source code for the algorithm container for 4 | SegRap2023_SegmentationContainer, generated with 5 | evalutils version 0.4.2 6 | using Python 3.10. 7 | -------------------------------------------------------------------------------- /Docker_tutorial/SegRap2023_task1_OARs_nnUNet_Example/build.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | SCRIPTPATH="$( cd "$(dirname "$0")" ; pwd -P )" 3 | 4 | docker build -t segrap2023_oar_segmentationcontainer "$SCRIPTPATH" 5 | -------------------------------------------------------------------------------- /Docker_tutorial/SegRap2023_task1_OARs_nnUNet_Example/export.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | ./build.sh 4 | 5 | docker save segrap2023_oar_segmentationcontainer | gzip -c > SegRap2023_SegmentationContainer.tar.gz 6 | -------------------------------------------------------------------------------- /Docker_tutorial/SegRap2023_task1_OARs_nnUNet_Example/images/images/head-neck-contrast-enhanced-ct/data.txt: -------------------------------------------------------------------------------- 1 | Download a contrast-enhanced image from our provided link for your local machine testing [GoogleDrive](https://drive.google.com/file/d/17hJz9hQ1sajsW0aEgmiydvL9bVchqipr/view), [BaiduNetDisk](https://pan.baidu.com/s/1lwGENM9R7z3791FxQoy7fQ?pwd=2023).
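Note: this `data.txt` and the `head-neck-ct` one below are placeholders; for local testing each modality folder must hold one real `.mha` volume, since `test.sh` mounts `images/` as `/input/`. A minimal sanity check, assuming SimpleITK is installed locally and the downloaded images are already in place:

```python
import os
import SimpleITK as sitk

# test.sh mounts <example_dir>/images/ as /input/, so process.py expects one
# .mha volume in each of these two modality folders
base = "images/images"
for sub in ("head-neck-ct", "head-neck-contrast-enhanced-ct"):
    folder = os.path.join(base, sub)
    mha_files = [f for f in os.listdir(folder) if f.endswith(".mha")]
    assert mha_files, f"no .mha test image found in {folder}"
    img = sitk.ReadImage(os.path.join(folder, mha_files[0]))
    print(sub, img.GetSize(), img.GetSpacing())
```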
-------------------------------------------------------------------------------- /Docker_tutorial/SegRap2023_task1_OARs_nnUNet_Example/images/images/head-neck-ct/data.txt: -------------------------------------------------------------------------------- 1 | Download a non-contrast image from our provided link for your local machine testing [GoogleDrive](https://drive.google.com/file/d/17hJz9hQ1sajsW0aEgmiydvL9bVchqipr/view), [BaiduNetDisk](https://pan.baidu.com/s/1lwGENM9R7z3791FxQoy7fQ?pwd=2023). -------------------------------------------------------------------------------- /Docker_tutorial/SegRap2023_task1_OARs_nnUNet_Example/inference_code.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | o_path = os.getcwd() 4 | sys.path.append(o_path) 5 | import shutil 6 | from multiprocessing import Pool 7 | 8 | import numpy as np 9 | import torch 10 | from copy import deepcopy 11 | from batchgenerators.utilities.file_and_folder_operations import * 12 | from nnunet.inference.predict import preprocess_multithreaded 13 | from nnunet.inference.segmentation_export import save_segmentation_nifti_from_softmax 14 | from nnunet.postprocessing.connected_components import load_postprocessing, load_remove_save 15 | from nnunet.training.model_restore import load_model_and_checkpoint_files 16 | from torch import cuda 17 | from torch.nn import functional as F 18 | 19 | 20 | def predict_cases_segrap2023(model, list_of_lists, output_filenames, folds, save_npz=False, num_threads_preprocessing=6, 21 | num_threads_nifti_save=2, segs_from_prev_stage=None, do_tta=False, 22 | overwrite_existing=False, step_size=0.5, checkpoint_name="model_final_checkpoint", 23 | disable_postprocessing: bool = False): 24 | assert len(list_of_lists) == len(output_filenames) 25 | if segs_from_prev_stage is not None: 26 | assert len(segs_from_prev_stage) == len(output_filenames) 27 | 28 | pool = Pool(num_threads_nifti_save) 29 | results = [] 30 | 31 | cleaned_output_files = [] 32 | for o in output_filenames: 33 | dr, f = os.path.split(o) 34 | if len(dr) > 0: 35 | maybe_mkdir_p(dr) 36 | if not f.endswith(".nii.gz"): 37 | f, _ = os.path.splitext(f) 38 | f = f + ".nii.gz" 39 | cleaned_output_files.append(join(dr, f)) 40 | 41 | if not overwrite_existing: 42 | print("number of cases:", len(list_of_lists)) 43 | # if save_npz=True then we should also check for missing npz files 44 | not_done_idx = [i for i, j in enumerate(cleaned_output_files) if 45 | (not isfile(j)) or (save_npz and not isfile(j[:-7] + '.npz'))] 46 | 47 | cleaned_output_files = [cleaned_output_files[i] for i in not_done_idx] 48 | print(list_of_lists) 49 | list_of_lists = [list_of_lists[i] for i in not_done_idx] 50 | if segs_from_prev_stage is not None: 51 | segs_from_prev_stage = [segs_from_prev_stage[i] 52 | for i in not_done_idx] 53 | 54 | print("number of cases that still need to be predicted:", 55 | len(cleaned_output_files)) 56 | 57 | print("emptying cuda cache") 58 | torch.cuda.empty_cache() 59 | 60 | print("loading parameters for folds,", folds) 61 | trainer, params = load_model_and_checkpoint_files(model, folds, mixed_precision=True, 62 | checkpoint_name=checkpoint_name) 63 | 64 | print("starting preprocessing generator") 65 | preprocessing = preprocess_multithreaded(trainer, list_of_lists, cleaned_output_files, num_threads_preprocessing, 66 | segs_from_prev_stage) 67 | print("starting prediction...") 68 | all_output_files = [] 69 | with torch.no_grad(): 70 | for preprocessed in preprocessing: 71 |
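# each item from the preprocessing generator is (output_filename, (data, properties));
# data may be a path to a temporary .npy file when the case was too large to keep in RAM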
output_filename, (d, dct) = preprocessed 72 | all_output_files.append(all_output_files) 73 | if isinstance(d, str): 74 | data = np.load(d) 75 | os.remove(d) 76 | d = data 77 | 78 | # we need to be able to del it if things fail (just in case) 79 | softmax = None 80 | 81 | try: 82 | print("predicting", output_filename) 83 | print(f"attempting all_in_gpu {True}") 84 | trainer.load_checkpoint_ram(params[0], False) 85 | softmax = trainer.predict_preprocessed_data_return_seg_and_softmax( 86 | d, do_mirroring=do_tta, mirror_axes=trainer.data_aug_params[ 87 | 'mirror_axes'], use_sliding_window=True, 88 | step_size=step_size, use_gaussian=True, all_in_gpu=True, 89 | mixed_precision=True)[1] 90 | for p in params[1:]: 91 | trainer.load_checkpoint_ram(p, False) 92 | softmax += trainer.predict_preprocessed_data_return_seg_and_softmax( 93 | d, do_mirroring=do_tta, mirror_axes=trainer.data_aug_params[ 94 | 'mirror_axes'], use_sliding_window=True, 95 | step_size=step_size, use_gaussian=True, all_in_gpu=True, 96 | mixed_precision=True)[1] 97 | except RuntimeError: # out of gpu memory 98 | del softmax 99 | cuda.empty_cache() 100 | print( 101 | f"\nGPU AGGREGATION FAILED FOR CASE {os.path.basename(output_filename)} DUE TO OUT OF MEMORY, falling back to all_in_gpu False\n") 102 | trainer.load_checkpoint_ram(params[0], False) 103 | softmax = trainer.predict_preprocessed_data_return_seg_and_softmax( 104 | d, do_mirroring=do_tta, mirror_axes=trainer.data_aug_params[ 105 | 'mirror_axes'], use_sliding_window=True, 106 | step_size=step_size, use_gaussian=True, all_in_gpu=False, 107 | mixed_precision=True)[1] 108 | 109 | for p in params[1:]: 110 | trainer.load_checkpoint_ram(p, False) 111 | softmax += trainer.predict_preprocessed_data_return_seg_and_softmax( 112 | d, do_mirroring=do_tta, mirror_axes=trainer.data_aug_params[ 113 | 'mirror_axes'], use_sliding_window=True, 114 | step_size=step_size, use_gaussian=True, all_in_gpu=False, 115 | mixed_precision=True)[1] 116 | cuda.empty_cache() 117 | 118 | if len(params) > 1: 119 | softmax /= len(params) 120 | 121 | transpose_forward = trainer.plans.get('transpose_forward') 122 | if transpose_forward is not None: 123 | transpose_backward = trainer.plans.get('transpose_backward') 124 | # softmax = softmax.transpose([0] + [i + 1 for i in transpose_backward]) 125 | 126 | # resampling linearly on GPU 127 | torch.cuda.empty_cache() 128 | target_shape = dct.get('size_after_cropping') 129 | target_shape = [target_shape[i] for i in transpose_forward] 130 | if not isinstance(softmax, torch.Tensor): 131 | softmax = torch.from_numpy(softmax) 132 | try: 133 | with torch.no_grad(): 134 | softmax_resampled = torch.zeros((softmax.shape[0], *target_shape), dtype=torch.half, 135 | device='cuda:0') 136 | if not softmax.device == torch.device('cuda:0'): 137 | softmax_gpu = softmax.to(torch.device('cuda:0')) 138 | else: 139 | softmax_gpu = softmax 140 | for c in range(len(softmax)): 141 | softmax_resampled[c] = \ 142 | F.interpolate( 143 | softmax_gpu[c][None, None], size=target_shape, mode='trilinear')[0, 0] 144 | del softmax, softmax_gpu 145 | softmax_resampled = softmax_resampled.cpu().numpy() 146 | except RuntimeError: 147 | # gpu failed, try CPU 148 | print( 149 | f"\nGPU RESAMPLING FAILED FOR CASE {os.path.basename(output_filename)} DUE TO OUT OF MEMORY, falling back to CPU\n") 150 | 151 | if not softmax.device == torch.device('cpu'): 152 | softmax_cpu = softmax.to(torch.device('cpu')).float() 153 | else: 154 | softmax_cpu = softmax 155 | 156 | torch.cuda.empty_cache() 157 | with 
torch.no_grad(): 158 | softmax_resampled = torch.zeros( 159 | (softmax.shape[0], *target_shape)) 160 | # depending on where we crash this has already been converted or not 161 | if not isinstance(softmax, torch.Tensor): 162 | softmax = torch.from_numpy(softmax) 163 | for c in range(len(softmax)): 164 | softmax_resampled[c] = \ 165 | F.interpolate( 166 | softmax_cpu[c][None, None], size=target_shape, mode='trilinear')[0, 0] 167 | del softmax, softmax_cpu 168 | softmax_resampled = softmax_resampled.half().numpy() 169 | torch.cuda.empty_cache() 170 | ##################################### 171 | softmax_resampled = softmax_resampled.transpose( 172 | [0] + [i + 1 for i in transpose_backward]) 173 | 174 | if save_npz: 175 | npz_file = output_filename[:-7] + ".npz" 176 | else: 177 | npz_file = None 178 | 179 | if hasattr(trainer, 'regions_class_order'): 180 | region_class_order = trainer.regions_class_order 181 | else: 182 | region_class_order = None 183 | 184 | """There is a problem with python process communication that prevents us from communicating objects 185 | larger than 2 GB between processes (basically when the length of the pickle string that will be sent is 186 | communicated by the multiprocessing.Pipe object then the placeholder (I think) does not allow for long 187 | enough strings (lol). This could be fixed by changing i to l (for long) but that would require manually 188 | patching system python code. We circumvent that problem here by saving softmax_pred to a npy file that will 189 | then be read (and finally deleted) by the Process. save_segmentation_nifti_from_softmax can take either 190 | filename or np.ndarray and will handle this automatically""" 191 | bytes_per_voxel = 4 192 | print( 193 | f'softmax shape {softmax_resampled.shape}, softmax dtype {softmax_resampled.dtype}') 194 | if True: 195 | # if all_in_gpu then the return value is half (float16) 196 | bytes_per_voxel = 2 197 | # * 0.85 just to be save 198 | if np.prod(softmax_resampled.shape) > (2e9 / bytes_per_voxel * 0.85): 199 | print( 200 | "This output is too large for python process-process communication. Saving output temporarily to disk") 201 | np.save(output_filename[:-7] + ".npy", softmax_resampled) 202 | softmax_resampled = output_filename[:-7] + ".npy" 203 | 204 | results.append(pool.starmap_async(save_segmentation_nifti_from_softmax, 205 | ((softmax_resampled, output_filename, dct, 1, region_class_order, 206 | None, None, 207 | npz_file, None, False, 1),) 208 | )) 209 | 210 | print("inference done. Now waiting for the segmentation export to finish...") 211 | _ = [i.get() for i in results] 212 | # now apply postprocessing 213 | # first load the postprocessing properties if they are present. Else raise a well visible warning 214 | if not disable_postprocessing: 215 | results = [] 216 | pp_file = join(model, "postprocessing.json") 217 | if isfile(pp_file): 218 | print("postprocessing...") 219 | shutil.copy(pp_file, os.path.abspath( 220 | os.path.dirname(output_filenames[0]))) 221 | # for_which_classes stores for which of the classes everything but the largest connected component needs to be 222 | # removed 223 | for_which_classes, min_valid_obj_size = load_postprocessing( 224 | pp_file) 225 | results.append(pool.starmap_async(load_remove_save, 226 | zip(output_filenames, output_filenames, 227 | [for_which_classes] * 228 | len(output_filenames), 229 | [min_valid_obj_size] * len(output_filenames)))) 230 | _ = [i.get() for i in results] 231 | else: 232 | print("WARNING! 
Cannot run postprocessing because the postprocessing file is missing. Make sure to run " 233 | "consolidate_folds in the output folder of the model first!\nThe folder you need to run this in is " 234 | "%s" % model) 235 | 236 | pool.close() 237 | pool.join() 238 | 239 | 240 | def check_input_folder_and_return_caseIDs(input_folder, expected_num_modalities): 241 | print("This model expects %d input modalities for each image" % 242 | expected_num_modalities) 243 | files = subfiles(input_folder, suffix=".nii.gz", join=False, sort=True) 244 | 245 | maybe_case_ids = np.unique([i[:-12] for i in files]) 246 | 247 | remaining = deepcopy(files) 248 | missing = [] 249 | 250 | assert len( 251 | files) > 0, "input folder did not contain any images (expected to find .nii.gz file endings)" 252 | 253 | # now check if all required files are present and that no unexpected files are remaining 254 | for c in maybe_case_ids: 255 | for n in range(expected_num_modalities): 256 | expected_output_file = c + "_%04.0d.nii.gz" % n 257 | if not isfile(join(input_folder, expected_output_file)): 258 | missing.append(expected_output_file) 259 | else: 260 | remaining.remove(expected_output_file) 261 | 262 | print("Found %d unique case ids, here are some examples:" % len(maybe_case_ids), 263 | np.random.choice(maybe_case_ids, min(len(maybe_case_ids), 10))) 264 | print("If they don't look right, make sure to double check your filenames. They must end with _0000.nii.gz etc") 265 | 266 | if len(remaining) > 0: 267 | print("found %d unexpected remaining files in the folder. Here are some examples:" % len(remaining), 268 | np.random.choice(remaining, min(len(remaining), 10))) 269 | 270 | if len(missing) > 0: 271 | print("Some files are missing:") 272 | print(missing) 273 | raise RuntimeError("missing files in input_folder") 274 | 275 | return maybe_case_ids 276 | 277 | 278 | def predict_from_folder_segrap2023(model: str, input_folder: str, output_folder: str, folds: 0, part_id:0, num_parts:1): 279 | """ 280 | here we use the standard naming scheme to generate list_of_lists and output_files needed by predict_cases 281 | 282 | :param model: 283 | :param input_folder: 284 | :param output_folder: 285 | :param folds: 286 | :param save_npz: 287 | :param num_threads_preprocessing: 288 | :param num_threads_nifti_save: 289 | :param lowres_segmentations: 290 | :param part_id: 291 | :param num_parts: 292 | :param tta: 293 | :param mixed_precision: 294 | :param overwrite_existing: if not None then it will be overwritten with whatever is in there. 
None is default (no overwrite) 295 | :return: 296 | """ 297 | maybe_mkdir_p(output_folder) 298 | # shutil.copy(join(model, 'plans.pkl'), output_folder) 299 | 300 | # assert isfile(join(model, "plans.pkl") 301 | # ), "Folder with saved model weights must contain a plans.pkl file" 302 | expected_num_modalities = load_pickle( 303 | join(model+"/fold_{}".format(folds), "plans.pkl"))['num_modalities'] 304 | 305 | # check input folder integrity 306 | case_ids = check_input_folder_and_return_caseIDs( 307 | input_folder, expected_num_modalities) 308 | 309 | output_files = [join(output_folder, i + ".nii.gz") for i in case_ids] 310 | all_files = subfiles(input_folder, suffix=".nii.gz", join=False, sort=True) 311 | list_of_lists = [[join(input_folder, i) for i in all_files if i[:len(j)].startswith(j) and 312 | len(i) == (len(j) + 12)] for j in case_ids] 313 | 314 | return predict_cases_segrap2023(model, list_of_lists[part_id::num_parts], output_files[part_id::num_parts], folds=0) 315 | 316 | 317 | # predict_from_folder_segrap2023("weight/", "images/", "test/", 0, 0, 1) 318 | -------------------------------------------------------------------------------- /Docker_tutorial/SegRap2023_task1_OARs_nnUNet_Example/post_processing.py: -------------------------------------------------------------------------------- 1 | import SimpleITK as sitk 2 | import numpy as np 3 | import shutil 4 | import glob 5 | import os 6 | import sys 7 | o_path = os.getcwd() 8 | sys.path.append(o_path) 9 | 10 | 11 | segrap_subset_task001 = { 12 | 'Brain': [1, 2, 3, 4, 5, 6, 7, 8, 9], 13 | "BrainStem": 2, 14 | "Chiasm": 3, 15 | "TemporalLobe_L": [4, 6], 16 | "TemporalLobe_R": [5, 7], 17 | "Hippocampus_L": [8, 6], 18 | "Hippocampus_R": [9, 7], 19 | 'Eye_L': [10, 12], 20 | 'Eye_R': [11, 13], 21 | "Lens_L": 12, 22 | "Lens_R": 13, 23 | "OpticNerve_L": 14, 24 | "OpticNerve_R": 15, 25 | "MiddleEar_L": [18, 16, 20, 24, 28, 30], 26 | "MiddleEar_R": [19, 17, 21, 25, 29, 31], 27 | "IAC_L": 18, 28 | "IAC_R": 19, 29 | "TympanicCavity_L": [22, 20], 30 | "TympanicCavity_R": [23, 21], 31 | "VestibulSemi_L": [26, 24], 32 | "VestibulSemi_R": [27, 25], 33 | "Cochlea_L": 28, 34 | "Cochlea_R": 29, 35 | "ETbone_L": [32, 30], 36 | "ETbone_R": [33, 31], 37 | "Pituitary": 34, 38 | "OralCavity": 35, 39 | "Mandible_L": 36, 40 | "Mandible_R": 37, 41 | "Submandibular_L": 38, 42 | "Submandibular_R": 39, 43 | "Parotid_L": 40, 44 | "Parotid_R": 41, 45 | "Mastoid_L": 42, 46 | "Mastoid_R": 43, 47 | "TMjoint_L": 44, 48 | "TMjoint_R": 45, 49 | "SpinalCord": 46, 50 | "Esophagus": 47, 51 | "Larynx": [48, 49, 50, 51], 52 | "Larynx_Glottic": 49, 53 | "Larynx_Supraglot": 50, 54 | "PharynxConst": [51, 52], 55 | "Thyroid": 53, 56 | "Trachea": 54} 57 | 58 | 59 | segrap_subset_task002 = { 60 | "GTVp": 1, 61 | "GTVnd": 2} 62 | 63 | 64 | def nii2array(path): 65 | mask_itk_ref = sitk.ReadImage(path) 66 | mask_arr_ref = sitk.GetArrayFromImage(mask_itk_ref) 67 | return mask_arr_ref 68 | 69 | 70 | def merge_multi_class_to_one(input_arr, classes_index=None): 71 | new_arr = np.zeros_like(input_arr) 72 | for cls_ind in classes_index: 73 | new_arr[input_arr == cls_ind] = 1 74 | return new_arr 75 | 76 | 77 | def convert_one_hot_label_to_multi_organs(ont_hot_label_path, save_path): 78 | patient_results = [] 79 | spacing = None 80 | for organ in segrap_subset_task001.keys(): 81 | ont_hot_label_arr = nii2array(ont_hot_label_path) 82 | ont_hot_label_itk = sitk.ReadImage(ont_hot_label_path) 83 | spacing = ont_hot_label_itk.GetSpacing() 84 | 85 | if type(segrap_subset_task001[organ]) is list: 
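# composite structures (e.g. Brain, MiddleEar_L, Larynx) map to several label indices
# and are merged into a single binary mask; single-index organs are handled in the else branch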
86 | new_arr = merge_multi_class_to_one( 87 | ont_hot_label_arr, segrap_subset_task001[organ]) 88 | else: 89 | new_arr = np.zeros_like(ont_hot_label_arr) 90 | new_arr[ont_hot_label_arr == segrap_subset_task001[organ]] = 1 91 | patient_results.append(new_arr) 92 | 93 | oars = [] 94 | for t in patient_results: 95 | oars.append(sitk.GetImageFromArray(t, False)) 96 | output_itk = sitk.JoinSeries(oars) 97 | new_spacing = (spacing[0], spacing[1], spacing[2], 1) 98 | output_itk.SetSpacing(new_spacing) 99 | print(output_itk.GetSize()) 100 | sitk.WriteImage(output_itk, save_path, True) 101 | print("Conversion Finished !") 102 | 103 | 104 | def convert_one_hot_label_to_multi_lesions(ont_hot_label_path, save_fold): 105 | patient_results = [] 106 | spacing = None 107 | for lesion in segrap_subset_task002.keys(): 108 | ont_hot_label_arr = nii2array(ont_hot_label_path) 109 | ont_hot_label_itk = sitk.ReadImage(ont_hot_label_path) 110 | spacing = ont_hot_label_itk.GetSpacing() 111 | new_arr = np.zeros_like(ont_hot_label_arr) 112 | new_arr[ont_hot_label_arr == segrap_subset_task002[lesion]] = 1 113 | patient_results.append(new_arr) 114 | new_itk = sitk.GetImageFromArray( 115 | np.array(patient_results).transpose(1, 2, 3, 0)) 116 | new_itk.SetSpacing(spacing) 117 | sitk.WriteImage(new_itk, "{}.nii.gz".format(save_fold)) 118 | return "Conversion Finished" 119 | 120 | 121 | # if __name__ == "__main__": 122 | # for patient in glob.glob("test/*"): 123 | # new_path = "test/{}".format( 124 | # patient.split("/")[-1].replace("_cropped.nii.gz", "")) 125 | # if os.path.exists(new_path): 126 | # pass 127 | # convert_one_hot_label_to_multi_organs(patient, new_path) 128 | # else: 129 | # os.mkdir(new_path) 130 | # convert_one_hot_label_to_multi_organs(patient, new_path) 131 | # print("Convert all predictions to single organ files") 132 | -------------------------------------------------------------------------------- /Docker_tutorial/SegRap2023_task1_OARs_nnUNet_Example/process.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from post_processing import convert_one_hot_label_to_multi_organs 3 | from inference_code import predict_from_folder_segrap2023 4 | from evalutils.validators import ( 5 | UniquePathIndicesValidator, 6 | UniqueImagesValidator, 7 | ) 8 | from evalutils import SegmentationAlgorithm 9 | import numpy as np 10 | import SimpleITK 11 | import os 12 | import sys 13 | o_path = os.getcwd() 14 | print(o_path) 15 | sys.path.append(o_path) 16 | 17 | 18 | class Customalgorithm(): # SegmentationAlgorithm is not inherited in this class anymore 19 | def __init__(self): 20 | """ 21 | Do not modify the `self.input_dir` and `self.output_dir`. 22 | (Check https://grand-challenge.org/algorithms/interfaces/) 23 | """ 24 | self.input_dir = "/input/" 25 | self.output_dir = "/output/images/head-neck-segmentation/" 26 | 27 | """ 28 | Store the validation/test data and predictions into the `self.nii_path` and `self.result_path`, respectively. 29 | Put your model and pkl files into the `self.weight`. 
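For this nnU-Net example, `self.weight` must contain `fold_0/plans.pkl` together with the trained checkpoint files.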
30 | """ 31 | self.nii_path = '/opt/app/nnUNet_raw_data_base/nnUNet_raw_data/Task001_SegRap2023/imagesTs' 32 | self.result_path = '/opt/app/nnUNet_raw_data_base/nnUNet_raw_data/Task001_SegRap2023/result' 33 | self.nii_seg_file = 'SegRap2023_001.nii.gz' 34 | self.weight = "./weight/" 35 | if not os.path.exists(self.nii_path): 36 | os.makedirs(self.nii_path, exist_ok=True) 37 | if not os.path.exists(self.result_path): 38 | os.makedirs(self.result_path, exist_ok=True) 39 | pass 40 | 41 | def convert_mha_to_nii(self, mha_input_path, nii_out_path): # nnUNet specific 42 | img = SimpleITK.ReadImage(mha_input_path) 43 | print(img.GetSize()) 44 | SimpleITK.WriteImage(img, nii_out_path, True) 45 | 46 | def convert_nii_to_mha(self, nii_input_path, mha_out_path): # nnUNet specific 47 | img = SimpleITK.ReadImage(nii_input_path) 48 | SimpleITK.WriteImage(img, mha_out_path, True) 49 | 50 | def check_gpu(self): 51 | """ 52 | Check if GPU is available. Note that the Grand Challenge only has one available GPU. 53 | """ 54 | print('Checking GPU availability') 55 | is_available = torch.cuda.is_available() 56 | print('Available: ' + str(is_available)) 57 | print(f'Device count: {torch.cuda.device_count()}') 58 | if is_available: 59 | print(f'Current device: {torch.cuda.current_device()}') 60 | print('Device name: ' + torch.cuda.get_device_name(0)) 61 | print('Device memory: ' + 62 | str(torch.cuda.get_device_properties(0).total_memory)) 63 | 64 | def load_inputs(self): # use two modalities input data 65 | """ 66 | Read input data (two modalities) from `self.input_dir` (/input/). 67 | Please do not modify the path for CT and contrast-CT images. 68 | """ 69 | ct_mha = os.listdir(os.path.join(self.input_dir, 'images/head-neck-ct/'))[0] 70 | ctc_mha = os.listdir(os.path.join(self.input_dir, 'images/head-neck-contrast-enhanced-ct/'))[0] 71 | uuid = os.path.splitext(ct_mha)[0] 72 | 73 | """ 74 | if your model was based on nnUNet baseline and used two modalities as inputs, 75 | please convert the input data into '_0000.nii.gz' and '_0001.nii.gz' using following code. 76 | """ 77 | self.convert_mha_to_nii(os.path.join(self.input_dir, 'images/head-neck-ct/', ct_mha), 78 | os.path.join(self.nii_path, 'SegRap2023_001_0000.nii.gz')) 79 | self.convert_mha_to_nii(os.path.join(self.input_dir, 'images/head-neck-contrast-enhanced-ct/', ctc_mha), 80 | os.path.join(self.nii_path, 'SegRap2023_001_0001.nii.gz')) 81 | 82 | # Check the validation/test data exist. 83 | print(os.listdir('/opt/app/nnUNet_raw_data_base/nnUNet_raw_data/Task001_SegRap2023/imagesTs')) 84 | 85 | return uuid 86 | 87 | 88 | # def load_inputs(self): # only use non-contrast-CT images as input 89 | # """ 90 | # Read input data (non-contrast-CT images) from `self.input_dir` (/input/). 91 | # Please do not modify the path for non-contrast-CT images. 92 | # """ 93 | # ct = os.listdir(os.path.join(self.input_dir, 'images/head-neck-ct/'))[0] 94 | # uuid = os.path.splitext(ct)[0] 95 | 96 | # """ 97 | # if your model was based on nnUNet baseline and only used non-contrast-CT images as inputs, 98 | # please convert the input data into '_0000.nii.gz' using following code. 99 | # """ 100 | # self.convert_mha_to_nii(os.path.join(self.input_dir, 'images/head-neck-ct/', ct), 101 | # os.path.join(self.nii_path, 'SegRap2023_001_0000.nii.gz')) 102 | # # Check the validation/test data exist. 
103 | # print(os.listdir('/opt/app/nnUNet_raw_data_base/nnUNet_raw_data/Task001_SegRap2023/imagesTs')) 104 | 105 | # return uuid 106 | 107 | 108 | # def load_inputs(self): # only use contrast-CT images as input 109 | # """ 110 | # Read input data (single contrast-CT images) from `self.input_dir` (/input/). 111 | # Please do not modify the path for contrast-CT images. 112 | # """ 113 | # ct = os.listdir(os.path.join(self.input_dir, 'images/head-neck-contrast-enhanced-ct/'))[0] 114 | # uuid = os.path.splitext(ct)[0] 115 | 116 | # """ 117 | # if your model was based on nnUNet baseline and only used contrast-CT images as inputs, 118 | # please convert the input data into '_0000.nii.gz' using following code. 119 | # """ 120 | # self.convert_mha_to_nii(os.path.join(self.input_dir, 'images/head-neck-contrast-enhanced-ct/', ct), 121 | # os.path.join(self.nii_path, 'SegRap2023_001_0000.nii.gz')) 122 | 123 | # # Check the validation/test data exist. 124 | # print(os.listdir('/opt/app/nnUNet_raw_data_base/nnUNet_raw_data/Task001_SegRap2023/imagesTs')) 125 | 126 | # return uuid 127 | 128 | 129 | def write_outputs(self, uuid): 130 | """ 131 | If you used one-hot label (54 classes) for training, please convert the 54 classes prediction to 45 oars prediction using function `convert_one_hot_label_to_multi_organs`. 132 | Otherwise, stack your 45 predictions for oars in the first channel, the corresponding mapping between the channel index and the organ names is: 133 | {0: 'Brain', 134 | 1: 'BrainStem', 135 | 2: 'Chiasm', 136 | 3: 'TemporalLobe_L', 137 | 4: 'TemporalLobe_R', 138 | 5: 'Hippocampus_L', 139 | 6: 'Hippocampus_R', 140 | 7: 'Eye_L', 141 | 8: 'Eye_R', 142 | 9: 'Lens_L', 143 | 10: 'Lens_R', 144 | 11: 'OpticNerve_L', 145 | 12: 'OpticNerve_R', 146 | 13: 'MiddleEar_L', 147 | 14: 'MiddleEar_R', 148 | 15: 'IAC_L', 149 | 16: 'IAC_R', 150 | 17: 'TympanicCavity_L', 151 | 18: 'TympanicCavity_R', 152 | 19: 'VestibulSemi_L', 153 | 20: 'VestibulSemi_R', 154 | 21: 'Cochlea_L', 155 | 22: 'Cochlea_R', 156 | 23: 'ETbone_L', 157 | 24: 'ETbone_R', 158 | 25: 'Pituitary', 159 | 26: 'OralCavity', 160 | 27: 'Mandible_L', 161 | 28: 'Mandible_R', 162 | 29: 'Submandibular_L', 163 | 30: 'Submandibular_R', 164 | 31: 'Parotid_L', 165 | 32: 'Parotid_R', 166 | 33: 'Mastoid_L', 167 | 34: 'Mastoid_R', 168 | 35: 'TMjoint_L', 169 | 36: 'TMjoint_R', 170 | 37: 'SpinalCord', 171 | 38: 'Esophagus', 172 | 39: 'Larynx', 173 | 40: 'Larynx_Glottic', 174 | 41: 'Larynx_Supraglot', 175 | 42: 'PharynxConst', 176 | 43: 'Thyroid', 177 | 44: 'Trachea'} 178 | Please ensure the 0 channel is the prediction of Brain, the 1 channel is the prediction of BrainStem, ......, the 44 channel is the prediction of Trachea. 179 | and also ensure the shape of final prediction array is [45, *image_shape]. 180 | The predictions should be saved in the `self.output_dir` (/output/). Please do not modify the path and the suffix (.mha) for saving the prediction. 181 | """ 182 | os.makedirs(os.path.dirname(self.output_dir), exist_ok=True) 183 | convert_one_hot_label_to_multi_organs(os.path.join( 184 | self.result_path, self.nii_seg_file), os.path.join(self.output_dir, uuid + ".mha")) 185 | print('Output written to: ', os.path.join(self.output_dir, uuid + ".mha")) 186 | 187 | def predict(self): 188 | """ 189 | load the model and checkpoint, and generate the predictions. You can replace this part with your own model. 
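The call below runs fold 0 on all cases (part_id=0, num_parts=1) using the weights under `self.weight`.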
190 | """ 191 | predict_from_folder_segrap2023(self.weight, self.nii_path, self.result_path, 0, 0, 1) 192 | print("nnUNet segmentation done!") 193 | if not os.path.exists(os.path.join(self.result_path, self.nii_seg_file)): 194 | print('waiting for nnUNet segmentation to be created') 195 | 196 | while not os.path.exists(os.path.join(self.result_path, self.nii_seg_file)): 197 | import time 198 | print('.', end='') 199 | time.sleep(5) 200 | # print(cproc) # since nnUNet_predict call is split into prediction and postprocess, a pre-mature exit code is received but segmentation file not yet written. This hack ensures that all spawned subprocesses are finished before being printed. 201 | print('Prediction finished !') 202 | 203 | def post_process(self): 204 | self.check_gpu() 205 | print('Start processing') 206 | uuid = self.load_inputs() 207 | print('Start prediction') 208 | self.predict() 209 | print('Start output writing') 210 | self.write_outputs(uuid) 211 | 212 | def process(self): 213 | """ 214 | Read inputs from /input, process with your algorithm and write to /output 215 | """ 216 | print(self.weight, self.nii_path, self.result_path) 217 | self.post_process() 218 | 219 | 220 | if __name__ == "__main__": 221 | Customalgorithm().process() 222 | -------------------------------------------------------------------------------- /Docker_tutorial/SegRap2023_task1_OARs_nnUNet_Example/requirements.in: -------------------------------------------------------------------------------- 1 | 2 | evalutils==0.4.2 3 | -------------------------------------------------------------------------------- /Docker_tutorial/SegRap2023_task1_OARs_nnUNet_Example/requirements.txt: -------------------------------------------------------------------------------- 1 | # 2 | # This file is autogenerated by pip-compile with Python 3.10 3 | # by the following command: 4 | # 5 | # pip-compile --resolver=backtracking 6 | # 7 | arrow==1.2.3 8 | # via jinja2-time 9 | binaryornot==0.4.4 10 | # via cookiecutter 11 | build==0.10.0 12 | # via pip-tools 13 | certifi==2023.5.7 14 | # via requests 15 | chardet==5.1.0 16 | # via binaryornot 17 | charset-normalizer==3.1.0 18 | # via requests 19 | click==8.1.3 20 | # via 21 | # cookiecutter 22 | # evalutils 23 | # pip-tools 24 | cookiecutter==2.1.1 25 | # via evalutils 26 | evalutils==0.4.2 27 | # via -r requirements.in 28 | idna==3.4 29 | # via requests 30 | imageio[tifffile]==2.31.1 31 | # via evalutils 32 | jinja2==3.1.2 33 | # via 34 | # cookiecutter 35 | # jinja2-time 36 | jinja2-time==0.2.0 37 | # via cookiecutter 38 | joblib==1.3.1 39 | # via scikit-learn 40 | markupsafe==2.1.3 41 | # via jinja2 42 | numpy==1.25.0 43 | # via 44 | # evalutils 45 | # imageio 46 | # pandas 47 | # scikit-learn 48 | # scipy 49 | # tifffile 50 | packaging==23.1 51 | # via build 52 | pandas==2.0.3 53 | # via evalutils 54 | pillow==10.0.0 55 | # via imageio 56 | pip-tools==6.14.0 57 | # via evalutils 58 | pyproject-hooks==1.0.0 59 | # via build 60 | python-dateutil==2.8.2 61 | # via 62 | # arrow 63 | # pandas 64 | python-slugify==8.0.1 65 | # via cookiecutter 66 | pytz==2023.3 67 | # via pandas 68 | pyyaml==6.0 69 | # via cookiecutter 70 | requests==2.31.0 71 | # via cookiecutter 72 | scikit-learn==1.3.0 73 | # via evalutils 74 | scipy==1.11.1 75 | # via 76 | # evalutils 77 | # scikit-learn 78 | simpleitk==2.2.1 79 | # via evalutils 80 | six==1.16.0 81 | # via python-dateutil 82 | text-unidecode==1.3 83 | # via python-slugify 84 | threadpoolctl==3.1.0 85 | # via scikit-learn 86 | tifffile==2023.4.12 87 | # 
via imageio 88 | tomli==2.0.1 89 | # via 90 | # build 91 | # pip-tools 92 | # pyproject-hooks 93 | tzdata==2023.3 94 | # via pandas 95 | urllib3==2.0.3 96 | # via requests 97 | wheel==0.40.0 98 | # via pip-tools 99 | batchgenerators==0.25 100 | matplotlib 101 | tqdm 102 | dicom2nifti 103 | scikit-image 104 | medpy 105 | nibabel 106 | # The following packages are considered to be unsafe in a requirements file: 107 | # pip 108 | # setuptools 109 | -------------------------------------------------------------------------------- /Docker_tutorial/SegRap2023_task1_OARs_nnUNet_Example/test.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | SCRIPTPATH="$( cd "$(dirname "$0")" ; pwd -P )" 4 | 5 | ./build.sh 6 | 7 | VOLUME_SUFFIX=$(dd if=/dev/urandom bs=32 count=1 | md5sum | cut --delimiter=' ' --fields=1) 8 | # Maximum is currently 30g, configurable in your algorithm image settings on grand challenge 9 | MEM_LIMIT="32g" 10 | 11 | docker volume create segrap2023_oar_segmentationcontainer-output-$VOLUME_SUFFIX 12 | 13 | echo $VOLUME_SUFFIX 14 | # Do not change any of the parameters to docker run, these are fixed 15 | docker run --rm --gpus all \ 16 | --memory="${MEM_LIMIT}" \ 17 | --memory-swap="${MEM_LIMIT}" \ 18 | --network="none" \ 19 | --cap-drop="ALL" \ 20 | --security-opt="no-new-privileges" \ 21 | --shm-size="128m" \ 22 | --pids-limit="256" \ 23 | -v $SCRIPTPATH/images/:/input/ \ 24 | -v segrap2023_oar_segmentationcontainer-output-$VOLUME_SUFFIX:/output/ \ 25 | segrap2023_oar_segmentationcontainer 26 | 27 | docker run --rm \ 28 | -v segrap2023_oar_segmentationcontainer-output-$VOLUME_SUFFIX:/output/ \ 29 | python:3.10-slim ls -al /output/images/head-neck-segmentation 30 | 31 | docker run --rm \ 32 | -v segrap2023_oar_segmentationcontainer-output-$VOLUME_SUFFIX:/output/ \ 33 | python:3.10-slim cat /output/results.json | python -m json.tool 34 | 35 | docker run --rm \ 36 | -v segrap2023_oar_segmentationcontainer-output-$VOLUME_SUFFIX:/output/ \ 37 | -v $SCRIPTPATH/test/:/input/ \ 38 | python:3.10-slim python -c "import json, sys; f1 = json.load(open('/output/results.json')); f2 = json.load(open('/input/expected_output.json')); sys.exit(f1 != f2);" 39 | 40 | if [ $? -eq 0 ]; then 41 | echo "Tests successfully passed..." 42 | else 43 | echo "Expected output was not found..." 44 | fi 45 | 46 | docker volume rm segrap2023_oar_segmentationcontainer-output-$VOLUME_SUFFIX 47 | -------------------------------------------------------------------------------- /Docker_tutorial/SegRap2023_task1_OARs_nnUNet_Example/weight/fold_0/plans.pkl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HiLab-git/SegRap2023/de87c551571a1ee4c7e6dc17e9824c19ddec6f30/Docker_tutorial/SegRap2023_task1_OARs_nnUNet_Example/weight/fold_0/plans.pkl -------------------------------------------------------------------------------- /Docker_tutorial/SegRap2023_task1_OARs_nnUNet_Example/weight/fold_0/weight.txt: -------------------------------------------------------------------------------- 1 | Download a trained nnUNet model for your local machine testing [GoogleDrive](https://drive.google.com/file/d/17hJz9hQ1sajsW0aEgmiydvL9bVchqipr/view), [BaiduNetDisk](https://pan.baidu.com/s/1lwGENM9R7z3791FxQoy7fQ?pwd=2023). 
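This placeholder stands in for the trained model files. `predict_from_folder_segrap2023` reads `weight/fold_0/plans.pkl` to get the expected number of modalities and loads the checkpoint via nnU-Net's `load_model_and_checkpoint_files`. A minimal layout check, assuming nnU-Net v1's default checkpoint file names (`model_final_checkpoint.model` plus its `.pkl`) and that `plans.pkl` unpickles with only numpy installed:

```python
import os
import pickle

weight_dir = "weight/fold_0"

# plans.pkl stores, among other things, the number of input modalities
# (2 here: non-contrast CT and contrast-enhanced CT)
with open(os.path.join(weight_dir, "plans.pkl"), "rb") as f:
    plans = pickle.load(f)
print("num_modalities:", plans["num_modalities"])

# assumed nnU-Net v1 checkpoint names for checkpoint_name="model_final_checkpoint"
for name in ("model_final_checkpoint.model", "model_final_checkpoint.model.pkl"):
    path = os.path.join(weight_dir, name)
    print(name, "found" if os.path.isfile(path) else "MISSING")
```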
2 | -------------------------------------------------------------------------------- /Docker_tutorial/SegRap2023_task2_GTVs_nnUNet_Example/Dockerfile: -------------------------------------------------------------------------------- 1 | # Pull the base docker image 2 | # FROM pytorch/pytorch:1.13.0-cuda11.6-cudnn8-devel 3 | 4 | FROM python:3.10-slim 5 | 6 | RUN groupadd -r user && useradd -m --no-log-init -r -g user user 7 | 8 | RUN mkdir -p /opt/app /input /output \ 9 | && chown user:user /opt/app /input /output 10 | 11 | USER user 12 | WORKDIR /opt/app 13 | 14 | ENV PATH="/home/user/.local/bin:${PATH}" 15 | COPY --chown=user:user requirements.txt /opt/app/ 16 | 17 | RUN python -m pip install --user -U pip && python -m pip install --user pip-tools 18 | RUN python -m piptools sync requirements.txt 19 | 20 | # RUN python -m pip install --user -r requirements.txt 21 | RUN python -m pip install --user torch==1.12.0+cu116 torchvision==0.13.0+cu116 torchaudio==0.12.0 --extra-index-url https://download.pytorch.org/whl/cu116 22 | RUN python -m pip install --no-deps --user nnunet==1.7.1 23 | 24 | COPY --chown=user:user process.py /opt/app/ 25 | COPY --chown=user:user inference_code.py /opt/app/ 26 | COPY --chown=user:user post_processing.py /opt/app/ 27 | # COPY --chown=user:user images/ /opt/app/images/ 28 | COPY --chown=user:user weight/ /opt/app/weight/ 29 | # COPY --chown=user:user output/ /opt/app/ 30 | 31 | 32 | ENTRYPOINT [ "python", "-m", "process" ] 33 | -------------------------------------------------------------------------------- /Docker_tutorial/SegRap2023_task2_GTVs_nnUNet_Example/README.md: -------------------------------------------------------------------------------- 1 | # SegRap2023_GTVs_SegmentationContainer Algorithm 2 | 3 | The source code for the algorithm container for 4 | SegRap2023_GTVs_SegmentationContainer, generated with 5 | evalutils version 0.4.2 6 | using Python 3.10. 7 | -------------------------------------------------------------------------------- /Docker_tutorial/SegRap2023_task2_GTVs_nnUNet_Example/build.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | SCRIPTPATH="$( cd "$(dirname "$0")" ; pwd -P )" 3 | 4 | docker build -t segrap2023_gtv_segmentationcontainer "$SCRIPTPATH" 5 | -------------------------------------------------------------------------------- /Docker_tutorial/SegRap2023_task2_GTVs_nnUNet_Example/export.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | ./build.sh 4 | 5 | docker save segrap2023_gtv_segmentationcontainer | gzip -c > SegRap2023_GTV_SegmentationContainer.tar.gz 6 | -------------------------------------------------------------------------------- /Docker_tutorial/SegRap2023_task2_GTVs_nnUNet_Example/images/images/head-neck-contrast-enhanced-ct/data.txt: -------------------------------------------------------------------------------- 1 | Download a contrast-enhanced image from our provided link for your local machine testing [GoogleDrive](https://drive.google.com/file/d/17hJz9hQ1sajsW0aEgmiydvL9bVchqipr/view), [BaiduNetDisk](https://pan.baidu.com/s/1lwGENM9R7z3791FxQoy7fQ?pwd=2023).
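As in the task1 example, the `data.txt` placeholders mark where local test images go. Inside the container, `load_inputs` converts the two `.mha` inputs to nnU-Net's `_000X` naming before inference; a sketch of the same conversion for local experimentation (the local paths are illustrative):

```python
import os
import SimpleITK as sitk

# mirrors load_inputs(): non-contrast CT becomes _0000, contrast-enhanced CT _0001
modalities = [
    ("images/images/head-neck-ct", "SegRap2023_002_0000.nii.gz"),
    ("images/images/head-neck-contrast-enhanced-ct", "SegRap2023_002_0001.nii.gz"),
]
out_dir = "imagesTs"
os.makedirs(out_dir, exist_ok=True)
for folder, out_name in modalities:
    mha = [f for f in os.listdir(folder) if f.endswith(".mha")][0]
    img = sitk.ReadImage(os.path.join(folder, mha))
    sitk.WriteImage(img, os.path.join(out_dir, out_name), True)
```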
-------------------------------------------------------------------------------- /Docker_tutorial/SegRap2023_task2_GTVs_nnUNet_Example/images/images/head-neck-ct/data.txt: -------------------------------------------------------------------------------- 1 | Download a non-contrast image from our provided link for your local machine testing [GoogleDrive](https://drive.google.com/file/d/17hJz9hQ1sajsW0aEgmiydvL9bVchqipr/view), [BaiduNetDisk](https://pan.baidu.com/s/1lwGENM9R7z3791FxQoy7fQ?pwd=2023). -------------------------------------------------------------------------------- /Docker_tutorial/SegRap2023_task2_GTVs_nnUNet_Example/inference_code.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | o_path = os.getcwd() 4 | sys.path.append(o_path) 5 | import shutil 6 | from multiprocessing import Pool 7 | 8 | import numpy as np 9 | import torch 10 | from copy import deepcopy 11 | from batchgenerators.utilities.file_and_folder_operations import * 12 | from nnunet.inference.predict import preprocess_multithreaded 13 | from nnunet.inference.segmentation_export import save_segmentation_nifti_from_softmax 14 | from nnunet.postprocessing.connected_components import load_postprocessing, load_remove_save 15 | from nnunet.training.model_restore import load_model_and_checkpoint_files 16 | from torch import cuda 17 | from torch.nn import functional as F 18 | 19 | 20 | def predict_cases_segrap2023(model, list_of_lists, output_filenames, folds, save_npz=False, num_threads_preprocessing=6, 21 | num_threads_nifti_save=2, segs_from_prev_stage=None, do_tta=False, 22 | overwrite_existing=False, step_size=0.5, checkpoint_name="model_final_checkpoint", 23 | disable_postprocessing: bool = False): 24 | assert len(list_of_lists) == len(output_filenames) 25 | if segs_from_prev_stage is not None: 26 | assert len(segs_from_prev_stage) == len(output_filenames) 27 | 28 | pool = Pool(num_threads_nifti_save) 29 | results = [] 30 | 31 | cleaned_output_files = [] 32 | for o in output_filenames: 33 | dr, f = os.path.split(o) 34 | if len(dr) > 0: 35 | maybe_mkdir_p(dr) 36 | if not f.endswith(".nii.gz"): 37 | f, _ = os.path.splitext(f) 38 | f = f + ".nii.gz" 39 | cleaned_output_files.append(join(dr, f)) 40 | 41 | if not overwrite_existing: 42 | print("number of cases:", len(list_of_lists)) 43 | # if save_npz=True then we should also check for missing npz files 44 | not_done_idx = [i for i, j in enumerate(cleaned_output_files) if 45 | (not isfile(j)) or (save_npz and not isfile(j[:-7] + '.npz'))] 46 | 47 | cleaned_output_files = [cleaned_output_files[i] for i in not_done_idx] 48 | print(list_of_lists) 49 | list_of_lists = [list_of_lists[i] for i in not_done_idx] 50 | if segs_from_prev_stage is not None: 51 | segs_from_prev_stage = [segs_from_prev_stage[i] 52 | for i in not_done_idx] 53 | 54 | print("number of cases that still need to be predicted:", 55 | len(cleaned_output_files)) 56 | 57 | print("emptying cuda cache") 58 | torch.cuda.empty_cache() 59 | 60 | print("loading parameters for folds,", folds) 61 | trainer, params = load_model_and_checkpoint_files(model, folds, mixed_precision=True, 62 | checkpoint_name=checkpoint_name) 63 | 64 | print("starting preprocessing generator") 65 | preprocessing = preprocess_multithreaded(trainer, list_of_lists, cleaned_output_files, num_threads_preprocessing, 66 | segs_from_prev_stage) 67 | print("starting prediction...") 68 | all_output_files = [] 69 | with torch.no_grad(): 70 | for preprocessed in preprocessing: 71 |
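# the generator yields (output_filename, (data, properties_dict)); a string here means
# the preprocessed case was written to a temporary .npy file instead of kept in memory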
output_filename, (d, dct) = preprocessed 72 | all_output_files.append(all_output_files) 73 | if isinstance(d, str): 74 | data = np.load(d) 75 | os.remove(d) 76 | d = data 77 | 78 | # we need to be able to del it if things fail (just in case) 79 | softmax = None 80 | 81 | try: 82 | print("predicting", output_filename) 83 | print(f"attempting all_in_gpu {True}") 84 | trainer.load_checkpoint_ram(params[0], False) 85 | softmax = trainer.predict_preprocessed_data_return_seg_and_softmax( 86 | d, do_mirroring=do_tta, mirror_axes=trainer.data_aug_params[ 87 | 'mirror_axes'], use_sliding_window=True, 88 | step_size=step_size, use_gaussian=True, all_in_gpu=True, 89 | mixed_precision=True)[1] 90 | for p in params[1:]: 91 | trainer.load_checkpoint_ram(p, False) 92 | softmax += trainer.predict_preprocessed_data_return_seg_and_softmax( 93 | d, do_mirroring=do_tta, mirror_axes=trainer.data_aug_params[ 94 | 'mirror_axes'], use_sliding_window=True, 95 | step_size=step_size, use_gaussian=True, all_in_gpu=True, 96 | mixed_precision=True)[1] 97 | except RuntimeError: # out of gpu memory 98 | del softmax 99 | cuda.empty_cache() 100 | print( 101 | f"\nGPU AGGREGATION FAILED FOR CASE {os.path.basename(output_filename)} DUE TO OUT OF MEMORY, falling back to all_in_gpu False\n") 102 | trainer.load_checkpoint_ram(params[0], False) 103 | softmax = trainer.predict_preprocessed_data_return_seg_and_softmax( 104 | d, do_mirroring=do_tta, mirror_axes=trainer.data_aug_params[ 105 | 'mirror_axes'], use_sliding_window=True, 106 | step_size=step_size, use_gaussian=True, all_in_gpu=False, 107 | mixed_precision=True)[1] 108 | 109 | for p in params[1:]: 110 | trainer.load_checkpoint_ram(p, False) 111 | softmax += trainer.predict_preprocessed_data_return_seg_and_softmax( 112 | d, do_mirroring=do_tta, mirror_axes=trainer.data_aug_params[ 113 | 'mirror_axes'], use_sliding_window=True, 114 | step_size=step_size, use_gaussian=True, all_in_gpu=False, 115 | mixed_precision=True)[1] 116 | cuda.empty_cache() 117 | 118 | if len(params) > 1: 119 | softmax /= len(params) 120 | 121 | transpose_forward = trainer.plans.get('transpose_forward') 122 | if transpose_forward is not None: 123 | transpose_backward = trainer.plans.get('transpose_backward') 124 | # softmax = softmax.transpose([0] + [i + 1 for i in transpose_backward]) 125 | 126 | # resampling linearly on GPU 127 | torch.cuda.empty_cache() 128 | target_shape = dct.get('size_after_cropping') 129 | target_shape = [target_shape[i] for i in transpose_forward] 130 | if not isinstance(softmax, torch.Tensor): 131 | softmax = torch.from_numpy(softmax) 132 | try: 133 | with torch.no_grad(): 134 | softmax_resampled = torch.zeros((softmax.shape[0], *target_shape), dtype=torch.half, 135 | device='cuda:0') 136 | if not softmax.device == torch.device('cuda:0'): 137 | softmax_gpu = softmax.to(torch.device('cuda:0')) 138 | else: 139 | softmax_gpu = softmax 140 | for c in range(len(softmax)): 141 | softmax_resampled[c] = \ 142 | F.interpolate( 143 | softmax_gpu[c][None, None], size=target_shape, mode='trilinear')[0, 0] 144 | del softmax, softmax_gpu 145 | softmax_resampled = softmax_resampled.cpu().numpy() 146 | except RuntimeError: 147 | # gpu failed, try CPU 148 | print( 149 | f"\nGPU RESAMPLING FAILED FOR CASE {os.path.basename(output_filename)} DUE TO OUT OF MEMORY, falling back to CPU\n") 150 | 151 | if not softmax.device == torch.device('cpu'): 152 | softmax_cpu = softmax.to(torch.device('cpu')).float() 153 | else: 154 | softmax_cpu = softmax 155 | 156 | torch.cuda.empty_cache() 157 | with 
torch.no_grad(): 158 | softmax_resampled = torch.zeros( 159 | (softmax.shape[0], *target_shape)) 160 | # depending on where we crash this has already been converted or not 161 | if not isinstance(softmax, torch.Tensor): 162 | softmax = torch.from_numpy(softmax) 163 | for c in range(len(softmax)): 164 | softmax_resampled[c] = \ 165 | F.interpolate( 166 | softmax_cpu[c][None, None], size=target_shape, mode='trilinear')[0, 0] 167 | del softmax, softmax_cpu 168 | softmax_resampled = softmax_resampled.half().numpy() 169 | torch.cuda.empty_cache() 170 | ##################################### 171 | softmax_resampled = softmax_resampled.transpose( 172 | [0] + [i + 1 for i in transpose_backward]) 173 | 174 | if save_npz: 175 | npz_file = output_filename[:-7] + ".npz" 176 | else: 177 | npz_file = None 178 | 179 | if hasattr(trainer, 'regions_class_order'): 180 | region_class_order = trainer.regions_class_order 181 | else: 182 | region_class_order = None 183 | 184 | """There is a problem with python process communication that prevents us from communicating objects 185 | larger than 2 GB between processes (basically when the length of the pickle string that will be sent is 186 | communicated by the multiprocessing.Pipe object then the placeholder (I think) does not allow for long 187 | enough strings (lol). This could be fixed by changing i to l (for long) but that would require manually 188 | patching system python code. We circumvent that problem here by saving softmax_pred to a npy file that will 189 | then be read (and finally deleted) by the Process. save_segmentation_nifti_from_softmax can take either 190 | filename or np.ndarray and will handle this automatically""" 191 | bytes_per_voxel = 4 192 | print( 193 | f'softmax shape {softmax_resampled.shape}, softmax dtype {softmax_resampled.dtype}') 194 | if True: 195 | # if all_in_gpu then the return value is half (float16) 196 | bytes_per_voxel = 2 197 | # * 0.85 just to be save 198 | if np.prod(softmax_resampled.shape) > (2e9 / bytes_per_voxel * 0.85): 199 | print( 200 | "This output is too large for python process-process communication. Saving output temporarily to disk") 201 | np.save(output_filename[:-7] + ".npy", softmax_resampled) 202 | softmax_resampled = output_filename[:-7] + ".npy" 203 | 204 | results.append(pool.starmap_async(save_segmentation_nifti_from_softmax, 205 | ((softmax_resampled, output_filename, dct, 1, region_class_order, 206 | None, None, 207 | npz_file, None, False, 1),) 208 | )) 209 | 210 | print("inference done. Now waiting for the segmentation export to finish...") 211 | _ = [i.get() for i in results] 212 | # now apply postprocessing 213 | # first load the postprocessing properties if they are present. Else raise a well visible warning 214 | if not disable_postprocessing: 215 | results = [] 216 | pp_file = join(model, "postprocessing.json") 217 | if isfile(pp_file): 218 | print("postprocessing...") 219 | shutil.copy(pp_file, os.path.abspath( 220 | os.path.dirname(output_filenames[0]))) 221 | # for_which_classes stores for which of the classes everything but the largest connected component needs to be 222 | # removed 223 | for_which_classes, min_valid_obj_size = load_postprocessing( 224 | pp_file) 225 | results.append(pool.starmap_async(load_remove_save, 226 | zip(output_filenames, output_filenames, 227 | [for_which_classes] * 228 | len(output_filenames), 229 | [min_valid_obj_size] * len(output_filenames)))) 230 | _ = [i.get() for i in results] 231 | else: 232 | print("WARNING! 
 232 |             print("WARNING! Cannot run postprocessing because the postprocessing file is missing. Make sure to run "
 233 |                   "consolidate_folds in the output folder of the model first!\nThe folder you need to run this in is "
 234 |                   "%s" % model)
 235 | 
 236 |     pool.close()
 237 |     pool.join()
 238 | 
 239 | 
 240 | def check_input_folder_and_return_caseIDs(input_folder, expected_num_modalities):
 241 |     print("This model expects %d input modalities for each image" %
 242 |           expected_num_modalities)
 243 |     files = subfiles(input_folder, suffix=".nii.gz", join=False, sort=True)
 244 | 
 245 |     maybe_case_ids = np.unique([i[:-12] for i in files])
 246 | 
 247 |     remaining = deepcopy(files)
 248 |     missing = []
 249 | 
 250 |     assert len(
 251 |         files) > 0, "input folder did not contain any images (expected to find .nii.gz file endings)"
 252 | 
 253 |     # now check that all required files are present and that no unexpected files remain
 254 |     for c in maybe_case_ids:
 255 |         for n in range(expected_num_modalities):
 256 |             expected_output_file = c + "_%04.0d.nii.gz" % n
 257 |             if not isfile(join(input_folder, expected_output_file)):
 258 |                 missing.append(expected_output_file)
 259 |             else:
 260 |                 remaining.remove(expected_output_file)
 261 | 
 262 |     print("Found %d unique case ids, here are some examples:" % len(maybe_case_ids),
 263 |           np.random.choice(maybe_case_ids, min(len(maybe_case_ids), 10)))
 264 |     print("If they don't look right, make sure to double check your filenames. They must end with _0000.nii.gz etc")
 265 | 
 266 |     if len(remaining) > 0:
 267 |         print("Found %d unexpected remaining files in the folder. Here are some examples:" % len(remaining),
 268 |               np.random.choice(remaining, min(len(remaining), 10)))
 269 | 
 270 |     if len(missing) > 0:
 271 |         print("Some files are missing:")
 272 |         print(missing)
 273 |         raise RuntimeError("missing files in input_folder")
 274 | 
 275 |     return maybe_case_ids
 276 | 
 277 | 
 278 | def predict_from_folder_segrap2023(model: str, input_folder: str, output_folder: str, folds: int = 0, part_id: int = 0, num_parts: int = 1):
 279 |     """
 280 |     here we use the standard naming scheme to generate list_of_lists and output_files needed by predict_cases
 281 | 
 282 |     :param model: folder that contains fold_{folds}/plans.pkl and the trained weights
 283 |     :param input_folder: folder with the *_0000.nii.gz (etc.) images to predict
 284 |     :param output_folder: folder the predicted segmentations are written to
 285 |     :param folds: which fold to load (this example ships fold_0 only)
 286 |     :param part_id: index of this worker when the cases are split across num_parts workers
 287 |     :param num_parts: total number of workers the cases are split across
 288 |     :return:
 289 |     """
 290 |     maybe_mkdir_p(output_folder)
 291 |     # shutil.copy(join(model, 'plans.pkl'), output_folder)
 292 | 
 293 |     # assert isfile(join(model, "plans.pkl")
 294 |     #               ), "Folder with saved model weights must contain a plans.pkl file"
 295 |     expected_num_modalities = load_pickle(
 296 |         join(model, "fold_{}".format(folds), "plans.pkl"))['num_modalities']
 297 | 
 298 |     # check input folder integrity
 299 |     case_ids = check_input_folder_and_return_caseIDs(
 300 |         input_folder, expected_num_modalities)
 301 | 
 302 |     output_files = [join(output_folder, i + ".nii.gz") for i in case_ids]
 303 |     all_files = subfiles(input_folder, suffix=".nii.gz", join=False, sort=True)
 304 |     list_of_lists = [[join(input_folder, i) for i in all_files if i.startswith(j) and
 305 |                       len(i) == (len(j) + 12)] for j in case_ids]
 306 | 
 307 |     # pass the requested fold through instead of hard-coding fold 0
 308 |     return predict_cases_segrap2023(model, list_of_lists[part_id::num_parts], output_files[part_id::num_parts], folds=folds)
 309 | 
 310 | 
 311 | # predict_from_folder_segrap2023("weight/", "images/", "test/", 0, 0, 1)
 312 | 
--------------------------------------------------------------------------------
/Docker_tutorial/SegRap2023_task2_GTVs_nnUNet_Example/post_processing.py:
--------------------------------------------------------------------------------
 1 | import SimpleITK as sitk
 2 | import numpy as np
 3 | import shutil
 4 | import glob
 5 | import os
 6 | import sys
 7 | o_path = os.getcwd()
 8 | sys.path.append(o_path)
 9 | 
 10 | 
 11 | segrap_subset_task002 = {
 12 |     "GTVp": 1,
 13 |     "GTVnd": 2}
 14 | 
 15 | 
 16 | def nii2array(path):
 17 |     mask_itk_ref = sitk.ReadImage(path)
 18 |     mask_arr_ref = sitk.GetArrayFromImage(mask_itk_ref)
 19 |     return mask_arr_ref
 20 | 
 21 | 
 22 | def merge_multi_class_to_one(input_arr, classes_index=()):
 23 |     new_arr = np.zeros_like(input_arr)
 24 |     for cls_ind in classes_index:
 25 |         new_arr[input_arr == cls_ind] = 1
 26 |     return new_arr
 27 | 
 28 | 
 29 | def convert_one_hot_label_to_multi_lesions(one_hot_label_path, save_path):
 30 |     # read the one-hot prediction once instead of re-reading it for every lesion
 31 |     one_hot_label_arr = nii2array(one_hot_label_path)
 32 |     one_hot_label_itk = sitk.ReadImage(one_hot_label_path)
 33 |     spacing = one_hot_label_itk.GetSpacing()
 34 |     patient_results = []
 35 |     for lesion in segrap_subset_task002.keys():
 36 |         new_arr = np.zeros_like(one_hot_label_arr)
 37 |         new_arr[one_hot_label_arr == segrap_subset_task002[lesion]] = 1
 38 |         patient_results.append(new_arr)
 39 |     lesions = []
 40 |     for t in patient_results:
 41 |         lesions.append(sitk.GetImageFromArray(t, False))  # isVector=False
 42 |     output_itk = sitk.JoinSeries(lesions)
 43 |     new_spacing = (spacing[0], spacing[1], spacing[2], 1)
 44 |     output_itk.SetSpacing(new_spacing)
 45 |     print(output_itk.GetSize())
 46 |     sitk.WriteImage(output_itk, save_path, True)
 47 |     print("Conversion Finished")
 48 | 
 49 | 
 50 | # if __name__ == "__main__":
 51 | #     for patient in glob.glob("test/*"):
 52 | #         new_path = "test/{}".format(
 53 | #             patient.split("/")[-1].replace("_cropped.nii.gz", ""))
 54 | #         if not os.path.exists(new_path):
 55 | #             os.mkdir(new_path)
 56 | #         convert_one_hot_label_to_multi_lesions(patient, new_path)
 57 | #     print("Converted all predictions to single lesion files")
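 58 | 
 59 | # Usage sketch (the paths here are hypothetical; process.py calls this function
 60 | # with the real container paths): convert a one-hot nnUNet prediction with
 61 | # labels {1: GTVp, 2: GTVnd} into the required 4D .mha, one channel per lesion.
 62 | # convert_one_hot_label_to_multi_lesions(
 63 | #     "result/SegRap2023_002.nii.gz", "output/some-uuid.mha")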
--------------------------------------------------------------------------------
/Docker_tutorial/SegRap2023_task2_GTVs_nnUNet_Example/process.py:
--------------------------------------------------------------------------------
 1 | import torch
 2 | from post_processing import convert_one_hot_label_to_multi_lesions
 3 | from inference_code import predict_from_folder_segrap2023
 4 | from evalutils.validators import (
 5 |     UniquePathIndicesValidator,
 6 |     UniqueImagesValidator,
 7 | )
 8 | from evalutils import SegmentationAlgorithm
 9 | import numpy as np
 10 | import SimpleITK
 11 | import os
 12 | import sys
 13 | o_path = os.getcwd()
 14 | print(o_path)
 15 | sys.path.append(o_path)
 16 | 
 17 | 
 18 | class Customalgorithm():  # SegmentationAlgorithm is not inherited in this class anymore
 19 |     def __init__(self):
 20 |         """
 21 |         Do not modify `self.input_dir` or `self.output_dir`.
 22 |         (Check https://grand-challenge.org/algorithms/interfaces/)
 23 |         """
 24 |         self.input_dir = "/input/"
 25 |         self.output_dir = "/output/images/gross-tumor-volume-segmentation/"
 26 | 
 27 |         """
 28 |         Store the validation/test data and the predictions in `self.nii_path` and `self.result_path`, respectively.
 29 |         Put your model and pkl files into `self.weight`.
 30 |         """
 31 |         self.nii_path = '/opt/app/nnUNet_raw_data_base/nnUNet_raw_data/Task002_SegRap2023/imagesTs'
 32 |         self.result_path = '/opt/app/nnUNet_raw_data_base/nnUNet_raw_data/Task002_SegRap2023/result'
 33 |         self.nii_seg_file = 'SegRap2023_002.nii.gz'
 34 |         self.weight = "./weight/"
 35 |         if not os.path.exists(self.nii_path):
 36 |             os.makedirs(self.nii_path, exist_ok=True)
 37 |         if not os.path.exists(self.result_path):
 38 |             os.makedirs(self.result_path, exist_ok=True)
 39 | 
 40 |     def convert_mha_to_nii(self, mha_input_path, nii_out_path):  # nnUNet specific
 41 |         img = SimpleITK.ReadImage(mha_input_path)
 42 |         print(img.GetSize())
 43 |         print(img.GetSpacing())
 44 |         SimpleITK.WriteImage(img, nii_out_path, True)
 45 | 
 46 |     def convert_nii_to_mha(self, nii_input_path, mha_out_path):  # nnUNet specific
 47 |         img = SimpleITK.ReadImage(nii_input_path)
 48 |         print(img.GetSize())
 49 |         print(img.GetSpacing())
 50 |         SimpleITK.WriteImage(img, mha_out_path, True)
 51 | 
 52 |     def check_gpu(self):
 53 |         """
 54 |         Check if a GPU is available. Note that Grand Challenge provides only one GPU.
 55 |         """
 56 |         print('Checking GPU availability')
 57 |         is_available = torch.cuda.is_available()
 58 |         print('Available: ' + str(is_available))
 59 |         print(f'Device count: {torch.cuda.device_count()}')
 60 |         if is_available:
 61 |             print(f'Current device: {torch.cuda.current_device()}')
 62 |             print('Device name: ' + torch.cuda.get_device_name(0))
 63 |             print('Device memory: ' +
 64 |                   str(torch.cuda.get_device_properties(0).total_memory))
 65 | 
 66 |     def load_inputs(self):  # uses both modalities as input
 67 |         """
 68 |         Read the input data (two modalities) from `self.input_dir` (/input/).
 69 |         Please do not modify the paths for the CT and contrast-enhanced CT images.
 70 |         """
 71 |         ct_mha = os.listdir(os.path.join(
 72 |             self.input_dir, 'images/head-neck-ct/'))[0]
 73 |         ctc_mha = os.listdir(os.path.join(
 74 |             self.input_dir, 'images/head-neck-contrast-enhanced-ct/'))[0]
 75 |         uuid = os.path.splitext(ct_mha)[0]
 76 | 
 77 |         """
 78 |         If your model is based on the nnUNet baseline and uses two modalities as input,
 79 |         convert the input data to '_0000.nii.gz' and '_0001.nii.gz' using the following code.
 80 |         """
 81 |         self.convert_mha_to_nii(os.path.join(self.input_dir, 'images/head-neck-ct/', ct_mha),
 82 |                                 os.path.join(self.nii_path, 'SegRap2023_002_0000.nii.gz'))
 83 |         self.convert_mha_to_nii(os.path.join(self.input_dir, 'images/head-neck-contrast-enhanced-ct/', ctc_mha),
 84 |                                 os.path.join(self.nii_path, 'SegRap2023_002_0001.nii.gz'))
 85 | 
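        # Note on naming: nnUNet identifies modalities purely by the four-digit
        # suffix, so _0000 must be the first training modality (here the
        # non-contrast CT) and _0001 the second (the contrast-enhanced CT); the
        # case identifier in front of the suffix must be identical for all
        # modalities of a case.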
 86 |         # Check that the validation/test data exist.
 87 |         print(os.listdir('/opt/app/nnUNet_raw_data_base/nnUNet_raw_data/Task002_SegRap2023/imagesTs'))
 88 |         return uuid
 89 | 
 90 |     # def load_inputs(self):  # only use non-contrast CT images as input
 91 |     #     """
 92 |     #     Read the input data (non-contrast CT images) from `self.input_dir` (/input/).
 93 |     #     Please do not modify the path for the non-contrast CT images.
 94 |     #     """
 95 |     #     ct = os.listdir(os.path.join(self.input_dir, 'images/head-neck-ct/'))[0]
 96 |     #     uuid = os.path.splitext(ct)[0]
 97 | 
 98 |     #     """
 99 |     #     If your model is based on the nnUNet baseline and uses only non-contrast CT images as input,
 100 |     #     convert the input data to '_0000.nii.gz' using the following code.
 101 |     #     """
 102 |     #     self.convert_mha_to_nii(os.path.join(self.input_dir, 'images/head-neck-ct/', ct),
 103 |     #                             os.path.join(self.nii_path, 'SegRap2023_002_0000.nii.gz'))
 104 |     #     # Check that the validation/test data exist.
 105 |     #     print(os.listdir('/opt/app/nnUNet_raw_data_base/nnUNet_raw_data/Task002_SegRap2023/imagesTs'))
 106 | 
 107 |     #     return uuid
 108 | 
 109 | 
 110 |     # def load_inputs(self):  # only use contrast-enhanced CT images as input
 111 |     #     """
 112 |     #     Read the input data (contrast-enhanced CT images only) from `self.input_dir` (/input/).
 113 |     #     Please do not modify the path for the contrast-enhanced CT images.
 114 |     #     """
 115 |     #     ct = os.listdir(os.path.join(self.input_dir, 'images/head-neck-contrast-enhanced-ct/'))[0]
 116 |     #     uuid = os.path.splitext(ct)[0]
 117 | 
 118 |     #     """
 119 |     #     If your model is based on the nnUNet baseline and uses only contrast-enhanced CT images as input,
 120 |     #     convert the input data to '_0000.nii.gz' using the following code.
 121 |     #     """
 122 |     #     self.convert_mha_to_nii(os.path.join(self.input_dir, 'images/head-neck-contrast-enhanced-ct/', ct),
 123 |     #                             os.path.join(self.nii_path, 'SegRap2023_002_0000.nii.gz'))
 124 | 
 125 |     #     # Check that the validation/test data exist.
 126 |     #     print(os.listdir('/opt/app/nnUNet_raw_data_base/nnUNet_raw_data/Task002_SegRap2023/imagesTs'))
 127 | 
 128 |     #     return uuid
 129 | 
 130 |     def write_outputs(self, uuid):
 131 |         """
 132 |         If you used a one-hot label (2 classes) for training, convert the 2-class prediction into the 2 GTV predictions with `convert_one_hot_label_to_multi_lesions`.
 133 |         Otherwise, stack your 2 GTV predictions along the first channel; the mapping between channel index and GTV name is:
 134 |         {0: 'GTVp',
 135 |          1: 'GTVnd'}
 136 |         Ensure that channel 0 is the GTVp prediction and channel 1 is the GTVnd prediction,
 137 |         and that the shape of the final prediction array is [2, *image_shape].
 138 |         The predictions must be saved to `self.output_dir` (/output/). Please do not modify the path or the suffix (.mha) used for saving the prediction.
 139 |         """
 140 |         os.makedirs(os.path.dirname(self.output_dir), exist_ok=True)
 141 |         convert_one_hot_label_to_multi_lesions(os.path.join(
 142 |             self.result_path, self.nii_seg_file), os.path.join(self.output_dir, uuid + ".mha"))
 143 |         print('Output written to: ' +
 144 |               os.path.join(self.output_dir, uuid + ".mha"))
 145 | 
 146 |     def predict(self):
 147 |         """
 148 |         Load the model and checkpoint, and generate the predictions. You can replace this part with your own model.
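        A minimal sketch of what a drop-in replacement needs to do (the name
        `my_model_predict` is hypothetical and only for illustration):

            def predict(self):
                # read the converted .nii.gz inputs from self.nii_path, run your
                # model, and write self.nii_seg_file into self.result_path
                my_model_predict(self.nii_path,
                                 os.path.join(self.result_path, self.nii_seg_file))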
151 | """ 152 | predict_from_folder_segrap2023( 153 | self.weight, self.nii_path, self.result_path, 0, 0, 1) 154 | print("nnUNet segmentation done!") 155 | if not os.path.exists(os.path.join(self.result_path, self.nii_seg_file)): 156 | print('waiting for nnUNet segmentation to be created') 157 | 158 | while not os.path.exists(os.path.join(self.result_path, self.nii_seg_file)): 159 | import time 160 | print('.', end='') 161 | # print(cproc) # since nnUNet_predict call is split into prediction and postprocess, a pre-mature exit code is received but segmentation file not yet written. This hack ensures that all spawned subprocesses are finished before being printed. 162 | print('Prediction finished') 163 | 164 | def post_process(self): 165 | self.check_gpu() 166 | print('Start processing') 167 | uuid = self.load_inputs() 168 | print('Start prediction') 169 | self.predict() 170 | print('Start output writing') 171 | self.write_outputs(uuid) 172 | 173 | def process(self): 174 | """ 175 | Read inputs from /input, process with your algorithm and write to /output 176 | """ 177 | self.post_process() 178 | 179 | 180 | if __name__ == "__main__": 181 | Customalgorithm().process() 182 | -------------------------------------------------------------------------------- /Docker_tutorial/SegRap2023_task2_GTVs_nnUNet_Example/requirements.in: -------------------------------------------------------------------------------- 1 | 2 | evalutils==0.4.2 3 | -------------------------------------------------------------------------------- /Docker_tutorial/SegRap2023_task2_GTVs_nnUNet_Example/requirements.txt: -------------------------------------------------------------------------------- 1 | # 2 | # This file is autogenerated by pip-compile with Python 3.10 3 | # by the following command: 4 | # 5 | # pip-compile --resolver=backtracking 6 | # 7 | arrow==1.2.3 8 | # via jinja2-time 9 | binaryornot==0.4.4 10 | # via cookiecutter 11 | build==0.10.0 12 | # via pip-tools 13 | certifi==2023.5.7 14 | # via requests 15 | chardet==5.1.0 16 | # via binaryornot 17 | charset-normalizer==3.1.0 18 | # via requests 19 | click==8.1.3 20 | # via 21 | # cookiecutter 22 | # evalutils 23 | # pip-tools 24 | cookiecutter==2.1.1 25 | # via evalutils 26 | evalutils==0.4.2 27 | # via -r requirements.in 28 | idna==3.4 29 | # via requests 30 | imageio[tifffile]==2.31.1 31 | # via evalutils 32 | jinja2==3.1.2 33 | # via 34 | # cookiecutter 35 | # jinja2-time 36 | jinja2-time==0.2.0 37 | # via cookiecutter 38 | joblib==1.3.1 39 | # via scikit-learn 40 | markupsafe==2.1.3 41 | # via jinja2 42 | numpy==1.25.0 43 | # via 44 | # evalutils 45 | # imageio 46 | # pandas 47 | # scikit-learn 48 | # scipy 49 | # tifffile 50 | packaging==23.1 51 | # via build 52 | pandas==2.0.3 53 | # via evalutils 54 | pillow==10.0.0 55 | # via imageio 56 | pip-tools==6.14.0 57 | # via evalutils 58 | pyproject-hooks==1.0.0 59 | # via build 60 | python-dateutil==2.8.2 61 | # via 62 | # arrow 63 | # pandas 64 | python-slugify==8.0.1 65 | # via cookiecutter 66 | pytz==2023.3 67 | # via pandas 68 | pyyaml==6.0 69 | # via cookiecutter 70 | requests==2.31.0 71 | # via cookiecutter 72 | scikit-learn==1.3.0 73 | # via evalutils 74 | scipy==1.11.1 75 | # via 76 | # evalutils 77 | # scikit-learn 78 | simpleitk==2.2.1 79 | # via evalutils 80 | six==1.16.0 81 | # via python-dateutil 82 | text-unidecode==1.3 83 | # via python-slugify 84 | threadpoolctl==3.1.0 85 | # via scikit-learn 86 | tifffile==2023.4.12 87 | # via imageio 88 | tomli==2.0.1 89 | # via 90 | # build 91 | # pip-tools 92 
 92 |     #   pyproject-hooks
 93 | tzdata==2023.3
 94 |     # via pandas
 95 | urllib3==2.0.3
 96 |     # via requests
 97 | wheel==0.40.0
 98 |     # via pip-tools
 99 | batchgenerators==0.25  # this and the lines below were added by hand, outside pip-compile
 100 | matplotlib
 101 | tqdm
 102 | dicom2nifti
 103 | scikit-image
 104 | medpy
 105 | nibabel
 106 | # The following packages are considered to be unsafe in a requirements file:
 107 | # pip
 108 | # setuptools
--------------------------------------------------------------------------------
/Docker_tutorial/SegRap2023_task2_GTVs_nnUNet_Example/test.sh:
--------------------------------------------------------------------------------
 1 | #!/usr/bin/env bash
 2 | 
 3 | SCRIPTPATH="$( cd "$(dirname "$0")" ; pwd -P )"
 4 | 
 5 | ./build.sh
 6 | 
 7 | VOLUME_SUFFIX=$(dd if=/dev/urandom bs=32 count=1 | md5sum | cut --delimiter=' ' --fields=1)
 8 | # Grand Challenge caps algorithm memory at 30g (configurable in your algorithm image settings); a larger value such as this one is only usable for local testing
 9 | MEM_LIMIT="32g"
 10 | 
 11 | docker volume create segrap2023_gtv_segmentationcontainer-output-$VOLUME_SUFFIX
 12 | 
 13 | echo $VOLUME_SUFFIX
 14 | # Do not change any of the parameters to docker run, these are fixed
 15 | docker run --rm --gpus all \
 16 |         --memory="${MEM_LIMIT}" \
 17 |         --memory-swap="${MEM_LIMIT}" \
 18 |         --network="none" \
 19 |         --cap-drop="ALL" \
 20 |         --security-opt="no-new-privileges" \
 21 |         --shm-size="128m" \
 22 |         --pids-limit="256" \
 23 |         -v $SCRIPTPATH/images/:/input/ \
 24 |         -v segrap2023_gtv_segmentationcontainer-output-$VOLUME_SUFFIX:/output/ \
 25 |         segrap2023_gtv_segmentationcontainer
 26 | 
 27 | docker run --rm \
 28 |         -v segrap2023_gtv_segmentationcontainer-output-$VOLUME_SUFFIX:/output/ \
 29 |         python:3.10-slim ls -al /output/images/gross-tumor-volume-segmentation
 30 | 
 31 | docker run --rm \
 32 |         -v segrap2023_gtv_segmentationcontainer-output-$VOLUME_SUFFIX:/output/ \
 33 |         python:3.10-slim cat /output/results.json | python -m json.tool
 34 | 
 35 | docker run --rm \
 36 |         -v segrap2023_gtv_segmentationcontainer-output-$VOLUME_SUFFIX:/output/ \
 37 |         -v $SCRIPTPATH/test/:/input/ \
 38 |         python:3.10-slim python -c "import json, sys; f1 = json.load(open('/output/results.json')); f2 = json.load(open('/input/expected_output.json')); sys.exit(f1 != f2);"
 39 | 
 40 | if [ $? -eq 0 ]; then
 41 |     echo "Tests successfully passed..."
 42 | else
 43 |     echo "Expected output was not found..."
 44 | fi
 45 | 
 46 | docker volume rm segrap2023_gtv_segmentationcontainer-output-$VOLUME_SUFFIX
 47 | 
--------------------------------------------------------------------------------
/Docker_tutorial/SegRap2023_task2_GTVs_nnUNet_Example/weight/fold_0/plans.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/HiLab-git/SegRap2023/de87c551571a1ee4c7e6dc17e9824c19ddec6f30/Docker_tutorial/SegRap2023_task2_GTVs_nnUNet_Example/weight/fold_0/plans.pkl
--------------------------------------------------------------------------------
/Docker_tutorial/SegRap2023_task2_GTVs_nnUNet_Example/weight/fold_0/weight.txt:
--------------------------------------------------------------------------------
 1 | Download a trained nnUNet model for local testing on your machine: [GoogleDrive](https://drive.google.com/file/d/17hJz9hQ1sajsW0aEgmiydvL9bVchqipr/view), [BaiduNetDisk](https://pan.baidu.com/s/1lwGENM9R7z3791FxQoy7fQ?pwd=2023).
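After unzipping, arrange the files so the layout matches what `inference_code.py` loads, e.g. `weight/fold_0/plans.pkl` next to the checkpoint files from the archive. (The exact checkpoint file names depend on the nnUNet version, so keep whatever names the download contains; treat this layout as an assumption based on the loader code above.)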
 2 | 
--------------------------------------------------------------------------------
/Docker_tutorial/gtvs_output_example.mha:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/HiLab-git/SegRap2023/de87c551571a1ee4c7e6dc17e9824c19ddec6f30/Docker_tutorial/gtvs_output_example.mha
--------------------------------------------------------------------------------
/Docker_tutorial/gtvs_output_example.zip:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/HiLab-git/SegRap2023/de87c551571a1ee4c7e6dc17e9824c19ddec6f30/Docker_tutorial/gtvs_output_example.zip
--------------------------------------------------------------------------------
/Docker_tutorial/oars_output_example.mha:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/HiLab-git/SegRap2023/de87c551571a1ee4c7e6dc17e9824c19ddec6f30/Docker_tutorial/oars_output_example.mha
--------------------------------------------------------------------------------
/Docker_tutorial/oars_output_example.zip:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/HiLab-git/SegRap2023/de87c551571a1ee4c7e6dc17e9824c19ddec6f30/Docker_tutorial/oars_output_example.zip
--------------------------------------------------------------------------------
/Docker_tutorial/outputs.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/HiLab-git/SegRap2023/de87c551571a1ee4c7e6dc17e9824c19ddec6f30/Docker_tutorial/outputs.png
--------------------------------------------------------------------------------
/Docker_tutorial/stacked_results_to_4d_mha.py:
--------------------------------------------------------------------------------
 1 | import os
 2 | import numpy as np
 3 | import SimpleITK as sitk
 4 | 
 5 | # Download and unzip "Docker_tutorial/gtvs_output_example.zip" and "Docker_tutorial/oars_output_example.zip" to run this example.
 6 | # The mapping between the structures (OARs or GTVs) and the index of the 4D file (mha).
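# A quick sanity check for a stacked file produced by the functions below
# (a sketch; it assumes the example archives mentioned above were unzipped and
# converted so the .mha files exist):
#   img = sitk.ReadImage("gtvs_output_example.mha")
#   print(img.GetDimension(), img.GetSize())  # expect 4 and (x, y, z, 2) for GTVs
# For the OAR file the 4th dimension should be 45, matching oars_mapping_dict.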
 7 | oars_mapping_dict = {0: 'Brain',
 8 |                      1: 'BrainStem',
 9 |                      2: 'Chiasm',
 10 |                      3: 'TemporalLobe_L',
 11 |                      4: 'TemporalLobe_R',
 12 |                      5: 'Hippocampus_L',
 13 |                      6: 'Hippocampus_R',
 14 |                      7: 'Eye_L',
 15 |                      8: 'Eye_R',
 16 |                      9: 'Lens_L',
 17 |                      10: 'Lens_R',
 18 |                      11: 'OpticNerve_L',
 19 |                      12: 'OpticNerve_R',
 20 |                      13: 'MiddleEar_L',
 21 |                      14: 'MiddleEar_R',
 22 |                      15: 'IAC_L',
 23 |                      16: 'IAC_R',
 24 |                      17: 'TympanicCavity_L',
 25 |                      18: 'TympanicCavity_R',
 26 |                      19: 'VestibulSemi_L',
 27 |                      20: 'VestibulSemi_R',
 28 |                      21: 'Cochlea_L',
 29 |                      22: 'Cochlea_R',
 30 |                      23: 'ETbone_L',
 31 |                      24: 'ETbone_R',
 32 |                      25: 'Pituitary',
 33 |                      26: 'OralCavity',
 34 |                      27: 'Mandible_L',
 35 |                      28: 'Mandible_R',
 36 |                      29: 'Submandibular_L',
 37 |                      30: 'Submandibular_R',
 38 |                      31: 'Parotid_L',
 39 |                      32: 'Parotid_R',
 40 |                      33: 'Mastoid_L',
 41 |                      34: 'Mastoid_R',
 42 |                      35: 'TMjoint_L',
 43 |                      36: 'TMjoint_R',
 44 |                      37: 'SpinalCord',
 45 |                      38: 'Esophagus',
 46 |                      39: 'Larynx',
 47 |                      40: 'Larynx_Glottic',
 48 |                      41: 'Larynx_Supraglot',
 49 |                      42: 'PharynxConst',
 50 |                      43: 'Thyroid',
 51 |                      44: 'Trachea'}
 52 | 
 53 | gtvs_mapping_dict = {0: 'GTVp',
 54 |                      1: 'GTVnd'}
 55 | 
 56 | 
 57 | def convert_individual_organs_to_4d_mha(input_dir="/path/of/45organs/folder", output_path="/path/of/4dmha/"):
 58 |     patient_results = []
 59 |     spacing = None
 60 |     for index in oars_mapping_dict.keys():
 61 |         organ_itk = sitk.ReadImage(os.path.join(
 62 |             input_dir, "{}.nii.gz".format(oars_mapping_dict[index])))
 63 |         organ_arr = sitk.GetArrayFromImage(organ_itk)
 64 |         spacing = organ_itk.GetSpacing()
 65 |         patient_results.append(organ_arr)
 66 | 
 67 |     # The following part is very important; please save your results exactly as follows.
 68 |     stacked_oars = []
 69 |     for each_organ in patient_results:
 70 |         # isVector must be set to False!!!
 71 |         stacked_oars.append(sitk.GetImageFromArray(each_organ, isVector=False))
 72 |     output_itk = sitk.JoinSeries(stacked_oars)
 73 |     new_spacing = (spacing[0], spacing[1], spacing[2], 1)
 74 |     output_itk.SetSpacing(new_spacing)
 75 |     print(output_itk.GetSize())
 76 |     # The last parameter (useCompression) must be True
 77 |     sitk.WriteImage(output_itk, output_path, True)
 78 |     print("Conversion Finished")
 79 | 
 80 | 
 81 | def convert_individual_gtvs_to_4d_mha(input_dir="/path/of/2gtvs/folder", output_path="/path/of/4dmha/"):
 82 |     patient_results = []
 83 |     spacing = None
 84 |     for index in gtvs_mapping_dict.keys():
 85 |         gtv_itk = sitk.ReadImage(os.path.join(
 86 |             input_dir, "{}.nii.gz".format(gtvs_mapping_dict[index])))
 87 |         gtv_arr = sitk.GetArrayFromImage(gtv_itk)
 88 |         spacing = gtv_itk.GetSpacing()
 89 |         patient_results.append(gtv_arr)
 90 |     # The following part is very important; please save your results exactly as follows.
 91 |     stacked_gtvs = []
 92 |     for each_gtv in patient_results:
 93 |         # isVector must be set to False!!!
 94 |         stacked_gtvs.append(sitk.GetImageFromArray(each_gtv, isVector=False))
 95 |     output_itk = sitk.JoinSeries(stacked_gtvs)
 96 |     new_spacing = (spacing[0], spacing[1], spacing[2], 1)
 97 |     output_itk.SetSpacing(new_spacing)
 98 |     print(output_itk.GetSize())
 99 |     # The last parameter (useCompression) must be True
 100 |     sitk.WriteImage(output_itk, output_path, True)
 101 |     print("Conversion Finished")
 102 | 
 103 | 
 104 | convert_individual_gtvs_to_4d_mha(
 105 |     "./gtvs_output_example/", "gtvs_output_example.mha")
 106 | convert_individual_organs_to_4d_mha(
 107 |     "./oars_output_example/", "oars_output_example.mha")
--------------------------------------------------------------------------------
/Eval/SegRap_Task001_DSC_NSD_Eval.py:
--------------------------------------------------------------------------------
 1 | import os
 2 | from collections import OrderedDict
 3 | 
 4 | import numpy as np
 5 | import pandas as pd
 6 | import SimpleITK as sitk
 7 | from two_evaluation_metrics import dsc, nsd
 8 | 
 9 | 
 10 | submission_path = '/data_8t/radiology_images/processed/SegRap2023/nnUNetV2_infersVal/task001'
 11 | gt_path = '/data_8t/radiology_images/processed/SegRap2023/SegRap2023_Validation_Set_Labels_Cropped'
 12 | save_path = '/data_8t/radiology_images/processed/SegRap2023/nnUNetV2_infersVal'
 13 | patientnames = os.listdir(submission_path)
 14 | task01_submission_result = OrderedDict()
 15 | 
 16 | task01_submission_result['Name'] = list()
 17 | 
 18 | task01_label_tolerance = OrderedDict({
 19 |     "Brain": 1,
 20 |     "BrainStem": 1,
 21 |     "Chiasm": 1,
 22 |     "Cochlea_L": 1,
 23 |     "Cochlea_R": 1,
 24 |     "Esophagus": 1,
 25 |     "ETbone_L": 1,
 26 |     "ETbone_R": 1,
 27 |     "Eye_L": 1,
 28 |     "Eye_R": 1,
 29 |     "Hippocampus_L": 1,
 30 |     "Hippocampus_R": 1,
 31 |     "IAC_L": 1,
 32 |     "IAC_R": 1,
 33 |     "Larynx": 2,
 34 |     "Larynx_Glottic": 1,
 35 |     "Larynx_Supraglot": 1,
 36 |     "Lens_L": 1,
 37 |     "Lens_R": 1,
 38 |     "Mandible_L": 1,
 39 |     "Mandible_R": 1,
 40 |     "Mastoid_L": 1,
 41 |     "Mastoid_R": 1,
 42 |     "MiddleEar_L": 1,
 43 |     "MiddleEar_R": 1,
 44 |     "OpticNerve_L": 1,
 45 |     "OpticNerve_R": 1,
 46 |     "OralCavity": 3,
 47 |     "Parotid_L": 1,
 48 |     "Parotid_R": 1,
 49 |     "PharynxConst": 1,
 50 |     "Pituitary": 1,
 51 |     "SpinalCord": 1,
 52 |     "Submandibular_L": 1,
 53 |     "Submandibular_R": 1,
 54 |     "TemporalLobe_L": 1,
 55 |     "TemporalLobe_R": 1,
 56 |     "Thyroid": 1,
 57 |     "Trachea": 1,
 58 |     "TympanicCavity_L": 1,
 59 |     "TMjoint_L": 1,
 60 |     "TMjoint_R": 1,
 61 |     "TympanicCavity_R": 1,
 62 |     "VestibulSemi_L": 1,
 63 |     "VestibulSemi_R": 1
 64 | })
 65 | 
 66 | for organ in task01_label_tolerance.keys():
 67 |     task01_submission_result['{}_DSC'.format(organ)] = list()
 68 | for organ in task01_label_tolerance.keys():
 69 |     task01_submission_result['{}_NSD'.format(organ)] = list()
 70 | 
 71 | 
 72 | def compute_each_organ_lesion_performance(result, reference, voxel_spacing, tolerance_mm):
 73 |     if np.sum(reference) == 0 and np.sum(result) == 0:
 74 |         DSC = 1
 75 |         NSD = 1
 76 |     elif np.sum(reference) == 0 and np.sum(result) > 0:
 77 |         DSC = 0
 78 |         NSD = 0
 79 |     else:
 80 |         DSC = dsc(result, reference)
 81 |         NSD = nsd(result, reference, voxel_spacing, tolerance_mm)
 82 |     return round(DSC, 4), round(NSD, 4)
 83 | 
 84 | 
 85 | def nii2arr(path):
 86 |     itk_data = sitk.ReadImage(path)
 87 |     arr_data = sitk.GetArrayFromImage(itk_data)
 88 |     spacing = itk_data.GetSpacing()[::-1]
 89 |     return arr_data, spacing
 90 | 
 91 | 
 92 | for patient in os.listdir(submission_path):
 93 |     print(patient)
 94 |     task01_submission_result["Name"].append(patient)
 95 | 
 96 |     for organ in sorted(task01_label_tolerance.keys()):
 97 |         result_organ, spacing = nii2arr(
 98 |             "{}/{}/{}.nii.gz".format(submission_path, patient, organ))
 99 |         reference_organ, spacing = nii2arr(
 100 |             "{}/{}/{}.nii.gz".format(gt_path, patient, organ))
 101 |         DSC_organ, NSD_organ = compute_each_organ_lesion_performance(
 102 |             result_organ > 0, reference_organ > 0, spacing, task01_label_tolerance[organ])
 103 |         task01_submission_result['{}_DSC'.format(organ)].append(DSC_organ)
 104 |         task01_submission_result['{}_NSD'.format(organ)].append(NSD_organ)
 105 | 
 106 | 
 107 | task01_df = pd.DataFrame(task01_submission_result)
 108 | task01_df.to_csv(os.path.join(
 109 |     save_path, 'DSC_NSD_Task001_Admin_nnUNetV2.csv'), index=False)
--------------------------------------------------------------------------------
/Eval/SegRap_Task002_DSC_NSD_Eval.py:
--------------------------------------------------------------------------------
 1 | import os
 2 | from collections import OrderedDict
 3 | 
 4 | import numpy as np
 5 | import pandas as pd
 6 | import SimpleITK as sitk
 7 | from two_evaluation_metrics import dsc, nsd
 8 | 
 9 | 
 10 | submission_path = '/data_8t/radiology_images/processed/SegRap2023/SegRap2023_Validation_Set(with labels)'
 11 | gt_path = '/data_8t/radiology_images/processed/SegRap2023/SegRap2023_Validation_Set(with labels)'
 12 | save_path = '/data_8t/radiology_images/processed/SegRap2023/SegRap2023_Validation_Set(with labels)'
 13 | patientnames = os.listdir(submission_path)
 14 | task02_submission_result = OrderedDict()
 15 | 
 16 | task02_submission_result['Name'] = list()
 17 | 
 18 | task02_label_tolerance = OrderedDict({"GTVp": 1, "GTVnd": 1})
 19 | 
 20 | 
 21 | for lesion in task02_label_tolerance.keys():
 22 |     task02_submission_result['{}_DSC'.format(lesion)] = list()
 23 | for lesion in task02_label_tolerance.keys():
 24 |     task02_submission_result['{}_NSD'.format(lesion)] = list()
 25 | 
 26 | 
 27 | def compute_each_organ_lesion_performance(result, reference, voxel_spacing, tolerance_mm):
 28 |     if np.sum(reference) == 0 and np.sum(result) == 0:
 29 |         DSC = 1
 30 |         NSD = 1
 31 |     elif np.sum(reference) == 0 and np.sum(result) > 0:
 32 |         DSC = 0
 33 |         NSD = 0
 34 |     else:
 35 |         DSC = dsc(result, reference)
 36 |         NSD = nsd(result, reference, voxel_spacing, tolerance_mm)
 37 |     return round(DSC, 4), round(NSD, 4)
 38 | 
 39 | 
 40 | def nii2arr(path):
 41 |     itk_data = sitk.ReadImage(path)
 42 |     arr_data = sitk.GetArrayFromImage(itk_data)
 43 |     spacing = itk_data.GetSpacing()[::-1]
 44 |     return arr_data, spacing
 45 | 
 46 | 
 47 | for patient in os.listdir(submission_path):
 48 |     print(patient)
 49 |     task02_submission_result["Name"].append(patient)
 50 | 
 51 |     for lesion in sorted(task02_label_tolerance.keys()):
 52 |         result_lesion, spacing = nii2arr(
 53 |             "{}/{}/{}.nii.gz".format(submission_path, patient, lesion))
 54 |         reference_lesion, spacing = nii2arr(
 55 |             "{}/{}/{}.nii.gz".format(gt_path, patient, lesion))
 56 |         DSC_lesion, NSD_lesion = compute_each_organ_lesion_performance(
 57 |             result_lesion > 0, reference_lesion > 0, spacing, task02_label_tolerance[lesion])
 58 |         task02_submission_result['{}_DSC'.format(lesion)].append(DSC_lesion)
 59 |         task02_submission_result['{}_NSD'.format(lesion)].append(NSD_lesion)
 60 | 
 61 | task02_df = pd.DataFrame(task02_submission_result)
 62 | task02_df.to_csv(os.path.join(
 63 |     save_path, 'DSC_NSD_Task02_Admin_nnUNetV2.csv'), index=False)
--------------------------------------------------------------------------------
/Eval/two_evaluation_metrics.py:
--------------------------------------------------------------------------------
 1 | """
 2 | The code was borrowed from 
https://github.com/loli/medpy/blob/master/medpy/metric/binary.py. 3 | """ 4 | import time 5 | from collections.abc import Iterable, Iterator 6 | 7 | import numpy 8 | import numpy as np 9 | import scipy 10 | from scipy.ndimage import _ni_support 11 | from scipy.ndimage.morphology import (binary_erosion, distance_transform_edt, 12 | generate_binary_structure) 13 | from medpy import metric 14 | 15 | 16 | neighbour_code_to_normals = [ 17 | [[0, 0, 0]], 18 | [[0.125, 0.125, 0.125]], 19 | [[-0.125, -0.125, 0.125]], 20 | [[-0.25, -0.25, 0.0], [0.25, 0.25, -0.0]], 21 | [[0.125, -0.125, 0.125]], 22 | [[-0.25, -0.0, -0.25], [0.25, 0.0, 0.25]], 23 | [[0.125, -0.125, 0.125], [-0.125, -0.125, 0.125]], 24 | [[0.5, 0.0, -0.0], [0.25, 0.25, 0.25], [0.125, 0.125, 0.125]], 25 | [[-0.125, 0.125, 0.125]], 26 | [[0.125, 0.125, 0.125], [-0.125, 0.125, 0.125]], 27 | [[-0.25, 0.0, 0.25], [-0.25, 0.0, 0.25]], 28 | [[0.5, 0.0, 0.0], [-0.25, -0.25, 0.25], [-0.125, -0.125, 0.125]], 29 | [[0.25, -0.25, 0.0], [0.25, -0.25, 0.0]], 30 | [[0.5, 0.0, 0.0], [0.25, -0.25, 0.25], [-0.125, 0.125, -0.125]], 31 | [[-0.5, 0.0, 0.0], [-0.25, 0.25, 0.25], [-0.125, 0.125, 0.125]], 32 | [[0.5, 0.0, 0.0], [0.5, 0.0, 0.0]], 33 | [[0.125, -0.125, -0.125]], 34 | [[0.0, -0.25, -0.25], [0.0, 0.25, 0.25]], 35 | [[-0.125, -0.125, 0.125], [0.125, -0.125, -0.125]], 36 | [[0.0, -0.5, 0.0], [0.25, 0.25, 0.25], [0.125, 0.125, 0.125]], 37 | [[0.125, -0.125, 0.125], [0.125, -0.125, -0.125]], 38 | [[0.0, 0.0, -0.5], [0.25, 0.25, 0.25], [-0.125, -0.125, -0.125]], 39 | [[-0.125, -0.125, 0.125], [0.125, -0.125, 0.125], [0.125, -0.125, -0.125]], 40 | [[-0.125, -0.125, -0.125], [-0.25, -0.25, -0.25], 41 | [0.25, 0.25, 0.25], [0.125, 0.125, 0.125]], 42 | [[-0.125, 0.125, 0.125], [0.125, -0.125, -0.125]], 43 | [[0.0, -0.25, -0.25], [0.0, 0.25, 0.25], [-0.125, 0.125, 0.125]], 44 | [[-0.25, 0.0, 0.25], [-0.25, 0.0, 0.25], [0.125, -0.125, -0.125]], 45 | [[0.125, 0.125, 0.125], [0.375, 0.375, 0.375], 46 | [0.0, -0.25, 0.25], [-0.25, 0.0, 0.25]], 47 | [[0.125, -0.125, -0.125], [0.25, -0.25, 0.0], [0.25, -0.25, 0.0]], 48 | [[0.375, 0.375, 0.375], [0.0, 0.25, -0.25], 49 | [-0.125, -0.125, -0.125], [-0.25, 0.25, 0.0]], 50 | [[-0.5, 0.0, 0.0], [-0.125, -0.125, -0.125], 51 | [-0.25, -0.25, -0.25], [0.125, 0.125, 0.125]], 52 | [[-0.5, 0.0, 0.0], [-0.125, -0.125, -0.125], [-0.25, -0.25, -0.25]], 53 | [[0.125, -0.125, 0.125]], 54 | [[0.125, 0.125, 0.125], [0.125, -0.125, 0.125]], 55 | [[0.0, -0.25, 0.25], [0.0, 0.25, -0.25]], 56 | [[0.0, -0.5, 0.0], [0.125, 0.125, -0.125], [0.25, 0.25, -0.25]], 57 | [[0.125, -0.125, 0.125], [0.125, -0.125, 0.125]], 58 | [[0.125, -0.125, 0.125], [-0.25, -0.0, -0.25], [0.25, 0.0, 0.25]], 59 | [[0.0, -0.25, 0.25], [0.0, 0.25, -0.25], [0.125, -0.125, 0.125]], 60 | [[-0.375, -0.375, 0.375], [-0.0, 0.25, 0.25], 61 | [0.125, 0.125, -0.125], [-0.25, -0.0, -0.25]], 62 | [[-0.125, 0.125, 0.125], [0.125, -0.125, 0.125]], 63 | [[0.125, 0.125, 0.125], [0.125, -0.125, 0.125], [-0.125, 0.125, 0.125]], 64 | [[-0.0, 0.0, 0.5], [-0.25, -0.25, 0.25], [-0.125, -0.125, 0.125]], 65 | [[0.25, 0.25, -0.25], [0.25, 0.25, -0.25], 66 | [0.125, 0.125, -0.125], [-0.125, -0.125, 0.125]], 67 | [[0.125, -0.125, 0.125], [0.25, -0.25, 0.0], [0.25, -0.25, 0.0]], 68 | [[0.5, 0.0, 0.0], [0.25, -0.25, 0.25], 69 | [-0.125, 0.125, -0.125], [0.125, -0.125, 0.125]], 70 | [[0.0, 0.25, -0.25], [0.375, -0.375, -0.375], 71 | [-0.125, 0.125, 0.125], [0.25, 0.25, 0.0]], 72 | [[-0.5, 0.0, 0.0], [-0.25, -0.25, 0.25], [-0.125, -0.125, 0.125]], 73 | [[0.25, -0.25, 0.0], [-0.25, 0.25, 
0.0]], 74 | [[0.0, 0.5, 0.0], [-0.25, 0.25, 0.25], [0.125, -0.125, -0.125]], 75 | [[0.0, 0.5, 0.0], [0.125, -0.125, 0.125], [-0.25, 0.25, -0.25]], 76 | [[0.0, 0.5, 0.0], [0.0, -0.5, 0.0]], 77 | [[0.25, -0.25, 0.0], [-0.25, 0.25, 0.0], [0.125, -0.125, 0.125]], 78 | [[-0.375, -0.375, -0.375], [-0.25, 0.0, 0.25], 79 | [-0.125, -0.125, -0.125], [-0.25, 0.25, 0.0]], 80 | [[0.125, 0.125, 0.125], [0.0, -0.5, 0.0], 81 | [-0.25, -0.25, -0.25], [-0.125, -0.125, -0.125]], 82 | [[0.0, -0.5, 0.0], [-0.25, -0.25, -0.25], [-0.125, -0.125, -0.125]], 83 | [[-0.125, 0.125, 0.125], [0.25, -0.25, 0.0], [-0.25, 0.25, 0.0]], 84 | [[0.0, 0.5, 0.0], [0.25, 0.25, -0.25], 85 | [-0.125, -0.125, 0.125], [-0.125, -0.125, 0.125]], 86 | [[-0.375, 0.375, -0.375], [-0.25, -0.25, 0.0], 87 | [-0.125, 0.125, -0.125], [-0.25, 0.0, 0.25]], 88 | [[0.0, 0.5, 0.0], [0.25, 0.25, -0.25], [-0.125, -0.125, 0.125]], 89 | [[0.25, -0.25, 0.0], [-0.25, 0.25, 0.0], 90 | [0.25, -0.25, 0.0], [0.25, -0.25, 0.0]], 91 | [[-0.25, -0.25, 0.0], [-0.25, -0.25, 0.0], [-0.125, -0.125, 0.125]], 92 | [[0.125, 0.125, 0.125], [-0.25, -0.25, 0.0], [-0.25, -0.25, 0.0]], 93 | [[-0.25, -0.25, 0.0], [-0.25, -0.25, 0.0]], 94 | [[-0.125, -0.125, 0.125]], 95 | [[0.125, 0.125, 0.125], [-0.125, -0.125, 0.125]], 96 | [[-0.125, -0.125, 0.125], [-0.125, -0.125, 0.125]], 97 | [[-0.125, -0.125, 0.125], [-0.25, -0.25, 0.0], [0.25, 0.25, -0.0]], 98 | [[0.0, -0.25, 0.25], [0.0, -0.25, 0.25]], 99 | [[0.0, 0.0, 0.5], [0.25, -0.25, 0.25], [0.125, -0.125, 0.125]], 100 | [[0.0, -0.25, 0.25], [0.0, -0.25, 0.25], [-0.125, -0.125, 0.125]], 101 | [[0.375, -0.375, 0.375], [0.0, -0.25, -0.25], 102 | [-0.125, 0.125, -0.125], [0.25, 0.25, 0.0]], 103 | [[-0.125, -0.125, 0.125], [-0.125, 0.125, 0.125]], 104 | [[0.125, 0.125, 0.125], [-0.125, -0.125, 0.125], [-0.125, 0.125, 0.125]], 105 | [[-0.125, -0.125, 0.125], [-0.25, 0.0, 0.25], [-0.25, 0.0, 0.25]], 106 | [[0.5, 0.0, 0.0], [-0.25, -0.25, 0.25], 107 | [-0.125, -0.125, 0.125], [-0.125, -0.125, 0.125]], 108 | [[-0.0, 0.5, 0.0], [-0.25, 0.25, -0.25], [0.125, -0.125, 0.125]], 109 | [[-0.25, 0.25, -0.25], [-0.25, 0.25, -0.25], 110 | [-0.125, 0.125, -0.125], [-0.125, 0.125, -0.125]], 111 | [[-0.25, 0.0, -0.25], [0.375, -0.375, -0.375], 112 | [0.0, 0.25, -0.25], [-0.125, 0.125, 0.125]], 113 | [[0.5, 0.0, 0.0], [-0.25, 0.25, -0.25], [0.125, -0.125, 0.125]], 114 | [[-0.25, 0.0, 0.25], [0.25, 0.0, -0.25]], 115 | [[-0.0, 0.0, 0.5], [-0.25, 0.25, 0.25], [-0.125, 0.125, 0.125]], 116 | [[-0.125, -0.125, 0.125], [-0.25, 0.0, 0.25], [0.25, 0.0, -0.25]], 117 | [[-0.25, -0.0, -0.25], [-0.375, 0.375, 0.375], 118 | [-0.25, -0.25, 0.0], [-0.125, 0.125, 0.125]], 119 | [[0.0, 0.0, -0.5], [0.25, 0.25, -0.25], [-0.125, -0.125, 0.125]], 120 | [[-0.0, 0.0, 0.5], [0.0, 0.0, 0.5]], 121 | [[0.125, 0.125, 0.125], [0.125, 0.125, 0.125], 122 | [0.25, 0.25, 0.25], [0.0, 0.0, 0.5]], 123 | [[0.125, 0.125, 0.125], [0.25, 0.25, 0.25], [0.0, 0.0, 0.5]], 124 | [[-0.25, 0.0, 0.25], [0.25, 0.0, -0.25], [-0.125, 0.125, 0.125]], 125 | [[-0.0, 0.0, 0.5], [0.25, -0.25, 0.25], 126 | [0.125, -0.125, 0.125], [0.125, -0.125, 0.125]], 127 | [[-0.25, 0.0, 0.25], [-0.25, 0.0, 0.25], 128 | [-0.25, 0.0, 0.25], [0.25, 0.0, -0.25]], 129 | [[0.125, -0.125, 0.125], [0.25, 0.0, 0.25], [0.25, 0.0, 0.25]], 130 | [[0.25, 0.0, 0.25], [-0.375, -0.375, 0.375], 131 | [-0.25, 0.25, 0.0], [-0.125, -0.125, 0.125]], 132 | [[-0.0, 0.0, 0.5], [0.25, -0.25, 0.25], [0.125, -0.125, 0.125]], 133 | [[0.125, 0.125, 0.125], [0.25, 0.0, 0.25], [0.25, 0.0, 0.25]], 134 | [[0.25, 0.0, 0.25], [0.25, 0.0, 0.25]], 135 
| [[-0.125, -0.125, 0.125], [0.125, -0.125, 0.125]], 136 | [[0.125, 0.125, 0.125], [-0.125, -0.125, 0.125], [0.125, -0.125, 0.125]], 137 | [[-0.125, -0.125, 0.125], [0.0, -0.25, 0.25], [0.0, 0.25, -0.25]], 138 | [[0.0, -0.5, 0.0], [0.125, 0.125, -0.125], 139 | [0.25, 0.25, -0.25], [-0.125, -0.125, 0.125]], 140 | [[0.0, -0.25, 0.25], [0.0, -0.25, 0.25], [0.125, -0.125, 0.125]], 141 | [[0.0, 0.0, 0.5], [0.25, -0.25, 0.25], 142 | [0.125, -0.125, 0.125], [0.125, -0.125, 0.125]], 143 | [[0.0, -0.25, 0.25], [0.0, -0.25, 0.25], 144 | [0.0, -0.25, 0.25], [0.0, 0.25, -0.25]], 145 | [[0.0, 0.25, 0.25], [0.0, 0.25, 0.25], [0.125, -0.125, -0.125]], 146 | [[-0.125, 0.125, 0.125], [0.125, -0.125, 0.125], [-0.125, -0.125, 0.125]], 147 | [[-0.125, 0.125, 0.125], [0.125, -0.125, 0.125], 148 | [-0.125, -0.125, 0.125], [0.125, 0.125, 0.125]], 149 | [[-0.0, 0.0, 0.5], [-0.25, -0.25, 0.25], 150 | [-0.125, -0.125, 0.125], [-0.125, -0.125, 0.125]], 151 | [[0.125, 0.125, 0.125], [0.125, -0.125, 0.125], [0.125, -0.125, -0.125]], 152 | [[-0.0, 0.5, 0.0], [-0.25, 0.25, -0.25], 153 | [0.125, -0.125, 0.125], [0.125, -0.125, 0.125]], 154 | [[0.125, 0.125, 0.125], [-0.125, -0.125, 0.125], [0.125, -0.125, -0.125]], 155 | [[0.0, -0.25, -0.25], [0.0, 0.25, 0.25], [0.125, 0.125, 0.125]], 156 | [[0.125, 0.125, 0.125], [0.125, -0.125, -0.125]], 157 | [[0.5, 0.0, -0.0], [0.25, -0.25, -0.25], [0.125, -0.125, -0.125]], 158 | [[-0.25, 0.25, 0.25], [-0.125, 0.125, 0.125], 159 | [-0.25, 0.25, 0.25], [0.125, -0.125, -0.125]], 160 | [[0.375, -0.375, 0.375], [0.0, 0.25, 0.25], 161 | [-0.125, 0.125, -0.125], [-0.25, 0.0, 0.25]], 162 | [[0.0, -0.5, 0.0], [-0.25, 0.25, 0.25], [-0.125, 0.125, 0.125]], 163 | [[-0.375, -0.375, 0.375], [0.25, -0.25, 0.0], 164 | [0.0, 0.25, 0.25], [-0.125, -0.125, 0.125]], 165 | [[-0.125, 0.125, 0.125], [-0.25, 0.25, 0.25], [0.0, 0.0, 0.5]], 166 | [[0.125, 0.125, 0.125], [0.0, 0.25, 0.25], [0.0, 0.25, 0.25]], 167 | [[0.0, 0.25, 0.25], [0.0, 0.25, 0.25]], 168 | [[0.5, 0.0, -0.0], [0.25, 0.25, 0.25], 169 | [0.125, 0.125, 0.125], [0.125, 0.125, 0.125]], 170 | [[0.125, -0.125, 0.125], [-0.125, -0.125, 0.125], [0.125, 0.125, 0.125]], 171 | [[-0.25, -0.0, -0.25], [0.25, 0.0, 0.25], [0.125, 0.125, 0.125]], 172 | [[0.125, 0.125, 0.125], [0.125, -0.125, 0.125]], 173 | [[-0.25, -0.25, 0.0], [0.25, 0.25, -0.0], [0.125, 0.125, 0.125]], 174 | [[0.125, 0.125, 0.125], [-0.125, -0.125, 0.125]], 175 | [[0.125, 0.125, 0.125], [0.125, 0.125, 0.125]], 176 | [[0.125, 0.125, 0.125]], 177 | [[0.125, 0.125, 0.125]], 178 | [[0.125, 0.125, 0.125], [0.125, 0.125, 0.125]], 179 | [[0.125, 0.125, 0.125], [-0.125, -0.125, 0.125]], 180 | [[-0.25, -0.25, 0.0], [0.25, 0.25, -0.0], [0.125, 0.125, 0.125]], 181 | [[0.125, 0.125, 0.125], [0.125, -0.125, 0.125]], 182 | [[-0.25, -0.0, -0.25], [0.25, 0.0, 0.25], [0.125, 0.125, 0.125]], 183 | [[0.125, -0.125, 0.125], [-0.125, -0.125, 0.125], [0.125, 0.125, 0.125]], 184 | [[0.5, 0.0, -0.0], [0.25, 0.25, 0.25], 185 | [0.125, 0.125, 0.125], [0.125, 0.125, 0.125]], 186 | [[0.0, 0.25, 0.25], [0.0, 0.25, 0.25]], 187 | [[0.125, 0.125, 0.125], [0.0, 0.25, 0.25], [0.0, 0.25, 0.25]], 188 | [[-0.125, 0.125, 0.125], [-0.25, 0.25, 0.25], [0.0, 0.0, 0.5]], 189 | [[-0.375, -0.375, 0.375], [0.25, -0.25, 0.0], 190 | [0.0, 0.25, 0.25], [-0.125, -0.125, 0.125]], 191 | [[0.0, -0.5, 0.0], [-0.25, 0.25, 0.25], [-0.125, 0.125, 0.125]], 192 | [[0.375, -0.375, 0.375], [0.0, 0.25, 0.25], 193 | [-0.125, 0.125, -0.125], [-0.25, 0.0, 0.25]], 194 | [[-0.25, 0.25, 0.25], [-0.125, 0.125, 0.125], 195 | [-0.25, 0.25, 0.25], [0.125, 
-0.125, -0.125]], 196 | [[0.5, 0.0, -0.0], [0.25, -0.25, -0.25], [0.125, -0.125, -0.125]], 197 | [[0.125, 0.125, 0.125], [0.125, -0.125, -0.125]], 198 | [[0.0, -0.25, -0.25], [0.0, 0.25, 0.25], [0.125, 0.125, 0.125]], 199 | [[0.125, 0.125, 0.125], [-0.125, -0.125, 0.125], [0.125, -0.125, -0.125]], 200 | [[-0.0, 0.5, 0.0], [-0.25, 0.25, -0.25], 201 | [0.125, -0.125, 0.125], [0.125, -0.125, 0.125]], 202 | [[0.125, 0.125, 0.125], [0.125, -0.125, 0.125], [0.125, -0.125, -0.125]], 203 | [[-0.0, 0.0, 0.5], [-0.25, -0.25, 0.25], 204 | [-0.125, -0.125, 0.125], [-0.125, -0.125, 0.125]], 205 | [[-0.125, 0.125, 0.125], [0.125, -0.125, 0.125], 206 | [-0.125, -0.125, 0.125], [0.125, 0.125, 0.125]], 207 | [[-0.125, 0.125, 0.125], [0.125, -0.125, 0.125], [-0.125, -0.125, 0.125]], 208 | [[0.0, 0.25, 0.25], [0.0, 0.25, 0.25], [0.125, -0.125, -0.125]], 209 | [[0.0, -0.25, -0.25], [0.0, 0.25, 0.25], [0.0, 0.25, 0.25], [0.0, 0.25, 0.25]], 210 | [[0.0, 0.0, 0.5], [0.25, -0.25, 0.25], 211 | [0.125, -0.125, 0.125], [0.125, -0.125, 0.125]], 212 | [[0.0, -0.25, 0.25], [0.0, -0.25, 0.25], [0.125, -0.125, 0.125]], 213 | [[0.0, -0.5, 0.0], [0.125, 0.125, -0.125], 214 | [0.25, 0.25, -0.25], [-0.125, -0.125, 0.125]], 215 | [[-0.125, -0.125, 0.125], [0.0, -0.25, 0.25], [0.0, 0.25, -0.25]], 216 | [[0.125, 0.125, 0.125], [-0.125, -0.125, 0.125], [0.125, -0.125, 0.125]], 217 | [[-0.125, -0.125, 0.125], [0.125, -0.125, 0.125]], 218 | [[0.25, 0.0, 0.25], [0.25, 0.0, 0.25]], 219 | [[0.125, 0.125, 0.125], [0.25, 0.0, 0.25], [0.25, 0.0, 0.25]], 220 | [[-0.0, 0.0, 0.5], [0.25, -0.25, 0.25], [0.125, -0.125, 0.125]], 221 | [[0.25, 0.0, 0.25], [-0.375, -0.375, 0.375], 222 | [-0.25, 0.25, 0.0], [-0.125, -0.125, 0.125]], 223 | [[0.125, -0.125, 0.125], [0.25, 0.0, 0.25], [0.25, 0.0, 0.25]], 224 | [[-0.25, -0.0, -0.25], [0.25, 0.0, 0.25], 225 | [0.25, 0.0, 0.25], [0.25, 0.0, 0.25]], 226 | [[-0.0, 0.0, 0.5], [0.25, -0.25, 0.25], 227 | [0.125, -0.125, 0.125], [0.125, -0.125, 0.125]], 228 | [[-0.25, 0.0, 0.25], [0.25, 0.0, -0.25], [-0.125, 0.125, 0.125]], 229 | [[0.125, 0.125, 0.125], [0.25, 0.25, 0.25], [0.0, 0.0, 0.5]], 230 | [[0.125, 0.125, 0.125], [0.125, 0.125, 0.125], 231 | [0.25, 0.25, 0.25], [0.0, 0.0, 0.5]], 232 | [[-0.0, 0.0, 0.5], [0.0, 0.0, 0.5]], 233 | [[0.0, 0.0, -0.5], [0.25, 0.25, -0.25], [-0.125, -0.125, 0.125]], 234 | [[-0.25, -0.0, -0.25], [-0.375, 0.375, 0.375], 235 | [-0.25, -0.25, 0.0], [-0.125, 0.125, 0.125]], 236 | [[-0.125, -0.125, 0.125], [-0.25, 0.0, 0.25], [0.25, 0.0, -0.25]], 237 | [[-0.0, 0.0, 0.5], [-0.25, 0.25, 0.25], [-0.125, 0.125, 0.125]], 238 | [[-0.25, 0.0, 0.25], [0.25, 0.0, -0.25]], 239 | [[0.5, 0.0, 0.0], [-0.25, 0.25, -0.25], [0.125, -0.125, 0.125]], 240 | [[-0.25, 0.0, -0.25], [0.375, -0.375, -0.375], 241 | [0.0, 0.25, -0.25], [-0.125, 0.125, 0.125]], 242 | [[-0.25, 0.25, -0.25], [-0.25, 0.25, -0.25], 243 | [-0.125, 0.125, -0.125], [-0.125, 0.125, -0.125]], 244 | [[-0.0, 0.5, 0.0], [-0.25, 0.25, -0.25], [0.125, -0.125, 0.125]], 245 | [[0.5, 0.0, 0.0], [-0.25, -0.25, 0.25], 246 | [-0.125, -0.125, 0.125], [-0.125, -0.125, 0.125]], 247 | [[-0.125, -0.125, 0.125], [-0.25, 0.0, 0.25], [-0.25, 0.0, 0.25]], 248 | [[0.125, 0.125, 0.125], [-0.125, -0.125, 0.125], [-0.125, 0.125, 0.125]], 249 | [[-0.125, -0.125, 0.125], [-0.125, 0.125, 0.125]], 250 | [[0.375, -0.375, 0.375], [0.0, -0.25, -0.25], 251 | [-0.125, 0.125, -0.125], [0.25, 0.25, 0.0]], 252 | [[0.0, -0.25, 0.25], [0.0, -0.25, 0.25], [-0.125, -0.125, 0.125]], 253 | [[0.0, 0.0, 0.5], [0.25, -0.25, 0.25], [0.125, -0.125, 0.125]], 254 | [[0.0, -0.25, 
0.25], [0.0, -0.25, 0.25]], 255 | [[-0.125, -0.125, 0.125], [-0.25, -0.25, 0.0], [0.25, 0.25, -0.0]], 256 | [[-0.125, -0.125, 0.125], [-0.125, -0.125, 0.125]], 257 | [[0.125, 0.125, 0.125], [-0.125, -0.125, 0.125]], 258 | [[-0.125, -0.125, 0.125]], 259 | [[-0.25, -0.25, 0.0], [-0.25, -0.25, 0.0]], 260 | [[0.125, 0.125, 0.125], [-0.25, -0.25, 0.0], [-0.25, -0.25, 0.0]], 261 | [[-0.25, -0.25, 0.0], [-0.25, -0.25, 0.0], [-0.125, -0.125, 0.125]], 262 | [[-0.25, -0.25, 0.0], [-0.25, -0.25, 0.0], 263 | [-0.25, -0.25, 0.0], [0.25, 0.25, -0.0]], 264 | [[0.0, 0.5, 0.0], [0.25, 0.25, -0.25], [-0.125, -0.125, 0.125]], 265 | [[-0.375, 0.375, -0.375], [-0.25, -0.25, 0.0], 266 | [-0.125, 0.125, -0.125], [-0.25, 0.0, 0.25]], 267 | [[0.0, 0.5, 0.0], [0.25, 0.25, -0.25], 268 | [-0.125, -0.125, 0.125], [-0.125, -0.125, 0.125]], 269 | [[-0.125, 0.125, 0.125], [0.25, -0.25, 0.0], [-0.25, 0.25, 0.0]], 270 | [[0.0, -0.5, 0.0], [-0.25, -0.25, -0.25], [-0.125, -0.125, -0.125]], 271 | [[0.125, 0.125, 0.125], [0.0, -0.5, 0.0], 272 | [-0.25, -0.25, -0.25], [-0.125, -0.125, -0.125]], 273 | [[-0.375, -0.375, -0.375], [-0.25, 0.0, 0.25], 274 | [-0.125, -0.125, -0.125], [-0.25, 0.25, 0.0]], 275 | [[0.25, -0.25, 0.0], [-0.25, 0.25, 0.0], [0.125, -0.125, 0.125]], 276 | [[0.0, 0.5, 0.0], [0.0, -0.5, 0.0]], 277 | [[0.0, 0.5, 0.0], [0.125, -0.125, 0.125], [-0.25, 0.25, -0.25]], 278 | [[0.0, 0.5, 0.0], [-0.25, 0.25, 0.25], [0.125, -0.125, -0.125]], 279 | [[0.25, -0.25, 0.0], [-0.25, 0.25, 0.0]], 280 | [[-0.5, 0.0, 0.0], [-0.25, -0.25, 0.25], [-0.125, -0.125, 0.125]], 281 | [[0.0, 0.25, -0.25], [0.375, -0.375, -0.375], 282 | [-0.125, 0.125, 0.125], [0.25, 0.25, 0.0]], 283 | [[0.5, 0.0, 0.0], [0.25, -0.25, 0.25], 284 | [-0.125, 0.125, -0.125], [0.125, -0.125, 0.125]], 285 | [[0.125, -0.125, 0.125], [0.25, -0.25, 0.0], [0.25, -0.25, 0.0]], 286 | [[0.25, 0.25, -0.25], [0.25, 0.25, -0.25], 287 | [0.125, 0.125, -0.125], [-0.125, -0.125, 0.125]], 288 | [[-0.0, 0.0, 0.5], [-0.25, -0.25, 0.25], [-0.125, -0.125, 0.125]], 289 | [[0.125, 0.125, 0.125], [0.125, -0.125, 0.125], [-0.125, 0.125, 0.125]], 290 | [[-0.125, 0.125, 0.125], [0.125, -0.125, 0.125]], 291 | [[-0.375, -0.375, 0.375], [-0.0, 0.25, 0.25], 292 | [0.125, 0.125, -0.125], [-0.25, -0.0, -0.25]], 293 | [[0.0, -0.25, 0.25], [0.0, 0.25, -0.25], [0.125, -0.125, 0.125]], 294 | [[0.125, -0.125, 0.125], [-0.25, -0.0, -0.25], [0.25, 0.0, 0.25]], 295 | [[0.125, -0.125, 0.125], [0.125, -0.125, 0.125]], 296 | [[0.0, -0.5, 0.0], [0.125, 0.125, -0.125], [0.25, 0.25, -0.25]], 297 | [[0.0, -0.25, 0.25], [0.0, 0.25, -0.25]], 298 | [[0.125, 0.125, 0.125], [0.125, -0.125, 0.125]], 299 | [[0.125, -0.125, 0.125]], 300 | [[-0.5, 0.0, 0.0], [-0.125, -0.125, -0.125], [-0.25, -0.25, -0.25]], 301 | [[-0.5, 0.0, 0.0], [-0.125, -0.125, -0.125], 302 | [-0.25, -0.25, -0.25], [0.125, 0.125, 0.125]], 303 | [[0.375, 0.375, 0.375], [0.0, 0.25, -0.25], 304 | [-0.125, -0.125, -0.125], [-0.25, 0.25, 0.0]], 305 | [[0.125, -0.125, -0.125], [0.25, -0.25, 0.0], [0.25, -0.25, 0.0]], 306 | [[0.125, 0.125, 0.125], [0.375, 0.375, 0.375], 307 | [0.0, -0.25, 0.25], [-0.25, 0.0, 0.25]], 308 | [[-0.25, 0.0, 0.25], [-0.25, 0.0, 0.25], [0.125, -0.125, -0.125]], 309 | [[0.0, -0.25, -0.25], [0.0, 0.25, 0.25], [-0.125, 0.125, 0.125]], 310 | [[-0.125, 0.125, 0.125], [0.125, -0.125, -0.125]], 311 | [[-0.125, -0.125, -0.125], [-0.25, -0.25, -0.25], 312 | [0.25, 0.25, 0.25], [0.125, 0.125, 0.125]], 313 | [[-0.125, -0.125, 0.125], [0.125, -0.125, 0.125], [0.125, -0.125, -0.125]], 314 | [[0.0, 0.0, -0.5], [0.25, 0.25, 0.25], 
[-0.125, -0.125, -0.125]], 315 | [[0.125, -0.125, 0.125], [0.125, -0.125, -0.125]], 316 | [[0.0, -0.5, 0.0], [0.25, 0.25, 0.25], [0.125, 0.125, 0.125]], 317 | [[-0.125, -0.125, 0.125], [0.125, -0.125, -0.125]], 318 | [[0.0, -0.25, -0.25], [0.0, 0.25, 0.25]], 319 | [[0.125, -0.125, -0.125]], 320 | [[0.5, 0.0, 0.0], [0.5, 0.0, 0.0]], 321 | [[-0.5, 0.0, 0.0], [-0.25, 0.25, 0.25], [-0.125, 0.125, 0.125]], 322 | [[0.5, 0.0, 0.0], [0.25, -0.25, 0.25], [-0.125, 0.125, -0.125]], 323 | [[0.25, -0.25, 0.0], [0.25, -0.25, 0.0]], 324 | [[0.5, 0.0, 0.0], [-0.25, -0.25, 0.25], [-0.125, -0.125, 0.125]], 325 | [[-0.25, 0.0, 0.25], [-0.25, 0.0, 0.25]], 326 | [[0.125, 0.125, 0.125], [-0.125, 0.125, 0.125]], 327 | [[-0.125, 0.125, 0.125]], 328 | [[0.5, 0.0, -0.0], [0.25, 0.25, 0.25], [0.125, 0.125, 0.125]], 329 | [[0.125, -0.125, 0.125], [-0.125, -0.125, 0.125]], 330 | [[-0.25, -0.0, -0.25], [0.25, 0.0, 0.25]], 331 | [[0.125, -0.125, 0.125]], 332 | [[-0.25, -0.25, 0.0], [0.25, 0.25, -0.0]], 333 | [[-0.125, -0.125, 0.125]], 334 | [[0.125, 0.125, 0.125]], 335 | [[0, 0, 0]]] 336 | 337 | 338 | def _normalize_sequence(input, rank): 339 | """If input is a scalar, create a sequence of length equal to the 340 | rank by duplicating the input. If input is a sequence, 341 | check if its length is equal to the length of array. 342 | """ 343 | is_str = isinstance(input, str) 344 | if not is_str and isinstance(input, Iterable): 345 | normalized = list(input) 346 | if len(normalized) != rank: 347 | err = "sequence argument must have length equal to input rank" 348 | raise RuntimeError(err) 349 | else: 350 | normalized = [input] * rank 351 | return normalized 352 | 353 | 354 | def __surface_distances(result, reference, voxelspacing=None, connectivity=1): 355 | """ 356 | The distances between the surface voxel of binary objects in result and their 357 | nearest partner surface voxel of a binary object in reference. 
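Note that the result is one-directional: distances are measured from the surface of `result` to the nearest surface voxel of `reference` only. The symmetric metrics below (hd, hd95, assd) therefore call this helper twice with the arguments swapped and combine the two sets of distances.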
358 | """ 359 | result = numpy.atleast_1d(result.astype(numpy.bool)) 360 | reference = numpy.atleast_1d(reference.astype(numpy.bool)) 361 | if voxelspacing is not None: 362 | voxelspacing = _ni_support._normalize_sequence( 363 | voxelspacing, result.ndim) 364 | voxelspacing = numpy.asarray(voxelspacing, dtype=numpy.float64) 365 | if not voxelspacing.flags.contiguous: 366 | voxelspacing = voxelspacing.copy() 367 | 368 | # binary structure 369 | footprint = generate_binary_structure(result.ndim, connectivity) 370 | 371 | # test for emptiness 372 | if 0 == numpy.count_nonzero(result): 373 | raise RuntimeError( 374 | 'The first supplied array does not contain any binary object.') 375 | if 0 == numpy.count_nonzero(reference): 376 | raise RuntimeError( 377 | 'The second supplied array does not contain any binary object.') 378 | 379 | # extract only 1-pixel border line of objects 380 | result_border = result ^ binary_erosion( 381 | result, structure=footprint, iterations=1) 382 | reference_border = reference ^ binary_erosion( 383 | reference, structure=footprint, iterations=1) 384 | 385 | # compute average surface distance 386 | # Note: scipys distance transform is calculated only inside the borders of the 387 | # foreground objects, therefore the input has to be reversed 388 | dt = distance_transform_edt(~reference_border, sampling=voxelspacing) 389 | sds = dt[result_border] 390 | 391 | return sds 392 | 393 | 394 | def hd(result, reference, voxelspacing=None, connectivity=1): 395 | hd1 = __surface_distances( 396 | result, reference, voxelspacing, connectivity).max() 397 | hd2 = __surface_distances( 398 | reference, result, voxelspacing, connectivity).max() 399 | hd = max(hd1, hd2) 400 | return hd 401 | 402 | 403 | def hd_fast(result, reference, voxelspacing=None, connectivity=1): 404 | hd1 = __surface_distances( 405 | result, reference, voxelspacing, connectivity).max() 406 | hd2 = __surface_distances( 407 | reference, result, voxelspacing, connectivity).max() 408 | hd = max(hd1, hd2) 409 | return hd 410 | 411 | 412 | def hd95(result, reference, voxelspacing=None, connectivity=1): 413 | hd1 = __surface_distances(result, reference, voxelspacing, connectivity) 414 | hd2 = __surface_distances(reference, result, voxelspacing, connectivity) 415 | hd95 = numpy.percentile(numpy.hstack((hd1, hd2)), 95) 416 | return hd95 417 | 418 | 419 | def assd(result, reference, voxelspacing=None, connectivity=1): 420 | assd = numpy.mean((__surface_distances(result, reference, voxelspacing, connectivity), 421 | __surface_distances(reference, result, voxelspacing, connectivity))) 422 | return assd 423 | 424 | 425 | def asd(result, reference, voxelspacing=None, connectivity=1): 426 | sds = __surface_distances(result, reference, voxelspacing, connectivity) 427 | asd = sds.mean() 428 | return asd 429 | 430 | 431 | def compute_surface_distances(mask_gt, mask_pred, spacing_mm): 432 | """Compute closest distances from all surface points to the other surface. 433 | 434 | Finds all surface elements "surfels" in the ground truth mask `mask_gt` and 435 | the predicted mask `mask_pred`, computes their area in mm^2 and the distance 436 | to the closest point on the other surface. It returns two sorted lists of 437 | distances together with the corresponding surfel areas. If one of the masks 438 | is empty, the corresponding lists are empty and all distances in the other 439 | list are `inf` 440 | 441 | Args: 442 | mask_gt: 3-dim Numpy array of type bool. The ground truth mask. 443 | mask_pred: 3-dim Numpy array of type bool. 
The predicted mask. 444 | spacing_mm: 3-element list-like structure. Voxel spacing in x0, x1 and x2 445 | direction 446 | 447 | Returns: 448 | A dict with 449 | "distances_gt_to_pred": 1-dim numpy array of type float. The distances in mm 450 | from all ground truth surface elements to the predicted surface, 451 | sorted from smallest to largest 452 | "distances_pred_to_gt": 1-dim numpy array of type float. The distances in mm 453 | from all predicted surface elements to the ground truth surface, 454 | sorted from smallest to largest 455 | "surfel_areas_gt": 1-dim numpy array of type float. The area in mm^2 of 456 | the ground truth surface elements in the same order as 457 | distances_gt_to_pred 458 | "surfel_areas_pred": 1-dim numpy array of type float. The area in mm^2 of 459 | the predicted surface elements in the same order as 460 | distances_pred_to_gt 461 | 462 | """ 463 | 464 | # compute the area for all 256 possible surface elements 465 | # (given a 2x2x2 neighbourhood) according to the spacing_mm 466 | neighbour_code_to_surface_area = np.zeros([256]) 467 | for code in range(256): 468 | normals = np.array(neighbour_code_to_normals[code]) 469 | sum_area = 0 470 | for normal_idx in range(normals.shape[0]): 471 | # normal vector 472 | n = np.zeros([3]) 473 | n[0] = normals[normal_idx, 0] * spacing_mm[1] * spacing_mm[2] 474 | n[1] = normals[normal_idx, 1] * spacing_mm[0] * spacing_mm[2] 475 | n[2] = normals[normal_idx, 2] * spacing_mm[0] * spacing_mm[1] 476 | area = np.linalg.norm(n) 477 | sum_area += area 478 | neighbour_code_to_surface_area[code] = sum_area 479 | 480 | # compute the bounding box of the masks to trim 481 | # the volume to the smallest possible processing subvolume 482 | mask_all = mask_gt | mask_pred 483 | bbox_min = np.zeros(3, np.int64) 484 | bbox_max = np.zeros(3, np.int64) 485 | 486 | # max projection to the x0-axis 487 | proj_0 = np.max(np.max(mask_all, axis=2), axis=1) 488 | idx_nonzero_0 = np.nonzero(proj_0)[0] 489 | if len(idx_nonzero_0) == 0: 490 | return {"distances_gt_to_pred": np.array([]), 491 | "distances_pred_to_gt": np.array([]), 492 | "surfel_areas_gt": np.array([]), 493 | "surfel_areas_pred": np.array([])} 494 | 495 | bbox_min[0] = np.min(idx_nonzero_0) 496 | bbox_max[0] = np.max(idx_nonzero_0) 497 | 498 | # max projection to the x1-axis 499 | proj_1 = np.max(np.max(mask_all, axis=2), axis=0) 500 | idx_nonzero_1 = np.nonzero(proj_1)[0] 501 | bbox_min[1] = np.min(idx_nonzero_1) 502 | bbox_max[1] = np.max(idx_nonzero_1) 503 | 504 | # max projection to the x2-axis 505 | proj_2 = np.max(np.max(mask_all, axis=1), axis=0) 506 | idx_nonzero_2 = np.nonzero(proj_2)[0] 507 | bbox_min[2] = np.min(idx_nonzero_2) 508 | bbox_max[2] = np.max(idx_nonzero_2) 509 | 510 | # print("bounding box min = {}".format(bbox_min)) 511 | # print("bounding box max = {}".format(bbox_max)) 512 | 513 | # crop the processing subvolume. 514 | # we need to zeropad the cropped region with 1 voxel at the lower, 515 | # the right and the back side. 
This is required to obtain the "full" 516 | # convolution result with the 2x2x2 kernel 517 | cropmask_gt = np.zeros((bbox_max - bbox_min) + 2, np.uint8) 518 | cropmask_pred = np.zeros((bbox_max - bbox_min) + 2, np.uint8) 519 | 520 | cropmask_gt[0:-1, 0:-1, 0:-1] = mask_gt[bbox_min[0]:bbox_max[0] + 1, 521 | bbox_min[1]:bbox_max[1] + 1, 522 | bbox_min[2]:bbox_max[2] + 1] 523 | 524 | cropmask_pred[0:-1, 0:-1, 0:-1] = mask_pred[bbox_min[0]:bbox_max[0] + 1, 525 | bbox_min[1]:bbox_max[1] + 1, 526 | bbox_min[2]:bbox_max[2] + 1] 527 | 528 | # compute the neighbour code (local binary pattern) for each voxel 529 | # the resultsing arrays are spacially shifted by minus half a voxel in each axis. 530 | # i.e. the points are located at the corners of the original voxels 531 | kernel = np.array([[[128, 64], 532 | [32, 16]], 533 | [[8, 4], 534 | [2, 1]]]) 535 | neighbour_code_map_gt = scipy.ndimage.filters.correlate( 536 | cropmask_gt.astype(np.uint8), kernel, mode="constant", cval=0) 537 | neighbour_code_map_pred = scipy.ndimage.filters.correlate(cropmask_pred.astype(np.uint8), kernel, mode="constant", 538 | cval=0) 539 | 540 | # create masks with the surface voxels 541 | borders_gt = ((neighbour_code_map_gt != 0) & 542 | (neighbour_code_map_gt != 255)) 543 | borders_pred = ((neighbour_code_map_pred != 0) & 544 | (neighbour_code_map_pred != 255)) 545 | 546 | # compute the distance transform (closest distance of each voxel to the surface voxels) 547 | if borders_gt.any(): 548 | distmap_gt = scipy.ndimage.morphology.distance_transform_edt( 549 | ~borders_gt, sampling=spacing_mm) 550 | else: 551 | distmap_gt = np.Inf * np.ones(borders_gt.shape) 552 | 553 | if borders_pred.any(): 554 | distmap_pred = scipy.ndimage.morphology.distance_transform_edt( 555 | ~borders_pred, sampling=spacing_mm) 556 | else: 557 | distmap_pred = np.Inf * np.ones(borders_pred.shape) 558 | 559 | # compute the area of each surface element 560 | surface_area_map_gt = neighbour_code_to_surface_area[neighbour_code_map_gt] 561 | surface_area_map_pred = neighbour_code_to_surface_area[neighbour_code_map_pred] 562 | 563 | # create a list of all surface elements with distance and area 564 | distances_gt_to_pred = distmap_pred[borders_gt] 565 | distances_pred_to_gt = distmap_gt[borders_pred] 566 | surfel_areas_gt = surface_area_map_gt[borders_gt] 567 | surfel_areas_pred = surface_area_map_pred[borders_pred] 568 | 569 | # sort them by distance 570 | if distances_gt_to_pred.shape != (0,): 571 | sorted_surfels_gt = np.array( 572 | sorted(zip(distances_gt_to_pred, surfel_areas_gt))) 573 | distances_gt_to_pred = sorted_surfels_gt[:, 0] 574 | surfel_areas_gt = sorted_surfels_gt[:, 1] 575 | 576 | if distances_pred_to_gt.shape != (0,): 577 | sorted_surfels_pred = np.array( 578 | sorted(zip(distances_pred_to_gt, surfel_areas_pred))) 579 | distances_pred_to_gt = sorted_surfels_pred[:, 0] 580 | surfel_areas_pred = sorted_surfels_pred[:, 1] 581 | 582 | return {"distances_gt_to_pred": distances_gt_to_pred, 583 | "distances_pred_to_gt": distances_pred_to_gt, 584 | "surfel_areas_gt": surfel_areas_gt, 585 | "surfel_areas_pred": surfel_areas_pred} 586 | 587 | 588 | def compute_average_surface_distance(surface_distances): 589 | distances_gt_to_pred = surface_distances["distances_gt_to_pred"] 590 | distances_pred_to_gt = surface_distances["distances_pred_to_gt"] 591 | surfel_areas_gt = surface_distances["surfel_areas_gt"] 592 | surfel_areas_pred = surface_distances["surfel_areas_pred"] 593 | average_distance_gt_to_pred = np.sum( 594 | distances_gt_to_pred * 
588 | def compute_average_surface_distance(surface_distances):  # area-weighted mean surface distance in each direction 589 | distances_gt_to_pred = surface_distances["distances_gt_to_pred"] 590 | distances_pred_to_gt = surface_distances["distances_pred_to_gt"] 591 | surfel_areas_gt = surface_distances["surfel_areas_gt"] 592 | surfel_areas_pred = surface_distances["surfel_areas_pred"] 593 | average_distance_gt_to_pred = np.sum( 594 | distances_gt_to_pred * surfel_areas_gt) / np.sum(surfel_areas_gt) 595 | average_distance_pred_to_gt = np.sum( 596 | distances_pred_to_gt * surfel_areas_pred) / np.sum(surfel_areas_pred) 597 | return (average_distance_gt_to_pred, average_distance_pred_to_gt) 598 | 599 | 600 | def compute_robust_hausdorff(surface_distances, percent):  # area-weighted percentile Hausdorff, e.g. percent=95 for HD95 601 | distances_gt_to_pred = surface_distances["distances_gt_to_pred"] 602 | distances_pred_to_gt = surface_distances["distances_pred_to_gt"] 603 | surfel_areas_gt = surface_distances["surfel_areas_gt"] 604 | surfel_areas_pred = surface_distances["surfel_areas_pred"] 605 | if len(distances_gt_to_pred) > 0: 606 | surfel_areas_cum_gt = np.cumsum( 607 | surfel_areas_gt) / np.sum(surfel_areas_gt) 608 | idx = np.searchsorted(surfel_areas_cum_gt, percent / 100.0) 609 | perc_distance_gt_to_pred = distances_gt_to_pred[min( 610 | idx, len(distances_gt_to_pred) - 1)] 611 | else: 612 | perc_distance_gt_to_pred = np.inf 613 | 614 | if len(distances_pred_to_gt) > 0: 615 | surfel_areas_cum_pred = np.cumsum( 616 | surfel_areas_pred) / np.sum(surfel_areas_pred) 617 | idx = np.searchsorted(surfel_areas_cum_pred, percent / 100.0) 618 | perc_distance_pred_to_gt = distances_pred_to_gt[min( 619 | idx, len(distances_pred_to_gt) - 1)] 620 | else: 621 | perc_distance_pred_to_gt = np.inf 622 | 623 | return max(perc_distance_gt_to_pred, perc_distance_pred_to_gt) 624 | 625 | 626 | def compute_surface_overlap_at_tolerance(surface_distances, tolerance_mm):  # fraction of each surface within tolerance_mm of the other 627 | distances_gt_to_pred = surface_distances["distances_gt_to_pred"] 628 | distances_pred_to_gt = surface_distances["distances_pred_to_gt"] 629 | surfel_areas_gt = surface_distances["surfel_areas_gt"] 630 | surfel_areas_pred = surface_distances["surfel_areas_pred"] 631 | rel_overlap_gt = np.sum( 632 | surfel_areas_gt[distances_gt_to_pred <= tolerance_mm]) / np.sum(surfel_areas_gt) 633 | rel_overlap_pred = np.sum( 634 | surfel_areas_pred[distances_pred_to_gt <= tolerance_mm]) / np.sum(surfel_areas_pred) 635 | return (rel_overlap_gt, rel_overlap_pred) 636 | 637 | 638 | def compute_surface_dice_at_tolerance(surface_distances, tolerance_mm):  # normalized surface Dice (NSD) at the given tolerance 639 | distances_gt_to_pred = surface_distances["distances_gt_to_pred"] 640 | distances_pred_to_gt = surface_distances["distances_pred_to_gt"] 641 | surfel_areas_gt = surface_distances["surfel_areas_gt"] 642 | surfel_areas_pred = surface_distances["surfel_areas_pred"] 643 | overlap_gt = np.sum(surfel_areas_gt[distances_gt_to_pred <= tolerance_mm]) 644 | overlap_pred = np.sum( 645 | surfel_areas_pred[distances_pred_to_gt <= tolerance_mm]) 646 | surface_dice = (overlap_gt + overlap_pred) / ( 647 | np.sum(surfel_areas_gt) + np.sum(surfel_areas_pred)) 648 | return surface_dice 649 | 650 | 651 | def nsd(result, reference, voxelspacing=None, tolerance_mm=1): 652 | nsd = compute_surface_dice_at_tolerance( 653 | compute_surface_distances(reference, result, voxelspacing), tolerance_mm) 654 | return nsd 655 | 656 | 657 | def dsc(result, reference): 658 | dsc = metric.binary.dc(result, reference) 659 | return dsc 660 | 661 |
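# Editor's note (illustrative sketch, not part of the original file): a single
# surface-distance dict per structure feeds all of the metrics above. Reusing
# the hypothetical cube masks from the sketch after compute_surface_distances:
#
#     sd = compute_surface_distances(mask_gt, mask_pred, (1.0, 1.0, 1.0))
#     hd95_value = compute_robust_hausdorff(sd, 95)            # 95th-percentile Hausdorff in mm
#     asd_gt, asd_pred = compute_average_surface_distance(sd)
#     nsd_value = compute_surface_dice_at_tolerance(sd, 1.0)   # 1 mm tolerance, matching nsd()'s default
#     dice_value = dsc(mask_pred, mask_gt)                     # volumetric Dice via medpy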
662 | # if __name__ == "__main__": 663 | # import SimpleITK as sitk 664 | # gt = sitk.ReadImage("gt.nii.gz") 665 | # pred = sitk.ReadImage("pred.nii.gz") 666 | 667 | # gt_array = sitk.GetArrayFromImage(gt) 668 | # pred_array = sitk.GetArrayFromImage(pred) 669 | # time1 = time.time() 670 | # for i in range(1, 17): 671 | # print(hd95(pred_array == i, gt_array == i, gt.GetSpacing()[::-1])) 672 | # print("Original total time: ", time.time() - time1) 673 | # time2 = time.time() 674 | # for i in range(1, 17): 675 | # print(nsd(pred_array == i, gt_array == 676 | # i, gt.GetSpacing()[::-1])) 677 | # print("Fast version total time: ", time.time() - time2) 678 | -------------------------------------------------------------------------------- /Poster_Top5_Team/Task01/Chan Woong Lee.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HiLab-git/SegRap2023/de87c551571a1ee4c7e6dc17e9824c19ddec6f30/Poster_Top5_Team/Task01/Chan Woong Lee.pdf -------------------------------------------------------------------------------- /Poster_Top5_Team/Task01/Kaixiang Yang.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HiLab-git/SegRap2023/de87c551571a1ee4c7e6dc17e9824c19ddec6f30/Poster_Top5_Team/Task01/Kaixiang Yang.pdf -------------------------------------------------------------------------------- /Poster_Top5_Team/Task01/Yanzhou Su.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HiLab-git/SegRap2023/de87c551571a1ee4c7e6dc17e9824c19ddec6f30/Poster_Top5_Team/Task01/Yanzhou Su.pdf -------------------------------------------------------------------------------- /Poster_Top5_Team/Task01/Yiwen Ye.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HiLab-git/SegRap2023/de87c551571a1ee4c7e6dc17e9824c19ddec6f30/Poster_Top5_Team/Task01/Yiwen Ye.pdf -------------------------------------------------------------------------------- /Poster_Top5_Team/Task01/Yunxin Zhong.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HiLab-git/SegRap2023/de87c551571a1ee4c7e6dc17e9824c19ddec6f30/Poster_Top5_Team/Task01/Yunxin Zhong.pdf -------------------------------------------------------------------------------- /Poster_Top5_Team/Task02/Constantin Ulrich.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HiLab-git/SegRap2023/de87c551571a1ee4c7e6dc17e9824c19ddec6f30/Poster_Top5_Team/Task02/Constantin Ulrich.pdf -------------------------------------------------------------------------------- /Poster_Top5_Team/Task02/Kaixiang Yang.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HiLab-git/SegRap2023/de87c551571a1ee4c7e6dc17e9824c19ddec6f30/Poster_Top5_Team/Task02/Kaixiang Yang.pdf -------------------------------------------------------------------------------- /Poster_Top5_Team/Task02/Mehdi Astaraki.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HiLab-git/SegRap2023/de87c551571a1ee4c7e6dc17e9824c19ddec6f30/Poster_Top5_Team/Task02/Mehdi Astaraki.pdf -------------------------------------------------------------------------------- /Poster_Top5_Team/Task02/Yiwen Ye.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HiLab-git/SegRap2023/de87c551571a1ee4c7e6dc17e9824c19ddec6f30/Poster_Top5_Team/Task02/Yiwen Ye.pdf -------------------------------------------------------------------------------- /Poster_Top5_Team/Task02/Zhaohu Xing.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HiLab-git/SegRap2023/de87c551571a1ee4c7e6dc17e9824c19ddec6f30/Poster_Top5_Team/Task02/Zhaohu Xing.pdf
-------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | #