├── CoTr_package
├── CoTr
│ ├── __init__.py
│ ├── configuration.py
│ ├── network_architecture
│ │ ├── CNNBackbone.py
│ │ ├── DeTrans
│ │ │ ├── DeformableTrans.py
│ │ │ ├── ops
│ │ │ │ ├── functions
│ │ │ │ │ └── ms_deform_attn_func.py
│ │ │ │ └── modules
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ └── ms_deform_attn.py
│ │ │ └── position_encoding.py
│ │ ├── ResTranUnet.py
│ │ ├── __init__.py
│ │ └── neural_network.py
│ ├── run
│ │ ├── __init__.py
│ │ ├── default_configuration.py
│ │ └── run_training.py
│ └── training
│ │ ├── __init__.py
│ │ ├── model_restore.py
│ │ └── network_training
│ │ ├── __init__.py
│ │ ├── network_trainer.py
│ │ ├── nnUNetTrainer.py
│ │ └── nnUNetTrainerV2_ResTrans.py
└── setup.py
├── LICENSE
├── README.md
├── data
└── splits_final.pkl
└── nnUNet
├── LICENSE
├── documentation
├── common_problems_and_solutions.md
├── common_questions.md
├── data_format_inference.md
├── dataset_conversion.md
├── expected_epoch_times.md
├── extending_nnunet.md
├── inference_example_Prostate.md
├── setting_up_paths.md
├── training_example_Hippocampus.md
└── using_nnUNet_as_baseline.md
├── nnunet
├── __init__.py
├── configuration.py
├── dataset_conversion
│ ├── Task017_BeyondCranialVaultAbdominalOrganSegmentation.py
│ ├── Task024_Promise2012.py
│ ├── Task027_AutomaticCardiacDetectionChallenge.py
│ ├── Task029_LiverTumorSegmentationChallenge.py
│ ├── Task032_BraTS_2018.py
│ ├── Task035_ISBI_MSLesionSegmentationChallenge.py
│ ├── Task037_038_Chaos_Challenge.py
│ ├── Task040_KiTS.py
│ ├── Task043_BraTS_2019.py
│ ├── Task055_SegTHOR.py
│ ├── Task056_VerSe2019.py
│ ├── Task056_Verse_normalize_orientation.py
│ ├── Task058_ISBI_EM_SEG.py
│ ├── Task059_EPFL_EM_MITO_SEG.py
│ ├── Task061_CREMI.py
│ ├── Task062_NIHPancreas.py
│ ├── Task064_KiTS_labelsFixed.py
│ ├── Task065_KiTS_NicksLabels.py
│ ├── Task069_CovidSeg.py
│ ├── Task075_Fluo_C3DH_A549_ManAndSim.py
│ ├── Task076_Fluo_N3DH_SIM.py
│ ├── Task082_BraTS_2020.py
│ ├── Task089_Fluo-N2DH-SIM.py
│ ├── Task114_heart_MNMs.py
│ └── __init__.py
├── evaluation
│ ├── __init__.py
│ ├── add_dummy_task_with_mean_over_all_tasks.py
│ ├── add_mean_dice_to_json.py
│ ├── collect_results_files.py
│ ├── evaluator.py
│ ├── metrics.py
│ ├── model_selection
│ │ ├── __init__.py
│ │ ├── collect_all_fold0_results_and_summarize_in_one_csv.py
│ │ ├── ensemble.py
│ │ ├── figure_out_what_to_submit.py
│ │ ├── rank_candidates.py
│ │ ├── rank_candidates_StructSeg.py
│ │ ├── rank_candidates_cascade.py
│ │ ├── summarize_results_in_one_json.py
│ │ └── summarize_results_with_plans.py
│ ├── region_based_evaluation.py
│ └── surface_dice.py
├── experiment_planning
│ ├── DatasetAnalyzer.py
│ ├── __init__.py
│ ├── alternative_experiment_planning
│ │ ├── experiment_planner_baseline_3DUNet_v21_11GB.py
│ │ ├── experiment_planner_baseline_3DUNet_v21_32GB.py
│ │ ├── experiment_planner_baseline_3DUNet_v21_3convperstage.py
│ │ ├── experiment_planner_baseline_3DUNet_v22.py
│ │ ├── experiment_planner_baseline_3DUNet_v23.py
│ │ ├── experiment_planner_residual_3DUNet_v21.py
│ │ ├── normalization
│ │ │ ├── experiment_planner_3DUNet_CT2.py
│ │ │ └── experiment_planner_3DUNet_nonCT.py
│ │ ├── patch_size
│ │ │ ├── experiment_planner_3DUNet_isotropic_in_mm.py
│ │ │ └── experiment_planner_3DUNet_isotropic_in_voxels.py
│ │ ├── pooling_and_convs
│ │ │ ├── experiment_planner_baseline_3DUNet_allConv3x3.py
│ │ │ └── experiment_planner_baseline_3DUNet_poolBasedOnSpacing.py
│ │ ├── readme.md
│ │ └── target_spacing
│ │ │ └── experiment_planner_baseline_3DUNet_targetSpacingForAnisoAxis.py
│ ├── change_batch_size.py
│ ├── common_utils.py
│ ├── experiment_planner_baseline_2DUNet.py
│ ├── experiment_planner_baseline_2DUNet_v21.py
│ ├── experiment_planner_baseline_3DUNet.py
│ ├── experiment_planner_baseline_3DUNet_v21.py
│ ├── nnUNet_convert_decathlon_task.py
│ ├── nnUNet_plan_and_preprocess.py
│ ├── old
│ │ └── old_plan_and_preprocess_task.py
│ ├── summarize_plans.py
│ └── utils.py
├── inference
│ ├── __init__.py
│ ├── change_trainer.py
│ ├── ensemble_predictions.py
│ ├── predict.py
│ ├── predict_simple.py
│ ├── pretrained_models
│ │ ├── collect_pretrained_models.py
│ │ └── download_pretrained_model.py
│ └── segmentation_export.py
├── network_architecture
│ ├── __init__.py
│ ├── custom_modules
│ │ ├── conv_blocks.py
│ │ ├── feature_response_normalization.py
│ │ ├── helperModules.py
│ │ └── mish.py
│ ├── generic_UNet.py
│ ├── generic_UNet_DP.py
│ ├── generic_modular_UNet.py
│ ├── generic_modular_residual_UNet.py
│ ├── initialization.py
│ └── neural_network.py
├── paths.py
├── postprocessing
│ ├── connected_components.py
│ ├── consolidate_all_for_paper.py
│ ├── consolidate_postprocessing.py
│ └── consolidate_postprocessing_simple.py
├── preprocessing
│ ├── __init__.py
│ ├── cropping.py
│ ├── preprocessing.py
│ └── sanity_checks.py
├── run
│ ├── __init__.py
│ ├── default_configuration.py
│ ├── run_training.py
│ ├── run_training_DDP.py
│ └── run_training_DP.py
├── training
│ ├── __init__.py
│ ├── cascade_stuff
│ │ ├── __init__.py
│ │ └── predict_next_stage.py
│ ├── data_augmentation
│ │ ├── __init__.py
│ │ ├── custom_transforms.py
│ │ ├── default_data_augmentation.py
│ │ ├── downsampling.py
│ │ └── pyramid_augmentations.py
│ ├── dataloading
│ │ ├── __init__.py
│ │ └── dataset_loading.py
│ ├── learning_rate
│ │ └── poly_lr.py
│ ├── loss_functions
│ │ ├── TopK_loss.py
│ │ ├── __init__.py
│ │ ├── crossentropy.py
│ │ ├── deep_supervision.py
│ │ └── dice_loss.py
│ ├── model_restore.py
│ ├── network_training
│ │ ├── __init__.py
│ │ ├── competitions_with_custom_Trainers
│ │ │ ├── BraTS2020
│ │ │ │ ├── nnUNetTrainerV2BraTSRegions.py
│ │ │ │ └── nnUNetTrainerV2BraTSRegions_moreDA.py
│ │ │ └── MMS
│ │ │ │ └── nnUNetTrainerV2_MMS.py
│ │ ├── network_trainer.py
│ │ ├── nnUNetTrainer.py
│ │ ├── nnUNetTrainerCascadeFullRes.py
│ │ ├── nnUNetTrainerV2.py
│ │ ├── nnUNetTrainerV2_CascadeFullRes.py
│ │ ├── nnUNetTrainerV2_DDP.py
│ │ ├── nnUNetTrainerV2_DP.py
│ │ ├── nnUNetTrainerV2_fp32.py
│ │ └── nnUNet_variants
│ │ │ ├── __init__.py
│ │ │ ├── architectural_variants
│ │ │ ├── nnUNetTrainerV2_3ConvPerStage.py
│ │ │ ├── nnUNetTrainerV2_3ConvPerStage_samefilters.py
│ │ │ ├── nnUNetTrainerV2_BN.py
│ │ │ ├── nnUNetTrainerV2_FRN.py
│ │ │ ├── nnUNetTrainerV2_GN.py
│ │ │ ├── nnUNetTrainerV2_GeLU.py
│ │ │ ├── nnUNetTrainerV2_LReLU_slope_2en1.py
│ │ │ ├── nnUNetTrainerV2_Mish.py
│ │ │ ├── nnUNetTrainerV2_NoNormalization.py
│ │ │ ├── nnUNetTrainerV2_NoNormalization_lr1en3.py
│ │ │ ├── nnUNetTrainerV2_ReLU.py
│ │ │ ├── nnUNetTrainerV2_ReLU_biasInSegOutput.py
│ │ │ ├── nnUNetTrainerV2_ReLU_convReLUIN.py
│ │ │ ├── nnUNetTrainerV2_ResencUNet.py
│ │ │ ├── nnUNetTrainerV2_allConv3x3.py
│ │ │ ├── nnUNetTrainerV2_lReLU_biasInSegOutput.py
│ │ │ ├── nnUNetTrainerV2_lReLU_convlReLUIN.py
│ │ │ ├── nnUNetTrainerV2_noDeepSupervision.py
│ │ │ └── nnUNetTrainerV2_softDeepSupervision.py
│ │ │ ├── cascade
│ │ │ ├── nnUNetTrainerV2CascadeFullRes_DAVariants.py
│ │ │ ├── nnUNetTrainerV2CascadeFullRes_lowerLR.py
│ │ │ ├── nnUNetTrainerV2CascadeFullRes_shorter.py
│ │ │ └── nnUNetTrainerV2CascadeFullRes_shorter_lowerLR.py
│ │ │ ├── copies
│ │ │ └── nnUNetTrainerV2_copies.py
│ │ │ ├── data_augmentation
│ │ │ ├── nnUNetTrainerV2_DA2.py
│ │ │ ├── nnUNetTrainerV2_DA3.py
│ │ │ ├── nnUNetTrainerV2_independentScalePerAxis.py
│ │ │ ├── nnUNetTrainerV2_insaneDA.py
│ │ │ ├── nnUNetTrainerV2_noDA.py
│ │ │ └── nnUNetTrainerV2_noMirroring.py
│ │ │ ├── loss_function
│ │ │ ├── nnUNetTrainerV2_ForceBD.py
│ │ │ ├── nnUNetTrainerV2_ForceSD.py
│ │ │ ├── nnUNetTrainerV2_Loss_CE.py
│ │ │ ├── nnUNetTrainerV2_Loss_CEGDL.py
│ │ │ ├── nnUNetTrainerV2_Loss_Dice.py
│ │ │ ├── nnUNetTrainerV2_Loss_DiceTopK10.py
│ │ │ ├── nnUNetTrainerV2_Loss_Dice_lr1en3.py
│ │ │ ├── nnUNetTrainerV2_Loss_Dice_squared.py
│ │ │ ├── nnUNetTrainerV2_Loss_MCC.py
│ │ │ ├── nnUNetTrainerV2_Loss_TopK10.py
│ │ │ ├── nnUNetTrainerV2_focalLoss.py
│ │ │ └── nnUNetTrainerV2_graduallyTransitionFromCEToDice.py
│ │ │ ├── miscellaneous
│ │ │ └── nnUNetTrainerV2_fullEvals.py
│ │ │ ├── nnUNetTrainerCE.py
│ │ │ ├── nnUNetTrainerNoDA.py
│ │ │ ├── optimizer_and_lr
│ │ │ ├── nnUNetTrainerV2_Adam.py
│ │ │ ├── nnUNetTrainerV2_Adam_ReduceOnPlateau.py
│ │ │ ├── nnUNetTrainerV2_Adam_lr_3en4.py
│ │ │ ├── nnUNetTrainerV2_Ranger_lr1en2.py
│ │ │ ├── nnUNetTrainerV2_Ranger_lr3en3.py
│ │ │ ├── nnUNetTrainerV2_Ranger_lr3en4.py
│ │ │ ├── nnUNetTrainerV2_SGD_ReduceOnPlateau.py
│ │ │ ├── nnUNetTrainerV2_SGD_fixedSchedule.py
│ │ │ ├── nnUNetTrainerV2_SGD_fixedSchedule2.py
│ │ │ ├── nnUNetTrainerV2_SGD_lrs.py
│ │ │ ├── nnUNetTrainerV2_cycleAtEnd.py
│ │ │ ├── nnUNetTrainerV2_fp16.py
│ │ │ ├── nnUNetTrainerV2_momentum09.py
│ │ │ ├── nnUNetTrainerV2_momentum095.py
│ │ │ ├── nnUNetTrainerV2_momentum098.py
│ │ │ ├── nnUNetTrainerV2_momentum09in2D.py
│ │ │ ├── nnUNetTrainerV2_reduceMomentumDuringTraining.py
│ │ │ └── nnUNetTrainerV2_warmup.py
│ │ │ ├── profiling
│ │ │ ├── nnUNetTrainerV2_2epochs.py
│ │ │ └── nnUNetTrainerV2_dummyLoad.py
│ │ │ └── resampling
│ │ │ └── nnUNetTrainerV2_resample33.py
│ └── optimizer
│ │ └── ranger.py
└── utilities
│ ├── __init__.py
│ ├── distributed.py
│ ├── file_conversions.py
│ ├── file_endings.py
│ ├── folder_names.py
│ ├── nd_softmax.py
│ ├── one_hot_encoding.py
│ ├── random_stuff.py
│ ├── recursive_delete_npz.py
│ ├── recursive_rename_taskXX_to_taskXXX.py
│ ├── sitk_stuff.py
│ ├── task_name_id_conversion.py
│ ├── tensor_utilities.py
│ └── to_torch.py
├── readme.md
├── setup.py
└── tests
└── test_steps_for_sliding_window_prediction.py
/CoTr_package/CoTr/__init__.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 |
3 | print("This is CoTr\n")
4 |
5 | from . import *
--------------------------------------------------------------------------------
/CoTr_package/CoTr/configuration.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | default_num_threads = 8 if 'nnUNet_def_n_proc' not in os.environ else int(os.environ['nnUNet_def_n_proc'])
4 | RESAMPLING_SEPARATE_Z_ANISO_THRESHOLD = 3 # determines what threshold to use for resampling the low resolution axis
5 | # separately (with NN)
--------------------------------------------------------------------------------
/CoTr_package/CoTr/network_architecture/DeTrans/ops/functions/ms_deform_attn_func.py:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------
2 | # 3D Deformable Self-attention
3 | # ------------------------------------------------------------------------
4 | # Modified from Deformable DETR
5 | # Copyright (c) 2020 SenseTime. All Rights Reserved.
6 | # Licensed under the Apache License, Version 2.0 [see LICENSE for details]
7 | # ------------------------------------------------------------------------
8 |
9 | from __future__ import absolute_import
10 | from __future__ import print_function
11 | from __future__ import division
12 |
13 | import torch
14 | import torch.nn.functional as F
15 | from torch.autograd import Function
16 | from torch.autograd.function import once_differentiable
17 |
18 | def ms_deform_attn_core_pytorch_3D(value, value_spatial_shapes, sampling_locations, attention_weights):
19 | N_, S_, M_, D_ = value.shape
20 | _, Lq_, M_, L_, P_, _ = sampling_locations.shape
21 | value_list = value.split([T_ * H_ * W_ for T_, H_, W_ in value_spatial_shapes], dim=1)
22 | sampling_grids = 2 * sampling_locations - 1
23 | # sampling_grids = 3 * sampling_locations - 1
24 | sampling_value_list = []
25 | for lid_, (T_, H_, W_) in enumerate(value_spatial_shapes):
26 | value_l_ = value_list[lid_].flatten(2).transpose(1, 2).reshape(N_*M_, D_, T_, H_, W_)
27 | sampling_grid_l_ = sampling_grids[:, :, :, lid_].transpose(1, 2).flatten(0, 1)[:,None,:,:,:]
28 | sampling_value_l_ = F.grid_sample(value_l_, sampling_grid_l_.to(dtype=value_l_.dtype), mode='bilinear', padding_mode='zeros', align_corners=False)[:,:,0]
29 | sampling_value_list.append(sampling_value_l_)
30 | attention_weights = attention_weights.transpose(1, 2).reshape(N_*M_, 1, Lq_, L_*P_)
31 | output = (torch.stack(sampling_value_list, dim=-2).flatten(-2) * attention_weights).sum(-1).view(N_, M_*D_, Lq_)
32 | return output.transpose(1, 2).contiguous()
--------------------------------------------------------------------------------
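A minimal shape-check sketch for `ms_deform_attn_core_pytorch_3D` (illustration only, not part of the repository; it assumes the CoTr package is installed as described in the README and all sizes are made up). `value` is `(N, S, M, D)` with `S` the total number of voxels over all feature levels, `sampling_locations` holds normalized `(x, y, z)` coordinates in `[0, 1]`, and `attention_weights` is normalized over levels and points:

```python
# Illustrative shape check (assumed sizes; requires torch and an installed CoTr package).
import torch
from CoTr.network_architecture.DeTrans.ops.functions.ms_deform_attn_func import \
    ms_deform_attn_core_pytorch_3D

N, M, D = 1, 2, 4                                   # batch, attention heads, channels per head
spatial_shapes = [(2, 3, 3)]                        # one feature level of size T x H x W
S = sum(t * h * w for t, h, w in spatial_shapes)    # total number of voxels over all levels
Lq, L, P = 5, len(spatial_shapes), 4                # queries, levels, sampling points per level

value = torch.randn(N, S, M, D)
sampling_locations = torch.rand(N, Lq, M, L, P, 3)  # normalized (x, y, z) coordinates in [0, 1]
attention_weights = torch.softmax(torch.randn(N, Lq, M, L * P), dim=-1).view(N, Lq, M, L, P)

out = ms_deform_attn_core_pytorch_3D(value, spatial_shapes, sampling_locations, attention_weights)
print(out.shape)  # torch.Size([1, 5, 8]) == (N, Lq, M * D)
```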
/CoTr_package/CoTr/network_architecture/DeTrans/ops/modules/__init__.py:
--------------------------------------------------------------------------------
1 | from .ms_deform_attn import MSDeformAttn
2 |
--------------------------------------------------------------------------------
/CoTr_package/CoTr/network_architecture/DeTrans/position_encoding.py:
--------------------------------------------------------------------------------
1 | """
2 | Positional encodings for the transformer.
3 | """
4 | import math
5 | import torch
6 | from torch import nn
7 | from typing import Optional
8 | from torch import Tensor
9 |
10 | class PositionEmbeddingSine(nn.Module):
11 | """
12 | This is a more standard version of the position embedding, very similar to the one
13 | used in the "Attention Is All You Need" paper, generalized here to 3D feature volumes.
14 | """
15 | def __init__(self, num_pos_feats=[64, 64, 64], temperature=10000, normalize=False, scale=None):
16 | super().__init__()
17 | self.num_pos_feats = num_pos_feats
18 | self.temperature = temperature
19 | self.normalize = normalize
20 | if scale is not None and normalize is False:
21 | raise ValueError("normalize should be True if scale is passed")
22 | if scale is None:
23 | scale = 2 * math.pi
24 | self.scale = scale
25 |
26 | def forward(self, x):
27 | bs, c, d, h, w = x.shape
28 | mask = torch.zeros(bs, d, h, w, dtype=torch.bool).cuda()
29 | assert mask is not None
30 | not_mask = ~mask
31 | d_embed = not_mask.cumsum(1, dtype=torch.float32)
32 | y_embed = not_mask.cumsum(2, dtype=torch.float32)
33 | x_embed = not_mask.cumsum(3, dtype=torch.float32)
34 | if self.normalize:
35 | eps = 1e-6
36 | d_embed = (d_embed - 0.5) / (d_embed[:, -1:, :, :] + eps) * self.scale
37 | y_embed = (y_embed - 0.5) / (y_embed[:, :, -1:, :] + eps) * self.scale
38 | x_embed = (x_embed - 0.5) / (x_embed[:, :, :, -1:] + eps) * self.scale
39 |
40 | dim_tx = torch.arange(self.num_pos_feats[0], dtype=torch.float32, device=x.device)
41 | dim_tx = self.temperature ** (3 * (dim_tx // 3) / self.num_pos_feats[0])
42 |
43 | dim_ty = torch.arange(self.num_pos_feats[1], dtype=torch.float32, device=x.device)
44 | dim_ty = self.temperature ** (3 * (dim_ty // 3) / self.num_pos_feats[1])
45 |
46 | dim_td = torch.arange(self.num_pos_feats[2], dtype=torch.float32, device=x.device)
47 | dim_td = self.temperature ** (3 * (dim_td // 3) / self.num_pos_feats[2])
48 |
49 | pos_x = x_embed[:, :, :, :, None] / dim_tx
50 | pos_y = y_embed[:, :, :, :, None] / dim_ty
51 | pos_d = d_embed[:, :, :, :, None] / dim_td
52 |
53 | pos_x = torch.stack((pos_x[:, :, :, :, 0::2].sin(), pos_x[:, :, :, :, 1::2].cos()), dim=5).flatten(4)
54 | pos_y = torch.stack((pos_y[:, :, :, :, 0::2].sin(), pos_y[:, :, :, :, 1::2].cos()), dim=5).flatten(4)
55 | pos_d = torch.stack((pos_d[:, :, :, :, 0::2].sin(), pos_d[:, :, :, :, 1::2].cos()), dim=5).flatten(4)
56 |
57 | pos = torch.cat((pos_d, pos_y, pos_x), dim=4).permute(0, 4, 1, 2, 3)
58 | return pos
59 |
60 |
61 | def build_position_encoding(mode, hidden_dim):
62 | N_steps = hidden_dim // 3
63 | if (hidden_dim % 3) != 0:
64 | N_steps = [N_steps, N_steps, N_steps + hidden_dim % 3]
65 | else:
66 | N_steps = [N_steps, N_steps, N_steps]
67 |
68 | if mode in ('v2', 'sine'):
69 | position_embedding = PositionEmbeddingSine(num_pos_feats=N_steps, normalize=True)
70 | else:
71 | raise ValueError(f"not supported {mode}")
72 |
73 | return position_embedding
74 |
--------------------------------------------------------------------------------
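A short usage sketch for the sine position encoding (illustration only, not from the repository): `build_position_encoding` splits `hidden_dim` evenly over the three spatial axes, and the module returns a positional map with the same spatial layout as its 5D input. Because `forward` builds its mask with `.cuda()`, this assumes a CUDA device is available; the feature-map size below is arbitrary:

```python
# Illustrative usage (assumed sizes; requires a CUDA device because forward() calls .cuda()).
import torch
from CoTr.network_architecture.DeTrans.position_encoding import build_position_encoding

pos_enc = build_position_encoding(mode='sine', hidden_dim=384)  # 384 = 3 * 128 features per axis

x = torch.randn(2, 384, 8, 16, 16).cuda()  # (batch, channels, D, H, W) feature volume
pos = pos_enc(x)
print(pos.shape)  # torch.Size([2, 384, 8, 16, 16]) -- positional map matching the input layout
```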
/CoTr_package/CoTr/network_architecture/__init__.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | from . import *
--------------------------------------------------------------------------------
/CoTr_package/CoTr/run/__init__.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | from . import *
--------------------------------------------------------------------------------
/CoTr_package/CoTr/run/default_configuration.py:
--------------------------------------------------------------------------------
1 | import nnunet
2 | from nnunet.paths import network_training_output_dir, preprocessing_output_dir, default_plans_identifier
3 | from batchgenerators.utilities.file_and_folder_operations import *
4 | from nnunet.experiment_planning.summarize_plans import summarize_plans
5 | from nnunet.training.model_restore import recursive_find_python_class
6 |
7 |
8 | def get_configuration_from_output_folder(folder):
9 | # split off network_training_output_dir
10 | folder = folder[len(network_training_output_dir):]
11 | if folder.startswith("/"):
12 | folder = folder[1:]
13 |
14 | configuration, task, trainer_and_plans_identifier = folder.split("/")
15 | trainer, plans_identifier = trainer_and_plans_identifier.split("__")
16 | return configuration, task, trainer, plans_identifier
17 |
18 |
19 | def get_default_configuration(outname, network, task, network_trainer, plans_identifier=default_plans_identifier,
20 | search_in=(nnunet.__path__[0], "training", "network_training"),
21 | base_module='nnunet.training.network_training'):
22 | assert network in ['2d', '3d_lowres', '3d_fullres', '3d_cascade_fullres'], \
23 | "network can only be one of the following: \'3d\', \'3d_lowres\', \'3d_fullres\', \'3d_cascade_fullres\'"
24 |
25 | dataset_directory = join(preprocessing_output_dir, task)
26 |
27 | if network == '2d':
28 | plans_file = join(preprocessing_output_dir, task, plans_identifier + "_plans_2D.pkl")
29 | else:
30 | plans_file = join(preprocessing_output_dir, task, plans_identifier + "_plans_3D.pkl")
31 |
32 | plans = load_pickle(plans_file)
33 | possible_stages = list(plans['plans_per_stage'].keys())
34 |
35 | if (network == '3d_cascade_fullres' or network == "3d_lowres") and len(possible_stages) == 1:
36 | raise RuntimeError("3d_lowres/3d_cascade_fullres only applies if there is more than one stage. This task does "
37 | "not require the cascade. Run 3d_fullres instead")
38 |
39 | if network == '2d' or network == "3d_lowres":
40 | stage = 0
41 | else:
42 | stage = possible_stages[-1]
43 |
44 | trainer_class = recursive_find_python_class([join(*search_in)], network_trainer, current_module=base_module)
45 |
46 | output_folder_name = join(network_training_output_dir, network+'_'+plans_identifier, task, outname)
47 |
48 | print("###############################################")
49 | print("I am running the following nnUNet: %s" % network)
50 | print("My trainer class is: ", trainer_class)
51 | print("For that I will be using the following configuration:")
52 | summarize_plans(plans_file)
53 | print("I am using stage %d from these plans" % stage)
54 |
55 | if (network == '2d' or len(possible_stages) > 1) and not network == '3d_lowres':
56 | batch_dice = True
57 | print("I am using batch dice + CE loss")
58 | else:
59 | batch_dice = False
60 | print("I am using sample dice + CE loss")
61 |
62 | print("\nI am using data from this folder: ", join(dataset_directory, plans['data_identifier']))
63 | print("###############################################")
64 | return plans_file, output_folder_name, dataset_directory, batch_dice, stage, trainer_class
65 |
--------------------------------------------------------------------------------
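A hypothetical call of `get_default_configuration` (illustration only): with the default `search_in`, the trainer class is resolved inside `nnunet.training.network_training`, so a stock trainer name is used here. The task and output names are placeholders, and the sketch assumes the nnU-Net paths are configured and the task has been preprocessed:

```python
# Hypothetical usage sketch: resolve plans file, output folder and trainer class for a run.
# Task/outname are placeholders; assumes nnU-Net paths are set and the task is preprocessed.
from CoTr.run.default_configuration import get_default_configuration

(plans_file, output_folder, dataset_directory,
 batch_dice, stage, trainer_class) = get_default_configuration(
    outname='CoTr_example',                        # appended to the output folder path
    network='3d_fullres',
    task='Task017_AbdominalOrganSegmentation',     # placeholder preprocessed task folder
    network_trainer='nnUNetTrainerV2',             # found via recursive_find_python_class
)
print(trainer_class, stage, batch_dice)
```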
/CoTr_package/CoTr/training/__init__.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | from . import *
--------------------------------------------------------------------------------
/CoTr_package/CoTr/training/network_training/__init__.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | from . import *
--------------------------------------------------------------------------------
/CoTr_package/setup.py:
--------------------------------------------------------------------------------
1 | from setuptools import setup, find_namespace_packages
2 |
3 | setup(name='CoTr',
4 | packages=find_namespace_packages(include=["CoTr", "CoTr.*"]),
5 | version='0.0.1'
6 | )
7 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | ## CoTr: Efficient 3D Medical Image Segmentation by bridging CNN and Transformer
2 |
3 | This is the official PyTorch implementation of CoTr:
4 |
5 | **Paper: [CoTr: Efficient 3D Medical Image Segmentation by bridging CNN and
6 | Transformer](https://arxiv.org/pdf/2103.03024.pdf).**
7 |
8 |
9 |
10 | ## Requirements
11 | * CUDA 11.0
12 | * Python 3.7
13 | * PyTorch 1.7
14 | * Torchvision 0.8.2
15 |
16 | ## Usage
17 |
18 | ### 0. Installation
19 | * Install PyTorch 1.7, nnUNet, and CoTr as below:
20 |
21 | ```
22 | pip install torch==1.7.1+cu110 torchvision==0.8.2+cu110 torchaudio==0.7.2 -f https://download.pytorch.org/whl/torch_stable.html
23 |
24 | cd nnUNet
25 | pip install -e .
26 |
27 | cd CoTr_package
28 | pip install -e .
29 | ```
30 |
31 | ### 1. Data Preparation
32 | * Download [BCV dataset](https://www.synapse.org/#!Synapse:syn3193805/wiki/217789)
33 | * Preprocess the BCV dataset using the uploaded nnUNet package.
34 | * The training and testing IDs are in `data/splits_final.pkl`.
35 |
36 | ### 2. Training
37 | * `cd CoTr_package/CoTr/run`
38 |
39 | * Run `nohup python run_training.py -gpu='0' -outpath='CoTr' 2>&1 &` for training.
40 |
41 | ### 3. Testing
42 | * Run `nohup python run_training.py -gpu='0' -outpath='CoTr' -val --val_folder='validation_output' 2>&1 &` for validation.
43 |
44 | ### 4. Citation
45 | If this code is helpful for your study, please cite:
46 |
47 | ```
48 | @inproceedings{xie2021cotr,
49 | title={CoTr: Efficiently Bridging CNN and Transformer for 3D Medical Image Segmentation},
50 | author={Xie, Yutong and Zhang, Jianpeng and Shen, Chunhua and Xia, Yong},
51 | booktitle={MICCAI},
52 | year={2021}
53 | }
54 |
55 | ```
56 |
57 | ### 5. Acknowledgements
58 | Parts of the code are reused from [nnU-Net](https://github.com/MIC-DKFZ/nnUNet). Thanks to Fabian Isensee for the nnU-Net code.
59 |
60 | ### Contact
61 | Yutong Xie (yutong.xie678@gmail.com)
62 |
--------------------------------------------------------------------------------
/data/splits_final.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YtongXie/CoTr/521b4cc4cc6128c0f0e08057b0b15e5c1da7cad4/data/splits_final.pkl
--------------------------------------------------------------------------------
/nnUNet/documentation/data_format_inference.md:
--------------------------------------------------------------------------------
1 | # Data format for Inference
2 |
3 | The data format for inference must match the one used for the raw data (specifically, the images must be in exactly
4 | the same format as in the imagesTr folder). As before, the filenames must start with a
5 | unique identifier, followed by a 4-digit modality identifier. Here is an example for two different datasets:
6 |
7 | 1) Task005_Prostate:
8 |
9 | This task has 2 modalities, so the files in the input folder must look like this:
10 |
11 | input_folder
12 | ├── prostate_03_0000.nii.gz
13 | ├── prostate_03_0001.nii.gz
14 | ├── prostate_05_0000.nii.gz
15 | ├── prostate_05_0001.nii.gz
16 | ├── prostate_08_0000.nii.gz
17 | ├── prostate_08_0001.nii.gz
18 | ├── ...
19 |
20 | _0000 is always the T2 image and _0001 is always the ADC image (as specified by 'modality' in the dataset.json)
21 |
22 | 2) Task002_Heart:
23 |
24 | imagesTs
25 | ├── la_001_0000.nii.gz
26 | ├── la_002_0000.nii.gz
27 | ├── la_006_0000.nii.gz
28 | ├── ...
29 |
30 | Task002 only has one modality, so each case only has one _0000.nii.gz file.
31 |
32 |
33 | The segmentations in the output folder will be named IDENTIFIER.nii.gz (omitting the modality identifier).
34 |
--------------------------------------------------------------------------------
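The naming convention above (case identifier plus a 4-digit modality suffix) can also be checked programmatically. A small sketch, not part of nnU-Net; the folder name and modality count below are assumptions:

```python
# Sketch: verify that an inference input folder follows the IDENTIFIER_XXXX.nii.gz convention
# and that every case provides the expected modalities (e.g. 2 for Task005_Prostate).
import re
from collections import defaultdict
from pathlib import Path

def check_input_folder(folder, expected_modalities=2):
    pattern = re.compile(r"^(?P<case>.+)_(?P<mod>\d{4})\.nii\.gz$")
    cases = defaultdict(set)
    for f in sorted(Path(folder).glob("*.nii.gz")):
        m = pattern.match(f.name)
        if m is None:
            raise ValueError(f"{f.name} does not match IDENTIFIER_XXXX.nii.gz")
        cases[m.group("case")].add(int(m.group("mod")))
    for case, mods in cases.items():
        if mods != set(range(expected_modalities)):
            raise ValueError(f"case {case} has modalities {sorted(mods)}, "
                             f"expected 0..{expected_modalities - 1}")
    return sorted(cases)

# check_input_folder("input_folder", expected_modalities=2)  # hypothetical folder name
```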
/nnUNet/documentation/training_example_Hippocampus.md:
--------------------------------------------------------------------------------
1 | # Example: 3D U-Net training on the Hippocampus dataset
2 |
3 | This is a step-by-step example on how to run a 3D full resolution Training with the Hippocampus dataset from the
4 | Medical Segmentation Decathlon.
5 |
6 | 1) Install nnU-Net by following the instructions [here](../readme.md#installation). Make sure to set all relevant paths,
7 | also see [here](setting_up_paths.md). This step is necessary so that nnU-Net knows where to store raw data,
8 | preprocessed data and trained models.
9 | 2) Download the Hippocampus dataset of the Medical Segmentation Decathlon from
10 | [here](https://drive.google.com/drive/folders/1HqEgzS8BV2c7xYNrZdEAnrHk7osJJ--2). Then extract the archive to a
11 | destination of your choice.
12 | 3) Decathlon data come as 4D niftis. This is not compatible with nnU-Net (see dataset format specified
13 | [here](dataset_conversion.md)). Convert the Hippocampus dataset into the correct format with
14 |
15 | ```bash
16 | nnUNet_convert_decathlon_task -i /xxx/Task04_Hippocampus
17 | ```
18 |
19 | Note that `Task04_Hippocampus` must be the folder that has the three 'imagesTr', 'labelsTr', 'imagesTs' subfolders!
20 | The converted dataset can be found in $nnUNet_raw_data_base/nnUNet_raw_data ($nnUNet_raw_data_base is the folder for
21 | raw data that you specified during installation)
22 | 4) You can now run nnU-Net's pipeline configuration (and the preprocessing) with the following line:
23 | ```bash
24 | nnUNet_plan_and_preprocess -t 4
25 | ```
26 | Where 4 refers to the task ID of the Hippocampus dataset.
27 | 5) Now you can start network training. This is how you train a 3D full resolution U-Net on the Hippocampus dataset:
28 | ```bash
29 | nnUNet_train 3d_fullres nnUNetTrainerV2 4 0
30 | ```
31 | By default, nnU-Net runs all trainings as a 5-fold cross-validation. The command above will run only the training for the
32 | first fold (fold 0); 4 is the task identifier of the Hippocampus dataset. Training one fold should take about 9
33 | hours on a modern GPU.
34 |
35 | This tutorial is only intended to demonstrate how easy it is to get nnU-Net running. You do not need to finish the
36 | network training - pretrained models for the hippocampus task are available (see [here](../readme.md#run-inference)).
37 |
38 | The only prerequisite for running nnU-Net on your custom dataset is to bring it into a structured, nnU-Net compatible
39 | format. nnU-Net will take care of the rest. See [here](dataset_conversion.md) for instructions on how to convert
40 | datasets into nnU-Net compatible format.
41 |
--------------------------------------------------------------------------------
/nnUNet/documentation/using_nnUNet_as_baseline.md:
--------------------------------------------------------------------------------
1 | (The U-Net is the current punching bag of methods development, and nnU-Net is going to take over that role going forward. That is
2 | cool (great, in fact!), but it should be done correctly. Here are some tips on how to benchmark against nnU-Net.)
3 |
4 | This is work in progress
--------------------------------------------------------------------------------
/nnUNet/nnunet/__init__.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | print("\n\nPlease cite the following paper when using nnUNet:\n\nIsensee, F., Jaeger, P.F., Kohl, S.A.A. et al. "
3 | "\"nnU-Net: a self-configuring method for deep learning-based biomedical image segmentation.\" "
4 | "Nat Methods (2020). https://doi.org/10.1038/s41592-020-01008-z\n\n")
5 | print("If you have questions or suggestions, feel free to open an issue at https://github.com/MIC-DKFZ/nnUNet\n")
6 |
7 | from . import *
--------------------------------------------------------------------------------
/nnUNet/nnunet/configuration.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | default_num_threads = 8 if 'nnUNet_def_n_proc' not in os.environ else int(os.environ['nnUNet_def_n_proc'])
4 | RESAMPLING_SEPARATE_Z_ANISO_THRESHOLD = 3 # determines what threshold to use for resampling the low resolution axis
5 | # separately (with NN)
--------------------------------------------------------------------------------
/nnUNet/nnunet/dataset_conversion/Task024_Promise2012.py:
--------------------------------------------------------------------------------
1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | from collections import OrderedDict
15 | import SimpleITK as sitk
16 | from batchgenerators.utilities.file_and_folder_operations import *
17 |
18 |
19 | def export_for_submission(source_dir, target_dir):
20 | """
21 | promise wants mhd :-/
22 | :param source_dir:
23 | :param target_dir:
24 | :return:
25 | """
26 | files = subfiles(source_dir, suffix=".nii.gz", join=False)
27 | target_files = [join(target_dir, i[:-7] + ".mhd") for i in files]
28 | maybe_mkdir_p(target_dir)
29 | for f, t in zip(files, target_files):
30 | img = sitk.ReadImage(join(source_dir, f))
31 | sitk.WriteImage(img, t)
32 |
33 |
34 | if __name__ == "__main__":
35 | folder = "/media/fabian/My Book/datasets/promise2012"
36 | out_folder = "/media/fabian/My Book/MedicalDecathlon/MedicalDecathlon_raw_splitted/Task024_Promise"
37 |
38 | maybe_mkdir_p(join(out_folder, "imagesTr"))
39 | maybe_mkdir_p(join(out_folder, "imagesTs"))
40 | maybe_mkdir_p(join(out_folder, "labelsTr"))
41 | # train
42 | current_dir = join(folder, "train")
43 | segmentations = subfiles(current_dir, suffix="segmentation.mhd")
44 | raw_data = [i for i in subfiles(current_dir, suffix="mhd") if not i.endswith("segmentation.mhd")]
45 | for i in raw_data:
46 | out_fname = join(out_folder, "imagesTr", i.split("/")[-1][:-4] + "_0000.nii.gz")
47 | sitk.WriteImage(sitk.ReadImage(i), out_fname)
48 | for i in segmentations:
49 | out_fname = join(out_folder, "labelsTr", i.split("/")[-1][:-17] + ".nii.gz")
50 | sitk.WriteImage(sitk.ReadImage(i), out_fname)
51 |
52 | # test
53 | current_dir = join(folder, "test")
54 | test_data = subfiles(current_dir, suffix="mhd")
55 | for i in test_data:
56 | out_fname = join(out_folder, "imagesTs", i.split("/")[-1][:-4] + "_0000.nii.gz")
57 | sitk.WriteImage(sitk.ReadImage(i), out_fname)
58 |
59 |
60 | json_dict = OrderedDict()
61 | json_dict['name'] = "PROMISE12"
62 | json_dict['description'] = "prostate"
63 | json_dict['tensorImageSize'] = "4D"
64 | json_dict['reference'] = "see challenge website"
65 | json_dict['licence'] = "see challenge website"
66 | json_dict['release'] = "0.0"
67 | json_dict['modality'] = {
68 | "0": "MRI",
69 | }
70 | json_dict['labels'] = {
71 | "0": "background",
72 | "1": "prostate"
73 | }
74 | json_dict['numTraining'] = len(raw_data)
75 | json_dict['numTest'] = len(test_data)
76 | json_dict['training'] = [{'image': "./imagesTr/%s.nii.gz" % i.split("/")[-1][:-4], "label": "./labelsTr/%s.nii.gz" % i.split("/")[-1][:-4]} for i in
77 | raw_data]
78 | json_dict['test'] = ["./imagesTs/%s.nii.gz" % i.split("/")[-1][:-4] for i in test_data]
79 |
80 | save_json(json_dict, os.path.join(out_folder, "dataset.json"))
81 |
82 |
--------------------------------------------------------------------------------
/nnUNet/nnunet/dataset_conversion/Task062_NIHPancreas.py:
--------------------------------------------------------------------------------
1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 |
16 | from collections import OrderedDict
17 | from nnunet.paths import nnUNet_raw_data
18 | from batchgenerators.utilities.file_and_folder_operations import *
19 | import shutil
20 | from multiprocessing import Pool
21 | import nibabel
22 |
23 |
24 | def reorient(filename):
25 | img = nibabel.load(filename)
26 | img = nibabel.as_closest_canonical(img)
27 | nibabel.save(img, filename)
28 |
29 |
30 | if __name__ == "__main__":
31 | base = "/media/fabian/DeepLearningData/Pancreas-CT"
32 |
33 | # reorient
34 | p = Pool(8)
35 | results = []
36 |
37 | for f in subfiles(join(base, "data"), suffix=".nii.gz"):
38 | results.append(p.map_async(reorient, (f, )))
39 | _ = [i.get() for i in results]
40 |
41 | for f in subfiles(join(base, "TCIA_pancreas_labels-02-05-2017"), suffix=".nii.gz"):
42 | results.append(p.map_async(reorient, (f, )))
43 | _ = [i.get() for i in results]
44 |
45 | task_id = 62
46 | task_name = "NIHPancreas"
47 |
48 | foldername = "Task%03.0d_%s" % (task_id, task_name)
49 |
50 | out_base = join(nnUNet_raw_data, foldername)
51 | imagestr = join(out_base, "imagesTr")
52 | imagests = join(out_base, "imagesTs")
53 | labelstr = join(out_base, "labelsTr")
54 | maybe_mkdir_p(imagestr)
55 | maybe_mkdir_p(imagests)
56 | maybe_mkdir_p(labelstr)
57 |
58 | train_patient_names = []
59 | test_patient_names = []
60 | cases = list(range(1, 83))
61 | folder_data = join(base, "data")
62 | folder_labels = join(base, "TCIA_pancreas_labels-02-05-2017")
63 | for c in cases:
64 | casename = "pancreas_%04.0d" % c
65 | shutil.copy(join(folder_data, "PANCREAS_%04.0d.nii.gz" % c), join(imagestr, casename + "_0000.nii.gz"))
66 | shutil.copy(join(folder_labels, "label%04.0d.nii.gz" % c), join(labelstr, casename + ".nii.gz"))
67 | train_patient_names.append(casename)
68 |
69 | json_dict = OrderedDict()
70 | json_dict['name'] = task_name
71 | json_dict['description'] = task_name
72 | json_dict['tensorImageSize'] = "4D"
73 | json_dict['reference'] = "see website"
74 | json_dict['licence'] = "see website"
75 | json_dict['release'] = "0.0"
76 | json_dict['modality'] = {
77 | "0": "CT",
78 | }
79 | json_dict['labels'] = {
80 | "0": "background",
81 | "1": "Pancreas",
82 | }
83 | json_dict['numTraining'] = len(train_patient_names)
84 | json_dict['numTest'] = len(test_patient_names)
85 | json_dict['training'] = [{'image': "./imagesTr/%s.nii.gz" % i.split("/")[-1], "label": "./labelsTr/%s.nii.gz" % i.split("/")[-1]} for i in
86 | train_patient_names]
87 | json_dict['test'] = ["./imagesTs/%s.nii.gz" % i.split("/")[-1] for i in test_patient_names]
88 |
89 | save_json(json_dict, os.path.join(out_base, "dataset.json"))
90 |
--------------------------------------------------------------------------------
/nnUNet/nnunet/dataset_conversion/Task064_KiTS_labelsFixed.py:
--------------------------------------------------------------------------------
1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 |
16 | import shutil
17 | from batchgenerators.utilities.file_and_folder_operations import *
18 | from nnunet.paths import nnUNet_raw_data
19 |
20 |
21 | if __name__ == "__main__":
22 | """
23 | This is the KiTS dataset after Nick fixed all the labels that had errors. Downloaded on Jan 6th 2020
24 | """
25 |
26 | base = "/media/userdisk1/Datasets/KITS2019/data"
27 |
28 | task_id = 64
29 | task_name = "KiTS_labelsFixed"
30 |
31 | foldername = "Task%03.0d_%s" % (task_id, task_name)
32 |
33 | out_base = join(nnUNet_raw_data, foldername)
34 | imagestr = join(out_base, "imagesTr")
35 | imagests = join(out_base, "imagesTs")
36 | labelstr = join(out_base, "labelsTr")
37 | maybe_mkdir_p(imagestr)
38 | maybe_mkdir_p(imagests)
39 | maybe_mkdir_p(labelstr)
40 |
41 | train_patient_names = []
42 | test_patient_names = []
43 | all_cases = subfolders(base, join=False)
44 |
45 | train_patients = all_cases[:210]
46 | test_patients = all_cases[210:]
47 |
48 | for p in train_patients:
49 | curr = join(base, p)
50 | label_file = join(curr, "segmentation.nii.gz")
51 | image_file = join(curr, "imaging.nii.gz")
52 | shutil.copy(image_file, join(imagestr, p + "_0000.nii.gz"))
53 | shutil.copy(label_file, join(labelstr, p + ".nii.gz"))
54 | train_patient_names.append(p)
55 |
56 | for p in test_patients:
57 | curr = join(base, p)
58 | image_file = join(curr, "imaging.nii.gz")
59 | shutil.copy(image_file, join(imagests, p + "_0000.nii.gz"))
60 | test_patient_names.append(p)
61 |
62 | json_dict = {}
63 | json_dict['name'] = "KiTS"
64 | json_dict['description'] = "kidney and kidney tumor segmentation"
65 | json_dict['tensorImageSize'] = "4D"
66 | json_dict['reference'] = "KiTS data for nnunet"
67 | json_dict['licence'] = ""
68 | json_dict['release'] = "0.0"
69 | json_dict['modality'] = {
70 | "0": "CT",
71 | }
72 | json_dict['labels'] = {
73 | "0": "background",
74 | "1": "Kidney",
75 | "2": "Tumor"
76 | }
77 |
78 | json_dict['numTraining'] = len(train_patient_names)
79 | json_dict['numTest'] = len(test_patient_names)
80 | json_dict['training'] = [{'image': "./imagesTr/%s.nii.gz" % i.split("/")[-1], "label": "./labelsTr/%s.nii.gz" % i.split("/")[-1]} for i in
81 | train_patient_names]
82 | json_dict['test'] = ["./imagesTs/%s.nii.gz" % i.split("/")[-1] for i in test_patient_names]
83 |
84 | save_json(json_dict, os.path.join(out_base, "dataset.json"))
85 |
--------------------------------------------------------------------------------
/nnUNet/nnunet/dataset_conversion/Task065_KiTS_NicksLabels.py:
--------------------------------------------------------------------------------
1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 |
16 | import shutil
17 |
18 | from batchgenerators.utilities.file_and_folder_operations import *
19 | from nnunet.paths import nnUNet_raw_data
20 |
21 | if __name__ == "__main__":
22 | """
23 | Nick asked me to rerun the training with other labels (the Kidney region is defined differently).
24 |
25 | These labels operate in interpolated spacing. I don't like that but that's how it is
26 | """
27 |
28 | base = "/media/fabian/My Book/datasets/KiTS_NicksLabels/kits19/data"
29 | labelsdir = "/media/fabian/My Book/datasets/KiTS_NicksLabels/filled_labels"
30 |
31 | task_id = 65
32 | task_name = "KiTS_NicksLabels"
33 |
34 | foldername = "Task%03.0d_%s" % (task_id, task_name)
35 |
36 | out_base = join(nnUNet_raw_data, foldername)
37 | imagestr = join(out_base, "imagesTr")
38 | imagests = join(out_base, "imagesTs")
39 | labelstr = join(out_base, "labelsTr")
40 | maybe_mkdir_p(imagestr)
41 | maybe_mkdir_p(imagests)
42 | maybe_mkdir_p(labelstr)
43 |
44 | train_patient_names = []
45 | test_patient_names = []
46 | all_cases = subfolders(base, join=False)
47 |
48 | train_patients = all_cases[:210]
49 | test_patients = all_cases[210:]
50 |
51 | for p in train_patients:
52 | curr = join(base, p)
53 | label_file = join(labelsdir, p + ".nii.gz")
54 | image_file = join(curr, "imaging.nii.gz")
55 | shutil.copy(image_file, join(imagestr, p + "_0000.nii.gz"))
56 | shutil.copy(label_file, join(labelstr, p + ".nii.gz"))
57 | train_patient_names.append(p)
58 |
59 | for p in test_patients:
60 | curr = join(base, p)
61 | image_file = join(curr, "imaging.nii.gz")
62 | shutil.copy(image_file, join(imagests, p + "_0000.nii.gz"))
63 | test_patient_names.append(p)
64 |
65 | json_dict = {}
66 | json_dict['name'] = "KiTS"
67 | json_dict['description'] = "kidney and kidney tumor segmentation"
68 | json_dict['tensorImageSize'] = "4D"
69 | json_dict['reference'] = "KiTS data for nnunet"
70 | json_dict['licence'] = ""
71 | json_dict['release'] = "0.0"
72 | json_dict['modality'] = {
73 | "0": "CT",
74 | }
75 | json_dict['labels'] = {
76 | "0": "background",
77 | "1": "Kidney",
78 | "2": "Tumor"
79 | }
80 |
81 | json_dict['numTraining'] = len(train_patient_names)
82 | json_dict['numTest'] = len(test_patient_names)
83 | json_dict['training'] = [{'image': "./imagesTr/%s.nii.gz" % i.split("/")[-1], "label": "./labelsTr/%s.nii.gz" % i.split("/")[-1]} for i in
84 | train_patient_names]
85 | json_dict['test'] = ["./imagesTs/%s.nii.gz" % i.split("/")[-1] for i in test_patient_names]
86 |
87 | save_json(json_dict, os.path.join(out_base, "dataset.json"))
88 |
--------------------------------------------------------------------------------
/nnUNet/nnunet/dataset_conversion/Task069_CovidSeg.py:
--------------------------------------------------------------------------------
1 | import shutil
2 |
3 | from batchgenerators.utilities.file_and_folder_operations import *
4 | import SimpleITK as sitk
5 | from nnunet.paths import nnUNet_raw_data
6 |
7 | if __name__ == '__main__':
8 | #data is available at http://medicalsegmentation.com/covid19/
9 | download_dir = '/home/fabian/Downloads'
10 |
11 | task_id = 69
12 | task_name = "CovidSeg"
13 |
14 | foldername = "Task%03.0d_%s" % (task_id, task_name)
15 |
16 | out_base = join(nnUNet_raw_data, foldername)
17 | imagestr = join(out_base, "imagesTr")
18 | imagests = join(out_base, "imagesTs")
19 | labelstr = join(out_base, "labelsTr")
20 | maybe_mkdir_p(imagestr)
21 | maybe_mkdir_p(imagests)
22 | maybe_mkdir_p(labelstr)
23 |
24 | train_patient_names = []
25 | test_patient_names = []
26 |
27 | # the niftis are 3d, but they are just stacks of 2d slices from different patients. So no 3d U-Net, please
28 |
29 | # the training stack has 100 slices, so we split it into 5 equally sized parts (20 slices each) for cross-validation
30 | training_data = sitk.GetArrayFromImage(sitk.ReadImage(join(download_dir, 'tr_im.nii.gz')))
31 | training_labels = sitk.GetArrayFromImage(sitk.ReadImage(join(download_dir, 'tr_mask.nii.gz')))
32 |
33 | for f in range(5):
34 | this_name = 'part_%d' % f
35 | data = training_data[f::5]
36 | labels = training_labels[f::5]
37 | sitk.WriteImage(sitk.GetImageFromArray(data), join(imagestr, this_name + '_0000.nii.gz'))
38 | sitk.WriteImage(sitk.GetImageFromArray(labels), join(labelstr, this_name + '.nii.gz'))
39 | train_patient_names.append(this_name)
40 |
41 | shutil.copy(join(download_dir, 'val_im.nii.gz'), join(imagests, 'val_im.nii.gz'))
42 |
43 | test_patient_names.append('val_im')
44 |
45 | json_dict = {}
46 | json_dict['name'] = task_name
47 | json_dict['description'] = ""
48 | json_dict['tensorImageSize'] = "4D"
49 | json_dict['reference'] = ""
50 | json_dict['licence'] = ""
51 | json_dict['release'] = "0.0"
52 | json_dict['modality'] = {
53 | "0": "nonct",
54 | }
55 | json_dict['labels'] = {
56 | "0": "background",
57 | "1": "stuff1",
58 | "2": "stuff2",
59 | "3": "stuff3",
60 | }
61 |
62 | json_dict['numTraining'] = len(train_patient_names)
63 | json_dict['numTest'] = len(test_patient_names)
64 | json_dict['training'] = [{'image': "./imagesTr/%s.nii.gz" % i.split("/")[-1], "label": "./labelsTr/%s.nii.gz" % i.split("/")[-1]} for i in
65 | train_patient_names]
66 | json_dict['test'] = ["./imagesTs/%s.nii.gz" % i.split("/")[-1] for i in test_patient_names]
67 |
68 | save_json(json_dict, os.path.join(out_base, "dataset.json"))
69 |
--------------------------------------------------------------------------------
/nnUNet/nnunet/dataset_conversion/__init__.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 |
3 | from . import *
--------------------------------------------------------------------------------
/nnUNet/nnunet/evaluation/__init__.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | from . import *
--------------------------------------------------------------------------------
/nnUNet/nnunet/evaluation/add_dummy_task_with_mean_over_all_tasks.py:
--------------------------------------------------------------------------------
1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | import json
16 | import numpy as np
17 | from batchgenerators.utilities.file_and_folder_operations import subfiles
18 | import os
19 | from collections import OrderedDict
20 |
21 | folder = "/home/fabian/drives/E132-Projekte/Projects/2018_MedicalDecathlon/Leaderboard"
22 | task_descriptors = ['2D final 2',
23 | '2D final, less pool, dc and topK, fold0',
24 | '2D final pseudo3d 7, fold0',
25 | '2D final, less pool, dc and ce, fold0',
26 | '3D stage0 final 2, fold0',
27 | '3D fullres final 2, fold0']
28 | task_ids_with_no_stage0 = ["Task001_BrainTumour", "Task004_Hippocampus", "Task005_Prostate"]
29 |
30 | mean_scores = OrderedDict()
31 | for t in task_descriptors:
32 | mean_scores[t] = OrderedDict()
33 |
34 | json_files = subfiles(folder, True, None, ".json", True)
35 | json_files = [i for i in json_files if not i.split("/")[-1].startswith(".")] # stupid mac
36 | for j in json_files:
37 | with open(j, 'r') as f:
38 | res = json.load(f)
39 | task = res['task']
40 | if task != "Task999_ALL":
41 | name = res['name']
42 | if name in task_descriptors:
43 | if task not in list(mean_scores[name].keys()):
44 | mean_scores[name][task] = res['results']['mean']['mean']
45 | else:
46 | raise RuntimeError("duplicate task %s for description %s" % (task, name))
47 |
48 | for t in task_ids_with_no_stage0:
49 | mean_scores["3D stage0 final 2, fold0"][t] = mean_scores["3D fullres final 2, fold0"][t]
50 |
51 | a = set()
52 | for i in mean_scores.keys():
53 | a = a.union(list(mean_scores[i].keys()))
54 |
55 | for i in mean_scores.keys():
56 | try:
57 | for t in list(a):
58 | assert t in mean_scores[i].keys(), "did not find task %s for experiment %s" % (t, i)
59 | new_res = OrderedDict()
60 | new_res['name'] = i
61 | new_res['author'] = "Fabian"
62 | new_res['task'] = "Task999_ALL"
63 | new_res['results'] = OrderedDict()
64 | new_res['results']['mean'] = OrderedDict()
65 | new_res['results']['mean']['mean'] = OrderedDict()
66 | tasks = list(mean_scores[i].keys())
67 | metrics = mean_scores[i][tasks[0]].keys()
68 | for m in metrics:
69 | foreground_values = [mean_scores[i][n][m] for n in tasks]
70 | new_res['results']['mean']["mean"][m] = np.nanmean(foreground_values)
71 | output_fname = i.replace(" ", "_") + "_globalMean.json"
72 | with open(os.path.join(folder, output_fname), 'w') as f:
73 | json.dump(new_res, f)
74 | except AssertionError:
75 | print("could not process experiment %s" % i)
76 | print("did not find task %s for experiment %s" % (t, i))
77 |
78 |
--------------------------------------------------------------------------------
/nnUNet/nnunet/evaluation/add_mean_dice_to_json.py:
--------------------------------------------------------------------------------
1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | import json
16 | import numpy as np
17 | from batchgenerators.utilities.file_and_folder_operations import subfiles
18 | from collections import OrderedDict
19 |
20 |
21 | def foreground_mean(filename):
22 | with open(filename, 'r') as f:
23 | res = json.load(f)
24 | class_ids = np.array([int(i) for i in res['results']['mean'].keys() if (i != 'mean')])
25 | class_ids = class_ids[class_ids != 0]
26 | class_ids = class_ids[class_ids != -1]
27 | class_ids = class_ids[class_ids != 99]
28 |
29 | tmp = res['results']['mean'].get('99')
30 | if tmp is not None:
31 | _ = res['results']['mean'].pop('99')
32 |
33 | metrics = res['results']['mean']['1'].keys()
34 | res['results']['mean']["mean"] = OrderedDict()
35 | for m in metrics:
36 | foreground_values = [res['results']['mean'][str(i)][m] for i in class_ids]
37 | res['results']['mean']["mean"][m] = np.nanmean(foreground_values)
38 | with open(filename, 'w') as f:
39 | json.dump(res, f, indent=4, sort_keys=True)
40 |
41 |
42 | def run_in_folder(folder):
43 | json_files = subfiles(folder, True, None, ".json", True)
44 | json_files = [i for i in json_files if not i.split("/")[-1].startswith(".") and not i.endswith("_globalMean.json")] # stupid mac
45 | for j in json_files:
46 | foreground_mean(j)
47 |
48 |
49 | if __name__ == "__main__":
50 | folder = "/media/fabian/Results/nnUNetOutput_final/summary_jsons"
51 | run_in_folder(folder)
52 |
--------------------------------------------------------------------------------
/nnUNet/nnunet/evaluation/collect_results_files.py:
--------------------------------------------------------------------------------
1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | import os
16 | import shutil
17 | from batchgenerators.utilities.file_and_folder_operations import subdirs, subfiles
18 |
19 |
20 | def crawl_and_copy(current_folder, out_folder, prefix="fabian_", suffix="ummary.json"):
21 | """
22 | This script will run recursively through all subfolders of current_folder and copy all files that end with
23 | suffix with some automatically generated prefix into out_folder
24 | :param current_folder:
25 | :param out_folder:
26 | :param prefix:
27 | :return:
28 | """
29 | s = subdirs(current_folder, join=False)
30 | f = subfiles(current_folder, join=False)
31 | f = [i for i in f if i.endswith(suffix)]
32 | if current_folder.find("fold0") != -1:
33 | for fl in f:
34 | shutil.copy(os.path.join(current_folder, fl), os.path.join(out_folder, prefix+fl))
35 | for su in s:
36 | if prefix == "":
37 | add = su
38 | else:
39 | add = "__" + su
40 | crawl_and_copy(os.path.join(current_folder, su), out_folder, prefix=prefix+add)
41 |
42 |
43 | if __name__ == "__main__":
44 | from nnunet.paths import network_training_output_dir
45 | output_folder = "/home/fabian/PhD/results/nnUNetV2/leaderboard"
46 | crawl_and_copy(network_training_output_dir, output_folder)
47 | from nnunet.evaluation.add_mean_dice_to_json import run_in_folder
48 | run_in_folder(output_folder)
49 |
--------------------------------------------------------------------------------
/nnUNet/nnunet/evaluation/model_selection/__init__.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | from . import *
--------------------------------------------------------------------------------
/nnUNet/nnunet/evaluation/surface_dice.py:
--------------------------------------------------------------------------------
1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 |
16 | import numpy as np
17 | from medpy.metric.binary import __surface_distances
18 |
19 |
20 | def normalized_surface_dice(a: np.ndarray, b: np.ndarray, threshold: float, spacing: tuple = None, connectivity=1):
21 | """
22 | This implementation differs from the official surface dice implementation! These two are not comparable!!!!!
23 |
24 | The normalized surface dice is symmetric, so it should not matter whether a or b is the reference image
25 |
26 | This implementation natively supports 2D and 3D images. Whether other dimensions are supported depends on the
27 | __surface_distances implementation in medpy
28 |
29 | :param a: image 1, must have the same shape as b
30 | :param b: image 2, must have the same shape as a
31 | :param threshold: distances below this threshold will be counted as true positives. Threshold is in mm, not voxels!
32 | (if spacing = (1, 1(, 1)) then one voxel=1mm so the threshold is effectively in voxels)
33 |     :param spacing: how many mm is one voxel in reality? Must be a tuple of length len(a.shape). Can be left at
34 |     None, in which case an isotropic spacing of 1 mm is assumed
35 | :param connectivity: see scipy.ndimage.generate_binary_structure for more information. I suggest you leave that
36 | one alone
37 | :return:
38 | """
39 | assert all([i == j for i, j in zip(a.shape, b.shape)]), "a and b must have the same shape. a.shape= %s, " \
40 | "b.shape= %s" % (str(a.shape), str(b.shape))
41 | if spacing is None:
42 | spacing = tuple([1 for _ in range(len(a.shape))])
43 | a_to_b = __surface_distances(a, b, spacing, connectivity)
44 | b_to_a = __surface_distances(b, a, spacing, connectivity)
45 |
46 | numel_a = len(a_to_b)
47 | numel_b = len(b_to_a)
48 |
49 | tp_a = np.sum(a_to_b <= threshold) / numel_a
50 | tp_b = np.sum(b_to_a <= threshold) / numel_b
51 |
52 | fp = np.sum(a_to_b > threshold) / numel_a
53 | fn = np.sum(b_to_a > threshold) / numel_b
54 |
55 | dc = (tp_a + tp_b) / (tp_a + tp_b + fp + fn + 1e-8) # 1e-8 just so that we don't get div by 0
56 | return dc
57 |
58 |
--------------------------------------------------------------------------------
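A minimal usage sketch for normalized_surface_dice above (the arrays and spacing are illustrative; medpy must be installed). The threshold is given in mm and the spacing tuple converts voxel distances to mm:

import numpy as np
from nnunet.evaluation.surface_dice import normalized_surface_dice

# two binary masks of identical shape (made-up data, one shifted by one slice)
seg_pred = np.zeros((32, 64, 64), dtype=np.uint8)
seg_ref = np.zeros((32, 64, 64), dtype=np.uint8)
seg_pred[10:20, 20:40, 20:40] = 1
seg_ref[11:21, 20:40, 20:40] = 1

# 2 mm slice thickness, 1 mm in-plane; tolerate surface deviations of up to 2 mm
nsd = normalized_surface_dice(seg_pred, seg_ref, threshold=2.0, spacing=(2.0, 1.0, 1.0))
print(nsd)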
/nnUNet/nnunet/experiment_planning/__init__.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | from . import *
--------------------------------------------------------------------------------
/nnUNet/nnunet/experiment_planning/alternative_experiment_planning/experiment_planner_baseline_3DUNet_v21_3convperstage.py:
--------------------------------------------------------------------------------
1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | from copy import deepcopy
16 |
17 | import numpy as np
18 | from nnunet.experiment_planning.common_utils import get_pool_and_conv_props
19 | from nnunet.experiment_planning.experiment_planner_baseline_3DUNet import ExperimentPlanner
20 | from nnunet.experiment_planning.experiment_planner_baseline_3DUNet_v21 import ExperimentPlanner3D_v21
21 | from nnunet.network_architecture.generic_UNet import Generic_UNet
22 | from nnunet.paths import *
23 |
24 |
25 | class ExperimentPlanner3D_v21_3cps(ExperimentPlanner3D_v21):
26 | """
27 | have 3x conv-in-lrelu per resolution instead of 2 while remaining in the same memory budget
28 |
29 |     This only works with 3d fullres because we use the same data as ExperimentPlanner3D_v21. Lowres would require
30 |     rerunning preprocessing (a different patch size means a different 3d lowres target spacing)
31 | """
32 | def __init__(self, folder_with_cropped_data, preprocessed_output_folder):
33 | super(ExperimentPlanner3D_v21_3cps, self).__init__(folder_with_cropped_data, preprocessed_output_folder)
34 | self.plans_fname = join(self.preprocessed_output_folder,
35 | "nnUNetPlansv2.1_3cps_plans_3D.pkl")
36 | self.unet_base_num_features = 32
37 | self.conv_per_stage = 3
38 |
39 | def run_preprocessing(self, num_threads):
40 | pass
41 |
--------------------------------------------------------------------------------
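A rough sketch of how an alternative planner like ExperimentPlanner3D_v21_3cps is driven through the base ExperimentPlanner interface; the task folders below are placeholders and are assumed to already contain the cropped data and dataset properties produced by nnU-Net's analysis step:

from nnunet.experiment_planning.alternative_experiment_planning.experiment_planner_baseline_3DUNet_v21_3convperstage import \
    ExperimentPlanner3D_v21_3cps

cropped = "/path/to/nnUNet_cropped_data/Task004_Hippocampus"        # placeholder
preprocessed = "/path/to/nnUNet_preprocessed/Task004_Hippocampus"   # placeholder

planner = ExperimentPlanner3D_v21_3cps(cropped, preprocessed)
planner.plan_experiment()       # writes nnUNetPlansv2.1_3cps_plans_3D.pkl
planner.run_preprocessing(8)    # intentionally a no-op: the v2.1 preprocessed data is reused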
/nnUNet/nnunet/experiment_planning/alternative_experiment_planning/experiment_planner_baseline_3DUNet_v22.py:
--------------------------------------------------------------------------------
1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | import numpy as np
16 | from nnunet.experiment_planning.experiment_planner_baseline_3DUNet_v21 import \
17 | ExperimentPlanner3D_v21
18 | from nnunet.paths import *
19 |
20 |
21 | class ExperimentPlanner3D_v22(ExperimentPlanner3D_v21):
22 | """
23 | """
24 | def __init__(self, folder_with_cropped_data, preprocessed_output_folder):
25 | super().__init__(folder_with_cropped_data, preprocessed_output_folder)
26 | self.data_identifier = "nnUNetData_plans_v2.2"
27 | self.plans_fname = join(self.preprocessed_output_folder,
28 | "nnUNetPlansv2.2_plans_3D.pkl")
29 |
30 | def get_target_spacing(self):
31 | spacings = self.dataset_properties['all_spacings']
32 | sizes = self.dataset_properties['all_sizes']
33 |
34 | target = np.percentile(np.vstack(spacings), self.target_spacing_percentile, 0)
35 | target_size = np.percentile(np.vstack(sizes), self.target_spacing_percentile, 0)
36 | target_size_mm = np.array(target) * np.array(target_size)
37 | # we need to identify datasets for which a different target spacing could be beneficial. These datasets have
38 | # the following properties:
39 |         # - one axis with much lower resolution than the others
40 |         # - the lowres axis has far fewer voxels than the others
41 | # - (the size in mm of the lowres axis is also reduced)
42 | worst_spacing_axis = np.argmax(target)
43 | other_axes = [i for i in range(len(target)) if i != worst_spacing_axis]
44 | other_spacings = [target[i] for i in other_axes]
45 | other_sizes = [target_size[i] for i in other_axes]
46 |
47 | has_aniso_spacing = target[worst_spacing_axis] > (self.anisotropy_threshold * max(other_spacings))
48 | has_aniso_voxels = target_size[worst_spacing_axis] * self.anisotropy_threshold < min(other_sizes)
49 | # we don't use the last one for now
50 | #median_size_in_mm = target[target_size_mm] * RESAMPLING_SEPARATE_Z_ANISOTROPY_THRESHOLD < max(target_size_mm)
51 |
52 | if has_aniso_spacing and has_aniso_voxels:
53 | spacings_of_that_axis = np.vstack(spacings)[:, worst_spacing_axis]
54 | target_spacing_of_that_axis = np.percentile(spacings_of_that_axis, 10)
55 |             # don't let the target spacing of that axis become finer than self.anisotropy_threshold * max(other_spacings)
56 | target_spacing_of_that_axis = max(max(other_spacings) * self.anisotropy_threshold, target_spacing_of_that_axis)
57 | target[worst_spacing_axis] = target_spacing_of_that_axis
58 | return target
59 |
60 |
--------------------------------------------------------------------------------
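To make the anisotropy check in get_target_spacing concrete, a small standalone sketch with made-up numbers (a thick-slice dataset; 3 is used as the anisotropy threshold purely for illustration):

import numpy as np

target = np.array([5.0, 0.8, 0.8])        # median spacing in mm per axis
target_size = np.array([40, 512, 512])    # median shape in voxels per axis
anisotropy_threshold = 3                  # illustrative value

worst_spacing_axis = np.argmax(target)    # axis 0
other_axes = [i for i in range(len(target)) if i != worst_spacing_axis]
other_spacings = [target[i] for i in other_axes]
other_sizes = [target_size[i] for i in other_axes]

has_aniso_spacing = target[worst_spacing_axis] > (anisotropy_threshold * max(other_spacings))  # 5.0 > 2.4 -> True
has_aniso_voxels = target_size[worst_spacing_axis] * anisotropy_threshold < min(other_sizes)   # 120 < 512 -> True
# both True -> target[0] would be replaced by a low percentile of the per-case spacings on that axis
print(has_aniso_spacing, has_aniso_voxels)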
/nnUNet/nnunet/experiment_planning/alternative_experiment_planning/experiment_planner_baseline_3DUNet_v23.py:
--------------------------------------------------------------------------------
1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | from nnunet.experiment_planning.experiment_planner_baseline_3DUNet_v21 import \
16 | ExperimentPlanner3D_v21
17 | from nnunet.paths import *
18 |
19 |
20 | class ExperimentPlanner3D_v23(ExperimentPlanner3D_v21):
21 | """
22 | """
23 | def __init__(self, folder_with_cropped_data, preprocessed_output_folder):
24 | super(ExperimentPlanner3D_v23, self).__init__(folder_with_cropped_data, preprocessed_output_folder)
25 | self.data_identifier = "nnUNetData_plans_v2.3"
26 | self.plans_fname = join(self.preprocessed_output_folder,
27 | "nnUNetPlansv2.3_plans_3D.pkl")
28 | self.preprocessor_name = "Preprocessor3DDifferentResampling"
29 |
--------------------------------------------------------------------------------
/nnUNet/nnunet/experiment_planning/alternative_experiment_planning/normalization/experiment_planner_3DUNet_CT2.py:
--------------------------------------------------------------------------------
1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 |
16 | from collections import OrderedDict
17 |
18 | from nnunet.experiment_planning.experiment_planner_baseline_3DUNet import ExperimentPlanner
19 | from nnunet.paths import *
20 |
21 |
22 | class ExperimentPlannerCT2(ExperimentPlanner):
23 | """
24 | preprocesses CT data with the "CT2" normalization.
25 |
26 | (clip range comes from training set and is the 0.5 and 99.5 percentile of intensities in foreground)
27 | CT = clip to range, then normalize with global mn and sd (computed on foreground in training set)
28 | CT2 = clip to range, normalize each case separately with its own mn and std (computed within the area that was in clip_range)
29 | """
30 | def __init__(self, folder_with_cropped_data, preprocessed_output_folder):
31 | super(ExperimentPlannerCT2, self).__init__(folder_with_cropped_data, preprocessed_output_folder)
32 | self.data_identifier = "nnUNet_CT2"
33 | self.plans_fname = join(self.preprocessed_output_folder, "nnUNetPlans" + "CT2_plans_3D.pkl")
34 |
35 | def determine_normalization_scheme(self):
36 | schemes = OrderedDict()
37 | modalities = self.dataset_properties['modalities']
38 | num_modalities = len(list(modalities.keys()))
39 |
40 | for i in range(num_modalities):
41 | if modalities[i] == "CT":
42 | schemes[i] = "CT2"
43 | else:
44 | schemes[i] = "nonCT"
45 | return schemes
46 |
--------------------------------------------------------------------------------
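A rough numpy sketch of the difference between the two schemes described in the docstring, not the actual preprocessing code; it assumes the clip range and the global foreground mean/std were already computed on the training set (all names below are illustrative):

import numpy as np

def normalize_ct(image, lower, upper, global_mean, global_std):
    # "CT": clip to the dataset-wide range, then normalize with the global foreground statistics
    return (np.clip(image, lower, upper) - global_mean) / global_std

def normalize_ct2(image, lower, upper):
    # "CT2": clip to the same range, but normalize each case with its own mean/std,
    # computed only over the voxels that lie inside the clip range
    clipped = np.clip(image, lower, upper)
    mask = (image > lower) & (image < upper)
    return (clipped - clipped[mask].mean()) / (clipped[mask].std() + 1e-8)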
/nnUNet/nnunet/experiment_planning/alternative_experiment_planning/normalization/experiment_planner_3DUNet_nonCT.py:
--------------------------------------------------------------------------------
1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 |
16 | from collections import OrderedDict
17 |
18 | from nnunet.experiment_planning.experiment_planner_baseline_3DUNet import ExperimentPlanner
19 | from nnunet.paths import *
20 |
21 |
22 | class ExperimentPlannernonCT(ExperimentPlanner):
23 | """
24 |     Preprocesses all data in nonCT mode (this is what we use for MRI by default, but here it is applied to CT images
25 | as well)
26 | """
27 | def __init__(self, folder_with_cropped_data, preprocessed_output_folder):
28 | super(ExperimentPlannernonCT, self).__init__(folder_with_cropped_data, preprocessed_output_folder)
29 | self.data_identifier = "nnUNet_nonCT"
30 | self.plans_fname = join(self.preprocessed_output_folder, "nnUNetPlans" + "nonCT_plans_3D.pkl")
31 |
32 | def determine_normalization_scheme(self):
33 | schemes = OrderedDict()
34 | modalities = self.dataset_properties['modalities']
35 | num_modalities = len(list(modalities.keys()))
36 |
37 | for i in range(num_modalities):
38 | if modalities[i] == "CT":
39 | schemes[i] = "nonCT"
40 | else:
41 | schemes[i] = "nonCT"
42 | return schemes
43 |
44 |
--------------------------------------------------------------------------------
/nnUNet/nnunet/experiment_planning/alternative_experiment_planning/readme.md:
--------------------------------------------------------------------------------
1 | These alternatives are not used in nnU-Net, but you can use them if you believe they might be better suited for you.
2 | I (Fabian) have not found them to be consistently superior.
--------------------------------------------------------------------------------
/nnUNet/nnunet/experiment_planning/change_batch_size.py:
--------------------------------------------------------------------------------
1 | from batchgenerators.utilities.file_and_folder_operations import *
2 | import numpy as np
3 |
4 | if __name__ == '__main__':
5 | input_file = '/home/fabian/data/nnUNet_preprocessed/Task004_Hippocampus/nnUNetPlansv2.1_plans_3D.pkl'
6 | output_file = '/home/fabian/data/nnUNet_preprocessed/Task004_Hippocampus/nnUNetPlansv2.1_LISA_plans_3D.pkl'
7 | a = load_pickle(input_file)
8 | a['plans_per_stage'][0]['batch_size'] = int(np.floor(6 / 9 * a['plans_per_stage'][0]['batch_size']))
9 | save_pickle(a, output_file)
--------------------------------------------------------------------------------
/nnUNet/nnunet/inference/__init__.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | from . import *
--------------------------------------------------------------------------------
/nnUNet/nnunet/inference/change_trainer.py:
--------------------------------------------------------------------------------
1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 |
16 | from batchgenerators.utilities.file_and_folder_operations import *
17 |
18 |
19 | def pretend_to_be_nnUNetTrainer(folder, checkpoints=("model_best.model.pkl", "model_latest.model.pkl", "model_final_checkpoint.model.pkl")):
20 | pretend_to_be_other_trainer(folder, "nnUNetTrainer", checkpoints)
21 |
22 |
23 | def pretend_to_be_other_trainer(folder, new_trainer_name, checkpoints=("model_best.model.pkl", "model_latest.model.pkl", "model_final_checkpoint.model.pkl")):
24 | folds = subdirs(folder, prefix="fold_", join=False)
25 |
26 | if isdir(join(folder, 'all')):
27 | folds.append('all')
28 |
29 | for c in checkpoints:
30 | for f in folds:
31 | checkpoint_file = join(folder, f, c)
32 | if isfile(checkpoint_file):
33 | a = load_pickle(checkpoint_file)
34 | a['name'] = new_trainer_name
35 | save_pickle(a, checkpoint_file)
36 |
37 |
38 | def main():
39 | import argparse
40 | parser = argparse.ArgumentParser(description='Use this script to change the nnunet trainer class of a saved '
41 | 'model. Useful for models that were trained with trainers that do '
42 | 'not support inference (multi GPU trainers) or for trainer classes '
43 | 'whose source code is not available. For this to work the network '
44 | 'architecture must be identical between the original trainer '
45 | 'class and the trainer class we are changing to. This script is '
46 | 'experimental and only to be used by advanced users.')
47 | parser.add_argument('-i', help='Folder containing the trained model. This folder is the one containing the '
48 | 'fold_X subfolders.')
49 | parser.add_argument('-tr', help='Name of the new trainer class')
50 | args = parser.parse_args()
51 | pretend_to_be_other_trainer(args.i, args.tr)
52 |
--------------------------------------------------------------------------------
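A short usage sketch for the helpers above; the model folder is a placeholder and must contain the fold_X subdirectories with the saved checkpoint .pkl files:

from nnunet.inference.change_trainer import pretend_to_be_nnUNetTrainer, pretend_to_be_other_trainer

model_folder = "/path/to/RESULTS_FOLDER/nnUNet/3d_fullres/Task004_Hippocampus/nnUNetTrainerV2__nnUNetPlansv2.1"  # placeholder

# rewrite the 'name' field of every checkpoint pickle so inference instantiates nnUNetTrainer
pretend_to_be_nnUNetTrainer(model_folder)

# or switch to any other trainer class whose architecture matches the saved weights
pretend_to_be_other_trainer(model_folder, "nnUNetTrainerV2")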
/nnUNet/nnunet/network_architecture/__init__.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | from . import *
--------------------------------------------------------------------------------
/nnUNet/nnunet/network_architecture/custom_modules/feature_response_normalization.py:
--------------------------------------------------------------------------------
1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 |
16 | from nnunet.utilities.tensor_utilities import mean_tensor
17 | from torch import nn
18 | import torch
19 | from torch.nn.parameter import Parameter
20 | import torch.jit
21 |
22 |
23 | class FRN3D(nn.Module):
24 | def __init__(self, num_features: int, eps=1e-6, **kwargs):
25 | super().__init__()
26 | self.eps = eps
27 | self.num_features = num_features
28 | self.weight = Parameter(torch.ones(1, num_features, 1, 1, 1), True)
29 | self.bias = Parameter(torch.zeros(1, num_features, 1, 1, 1), True)
30 | self.tau = Parameter(torch.zeros(1, num_features, 1, 1, 1), True)
31 |
32 | def forward(self, x: torch.Tensor):
33 | x = x * torch.rsqrt(mean_tensor(x * x, [2, 3, 4], keepdim=True) + self.eps)
34 |
35 | return torch.max(self.weight * x + self.bias, self.tau)
36 |
37 |
38 | if __name__ == "__main__":
39 | tmp = torch.rand((3, 32, 16, 16, 16))
40 |
41 | frn = FRN3D(32)
42 |
43 | out = frn(tmp)
44 |
--------------------------------------------------------------------------------
/nnUNet/nnunet/network_architecture/custom_modules/helperModules.py:
--------------------------------------------------------------------------------
1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 |
16 | from torch import nn
17 |
18 |
19 | class Identity(nn.Module):
20 | def __init__(self, *args, **kwargs):
21 | super().__init__()
22 |
23 | def forward(self, input):
24 | return input
25 |
26 |
27 | class MyGroupNorm(nn.GroupNorm):
28 | def __init__(self, num_channels, eps=1e-5, affine=True, num_groups=8):
29 | super(MyGroupNorm, self).__init__(num_groups, num_channels, eps, affine)
30 |
--------------------------------------------------------------------------------
/nnUNet/nnunet/network_architecture/custom_modules/mish.py:
--------------------------------------------------------------------------------
1 | ############
2 | # https://github.com/lessw2020/mish/blob/master/mish.py
3 | # This code was taken from the repo above and was not created by me (Fabian)! Full credit goes to the original authors
4 | ############
5 |
6 | import torch
7 |
8 | import torch.nn as nn
9 | import torch.nn.functional as F
10 |
11 |
12 | # Mish - "Mish: A Self Regularized Non-Monotonic Neural Activation Function"
13 | # https://arxiv.org/abs/1908.08681v1
14 | # implemented for PyTorch / FastAI by lessw2020
15 | # github: https://github.com/lessw2020/mish
16 |
17 | class Mish(nn.Module):
18 | def __init__(self):
19 | super().__init__()
20 |
21 | def forward(self, x):
22 | # inlining this saves 1 second per epoch (V100 GPU) vs having a temp x and then returning x(!)
23 | return x * (torch.tanh(F.softplus(x)))
24 |
--------------------------------------------------------------------------------
/nnUNet/nnunet/network_architecture/initialization.py:
--------------------------------------------------------------------------------
1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 |
16 | from torch import nn
17 |
18 |
19 | class InitWeights_He(object):
20 | def __init__(self, neg_slope=1e-2):
21 | self.neg_slope = neg_slope
22 |
23 | def __call__(self, module):
24 | if isinstance(module, nn.Conv3d) or isinstance(module, nn.Conv2d) or isinstance(module, nn.ConvTranspose2d) or isinstance(module, nn.ConvTranspose3d):
25 | module.weight = nn.init.kaiming_normal_(module.weight, a=self.neg_slope)
26 | if module.bias is not None:
27 | module.bias = nn.init.constant_(module.bias, 0)
28 |
29 |
30 | class InitWeights_XavierUniform(object):
31 | def __init__(self, gain=1):
32 | self.gain = gain
33 |
34 | def __call__(self, module):
35 | if isinstance(module, nn.Conv3d) or isinstance(module, nn.Conv2d) or isinstance(module, nn.ConvTranspose2d) or isinstance(module, nn.ConvTranspose3d):
36 | module.weight = nn.init.xavier_uniform_(module.weight, self.gain)
37 | if module.bias is not None:
38 | module.bias = nn.init.constant_(module.bias, 0)
39 |
--------------------------------------------------------------------------------
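Both initializers are plain callables; the trainers further below hand InitWeights_He(1e-2) to Generic_UNet, which applies it to its submodules. Outside of that, the same effect can be had with nn.Module.apply, as in this minimal standalone sketch:

from torch import nn
from nnunet.network_architecture.initialization import InitWeights_He

net = nn.Sequential(nn.Conv3d(1, 8, 3, padding=1), nn.LeakyReLU(1e-2), nn.Conv3d(8, 2, 1))
net.apply(InitWeights_He(1e-2))  # visits every submodule and re-initializes conv weights and biases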
/nnUNet/nnunet/paths.py:
--------------------------------------------------------------------------------
1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | import os
16 | from batchgenerators.utilities.file_and_folder_operations import maybe_mkdir_p, join
17 |
18 | # do not modify these unless you know what you are doing
19 | my_output_identifier = "nnUNet"
20 | default_plans_identifier = "nnUNetPlansv2.1"
21 | default_data_identifier = 'nnUNet'
22 | default_trainer = "nnUNetTrainerV2"
23 | default_cascade_trainer = "nnUNetTrainerV2CascadeFullRes"
24 |
25 | """
26 | PLEASE READ paths.md FOR INFORMATION ON HOW TO SET THIS UP
27 | """
28 |
29 | base = os.environ['nnUNet_raw_data_base'] if "nnUNet_raw_data_base" in os.environ.keys() else None
30 | preprocessing_output_dir = os.environ['nnUNet_preprocessed'] if "nnUNet_preprocessed" in os.environ.keys() else None
31 | network_training_output_dir_base = os.path.join(os.environ['RESULTS_FOLDER']) if "RESULTS_FOLDER" in os.environ.keys() else None
32 |
33 | if base is not None:
34 | nnUNet_raw_data = join(base, "nnUNet_raw_data")
35 | nnUNet_cropped_data = join(base, "nnUNet_cropped_data")
36 | maybe_mkdir_p(nnUNet_raw_data)
37 | maybe_mkdir_p(nnUNet_cropped_data)
38 | else:
39 | print("nnUNet_raw_data_base is not defined and nnU-Net can only be used on data for which preprocessed files "
40 | "are already present on your system. nnU-Net cannot be used for experiment planning and preprocessing like "
41 | "this. If this is not intended, please read nnunet/paths.md for information on how to set this up properly.")
42 | nnUNet_cropped_data = nnUNet_raw_data = None
43 |
44 | if preprocessing_output_dir is not None:
45 | maybe_mkdir_p(preprocessing_output_dir)
46 | else:
47 |     print("nnUNet_preprocessed is not defined and nnU-Net cannot be used for preprocessing "
48 |           "or training. If this is not intended, please read nnunet/paths.md for information on how to set this up.")
49 | preprocessing_output_dir = None
50 |
51 | if network_training_output_dir_base is not None:
52 | network_training_output_dir = join(network_training_output_dir_base, my_output_identifier)
53 | maybe_mkdir_p(network_training_output_dir)
54 | else:
55 | print("RESULTS_FOLDER is not defined and nnU-Net cannot be used for training or "
56 | "inference. If this is not intended behavior, please read nnunet/paths.md for information on how to set this "
57 | "up")
58 | network_training_output_dir = None
59 |
--------------------------------------------------------------------------------
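The three environment variables above are read once at import time, so they have to be defined before nnunet.paths is imported (normally they are exported in the shell); a minimal sketch with placeholder directories, which are also created on import:

import os

os.environ["nnUNet_raw_data_base"] = "/data/nnUNet_raw_data_base"   # placeholder paths
os.environ["nnUNet_preprocessed"] = "/data/nnUNet_preprocessed"
os.environ["RESULTS_FOLDER"] = "/data/nnUNet_results"

from nnunet.paths import nnUNet_raw_data, preprocessing_output_dir, network_training_output_dir
print(nnUNet_raw_data, preprocessing_output_dir, network_training_output_dir)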
/nnUNet/nnunet/postprocessing/consolidate_all_for_paper.py:
--------------------------------------------------------------------------------
1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 |
16 | from nnunet.utilities.folder_names import get_output_folder_name
17 |
18 |
19 | def get_datasets():
20 | configurations_all = {
21 | "Task01_BrainTumour": ("3d_fullres", "2d"),
22 | "Task02_Heart": ("3d_fullres", "2d",),
23 | "Task03_Liver": ("3d_cascade_fullres", "3d_fullres", "3d_lowres", "2d"),
24 | "Task04_Hippocampus": ("3d_fullres", "2d",),
25 | "Task05_Prostate": ("3d_fullres", "2d",),
26 | "Task06_Lung": ("3d_cascade_fullres", "3d_fullres", "3d_lowres", "2d"),
27 | "Task07_Pancreas": ("3d_cascade_fullres", "3d_fullres", "3d_lowres", "2d"),
28 | "Task08_HepaticVessel": ("3d_cascade_fullres", "3d_fullres", "3d_lowres", "2d"),
29 | "Task09_Spleen": ("3d_cascade_fullres", "3d_fullres", "3d_lowres", "2d"),
30 | "Task10_Colon": ("3d_cascade_fullres", "3d_fullres", "3d_lowres", "2d"),
31 | "Task48_KiTS_clean": ("3d_cascade_fullres", "3d_lowres", "3d_fullres", "2d"),
32 | "Task27_ACDC": ("3d_fullres", "2d",),
33 | "Task24_Promise": ("3d_fullres", "2d",),
34 | "Task35_ISBILesionSegmentation": ("3d_fullres", "2d",),
35 | "Task38_CHAOS_Task_3_5_Variant2": ("3d_fullres", "2d",),
36 | "Task29_LITS": ("3d_cascade_fullres", "3d_lowres", "2d", "3d_fullres",),
37 | "Task17_AbdominalOrganSegmentation": ("3d_cascade_fullres", "3d_lowres", "2d", "3d_fullres",),
38 | "Task55_SegTHOR": ("3d_cascade_fullres", "3d_lowres", "3d_fullres", "2d",),
39 | "Task56_VerSe": ("3d_cascade_fullres", "3d_lowres", "3d_fullres", "2d",),
40 | }
41 | return configurations_all
42 |
43 |
44 | def get_commands(configurations, regular_trainer="nnUNetTrainerV2", cascade_trainer="nnUNetTrainerV2CascadeFullRes",
45 | plans="nnUNetPlansv2.1"):
46 |
47 | node_pool = ["hdf18-gpu%02.0d" % i for i in range(1, 21)] + ["hdf19-gpu%02.0d" % i for i in range(1, 8)] + ["hdf19-gpu%02.0d" % i for i in range(11, 16)]
48 | ctr = 0
49 | for task in configurations:
50 | models = configurations[task]
51 | for m in models:
52 | if m == "3d_cascade_fullres":
53 | trainer = cascade_trainer
54 | else:
55 | trainer = regular_trainer
56 |
57 | folder = get_output_folder_name(m, task, trainer, plans, overwrite_training_output_dir="/datasets/datasets_fabian/results/nnUNet")
58 | node = node_pool[ctr % len(node_pool)]
59 | print("bsub -m %s -q gputest -L /bin/bash \"source ~/.bashrc && python postprocessing/"
60 | "consolidate_postprocessing.py -f" % node, folder, "\"")
61 | ctr += 1
62 |
--------------------------------------------------------------------------------
/nnUNet/nnunet/postprocessing/consolidate_postprocessing_simple.py:
--------------------------------------------------------------------------------
1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 |
16 | import argparse
17 | from nnunet.postprocessing.consolidate_postprocessing import consolidate_folds
18 | from nnunet.utilities.folder_names import get_output_folder_name
19 | from nnunet.utilities.task_name_id_conversion import convert_id_to_task_name
20 | from nnunet.paths import default_cascade_trainer, default_trainer, default_plans_identifier
21 |
22 |
23 | def main():
24 | argparser = argparse.ArgumentParser(usage="Used to determine the postprocessing for a trained model. Useful for "
25 |                                               "when the best configuration (2d, 3d_fullres etc) was selected manually.")
26 | argparser.add_argument("-m", type=str, required=True, help="U-Net model (2d, 3d_lowres, 3d_fullres or "
27 | "3d_cascade_fullres)")
28 | argparser.add_argument("-t", type=str, required=True, help="Task name or id")
29 | argparser.add_argument("-tr", type=str, required=False, default=None,
30 | help="nnUNetTrainer class. Default: %s, unless 3d_cascade_fullres "
31 | "(then it's %s)" % (default_trainer, default_cascade_trainer))
32 | argparser.add_argument("-pl", type=str, required=False, default=default_plans_identifier,
33 | help="Plans name, Default=%s" % default_plans_identifier)
34 | argparser.add_argument("-val", type=str, required=False, default="validation_raw",
35 | help="Validation folder name. Default: validation_raw")
36 |
37 | args = argparser.parse_args()
38 | model = args.m
39 | task = args.t
40 | trainer = args.tr
41 | plans = args.pl
42 | val = args.val
43 |
44 | if not task.startswith("Task"):
45 | task_id = int(task)
46 | task = convert_id_to_task_name(task_id)
47 |
48 | if trainer is None:
49 | if model == "3d_cascade_fullres":
50 | trainer = "nnUNetTrainerV2CascadeFullRes"
51 | else:
52 | trainer = "nnUNetTrainerV2"
53 |
54 | folder = get_output_folder_name(model, task, trainer, plans, None)
55 |
56 | consolidate_folds(folder, val)
57 |
58 |
59 | if __name__ == "__main__":
60 | main()
61 |
--------------------------------------------------------------------------------
/nnUNet/nnunet/preprocessing/__init__.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | from . import *
--------------------------------------------------------------------------------
/nnUNet/nnunet/run/__init__.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | from . import *
--------------------------------------------------------------------------------
/nnUNet/nnunet/training/__init__.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | from . import *
--------------------------------------------------------------------------------
/nnUNet/nnunet/training/cascade_stuff/__init__.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | from . import *
--------------------------------------------------------------------------------
/nnUNet/nnunet/training/data_augmentation/__init__.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | from . import *
--------------------------------------------------------------------------------
/nnUNet/nnunet/training/dataloading/__init__.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | from . import *
--------------------------------------------------------------------------------
/nnUNet/nnunet/training/learning_rate/poly_lr.py:
--------------------------------------------------------------------------------
1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 |
16 | def poly_lr(epoch, max_epochs, initial_lr, exponent=0.9):
17 | return initial_lr * (1 - epoch / max_epochs)**exponent
18 |
--------------------------------------------------------------------------------
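The trainers apply poly_lr by hand each epoch instead of using a torch lr scheduler (see the "self.lr_scheduler = None because we do poly_lr" note further below); a minimal sketch with a placeholder optimizer:

import torch
from nnunet.training.learning_rate.poly_lr import poly_lr

params = [torch.nn.Parameter(torch.zeros(1))]
optimizer = torch.optim.SGD(params, lr=1e-2, momentum=0.99, nesterov=True)

max_epochs, initial_lr = 1000, 1e-2
for epoch in range(max_epochs):
    optimizer.param_groups[0]['lr'] = poly_lr(epoch, max_epochs, initial_lr, exponent=0.9)
    # ... one training epoch would run here ...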
/nnUNet/nnunet/training/loss_functions/TopK_loss.py:
--------------------------------------------------------------------------------
1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | import numpy as np
16 | import torch
17 | from nnunet.training.loss_functions.crossentropy import RobustCrossEntropyLoss
18 |
19 |
20 | class TopKLoss(RobustCrossEntropyLoss):
21 | """
22 |     Network has to have NO NONLINEARITY! (the softmax is applied internally by the cross entropy loss)
23 | """
24 | def __init__(self, weight=None, ignore_index=-100, k=10):
25 | self.k = k
26 | super(TopKLoss, self).__init__(weight, False, ignore_index, reduce=False)
27 |
28 | def forward(self, inp, target):
29 | target = target[:, 0].long()
30 | res = super(TopKLoss, self).forward(inp, target)
31 | num_voxels = np.prod(res.shape, dtype=np.int64)
32 | res, _ = torch.topk(res.view((-1, )), int(num_voxels * self.k / 100), sorted=False)
33 | return res.mean()
34 |
--------------------------------------------------------------------------------
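A small shape sketch for TopKLoss: it expects raw logits of shape (batch, classes, *spatial) and a target that carries the extra channel dimension, and only the k percent hardest voxels contribute to the mean:

import torch
from nnunet.training.loss_functions.TopK_loss import TopKLoss

loss = TopKLoss(k=10)  # keep the 10% highest per-voxel cross entropy values

logits = torch.randn(2, 3, 16, 16, 16)                      # raw network output, no softmax
target = torch.randint(0, 3, (2, 1, 16, 16, 16)).float()    # note the extra channel dimension

print(loss(logits, target))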
/nnUNet/nnunet/training/loss_functions/__init__.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | from . import *
--------------------------------------------------------------------------------
/nnUNet/nnunet/training/loss_functions/crossentropy.py:
--------------------------------------------------------------------------------
1 | from torch import nn, Tensor
2 |
3 |
4 | class RobustCrossEntropyLoss(nn.CrossEntropyLoss):
5 | """
6 | this is just a compatibility layer because my target tensor is float and has an extra dimension
7 | """
8 | def forward(self, input: Tensor, target: Tensor) -> Tensor:
9 | if len(target.shape) == len(input.shape):
10 | assert target.shape[1] == 1
11 | target = target[:, 0]
12 | return super().forward(input, target.long())
--------------------------------------------------------------------------------
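A minimal sketch of what the compatibility layer accepts: a float target with a singleton channel dimension, which plain nn.CrossEntropyLoss does not handle in this form:

import torch
from nnunet.training.loss_functions.crossentropy import RobustCrossEntropyLoss

loss = RobustCrossEntropyLoss()

logits = torch.randn(2, 3, 16, 16)                      # (batch, classes, *spatial)
target = torch.randint(0, 3, (2, 1, 16, 16)).float()    # float, with an extra channel dimension

print(loss(logits, target))  # the channel dim is stripped and the target cast to long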
/nnUNet/nnunet/training/loss_functions/deep_supervision.py:
--------------------------------------------------------------------------------
1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 |
16 | from torch import nn
17 |
18 |
19 | class MultipleOutputLoss2(nn.Module):
20 | def __init__(self, loss, weight_factors=None):
21 | """
22 |         use this if you have several outputs and ground truths (both lists of the same length) and the loss should be
23 |         computed between corresponding pairs (x[0] and y[0], x[1] and y[1], etc.)
24 | :param loss:
25 | :param weight_factors:
26 | """
27 | super(MultipleOutputLoss2, self).__init__()
28 | self.weight_factors = weight_factors
29 | self.loss = loss
30 |
31 | def forward(self, x, y):
32 | assert isinstance(x, (tuple, list)), "x must be either tuple or list"
33 | assert isinstance(y, (tuple, list)), "y must be either tuple or list"
34 | if self.weight_factors is None:
35 | weights = [1] * len(x)
36 | else:
37 | weights = self.weight_factors
38 |
39 | l = weights[0] * self.loss(x[0], y[0])
40 | for i in range(1, len(x)):
41 | if weights[i] != 0:
42 | l += weights[i] * self.loss(x[i], y[i])
43 | return l
44 |
--------------------------------------------------------------------------------
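A short sketch of the deep-supervision wrapper: one loss term per resolution level, highest resolution first; the weight factors below are illustrative only:

import torch
from nnunet.training.loss_functions.crossentropy import RobustCrossEntropyLoss
from nnunet.training.loss_functions.deep_supervision import MultipleOutputLoss2

ds_loss = MultipleOutputLoss2(RobustCrossEntropyLoss(), weight_factors=[1.0, 0.5, 0.25])

# outputs and targets at three resolutions, as produced when deep supervision is enabled
outputs = [torch.randn(2, 3, s, s) for s in (32, 16, 8)]
targets = [torch.randint(0, 3, (2, 1, s, s)).float() for s in (32, 16, 8)]

print(ds_loss(outputs, targets))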
/nnUNet/nnunet/training/network_training/__init__.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | from . import *
--------------------------------------------------------------------------------
/nnUNet/nnunet/training/network_training/competitions_with_custom_Trainers/MMS/nnUNetTrainerV2_MMS.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from nnunet.network_architecture.generic_UNet import Generic_UNet
3 | from nnunet.network_architecture.initialization import InitWeights_He
4 | from nnunet.training.network_training.nnUNet_variants.data_augmentation.nnUNetTrainerV2_insaneDA import \
5 | nnUNetTrainerV2_insaneDA
6 | from nnunet.utilities.nd_softmax import softmax_helper
7 | from torch import nn
8 |
9 |
10 | class nnUNetTrainerV2_MMS(nnUNetTrainerV2_insaneDA):
11 | def setup_DA_params(self):
12 | super().setup_DA_params()
13 | self.data_aug_params["p_rot"] = 0.7
14 | self.data_aug_params["p_eldef"] = 0.1
15 | self.data_aug_params["p_scale"] = 0.3
16 |
17 | self.data_aug_params["independent_scale_factor_for_each_axis"] = True
18 | self.data_aug_params["p_independent_scale_per_axis"] = 0.3
19 |
20 | self.data_aug_params["do_additive_brightness"] = True
21 | self.data_aug_params["additive_brightness_mu"] = 0
22 | self.data_aug_params["additive_brightness_sigma"] = 0.2
23 | self.data_aug_params["additive_brightness_p_per_sample"] = 0.3
24 | self.data_aug_params["additive_brightness_p_per_channel"] = 1
25 |
26 | self.data_aug_params["elastic_deform_alpha"] = (0., 300.)
27 | self.data_aug_params["elastic_deform_sigma"] = (9., 15.)
28 |
29 | self.data_aug_params['gamma_range'] = (0.5, 1.6)
30 |
31 | def initialize_network(self):
32 | if self.threeD:
33 | conv_op = nn.Conv3d
34 | dropout_op = nn.Dropout3d
35 | norm_op = nn.BatchNorm3d
36 |
37 | else:
38 | conv_op = nn.Conv2d
39 | dropout_op = nn.Dropout2d
40 | norm_op = nn.BatchNorm2d
41 |
42 | norm_op_kwargs = {'eps': 1e-5, 'affine': True}
43 | dropout_op_kwargs = {'p': 0, 'inplace': True}
44 | net_nonlin = nn.LeakyReLU
45 | net_nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True}
46 | self.network = Generic_UNet(self.num_input_channels, self.base_num_features, self.num_classes,
47 | len(self.net_num_pool_op_kernel_sizes),
48 | self.conv_per_stage, 2, conv_op, norm_op, norm_op_kwargs, dropout_op,
49 | dropout_op_kwargs,
50 | net_nonlin, net_nonlin_kwargs, True, False, lambda x: x, InitWeights_He(1e-2),
51 | self.net_num_pool_op_kernel_sizes, self.net_conv_kernel_sizes, False, True, True)
52 | if torch.cuda.is_available():
53 | self.network.cuda()
54 | self.network.inference_apply_nonlin = softmax_helper
55 |
56 | """def run_training(self):
57 | from batchviewer import view_batch
58 | a = next(self.tr_gen)
59 | view_batch(a['data'])
60 | import IPython;IPython.embed()"""
61 |
--------------------------------------------------------------------------------
/nnUNet/nnunet/training/network_training/nnUNetTrainerV2_fp32.py:
--------------------------------------------------------------------------------
1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 |
16 | from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
17 |
18 |
19 | class nnUNetTrainerV2_fp32(nnUNetTrainerV2):
20 | """
21 | Info for Fabian: same as internal nnUNetTrainerV2_2
22 | """
23 |
24 | def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
25 | unpack_data=True, deterministic=True, fp16=False):
26 | super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data,
27 | deterministic, False)
28 |
--------------------------------------------------------------------------------
/nnUNet/nnunet/training/network_training/nnUNet_variants/__init__.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | from . import *
--------------------------------------------------------------------------------
/nnUNet/nnunet/training/network_training/nnUNet_variants/architectural_variants/nnUNetTrainerV2_3ConvPerStage.py:
--------------------------------------------------------------------------------
1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | import torch
15 | from nnunet.network_architecture.generic_UNet import Generic_UNet
16 | from nnunet.network_architecture.initialization import InitWeights_He
17 | from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
18 | from nnunet.utilities.nd_softmax import softmax_helper
19 | from torch import nn
20 |
21 |
22 | class nnUNetTrainerV2_3ConvPerStage(nnUNetTrainerV2):
23 | def initialize_network(self):
24 | self.base_num_features = 24 # otherwise we run out of VRAM
25 | if self.threeD:
26 | conv_op = nn.Conv3d
27 | dropout_op = nn.Dropout3d
28 | norm_op = nn.InstanceNorm3d
29 |
30 | else:
31 | conv_op = nn.Conv2d
32 | dropout_op = nn.Dropout2d
33 | norm_op = nn.InstanceNorm2d
34 |
35 | norm_op_kwargs = {'eps': 1e-5, 'affine': True}
36 | dropout_op_kwargs = {'p': 0, 'inplace': True}
37 | net_nonlin = nn.LeakyReLU
38 | net_nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True}
39 | self.network = Generic_UNet(self.num_input_channels, self.base_num_features, self.num_classes,
40 | len(self.net_num_pool_op_kernel_sizes),
41 | 3, 2, conv_op, norm_op, norm_op_kwargs, dropout_op, dropout_op_kwargs,
42 | net_nonlin, net_nonlin_kwargs, True, False, lambda x: x, InitWeights_He(1e-2),
43 | self.net_num_pool_op_kernel_sizes, self.net_conv_kernel_sizes, False, True, True)
44 | if torch.cuda.is_available():
45 | self.network.cuda()
46 | self.network.inference_apply_nonlin = softmax_helper
47 |
--------------------------------------------------------------------------------
/nnUNet/nnunet/training/network_training/nnUNet_variants/architectural_variants/nnUNetTrainerV2_3ConvPerStage_samefilters.py:
--------------------------------------------------------------------------------
1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | import torch
15 | from nnunet.network_architecture.generic_UNet import Generic_UNet
16 | from nnunet.network_architecture.initialization import InitWeights_He
17 | from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
18 | from nnunet.utilities.nd_softmax import softmax_helper
19 | from torch import nn
20 |
21 |
22 | class nnUNetTrainerV2_3ConvPerStageSameFilters(nnUNetTrainerV2):
23 | def initialize_network(self):
24 | if self.threeD:
25 | conv_op = nn.Conv3d
26 | dropout_op = nn.Dropout3d
27 | norm_op = nn.InstanceNorm3d
28 |
29 | else:
30 | conv_op = nn.Conv2d
31 | dropout_op = nn.Dropout2d
32 | norm_op = nn.InstanceNorm2d
33 |
34 | norm_op_kwargs = {'eps': 1e-5, 'affine': True}
35 | dropout_op_kwargs = {'p': 0, 'inplace': True}
36 | net_nonlin = nn.LeakyReLU
37 | net_nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True}
38 | self.network = Generic_UNet(self.num_input_channels, self.base_num_features, self.num_classes,
39 | len(self.net_num_pool_op_kernel_sizes),
40 | 3, 2, conv_op, norm_op, norm_op_kwargs, dropout_op, dropout_op_kwargs,
41 | net_nonlin, net_nonlin_kwargs, True, False, lambda x: x, InitWeights_He(1e-2),
42 | self.net_num_pool_op_kernel_sizes, self.net_conv_kernel_sizes, False, True, True)
43 | if torch.cuda.is_available():
44 | self.network.cuda()
45 | self.network.inference_apply_nonlin = softmax_helper
46 |
--------------------------------------------------------------------------------
/nnUNet/nnunet/training/network_training/nnUNet_variants/architectural_variants/nnUNetTrainerV2_BN.py:
--------------------------------------------------------------------------------
1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | import torch
15 | from nnunet.network_architecture.generic_UNet import Generic_UNet
16 | from nnunet.network_architecture.initialization import InitWeights_He
17 | from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
18 | from nnunet.utilities.nd_softmax import softmax_helper
19 | from torch import nn
20 |
21 |
22 | class nnUNetTrainerV2_BN(nnUNetTrainerV2):
23 | def initialize_network(self):
24 | """
25 |         same as nnUNetTrainerV2 but uses BatchNorm instead of InstanceNorm
26 | :return:
27 | """
28 | if self.threeD:
29 | conv_op = nn.Conv3d
30 | dropout_op = nn.Dropout3d
31 | norm_op = nn.BatchNorm3d
32 |
33 | else:
34 | conv_op = nn.Conv2d
35 | dropout_op = nn.Dropout2d
36 | norm_op = nn.BatchNorm2d
37 |
38 | norm_op_kwargs = {'eps': 1e-5, 'affine': True}
39 | dropout_op_kwargs = {'p': 0, 'inplace': True}
40 | net_nonlin = nn.LeakyReLU
41 | net_nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True}
42 | self.network = Generic_UNet(self.num_input_channels, self.base_num_features, self.num_classes,
43 | len(self.net_num_pool_op_kernel_sizes),
44 | self.conv_per_stage, 2, conv_op, norm_op, norm_op_kwargs, dropout_op, dropout_op_kwargs,
45 | net_nonlin, net_nonlin_kwargs, True, False, lambda x: x, InitWeights_He(1e-2),
46 | self.net_num_pool_op_kernel_sizes, self.net_conv_kernel_sizes, False, True, True)
47 | if torch.cuda.is_available():
48 | self.network.cuda()
49 | self.network.inference_apply_nonlin = softmax_helper
50 |
51 |
52 | nnUNetTrainerV2_BN_copy1 = nnUNetTrainerV2_BN
53 | nnUNetTrainerV2_BN_copy2 = nnUNetTrainerV2_BN
54 | nnUNetTrainerV2_BN_copy3 = nnUNetTrainerV2_BN
55 | nnUNetTrainerV2_BN_copy4 = nnUNetTrainerV2_BN
56 |
--------------------------------------------------------------------------------
/nnUNet/nnunet/training/network_training/nnUNet_variants/architectural_variants/nnUNetTrainerV2_FRN.py:
--------------------------------------------------------------------------------
1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 |
16 | from nnunet.network_architecture.custom_modules.feature_response_normalization import FRN3D
17 | from nnunet.network_architecture.generic_UNet import Generic_UNet
18 | from nnunet.network_architecture.initialization import InitWeights_He
19 | from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
20 | from nnunet.utilities.nd_softmax import softmax_helper
21 | from torch import nn
22 | from nnunet.network_architecture.custom_modules.helperModules import Identity
23 | import torch
24 |
25 |
26 | class nnUNetTrainerV2_FRN(nnUNetTrainerV2):
27 | def initialize_network(self):
28 | """
29 |         uses FRN3D as normalization and Identity as nonlinearity (FRN includes the TLU activation); 2d is not implemented
30 | :return:
31 | """
32 | if self.threeD:
33 | conv_op = nn.Conv3d
34 | dropout_op = nn.Dropout3d
35 | norm_op = FRN3D
36 |
37 | else:
38 | conv_op = nn.Conv2d
39 | dropout_op = nn.Dropout2d
40 | raise NotImplementedError
41 | norm_op = nn.BatchNorm2d
42 |
43 | norm_op_kwargs = {'eps': 1e-6}
44 | dropout_op_kwargs = {'p': 0, 'inplace': True}
45 | net_nonlin = Identity
46 | net_nonlin_kwargs = {}
47 | self.network = Generic_UNet(self.num_input_channels, self.base_num_features, self.num_classes,
48 | len(self.net_num_pool_op_kernel_sizes),
49 | self.conv_per_stage, 2, conv_op, norm_op, norm_op_kwargs, dropout_op, dropout_op_kwargs,
50 | net_nonlin, net_nonlin_kwargs, True, False, lambda x: x, InitWeights_He(1e-2),
51 | self.net_num_pool_op_kernel_sizes, self.net_conv_kernel_sizes, False, True, True)
52 | if torch.cuda.is_available():
53 | self.network.cuda()
54 | self.network.inference_apply_nonlin = softmax_helper
55 |
--------------------------------------------------------------------------------
/nnUNet/nnunet/training/network_training/nnUNet_variants/architectural_variants/nnUNetTrainerV2_GN.py:
--------------------------------------------------------------------------------
1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | import torch
15 | from nnunet.network_architecture.generic_UNet import Generic_UNet
16 | from nnunet.network_architecture.initialization import InitWeights_He
17 | from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
18 | from nnunet.network_architecture.custom_modules.helperModules import MyGroupNorm
19 | from nnunet.utilities.nd_softmax import softmax_helper
20 | from torch import nn
21 |
22 |
23 | class nnUNetTrainerV2_GN(nnUNetTrainerV2):
24 | def initialize_network(self):
25 | """
26 |         same as nnUNetTrainerV2 but uses GroupNorm (8 groups) instead of InstanceNorm
27 | :return:
28 | """
29 | if self.threeD:
30 | conv_op = nn.Conv3d
31 | dropout_op = nn.Dropout3d
32 | norm_op = MyGroupNorm
33 |
34 | else:
35 | conv_op = nn.Conv2d
36 | dropout_op = nn.Dropout2d
37 | norm_op = MyGroupNorm
38 |
39 | norm_op_kwargs = {'eps': 1e-5, 'affine': True, 'num_groups': 8}
40 | dropout_op_kwargs = {'p': 0, 'inplace': True}
41 | net_nonlin = nn.LeakyReLU
42 | net_nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True}
43 | self.network = Generic_UNet(self.num_input_channels, self.base_num_features, self.num_classes,
44 | len(self.net_num_pool_op_kernel_sizes),
45 | self.conv_per_stage, 2, conv_op, norm_op, norm_op_kwargs, dropout_op, dropout_op_kwargs,
46 | net_nonlin, net_nonlin_kwargs, True, False, lambda x: x, InitWeights_He(1e-2),
47 | self.net_num_pool_op_kernel_sizes, self.net_conv_kernel_sizes, False, True, True)
48 | if torch.cuda.is_available():
49 | self.network.cuda()
50 | self.network.inference_apply_nonlin = softmax_helper
51 |
--------------------------------------------------------------------------------
/nnUNet/nnunet/training/network_training/nnUNet_variants/architectural_variants/nnUNetTrainerV2_GeLU.py:
--------------------------------------------------------------------------------
1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | import torch
15 | from nnunet.network_architecture.generic_UNet import Generic_UNet
16 | from nnunet.network_architecture.initialization import InitWeights_He
17 |
18 | from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
19 | from nnunet.utilities.nd_softmax import softmax_helper
20 | from torch import nn
21 |
22 | try:
23 | from torch.nn.functional import gelu
24 | except ImportError:
25 | gelu = None
26 |
27 |
28 | class GeLU(nn.Module):
29 | def __init__(self):
30 | super().__init__()
31 | if gelu is None:
32 | raise ImportError('torch.nn.functional.gelu is not available in this PyTorch version; please update PyTorch to use GeLU')
33 |
34 | def forward(self, x):
35 | return gelu(x)
36 |
37 |
38 | class nnUNetTrainerV2_GeLU(nnUNetTrainerV2):
39 | def initialize_network(self):
40 | """
41 | - momentum 0.99
42 | - SGD instead of Adam
43 | - self.lr_scheduler = None because we do poly_lr
44 | - deep supervision = True
45 | - GeLU instead of LeakyReLU (via the GeLU wrapper module above)
46 | - otherwise identical to nnUNetTrainerV2
47 |
48 | Known issue: forgot to set neg_slope=0 in InitWeights_He; should not make a difference though
49 | :return:
50 | """
51 | if self.threeD:
52 | conv_op = nn.Conv3d
53 | dropout_op = nn.Dropout3d
54 | norm_op = nn.InstanceNorm3d
55 |
56 | else:
57 | conv_op = nn.Conv2d
58 | dropout_op = nn.Dropout2d
59 | norm_op = nn.InstanceNorm2d
60 |
61 | norm_op_kwargs = {'eps': 1e-5, 'affine': True}
62 | dropout_op_kwargs = {'p': 0, 'inplace': True}
63 | net_nonlin = GeLU
64 | net_nonlin_kwargs = {}
65 | self.network = Generic_UNet(self.num_input_channels, self.base_num_features, self.num_classes,
66 | len(self.net_num_pool_op_kernel_sizes),
67 | self.conv_per_stage, 2, conv_op, norm_op, norm_op_kwargs, dropout_op, dropout_op_kwargs,
68 | net_nonlin, net_nonlin_kwargs, True, False, lambda x: x, InitWeights_He(),
69 | self.net_num_pool_op_kernel_sizes, self.net_conv_kernel_sizes, False, True, True)
70 | if torch.cuda.is_available():
71 | self.network.cuda()
72 | self.network.inference_apply_nonlin = softmax_helper
73 |
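On PyTorch versions that ship nn.GELU as a module, the wrapper above is not strictly needed: Generic_UNet instantiates the non-linearity as nonlin(**nonlin_kwargs), so nn.GELU can be passed directly. A sketch of the two lines that would change, assuming such a PyTorch version:

from torch import nn

net_nonlin = nn.GELU   # instantiated inside Generic_UNet as nn.GELU()
net_nonlin_kwargs = {}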
--------------------------------------------------------------------------------
/nnUNet/nnunet/training/network_training/nnUNet_variants/architectural_variants/nnUNetTrainerV2_LReLU_slope_2en1.py:
--------------------------------------------------------------------------------
1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | import torch
15 | from nnunet.network_architecture.generic_UNet import Generic_UNet
16 | from nnunet.network_architecture.initialization import InitWeights_He
17 | from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
18 | from nnunet.utilities.nd_softmax import softmax_helper
19 | from torch import nn
20 |
21 |
22 | class nnUNetTrainerV2_LReLU_slope_2en1(nnUNetTrainerV2):
23 | def initialize_network(self):
24 | if self.threeD:
25 | conv_op = nn.Conv3d
26 | dropout_op = nn.Dropout3d
27 | norm_op = nn.InstanceNorm3d
28 |
29 | else:
30 | conv_op = nn.Conv2d
31 | dropout_op = nn.Dropout2d
32 | norm_op = nn.InstanceNorm2d
33 |
34 | norm_op_kwargs = {'eps': 1e-5, 'affine': True}
35 | dropout_op_kwargs = {'p': 0, 'inplace': True}
36 | net_nonlin = nn.LeakyReLU
37 | net_nonlin_kwargs = {'inplace': True, 'negative_slope': 2e-1}
38 | self.network = Generic_UNet(self.num_input_channels, self.base_num_features, self.num_classes,
39 | len(self.net_num_pool_op_kernel_sizes),
40 | self.conv_per_stage, 2, conv_op, norm_op, norm_op_kwargs, dropout_op, dropout_op_kwargs,
41 | net_nonlin, net_nonlin_kwargs, True, False, lambda x: x, InitWeights_He(0),
42 | self.net_num_pool_op_kernel_sizes, self.net_conv_kernel_sizes, False, True, True)
43 | if torch.cuda.is_available():
44 | self.network.cuda()
45 | self.network.inference_apply_nonlin = softmax_helper
46 |
--------------------------------------------------------------------------------
/nnUNet/nnunet/training/network_training/nnUNet_variants/architectural_variants/nnUNetTrainerV2_Mish.py:
--------------------------------------------------------------------------------
1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | import torch
15 | from nnunet.network_architecture.generic_UNet import Generic_UNet
16 | from nnunet.network_architecture.initialization import InitWeights_He
17 | from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
18 | from nnunet.utilities.nd_softmax import softmax_helper
19 | from torch import nn
20 | from nnunet.network_architecture.custom_modules.mish import Mish
21 |
22 |
23 | class nnUNetTrainerV2_Mish(nnUNetTrainerV2):
24 | def initialize_network(self):
25 | if self.threeD:
26 | conv_op = nn.Conv3d
27 | dropout_op = nn.Dropout3d
28 | norm_op = nn.InstanceNorm3d
29 |
30 | else:
31 | conv_op = nn.Conv2d
32 | dropout_op = nn.Dropout2d
33 | norm_op = nn.InstanceNorm2d
34 |
35 | norm_op_kwargs = {'eps': 1e-5, 'affine': True}
36 | dropout_op_kwargs = {'p': 0, 'inplace': True}
37 | net_nonlin = Mish
38 | net_nonlin_kwargs = {}
39 | self.network = Generic_UNet(self.num_input_channels, self.base_num_features, self.num_classes,
40 | len(self.net_num_pool_op_kernel_sizes),
41 | self.conv_per_stage, 2, conv_op, norm_op, norm_op_kwargs, dropout_op, dropout_op_kwargs,
42 | net_nonlin, net_nonlin_kwargs, True, False, lambda x: x, InitWeights_He(0),
43 | self.net_num_pool_op_kernel_sizes, self.net_conv_kernel_sizes, False, True, True)
44 | if torch.cuda.is_available():
45 | self.network.cuda()
46 | self.network.inference_apply_nonlin = softmax_helper
47 |
48 |
--------------------------------------------------------------------------------
/nnUNet/nnunet/training/network_training/nnUNet_variants/architectural_variants/nnUNetTrainerV2_NoNormalization.py:
--------------------------------------------------------------------------------
1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | import torch
15 | from nnunet.network_architecture.generic_UNet import Generic_UNet
16 | from nnunet.network_architecture.initialization import InitWeights_He
17 | from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
18 | from nnunet.network_architecture.custom_modules.helperModules import Identity
19 | from nnunet.utilities.nd_softmax import softmax_helper
20 | from torch import nn
21 |
22 |
23 | class nnUNetTrainerV2_NoNormalization(nnUNetTrainerV2):
24 | def initialize_network(self):
25 | if self.threeD:
26 | conv_op = nn.Conv3d
27 | dropout_op = nn.Dropout3d
28 | norm_op = Identity
29 |
30 | else:
31 | conv_op = nn.Conv2d
32 | dropout_op = nn.Dropout2d
33 | norm_op = Identity
34 |
35 | norm_op_kwargs = {}
36 | dropout_op_kwargs = {'p': 0, 'inplace': True}
37 | net_nonlin = nn.LeakyReLU
38 | net_nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True}
39 | self.network = Generic_UNet(self.num_input_channels, self.base_num_features, self.num_classes,
40 | len(self.net_num_pool_op_kernel_sizes),
41 | self.conv_per_stage, 2, conv_op, norm_op, norm_op_kwargs, dropout_op, dropout_op_kwargs,
42 | net_nonlin, net_nonlin_kwargs, True, False, lambda x: x, InitWeights_He(1e-2),
43 | self.net_num_pool_op_kernel_sizes, self.net_conv_kernel_sizes, False, True, True)
44 | if torch.cuda.is_available():
45 | self.network.cuda()
46 | self.network.inference_apply_nonlin = softmax_helper
47 |
--------------------------------------------------------------------------------
/nnUNet/nnunet/training/network_training/nnUNet_variants/architectural_variants/nnUNetTrainerV2_NoNormalization_lr1en3.py:
--------------------------------------------------------------------------------
1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 |
16 | from nnunet.training.network_training.nnUNet_variants.architectural_variants.nnUNetTrainerV2_NoNormalization import \
17 | nnUNetTrainerV2_NoNormalization
18 |
19 |
20 | class nnUNetTrainerV2_NoNormalization_lr1en3(nnUNetTrainerV2_NoNormalization):
21 | def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
22 | unpack_data=True, deterministic=True, fp16=False):
23 | super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data,
24 | deterministic, fp16)
25 | self.initial_lr = 1e-3
26 |
--------------------------------------------------------------------------------
/nnUNet/nnunet/training/network_training/nnUNet_variants/architectural_variants/nnUNetTrainerV2_ReLU.py:
--------------------------------------------------------------------------------
1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | import torch
15 | from nnunet.network_architecture.generic_UNet import Generic_UNet
16 | from nnunet.network_architecture.initialization import InitWeights_He
17 | from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
18 | from nnunet.utilities.nd_softmax import softmax_helper
19 | from torch import nn
20 |
21 |
22 | class nnUNetTrainerV2_ReLU(nnUNetTrainerV2):
23 | def initialize_network(self):
24 | if self.threeD:
25 | conv_op = nn.Conv3d
26 | dropout_op = nn.Dropout3d
27 | norm_op = nn.InstanceNorm3d
28 |
29 | else:
30 | conv_op = nn.Conv2d
31 | dropout_op = nn.Dropout2d
32 | norm_op = nn.InstanceNorm2d
33 |
34 | norm_op_kwargs = {'eps': 1e-5, 'affine': True}
35 | dropout_op_kwargs = {'p': 0, 'inplace': True}
36 | net_nonlin = nn.ReLU
37 | net_nonlin_kwargs = {'inplace': True}
38 | self.network = Generic_UNet(self.num_input_channels, self.base_num_features, self.num_classes,
39 | len(self.net_num_pool_op_kernel_sizes),
40 | self.conv_per_stage, 2, conv_op, norm_op, norm_op_kwargs, dropout_op, dropout_op_kwargs,
41 | net_nonlin, net_nonlin_kwargs, True, False, lambda x: x, InitWeights_He(0),
42 | self.net_num_pool_op_kernel_sizes, self.net_conv_kernel_sizes, False, True, True)
43 | if torch.cuda.is_available():
44 | self.network.cuda()
45 | self.network.inference_apply_nonlin = softmax_helper
46 |
--------------------------------------------------------------------------------
/nnUNet/nnunet/training/network_training/nnUNet_variants/architectural_variants/nnUNetTrainerV2_ReLU_biasInSegOutput.py:
--------------------------------------------------------------------------------
1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | import torch
15 | from nnunet.network_architecture.generic_UNet import Generic_UNet
16 | from nnunet.network_architecture.initialization import InitWeights_He
17 | from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
18 | from nnunet.utilities.nd_softmax import softmax_helper
19 | from torch import nn
20 |
21 |
22 | class nnUNetTrainerV2_ReLU_biasInSegOutput(nnUNetTrainerV2):
23 | def initialize_network(self):
24 | if self.threeD:
25 | conv_op = nn.Conv3d
26 | dropout_op = nn.Dropout3d
27 | norm_op = nn.InstanceNorm3d
28 |
29 | else:
30 | conv_op = nn.Conv2d
31 | dropout_op = nn.Dropout2d
32 | norm_op = nn.InstanceNorm2d
33 |
34 | norm_op_kwargs = {'eps': 1e-5, 'affine': True}
35 | dropout_op_kwargs = {'p': 0, 'inplace': True}
36 | net_nonlin = nn.ReLU
37 | net_nonlin_kwargs = {'inplace': True}
38 | self.network = Generic_UNet(self.num_input_channels, self.base_num_features, self.num_classes,
39 | len(self.net_num_pool_op_kernel_sizes),
40 | self.conv_per_stage, 2, conv_op, norm_op, norm_op_kwargs, dropout_op, dropout_op_kwargs,
41 | net_nonlin, net_nonlin_kwargs, True, False, lambda x: x, InitWeights_He(0),
42 | self.net_num_pool_op_kernel_sizes, self.net_conv_kernel_sizes, False, True, True,
43 | seg_output_use_bias=True)
44 | if torch.cuda.is_available():
45 | self.network.cuda()
46 | self.network.inference_apply_nonlin = softmax_helper
47 |
--------------------------------------------------------------------------------
/nnUNet/nnunet/training/network_training/nnUNet_variants/architectural_variants/nnUNetTrainerV2_ReLU_convReLUIN.py:
--------------------------------------------------------------------------------
1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | import torch
15 | from nnunet.network_architecture.generic_UNet import Generic_UNet, ConvDropoutNonlinNorm
16 | from nnunet.network_architecture.initialization import InitWeights_He
17 | from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
18 | from nnunet.utilities.nd_softmax import softmax_helper
19 | from torch import nn
20 |
21 |
22 | class nnUNetTrainerV2_ReLU_convReLUIN(nnUNetTrainerV2):
23 | def initialize_network(self):
24 | if self.threeD:
25 | conv_op = nn.Conv3d
26 | dropout_op = nn.Dropout3d
27 | norm_op = nn.InstanceNorm3d
28 |
29 | else:
30 | conv_op = nn.Conv2d
31 | dropout_op = nn.Dropout2d
32 | norm_op = nn.InstanceNorm2d
33 |
34 | norm_op_kwargs = {'eps': 1e-5, 'affine': True}
35 | dropout_op_kwargs = {'p': 0, 'inplace': True}
36 | net_nonlin = nn.ReLU
37 | net_nonlin_kwargs = {'inplace': True}
38 | self.network = Generic_UNet(self.num_input_channels, self.base_num_features, self.num_classes,
39 | len(self.net_num_pool_op_kernel_sizes),
40 | self.conv_per_stage, 2, conv_op, norm_op, norm_op_kwargs, dropout_op, dropout_op_kwargs,
41 | net_nonlin, net_nonlin_kwargs, True, False, lambda x: x, InitWeights_He(0),
42 | self.net_num_pool_op_kernel_sizes, self.net_conv_kernel_sizes, False, True, True,
43 | basic_block=ConvDropoutNonlinNorm)
44 | if torch.cuda.is_available():
45 | self.network.cuda()
46 | self.network.inference_apply_nonlin = softmax_helper
47 |
--------------------------------------------------------------------------------
/nnUNet/nnunet/training/network_training/nnUNet_variants/architectural_variants/nnUNetTrainerV2_allConv3x3.py:
--------------------------------------------------------------------------------
1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | import torch
15 | from nnunet.network_architecture.generic_UNet import Generic_UNet
16 | from nnunet.network_architecture.initialization import InitWeights_He
17 | from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
18 | from nnunet.utilities.nd_softmax import softmax_helper
19 | from torch import nn
20 |
21 |
22 | class nnUNetTrainerV2_allConv3x3(nnUNetTrainerV2):
23 | def initialize_network(self):
24 | """
25 | - momentum 0.99
26 | - SGD instead of Adam
27 | - self.lr_scheduler = None because we do poly_lr
28 | - deep supervision = True
29 | - all convolution kernel sizes are forced to 3 along every axis (see the loop below)
30 |
31 | Known issue: forgot to set neg_slope=0 in InitWeights_He; should not make a difference though
32 | :return:
33 | """
34 | if self.threeD:
35 | conv_op = nn.Conv3d
36 | dropout_op = nn.Dropout3d
37 | norm_op = nn.InstanceNorm3d
38 |
39 | else:
40 | conv_op = nn.Conv2d
41 | dropout_op = nn.Dropout2d
42 | norm_op = nn.InstanceNorm2d
43 |
44 | for s in range(len(self.net_conv_kernel_sizes)):
45 | for i in range(len(self.net_conv_kernel_sizes[s])):
46 | self.net_conv_kernel_sizes[s][i] = 3
47 |
48 | norm_op_kwargs = {'eps': 1e-5, 'affine': True}
49 | dropout_op_kwargs = {'p': 0, 'inplace': True}
50 | net_nonlin = nn.LeakyReLU
51 | net_nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True}
52 |
53 | self.network = Generic_UNet(self.num_input_channels, self.base_num_features, self.num_classes,
54 | len(self.net_num_pool_op_kernel_sizes),
55 | self.conv_per_stage, 2, conv_op, norm_op, norm_op_kwargs, dropout_op, dropout_op_kwargs,
56 | net_nonlin, net_nonlin_kwargs, True, False, lambda x: x, InitWeights_He(1e-2),
57 | self.net_num_pool_op_kernel_sizes, self.net_conv_kernel_sizes, False, True, True)
58 | if torch.cuda.is_available():
59 | self.network.cuda()
60 | self.network.inference_apply_nonlin = softmax_helper
61 |
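To illustrate the effect of the kernel-size loop above on a hypothetical anisotropic plan (values made up for demonstration):

# hypothetical plan: the first two stages use in-plane-only 1x3x3 kernels
net_conv_kernel_sizes = [[1, 3, 3], [1, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3]]

for s in range(len(net_conv_kernel_sizes)):
    for i in range(len(net_conv_kernel_sizes[s])):
        net_conv_kernel_sizes[s][i] = 3

print(net_conv_kernel_sizes)  # [[3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3]]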
--------------------------------------------------------------------------------
/nnUNet/nnunet/training/network_training/nnUNet_variants/architectural_variants/nnUNetTrainerV2_lReLU_biasInSegOutput.py:
--------------------------------------------------------------------------------
1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | import torch
15 | from nnunet.network_architecture.generic_UNet import Generic_UNet
16 | from nnunet.network_architecture.initialization import InitWeights_He
17 | from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
18 | from nnunet.utilities.nd_softmax import softmax_helper
19 | from torch import nn
20 |
21 |
22 | class nnUNetTrainerV2_lReLU_biasInSegOutput(nnUNetTrainerV2):
23 | def initialize_network(self):
24 | if self.threeD:
25 | conv_op = nn.Conv3d
26 | dropout_op = nn.Dropout3d
27 | norm_op = nn.InstanceNorm3d
28 |
29 | else:
30 | conv_op = nn.Conv2d
31 | dropout_op = nn.Dropout2d
32 | norm_op = nn.InstanceNorm2d
33 |
34 | norm_op_kwargs = {'eps': 1e-5, 'affine': True}
35 | dropout_op_kwargs = {'p': 0, 'inplace': True}
36 | net_nonlin = nn.LeakyReLU
37 | net_nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True}
38 | self.network = Generic_UNet(self.num_input_channels, self.base_num_features, self.num_classes,
39 | len(self.net_num_pool_op_kernel_sizes),
40 | self.conv_per_stage, 2, conv_op, norm_op, norm_op_kwargs, dropout_op, dropout_op_kwargs,
41 | net_nonlin, net_nonlin_kwargs, True, False, lambda x: x, InitWeights_He(0),
42 | self.net_num_pool_op_kernel_sizes, self.net_conv_kernel_sizes, False, True, True,
43 | seg_output_use_bias=True)
44 | if torch.cuda.is_available():
45 | self.network.cuda()
46 | self.network.inference_apply_nonlin = softmax_helper
47 |
--------------------------------------------------------------------------------
/nnUNet/nnunet/training/network_training/nnUNet_variants/architectural_variants/nnUNetTrainerV2_lReLU_convlReLUIN.py:
--------------------------------------------------------------------------------
1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | import torch
15 | from nnunet.network_architecture.generic_UNet import Generic_UNet, ConvDropoutNonlinNorm
16 | from nnunet.network_architecture.initialization import InitWeights_He
17 | from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
18 | from nnunet.utilities.nd_softmax import softmax_helper
19 | from torch import nn
20 |
21 |
22 | class nnUNetTrainerV2_lReLU_convlReLUIN(nnUNetTrainerV2):
23 | def initialize_network(self):
24 | if self.threeD:
25 | conv_op = nn.Conv3d
26 | dropout_op = nn.Dropout3d
27 | norm_op = nn.InstanceNorm3d
28 |
29 | else:
30 | conv_op = nn.Conv2d
31 | dropout_op = nn.Dropout2d
32 | norm_op = nn.InstanceNorm2d
33 |
34 | norm_op_kwargs = {'eps': 1e-5, 'affine': True}
35 | dropout_op_kwargs = {'p': 0, 'inplace': True}
36 | net_nonlin = nn.LeakyReLU
37 | net_nonlin_kwargs = {'inplace': True, 'negative_slope': 1e-2}
38 | self.network = Generic_UNet(self.num_input_channels, self.base_num_features, self.num_classes,
39 | len(self.net_num_pool_op_kernel_sizes),
40 | self.conv_per_stage, 2, conv_op, norm_op, norm_op_kwargs, dropout_op, dropout_op_kwargs,
41 | net_nonlin, net_nonlin_kwargs, True, False, lambda x: x, InitWeights_He(1e-2),
42 | self.net_num_pool_op_kernel_sizes, self.net_conv_kernel_sizes, False, True, True,
43 | basic_block=ConvDropoutNonlinNorm)
44 | if torch.cuda.is_available():
45 | self.network.cuda()
46 | self.network.inference_apply_nonlin = softmax_helper
47 |
--------------------------------------------------------------------------------
/nnUNet/nnunet/training/network_training/nnUNet_variants/cascade/nnUNetTrainerV2CascadeFullRes_lowerLR.py:
--------------------------------------------------------------------------------
1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 |
16 | from nnunet.training.network_training.nnUNetTrainerV2_CascadeFullRes import nnUNetTrainerV2CascadeFullRes
17 |
18 |
19 | class nnUNetTrainerV2CascadeFullRes_lowerLR(nnUNetTrainerV2CascadeFullRes):
20 | def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
21 | unpack_data=True, deterministic=True, previous_trainer="nnUNetTrainerV2", fp16=False):
22 | super().__init__(plans_file, fold, output_folder, dataset_directory,
23 | batch_dice, stage, unpack_data, deterministic,
24 | previous_trainer, fp16)
25 | self.initial_lr = 1e-3
26 |
--------------------------------------------------------------------------------
/nnUNet/nnunet/training/network_training/nnUNet_variants/cascade/nnUNetTrainerV2CascadeFullRes_shorter.py:
--------------------------------------------------------------------------------
1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 |
16 | from nnunet.training.network_training.nnUNetTrainerV2_CascadeFullRes import nnUNetTrainerV2CascadeFullRes
17 |
18 |
19 | class nnUNetTrainerV2CascadeFullRes_shorter(nnUNetTrainerV2CascadeFullRes):
20 | def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
21 | unpack_data=True, deterministic=True, previous_trainer="nnUNetTrainerV2", fp16=False):
22 | super().__init__(plans_file, fold, output_folder, dataset_directory,
23 | batch_dice, stage, unpack_data, deterministic,
24 | previous_trainer, fp16)
25 | self.max_num_epochs = 500
26 |
--------------------------------------------------------------------------------
/nnUNet/nnunet/training/network_training/nnUNet_variants/cascade/nnUNetTrainerV2CascadeFullRes_shorter_lowerLR.py:
--------------------------------------------------------------------------------
1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 |
16 | from nnunet.training.network_training.nnUNetTrainerV2_CascadeFullRes import nnUNetTrainerV2CascadeFullRes
17 |
18 |
19 | class nnUNetTrainerV2CascadeFullRes_shorter_lowerLR(nnUNetTrainerV2CascadeFullRes):
20 | def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
21 | unpack_data=True, deterministic=True, previous_trainer="nnUNetTrainerV2", fp16=False):
22 | super().__init__(plans_file, fold, output_folder, dataset_directory,
23 | batch_dice, stage, unpack_data, deterministic,
24 | previous_trainer, fp16)
25 | self.max_num_epochs = 500
26 | self.initial_lr = 1e-3
27 |
--------------------------------------------------------------------------------
/nnUNet/nnunet/training/network_training/nnUNet_variants/copies/nnUNetTrainerV2_copies.py:
--------------------------------------------------------------------------------
1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 |
16 | from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
17 |
18 |
19 | # These classes exist only so that we can check the stability of results. Training is nondeterministic, and by renaming
20 | # the trainer class we can have several trained models coexist even though the trainer is effectively the same.
21 |
22 |
23 | class nnUNetTrainerV2_copy1(nnUNetTrainerV2):
24 | def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
25 | unpack_data=True, deterministic=True, fp16=False):
26 | super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data,
27 | deterministic, fp16)
28 |
29 |
30 | class nnUNetTrainerV2_copy2(nnUNetTrainerV2):
31 | def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
32 | unpack_data=True, deterministic=True, fp16=False):
33 | super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data,
34 | deterministic, fp16)
35 |
36 |
37 | class nnUNetTrainerV2_copy3(nnUNetTrainerV2):
38 | def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
39 | unpack_data=True, deterministic=True, fp16=False):
40 | super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data,
41 | deterministic, fp16)
42 |
43 |
44 | class nnUNetTrainerV2_copy4(nnUNetTrainerV2):
45 | def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
46 | unpack_data=True, deterministic=True, fp16=False):
47 | super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data,
48 | deterministic, fp16)
49 |
50 |
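Because nnUNet resolves trainers by class name, each copy can be trained as an independent run simply by requesting its name from the usual training entry point, e.g. nnUNet_train 3d_fullres nnUNetTrainerV2_copy1 TASK_NAME_OR_ID FOLD (task and fold are placeholders); each copy then gets its own output folder, which is what makes the run-to-run stability comparison possible.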
--------------------------------------------------------------------------------
/nnUNet/nnunet/training/network_training/nnUNet_variants/data_augmentation/nnUNetTrainerV2_DA2.py:
--------------------------------------------------------------------------------
1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 |
16 | from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
17 |
18 |
19 | class nnUNetTrainerV2_DA2(nnUNetTrainerV2):
20 | def setup_DA_params(self):
21 | super().setup_DA_params()
22 |
23 | self.data_aug_params["independent_scale_factor_for_each_axis"] = True
24 |
25 | if self.threeD:
26 | self.data_aug_params["rotation_p_per_axis"] = 0.5
27 | else:
28 | self.data_aug_params["rotation_p_per_axis"] = 1
29 |
30 | self.data_aug_params["do_additive_brightness"] = True
31 |
32 |
--------------------------------------------------------------------------------
/nnUNet/nnunet/training/network_training/nnUNet_variants/data_augmentation/nnUNetTrainerV2_independentScalePerAxis.py:
--------------------------------------------------------------------------------
1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 |
16 | from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
17 |
18 |
19 | class nnUNetTrainerV2_independentScalePerAxis(nnUNetTrainerV2):
20 | def setup_DA_params(self):
21 | super().setup_DA_params()
22 | self.data_aug_params["independent_scale_factor_for_each_axis"] = True
23 |
--------------------------------------------------------------------------------
/nnUNet/nnunet/training/network_training/nnUNet_variants/data_augmentation/nnUNetTrainerV2_noMirroring.py:
--------------------------------------------------------------------------------
1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 |
16 | from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
17 |
18 |
19 | class nnUNetTrainerV2_noMirroring(nnUNetTrainerV2):
20 | def validate(self, do_mirroring: bool = True, use_sliding_window: bool = True,
21 | step_size: float = 0.5, save_softmax: bool = True, use_gaussian: bool = True, overwrite: bool = True,
22 | validation_folder_name: str = 'validation_raw', debug: bool = False, all_in_gpu: bool = False,
23 | segmentation_export_kwargs: dict = None):
24 | """
25 | We need to wrap this because we need to enforce self.network.do_ds = False for prediction
26 |
27 | :param do_mirroring:
28 | :param use_sliding_window:
29 | :param step_size:
30 | :param save_softmax:
31 | :param use_gaussian:
32 | :param overwrite:
33 | :param validation_folder_name:
34 | :param debug:
35 | :param all_in_gpu:
36 | :return:
37 | """
38 | ds = self.network.do_ds
39 | if do_mirroring:
40 | print("WARNING! do_mirroring was True but we cannot do that because we trained without mirroring. "
41 | "do_mirroring was set to False")
42 | do_mirroring = False
43 | self.network.do_ds = False
44 | ret = super().validate(do_mirroring=do_mirroring, use_sliding_window=use_sliding_window, step_size=step_size,
45 | save_softmax=save_softmax, use_gaussian=use_gaussian,
46 | overwrite=overwrite, validation_folder_name=validation_folder_name, debug=debug,
47 | all_in_gpu=all_in_gpu, segmentation_export_kwargs=segmentation_export_kwargs)
48 | self.network.do_ds = ds
49 | return ret
50 |
51 | def setup_DA_params(self):
52 | super().setup_DA_params()
53 | self.data_aug_params["do_mirror"] = False
54 |
--------------------------------------------------------------------------------
/nnUNet/nnunet/training/network_training/nnUNet_variants/loss_function/nnUNetTrainerV2_ForceBD.py:
--------------------------------------------------------------------------------
1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 |
16 | from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
17 |
18 |
19 | class nnUNetTrainerV2_ForceBD(nnUNetTrainerV2):
20 | def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
21 | unpack_data=True, deterministic=True, fp16=False):
22 | batch_dice = True
23 | super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data,
24 | deterministic, fp16)
25 |
--------------------------------------------------------------------------------
/nnUNet/nnunet/training/network_training/nnUNet_variants/loss_function/nnUNetTrainerV2_ForceSD.py:
--------------------------------------------------------------------------------
1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 |
16 | from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
17 |
18 |
19 | class nnUNetTrainerV2_ForceSD(nnUNetTrainerV2):
20 | def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
21 | unpack_data=True, deterministic=True, fp16=False):
22 | batch_dice = False
23 | super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data,
24 | deterministic, fp16)
25 |
--------------------------------------------------------------------------------
/nnUNet/nnunet/training/network_training/nnUNet_variants/loss_function/nnUNetTrainerV2_Loss_CE.py:
--------------------------------------------------------------------------------
1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | from nnunet.training.loss_functions.crossentropy import RobustCrossEntropyLoss
15 | from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
16 |
17 |
18 | class nnUNetTrainerV2_Loss_CE(nnUNetTrainerV2):
19 | def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
20 | unpack_data=True, deterministic=True, fp16=False):
21 | super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data,
22 | deterministic, fp16)
23 | self.loss = RobustCrossEntropyLoss()
24 |
--------------------------------------------------------------------------------
/nnUNet/nnunet/training/network_training/nnUNet_variants/loss_function/nnUNetTrainerV2_Loss_CEGDL.py:
--------------------------------------------------------------------------------
1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 |
16 | from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
17 | from nnunet.training.loss_functions.dice_loss import GDL_and_CE_loss
18 |
19 |
20 | class nnUNetTrainerV2_Loss_CEGDL(nnUNetTrainerV2):
21 | def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
22 | unpack_data=True, deterministic=True, fp16=False):
23 | super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data,
24 | deterministic, fp16)
25 | self.loss = GDL_and_CE_loss({'batch_dice': self.batch_dice, 'smooth': 1e-5, 'do_bg': False}, {})
26 |
--------------------------------------------------------------------------------
/nnUNet/nnunet/training/network_training/nnUNet_variants/loss_function/nnUNetTrainerV2_Loss_Dice.py:
--------------------------------------------------------------------------------
1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 |
16 | from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
17 | from nnunet.training.loss_functions.dice_loss import SoftDiceLoss
18 | from nnunet.utilities.nd_softmax import softmax_helper
19 |
20 |
21 | class nnUNetTrainerV2_Loss_Dice(nnUNetTrainerV2):
22 | def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
23 | unpack_data=True, deterministic=True, fp16=False):
24 | super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data,
25 | deterministic, fp16)
26 | self.loss = SoftDiceLoss(**{'apply_nonlin': softmax_helper, 'batch_dice': self.batch_dice, 'smooth': 1e-5, 'do_bg': False})
27 |
28 |
29 | class nnUNetTrainerV2_Loss_DicewithBG(nnUNetTrainerV2):
30 | def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
31 | unpack_data=True, deterministic=True, fp16=False):
32 | super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data,
33 | deterministic, fp16)
34 | self.loss = SoftDiceLoss(**{'apply_nonlin': softmax_helper, 'batch_dice': self.batch_dice, 'smooth': 1e-5, 'do_bg': True})
35 |
36 |
--------------------------------------------------------------------------------
/nnUNet/nnunet/training/network_training/nnUNet_variants/loss_function/nnUNetTrainerV2_Loss_DiceTopK10.py:
--------------------------------------------------------------------------------
1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 |
16 | from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
17 | from nnunet.training.loss_functions.dice_loss import DC_and_topk_loss
18 |
19 |
20 | class nnUNetTrainerV2_Loss_DiceTopK10(nnUNetTrainerV2):
21 | def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
22 | unpack_data=True, deterministic=True, fp16=False):
23 | super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data,
24 | deterministic, fp16)
25 | self.loss = DC_and_topk_loss({'batch_dice': self.batch_dice, 'smooth': 1e-5, 'do_bg': False},
26 | {'k': 10})
27 |
--------------------------------------------------------------------------------
/nnUNet/nnunet/training/network_training/nnUNet_variants/loss_function/nnUNetTrainerV2_Loss_Dice_lr1en3.py:
--------------------------------------------------------------------------------
1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 |
16 | from nnunet.training.network_training.nnUNet_variants.loss_function.nnUNetTrainerV2_Loss_Dice import \
17 | nnUNetTrainerV2_Loss_Dice, nnUNetTrainerV2_Loss_DicewithBG
18 |
19 |
20 | class nnUNetTrainerV2_Loss_Dice_LR1en3(nnUNetTrainerV2_Loss_Dice):
21 | def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
22 | unpack_data=True, deterministic=True, fp16=False):
23 | super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data,
24 | deterministic, fp16)
25 | self.initial_lr = 1e-3
26 |
27 |
28 | class nnUNetTrainerV2_Loss_DicewithBG_LR1en3(nnUNetTrainerV2_Loss_DicewithBG):
29 | def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
30 | unpack_data=True, deterministic=True, fp16=False):
31 | super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data,
32 | deterministic, fp16)
33 | self.initial_lr = 1e-3
34 |
35 |
--------------------------------------------------------------------------------
/nnUNet/nnunet/training/network_training/nnUNet_variants/loss_function/nnUNetTrainerV2_Loss_Dice_squared.py:
--------------------------------------------------------------------------------
1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 |
16 | from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
17 | from nnunet.training.loss_functions.dice_loss import SoftDiceLossSquared
18 | from nnunet.utilities.nd_softmax import softmax_helper
19 |
20 |
21 | class nnUNetTrainerV2_Loss_Dice_squared(nnUNetTrainerV2):
22 | def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
23 | unpack_data=True, deterministic=True, fp16=False):
24 | super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data,
25 | deterministic, fp16)
26 | self.initial_lr = 1e-3
27 | self.loss = SoftDiceLossSquared(**{'apply_nonlin': softmax_helper, 'batch_dice': self.batch_dice, 'smooth': 1e-5, 'do_bg': False})
28 |
--------------------------------------------------------------------------------
/nnUNet/nnunet/training/network_training/nnUNet_variants/loss_function/nnUNetTrainerV2_Loss_MCC.py:
--------------------------------------------------------------------------------
1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 |
16 | from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
17 | from nnunet.training.loss_functions.dice_loss import MCCLoss
18 | from nnunet.utilities.nd_softmax import softmax_helper
19 |
20 |
21 | class nnUNetTrainerV2_Loss_MCC(nnUNetTrainerV2):
22 | def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
23 | unpack_data=True, deterministic=True, fp16=False):
24 | super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data,
25 | deterministic, fp16)
26 | self.initial_lr = 1e-3
27 | self.loss = MCCLoss(apply_nonlin=softmax_helper, batch_mcc=self.batch_dice, do_bg=True, smooth=0.0)
28 |
29 |
30 | class nnUNetTrainerV2_Loss_MCCnoBG(nnUNetTrainerV2):
31 | def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
32 | unpack_data=True, deterministic=True, fp16=False):
33 | super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data,
34 | deterministic, fp16)
35 | self.initial_lr = 1e-3
36 | self.loss = MCCLoss(apply_nonlin=softmax_helper, batch_mcc=self.batch_dice, do_bg=False, smooth=0.0)
37 |
38 |
--------------------------------------------------------------------------------
/nnUNet/nnunet/training/network_training/nnUNet_variants/loss_function/nnUNetTrainerV2_Loss_TopK10.py:
--------------------------------------------------------------------------------
1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 |
16 | from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
17 | from nnunet.training.loss_functions.TopK_loss import TopKLoss
18 |
19 |
20 | class nnUNetTrainerV2_Loss_TopK10(nnUNetTrainerV2):
21 | def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
22 | unpack_data=True, deterministic=True, fp16=False):
23 | super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data,
24 | deterministic, fp16)
25 | self.loss = TopKLoss(k=10)
26 |
27 |
28 | nnUNetTrainerV2_Loss_TopK10_copy1 = nnUNetTrainerV2_Loss_TopK10
29 | nnUNetTrainerV2_Loss_TopK10_copy2 = nnUNetTrainerV2_Loss_TopK10
30 | nnUNetTrainerV2_Loss_TopK10_copy3 = nnUNetTrainerV2_Loss_TopK10
31 | nnUNetTrainerV2_Loss_TopK10_copy4 = nnUNetTrainerV2_Loss_TopK10
32 |
33 |
--------------------------------------------------------------------------------
/nnUNet/nnunet/training/network_training/nnUNet_variants/loss_function/nnUNetTrainerV2_graduallyTransitionFromCEToDice.py:
--------------------------------------------------------------------------------
1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 |
16 | from nnunet.training.loss_functions.deep_supervision import MultipleOutputLoss2
17 | from nnunet.training.loss_functions.dice_loss import DC_and_CE_loss
18 | from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
19 |
20 |
21 | class nnUNetTrainerV2_graduallyTransitionFromCEToDice(nnUNetTrainerV2):
22 | def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
23 | unpack_data=True, deterministic=True, fp16=False):
24 | super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data,
25 | deterministic, fp16)
26 | self.loss = DC_and_CE_loss({'batch_dice': self.batch_dice, 'smooth': 1e-5, 'do_bg': False}, {}, weight_ce=2, weight_dice=0)
27 |
28 | def update_loss(self):
29 | # we train the first 500 epochs with CE, then transition to Dice between 500 and 750. The last 250 epochs will be Dice only
30 |
31 | if self.epoch <= 500:
32 | weight_ce = 2
33 | weight_dice = 0
34 | elif 500 < self.epoch <= 750:
35 | weight_ce = 2 - 2 / 250 * (self.epoch - 500)
36 | weight_dice = 0 + 2 / 250 * (self.epoch - 500)
37 | elif 750 < self.epoch <= self.max_num_epochs:
38 | weight_ce = 0
39 | weight_dice = 2
40 | else:
41 | raise RuntimeError("Invalid epoch: %d" % self.epoch)
42 |
43 | self.print_to_log_file("weight ce", weight_ce, "weight dice", weight_dice)
44 |
45 | self.loss = DC_and_CE_loss({'batch_dice': self.batch_dice, 'smooth': 1e-5, 'do_bg': False}, {}, weight_ce=weight_ce,
46 | weight_dice=weight_dice)
47 |
48 | self.loss = MultipleOutputLoss2(self.loss, self.ds_loss_weights)
49 |
50 | def on_epoch_end(self):
51 | ret = super().on_epoch_end()
52 | self.update_loss()
53 | return ret
54 |
55 | def load_checkpoint_ram(self, checkpoint, train=True):
56 | ret = super().load_checkpoint_ram(checkpoint, train)
57 | self.update_loss()
58 | return ret
59 |
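
[Editor's illustration, not part of the repository] A minimal standalone sketch of the weight schedule that update_loss implements above, assuming the default 1000-epoch training length:

def ce_dice_weights(epoch, max_num_epochs=1000):
    # mirrors update_loss above: pure CE until 500, linear CE->Dice transition until 750, pure Dice afterwards
    if epoch <= 500:
        return 2.0, 0.0
    elif epoch <= 750:
        w = 2 / 250 * (epoch - 500)
        return 2.0 - w, w
    elif epoch <= max_num_epochs:
        return 0.0, 2.0
    raise RuntimeError("Invalid epoch: %d" % epoch)

for e in (0, 500, 625, 750, 1000):
    print(e, ce_dice_weights(e))  # (2.0, 0.0), (2.0, 0.0), (1.0, 1.0), (0.0, 2.0), (0.0, 2.0)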
--------------------------------------------------------------------------------
/nnUNet/nnunet/training/network_training/nnUNet_variants/nnUNetTrainerCE.py:
--------------------------------------------------------------------------------
1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | from nnunet.training.loss_functions.crossentropy import RobustCrossEntropyLoss
15 | from nnunet.training.network_training.nnUNetTrainer import nnUNetTrainer
16 |
17 |
18 | class nnUNetTrainerCE(nnUNetTrainer):
19 | def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
20 | unpack_data=True, deterministic=True, fp16=False):
21 | super(nnUNetTrainerCE, self).__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage,
22 | unpack_data, deterministic, fp16)
23 | self.loss = RobustCrossEntropyLoss()
24 |
--------------------------------------------------------------------------------
/nnUNet/nnunet/training/network_training/nnUNet_variants/optimizer_and_lr/nnUNetTrainerV2_Adam.py:
--------------------------------------------------------------------------------
1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 |
16 | import torch
17 | from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
18 |
19 |
20 | class nnUNetTrainerV2_Adam(nnUNetTrainerV2):
21 |
22 | def initialize_optimizer_and_scheduler(self):
23 | self.optimizer = torch.optim.Adam(self.network.parameters(), self.initial_lr, weight_decay=self.weight_decay, amsgrad=True)
24 | self.lr_scheduler = None
25 |
26 |
27 | nnUNetTrainerV2_Adam_copy1 = nnUNetTrainerV2_Adam
28 | nnUNetTrainerV2_Adam_copy2 = nnUNetTrainerV2_Adam
29 | nnUNetTrainerV2_Adam_copy3 = nnUNetTrainerV2_Adam
30 | nnUNetTrainerV2_Adam_copy4 = nnUNetTrainerV2_Adam
31 |
--------------------------------------------------------------------------------
/nnUNet/nnunet/training/network_training/nnUNet_variants/optimizer_and_lr/nnUNetTrainerV2_Adam_ReduceOnPlateau.py:
--------------------------------------------------------------------------------
1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 |
16 | import torch
17 | from nnunet.training.network_training.nnUNetTrainer import nnUNetTrainer
18 | from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
19 | from torch.optim import lr_scheduler
20 |
21 |
22 | class nnUNetTrainerV2_Adam_ReduceOnPlateau(nnUNetTrainerV2):
23 | """
24 |     Same LR schedule as nnUNetTrainer (ReduceLROnPlateau), but with Adam (amsgrad=True) and initial_lr = 3e-4
25 | """
26 | def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
27 | unpack_data=True, deterministic=True, fp16=False):
28 | super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data,
29 | deterministic, fp16)
30 | self.initial_lr = 3e-4
31 |
32 | def initialize_optimizer_and_scheduler(self):
33 | assert self.network is not None, "self.initialize_network must be called first"
34 | self.optimizer = torch.optim.Adam(self.network.parameters(), self.initial_lr, weight_decay=self.weight_decay,
35 | amsgrad=True)
36 | self.lr_scheduler = lr_scheduler.ReduceLROnPlateau(self.optimizer, mode='min', factor=0.2,
37 | patience=self.lr_scheduler_patience,
38 | verbose=True, threshold=self.lr_scheduler_eps,
39 | threshold_mode="abs")
40 |
41 | def maybe_update_lr(self, epoch=None):
42 | # maybe update learning rate
43 | if self.lr_scheduler is not None:
44 | assert isinstance(self.lr_scheduler, (lr_scheduler.ReduceLROnPlateau, lr_scheduler._LRScheduler))
45 |
46 | if isinstance(self.lr_scheduler, lr_scheduler.ReduceLROnPlateau):
47 |                 # lr scheduler is updated with the moving average of the training loss (train_loss_MA). should be more robust
48 | if self.epoch > 0 and self.train_loss_MA is not None: # otherwise self.train_loss_MA is None
49 | self.lr_scheduler.step(self.train_loss_MA)
50 | else:
51 | self.lr_scheduler.step(self.epoch + 1)
52 | self.print_to_log_file("lr is now (scheduler) %s" % str(self.optimizer.param_groups[0]['lr']))
53 |
54 | def on_epoch_end(self):
55 | return nnUNetTrainer.on_epoch_end(self)
56 |
--------------------------------------------------------------------------------
/nnUNet/nnunet/training/network_training/nnUNet_variants/optimizer_and_lr/nnUNetTrainerV2_Adam_lr_3en4.py:
--------------------------------------------------------------------------------
1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 |
16 | from nnunet.training.network_training.nnUNet_variants.optimizer_and_lr.nnUNetTrainerV2_Adam import nnUNetTrainerV2_Adam
17 |
18 |
19 | class nnUNetTrainerV2_Adam_nnUNetTrainerlr(nnUNetTrainerV2_Adam):
20 | def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
21 | unpack_data=True, deterministic=True, fp16=False):
22 | super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data,
23 | deterministic, fp16)
24 | self.initial_lr = 3e-4
25 |
--------------------------------------------------------------------------------
/nnUNet/nnunet/training/network_training/nnUNet_variants/optimizer_and_lr/nnUNetTrainerV2_Ranger_lr1en2.py:
--------------------------------------------------------------------------------
1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 |
16 | from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
17 | from nnunet.training.optimizer.ranger import Ranger
18 |
19 |
20 | class nnUNetTrainerV2_Ranger_lr1en2(nnUNetTrainerV2):
21 | def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
22 | unpack_data=True, deterministic=True, fp16=False):
23 | super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data,
24 | deterministic, fp16)
25 | self.initial_lr = 1e-2
26 |
27 | def initialize_optimizer_and_scheduler(self):
28 | self.optimizer = Ranger(self.network.parameters(), self.initial_lr, k=6, N_sma_threshhold=5,
29 | weight_decay=self.weight_decay)
30 | self.lr_scheduler = None
31 |
32 |
--------------------------------------------------------------------------------
/nnUNet/nnunet/training/network_training/nnUNet_variants/optimizer_and_lr/nnUNetTrainerV2_Ranger_lr3en3.py:
--------------------------------------------------------------------------------
1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 |
16 | from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
17 | from nnunet.training.optimizer.ranger import Ranger
18 |
19 |
20 | class nnUNetTrainerV2_Ranger_lr3en3(nnUNetTrainerV2):
21 | def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
22 | unpack_data=True, deterministic=True, fp16=False):
23 | super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data,
24 | deterministic, fp16)
25 | self.initial_lr = 3e-3
26 |
27 | def initialize_optimizer_and_scheduler(self):
28 | self.optimizer = Ranger(self.network.parameters(), self.initial_lr, k=6, N_sma_threshhold=5,
29 | weight_decay=self.weight_decay)
30 | self.lr_scheduler = None
31 |
32 |
--------------------------------------------------------------------------------
/nnUNet/nnunet/training/network_training/nnUNet_variants/optimizer_and_lr/nnUNetTrainerV2_Ranger_lr3en4.py:
--------------------------------------------------------------------------------
1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 |
16 | from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
17 | from nnunet.training.optimizer.ranger import Ranger
18 |
19 |
20 | class nnUNetTrainerV2_Ranger_lr3en4(nnUNetTrainerV2):
21 | def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
22 | unpack_data=True, deterministic=True, fp16=False):
23 | super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data,
24 | deterministic, fp16)
25 | self.initial_lr = 3e-4
26 |
27 | def initialize_optimizer_and_scheduler(self):
28 | self.optimizer = Ranger(self.network.parameters(), self.initial_lr, k=6, N_sma_threshhold=5,
29 | weight_decay=self.weight_decay)
30 | self.lr_scheduler = None
31 |
32 |
--------------------------------------------------------------------------------
/nnUNet/nnunet/training/network_training/nnUNet_variants/optimizer_and_lr/nnUNetTrainerV2_SGD_ReduceOnPlateau.py:
--------------------------------------------------------------------------------
1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 |
16 | import torch
17 | from nnunet.training.network_training.nnUNetTrainer import nnUNetTrainer
18 | from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
19 | from torch.optim import lr_scheduler
20 |
21 |
22 | class nnUNetTrainerV2_SGD_ReduceOnPlateau(nnUNetTrainerV2):
23 | def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
24 | unpack_data=True, deterministic=True, fp16=False):
25 | super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data,
26 | deterministic, fp16)
27 |
28 | def initialize_optimizer_and_scheduler(self):
29 | self.optimizer = torch.optim.SGD(self.network.parameters(), self.initial_lr, weight_decay=self.weight_decay,
30 | momentum=0.99, nesterov=True)
31 | self.lr_scheduler = lr_scheduler.ReduceLROnPlateau(self.optimizer, mode='min', factor=0.2,
32 | patience=self.lr_scheduler_patience,
33 | verbose=True, threshold=self.lr_scheduler_eps,
34 | threshold_mode="abs")
35 |
36 | def maybe_update_lr(self, epoch=None):
37 | # maybe update learning rate
38 | if self.lr_scheduler is not None:
39 | assert isinstance(self.lr_scheduler, (lr_scheduler.ReduceLROnPlateau, lr_scheduler._LRScheduler))
40 |
41 | if isinstance(self.lr_scheduler, lr_scheduler.ReduceLROnPlateau):
42 |                 # lr scheduler is updated with the moving average of the training loss (train_loss_MA). should be more robust
43 | if self.epoch > 0: # otherwise self.train_loss_MA is None
44 | self.lr_scheduler.step(self.train_loss_MA)
45 | else:
46 | self.lr_scheduler.step(self.epoch + 1)
47 | self.print_to_log_file("lr is now (scheduler) %s" % str(self.optimizer.param_groups[0]['lr']))
48 |
49 | def on_epoch_end(self):
50 | return nnUNetTrainer.on_epoch_end(self)
51 |
--------------------------------------------------------------------------------
/nnUNet/nnunet/training/network_training/nnUNet_variants/optimizer_and_lr/nnUNetTrainerV2_SGD_fixedSchedule.py:
--------------------------------------------------------------------------------
1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 |
16 | from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
17 |
18 |
19 | class nnUNetTrainerV2_SGD_fixedSchedule(nnUNetTrainerV2):
20 | def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
21 | unpack_data=True, deterministic=True, fp16=False):
22 | super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data,
23 | deterministic, fp16)
24 |
25 | def maybe_update_lr(self, epoch=None):
26 | if epoch is None:
27 | ep = self.epoch + 1
28 | else:
29 | ep = epoch
30 |
31 | if 0 <= ep < 500:
32 | new_lr = self.initial_lr
33 | elif 500 <= ep < 675:
34 | new_lr = self.initial_lr * 0.1
35 | elif 675 <= ep < 850:
36 | new_lr = self.initial_lr * 0.01
37 | elif ep >= 850:
38 | new_lr = self.initial_lr * 0.001
39 | else:
40 | raise RuntimeError("Really unexpected things happened, ep=%d" % ep)
41 |
42 | self.optimizer.param_groups[0]['lr'] = new_lr
43 | self.print_to_log_file("lr:", self.optimizer.param_groups[0]['lr'])
44 |
--------------------------------------------------------------------------------
/nnUNet/nnunet/training/network_training/nnUNet_variants/optimizer_and_lr/nnUNetTrainerV2_SGD_fixedSchedule2.py:
--------------------------------------------------------------------------------
1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 |
16 | from nnunet.training.learning_rate.poly_lr import poly_lr
17 | from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
18 |
19 |
20 | class nnUNetTrainerV2_SGD_fixedSchedule2(nnUNetTrainerV2):
21 | def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
22 | unpack_data=True, deterministic=True, fp16=False):
23 | super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data,
24 | deterministic, fp16)
25 |
26 | def maybe_update_lr(self, epoch=None):
27 | """
28 |         fixed steps first (1x initial_lr until epoch 500, 0.1x until epoch 675), then polyLR for the remainder
29 | :param epoch:
30 | :return:
31 | """
32 | if epoch is None:
33 | ep = self.epoch + 1
34 | else:
35 | ep = epoch
36 |
37 | if 0 <= ep < 500:
38 | new_lr = self.initial_lr
39 | elif 500 <= ep < 675:
40 | new_lr = self.initial_lr * 0.1
41 | elif ep >= 675:
42 | new_lr = poly_lr(ep - 675, self.max_num_epochs - 675, self.initial_lr * 0.1, 0.9)
43 | else:
44 | raise RuntimeError("Really unexpected things happened, ep=%d" % ep)
45 |
46 | self.optimizer.param_groups[0]['lr'] = new_lr
47 | self.print_to_log_file("lr:", self.optimizer.param_groups[0]['lr'])
48 |
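
[Editor's note] poly_lr is imported from nnunet.training.learning_rate.poly_lr and is not shown here. A minimal sketch, assuming it implements the conventional polynomial decay (check the actual module for the exact code); poly_lr_sketch and the default values below are assumptions for illustration only:

def poly_lr_sketch(epoch, max_epochs, initial_lr, exponent=0.9):
    # conventional "poly" schedule: decays from initial_lr towards 0 over max_epochs
    return initial_lr * (1 - epoch / max_epochs) ** exponent

initial_lr, max_num_epochs = 1e-2, 1000  # assumed nnUNetTrainerV2 defaults
for ep in (675, 800, 999):
    print(ep, poly_lr_sketch(ep - 675, max_num_epochs - 675, initial_lr * 0.1))
# at epoch 675 the lr is exactly initial_lr * 0.1, then it decays polynomially towards 0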
--------------------------------------------------------------------------------
/nnUNet/nnunet/training/network_training/nnUNet_variants/optimizer_and_lr/nnUNetTrainerV2_SGD_lrs.py:
--------------------------------------------------------------------------------
1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 |
16 | from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
17 |
18 |
19 | class nnUNetTrainerV2_SGD_lr1en1(nnUNetTrainerV2):
20 | def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
21 | unpack_data=True, deterministic=True, fp16=False):
22 | super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data,
23 | deterministic, fp16)
24 | self.initial_lr = 1e-1
25 |
26 |
27 | class nnUNetTrainerV2_SGD_lr1en3(nnUNetTrainerV2):
28 | def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
29 | unpack_data=True, deterministic=True, fp16=False):
30 | super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data,
31 | deterministic, fp16)
32 | self.initial_lr = 1e-3
33 |
34 |
--------------------------------------------------------------------------------
/nnUNet/nnunet/training/network_training/nnUNet_variants/optimizer_and_lr/nnUNetTrainerV2_fp16.py:
--------------------------------------------------------------------------------
1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 |
16 | from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
17 |
18 |
19 | class nnUNetTrainerV2_fp16(nnUNetTrainerV2):
20 | def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
21 | unpack_data=True, deterministic=True, fp16=False):
22 | assert fp16, "This one only accepts fp16=True"
23 | super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data,
24 | deterministic, fp16)
25 |
--------------------------------------------------------------------------------
/nnUNet/nnunet/training/network_training/nnUNet_variants/optimizer_and_lr/nnUNetTrainerV2_momentum09.py:
--------------------------------------------------------------------------------
1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 |
16 | import torch
17 |
18 | from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
19 |
20 |
21 | class nnUNetTrainerV2_momentum09(nnUNetTrainerV2):
22 | def initialize_optimizer_and_scheduler(self):
23 | assert self.network is not None, "self.initialize_network must be called first"
24 | self.optimizer = torch.optim.SGD(self.network.parameters(), self.initial_lr, weight_decay=self.weight_decay,
25 | momentum=0.9, nesterov=True)
26 | self.lr_scheduler = None
27 |
--------------------------------------------------------------------------------
/nnUNet/nnunet/training/network_training/nnUNet_variants/optimizer_and_lr/nnUNetTrainerV2_momentum095.py:
--------------------------------------------------------------------------------
1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 |
16 | import torch
17 |
18 | from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
19 |
20 |
21 | class nnUNetTrainerV2_momentum095(nnUNetTrainerV2):
22 | def initialize_optimizer_and_scheduler(self):
23 | assert self.network is not None, "self.initialize_network must be called first"
24 | self.optimizer = torch.optim.SGD(self.network.parameters(), self.initial_lr, weight_decay=self.weight_decay,
25 | momentum=0.95, nesterov=True)
26 | self.lr_scheduler = None
27 |
--------------------------------------------------------------------------------
/nnUNet/nnunet/training/network_training/nnUNet_variants/optimizer_and_lr/nnUNetTrainerV2_momentum098.py:
--------------------------------------------------------------------------------
1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 |
16 | import torch
17 |
18 | from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
19 |
20 |
21 | class nnUNetTrainerV2_momentum098(nnUNetTrainerV2):
22 | def initialize_optimizer_and_scheduler(self):
23 | assert self.network is not None, "self.initialize_network must be called first"
24 | self.optimizer = torch.optim.SGD(self.network.parameters(), self.initial_lr, weight_decay=self.weight_decay,
25 | momentum=0.98, nesterov=True)
26 | self.lr_scheduler = None
27 |
--------------------------------------------------------------------------------
/nnUNet/nnunet/training/network_training/nnUNet_variants/optimizer_and_lr/nnUNetTrainerV2_momentum09in2D.py:
--------------------------------------------------------------------------------
1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 |
16 | import torch
17 | from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
18 |
19 |
20 | class nnUNetTrainerV2_momentum09in2D(nnUNetTrainerV2):
21 | def initialize_optimizer_and_scheduler(self):
22 | if self.threeD:
23 | momentum = 0.99
24 | else:
25 | momentum = 0.9
26 | assert self.network is not None, "self.initialize_network must be called first"
27 | self.optimizer = torch.optim.SGD(self.network.parameters(), self.initial_lr, weight_decay=self.weight_decay,
28 | momentum=momentum, nesterov=True)
29 | self.lr_scheduler = None
30 |
--------------------------------------------------------------------------------
/nnUNet/nnunet/training/network_training/nnUNet_variants/optimizer_and_lr/nnUNetTrainerV2_reduceMomentumDuringTraining.py:
--------------------------------------------------------------------------------
1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 |
16 | import torch
17 |
18 | from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
19 |
20 |
21 | class nnUNetTrainerV2_reduceMomentumDuringTraining(nnUNetTrainerV2):
22 | """
23 |     This implementation will not work with an LR scheduler!
24 |
25 | After epoch 800, linearly decrease momentum from 0.99 to 0.9
26 | """
27 | def initialize_optimizer_and_scheduler(self):
28 | current_momentum = 0.99
29 | min_momentum = 0.9
30 |
31 | if self.epoch > 800:
32 | current_momentum = current_momentum - (current_momentum - min_momentum) / 200 * (self.epoch - 800)
33 |
34 | self.print_to_log_file("current momentum", current_momentum)
35 | assert self.network is not None, "self.initialize_network must be called first"
36 | if self.optimizer is None:
37 | self.optimizer = torch.optim.SGD(self.network.parameters(), self.initial_lr, weight_decay=self.weight_decay,
38 | momentum=0.99, nesterov=True)
39 | else:
40 | # can't reinstantiate because that would break NVIDIA AMP
41 | self.optimizer.param_groups[0]["momentum"] = current_momentum
42 | self.lr_scheduler = None
43 |
44 | def on_epoch_end(self):
45 | self.initialize_optimizer_and_scheduler()
46 | return super().on_epoch_end()
47 |
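
[Editor's illustration, not part of the repository] The momentum values produced by the rule above at a few epochs:

for epoch in (800, 900, 1000):
    m = 0.99 if epoch <= 800 else 0.99 - (0.99 - 0.9) / 200 * (epoch - 800)
    print(epoch, round(m, 3))  # 800 -> 0.99, 900 -> 0.945, 1000 -> 0.9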
--------------------------------------------------------------------------------
/nnUNet/nnunet/training/network_training/nnUNet_variants/optimizer_and_lr/nnUNetTrainerV2_warmup.py:
--------------------------------------------------------------------------------
1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 |
16 | from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
17 |
18 |
19 | class nnUNetTrainerV2_warmup(nnUNetTrainerV2):
20 | def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
21 | unpack_data=True, deterministic=True, fp16=False):
22 | super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data,
23 | deterministic, fp16)
24 | self.max_num_epochs = 1050
25 |
26 | def maybe_update_lr(self, epoch=None):
27 | if self.epoch < 50:
28 |             # warm-up: ramp the lr linearly up to initial_lr over the first 50 epochs
29 |             # (epoch 49 is the last warm-up epoch and uses the full initial_lr)
30 | lr = (self.epoch + 1) / 50 * self.initial_lr
31 | self.optimizer.param_groups[0]['lr'] = lr
32 | self.print_to_log_file("epoch:", self.epoch, "lr:", lr)
33 | else:
34 | if epoch is not None:
35 | ep = epoch - 49
36 | else:
37 | ep = self.epoch - 49
38 | assert ep > 0, "epoch must be >0"
39 | return super().maybe_update_lr(ep)
40 |
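
[Editor's illustration, not part of the repository] The warm-up lr applied during the first 50 epochs, assuming the 1e-2 default initial_lr of nnUNetTrainerV2:

initial_lr = 1e-2  # assumed default
for epoch in (0, 24, 49):
    print(epoch, (epoch + 1) / 50 * initial_lr)  # 0.0002, 0.005, 0.01
# from epoch 50 on, the parent poly schedule runs on the shifted counter ep = epoch - 49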
--------------------------------------------------------------------------------
/nnUNet/nnunet/training/network_training/nnUNet_variants/resampling/nnUNetTrainerV2_resample33.py:
--------------------------------------------------------------------------------
1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 |
16 | from nnunet.inference.segmentation_export import save_segmentation_nifti_from_softmax
17 | from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
18 |
19 |
20 | class nnUNetTrainerV2_resample33(nnUNetTrainerV2):
21 | def validate(self, do_mirroring: bool = True, use_sliding_window: bool = True,
22 | step_size: float = 0.5, save_softmax: bool = True, use_gaussian: bool = True, overwrite: bool = True,
23 | validation_folder_name: str = 'validation_raw', debug: bool = False, all_in_gpu: bool = False,
24 | segmentation_export_kwargs: dict = None):
25 | return super().validate(do_mirroring, use_sliding_window, step_size, save_softmax, use_gaussian,
26 | overwrite, validation_folder_name, debug, all_in_gpu, segmentation_export_kwargs)
27 |
28 | def preprocess_predict_nifti(self, input_files, output_file=None, softmax_ouput_file=None,
29 | mixed_precision: bool = True):
30 | """
31 | Use this to predict new data
32 | :param input_files:
33 | :param output_file:
34 | :param softmax_ouput_file:
35 | :param mixed_precision:
36 | :return:
37 | """
38 | print("preprocessing...")
39 | d, s, properties = self.preprocess_patient(input_files)
40 | print("predicting...")
41 | pred = self.predict_preprocessed_data_return_seg_and_softmax(d, do_mirroring=self.data_aug_params["do_mirror"],
42 | mirror_axes=self.data_aug_params['mirror_axes'],
43 | use_sliding_window=True, step_size=0.5,
44 | use_gaussian=True, pad_border_mode='constant',
45 | pad_kwargs={'constant_values': 0},
46 | all_in_gpu=True,
47 | mixed_precision=mixed_precision)[1]
48 | pred = pred.transpose([0] + [i + 1 for i in self.transpose_backward])
49 |
50 | print("resampling to original spacing and nifti export...")
51 | save_segmentation_nifti_from_softmax(pred, output_file, properties, 3, None, None, None, softmax_ouput_file,
52 | None, force_separate_z=False, interpolation_order_z=3)
53 | print("done")
54 |
--------------------------------------------------------------------------------
/nnUNet/nnunet/utilities/__init__.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | from . import *
--------------------------------------------------------------------------------
/nnUNet/nnunet/utilities/distributed.py:
--------------------------------------------------------------------------------
1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 |
16 | import torch
17 | from torch import distributed
18 | from torch import autograd
19 | from torch.nn.parallel import DistributedDataParallel as DDP
20 |
21 |
22 | def print_if_rank0(*args):
23 | if distributed.get_rank() == 0:
24 | print(*args)
25 |
26 |
27 | class awesome_allgather_function(autograd.Function):
28 | @staticmethod
29 | def forward(ctx, input):
30 | world_size = distributed.get_world_size()
31 |         # create a destination list for the allgather: one buffer per rank in the process group
32 | allgather_list = [torch.empty_like(input) for _ in range(world_size)]
33 | #if distributed.get_rank() == 0:
34 | # import IPython;IPython.embed()
35 | distributed.all_gather(allgather_list, input)
36 | return torch.cat(allgather_list, dim=0)
37 |
38 | @staticmethod
39 | def backward(ctx, grad_output):
40 | #print_if_rank0("backward grad_output len", len(grad_output))
41 | #print_if_rank0("backward grad_output shape", grad_output.shape)
42 | grads_per_rank = grad_output.shape[0] // distributed.get_world_size()
43 | rank = distributed.get_rank()
44 | # We'll receive gradients for the entire catted forward output, so to mimic DataParallel,
45 | # return only the slice that corresponds to this process's input:
46 | sl = slice(rank * grads_per_rank, (rank + 1) * grads_per_rank)
47 | #print("worker", rank, "backward slice", sl)
48 | return grad_output[sl]
49 |
50 |
51 | if __name__ == "__main__":
52 | import torch.distributed as dist
53 | import argparse
54 | from torch import nn
55 | from torch.optim import Adam
56 |
57 | argumentparser = argparse.ArgumentParser()
58 | argumentparser.add_argument("--local_rank", type=int)
59 | args = argumentparser.parse_args()
60 |
61 | torch.cuda.set_device(args.local_rank)
62 | dist.init_process_group(backend='nccl', init_method='env://')
63 |
64 | rnd = torch.rand((5, 2)).cuda()
65 |
66 | rnd_gathered = awesome_allgather_function.apply(rnd)
67 |     print("gathering random tensors\nbefore\n", rnd, "\nafter\n", rnd_gathered)
68 |
69 | # so far this works as expected
70 | print("now running a DDP model")
71 | c = nn.Conv2d(2, 3, 3, 1, 1, 1, 1, True).cuda()
72 | c = DDP(c)
73 | opt = Adam(c.parameters())
74 |
75 | bs = 5
76 | if dist.get_rank() == 0:
77 | bs = 4
78 | inp = torch.rand((bs, 2, 5, 5)).cuda()
79 |
80 | out = c(inp)
81 | print("output_shape", out.shape)
82 |
83 | out_gathered = awesome_allgather_function.apply(out)
84 | print("output_shape_after_gather", out_gathered.shape)
85 | # this also works
86 |
87 | loss = out_gathered.sum()
88 | loss.backward()
89 | opt.step()
90 |
--------------------------------------------------------------------------------
/nnUNet/nnunet/utilities/file_endings.py:
--------------------------------------------------------------------------------
1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 |
16 | from batchgenerators.utilities.file_and_folder_operations import *
17 |
18 |
19 | def remove_trailing_slash(filename: str):
20 | while filename.endswith('/'):
21 | filename = filename[:-1]
22 | return filename
23 |
24 |
25 | def maybe_add_0000_to_all_niigz(folder):
26 | nii_gz = subfiles(folder, suffix='.nii.gz')
27 | for n in nii_gz:
28 | n = remove_trailing_slash(n)
29 | if not n.endswith('_0000.nii.gz'):
30 | os.rename(n, n[:-7] + '_0000.nii.gz')
31 |
--------------------------------------------------------------------------------
/nnUNet/nnunet/utilities/folder_names.py:
--------------------------------------------------------------------------------
1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 |
16 | from batchgenerators.utilities.file_and_folder_operations import *
17 | from nnunet.paths import network_training_output_dir
18 |
19 |
20 | def get_output_folder_name(model: str, task: str = None, trainer: str = None, plans: str = None, fold: int = None,
21 | overwrite_training_output_dir: str = None):
22 | """
23 | Retrieves the correct output directory for the nnU-Net model described by the input parameters
24 |
25 | :param model:
26 | :param task:
27 | :param trainer:
28 | :param plans:
29 | :param fold:
30 | :param overwrite_training_output_dir:
31 | :return:
32 | """
33 | assert model in ["2d", "3d_cascade_fullres", '3d_fullres', '3d_lowres']
34 |
35 | if overwrite_training_output_dir is not None:
36 | tr_dir = overwrite_training_output_dir
37 | else:
38 | tr_dir = network_training_output_dir
39 |
40 | current = join(tr_dir, model)
41 | if task is not None:
42 | current = join(current, task)
43 | if trainer is not None and plans is not None:
44 | current = join(current, trainer + "__" + plans)
45 | if fold is not None:
46 | current = join(current, "fold_%d" % fold)
47 | return current
48 |
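
[Editor's note] A hypothetical usage sketch, not part of the repository; the prefix depends on network_training_output_dir, and the task/plans names are only examples:

# get_output_folder_name("3d_fullres", "Task005_Prostate", "nnUNetTrainerV2", "nnUNetPlansv2.1", fold=0)
# -> <network_training_output_dir>/3d_fullres/Task005_Prostate/nnUNetTrainerV2__nnUNetPlansv2.1/fold_0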
--------------------------------------------------------------------------------
/nnUNet/nnunet/utilities/nd_softmax.py:
--------------------------------------------------------------------------------
1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | import torch
16 | from torch import nn
17 | import torch.nn.functional as F
18 |
19 |
20 | softmax_helper = lambda x: F.softmax(x, 1)
21 |
22 |
--------------------------------------------------------------------------------
/nnUNet/nnunet/utilities/one_hot_encoding.py:
--------------------------------------------------------------------------------
1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | import numpy as np
16 |
17 |
18 | def to_one_hot(seg, all_seg_labels=None):
19 | if all_seg_labels is None:
20 | all_seg_labels = np.unique(seg)
21 | result = np.zeros((len(all_seg_labels), *seg.shape), dtype=seg.dtype)
22 | for i, l in enumerate(all_seg_labels):
23 | result[i][seg == l] = 1
24 | return result
25 |
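
[Editor's illustration, not part of the repository] to_one_hot stacks one binary channel per label along a new first axis:

import numpy as np

seg = np.array([[0, 1],
                [2, 1]])
onehot = to_one_hot(seg, all_seg_labels=[0, 1, 2])
print(onehot.shape)  # (3, 2, 2): one channel per label
print(onehot[1])     # channel for label 1: [[0 1] [0 1]]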
--------------------------------------------------------------------------------
/nnUNet/nnunet/utilities/random_stuff.py:
--------------------------------------------------------------------------------
1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 |
16 | class no_op(object):
17 | def __enter__(self):
18 | pass
19 |
20 | def __exit__(self, *args):
21 | pass
22 |
--------------------------------------------------------------------------------
/nnUNet/nnunet/utilities/recursive_delete_npz.py:
--------------------------------------------------------------------------------
1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 |
16 | from batchgenerators.utilities.file_and_folder_operations import *
17 | import argparse
18 | import os
19 |
20 |
21 | def recursive_delete_npz(current_directory: str):
22 | npz_files = subfiles(current_directory, join=True, suffix=".npz")
23 | npz_files = [i for i in npz_files if not i.endswith("segFromPrevStage.npz")] # to be extra safe
24 | _ = [os.remove(i) for i in npz_files]
25 | for d in subdirs(current_directory, join=False):
26 | if d != "pred_next_stage":
27 | recursive_delete_npz(join(current_directory, d))
28 |
29 |
30 | if __name__ == "__main__":
31 | parser = argparse.ArgumentParser(usage="USE THIS RESPONSIBLY! DANGEROUS! I (Fabian) use this to remove npz files "
32 | "after I ran figure_out_what_to_submit")
33 | parser.add_argument("-f", help="folder", required=True)
34 |
35 | args = parser.parse_args()
36 |
37 | recursive_delete_npz(args.f)
38 |
--------------------------------------------------------------------------------
/nnUNet/nnunet/utilities/recursive_rename_taskXX_to_taskXXX.py:
--------------------------------------------------------------------------------
1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 |
16 | from batchgenerators.utilities.file_and_folder_operations import *
17 | import os
18 |
19 |
20 | def recursive_rename(folder):
21 | s = subdirs(folder, join=False)
22 | for ss in s:
23 | if ss.startswith("Task") and ss.find("_") == 6:
24 | task_id = int(ss[4:6])
25 | name = ss[7:]
26 | os.rename(join(folder, ss), join(folder, "Task%03.0d_" % task_id + name))
27 | s = subdirs(folder, join=True)
28 | for ss in s:
29 | recursive_rename(ss)
30 |
31 | if __name__ == "__main__":
32 | recursive_rename("/media/fabian/Results/nnUNet")
33 | recursive_rename("/media/fabian/nnunet")
34 | recursive_rename("/media/fabian/My Book/MedicalDecathlon")
35 | recursive_rename("/home/fabian/drives/datasets/nnUNet_raw")
36 | recursive_rename("/home/fabian/drives/datasets/nnUNet_preprocessed")
37 | recursive_rename("/home/fabian/drives/datasets/nnUNet_testSets")
38 | recursive_rename("/home/fabian/drives/datasets/results/nnUNet")
39 | recursive_rename("/home/fabian/drives/e230-dgx2-1-data_fabian/Decathlon_raw")
40 | recursive_rename("/home/fabian/drives/e230-dgx2-1-data_fabian/nnUNet_preprocessed")
41 |
42 |
--------------------------------------------------------------------------------
/nnUNet/nnunet/utilities/sitk_stuff.py:
--------------------------------------------------------------------------------
1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 |
16 | import SimpleITK as sitk
17 |
18 |
19 | def copy_geometry(image: sitk.Image, ref: sitk.Image):
20 | image.SetOrigin(ref.GetOrigin())
21 | image.SetDirection(ref.GetDirection())
22 | image.SetSpacing(ref.GetSpacing())
23 | return image
24 |
--------------------------------------------------------------------------------
/nnUNet/nnunet/utilities/tensor_utilities.py:
--------------------------------------------------------------------------------
1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | import numpy as np
16 | import torch
17 | from torch import nn
18 |
19 |
20 | def sum_tensor(inp, axes, keepdim=False):
21 | axes = np.unique(axes).astype(int)
22 | if keepdim:
23 | for ax in axes:
24 | inp = inp.sum(int(ax), keepdim=True)
25 | else:
26 | for ax in sorted(axes, reverse=True):
27 | inp = inp.sum(int(ax))
28 | return inp
29 |
30 |
31 | def mean_tensor(inp, axes, keepdim=False):
32 | axes = np.unique(axes).astype(int)
33 | if keepdim:
34 | for ax in axes:
35 | inp = inp.mean(int(ax), keepdim=True)
36 | else:
37 | for ax in sorted(axes, reverse=True):
38 | inp = inp.mean(int(ax))
39 | return inp
40 |
41 |
42 | def flip(x, dim):
43 | """
44 | flips the tensor at dimension dim (mirroring!)
45 | :param x:
46 | :param dim:
47 | :return:
48 | """
49 | indices = [slice(None)] * x.dim()
50 | indices[dim] = torch.arange(x.size(dim) - 1, -1, -1,
51 | dtype=torch.long, device=x.device)
52 | return x[tuple(indices)]
53 |
54 |
55 |
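
[Editor's illustration, not part of the repository] sum_tensor reduces over several axes in one call, and flip mirrors a single dimension:

import torch

x = torch.arange(8, dtype=torch.float32).reshape(2, 2, 2)
print(sum_tensor(x, axes=(1, 2)).shape)                 # torch.Size([2])
print(sum_tensor(x, axes=(1, 2), keepdim=True).shape)   # torch.Size([2, 1, 1])
print(flip(torch.tensor([1, 2, 3]), dim=0))             # tensor([3, 2, 1])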
--------------------------------------------------------------------------------
/nnUNet/nnunet/utilities/to_torch.py:
--------------------------------------------------------------------------------
1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | import torch
16 |
17 |
18 | def maybe_to_torch(d):
19 | if isinstance(d, list):
20 | d = [maybe_to_torch(i) if not isinstance(i, torch.Tensor) else i for i in d]
21 | elif not isinstance(d, torch.Tensor):
22 | d = torch.from_numpy(d).float()
23 | return d
24 |
25 |
26 | def to_cuda(data, non_blocking=True, gpu_id=0):
27 | if isinstance(data, list):
28 | data = [i.cuda(gpu_id, non_blocking=non_blocking) for i in data]
29 | else:
30 |         data = data.cuda(gpu_id, non_blocking=non_blocking)
31 | return data
32 |
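A short usage sketch (array shapes are arbitrary): `maybe_to_torch` converts numpy arrays, or lists of them, to float32 tensors while leaving entries that are already `torch.Tensor` untouched; `to_cuda` then moves a tensor or a list of tensors to the selected GPU:

import numpy as np
import torch

batch = [np.random.rand(2, 1, 32, 32, 32), torch.zeros(2, 1, 32, 32, 32)]
batch = maybe_to_torch(batch)                     # both entries are now torch tensors
assert all(isinstance(b, torch.Tensor) for b in batch)

if torch.cuda.is_available():                     # guard so the sketch also runs on CPU-only machines
    batch = to_cuda(batch, non_blocking=True, gpu_id=0)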
--------------------------------------------------------------------------------
/nnUNet/setup.py:
--------------------------------------------------------------------------------
1 | from setuptools import setup, find_namespace_packages
2 |
3 | setup(name='nnunet',
4 | packages=find_namespace_packages(include=["nnunet", "nnunet.*"]),
5 | version='1.6.6',
6 |       description='nnU-Net. Framework for out-of-the-box biomedical image segmentation.',
7 | url='https://github.com/MIC-DKFZ/nnUNet',
8 | author='Division of Medical Image Computing, German Cancer Research Center',
9 | author_email='f.isensee@dkfz-heidelberg.de',
10 | license='Apache License Version 2.0, January 2004',
11 | install_requires=[
12 | "tqdm",
13 | "dicom2nifti",
14 | "scikit-image>=0.14",
15 | "medpy",
16 | "scipy",
17 | "batchgenerators>=0.21",
18 | "numpy",
19 |             "scikit-learn",
20 | "SimpleITK",
21 | "pandas",
22 | "requests",
23 |             "nibabel", "tifffile"
24 | ],
25 | entry_points={
26 | 'console_scripts': [
27 | 'nnUNet_convert_decathlon_task = nnunet.experiment_planning.nnUNet_convert_decathlon_task:main',
28 | 'nnUNet_plan_and_preprocess = nnunet.experiment_planning.nnUNet_plan_and_preprocess:main',
29 | 'nnUNet_train = nnunet.run.run_training:main',
30 | 'nnUNet_train_DP = nnunet.run.run_training_DP:main',
31 | 'nnUNet_train_DDP = nnunet.run.run_training_DDP:main',
32 | 'nnUNet_predict = nnunet.inference.predict_simple:main',
33 | 'nnUNet_ensemble = nnunet.inference.ensemble_predictions:main',
34 | 'nnUNet_find_best_configuration = nnunet.evaluation.model_selection.figure_out_what_to_submit:main',
35 | 'nnUNet_print_available_pretrained_models = nnunet.inference.pretrained_models.download_pretrained_model:print_available_pretrained_models',
36 | 'nnUNet_print_pretrained_model_info = nnunet.inference.pretrained_models.download_pretrained_model:print_pretrained_model_requirements',
37 | 'nnUNet_download_pretrained_model = nnunet.inference.pretrained_models.download_pretrained_model:download_by_name',
38 | 'nnUNet_download_pretrained_model_by_url = nnunet.inference.pretrained_models.download_pretrained_model:download_by_url',
39 | 'nnUNet_determine_postprocessing = nnunet.postprocessing.consolidate_postprocessing_simple:main',
40 | 'nnUNet_export_model_to_zip = nnunet.inference.pretrained_models.collect_pretrained_models:export_entry_point',
41 | 'nnUNet_install_pretrained_model_from_zip = nnunet.inference.pretrained_models.download_pretrained_model:install_from_zip_entry_point',
42 | 'nnUNet_change_trainer_class = nnunet.inference.change_trainer:main',
43 | 'nnUNet_evaluate_folder = nnunet.evaluation.evaluator:nnunet_evaluate_folder'
44 | ],
45 | },
46 | keywords=['deep learning', 'image segmentation', 'medical image analysis',
47 | 'medical image segmentation', 'nnU-Net', 'nnunet']
48 | )
49 |
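Each `console_scripts` entry maps a command name to a `module:function` target. After installing the package (e.g. with `pip install -e .`), setuptools generates thin command-line wrappers, so `nnUNet_train` behaves roughly like the sketch below (the real wrapper is generated code; this is only an approximation):

# approximate equivalent of the generated nnUNet_train wrapper
import sys
from nnunet.run.run_training import main

if __name__ == "__main__":
    sys.exit(main())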
--------------------------------------------------------------------------------