├── .github └── ISSUE_TEMPLATE │ └── all-issues.md ├── .gitignore ├── LICENSE ├── README.md ├── documentation ├── common_problems_and_solutions.md ├── common_questions.md ├── data_format_inference.md ├── dataset_conversion.md ├── expected_epoch_times.md ├── extending_nnunet.md ├── inference_example_Prostate.md ├── setting_up_paths.md ├── training_example_Hippocampus.md ├── tutorials │ ├── custom_preprocessing.md │ ├── custom_spacing.md │ └── edit_plans_files.md └── using_nnUNet_as_baseline.md ├── nnunet_mednext ├── __init__.py ├── configuration.py ├── dataset_conversion │ ├── Task017_BeyondCranialVaultAbdominalOrganSegmentation.py │ ├── Task024_Promise2012.py │ ├── Task027_AutomaticCardiacDetectionChallenge.py │ ├── Task029_LiverTumorSegmentationChallenge.py │ ├── Task032_BraTS_2018.py │ ├── Task035_ISBI_MSLesionSegmentationChallenge.py │ ├── Task037_038_Chaos_Challenge.py │ ├── Task040_KiTS.py │ ├── Task043_BraTS_2019.py │ ├── Task055_SegTHOR.py │ ├── Task056_VerSe2019.py │ ├── Task056_Verse_normalize_orientation.py │ ├── Task058_ISBI_EM_SEG.py │ ├── Task059_EPFL_EM_MITO_SEG.py │ ├── Task061_CREMI.py │ ├── Task062_NIHPancreas.py │ ├── Task064_KiTS_labelsFixed.py │ ├── Task065_KiTS_NicksLabels.py │ ├── Task069_CovidSeg.py │ ├── Task075_Fluo_C3DH_A549_ManAndSim.py │ ├── Task076_Fluo_N3DH_SIM.py │ ├── Task082_BraTS_2020.py │ ├── Task083_VerSe2020.py │ ├── Task089_Fluo-N2DH-SIM.py │ ├── Task114_heart_MNMs.py │ ├── Task115_COVIDSegChallenge.py │ ├── Task120_Massachusetts_RoadSegm.py │ ├── Task135_KiTS2021.py │ ├── Task137_BraTS_2021.py │ ├── Task154_RibFrac_multi_label.py │ ├── Task155_RibFrac_binary.py │ ├── Task156_RibSeg.py │ ├── Task159_MyoPS2020.py │ ├── Task777_KiTS2023.py │ ├── __init__.py │ └── utils.py ├── evaluation │ ├── __init__.py │ ├── add_dummy_task_with_mean_over_all_tasks.py │ ├── add_mean_dice_to_json.py │ ├── collect_results_files.py │ ├── evaluator.py │ ├── metrics.py │ ├── model_selection │ │ ├── __init__.py │ │ ├── collect_all_fold0_results_and_summarize_in_one_csv.py │ │ ├── ensemble.py │ │ ├── figure_out_what_to_submit.py │ │ ├── rank_candidates.py │ │ ├── rank_candidates_StructSeg.py │ │ ├── rank_candidates_cascade.py │ │ ├── summarize_results_in_one_json.py │ │ └── summarize_results_with_plans.py │ ├── region_based_evaluation.py │ └── surface_dice.py ├── experiment_planning │ ├── DatasetAnalyzer.py │ ├── __init__.py │ ├── alternative_experiment_planning │ │ ├── __init__.py │ │ ├── experiment_planner_baseline_3DUNet_v21_11GB.py │ │ ├── experiment_planner_baseline_3DUNet_v21_16GB.py │ │ ├── experiment_planner_baseline_3DUNet_v21_32GB.py │ │ ├── experiment_planner_baseline_3DUNet_v21_3convperstage.py │ │ ├── experiment_planner_baseline_3DUNet_v22.py │ │ ├── experiment_planner_baseline_3DUNet_v23.py │ │ ├── experiment_planner_pretrained.py │ │ ├── experiment_planner_residual_3DUNet_v21.py │ │ ├── normalization │ │ │ ├── __init__.py │ │ │ ├── experiment_planner_2DUNet_v21_RGB_scaleto_0_1.py │ │ │ ├── experiment_planner_3DUNet_CT2.py │ │ │ └── experiment_planner_3DUNet_nonCT.py │ │ ├── patch_size │ │ │ ├── __init__.py │ │ │ ├── experiment_planner_3DUNet_isotropic_in_mm.py │ │ │ └── experiment_planner_3DUNet_isotropic_in_voxels.py │ │ ├── pooling_and_convs │ │ │ ├── __init__.py │ │ │ ├── experiment_planner_baseline_3DUNet_allConv3x3.py │ │ │ └── experiment_planner_baseline_3DUNet_poolBasedOnSpacing.py │ │ ├── readme.md │ │ └── target_spacing │ │ │ ├── __init__.py │ │ │ ├── experiment_planner_baseline_3DUNet_targetSpacingForAnisoAxis.py │ │ │ ├── 
experiment_planner_baseline_3DUNet_v21_customTargetSpacing_2x2x2.py │ │ │ ├── experiment_planner_baseline_3DUNet_v21_noResampling.py │ │ │ └── experiment_planner_v21_isotropic1mm.py │ ├── change_batch_size.py │ ├── common_utils.py │ ├── experiment_planner_baseline_2DUNet.py │ ├── experiment_planner_baseline_2DUNet_v21.py │ ├── experiment_planner_baseline_3DUNet.py │ ├── experiment_planner_baseline_3DUNet_v21.py │ ├── nnUNet_convert_decathlon_task.py │ ├── nnUNet_plan_and_preprocess.py │ ├── old │ │ ├── __init__.py │ │ └── old_plan_and_preprocess_task.py │ ├── summarize_plans.py │ └── utils.py ├── inference │ ├── __init__.py │ ├── change_trainer.py │ ├── ensemble_predictions.py │ ├── predict.py │ ├── predict_simple.py │ ├── pretrained_models │ │ ├── __init__.py │ │ ├── collect_pretrained_models.py │ │ └── download_pretrained_model.py │ └── segmentation_export.py ├── network_architecture │ ├── __init__.py │ ├── custom_modules │ │ ├── __init__.py │ │ ├── conv_blocks.py │ │ ├── custom_networks │ │ │ ├── CoTr │ │ │ │ ├── ResTransUNet.py │ │ │ │ └── __init__.py │ │ │ ├── SwinUNETR │ │ │ │ ├── __init__.py │ │ │ │ └── swinunetr.py │ │ │ ├── TransBTS │ │ │ │ ├── IntmdSequential.py │ │ │ │ ├── PositionalEncoding.py │ │ │ │ ├── TransBTS_downsample8x_skipconnection.py │ │ │ │ ├── Transformer.py │ │ │ │ ├── Unet_skipconnection.py │ │ │ │ └── __init__.py │ │ │ ├── TransFuse │ │ │ │ ├── DeiT.py │ │ │ │ ├── TransFuse.py │ │ │ │ ├── __init__.py │ │ │ │ └── vision_transformer.py │ │ │ ├── TransUnet │ │ │ │ ├── __init__.py │ │ │ │ ├── vit_seg_configs.py │ │ │ │ ├── vit_seg_modeling.py │ │ │ │ └── vit_seg_modeling_resnet_skip.py │ │ │ ├── UNETR │ │ │ │ ├── __init__.py │ │ │ │ └── unetr.py │ │ │ ├── UTNet │ │ │ │ ├── __init__.py │ │ │ │ ├── conv_trans_utils.py │ │ │ │ ├── unet_utils.py │ │ │ │ └── utnet.py │ │ │ ├── UXNet3D │ │ │ │ ├── __init__.py │ │ │ │ ├── network_backbone.py │ │ │ │ └── uxnet_encoder.py │ │ │ ├── __init__.py │ │ │ └── nnFormer │ │ │ │ ├── __init__.py │ │ │ │ └── nnFormer_tumor.py │ │ ├── feature_response_normalization.py │ │ ├── helperModules.py │ │ └── mish.py │ ├── generic_UNet.py │ ├── generic_UNet_DP.py │ ├── generic_modular_UNet.py │ ├── generic_modular_preact_residual_UNet.py │ ├── generic_modular_residual_UNet.py │ ├── initialization.py │ ├── mednextv1 │ │ ├── MedNextV1.py │ │ ├── __init__.py │ │ ├── blocks.py │ │ └── create_mednext_v1.py │ └── neural_network.py ├── paths.py ├── postprocessing │ ├── __init__.py │ ├── connected_components.py │ ├── consolidate_all_for_paper.py │ ├── consolidate_postprocessing.py │ └── consolidate_postprocessing_simple.py ├── preprocessing │ ├── __init__.py │ ├── cropping.py │ ├── custom_preprocessors │ │ ├── __init__.py │ │ └── preprocessor_scale_RGB_to_0_1.py │ ├── preprocessing.py │ └── sanity_checks.py ├── run │ ├── __init__.py │ ├── default_configuration.py │ ├── load_pretrained_weights.py │ ├── load_weights.py │ ├── run_training.py │ ├── run_training_DDP.py │ └── run_training_DP.py ├── training │ ├── __init__.py │ ├── cascade_stuff │ │ ├── __init__.py │ │ └── predict_next_stage.py │ ├── data_augmentation │ │ ├── __init__.py │ │ ├── custom_transforms.py │ │ ├── data_augmentation_insaneDA.py │ │ ├── data_augmentation_insaneDA2.py │ │ ├── data_augmentation_moreDA.py │ │ ├── data_augmentation_noDA.py │ │ ├── data_augmentation_unetr.py │ │ ├── default_data_augmentation.py │ │ ├── downsampling.py │ │ └── pyramid_augmentations.py │ ├── dataloading │ │ ├── __init__.py │ │ └── dataset_loading.py │ ├── learning_rate │ │ ├── __init__.py │ │ └── poly_lr.py │ 
├── loss_functions │ │ ├── TopK_loss.py │ │ ├── __init__.py │ │ ├── confidence_penalty.py │ │ ├── crossentropy.py │ │ ├── deep_supervision.py │ │ ├── dice_loss.py │ │ └── focal_loss.py │ ├── model_restore.py │ ├── network_training │ │ ├── MedNeXt │ │ │ ├── __init__.py │ │ │ └── nnUNetTrainerV2_MedNeXt.py │ │ ├── __init__.py │ │ ├── competitions_with_custom_Trainers │ │ │ ├── BraTS2020 │ │ │ │ ├── __init__.py │ │ │ │ ├── nnUNetTrainerV2BraTSRegions.py │ │ │ │ └── nnUNetTrainerV2BraTSRegions_moreDA.py │ │ │ ├── MMS │ │ │ │ ├── __init__.py │ │ │ │ └── nnUNetTrainerV2_MMS.py │ │ │ └── __init__.py │ │ ├── network_trainer.py │ │ ├── nnUNetTrainer.py │ │ ├── nnUNetTrainerCascadeFullRes.py │ │ ├── nnUNetTrainerV2.py │ │ ├── nnUNetTrainerV2_CascadeFullRes.py │ │ ├── nnUNetTrainerV2_DDP.py │ │ ├── nnUNetTrainerV2_DP.py │ │ ├── nnUNetTrainerV2_fp32.py │ │ └── nnUNet_variants │ │ │ ├── __init__.py │ │ │ ├── architectural_variants │ │ │ ├── __init__.py │ │ │ ├── nnUNetTrainerV2_3ConvPerStage.py │ │ │ ├── nnUNetTrainerV2_3ConvPerStage_samefilters.py │ │ │ ├── nnUNetTrainerV2_BN.py │ │ │ ├── nnUNetTrainerV2_FRN.py │ │ │ ├── nnUNetTrainerV2_GN.py │ │ │ ├── nnUNetTrainerV2_GeLU.py │ │ │ ├── nnUNetTrainerV2_LReLU_slope_2en1.py │ │ │ ├── nnUNetTrainerV2_Mish.py │ │ │ ├── nnUNetTrainerV2_NoNormalization.py │ │ │ ├── nnUNetTrainerV2_NoNormalization_lr1en3.py │ │ │ ├── nnUNetTrainerV2_ReLU.py │ │ │ ├── nnUNetTrainerV2_ReLU_biasInSegOutput.py │ │ │ ├── nnUNetTrainerV2_ReLU_convReLUIN.py │ │ │ ├── nnUNetTrainerV2_ResencUNet.py │ │ │ ├── nnUNetTrainerV2_ResencUNet_DA3.py │ │ │ ├── nnUNetTrainerV2_ResencUNet_DA3_BN.py │ │ │ ├── nnUNetTrainerV2_ResencUNet_SimonsInit.py │ │ │ ├── nnUNetTrainerV2_allConv3x3.py │ │ │ ├── nnUNetTrainerV2_lReLU_biasInSegOutput.py │ │ │ ├── nnUNetTrainerV2_lReLU_convlReLUIN.py │ │ │ ├── nnUNetTrainerV2_noDeepSupervision.py │ │ │ └── nnUNetTrainerV2_softDeepSupervision.py │ │ │ ├── baselines │ │ │ ├── __init__.py │ │ │ ├── nnUNetTrainerV2_3DUXNet.py │ │ │ ├── nnUNetTrainerV2_CoTr.py │ │ │ ├── nnUNetTrainerV2_SwinUNETR.py │ │ │ ├── nnUNetTrainerV2_TransBTS.py │ │ │ ├── nnUNetTrainerV2_TransFuse.py │ │ │ ├── nnUNetTrainerV2_TransUnet.py │ │ │ ├── nnUNetTrainerV2_UNETR.py │ │ │ ├── nnUNetTrainerV2_UTNet.py │ │ │ └── nnUNetTrainerV2_nnFormer.py │ │ │ ├── benchmarking │ │ │ ├── __init__.py │ │ │ ├── nnUNetTrainerV2_2epochs.py │ │ │ └── nnUNetTrainerV2_dummyLoad.py │ │ │ ├── cascade │ │ │ ├── __init__.py │ │ │ ├── nnUNetTrainerV2CascadeFullRes_DAVariants.py │ │ │ ├── nnUNetTrainerV2CascadeFullRes_lowerLR.py │ │ │ ├── nnUNetTrainerV2CascadeFullRes_shorter.py │ │ │ └── nnUNetTrainerV2CascadeFullRes_shorter_lowerLR.py │ │ │ ├── copies │ │ │ ├── __init__.py │ │ │ └── nnUNetTrainerV2_copies.py │ │ │ ├── data_augmentation │ │ │ ├── __init__.py │ │ │ ├── nnUNetTrainerV2_DA2.py │ │ │ ├── nnUNetTrainerV2_DA3.py │ │ │ ├── nnUNetTrainerV2_DA5.py │ │ │ ├── nnUNetTrainerV2_independentScalePerAxis.py │ │ │ ├── nnUNetTrainerV2_insaneDA.py │ │ │ ├── nnUNetTrainerV2_noDA.py │ │ │ └── nnUNetTrainerV2_noMirroring.py │ │ │ ├── loss_function │ │ │ ├── __init__.py │ │ │ ├── nnUNetTrainerV2_ForceBD.py │ │ │ ├── nnUNetTrainerV2_ForceSD.py │ │ │ ├── nnUNetTrainerV2_Loss_CE.py │ │ │ ├── nnUNetTrainerV2_Loss_CEGDL.py │ │ │ ├── nnUNetTrainerV2_Loss_Dice.py │ │ │ ├── nnUNetTrainerV2_Loss_DiceCE_noSmooth.py │ │ │ ├── nnUNetTrainerV2_Loss_DiceTopK10.py │ │ │ ├── nnUNetTrainerV2_Loss_Dice_lr1en3.py │ │ │ ├── nnUNetTrainerV2_Loss_Dice_squared.py │ │ │ ├── nnUNetTrainerV2_Loss_MCC.py │ │ │ ├── nnUNetTrainerV2_Loss_TopK10.py │ │ 
│ ├── nnUNetTrainerV2_focalLoss.py │ │ │ └── nnUNetTrainerV2_graduallyTransitionFromCEToDice.py │ │ │ ├── miscellaneous │ │ │ ├── __init__.py │ │ │ └── nnUNetTrainerV2_fullEvals.py │ │ │ ├── nnUNetTrainerCE.py │ │ │ ├── nnUNetTrainerNoDA.py │ │ │ ├── optimizer_and_lr │ │ │ ├── __init__.py │ │ │ ├── nnUNetTrainerV2_Adam.py │ │ │ ├── nnUNetTrainerV2_Adam_ReduceOnPlateau.py │ │ │ ├── nnUNetTrainerV2_Adam_lr_3en4.py │ │ │ ├── nnUNetTrainerV2_Ranger_lr1en2.py │ │ │ ├── nnUNetTrainerV2_Ranger_lr3en3.py │ │ │ ├── nnUNetTrainerV2_Ranger_lr3en4.py │ │ │ ├── nnUNetTrainerV2_SGD_ReduceOnPlateau.py │ │ │ ├── nnUNetTrainerV2_SGD_fixedSchedule.py │ │ │ ├── nnUNetTrainerV2_SGD_fixedSchedule2.py │ │ │ ├── nnUNetTrainerV2_SGD_lrs.py │ │ │ ├── nnUNetTrainerV2_cycleAtEnd.py │ │ │ ├── nnUNetTrainerV2_fp16.py │ │ │ ├── nnUNetTrainerV2_momentum09.py │ │ │ ├── nnUNetTrainerV2_momentum095.py │ │ │ ├── nnUNetTrainerV2_momentum098.py │ │ │ ├── nnUNetTrainerV2_momentum09in2D.py │ │ │ ├── nnUNetTrainerV2_reduceMomentumDuringTraining.py │ │ │ └── nnUNetTrainerV2_warmup.py │ │ │ └── resampling │ │ │ ├── __init__.py │ │ │ └── nnUNetTrainerV2_resample33.py │ └── optimizer │ │ ├── __init__.py │ │ └── ranger.py └── utilities │ ├── __init__.py │ ├── distributed.py │ ├── file_conversions.py │ ├── file_endings.py │ ├── folder_names.py │ ├── image_reorientation.py │ ├── nd_softmax.py │ ├── one_hot_encoding.py │ ├── overlay_plots.py │ ├── random_stuff.py │ ├── recursive_delete_npz.py │ ├── recursive_rename_taskXX_to_taskXXX.py │ ├── set_n_proc_DA.py │ ├── sitk_stuff.py │ ├── task_name_id_conversion.py │ ├── tensor_utilities.py │ └── to_torch.py ├── setup.cfg ├── setup.py └── tests ├── test_steps_for_sliding_window_prediction.py └── tests_mednext_miccai_architectures.py /.github/ISSUE_TEMPLATE/all-issues.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: All Issues 3 | about: Template for all types of issues 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | Please read the following resources before posting issues: 11 | 12 | Common questions: 13 | https://github.com/MIC-DKFZ/nnUNet/blob/master/documentation/common_questions.md 14 | 15 | Common Problems and their solutions: 16 | https://github.com/MIC-DKFZ/nnUNet/blob/master/documentation/common_problems_and_solutions.md 17 | 18 | Expected epoch times and tips on how to identify bottlenecks: 19 | https://github.com/MIC-DKFZ/nnUNet/blob/master/documentation/expected_epoch_times.md 20 | 21 | 22 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | env/ 12 | build/ 13 | develop-eggs/ 14 | dist/ 15 | downloads/ 16 | eggs/ 17 | .eggs/ 18 | lib/ 19 | lib64/ 20 | parts/ 21 | sdist/ 22 | var/ 23 | *.egg-info/ 24 | .installed.cfg 25 | *.egg 26 | 27 | # PyInstaller 28 | # Usually these files are written by a python script from a template 29 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
30 | *.manifest 31 | *.spec 32 | 33 | # Installer logs 34 | pip-log.txt 35 | pip-delete-this-directory.txt 36 | 37 | # Unit test / coverage reports 38 | htmlcov/ 39 | .tox/ 40 | .coverage 41 | .coverage.* 42 | .cache 43 | nosetests.xml 44 | coverage.xml 45 | *,cover 46 | .hypothesis/ 47 | 48 | # Translations 49 | *.mo 50 | *.pot 51 | 52 | # Django stuff: 53 | *.log 54 | local_settings.py 55 | 56 | # Flask stuff: 57 | instance/ 58 | .webassets-cache 59 | 60 | # Scrapy stuff: 61 | .scrapy 62 | 63 | # Sphinx documentation 64 | docs/_build/ 65 | 66 | # PyBuilder 67 | target/ 68 | 69 | # IPython Notebook 70 | .ipynb_checkpoints 71 | 72 | # pyenv 73 | .python-version 74 | 75 | # celery beat schedule file 76 | celerybeat-schedule 77 | 78 | # dotenv 79 | .env 80 | 81 | # virtualenv 82 | venv/ 83 | ENV/ 84 | 85 | # Spyder project settings 86 | .spyderproject 87 | 88 | # Rope project settings 89 | .ropeproject 90 | 91 | *.memmap 92 | *.png 93 | *.zip 94 | *.npz 95 | *.npy 96 | *.jpg 97 | *.jpeg 98 | .idea 99 | *.txt 100 | .idea/* 101 | *.png 102 | *.nii.gz 103 | *.nii 104 | *.tif 105 | *.bmp 106 | *.pkl 107 | *.xml 108 | *.pkl 109 | *.pdf 110 | *.png 111 | *.jpg 112 | *.jpeg 113 | 114 | *.model 115 | 116 | -------------------------------------------------------------------------------- /documentation/data_format_inference.md: -------------------------------------------------------------------------------- 1 | # Data format for Inference 2 | 3 | The data format for inference must match the one used for the raw data (specifically, the images must be in exactly 4 | the same format as in the imagesTr folder). As before, the filenames must start with a 5 | unique identifier, followed by a 4-digit modality identifier. Here is an example for two different datasets: 6 | 7 | 1) Task005_Prostate: 8 | 9 | This task has 2 modalities, so the files in the input folder must look like this: 10 | 11 | input_folder 12 | ├── prostate_03_0000.nii.gz 13 | ├── prostate_03_0001.nii.gz 14 | ├── prostate_05_0000.nii.gz 15 | ├── prostate_05_0001.nii.gz 16 | ├── prostate_08_0000.nii.gz 17 | ├── prostate_08_0001.nii.gz 18 | ├── ... 19 | 20 | _0000 is always the T2 image and _0001 is always the ADC image (as specified by 'modality' in the dataset.json) 21 | 22 | 2) Task002_Heart: 23 | 24 | imagesTs 25 | ├── la_001_0000.nii.gz 26 | ├── la_002_0000.nii.gz 27 | ├── la_006_0000.nii.gz 28 | ├── ... 29 | 30 | Task002 only has one modality, so each case only has one _0000.nii.gz file. 31 | 32 | 33 | The segmentations in the output folder will be named INDENTIFIER.nii.gz (omitting the modality identifier). 34 | -------------------------------------------------------------------------------- /documentation/training_example_Hippocampus.md: -------------------------------------------------------------------------------- 1 | # Example: 3D U-Net training on the Hippocampus dataset 2 | 3 | This is a step-by-step example on how to run a 3D full resolution Training with the Hippocampus dataset from the 4 | Medical Segmentation Decathlon. 5 | 6 | 1) Install nnU-Net by following the instructions [here](../readme.md#installation). Make sure to set all relevant paths, 7 | also see [here](setting_up_paths.md). This step is necessary so that nnU-Net knows where to store raw data, 8 | preprocessed data and trained models. 9 | 2) Download the Hippocampus dataset of the Medical Segmentation Decathlon from 10 | [here](https://drive.google.com/drive/folders/1HqEgzS8BV2c7xYNrZdEAnrHk7osJJ--2). Then extract the archive to a 11 | destination of your choice. 
12 | 3) Decathlon data come as 4D niftis. This is not compatible with nnU-Net (see dataset format specified 13 | [here](dataset_conversion.md)). Convert the Hippocampus dataset into the correct format with 14 | 15 | ```bash 16 | nnUNet_convert_decathlon_task -i /xxx/Task04_Hippocampus 17 | ``` 18 | 19 | Note that `Task04_Hippocampus` must be the folder that has the three 'imagesTr', 'labelsTr', 'imagesTs' subfolders! 20 | The converted dataset can be found in $nnUNet_raw_data_base/nnUNet_raw_data ($nnUNet_raw_data_base is the folder for 21 | raw data that you specified during installation) 22 | 4) You can now run nnU-Net's pipeline configuration (and the preprocessing) with the following line: 23 | ```bash 24 | nnUNet_plan_and_preprocess -t 4 25 | ``` 26 | Where 4 refers to the task ID of the Hippocampus dataset. 27 | 5) Now you can already start network training. This is how you train a 3d full resolution U-Net on the Hippocampus dataset: 28 | ```bash 29 | nnUNet_train 3d_fullres nnUNetTrainerV2 4 0 30 | ``` 31 | nnU-Net per default requires all trainings as 5-fold cross validation. The command above will run only the training for the 32 | first fold (fold 0). 4 is the task identifier of the hippocampus dataset. Training one fold should take about 9 33 | hours on a modern GPU. 34 | 35 | This tutorial is only intended to demonstrate how easy it is to get nnU-Net running. You do not need to finish the 36 | network training - pretrained models for the hippocampus task are available (see [here](../readme.md#run-inference)). 37 | 38 | The only prerequisite for running nnU-Net on your custom dataset is to bring it into a structured, nnU-Net compatible 39 | format. nnU-Net will take care of the rest. See [here](dataset_conversion.md) for instructions on how to convert 40 | datasets into nnU-Net compatible format. 41 | -------------------------------------------------------------------------------- /documentation/tutorials/custom_spacing.md: -------------------------------------------------------------------------------- 1 | Sometimes you want to set custom target spacings. This is done by creating a custom ExperimentPlanner. 2 | Let's run this with the Task002_Heart example from the Medical Segmentation Decathlon. This dataset is not too large 3 | and working with it is therefore a breeze! 4 | 5 | This example requires you to have downloaded the dataset and converted it to nnU-Net format with 6 | nnUNet_convert_decathlon_task 7 | 8 | We need to run the nnUNet_plan_and_preprocess command with a custom 3d experiment planner to achieve this. I have 9 | created an appropriate ExperimentPlanner and placed it in [experiment_planner_baseline_3DUNet_v21_customTargetSpacing_2x2x2.py](../../nnunet_mednext/experiment_planning/alternative_experiment_planning/target_spacing/experiment_planner_baseline_3DUNet_v21_customTargetSpacing_2x2x2.py) 10 | 11 | This will set a hard coded target spacing of 2x2x2mm for the 3d_fullres configuration (3d_lowres is unchanged). 12 | Go have a look at this ExperimentPlanner now. 13 | 14 | To run nnUNet_plan_and_preprocess with the new ExperimentPlanner, simply specify it: 15 | 16 | `nnUNet_plan_and_preprocess -t 2 -pl2d None -pl3d ExperimentPlanner3D_v21_customTargetSpacing_2x2x2` 17 | 18 | Note how we are disabling 2D preprocessing with `-pl2d None`. The ExperimentPlanner I created is only for 3D. 19 | You will need to generate a separate one for 2D. 20 | 21 | Once this is completed your task will have been preprocessed with the desired target spacing.
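For reference, the ExperimentPlanner used above boils down to overriding `get_target_spacing()` and giving the resulting plans and data a unique identifier. The following is a condensed sketch of `experiment_planner_baseline_3DUNet_v21_customTargetSpacing_2x2x2.py` as shipped in this repository (license header omitted):

```python
import numpy as np
from nnunet_mednext.experiment_planning.experiment_planner_baseline_3DUNet_v21 import ExperimentPlanner3D_v21
from nnunet_mednext.paths import *  # join() comes in via this star import, as in the original file


class ExperimentPlanner3D_v21_customTargetSpacing_2x2x2(ExperimentPlanner3D_v21):
    def __init__(self, folder_with_cropped_data, preprocessed_output_folder):
        super(ExperimentPlanner3D_v21, self).__init__(folder_with_cropped_data, preprocessed_output_folder)
        # custom data identifier and plans file name so the preprocessed data and plans can
        # co-exist with the default ExperimentPlanner3D_v21 output
        self.data_identifier = "nnUNetData_plans_v2.1_trgSp_2x2x2"
        self.plans_fname = join(self.preprocessed_output_folder,
                                "nnUNetPlansv2.1_trgSp_2x2x2_plans_3D.pkl")

    def get_target_spacing(self):
        # simply return the desired spacing as a float np.array
        return np.array([2., 2., 2.])
```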
You can use it by 22 | specifying the new custom plans file that is linked to it (see 23 | `ExperimentPlanner3D_v21_customTargetSpacing_2x2x2` source code) when running any nnUNet_* command, for example: 24 | 25 | `nnUNet_train 3d_fullres nnUNetTrainerV2 2 FOLD -p nnUNetPlansv2.1_trgSp_2x2x2` 26 | 27 | (make sure to omit the `_plans_3D.pkl` suffix!) 28 | 29 | **TODO**: how to compare with the default run? 30 | 31 | IMPORTANT: When creating custom ExperimentPlanners, make sure to always place them under a unique class name somewhere 32 | in the nnunet_mednext.experiment_planning module. If you create subfolders, make sure they contain an __init__.py file 33 | (can be empty). If you fail to do so nnU-Net will not be able to locate your ExperimentPlanner and crash! -------------------------------------------------------------------------------- /documentation/using_nnUNet_as_baseline.md: -------------------------------------------------------------------------------- 1 | (The U-Net is the current punching bag of methods development. nnU-Net is going to be that looking forward. That is 2 | cool (great, in fact!), but it should be done correctly. Here are tips on how to benchmark against nnU-Net) 3 | 4 | This is work in progress -------------------------------------------------------------------------------- /nnunet_mednext/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | # print("\n\nPlease cite the following paper when using nnUNet:\n\nIsensee, F., Jaeger, P.F., Kohl, S.A.A. et al. " 3 | # "\"nnU-Net: a self-configuring method for deep learning-based biomedical image segmentation.\" " 4 | # "Nat Methods (2020). https://doi.org/10.1038/s41592-020-01008-z\n\n") 5 | # print("If you have questions or suggestions, feel free to open an issue at https://github.com/MIC-DKFZ/nnUNet\n") 6 | 7 | from .
import * 8 | from nnunet_mednext.network_architecture.mednextv1.MedNextV1 import MedNeXt 9 | from nnunet_mednext.network_architecture.mednextv1.create_mednext_v1 import create_mednext_v1 10 | from nnunet_mednext.network_architecture.mednextv1.blocks import \ 11 | MedNeXtBlock, MedNeXtUpBlock, MedNeXtDownBlock 12 | from nnunet_mednext.run.load_weights import upkern_load_weights -------------------------------------------------------------------------------- /nnunet_mednext/configuration.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | default_num_threads = 8 if 'nnUNet_def_n_proc' not in os.environ else int(os.environ['nnUNet_def_n_proc']) 4 | RESAMPLING_SEPARATE_Z_ANISO_THRESHOLD = 3 # determines what threshold to use for resampling the low resolution axis 5 | # separately (with NN) -------------------------------------------------------------------------------- /nnunet_mednext/dataset_conversion/Task069_CovidSeg.py: -------------------------------------------------------------------------------- 1 | import shutil 2 | 3 | from batchgenerators.utilities.file_and_folder_operations import * 4 | import SimpleITK as sitk 5 | from nnunet_mednext.paths import nnUNet_raw_data 6 | 7 | if __name__ == '__main__': 8 | #data is available at http://medicalsegmentation.com/covid19/ 9 | download_dir = '/home/fabian/Downloads' 10 | 11 | task_id = 69 12 | task_name = "CovidSeg" 13 | 14 | foldername = "Task%03.0d_%s" % (task_id, task_name) 15 | 16 | out_base = join(nnUNet_raw_data, foldername) 17 | imagestr = join(out_base, "imagesTr") 18 | imagests = join(out_base, "imagesTs") 19 | labelstr = join(out_base, "labelsTr") 20 | maybe_mkdir_p(imagestr) 21 | maybe_mkdir_p(imagests) 22 | maybe_mkdir_p(labelstr) 23 | 24 | train_patient_names = [] 25 | test_patient_names = [] 26 | 27 | # the niftis are 3d, but they are just stacks of 2d slices from different patients. 
So no 3d U-Net, please 28 | 29 | # the training stack has 100 slices, so we split it into 5 equally sized parts (20 slices each) for cross-validation 30 | training_data = sitk.GetArrayFromImage(sitk.ReadImage(join(download_dir, 'tr_im.nii.gz'))) 31 | training_labels = sitk.GetArrayFromImage(sitk.ReadImage(join(download_dir, 'tr_mask.nii.gz'))) 32 | 33 | for f in range(5): 34 | this_name = 'part_%d' % f 35 | data = training_data[f::5] 36 | labels = training_labels[f::5] 37 | sitk.WriteImage(sitk.GetImageFromArray(data), join(imagestr, this_name + '_0000.nii.gz')) 38 | sitk.WriteImage(sitk.GetImageFromArray(labels), join(labelstr, this_name + '.nii.gz')) 39 | train_patient_names.append(this_name) 40 | 41 | shutil.copy(join(download_dir, 'val_im.nii.gz'), join(imagests, 'val_im.nii.gz')) 42 | 43 | test_patient_names.append('val_im') 44 | 45 | json_dict = {} 46 | json_dict['name'] = task_name 47 | json_dict['description'] = "" 48 | json_dict['tensorImageSize'] = "4D" 49 | json_dict['reference'] = "" 50 | json_dict['licence'] = "" 51 | json_dict['release'] = "0.0" 52 | json_dict['modality'] = { 53 | "0": "nonct", 54 | } 55 | json_dict['labels'] = { 56 | "0": "background", 57 | "1": "stuff1", 58 | "2": "stuff2", 59 | "3": "stuff3", 60 | } 61 | 62 | json_dict['numTraining'] = len(train_patient_names) 63 | json_dict['numTest'] = len(test_patient_names) 64 | json_dict['training'] = [{'image': "./imagesTr/%s.nii.gz" % i.split("/")[-1], "label": "./labelsTr/%s.nii.gz" % i.split("/")[-1]} for i in 65 | train_patient_names] 66 | json_dict['test'] = ["./imagesTs/%s.nii.gz" % i.split("/")[-1] for i in test_patient_names] 67 | 68 | save_json(json_dict, os.path.join(out_base, "dataset.json")) 69 | -------------------------------------------------------------------------------- /nnunet_mednext/dataset_conversion/Task135_KiTS2021.py: -------------------------------------------------------------------------------- 1 | from batchgenerators.utilities.file_and_folder_operations import * 2 | import shutil 3 | 4 | from nnunet_mednext.paths import nnUNet_raw_data 5 | from nnunet_mednext.dataset_conversion.utils import generate_dataset_json 6 | 7 | if __name__ == '__main__': 8 | # this is the data folder from the kits21 github repository, see https://github.com/neheller/kits21 9 | kits_data_dir = '/home/fabian/git_repos/kits21/kits21/data' 10 | 11 | # This script uses the majority voted segmentation as ground truth 12 | kits_segmentation_filename = 'aggregated_MAJ_seg.nii.gz' 13 | 14 | # Arbitrary task id. This is just to ensure each dataset ha a unique number. 
Set this to whatever ([0-999]) you 15 | # want 16 | task_id = 135 17 | task_name = "KiTS2021" 18 | 19 | foldername = "Task%03.0d_%s" % (task_id, task_name) 20 | 21 | # setting up nnU-Net folders 22 | out_base = join(nnUNet_raw_data, foldername) 23 | imagestr = join(out_base, "imagesTr") 24 | labelstr = join(out_base, "labelsTr") 25 | maybe_mkdir_p(imagestr) 26 | maybe_mkdir_p(labelstr) 27 | 28 | case_ids = subdirs(kits_data_dir, prefix='case_', join=False) 29 | for c in case_ids: 30 | if isfile(join(kits_data_dir, c, kits_segmentation_filename)): 31 | shutil.copy(join(kits_data_dir, c, kits_segmentation_filename), join(labelstr, c + '.nii.gz')) 32 | shutil.copy(join(kits_data_dir, c, 'imaging.nii.gz'), join(imagestr, c + '_0000.nii.gz')) 33 | 34 | generate_dataset_json(join(out_base, 'dataset.json'), 35 | imagestr, 36 | None, 37 | ('CT',), 38 | { 39 | 0: 'background', 40 | 1: "kidney", 41 | 2: "tumor", 42 | 3: "cyst", 43 | }, 44 | task_name, 45 | license='see https://kits21.kits-challenge.org/participate#download-block', 46 | dataset_description='see https://kits21.kits-challenge.org/', 47 | dataset_reference='https://www.sciencedirect.com/science/article/abs/pii/S1361841520301857, ' 48 | 'https://kits21.kits-challenge.org/', 49 | dataset_release='0') 50 | -------------------------------------------------------------------------------- /nnunet_mednext/dataset_conversion/Task777_KiTS2023.py: -------------------------------------------------------------------------------- 1 | from batchgenerators.utilities.file_and_folder_operations import * 2 | import shutil 3 | 4 | from nnunet_mednext.paths import nnUNet_raw_data 5 | from nnunet_mednext.dataset_conversion.utils import generate_dataset_json 6 | 7 | if __name__ == '__main__': 8 | # this is the data folder from the kits21 github repository, see https://github.com/neheller/kits21 9 | kits_data_dir = '/mnt/cluster-data-all/roys/raw_data/nnUNet_raw_data_base/kits23/dataset/' 10 | nnUNet_raw_data = '/mnt/cluster-data-all/roys/raw_data/nnUNet_raw_data_base/nnUNet_raw_data' 11 | 12 | # This script uses the majority voted segmentation as ground truth 13 | kits_segmentation_filename = 'segmentation.nii.gz' 14 | 15 | # Arbitrary task id. This is just to ensure each dataset ha a unique number. 
Set this to whatever ([0-999]) you 16 | # want 17 | task_id = 777 18 | task_name = "KiTS2023" 19 | 20 | foldername = "Task%03.0d_%s" % (task_id, task_name) 21 | 22 | # setting up nnU-Net folders 23 | out_base = join(nnUNet_raw_data, foldername) 24 | imagestr = join(out_base, "imagesTr") 25 | labelstr = join(out_base, "labelsTr") 26 | maybe_mkdir_p(imagestr) 27 | maybe_mkdir_p(labelstr) 28 | 29 | case_ids = subdirs(kits_data_dir, prefix='case_', join=False) 30 | for c in case_ids: 31 | if isfile(join(kits_data_dir, c, kits_segmentation_filename)): 32 | shutil.copy(join(kits_data_dir, c, kits_segmentation_filename), join(labelstr, c + '.nii.gz')) 33 | shutil.copy(join(kits_data_dir, c, 'imaging.nii.gz'), join(imagestr, c + '_0000.nii.gz')) 34 | 35 | generate_dataset_json(join(out_base, 'dataset.json'), 36 | imagestr, 37 | None, 38 | ('CT',), 39 | { 40 | 0: 'background', 41 | 1: "kidney", 42 | 2: "tumor", 43 | 3: "cyst", 44 | }, 45 | task_name, 46 | license='see https://kits-challenge.org/kits23/', 47 | dataset_description='see https://kits-challenge.org/kits23/', 48 | dataset_release='0') 49 | -------------------------------------------------------------------------------- /nnunet_mednext/dataset_conversion/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | from . import * -------------------------------------------------------------------------------- /nnunet_mednext/evaluation/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from . import * -------------------------------------------------------------------------------- /nnunet_mednext/evaluation/add_mean_dice_to_json.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
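# Overview: foreground_mean() rewrites a nnU-Net summary.json in place, adding a results['mean']['mean']
# entry that averages each metric over all foreground classes (labels 0, -1 and 99 are excluded);
# run_in_folder() applies this to every .json file in a folder (skipping hidden files and *_globalMean.json).
# Illustrative calls (the paths below are placeholders, not actual repository paths):
#   foreground_mean('/path/to/summary.json')
#   run_in_folder('/path/to/folder_with_summary_jsons')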
14 | 15 | import json 16 | import numpy as np 17 | from batchgenerators.utilities.file_and_folder_operations import subfiles 18 | from collections import OrderedDict 19 | 20 | 21 | def foreground_mean(filename): 22 | with open(filename, 'r') as f: 23 | res = json.load(f) 24 | class_ids = np.array([int(i) for i in res['results']['mean'].keys() if (i != 'mean')]) 25 | class_ids = class_ids[class_ids != 0] 26 | class_ids = class_ids[class_ids != -1] 27 | class_ids = class_ids[class_ids != 99] 28 | 29 | tmp = res['results']['mean'].get('99') 30 | if tmp is not None: 31 | _ = res['results']['mean'].pop('99') 32 | 33 | metrics = res['results']['mean']['1'].keys() 34 | res['results']['mean']["mean"] = OrderedDict() 35 | for m in metrics: 36 | foreground_values = [res['results']['mean'][str(i)][m] for i in class_ids] 37 | res['results']['mean']["mean"][m] = np.nanmean(foreground_values) 38 | with open(filename, 'w') as f: 39 | json.dump(res, f, indent=4, sort_keys=True) 40 | 41 | 42 | def run_in_folder(folder): 43 | json_files = subfiles(folder, True, None, ".json", True) 44 | json_files = [i for i in json_files if not i.split("/")[-1].startswith(".") and not i.endswith("_globalMean.json")] # stupid mac 45 | for j in json_files: 46 | foreground_mean(j) 47 | 48 | 49 | if __name__ == "__main__": 50 | folder = "/media/fabian/Results/nnUNetOutput_final/summary_jsons" 51 | run_in_folder(folder) 52 | -------------------------------------------------------------------------------- /nnunet_mednext/evaluation/collect_results_files.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
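# Overview: crawl_and_copy() walks a results tree recursively and copies every file ending in
# "summary.json" that sits inside a fold0 folder into one flat output folder, prefixing each copy with an
# automatically generated prefix built from the folder names it came from. The __main__ block runs this on
# network_training_output_dir and then calls run_in_folder() on the output folder to add foreground means.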
14 | 15 | import os 16 | import shutil 17 | from batchgenerators.utilities.file_and_folder_operations import subdirs, subfiles 18 | 19 | 20 | def crawl_and_copy(current_folder, out_folder, prefix="fabian_", suffix="ummary.json"): 21 | """ 22 | This script will run recursively through all subfolders of current_folder and copy all files that end with 23 | suffix with some automatically generated prefix into out_folder 24 | :param current_folder: 25 | :param out_folder: 26 | :param prefix: 27 | :return: 28 | """ 29 | s = subdirs(current_folder, join=False) 30 | f = subfiles(current_folder, join=False) 31 | f = [i for i in f if i.endswith(suffix)] 32 | if current_folder.find("fold0") != -1: 33 | for fl in f: 34 | shutil.copy(os.path.join(current_folder, fl), os.path.join(out_folder, prefix+fl)) 35 | for su in s: 36 | if prefix == "": 37 | add = su 38 | else: 39 | add = "__" + su 40 | crawl_and_copy(os.path.join(current_folder, su), out_folder, prefix=prefix+add) 41 | 42 | 43 | if __name__ == "__main__": 44 | from nnunet_mednext.paths import network_training_output_dir 45 | output_folder = "/home/fabian/PhD/results/nnUNetV2/leaderboard" 46 | crawl_and_copy(network_training_output_dir, output_folder) 47 | from nnunet_mednext.evaluation.add_mean_dice_to_json import run_in_folder 48 | run_in_folder(output_folder) 49 | -------------------------------------------------------------------------------- /nnunet_mednext/evaluation/model_selection/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from . import * -------------------------------------------------------------------------------- /nnunet_mednext/evaluation/surface_dice.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | 16 | import numpy as np 17 | from medpy.metric.binary import __surface_distances 18 | 19 | 20 | def normalized_surface_dice(a: np.ndarray, b: np.ndarray, threshold: float, spacing: tuple = None, connectivity=1): 21 | """ 22 | This implementation differs from the official surface dice implementation! These two are not comparable!!!!! 23 | 24 | The normalized surface dice is symmetric, so it should not matter whether a or b is the reference image 25 | 26 | This implementation natively supports 2D and 3D images. Whether other dimensions are supported depends on the 27 | __surface_distances implementation in medpy 28 | 29 | :param a: image 1, must have the same shape as b 30 | :param b: image 2, must have the same shape as a 31 | :param threshold: distances below this threshold will be counted as true positives. Threshold is in mm, not voxels! 
32 | (if spacing = (1, 1(, 1)) then one voxel=1mm so the threshold is effectively in voxels) 33 | must be a tuple of len dimension(a) 34 | :param spacing: how many mm is one voxel in reality? Can be left at None, we then assume an isotropic spacing of 1mm 35 | :param connectivity: see scipy.ndimage.generate_binary_structure for more information. I suggest you leave that 36 | one alone 37 | :return: 38 | """ 39 | assert all([i == j for i, j in zip(a.shape, b.shape)]), "a and b must have the same shape. a.shape= %s, " \ 40 | "b.shape= %s" % (str(a.shape), str(b.shape)) 41 | if spacing is None: 42 | spacing = tuple([1 for _ in range(len(a.shape))]) 43 | a_to_b = __surface_distances(a, b, spacing, connectivity) 44 | b_to_a = __surface_distances(b, a, spacing, connectivity) 45 | 46 | numel_a = len(a_to_b) 47 | numel_b = len(b_to_a) 48 | 49 | tp_a = np.sum(a_to_b <= threshold) / numel_a 50 | tp_b = np.sum(b_to_a <= threshold) / numel_b 51 | 52 | fp = np.sum(a_to_b > threshold) / numel_a 53 | fn = np.sum(b_to_a > threshold) / numel_b 54 | 55 | dc = (tp_a + tp_b) / (tp_a + tp_b + fp + fn + 1e-8) # 1e-8 just so that we don't get div by 0 56 | return dc 57 | 58 | -------------------------------------------------------------------------------- /nnunet_mednext/experiment_planning/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from . import * -------------------------------------------------------------------------------- /nnunet_mednext/experiment_planning/alternative_experiment_planning/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MIC-DKFZ/MedNeXt/0b78ed869fbd1cc2fd38754d2f8519f1b72d43ba/nnunet_mednext/experiment_planning/alternative_experiment_planning/__init__.py -------------------------------------------------------------------------------- /nnunet_mednext/experiment_planning/alternative_experiment_planning/experiment_planner_baseline_3DUNet_v21_3convperstage.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
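# Note: ExperimentPlanner3D_v21_3cps (defined below) reuses the preprocessed data of the default
# ExperimentPlanner3D_v21 (its run_preprocessing() is a no-op) and only writes its own plans file.
# Following the pattern shown in documentation/tutorials/custom_spacing.md, it would be selected by name
# at planning time, e.g. (TASK_ID is a placeholder):
#   nnUNet_plan_and_preprocess -t TASK_ID -pl3d ExperimentPlanner3D_v21_3cps -pl2d None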
14 | 15 | from copy import deepcopy 16 | 17 | import numpy as np 18 | from nnunet_mednext.experiment_planning.common_utils import get_pool_and_conv_props 19 | from nnunet_mednext.experiment_planning.experiment_planner_baseline_3DUNet import ExperimentPlanner 20 | from nnunet_mednext.experiment_planning.experiment_planner_baseline_3DUNet_v21 import ExperimentPlanner3D_v21 21 | from nnunet_mednext.network_architecture.generic_UNet import Generic_UNet 22 | from nnunet_mednext.paths import * 23 | 24 | 25 | class ExperimentPlanner3D_v21_3cps(ExperimentPlanner3D_v21): 26 | """ 27 | have 3x conv-in-lrelu per resolution instead of 2 while remaining in the same memory budget 28 | 29 | This only works with 3d fullres because we use the same data as ExperimentPlanner3D_v21. Lowres would require to 30 | rerun preprocesing (different patch size = different 3d lowres target spacing) 31 | """ 32 | def __init__(self, folder_with_cropped_data, preprocessed_output_folder): 33 | super(ExperimentPlanner3D_v21_3cps, self).__init__(folder_with_cropped_data, preprocessed_output_folder) 34 | self.plans_fname = join(self.preprocessed_output_folder, 35 | "nnUNetPlansv2.1_3cps_plans_3D.pkl") 36 | self.unet_base_num_features = 32 37 | self.conv_per_stage = 3 38 | 39 | def run_preprocessing(self, num_threads): 40 | pass 41 | -------------------------------------------------------------------------------- /nnunet_mednext/experiment_planning/alternative_experiment_planning/experiment_planner_baseline_3DUNet_v23.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | from nnunet_mednext.experiment_planning.experiment_planner_baseline_3DUNet_v21 import \ 16 | ExperimentPlanner3D_v21 17 | from nnunet_mednext.paths import * 18 | 19 | 20 | class ExperimentPlanner3D_v23(ExperimentPlanner3D_v21): 21 | """ 22 | """ 23 | def __init__(self, folder_with_cropped_data, preprocessed_output_folder): 24 | super(ExperimentPlanner3D_v23, self).__init__(folder_with_cropped_data, preprocessed_output_folder) 25 | self.data_identifier = "nnUNetData_plans_v2.3" 26 | self.plans_fname = join(self.preprocessed_output_folder, 27 | "nnUNetPlansv2.3_plans_3D.pkl") 28 | self.preprocessor_name = "Preprocessor3DDifferentResampling" 29 | -------------------------------------------------------------------------------- /nnunet_mednext/experiment_planning/alternative_experiment_planning/experiment_planner_pretrained.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 
5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | from batchgenerators.utilities.file_and_folder_operations import load_pickle 16 | from nnunet_mednext.experiment_planning.experiment_planner_baseline_3DUNet_v21 import ExperimentPlanner3D_v21 17 | from nnunet_mednext.paths import * 18 | 19 | 20 | class ExperimentPlanner3D_v21_Pretrained(ExperimentPlanner3D_v21): 21 | def __init__(self, folder_with_cropped_data, preprocessed_output_folder, pretrained_model_plans_file: str, 22 | pretrained_name: str): 23 | super().__init__(folder_with_cropped_data, preprocessed_output_folder) 24 | self.pretrained_model_plans_file = pretrained_model_plans_file 25 | self.pretrained_name = pretrained_name 26 | self.data_identifier = "nnUNetData_pretrained_" + pretrained_name 27 | self.plans_fname = join(self.preprocessed_output_folder, "nnUNetPlans_pretrained_%s_plans_3D.pkl" % pretrained_name) 28 | 29 | def load_pretrained_plans(self): 30 | classes = self.plans['num_classes'] 31 | self.plans = load_pickle(self.pretrained_model_plans_file) 32 | self.plans['num_classes'] = classes 33 | self.transpose_forward = self.plans['transpose_forward'] 34 | self.preprocessor_name = self.plans['preprocessor_name'] 35 | self.plans_per_stage = self.plans['plans_per_stage'] 36 | self.plans['data_identifier'] = self.data_identifier 37 | self.save_my_plans() 38 | print(self.plans['plans_per_stage']) 39 | 40 | def run_preprocessing(self, num_threads): 41 | self.load_pretrained_plans() 42 | super().run_preprocessing(num_threads) 43 | -------------------------------------------------------------------------------- /nnunet_mednext/experiment_planning/alternative_experiment_planning/normalization/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MIC-DKFZ/MedNeXt/0b78ed869fbd1cc2fd38754d2f8519f1b72d43ba/nnunet_mednext/experiment_planning/alternative_experiment_planning/normalization/__init__.py -------------------------------------------------------------------------------- /nnunet_mednext/experiment_planning/alternative_experiment_planning/normalization/experiment_planner_2DUNet_v21_RGB_scaleto_0_1.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | 15 | 16 | from nnunet_mednext.experiment_planning.experiment_planner_baseline_2DUNet_v21 import ExperimentPlanner2D_v21 17 | from nnunet_mednext.paths import * 18 | 19 | 20 | class ExperimentPlanner2D_v21_RGB_scaleTo_0_1(ExperimentPlanner2D_v21): 21 | """ 22 | used by tutorial nnunet.tutorials.custom_preprocessing 23 | """ 24 | def __init__(self, folder_with_cropped_data, preprocessed_output_folder): 25 | super().__init__(folder_with_cropped_data, preprocessed_output_folder) 26 | self.data_identifier = "nnUNet_RGB_scaleTo_0_1" 27 | self.plans_fname = join(self.preprocessed_output_folder, "nnUNet_RGB_scaleTo_0_1" + "_plans_2D.pkl") 28 | 29 | # The custom preprocessor class we intend to use is GenericPreprocessor_scale_uint8_to_0_1. It must be located 30 | # in nnunet.preprocessing (any file and submodule) and will be found by its name. Make sure to always define 31 | # unique names! 32 | self.preprocessor_name = 'GenericPreprocessor_scale_uint8_to_0_1' 33 | -------------------------------------------------------------------------------- /nnunet_mednext/experiment_planning/alternative_experiment_planning/normalization/experiment_planner_3DUNet_CT2.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | 16 | from collections import OrderedDict 17 | 18 | from nnunet_mednext.experiment_planning.experiment_planner_baseline_3DUNet import ExperimentPlanner 19 | from nnunet_mednext.paths import * 20 | 21 | 22 | class ExperimentPlannerCT2(ExperimentPlanner): 23 | """ 24 | preprocesses CT data with the "CT2" normalization. 
25 | 26 | (clip range comes from training set and is the 0.5 and 99.5 percentile of intensities in foreground) 27 | CT = clip to range, then normalize with global mn and sd (computed on foreground in training set) 28 | CT2 = clip to range, normalize each case separately with its own mn and std (computed within the area that was in clip_range) 29 | """ 30 | def __init__(self, folder_with_cropped_data, preprocessed_output_folder): 31 | super(ExperimentPlannerCT2, self).__init__(folder_with_cropped_data, preprocessed_output_folder) 32 | self.data_identifier = "nnUNet_CT2" 33 | self.plans_fname = join(self.preprocessed_output_folder, "nnUNetPlans" + "CT2_plans_3D.pkl") 34 | 35 | def determine_normalization_scheme(self): 36 | schemes = OrderedDict() 37 | modalities = self.dataset_properties['modalities'] 38 | num_modalities = len(list(modalities.keys())) 39 | 40 | for i in range(num_modalities): 41 | if modalities[i] == "CT": 42 | schemes[i] = "CT2" 43 | else: 44 | schemes[i] = "nonCT" 45 | return schemes 46 | -------------------------------------------------------------------------------- /nnunet_mednext/experiment_planning/alternative_experiment_planning/normalization/experiment_planner_3DUNet_nonCT.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | 15 | 16 | from collections import OrderedDict 17 | 18 | from nnunet_mednext.experiment_planning.experiment_planner_baseline_3DUNet import ExperimentPlanner 19 | from nnunet_mednext.paths import * 20 | 21 | 22 | class ExperimentPlannernonCT(ExperimentPlanner): 23 | """ 24 | Preprocesses all data in nonCT mode (this is what we use for MRI per default, but here it is applied to CT images 25 | as well) 26 | """ 27 | def __init__(self, folder_with_cropped_data, preprocessed_output_folder): 28 | super(ExperimentPlannernonCT, self).__init__(folder_with_cropped_data, preprocessed_output_folder) 29 | self.data_identifier = "nnUNet_nonCT" 30 | self.plans_fname = join(self.preprocessed_output_folder, "nnUNetPlans" + "nonCT_plans_3D.pkl") 31 | 32 | def determine_normalization_scheme(self): 33 | schemes = OrderedDict() 34 | modalities = self.dataset_properties['modalities'] 35 | num_modalities = len(list(modalities.keys())) 36 | 37 | for i in range(num_modalities): 38 | if modalities[i] == "CT": 39 | schemes[i] = "nonCT" 40 | else: 41 | schemes[i] = "nonCT" 42 | return schemes 43 | 44 | -------------------------------------------------------------------------------- /nnunet_mednext/experiment_planning/alternative_experiment_planning/patch_size/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MIC-DKFZ/MedNeXt/0b78ed869fbd1cc2fd38754d2f8519f1b72d43ba/nnunet_mednext/experiment_planning/alternative_experiment_planning/patch_size/__init__.py -------------------------------------------------------------------------------- /nnunet_mednext/experiment_planning/alternative_experiment_planning/pooling_and_convs/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MIC-DKFZ/MedNeXt/0b78ed869fbd1cc2fd38754d2f8519f1b72d43ba/nnunet_mednext/experiment_planning/alternative_experiment_planning/pooling_and_convs/__init__.py -------------------------------------------------------------------------------- /nnunet_mednext/experiment_planning/alternative_experiment_planning/readme.md: -------------------------------------------------------------------------------- 1 | These alternatives are not used in nnU-Net, but you can use them if you believe they might be better suited for you. 2 | I (Fabian) have not found them to be consistently superior. -------------------------------------------------------------------------------- /nnunet_mednext/experiment_planning/alternative_experiment_planning/target_spacing/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MIC-DKFZ/MedNeXt/0b78ed869fbd1cc2fd38754d2f8519f1b72d43ba/nnunet_mednext/experiment_planning/alternative_experiment_planning/target_spacing/__init__.py -------------------------------------------------------------------------------- /nnunet_mednext/experiment_planning/alternative_experiment_planning/target_spacing/experiment_planner_baseline_3DUNet_v21_customTargetSpacing_2x2x2.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 
5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | import numpy as np 16 | from nnunet_mednext.experiment_planning.experiment_planner_baseline_3DUNet_v21 import ExperimentPlanner3D_v21 17 | from nnunet_mednext.paths import * 18 | 19 | 20 | class ExperimentPlanner3D_v21_customTargetSpacing_2x2x2(ExperimentPlanner3D_v21): 21 | def __init__(self, folder_with_cropped_data, preprocessed_output_folder): 22 | super(ExperimentPlanner3D_v21, self).__init__(folder_with_cropped_data, preprocessed_output_folder) 23 | # we change the data identifier and plans_fname. This will make this experiment planner save the preprocessed 24 | # data in a different folder so that they can co-exist with the default (ExperimentPlanner3D_v21). We also 25 | # create a custom plans file that will be linked to this data 26 | self.data_identifier = "nnUNetData_plans_v2.1_trgSp_2x2x2" 27 | self.plans_fname = join(self.preprocessed_output_folder, 28 | "nnUNetPlansv2.1_trgSp_2x2x2_plans_3D.pkl") 29 | 30 | def get_target_spacing(self): 31 | # simply return the desired spacing as np.array 32 | return np.array([2., 2., 2.]) # make sure this is float!!!! Not int! 33 | 34 | -------------------------------------------------------------------------------- /nnunet_mednext/experiment_planning/change_batch_size.py: -------------------------------------------------------------------------------- 1 | from batchgenerators.utilities.file_and_folder_operations import * 2 | import numpy as np 3 | 4 | if __name__ == '__main__': 5 | input_file = '/home/fabian/data/nnUNet_preprocessed/Task004_Hippocampus/nnUNetPlansv2.1_plans_3D.pkl' 6 | output_file = '/home/fabian/data/nnUNet_preprocessed/Task004_Hippocampus/nnUNetPlansv2.1_LISA_plans_3D.pkl' 7 | a = load_pickle(input_file) 8 | a['plans_per_stage'][0]['batch_size'] = int(np.floor(6 / 9 * a['plans_per_stage'][0]['batch_size'])) 9 | save_pickle(a, output_file) -------------------------------------------------------------------------------- /nnunet_mednext/experiment_planning/old/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MIC-DKFZ/MedNeXt/0b78ed869fbd1cc2fd38754d2f8519f1b72d43ba/nnunet_mednext/experiment_planning/old/__init__.py -------------------------------------------------------------------------------- /nnunet_mednext/inference/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from . import * -------------------------------------------------------------------------------- /nnunet_mednext/inference/change_trainer.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 
5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | 16 | from batchgenerators.utilities.file_and_folder_operations import * 17 | 18 | 19 | def pretend_to_be_nnUNetTrainer(folder, checkpoints=("model_best.model.pkl", "model_final_checkpoint.model.pkl")): 20 | pretend_to_be_other_trainer(folder, "nnUNetTrainer", checkpoints) 21 | 22 | 23 | def pretend_to_be_other_trainer(folder, new_trainer_name, checkpoints=("model_best.model.pkl", "model_final_checkpoint.model.pkl")): 24 | folds = subdirs(folder, prefix="fold_", join=False) 25 | 26 | if isdir(join(folder, 'all')): 27 | folds.append('all') 28 | 29 | for c in checkpoints: 30 | for f in folds: 31 | checkpoint_file = join(folder, f, c) 32 | if isfile(checkpoint_file): 33 | a = load_pickle(checkpoint_file) 34 | a['name'] = new_trainer_name 35 | save_pickle(a, checkpoint_file) 36 | 37 | 38 | def main(): 39 | import argparse 40 | parser = argparse.ArgumentParser(description='Use this script to change the nnunet trainer class of a saved ' 41 | 'model. Useful for models that were trained with trainers that do ' 42 | 'not support inference (multi GPU trainers) or for trainer classes ' 43 | 'whose source code is not available. For this to work the network ' 44 | 'architecture must be identical between the original trainer ' 45 | 'class and the trainer class we are changing to. This script is ' 46 | 'experimental and only to be used by advanced users.') 47 | parser.add_argument('-i', help='Folder containing the trained model. This folder is the one containing the ' 48 | 'fold_X subfolders.') 49 | parser.add_argument('-tr', help='Name of the new trainer class') 50 | args = parser.parse_args() 51 | pretend_to_be_other_trainer(args.i, args.tr) 52 | -------------------------------------------------------------------------------- /nnunet_mednext/inference/pretrained_models/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MIC-DKFZ/MedNeXt/0b78ed869fbd1cc2fd38754d2f8519f1b72d43ba/nnunet_mednext/inference/pretrained_models/__init__.py -------------------------------------------------------------------------------- /nnunet_mednext/network_architecture/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from . 
import * -------------------------------------------------------------------------------- /nnunet_mednext/network_architecture/custom_modules/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MIC-DKFZ/MedNeXt/0b78ed869fbd1cc2fd38754d2f8519f1b72d43ba/nnunet_mednext/network_architecture/custom_modules/__init__.py -------------------------------------------------------------------------------- /nnunet_mednext/network_architecture/custom_modules/custom_networks/CoTr/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MIC-DKFZ/MedNeXt/0b78ed869fbd1cc2fd38754d2f8519f1b72d43ba/nnunet_mednext/network_architecture/custom_modules/custom_networks/CoTr/__init__.py -------------------------------------------------------------------------------- /nnunet_mednext/network_architecture/custom_modules/custom_networks/SwinUNETR/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MIC-DKFZ/MedNeXt/0b78ed869fbd1cc2fd38754d2f8519f1b72d43ba/nnunet_mednext/network_architecture/custom_modules/custom_networks/SwinUNETR/__init__.py -------------------------------------------------------------------------------- /nnunet_mednext/network_architecture/custom_modules/custom_networks/TransBTS/IntmdSequential.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | 3 | 4 | class IntermediateSequential(nn.Sequential): 5 | def __init__(self, *args, return_intermediate=True): 6 | super().__init__(*args) 7 | self.return_intermediate = return_intermediate 8 | 9 | def forward(self, input): 10 | if not self.return_intermediate: 11 | return super().forward(input) 12 | 13 | intermediate_outputs = {} 14 | output = input 15 | for name, module in self.named_children(): 16 | output = intermediate_outputs[name] = module(output) 17 | 18 | return output, intermediate_outputs 19 | -------------------------------------------------------------------------------- /nnunet_mednext/network_architecture/custom_modules/custom_networks/TransBTS/PositionalEncoding.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | 4 | class FixedPositionalEncoding(nn.Module): 5 | def __init__(self, embedding_dim, max_length=512): 6 | super(FixedPositionalEncoding, self).__init__() 7 | 8 | pe = torch.zeros(max_length, embedding_dim) 9 | position = torch.arange(0, max_length, dtype=torch.float).unsqueeze(1) 10 | div_term = torch.exp( 11 | torch.arange(0, embedding_dim, 2).float() 12 | * (-torch.log(torch.tensor(10000.0)) / embedding_dim) 13 | ) 14 | pe[:, 0::2] = torch.sin(position * div_term) 15 | pe[:, 1::2] = torch.cos(position * div_term) 16 | pe = pe.unsqueeze(0).transpose(0, 1) 17 | self.register_buffer('pe', pe) 18 | 19 | def forward(self, x): 20 | x = x + self.pe[: x.size(0), :] 21 | return x 22 | 23 | 24 | class LearnedPositionalEncoding(nn.Module): 25 | def __init__(self, max_position_embeddings, embedding_dim, seq_length): 26 | super(LearnedPositionalEncoding, self).__init__() 27 | 28 | self.position_embeddings = nn.Parameter(torch.zeros(1, 4096, 512)) #8x 29 | 30 | def forward(self, x, position_ids=None): 31 | 32 | position_embeddings = self.position_embeddings 33 | return x + position_embeddings -------------------------------------------------------------------------------- 
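The two positional-encoding modules above expect different input layouts. A minimal usage sketch (not part of the repository), assuming the import path that matches this repo's package layout:

# Illustrative only: FixedPositionalEncoding broadcasts a (max_length, 1, embedding_dim)
# buffer, so it expects inputs shaped (sequence_length, batch, embedding_dim);
# LearnedPositionalEncoding hard-codes a (1, 4096, 512) parameter, so inputs must be
# (batch, 4096, 512), matching TransBTS' 8x-downsampled feature tokens.
import torch
from nnunet_mednext.network_architecture.custom_modules.custom_networks.TransBTS.PositionalEncoding import (
    FixedPositionalEncoding, LearnedPositionalEncoding)

fixed_pe = FixedPositionalEncoding(embedding_dim=512, max_length=512)
tokens = torch.randn(128, 2, 512)      # 128 tokens, batch of 2, 512-dim embeddings
print(fixed_pe(tokens).shape)          # torch.Size([128, 2, 512])

learned_pe = LearnedPositionalEncoding(max_position_embeddings=4096, embedding_dim=512, seq_length=4096)
features = torch.randn(2, 4096, 512)   # batch of 2, 4096 tokens
print(learned_pe(features).shape)      # torch.Size([2, 4096, 512])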
/nnunet_mednext/network_architecture/custom_modules/custom_networks/TransBTS/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MIC-DKFZ/MedNeXt/0b78ed869fbd1cc2fd38754d2f8519f1b72d43ba/nnunet_mednext/network_architecture/custom_modules/custom_networks/TransBTS/__init__.py -------------------------------------------------------------------------------- /nnunet_mednext/network_architecture/custom_modules/custom_networks/TransFuse/DeiT.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2015-present, Facebook, Inc. 2 | # All rights reserved. 3 | import torch 4 | import torch.nn as nn 5 | from functools import partial 6 | 7 | from .vision_transformer import VisionTransformer, _cfg 8 | from timm.models.registry import register_model 9 | from timm.models.layers import trunc_normal_ 10 | import torch.nn.functional as F 11 | import numpy as np 12 | 13 | 14 | __all__ = [ 15 | 'deit_tiny_patch16_224', 'deit_small_patch16_224', 'deit_base_patch16_224', 16 | 'deit_tiny_distilled_patch16_224', 'deit_small_distilled_patch16_224', 17 | 'deit_base_distilled_patch16_224', 'deit_base_patch16_384', 18 | 'deit_base_distilled_patch16_384', 19 | ] 20 | 21 | 22 | class DeiT(VisionTransformer): 23 | def __init__(self, *args, **kwargs): 24 | super().__init__(*args, **kwargs) 25 | num_patches = self.patch_embed.num_patches 26 | # self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, self.embed_dim)) # Don't think I need the class token 27 | self.pos_embed = nn.Parameter(torch.zeros(1, num_patches, self.embed_dim)) 28 | 29 | def forward(self, x): 30 | # taken from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py 31 | # with slight modifications to add the dist_token 32 | B = x.shape[0] 33 | x = self.patch_embed(x) 34 | pe = self.pos_embed 35 | 36 | # print(x.shape, pe.shape) 37 | x = x + pe 38 | x = self.pos_drop(x) 39 | 40 | for blk in self.blocks: 41 | x = blk(x) 42 | 43 | x = self.norm(x) 44 | return x 45 | 46 | 47 | @register_model 48 | def deit_small_patch16_224(pretrained=False, **kwargs): 49 | model = DeiT( 50 | patch_size=16, embed_dim=384, depth=8, num_heads=6, mlp_ratio=4, qkv_bias=True, 51 | norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs) 52 | model.default_cfg = _cfg() 53 | if pretrained: 54 | ckpt = torch.load('pretrained/deit_small_patch16_224-cd65a155.pth') 55 | model.load_state_dict(ckpt['model'], strict=False) 56 | 57 | # I think they have a 12x16 position embedding for some reason 58 | # and they are manually interpolating the existing one. 
59 | # I don't know why I'd need this 60 | # pe = model.pos_embed[:, 1:, :].detach() 61 | # print(pe.shape) 62 | # pe = pe.transpose(-1, -2) 63 | # pe = pe.view(pe.shape[0], pe.shape[1], int(np.sqrt(pe.shape[2])), int(np.sqrt(pe.shape[2]))) 64 | # pe = F.interpolate(pe, size=(12, 16), mode='bilinear', align_corners=True) 65 | # pe = pe.flatten(2) 66 | # pe = pe.transpose(-1, -2) 67 | # model.pos_embed = nn.Parameter(pe) 68 | model.head = nn.Identity() 69 | return model -------------------------------------------------------------------------------- /nnunet_mednext/network_architecture/custom_modules/custom_networks/TransFuse/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MIC-DKFZ/MedNeXt/0b78ed869fbd1cc2fd38754d2f8519f1b72d43ba/nnunet_mednext/network_architecture/custom_modules/custom_networks/TransFuse/__init__.py -------------------------------------------------------------------------------- /nnunet_mednext/network_architecture/custom_modules/custom_networks/TransUnet/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MIC-DKFZ/MedNeXt/0b78ed869fbd1cc2fd38754d2f8519f1b72d43ba/nnunet_mednext/network_architecture/custom_modules/custom_networks/TransUnet/__init__.py -------------------------------------------------------------------------------- /nnunet_mednext/network_architecture/custom_modules/custom_networks/UNETR/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MIC-DKFZ/MedNeXt/0b78ed869fbd1cc2fd38754d2f8519f1b72d43ba/nnunet_mednext/network_architecture/custom_modules/custom_networks/UNETR/__init__.py -------------------------------------------------------------------------------- /nnunet_mednext/network_architecture/custom_modules/custom_networks/UTNet/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MIC-DKFZ/MedNeXt/0b78ed869fbd1cc2fd38754d2f8519f1b72d43ba/nnunet_mednext/network_architecture/custom_modules/custom_networks/UTNet/__init__.py -------------------------------------------------------------------------------- /nnunet_mednext/network_architecture/custom_modules/custom_networks/UXNet3D/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MIC-DKFZ/MedNeXt/0b78ed869fbd1cc2fd38754d2f8519f1b72d43ba/nnunet_mednext/network_architecture/custom_modules/custom_networks/UXNet3D/__init__.py -------------------------------------------------------------------------------- /nnunet_mednext/network_architecture/custom_modules/custom_networks/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MIC-DKFZ/MedNeXt/0b78ed869fbd1cc2fd38754d2f8519f1b72d43ba/nnunet_mednext/network_architecture/custom_modules/custom_networks/__init__.py -------------------------------------------------------------------------------- /nnunet_mednext/network_architecture/custom_modules/custom_networks/nnFormer/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MIC-DKFZ/MedNeXt/0b78ed869fbd1cc2fd38754d2f8519f1b72d43ba/nnunet_mednext/network_architecture/custom_modules/custom_networks/nnFormer/__init__.py -------------------------------------------------------------------------------- 
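The DeiT-Small constructor above is registered with timm but can also be called directly. A minimal sketch (not part of the repository) of building the backbone without pretrained weights, assuming the import path matching this repo's layout and the default 224x224, patch-16 configuration of the accompanying vision_transformer.py:

# Illustrative only: with pretrained=False no checkpoint file is read; the forward
# pass defined above returns normalized patch tokens (no class token, no head applied).
import torch
from nnunet_mednext.network_architecture.custom_modules.custom_networks.TransFuse.DeiT import deit_small_patch16_224

model = deit_small_patch16_224(pretrained=False)
x = torch.randn(1, 3, 224, 224)        # one RGB image at the assumed default resolution
tokens = model(x)                      # expected shape: (1, 196, 384) for a 14x14 grid of 16px patches
print(tokens.shape)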
/nnunet_mednext/network_architecture/custom_modules/feature_response_normalization.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | 16 | from nnunet_mednext.utilities.tensor_utilities import mean_tensor 17 | from torch import nn 18 | import torch 19 | from torch.nn.parameter import Parameter 20 | import torch.jit 21 | 22 | 23 | class FRN3D(nn.Module): 24 | def __init__(self, num_features: int, eps=1e-6, **kwargs): 25 | super().__init__() 26 | self.eps = eps 27 | self.num_features = num_features 28 | self.weight = Parameter(torch.ones(1, num_features, 1, 1, 1), True) 29 | self.bias = Parameter(torch.zeros(1, num_features, 1, 1, 1), True) 30 | self.tau = Parameter(torch.zeros(1, num_features, 1, 1, 1), True) 31 | 32 | def forward(self, x: torch.Tensor): 33 | x = x * torch.rsqrt(mean_tensor(x * x, [2, 3, 4], keepdim=True) + self.eps) 34 | 35 | return torch.max(self.weight * x + self.bias, self.tau) 36 | 37 | 38 | if __name__ == "__main__": 39 | tmp = torch.rand((3, 32, 16, 16, 16)) 40 | 41 | frn = FRN3D(32) 42 | 43 | out = frn(tmp) 44 | -------------------------------------------------------------------------------- /nnunet_mednext/network_architecture/custom_modules/helperModules.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | 16 | from torch import nn 17 | 18 | 19 | class Identity(nn.Module): 20 | def __init__(self, *args, **kwargs): 21 | super().__init__() 22 | 23 | def forward(self, input): 24 | return input 25 | 26 | 27 | class MyGroupNorm(nn.GroupNorm): 28 | def __init__(self, num_channels, eps=1e-5, affine=True, num_groups=8): 29 | super(MyGroupNorm, self).__init__(num_groups, num_channels, eps, affine) 30 | -------------------------------------------------------------------------------- /nnunet_mednext/network_architecture/custom_modules/mish.py: -------------------------------------------------------------------------------- 1 | ############ 2 | # https://github.com/lessw2020/mish/blob/master/mish.py 3 | # This code was taken from the repo above and was not created by me (Fabian)! 
Full credit goes to the original authors 4 | ############ 5 | 6 | import torch 7 | 8 | import torch.nn as nn 9 | import torch.nn.functional as F 10 | 11 | 12 | # Mish - "Mish: A Self Regularized Non-Monotonic Neural Activation Function" 13 | # https://arxiv.org/abs/1908.08681v1 14 | # implemented for PyTorch / FastAI by lessw2020 15 | # github: https://github.com/lessw2020/mish 16 | 17 | class Mish(nn.Module): 18 | def __init__(self): 19 | super().__init__() 20 | 21 | def forward(self, x): 22 | # inlining this saves 1 second per epoch (V100 GPU) vs having a temp x and then returning x(!) 23 | return x * (torch.tanh(F.softplus(x))) 24 | -------------------------------------------------------------------------------- /nnunet_mednext/network_architecture/initialization.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | 16 | from torch import nn 17 | 18 | 19 | class InitWeights_He(object): 20 | def __init__(self, neg_slope=1e-2): 21 | self.neg_slope = neg_slope 22 | 23 | def __call__(self, module): 24 | if isinstance(module, nn.Conv3d) or isinstance(module, nn.Conv2d) or isinstance(module, nn.ConvTranspose2d) or isinstance(module, nn.ConvTranspose3d): 25 | module.weight = nn.init.kaiming_normal_(module.weight, a=self.neg_slope) 26 | if module.bias is not None: 27 | module.bias = nn.init.constant_(module.bias, 0) 28 | 29 | 30 | class InitWeights_XavierUniform(object): 31 | def __init__(self, gain=1): 32 | self.gain = gain 33 | 34 | def __call__(self, module): 35 | if isinstance(module, nn.Conv3d) or isinstance(module, nn.Conv2d) or isinstance(module, nn.ConvTranspose2d) or isinstance(module, nn.ConvTranspose3d): 36 | module.weight = nn.init.xavier_uniform_(module.weight, self.gain) 37 | if module.bias is not None: 38 | module.bias = nn.init.constant_(module.bias, 0) 39 | -------------------------------------------------------------------------------- /nnunet_mednext/network_architecture/mednextv1/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MIC-DKFZ/MedNeXt/0b78ed869fbd1cc2fd38754d2f8519f1b72d43ba/nnunet_mednext/network_architecture/mednextv1/__init__.py -------------------------------------------------------------------------------- /nnunet_mednext/network_architecture/mednextv1/create_mednext_v1.py: -------------------------------------------------------------------------------- 1 | from nnunet_mednext.network_architecture.mednextv1.MedNextV1 import MedNeXt 2 | 3 | def create_mednextv1_small(num_input_channels, num_classes, kernel_size=3, ds=False): 4 | 5 | return MedNeXt( 6 | in_channels = num_input_channels, 7 | n_channels = 32, 8 | n_classes = num_classes, 9 | exp_r=2, 10 | kernel_size=kernel_size, 11 | deep_supervision=ds, 12 | do_res=True, 13 | do_res_up_down = True, 
14 | block_counts = [2,2,2,2,2,2,2,2,2] 15 | ) 16 | 17 | 18 | def create_mednextv1_base(num_input_channels, num_classes, kernel_size=3, ds=False): 19 | 20 | return MedNeXt( 21 | in_channels = num_input_channels, 22 | n_channels = 32, 23 | n_classes = num_classes, 24 | exp_r=[2,3,4,4,4,4,4,3,2], 25 | kernel_size=kernel_size, 26 | deep_supervision=ds, 27 | do_res=True, 28 | do_res_up_down = True, 29 | block_counts = [2,2,2,2,2,2,2,2,2] 30 | ) 31 | 32 | 33 | def create_mednextv1_medium(num_input_channels, num_classes, kernel_size=3, ds=False): 34 | 35 | return MedNeXt( 36 | in_channels = num_input_channels, 37 | n_channels = 32, 38 | n_classes = num_classes, 39 | exp_r=[2,3,4,4,4,4,4,3,2], 40 | kernel_size=kernel_size, 41 | deep_supervision=ds, 42 | do_res=True, 43 | do_res_up_down = True, 44 | block_counts = [3,4,4,4,4,4,4,4,3], 45 | checkpoint_style = 'outside_block' 46 | ) 47 | 48 | 49 | def create_mednextv1_large(num_input_channels, num_classes, kernel_size=3, ds=False): 50 | 51 | return MedNeXt( 52 | in_channels = num_input_channels, 53 | n_channels = 32, 54 | n_classes = num_classes, 55 | exp_r=[3,4,8,8,8,8,8,4,3], 56 | kernel_size=kernel_size, 57 | deep_supervision=ds, 58 | do_res=True, 59 | do_res_up_down = True, 60 | block_counts = [3,4,8,8,8,8,8,4,3], 61 | checkpoint_style = 'outside_block' 62 | ) 63 | 64 | 65 | def create_mednext_v1(num_input_channels, num_classes, model_id, kernel_size=3, 66 | deep_supervision=False): 67 | 68 | model_dict = { 69 | 'S': create_mednextv1_small, 70 | 'B': create_mednextv1_base, 71 | 'M': create_mednextv1_medium, 72 | 'L': create_mednextv1_large, 73 | } 74 | 75 | return model_dict[model_id]( 76 | num_input_channels, num_classes, kernel_size, deep_supervision 77 | ) 78 | 79 | 80 | if __name__ == "__main__": 81 | 82 | model = create_mednextv1_large(1, 3, 3, False) 83 | print(model) -------------------------------------------------------------------------------- /nnunet_mednext/postprocessing/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MIC-DKFZ/MedNeXt/0b78ed869fbd1cc2fd38754d2f8519f1b72d43ba/nnunet_mednext/postprocessing/__init__.py -------------------------------------------------------------------------------- /nnunet_mednext/postprocessing/consolidate_postprocessing_simple.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | 15 | 16 | import argparse 17 | from nnunet_mednext.postprocessing.consolidate_postprocessing import consolidate_folds 18 | from nnunet_mednext.utilities.folder_names import get_output_folder_name 19 | from nnunet_mednext.utilities.task_name_id_conversion import convert_id_to_task_name 20 | from nnunet_mednext.paths import default_cascade_trainer, default_trainer, default_plans_identifier 21 | 22 | 23 | def main(): 24 | argparser = argparse.ArgumentParser(usage="Used to determine the postprocessing for a trained model. Useful for " 25 | "when the best configuration (2d, 3d_fullres etc) as selected manually.") 26 | argparser.add_argument("-m", type=str, required=True, help="U-Net model (2d, 3d_lowres, 3d_fullres or " 27 | "3d_cascade_fullres)") 28 | argparser.add_argument("-t", type=str, required=True, help="Task name or id") 29 | argparser.add_argument("-tr", type=str, required=False, default=None, 30 | help="nnUNetTrainer class. Default: %s, unless 3d_cascade_fullres " 31 | "(then it's %s)" % (default_trainer, default_cascade_trainer)) 32 | argparser.add_argument("-pl", type=str, required=False, default=default_plans_identifier, 33 | help="Plans name, Default=%s" % default_plans_identifier) 34 | argparser.add_argument("-val", type=str, required=False, default="validation_raw", 35 | help="Validation folder name. Default: validation_raw") 36 | 37 | args = argparser.parse_args() 38 | model = args.m 39 | task = args.t 40 | trainer = args.tr 41 | plans = args.pl 42 | val = args.val 43 | 44 | if not task.startswith("Task"): 45 | task_id = int(task) 46 | task = convert_id_to_task_name(task_id) 47 | 48 | if trainer is None: 49 | if model == "3d_cascade_fullres": 50 | trainer = "nnUNetTrainerV2CascadeFullRes" 51 | else: 52 | trainer = "nnUNetTrainerV2" 53 | 54 | folder = get_output_folder_name(model, task, trainer, plans, None) 55 | 56 | consolidate_folds(folder, val) 57 | 58 | 59 | if __name__ == "__main__": 60 | main() 61 | -------------------------------------------------------------------------------- /nnunet_mednext/preprocessing/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from . import * -------------------------------------------------------------------------------- /nnunet_mednext/preprocessing/custom_preprocessors/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MIC-DKFZ/MedNeXt/0b78ed869fbd1cc2fd38754d2f8519f1b72d43ba/nnunet_mednext/preprocessing/custom_preprocessors/__init__.py -------------------------------------------------------------------------------- /nnunet_mednext/run/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from . 
import * -------------------------------------------------------------------------------- /nnunet_mednext/run/load_weights.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import warnings 4 | 5 | def upkern_load_weights(network:nn.Module, pretrained_net:nn.Module): 6 | pretrained_dict = pretrained_net.state_dict() 7 | model_dict = network.state_dict() 8 | 9 | for k in model_dict.keys(): 10 | # print(k, model_dict[k].shape, pretrained_dict[k].shape) 11 | 12 | if k in model_dict.keys() and k in pretrained_dict.keys(): # Common keys 13 | if 'bias' in k or 'norm' in k or 'dummy' in k: # bias, norm and dummy layers 14 | print(f"Key {k} loaded unchanged.") 15 | model_dict[k] = pretrained_dict[k] 16 | else: # Conv / linear layers 17 | inc1, outc1, *spatial_dims1 = model_dict[k].shape 18 | inc2, outc2, *spatial_dims2 = pretrained_dict[k].shape 19 | print(inc1, outc1, spatial_dims1, inc2, outc2, spatial_dims2) 20 | 21 | assert inc1==inc2 # Please use equal in_channels in all layers for resizing pretrainer 22 | assert outc1 == outc2 # Please use equal out_channels in all layers for resizing pretrainer 23 | 24 | if spatial_dims1 == spatial_dims2: 25 | model_dict[k] = pretrained_dict[k] 26 | print(f"Key {k} loaded.") 27 | else: 28 | model_dict[k] = torch.nn.functional.interpolate( 29 | pretrained_dict[k], size=spatial_dims1, 30 | mode='trilinear' 31 | ) 32 | print(f"Key {k} interpolated trilinearly from {spatial_dims2}->{spatial_dims1} and loaded.") 33 | else: # Keys which are not shared 34 | warnings.warn(f"Key {k} in current_model:{k in model_dict.keys()} and pretrained_model:{k in pretrained_dict.keys()} and will not be loaded.") 35 | 36 | network.load_state_dict(model_dict) 37 | print("######## Weight Loading DONE ############") 38 | return network -------------------------------------------------------------------------------- /nnunet_mednext/training/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from . import * -------------------------------------------------------------------------------- /nnunet_mednext/training/cascade_stuff/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from . import * -------------------------------------------------------------------------------- /nnunet_mednext/training/data_augmentation/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from . import * -------------------------------------------------------------------------------- /nnunet_mednext/training/dataloading/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from . 
import * -------------------------------------------------------------------------------- /nnunet_mednext/training/learning_rate/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MIC-DKFZ/MedNeXt/0b78ed869fbd1cc2fd38754d2f8519f1b72d43ba/nnunet_mednext/training/learning_rate/__init__.py -------------------------------------------------------------------------------- /nnunet_mednext/training/learning_rate/poly_lr.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | 16 | def poly_lr(epoch, max_epochs, initial_lr, exponent=0.9): 17 | return initial_lr * (1 - epoch / max_epochs)**exponent 18 | -------------------------------------------------------------------------------- /nnunet_mednext/training/loss_functions/TopK_loss.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | import numpy as np 16 | import torch 17 | from nnunet_mednext.training.loss_functions.crossentropy import RobustCrossEntropyLoss 18 | 19 | 20 | class TopKLoss(RobustCrossEntropyLoss): 21 | """ 22 | Network has to have NO LINEARITY! 23 | """ 24 | def __init__(self, weight=None, ignore_index=-100, k=10): 25 | self.k = k 26 | super(TopKLoss, self).__init__(weight, False, ignore_index, reduce=False) 27 | 28 | def forward(self, inp, target): 29 | target = target[:, 0].long() 30 | res = super(TopKLoss, self).forward(inp, target) 31 | num_voxels = np.prod(res.shape, dtype=np.int64) 32 | res, _ = torch.topk(res.view((-1, )), int(num_voxels * self.k / 100), sorted=False) 33 | return res.mean() 34 | -------------------------------------------------------------------------------- /nnunet_mednext/training/loss_functions/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from . 
import * -------------------------------------------------------------------------------- /nnunet_mednext/training/loss_functions/confidence_penalty.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | def confidence_penalty(preds_softmax, apply_non_lin): 4 | 5 | eps = 1e-5 6 | if apply_non_lin is not None: 7 | preds_softmax = apply_non_lin(preds_softmax) 8 | # dims = len(preds_softmax.shape)-2 9 | loss = preds_softmax * torch.clamp(torch.log(preds_softmax), min=eps, max=1-eps) 10 | # Hopefully it's never log(1) -> Murphy's law 11 | 12 | loss = torch.clamp(torch.sum(preds_softmax, dim=0), min=-1e-5, max=1e-5) 13 | loss = -1.0 * loss.mean(0) 14 | 15 | return loss -------------------------------------------------------------------------------- /nnunet_mednext/training/loss_functions/crossentropy.py: -------------------------------------------------------------------------------- 1 | from torch import nn, Tensor 2 | 3 | 4 | class RobustCrossEntropyLoss(nn.CrossEntropyLoss): 5 | """ 6 | this is just a compatibility layer because my target tensor is float and has an extra dimension 7 | """ 8 | def forward(self, input: Tensor, target: Tensor) -> Tensor: 9 | if len(target.shape) == len(input.shape): 10 | assert target.shape[1] == 1 11 | target = target[:, 0] 12 | return super().forward(input, target.long()) -------------------------------------------------------------------------------- /nnunet_mednext/training/loss_functions/deep_supervision.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | 15 | 16 | from torch import nn 17 | 18 | 19 | class MultipleOutputLoss2(nn.Module): 20 | def __init__(self, loss, weight_factors=None): 21 | """ 22 | use this if you have several outputs and ground truth (both list of same len) and the loss should be computed 23 | between them (x[0] and y[0], x[1] and y[1] etc) 24 | :param loss: 25 | :param weight_factors: 26 | """ 27 | super(MultipleOutputLoss2, self).__init__() 28 | self.weight_factors = weight_factors 29 | self.loss = loss 30 | 31 | def forward(self, x, y): 32 | assert isinstance(x, (tuple, list)), "x must be either tuple or list" 33 | assert isinstance(y, (tuple, list)), "y must be either tuple or list" 34 | if self.weight_factors is None: 35 | weights = [1] * len(x) 36 | else: 37 | weights = self.weight_factors 38 | 39 | l = weights[0] * self.loss(x[0], y[0]) 40 | for i in range(1, len(x)): 41 | if weights[i] != 0: 42 | l += weights[i] * self.loss(x[i], y[i]) 43 | return l 44 | -------------------------------------------------------------------------------- /nnunet_mednext/training/network_training/MedNeXt/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MIC-DKFZ/MedNeXt/0b78ed869fbd1cc2fd38754d2f8519f1b72d43ba/nnunet_mednext/training/network_training/MedNeXt/__init__.py -------------------------------------------------------------------------------- /nnunet_mednext/training/network_training/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from . import * -------------------------------------------------------------------------------- /nnunet_mednext/training/network_training/competitions_with_custom_Trainers/BraTS2020/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MIC-DKFZ/MedNeXt/0b78ed869fbd1cc2fd38754d2f8519f1b72d43ba/nnunet_mednext/training/network_training/competitions_with_custom_Trainers/BraTS2020/__init__.py -------------------------------------------------------------------------------- /nnunet_mednext/training/network_training/competitions_with_custom_Trainers/MMS/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MIC-DKFZ/MedNeXt/0b78ed869fbd1cc2fd38754d2f8519f1b72d43ba/nnunet_mednext/training/network_training/competitions_with_custom_Trainers/MMS/__init__.py -------------------------------------------------------------------------------- /nnunet_mednext/training/network_training/competitions_with_custom_Trainers/MMS/nnUNetTrainerV2_MMS.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from nnunet_mednext.network_architecture.generic_UNet import Generic_UNet 3 | from nnunet_mednext.network_architecture.initialization import InitWeights_He 4 | from nnunet_mednext.training.network_training.nnUNet_variants.data_augmentation.nnUNetTrainerV2_insaneDA import \ 5 | nnUNetTrainerV2_insaneDA 6 | from nnunet_mednext.utilities.nd_softmax import softmax_helper 7 | from torch import nn 8 | 9 | 10 | class nnUNetTrainerV2_MMS(nnUNetTrainerV2_insaneDA): 11 | def setup_DA_params(self): 12 | super().setup_DA_params() 13 | self.data_aug_params["p_rot"] = 0.7 14 | self.data_aug_params["p_eldef"] = 0.1 15 | self.data_aug_params["p_scale"] = 0.3 16 | 17 | self.data_aug_params["independent_scale_factor_for_each_axis"] = True 18 | 
self.data_aug_params["p_independent_scale_per_axis"] = 0.3 19 | 20 | self.data_aug_params["do_additive_brightness"] = True 21 | self.data_aug_params["additive_brightness_mu"] = 0 22 | self.data_aug_params["additive_brightness_sigma"] = 0.2 23 | self.data_aug_params["additive_brightness_p_per_sample"] = 0.3 24 | self.data_aug_params["additive_brightness_p_per_channel"] = 1 25 | 26 | self.data_aug_params["elastic_deform_alpha"] = (0., 300.) 27 | self.data_aug_params["elastic_deform_sigma"] = (9., 15.) 28 | 29 | self.data_aug_params['gamma_range'] = (0.5, 1.6) 30 | 31 | def initialize_network(self): 32 | if self.threeD: 33 | conv_op = nn.Conv3d 34 | dropout_op = nn.Dropout3d 35 | norm_op = nn.BatchNorm3d 36 | 37 | else: 38 | conv_op = nn.Conv2d 39 | dropout_op = nn.Dropout2d 40 | norm_op = nn.BatchNorm2d 41 | 42 | norm_op_kwargs = {'eps': 1e-5, 'affine': True} 43 | dropout_op_kwargs = {'p': 0, 'inplace': True} 44 | net_nonlin = nn.LeakyReLU 45 | net_nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True} 46 | self.network = Generic_UNet(self.num_input_channels, self.base_num_features, self.num_classes, 47 | len(self.net_num_pool_op_kernel_sizes), 48 | self.conv_per_stage, 2, conv_op, norm_op, norm_op_kwargs, dropout_op, 49 | dropout_op_kwargs, 50 | net_nonlin, net_nonlin_kwargs, True, False, lambda x: x, InitWeights_He(1e-2), 51 | self.net_num_pool_op_kernel_sizes, self.net_conv_kernel_sizes, False, True, True) 52 | if torch.cuda.is_available(): 53 | self.network.cuda() 54 | self.network.inference_apply_nonlin = softmax_helper 55 | 56 | """def run_training(self): 57 | from batchviewer import view_batch 58 | a = next(self.tr_gen) 59 | view_batch(a['data']) 60 | import IPython;IPython.embed()""" 61 | -------------------------------------------------------------------------------- /nnunet_mednext/training/network_training/competitions_with_custom_Trainers/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MIC-DKFZ/MedNeXt/0b78ed869fbd1cc2fd38754d2f8519f1b72d43ba/nnunet_mednext/training/network_training/competitions_with_custom_Trainers/__init__.py -------------------------------------------------------------------------------- /nnunet_mednext/training/network_training/nnUNetTrainerV2_fp32.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | 15 | 16 | from nnunet_mednext.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2 17 | 18 | 19 | class nnUNetTrainerV2_fp32(nnUNetTrainerV2): 20 | """ 21 | Info for Fabian: same as internal nnUNetTrainerV2_2 22 | """ 23 | 24 | def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None, 25 | unpack_data=True, deterministic=True, fp16=False): 26 | super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data, 27 | deterministic, False) 28 | -------------------------------------------------------------------------------- /nnunet_mednext/training/network_training/nnUNet_variants/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from . import * -------------------------------------------------------------------------------- /nnunet_mednext/training/network_training/nnUNet_variants/architectural_variants/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MIC-DKFZ/MedNeXt/0b78ed869fbd1cc2fd38754d2f8519f1b72d43ba/nnunet_mednext/training/network_training/nnUNet_variants/architectural_variants/__init__.py -------------------------------------------------------------------------------- /nnunet_mednext/training/network_training/nnUNet_variants/architectural_variants/nnUNetTrainerV2_3ConvPerStage.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | import torch 15 | from nnunet_mednext.network_architecture.generic_UNet import Generic_UNet 16 | from nnunet_mednext.network_architecture.initialization import InitWeights_He 17 | from nnunet_mednext.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2 18 | from nnunet_mednext.utilities.nd_softmax import softmax_helper 19 | from torch import nn 20 | 21 | 22 | class nnUNetTrainerV2_3ConvPerStage(nnUNetTrainerV2): 23 | def initialize_network(self): 24 | self.base_num_features = 24 # otherwise we run out of VRAM 25 | if self.threeD: 26 | conv_op = nn.Conv3d 27 | dropout_op = nn.Dropout3d 28 | norm_op = nn.InstanceNorm3d 29 | 30 | else: 31 | conv_op = nn.Conv2d 32 | dropout_op = nn.Dropout2d 33 | norm_op = nn.InstanceNorm2d 34 | 35 | norm_op_kwargs = {'eps': 1e-5, 'affine': True} 36 | dropout_op_kwargs = {'p': 0, 'inplace': True} 37 | net_nonlin = nn.LeakyReLU 38 | net_nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True} 39 | self.network = Generic_UNet(self.num_input_channels, self.base_num_features, self.num_classes, 40 | len(self.net_num_pool_op_kernel_sizes), 41 | 3, 2, conv_op, norm_op, norm_op_kwargs, dropout_op, dropout_op_kwargs, 42 | net_nonlin, net_nonlin_kwargs, True, False, lambda x: x, InitWeights_He(1e-2), 43 | self.net_num_pool_op_kernel_sizes, self.net_conv_kernel_sizes, False, True, True) 44 | if torch.cuda.is_available(): 45 | self.network.cuda() 46 | self.network.inference_apply_nonlin = softmax_helper 47 | -------------------------------------------------------------------------------- /nnunet_mednext/training/network_training/nnUNet_variants/architectural_variants/nnUNetTrainerV2_3ConvPerStage_samefilters.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | import torch 15 | from nnunet_mednext.network_architecture.generic_UNet import Generic_UNet 16 | from nnunet_mednext.network_architecture.initialization import InitWeights_He 17 | from nnunet_mednext.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2 18 | from nnunet_mednext.utilities.nd_softmax import softmax_helper 19 | from torch import nn 20 | 21 | 22 | class nnUNetTrainerV2_3ConvPerStageSameFilters(nnUNetTrainerV2): 23 | def initialize_network(self): 24 | if self.threeD: 25 | conv_op = nn.Conv3d 26 | dropout_op = nn.Dropout3d 27 | norm_op = nn.InstanceNorm3d 28 | 29 | else: 30 | conv_op = nn.Conv2d 31 | dropout_op = nn.Dropout2d 32 | norm_op = nn.InstanceNorm2d 33 | 34 | norm_op_kwargs = {'eps': 1e-5, 'affine': True} 35 | dropout_op_kwargs = {'p': 0, 'inplace': True} 36 | net_nonlin = nn.LeakyReLU 37 | net_nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True} 38 | self.network = Generic_UNet(self.num_input_channels, self.base_num_features, self.num_classes, 39 | len(self.net_num_pool_op_kernel_sizes), 40 | 3, 2, conv_op, norm_op, norm_op_kwargs, dropout_op, dropout_op_kwargs, 41 | net_nonlin, net_nonlin_kwargs, True, False, lambda x: x, InitWeights_He(1e-2), 42 | self.net_num_pool_op_kernel_sizes, self.net_conv_kernel_sizes, False, True, True) 43 | if torch.cuda.is_available(): 44 | self.network.cuda() 45 | self.network.inference_apply_nonlin = softmax_helper 46 | -------------------------------------------------------------------------------- /nnunet_mednext/training/network_training/nnUNet_variants/architectural_variants/nnUNetTrainerV2_BN.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | import torch 15 | from nnunet_mednext.network_architecture.generic_UNet import Generic_UNet 16 | from nnunet_mednext.network_architecture.initialization import InitWeights_He 17 | from nnunet_mednext.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2 18 | from nnunet_mednext.utilities.nd_softmax import softmax_helper 19 | from torch import nn 20 | 21 | 22 | class nnUNetTrainerV2_BN(nnUNetTrainerV2): 23 | def initialize_network(self): 24 | """ 25 | changed deep supervision to False 26 | :return: 27 | """ 28 | if self.threeD: 29 | conv_op = nn.Conv3d 30 | dropout_op = nn.Dropout3d 31 | norm_op = nn.BatchNorm3d 32 | 33 | else: 34 | conv_op = nn.Conv2d 35 | dropout_op = nn.Dropout2d 36 | norm_op = nn.BatchNorm2d 37 | 38 | norm_op_kwargs = {'eps': 1e-5, 'affine': True} 39 | dropout_op_kwargs = {'p': 0, 'inplace': True} 40 | net_nonlin = nn.LeakyReLU 41 | net_nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True} 42 | self.network = Generic_UNet(self.num_input_channels, self.base_num_features, self.num_classes, 43 | len(self.net_num_pool_op_kernel_sizes), 44 | self.conv_per_stage, 2, conv_op, norm_op, norm_op_kwargs, dropout_op, dropout_op_kwargs, 45 | net_nonlin, net_nonlin_kwargs, True, False, lambda x: x, InitWeights_He(1e-2), 46 | self.net_num_pool_op_kernel_sizes, self.net_conv_kernel_sizes, False, True, True) 47 | if torch.cuda.is_available(): 48 | self.network.cuda() 49 | self.network.inference_apply_nonlin = softmax_helper 50 | 51 | 52 | nnUNetTrainerV2_BN_copy1 = nnUNetTrainerV2_BN 53 | nnUNetTrainerV2_BN_copy2 = nnUNetTrainerV2_BN 54 | nnUNetTrainerV2_BN_copy3 = nnUNetTrainerV2_BN 55 | nnUNetTrainerV2_BN_copy4 = nnUNetTrainerV2_BN 56 | -------------------------------------------------------------------------------- /nnunet_mednext/training/network_training/nnUNet_variants/architectural_variants/nnUNetTrainerV2_FRN.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | 15 | 16 | from nnunet_mednext.network_architecture.custom_modules.feature_response_normalization import FRN3D 17 | from nnunet_mednext.network_architecture.generic_UNet import Generic_UNet 18 | from nnunet_mednext.network_architecture.initialization import InitWeights_He 19 | from nnunet_mednext.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2 20 | from nnunet_mednext.utilities.nd_softmax import softmax_helper 21 | from torch import nn 22 | from nnunet_mednext.network_architecture.custom_modules.helperModules import Identity 23 | import torch 24 | 25 | 26 | class nnUNetTrainerV2_FRN(nnUNetTrainerV2): 27 | def initialize_network(self): 28 | """ 29 | changed deep supervision to False 30 | :return: 31 | """ 32 | if self.threeD: 33 | conv_op = nn.Conv3d 34 | dropout_op = nn.Dropout3d 35 | norm_op = FRN3D 36 | 37 | else: 38 | conv_op = nn.Conv2d 39 | dropout_op = nn.Dropout2d 40 | raise NotImplementedError 41 | norm_op = nn.BatchNorm2d 42 | 43 | norm_op_kwargs = {'eps': 1e-6} 44 | dropout_op_kwargs = {'p': 0, 'inplace': True} 45 | net_nonlin = Identity 46 | net_nonlin_kwargs = {} 47 | self.network = Generic_UNet(self.num_input_channels, self.base_num_features, self.num_classes, 48 | len(self.net_num_pool_op_kernel_sizes), 49 | self.conv_per_stage, 2, conv_op, norm_op, norm_op_kwargs, dropout_op, dropout_op_kwargs, 50 | net_nonlin, net_nonlin_kwargs, True, False, lambda x: x, InitWeights_He(1e-2), 51 | self.net_num_pool_op_kernel_sizes, self.net_conv_kernel_sizes, False, True, True) 52 | if torch.cuda.is_available(): 53 | self.network.cuda() 54 | self.network.inference_apply_nonlin = softmax_helper 55 | -------------------------------------------------------------------------------- /nnunet_mednext/training/network_training/nnUNet_variants/architectural_variants/nnUNetTrainerV2_GN.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | import torch 15 | from nnunet_mednext.network_architecture.generic_UNet import Generic_UNet 16 | from nnunet_mednext.network_architecture.initialization import InitWeights_He 17 | from nnunet_mednext.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2 18 | from nnunet_mednext.network_architecture.custom_modules.helperModules import MyGroupNorm 19 | from nnunet_mednext.utilities.nd_softmax import softmax_helper 20 | from torch import nn 21 | 22 | 23 | class nnUNetTrainerV2_GN(nnUNetTrainerV2): 24 | def initialize_network(self): 25 | """ 26 | changed deep supervision to False 27 | :return: 28 | """ 29 | if self.threeD: 30 | conv_op = nn.Conv3d 31 | dropout_op = nn.Dropout3d 32 | norm_op = MyGroupNorm 33 | 34 | else: 35 | conv_op = nn.Conv2d 36 | dropout_op = nn.Dropout2d 37 | norm_op = MyGroupNorm 38 | 39 | norm_op_kwargs = {'eps': 1e-5, 'affine': True, 'num_groups': 8} 40 | dropout_op_kwargs = {'p': 0, 'inplace': True} 41 | net_nonlin = nn.LeakyReLU 42 | net_nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True} 43 | self.network = Generic_UNet(self.num_input_channels, self.base_num_features, self.num_classes, 44 | len(self.net_num_pool_op_kernel_sizes), 45 | self.conv_per_stage, 2, conv_op, norm_op, norm_op_kwargs, dropout_op, dropout_op_kwargs, 46 | net_nonlin, net_nonlin_kwargs, True, False, lambda x: x, InitWeights_He(1e-2), 47 | self.net_num_pool_op_kernel_sizes, self.net_conv_kernel_sizes, False, True, True) 48 | if torch.cuda.is_available(): 49 | self.network.cuda() 50 | self.network.inference_apply_nonlin = softmax_helper 51 | -------------------------------------------------------------------------------- /nnunet_mednext/training/network_training/nnUNet_variants/architectural_variants/nnUNetTrainerV2_LReLU_slope_2en1.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | import torch 15 | from nnunet_mednext.network_architecture.generic_UNet import Generic_UNet 16 | from nnunet_mednext.network_architecture.initialization import InitWeights_He 17 | from nnunet_mednext.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2 18 | from nnunet_mednext.utilities.nd_softmax import softmax_helper 19 | from torch import nn 20 | 21 | 22 | class nnUNetTrainerV2_LReLU_slope_2en1(nnUNetTrainerV2): 23 | def initialize_network(self): 24 | if self.threeD: 25 | conv_op = nn.Conv3d 26 | dropout_op = nn.Dropout3d 27 | norm_op = nn.InstanceNorm3d 28 | 29 | else: 30 | conv_op = nn.Conv2d 31 | dropout_op = nn.Dropout2d 32 | norm_op = nn.InstanceNorm2d 33 | 34 | norm_op_kwargs = {'eps': 1e-5, 'affine': True} 35 | dropout_op_kwargs = {'p': 0, 'inplace': True} 36 | net_nonlin = nn.LeakyReLU 37 | net_nonlin_kwargs = {'inplace': True, 'negative_slope': 2e-1} 38 | self.network = Generic_UNet(self.num_input_channels, self.base_num_features, self.num_classes, 39 | len(self.net_num_pool_op_kernel_sizes), 40 | self.conv_per_stage, 2, conv_op, norm_op, norm_op_kwargs, dropout_op, dropout_op_kwargs, 41 | net_nonlin, net_nonlin_kwargs, True, False, lambda x: x, InitWeights_He(0), 42 | self.net_num_pool_op_kernel_sizes, self.net_conv_kernel_sizes, False, True, True) 43 | if torch.cuda.is_available(): 44 | self.network.cuda() 45 | self.network.inference_apply_nonlin = softmax_helper 46 | -------------------------------------------------------------------------------- /nnunet_mednext/training/network_training/nnUNet_variants/architectural_variants/nnUNetTrainerV2_Mish.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | import torch 15 | from nnunet_mednext.network_architecture.generic_UNet import Generic_UNet 16 | from nnunet_mednext.network_architecture.initialization import InitWeights_He 17 | from nnunet_mednext.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2 18 | from nnunet_mednext.utilities.nd_softmax import softmax_helper 19 | from torch import nn 20 | from nnunet_mednext.network_architecture.custom_modules.mish import Mish 21 | 22 | 23 | class nnUNetTrainerV2_Mish(nnUNetTrainerV2): 24 | def initialize_network(self): 25 | if self.threeD: 26 | conv_op = nn.Conv3d 27 | dropout_op = nn.Dropout3d 28 | norm_op = nn.InstanceNorm3d 29 | 30 | else: 31 | conv_op = nn.Conv2d 32 | dropout_op = nn.Dropout2d 33 | norm_op = nn.InstanceNorm2d 34 | 35 | norm_op_kwargs = {'eps': 1e-5, 'affine': True} 36 | dropout_op_kwargs = {'p': 0, 'inplace': True} 37 | net_nonlin = Mish 38 | net_nonlin_kwargs = {} 39 | self.network = Generic_UNet(self.num_input_channels, self.base_num_features, self.num_classes, 40 | len(self.net_num_pool_op_kernel_sizes), 41 | self.conv_per_stage, 2, conv_op, norm_op, norm_op_kwargs, dropout_op, dropout_op_kwargs, 42 | net_nonlin, net_nonlin_kwargs, True, False, lambda x: x, InitWeights_He(0), 43 | self.net_num_pool_op_kernel_sizes, self.net_conv_kernel_sizes, False, True, True) 44 | if torch.cuda.is_available(): 45 | self.network.cuda() 46 | self.network.inference_apply_nonlin = softmax_helper 47 | 48 | -------------------------------------------------------------------------------- /nnunet_mednext/training/network_training/nnUNet_variants/architectural_variants/nnUNetTrainerV2_NoNormalization.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
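# [Editor's sketch -- illustrative aside, not part of the repository file above or below]
# The Mish module imported above (custom_modules/mish.py) presumably implements the standard
# Mish activation, x * tanh(softplus(x)); a minimal functional sketch:
import torch
import torch.nn.functional as F

def mish(x: torch.Tensor) -> torch.Tensor:
    return x * torch.tanh(F.softplus(x))

x = torch.tensor([-2.0, 0.0, 2.0])
print(mish(x))     # smooth and non-monotonic for negative inputs
print(F.mish(x))   # PyTorch >= 1.9 ships the same activation for comparison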
14 | import torch 15 | from nnunet_mednext.network_architecture.generic_UNet import Generic_UNet 16 | from nnunet_mednext.network_architecture.initialization import InitWeights_He 17 | from nnunet_mednext.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2 18 | from nnunet_mednext.network_architecture.custom_modules.helperModules import Identity 19 | from nnunet_mednext.utilities.nd_softmax import softmax_helper 20 | from torch import nn 21 | 22 | 23 | class nnUNetTrainerV2_NoNormalization(nnUNetTrainerV2): 24 | def initialize_network(self): 25 | if self.threeD: 26 | conv_op = nn.Conv3d 27 | dropout_op = nn.Dropout3d 28 | norm_op = Identity 29 | 30 | else: 31 | conv_op = nn.Conv2d 32 | dropout_op = nn.Dropout2d 33 | norm_op = Identity 34 | 35 | norm_op_kwargs = {} 36 | dropout_op_kwargs = {'p': 0, 'inplace': True} 37 | net_nonlin = nn.LeakyReLU 38 | net_nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True} 39 | self.network = Generic_UNet(self.num_input_channels, self.base_num_features, self.num_classes, 40 | len(self.net_num_pool_op_kernel_sizes), 41 | self.conv_per_stage, 2, conv_op, norm_op, norm_op_kwargs, dropout_op, dropout_op_kwargs, 42 | net_nonlin, net_nonlin_kwargs, True, False, lambda x: x, InitWeights_He(1e-2), 43 | self.net_num_pool_op_kernel_sizes, self.net_conv_kernel_sizes, False, True, True) 44 | if torch.cuda.is_available(): 45 | self.network.cuda() 46 | self.network.inference_apply_nonlin = softmax_helper 47 | -------------------------------------------------------------------------------- /nnunet_mednext/training/network_training/nnUNet_variants/architectural_variants/nnUNetTrainerV2_NoNormalization_lr1en3.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
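# [Editor's sketch -- illustrative aside, not part of the repository file above or below]
# Passing Identity as norm_op only works because Generic_UNet instantiates every norm layer as
# norm_op(num_channels, **norm_op_kwargs); the helperModules.Identity used above presumably just
# swallows those arguments, much like this stand-in (torch.nn.Identity behaves the same way):
import torch
from torch import nn

class NoOpNorm(nn.Module):                 # hypothetical stand-in, not the repository class
    def __init__(self, *args, **kwargs):   # accept and ignore num_channels and any norm kwargs
        super().__init__()

    def forward(self, x):
        return x

norm = NoOpNorm(32)                        # instantiated like norm_op(32, **{})
x = torch.randn(2, 32, 8, 8, 8)
assert torch.equal(norm(x), x)             # normalization is a no-op in this variant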
14 | 15 | 16 | from nnunet_mednext.training.network_training.nnUNet_variants.architectural_variants.nnUNetTrainerV2_NoNormalization import \ 17 | nnUNetTrainerV2_NoNormalization 18 | 19 | 20 | class nnUNetTrainerV2_NoNormalization_lr1en3(nnUNetTrainerV2_NoNormalization): 21 | def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None, 22 | unpack_data=True, deterministic=True, fp16=False): 23 | super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data, 24 | deterministic, fp16) 25 | self.initial_lr = 1e-3 26 | -------------------------------------------------------------------------------- /nnunet_mednext/training/network_training/nnUNet_variants/architectural_variants/nnUNetTrainerV2_ReLU.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | import torch 15 | from nnunet_mednext.network_architecture.generic_UNet import Generic_UNet 16 | from nnunet_mednext.network_architecture.initialization import InitWeights_He 17 | from nnunet_mednext.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2 18 | from nnunet_mednext.utilities.nd_softmax import softmax_helper 19 | from torch import nn 20 | 21 | 22 | class nnUNetTrainerV2_ReLU(nnUNetTrainerV2): 23 | def initialize_network(self): 24 | if self.threeD: 25 | conv_op = nn.Conv3d 26 | dropout_op = nn.Dropout3d 27 | norm_op = nn.InstanceNorm3d 28 | 29 | else: 30 | conv_op = nn.Conv2d 31 | dropout_op = nn.Dropout2d 32 | norm_op = nn.InstanceNorm2d 33 | 34 | norm_op_kwargs = {'eps': 1e-5, 'affine': True} 35 | dropout_op_kwargs = {'p': 0, 'inplace': True} 36 | net_nonlin = nn.ReLU 37 | net_nonlin_kwargs = {'inplace': True} 38 | self.network = Generic_UNet(self.num_input_channels, self.base_num_features, self.num_classes, 39 | len(self.net_num_pool_op_kernel_sizes), 40 | self.conv_per_stage, 2, conv_op, norm_op, norm_op_kwargs, dropout_op, dropout_op_kwargs, 41 | net_nonlin, net_nonlin_kwargs, True, False, lambda x: x, InitWeights_He(0), 42 | self.net_num_pool_op_kernel_sizes, self.net_conv_kernel_sizes, False, True, True) 43 | if torch.cuda.is_available(): 44 | self.network.cuda() 45 | self.network.inference_apply_nonlin = softmax_helper 46 | -------------------------------------------------------------------------------- /nnunet_mednext/training/network_training/nnUNet_variants/architectural_variants/nnUNetTrainerV2_ReLU_biasInSegOutput.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 
5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | import torch 15 | from nnunet_mednext.network_architecture.generic_UNet import Generic_UNet 16 | from nnunet_mednext.network_architecture.initialization import InitWeights_He 17 | from nnunet_mednext.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2 18 | from nnunet_mednext.utilities.nd_softmax import softmax_helper 19 | from torch import nn 20 | 21 | 22 | class nnUNetTrainerV2_ReLU_biasInSegOutput(nnUNetTrainerV2): 23 | def initialize_network(self): 24 | if self.threeD: 25 | conv_op = nn.Conv3d 26 | dropout_op = nn.Dropout3d 27 | norm_op = nn.InstanceNorm3d 28 | 29 | else: 30 | conv_op = nn.Conv2d 31 | dropout_op = nn.Dropout2d 32 | norm_op = nn.InstanceNorm2d 33 | 34 | norm_op_kwargs = {'eps': 1e-5, 'affine': True} 35 | dropout_op_kwargs = {'p': 0, 'inplace': True} 36 | net_nonlin = nn.ReLU 37 | net_nonlin_kwargs = {'inplace': True} 38 | self.network = Generic_UNet(self.num_input_channels, self.base_num_features, self.num_classes, 39 | len(self.net_num_pool_op_kernel_sizes), 40 | self.conv_per_stage, 2, conv_op, norm_op, norm_op_kwargs, dropout_op, dropout_op_kwargs, 41 | net_nonlin, net_nonlin_kwargs, True, False, lambda x: x, InitWeights_He(0), 42 | self.net_num_pool_op_kernel_sizes, self.net_conv_kernel_sizes, False, True, True, 43 | seg_output_use_bias=True) 44 | if torch.cuda.is_available(): 45 | self.network.cuda() 46 | self.network.inference_apply_nonlin = softmax_helper 47 | -------------------------------------------------------------------------------- /nnunet_mednext/training/network_training/nnUNet_variants/architectural_variants/nnUNetTrainerV2_ReLU_convReLUIN.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
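# [Editor's sketch -- illustrative aside, not part of the repository file above or below]
# seg_output_use_bias=True only touches the final 1x1(x1) segmentation convolutions, which
# Generic_UNet otherwise creates without a bias term. The difference in isolation:
import torch
from torch import nn

num_features, num_classes = 32, 3
seg_head_default = nn.Conv3d(num_features, num_classes, kernel_size=1, bias=False)
seg_head_biased = nn.Conv3d(num_features, num_classes, kernel_size=1, bias=True)
print(sum(p.numel() for p in seg_head_default.parameters()))  # 96
print(sum(p.numel() for p in seg_head_biased.parameters()))   # 99 (one extra bias per class)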
14 | import torch 15 | from nnunet_mednext.network_architecture.generic_UNet import Generic_UNet, ConvDropoutNonlinNorm 16 | from nnunet_mednext.network_architecture.initialization import InitWeights_He 17 | from nnunet_mednext.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2 18 | from nnunet_mednext.utilities.nd_softmax import softmax_helper 19 | from torch import nn 20 | 21 | 22 | class nnUNetTrainerV2_ReLU_convReLUIN(nnUNetTrainerV2): 23 | def initialize_network(self): 24 | if self.threeD: 25 | conv_op = nn.Conv3d 26 | dropout_op = nn.Dropout3d 27 | norm_op = nn.InstanceNorm3d 28 | 29 | else: 30 | conv_op = nn.Conv2d 31 | dropout_op = nn.Dropout2d 32 | norm_op = nn.InstanceNorm2d 33 | 34 | norm_op_kwargs = {'eps': 1e-5, 'affine': True} 35 | dropout_op_kwargs = {'p': 0, 'inplace': True} 36 | net_nonlin = nn.ReLU 37 | net_nonlin_kwargs = {'inplace': True} 38 | self.network = Generic_UNet(self.num_input_channels, self.base_num_features, self.num_classes, 39 | len(self.net_num_pool_op_kernel_sizes), 40 | self.conv_per_stage, 2, conv_op, norm_op, norm_op_kwargs, dropout_op, dropout_op_kwargs, 41 | net_nonlin, net_nonlin_kwargs, True, False, lambda x: x, InitWeights_He(0), 42 | self.net_num_pool_op_kernel_sizes, self.net_conv_kernel_sizes, False, True, True, 43 | basic_block=ConvDropoutNonlinNorm) 44 | if torch.cuda.is_available(): 45 | self.network.cuda() 46 | self.network.inference_apply_nonlin = softmax_helper 47 | -------------------------------------------------------------------------------- /nnunet_mednext/training/network_training/nnUNet_variants/architectural_variants/nnUNetTrainerV2_ResencUNet_DA3_BN.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
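# [Editor's sketch -- illustrative aside, not part of the repository file above or below]
# basic_block=ConvDropoutNonlinNorm swaps normalisation and non-linearity inside every conv
# block: conv -> dropout -> nonlin -> norm instead of the default conv -> dropout -> norm -> nonlin.
# A schematic comparison of the two orderings (not the repository classes themselves):
import torch
from torch import nn

def default_order_block(c_in, c_out):      # mirrors ConvDropoutNormNonlin (the default block)
    return nn.Sequential(nn.Conv3d(c_in, c_out, 3, padding=1), nn.Dropout3d(p=0),
                         nn.InstanceNorm3d(c_out, eps=1e-5, affine=True), nn.ReLU(inplace=True))

def swapped_order_block(c_in, c_out):      # mirrors ConvDropoutNonlinNorm (this variant)
    return nn.Sequential(nn.Conv3d(c_in, c_out, 3, padding=1), nn.Dropout3d(p=0),
                         nn.ReLU(inplace=True), nn.InstanceNorm3d(c_out, eps=1e-5, affine=True))

x = torch.randn(1, 8, 16, 16, 16)
print(default_order_block(8, 16)(x).shape, swapped_order_block(8, 16)(x).shape)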
14 | 15 | import torch 16 | 17 | from nnunet_mednext.network_architecture.generic_modular_residual_UNet import FabiansUNet, get_default_network_config 18 | from nnunet_mednext.network_architecture.initialization import InitWeights_He 19 | from nnunet_mednext.training.network_training.nnUNet_variants.architectural_variants.nnUNetTrainerV2_ResencUNet_DA3 import \ 20 | nnUNetTrainerV2_ResencUNet_DA3 21 | from nnunet_mednext.utilities.nd_softmax import softmax_helper 22 | 23 | 24 | class nnUNetTrainerV2_ResencUNet_DA3_BN(nnUNetTrainerV2_ResencUNet_DA3): 25 | def initialize_network(self): 26 | if self.threeD: 27 | cfg = get_default_network_config(3, None, norm_type="bn") 28 | 29 | else: 30 | cfg = get_default_network_config(1, None, norm_type="bn") 31 | 32 | stage_plans = self.plans['plans_per_stage'][self.stage] 33 | conv_kernel_sizes = stage_plans['conv_kernel_sizes'] 34 | blocks_per_stage_encoder = stage_plans['num_blocks_encoder'] 35 | blocks_per_stage_decoder = stage_plans['num_blocks_decoder'] 36 | pool_op_kernel_sizes = stage_plans['pool_op_kernel_sizes'] 37 | 38 | self.network = FabiansUNet(self.num_input_channels, self.base_num_features, blocks_per_stage_encoder, 2, 39 | pool_op_kernel_sizes, conv_kernel_sizes, cfg, self.num_classes, 40 | blocks_per_stage_decoder, True, False, 320, InitWeights_He(1e-2)) 41 | 42 | if torch.cuda.is_available(): 43 | self.network.cuda() 44 | self.network.inference_apply_nonlin = softmax_helper 45 | -------------------------------------------------------------------------------- /nnunet_mednext/training/network_training/nnUNet_variants/architectural_variants/nnUNetTrainerV2_ResencUNet_SimonsInit.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | 15 | import torch 16 | from torch import nn 17 | 18 | from nnunet_mednext.network_architecture.custom_modules.conv_blocks import BasicResidualBlock 19 | from nnunet_mednext.training.network_training.nnUNet_variants.architectural_variants.nnUNetTrainerV2_ResencUNet import \ 20 | nnUNetTrainerV2_ResencUNet 21 | 22 | 23 | def init_last_bn_before_add_to_0(module): 24 | if isinstance(module, BasicResidualBlock): 25 | module.norm2.weight = nn.init.constant_(module.norm2.weight, 0) 26 | module.norm2.bias = nn.init.constant_(module.norm2.bias, 0) 27 | 28 | 29 | class nnUNetTrainerV2_ResencUNet_SimonsInit(nnUNetTrainerV2_ResencUNet): 30 | """ 31 | SimonsInit = Simon Kohl's suggestion of initializing each residual block such that it adds nothing 32 | (weight and bias initialized to zero in last batch norm) 33 | """ 34 | def initialize_network(self): 35 | ret = super().initialize_network() 36 | self.network.apply(init_last_bn_before_add_to_0) 37 | return ret 38 | 39 | 40 | class nnUNetTrainerV2_ResencUNet_SimonsInit_adamw(nnUNetTrainerV2_ResencUNet_SimonsInit): 41 | 42 | def __init__(self, *args, **kwargs): 43 | super().__init__(*args, **kwargs) 44 | self.initial_lr = 1e-3 45 | 46 | def initialize_optimizer_and_scheduler(self): 47 | assert self.network is not None, "self.initialize_network must be called first" 48 | self.optimizer = torch.optim.AdamW(self.network.parameters(), 49 | self.initial_lr, 50 | weight_decay=self.weight_decay, 51 | eps=1e-4 # 1e-8 might cause nans in fp16 52 | ) 53 | self.lr_scheduler = None -------------------------------------------------------------------------------- /nnunet_mednext/training/network_training/nnUNet_variants/architectural_variants/nnUNetTrainerV2_lReLU_biasInSegOutput.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
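# [Editor's sketch -- illustrative aside, not part of the repository file above or below]
# Zero-initialising the last norm of a residual block (the "SimonsInit" above) makes the block
# start out as an identity mapping: y = x + F(x) with F(x) == 0 at initialisation, because the
# affine scale and shift of the final norm are both zero. Toy demonstration on a made-up block
# (ToyResBlock is hypothetical; the repository applies the same idea to BasicResidualBlock):
import torch
from torch import nn

class ToyResBlock(nn.Module):
    def __init__(self, channels):
        super().__init__()
        self.conv1, self.norm1 = nn.Conv3d(channels, channels, 3, padding=1), nn.BatchNorm3d(channels)
        self.conv2, self.norm2 = nn.Conv3d(channels, channels, 3, padding=1), nn.BatchNorm3d(channels)

    def forward(self, x):
        out = self.norm1(self.conv1(x)).relu()
        return x + self.norm2(self.conv2(out))       # residual addition

def zero_init_last_norm(module):                     # same idea as init_last_bn_before_add_to_0
    if isinstance(module, ToyResBlock):
        nn.init.constant_(module.norm2.weight, 0)
        nn.init.constant_(module.norm2.bias, 0)

block = ToyResBlock(8).eval().apply(zero_init_last_norm)
x = torch.randn(1, 8, 4, 4, 4)
print(torch.allclose(block(x), x))                   # True: the block initially adds nothing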
14 | import torch 15 | from nnunet_mednext.network_architecture.generic_UNet import Generic_UNet 16 | from nnunet_mednext.network_architecture.initialization import InitWeights_He 17 | from nnunet_mednext.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2 18 | from nnunet_mednext.utilities.nd_softmax import softmax_helper 19 | from torch import nn 20 | 21 | 22 | class nnUNetTrainerV2_lReLU_biasInSegOutput(nnUNetTrainerV2): 23 | def initialize_network(self): 24 | if self.threeD: 25 | conv_op = nn.Conv3d 26 | dropout_op = nn.Dropout3d 27 | norm_op = nn.InstanceNorm3d 28 | 29 | else: 30 | conv_op = nn.Conv2d 31 | dropout_op = nn.Dropout2d 32 | norm_op = nn.InstanceNorm2d 33 | 34 | norm_op_kwargs = {'eps': 1e-5, 'affine': True} 35 | dropout_op_kwargs = {'p': 0, 'inplace': True} 36 | net_nonlin = nn.LeakyReLU 37 | net_nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True} 38 | self.network = Generic_UNet(self.num_input_channels, self.base_num_features, self.num_classes, 39 | len(self.net_num_pool_op_kernel_sizes), 40 | self.conv_per_stage, 2, conv_op, norm_op, norm_op_kwargs, dropout_op, dropout_op_kwargs, 41 | net_nonlin, net_nonlin_kwargs, True, False, lambda x: x, InitWeights_He(0), 42 | self.net_num_pool_op_kernel_sizes, self.net_conv_kernel_sizes, False, True, True, 43 | seg_output_use_bias=True) 44 | if torch.cuda.is_available(): 45 | self.network.cuda() 46 | self.network.inference_apply_nonlin = softmax_helper 47 | -------------------------------------------------------------------------------- /nnunet_mednext/training/network_training/nnUNet_variants/architectural_variants/nnUNetTrainerV2_lReLU_convlReLUIN.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | import torch 15 | from nnunet_mednext.network_architecture.generic_UNet import Generic_UNet, ConvDropoutNonlinNorm 16 | from nnunet_mednext.network_architecture.initialization import InitWeights_He 17 | from nnunet_mednext.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2 18 | from nnunet_mednext.utilities.nd_softmax import softmax_helper 19 | from torch import nn 20 | 21 | 22 | class nnUNetTrainerV2_lReLU_convReLUIN(nnUNetTrainerV2): 23 | def initialize_network(self): 24 | if self.threeD: 25 | conv_op = nn.Conv3d 26 | dropout_op = nn.Dropout3d 27 | norm_op = nn.InstanceNorm3d 28 | 29 | else: 30 | conv_op = nn.Conv2d 31 | dropout_op = nn.Dropout2d 32 | norm_op = nn.InstanceNorm2d 33 | 34 | norm_op_kwargs = {'eps': 1e-5, 'affine': True} 35 | dropout_op_kwargs = {'p': 0, 'inplace': True} 36 | net_nonlin = nn.LeakyReLU 37 | net_nonlin_kwargs = {'inplace': True, 'negative_slope': 1e-2} 38 | self.network = Generic_UNet(self.num_input_channels, self.base_num_features, self.num_classes, 39 | len(self.net_num_pool_op_kernel_sizes), 40 | self.conv_per_stage, 2, conv_op, norm_op, norm_op_kwargs, dropout_op, dropout_op_kwargs, 41 | net_nonlin, net_nonlin_kwargs, True, False, lambda x: x, InitWeights_He(1e-2), 42 | self.net_num_pool_op_kernel_sizes, self.net_conv_kernel_sizes, False, True, True, 43 | basic_block=ConvDropoutNonlinNorm) 44 | if torch.cuda.is_available(): 45 | self.network.cuda() 46 | self.network.inference_apply_nonlin = softmax_helper 47 | -------------------------------------------------------------------------------- /nnunet_mednext/training/network_training/nnUNet_variants/baselines/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MIC-DKFZ/MedNeXt/0b78ed869fbd1cc2fd38754d2f8519f1b72d43ba/nnunet_mednext/training/network_training/nnUNet_variants/baselines/__init__.py -------------------------------------------------------------------------------- /nnunet_mednext/training/network_training/nnUNet_variants/baselines/nnUNetTrainerV2_3DUXNet.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | from nnunet_mednext.network_architecture.custom_modules.custom_networks.UXNet3D. \ 4 | network_backbone import UXNET as UXNET_Orig 5 | from nnunet_mednext.training.network_training.nnUNet_variants.architectural_variants.nnUNetTrainerV2_noDeepSupervision \ 6 | import nnUNetTrainerV2_noDeepSupervision 7 | from nnunet_mednext.network_architecture.neural_network import SegmentationNetwork 8 | from nnunet_mednext.utilities.nd_softmax import softmax_helper 9 | 10 | 11 | class UXNET(UXNET_Orig, SegmentationNetwork): 12 | 13 | def __init__(self, *args, **kwargs): 14 | super().__init__(*args, **kwargs) 15 | # Segmentation Network Params. 
Needed for the nnUNet evaluation pipeline 16 | self.conv_op = nn.Conv3d 17 | self.inference_apply_nonlin = softmax_helper 18 | self.input_shape_must_be_divisible_by = 2**5 19 | self.num_classes = kwargs['out_chans'] 20 | self.do_ds = False 21 | 22 | 23 | class nnUNetTrainerV2_3DUXNet(nnUNetTrainerV2_noDeepSupervision): 24 | 25 | def __init__(self, *args, **kwargs): 26 | super().__init__(*args, **kwargs) 27 | self.initial_lr = 5e-4 28 | 29 | def initialize_network(self): 30 | 31 | self.network = UXNET( 32 | in_chans=self.num_input_channels, 33 | out_chans=self.num_classes, 34 | depths=[2, 2, 2, 2], 35 | feat_size=[48, 96, 192, 384], 36 | drop_path_rate=0, 37 | layer_scale_init_value=1e-6, 38 | spatial_dims=3, 39 | ) 40 | 41 | if torch.cuda.is_available(): 42 | self.network.cuda() 43 | 44 | def initialize_optimizer_and_scheduler(self): 45 | assert self.network is not None, "self.initialize_network must be called first" 46 | self.optimizer = torch.optim.AdamW(self.network.parameters(), 47 | self.initial_lr, 48 | weight_decay=self.weight_decay, 49 | eps=1e-4 # 1e-8 might cause nans in fp16 50 | ) 51 | self.lr_scheduler = None 52 | -------------------------------------------------------------------------------- /nnunet_mednext/training/network_training/nnUNet_variants/baselines/nnUNetTrainerV2_TransBTS.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | from nnunet_mednext.network_architecture.custom_modules.custom_networks.TransBTS.TransBTS_downsample8x_skipconnection \ 4 | import BTS as BTS_Orig 5 | from nnunet_mednext.training.network_training.nnUNet_variants.architectural_variants.nnUNetTrainerV2_noDeepSupervision import \ 6 | nnUNetTrainerV2_noDeepSupervision 7 | from nnunet_mednext.network_architecture.neural_network import SegmentationNetwork 8 | from nnunet_mednext.utilities.nd_softmax import softmax_helper 9 | 10 | 11 | class BTS(BTS_Orig, SegmentationNetwork): 12 | 13 | def __init__(self, *args, **kwargs): 14 | super().__init__(*args, **kwargs) 15 | # Segmentation Network Params. 
Needed for the nnUNet evaluation pipeline 16 | self.conv_op = nn.Conv3d 17 | self.inference_apply_nonlin = softmax_helper 18 | self.input_shape_must_be_divisible_by = 2**5 19 | self.num_classes = kwargs['num_classes'] 20 | self.do_ds = False # Already added this in the main class 21 | 22 | 23 | class nnUNetTrainerV2_TransBTS(nnUNetTrainerV2_noDeepSupervision): 24 | 25 | def __init__(self, *args, **kwargs): 26 | super().__init__(*args, **kwargs) 27 | self.initial_lr = 1e-4 28 | 29 | def initialize_network(self): 30 | 31 | _conv_repr=True 32 | _pe_type="learned" 33 | patch_dim = 8 34 | aux_layers = [1, 2, 3, 4] 35 | 36 | self.network = BTS( 37 | img_dim=128, 38 | patch_dim=patch_dim, 39 | num_channels=self.num_input_channels, 40 | num_classes=self.num_classes, 41 | embedding_dim=512, 42 | num_heads=8, 43 | num_layers=4, 44 | hidden_dim=4096, 45 | dropout_rate=0.1, 46 | attn_dropout_rate=0.1, 47 | conv_patch_representation=_conv_repr, 48 | positional_encoding_type=_pe_type, 49 | ) 50 | 51 | if torch.cuda.is_available(): 52 | self.network.cuda() -------------------------------------------------------------------------------- /nnunet_mednext/training/network_training/nnUNet_variants/baselines/nnUNetTrainerV2_UNETR.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | from monai.networks.nets.unetr import UNETR as UNETR_Orig 4 | from nnunet_mednext.training.network_training.nnUNet_variants.architectural_variants.nnUNetTrainerV2_noDeepSupervision import \ 5 | nnUNetTrainerV2_noDeepSupervision 6 | from nnunet_mednext.network_architecture.neural_network import SegmentationNetwork 7 | from nnunet_mednext.utilities.nd_softmax import softmax_helper 8 | 9 | 10 | class UNETR(UNETR_Orig, SegmentationNetwork): 11 | 12 | def __init__(self, *args, **kwargs): 13 | super().__init__(*args, **kwargs) 14 | # Segmentation Network Params. 
Needed for the nnUNet evaluation pipeline 15 | self.conv_op = nn.Conv3d 16 | self.inference_apply_nonlin = softmax_helper 17 | self.input_shape_must_be_divisible_by = 16 # just some random val 2**5 18 | self.num_classes = kwargs['out_channels'] 19 | self.do_ds = False 20 | 21 | 22 | class nnUNetTrainerV2_UNETR(nnUNetTrainerV2_noDeepSupervision): 23 | 24 | def __init__(self, *args, **kwargs): 25 | super().__init__(*args, **kwargs) 26 | self.initial_lr = 1e-4 27 | # self.oversample_foreground_percent = 1.0 28 | 29 | def initialize_network(self): 30 | self.network = UNETR( 31 | in_channels=self.num_input_channels, 32 | out_channels=self.num_classes, 33 | img_size=(128, 128, 128), 34 | feature_size=16, 35 | hidden_size=768, 36 | mlp_dim=3072, 37 | num_heads=12, 38 | pos_embed='perceptron', 39 | norm_name='instance', 40 | conv_block=True, 41 | res_block=True, 42 | dropout_rate=0.0 43 | ) 44 | 45 | if torch.cuda.is_available(): 46 | self.network.cuda() 47 | 48 | def initialize_optimizer_and_scheduler(self): 49 | assert self.network is not None, "self.initialize_network must be called first" 50 | self.optimizer = torch.optim.AdamW(self.network.parameters(), 51 | self.initial_lr, 52 | weight_decay=self.weight_decay, 53 | eps=1e-4 # 1e-8 might cause nans in fp16 54 | ) 55 | self.lr_scheduler = None -------------------------------------------------------------------------------- /nnunet_mednext/training/network_training/nnUNet_variants/benchmarking/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MIC-DKFZ/MedNeXt/0b78ed869fbd1cc2fd38754d2f8519f1b72d43ba/nnunet_mednext/training/network_training/nnUNet_variants/benchmarking/__init__.py -------------------------------------------------------------------------------- /nnunet_mednext/training/network_training/nnUNet_variants/cascade/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MIC-DKFZ/MedNeXt/0b78ed869fbd1cc2fd38754d2f8519f1b72d43ba/nnunet_mednext/training/network_training/nnUNet_variants/cascade/__init__.py -------------------------------------------------------------------------------- /nnunet_mednext/training/network_training/nnUNet_variants/cascade/nnUNetTrainerV2CascadeFullRes_lowerLR.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
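# [Editor's sketch -- illustrative aside, not part of the repository file above or below]
# The baseline trainers above (3D UX-Net, TransBTS, UNETR) all follow one recipe: mix the
# external architecture with nnU-Net's SegmentationNetwork so that sliding-window inference
# finds the attributes it expects. A schematic of that pattern; ExternalNet is a placeholder,
# not a class from this repository:
import torch.nn as nn
from nnunet_mednext.network_architecture.neural_network import SegmentationNetwork
from nnunet_mednext.utilities.nd_softmax import softmax_helper

class ExternalNet(nn.Module):                        # stand-in for UXNET_Orig / BTS_Orig / UNETR_Orig
    def __init__(self, out_channels=2):
        super().__init__()
        self.out_channels = out_channels

class WrappedExternalNet(ExternalNet, SegmentationNetwork):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.conv_op = nn.Conv3d                     # tells nnU-Net this is a 3D network
        self.inference_apply_nonlin = softmax_helper
        self.input_shape_must_be_divisible_by = 2 ** 5
        self.num_classes = self.out_channels
        self.do_ds = False                           # these baselines train without deep supervision

net = WrappedExternalNet(out_channels=4)
print(net.num_classes, net.do_ds)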
14 | 15 | 16 | from nnunet_mednext.training.network_training.nnUNetTrainerV2_CascadeFullRes import nnUNetTrainerV2CascadeFullRes 17 | 18 | 19 | class nnUNetTrainerV2CascadeFullRes_lowerLR(nnUNetTrainerV2CascadeFullRes): 20 | def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None, 21 | unpack_data=True, deterministic=True, previous_trainer="nnUNetTrainerV2", fp16=False): 22 | super().__init__(plans_file, fold, output_folder, dataset_directory, 23 | batch_dice, stage, unpack_data, deterministic, 24 | previous_trainer, fp16) 25 | self.initial_lr = 1e-3 26 | -------------------------------------------------------------------------------- /nnunet_mednext/training/network_training/nnUNet_variants/cascade/nnUNetTrainerV2CascadeFullRes_shorter.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | 16 | from nnunet_mednext.training.network_training.nnUNetTrainerV2_CascadeFullRes import nnUNetTrainerV2CascadeFullRes 17 | 18 | 19 | class nnUNetTrainerV2CascadeFullRes_shorter(nnUNetTrainerV2CascadeFullRes): 20 | def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None, 21 | unpack_data=True, deterministic=True, previous_trainer="nnUNetTrainerV2", fp16=False): 22 | super().__init__(plans_file, fold, output_folder, dataset_directory, 23 | batch_dice, stage, unpack_data, deterministic, 24 | previous_trainer, fp16) 25 | self.max_num_epochs = 500 26 | -------------------------------------------------------------------------------- /nnunet_mednext/training/network_training/nnUNet_variants/cascade/nnUNetTrainerV2CascadeFullRes_shorter_lowerLR.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
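# [Editor's sketch -- illustrative aside, not part of the repository file above or below]
# initial_lr and max_num_epochs, the two knobs these cascade variants change, both feed the
# polynomial learning-rate decay in training/learning_rate/poly_lr.py, which (to the best of
# my reading) has the form sketched here:
def poly_lr(epoch: int, max_epochs: int, initial_lr: float, exponent: float = 0.9) -> float:
    return initial_lr * (1 - epoch / max_epochs) ** exponent

print(poly_lr(0, 1000, 1e-2))    # 0.01      -- default nnU-Net settings
print(poly_lr(0, 500, 1e-3))     # 0.001     -- the shorter_lowerLR variant defined below
print(poly_lr(250, 500, 1e-3))   # ~0.000536 -- halfway through a 500-epoch run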
14 | 15 | 16 | from nnunet_mednext.training.network_training.nnUNetTrainerV2_CascadeFullRes import nnUNetTrainerV2CascadeFullRes 17 | 18 | 19 | class nnUNetTrainerV2CascadeFullRes_shorter_lowerLR(nnUNetTrainerV2CascadeFullRes): 20 | def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None, 21 | unpack_data=True, deterministic=True, previous_trainer="nnUNetTrainerV2", fp16=False): 22 | super().__init__(plans_file, fold, output_folder, dataset_directory, 23 | batch_dice, stage, unpack_data, deterministic, 24 | previous_trainer, fp16) 25 | self.max_num_epochs = 500 26 | self.initial_lr = 1e-3 27 | -------------------------------------------------------------------------------- /nnunet_mednext/training/network_training/nnUNet_variants/copies/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MIC-DKFZ/MedNeXt/0b78ed869fbd1cc2fd38754d2f8519f1b72d43ba/nnunet_mednext/training/network_training/nnUNet_variants/copies/__init__.py -------------------------------------------------------------------------------- /nnunet_mednext/training/network_training/nnUNet_variants/copies/nnUNetTrainerV2_copies.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | 16 | from nnunet_mednext.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2 17 | 18 | 19 | # This stuff is just so that we can check stability of results. 
Training is nondeterministic and by renaming the trainer 20 | # class we can have several trained models coexist although the trainer is effectively the same 21 | 22 | 23 | class nnUNetTrainerV2_copy1(nnUNetTrainerV2): 24 | def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None, 25 | unpack_data=True, deterministic=True, fp16=False): 26 | super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data, 27 | deterministic, fp16) 28 | 29 | 30 | class nnUNetTrainerV2_copy2(nnUNetTrainerV2): 31 | def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None, 32 | unpack_data=True, deterministic=True, fp16=False): 33 | super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data, 34 | deterministic, fp16) 35 | 36 | 37 | class nnUNetTrainerV2_copy3(nnUNetTrainerV2): 38 | def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None, 39 | unpack_data=True, deterministic=True, fp16=False): 40 | super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data, 41 | deterministic, fp16) 42 | 43 | 44 | class nnUNetTrainerV2_copy4(nnUNetTrainerV2): 45 | def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None, 46 | unpack_data=True, deterministic=True, fp16=False): 47 | super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data, 48 | deterministic, fp16) 49 | 50 | -------------------------------------------------------------------------------- /nnunet_mednext/training/network_training/nnUNet_variants/data_augmentation/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MIC-DKFZ/MedNeXt/0b78ed869fbd1cc2fd38754d2f8519f1b72d43ba/nnunet_mednext/training/network_training/nnUNet_variants/data_augmentation/__init__.py -------------------------------------------------------------------------------- /nnunet_mednext/training/network_training/nnUNet_variants/data_augmentation/nnUNetTrainerV2_DA2.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
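# [Editor's sketch -- illustrative aside, not part of the repository file above or below]
# The copy trainers above work because nnU-Net derives the results directory from the trainer
# class name, so otherwise identical runs end up in separate folders. Roughly (simplified; the
# exact layout is resolved in run/default_configuration.py and may differ in detail):
import os

def results_folder(results_base: str, network: str, task: str,
                   trainer_class_name: str, plans_identifier: str) -> str:
    return os.path.join(results_base, network, task, trainer_class_name + "__" + plans_identifier)

print(results_folder("RESULTS_FOLDER/nnUNet", "3d_fullres", "Task004_Hippocampus",
                     "nnUNetTrainerV2_copy1", "nnUNetPlansv2.1"))
# -> RESULTS_FOLDER/nnUNet/3d_fullres/Task004_Hippocampus/nnUNetTrainerV2_copy1__nnUNetPlansv2.1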
14 | 15 | 16 | from nnunet_mednext.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2 17 | 18 | 19 | class nnUNetTrainerV2_DA2(nnUNetTrainerV2): 20 | def setup_DA_params(self): 21 | super().setup_DA_params() 22 | 23 | self.data_aug_params["independent_scale_factor_for_each_axis"] = True 24 | 25 | if self.threeD: 26 | self.data_aug_params["rotation_p_per_axis"] = 0.5 27 | else: 28 | self.data_aug_params["rotation_p_per_axis"] = 1 29 | 30 | self.data_aug_params["do_additive_brightness"] = True 31 | 32 | -------------------------------------------------------------------------------- /nnunet_mednext/training/network_training/nnUNet_variants/data_augmentation/nnUNetTrainerV2_independentScalePerAxis.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | 16 | from nnunet_mednext.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2 17 | 18 | 19 | class nnUNetTrainerV2_independentScalePerAxis(nnUNetTrainerV2): 20 | def setup_DA_params(self): 21 | super().setup_DA_params() 22 | self.data_aug_params["independent_scale_factor_for_each_axis"] = True 23 | -------------------------------------------------------------------------------- /nnunet_mednext/training/network_training/nnUNet_variants/data_augmentation/nnUNetTrainerV2_noMirroring.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | 16 | from nnunet_mednext.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2 17 | 18 | 19 | class nnUNetTrainerV2_noMirroring(nnUNetTrainerV2): 20 | def validate(self, do_mirroring: bool = True, use_sliding_window: bool = True, 21 | step_size: float = 0.5, save_softmax: bool = True, use_gaussian: bool = True, overwrite: bool = True, 22 | validation_folder_name: str = 'validation_raw', debug: bool = False, all_in_gpu: bool = False, 23 | segmentation_export_kwargs: dict = None, run_postprocessing_on_folds: bool = True): 24 | """ 25 | We need to wrap this because we need to enforce self.network.do_ds = False for prediction 26 | """ 27 | ds = self.network.do_ds 28 | if do_mirroring: 29 | print("WARNING! 
do_mirroring was True but we cannot do that because we trained without mirroring. " 30 | "do_mirroring was set to False") 31 | do_mirroring = False 32 | self.network.do_ds = False 33 | ret = super().validate(do_mirroring=do_mirroring, use_sliding_window=use_sliding_window, step_size=step_size, 34 | save_softmax=save_softmax, use_gaussian=use_gaussian, 35 | overwrite=overwrite, validation_folder_name=validation_folder_name, debug=debug, 36 | all_in_gpu=all_in_gpu, segmentation_export_kwargs=segmentation_export_kwargs, 37 | run_postprocessing_on_folds=run_postprocessing_on_folds) 38 | self.network.do_ds = ds 39 | return ret 40 | 41 | def setup_DA_params(self): 42 | super().setup_DA_params() 43 | self.data_aug_params["do_mirror"] = False 44 | -------------------------------------------------------------------------------- /nnunet_mednext/training/network_training/nnUNet_variants/loss_function/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MIC-DKFZ/MedNeXt/0b78ed869fbd1cc2fd38754d2f8519f1b72d43ba/nnunet_mednext/training/network_training/nnUNet_variants/loss_function/__init__.py -------------------------------------------------------------------------------- /nnunet_mednext/training/network_training/nnUNet_variants/loss_function/nnUNetTrainerV2_ForceBD.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | 16 | from nnunet_mednext.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2 17 | 18 | 19 | class nnUNetTrainerV2_ForceBD(nnUNetTrainerV2): 20 | def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None, 21 | unpack_data=True, deterministic=True, fp16=False): 22 | batch_dice = True 23 | super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data, 24 | deterministic, fp16) 25 | -------------------------------------------------------------------------------- /nnunet_mednext/training/network_training/nnUNet_variants/loss_function/nnUNetTrainerV2_ForceSD.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | 16 | from nnunet_mednext.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2 17 | 18 | 19 | class nnUNetTrainerV2_ForceSD(nnUNetTrainerV2): 20 | def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None, 21 | unpack_data=True, deterministic=True, fp16=False): 22 | batch_dice = False 23 | super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data, 24 | deterministic, fp16) 25 | -------------------------------------------------------------------------------- /nnunet_mednext/training/network_training/nnUNet_variants/loss_function/nnUNetTrainerV2_Loss_CE.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | from nnunet_mednext.training.loss_functions.crossentropy import RobustCrossEntropyLoss 15 | from nnunet_mednext.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2 16 | 17 | 18 | class nnUNetTrainerV2_Loss_CE(nnUNetTrainerV2): 19 | def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None, 20 | unpack_data=True, deterministic=True, fp16=False): 21 | super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data, 22 | deterministic, fp16) 23 | self.loss = RobustCrossEntropyLoss() 24 | -------------------------------------------------------------------------------- /nnunet_mednext/training/network_training/nnUNet_variants/loss_function/nnUNetTrainerV2_Loss_CEGDL.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
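# [Editor's sketch -- illustrative aside, not part of the repository file above or below]
# ForceBD / ForceSD only pin the batch_dice flag. Conceptually, "batch dice" pools all samples
# of a minibatch into one pseudo-volume before computing the overlap, while "sample dice"
# scores each sample separately and averages. A toy binary illustration of the difference:
import torch

def dice(pred: torch.Tensor, gt: torch.Tensor, eps: float = 1e-5) -> torch.Tensor:
    inter = (pred * gt).sum()
    return (2 * inter + eps) / (pred.sum() + gt.sum() + eps)

pred = torch.tensor([[1., 1., 0., 0.], [0., 0., 0., 0.]])   # two samples, binary masks
gt = torch.tensor([[1., 0., 0., 0.], [0., 0., 0., 1.]])

batch_dice = dice(pred.flatten(), gt.flatten())                          # one pooled pseudo-volume
sample_dice = torch.stack([dice(p, g) for p, g in zip(pred, gt)]).mean()
print(batch_dice, sample_dice)   # 0.5 vs ~0.33: empty/small samples weigh more in sample dice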
14 | 15 | 16 | from nnunet_mednext.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2 17 | from nnunet_mednext.training.loss_functions.dice_loss import GDL_and_CE_loss 18 | 19 | 20 | class nnUNetTrainerV2_Loss_CEGDL(nnUNetTrainerV2): 21 | def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None, 22 | unpack_data=True, deterministic=True, fp16=False): 23 | super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data, 24 | deterministic, fp16) 25 | self.loss = GDL_and_CE_loss({'batch_dice': self.batch_dice, 'smooth': 1e-5, 'do_bg': False}, {}) 26 | -------------------------------------------------------------------------------- /nnunet_mednext/training/network_training/nnUNet_variants/loss_function/nnUNetTrainerV2_Loss_Dice.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | 16 | from nnunet_mednext.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2 17 | from nnunet_mednext.training.loss_functions.dice_loss import SoftDiceLoss 18 | from nnunet_mednext.utilities.nd_softmax import softmax_helper 19 | 20 | 21 | class nnUNetTrainerV2_Loss_Dice(nnUNetTrainerV2): 22 | def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None, 23 | unpack_data=True, deterministic=True, fp16=False): 24 | super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data, 25 | deterministic, fp16) 26 | self.loss = SoftDiceLoss(**{'apply_nonlin': softmax_helper, 'batch_dice': self.batch_dice, 'smooth': 1e-5, 'do_bg': False}) 27 | 28 | 29 | class nnUNetTrainerV2_Loss_DicewithBG(nnUNetTrainerV2): 30 | def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None, 31 | unpack_data=True, deterministic=True, fp16=False): 32 | super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data, 33 | deterministic, fp16) 34 | self.loss = SoftDiceLoss(**{'apply_nonlin': softmax_helper, 'batch_dice': self.batch_dice, 'smooth': 1e-5, 'do_bg': True}) 35 | 36 | -------------------------------------------------------------------------------- /nnunet_mednext/training/network_training/nnUNet_variants/loss_function/nnUNetTrainerV2_Loss_DiceCE_noSmooth.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 
5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | 16 | from nnunet_mednext.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2 17 | from nnunet_mednext.training.loss_functions.dice_loss import SoftDiceLoss, DC_and_CE_loss 18 | 19 | 20 | class nnUNetTrainerV2_Loss_DiceCE_noSmooth(nnUNetTrainerV2): 21 | def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None, 22 | unpack_data=True, deterministic=True, fp16=False): 23 | super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data, 24 | deterministic, fp16) 25 | self.loss = DC_and_CE_loss({'batch_dice': self.batch_dice, 'smooth': 0, 'do_bg': False}, {}) 26 | 27 | 28 | -------------------------------------------------------------------------------- /nnunet_mednext/training/network_training/nnUNet_variants/loss_function/nnUNetTrainerV2_Loss_DiceTopK10.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | 16 | from nnunet_mednext.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2 17 | from nnunet_mednext.training.loss_functions.dice_loss import DC_and_topk_loss 18 | 19 | 20 | class nnUNetTrainerV2_Loss_DiceTopK10(nnUNetTrainerV2): 21 | def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None, 22 | unpack_data=True, deterministic=True, fp16=False): 23 | super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data, 24 | deterministic, fp16) 25 | self.loss = DC_and_topk_loss({'batch_dice': self.batch_dice, 'smooth': 1e-5, 'do_bg': False}, 26 | {'k': 10}) 27 | -------------------------------------------------------------------------------- /nnunet_mednext/training/network_training/nnUNet_variants/loss_function/nnUNetTrainerV2_Loss_Dice_lr1en3.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 
5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | 16 | from nnunet_mednext.training.network_training.nnUNet_variants.loss_function.nnUNetTrainerV2_Loss_Dice import \ 17 | nnUNetTrainerV2_Loss_Dice, nnUNetTrainerV2_Loss_DicewithBG 18 | 19 | 20 | class nnUNetTrainerV2_Loss_Dice_LR1en3(nnUNetTrainerV2_Loss_Dice): 21 | def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None, 22 | unpack_data=True, deterministic=True, fp16=False): 23 | super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data, 24 | deterministic, fp16) 25 | self.initial_lr = 1e-3 26 | 27 | 28 | class nnUNetTrainerV2_Loss_DicewithBG_LR1en3(nnUNetTrainerV2_Loss_DicewithBG): 29 | def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None, 30 | unpack_data=True, deterministic=True, fp16=False): 31 | super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data, 32 | deterministic, fp16) 33 | self.initial_lr = 1e-3 34 | 35 | -------------------------------------------------------------------------------- /nnunet_mednext/training/network_training/nnUNet_variants/loss_function/nnUNetTrainerV2_Loss_Dice_squared.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
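# [Editor's sketch -- illustrative aside, not part of the repository file above or below]
# Conceptually, the soft Dice losses configured above reward per-class overlap between the
# softmax output p and the one-hot ground truth g; 'smooth' stabilises empty classes and
# 'do_bg' decides whether the background channel contributes. A compact sketch (the repository
# implementation in training/loss_functions/dice_loss.py differs in detail):
import torch
import torch.nn.functional as F

def soft_dice_loss(p: torch.Tensor, g: torch.Tensor, smooth: float = 1e-5,
                   do_bg: bool = False) -> torch.Tensor:
    # p, g: (batch, classes, *spatial); channel 0 is assumed to be background
    axes = (0,) + tuple(range(2, p.ndim))
    inter = (p * g).sum(axes)
    denom = p.sum(axes) + g.sum(axes)
    dc = (2 * inter + smooth) / (denom + smooth)   # per-class soft Dice
    if not do_bg:
        dc = dc[1:]
    return -dc.mean()                              # nnU-Net minimises the negative Dice

p = torch.softmax(torch.randn(2, 3, 4, 4, 4), dim=1)
g = F.one_hot(torch.randint(0, 3, (2, 4, 4, 4)), 3).permute(0, 4, 1, 2, 3).float()
print(soft_dice_loss(p, g), soft_dice_loss(p, g, do_bg=True))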
14 | 15 | 16 | from nnunet_mednext.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2 17 | from nnunet_mednext.training.loss_functions.dice_loss import SoftDiceLossSquared 18 | from nnunet_mednext.utilities.nd_softmax import softmax_helper 19 | 20 | 21 | class nnUNetTrainerV2_Loss_Dice_squared(nnUNetTrainerV2): 22 | def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None, 23 | unpack_data=True, deterministic=True, fp16=False): 24 | super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data, 25 | deterministic, fp16) 26 | self.initial_lr = 1e-3 27 | self.loss = SoftDiceLossSquared(**{'apply_nonlin': softmax_helper, 'batch_dice': self.batch_dice, 'smooth': 1e-5, 'do_bg': False}) 28 | -------------------------------------------------------------------------------- /nnunet_mednext/training/network_training/nnUNet_variants/loss_function/nnUNetTrainerV2_Loss_MCC.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | 16 | from nnunet_mednext.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2 17 | from nnunet_mednext.training.loss_functions.dice_loss import MCCLoss 18 | from nnunet_mednext.utilities.nd_softmax import softmax_helper 19 | 20 | 21 | class nnUNetTrainerV2_Loss_MCC(nnUNetTrainerV2): 22 | def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None, 23 | unpack_data=True, deterministic=True, fp16=False): 24 | super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data, 25 | deterministic, fp16) 26 | self.initial_lr = 1e-3 27 | self.loss = MCCLoss(apply_nonlin=softmax_helper, batch_mcc=self.batch_dice, do_bg=True, smooth=0.0) 28 | 29 | 30 | class nnUNetTrainerV2_Loss_MCCnoBG(nnUNetTrainerV2): 31 | def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None, 32 | unpack_data=True, deterministic=True, fp16=False): 33 | super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data, 34 | deterministic, fp16) 35 | self.initial_lr = 1e-3 36 | self.loss = MCCLoss(apply_nonlin=softmax_helper, batch_mcc=self.batch_dice, do_bg=False, smooth=0.0) 37 | 38 | -------------------------------------------------------------------------------- /nnunet_mednext/training/network_training/nnUNet_variants/loss_function/nnUNetTrainerV2_Loss_TopK10.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the 
License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | 16 | from nnunet_mednext.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2 17 | from nnunet_mednext.training.loss_functions.TopK_loss import TopKLoss 18 | 19 | 20 | class nnUNetTrainerV2_Loss_TopK10(nnUNetTrainerV2): 21 | def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None, 22 | unpack_data=True, deterministic=True, fp16=False): 23 | super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data, 24 | deterministic, fp16) 25 | self.loss = TopKLoss(k=10) 26 | 27 | 28 | nnUNetTrainerV2_Loss_TopK10_copy1 = nnUNetTrainerV2_Loss_TopK10 29 | nnUNetTrainerV2_Loss_TopK10_copy2 = nnUNetTrainerV2_Loss_TopK10 30 | nnUNetTrainerV2_Loss_TopK10_copy3 = nnUNetTrainerV2_Loss_TopK10 31 | nnUNetTrainerV2_Loss_TopK10_copy4 = nnUNetTrainerV2_Loss_TopK10 32 | 33 | -------------------------------------------------------------------------------- /nnunet_mednext/training/network_training/nnUNet_variants/loss_function/nnUNetTrainerV2_focalLoss.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | 15 | from nnunet_mednext.training.loss_functions.focal_loss import FocalLossV2 16 | from nnunet_mednext.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2 17 | from torch import nn 18 | 19 | 20 | class nnUNetTrainerV2_SegLoss_Focal(nnUNetTrainerV2): 21 | def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None, 22 | unpack_data=True, deterministic=True, fp16=False): 23 | super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, 24 | unpack_data, deterministic, fp16) 25 | print("Setting up self.loss = Focal_loss({'alpha':0.75, 'gamma':2, 'smooth':1e-5})") 26 | self.loss = FocalLossV2(apply_nonlin=nn.Softmax(dim=1), **{'alpha':0.5, 'gamma':2, 'smooth':1e-5}) 27 | 28 | 29 | -------------------------------------------------------------------------------- /nnunet_mednext/training/network_training/nnUNet_variants/loss_function/nnUNetTrainerV2_graduallyTransitionFromCEToDice.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | 16 | from nnunet_mednext.training.loss_functions.deep_supervision import MultipleOutputLoss2 17 | from nnunet_mednext.training.loss_functions.dice_loss import DC_and_CE_loss 18 | from nnunet_mednext.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2 19 | 20 | 21 | class nnUNetTrainerV2_graduallyTransitionFromCEToDice(nnUNetTrainerV2): 22 | def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None, 23 | unpack_data=True, deterministic=True, fp16=False): 24 | super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data, 25 | deterministic, fp16) 26 | self.loss = DC_and_CE_loss({'batch_dice': self.batch_dice, 'smooth': 1e-5, 'do_bg': False}, {}, weight_ce=2, weight_dice=0) 27 | 28 | def update_loss(self): 29 | # we train the first 500 epochs with CE, then transition to Dice between 500 and 750. 
The last 250 epochs will be Dice only 30 | 31 | if self.epoch <= 500: 32 | weight_ce = 2 33 | weight_dice = 0 34 | elif 500 < self.epoch <= 750: 35 | weight_ce = 2 - 2 / 250 * (self.epoch - 500) 36 | weight_dice = 0 + 2 / 250 * (self.epoch - 500) 37 | elif 750 < self.epoch <= self.max_num_epochs: 38 | weight_ce = 0 39 | weight_dice = 2 40 | else: 41 | raise RuntimeError("Invalid epoch: %d" % self.epoch) 42 | 43 | self.print_to_log_file("weight ce", weight_ce, "weight dice", weight_dice) 44 | 45 | self.loss = DC_and_CE_loss({'batch_dice': self.batch_dice, 'smooth': 1e-5, 'do_bg': False}, {}, weight_ce=weight_ce, 46 | weight_dice=weight_dice) 47 | 48 | self.loss = MultipleOutputLoss2(self.loss, self.ds_loss_weights) 49 | 50 | def on_epoch_end(self): 51 | ret = super().on_epoch_end() 52 | self.update_loss() 53 | return ret 54 | 55 | def load_checkpoint_ram(self, checkpoint, train=True): 56 | ret = super().load_checkpoint_ram(checkpoint, train) 57 | self.update_loss() 58 | return ret 59 | -------------------------------------------------------------------------------- /nnunet_mednext/training/network_training/nnUNet_variants/miscellaneous/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MIC-DKFZ/MedNeXt/0b78ed869fbd1cc2fd38754d2f8519f1b72d43ba/nnunet_mednext/training/network_training/nnUNet_variants/miscellaneous/__init__.py -------------------------------------------------------------------------------- /nnunet_mednext/training/network_training/nnUNet_variants/nnUNetTrainerCE.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | from nnunet_mednext.training.loss_functions.crossentropy import RobustCrossEntropyLoss 15 | from nnunet_mednext.training.network_training.nnUNetTrainer import nnUNetTrainer 16 | 17 | 18 | class nnUNetTrainerCE(nnUNetTrainer): 19 | def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None, 20 | unpack_data=True, deterministic=True, fp16=False): 21 | super(nnUNetTrainerCE, self).__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, 22 | unpack_data, deterministic, fp16) 23 | self.loss = RobustCrossEntropyLoss() 24 | -------------------------------------------------------------------------------- /nnunet_mednext/training/network_training/nnUNet_variants/optimizer_and_lr/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MIC-DKFZ/MedNeXt/0b78ed869fbd1cc2fd38754d2f8519f1b72d43ba/nnunet_mednext/training/network_training/nnUNet_variants/optimizer_and_lr/__init__.py -------------------------------------------------------------------------------- /nnunet_mednext/training/network_training/nnUNet_variants/optimizer_and_lr/nnUNetTrainerV2_Adam.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | 16 | import torch 17 | from nnunet_mednext.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2 18 | 19 | 20 | class nnUNetTrainerV2_Adam(nnUNetTrainerV2): 21 | 22 | def initialize_optimizer_and_scheduler(self): 23 | self.optimizer = torch.optim.Adam(self.network.parameters(), self.initial_lr, weight_decay=self.weight_decay, amsgrad=True) 24 | self.lr_scheduler = None 25 | 26 | 27 | nnUNetTrainerV2_Adam_copy1 = nnUNetTrainerV2_Adam 28 | nnUNetTrainerV2_Adam_copy2 = nnUNetTrainerV2_Adam 29 | nnUNetTrainerV2_Adam_copy3 = nnUNetTrainerV2_Adam 30 | nnUNetTrainerV2_Adam_copy4 = nnUNetTrainerV2_Adam 31 | -------------------------------------------------------------------------------- /nnunet_mednext/training/network_training/nnUNet_variants/optimizer_and_lr/nnUNetTrainerV2_Adam_lr_3en4.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | 16 | from nnunet_mednext.training.network_training.nnUNet_variants.optimizer_and_lr.nnUNetTrainerV2_Adam import nnUNetTrainerV2_Adam 17 | 18 | 19 | class nnUNetTrainerV2_Adam_nnUNetTrainerlr(nnUNetTrainerV2_Adam): 20 | def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None, 21 | unpack_data=True, deterministic=True, fp16=False): 22 | super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data, 23 | deterministic, fp16) 24 | self.initial_lr = 3e-4 25 | -------------------------------------------------------------------------------- /nnunet_mednext/training/network_training/nnUNet_variants/optimizer_and_lr/nnUNetTrainerV2_Ranger_lr1en2.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | 16 | from nnunet_mednext.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2 17 | from nnunet_mednext.training.optimizer.ranger import Ranger 18 | 19 | 20 | class nnUNetTrainerV2_Ranger_lr1en2(nnUNetTrainerV2): 21 | def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None, 22 | unpack_data=True, deterministic=True, fp16=False): 23 | super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data, 24 | deterministic, fp16) 25 | self.initial_lr = 1e-2 26 | 27 | def initialize_optimizer_and_scheduler(self): 28 | self.optimizer = Ranger(self.network.parameters(), self.initial_lr, k=6, N_sma_threshhold=5, 29 | weight_decay=self.weight_decay) 30 | self.lr_scheduler = None 31 | 32 | -------------------------------------------------------------------------------- /nnunet_mednext/training/network_training/nnUNet_variants/optimizer_and_lr/nnUNetTrainerV2_Ranger_lr3en3.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | 15 | 16 | from nnunet_mednext.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2 17 | from nnunet_mednext.training.optimizer.ranger import Ranger 18 | 19 | 20 | class nnUNetTrainerV2_Ranger_lr3en3(nnUNetTrainerV2): 21 | def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None, 22 | unpack_data=True, deterministic=True, fp16=False): 23 | super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data, 24 | deterministic, fp16) 25 | self.initial_lr = 3e-3 26 | 27 | def initialize_optimizer_and_scheduler(self): 28 | self.optimizer = Ranger(self.network.parameters(), self.initial_lr, k=6, N_sma_threshhold=5, 29 | weight_decay=self.weight_decay) 30 | self.lr_scheduler = None 31 | 32 | -------------------------------------------------------------------------------- /nnunet_mednext/training/network_training/nnUNet_variants/optimizer_and_lr/nnUNetTrainerV2_Ranger_lr3en4.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | 16 | from nnunet_mednext.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2 17 | from nnunet_mednext.training.optimizer.ranger import Ranger 18 | 19 | 20 | class nnUNetTrainerV2_Ranger_lr3en4(nnUNetTrainerV2): 21 | def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None, 22 | unpack_data=True, deterministic=True, fp16=False): 23 | super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data, 24 | deterministic, fp16) 25 | self.initial_lr = 3e-4 26 | 27 | def initialize_optimizer_and_scheduler(self): 28 | self.optimizer = Ranger(self.network.parameters(), self.initial_lr, k=6, N_sma_threshhold=5, 29 | weight_decay=self.weight_decay) 30 | self.lr_scheduler = None 31 | 32 | -------------------------------------------------------------------------------- /nnunet_mednext/training/network_training/nnUNet_variants/optimizer_and_lr/nnUNetTrainerV2_SGD_ReduceOnPlateau.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | 16 | import torch 17 | from nnunet_mednext.training.network_training.nnUNetTrainer import nnUNetTrainer 18 | from nnunet_mednext.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2 19 | from torch.optim import lr_scheduler 20 | 21 | 22 | class nnUNetTrainerV2_SGD_ReduceOnPlateau(nnUNetTrainerV2): 23 | def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None, 24 | unpack_data=True, deterministic=True, fp16=False): 25 | super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data, 26 | deterministic, fp16) 27 | 28 | def initialize_optimizer_and_scheduler(self): 29 | self.optimizer = torch.optim.SGD(self.network.parameters(), self.initial_lr, weight_decay=self.weight_decay, 30 | momentum=0.99, nesterov=True) 31 | self.lr_scheduler = lr_scheduler.ReduceLROnPlateau(self.optimizer, mode='min', factor=0.2, 32 | patience=self.lr_scheduler_patience, 33 | verbose=True, threshold=self.lr_scheduler_eps, 34 | threshold_mode="abs") 35 | 36 | def maybe_update_lr(self, epoch=None): 37 | # maybe update learning rate 38 | if self.lr_scheduler is not None: 39 | assert isinstance(self.lr_scheduler, (lr_scheduler.ReduceLROnPlateau, lr_scheduler._LRScheduler)) 40 | 41 | if isinstance(self.lr_scheduler, lr_scheduler.ReduceLROnPlateau): 42 | # lr scheduler is updated with moving average val loss. should be more robust 43 | if self.epoch > 0: # otherwise self.train_loss_MA is None 44 | self.lr_scheduler.step(self.train_loss_MA) 45 | else: 46 | self.lr_scheduler.step(self.epoch + 1) 47 | self.print_to_log_file("lr is now (scheduler) %s" % str(self.optimizer.param_groups[0]['lr'])) 48 | 49 | def on_epoch_end(self): 50 | return nnUNetTrainer.on_epoch_end(self) 51 | -------------------------------------------------------------------------------- /nnunet_mednext/training/network_training/nnUNet_variants/optimizer_and_lr/nnUNetTrainerV2_SGD_fixedSchedule.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | 15 | 16 | from nnunet_mednext.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2 17 | 18 | 19 | class nnUNetTrainerV2_SGD_fixedSchedule(nnUNetTrainerV2): 20 | def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None, 21 | unpack_data=True, deterministic=True, fp16=False): 22 | super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data, 23 | deterministic, fp16) 24 | 25 | def maybe_update_lr(self, epoch=None): 26 | if epoch is None: 27 | ep = self.epoch + 1 28 | else: 29 | ep = epoch 30 | 31 | if 0 <= ep < 500: 32 | new_lr = self.initial_lr 33 | elif 500 <= ep < 675: 34 | new_lr = self.initial_lr * 0.1 35 | elif 675 <= ep < 850: 36 | new_lr = self.initial_lr * 0.01 37 | elif ep >= 850: 38 | new_lr = self.initial_lr * 0.001 39 | else: 40 | raise RuntimeError("Really unexpected things happened, ep=%d" % ep) 41 | 42 | self.optimizer.param_groups[0]['lr'] = new_lr 43 | self.print_to_log_file("lr:", self.optimizer.param_groups[0]['lr']) 44 | -------------------------------------------------------------------------------- /nnunet_mednext/training/network_training/nnUNet_variants/optimizer_and_lr/nnUNetTrainerV2_SGD_fixedSchedule2.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | 15 | 16 | from nnunet_mednext.training.learning_rate.poly_lr import poly_lr 17 | from nnunet_mednext.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2 18 | 19 | 20 | class nnUNetTrainerV2_SGD_fixedSchedule2(nnUNetTrainerV2): 21 | def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None, 22 | unpack_data=True, deterministic=True, fp16=False): 23 | super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data, 24 | deterministic, fp16) 25 | 26 | def maybe_update_lr(self, epoch=None): 27 | """ 28 | here we go one step, then use polyLR 29 | :param epoch: 30 | :return: 31 | """ 32 | if epoch is None: 33 | ep = self.epoch + 1 34 | else: 35 | ep = epoch 36 | 37 | if 0 <= ep < 500: 38 | new_lr = self.initial_lr 39 | elif 500 <= ep < 675: 40 | new_lr = self.initial_lr * 0.1 41 | elif ep >= 675: 42 | new_lr = poly_lr(ep - 675, self.max_num_epochs - 675, self.initial_lr * 0.1, 0.9) 43 | else: 44 | raise RuntimeError("Really unexpected things happened, ep=%d" % ep) 45 | 46 | self.optimizer.param_groups[0]['lr'] = new_lr 47 | self.print_to_log_file("lr:", self.optimizer.param_groups[0]['lr']) 48 | -------------------------------------------------------------------------------- /nnunet_mednext/training/network_training/nnUNet_variants/optimizer_and_lr/nnUNetTrainerV2_SGD_lrs.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | 15 | 16 | from nnunet_mednext.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2 17 | 18 | 19 | class nnUNetTrainerV2_SGD_lr1en1(nnUNetTrainerV2): 20 | def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None, 21 | unpack_data=True, deterministic=True, fp16=False): 22 | super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data, 23 | deterministic, fp16) 24 | self.initial_lr = 1e-1 25 | 26 | 27 | class nnUNetTrainerV2_SGD_lr1en3(nnUNetTrainerV2): 28 | def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None, 29 | unpack_data=True, deterministic=True, fp16=False): 30 | super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data, 31 | deterministic, fp16) 32 | self.initial_lr = 1e-3 33 | 34 | -------------------------------------------------------------------------------- /nnunet_mednext/training/network_training/nnUNet_variants/optimizer_and_lr/nnUNetTrainerV2_fp16.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | 16 | from nnunet_mednext.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2 17 | 18 | 19 | class nnUNetTrainerV2_fp16(nnUNetTrainerV2): 20 | def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None, 21 | unpack_data=True, deterministic=True, fp16=False): 22 | assert fp16, "This one only accepts fp16=True" 23 | super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data, 24 | deterministic, fp16) 25 | -------------------------------------------------------------------------------- /nnunet_mednext/training/network_training/nnUNet_variants/optimizer_and_lr/nnUNetTrainerV2_momentum09.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | 15 | 16 | import torch 17 | 18 | from nnunet_mednext.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2 19 | 20 | 21 | class nnUNetTrainerV2_momentum09(nnUNetTrainerV2): 22 | def initialize_optimizer_and_scheduler(self): 23 | assert self.network is not None, "self.initialize_network must be called first" 24 | self.optimizer = torch.optim.SGD(self.network.parameters(), self.initial_lr, weight_decay=self.weight_decay, 25 | momentum=0.9, nesterov=True) 26 | self.lr_scheduler = None 27 | -------------------------------------------------------------------------------- /nnunet_mednext/training/network_training/nnUNet_variants/optimizer_and_lr/nnUNetTrainerV2_momentum095.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | 16 | import torch 17 | 18 | from nnunet_mednext.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2 19 | 20 | 21 | class nnUNetTrainerV2_momentum095(nnUNetTrainerV2): 22 | def initialize_optimizer_and_scheduler(self): 23 | assert self.network is not None, "self.initialize_network must be called first" 24 | self.optimizer = torch.optim.SGD(self.network.parameters(), self.initial_lr, weight_decay=self.weight_decay, 25 | momentum=0.95, nesterov=True) 26 | self.lr_scheduler = None 27 | -------------------------------------------------------------------------------- /nnunet_mednext/training/network_training/nnUNet_variants/optimizer_and_lr/nnUNetTrainerV2_momentum098.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | 15 | 16 | import torch 17 | 18 | from nnunet_mednext.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2 19 | 20 | 21 | class nnUNetTrainerV2_momentum098(nnUNetTrainerV2): 22 | def initialize_optimizer_and_scheduler(self): 23 | assert self.network is not None, "self.initialize_network must be called first" 24 | self.optimizer = torch.optim.SGD(self.network.parameters(), self.initial_lr, weight_decay=self.weight_decay, 25 | momentum=0.98, nesterov=True) 26 | self.lr_scheduler = None 27 | -------------------------------------------------------------------------------- /nnunet_mednext/training/network_training/nnUNet_variants/optimizer_and_lr/nnUNetTrainerV2_momentum09in2D.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | 16 | import torch 17 | from nnunet_mednext.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2 18 | 19 | 20 | class nnUNetTrainerV2_momentum09in2D(nnUNetTrainerV2): 21 | def initialize_optimizer_and_scheduler(self): 22 | if self.threeD: 23 | momentum = 0.99 24 | else: 25 | momentum = 0.9 26 | assert self.network is not None, "self.initialize_network must be called first" 27 | self.optimizer = torch.optim.SGD(self.network.parameters(), self.initial_lr, weight_decay=self.weight_decay, 28 | momentum=momentum, nesterov=True) 29 | self.lr_scheduler = None 30 | -------------------------------------------------------------------------------- /nnunet_mednext/training/network_training/nnUNet_variants/optimizer_and_lr/nnUNetTrainerV2_reduceMomentumDuringTraining.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | 16 | import torch 17 | 18 | from nnunet_mednext.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2 19 | 20 | 21 | class nnUNetTrainerV2_reduceMomentumDuringTraining(nnUNetTrainerV2): 22 | """ 23 | This implementation will not work with LR scheduler!!!!!!!!!! 
24 | 25 | After epoch 800, linearly decrease momentum from 0.99 to 0.9 26 | """ 27 | def initialize_optimizer_and_scheduler(self): 28 | current_momentum = 0.99 29 | min_momentum = 0.9 30 | 31 | if self.epoch > 800: 32 | current_momentum = current_momentum - (current_momentum - min_momentum) / 200 * (self.epoch - 800) 33 | 34 | self.print_to_log_file("current momentum", current_momentum) 35 | assert self.network is not None, "self.initialize_network must be called first" 36 | if self.optimizer is None: 37 | self.optimizer = torch.optim.SGD(self.network.parameters(), self.initial_lr, weight_decay=self.weight_decay, 38 | momentum=0.99, nesterov=True) 39 | else: 40 | # can't reinstantiate because that would break NVIDIA AMP 41 | self.optimizer.param_groups[0]["momentum"] = current_momentum 42 | self.lr_scheduler = None 43 | 44 | def on_epoch_end(self): 45 | self.initialize_optimizer_and_scheduler() 46 | return super().on_epoch_end() 47 | -------------------------------------------------------------------------------- /nnunet_mednext/training/network_training/nnUNet_variants/optimizer_and_lr/nnUNetTrainerV2_warmup.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | 15 | 16 | from nnunet_mednext.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2 17 | 18 | 19 | class nnUNetTrainerV2_warmup(nnUNetTrainerV2): 20 | def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None, 21 | unpack_data=True, deterministic=True, fp16=False): 22 | super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data, 23 | deterministic, fp16) 24 | self.max_num_epochs = 1050 25 | 26 | def maybe_update_lr(self, epoch=None): 27 | if self.epoch < 50: 28 | # epoch 49 is max 29 | # we increase lr linearly from 0 to initial_lr 30 | lr = (self.epoch + 1) / 50 * self.initial_lr 31 | self.optimizer.param_groups[0]['lr'] = lr 32 | self.print_to_log_file("epoch:", self.epoch, "lr:", lr) 33 | else: 34 | if epoch is not None: 35 | ep = epoch - 49 36 | else: 37 | ep = self.epoch - 49 38 | assert ep > 0, "epoch must be >0" 39 | return super().maybe_update_lr(ep) 40 | -------------------------------------------------------------------------------- /nnunet_mednext/training/network_training/nnUNet_variants/resampling/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MIC-DKFZ/MedNeXt/0b78ed869fbd1cc2fd38754d2f8519f1b72d43ba/nnunet_mednext/training/network_training/nnUNet_variants/resampling/__init__.py -------------------------------------------------------------------------------- /nnunet_mednext/training/optimizer/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MIC-DKFZ/MedNeXt/0b78ed869fbd1cc2fd38754d2f8519f1b72d43ba/nnunet_mednext/training/optimizer/__init__.py -------------------------------------------------------------------------------- /nnunet_mednext/utilities/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from . import * -------------------------------------------------------------------------------- /nnunet_mednext/utilities/file_endings.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | 15 | 16 | from batchgenerators.utilities.file_and_folder_operations import * 17 | 18 | 19 | def remove_trailing_slash(filename: str): 20 | while filename.endswith('/'): 21 | filename = filename[:-1] 22 | return filename 23 | 24 | 25 | def maybe_add_0000_to_all_niigz(folder): 26 | nii_gz = subfiles(folder, suffix='.nii.gz') 27 | for n in nii_gz: 28 | n = remove_trailing_slash(n) 29 | if not n.endswith('_0000.nii.gz'): 30 | os.rename(n, n[:-7] + '_0000.nii.gz') 31 | -------------------------------------------------------------------------------- /nnunet_mednext/utilities/folder_names.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | 16 | from batchgenerators.utilities.file_and_folder_operations import * 17 | from nnunet_mednext.paths import network_training_output_dir 18 | 19 | 20 | def get_output_folder_name(model: str, task: str = None, trainer: str = None, plans: str = None, fold: int = None, 21 | overwrite_training_output_dir: str = None): 22 | """ 23 | Retrieves the correct output directory for the nnU-Net model described by the input parameters 24 | 25 | :param model: 26 | :param task: 27 | :param trainer: 28 | :param plans: 29 | :param fold: 30 | :param overwrite_training_output_dir: 31 | :return: 32 | """ 33 | assert model in ["2d", "3d_cascade_fullres", '3d_fullres', '3d_lowres'] 34 | 35 | if overwrite_training_output_dir is not None: 36 | tr_dir = overwrite_training_output_dir 37 | else: 38 | tr_dir = network_training_output_dir 39 | 40 | current = join(tr_dir, model) 41 | if task is not None: 42 | current = join(current, task) 43 | if trainer is not None and plans is not None: 44 | current = join(current, trainer + "__" + plans) 45 | if fold is not None: 46 | current = join(current, "fold_%d" % fold) 47 | return current 48 | -------------------------------------------------------------------------------- /nnunet_mednext/utilities/nd_softmax.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | 15 | import torch 16 | from torch import nn 17 | import torch.nn.functional as F 18 | 19 | 20 | softmax_helper = lambda x: F.softmax(x, 1) 21 | 22 | -------------------------------------------------------------------------------- /nnunet_mednext/utilities/one_hot_encoding.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | import numpy as np 16 | 17 | 18 | def to_one_hot(seg, all_seg_labels=None): 19 | if all_seg_labels is None: 20 | all_seg_labels = np.unique(seg) 21 | result = np.zeros((len(all_seg_labels), *seg.shape), dtype=seg.dtype) 22 | for i, l in enumerate(all_seg_labels): 23 | result[i][seg == l] = 1 24 | return result 25 | -------------------------------------------------------------------------------- /nnunet_mednext/utilities/random_stuff.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | 16 | class no_op(object): 17 | def __enter__(self): 18 | pass 19 | 20 | def __exit__(self, *args): 21 | pass 22 | -------------------------------------------------------------------------------- /nnunet_mednext/utilities/recursive_delete_npz.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | 15 | 16 | from batchgenerators.utilities.file_and_folder_operations import * 17 | import argparse 18 | import os 19 | 20 | 21 | def recursive_delete_npz(current_directory: str): 22 | npz_files = subfiles(current_directory, join=True, suffix=".npz") 23 | npz_files = [i for i in npz_files if not i.endswith("segFromPrevStage.npz")] # to be extra safe 24 | _ = [os.remove(i) for i in npz_files] 25 | for d in subdirs(current_directory, join=False): 26 | if d != "pred_next_stage": 27 | recursive_delete_npz(join(current_directory, d)) 28 | 29 | 30 | if __name__ == "__main__": 31 | parser = argparse.ArgumentParser(usage="USE THIS RESPONSIBLY! DANGEROUS! I (Fabian) use this to remove npz files " 32 | "after I ran figure_out_what_to_submit") 33 | parser.add_argument("-f", help="folder", required=True) 34 | 35 | args = parser.parse_args() 36 | 37 | recursive_delete_npz(args.f) 38 | -------------------------------------------------------------------------------- /nnunet_mednext/utilities/recursive_rename_taskXX_to_taskXXX.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | 16 | from batchgenerators.utilities.file_and_folder_operations import * 17 | import os 18 | 19 | 20 | def recursive_rename(folder): 21 | s = subdirs(folder, join=False) 22 | for ss in s: 23 | if ss.startswith("Task") and ss.find("_") == 6: 24 | task_id = int(ss[4:6]) 25 | name = ss[7:] 26 | os.rename(join(folder, ss), join(folder, "Task%03.0d_" % task_id + name)) 27 | s = subdirs(folder, join=True) 28 | for ss in s: 29 | recursive_rename(ss) 30 | 31 | if __name__ == "__main__": 32 | recursive_rename("/media/fabian/Results/nnUNet") 33 | recursive_rename("/media/fabian/nnunet") 34 | recursive_rename("/media/fabian/My Book/MedicalDecathlon") 35 | recursive_rename("/home/fabian/drives/datasets/nnUNet_raw") 36 | recursive_rename("/home/fabian/drives/datasets/nnUNet_preprocessed") 37 | recursive_rename("/home/fabian/drives/datasets/nnUNet_testSets") 38 | recursive_rename("/home/fabian/drives/datasets/results/nnUNet") 39 | recursive_rename("/home/fabian/drives/e230-dgx2-1-data_fabian/Decathlon_raw") 40 | recursive_rename("/home/fabian/drives/e230-dgx2-1-data_fabian/nnUNet_preprocessed") 41 | 42 | -------------------------------------------------------------------------------- /nnunet_mednext/utilities/set_n_proc_DA.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 
5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | import subprocess 15 | import os 16 | 17 | 18 | def get_allowed_n_proc_DA(): 19 | hostname = subprocess.getoutput(['hostname']) 20 | 21 | if 'nnUNet_n_proc_DA' in os.environ.keys(): 22 | return int(os.environ['nnUNet_n_proc_DA']) 23 | 24 | if hostname in ['hdf19-gpu16', 'hdf19-gpu17', 'e230-AMDworkstation']: 25 | return 16 26 | 27 | if hostname in ['Fabian',]: 28 | return 12 29 | 30 | if hostname.startswith('hdf19-gpu') or hostname.startswith('e071-gpu'): 31 | return 12 32 | elif hostname.startswith('e230-dgx1'): 33 | return 10 34 | elif hostname.startswith('hdf18-gpu') or hostname.startswith('e132-comp'): 35 | return 16 36 | elif hostname.startswith('e230-dgx2'): 37 | return 6 38 | elif hostname.startswith('e230-dgxa100-'): 39 | return 32 40 | else: 41 | return None -------------------------------------------------------------------------------- /nnunet_mednext/utilities/sitk_stuff.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | 16 | import SimpleITK as sitk 17 | 18 | 19 | def copy_geometry(image: sitk.Image, ref: sitk.Image): 20 | image.SetOrigin(ref.GetOrigin()) 21 | image.SetDirection(ref.GetDirection()) 22 | image.SetSpacing(ref.GetSpacing()) 23 | return image 24 | -------------------------------------------------------------------------------- /nnunet_mednext/utilities/tensor_utilities.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | 15 | import numpy as np 16 | import torch 17 | from torch import nn 18 | 19 | 20 | def sum_tensor(inp, axes, keepdim=False): 21 | axes = np.unique(axes).astype(int) 22 | if keepdim: 23 | for ax in axes: 24 | inp = inp.sum(int(ax), keepdim=True) 25 | else: 26 | for ax in sorted(axes, reverse=True): 27 | inp = inp.sum(int(ax)) 28 | return inp 29 | 30 | 31 | def mean_tensor(inp, axes, keepdim=False): 32 | axes = np.unique(axes).astype(int) 33 | if keepdim: 34 | for ax in axes: 35 | inp = inp.mean(int(ax), keepdim=True) 36 | else: 37 | for ax in sorted(axes, reverse=True): 38 | inp = inp.mean(int(ax)) 39 | return inp 40 | 41 | 42 | def flip(x, dim): 43 | """ 44 | flips the tensor at dimension dim (mirroring!) 45 | :param x: 46 | :param dim: 47 | :return: 48 | """ 49 | indices = [slice(None)] * x.dim() 50 | indices[dim] = torch.arange(x.size(dim) - 1, -1, -1, 51 | dtype=torch.long, device=x.device) 52 | return x[tuple(indices)] 53 | 54 | 55 | -------------------------------------------------------------------------------- /nnunet_mednext/utilities/to_torch.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | 15 | import torch 16 | 17 | 18 | def maybe_to_torch(d): 19 | if isinstance(d, list): 20 | d = [maybe_to_torch(i) if not isinstance(i, torch.Tensor) else i for i in d] 21 | elif not isinstance(d, torch.Tensor): 22 | d = torch.from_numpy(d).float() 23 | return d 24 | 25 | 26 | def to_cuda(data, non_blocking=True, gpu_id=0): 27 | if isinstance(data, list): 28 | data = [i.cuda(gpu_id, non_blocking=non_blocking) for i in data] 29 | else: 30 | data = data.cuda(gpu_id, non_blocking=non_blocking) 31 | return data 32 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [metadata] 2 | description-file = readme.md -------------------------------------------------------------------------------- /tests/tests_mednext_miccai_architectures.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import torch 3 | from nnunet_mednext.network_architecture.mednextv1.create_mednext_v1 import create_mednext_v1 4 | 5 | class Test_MedNeXt_archs: 6 | 7 | @pytest.mark.parametrize("model_size, kernel_size", [ 8 | ('S', 3), 9 | ('B', 3), 10 | ('M', 3), 11 | ('L', 3), 12 | ('S', 5), 13 | ('B', 5), 14 | ('M', 5), 15 | ('L', 5), 16 | ]) 17 | def test_init_and_forward(self, model_size, kernel_size): 18 | m = create_mednext_v1(2, 4, model_size, kernel_size).cuda() 19 | input = torch.zeros((1,2,128,128,128), requires_grad=False).cuda() 20 | with torch.no_grad(): 21 | output = m(input) 22 | del m 23 | inp_shape = input.shape 24 | assert output[0].shape == (inp_shape[0], 4, *inp_shape[2:]) 25 | --------------------------------------------------------------------------------
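The loss-function trainer variants above differ only in the loss object assigned in __init__. The short sketch below (illustrative only, not part of the repository) exercises those same loss configurations outside of a trainer. It assumes nnU-Net's usual convention of (B, C, X, Y, Z) logit predictions and (B, 1, X, Y, Z) integer label targets, and it fixes batch_dice to True where the trainers would pass self.batch_dice.

import torch

from nnunet_mednext.training.loss_functions.dice_loss import DC_and_CE_loss, DC_and_topk_loss
from nnunet_mednext.training.loss_functions.TopK_loss import TopKLoss

# toy 3-class prediction/target pair (shapes follow the assumed nnU-Net convention)
pred = torch.randn(2, 3, 32, 32, 32, requires_grad=True)  # raw logits
target = torch.randint(0, 3, (2, 1, 32, 32, 32))          # integer labels

# loss configurations copied from the trainer variants in this folder
losses = {
    'DiceCE_noSmooth': DC_and_CE_loss({'batch_dice': True, 'smooth': 0, 'do_bg': False}, {}),
    'DiceTopK10': DC_and_topk_loss({'batch_dice': True, 'smooth': 1e-5, 'do_bg': False}, {'k': 10}),
    'TopK10': TopKLoss(k=10),
}

for name, loss_fn in losses.items():
    value = loss_fn(pred, target)   # scalar loss
    value.backward()                # gradients accumulate in pred.grad
    print(name, value.item())

Inside the trainers, the chosen loss is additionally wrapped in MultipleOutputLoss2 to distribute it over the deep-supervision outputs, as nnUNetTrainerV2_graduallyTransitionFromCEToDice does in update_loss above.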
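nnUNetTrainerV2_warmup overrides the learning rate only for the first 50 epochs and then hands a shifted epoch counter back to the base class. The sketch below tabulates the resulting schedule. It is illustrative only and assumes that the base nnUNetTrainerV2 applies poly_lr(ep, max_num_epochs, initial_lr, 0.9) with a default initial_lr of 1e-2; neither of those defaults is shown in this excerpt.

from nnunet_mednext.training.learning_rate.poly_lr import poly_lr

initial_lr = 1e-2      # assumed nnUNetTrainerV2 default
max_num_epochs = 1050  # set explicitly by nnUNetTrainerV2_warmup

def warmup_lr(epoch):
    if epoch < 50:
        # linear ramp from 0 to initial_lr over the first 50 epochs
        return (epoch + 1) / 50 * initial_lr
    # afterwards the base polynomial decay takes over, with the epoch shifted by 49
    return poly_lr(epoch - 49, max_num_epochs, initial_lr, 0.9)

for ep in (0, 25, 49, 50, 500, 1049):
    print(ep, round(warmup_lr(ep), 6))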