├── Finetune ├── AbdomenAtlas │ ├── Atlas_test.py │ ├── Atlas_test.sh │ ├── check.py │ ├── dataset │ │ ├── __init__.py │ │ ├── __pycache__ │ │ │ ├── __init__.cpython-39.pyc │ │ │ └── dataloader_test.cpython-39.pyc │ │ ├── dataloader_bdmap.py │ │ ├── dataloader_test.py │ │ └── dataset_list │ │ │ └── AbdomenAtlas1.0.txt │ ├── main.py │ ├── optimizers │ │ ├── __init__.py │ │ ├── __pycache__ │ │ │ ├── __init__.cpython-310.pyc │ │ │ ├── __init__.cpython-311.pyc │ │ │ ├── __init__.cpython-38.pyc │ │ │ ├── __init__.cpython-39.pyc │ │ │ ├── lr_scheduler.cpython-310.pyc │ │ │ ├── lr_scheduler.cpython-311.pyc │ │ │ ├── lr_scheduler.cpython-38.pyc │ │ │ └── lr_scheduler.cpython-39.pyc │ │ └── lr_scheduler.py │ ├── preprocess │ │ └── try_load.py │ ├── readme.md │ ├── requirements.txt │ ├── train.sh │ ├── train.slurm │ ├── trainer.py │ └── utils │ │ ├── __init__.py │ │ ├── __pycache__ │ │ ├── __init__.cpython-310.pyc │ │ ├── __init__.cpython-311.pyc │ │ ├── __init__.cpython-38.pyc │ │ ├── __init__.cpython-39.pyc │ │ ├── data_trans.cpython-39.pyc │ │ ├── utils.cpython-310.pyc │ │ ├── utils.cpython-311.pyc │ │ ├── utils.cpython-38.pyc │ │ └── utils.cpython-39.pyc │ │ ├── data_trans.py │ │ ├── mixup.py │ │ └── utils.py ├── Amos │ ├── check_test.py │ ├── dataset │ │ ├── __init__.py │ │ ├── dataset.json │ │ └── dataset_test50.json │ ├── dataset_CT.json │ ├── gen_json.py │ ├── inferers.py │ ├── main.py │ ├── optimizers │ │ ├── __init__.py │ │ └── lr_scheduler.py │ ├── pre_cache.py │ ├── test.py │ ├── train.sh │ ├── trainer.py │ ├── utils │ │ ├── __init__.py │ │ ├── data_test.py │ │ ├── data_utils.py │ │ └── utils.py │ └── val.py ├── BTCV │ ├── dataset │ │ ├── __init__.py │ │ └── dataset_0.json │ ├── main.py │ ├── optimizers │ │ ├── __init__.py │ │ └── lr_scheduler.py │ ├── trainer.py │ ├── utils │ │ ├── __init__.py │ │ ├── data_test.py │ │ ├── data_utils.py │ │ └── utils.py │ └── val.py ├── CC-CCII │ ├── csv │ │ ├── CC_CCII_fold0_train.csv │ │ ├── CC_CCII_fold0_valid.csv │ │ ├── CC_CCII_fold1_train.csv │ │ ├── CC_CCII_fold1_valid.csv │ │ ├── CC_CCII_fold2_train.csv │ │ ├── CC_CCII_fold2_valid.csv │ │ └── CC_CCII_metadata.csv │ ├── dataset │ │ └── __init__.py │ ├── eval.py │ ├── main.py │ ├── model.py │ ├── optimizers │ │ ├── __init__.py │ │ └── lr_scheduler.py │ ├── train.sh │ ├── trainer.py │ └── utils │ │ ├── __init__.py │ │ ├── data_utils.py │ │ └── utils.py ├── Flare22 │ ├── __init__.py │ ├── dataset │ │ ├── __init__.py │ │ ├── dataset.json │ │ └── dataset_test50.json │ ├── inferers.py │ ├── main.py │ ├── optimizers │ │ ├── __init__.py │ │ └── lr_scheduler.py │ ├── train.sh │ ├── trainer.py │ ├── utils │ │ ├── __init__.py │ │ ├── data_test.py │ │ ├── data_utils.py │ │ └── utils.py │ └── val.py ├── MM-WHS │ ├── dataset.json │ ├── inferers.py │ ├── main.py │ ├── optimizers │ │ ├── __init__.py │ │ └── lr_scheduler.py │ ├── pretrained_models │ │ └── __init__.py │ ├── test.py │ ├── train.sh │ ├── trainer.py │ └── utils │ │ ├── __init__.py │ │ ├── data_utils.py │ │ └── utils.py ├── Word │ ├── dataset │ │ ├── __init__.py │ │ └── dataset_word.json │ ├── main.py │ ├── optimizers │ │ ├── __init__.py │ │ └── lr_scheduler.py │ ├── train.sh │ ├── train.slurm │ ├── trainer.py │ └── utils │ │ ├── __init__.py │ │ ├── data_utils.py │ │ └── utils.py └── nnUNet │ ├── LICENSE │ ├── documentation │ ├── __init__.py │ ├── assets │ │ ├── HI_Logo.png │ │ ├── amos2022_sparseseg10.png │ │ ├── amos2022_sparseseg10_2d.png │ │ ├── dkfz_logo.png │ │ ├── nnU-Net_overview.png │ │ ├── regions_vs_labels.png │ │ └── sparse_annotation_amos.png │ ├── benchmarking.md │ ├── changelog.md │ ├── competitions │ │ └── AutoPETII.md │ ├── convert_msd_dataset.md │ ├── dataset_format.md │ ├── dataset_format_inference.md │ ├── explanation_normalization.md │ ├── explanation_plans_files.md │ ├── extending_nnunet.md │ ├── how_to_use_nnunet.md │ ├── installation_instructions.md │ ├── manual_data_splits.md │ ├── pretraining_and_finetuning.md │ ├── region_based_training.md │ ├── run_inference_with_pretrained_models.md │ ├── set_environment_variables.md │ ├── setting_up_paths.md │ └── tldr_migration_guide_from_v1.md │ ├── msd.txt │ ├── nnunetv2.egg-info │ ├── PKG-INFO │ ├── SOURCES.txt │ ├── dependency_links.txt │ ├── entry_points.txt │ ├── requires.txt │ └── top_level.txt │ ├── nnunetv2 │ ├── __init__.py │ ├── batch_running │ │ ├── __init__.py │ │ ├── benchmarking │ │ │ ├── __init__.py │ │ │ ├── generate_benchmarking_commands.py │ │ │ └── summarize_benchmark_results.py │ │ ├── collect_results_custom_Decathlon.py │ │ ├── collect_results_custom_Decathlon_2d.py │ │ ├── generate_lsf_runs_customDecathlon.py │ │ └── release_trainings │ │ │ ├── __init__.py │ │ │ └── nnunetv2_v1 │ │ │ ├── __init__.py │ │ │ ├── collect_results.py │ │ │ └── generate_lsf_commands.py │ ├── configuration.py │ ├── dataset_conversion │ │ ├── Dataset017_BTCV.py │ │ ├── Dataset027_ACDC.py │ │ ├── Dataset073_Fluo_C3DH_A549_SIM.py │ │ ├── Dataset114_MNMs.py │ │ ├── Dataset115_EMIDEC.py │ │ ├── Dataset120_RoadSegmentation.py │ │ ├── Dataset137_BraTS21.py │ │ ├── Dataset218_Amos2022_task1.py │ │ ├── Dataset219_Amos2022_task2.py │ │ ├── Dataset220_KiTS2023.py │ │ ├── Dataset221_AutoPETII_2023.py │ │ ├── Dataset988_dummyDataset4.py │ │ ├── __init__.py │ │ ├── __pycache__ │ │ │ ├── __init__.cpython-311.pyc │ │ │ ├── __init__.cpython-39.pyc │ │ │ ├── convert_MSD_dataset.cpython-311.pyc │ │ │ ├── convert_raw_dataset_from_old_nnunet_format.cpython-311.pyc │ │ │ ├── generate_dataset_json.cpython-310.pyc │ │ │ ├── generate_dataset_json.cpython-311.pyc │ │ │ └── generate_dataset_json.cpython-39.pyc │ │ ├── convert_MSD_dataset.py │ │ ├── convert_raw_dataset_from_old_nnunet_format.py │ │ ├── datasets_for_integration_tests │ │ │ ├── Dataset996_IntegrationTest_Hippocampus_regions_ignore.py │ │ │ ├── Dataset997_IntegrationTest_Hippocampus_regions.py │ │ │ ├── Dataset998_IntegrationTest_Hippocampus_ignore.py │ │ │ ├── Dataset999_IntegrationTest_Hippocampus.py │ │ │ └── __init__.py │ │ └── generate_dataset_json.py │ ├── ensembling │ │ ├── __init__.py │ │ └── ensemble.py │ ├── evaluation │ │ ├── __init__.py │ │ ├── __pycache__ │ │ │ ├── __init__.cpython-311.pyc │ │ │ └── evaluate_predictions.cpython-311.pyc │ │ ├── accumulate_cv_results.py │ │ ├── evaluate_predictions.py │ │ └── find_best_configuration.py │ ├── experiment_planning │ │ ├── __init__.py │ │ ├── __pycache__ │ │ │ ├── __init__.cpython-311.pyc │ │ │ ├── __init__.cpython-39.pyc │ │ │ ├── plan_and_preprocess_api.cpython-311.pyc │ │ │ ├── plan_and_preprocess_api.cpython-39.pyc │ │ │ ├── plan_and_preprocess_entrypoints.cpython-311.pyc │ │ │ ├── plan_and_preprocess_entrypoints.cpython-39.pyc │ │ │ ├── verify_dataset_integrity.cpython-311.pyc │ │ │ └── verify_dataset_integrity.cpython-39.pyc │ │ ├── dataset_fingerprint │ │ │ ├── __init__.py │ │ │ ├── __pycache__ │ │ │ │ ├── __init__.cpython-311.pyc │ │ │ │ ├── __init__.cpython-39.pyc │ │ │ │ ├── fingerprint_extractor.cpython-311.pyc │ │ │ │ └── fingerprint_extractor.cpython-39.pyc │ │ │ └── fingerprint_extractor.py │ │ ├── experiment_planners │ │ │ ├── __init__.py │ │ │ ├── __pycache__ │ │ │ │ ├── __init__.cpython-311.pyc │ │ │ │ ├── __init__.cpython-39.pyc │ │ │ │ ├── default_experiment_planner.cpython-311.pyc │ │ │ │ ├── default_experiment_planner.cpython-39.pyc │ │ │ │ ├── network_topology.cpython-311.pyc │ │ │ │ └── network_topology.cpython-39.pyc │ │ │ ├── default_experiment_planner.py │ │ │ ├── network_topology.py │ │ │ ├── readme.md │ │ │ └── resencUNet_planner.py │ │ ├── plan_and_preprocess_api.py │ │ ├── plan_and_preprocess_entrypoints.py │ │ ├── plans_for_pretraining │ │ │ ├── __init__.py │ │ │ └── move_plans_between_datasets.py │ │ └── verify_dataset_integrity.py │ ├── imageio │ │ ├── __init__.py │ │ ├── __pycache__ │ │ │ ├── __init__.cpython-311.pyc │ │ │ ├── __init__.cpython-39.pyc │ │ │ ├── base_reader_writer.cpython-311.pyc │ │ │ ├── base_reader_writer.cpython-39.pyc │ │ │ ├── natural_image_reader_writer.cpython-311.pyc │ │ │ ├── natural_image_reader_writer.cpython-39.pyc │ │ │ ├── nibabel_reader_writer.cpython-311.pyc │ │ │ ├── nibabel_reader_writer.cpython-39.pyc │ │ │ ├── reader_writer_registry.cpython-311.pyc │ │ │ ├── reader_writer_registry.cpython-39.pyc │ │ │ ├── simpleitk_reader_writer.cpython-311.pyc │ │ │ ├── simpleitk_reader_writer.cpython-39.pyc │ │ │ ├── tif_reader_writer.cpython-311.pyc │ │ │ └── tif_reader_writer.cpython-39.pyc │ │ ├── base_reader_writer.py │ │ ├── natural_image_reader_writer.py │ │ ├── nibabel_reader_writer.py │ │ ├── reader_writer_registry.py │ │ ├── readme.md │ │ ├── simpleitk_reader_writer.py │ │ └── tif_reader_writer.py │ ├── inference │ │ ├── __init__.py │ │ ├── __pycache__ │ │ │ ├── __init__.cpython-311.pyc │ │ │ ├── data_iterators.cpython-311.pyc │ │ │ ├── export_prediction.cpython-311.pyc │ │ │ ├── predict_from_raw_data.cpython-311.pyc │ │ │ └── sliding_window_prediction.cpython-311.pyc │ │ ├── data_iterators.py │ │ ├── examples.py │ │ ├── export_prediction.py │ │ ├── predict_from_raw_data.py │ │ ├── readme.md │ │ └── sliding_window_prediction.py │ ├── model_sharing │ │ ├── __init__.py │ │ ├── entry_points.py │ │ ├── model_download.py │ │ ├── model_export.py │ │ └── model_import.py │ ├── paths.py │ ├── postprocessing │ │ ├── __init__.py │ │ └── remove_connected_components.py │ ├── preprocessing │ │ ├── __init__.py │ │ ├── __pycache__ │ │ │ ├── __init__.cpython-311.pyc │ │ │ └── __init__.cpython-39.pyc │ │ ├── cropping │ │ │ ├── __init__.py │ │ │ ├── __pycache__ │ │ │ │ ├── __init__.cpython-311.pyc │ │ │ │ ├── __init__.cpython-39.pyc │ │ │ │ ├── cropping.cpython-311.pyc │ │ │ │ └── cropping.cpython-39.pyc │ │ │ └── cropping.py │ │ ├── normalization │ │ │ ├── __init__.py │ │ │ ├── __pycache__ │ │ │ │ ├── __init__.cpython-311.pyc │ │ │ │ ├── __init__.cpython-39.pyc │ │ │ │ ├── default_normalization_schemes.cpython-311.pyc │ │ │ │ ├── default_normalization_schemes.cpython-39.pyc │ │ │ │ ├── map_channel_name_to_normalization.cpython-311.pyc │ │ │ │ └── map_channel_name_to_normalization.cpython-39.pyc │ │ │ ├── default_normalization_schemes.py │ │ │ ├── map_channel_name_to_normalization.py │ │ │ └── readme.md │ │ ├── preprocessors │ │ │ ├── __init__.py │ │ │ ├── __pycache__ │ │ │ │ ├── __init__.cpython-311.pyc │ │ │ │ ├── __init__.cpython-39.pyc │ │ │ │ ├── default_preprocessor.cpython-311.pyc │ │ │ │ └── default_preprocessor.cpython-39.pyc │ │ │ └── default_preprocessor.py │ │ └── resampling │ │ │ ├── __init__.py │ │ │ ├── __pycache__ │ │ │ ├── __init__.cpython-311.pyc │ │ │ ├── __init__.cpython-39.pyc │ │ │ ├── default_resampling.cpython-311.pyc │ │ │ ├── default_resampling.cpython-39.pyc │ │ │ ├── utils.cpython-311.pyc │ │ │ └── utils.cpython-39.pyc │ │ │ ├── default_resampling.py │ │ │ └── utils.py │ ├── run │ │ ├── __init__.py │ │ ├── __pycache__ │ │ │ ├── __init__.cpython-311.pyc │ │ │ ├── load_pretrained_weights.cpython-311.pyc │ │ │ └── run_training.cpython-311.pyc │ │ ├── load_pretrained_weights.py │ │ └── run_training.py │ ├── tests │ │ ├── __init__.py │ │ └── integration_tests │ │ │ ├── __init__.py │ │ │ ├── add_lowres_and_cascade.py │ │ │ ├── cleanup_integration_test.py │ │ │ ├── lsf_commands.sh │ │ │ ├── prepare_integration_tests.sh │ │ │ ├── readme.md │ │ │ ├── run_integration_test.sh │ │ │ ├── run_integration_test_bestconfig_inference.py │ │ │ └── run_integration_test_trainingOnly_DDP.sh │ ├── training │ │ ├── __init__.py │ │ ├── __pycache__ │ │ │ └── __init__.cpython-311.pyc │ │ ├── data_augmentation │ │ │ ├── __init__.py │ │ │ ├── __pycache__ │ │ │ │ ├── __init__.cpython-311.pyc │ │ │ │ └── compute_initial_patch_size.cpython-311.pyc │ │ │ ├── compute_initial_patch_size.py │ │ │ └── custom_transforms │ │ │ │ ├── __init__.py │ │ │ │ ├── __pycache__ │ │ │ │ ├── __init__.cpython-311.pyc │ │ │ │ ├── cascade_transforms.cpython-311.pyc │ │ │ │ ├── deep_supervision_donwsampling.cpython-311.pyc │ │ │ │ ├── limited_length_multithreaded_augmenter.cpython-311.pyc │ │ │ │ ├── masking.cpython-311.pyc │ │ │ │ ├── region_based_training.cpython-311.pyc │ │ │ │ └── transforms_for_dummy_2d.cpython-311.pyc │ │ │ │ ├── cascade_transforms.py │ │ │ │ ├── deep_supervision_donwsampling.py │ │ │ │ ├── limited_length_multithreaded_augmenter.py │ │ │ │ ├── manipulating_data_dict.py │ │ │ │ ├── masking.py │ │ │ │ ├── region_based_training.py │ │ │ │ └── transforms_for_dummy_2d.py │ │ ├── dataloading │ │ │ ├── __init__.py │ │ │ ├── __pycache__ │ │ │ │ ├── __init__.cpython-311.pyc │ │ │ │ ├── base_data_loader.cpython-311.pyc │ │ │ │ ├── data_loader_2d.cpython-311.pyc │ │ │ │ ├── data_loader_3d.cpython-311.pyc │ │ │ │ ├── nnunet_dataset.cpython-311.pyc │ │ │ │ └── utils.cpython-311.pyc │ │ │ ├── base_data_loader.py │ │ │ ├── data_loader_2d.py │ │ │ ├── data_loader_3d.py │ │ │ ├── nnunet_dataset.py │ │ │ └── utils.py │ │ ├── logging │ │ │ ├── __init__.py │ │ │ ├── __pycache__ │ │ │ │ ├── __init__.cpython-311.pyc │ │ │ │ └── nnunet_logger.cpython-311.pyc │ │ │ └── nnunet_logger.py │ │ ├── loss │ │ │ ├── __init__.py │ │ │ ├── __pycache__ │ │ │ │ ├── __init__.cpython-311.pyc │ │ │ │ ├── compound_losses.cpython-311.pyc │ │ │ │ ├── deep_supervision.cpython-311.pyc │ │ │ │ ├── dice.cpython-311.pyc │ │ │ │ └── robust_ce_loss.cpython-311.pyc │ │ │ ├── compound_losses.py │ │ │ ├── deep_supervision.py │ │ │ ├── dice.py │ │ │ └── robust_ce_loss.py │ │ ├── lr_scheduler │ │ │ ├── __init__.py │ │ │ ├── __pycache__ │ │ │ │ ├── __init__.cpython-311.pyc │ │ │ │ └── polylr.cpython-311.pyc │ │ │ └── polylr.py │ │ └── nnUNetTrainer │ │ │ ├── __init__.py │ │ │ ├── __pycache__ │ │ │ ├── __init__.cpython-311.pyc │ │ │ ├── nnUNetTrainer.cpython-311.pyc │ │ │ ├── nnUNetTrainer_swin.cpython-311.pyc │ │ │ └── vit.cpython-311.pyc │ │ │ ├── nnUNetTrainer.py │ │ │ ├── nnUNetTrainer_swin.py │ │ │ ├── variants │ │ │ ├── __init__.py │ │ │ ├── __pycache__ │ │ │ │ └── __init__.cpython-311.pyc │ │ │ ├── benchmarking │ │ │ │ ├── __init__.py │ │ │ │ ├── __pycache__ │ │ │ │ │ ├── __init__.cpython-311.pyc │ │ │ │ │ ├── nnUNetTrainerBenchmark_5epochs.cpython-311.pyc │ │ │ │ │ └── nnUNetTrainerBenchmark_5epochs_noDataLoading.cpython-311.pyc │ │ │ │ ├── nnUNetTrainerBenchmark_5epochs.py │ │ │ │ └── nnUNetTrainerBenchmark_5epochs_noDataLoading.py │ │ │ ├── data_augmentation │ │ │ │ ├── __init__.py │ │ │ │ ├── __pycache__ │ │ │ │ │ ├── __init__.cpython-311.pyc │ │ │ │ │ ├── nnUNetTrainerDA5.cpython-311.pyc │ │ │ │ │ ├── nnUNetTrainerDAOrd0.cpython-311.pyc │ │ │ │ │ ├── nnUNetTrainerNoDA.cpython-311.pyc │ │ │ │ │ └── nnUNetTrainerNoMirroring.cpython-311.pyc │ │ │ │ ├── nnUNetTrainerDA5.py │ │ │ │ ├── nnUNetTrainerDAOrd0.py │ │ │ │ ├── nnUNetTrainerNoDA.py │ │ │ │ └── nnUNetTrainerNoMirroring.py │ │ │ ├── loss │ │ │ │ ├── __init__.py │ │ │ │ ├── __pycache__ │ │ │ │ │ ├── __init__.cpython-311.pyc │ │ │ │ │ ├── nnUNetTrainerCELoss.cpython-311.pyc │ │ │ │ │ ├── nnUNetTrainerDiceLoss.cpython-311.pyc │ │ │ │ │ └── nnUNetTrainerTopkLoss.cpython-311.pyc │ │ │ │ ├── nnUNetTrainerCELoss.py │ │ │ │ ├── nnUNetTrainerDiceLoss.py │ │ │ │ └── nnUNetTrainerTopkLoss.py │ │ │ ├── lr_schedule │ │ │ │ ├── __init__.py │ │ │ │ ├── __pycache__ │ │ │ │ │ ├── __init__.cpython-311.pyc │ │ │ │ │ └── nnUNetTrainerCosAnneal.cpython-311.pyc │ │ │ │ └── nnUNetTrainerCosAnneal.py │ │ │ ├── network_architecture │ │ │ │ ├── __init__.py │ │ │ │ ├── __pycache__ │ │ │ │ │ ├── __init__.cpython-311.pyc │ │ │ │ │ ├── nnUNetTrainerBN.cpython-311.pyc │ │ │ │ │ └── nnUNetTrainerNoDeepSupervision.cpython-311.pyc │ │ │ │ ├── nnUNetTrainerBN.py │ │ │ │ └── nnUNetTrainerNoDeepSupervision.py │ │ │ ├── optimizer │ │ │ │ ├── __init__.py │ │ │ │ ├── __pycache__ │ │ │ │ │ ├── __init__.cpython-311.pyc │ │ │ │ │ ├── nnUNetTrainerAdam.cpython-311.pyc │ │ │ │ │ └── nnUNetTrainerAdan.cpython-311.pyc │ │ │ │ ├── nnUNetTrainerAdam.py │ │ │ │ └── nnUNetTrainerAdan.py │ │ │ ├── sampling │ │ │ │ ├── __init__.py │ │ │ │ ├── __pycache__ │ │ │ │ │ ├── __init__.cpython-311.pyc │ │ │ │ │ └── nnUNetTrainer_probabilisticOversampling.cpython-311.pyc │ │ │ │ └── nnUNetTrainer_probabilisticOversampling.py │ │ │ └── training_length │ │ │ │ ├── __init__.py │ │ │ │ ├── __pycache__ │ │ │ │ ├── __init__.cpython-311.pyc │ │ │ │ └── nnUNetTrainer_Xepochs.cpython-311.pyc │ │ │ │ ├── nnUNetTrainer_Xepochs.py │ │ │ │ └── nnUNetTrainer_Xepochs_NoMirroring.py │ │ │ └── vit.py │ └── utilities │ │ ├── __init__.py │ │ ├── __pycache__ │ │ ├── __init__.cpython-311.pyc │ │ ├── __init__.cpython-39.pyc │ │ ├── collate_outputs.cpython-311.pyc │ │ ├── dataset_name_id_conversion.cpython-311.pyc │ │ ├── dataset_name_id_conversion.cpython-39.pyc │ │ ├── ddp_allgather.cpython-311.pyc │ │ ├── default_n_proc_DA.cpython-311.pyc │ │ ├── default_n_proc_DA.cpython-39.pyc │ │ ├── file_path_utilities.cpython-311.pyc │ │ ├── find_class_by_name.cpython-311.pyc │ │ ├── find_class_by_name.cpython-39.pyc │ │ ├── get_network_from_plans.cpython-311.pyc │ │ ├── helpers.cpython-311.pyc │ │ ├── helpers.cpython-39.pyc │ │ ├── json_export.cpython-311.pyc │ │ ├── json_export.cpython-39.pyc │ │ ├── network_initialization.cpython-311.pyc │ │ ├── utils.cpython-311.pyc │ │ └── utils.cpython-39.pyc │ │ ├── collate_outputs.py │ │ ├── dataset_name_id_conversion.py │ │ ├── ddp_allgather.py │ │ ├── default_n_proc_DA.py │ │ ├── file_path_utilities.py │ │ ├── find_class_by_name.py │ │ ├── get_network_from_plans.py │ │ ├── helpers.py │ │ ├── json_export.py │ │ ├── label_handling │ │ ├── __init__.py │ │ ├── __pycache__ │ │ │ ├── __init__.cpython-311.pyc │ │ │ ├── __init__.cpython-39.pyc │ │ │ ├── label_handling.cpython-311.pyc │ │ │ └── label_handling.cpython-39.pyc │ │ └── label_handling.py │ │ ├── network_initialization.py │ │ ├── overlay_plots.py │ │ ├── plans_handling │ │ ├── __init__.py │ │ ├── __pycache__ │ │ │ ├── __init__.cpython-311.pyc │ │ │ ├── __init__.cpython-39.pyc │ │ │ ├── plans_handler.cpython-311.pyc │ │ │ └── plans_handler.cpython-39.pyc │ │ └── plans_handler.py │ │ └── utils.py │ ├── pyproject.toml │ └── setup.py ├── LICENSE ├── README.md ├── assets ├── 10k.png ├── framework.png └── intro.png ├── jsons ├── HNSCC.json ├── Totalsegmentator_dataset.json ├── __init__.py ├── btcv.json ├── dataset_LUNA16_0.json ├── dataset_TCIAcovid19_0.json ├── flare23.json └── stoic21.json ├── models └── voco_head.py ├── optimizers ├── __init__.py └── lr_scheduler.py ├── requirements.txt ├── train.sh ├── utils ├── __init__.py ├── data_utils.py ├── ops.py └── utils.py └── voco_train.py /Finetune/AbdomenAtlas/Atlas_test.sh: -------------------------------------------------------------------------------- 1 | test_data_path=./test_examples/AbdomenAtlasTest/ 2 | save_prediction_path=./test_examples/AbdomenAtlasPredict/ 3 | 4 | torchrun --master_port=21472 Atlas_test.py \ 5 | --test_data_path $test_data_path --save_prediction_path $save_prediction_path -------------------------------------------------------------------------------- /Finetune/AbdomenAtlas/dataset/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/AbdomenAtlas/dataset/__init__.py -------------------------------------------------------------------------------- /Finetune/AbdomenAtlas/dataset/__pycache__/__init__.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/AbdomenAtlas/dataset/__pycache__/__init__.cpython-39.pyc -------------------------------------------------------------------------------- /Finetune/AbdomenAtlas/dataset/__pycache__/dataloader_test.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/AbdomenAtlas/dataset/__pycache__/dataloader_test.cpython-39.pyc -------------------------------------------------------------------------------- /Finetune/AbdomenAtlas/optimizers/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/AbdomenAtlas/optimizers/__init__.py -------------------------------------------------------------------------------- /Finetune/AbdomenAtlas/optimizers/__pycache__/__init__.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/AbdomenAtlas/optimizers/__pycache__/__init__.cpython-310.pyc -------------------------------------------------------------------------------- /Finetune/AbdomenAtlas/optimizers/__pycache__/__init__.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/AbdomenAtlas/optimizers/__pycache__/__init__.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/AbdomenAtlas/optimizers/__pycache__/__init__.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/AbdomenAtlas/optimizers/__pycache__/__init__.cpython-38.pyc -------------------------------------------------------------------------------- /Finetune/AbdomenAtlas/optimizers/__pycache__/__init__.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/AbdomenAtlas/optimizers/__pycache__/__init__.cpython-39.pyc -------------------------------------------------------------------------------- /Finetune/AbdomenAtlas/optimizers/__pycache__/lr_scheduler.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/AbdomenAtlas/optimizers/__pycache__/lr_scheduler.cpython-310.pyc -------------------------------------------------------------------------------- /Finetune/AbdomenAtlas/optimizers/__pycache__/lr_scheduler.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/AbdomenAtlas/optimizers/__pycache__/lr_scheduler.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/AbdomenAtlas/optimizers/__pycache__/lr_scheduler.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/AbdomenAtlas/optimizers/__pycache__/lr_scheduler.cpython-38.pyc -------------------------------------------------------------------------------- /Finetune/AbdomenAtlas/optimizers/__pycache__/lr_scheduler.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/AbdomenAtlas/optimizers/__pycache__/lr_scheduler.cpython-39.pyc -------------------------------------------------------------------------------- /Finetune/AbdomenAtlas/train.sh: -------------------------------------------------------------------------------- 1 | now=$(date +"%Y%m%d_%H%M%S") 2 | logdir=runs/logs 3 | mkdir -p $logdir 4 | 5 | data_dir=/project/medimgfmod/CT/AbdomenAtlasMini1.0/ 6 | cache_dataset=False 7 | cache_dir=/scratch/medimgfmod/CT/cache/Atlas 8 | 9 | torchrun --master_port=21472 main.py \ 10 | --data_dir $data_dir --cache_dataset $cache_dataset --cache_dir $cache_dir --logdir $logdir | tee $logdir/$now.txt -------------------------------------------------------------------------------- /Finetune/AbdomenAtlas/train.slurm: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # NOTE: Lines starting with "#SBATCH" are valid SLURM commands or statements, 4 | # while those starting with "#" and "##SBATCH" are comments. 5 | 6 | #SBATCH -J Atlas 7 | 8 | #SBATCH -t 72:00:00 #Maximum runtime of 48 hours 9 | 10 | # Enable email notificaitons when job begins and ends 11 | #SBATCH --mail-user=lwubf@connect.ust.hk #Update your email address 12 | #SBATCH --mail-type=begin 13 | #SBATCH --mail-type=end 14 | 15 | # Choose partition (queue) with "gpu" 16 | #SBATCH -p project 17 | 18 | # To use 24 cpu core and 1 gpu devices in a node 19 | #SBATCH -N 1 -n 16 --gres=gpu:1 20 | 21 | # Setup runtime environment if necessary 22 | source ~/.bashrc 23 | source activate nnunet 24 | 25 | # Go to the job submission directory and run your application 26 | cd /home/lwubf/AbdomenAtlas/ 27 | sh train.sh -------------------------------------------------------------------------------- /Finetune/AbdomenAtlas/utils/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/AbdomenAtlas/utils/__init__.py -------------------------------------------------------------------------------- /Finetune/AbdomenAtlas/utils/__pycache__/__init__.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/AbdomenAtlas/utils/__pycache__/__init__.cpython-310.pyc -------------------------------------------------------------------------------- /Finetune/AbdomenAtlas/utils/__pycache__/__init__.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/AbdomenAtlas/utils/__pycache__/__init__.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/AbdomenAtlas/utils/__pycache__/__init__.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/AbdomenAtlas/utils/__pycache__/__init__.cpython-38.pyc -------------------------------------------------------------------------------- /Finetune/AbdomenAtlas/utils/__pycache__/__init__.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/AbdomenAtlas/utils/__pycache__/__init__.cpython-39.pyc -------------------------------------------------------------------------------- /Finetune/AbdomenAtlas/utils/__pycache__/data_trans.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/AbdomenAtlas/utils/__pycache__/data_trans.cpython-39.pyc -------------------------------------------------------------------------------- /Finetune/AbdomenAtlas/utils/__pycache__/utils.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/AbdomenAtlas/utils/__pycache__/utils.cpython-310.pyc -------------------------------------------------------------------------------- /Finetune/AbdomenAtlas/utils/__pycache__/utils.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/AbdomenAtlas/utils/__pycache__/utils.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/AbdomenAtlas/utils/__pycache__/utils.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/AbdomenAtlas/utils/__pycache__/utils.cpython-38.pyc -------------------------------------------------------------------------------- /Finetune/AbdomenAtlas/utils/__pycache__/utils.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/AbdomenAtlas/utils/__pycache__/utils.cpython-39.pyc -------------------------------------------------------------------------------- /Finetune/AbdomenAtlas/utils/mixup.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import numpy as np 3 | 4 | 5 | def mixup(inputs): 6 | batch_size = inputs[0].size(0) 7 | rand = torch.randperm(batch_size) 8 | rand = [ra.tolist() for ra in rand] 9 | 10 | lam = int(np.random.beta(0.2, 0.2) * inputs[0].size(2)) 11 | new_inputs = [] 12 | 13 | for input in inputs: 14 | rand_input = input[rand] 15 | if np.random.rand() < 0.5: 16 | new_input = torch.cat([input[:, :, :, 0:lam, :], 17 | rand_input[:, :, :, lam:input.size(3), :]], dim=3) 18 | else: 19 | new_input = torch.cat([input[:, :, 0:lam, :, :], 20 | rand_input[:, :, lam:input.size(2), :, :]], dim=2) 21 | 22 | new_inputs.append(new_input) 23 | 24 | return new_inputs -------------------------------------------------------------------------------- /Finetune/Amos/dataset/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/Amos/dataset/__init__.py -------------------------------------------------------------------------------- /Finetune/Amos/optimizers/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/Amos/optimizers/__init__.py -------------------------------------------------------------------------------- /Finetune/Amos/train.sh: -------------------------------------------------------------------------------- 1 | now=$(date +"%Y%m%d_%H%M%S") 2 | logdir=runs/logs 3 | mkdir -p $logdir 4 | 5 | torchrun --master_port=21198 main.py \ 6 | --logdir $logdir | tee $logdir/$now.txt -------------------------------------------------------------------------------- /Finetune/Amos/utils/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/Amos/utils/__init__.py -------------------------------------------------------------------------------- /Finetune/BTCV/dataset/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/BTCV/dataset/__init__.py -------------------------------------------------------------------------------- /Finetune/BTCV/optimizers/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/BTCV/optimizers/__init__.py -------------------------------------------------------------------------------- /Finetune/BTCV/utils/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/BTCV/utils/__init__.py -------------------------------------------------------------------------------- /Finetune/BTCV/utils/utils.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 - 2022 MONAI Consortium 2 | # Licensed under the Apache License, Version 2.0 (the "License"); 3 | # you may not use this file except in compliance with the License. 4 | # You may obtain a copy of the License at 5 | # http://www.apache.org/licenses/LICENSE-2.0 6 | # Unless required by applicable law or agreed to in writing, software 7 | # distributed under the License is distributed on an "AS IS" BASIS, 8 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 | # See the License for the specific language governing permissions and 10 | # limitations under the License. 11 | 12 | import numpy as np 13 | import scipy.ndimage as ndimage 14 | import torch 15 | 16 | 17 | def resample_3d(img, target_size): 18 | imx, imy, imz = img.shape 19 | tx, ty, tz = target_size 20 | zoom_ratio = (float(tx) / float(imx), float(ty) / float(imy), float(tz) / float(imz)) 21 | img_resampled = ndimage.zoom(img, zoom_ratio, order=0, prefilter=False) 22 | return img_resampled 23 | 24 | 25 | def dice(x, y): 26 | intersect = np.sum(np.sum(np.sum(x * y))) 27 | y_sum = np.sum(np.sum(np.sum(y))) 28 | if y_sum == 0: 29 | return 0.0 30 | x_sum = np.sum(np.sum(np.sum(x))) 31 | return 2 * intersect / (x_sum + y_sum) 32 | 33 | 34 | class AverageMeter(object): 35 | def __init__(self): 36 | self.reset() 37 | 38 | def reset(self): 39 | self.val = 0 40 | self.avg = 0 41 | self.sum = 0 42 | self.count = 0 43 | 44 | def update(self, val, n=1): 45 | self.val = val 46 | self.sum += val * n 47 | self.count += n 48 | self.avg = np.where(self.count > 0, self.sum / self.count, self.sum) 49 | 50 | 51 | def distributed_all_gather( 52 | tensor_list, valid_batch_size=None, out_numpy=False, world_size=None, no_barrier=False, is_valid=None 53 | ): 54 | if world_size is None: 55 | world_size = torch.distributed.get_world_size() 56 | if valid_batch_size is not None: 57 | valid_batch_size = min(valid_batch_size, world_size) 58 | elif is_valid is not None: 59 | is_valid = torch.tensor(bool(is_valid), dtype=torch.bool, device=tensor_list[0].device) 60 | if not no_barrier: 61 | torch.distributed.barrier() 62 | tensor_list_out = [] 63 | with torch.no_grad(): 64 | if is_valid is not None: 65 | is_valid_list = [torch.zeros_like(is_valid) for _ in range(world_size)] 66 | torch.distributed.all_gather(is_valid_list, is_valid) 67 | is_valid = [x.item() for x in is_valid_list] 68 | for tensor in tensor_list: 69 | gather_list = [torch.zeros_like(tensor) for _ in range(world_size)] 70 | torch.distributed.all_gather(gather_list, tensor) 71 | if valid_batch_size is not None: 72 | gather_list = gather_list[:valid_batch_size] 73 | elif is_valid is not None: 74 | gather_list = [g for g, v in zip(gather_list, is_valid_list) if v] 75 | if out_numpy: 76 | gather_list = [t.cpu().numpy() for t in gather_list] 77 | tensor_list_out.append(gather_list) 78 | return tensor_list_out 79 | -------------------------------------------------------------------------------- /Finetune/CC-CCII/dataset/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/CC-CCII/dataset/__init__.py -------------------------------------------------------------------------------- /Finetune/CC-CCII/optimizers/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/CC-CCII/optimizers/__init__.py -------------------------------------------------------------------------------- /Finetune/CC-CCII/train.sh: -------------------------------------------------------------------------------- 1 | now=$(date +"%Y%m%d_%H%M%S") 2 | logdir=runs/logs 3 | mkdir -p $logdir 4 | 5 | torchrun --master_port=25584 main.py \ 6 | --logdir $logdir | tee $logdir/$now.txt -------------------------------------------------------------------------------- /Finetune/CC-CCII/utils/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/CC-CCII/utils/__init__.py -------------------------------------------------------------------------------- /Finetune/CC-CCII/utils/utils.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 - 2022 MONAI Consortium 2 | # Licensed under the Apache License, Version 2.0 (the "License"); 3 | # you may not use this file except in compliance with the License. 4 | # You may obtain a copy of the License at 5 | # http://www.apache.org/licenses/LICENSE-2.0 6 | # Unless required by applicable law or agreed to in writing, software 7 | # distributed under the License is distributed on an "AS IS" BASIS, 8 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 | # See the License for the specific language governing permissions and 10 | # limitations under the License. 11 | 12 | import numpy as np 13 | import scipy.ndimage as ndimage 14 | import torch 15 | 16 | 17 | def resample_3d(img, target_size): 18 | imx, imy, imz = img.shape 19 | tx, ty, tz = target_size 20 | zoom_ratio = (float(tx) / float(imx), float(ty) / float(imy), float(tz) / float(imz)) 21 | img_resampled = ndimage.zoom(img, zoom_ratio, order=0, prefilter=False) 22 | return img_resampled 23 | 24 | 25 | def dice(x, y): 26 | intersect = np.sum(np.sum(np.sum(x * y))) 27 | y_sum = np.sum(np.sum(np.sum(y))) 28 | if y_sum == 0: 29 | return 0.0 30 | x_sum = np.sum(np.sum(np.sum(x))) 31 | return 2 * intersect / (x_sum + y_sum) 32 | 33 | 34 | class AverageMeter(object): 35 | def __init__(self): 36 | self.reset() 37 | 38 | def reset(self): 39 | self.val = 0 40 | self.avg = 0 41 | self.sum = 0 42 | self.count = 0 43 | 44 | def update(self, val, n=1): 45 | self.val = val 46 | self.sum += val * n 47 | self.count += n 48 | self.avg = np.where(self.count > 0, self.sum / self.count, self.sum) 49 | 50 | 51 | def distributed_all_gather( 52 | tensor_list, valid_batch_size=None, out_numpy=False, world_size=None, no_barrier=False, is_valid=None 53 | ): 54 | if world_size is None: 55 | world_size = torch.distributed.get_world_size() 56 | if valid_batch_size is not None: 57 | valid_batch_size = min(valid_batch_size, world_size) 58 | elif is_valid is not None: 59 | is_valid = torch.tensor(bool(is_valid), dtype=torch.bool, device=tensor_list[0].device) 60 | if not no_barrier: 61 | torch.distributed.barrier() 62 | tensor_list_out = [] 63 | with torch.no_grad(): 64 | if is_valid is not None: 65 | is_valid_list = [torch.zeros_like(is_valid) for _ in range(world_size)] 66 | torch.distributed.all_gather(is_valid_list, is_valid) 67 | is_valid = [x.item() for x in is_valid_list] 68 | for tensor in tensor_list: 69 | gather_list = [torch.zeros_like(tensor) for _ in range(world_size)] 70 | torch.distributed.all_gather(gather_list, tensor) 71 | if valid_batch_size is not None: 72 | gather_list = gather_list[:valid_batch_size] 73 | elif is_valid is not None: 74 | gather_list = [g for g, v in zip(gather_list, is_valid_list) if v] 75 | if out_numpy: 76 | gather_list = [t.cpu().numpy() for t in gather_list] 77 | tensor_list_out.append(gather_list) 78 | return tensor_list_out 79 | -------------------------------------------------------------------------------- /Finetune/Flare22/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/Flare22/__init__.py -------------------------------------------------------------------------------- /Finetune/Flare22/dataset/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/Flare22/dataset/__init__.py -------------------------------------------------------------------------------- /Finetune/Flare22/optimizers/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/Flare22/optimizers/__init__.py -------------------------------------------------------------------------------- /Finetune/Flare22/train.sh: -------------------------------------------------------------------------------- 1 | now=$(date +"%Y%m%d_%H%M%S") 2 | logdir=runs/logs 3 | mkdir -p $logdir 4 | 5 | torchrun --master_port=21198 main.py \ 6 | --logdir $logdir | tee $logdir/$now.txt -------------------------------------------------------------------------------- /Finetune/Flare22/utils/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/Flare22/utils/__init__.py -------------------------------------------------------------------------------- /Finetune/MM-WHS/optimizers/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/MM-WHS/optimizers/__init__.py -------------------------------------------------------------------------------- /Finetune/MM-WHS/pretrained_models/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/MM-WHS/pretrained_models/__init__.py -------------------------------------------------------------------------------- /Finetune/MM-WHS/train.sh: -------------------------------------------------------------------------------- 1 | now=$(date +"%Y%m%d_%H%M%S") 2 | logdir=runs/logs 3 | mkdir -p $logdir 4 | 5 | torchrun --master_port=21120 --max-restart=10 main.py \ 6 | --logdir $logdir | tee $logdir/$now.txt -------------------------------------------------------------------------------- /Finetune/MM-WHS/utils/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/MM-WHS/utils/__init__.py -------------------------------------------------------------------------------- /Finetune/Word/dataset/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/Word/dataset/__init__.py -------------------------------------------------------------------------------- /Finetune/Word/optimizers/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/Word/optimizers/__init__.py -------------------------------------------------------------------------------- /Finetune/Word/train.sh: -------------------------------------------------------------------------------- 1 | now=$(date +"%Y%m%d_%H%M%S") 2 | logdir=runs/logs_swin_large_scratch 3 | mkdir -p $logdir 4 | 5 | torchrun --master_port=20482 main.py \ 6 | --logdir $logdir | tee $logdir/$now.txt -------------------------------------------------------------------------------- /Finetune/Word/train.slurm: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # NOTE: Lines starting with "#SBATCH" are valid SLURM commands or statements, 4 | # while those starting with "#" and "##SBATCH" are comments. 5 | 6 | #SBATCH -J Omni_btcv 7 | 8 | #SBATCH -t 72:00:00 #Maximum runtime of 48 hours 9 | 10 | # Enable email notificaitons when job begins and ends 11 | #SBATCH --mail-user=lwubf@connect.ust.hk #Update your email address 12 | #SBATCH --mail-type=begin 13 | #SBATCH --mail-type=end 14 | 15 | # Choose partition (queue) with "gpu" 16 | #SBATCH -p project 17 | 18 | # To use 24 cpu core and 1 gpu devices in a node 19 | #SBATCH -N 1 -n 16 --gres=gpu:1 20 | 21 | # Setup runtime environment if necessary 22 | source ~/.bashrc 23 | source activate nnunet 24 | 25 | # Go to the job submission directory and run your application 26 | cd /home/lwubf/SwinUNETR/Omni/ 27 | sh train.sh -------------------------------------------------------------------------------- /Finetune/Word/utils/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/Word/utils/__init__.py -------------------------------------------------------------------------------- /Finetune/nnUNet/documentation/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/documentation/__init__.py -------------------------------------------------------------------------------- /Finetune/nnUNet/documentation/assets/HI_Logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/documentation/assets/HI_Logo.png -------------------------------------------------------------------------------- /Finetune/nnUNet/documentation/assets/amos2022_sparseseg10.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/documentation/assets/amos2022_sparseseg10.png -------------------------------------------------------------------------------- /Finetune/nnUNet/documentation/assets/amos2022_sparseseg10_2d.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/documentation/assets/amos2022_sparseseg10_2d.png -------------------------------------------------------------------------------- /Finetune/nnUNet/documentation/assets/dkfz_logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/documentation/assets/dkfz_logo.png -------------------------------------------------------------------------------- /Finetune/nnUNet/documentation/assets/nnU-Net_overview.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/documentation/assets/nnU-Net_overview.png -------------------------------------------------------------------------------- /Finetune/nnUNet/documentation/assets/regions_vs_labels.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/documentation/assets/regions_vs_labels.png -------------------------------------------------------------------------------- /Finetune/nnUNet/documentation/assets/sparse_annotation_amos.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/documentation/assets/sparse_annotation_amos.png -------------------------------------------------------------------------------- /Finetune/nnUNet/documentation/convert_msd_dataset.md: -------------------------------------------------------------------------------- 1 | Use `nnUNetv2_convert_MSD_dataset`. 2 | 3 | Read `nnUNetv2_convert_MSD_dataset -h` for usage instructions. -------------------------------------------------------------------------------- /Finetune/nnUNet/documentation/dataset_format_inference.md: -------------------------------------------------------------------------------- 1 | # Data format for Inference 2 | Read the documentation on the overall [data format](dataset_format.md) first! 3 | 4 | The data format for inference must match the one used for the raw data (**specifically, the images must be in exactly 5 | the same format as in the imagesTr folder**). As before, the filenames must start with a 6 | unique identifier, followed by a 4-digit modality identifier. Here is an example for two different datasets: 7 | 8 | 1) Task005_Prostate: 9 | 10 | This task has 2 modalities, so the files in the input folder must look like this: 11 | 12 | input_folder 13 | ├── prostate_03_0000.nii.gz 14 | ├── prostate_03_0001.nii.gz 15 | ├── prostate_05_0000.nii.gz 16 | ├── prostate_05_0001.nii.gz 17 | ├── prostate_08_0000.nii.gz 18 | ├── prostate_08_0001.nii.gz 19 | ├── ... 20 | 21 | _0000 has to be the T2 image and _0001 has to be the ADC image (as specified by 'channel_names' in the 22 | dataset.json), exactly the same as was used for training. 23 | 24 | 2) Task002_Heart: 25 | 26 | imagesTs 27 | ├── la_001_0000.nii.gz 28 | ├── la_002_0000.nii.gz 29 | ├── la_006_0000.nii.gz 30 | ├── ... 31 | 32 | Task002 only has one modality, so each case only has one _0000.nii.gz file. 33 | 34 | 35 | The segmentations in the output folder will be named {CASE_IDENTIFIER}.nii.gz (omitting the modality identifier). 36 | 37 | Remember that the file format used for inference (.nii.gz in this example) must be the same as was used for training 38 | (and as was specified in 'file_ending' in the dataset.json)! 39 | -------------------------------------------------------------------------------- /Finetune/nnUNet/documentation/explanation_normalization.md: -------------------------------------------------------------------------------- 1 | # Intensity normalization in nnU-Net 2 | 3 | The type of intensity normalization applied in nnU-Net can be controlled via the `channel_names` (former `modalities`) 4 | entry in the dataset.json. Just like the old nnU-Net, per-channel z-scoring as well as dataset-wide z-scoring based on 5 | foreground intensities are supported. However, there have been a few additions as well. 6 | 7 | Reminder: The `channel_names` entry typically looks like this: 8 | 9 | "channel_names": { 10 | "0": "T2", 11 | "1": "ADC" 12 | }, 13 | 14 | It has as many entries as there are input channels for the given dataset. 15 | 16 | To tell you a secret, nnU-Net does not really care what your channels are called. We just use this to determine what normalization 17 | scheme will be used for the given dataset. nnU-Net requires you to specify a normalization strategy for each of your input channels! 18 | If you enter a channel name that is not in the following list, the default (`zscore`) will be used. 19 | 20 | Here is a list of currently available normalization schemes: 21 | 22 | - `CT`: Perform CT normalization. Specifically, collect intensity values from the foreground classes (all but the 23 | background and ignore) from all training cases, compute the mean, standard deviation as well as the 0.5 and 24 | 99.5 percentile of the values. Then clip to the percentiles, followed by subtraction of the mean and division with the 25 | standard deviation. The normalization that is applied is the same for each training case (for this input channel). 26 | The values used by nnU-Net for normalization are stored in the `foreground_intensity_properties_per_channel` entry in the 27 | corresponding plans file. This normalization is suitable for modalities presenting physical quantities such as CT 28 | images and ADC maps. 29 | - `noNorm` : do not perform any normalization at all 30 | - `rescale_to_0_1`: rescale the intensities to [0, 1] 31 | - `rgb_to_0_1`: assumes uint8 inputs. Divides by 255 to rescale uint8 to [0, 1] 32 | - `zscore`/anything else: perform z-scoring (subtract mean and standard deviation) separately for each train case 33 | 34 | **Important:** The nnU-Net default is to perform 'CT' normalization for CT images and 'zscore' for everything else! If 35 | you deviate from that path, make sure to benchmark whether that actually improves results! 36 | 37 | # How to implement custom normalization strategies? 38 | - Head over to nnunetv2/preprocessing/normalization 39 | - implement a new image normalization class by deriving from ImageNormalization 40 | - register it in nnunetv2/preprocessing/normalization/map_channel_name_to_normalization.py:channel_name_to_normalization_mapping. 41 | This is where you specify a channel name that should be associated with it 42 | - use it by specifying the correct channel_name 43 | 44 | Normalization can only be applied to one channel at a time. There is currently no way of implementing a normalization scheme 45 | that gets multiple channels as input to be used jointly! -------------------------------------------------------------------------------- /Finetune/nnUNet/documentation/manual_data_splits.md: -------------------------------------------------------------------------------- 1 | # How to generate custom splits in nnU-Net 2 | 3 | Sometimes, the default 5-fold cross-validation split by nnU-Net does not fit a project. Maybe you want to run 3-fold 4 | cross-validation instead? Or maybe your training cases cannot be split randomly and require careful stratification. 5 | Fear not, for nnU-Net has got you covered (it really can do anything <3). 6 | 7 | The splits nnU-Net uses are generated in the `do_split` function of nnUNetTrainer. This function will first look for 8 | existing splits, stored as a file, and if no split exists it will create one. So if you wish to influence the split, 9 | manually creating a split file that will then be recognized and used is the way to go! 10 | 11 | The split file is located in the `nnUNet_preprocessed/DATASETXXX_NAME` folder. So it is best practice to first 12 | populate this folder by running `nnUNetv2_plan_and_preproccess`. 13 | 14 | Splits are stored as a .json file. They are a simple python list. The length of that list is the number of splits it 15 | contains (so it's 5 in the default nnU-Net). Each list entry is a dictionary with keys 'train' and 'val'. Values are 16 | again simply lists with the train identifiers in each set. To illustrate this, I am just messing with the Dataset002 17 | file as an example: 18 | 19 | ```commandline 20 | In [1]: from batchgenerators.utilities.file_and_folder_operations import load_json 21 | 22 | In [2]: splits = load_json('splits_final.json') 23 | 24 | In [3]: len(splits) 25 | Out[3]: 5 26 | 27 | In [4]: splits[0].keys() 28 | Out[4]: dict_keys(['train', 'val']) 29 | 30 | In [5]: len(splits[0]['train']) 31 | Out[5]: 16 32 | 33 | In [6]: len(splits[0]['val']) 34 | Out[6]: 4 35 | 36 | In [7]: print(splits[0]) 37 | {'train': ['la_003', 'la_004', 'la_005', 'la_009', 'la_010', 'la_011', 'la_014', 'la_017', 'la_018', 'la_019', 'la_020', 'la_022', 'la_023', 'la_026', 'la_029', 'la_030'], 38 | 'val': ['la_007', 'la_016', 'la_021', 'la_024']} 39 | ``` 40 | 41 | If you are still not sure what splits are supposed to look like, simply download some reference dataset from the 42 | [Medical Decathlon](http://medicaldecathlon.com/), start some training (to generate the splits) and manually inspect 43 | the .json file with your text editor of choice! 44 | 45 | In order to generate your custom splits, all you need to do is reproduce the data structure explained above and save it as 46 | `splits_final.json` in the `nnUNet_preprocessed/DATASETXXX_NAME` folder. Then use `nnUNetv2_train` etc. as usual. -------------------------------------------------------------------------------- /Finetune/nnUNet/documentation/run_inference_with_pretrained_models.md: -------------------------------------------------------------------------------- 1 | # How to run inference with pretrained models 2 | **Important:** Pretrained weights from nnU-Net v1 are NOT compatible with V2. You will need to retrain with the new 3 | version. But honestly, you already have a fully trained model with which you can run inference (in v1), so 4 | just continue using that! 5 | 6 | Not yet available for V2 :-( 7 | If you wish to run inference with pretrained models, check out the old nnU-Net for now. We are working on this full steam! 8 | -------------------------------------------------------------------------------- /Finetune/nnUNet/documentation/setting_up_paths.md: -------------------------------------------------------------------------------- 1 | # Setting up Paths 2 | 3 | nnU-Net relies on environment variables to know where raw data, preprocessed data and trained model weights are stored. 4 | To use the full functionality of nnU-Net, the following three environment variables must be set: 5 | 6 | 1) `nnUNet_raw`: This is where you place the raw datasets. This folder will have one subfolder for each dataset names 7 | DatasetXXX_YYY where XXX is a 3-digit identifier (such as 001, 002, 043, 999, ...) and YYY is the (unique) 8 | dataset name. The datasets must be in nnU-Net format, see [here](dataset_format.md). 9 | 10 | Example tree structure: 11 | ``` 12 | nnUNet_raw/Dataset001_NAME1 13 | ├── dataset.json 14 | ├── imagesTr 15 | │   ├── ... 16 | ├── imagesTs 17 | │   ├── ... 18 | └── labelsTr 19 | ├── ... 20 | nnUNet_raw/Dataset002_NAME2 21 | ├── dataset.json 22 | ├── imagesTr 23 | │   ├── ... 24 | ├── imagesTs 25 | │   ├── ... 26 | └── labelsTr 27 | ├── ... 28 | ``` 29 | 30 | 2) `nnUNet_preprocessed`: This is the folder where the preprocessed data will be saved. The data will also be read from 31 | this folder during training. It is important that this folder is located on a drive with low access latency and high 32 | throughput (such as a nvme SSD (PCIe gen 3 is sufficient)). 33 | 34 | 3) `nnUNet_results`: This specifies where nnU-Net will save the model weights. If pretrained models are downloaded, this 35 | is where it will save them. 36 | 37 | ### How to set environment variables 38 | See [here](set_environment_variables.md). -------------------------------------------------------------------------------- /Finetune/nnUNet/documentation/tldr_migration_guide_from_v1.md: -------------------------------------------------------------------------------- 1 | # TLDR Migration Guide from nnU-Net V1 2 | 3 | - nnU-Net V2 can be installed simultaneously with V1. They won't get in each other's way 4 | - The environment variables needed for V2 have slightly different names. Read [this](setting_up_paths.md). 5 | - nnU-Net V2 datasets are called DatasetXXX_NAME. Not Task. 6 | - Datasets have the same structure (imagesTr, labelsTr, dataset.json) but we now support more 7 | [file types](dataset_format.md#supported-file-formats). The dataset.json is simplified. Use `generate_dataset_json` 8 | from nnunetv2.dataset_conversion.generate_dataset_json.py. 9 | - Careful: labels are now no longer declared as value:name but name:value. This has to do with [hierarchical labels](region_based_training.md). 10 | - nnU-Net v2 commands start with `nnUNetv2...`. They work mostly (but not entirely) the same. Just use the `-h` option. 11 | - You can transfer your V1 raw datasets to V2 with `nnUNetv2_convert_old_nnUNet_dataset`. You cannot transfer trained 12 | models. Continue to use the old nnU-Net Version for making inference with those. 13 | - These are the commands you are most likely to be using (in that order) 14 | - `nnUNetv2_plan_and_preprocess`. Example: `nnUNetv2_plan_and_preprocess -d 2` 15 | - `nnUNetv2_train`. Example: `nnUNetv2_train 2 3d_fullres 0` 16 | - `nnUNetv2_find_best_configuration`. Example: `nnUNetv2_find_best_configuration 2 -c 2d 3d_fullres`. This command 17 | will now create a `inference_instructions.txt` file in your `nnUNet_preprocessed/DatasetXXX_NAME/` folder which 18 | tells you exactly how to do inference. 19 | - `nnUNetv2_predict`. Example: `nnUNetv2_predict -i INPUT_FOLDER -o OUTPUT_FOLDER -c 3d_fullres -d 2` 20 | - `nnUNetv2_apply_postprocessing` (see inference_instructions.txt) 21 | -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2.egg-info/dependency_links.txt: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2.egg-info/entry_points.txt: -------------------------------------------------------------------------------- 1 | [console_scripts] 2 | nnUNetv2_accumulate_crossval_results = nnunetv2.evaluation.find_best_configuration:accumulate_crossval_results_entry_point 3 | nnUNetv2_apply_postprocessing = nnunetv2.postprocessing.remove_connected_components:entry_point_apply_postprocessing 4 | nnUNetv2_convert_MSD_dataset = nnunetv2.dataset_conversion.convert_MSD_dataset:entry_point 5 | nnUNetv2_convert_old_nnUNet_dataset = nnunetv2.dataset_conversion.convert_raw_dataset_from_old_nnunet_format:convert_entry_point 6 | nnUNetv2_determine_postprocessing = nnunetv2.postprocessing.remove_connected_components:entry_point_determine_postprocessing_folder 7 | nnUNetv2_download_pretrained_model_by_url = nnunetv2.model_sharing.entry_points:download_by_url 8 | nnUNetv2_ensemble = nnunetv2.ensembling.ensemble:entry_point_ensemble_folders 9 | nnUNetv2_evaluate_folder = nnunetv2.evaluation.evaluate_predictions:evaluate_folder_entry_point 10 | nnUNetv2_evaluate_simple = nnunetv2.evaluation.evaluate_predictions:evaluate_simple_entry_point 11 | nnUNetv2_export_model_to_zip = nnunetv2.model_sharing.entry_points:export_pretrained_model_entry 12 | nnUNetv2_extract_fingerprint = nnunetv2.experiment_planning.plan_and_preprocess_entrypoints:extract_fingerprint_entry 13 | nnUNetv2_find_best_configuration = nnunetv2.evaluation.find_best_configuration:find_best_configuration_entry_point 14 | nnUNetv2_install_pretrained_model_from_zip = nnunetv2.model_sharing.entry_points:install_from_zip_entry_point 15 | nnUNetv2_move_plans_between_datasets = nnunetv2.experiment_planning.plans_for_pretraining.move_plans_between_datasets:entry_point_move_plans_between_datasets 16 | nnUNetv2_plan_and_preprocess = nnunetv2.experiment_planning.plan_and_preprocess_entrypoints:plan_and_preprocess_entry 17 | nnUNetv2_plan_experiment = nnunetv2.experiment_planning.plan_and_preprocess_entrypoints:plan_experiment_entry 18 | nnUNetv2_plot_overlay_pngs = nnunetv2.utilities.overlay_plots:entry_point_generate_overlay 19 | nnUNetv2_predict = nnunetv2.inference.predict_from_raw_data:predict_entry_point 20 | nnUNetv2_predict_from_modelfolder = nnunetv2.inference.predict_from_raw_data:predict_entry_point_modelfolder 21 | nnUNetv2_preprocess = nnunetv2.experiment_planning.plan_and_preprocess_entrypoints:preprocess_entry 22 | nnUNetv2_train = nnunetv2.run.run_training:run_training_entry 23 | -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2.egg-info/requires.txt: -------------------------------------------------------------------------------- 1 | torch>=2.0.0 2 | acvl-utils>=0.2 3 | dynamic-network-architectures>=0.2 4 | tqdm 5 | dicom2nifti 6 | scipy 7 | batchgenerators>=0.25 8 | numpy 9 | scikit-learn 10 | scikit-image>=0.19.3 11 | SimpleITK>=2.2.1 12 | pandas 13 | graphviz 14 | tifffile 15 | requests 16 | nibabel 17 | matplotlib 18 | seaborn 19 | imagecodecs 20 | yacs 21 | 22 | [dev] 23 | black 24 | ruff 25 | pre-commit 26 | -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2.egg-info/top_level.txt: -------------------------------------------------------------------------------- 1 | nnunetv2 2 | -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/__init__.py -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/batch_running/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/batch_running/__init__.py -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/batch_running/benchmarking/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/batch_running/benchmarking/__init__.py -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/batch_running/benchmarking/generate_benchmarking_commands.py: -------------------------------------------------------------------------------- 1 | if __name__ == '__main__': 2 | """ 3 | This code probably only works within the DKFZ infrastructure (using LSF). You will need to adapt it to your scheduler! 4 | """ 5 | gpu_models = [#'NVIDIAA100_PCIE_40GB', 'NVIDIAGeForceRTX2080Ti', 'NVIDIATITANRTX', 'TeslaV100_SXM2_32GB', 6 | 'NVIDIAA100_SXM4_40GB']#, 'TeslaV100_PCIE_32GB'] 7 | datasets = [2, 3, 4, 5] 8 | trainers = ['nnUNetTrainerBenchmark_5epochs', 'nnUNetTrainerBenchmark_5epochs_noDataLoading'] 9 | plans = ['nnUNetPlans'] 10 | configs = ['2d', '2d_bs3x', '2d_bs6x', '3d_fullres', '3d_fullres_bs3x', '3d_fullres_bs6x'] 11 | num_gpus = 1 12 | 13 | benchmark_configurations = {d: configs for d in datasets} 14 | 15 | exclude_hosts = "-R \"select[hname!='e230-dgxa100-1']'\"" 16 | resources = "-R \"tensorcore\"" 17 | queue = "-q gpu" 18 | preamble = "-L /bin/bash \"source ~/load_env_torch210.sh && " 19 | train_command = 'nnUNet_compile=False nnUNet_results=/dkfz/cluster/gpu/checkpoints/OE0441/isensee/nnUNet_results_remake_benchmark nnUNetv2_train' 20 | 21 | folds = (0, ) 22 | 23 | use_these_modules = { 24 | tr: plans for tr in trainers 25 | } 26 | 27 | additional_arguments = f' -num_gpus {num_gpus}' # '' 28 | 29 | output_file = "/home/isensee/deleteme.txt" 30 | with open(output_file, 'w') as f: 31 | for g in gpu_models: 32 | gpu_requirements = f"-gpu num={num_gpus}:j_exclusive=yes:gmodel={g}" 33 | for tr in use_these_modules.keys(): 34 | for p in use_these_modules[tr]: 35 | for dataset in benchmark_configurations.keys(): 36 | for config in benchmark_configurations[dataset]: 37 | for fl in folds: 38 | command = f'bsub {exclude_hosts} {resources} {queue} {gpu_requirements} {preamble} {train_command} {dataset} {config} {fl} -tr {tr} -p {p}' 39 | if additional_arguments is not None and len(additional_arguments) > 0: 40 | command += f' {additional_arguments}' 41 | f.write(f'{command}\"\n') -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/batch_running/collect_results_custom_Decathlon_2d.py: -------------------------------------------------------------------------------- 1 | from batchgenerators.utilities.file_and_folder_operations import * 2 | 3 | from nnunetv2.batch_running.collect_results_custom_Decathlon import collect_results, summarize 4 | from nnunetv2.paths import nnUNet_results 5 | 6 | if __name__ == '__main__': 7 | use_these_trainers = { 8 | 'nnUNetTrainer': ('nnUNetPlans', ), 9 | } 10 | all_results_file = join(nnUNet_results, 'hrnet_results.csv') 11 | datasets = [2, 3, 4, 17, 20, 24, 27, 38, 55, 64, 82] 12 | collect_results(use_these_trainers, datasets, all_results_file) 13 | 14 | folds = (0, ) 15 | configs = ('2d', ) 16 | output_file = join(nnUNet_results, 'hrnet_results_summary_fold0.csv') 17 | summarize(all_results_file, output_file, folds, configs, datasets, use_these_trainers) 18 | 19 | -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/batch_running/release_trainings/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/batch_running/release_trainings/__init__.py -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/batch_running/release_trainings/nnunetv2_v1/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/batch_running/release_trainings/nnunetv2_v1/__init__.py -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/configuration.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from nnunetv2.utilities.default_n_proc_DA import get_allowed_n_proc_DA 4 | 5 | default_num_processes = 8 if 'nnUNet_def_n_proc' not in os.environ else int(os.environ['nnUNet_def_n_proc']) 6 | 7 | ANISO_THRESHOLD = 3 # determines when a sample is considered anisotropic (3 means that the spacing in the low 8 | # resolution axis must be 3x as large as the next largest spacing) 9 | 10 | default_n_proc_DA = get_allowed_n_proc_DA() 11 | -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/dataset_conversion/Dataset115_EMIDEC.py: -------------------------------------------------------------------------------- 1 | import shutil 2 | from pathlib import Path 3 | 4 | from nnunetv2.dataset_conversion.Dataset027_ACDC import make_out_dirs 5 | from nnunetv2.dataset_conversion.generate_dataset_json import generate_dataset_json 6 | 7 | 8 | def copy_files(src_data_dir: Path, src_test_dir: Path, train_dir: Path, labels_dir: Path, test_dir: Path): 9 | """Copy files from the EMIDEC dataset to the nnUNet dataset folder. Returns the number of training cases.""" 10 | patients_train = sorted([f for f in src_data_dir.iterdir() if f.is_dir()]) 11 | patients_test = sorted([f for f in src_test_dir.iterdir() if f.is_dir()]) 12 | 13 | # Copy training files and corresponding labels. 14 | for patient in patients_train: 15 | train_file = patient / "Images" / f"{patient.name}.nii.gz" 16 | label_file = patient / "Contours" / f"{patient.name}.nii.gz" 17 | shutil.copy(train_file, train_dir / f"{train_file.stem.split('.')[0]}_0000.nii.gz") 18 | shutil.copy(label_file, labels_dir) 19 | 20 | # Copy test files. 21 | for patient in patients_test: 22 | test_file = patient / "Images" / f"{patient.name}.nii.gz" 23 | shutil.copy(test_file, test_dir / f"{test_file.stem.split('.')[0]}_0000.nii.gz") 24 | 25 | return len(patients_train) 26 | 27 | 28 | def convert_emidec(src_data_dir: str, src_test_dir: str, dataset_id=27): 29 | out_dir, train_dir, labels_dir, test_dir = make_out_dirs(dataset_id=dataset_id, task_name="EMIDEC") 30 | num_training_cases = copy_files(Path(src_data_dir), Path(src_test_dir), train_dir, labels_dir, test_dir) 31 | 32 | generate_dataset_json( 33 | str(out_dir), 34 | channel_names={ 35 | 0: "cineMRI", 36 | }, 37 | labels={ 38 | "background": 0, 39 | "cavity": 1, 40 | "normal_myocardium": 2, 41 | "myocardial_infarction": 3, 42 | "no_reflow": 4, 43 | }, 44 | file_ending=".nii.gz", 45 | num_training_cases=num_training_cases, 46 | ) 47 | 48 | 49 | if __name__ == "__main__": 50 | import argparse 51 | 52 | parser = argparse.ArgumentParser() 53 | parser.add_argument("-i", "--input_dir", type=str, help="The EMIDEC dataset directory.") 54 | parser.add_argument("-t", "--test_dir", type=str, help="The EMIDEC test set directory.") 55 | parser.add_argument( 56 | "-d", "--dataset_id", required=False, type=int, default=115, help="nnU-Net Dataset ID, default: 115" 57 | ) 58 | args = parser.parse_args() 59 | print("Converting...") 60 | convert_emidec(args.input_dir, args.test_dir, args.dataset_id) 61 | print("Done!") 62 | -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/dataset_conversion/Dataset220_KiTS2023.py: -------------------------------------------------------------------------------- 1 | from batchgenerators.utilities.file_and_folder_operations import * 2 | import shutil 3 | from generate_dataset_json import generate_dataset_json 4 | #from nnunetv2.paths import nnUNet_raw 5 | nnUNet_raw = '/data/linshan/nnunet_data/nnUNet_raw' 6 | 7 | def convert_kits2023(kits_base_dir: str, nnunet_dataset_id: int = 220): 8 | task_name = "KiTS2023" 9 | 10 | foldername = "Dataset%03.0d_%s" % (nnunet_dataset_id, task_name) 11 | 12 | # setting up nnU-Net folders 13 | out_base = join(nnUNet_raw, foldername) 14 | imagestr = join(out_base, "imagesTr") 15 | labelstr = join(out_base, "labelsTr") 16 | maybe_mkdir_p(imagestr) 17 | maybe_mkdir_p(labelstr) 18 | 19 | cases = subdirs(kits_base_dir, prefix='case_', join=False) 20 | for tr in cases: 21 | shutil.copy(join(kits_base_dir, tr, 'imaging.nii.gz'), join(imagestr, f'{tr}_0000.nii.gz')) 22 | shutil.copy(join(kits_base_dir, tr, 'segmentation.nii.gz'), join(labelstr, f'{tr}.nii.gz')) 23 | 24 | generate_dataset_json(out_base, {0: "CT"}, 25 | labels={ 26 | "background": 0, 27 | "kidney": (1, 2, 3), 28 | "masses": (2, 3), 29 | "tumor": 2 30 | }, 31 | regions_class_order=(1, 3, 2), 32 | num_training_cases=len(cases), file_ending='.nii.gz', 33 | dataset_name=task_name, reference='none', 34 | release='prerelease', 35 | overwrite_image_reader_writer='NibabelIOWithReorient', 36 | description="KiTS2023") 37 | 38 | 39 | if __name__ == '__main__': 40 | import argparse 41 | parser = argparse.ArgumentParser() 42 | parser.add_argument('input_folder', type=str, 43 | help="The downloaded and extracted KiTS2023 dataset (must have case_XXXXX subfolders)") 44 | parser.add_argument('-d', required=False, type=int, default=220, help='nnU-Net Dataset ID, default: 220') 45 | args = parser.parse_args() 46 | amos_base = args.input_folder 47 | convert_kits2023(amos_base, args.d) 48 | 49 | # /media/isensee/raw_data/raw_datasets/kits23/dataset 50 | 51 | -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/dataset_conversion/Dataset988_dummyDataset4.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from batchgenerators.utilities.file_and_folder_operations import * 4 | 5 | from nnunetv2.paths import nnUNet_raw 6 | from nnunetv2.utilities.utils import get_filenames_of_train_images_and_targets 7 | 8 | if __name__ == '__main__': 9 | # creates a dummy dataset where there are no files in imagestr and labelstr 10 | source_dataset = 'Dataset004_Hippocampus' 11 | 12 | target_dataset = 'Dataset987_dummyDataset4' 13 | target_dataset_dir = join(nnUNet_raw, target_dataset) 14 | maybe_mkdir_p(target_dataset_dir) 15 | 16 | dataset = get_filenames_of_train_images_and_targets(join(nnUNet_raw, source_dataset)) 17 | 18 | # the returned dataset will have absolute paths. We should use relative paths so that you can freely copy 19 | # datasets around between systems. As long as the source dataset is there it will continue working even if 20 | # nnUNet_raw is in different locations 21 | 22 | # paths must be relative to target_dataset_dir!!! 23 | for k in dataset.keys(): 24 | dataset[k]['label'] = os.path.relpath(dataset[k]['label'], target_dataset_dir) 25 | dataset[k]['images'] = [os.path.relpath(i, target_dataset_dir) for i in dataset[k]['images']] 26 | 27 | # load old dataset.json 28 | dataset_json = load_json(join(nnUNet_raw, source_dataset, 'dataset.json')) 29 | dataset_json['dataset'] = dataset 30 | 31 | # save 32 | save_json(dataset_json, join(target_dataset_dir, 'dataset.json'), sort_keys=False) 33 | -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/dataset_conversion/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/dataset_conversion/__init__.py -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/dataset_conversion/__pycache__/__init__.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/dataset_conversion/__pycache__/__init__.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/dataset_conversion/__pycache__/__init__.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/dataset_conversion/__pycache__/__init__.cpython-39.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/dataset_conversion/__pycache__/convert_MSD_dataset.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/dataset_conversion/__pycache__/convert_MSD_dataset.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/dataset_conversion/__pycache__/convert_raw_dataset_from_old_nnunet_format.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/dataset_conversion/__pycache__/convert_raw_dataset_from_old_nnunet_format.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/dataset_conversion/__pycache__/generate_dataset_json.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/dataset_conversion/__pycache__/generate_dataset_json.cpython-310.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/dataset_conversion/__pycache__/generate_dataset_json.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/dataset_conversion/__pycache__/generate_dataset_json.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/dataset_conversion/__pycache__/generate_dataset_json.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/dataset_conversion/__pycache__/generate_dataset_json.cpython-39.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/dataset_conversion/convert_raw_dataset_from_old_nnunet_format.py: -------------------------------------------------------------------------------- 1 | import shutil 2 | from copy import deepcopy 3 | 4 | from batchgenerators.utilities.file_and_folder_operations import join, maybe_mkdir_p, isdir, load_json, save_json 5 | from nnunetv2.paths import nnUNet_raw 6 | 7 | 8 | def convert(source_folder, target_dataset_name): 9 | """ 10 | remember that old tasks were called TaskXXX_YYY and new ones are called DatasetXXX_YYY 11 | source_folder 12 | """ 13 | if isdir(join(nnUNet_raw, target_dataset_name)): 14 | raise RuntimeError(f'Target dataset name {target_dataset_name} already exists. Aborting... ' 15 | f'(we might break something). If you are sure you want to proceed, please manually ' 16 | f'delete {join(nnUNet_raw, target_dataset_name)}') 17 | maybe_mkdir_p(join(nnUNet_raw, target_dataset_name)) 18 | shutil.copytree(join(source_folder, 'imagesTr'), join(nnUNet_raw, target_dataset_name, 'imagesTr')) 19 | shutil.copytree(join(source_folder, 'labelsTr'), join(nnUNet_raw, target_dataset_name, 'labelsTr')) 20 | if isdir(join(source_folder, 'imagesTs')): 21 | shutil.copytree(join(source_folder, 'imagesTs'), join(nnUNet_raw, target_dataset_name, 'imagesTs')) 22 | if isdir(join(source_folder, 'labelsTs')): 23 | shutil.copytree(join(source_folder, 'labelsTs'), join(nnUNet_raw, target_dataset_name, 'labelsTs')) 24 | if isdir(join(source_folder, 'imagesVal')): 25 | shutil.copytree(join(source_folder, 'imagesVal'), join(nnUNet_raw, target_dataset_name, 'imagesVal')) 26 | if isdir(join(source_folder, 'labelsVal')): 27 | shutil.copytree(join(source_folder, 'labelsVal'), join(nnUNet_raw, target_dataset_name, 'labelsVal')) 28 | shutil.copy(join(source_folder, 'dataset.json'), join(nnUNet_raw, target_dataset_name)) 29 | 30 | dataset_json = load_json(join(nnUNet_raw, target_dataset_name, 'dataset.json')) 31 | del dataset_json['tensorImageSize'] 32 | del dataset_json['numTest'] 33 | del dataset_json['training'] 34 | del dataset_json['test'] 35 | dataset_json['channel_names'] = deepcopy(dataset_json['modality']) 36 | del dataset_json['modality'] 37 | 38 | dataset_json['labels'] = {j: int(i) for i, j in dataset_json['labels'].items()} 39 | dataset_json['file_ending'] = ".nii.gz" 40 | save_json(dataset_json, join(nnUNet_raw, target_dataset_name, 'dataset.json'), sort_keys=False) 41 | 42 | 43 | def convert_entry_point(): 44 | import argparse 45 | parser = argparse.ArgumentParser() 46 | parser.add_argument("input_folder", type=str, 47 | help='Raw old nnUNet dataset. This must be the folder with imagesTr,labelsTr etc subfolders! ' 48 | 'Please provide the PATH to the old Task, not just the task name. nnU-Net V2 does not ' 49 | 'know where v1 tasks are.') 50 | parser.add_argument("output_dataset_name", type=str, 51 | help='New dataset NAME (not path!). Must follow the DatasetXXX_NAME convention!') 52 | args = parser.parse_args() 53 | convert(args.input_folder, args.output_dataset_name) 54 | -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/dataset_conversion/datasets_for_integration_tests/Dataset997_IntegrationTest_Hippocampus_regions.py: -------------------------------------------------------------------------------- 1 | import shutil 2 | 3 | from batchgenerators.utilities.file_and_folder_operations import isdir, join, load_json, save_json 4 | 5 | from nnunetv2.utilities.dataset_name_id_conversion import maybe_convert_to_dataset_name 6 | from nnunetv2.paths import nnUNet_raw 7 | 8 | if __name__ == '__main__': 9 | dataset_name = 'IntegrationTest_Hippocampus_regions' 10 | dataset_id = 997 11 | dataset_name = f"Dataset{dataset_id:03d}_{dataset_name}" 12 | 13 | try: 14 | existing_dataset_name = maybe_convert_to_dataset_name(dataset_id) 15 | if existing_dataset_name != dataset_name: 16 | raise FileExistsError( 17 | f"A different dataset with id {dataset_id} already exists :-(: {existing_dataset_name}. If " 18 | f"you intent to delete it, remember to also remove it in nnUNet_preprocessed and " 19 | f"nnUNet_results!") 20 | except RuntimeError: 21 | pass 22 | 23 | if isdir(join(nnUNet_raw, dataset_name)): 24 | shutil.rmtree(join(nnUNet_raw, dataset_name)) 25 | 26 | source_dataset = maybe_convert_to_dataset_name(4) 27 | shutil.copytree(join(nnUNet_raw, source_dataset), join(nnUNet_raw, dataset_name)) 28 | 29 | # additionally optimize entire hippocampus region, remove Posterior 30 | dj = load_json(join(nnUNet_raw, dataset_name, 'dataset.json')) 31 | dj['labels'] = { 32 | 'background': 0, 33 | 'hippocampus': (1, 2), 34 | 'anterior': 1 35 | } 36 | dj['regions_class_order'] = (2, 1) 37 | save_json(dj, join(nnUNet_raw, dataset_name, 'dataset.json'), sort_keys=False) 38 | -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/dataset_conversion/datasets_for_integration_tests/Dataset998_IntegrationTest_Hippocampus_ignore.py: -------------------------------------------------------------------------------- 1 | import shutil 2 | 3 | from batchgenerators.utilities.file_and_folder_operations import isdir, join, load_json, save_json 4 | 5 | from nnunetv2.utilities.dataset_name_id_conversion import maybe_convert_to_dataset_name 6 | from nnunetv2.paths import nnUNet_raw 7 | 8 | 9 | if __name__ == '__main__': 10 | dataset_name = 'IntegrationTest_Hippocampus_ignore' 11 | dataset_id = 998 12 | dataset_name = f"Dataset{dataset_id:03d}_{dataset_name}" 13 | 14 | try: 15 | existing_dataset_name = maybe_convert_to_dataset_name(dataset_id) 16 | if existing_dataset_name != dataset_name: 17 | raise FileExistsError(f"A different dataset with id {dataset_id} already exists :-(: {existing_dataset_name}. If " 18 | f"you intent to delete it, remember to also remove it in nnUNet_preprocessed and " 19 | f"nnUNet_results!") 20 | except RuntimeError: 21 | pass 22 | 23 | if isdir(join(nnUNet_raw, dataset_name)): 24 | shutil.rmtree(join(nnUNet_raw, dataset_name)) 25 | 26 | source_dataset = maybe_convert_to_dataset_name(4) 27 | shutil.copytree(join(nnUNet_raw, source_dataset), join(nnUNet_raw, dataset_name)) 28 | 29 | # set class 2 to ignore label 30 | dj = load_json(join(nnUNet_raw, dataset_name, 'dataset.json')) 31 | dj['labels']['ignore'] = 2 32 | del dj['labels']['Posterior'] 33 | save_json(dj, join(nnUNet_raw, dataset_name, 'dataset.json'), sort_keys=False) 34 | -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/dataset_conversion/datasets_for_integration_tests/Dataset999_IntegrationTest_Hippocampus.py: -------------------------------------------------------------------------------- 1 | import shutil 2 | 3 | from batchgenerators.utilities.file_and_folder_operations import isdir, join 4 | 5 | from nnunetv2.utilities.dataset_name_id_conversion import maybe_convert_to_dataset_name 6 | from nnunetv2.paths import nnUNet_raw 7 | 8 | 9 | if __name__ == '__main__': 10 | dataset_name = 'IntegrationTest_Hippocampus' 11 | dataset_id = 999 12 | dataset_name = f"Dataset{dataset_id:03d}_{dataset_name}" 13 | 14 | try: 15 | existing_dataset_name = maybe_convert_to_dataset_name(dataset_id) 16 | if existing_dataset_name != dataset_name: 17 | raise FileExistsError(f"A different dataset with id {dataset_id} already exists :-(: {existing_dataset_name}. If " 18 | f"you intent to delete it, remember to also remove it in nnUNet_preprocessed and " 19 | f"nnUNet_results!") 20 | except RuntimeError: 21 | pass 22 | 23 | if isdir(join(nnUNet_raw, dataset_name)): 24 | shutil.rmtree(join(nnUNet_raw, dataset_name)) 25 | 26 | source_dataset = maybe_convert_to_dataset_name(4) 27 | shutil.copytree(join(nnUNet_raw, source_dataset), join(nnUNet_raw, dataset_name)) 28 | -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/dataset_conversion/datasets_for_integration_tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/dataset_conversion/datasets_for_integration_tests/__init__.py -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/ensembling/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/ensembling/__init__.py -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/evaluation/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/evaluation/__init__.py -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/evaluation/__pycache__/__init__.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/evaluation/__pycache__/__init__.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/evaluation/__pycache__/evaluate_predictions.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/evaluation/__pycache__/evaluate_predictions.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/experiment_planning/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/experiment_planning/__init__.py -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/experiment_planning/__pycache__/__init__.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/experiment_planning/__pycache__/__init__.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/experiment_planning/__pycache__/__init__.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/experiment_planning/__pycache__/__init__.cpython-39.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/experiment_planning/__pycache__/plan_and_preprocess_api.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/experiment_planning/__pycache__/plan_and_preprocess_api.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/experiment_planning/__pycache__/plan_and_preprocess_api.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/experiment_planning/__pycache__/plan_and_preprocess_api.cpython-39.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/experiment_planning/__pycache__/plan_and_preprocess_entrypoints.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/experiment_planning/__pycache__/plan_and_preprocess_entrypoints.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/experiment_planning/__pycache__/plan_and_preprocess_entrypoints.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/experiment_planning/__pycache__/plan_and_preprocess_entrypoints.cpython-39.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/experiment_planning/__pycache__/verify_dataset_integrity.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/experiment_planning/__pycache__/verify_dataset_integrity.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/experiment_planning/__pycache__/verify_dataset_integrity.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/experiment_planning/__pycache__/verify_dataset_integrity.cpython-39.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/experiment_planning/dataset_fingerprint/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/experiment_planning/dataset_fingerprint/__init__.py -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/experiment_planning/dataset_fingerprint/__pycache__/__init__.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/experiment_planning/dataset_fingerprint/__pycache__/__init__.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/experiment_planning/dataset_fingerprint/__pycache__/__init__.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/experiment_planning/dataset_fingerprint/__pycache__/__init__.cpython-39.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/experiment_planning/dataset_fingerprint/__pycache__/fingerprint_extractor.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/experiment_planning/dataset_fingerprint/__pycache__/fingerprint_extractor.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/experiment_planning/dataset_fingerprint/__pycache__/fingerprint_extractor.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/experiment_planning/dataset_fingerprint/__pycache__/fingerprint_extractor.cpython-39.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/experiment_planning/experiment_planners/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/experiment_planning/experiment_planners/__init__.py -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/experiment_planning/experiment_planners/__pycache__/__init__.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/experiment_planning/experiment_planners/__pycache__/__init__.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/experiment_planning/experiment_planners/__pycache__/__init__.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/experiment_planning/experiment_planners/__pycache__/__init__.cpython-39.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/experiment_planning/experiment_planners/__pycache__/default_experiment_planner.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/experiment_planning/experiment_planners/__pycache__/default_experiment_planner.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/experiment_planning/experiment_planners/__pycache__/default_experiment_planner.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/experiment_planning/experiment_planners/__pycache__/default_experiment_planner.cpython-39.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/experiment_planning/experiment_planners/__pycache__/network_topology.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/experiment_planning/experiment_planners/__pycache__/network_topology.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/experiment_planning/experiment_planners/__pycache__/network_topology.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/experiment_planning/experiment_planners/__pycache__/network_topology.cpython-39.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/experiment_planning/experiment_planners/readme.md: -------------------------------------------------------------------------------- 1 | What do experiment planners need to do (these are notes for myself while rewriting nnU-Net, they are provided as is 2 | without further explanations. These notes also include new features): 3 | - (done) preprocessor name should be configurable via cli 4 | - (done) gpu memory target should be configurable via cli 5 | - (done) plans name should be configurable via cli 6 | - (done) data name should be specified in plans (plans specify the data they want to use, this will allow us to manually 7 | edit plans files without having to copy the data folders) 8 | - plans must contain: 9 | - (done) transpose forward/backward 10 | - (done) preprocessor name (can differ for each config) 11 | - (done) spacing 12 | - (done) normalization scheme 13 | - (done) target spacing 14 | - (done) conv and pool op kernel sizes 15 | - (done) base num features for architecture 16 | - (done) data identifier 17 | - num conv per stage? 18 | - (done) use mask for norm 19 | - [NO. Handled by LabelManager & dataset.json] num segmentation outputs 20 | - [NO. Handled by LabelManager & dataset.json] ignore class 21 | - [NO. Handled by LabelManager & dataset.json] list of regions or classes 22 | - [NO. Handled by LabelManager & dataset.json] regions class order, if applicable 23 | - (done) resampling function to be used 24 | - (done) the image reader writer class that should be used 25 | 26 | 27 | dataset.json 28 | mandatory: 29 | - numTraining 30 | - labels (value 'ignore' has special meaning. Cannot have more than one ignore_label) 31 | - modalities 32 | - file_ending 33 | 34 | optional 35 | - overwrite_image_reader_writer (if absent, auto) 36 | - regions 37 | - region_class_order 38 | - -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/experiment_planning/plans_for_pretraining/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/experiment_planning/plans_for_pretraining/__init__.py -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/imageio/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/imageio/__init__.py -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/imageio/__pycache__/__init__.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/imageio/__pycache__/__init__.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/imageio/__pycache__/__init__.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/imageio/__pycache__/__init__.cpython-39.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/imageio/__pycache__/base_reader_writer.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/imageio/__pycache__/base_reader_writer.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/imageio/__pycache__/base_reader_writer.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/imageio/__pycache__/base_reader_writer.cpython-39.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/imageio/__pycache__/natural_image_reader_writer.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/imageio/__pycache__/natural_image_reader_writer.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/imageio/__pycache__/natural_image_reader_writer.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/imageio/__pycache__/natural_image_reader_writer.cpython-39.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/imageio/__pycache__/nibabel_reader_writer.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/imageio/__pycache__/nibabel_reader_writer.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/imageio/__pycache__/nibabel_reader_writer.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/imageio/__pycache__/nibabel_reader_writer.cpython-39.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/imageio/__pycache__/reader_writer_registry.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/imageio/__pycache__/reader_writer_registry.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/imageio/__pycache__/reader_writer_registry.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/imageio/__pycache__/reader_writer_registry.cpython-39.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/imageio/__pycache__/simpleitk_reader_writer.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/imageio/__pycache__/simpleitk_reader_writer.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/imageio/__pycache__/simpleitk_reader_writer.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/imageio/__pycache__/simpleitk_reader_writer.cpython-39.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/imageio/__pycache__/tif_reader_writer.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/imageio/__pycache__/tif_reader_writer.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/imageio/__pycache__/tif_reader_writer.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/imageio/__pycache__/tif_reader_writer.cpython-39.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/imageio/readme.md: -------------------------------------------------------------------------------- 1 | - Derive your adapter from `BaseReaderWriter`. 2 | - Reimplement all abstractmethods. 3 | - make sure to support 2d and 3d input images (or raise some error). 4 | - place it in this folder or nnU-Net won't find it! 5 | - add it to LIST_OF_IO_CLASSES in `reader_writer_registry.py` 6 | 7 | Bam, you're done! -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/inference/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/inference/__init__.py -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/inference/__pycache__/__init__.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/inference/__pycache__/__init__.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/inference/__pycache__/data_iterators.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/inference/__pycache__/data_iterators.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/inference/__pycache__/export_prediction.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/inference/__pycache__/export_prediction.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/inference/__pycache__/predict_from_raw_data.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/inference/__pycache__/predict_from_raw_data.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/inference/__pycache__/sliding_window_prediction.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/inference/__pycache__/sliding_window_prediction.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/inference/sliding_window_prediction.py: -------------------------------------------------------------------------------- 1 | from functools import lru_cache 2 | 3 | import numpy as np 4 | import torch 5 | from typing import Union, Tuple, List 6 | from acvl_utils.cropping_and_padding.padding import pad_nd_image 7 | from scipy.ndimage import gaussian_filter 8 | 9 | 10 | @lru_cache(maxsize=2) 11 | def compute_gaussian(tile_size: Union[Tuple[int, ...], List[int]], sigma_scale: float = 1. / 8, 12 | value_scaling_factor: float = 1, dtype=torch.float16, device=torch.device('cuda', 0)) \ 13 | -> torch.Tensor: 14 | tmp = np.zeros(tile_size) 15 | center_coords = [i // 2 for i in tile_size] 16 | sigmas = [i * sigma_scale for i in tile_size] 17 | tmp[tuple(center_coords)] = 1 18 | gaussian_importance_map = gaussian_filter(tmp, sigmas, 0, mode='constant', cval=0) 19 | 20 | gaussian_importance_map = torch.from_numpy(gaussian_importance_map) 21 | 22 | gaussian_importance_map = gaussian_importance_map / torch.max(gaussian_importance_map) * value_scaling_factor 23 | gaussian_importance_map = gaussian_importance_map.type(dtype).to(device) 24 | 25 | # gaussian_importance_map cannot be 0, otherwise we may end up with nans! 26 | gaussian_importance_map[gaussian_importance_map == 0] = torch.min( 27 | gaussian_importance_map[gaussian_importance_map != 0]) 28 | 29 | return gaussian_importance_map 30 | 31 | 32 | def compute_steps_for_sliding_window(image_size: Tuple[int, ...], tile_size: Tuple[int, ...], tile_step_size: float) -> \ 33 | List[List[int]]: 34 | assert [i >= j for i, j in zip(image_size, tile_size)], "image size must be as large or larger than patch_size" 35 | assert 0 < tile_step_size <= 1, 'step_size must be larger than 0 and smaller or equal to 1' 36 | 37 | # our step width is patch_size*step_size at most, but can be narrower. For example if we have image size of 38 | # 110, patch size of 64 and step_size of 0.5, then we want to make 3 steps starting at coordinate 0, 23, 46 39 | target_step_sizes_in_voxels = [i * tile_step_size for i in tile_size] 40 | 41 | num_steps = [int(np.ceil((i - k) / j)) + 1 for i, j, k in zip(image_size, target_step_sizes_in_voxels, tile_size)] 42 | 43 | steps = [] 44 | for dim in range(len(tile_size)): 45 | # the highest step value for this dimension is 46 | max_step_value = image_size[dim] - tile_size[dim] 47 | if num_steps[dim] > 1: 48 | actual_step_size = max_step_value / (num_steps[dim] - 1) 49 | else: 50 | actual_step_size = 99999999999 # does not matter because there is only one step at 0 51 | 52 | steps_here = [int(np.round(actual_step_size * i)) for i in range(num_steps[dim])] 53 | 54 | steps.append(steps_here) 55 | 56 | return steps 57 | 58 | 59 | if __name__ == '__main__': 60 | a = torch.rand((4, 2, 32, 23)) 61 | a_npy = a.numpy() 62 | 63 | a_padded = pad_nd_image(a, new_shape=(48, 27)) 64 | a_npy_padded = pad_nd_image(a_npy, new_shape=(48, 27)) 65 | assert all([i == j for i, j in zip(a_padded.shape, (4, 2, 48, 27))]) 66 | assert all([i == j for i, j in zip(a_npy_padded.shape, (4, 2, 48, 27))]) 67 | assert np.all(a_padded.numpy() == a_npy_padded) 68 | -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/model_sharing/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/model_sharing/__init__.py -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/model_sharing/model_download.py: -------------------------------------------------------------------------------- 1 | from typing import Optional 2 | 3 | import requests 4 | from batchgenerators.utilities.file_and_folder_operations import * 5 | from time import time 6 | from nnunetv2.model_sharing.model_import import install_model_from_zip_file 7 | from nnunetv2.paths import nnUNet_results 8 | from tqdm import tqdm 9 | 10 | 11 | def download_and_install_from_url(url): 12 | assert nnUNet_results is not None, "Cannot install model because network_training_output_dir is not " \ 13 | "set (RESULTS_FOLDER missing as environment variable, see " \ 14 | "Installation instructions)" 15 | print('Downloading pretrained model from url:', url) 16 | import http.client 17 | http.client.HTTPConnection._http_vsn = 10 18 | http.client.HTTPConnection._http_vsn_str = 'HTTP/1.0' 19 | 20 | import os 21 | home = os.path.expanduser('~') 22 | random_number = int(time() * 1e7) 23 | tempfile = join(home, f'.nnunetdownload_{str(random_number)}') 24 | 25 | try: 26 | download_file(url=url, local_filename=tempfile, chunk_size=8192 * 16) 27 | print("Download finished. Extracting...") 28 | install_model_from_zip_file(tempfile) 29 | print("Done") 30 | except Exception as e: 31 | raise e 32 | finally: 33 | if isfile(tempfile): 34 | os.remove(tempfile) 35 | 36 | 37 | def download_file(url: str, local_filename: str, chunk_size: Optional[int] = 8192 * 16) -> str: 38 | # borrowed from https://stackoverflow.com/questions/16694907/download-large-file-in-python-with-requests 39 | # NOTE the stream=True parameter below 40 | with requests.get(url, stream=True, timeout=100) as r: 41 | r.raise_for_status() 42 | with tqdm.wrapattr(open(local_filename, 'wb'), "write", total=int(r.headers.get("Content-Length"))) as f: 43 | for chunk in r.iter_content(chunk_size=chunk_size): 44 | f.write(chunk) 45 | return local_filename 46 | 47 | 48 | -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/model_sharing/model_import.py: -------------------------------------------------------------------------------- 1 | import zipfile 2 | 3 | from nnunetv2.paths import nnUNet_results 4 | 5 | 6 | def install_model_from_zip_file(zip_file: str): 7 | with zipfile.ZipFile(zip_file, 'r') as zip_ref: 8 | zip_ref.extractall(nnUNet_results) -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/paths.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | import os 16 | 17 | """ 18 | PLEASE READ paths.md FOR INFORMATION TO HOW TO SET THIS UP 19 | """ 20 | 21 | base = '/data/linshan/nnunet_data' 22 | nnUNet_raw = '/data/linshan/nnunet_data/nnUNet_raw' 23 | nnUNet_preprocessed = '/data/linshan/nnunet_data/nnUNet_preprocessed' 24 | nnUNet_results = '/data/linshan/nnunet_data/nnUNet_results' 25 | 26 | if nnUNet_raw is None: 27 | print("nnUNet_raw is not defined and nnU-Net can only be used on data for which preprocessed files " 28 | "are already present on your system. nnU-Net cannot be used for experiment planning and preprocessing like " 29 | "this. If this is not intended, please read documentation/setting_up_paths.md for information on how to set " 30 | "this up properly.") 31 | 32 | if nnUNet_preprocessed is None: 33 | print("nnUNet_preprocessed is not defined and nnU-Net can not be used for preprocessing " 34 | "or training. If this is not intended, please read documentation/setting_up_paths.md for information on how " 35 | "to set this up.") 36 | 37 | if nnUNet_results is None: 38 | print("nnUNet_results is not defined and nnU-Net cannot be used for training or " 39 | "inference. If this is not intended behavior, please read documentation/setting_up_paths.md for information " 40 | "on how to set this up.") 41 | -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/postprocessing/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/postprocessing/__init__.py -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/preprocessing/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/preprocessing/__init__.py -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/preprocessing/__pycache__/__init__.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/preprocessing/__pycache__/__init__.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/preprocessing/__pycache__/__init__.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/preprocessing/__pycache__/__init__.cpython-39.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/preprocessing/cropping/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/preprocessing/cropping/__init__.py -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/preprocessing/cropping/__pycache__/__init__.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/preprocessing/cropping/__pycache__/__init__.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/preprocessing/cropping/__pycache__/__init__.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/preprocessing/cropping/__pycache__/__init__.cpython-39.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/preprocessing/cropping/__pycache__/cropping.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/preprocessing/cropping/__pycache__/cropping.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/preprocessing/cropping/__pycache__/cropping.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/preprocessing/cropping/__pycache__/cropping.cpython-39.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/preprocessing/cropping/cropping.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | 4 | # Hello! crop_to_nonzero is the function you are looking for. Ignore the rest. 5 | from acvl_utils.cropping_and_padding.bounding_boxes import get_bbox_from_mask, crop_to_bbox, bounding_box_to_slice 6 | 7 | 8 | def create_nonzero_mask(data): 9 | """ 10 | 11 | :param data: 12 | :return: the mask is True where the data is nonzero 13 | """ 14 | from scipy.ndimage import binary_fill_holes 15 | assert data.ndim in (3, 4), "data must have shape (C, X, Y, Z) or shape (C, X, Y)" 16 | nonzero_mask = np.zeros(data.shape[1:], dtype=bool) 17 | for c in range(data.shape[0]): 18 | this_mask = data[c] != 0 19 | nonzero_mask = nonzero_mask | this_mask 20 | nonzero_mask = binary_fill_holes(nonzero_mask) 21 | return nonzero_mask 22 | 23 | 24 | def crop_to_nonzero(data, seg=None, nonzero_label=-1): 25 | """ 26 | 27 | :param data: 28 | :param seg: 29 | :param nonzero_label: this will be written into the segmentation map 30 | :return: 31 | """ 32 | nonzero_mask = create_nonzero_mask(data) 33 | bbox = get_bbox_from_mask(nonzero_mask) 34 | 35 | slicer = bounding_box_to_slice(bbox) 36 | data = data[tuple([slice(None), *slicer])] 37 | 38 | if seg is not None: 39 | seg = seg[tuple([slice(None), *slicer])] 40 | 41 | nonzero_mask = nonzero_mask[slicer][None] 42 | if seg is not None: 43 | seg[(seg == 0) & (~nonzero_mask)] = nonzero_label 44 | else: 45 | nonzero_mask = nonzero_mask.astype(np.int8) 46 | nonzero_mask[nonzero_mask == 0] = nonzero_label 47 | nonzero_mask[nonzero_mask > 0] = 0 48 | seg = nonzero_mask 49 | return data, seg, bbox 50 | 51 | 52 | -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/preprocessing/normalization/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/preprocessing/normalization/__init__.py -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/preprocessing/normalization/__pycache__/__init__.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/preprocessing/normalization/__pycache__/__init__.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/preprocessing/normalization/__pycache__/__init__.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/preprocessing/normalization/__pycache__/__init__.cpython-39.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/preprocessing/normalization/__pycache__/default_normalization_schemes.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/preprocessing/normalization/__pycache__/default_normalization_schemes.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/preprocessing/normalization/__pycache__/default_normalization_schemes.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/preprocessing/normalization/__pycache__/default_normalization_schemes.cpython-39.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/preprocessing/normalization/__pycache__/map_channel_name_to_normalization.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/preprocessing/normalization/__pycache__/map_channel_name_to_normalization.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/preprocessing/normalization/__pycache__/map_channel_name_to_normalization.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/preprocessing/normalization/__pycache__/map_channel_name_to_normalization.cpython-39.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/preprocessing/normalization/map_channel_name_to_normalization.py: -------------------------------------------------------------------------------- 1 | from typing import Type 2 | 3 | from nnunetv2.preprocessing.normalization.default_normalization_schemes import CTNormalization, NoNormalization, \ 4 | ZScoreNormalization, RescaleTo01Normalization, RGBTo01Normalization, ImageNormalization 5 | 6 | channel_name_to_normalization_mapping = { 7 | 'CT': CTNormalization, 8 | 'noNorm': NoNormalization, 9 | 'zscore': ZScoreNormalization, 10 | 'rescale_to_0_1': RescaleTo01Normalization, 11 | 'rgb_to_0_1': RGBTo01Normalization 12 | } 13 | 14 | 15 | def get_normalization_scheme(channel_name: str) -> Type[ImageNormalization]: 16 | """ 17 | If we find the channel_name in channel_name_to_normalization_mapping return the corresponding normalization. If it is 18 | not found, use the default (ZScoreNormalization) 19 | """ 20 | norm_scheme = channel_name_to_normalization_mapping.get(channel_name) 21 | if norm_scheme is None: 22 | norm_scheme = ZScoreNormalization 23 | # print('Using %s for image normalization' % norm_scheme.__name__) 24 | return norm_scheme 25 | -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/preprocessing/normalization/readme.md: -------------------------------------------------------------------------------- 1 | The channel_names entry in dataset.json only determines the normlaization scheme. So if you want to use something different 2 | then you can just 3 | - create a new subclass of ImageNormalization 4 | - map your custom channel identifier to that subclass in channel_name_to_normalization_mapping 5 | - run plan and preprocess again with your custom normlaization scheme -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/preprocessing/preprocessors/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/preprocessing/preprocessors/__init__.py -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/preprocessing/preprocessors/__pycache__/__init__.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/preprocessing/preprocessors/__pycache__/__init__.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/preprocessing/preprocessors/__pycache__/__init__.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/preprocessing/preprocessors/__pycache__/__init__.cpython-39.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/preprocessing/preprocessors/__pycache__/default_preprocessor.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/preprocessing/preprocessors/__pycache__/default_preprocessor.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/preprocessing/preprocessors/__pycache__/default_preprocessor.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/preprocessing/preprocessors/__pycache__/default_preprocessor.cpython-39.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/preprocessing/resampling/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/preprocessing/resampling/__init__.py -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/preprocessing/resampling/__pycache__/__init__.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/preprocessing/resampling/__pycache__/__init__.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/preprocessing/resampling/__pycache__/__init__.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/preprocessing/resampling/__pycache__/__init__.cpython-39.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/preprocessing/resampling/__pycache__/default_resampling.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/preprocessing/resampling/__pycache__/default_resampling.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/preprocessing/resampling/__pycache__/default_resampling.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/preprocessing/resampling/__pycache__/default_resampling.cpython-39.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/preprocessing/resampling/__pycache__/utils.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/preprocessing/resampling/__pycache__/utils.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/preprocessing/resampling/__pycache__/utils.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/preprocessing/resampling/__pycache__/utils.cpython-39.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/preprocessing/resampling/utils.py: -------------------------------------------------------------------------------- 1 | from typing import Callable 2 | 3 | import nnunetv2 4 | from batchgenerators.utilities.file_and_folder_operations import join 5 | from nnunetv2.utilities.find_class_by_name import recursive_find_python_class 6 | 7 | 8 | def recursive_find_resampling_fn_by_name(resampling_fn: str) -> Callable: 9 | ret = recursive_find_python_class(join(nnunetv2.__path__[0], "preprocessing", "resampling"), resampling_fn, 10 | 'nnunetv2.preprocessing.resampling') 11 | if ret is None: 12 | raise RuntimeError("Unable to find resampling function named '%s'. Please make sure this fn is located in the " 13 | "nnunetv2.preprocessing.resampling module." % resampling_fn) 14 | else: 15 | return ret 16 | -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/run/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/run/__init__.py -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/run/__pycache__/__init__.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/run/__pycache__/__init__.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/run/__pycache__/load_pretrained_weights.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/run/__pycache__/load_pretrained_weights.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/run/__pycache__/run_training.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/run/__pycache__/run_training.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/run/load_pretrained_weights.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from torch._dynamo import OptimizedModule 3 | from torch.nn.parallel import DistributedDataParallel as DDP 4 | 5 | 6 | def load_pretrained_weights(network, fname, verbose=False): 7 | """ 8 | Transfers all weights between matching keys in state_dicts. matching is done by name and we only transfer if the 9 | shape is also the same. Segmentation layers (the 1x1(x1) layers that produce the segmentation maps) 10 | identified by keys ending with '.seg_layers') are not transferred! 11 | 12 | If the pretrained weights were obtained with a training outside nnU-Net and DDP or torch.optimize was used, 13 | you need to change the keys of the pretrained state_dict. DDP adds a 'module.' prefix and torch.optim adds 14 | '_orig_mod'. You DO NOT need to worry about this if pretraining was done with nnU-Net as 15 | nnUNetTrainer.save_checkpoint takes care of that! 16 | 17 | """ 18 | saved_model = torch.load(fname) 19 | pretrained_dict = saved_model['network_weights'] 20 | 21 | skip_strings_in_pretrained = [ 22 | '.seg_layers.', 23 | ] 24 | 25 | if isinstance(network, DDP): 26 | mod = network.module 27 | else: 28 | mod = network 29 | if isinstance(mod, OptimizedModule): 30 | mod = mod._orig_mod 31 | 32 | model_dict = mod.state_dict() 33 | # verify that all but the segmentation layers have the same shape 34 | for key, _ in model_dict.items(): 35 | if all([i not in key for i in skip_strings_in_pretrained]): 36 | assert key in pretrained_dict, \ 37 | f"Key {key} is missing in the pretrained model weights. The pretrained weights do not seem to be " \ 38 | f"compatible with your network." 39 | assert model_dict[key].shape == pretrained_dict[key].shape, \ 40 | f"The shape of the parameters of key {key} is not the same. Pretrained model: " \ 41 | f"{pretrained_dict[key].shape}; your network: {model_dict[key]}. The pretrained model " \ 42 | f"does not seem to be compatible with your network." 43 | 44 | # fun fact: in principle this allows loading from parameters that do not cover the entire network. For example pretrained 45 | # encoders. Not supported by this function though (see assertions above) 46 | 47 | # commenting out this abomination of a dict comprehension for preservation in the archives of 'what not to do' 48 | # pretrained_dict = {'module.' + k if is_ddp else k: v 49 | # for k, v in pretrained_dict.items() 50 | # if (('module.' + k if is_ddp else k) in model_dict) and 51 | # all([i not in k for i in skip_strings_in_pretrained])} 52 | 53 | pretrained_dict = {k: v for k, v in pretrained_dict.items() 54 | if k in model_dict.keys() and all([i not in k for i in skip_strings_in_pretrained])} 55 | 56 | model_dict.update(pretrained_dict) 57 | 58 | print("################### Loading pretrained weights from file ", fname, '###################') 59 | if verbose: 60 | print("Below is the list of overlapping blocks in pretrained model and nnUNet architecture:") 61 | for key, value in pretrained_dict.items(): 62 | print(key, 'shape', value.shape) 63 | print("################### Done ###################") 64 | mod.load_state_dict(model_dict) 65 | 66 | 67 | -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/tests/__init__.py -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/tests/integration_tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/tests/integration_tests/__init__.py -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/tests/integration_tests/add_lowres_and_cascade.py: -------------------------------------------------------------------------------- 1 | from batchgenerators.utilities.file_and_folder_operations import * 2 | 3 | from nnunetv2.paths import nnUNet_preprocessed 4 | from nnunetv2.utilities.dataset_name_id_conversion import maybe_convert_to_dataset_name 5 | 6 | if __name__ == '__main__': 7 | import argparse 8 | 9 | parser = argparse.ArgumentParser() 10 | parser.add_argument('-d', nargs='+', type=int, help='List of dataset ids') 11 | args = parser.parse_args() 12 | 13 | for d in args.d: 14 | dataset_name = maybe_convert_to_dataset_name(d) 15 | plans = load_json(join(nnUNet_preprocessed, dataset_name, 'nnUNetPlans.json')) 16 | plans['configurations']['3d_lowres'] = { 17 | "data_identifier": "nnUNetPlans_3d_lowres", # do not be a dumbo and forget this. I was a dumbo. And I paid dearly with ~10 min debugging time 18 | 'inherits_from': '3d_fullres', 19 | "patch_size": [20, 28, 20], 20 | "median_image_size_in_voxels": [18.0, 25.0, 18.0], 21 | "spacing": [2.0, 2.0, 2.0], 22 | "n_conv_per_stage_encoder": [2, 2, 2], 23 | "n_conv_per_stage_decoder": [2, 2], 24 | "num_pool_per_axis": [2, 2, 2], 25 | "pool_op_kernel_sizes": [[1, 1, 1], [2, 2, 2], [2, 2, 2]], 26 | "conv_kernel_sizes": [[3, 3, 3], [3, 3, 3], [3, 3, 3]], 27 | "next_stage": "3d_cascade_fullres" 28 | } 29 | plans['configurations']['3d_cascade_fullres'] = { 30 | 'inherits_from': '3d_fullres', 31 | "previous_stage": "3d_lowres" 32 | } 33 | save_json(plans, join(nnUNet_preprocessed, dataset_name, 'nnUNetPlans.json'), sort_keys=False) -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/tests/integration_tests/cleanup_integration_test.py: -------------------------------------------------------------------------------- 1 | import shutil 2 | 3 | from batchgenerators.utilities.file_and_folder_operations import isdir, join 4 | 5 | from nnunetv2.paths import nnUNet_raw, nnUNet_results, nnUNet_preprocessed 6 | 7 | if __name__ == '__main__': 8 | # deletes everything! 9 | dataset_names = [ 10 | 'Dataset996_IntegrationTest_Hippocampus_regions_ignore', 11 | 'Dataset997_IntegrationTest_Hippocampus_regions', 12 | 'Dataset998_IntegrationTest_Hippocampus_ignore', 13 | 'Dataset999_IntegrationTest_Hippocampus', 14 | ] 15 | for fld in [nnUNet_raw, nnUNet_preprocessed, nnUNet_results]: 16 | for d in dataset_names: 17 | if isdir(join(fld, d)): 18 | shutil.rmtree(join(fld, d)) 19 | 20 | -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/tests/integration_tests/lsf_commands.sh: -------------------------------------------------------------------------------- 1 | bsub -q gpu.legacy -gpu num=1:j_exclusive=yes:gmem=1G -L /bin/bash ". /home/isensee/load_env_cluster4.sh && cd /home/isensee/git_repos/nnunet_remake && export nnUNet_keep_files_open=True && . nnunetv2/tests/integration_tests/run_integration_test.sh 996" 2 | bsub -q gpu.legacy -gpu num=1:j_exclusive=yes:gmem=1G -L /bin/bash ". /home/isensee/load_env_cluster4.sh && cd /home/isensee/git_repos/nnunet_remake && export nnUNet_keep_files_open=True && . nnunetv2/tests/integration_tests/run_integration_test.sh 997" 3 | bsub -q gpu.legacy -gpu num=1:j_exclusive=yes:gmem=1G -L /bin/bash ". /home/isensee/load_env_cluster4.sh && cd /home/isensee/git_repos/nnunet_remake && export nnUNet_keep_files_open=True && . nnunetv2/tests/integration_tests/run_integration_test.sh 998" 4 | bsub -q gpu.legacy -gpu num=1:j_exclusive=yes:gmem=1G -L /bin/bash ". /home/isensee/load_env_cluster4.sh && cd /home/isensee/git_repos/nnunet_remake && export nnUNet_keep_files_open=True && . nnunetv2/tests/integration_tests/run_integration_test.sh 999" 5 | 6 | 7 | bsub -q gpu.legacy -gpu num=2:j_exclusive=yes:gmem=1G -L /bin/bash ". /home/isensee/load_env_cluster4.sh && cd /home/isensee/git_repos/nnunet_remake && export nnUNet_keep_files_open=True && . nnunetv2/tests/integration_tests/run_integration_test_trainingOnly_DDP.sh 996" 8 | bsub -q gpu.legacy -gpu num=2:j_exclusive=yes:gmem=1G -L /bin/bash ". /home/isensee/load_env_cluster4.sh && cd /home/isensee/git_repos/nnunet_remake && export nnUNet_keep_files_open=True && . nnunetv2/tests/integration_tests/run_integration_test_trainingOnly_DDP.sh 997" 9 | bsub -q gpu.legacy -gpu num=2:j_exclusive=yes:gmem=1G -L /bin/bash ". /home/isensee/load_env_cluster4.sh && cd /home/isensee/git_repos/nnunet_remake && export nnUNet_keep_files_open=True && . nnunetv2/tests/integration_tests/run_integration_test_trainingOnly_DDP.sh 998" 10 | bsub -q gpu.legacy -gpu num=2:j_exclusive=yes:gmem=1G -L /bin/bash ". /home/isensee/load_env_cluster4.sh && cd /home/isensee/git_repos/nnunet_remake && export nnUNet_keep_files_open=True && . nnunetv2/tests/integration_tests/run_integration_test_trainingOnly_DDP.sh 999" 11 | -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/tests/integration_tests/prepare_integration_tests.sh: -------------------------------------------------------------------------------- 1 | # assumes you are in the nnunet repo! 2 | 3 | # prepare raw datasets 4 | python nnunetv2/dataset_conversion/datasets_for_integration_tests/Dataset999_IntegrationTest_Hippocampus.py 5 | python nnunetv2/dataset_conversion/datasets_for_integration_tests/Dataset998_IntegrationTest_Hippocampus_ignore.py 6 | python nnunetv2/dataset_conversion/datasets_for_integration_tests/Dataset997_IntegrationTest_Hippocampus_regions.py 7 | python nnunetv2/dataset_conversion/datasets_for_integration_tests/Dataset996_IntegrationTest_Hippocampus_regions_ignore.py 8 | 9 | # now run experiment planning without preprocessing 10 | nnUNetv2_plan_and_preprocess -d 996 997 998 999 --no_pp 11 | 12 | # now add 3d lowres and cascade 13 | python nnunetv2/tests/integration_tests/add_lowres_and_cascade.py -d 996 997 998 999 14 | 15 | # now preprocess everything 16 | nnUNetv2_preprocess -d 996 997 998 999 -c 2d 3d_lowres 3d_fullres -np 8 8 8 # no need to preprocess cascade as its the same data as 3d_fullres 17 | 18 | # done -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/tests/integration_tests/readme.md: -------------------------------------------------------------------------------- 1 | # Preface 2 | 3 | I am just a mortal with many tasks and limited time. Aint nobody got time for unittests. 4 | 5 | HOWEVER, at least some integration tests should be performed testing nnU-Net from start to finish. 6 | 7 | # Introduction - What the heck is happening? 8 | This test covers all possible labeling scenarios (standard labels, regions, ignore labels and regions with 9 | ignore labels). It runs the entire nnU-Net pipeline from start to finish: 10 | 11 | - fingerprint extraction 12 | - experiment planning 13 | - preprocessing 14 | - train all 4 configurations (2d, 3d_lowres, 3d_fullres, 3d_cascade_fullres) as 5-fold CV 15 | - automatically find the best model or ensemble 16 | - determine the postprocessing used for this 17 | - predict some test set 18 | - apply postprocessing to the test set 19 | 20 | To speed things up, we do the following: 21 | - pick Dataset004_Hippocampus because it is quadratisch praktisch gut. MNIST of medical image segmentation 22 | - by default this dataset does not have 3d_lowres or cascade. We just manually add them (cool new feature, eh?). See `add_lowres_and_cascade.py` to learn more! 23 | - we use nnUNetTrainer_5epochs for a short training 24 | 25 | # How to run it? 26 | 27 | Set your pwd to be the nnunet repo folder (the one where the `nnunetv2` folder and the `setup.py` are located!) 28 | 29 | Now generate the 4 dummy datasets (ids 996, 997, 998, 999) from dataset 4. This will crash if you don't have Dataset004! 30 | ```commandline 31 | bash nnunetv2/tests/integration_tests/prepare_integration_tests.sh 32 | ``` 33 | 34 | Now you can run the integration test for each of the datasets: 35 | ```commandline 36 | bash nnunetv2/tests/integration_tests/run_integration_test.sh DATSET_ID 37 | ``` 38 | use DATSET_ID 996, 997, 998 and 999. You can run these independently on different GPUs/systems to speed things up. 39 | This will take i dunno like 10-30 Minutes!? 40 | 41 | Also run 42 | ```commandline 43 | bash nnunetv2/tests/integration_tests/run_integration_test_trainingOnly_DDP.sh DATSET_ID 44 | ``` 45 | to verify DDP is working (needs 2 GPUs!) 46 | 47 | # How to check if the test was successful? 48 | If I was not as lazy as I am I would have programmed some automatism that checks if Dice scores etc are in an acceptable range. 49 | So you need to do the following: 50 | 1) check that none of your runs crashed (duh) 51 | 2) for each run, navigate to `nnUNet_results/DATASET_NAME` and take a look at the `inference_information.json` file. 52 | Does it make sense? If so: NICE! 53 | 54 | Once the integration test is completed you can delete all the temporary files associated with it by running: 55 | 56 | ```commandline 57 | python nnunetv2/tests/integration_tests/cleanup_integration_test.py 58 | ``` -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/tests/integration_tests/run_integration_test.sh: -------------------------------------------------------------------------------- 1 | 2 | 3 | nnUNetv2_train $1 3d_fullres 0 -tr nnUNetTrainer_5epochs --npz 4 | nnUNetv2_train $1 3d_fullres 1 -tr nnUNetTrainer_5epochs --npz 5 | nnUNetv2_train $1 3d_fullres 2 -tr nnUNetTrainer_5epochs --npz 6 | nnUNetv2_train $1 3d_fullres 3 -tr nnUNetTrainer_5epochs --npz 7 | nnUNetv2_train $1 3d_fullres 4 -tr nnUNetTrainer_5epochs --npz 8 | 9 | nnUNetv2_train $1 2d 0 -tr nnUNetTrainer_5epochs --npz 10 | nnUNetv2_train $1 2d 1 -tr nnUNetTrainer_5epochs --npz 11 | nnUNetv2_train $1 2d 2 -tr nnUNetTrainer_5epochs --npz 12 | nnUNetv2_train $1 2d 3 -tr nnUNetTrainer_5epochs --npz 13 | nnUNetv2_train $1 2d 4 -tr nnUNetTrainer_5epochs --npz 14 | 15 | nnUNetv2_train $1 3d_lowres 0 -tr nnUNetTrainer_5epochs --npz 16 | nnUNetv2_train $1 3d_lowres 1 -tr nnUNetTrainer_5epochs --npz 17 | nnUNetv2_train $1 3d_lowres 2 -tr nnUNetTrainer_5epochs --npz 18 | nnUNetv2_train $1 3d_lowres 3 -tr nnUNetTrainer_5epochs --npz 19 | nnUNetv2_train $1 3d_lowres 4 -tr nnUNetTrainer_5epochs --npz 20 | 21 | nnUNetv2_train $1 3d_cascade_fullres 0 -tr nnUNetTrainer_5epochs --npz 22 | nnUNetv2_train $1 3d_cascade_fullres 1 -tr nnUNetTrainer_5epochs --npz 23 | nnUNetv2_train $1 3d_cascade_fullres 2 -tr nnUNetTrainer_5epochs --npz 24 | nnUNetv2_train $1 3d_cascade_fullres 3 -tr nnUNetTrainer_5epochs --npz 25 | nnUNetv2_train $1 3d_cascade_fullres 4 -tr nnUNetTrainer_5epochs --npz 26 | 27 | python nnunetv2/tests/integration_tests/run_integration_test_bestconfig_inference.py -d $1 -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/tests/integration_tests/run_integration_test_trainingOnly_DDP.sh: -------------------------------------------------------------------------------- 1 | nnUNetv2_train $1 3d_fullres 0 -tr nnUNetTrainer_10epochs -num_gpus 2 2 | -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/training/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/training/__init__.py -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/training/__pycache__/__init__.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/training/__pycache__/__init__.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/training/data_augmentation/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/training/data_augmentation/__init__.py -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/training/data_augmentation/__pycache__/__init__.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/training/data_augmentation/__pycache__/__init__.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/training/data_augmentation/__pycache__/compute_initial_patch_size.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/training/data_augmentation/__pycache__/compute_initial_patch_size.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/training/data_augmentation/compute_initial_patch_size.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | 4 | def get_patch_size(final_patch_size, rot_x, rot_y, rot_z, scale_range): 5 | if isinstance(rot_x, (tuple, list)): 6 | rot_x = max(np.abs(rot_x)) 7 | if isinstance(rot_y, (tuple, list)): 8 | rot_y = max(np.abs(rot_y)) 9 | if isinstance(rot_z, (tuple, list)): 10 | rot_z = max(np.abs(rot_z)) 11 | rot_x = min(90 / 360 * 2. * np.pi, rot_x) 12 | rot_y = min(90 / 360 * 2. * np.pi, rot_y) 13 | rot_z = min(90 / 360 * 2. * np.pi, rot_z) 14 | from batchgenerators.augmentations.utils import rotate_coords_3d, rotate_coords_2d 15 | coords = np.array(final_patch_size) 16 | final_shape = np.copy(coords) 17 | if len(coords) == 3: 18 | final_shape = np.max(np.vstack((np.abs(rotate_coords_3d(coords, rot_x, 0, 0)), final_shape)), 0) 19 | final_shape = np.max(np.vstack((np.abs(rotate_coords_3d(coords, 0, rot_y, 0)), final_shape)), 0) 20 | final_shape = np.max(np.vstack((np.abs(rotate_coords_3d(coords, 0, 0, rot_z)), final_shape)), 0) 21 | elif len(coords) == 2: 22 | final_shape = np.max(np.vstack((np.abs(rotate_coords_2d(coords, rot_x)), final_shape)), 0) 23 | final_shape /= min(scale_range) 24 | return final_shape.astype(int) 25 | -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/training/data_augmentation/custom_transforms/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/training/data_augmentation/custom_transforms/__init__.py -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/training/data_augmentation/custom_transforms/__pycache__/__init__.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/training/data_augmentation/custom_transforms/__pycache__/__init__.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/training/data_augmentation/custom_transforms/__pycache__/cascade_transforms.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/training/data_augmentation/custom_transforms/__pycache__/cascade_transforms.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/training/data_augmentation/custom_transforms/__pycache__/deep_supervision_donwsampling.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/training/data_augmentation/custom_transforms/__pycache__/deep_supervision_donwsampling.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/training/data_augmentation/custom_transforms/__pycache__/limited_length_multithreaded_augmenter.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/training/data_augmentation/custom_transforms/__pycache__/limited_length_multithreaded_augmenter.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/training/data_augmentation/custom_transforms/__pycache__/masking.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/training/data_augmentation/custom_transforms/__pycache__/masking.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/training/data_augmentation/custom_transforms/__pycache__/region_based_training.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/training/data_augmentation/custom_transforms/__pycache__/region_based_training.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/training/data_augmentation/custom_transforms/__pycache__/transforms_for_dummy_2d.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/training/data_augmentation/custom_transforms/__pycache__/transforms_for_dummy_2d.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/training/data_augmentation/custom_transforms/deep_supervision_donwsampling.py: -------------------------------------------------------------------------------- 1 | from typing import Tuple, Union, List 2 | 3 | from batchgenerators.augmentations.utils import resize_segmentation 4 | from batchgenerators.transforms.abstract_transforms import AbstractTransform 5 | import numpy as np 6 | 7 | 8 | class DownsampleSegForDSTransform2(AbstractTransform): 9 | ''' 10 | data_dict['output_key'] will be a list of segmentations scaled according to ds_scales 11 | ''' 12 | def __init__(self, ds_scales: Union[List, Tuple], 13 | order: int = 0, input_key: str = "seg", 14 | output_key: str = "seg", axes: Tuple[int] = None): 15 | """ 16 | Downscales data_dict[input_key] according to ds_scales. Each entry in ds_scales specified one deep supervision 17 | output and its resolution relative to the original data, for example 0.25 specifies 1/4 of the original shape. 18 | ds_scales can also be a tuple of tuples, for example ((1, 1, 1), (0.5, 0.5, 0.5)) to specify the downsampling 19 | for each axis independently 20 | """ 21 | self.axes = axes 22 | self.output_key = output_key 23 | self.input_key = input_key 24 | self.order = order 25 | self.ds_scales = ds_scales 26 | 27 | def __call__(self, **data_dict): 28 | if self.axes is None: 29 | axes = list(range(2, data_dict[self.input_key].ndim)) 30 | else: 31 | axes = self.axes 32 | 33 | output = [] 34 | for s in self.ds_scales: 35 | if not isinstance(s, (tuple, list)): 36 | s = [s] * len(axes) 37 | else: 38 | assert len(s) == len(axes), f'If ds_scales is a tuple for each resolution (one downsampling factor ' \ 39 | f'for each axis) then the number of entried in that tuple (here ' \ 40 | f'{len(s)}) must be the same as the number of axes (here {len(axes)}).' 41 | 42 | if all([i == 1 for i in s]): 43 | output.append(data_dict[self.input_key]) 44 | else: 45 | new_shape = np.array(data_dict[self.input_key].shape).astype(float) 46 | for i, a in enumerate(axes): 47 | new_shape[a] *= s[i] 48 | new_shape = np.round(new_shape).astype(int) 49 | out_seg = np.zeros(new_shape, dtype=data_dict[self.input_key].dtype) 50 | for b in range(data_dict[self.input_key].shape[0]): 51 | for c in range(data_dict[self.input_key].shape[1]): 52 | out_seg[b, c] = resize_segmentation(data_dict[self.input_key][b, c], new_shape[2:], self.order) 53 | output.append(out_seg) 54 | data_dict[self.output_key] = output 55 | return data_dict 56 | -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/training/data_augmentation/custom_transforms/limited_length_multithreaded_augmenter.py: -------------------------------------------------------------------------------- 1 | from batchgenerators.dataloading.nondet_multi_threaded_augmenter import NonDetMultiThreadedAugmenter 2 | 3 | 4 | class LimitedLenWrapper(NonDetMultiThreadedAugmenter): 5 | def __init__(self, my_imaginary_length, *args, **kwargs): 6 | super().__init__(*args, **kwargs) 7 | self.len = my_imaginary_length 8 | 9 | def __len__(self): 10 | return self.len 11 | -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/training/data_augmentation/custom_transforms/manipulating_data_dict.py: -------------------------------------------------------------------------------- 1 | from batchgenerators.transforms.abstract_transforms import AbstractTransform 2 | 3 | 4 | class RemoveKeyTransform(AbstractTransform): 5 | def __init__(self, key_to_remove: str): 6 | self.key_to_remove = key_to_remove 7 | 8 | def __call__(self, **data_dict): 9 | _ = data_dict.pop(self.key_to_remove, None) 10 | return data_dict 11 | -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/training/data_augmentation/custom_transforms/masking.py: -------------------------------------------------------------------------------- 1 | from typing import List 2 | 3 | from batchgenerators.transforms.abstract_transforms import AbstractTransform 4 | 5 | 6 | class MaskTransform(AbstractTransform): 7 | def __init__(self, apply_to_channels: List[int], mask_idx_in_seg: int = 0, set_outside_to: int = 0, 8 | data_key: str = "data", seg_key: str = "seg"): 9 | """ 10 | Sets everything outside the mask to 0. CAREFUL! outside is defined as < 0, not =0 (in the Mask)!!! 11 | """ 12 | self.apply_to_channels = apply_to_channels 13 | self.seg_key = seg_key 14 | self.data_key = data_key 15 | self.set_outside_to = set_outside_to 16 | self.mask_idx_in_seg = mask_idx_in_seg 17 | 18 | def __call__(self, **data_dict): 19 | mask = data_dict[self.seg_key][:, self.mask_idx_in_seg] < 0 20 | for c in self.apply_to_channels: 21 | data_dict[self.data_key][:, c][mask] = self.set_outside_to 22 | return data_dict 23 | -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/training/data_augmentation/custom_transforms/region_based_training.py: -------------------------------------------------------------------------------- 1 | from typing import List, Tuple, Union 2 | 3 | from batchgenerators.transforms.abstract_transforms import AbstractTransform 4 | import numpy as np 5 | 6 | 7 | class ConvertSegmentationToRegionsTransform(AbstractTransform): 8 | def __init__(self, regions: Union[List, Tuple], 9 | seg_key: str = "seg", output_key: str = "seg", seg_channel: int = 0): 10 | """ 11 | regions are tuple of tuples where each inner tuple holds the class indices that are merged into one region, 12 | example: 13 | regions= ((1, 2), (2, )) will result in 2 regions: one covering the region of labels 1&2 and the other just 2 14 | :param regions: 15 | :param seg_key: 16 | :param output_key: 17 | """ 18 | self.seg_channel = seg_channel 19 | self.output_key = output_key 20 | self.seg_key = seg_key 21 | self.regions = regions 22 | 23 | def __call__(self, **data_dict): 24 | seg = data_dict.get(self.seg_key) 25 | num_regions = len(self.regions) 26 | if seg is not None: 27 | seg_shp = seg.shape 28 | output_shape = list(seg_shp) 29 | output_shape[1] = num_regions 30 | region_output = np.zeros(output_shape, dtype=seg.dtype) 31 | for b in range(seg_shp[0]): 32 | for region_id, region_source_labels in enumerate(self.regions): 33 | if not isinstance(region_source_labels, (list, tuple)): 34 | region_source_labels = (region_source_labels, ) 35 | for label_value in region_source_labels: 36 | region_output[b, region_id][seg[b, self.seg_channel] == label_value] = 1 37 | data_dict[self.output_key] = region_output 38 | return data_dict 39 | -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/training/data_augmentation/custom_transforms/transforms_for_dummy_2d.py: -------------------------------------------------------------------------------- 1 | from typing import Tuple, Union, List 2 | 3 | from batchgenerators.transforms.abstract_transforms import AbstractTransform 4 | 5 | 6 | class Convert3DTo2DTransform(AbstractTransform): 7 | def __init__(self, apply_to_keys: Union[List[str], Tuple[str]] = ('data', 'seg')): 8 | """ 9 | Transforms a 5D array (b, c, x, y, z) to a 4D array (b, c * x, y, z) by overloading the color channel 10 | """ 11 | self.apply_to_keys = apply_to_keys 12 | 13 | def __call__(self, **data_dict): 14 | for k in self.apply_to_keys: 15 | shp = data_dict[k].shape 16 | assert len(shp) == 5, 'This transform only works on 3D data, so expects 5D tensor (b, c, x, y, z) as input.' 17 | data_dict[k] = data_dict[k].reshape((shp[0], shp[1] * shp[2], shp[3], shp[4])) 18 | shape_key = f'orig_shape_{k}' 19 | assert shape_key not in data_dict.keys(), f'Convert3DTo2DTransform needs to store the original shape. ' \ 20 | f'It does that using the {shape_key} key. That key is ' \ 21 | f'already taken. Bummer.' 22 | data_dict[shape_key] = shp 23 | return data_dict 24 | 25 | 26 | class Convert2DTo3DTransform(AbstractTransform): 27 | def __init__(self, apply_to_keys: Union[List[str], Tuple[str]] = ('data', 'seg')): 28 | """ 29 | Reverts Convert3DTo2DTransform by transforming a 4D array (b, c * x, y, z) back to 5D (b, c, x, y, z) 30 | """ 31 | self.apply_to_keys = apply_to_keys 32 | 33 | def __call__(self, **data_dict): 34 | for k in self.apply_to_keys: 35 | shape_key = f'orig_shape_{k}' 36 | assert shape_key in data_dict.keys(), f'Did not find key {shape_key} in data_dict. Shitty. ' \ 37 | f'Convert2DTo3DTransform only works in tandem with ' \ 38 | f'Convert3DTo2DTransform and you probably forgot to add ' \ 39 | f'Convert3DTo2DTransform to your pipeline. (Convert3DTo2DTransform ' \ 40 | f'is where the missing key is generated)' 41 | original_shape = data_dict[shape_key] 42 | current_shape = data_dict[k].shape 43 | data_dict[k] = data_dict[k].reshape((original_shape[0], original_shape[1], original_shape[2], 44 | current_shape[-2], current_shape[-1])) 45 | return data_dict 46 | -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/training/dataloading/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/training/dataloading/__init__.py -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/training/dataloading/__pycache__/__init__.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/training/dataloading/__pycache__/__init__.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/training/dataloading/__pycache__/base_data_loader.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/training/dataloading/__pycache__/base_data_loader.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/training/dataloading/__pycache__/data_loader_2d.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/training/dataloading/__pycache__/data_loader_2d.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/training/dataloading/__pycache__/data_loader_3d.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/training/dataloading/__pycache__/data_loader_3d.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/training/dataloading/__pycache__/nnunet_dataset.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/training/dataloading/__pycache__/nnunet_dataset.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/training/dataloading/__pycache__/utils.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/training/dataloading/__pycache__/utils.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/training/dataloading/data_loader_3d.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from nnunetv2.training.dataloading.base_data_loader import nnUNetDataLoaderBase 3 | from nnunetv2.training.dataloading.nnunet_dataset import nnUNetDataset 4 | 5 | 6 | class nnUNetDataLoader3D(nnUNetDataLoaderBase): 7 | def generate_train_batch(self): 8 | selected_keys = self.get_indices() 9 | # preallocate memory for data and seg 10 | data_all = np.zeros(self.data_shape, dtype=np.float32) 11 | seg_all = np.zeros(self.seg_shape, dtype=np.int16) 12 | case_properties = [] 13 | 14 | for j, i in enumerate(selected_keys): 15 | # oversampling foreground will improve stability of model training, especially if many patches are empty 16 | # (Lung for example) 17 | force_fg = self.get_do_oversample(j) 18 | 19 | data, seg, properties = self._data.load_case(i) 20 | case_properties.append(properties) 21 | 22 | # If we are doing the cascade then the segmentation from the previous stage will already have been loaded by 23 | # self._data.load_case(i) (see nnUNetDataset.load_case) 24 | shape = data.shape[1:] 25 | dim = len(shape) 26 | bbox_lbs, bbox_ubs = self.get_bbox(shape, force_fg, properties['class_locations']) 27 | 28 | # whoever wrote this knew what he was doing (hint: it was me). We first crop the data to the region of the 29 | # bbox that actually lies within the data. This will result in a smaller array which is then faster to pad. 30 | # valid_bbox is just the coord that lied within the data cube. It will be padded to match the patch size 31 | # later 32 | valid_bbox_lbs = [max(0, bbox_lbs[i]) for i in range(dim)] 33 | valid_bbox_ubs = [min(shape[i], bbox_ubs[i]) for i in range(dim)] 34 | 35 | # At this point you might ask yourself why we would treat seg differently from seg_from_previous_stage. 36 | # Why not just concatenate them here and forget about the if statements? Well that's because segneeds to 37 | # be padded with -1 constant whereas seg_from_previous_stage needs to be padded with 0s (we could also 38 | # remove label -1 in the data augmentation but this way it is less error prone) 39 | this_slice = tuple([slice(0, data.shape[0])] + [slice(i, j) for i, j in zip(valid_bbox_lbs, valid_bbox_ubs)]) 40 | data = data[this_slice] 41 | 42 | this_slice = tuple([slice(0, seg.shape[0])] + [slice(i, j) for i, j in zip(valid_bbox_lbs, valid_bbox_ubs)]) 43 | seg = seg[this_slice] 44 | 45 | padding = [(-min(0, bbox_lbs[i]), max(bbox_ubs[i] - shape[i], 0)) for i in range(dim)] 46 | data_all[j] = np.pad(data, ((0, 0), *padding), 'constant', constant_values=0) 47 | seg_all[j] = np.pad(seg, ((0, 0), *padding), 'constant', constant_values=-1) 48 | 49 | return {'data': data_all, 'seg': seg_all, 'properties': case_properties, 'keys': selected_keys} 50 | 51 | 52 | if __name__ == '__main__': 53 | folder = '/media/fabian/data/nnUNet_preprocessed/Dataset002_Heart/3d_fullres' 54 | ds = nnUNetDataset(folder, 0) # this should not load the properties! 55 | dl = nnUNetDataLoader3D(ds, 5, (16, 16, 16), (16, 16, 16), 0.33, None, None) 56 | a = next(dl) 57 | -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/training/logging/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/training/logging/__init__.py -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/training/logging/__pycache__/__init__.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/training/logging/__pycache__/__init__.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/training/logging/__pycache__/nnunet_logger.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/training/logging/__pycache__/nnunet_logger.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/training/loss/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/training/loss/__init__.py -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/training/loss/__pycache__/__init__.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/training/loss/__pycache__/__init__.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/training/loss/__pycache__/compound_losses.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/training/loss/__pycache__/compound_losses.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/training/loss/__pycache__/deep_supervision.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/training/loss/__pycache__/deep_supervision.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/training/loss/__pycache__/dice.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/training/loss/__pycache__/dice.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/training/loss/__pycache__/robust_ce_loss.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/training/loss/__pycache__/robust_ce_loss.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/training/loss/deep_supervision.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from torch import nn 3 | 4 | 5 | class DeepSupervisionWrapper(nn.Module): 6 | def __init__(self, loss, weight_factors=None): 7 | """ 8 | Wraps a loss function so that it can be applied to multiple outputs. Forward accepts an arbitrary number of 9 | inputs. Each input is expected to be a tuple/list. Each tuple/list must have the same length. The loss is then 10 | applied to each entry like this: 11 | l = w0 * loss(input0[0], input1[0], ...) + w1 * loss(input0[1], input1[1], ...) + ... 12 | If weights are None, all w will be 1. 13 | """ 14 | super(DeepSupervisionWrapper, self).__init__() 15 | assert any([x != 0 for x in weight_factors]), "At least one weight factor should be != 0.0" 16 | self.weight_factors = tuple(weight_factors) 17 | self.loss = loss 18 | 19 | def forward(self, *args): 20 | assert all([isinstance(i, (tuple, list)) for i in args]), \ 21 | f"all args must be either tuple or list, got {[type(i) for i in args]}" 22 | # we could check for equal lengths here as well, but we really shouldn't overdo it with checks because 23 | # this code is executed a lot of times! 24 | 25 | if self.weight_factors is None: 26 | weights = (1, ) * len(args[0]) 27 | else: 28 | weights = self.weight_factors 29 | 30 | return sum([weights[i] * self.loss(*inputs) for i, inputs in enumerate(zip(*args)) if weights[i] != 0.0]) 31 | -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/training/loss/robust_ce_loss.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from torch import nn, Tensor 3 | import numpy as np 4 | 5 | 6 | class RobustCrossEntropyLoss(nn.CrossEntropyLoss): 7 | """ 8 | this is just a compatibility layer because my target tensor is float and has an extra dimension 9 | 10 | input must be logits, not probabilities! 11 | """ 12 | def forward(self, input: Tensor, target: Tensor) -> Tensor: 13 | if target.ndim == input.ndim: 14 | assert target.shape[1] == 1 15 | target = target[:, 0] 16 | return super().forward(input, target.long()) 17 | 18 | 19 | class TopKLoss(RobustCrossEntropyLoss): 20 | """ 21 | input must be logits, not probabilities! 22 | """ 23 | def __init__(self, weight=None, ignore_index: int = -100, k: float = 10, label_smoothing: float = 0): 24 | self.k = k 25 | super(TopKLoss, self).__init__(weight, False, ignore_index, reduce=False, label_smoothing=label_smoothing) 26 | 27 | def forward(self, inp, target): 28 | target = target[:, 0].long() 29 | res = super(TopKLoss, self).forward(inp, target) 30 | num_voxels = np.prod(res.shape, dtype=np.int64) 31 | res, _ = torch.topk(res.view((-1, )), int(num_voxels * self.k / 100), sorted=False) 32 | return res.mean() 33 | -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/training/lr_scheduler/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/training/lr_scheduler/__init__.py -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/training/lr_scheduler/__pycache__/__init__.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/training/lr_scheduler/__pycache__/__init__.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/training/lr_scheduler/__pycache__/polylr.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/training/lr_scheduler/__pycache__/polylr.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/training/lr_scheduler/polylr.py: -------------------------------------------------------------------------------- 1 | from torch.optim.lr_scheduler import _LRScheduler 2 | 3 | 4 | class PolyLRScheduler(_LRScheduler): 5 | def __init__(self, optimizer, initial_lr: float, max_steps: int, exponent: float = 0.9, current_step: int = None): 6 | self.optimizer = optimizer 7 | self.initial_lr = initial_lr 8 | self.max_steps = max_steps 9 | self.exponent = exponent 10 | self.ctr = 0 11 | super().__init__(optimizer, current_step if current_step is not None else -1, False) 12 | 13 | def step(self, current_step=None): 14 | if current_step is None or current_step == -1: 15 | current_step = self.ctr 16 | self.ctr += 1 17 | 18 | new_lr = self.initial_lr * (1 - current_step / self.max_steps) ** self.exponent 19 | for param_group in self.optimizer.param_groups: 20 | param_group['lr'] = new_lr 21 | -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/training/nnUNetTrainer/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/training/nnUNetTrainer/__init__.py -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/training/nnUNetTrainer/__pycache__/__init__.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/training/nnUNetTrainer/__pycache__/__init__.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/training/nnUNetTrainer/__pycache__/nnUNetTrainer.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/training/nnUNetTrainer/__pycache__/nnUNetTrainer.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/training/nnUNetTrainer/__pycache__/nnUNetTrainer_swin.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/training/nnUNetTrainer/__pycache__/nnUNetTrainer_swin.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/training/nnUNetTrainer/__pycache__/vit.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/training/nnUNetTrainer/__pycache__/vit.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/training/nnUNetTrainer/variants/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/training/nnUNetTrainer/variants/__init__.py -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/training/nnUNetTrainer/variants/__pycache__/__init__.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/training/nnUNetTrainer/variants/__pycache__/__init__.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/training/nnUNetTrainer/variants/benchmarking/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/training/nnUNetTrainer/variants/benchmarking/__init__.py -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/training/nnUNetTrainer/variants/benchmarking/__pycache__/__init__.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/training/nnUNetTrainer/variants/benchmarking/__pycache__/__init__.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/training/nnUNetTrainer/variants/benchmarking/__pycache__/nnUNetTrainerBenchmark_5epochs.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/training/nnUNetTrainer/variants/benchmarking/__pycache__/nnUNetTrainerBenchmark_5epochs.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/training/nnUNetTrainer/variants/benchmarking/__pycache__/nnUNetTrainerBenchmark_5epochs_noDataLoading.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/training/nnUNetTrainer/variants/benchmarking/__pycache__/nnUNetTrainerBenchmark_5epochs_noDataLoading.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/training/nnUNetTrainer/variants/benchmarking/nnUNetTrainerBenchmark_5epochs.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from batchgenerators.utilities.file_and_folder_operations import save_json, join, isfile, load_json 3 | 4 | from nnunetv2.training.nnUNetTrainer.nnUNetTrainer import nnUNetTrainer 5 | from torch import distributed as dist 6 | 7 | 8 | class nnUNetTrainerBenchmark_5epochs(nnUNetTrainer): 9 | def __init__(self, plans: dict, configuration: str, fold: int, dataset_json: dict, unpack_dataset: bool = True, 10 | device: torch.device = torch.device('cuda')): 11 | super().__init__(plans, configuration, fold, dataset_json, unpack_dataset, device) 12 | assert self.fold == 0, "It makes absolutely no sense to specify a certain fold. Stick with 0 so that we can parse the results." 13 | self.disable_checkpointing = True 14 | self.num_epochs = 5 15 | assert torch.cuda.is_available(), "This only works on GPU" 16 | self.crashed_with_runtime_error = False 17 | 18 | def perform_actual_validation(self, save_probabilities: bool = False): 19 | pass 20 | 21 | def save_checkpoint(self, filename: str) -> None: 22 | # do not trust people to remember that self.disable_checkpointing must be True for this trainer 23 | pass 24 | 25 | def run_training(self): 26 | try: 27 | super().run_training() 28 | except RuntimeError: 29 | self.crashed_with_runtime_error = True 30 | 31 | def on_train_end(self): 32 | super().on_train_end() 33 | 34 | if not self.is_ddp or self.local_rank == 0: 35 | torch_version = torch.__version__ 36 | cudnn_version = torch.backends.cudnn.version() 37 | gpu_name = torch.cuda.get_device_name() 38 | if self.crashed_with_runtime_error: 39 | fastest_epoch = 'Not enough VRAM!' 40 | else: 41 | epoch_times = [i - j for i, j in zip(self.logger.my_fantastic_logging['epoch_end_timestamps'], 42 | self.logger.my_fantastic_logging['epoch_start_timestamps'])] 43 | fastest_epoch = min(epoch_times) 44 | 45 | if self.is_ddp: 46 | num_gpus = dist.get_world_size() 47 | else: 48 | num_gpus = 1 49 | 50 | benchmark_result_file = join(self.output_folder, 'benchmark_result.json') 51 | if isfile(benchmark_result_file): 52 | old_results = load_json(benchmark_result_file) 53 | else: 54 | old_results = {} 55 | # generate some unique key 56 | my_key = f"{cudnn_version}__{torch_version.replace(' ', '')}__{gpu_name.replace(' ', '')}__gpus_{num_gpus}" 57 | old_results[my_key] = { 58 | 'torch_version': torch_version, 59 | 'cudnn_version': cudnn_version, 60 | 'gpu_name': gpu_name, 61 | 'fastest_epoch': fastest_epoch, 62 | 'num_gpus': num_gpus, 63 | } 64 | save_json(old_results, 65 | join(self.output_folder, 'benchmark_result.json')) 66 | -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/training/nnUNetTrainer/variants/benchmarking/nnUNetTrainerBenchmark_5epochs_noDataLoading.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | from nnunetv2.training.nnUNetTrainer.variants.benchmarking.nnUNetTrainerBenchmark_5epochs import ( 4 | nnUNetTrainerBenchmark_5epochs, 5 | ) 6 | from nnunetv2.utilities.label_handling.label_handling import determine_num_input_channels 7 | 8 | 9 | class nnUNetTrainerBenchmark_5epochs_noDataLoading(nnUNetTrainerBenchmark_5epochs): 10 | def __init__( 11 | self, 12 | plans: dict, 13 | configuration: str, 14 | fold: int, 15 | dataset_json: dict, 16 | unpack_dataset: bool = True, 17 | device: torch.device = torch.device("cuda"), 18 | ): 19 | super().__init__(plans, configuration, fold, dataset_json, unpack_dataset, device) 20 | self._set_batch_size_and_oversample() 21 | num_input_channels = determine_num_input_channels( 22 | self.plans_manager, self.configuration_manager, self.dataset_json 23 | ) 24 | patch_size = self.configuration_manager.patch_size 25 | dummy_data = torch.rand((self.batch_size, num_input_channels, *patch_size), device=self.device) 26 | if self.enable_deep_supervision: 27 | dummy_target = [ 28 | torch.round( 29 | torch.rand((self.batch_size, 1, *[int(i * j) for i, j in zip(patch_size, k)]), device=self.device) 30 | * max(self.label_manager.all_labels) 31 | ) 32 | for k in self._get_deep_supervision_scales() 33 | ] 34 | else: 35 | raise NotImplementedError("This trainer does not support deep supervision") 36 | self.dummy_batch = {"data": dummy_data, "target": dummy_target} 37 | 38 | def get_dataloaders(self): 39 | return None, None 40 | 41 | def run_training(self): 42 | try: 43 | self.on_train_start() 44 | 45 | for epoch in range(self.current_epoch, self.num_epochs): 46 | self.on_epoch_start() 47 | 48 | self.on_train_epoch_start() 49 | train_outputs = [] 50 | for batch_id in range(self.num_iterations_per_epoch): 51 | train_outputs.append(self.train_step(self.dummy_batch)) 52 | self.on_train_epoch_end(train_outputs) 53 | 54 | with torch.no_grad(): 55 | self.on_validation_epoch_start() 56 | val_outputs = [] 57 | for batch_id in range(self.num_val_iterations_per_epoch): 58 | val_outputs.append(self.validation_step(self.dummy_batch)) 59 | self.on_validation_epoch_end(val_outputs) 60 | 61 | self.on_epoch_end() 62 | 63 | self.on_train_end() 64 | except RuntimeError: 65 | self.crashed_with_runtime_error = True 66 | -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/training/nnUNetTrainer/variants/data_augmentation/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/training/nnUNetTrainer/variants/data_augmentation/__init__.py -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/training/nnUNetTrainer/variants/data_augmentation/__pycache__/__init__.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/training/nnUNetTrainer/variants/data_augmentation/__pycache__/__init__.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/training/nnUNetTrainer/variants/data_augmentation/__pycache__/nnUNetTrainerDA5.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/training/nnUNetTrainer/variants/data_augmentation/__pycache__/nnUNetTrainerDA5.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/training/nnUNetTrainer/variants/data_augmentation/__pycache__/nnUNetTrainerDAOrd0.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/training/nnUNetTrainer/variants/data_augmentation/__pycache__/nnUNetTrainerDAOrd0.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/training/nnUNetTrainer/variants/data_augmentation/__pycache__/nnUNetTrainerNoDA.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/training/nnUNetTrainer/variants/data_augmentation/__pycache__/nnUNetTrainerNoDA.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/training/nnUNetTrainer/variants/data_augmentation/__pycache__/nnUNetTrainerNoMirroring.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/training/nnUNetTrainer/variants/data_augmentation/__pycache__/nnUNetTrainerNoMirroring.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/training/nnUNetTrainer/variants/data_augmentation/nnUNetTrainerNoDA.py: -------------------------------------------------------------------------------- 1 | from typing import Union, Tuple, List 2 | 3 | from batchgenerators.transforms.abstract_transforms import AbstractTransform 4 | 5 | from nnunetv2.training.nnUNetTrainer.nnUNetTrainer import nnUNetTrainer 6 | import numpy as np 7 | 8 | 9 | class nnUNetTrainerNoDA(nnUNetTrainer): 10 | @staticmethod 11 | def get_training_transforms(patch_size: Union[np.ndarray, Tuple[int]], 12 | rotation_for_DA: dict, 13 | deep_supervision_scales: Union[List, Tuple, None], 14 | mirror_axes: Tuple[int, ...], 15 | do_dummy_2d_data_aug: bool, 16 | order_resampling_data: int = 1, 17 | order_resampling_seg: int = 0, 18 | border_val_seg: int = -1, 19 | use_mask_for_norm: List[bool] = None, 20 | is_cascaded: bool = False, 21 | foreground_labels: Union[Tuple[int, ...], List[int]] = None, 22 | regions: List[Union[List[int], Tuple[int, ...], int]] = None, 23 | ignore_label: int = None) -> AbstractTransform: 24 | return nnUNetTrainer.get_validation_transforms(deep_supervision_scales, is_cascaded, foreground_labels, 25 | regions, ignore_label) 26 | 27 | def get_plain_dataloaders(self, initial_patch_size: Tuple[int, ...], dim: int): 28 | return super().get_plain_dataloaders( 29 | initial_patch_size=self.configuration_manager.patch_size, 30 | dim=dim 31 | ) 32 | 33 | def configure_rotation_dummyDA_mirroring_and_inital_patch_size(self): 34 | # we need to disable mirroring here so that no mirroring will be applied in inferene! 35 | rotation_for_DA, do_dummy_2d_data_aug, initial_patch_size, mirror_axes = \ 36 | super().configure_rotation_dummyDA_mirroring_and_inital_patch_size() 37 | mirror_axes = None 38 | self.inference_allowed_mirroring_axes = None 39 | return rotation_for_DA, do_dummy_2d_data_aug, initial_patch_size, mirror_axes 40 | 41 | -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/training/nnUNetTrainer/variants/data_augmentation/nnUNetTrainerNoMirroring.py: -------------------------------------------------------------------------------- 1 | from nnunetv2.training.nnUNetTrainer.nnUNetTrainer import nnUNetTrainer 2 | 3 | 4 | class nnUNetTrainerNoMirroring(nnUNetTrainer): 5 | def configure_rotation_dummyDA_mirroring_and_inital_patch_size(self): 6 | rotation_for_DA, do_dummy_2d_data_aug, initial_patch_size, mirror_axes = \ 7 | super().configure_rotation_dummyDA_mirroring_and_inital_patch_size() 8 | mirror_axes = None 9 | self.inference_allowed_mirroring_axes = None 10 | return rotation_for_DA, do_dummy_2d_data_aug, initial_patch_size, mirror_axes 11 | 12 | 13 | class nnUNetTrainer_onlyMirror01(nnUNetTrainer): 14 | """ 15 | Only mirrors along spatial axes 0 and 1 for 3D and 0 for 2D 16 | """ 17 | def configure_rotation_dummyDA_mirroring_and_inital_patch_size(self): 18 | rotation_for_DA, do_dummy_2d_data_aug, initial_patch_size, mirror_axes = \ 19 | super().configure_rotation_dummyDA_mirroring_and_inital_patch_size() 20 | patch_size = self.configuration_manager.patch_size 21 | dim = len(patch_size) 22 | if dim == 2: 23 | mirror_axes = (0, ) 24 | else: 25 | mirror_axes = (0, 1) 26 | self.inference_allowed_mirroring_axes = mirror_axes 27 | return rotation_for_DA, do_dummy_2d_data_aug, initial_patch_size, mirror_axes 28 | 29 | -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/training/nnUNetTrainer/variants/loss/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/training/nnUNetTrainer/variants/loss/__init__.py -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/training/nnUNetTrainer/variants/loss/__pycache__/__init__.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/training/nnUNetTrainer/variants/loss/__pycache__/__init__.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/training/nnUNetTrainer/variants/loss/__pycache__/nnUNetTrainerCELoss.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/training/nnUNetTrainer/variants/loss/__pycache__/nnUNetTrainerCELoss.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/training/nnUNetTrainer/variants/loss/__pycache__/nnUNetTrainerDiceLoss.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/training/nnUNetTrainer/variants/loss/__pycache__/nnUNetTrainerDiceLoss.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/training/nnUNetTrainer/variants/loss/__pycache__/nnUNetTrainerTopkLoss.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/training/nnUNetTrainer/variants/loss/__pycache__/nnUNetTrainerTopkLoss.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/training/nnUNetTrainer/variants/loss/nnUNetTrainerCELoss.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from nnunetv2.training.loss.deep_supervision import DeepSupervisionWrapper 3 | from nnunetv2.training.nnUNetTrainer.nnUNetTrainer import nnUNetTrainer 4 | from nnunetv2.training.loss.robust_ce_loss import RobustCrossEntropyLoss 5 | import numpy as np 6 | 7 | 8 | class nnUNetTrainerCELoss(nnUNetTrainer): 9 | def _build_loss(self): 10 | assert not self.label_manager.has_regions, "regions not supported by this trainer" 11 | loss = RobustCrossEntropyLoss( 12 | weight=None, ignore_index=self.label_manager.ignore_label if self.label_manager.has_ignore_label else -100 13 | ) 14 | 15 | # we give each output a weight which decreases exponentially (division by 2) as the resolution decreases 16 | # this gives higher resolution outputs more weight in the loss 17 | if self.enable_deep_supervision: 18 | deep_supervision_scales = self._get_deep_supervision_scales() 19 | weights = np.array([1 / (2**i) for i in range(len(deep_supervision_scales))]) 20 | weights[-1] = 0 21 | 22 | # we don't use the lowest 2 outputs. Normalize weights so that they sum to 1 23 | weights = weights / weights.sum() 24 | # now wrap the loss 25 | loss = DeepSupervisionWrapper(loss, weights) 26 | return loss 27 | 28 | 29 | class nnUNetTrainerCELoss_5epochs(nnUNetTrainerCELoss): 30 | def __init__( 31 | self, 32 | plans: dict, 33 | configuration: str, 34 | fold: int, 35 | dataset_json: dict, 36 | unpack_dataset: bool = True, 37 | device: torch.device = torch.device("cuda"), 38 | ): 39 | """used for debugging plans etc""" 40 | super().__init__(plans, configuration, fold, dataset_json, unpack_dataset, device) 41 | self.num_epochs = 5 42 | -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/training/nnUNetTrainer/variants/loss/nnUNetTrainerDiceLoss.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch 3 | 4 | from nnunetv2.training.loss.compound_losses import DC_and_BCE_loss, DC_and_CE_loss 5 | from nnunetv2.training.loss.deep_supervision import DeepSupervisionWrapper 6 | from nnunetv2.training.loss.dice import MemoryEfficientSoftDiceLoss 7 | from nnunetv2.training.nnUNetTrainer.nnUNetTrainer import nnUNetTrainer 8 | from nnunetv2.utilities.helpers import softmax_helper_dim1 9 | 10 | 11 | class nnUNetTrainerDiceLoss(nnUNetTrainer): 12 | def _build_loss(self): 13 | loss = MemoryEfficientSoftDiceLoss(**{'batch_dice': self.configuration_manager.batch_dice, 14 | 'do_bg': self.label_manager.has_regions, 'smooth': 1e-5, 'ddp': self.is_ddp}, 15 | apply_nonlin=torch.sigmoid if self.label_manager.has_regions else softmax_helper_dim1) 16 | 17 | if self.enable_deep_supervision: 18 | deep_supervision_scales = self._get_deep_supervision_scales() 19 | 20 | # we give each output a weight which decreases exponentially (division by 2) as the resolution decreases 21 | # this gives higher resolution outputs more weight in the loss 22 | weights = np.array([1 / (2 ** i) for i in range(len(deep_supervision_scales))]) 23 | weights[-1] = 0 24 | 25 | # we don't use the lowest 2 outputs. Normalize weights so that they sum to 1 26 | weights = weights / weights.sum() 27 | # now wrap the loss 28 | loss = DeepSupervisionWrapper(loss, weights) 29 | return loss 30 | 31 | 32 | class nnUNetTrainerDiceCELoss_noSmooth(nnUNetTrainer): 33 | def _build_loss(self): 34 | # set smooth to 0 35 | if self.label_manager.has_regions: 36 | loss = DC_and_BCE_loss({}, 37 | {'batch_dice': self.configuration_manager.batch_dice, 38 | 'do_bg': True, 'smooth': 0, 'ddp': self.is_ddp}, 39 | use_ignore_label=self.label_manager.ignore_label is not None, 40 | dice_class=MemoryEfficientSoftDiceLoss) 41 | else: 42 | loss = DC_and_CE_loss({'batch_dice': self.configuration_manager.batch_dice, 43 | 'smooth': 0, 'do_bg': False, 'ddp': self.is_ddp}, {}, weight_ce=1, weight_dice=1, 44 | ignore_label=self.label_manager.ignore_label, 45 | dice_class=MemoryEfficientSoftDiceLoss) 46 | 47 | if self.enable_deep_supervision: 48 | deep_supervision_scales = self._get_deep_supervision_scales() 49 | 50 | # we give each output a weight which decreases exponentially (division by 2) as the resolution decreases 51 | # this gives higher resolution outputs more weight in the loss 52 | weights = np.array([1 / (2 ** i) for i in range(len(deep_supervision_scales))]) 53 | weights[-1] = 0 54 | 55 | # we don't use the lowest 2 outputs. Normalize weights so that they sum to 1 56 | weights = weights / weights.sum() 57 | # now wrap the loss 58 | loss = DeepSupervisionWrapper(loss, weights) 59 | return loss 60 | 61 | -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/training/nnUNetTrainer/variants/lr_schedule/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/training/nnUNetTrainer/variants/lr_schedule/__init__.py -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/training/nnUNetTrainer/variants/lr_schedule/__pycache__/__init__.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/training/nnUNetTrainer/variants/lr_schedule/__pycache__/__init__.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/training/nnUNetTrainer/variants/lr_schedule/__pycache__/nnUNetTrainerCosAnneal.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/training/nnUNetTrainer/variants/lr_schedule/__pycache__/nnUNetTrainerCosAnneal.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/training/nnUNetTrainer/variants/lr_schedule/nnUNetTrainerCosAnneal.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from torch.optim.lr_scheduler import CosineAnnealingLR 3 | 4 | from nnunetv2.training.nnUNetTrainer.nnUNetTrainer import nnUNetTrainer 5 | 6 | 7 | class nnUNetTrainerCosAnneal(nnUNetTrainer): 8 | def configure_optimizers(self): 9 | optimizer = torch.optim.SGD(self.network.parameters(), self.initial_lr, weight_decay=self.weight_decay, 10 | momentum=0.99, nesterov=True) 11 | lr_scheduler = CosineAnnealingLR(optimizer, T_max=self.num_epochs) 12 | return optimizer, lr_scheduler 13 | 14 | -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/training/nnUNetTrainer/variants/network_architecture/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/training/nnUNetTrainer/variants/network_architecture/__init__.py -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/training/nnUNetTrainer/variants/network_architecture/__pycache__/__init__.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/training/nnUNetTrainer/variants/network_architecture/__pycache__/__init__.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/training/nnUNetTrainer/variants/network_architecture/__pycache__/nnUNetTrainerBN.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/training/nnUNetTrainer/variants/network_architecture/__pycache__/nnUNetTrainerBN.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/training/nnUNetTrainer/variants/network_architecture/__pycache__/nnUNetTrainerNoDeepSupervision.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/training/nnUNetTrainer/variants/network_architecture/__pycache__/nnUNetTrainerNoDeepSupervision.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/training/nnUNetTrainer/variants/network_architecture/nnUNetTrainerNoDeepSupervision.py: -------------------------------------------------------------------------------- 1 | from nnunetv2.training.nnUNetTrainer.nnUNetTrainer import nnUNetTrainer 2 | import torch 3 | 4 | 5 | class nnUNetTrainerNoDeepSupervision(nnUNetTrainer): 6 | def __init__( 7 | self, 8 | plans: dict, 9 | configuration: str, 10 | fold: int, 11 | dataset_json: dict, 12 | unpack_dataset: bool = True, 13 | device: torch.device = torch.device("cuda"), 14 | ): 15 | super().__init__(plans, configuration, fold, dataset_json, unpack_dataset, device) 16 | self.enable_deep_supervision = False 17 | -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/training/nnUNetTrainer/variants/optimizer/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/training/nnUNetTrainer/variants/optimizer/__init__.py -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/training/nnUNetTrainer/variants/optimizer/__pycache__/__init__.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/training/nnUNetTrainer/variants/optimizer/__pycache__/__init__.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/training/nnUNetTrainer/variants/optimizer/__pycache__/nnUNetTrainerAdam.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/training/nnUNetTrainer/variants/optimizer/__pycache__/nnUNetTrainerAdam.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/training/nnUNetTrainer/variants/optimizer/__pycache__/nnUNetTrainerAdan.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/training/nnUNetTrainer/variants/optimizer/__pycache__/nnUNetTrainerAdan.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/training/nnUNetTrainer/variants/optimizer/nnUNetTrainerAdam.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from torch.optim import Adam, AdamW 3 | 4 | from nnunetv2.training.lr_scheduler.polylr import PolyLRScheduler 5 | from nnunetv2.training.nnUNetTrainer.nnUNetTrainer import nnUNetTrainer 6 | 7 | 8 | class nnUNetTrainerAdam(nnUNetTrainer): 9 | def configure_optimizers(self): 10 | optimizer = AdamW(self.network.parameters(), 11 | lr=self.initial_lr, 12 | weight_decay=self.weight_decay, 13 | amsgrad=True) 14 | # optimizer = torch.optim.SGD(self.network.parameters(), self.initial_lr, weight_decay=self.weight_decay, 15 | # momentum=0.99, nesterov=True) 16 | lr_scheduler = PolyLRScheduler(optimizer, self.initial_lr, self.num_epochs) 17 | return optimizer, lr_scheduler 18 | 19 | 20 | class nnUNetTrainerVanillaAdam(nnUNetTrainer): 21 | def configure_optimizers(self): 22 | optimizer = Adam(self.network.parameters(), 23 | lr=self.initial_lr, 24 | weight_decay=self.weight_decay) 25 | # optimizer = torch.optim.SGD(self.network.parameters(), self.initial_lr, weight_decay=self.weight_decay, 26 | # momentum=0.99, nesterov=True) 27 | lr_scheduler = PolyLRScheduler(optimizer, self.initial_lr, self.num_epochs) 28 | return optimizer, lr_scheduler 29 | 30 | 31 | class nnUNetTrainerVanillaAdam1en3(nnUNetTrainerVanillaAdam): 32 | def __init__(self, plans: dict, configuration: str, fold: int, dataset_json: dict, unpack_dataset: bool = True, 33 | device: torch.device = torch.device('cuda')): 34 | super().__init__(plans, configuration, fold, dataset_json, unpack_dataset, device) 35 | self.initial_lr = 1e-3 36 | 37 | 38 | class nnUNetTrainerVanillaAdam3en4(nnUNetTrainerVanillaAdam): 39 | # https://twitter.com/karpathy/status/801621764144971776?lang=en 40 | def __init__(self, plans: dict, configuration: str, fold: int, dataset_json: dict, unpack_dataset: bool = True, 41 | device: torch.device = torch.device('cuda')): 42 | super().__init__(plans, configuration, fold, dataset_json, unpack_dataset, device) 43 | self.initial_lr = 3e-4 44 | 45 | 46 | class nnUNetTrainerAdam1en3(nnUNetTrainerAdam): 47 | def __init__(self, plans: dict, configuration: str, fold: int, dataset_json: dict, unpack_dataset: bool = True, 48 | device: torch.device = torch.device('cuda')): 49 | super().__init__(plans, configuration, fold, dataset_json, unpack_dataset, device) 50 | self.initial_lr = 1e-3 51 | 52 | 53 | class nnUNetTrainerAdam3en4(nnUNetTrainerAdam): 54 | # https://twitter.com/karpathy/status/801621764144971776?lang=en 55 | def __init__(self, plans: dict, configuration: str, fold: int, dataset_json: dict, unpack_dataset: bool = True, 56 | device: torch.device = torch.device('cuda')): 57 | super().__init__(plans, configuration, fold, dataset_json, unpack_dataset, device) 58 | self.initial_lr = 3e-4 59 | -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/training/nnUNetTrainer/variants/sampling/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/training/nnUNetTrainer/variants/sampling/__init__.py -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/training/nnUNetTrainer/variants/sampling/__pycache__/__init__.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/training/nnUNetTrainer/variants/sampling/__pycache__/__init__.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/training/nnUNetTrainer/variants/sampling/__pycache__/nnUNetTrainer_probabilisticOversampling.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/training/nnUNetTrainer/variants/sampling/__pycache__/nnUNetTrainer_probabilisticOversampling.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/training/nnUNetTrainer/variants/training_length/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/training/nnUNetTrainer/variants/training_length/__init__.py -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/training/nnUNetTrainer/variants/training_length/__pycache__/__init__.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/training/nnUNetTrainer/variants/training_length/__pycache__/__init__.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/training/nnUNetTrainer/variants/training_length/__pycache__/nnUNetTrainer_Xepochs.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/training/nnUNetTrainer/variants/training_length/__pycache__/nnUNetTrainer_Xepochs.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/utilities/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/utilities/__init__.py -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/utilities/__pycache__/__init__.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/utilities/__pycache__/__init__.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/utilities/__pycache__/__init__.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/utilities/__pycache__/__init__.cpython-39.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/utilities/__pycache__/collate_outputs.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/utilities/__pycache__/collate_outputs.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/utilities/__pycache__/dataset_name_id_conversion.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/utilities/__pycache__/dataset_name_id_conversion.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/utilities/__pycache__/dataset_name_id_conversion.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/utilities/__pycache__/dataset_name_id_conversion.cpython-39.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/utilities/__pycache__/ddp_allgather.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/utilities/__pycache__/ddp_allgather.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/utilities/__pycache__/default_n_proc_DA.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/utilities/__pycache__/default_n_proc_DA.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/utilities/__pycache__/default_n_proc_DA.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/utilities/__pycache__/default_n_proc_DA.cpython-39.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/utilities/__pycache__/file_path_utilities.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/utilities/__pycache__/file_path_utilities.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/utilities/__pycache__/find_class_by_name.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/utilities/__pycache__/find_class_by_name.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/utilities/__pycache__/find_class_by_name.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/utilities/__pycache__/find_class_by_name.cpython-39.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/utilities/__pycache__/get_network_from_plans.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/utilities/__pycache__/get_network_from_plans.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/utilities/__pycache__/helpers.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/utilities/__pycache__/helpers.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/utilities/__pycache__/helpers.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/utilities/__pycache__/helpers.cpython-39.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/utilities/__pycache__/json_export.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/utilities/__pycache__/json_export.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/utilities/__pycache__/json_export.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/utilities/__pycache__/json_export.cpython-39.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/utilities/__pycache__/network_initialization.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/utilities/__pycache__/network_initialization.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/utilities/__pycache__/utils.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/utilities/__pycache__/utils.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/utilities/__pycache__/utils.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/utilities/__pycache__/utils.cpython-39.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/utilities/collate_outputs.py: -------------------------------------------------------------------------------- 1 | from typing import List 2 | 3 | import numpy as np 4 | 5 | 6 | def collate_outputs(outputs: List[dict]): 7 | """ 8 | used to collate default train_step and validation_step outputs. If you want something different then you gotta 9 | extend this 10 | 11 | we expect outputs to be a list of dictionaries where each of the dict has the same set of keys 12 | """ 13 | collated = {} 14 | for k in outputs[0].keys(): 15 | if np.isscalar(outputs[0][k]): 16 | collated[k] = [o[k] for o in outputs] 17 | elif isinstance(outputs[0][k], np.ndarray): 18 | collated[k] = np.vstack([o[k][None] for o in outputs]) 19 | elif isinstance(outputs[0][k], list): 20 | collated[k] = [item for o in outputs for item in o[k]] 21 | else: 22 | raise ValueError(f'Cannot collate input of type {type(outputs[0][k])}. ' 23 | f'Modify collate_outputs to add this functionality') 24 | return collated -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/utilities/ddp_allgather.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | from typing import Any, Optional, Tuple 15 | 16 | import torch 17 | from torch import distributed 18 | 19 | 20 | def print_if_rank0(*args): 21 | if distributed.get_rank() == 0: 22 | print(*args) 23 | 24 | 25 | class AllGatherGrad(torch.autograd.Function): 26 | # stolen from pytorch lightning 27 | @staticmethod 28 | def forward( 29 | ctx: Any, 30 | tensor: torch.Tensor, 31 | group: Optional["torch.distributed.ProcessGroup"] = None, 32 | ) -> torch.Tensor: 33 | ctx.group = group 34 | 35 | gathered_tensor = [torch.zeros_like(tensor) for _ in range(torch.distributed.get_world_size())] 36 | 37 | torch.distributed.all_gather(gathered_tensor, tensor, group=group) 38 | gathered_tensor = torch.stack(gathered_tensor, dim=0) 39 | 40 | return gathered_tensor 41 | 42 | @staticmethod 43 | def backward(ctx: Any, *grad_output: torch.Tensor) -> Tuple[torch.Tensor, None]: 44 | grad_output = torch.cat(grad_output) 45 | 46 | torch.distributed.all_reduce(grad_output, op=torch.distributed.ReduceOp.SUM, async_op=False, group=ctx.group) 47 | 48 | return grad_output[torch.distributed.get_rank()], None 49 | 50 | -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/utilities/default_n_proc_DA.py: -------------------------------------------------------------------------------- 1 | import subprocess 2 | import os 3 | 4 | 5 | def get_allowed_n_proc_DA(): 6 | """ 7 | This function is used to set the number of processes used on different Systems. It is specific to our cluster 8 | infrastructure at DKFZ. You can modify it to suit your needs. Everything is allowed. 9 | 10 | IMPORTANT: if the environment variable nnUNet_n_proc_DA is set it will overwrite anything in this script 11 | (see first line). 12 | 13 | Interpret the output as the number of processes used for data augmentation PER GPU. 14 | 15 | The way it is implemented here is simply a look up table. We know the hostnames, CPU and GPU configurations of our 16 | systems and set the numbers accordingly. For example, a system with 4 GPUs and 48 threads can use 12 threads per 17 | GPU without overloading the CPU (technically 11 because we have a main process as well), so that's what we use. 18 | """ 19 | 20 | if 'nnUNet_n_proc_DA' in os.environ.keys(): 21 | use_this = int(os.environ['nnUNet_n_proc_DA']) 22 | else: 23 | hostname = subprocess.getoutput(['hostname']) 24 | if hostname in ['Fabian', ]: 25 | use_this = 12 26 | elif hostname in ['hdf19-gpu16', 'hdf19-gpu17', 'hdf19-gpu18', 'hdf19-gpu19', 'e230-AMDworkstation']: 27 | use_this = 16 28 | elif hostname.startswith('e230-dgx1'): 29 | use_this = 10 30 | elif hostname.startswith('hdf18-gpu') or hostname.startswith('e132-comp'): 31 | use_this = 16 32 | elif hostname.startswith('e230-dgx2'): 33 | use_this = 6 34 | elif hostname.startswith('e230-dgxa100-'): 35 | use_this = 28 36 | elif hostname.startswith('lsf22-gpu'): 37 | use_this = 28 38 | elif hostname.startswith('hdf19-gpu') or hostname.startswith('e071-gpu'): 39 | use_this = 12 40 | else: 41 | use_this = 12 # default value 42 | 43 | use_this = min(use_this, os.cpu_count()) 44 | return use_this 45 | -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/utilities/find_class_by_name.py: -------------------------------------------------------------------------------- 1 | import importlib 2 | import pkgutil 3 | 4 | from batchgenerators.utilities.file_and_folder_operations import * 5 | 6 | 7 | def recursive_find_python_class(folder: str, class_name: str, current_module: str): 8 | tr = None 9 | for importer, modname, ispkg in pkgutil.iter_modules([folder]): 10 | # print(modname, ispkg) 11 | if not ispkg: 12 | m = importlib.import_module(current_module + "." + modname) 13 | if hasattr(m, class_name): 14 | tr = getattr(m, class_name) 15 | break 16 | 17 | if tr is None: 18 | for importer, modname, ispkg in pkgutil.iter_modules([folder]): 19 | if ispkg: 20 | next_current_module = current_module + "." + modname 21 | tr = recursive_find_python_class(join(folder, modname), class_name, current_module=next_current_module) 22 | if tr is not None: 23 | break 24 | return tr -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/utilities/helpers.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | 4 | def softmax_helper_dim0(x: torch.Tensor) -> torch.Tensor: 5 | return torch.softmax(x, 0) 6 | 7 | 8 | def softmax_helper_dim1(x: torch.Tensor) -> torch.Tensor: 9 | return torch.softmax(x, 1) 10 | 11 | 12 | def empty_cache(device: torch.device): 13 | if device.type == 'cuda': 14 | torch.cuda.empty_cache() 15 | elif device.type == 'mps': 16 | from torch import mps 17 | mps.empty_cache() 18 | else: 19 | pass 20 | 21 | 22 | class dummy_context(object): 23 | def __enter__(self): 24 | pass 25 | 26 | def __exit__(self, exc_type, exc_val, exc_tb): 27 | pass 28 | -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/utilities/json_export.py: -------------------------------------------------------------------------------- 1 | from collections.abc import Iterable 2 | 3 | import numpy as np 4 | import torch 5 | 6 | 7 | def recursive_fix_for_json_export(my_dict: dict): 8 | # json is stupid. 'cannot serialize object of type bool_/int64/float64'. Come on bro. 9 | keys = list(my_dict.keys()) # cannot iterate over keys() if we change keys.... 10 | for k in keys: 11 | if isinstance(k, (np.int64, np.int32, np.int8, np.uint8)): 12 | tmp = my_dict[k] 13 | del my_dict[k] 14 | my_dict[int(k)] = tmp 15 | del tmp 16 | k = int(k) 17 | 18 | if isinstance(my_dict[k], dict): 19 | recursive_fix_for_json_export(my_dict[k]) 20 | elif isinstance(my_dict[k], np.ndarray): 21 | assert my_dict[k].ndim == 1, 'only 1d arrays are supported' 22 | my_dict[k] = fix_types_iterable(my_dict[k], output_type=list) 23 | elif isinstance(my_dict[k], (np.bool_,)): 24 | my_dict[k] = bool(my_dict[k]) 25 | elif isinstance(my_dict[k], (np.int64, np.int32, np.int8, np.uint8)): 26 | my_dict[k] = int(my_dict[k]) 27 | elif isinstance(my_dict[k], (np.float32, np.float64, np.float16)): 28 | my_dict[k] = float(my_dict[k]) 29 | elif isinstance(my_dict[k], list): 30 | my_dict[k] = fix_types_iterable(my_dict[k], output_type=type(my_dict[k])) 31 | elif isinstance(my_dict[k], tuple): 32 | my_dict[k] = fix_types_iterable(my_dict[k], output_type=tuple) 33 | elif isinstance(my_dict[k], torch.device): 34 | my_dict[k] = str(my_dict[k]) 35 | else: 36 | pass # pray it can be serialized 37 | 38 | 39 | def fix_types_iterable(iterable, output_type): 40 | # this sh!t is hacky as hell and will break if you use it for anything outside nnunet. Keep you hands off of this. 41 | out = [] 42 | for i in iterable: 43 | if type(i) in (np.int64, np.int32, np.int8, np.uint8): 44 | out.append(int(i)) 45 | elif isinstance(i, dict): 46 | recursive_fix_for_json_export(i) 47 | out.append(i) 48 | elif type(i) in (np.float32, np.float64, np.float16): 49 | out.append(float(i)) 50 | elif type(i) in (np.bool_,): 51 | out.append(bool(i)) 52 | elif isinstance(i, str): 53 | out.append(i) 54 | elif isinstance(i, Iterable): 55 | # print('recursive call on', i, type(i)) 56 | out.append(fix_types_iterable(i, type(i))) 57 | else: 58 | out.append(i) 59 | return output_type(out) 60 | -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/utilities/label_handling/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/utilities/label_handling/__init__.py -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/utilities/label_handling/__pycache__/__init__.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/utilities/label_handling/__pycache__/__init__.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/utilities/label_handling/__pycache__/__init__.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/utilities/label_handling/__pycache__/__init__.cpython-39.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/utilities/label_handling/__pycache__/label_handling.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/utilities/label_handling/__pycache__/label_handling.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/utilities/label_handling/__pycache__/label_handling.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/utilities/label_handling/__pycache__/label_handling.cpython-39.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/utilities/network_initialization.py: -------------------------------------------------------------------------------- 1 | from torch import nn 2 | 3 | 4 | class InitWeights_He(object): 5 | def __init__(self, neg_slope=1e-2): 6 | self.neg_slope = neg_slope 7 | 8 | def __call__(self, module): 9 | if isinstance(module, nn.Conv3d) or isinstance(module, nn.Conv2d) or isinstance(module, nn.ConvTranspose2d) or isinstance(module, nn.ConvTranspose3d): 10 | module.weight = nn.init.kaiming_normal_(module.weight, a=self.neg_slope) 11 | if module.bias is not None: 12 | module.bias = nn.init.constant_(module.bias, 0) 13 | -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/utilities/plans_handling/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/utilities/plans_handling/__init__.py -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/utilities/plans_handling/__pycache__/__init__.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/utilities/plans_handling/__pycache__/__init__.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/utilities/plans_handling/__pycache__/__init__.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/utilities/plans_handling/__pycache__/__init__.cpython-39.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/utilities/plans_handling/__pycache__/plans_handler.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/utilities/plans_handling/__pycache__/plans_handler.cpython-311.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/utilities/plans_handling/__pycache__/plans_handler.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/Finetune/nnUNet/nnunetv2/utilities/plans_handling/__pycache__/plans_handler.cpython-39.pyc -------------------------------------------------------------------------------- /Finetune/nnUNet/nnunetv2/utilities/utils.py: -------------------------------------------------------------------------------- 1 | # Copyright 2021 HIP Applied Computer Vision Lab, Division of Medical Image Computing, German Cancer Research Center 2 | # (DKFZ), Heidelberg, Germany 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | import os.path 16 | from functools import lru_cache 17 | from typing import Union 18 | 19 | from batchgenerators.utilities.file_and_folder_operations import * 20 | import numpy as np 21 | import re 22 | 23 | from nnunetv2.paths import nnUNet_raw 24 | 25 | 26 | def get_identifiers_from_splitted_dataset_folder(folder: str, file_ending: str): 27 | files = subfiles(folder, suffix=file_ending, join=False) 28 | # all files have a 4 digit channel index (_XXXX) 29 | crop = len(file_ending) + 5 30 | files = [i[:-crop] for i in files] 31 | # only unique image ids 32 | files = np.unique(files) 33 | return files 34 | 35 | 36 | def create_lists_from_splitted_dataset_folder(folder: str, file_ending: str, identifiers: List[str] = None) -> List[ 37 | List[str]]: 38 | """ 39 | does not rely on dataset.json 40 | """ 41 | if identifiers is None: 42 | identifiers = get_identifiers_from_splitted_dataset_folder(folder, file_ending) 43 | files = subfiles(folder, suffix=file_ending, join=False, sort=True) 44 | list_of_lists = [] 45 | for f in identifiers: 46 | p = re.compile(re.escape(f) + r"_\d\d\d\d" + re.escape(file_ending)) 47 | list_of_lists.append([join(folder, i) for i in files if p.fullmatch(i)]) 48 | return list_of_lists 49 | 50 | 51 | def get_filenames_of_train_images_and_targets(raw_dataset_folder: str, dataset_json: dict = None): 52 | if dataset_json is None: 53 | dataset_json = load_json(join(raw_dataset_folder, 'dataset.json')) 54 | 55 | if 'dataset' in dataset_json.keys(): 56 | dataset = dataset_json['dataset'] 57 | for k in dataset.keys(): 58 | dataset[k]['label'] = os.path.abspath(join(raw_dataset_folder, dataset[k]['label'])) if not os.path.isabs(dataset[k]['label']) else dataset[k]['label'] 59 | dataset[k]['images'] = [os.path.abspath(join(raw_dataset_folder, i)) if not os.path.isabs(i) else i for i in dataset[k]['images']] 60 | else: 61 | identifiers = get_identifiers_from_splitted_dataset_folder(join(raw_dataset_folder, 'imagesTr'), dataset_json['file_ending']) 62 | images = create_lists_from_splitted_dataset_folder(join(raw_dataset_folder, 'imagesTr'), dataset_json['file_ending'], identifiers) 63 | segs = [join(raw_dataset_folder, 'labelsTr', i + dataset_json['file_ending']) for i in identifiers] 64 | dataset = {i: {'images': im, 'label': se} for i, im, se in zip(identifiers, images, segs)} 65 | return dataset 66 | 67 | 68 | if __name__ == '__main__': 69 | print(get_filenames_of_train_images_and_targets(join(nnUNet_raw, 'Dataset002_Heart'))) 70 | -------------------------------------------------------------------------------- /Finetune/nnUNet/setup.py: -------------------------------------------------------------------------------- 1 | import setuptools 2 | 3 | if __name__ == "__main__": 4 | setuptools.setup() 5 | -------------------------------------------------------------------------------- /assets/10k.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/assets/10k.png -------------------------------------------------------------------------------- /assets/framework.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/assets/framework.png -------------------------------------------------------------------------------- /assets/intro.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/assets/intro.png -------------------------------------------------------------------------------- /jsons/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/jsons/__init__.py -------------------------------------------------------------------------------- /jsons/btcv.json: -------------------------------------------------------------------------------- 1 | {"training": [{"image": "imagesTr/img0001.nii.gz"}, {"image": "imagesTr/img0002.nii.gz"}, {"image": "imagesTr/img0003.nii.gz"}, {"image": "imagesTr/img0004.nii.gz"}, {"image": "imagesTr/img0005.nii.gz"}, {"image": "imagesTr/img0006.nii.gz"}, {"image": "imagesTr/img0007.nii.gz"}, {"image": "imagesTr/img0008.nii.gz"}, {"image": "imagesTr/img0009.nii.gz"}, {"image": "imagesTr/img0010.nii.gz"}, {"image": "imagesTr/img0021.nii.gz"}, {"image": "imagesTr/img0022.nii.gz"}, {"image": "imagesTr/img0023.nii.gz"}, {"image": "imagesTr/img0024.nii.gz"}, {"image": "imagesTr/img0025.nii.gz"}, {"image": "imagesTr/img0026.nii.gz"}, {"image": "imagesTr/img0027.nii.gz"}, {"image": "imagesTr/img0028.nii.gz"}, {"image": "imagesTr/img0029.nii.gz"}, {"image": "imagesTr/img0030.nii.gz"}, {"image": "imagesTr/img0031.nii.gz"}, {"image": "imagesTr/img0032.nii.gz"}, {"image": "imagesTr/img0033.nii.gz"}, {"image": "imagesTr/img0034.nii.gz"}], "validation": [{"image": "imagesTr/img0035.nii.gz"}, {"image": "imagesTr/img0036.nii.gz"}, {"image": "imagesTr/img0037.nii.gz"}, {"image": "imagesTr/img0038.nii.gz"}, {"image": "imagesTr/img0039.nii.gz"}, {"image": "imagesTr/img0040.nii.gz"}]} -------------------------------------------------------------------------------- /optimizers/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/optimizers/__init__.py -------------------------------------------------------------------------------- /train.sh: -------------------------------------------------------------------------------- 1 | now=$(date +"%Y%m%d_%H%M%S") 2 | logdir=runs/logs_10k 3 | mkdir -p $logdir 4 | 5 | torchrun --master_port=28802 voco_train.py \ 6 | --logdir $logdir | tee $logdir/$now.txt -------------------------------------------------------------------------------- /utils/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Luffy03/VoCo/e8736fd999ca177eb04071d4e0848c16aecd3470/utils/__init__.py -------------------------------------------------------------------------------- /utils/utils.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 - 2022 MONAI Consortium 2 | # Licensed under the Apache License, Version 2.0 (the "License"); 3 | # you may not use this file except in compliance with the License. 4 | # You may obtain a copy of the License at 5 | # http://www.apache.org/licenses/LICENSE-2.0 6 | # Unless required by applicable law or agreed to in writing, software 7 | # distributed under the License is distributed on an "AS IS" BASIS, 8 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 | # See the License for the specific language governing permissions and 10 | # limitations under the License. 11 | 12 | import numpy as np 13 | import scipy.ndimage as ndimage 14 | import torch 15 | 16 | 17 | def resample_3d(img, target_size): 18 | imx, imy, imz = img.shape 19 | tx, ty, tz = target_size 20 | zoom_ratio = (float(tx) / float(imx), float(ty) / float(imy), float(tz) / float(imz)) 21 | img_resampled = ndimage.zoom(img, zoom_ratio, order=0, prefilter=False) 22 | return img_resampled 23 | 24 | 25 | def dice(x, y): 26 | intersect = np.sum(np.sum(np.sum(x * y))) 27 | y_sum = np.sum(np.sum(np.sum(y))) 28 | if y_sum == 0: 29 | return 0.0 30 | x_sum = np.sum(np.sum(np.sum(x))) 31 | return 2 * intersect / (x_sum + y_sum) 32 | 33 | 34 | class AverageMeter(object): 35 | def __init__(self): 36 | self.reset() 37 | 38 | def reset(self): 39 | self.val = 0 40 | self.avg = 0 41 | self.sum = 0 42 | self.count = 0 43 | 44 | def update(self, val, n=1): 45 | self.val = val 46 | self.sum += val * n 47 | self.count += n 48 | self.avg = np.where(self.count > 0, self.sum / self.count, self.sum) 49 | 50 | 51 | def distributed_all_gather( 52 | tensor_list, valid_batch_size=None, out_numpy=False, world_size=None, no_barrier=False, is_valid=None 53 | ): 54 | if world_size is None: 55 | world_size = torch.distributed.get_world_size() 56 | if valid_batch_size is not None: 57 | valid_batch_size = min(valid_batch_size, world_size) 58 | elif is_valid is not None: 59 | is_valid = torch.tensor(bool(is_valid), dtype=torch.bool, device=tensor_list[0].device) 60 | if not no_barrier: 61 | torch.distributed.barrier() 62 | tensor_list_out = [] 63 | with torch.no_grad(): 64 | if is_valid is not None: 65 | is_valid_list = [torch.zeros_like(is_valid) for _ in range(world_size)] 66 | torch.distributed.all_gather(is_valid_list, is_valid) 67 | is_valid = [x.item() for x in is_valid_list] 68 | for tensor in tensor_list: 69 | gather_list = [torch.zeros_like(tensor) for _ in range(world_size)] 70 | torch.distributed.all_gather(gather_list, tensor) 71 | if valid_batch_size is not None: 72 | gather_list = gather_list[:valid_batch_size] 73 | elif is_valid is not None: 74 | gather_list = [g for g, v in zip(gather_list, is_valid_list) if v] 75 | if out_numpy: 76 | gather_list = [t.cpu().numpy() for t in gather_list] 77 | tensor_list_out.append(gather_list) 78 | return tensor_list_out 79 | --------------------------------------------------------------------------------