├── .gitignore
├── LICENSE
├── README.md
├── ToothFairy
├── README.md
├── algorithm_example
│ ├── .dockerignore
│ ├── .gitattributes
│ ├── .github
│ │ └── workflows
│ │ │ └── ci.yml
│ ├── Dockerfile
│ ├── PosPadUNet3D.py
│ ├── README.md
│ ├── build.sh
│ ├── export.sh
│ ├── process.py
│ ├── requirements.txt
│ └── test.sh
├── algorithms
│ ├── Haoshen_Wang
│ │ ├── README.md
│ │ ├── configs_loader.py
│ │ ├── data_function.py
│ │ ├── fine_tuning.py
│ │ ├── heatmap_generation.py
│ │ ├── loss_function.py
│ │ ├── points_sampling.py
│ │ └── test_UNet.py
│ ├── Marek_Wodzinski
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── requirements.txt
│ │ └── src
│ │ │ ├── __init__.py
│ │ │ ├── augmentation
│ │ │ ├── __init__.py
│ │ │ ├── aug.py
│ │ │ ├── torchio.py
│ │ │ └── volumetric.py
│ │ │ ├── datasets
│ │ │ ├── __init__.py
│ │ │ └── toothfairy_dataset.py
│ │ │ ├── evaluation
│ │ │ ├── __init__.py
│ │ │ ├── evaluation_functions.py
│ │ │ ├── evaluation_toothfairy.py
│ │ │ ├── np_metrics.py
│ │ │ └── tc_metrics.py
│ │ │ ├── helpers
│ │ │ ├── __init__.py
│ │ │ ├── cost_functions.py
│ │ │ ├── hausdorff.py
│ │ │ └── utils.py
│ │ │ ├── inference
│ │ │ ├── __init__.py
│ │ │ └── inference_toothfairy.py
│ │ │ ├── input_output
│ │ │ ├── __init__.py
│ │ │ ├── utils_io.py
│ │ │ └── volumetric.py
│ │ │ ├── networks
│ │ │ ├── __init__.py
│ │ │ ├── building_blocks.py
│ │ │ └── runet.py
│ │ │ ├── parsers
│ │ │ ├── __init__.py
│ │ │ ├── elastic_toothfairy.py
│ │ │ └── parse_toothfairy.py
│ │ │ ├── paths
│ │ │ ├── __init__.py
│ │ │ ├── hpc_paths.py
│ │ │ └── paths.py
│ │ │ ├── postprocessing
│ │ │ └── __init__.py
│ │ │ ├── preprocessing
│ │ │ ├── __init__.py
│ │ │ └── preprocessing_volumetric.py
│ │ │ ├── runners
│ │ │ ├── __init__.py
│ │ │ ├── experiments
│ │ │ │ ├── __init__.py
│ │ │ │ └── toothfairy_experiments.py
│ │ │ └── run_toothfairy_trainer.py
│ │ │ ├── training
│ │ │ ├── __init__.py
│ │ │ └── toothfairy_trainer.py
│ │ │ └── visualization
│ │ │ ├── __init__.py
│ │ │ └── volumetric.py
│ ├── Tomasz_Szczepanski
│ │ ├── .gitignore
│ │ ├── README.md
│ │ ├── docker
│ │ │ ├── Dockerfile
│ │ │ ├── build.sh
│ │ │ ├── export.sh
│ │ │ ├── misc
│ │ │ │ └── slicer_33_colormap.txt
│ │ │ ├── process.py
│ │ │ ├── requirements.txt
│ │ │ ├── src
│ │ │ │ ├── arg_parser.py
│ │ │ │ ├── cuda_stats.py
│ │ │ │ ├── data_augmentation.py
│ │ │ │ ├── inference_HQ.py
│ │ │ │ ├── inference_LQ_coarse.py
│ │ │ │ └── models
│ │ │ │ │ ├── acti_norm.py
│ │ │ │ │ ├── convolutions.py
│ │ │ │ │ ├── resnet.py
│ │ │ │ │ └── resunet.py
│ │ │ └── test
│ │ │ │ ├── test.sh
│ │ │ │ └── transform_to_mha.py
│ │ ├── env_config.yml
│ │ ├── src
│ │ │ ├── arg_parser_gcr_finetune.py
│ │ │ ├── arg_parser_gcr_pretrain.py
│ │ │ ├── arg_parser_lq.py
│ │ │ ├── arg_parser_lq_finetune.py
│ │ │ ├── cuda_stats.py
│ │ │ ├── data_augmentation.py
│ │ │ ├── data_augmentation_fine.py
│ │ │ ├── dummy_logger.py
│ │ │ ├── log_image.py
│ │ │ ├── models
│ │ │ │ ├── acti_norm.py
│ │ │ │ ├── convolutions.py
│ │ │ │ ├── resnet.py
│ │ │ │ └── resunet.py
│ │ │ └── scheduler.py
│ │ ├── train_gcr_finetune.py
│ │ ├── train_gcr_pretrain.py
│ │ ├── train_lq.py
│ │ └── train_lq_finetune.py
│ ├── Yannick_Kirchhoff
│ │ ├── dataset.json
│ │ ├── nnunetv2
│ │ │ ├── __init__.py
│ │ │ ├── batch_running
│ │ │ │ ├── __init__.py
│ │ │ │ ├── benchmarking
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── generate_benchmarking_commands.py
│ │ │ │ │ └── summarize_benchmark_results.py
│ │ │ │ ├── collect_results_custom_Decathlon.py
│ │ │ │ ├── collect_results_custom_Decathlon_2d.py
│ │ │ │ ├── generate_lsf_runs_customDecathlon.py
│ │ │ │ └── release_trainings
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ └── nnunetv2_v1
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── collect_results.py
│ │ │ │ │ └── generate_lsf_commands.py
│ │ │ ├── configuration.py
│ │ │ ├── dataset_conversion
│ │ │ │ ├── Dataset027_ACDC.py
│ │ │ │ ├── Dataset073_Fluo_C3DH_A549_SIM.py
│ │ │ │ ├── Dataset114_MNMs.py
│ │ │ │ ├── Dataset115_EMIDEC.py
│ │ │ │ ├── Dataset120_RoadSegmentation.py
│ │ │ │ ├── Dataset137_BraTS21.py
│ │ │ │ ├── Dataset218_Amos2022_task1.py
│ │ │ │ ├── Dataset219_Amos2022_task2.py
│ │ │ │ ├── Dataset220_KiTS2023.py
│ │ │ │ ├── Dataset221_AutoPETII_2023.py
│ │ │ │ ├── Dataset988_dummyDataset4.py
│ │ │ │ ├── __init__.py
│ │ │ │ ├── convert_MSD_dataset.py
│ │ │ │ ├── convert_raw_dataset_from_old_nnunet_format.py
│ │ │ │ ├── datasets_for_integration_tests
│ │ │ │ │ ├── Dataset996_IntegrationTest_Hippocampus_regions_ignore.py
│ │ │ │ │ ├── Dataset997_IntegrationTest_Hippocampus_regions.py
│ │ │ │ │ ├── Dataset998_IntegrationTest_Hippocampus_ignore.py
│ │ │ │ │ ├── Dataset999_IntegrationTest_Hippocampus.py
│ │ │ │ │ └── __init__.py
│ │ │ │ └── generate_dataset_json.py
│ │ │ ├── ensembling
│ │ │ │ ├── __init__.py
│ │ │ │ └── ensemble.py
│ │ │ ├── evaluation
│ │ │ │ ├── __init__.py
│ │ │ │ ├── accumulate_cv_results.py
│ │ │ │ ├── evaluate_predictions.py
│ │ │ │ └── find_best_configuration.py
│ │ │ ├── experiment_planning
│ │ │ │ ├── __init__.py
│ │ │ │ ├── dataset_fingerprint
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ └── fingerprint_extractor.py
│ │ │ │ ├── experiment_planners
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── default_experiment_planner.py
│ │ │ │ │ ├── network_topology.py
│ │ │ │ │ ├── readme.md
│ │ │ │ │ └── resencUNet_planner.py
│ │ │ │ ├── plan_and_preprocess_api.py
│ │ │ │ ├── plan_and_preprocess_entrypoints.py
│ │ │ │ ├── plans_for_pretraining
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ └── move_plans_between_datasets.py
│ │ │ │ └── verify_dataset_integrity.py
│ │ │ ├── imageio
│ │ │ │ ├── __init__.py
│ │ │ │ ├── base_reader_writer.py
│ │ │ │ ├── natural_image_reager_writer.py
│ │ │ │ ├── nibabel_reader_writer.py
│ │ │ │ ├── npy_reader_writer.py
│ │ │ │ ├── reader_writer_registry.py
│ │ │ │ ├── readme.md
│ │ │ │ ├── simpleitk_reader_writer.py
│ │ │ │ └── tif_reader_writer.py
│ │ │ ├── inference
│ │ │ │ ├── __init__.py
│ │ │ │ ├── data_iterators.py
│ │ │ │ ├── examples.py
│ │ │ │ ├── export_prediction.py
│ │ │ │ ├── predict_from_raw_data.py
│ │ │ │ ├── readme.md
│ │ │ │ └── sliding_window_prediction.py
│ │ │ ├── model_sharing
│ │ │ │ ├── __init__.py
│ │ │ │ ├── entry_points.py
│ │ │ │ ├── model_download.py
│ │ │ │ ├── model_export.py
│ │ │ │ └── model_import.py
│ │ │ ├── paths.py
│ │ │ ├── postprocessing
│ │ │ │ ├── __init__.py
│ │ │ │ └── remove_connected_components.py
│ │ │ ├── preprocessing
│ │ │ │ ├── __init__.py
│ │ │ │ ├── cropping
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ └── cropping.py
│ │ │ │ ├── normalization
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── default_normalization_schemes.py
│ │ │ │ │ ├── map_channel_name_to_normalization.py
│ │ │ │ │ └── readme.md
│ │ │ │ ├── preprocessors
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ └── default_preprocessor.py
│ │ │ │ └── resampling
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── default_resampling.py
│ │ │ │ │ └── utils.py
│ │ │ ├── run
│ │ │ │ ├── __init__.py
│ │ │ │ ├── load_pretrained_weights.py
│ │ │ │ └── run_training.py
│ │ │ ├── tests
│ │ │ │ ├── __init__.py
│ │ │ │ └── integration_tests
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── add_lowres_and_cascade.py
│ │ │ │ │ ├── cleanup_integration_test.py
│ │ │ │ │ ├── lsf_commands.sh
│ │ │ │ │ ├── prepare_integration_tests.sh
│ │ │ │ │ ├── readme.md
│ │ │ │ │ ├── run_integration_test.sh
│ │ │ │ │ ├── run_integration_test_bestconfig_inference.py
│ │ │ │ │ └── run_integration_test_trainingOnly_DDP.sh
│ │ │ ├── training
│ │ │ │ ├── __init__.py
│ │ │ │ ├── data_augmentation
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── compute_initial_patch_size.py
│ │ │ │ │ └── custom_transforms
│ │ │ │ │ │ ├── __init__.py
│ │ │ │ │ │ ├── cascade_transforms.py
│ │ │ │ │ │ ├── deep_supervision_donwsampling.py
│ │ │ │ │ │ ├── limited_length_multithreaded_augmenter.py
│ │ │ │ │ │ ├── manipulating_data_dict.py
│ │ │ │ │ │ ├── masking.py
│ │ │ │ │ │ ├── region_based_training.py
│ │ │ │ │ │ └── transforms_for_dummy_2d.py
│ │ │ │ ├── dataloading
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── base_data_loader.py
│ │ │ │ │ ├── data_loader_2d.py
│ │ │ │ │ ├── data_loader_3d.py
│ │ │ │ │ ├── nnunet_dataset.py
│ │ │ │ │ └── utils.py
│ │ │ │ ├── logging
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ └── nnunet_logger.py
│ │ │ │ ├── loss
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── compound_losses.py
│ │ │ │ │ ├── deep_supervision.py
│ │ │ │ │ ├── dice.py
│ │ │ │ │ ├── hausdorff_loss.py
│ │ │ │ │ └── robust_ce_loss.py
│ │ │ │ ├── lr_scheduler
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ └── polylr.py
│ │ │ │ └── nnUNetTrainer
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── nnUNetTrainer.py
│ │ │ │ │ └── variants
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── benchmarking
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── nnUNetTrainerBenchmark_5epochs.py
│ │ │ │ │ └── nnUNetTrainerBenchmark_5epochs_noDataLoading.py
│ │ │ │ │ ├── data_augmentation
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── nnUNetTrainerDA5.py
│ │ │ │ │ ├── nnUNetTrainerDAOrd0.py
│ │ │ │ │ ├── nnUNetTrainerNoDA.py
│ │ │ │ │ └── nnUNetTrainerNoMirroring.py
│ │ │ │ │ ├── loss
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── nnUNetTrainerCELoss.py
│ │ │ │ │ ├── nnUNetTrainerDiceLoss.py
│ │ │ │ │ ├── nnUNetTrainerFocalLoss.py
│ │ │ │ │ ├── nnUNetTrainerHDFocalLoss.py
│ │ │ │ │ ├── nnUNetTrainerHDLoss.py
│ │ │ │ │ ├── nnUNetTrainerSkelDice.py
│ │ │ │ │ └── nnUNetTrainerTopkLoss.py
│ │ │ │ │ ├── lr_schedule
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ └── nnUNetTrainerCosAnneal.py
│ │ │ │ │ ├── network_architecture
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── nnUNetTrainerBN.py
│ │ │ │ │ └── nnUNetTrainerNoDeepSupervision.py
│ │ │ │ │ ├── optimizer
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── nnUNetTrainerAdam.py
│ │ │ │ │ └── nnUNetTrainerAdan.py
│ │ │ │ │ ├── sampling
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ └── nnUNetTrainer_probabilisticOversampling.py
│ │ │ │ │ └── training_length
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── nnUNetTrainer_Xepochs.py
│ │ │ │ │ └── nnUNetTrainer_Xepochs_NoMirroring.py
│ │ │ └── utilities
│ │ │ │ ├── __init__.py
│ │ │ │ ├── collate_outputs.py
│ │ │ │ ├── dataset_name_id_conversion.py
│ │ │ │ ├── ddp_allgather.py
│ │ │ │ ├── default_n_proc_DA.py
│ │ │ │ ├── file_path_utilities.py
│ │ │ │ ├── find_class_by_name.py
│ │ │ │ ├── get_network_from_plans.py
│ │ │ │ ├── helpers.py
│ │ │ │ ├── json_export.py
│ │ │ │ ├── label_handling
│ │ │ │ ├── __init__.py
│ │ │ │ └── label_handling.py
│ │ │ │ ├── network_initialization.py
│ │ │ │ ├── overlay_plots.py
│ │ │ │ ├── plans_handling
│ │ │ │ ├── __init__.py
│ │ │ │ └── plans_handler.py
│ │ │ │ └── utils.py
│ │ ├── readme.txt
│ │ └── setup.py
│ └── Yusheng_Liu
│ │ ├── README.md
│ │ ├── build.sh
│ │ ├── dockerfile
│ │ ├── framework.JPG
│ │ ├── name_config.py
│ │ ├── networks
│ │ ├── generic_UNet.py
│ │ ├── generic_UNet_small.py
│ │ ├── generic_UNet_template.py
│ │ ├── generic_UNetv2.py
│ │ ├── net_factory_3d.py
│ │ └── neural_network.py
│ │ ├── output
│ │ └── results.json
│ │ ├── predict.sh
│ │ ├── process
│ │ ├── Check_Component.py
│ │ ├── data_read.py
│ │ ├── evaluations
│ │ │ ├── dice.py
│ │ │ └── metric.py
│ │ ├── file_utils.py
│ │ ├── nnUNet_Pseudo_Generate.sh
│ │ ├── nnUNetv2_Pseudo_Generate.sh
│ │ ├── select_fineGT_series.py
│ │ ├── select_reliable_series.py
│ │ ├── transforms
│ │ │ ├── compose.py
│ │ │ ├── connected_component_labeling.py
│ │ │ ├── functional.py
│ │ │ ├── image_augment.py
│ │ │ ├── image_io.py
│ │ │ ├── image_reorient.py
│ │ │ ├── image_resample.py
│ │ │ ├── image_transform.py
│ │ │ ├── mask_one_hot.py
│ │ │ ├── mask_process.py
│ │ │ └── transform.py
│ │ └── utils.py
│ │ ├── requirements.txt
│ │ ├── spacing_config.py
│ │ ├── test_3D.py
│ │ └── test_3D_util_mirror.py
└── evaluation
│ ├── .dockerignore
│ ├── .gitattributes
│ ├── .github
│ └── workflows
│ │ └── ci.yml
│ ├── .gitignore
│ ├── Dockerfile
│ ├── README.md
│ ├── build.sh
│ ├── evaluation.py
│ ├── export.sh
│ ├── out.txt
│ ├── requirements.txt
│ ├── test.sh
│ └── test
│ └── predictions.json
├── ToothFairy2
├── README.md
├── algorithm
│ ├── .dockerignore
│ ├── .gitattributes
│ ├── Dockerfile
│ ├── README.md
│ ├── build.ps1
│ ├── build.sh
│ ├── export.ps1
│ ├── export.sh
│ ├── output
│ │ └── toothfairy_algorithm-output-aef7d44fa8239aa6ee100726344ed4c6
│ │ │ └── _data
│ │ │ └── results.json
│ ├── process.py
│ ├── requirements.txt
│ ├── resources
│ │ └── README.md
│ ├── test.sh
│ └── test
│ │ └── .gitignore
└── evaluation
│ ├── .dockerignore
│ ├── .gitattributes
│ ├── .github
│ └── workflows
│ │ └── ci.yml
│ ├── .gitignore
│ ├── Dockerfile
│ ├── README.md
│ ├── build.sh
│ ├── evaluation.py
│ ├── export.sh
│ ├── ground-truth
│ ├── ToothFairy2F_012_0000.mha
│ ├── ToothFairy2F_027_0000.mha
│ ├── ToothFairy2F_056_0000.mha
│ └── ToothFairy2F_065_0000.mha
│ ├── requirements.txt
│ ├── test.sh
│ └── test
│ ├── F012
│ └── output
│ │ └── images
│ │ └── oral-pharyngeal-segmentation
│ │ └── README.md
│ ├── F027
│ └── output
│ │ └── images
│ │ └── oral-pharyngeal-segmentation
│ │ └── README.md
│ ├── F056
│ └── output
│ │ └── images
│ │ └── oral-pharyngeal-segmentation
│ │ └── README.md
│ ├── F065
│ └── output
│ │ └── images
│ │ └── oral-pharyngeal-segmentation
│ │ └── README.md
│ └── predictions.json
└── ToothFairy3
├── Interactive-Segmentation
└── evaluation
│ └── evaluation.py
└── Multi-Instance-Segmentation
└── evaluation
└── evaluation.py
/.gitignore:
--------------------------------------------------------------------------------
1 | *.mha
2 | *.tif
3 | *.tiff
4 | *.tar.gz
5 | *.gz
6 | *.pth
7 |
8 | # Byte-compiled / optimized / DLL files
9 | __pycache__/
10 | *.py[cod]
11 | *$py.class
12 |
13 | # C extensions
14 | *.so
15 |
16 | # Distribution / packaging
17 | .Python
18 | env/
19 | build/
20 | develop-eggs/
21 | dist/
22 | downloads/
23 | eggs/
24 | .eggs/
25 | lib/
26 | lib64/
27 | parts/
28 | sdist/
29 | var/
30 | wheels/
31 | *.egg-info/
32 | .installed.cfg
33 | *.egg
34 |
35 | # PyInstaller
36 | # Usually these files are written by a python script from a template
37 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
38 | *.manifest
39 | *.spec
40 |
41 | # Installer logs
42 | pip-log.txt
43 | pip-delete-this-directory.txt
44 |
45 | # Unit test / coverage reports
46 | htmlcov/
47 | .tox/
48 | .coverage
49 | .coverage.*
50 | .cache
51 | nosetests.xml
52 | coverage.xml
53 | *.cover
54 | .hypothesis/
55 | .pytest_cache/
56 |
57 | # Translations
58 | *.mo
59 | *.pot
60 |
61 | # Django stuff:
62 | *.log
63 | local_settings.py
64 |
65 | # Flask stuff:
66 | instance/
67 | .webassets-cache
68 |
69 | # Scrapy stuff:
70 | .scrapy
71 |
72 | # Sphinx documentation
73 | docs/_build/
74 |
75 | # PyBuilder
76 | target/
77 |
78 | # Jupyter Notebook
79 | .ipynb_checkpoints
80 |
81 | # pyenv
82 | .python-version
83 |
84 | # celery beat schedule file
85 | celerybeat-schedule
86 |
87 | # SageMath parsed files
88 | *.sage.py
89 |
90 | # dotenv
91 | .env
92 |
93 | # virtualenv
94 | .venv
95 | venv/
96 | ENV/
97 |
98 | # Spyder project settings
99 | .spyderproject
100 | .spyproject
101 |
102 | # Rope project settings
103 | .ropeproject
104 |
105 | # mkdocs documentation
106 | /site
107 |
108 | # mypy
109 | .mypy_cache/
110 |
111 | # Pycharm
112 | .idea/
113 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2024 The ToothFairy Organizing Team
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | # ToothFairy Challenges
6 |
7 | This repository contains the information and materials for the ToothFairy challenge, which has reached its third edition with [MICCAI 2025](https://conferences.miccai.org/2025/).
8 |
9 | ## ToothFairy3 (MICCAI 2025)
10 | ToothFairy3 has been organized by the [University of Modena and Reggio Emilia](https://www.unimore.it/) in collaboration with [Radboud University](https://www.ru.nl/) and the [Karlsruhe Institute of Technology](https://www.kit.edu/english/); it is hosted on Grand-Challenge and is part of [MICCAI 2025](https://conferences.miccai.org/2025/).
11 |
12 | Together with 3DTeethSeg2, the third edition of our challenge, ToothFairy3, has been accepted as a joint effort under the name ODIN2025 - Oral and Dental Image aNalysis Challenges at MICCAI 2025. Challenge results and discussion will be presented in the framework of the [ODIN2025 workshop](https://odin-workshops.org/2025/), which will take place at [MICCAI 2025 in Daejeon, Korea](https://conferences.miccai.org/2025/).
13 |
14 |
15 |
16 | ## ToothFairy2 (MICCAI 2024)
17 | ToothFairy2 has been organized by the [University of Modena and Reggio Emilia](https://www.unimore.it/) in collaboration with [Radboud University](https://www.ru.nl/); it was hosted on Grand-Challenge and was part of [MICCAI 2024](https://conferences.miccai.org/2024/).
18 |
19 | For more information, see the dedicated folder in this repository, visit the [Ditto website](https://ditto.ing.unimore.it/toothfairy2), or visit the challenge website [ToothFairy2](https://toothfairy2.grand-challenge.org/).
20 |
21 | ## ToothFairy (MICCAI 2023)
22 |
23 | ToothFairy has been organized by the [University of Modena and Reggio Emilia](https://www.unimore.it/) in collaboration with [Radboud University](https://www.ru.nl/); it was hosted on Grand-Challenge and was part of [MICCAI 2023](https://conferences.miccai.org/2023/).
24 |
25 | For more information, see the dedicated folder in this repository, visit the [Ditto website](https://ditto.ing.unimore.it/toothfairy), or visit the challenge website [ToothFairy](https://toothfairy.grand-challenge.org/).
26 |
27 |
--------------------------------------------------------------------------------
/ToothFairy/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | # ToothFairy Challenge (MICCAI 2023)
6 |
7 | This is the first edition of the ToothFairy challenge, organized by the University of Modena and Reggio Emilia with the collaboration of Radboud University. The challenge is hosted by Grand-Challenge and is part of MICCAI 2023.
8 |
9 | ### Algorithm Template and Evaluation Source Code
10 | This repository contains the information and source code of the [algorithm template](https://github.com/AImageLab-zip/ToothFairy/tree/main/algorithm), which you can use as a starting point to develop your own algorithm, and the [evaluation source code](https://github.com/AImageLab-zip/ToothFairy/tree/main/evaluation), which is helpful for checking how the evaluation is performed.
11 |
12 | ### Challenge and Dataset
13 | For any information about the challenge, please visit the [Grand-Challenge website](https://toothfairy.grand-challenge.org/).
14 | For any information about the dataset, please visit the [ToothFairy page on Ditto](https://ditto.ing.unimore.it/toothfairy).
15 |
16 | ### Submission Code and Dockers
17 | The source code of the challenge submissions can be found in the `algorithms` subfolder. Docker images are also available in the corresponding Docker Hub repository at [https://hub.docker.com/u/toothfairychallenge](https://hub.docker.com/u/toothfairychallenge)
18 |
--------------------------------------------------------------------------------
/ToothFairy/algorithm_example/.dockerignore:
--------------------------------------------------------------------------------
1 | test/
2 | .git/
3 | *.tar.gz
4 |
--------------------------------------------------------------------------------
/ToothFairy/algorithm_example/.gitattributes:
--------------------------------------------------------------------------------
1 | ground-truth/* filter=lfs diff=lfs merge=lfs -text
2 | test/* filter=lfs diff=lfs merge=lfs -text
3 |
--------------------------------------------------------------------------------
/ToothFairy/algorithm_example/.github/workflows/ci.yml:
--------------------------------------------------------------------------------
1 | name: CI
2 |
3 | on: [push, pull_request]
4 |
5 | env:
6 | PYTHON_VERSION: '3.10'
7 |
8 | jobs:
9 |
10 | tests:
11 | runs-on: ubuntu-latest
12 | steps:
13 | - name: Install Python ${{ env.PYTHON_VERSION }}
14 | uses: actions/setup-python@v4
15 | with:
16 | python-version: ${{ env.PYTHON_VERSION }}
17 | - uses: actions/checkout@v3
18 | - name: Build the containers
19 | run: |
20 | ./build.sh
21 | - name: Run the tests
22 | run: |
23 | ./test.sh
24 |
--------------------------------------------------------------------------------
/ToothFairy/algorithm_example/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM pytorch/pytorch
2 |
3 | RUN groupadd -r user && useradd -m --no-log-init -r -g user user
4 |
5 | RUN mkdir -p /opt/app /input /output \
6 | && chown user:user /opt/app /input /output
7 | # RUN mkdir -p /output/images/inferior-alveolar-canal
8 |
9 | USER user
10 | WORKDIR /opt/app
11 |
12 | ENV PATH="/home/user/.local/bin:${PATH}"
13 |
14 | RUN python -m pip install --user -U pip && python -m pip install --user pip-tools
15 |
16 |
17 |
18 | COPY --chown=user:user requirements.txt /opt/app/
19 | # RUN python -m piptools sync requirements.txt
20 | RUN python -m pip install --user -r requirements.txt
21 |
22 |
23 | COPY --chown=user:user process.py /opt/app/
24 | COPY --chown=user:user PosPadUNet3D.py /opt/app/
25 | COPY --chown=user:user checkpoints.pth /opt/app/
26 |
27 | ENTRYPOINT [ "python", "-m", "process" ]
28 |
--------------------------------------------------------------------------------
/ToothFairy/algorithm_example/README.md:
--------------------------------------------------------------------------------
1 | # ToothFairy Algorithm
2 | This is a template that you can use to develop and test your algorithm.
3 |
4 | To run it, you'll need to install [docker](https://docs.docker.com/engine/install/) and [git](https://git-scm.com/book/en/v2/Getting-Started-Installing-Git)
5 |
6 | First of all, you have to clone this repository and `cd` in the algorithm directory:
7 | ```
8 | git clone https://github.com/AImageLab-zip/ToothFairy.git
9 | cd ToothFairy/algorithm_example
10 | ```
11 |
12 | All the code has been tested on Linux (6.2.8-arch1-1)
13 |
14 | ## Testing Your Algorithm
15 | To test your algorithm, you can use the samples provided in the `test` folder,
16 | which are already converted to the `.mha` format that grand-challenge uses
17 | behind the scenes. If you wish to load more test samples, you will have to
18 | convert all the `data.npy` files to `.mha`. This conversion can be done
19 | with the SimpleITK library for Python.
20 |
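For example, a minimal conversion sketch (file names are illustrative):

```python
import numpy as np
import SimpleITK as sitk

volume = np.load("data.npy")            # raw volume from the dataset
image = sitk.GetImageFromArray(volume)  # numpy (z, y, x) -> SimpleITK image
sitk.WriteImage(image, "data.mha", useCompression=True)
```
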
21 | Inside the `process.py` file you have to
22 | add all the steps required by your algorithm. A simple example is already
23 | provided: a `SimpleNet` (a `torch.nn.Module`) is declared, and inside the
24 | `predict()` function I've already taken care of converting the `SimpleITK.Image`
25 | input to a `torch.tensor`, and the output from a `torch.tensor` back to a
26 | `SimpleITK.Image`. Feel free to modify this script, but keep in mind that
27 | GrandChallenge will give you *a single image* as input and wants *a single
28 | image* as output, both as a `SimpleITK.Image`.
29 |
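A minimal sketch of that round trip, mirroring the conversion steps in the provided `process.py` (the thresholding is just a placeholder for your network):

```python
import numpy as np
import SimpleITK as sitk
import torch

def predict(input_image: sitk.Image) -> sitk.Image:
    array = sitk.GetArrayFromImage(input_image)          # SimpleITK -> numpy
    tensor = torch.from_numpy(array.astype(np.float32))  # numpy -> torch
    output = (tensor > 0.5).numpy().astype(np.uint8)     # placeholder for your model
    return sitk.GetImageFromArray(output)                # numpy -> SimpleITK
```
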
30 | When you are ready, check that everything works properly by running `./test.sh`.
31 |
32 |
33 | ## Submit Your Algorithm
34 | Once you have checked that everything works properly using `test.sh`, you are ready to export your algorithm into a docker container using `./export.sh` and ship it to Grand-Challenge from the [submission page](https://toothfairy.grand-challenge.org/evaluation/challenge/submissions/create/) of the challenge. Be careful, because you have a limited number of submissions: 15 for the *Preliminary Test Phase*, 2 for the *Final Test Phase*.
35 |
36 |
37 |
--------------------------------------------------------------------------------
/ToothFairy/algorithm_example/build.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | SCRIPTPATH="$( cd "$(dirname "$0")" ; pwd -P )"
3 |
4 | # docker build --no-cache -t toothfairy_algorithm "$SCRIPTPATH"
5 | docker build -t toothfairy_algorithm "$SCRIPTPATH"
6 |
--------------------------------------------------------------------------------
/ToothFairy/algorithm_example/export.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | ./build.sh
4 |
5 | docker save toothfairy_algorithm | gzip -c > ToothFairy_Algorithm.tar.gz
6 |
--------------------------------------------------------------------------------
/ToothFairy/algorithm_example/process.py:
--------------------------------------------------------------------------------
1 | from pathlib import Path
2 | import SimpleITK as sitk
3 | import torch
4 | import torch.nn as nn
5 | import numpy as np
6 | import torchio as tio
7 |
8 | from torch.utils.data import DataLoader
9 |
10 | from evalutils import SegmentationAlgorithm
11 | from evalutils.validators import (
12 | UniquePathIndicesValidator,
13 | UniqueImagesValidator,
14 | )
15 |
16 | # import PosPadUNet3D
17 | # import MemTransPosPadUNet3D
18 |
19 | def preprocessing(x):
20 |     x[x > 2100] = 2100  # clip intensities above 2100
21 |     x[x < 0] = 0        # clip negative intensities
22 |     return x/2100       # rescale to [0, 1]
23 |
24 | def get_default_device():
25 | """ Set device """
26 | if torch.cuda.is_available():
27 | return torch.device('cuda')
28 | else:
29 | return torch.device('cpu')
30 |
31 | class Toothfairy_algorithm(SegmentationAlgorithm):
32 | def __init__(self):
33 | super().__init__(
34 | input_path=Path('/input/images/cbct/'),
35 | output_path=Path('/output/images/inferior-alveolar-canal/'),
36 | validators=dict(
37 | input_image=(
38 | UniqueImagesValidator(),
39 | UniquePathIndicesValidator(),
40 | )
41 | ),
42 | )
43 | self._output_path.mkdir(parents=True)
44 |
45 | @torch.no_grad()
46 | def predict(self, *, input_image: sitk.Image):
47 | input_array = sitk.GetArrayFromImage(input_image)
48 |
49 | input_tensor = torch.from_numpy(input_array.astype(np.float32))
50 | input_tensor = input_tensor[None, ...].to(get_default_device())
51 | input_tensor = preprocessing(input_tensor)
52 |
53 | output = input_tensor.squeeze(0)
54 | output = (output > 0.5).int()
55 | output = output.detach().cpu().numpy().squeeze().astype(np.uint8)
56 | output = sitk.GetImageFromArray(output)
57 |
58 | return output
59 |
60 |
61 | if __name__ == "__main__":
62 | Toothfairy_algorithm().process()
63 |
--------------------------------------------------------------------------------
/ToothFairy/algorithm_example/requirements.txt:
--------------------------------------------------------------------------------
1 | #
2 | # This file is autogenerated by pip-compile with Python 3.10
3 | # by the following command:
4 | #
5 | # pip-compile --resolver=backtracking
6 | #
7 | arrow==1.2.3
8 | # via jinja2-time
9 | binaryornot==0.4.4
10 | # via cookiecutter
11 | build==0.10.0
12 | # via pip-tools
13 | certifi==2022.12.7
14 | # via requests
15 | chardet==5.1.0
16 | # via binaryornot
17 | charset-normalizer==3.1.0
18 | # via requests
19 | click==8.1.3
20 | # via
21 | # cookiecutter
22 | # evalutils
23 | # pip-tools
24 | cookiecutter==2.1.1
25 | # via evalutils
26 | evalutils==0.4.0
27 | # via -r requirements.in
28 | idna==3.4
29 | # via requests
30 | imageio[tifffile]==2.26.0
31 | # via evalutils
32 | jinja2==3.1.2
33 | # via
34 | # cookiecutter
35 | # jinja2-time
36 | jinja2-time==0.2.0
37 | # via cookiecutter
38 | joblib==1.2.0
39 | # via scikit-learn
40 | markupsafe==2.1.2
41 | # via jinja2
42 | numpy==1.24.2
43 | # via
44 | # evalutils
45 | # imageio
46 | # pandas
47 | # scikit-learn
48 | # scipy
49 | # tifffile
50 | packaging==23.0
51 | # via build
52 | pandas==1.5.3
53 | # via evalutils
54 | pillow==9.4.0
55 | # via imageio
56 | pip-tools==6.12.3
57 | # via evalutils
58 | pyproject-hooks==1.0.0
59 | # via build
60 | python-dateutil==2.8.2
61 | # via
62 | # arrow
63 | # pandas
64 | python-slugify==8.0.1
65 | # via cookiecutter
66 | pytz==2022.7.1
67 | # via pandas
68 | pyyaml==6.0
69 | # via cookiecutter
70 | requests==2.28.2
71 | # via cookiecutter
72 | scikit-learn==1.2.2
73 | # via evalutils
74 | scipy==1.10.1
75 | # via
76 | # evalutils
77 | # scikit-learn
78 | simpleitk==2.2.1
79 | # via evalutils
80 | six==1.16.0
81 | # via python-dateutil
82 | text-unidecode==1.3
83 | # via python-slugify
84 | threadpoolctl==3.1.0
85 | # via scikit-learn
86 | tifffile==2023.3.15
87 | # via imageio
88 | tomli==2.0.1
89 | # via
90 | # build
91 | # pyproject-hooks
92 | urllib3==1.26.15
93 | # via requests
94 | wheel==0.40.0
95 | # via pip-tools
96 | torchio==0.18.92
97 |
98 | # The following packages are considered to be unsafe in a requirements file:
99 | # pip
100 | # setuptools
101 |
--------------------------------------------------------------------------------
/ToothFairy/algorithm_example/test.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | SCRIPTPATH="$( cd "$(dirname "$0")" ; pwd -P )"
4 |
5 | ./build.sh
6 |
7 | VOLUME_SUFFIX=$(dd if=/dev/urandom bs=32 count=1 | md5sum | cut --delimiter=' ' --fields=1)
8 | # Maximum is currently 30g, configurable in your algorithm image settings on grand challenge
9 | MEM_LIMIT="10g"
10 |
11 | docker volume create toothfairy_algorithm-output-$VOLUME_SUFFIX
12 |
13 | # Do not change any of the parameters to docker run; these are fixed.
14 | # You are free to add --gpus all if you would like to locally test
15 | # your algorithm with your GPU hardware. On grand-challenge, each
16 | # container will have a single T4 with 16GB and will be run with
17 | # that flag.
18 | docker run --rm \
19 | --memory="${MEM_LIMIT}" \
20 | --memory-swap="${MEM_LIMIT}" \
21 | --network="none" \
22 | --cap-drop="ALL" \
23 | --security-opt="no-new-privileges" \
24 | --shm-size="128m" \
25 | --pids-limit="256" \
26 | -v $SCRIPTPATH/test/:/input/ \
27 | -v toothfairy_algorithm-output-$VOLUME_SUFFIX:/output/ \
28 | toothfairy_algorithm
29 |
30 | docker run --rm \
31 | -v toothfairy_algorithm-output-$VOLUME_SUFFIX:/output/ \
32 | python:3.10-slim cat /output/results.json | python -m json.tool
33 |
34 | docker run --rm \
35 | -v toothfairy_algorithm-output-$VOLUME_SUFFIX:/output/ \
36 | python:3.10-slim ls -lah /output/images/inferior-alveolar-canal/
37 |
38 | cp -r /var/lib/docker/volumes/toothfairy_algorithm-output-$VOLUME_SUFFIX/ output
39 | chown llumetti:llumetti -R output  # NOTE: adjust to your local user
40 |
41 | docker volume rm toothfairy_algorithm-output-$VOLUME_SUFFIX
42 |
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Haoshen_Wang/README.md:
--------------------------------------------------------------------------------
1 | # Improved-IAN-Segmentation
2 |
3 | The 2nd-place solution to the ToothFairy: Cone-Beam Computed Tomography (CBCT) Segmentation Challenge.
4 |
5 | ## Fine-tuning strategy
6 | The `fine_tuning.py` script provides a sample implementation of the fine-tuning strategy, demonstrated on a rudimentary U-Net. In practice, the strategy is executed within the nnU-Net framework; a generic sketch follows below.
7 |
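As a generic illustration only (not the author's exact procedure, which lives in `fine_tuning.py` and the nnU-Net framework), resuming from a pretrained checkpoint typically looks like this; the tiny stand-in network and optimizer settings are hypothetical:

```python
import torch
import torch.nn as nn

# Hypothetical stand-in for the rudimentary U-Net used in fine_tuning.py.
model = nn.Sequential(nn.Conv3d(1, 16, 3, padding=1), nn.ReLU(), nn.Conv3d(16, 2, 1))

# Load pretrained weights (assuming the .pth stores a plain state_dict,
# e.g. the checkpoint-105.pth referenced in configs_loader.py), then
# continue training with a reduced learning rate.
state = torch.load("checkpoint-105.pth", map_location="cpu")
model.load_state_dict(state, strict=False)
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3, momentum=0.99, nesterov=True)
```
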
8 | ## Focal Dice loss
9 | The focal Dice loss is defined in `loss_function.py` and is intended to be used in conjunction with the nnU-Net framework.
10 |
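From `loss_function.py`, the loss adds a focal cross-entropy term (with γ = 2) to a soft Dice term; as a sketch, per batch of N voxels:

$$\mathcal{L} = \mathcal{L}_{\text{Dice}} + \frac{1}{N}\sum_{i=1}^{N}\left(1 - p_{i,y_i}\right)^{\gamma}\left(-\log p_{i,y_i}\right)$$

where $p_{i,y_i}$ is the softmax probability assigned to the target class $y_i$ at voxel $i$.
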
11 | ## Acknowledgements
12 | This code repository builds on [nnU-Net](https://github.com/MIC-DKFZ/nnUNet) and [Pytorch Medical Segmentation](https://github.com/MontaEllis/Pytorch-Medical-Segmentation)
13 |
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Haoshen_Wang/configs_loader.py:
--------------------------------------------------------------------------------
1 | import configargparse
2 | import numpy as np
3 | import os
4 |
5 | def config_parser():
6 | parser = configargparse.ArgumentParser()
7 |
8 | parser.add_argument("--exp_name" , type = str, default = '.', help = 'Experiment name')
9 | parser.add_argument("--image_dir", type = str, default = './dataset/imagesTr', help = 'The dir of images')
10 | parser.add_argument("--label_dir" , type = str, default = './dataset/labelsTr', help = 'The dir of labels')
11 | parser.add_argument("--test_dir", type = str, default = 'test_dir', help = 'The dir of test images')
12 | parser.add_argument('--plan_path' , type = str, default = 'plans.json' , help = 'The path of plan')
13 | parser.add_argument('--network', type=str, default='UNet',help = 'The network architecture' )
14 | parser.add_argument('--checkpoint' , type = str, default='checkpoint-105.pth' , help =' checkpoints')
15 | return parser
16 |
17 | def get_config():
18 | parser = config_parser()
19 | cfg = parser.parse_args()
20 |
21 | assert cfg.exp_name is not None
22 | assert cfg.image_dir is not None
23 | assert cfg.label_dir is not None
24 |
25 | return cfg
26 |
27 |
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Haoshen_Wang/data_function.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import os
3 | import torchio as tio
4 | from glob import glob
5 | from preprocess import CustomTransform
6 | from preprocess import Resample
7 | import numpy as np
8 | from os.path import join
9 |
10 | class MedData_finetune(torch.utils.data.Dataset):
11 | def __init__(self, images_dir, points_dir,patch_size):
12 |
13 |
14 | queue_length = 24
15 | samples_per_volume = 2
16 | self.images = sorted(glob(os.path.join(images_dir, "volume*.nii.gz")))
17 |
18 | self.images = [self.images[0]] #-15
19 |
20 |
21 | self.subjects = []
22 | self.query_points = []
23 | self.occupancy = []
24 | self.noise =[0, 0.3, 5, 10]
25 |
26 |
27 | for img in self.images:
28 | file_num = os.path.basename(img).split('.')[0].split('-')[-1]
29 | for noise in self.noise:
30 | p=np.load(join(points_dir , f'{file_num}_boundary_{str(noise)}_samples.npz'))
31 | self.query_points.append(p['points'])
32 | self.occupancy.append(p['occupancy'])
33 | self.query_points = np.concatenate(self.query_points, axis= 0)
34 | self.occupancy = np.concatenate(self.occupancy , axis= 0 )
35 |
36 | subject = tio.Subject(
37 | image = tio.ScalarImage(img),
38 | points = self.query_points,
39 | occupancy = self.occupancy
40 | )
41 | self.subjects.append(subject)
42 |
43 |
44 | # self.transforms = self.transform()
45 |
46 | self.training_set = tio.SubjectsDataset(self.subjects, transform=None)
47 |
48 |
49 | self.queue_dataset = tio.Queue(
50 | self.training_set,
51 | queue_length,
52 | samples_per_volume,
53 | tio.UniformSampler(patch_size),
54 | num_workers= 2,
55 | )
56 |
57 |
58 |
59 |
60 | class MedData_val(torch.utils.data.Dataset):
61 | def __init__(self, images_dir, labels_dir):
62 | self.images = sorted(glob(os.path.join(images_dir, "volume*.nii.gz")))[-15:] #-15
63 | self.labels = sorted(glob(os.path.join(labels_dir, 'segmentation*.nii.gz')))[-15:]
64 |
65 |
66 | self.subjects = []
67 | for (img, lab) in zip(self.images, self.labels):
68 | subject = tio.Subject(
69 | image=tio.ScalarImage(img),
70 | label = tio.LabelMap(lab),
71 | )
72 | self.subjects.append(subject)
73 | self.val_set = tio.SubjectsDataset(self.subjects, transform=None)
74 |
75 |
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Haoshen_Wang/heatmap_generation.py:
--------------------------------------------------------------------------------
1 | import torchio as tio
2 | import os
3 | from scipy.spatial import cKDTree as KDTree
4 | import numpy as np
5 | import torch
6 | def heatmap_generation(path):  # build a distance-decay heat map around a skeleton label map
7 |     file_num = os.path.basename(path).split('_')[0]
8 |     target_path = 'nerve-002.nii.gz'
9 |     kernel_size = 7
10 |     sigma = kernel_size/7  # = 1.0 for the default kernel size
11 |     half_size = int((kernel_size - 1)/2)
12 | img = tio.LabelMap(path)
13 | skeleton = img[tio.DATA].squeeze().numpy()
14 |
15 | skeleton_points = np.where(skeleton == 1)
16 | skeleton_points = np.concatenate((skeleton_points[0][:,np.newaxis],skeleton_points[1][:,np.newaxis],skeleton_points[2][:,np.newaxis]),axis=-1)
17 | shape = skeleton.shape
18 | kdtree = KDTree(skeleton_points)
19 |
20 | heat_map = np.zeros_like(skeleton, dtype= np.float64)
21 | intensity = 200
22 | points_set = np.empty((0,3))
23 | for i in range(len(skeleton_points)):
24 | center = skeleton_points[i]
25 | x_min, y_min, z_min = max(center[0]-half_size , 0) , max(center[1]-half_size , 0) , max(center[2]-half_size , 0)
26 | x_max, y_max, z_max = min(center[0]+half_size , shape[0]-1), min(center[1]+half_size, shape[1]-1) , min(center[2]+half_size, shape[2]-1)
27 | x , y , z = np.arange(x_min, x_max+1), np.arange(y_min, y_max + 1), np.arange(z_min, z_max + 1)
28 | X , Y , Z = np.meshgrid( x, y , z ,indexing='ij')
29 | points = np.concatenate((X[:,:,:,np.newaxis],Y[:,:,:,np.newaxis],Z[:,:,:,np.newaxis]),axis = -1).reshape(-1, 3)
30 | points_set = np.concatenate((points_set,points), axis= 0 )
31 | points = np.unique(points_set , axis = 0)
32 | distances, _ = kdtree.query(points)
33 | gaussian_kernel = np.exp(-(distances)/(2*sigma**2))*intensity
34 | gaussian_kernel = gaussian_kernel[:,np.newaxis]
35 | points = np.concatenate((points , gaussian_kernel), axis=-1)
36 |
37 | heat_map[points[:,0].astype(int),points[:,1].astype(int), points[:,2].astype(int)] = points[:,3]
38 |
39 | tio.ScalarImage(tensor = torch.tensor(heat_map).unsqueeze(0), affine = img.affine).save(target_path)
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Haoshen_Wang/loss_function.py:
--------------------------------------------------------------------------------
1 | from torch import nn
2 | import torch
3 | import torch.nn.functional as F
4 | 
5 | # These symbols come from the nnU-Net framework this loss is meant to plug
6 | # into (import paths assume the nnU-Net v2 layout):
7 | from nnunetv2.training.loss.dice import SoftDiceLoss
8 | from nnunetv2.training.loss.robust_ce_loss import RobustCrossEntropyLoss
9 | from nnunetv2.utilities.helpers import softmax_helper_dim1
10 | 
11 | 
12 | class DiceLoss(nn.Module):
13 |     def __init__(self) -> None:
14 |         super(DiceLoss, self).__init__()
15 | 
16 |     # def forward(self, input, target):  ## onehot variant
17 |     #     input = torch.softmax(input, dim=1)
18 |     #     target_onehot = torch.zeros_like(input, dtype=torch.bool)
19 |     #     target_onehot.scatter_(1, target.type(torch.int64), 1)
20 |     #     smooth = 1e-5
21 |     #     assert target_onehot.shape == input.shape
22 |     #     intersect = (target_onehot * input).sum([2, 3, 4])
23 |     #     sum_pred = input.sum([2, 3, 4])
24 |     #     sum_gt = target_onehot.sum([2, 3, 4])
25 |     #     dice = (2 * intersect + smooth) / (sum_pred + sum_gt + smooth)
26 |     #     # dice = (0.2 * dice[:, 0] + 0.8 * dice[:, 1]) / 2
27 |     #     dice = dice.mean(1)
28 |     #     dice = dice.sum(0)
29 |     #     return 1 - dice
30 | 
31 |     def forward(self, input, target):
32 |         # Binary soft Dice on the foreground channel.
33 |         input = torch.softmax(input, dim=1)
34 |         input = input[:, 1, :]
35 |         target = target[:, 0, :]
36 |         smooth = 1e-5
37 |         assert target.shape == input.shape
38 |         intersect = torch.sum(input * target)
39 |         sum_gt = torch.sum(target)
40 |         sum_pred = torch.sum(input)
41 |         dice = (2 * intersect + smooth) / (sum_pred + sum_gt + smooth)
42 |         return 1 - dice
43 | 
44 | 
45 | class focal_DC_loss(nn.Module):
46 |     def __init__(self, soft_dice_kwargs, ce_kwargs, weight_ce=1, weight_dice=1, ignore_label=None,
47 |                  dice_class=SoftDiceLoss):
48 |         super(focal_DC_loss, self).__init__()
49 |         if ignore_label is not None:
50 |             ce_kwargs['ignore_index'] = ignore_label
51 | 
52 |         self.weight_dice = weight_dice
53 |         self.weight_ce = weight_ce
54 |         self.ignore_label = ignore_label
55 |         self.log_smooth = 1e-10
56 |         self.ce = RobustCrossEntropyLoss(**ce_kwargs)
57 |         self.dc = dice_class(apply_nonlin=softmax_helper_dim1, **soft_dice_kwargs)
58 |         self.gamma = 2
59 | 
60 |     def forward(self, net_output: torch.Tensor, target: torch.Tensor):
61 |         # Soft Dice term.
62 |         dc_loss = self.dc(net_output, target, loss_mask=None)
63 |         # Focal cross-entropy term: (1 - p_t)^gamma * (-log p_t).
64 |         preds_softmax = torch.softmax(net_output, 1)
65 |         preds_logsoft = torch.log(preds_softmax + self.log_smooth)
66 | 
67 |         preds_softmax = preds_softmax.gather(1, target.long())
68 |         preds_logsoft = preds_logsoft.gather(1, target.long())
69 | 
70 |         ce_loss = -torch.mul(torch.pow((1 - preds_softmax), self.gamma), preds_logsoft)
71 |         ce_loss = ce_loss.mean()
72 | 
73 |         loss = dc_loss + ce_loss
74 |         return loss
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Haoshen_Wang/points_sampling.py:
--------------------------------------------------------------------------------
1 | import torchio as tio
2 | import os
3 | import open3d as o3d
4 | import numpy as np
5 | from os.path import join
6 |
7 | def sampling_occupancy(sigma, path):
8 | if sigma == 0:
9 | sample_num = 100000
10 | elif sigma == 0.3:
11 | sample_num = 10000000
12 | elif sigma == 5:
13 | sample_num = 1000000
14 | elif sigma == 10:
15 | sample_num = 100000
16 | out_path = 'occupancy'
17 | file_name = os.path.splitext(os.path.basename(path))[0].split('-')[-1]
18 | input_file = path
19 | out_file = join(out_path,f'{file_name}_occ.npz')
20 | mesh = o3d.io.read_triangle_mesh(input_file)
21 | if sigma !=0:
22 | pcd = mesh.sample_points_uniformly(sample_num)
23 | points = np.asarray(pcd.points)
24 | query_points =points + sigma*np.random.randn(sample_num, 3)
25 | else:
26 | shape = tio.LabelMap(f'processed_LABEL/segmentation-{file_name}.nii.gz').shape[1:]
27 | query_points = np.random.uniform(low = 0 , high = shape, size= (sample_num,3))
28 | query_points = o3d.core.Tensor(query_points,dtype = o3d.core.Dtype.Float32)
29 | mesh = o3d.t.geometry.TriangleMesh.from_legacy(mesh)
30 | scene = o3d.t.geometry.RaycastingScene()
31 | _=scene.add_triangles(mesh)
32 | occupancy = scene.compute_occupancy(query_points)
33 | occupancy = occupancy.numpy()
34 | np.savez(out_file, points=query_points.numpy(), occupancy = occupancy)
35 | print('Finished: {}'.format(path))
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Marek_Wodzinski/README.md:
--------------------------------------------------------------------------------
1 | # ToothFairy_MW_2023
2 | Contribution to the ToothFairy Challenge (MICCAI 2023) by Marek Wodzinski (3rd place).
3 |
4 | Here you can see the full source code used to train / test the proposed solution.
5 |
6 | Only the final experiment is left (the one used for the final Docker submission).
7 |
8 | * In order to reproduce the experiment you should:
9 | * Download the Toothfairy dataset [Link](https://toothfairy.grand-challenge.org/dataset/)
10 | * Update the [hpc_paths.py](./src/paths/hpc_paths.py) and [paths.py](./src/paths/paths.py) files.
11 | * Run the [parse_toothfairy.py](./src/parsers/parse_toothfairy.py)
12 | * Run the [elastic_toothfairy.py](./src/parsers/elastic_toothfairy.py)
13 | * Run the training using [run_toothfairy_trainer.py](./src/runners/run_toothfairy_trainer.py)
14 | * And finally use the trained model for inference using [inference_toothfairy.py](./src/inference/inference_toothfairy.py)
15 |
16 | The network was trained using HPC infrastructure (PLGRID). Therefore the .slurm scripts are omitted for clarity.
17 |
18 | Please cite the ToothFairy challenge paper (TODO) if you found the source code useful.
19 | Please find the method description: (TODO).
20 |
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Marek_Wodzinski/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AImageLab-zip/ToothFairy/8486f68c65b37ee4398fb02276558ccf467583ea/ToothFairy/algorithms/Marek_Wodzinski/__init__.py
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Marek_Wodzinski/requirements.txt:
--------------------------------------------------------------------------------
1 | numpy
2 | scipy
3 | pandas
4 | torch
5 | torchvision
6 | torchaudio
7 | SimpleITK
8 | matplotlib
9 | torchio
10 | tensorboard
11 | torchsummary
12 | monai
13 | setuptools
14 | scikit_image
15 | Pillow
16 | einops
17 | vtk
18 | kornia
19 |
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Marek_Wodzinski/src/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AImageLab-zip/ToothFairy/8486f68c65b37ee4398fb02276558ccf467583ea/ToothFairy/algorithms/Marek_Wodzinski/src/__init__.py
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Marek_Wodzinski/src/augmentation/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AImageLab-zip/ToothFairy/8486f68c65b37ee4398fb02276558ccf467583ea/ToothFairy/algorithms/Marek_Wodzinski/src/augmentation/__init__.py
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Marek_Wodzinski/src/augmentation/aug.py:
--------------------------------------------------------------------------------
1 | ### Ecosystem Imports ###
2 | import os
3 | import sys
4 | sys.path.append(os.path.join(os.path.dirname(__file__), "."))
5 | sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
6 | from typing import Iterable, Callable, Any
7 |
8 | ### External Imports ###
9 | import torch as tc
10 |
11 | ### Internal Imports ###
12 |
13 | ########################
14 |
15 | def compose_transforms(*transforms : Iterable[Callable[[tc.Tensor, dict], tuple[tc.Tensor, dict]]]) -> Callable[[tc.Tensor, dict], tuple[tc.Tensor, dict]]:
16 | """
17 | Composes the input transforms into a single Callable.
18 | TODO
19 | """
20 | def composed_transforms(tensor : tc.Tensor, **kwargs : dict) -> tuple[tc.Tensor, dict]:
21 | for transform in transforms:
22 | tensor, kwargs = transform(tensor, **kwargs)
23 | return tensor, kwargs
24 | return composed_transforms
25 |
26 | def compose_transform_parameters(*transform_parameters : Iterable[dict]) -> dict:
27 | """
28 | Composes the dictionaries of transform parameters into a single one (warning: assumes different parameter names for each transform, otherwise overwrites).
29 | TODO
30 | """
31 | composed_parameters = {}
32 | for i in range(len(transform_parameters)):
33 | composed_parameters = composed_parameters | transform_parameters[i]
34 | return composed_parameters
35 |
36 | def apply_transform(*args : Iterable[tc.Tensor], transform : Callable[[tc.Tensor, dict], tuple[tc.Tensor, dict]]=None, **kwargs : dict) -> tuple[Iterable[tc.Tensor], dict]:
37 | """
38 | Applies the input transform into an iterable of tensors.
39 | TODO
40 | """
41 | metadata = dict()
42 |     try:
43 |         metadata['spacing'] = kwargs['new_spacing']
44 |     except KeyError:  # no new spacing provided
45 |         pass
46 |
47 | output = [None] * len(args)
48 | for (i, item) in enumerate(args):
49 | output[i], kwargs = transform(item, **kwargs)
50 | return output, metadata
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Marek_Wodzinski/src/datasets/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AImageLab-zip/ToothFairy/8486f68c65b37ee4398fb02276558ccf467583ea/ToothFairy/algorithms/Marek_Wodzinski/src/datasets/__init__.py
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Marek_Wodzinski/src/evaluation/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AImageLab-zip/ToothFairy/8486f68c65b37ee4398fb02276558ccf467583ea/ToothFairy/algorithms/Marek_Wodzinski/src/evaluation/__init__.py
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Marek_Wodzinski/src/evaluation/tc_metrics.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AImageLab-zip/ToothFairy/8486f68c65b37ee4398fb02276558ccf467583ea/ToothFairy/algorithms/Marek_Wodzinski/src/evaluation/tc_metrics.py
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Marek_Wodzinski/src/helpers/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AImageLab-zip/ToothFairy/8486f68c65b37ee4398fb02276558ccf467583ea/ToothFairy/algorithms/Marek_Wodzinski/src/helpers/__init__.py
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Marek_Wodzinski/src/helpers/utils.py:
--------------------------------------------------------------------------------
1 | ### Ecosystem Imports ###
2 | import os
3 | import sys
4 | sys.path.append(os.path.join(os.path.dirname(__file__), "."))
5 | sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
6 | import pathlib
7 | from typing import Union
8 | from enum import Enum
9 |
10 | ### External Imports ###
11 | import numpy as np
12 | import scipy.ndimage as nd
13 | import torch as tc
14 | import SimpleITK as sitk
15 |
16 | ### Internal Imports ###
17 |
18 |
19 |
20 | ########################
21 |
22 | def normalize(tensor : tc.Tensor):
23 | return (tensor - tc.min(tensor)) / (tc.max(tensor) - tc.min(tensor))
24 |
25 | def normalize_to_window(tensor : tc.Tensor, min_value : float, max_value : float):
26 | return normalize(tensor) * (max_value - min_value) + min_value
27 |
28 | def load_volume_sitk(
29 | input_path : Union[str, pathlib.Path],
30 | load_origin : bool=False,
31 | load_direction : bool=False) -> tuple[np.ndarray, tuple, dict]:
32 | """
33 | Utility function to load 3-D volume using SimpleITK.
34 | """
35 | image = sitk.ReadImage(str(input_path))
36 | spacing = image.GetSpacing()
37 | volume = sitk.GetArrayFromImage(image).swapaxes(0, 1).swapaxes(1, 2).astype(np.float32)
38 | metadata = dict()
39 | if load_origin:
40 | origin = image.GetOrigin()
41 | metadata['origin'] = origin
42 | if load_direction:
43 | direction = image.GetDirection()
44 | metadata['direction'] = direction
45 | return volume, spacing, metadata
46 |
47 | def save_volume_sitk(
48 | volume : np.ndarray,
49 | spacing : tuple,
50 | save_path : Union[str, pathlib.Path],
51 | use_compression : bool=True,
52 | origin : tuple=None,
53 | direction : tuple=None) -> None:
54 | """
55 | Utility function to save 3-D volume using SimpleITK.
56 | """
57 | image = sitk.GetImageFromArray(volume.swapaxes(2, 1).swapaxes(1, 0).astype(np.uint8))
58 | image.SetSpacing(spacing)
59 | if origin is not None:
60 | image.SetOrigin(origin)
61 | if direction is not None:
62 | image.SetDirection(direction)
63 | sitk.WriteImage(image, str(save_path), useCompression=use_compression)
64 |
65 |
66 | ########################
67 |
68 | def image_warping(
69 | image: np.ndarray,
70 | displacement_field: np.ndarray,
71 | order: int=1,
72 | cval: float=0.0) -> np.ndarray:
73 | """
74 | Warps the given image using the provided displacement field.
75 | """
76 | grid_x, grid_y, grid_z = np.meshgrid(np.arange(image.shape[1]), np.arange(image.shape[0]), np.arange(image.shape[2]))
77 | transformed_image = nd.map_coordinates(image, [grid_y + displacement_field[1], grid_x + displacement_field[0], grid_z + displacement_field[2]], order=order, cval=cval)
78 | return transformed_image
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Marek_Wodzinski/src/inference/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AImageLab-zip/ToothFairy/8486f68c65b37ee4398fb02276558ccf467583ea/ToothFairy/algorithms/Marek_Wodzinski/src/inference/__init__.py
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Marek_Wodzinski/src/input_output/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AImageLab-zip/ToothFairy/8486f68c65b37ee4398fb02276558ccf467583ea/ToothFairy/algorithms/Marek_Wodzinski/src/input_output/__init__.py
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Marek_Wodzinski/src/input_output/utils_io.py:
--------------------------------------------------------------------------------
1 | ### Ecosystem Imports ###
2 | import os
3 | import sys
4 | sys.path.append(os.path.join(os.path.dirname(__file__), "."))
5 | sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
6 | from typing import Union
7 | from enum import Enum
8 |
9 | ### External Imports ###
10 |
11 |
12 | ### Internal Imports ###
13 |
14 | ########################
15 |
16 | class InputOutputBackend(Enum):
17 | PYTORCH = 1
18 | NUMPY = 2
19 | SITK = 3
20 |
21 |
22 | class Representation(Enum):
23 | PYTORCH = 1
24 | NUMPY = 2
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Marek_Wodzinski/src/networks/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AImageLab-zip/ToothFairy/8486f68c65b37ee4398fb02276558ccf467583ea/ToothFairy/algorithms/Marek_Wodzinski/src/networks/__init__.py
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Marek_Wodzinski/src/parsers/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AImageLab-zip/ToothFairy/8486f68c65b37ee4398fb02276558ccf467583ea/ToothFairy/algorithms/Marek_Wodzinski/src/parsers/__init__.py
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Marek_Wodzinski/src/paths/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AImageLab-zip/ToothFairy/8486f68c65b37ee4398fb02276558ccf467583ea/ToothFairy/algorithms/Marek_Wodzinski/src/paths/__init__.py
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Marek_Wodzinski/src/paths/hpc_paths.py:
--------------------------------------------------------------------------------
1 | import os
2 | import pathlib
3 |
4 | data_path = None #TODO
5 |
6 | ### Data Paths ###
7 | toothfairy_path = data_path / "ToothFairy_Dataset_V2"
8 |
9 | ### RAW Data Paths ###
10 | raw_toothfairy_path = toothfairy_path / "RAW"
11 |
12 | ### Parsed Data Paths ###
13 | parsed_toothfairy_path = toothfairy_path / "PARSED"
14 |
15 | ### Training Paths ###
16 | project_path = None #TODO
17 | checkpoints_path = project_path / "Checkpoints"
18 | logs_path = project_path / "Logs"
19 | figures_path = project_path / "Figures"
20 | models_path = project_path / "Models"
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Marek_Wodzinski/src/paths/paths.py:
--------------------------------------------------------------------------------
1 | import os
2 | import pathlib
3 |
4 | data_path = None #TODO
5 |
6 | ### Data Paths ###
7 | toothfairy_path = data_path / "ToothFairy_Dataset_V2"
8 |
9 | ### RAW Data Paths ###
10 | raw_toothfairy_path = toothfairy_path / "RAW"
11 |
12 | ### Parsed Data Paths ###
13 | parsed_toothfairy_path = toothfairy_path / "PARSED"
14 |
15 | ### Training Paths ###
16 | project_path = None #TODO
17 | checkpoints_path = project_path / "Checkpoints"
18 | logs_path = project_path / "Logs"
19 | figures_path = project_path / "Figures"
20 | models_path = project_path / "Models"
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Marek_Wodzinski/src/postprocessing/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AImageLab-zip/ToothFairy/8486f68c65b37ee4398fb02276558ccf467583ea/ToothFairy/algorithms/Marek_Wodzinski/src/postprocessing/__init__.py
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Marek_Wodzinski/src/preprocessing/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AImageLab-zip/ToothFairy/8486f68c65b37ee4398fb02276558ccf467583ea/ToothFairy/algorithms/Marek_Wodzinski/src/preprocessing/__init__.py
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Marek_Wodzinski/src/runners/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AImageLab-zip/ToothFairy/8486f68c65b37ee4398fb02276558ccf467583ea/ToothFairy/algorithms/Marek_Wodzinski/src/runners/__init__.py
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Marek_Wodzinski/src/runners/experiments/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AImageLab-zip/ToothFairy/8486f68c65b37ee4398fb02276558ccf467583ea/ToothFairy/algorithms/Marek_Wodzinski/src/runners/experiments/__init__.py
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Marek_Wodzinski/src/runners/run_toothfairy_trainer.py:
--------------------------------------------------------------------------------
1 | ### Ecosystem Imports ###
2 | import os
3 | import sys
4 | sys.path.append(os.path.join(os.path.dirname(__file__), "."))
5 | sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
6 | sys.path.append(os.path.join(os.path.dirname(__file__), "..", ".."))
7 | from typing import Union
8 | import pathlib
9 |
10 | ### External Imports ###
11 | import numpy as np
12 | import torch as tc
13 | from torch.utils.tensorboard import SummaryWriter
14 |
15 | ### Internal Imports ###
16 | from paths import hpc_paths as p
17 | from training import toothfairy_trainer as st
18 | from runners.experiments import toothfairy_experiments as toothfairy
19 | ########################
20 |
21 |
22 | def initialize(training_params):
23 | experiment_name = training_params['experiment_name']
24 | num_iterations = training_params['num_iterations']
25 | save_step = training_params['save_step']
26 | checkpoints_path = os.path.join(p.checkpoints_path, experiment_name)
27 | checkpoints_iters = list(range(0, num_iterations, save_step))
28 | log_image_iters = list(range(0, num_iterations, save_step))
29 | if not os.path.isdir(checkpoints_path):
30 | os.makedirs(checkpoints_path)
31 | log_dir = os.path.join(p.logs_path, experiment_name)
32 | if not os.path.isdir(log_dir):
33 | os.makedirs(log_dir)
34 | logger = SummaryWriter(log_dir=log_dir, comment=experiment_name)
35 | training_params['logger'] = logger
36 | training_params['checkpoints_path'] = checkpoints_path
37 | training_params['checkpoint_iters'] = checkpoints_iters
38 | training_params['log_image_iters'] = log_image_iters
39 | return training_params
40 |
41 | def run_training(training_params):
42 | training_params = initialize(training_params)
43 | trainer = st.ToothfairyTrainer(**training_params)
44 | trainer.run()
45 |
46 | def run():
47 | params = toothfairy.get_experiment_do_21()
48 | run_training(params)
49 |
50 |
51 | if __name__ == "__main__":
52 | run()
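
A minimal sketch of the `training_params` dictionary this runner expects. The key names are taken from `initialize` above; the concrete values, and any extra keys consumed by `ToothfairyTrainer`, are assumptions (the real set is built by `get_experiment_do_21`):

    # Hypothetical parameter set; get_experiment_do_21() builds the real one.
    training_params = {
        'experiment_name': 'do_21_demo',
        'num_iterations': 1000,
        'save_step': 100,
    }
    run_training(training_params)  # initialize() adds logger/checkpoint paths, then trains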
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Marek_Wodzinski/src/training/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AImageLab-zip/ToothFairy/8486f68c65b37ee4398fb02276558ccf467583ea/ToothFairy/algorithms/Marek_Wodzinski/src/training/__init__.py
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Marek_Wodzinski/src/visualization/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AImageLab-zip/ToothFairy/8486f68c65b37ee4398fb02276558ccf467583ea/ToothFairy/algorithms/Marek_Wodzinski/src/visualization/__init__.py
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Tomasz_Szczepanski/.gitignore:
--------------------------------------------------------------------------------
1 | .DS_Store
2 |
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Tomasz_Szczepanski/README.md:
--------------------------------------------------------------------------------
1 | # Global Reference Attention Guided Segmentation (GRADE)
2 | Contribution to the ToothFairy - Cone-Beam Computed Tomography Segmentation Challenge (MICCAI 2023)
3 | by Tomasz Szczepański, Michal K. Grzeszczyk and Przemysław Korzeniowski.
4 |
5 | Reproduction:
6 | 1. Install the environment using conda and the provided environment file: env_config.yml.
7 | 2. Download the challenge dataset and generate pseudo-labels using [Deep Label Propagation](https://github.com/AImageLab-zip/alveolar_canal).
8 | We pre-train our models on pseudo-labeled training data and then fine-tune them on data with ground-truth labels from the [challenge dataset](https://toothfairy.grand-challenge.org/dataset/).
9 | 3. Train the following 4 models in order:
10 | * train_lq.py : coarse segmentation on low-resolution dense pseudo-labels -> probability maps used as the global context reference (GCR)
11 | * train_lq_finetune.py : fine-tuning on ground-truth labels
12 | * train_gcr_pretrain.py : fine segmentation on high-resolution dense pseudo-labels, using the GCR as a second input channel
13 | * train_gcr_finetune.py : fine-tuning on ground-truth labels at high resolution
14 | 4. Copy the ensemble of trained models to 'docker/checkpoints'.
15 | 5. Build the Docker image with build.sh and run it to test.
16 |
17 |
18 |
19 | ACKNOWLEDGEMENT
20 |
21 | The publication was created within the project of the Minister of Science and
22 | Higher Education "Support for the activity of Centers of Excellence established
23 | in Poland under Horizon 2020" on the basis of the contract number
24 | MEiN/2023/DIR/3796
25 |
26 | This project has received funding from the European Union’s Horizon 2020
27 | research and innovation programme under grant agreement No 857533
28 |
29 | This publication is supported by Sano project carried out within the
30 | International Research Agendas programme of the Foundation for Polish
31 | Science, co-financed by the European Union under the European Regional
32 | Development Fund
33 |
34 | Sano Centre for Computational Medicine, Health Informatics Group (HIGS) Team,
35 | Nawojki 11, 30-072 Kraków, Poland (https://sano.science/).
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Tomasz_Szczepanski/docker/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM pytorch/pytorch:1.13.0-cuda11.6-cudnn8-runtime
2 |
3 | RUN groupadd -r user && useradd -m --no-log-init -r -g user user
4 |
5 | RUN mkdir -p /opt/app /input /output \
6 | && chown user:user /opt/app /input /output
7 | # RUN mkdir -p /output/images/inferior-alveolar-canal
8 |
9 | USER user
10 | WORKDIR /opt/app
11 |
12 | ENV PATH="/home/user/.local/bin:${PATH}"
13 |
14 | RUN python -m pip install --user -U pip && python -m pip install --user pip-tools
15 |
16 |
17 |
18 | COPY --chown=user:user requirements.txt /opt/app/
19 | # RUN python -m piptools sync requirements.txt
20 | RUN python -m pip install --user -r requirements.txt
21 |
22 |
23 | COPY --chown=user:user checkpoints /opt/app/checkpoints
24 | COPY --chown=user:user src /opt/app/src
25 | COPY --chown=user:user process.py /opt/app/
26 |
27 | ENTRYPOINT [ "python", "-m", "process" ]
28 |
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Tomasz_Szczepanski/docker/build.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | SCRIPTPATH="$( cd "$(dirname "$0")" ; pwd -P )"
3 |
4 | docker build -t toothfairy_algorithm "$SCRIPTPATH"
5 |
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Tomasz_Szczepanski/docker/export.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | ./build.sh
4 |
5 | docker save toothfairy_algorithm | pigz -c > ToothFairy_Algorithm.tar.gz
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Tomasz_Szczepanski/docker/misc/slicer_33_colormap.txt:
--------------------------------------------------------------------------------
1 | 0 0 0 0 0 0
2 | 1 1 128 174 128 255
3 | 2 2 241 214 145 255
4 | 3 3 177 122 101 255
5 | 4 4 111 184 210 255
6 | 5 5 216 101 79 255
7 | 6 6 221 130 101 255
8 | 7 7 144 238 144 255
9 | 8 8 192 104 88 255
10 | 9 9 220 245 20 255
11 | 10 10 78 63 0 255
12 | 11 11 255 250 220 255
13 | 12 12 100 100 130 255
14 | 13 13 200 200 235 255
15 | 14 14 250 250 210 255
16 | 15 15 244 214 49 255
17 | 16 16 0 151 206 255
18 | 17 17 216 101 79 255
19 | 18 18 183 156 220 255
20 | 19 19 183 214 211 255
21 | 20 20 152 189 207 255
22 | 21 21 111 184 210 255
23 | 22 22 178 212 242 255
24 | 23 23 68 172 100 255
25 | 24 24 111 197 131 255
26 | 25 25 85 188 255 255
27 | 26 26 0 145 30 255
28 | 27 27 214 230 130 255
29 | 28 28 78 63 0 255
30 | 29 29 218 255 255 255
31 | 30 30 170 250 250 255
32 | 31 31 140 224 228 255
33 | 32 32 188 65 28 255
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Tomasz_Szczepanski/docker/requirements.txt:
--------------------------------------------------------------------------------
1 | #
2 | # This file is autogenerated by pip-compile with Python 3.10
3 | # by the following command:
4 | #
5 | # pip-compile --resolver=backtracking
6 | #
7 | arrow==1.2.3
8 | # via jinja2-time
9 | binaryornot==0.4.4
10 | # via cookiecutter
11 | build==0.10.0
12 | # via pip-tools
13 | certifi==2022.12.7
14 | # via requests
15 | chardet==5.1.0
16 | # via binaryornot
17 | charset-normalizer==3.1.0
18 | # via requests
19 | click==8.1.3
20 | # via
21 | # cookiecutter
22 | # evalutils
23 | # pip-tools
24 | cookiecutter==2.1.1
25 | # via evalutils
26 | evalutils==0.4.0
27 | # via -r requirements.in
28 | idna==3.4
29 | # via requests
30 | imageio[tifffile]==2.26.0
31 | # via evalutils
32 | jinja2==3.1.2
33 | # via
34 | # cookiecutter
35 | # jinja2-time
36 | jinja2-time==0.2.0
37 | # via cookiecutter
38 | joblib==1.2.0
39 | # via scikit-learn
40 | markupsafe==2.1.2
41 | # via jinja2
42 | monai==1.1.0
43 | numpy==1.24.2
44 | # via
45 | # evalutils
46 | # imageio
47 | # pandas
48 | # scikit-learn
49 | # scipy
50 | # tifffile
51 | packaging==23.0
52 | # via build
53 | pandas==1.5.3
54 | # via evalutils
55 | pillow==9.4.0
56 | # via imageio
57 | pip-tools==6.12.3
58 | # via evalutils
59 | pyproject-hooks==1.0.0
60 | # via build
61 | python-dateutil==2.8.2
62 | # via
63 | # arrow
64 | # pandas
65 | python-slugify==8.0.1
66 | # via cookiecutter
67 | pytz==2022.7.1
68 | # via pandas
69 | pyyaml==6.0
70 | # via cookiecutter
71 | requests==2.28.2
72 | # via cookiecutter
73 | scikit-learn==1.2.2
74 | # via evalutils
75 | scipy==1.10.1
76 | # via
77 | # evalutils
78 | # scikit-learn
79 | simpleitk==2.2.1
80 | # via evalutils
81 | six==1.16.0
82 | # via python-dateutil
83 | text-unidecode==1.3
84 | # via python-slugify
85 | threadpoolctl==3.1.0
86 | # via scikit-learn
87 | tifffile==2023.3.15
88 | # via imageio
89 | tomli==2.0.1
90 | # via
91 | # build
92 | # pyproject-hooks
93 | urllib3==1.26.15
94 | # via requests
95 | wheel==0.40.0
96 | # via pip-tools
97 | torchio==0.18.92
98 |
99 | # The following packages are considered to be unsafe in a requirements file:
100 | # pip
101 | # setuptools
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Tomasz_Szczepanski/docker/src/inference_HQ.py:
--------------------------------------------------------------------------------
1 | import os
2 | import time
3 |
4 |
5 | # TORCH
6 | import torch
7 | from monai.data import set_track_meta, decollate_batch, Dataset, DataLoader
8 | from monai.inferers import sliding_window_inference
9 |
10 | # external modules
11 | from src.data_augmentation import Transforms
12 | from src.models.resunet import ResUNet
13 |
14 | def inference_HQ(img_numpy_path, gcr_path, args, device):
15 | print("Starting inference HQ...")
16 | start_time_testing = time.time()
17 | full_prediction = None
18 |
19 | for checkpoint_path in args.checkpoint_hq:
20 | if args.model_name_hq == "ResUnet18":
21 | model = ResUNet(spatial_dims=3, in_channels=args.in_channels_hq, out_channels=args.classes, act='relu',
22 | norm=args.norm,
23 | backbone_name='resnet18', bias=False, big_decoder=True)
24 | elif args.model_name_hq == "ResUnet50":
25 | model = ResUNet(spatial_dims=3, in_channels=args.in_channels_hq, out_channels=args.classes, act='relu',
26 | norm=args.norm,
27 | backbone_name='resnet50')
28 |
29 | model.load_state_dict(torch.load(checkpoint_path, map_location=device)['model_state_dict'], strict=False)
30 | model = model.to(device)
31 | model.eval()
32 |
33 | trans = Transforms(args, device, inference_mode="HQ")
34 | set_track_meta(True)
35 |
36 | dataset = Dataset([{"image": img_numpy_path, "gcr": gcr_path}], trans.val_transform_hq)
37 | loader = DataLoader(dataset, batch_size=1)
38 | data = next(iter(loader))
39 | data["pred"] = sliding_window_inference(data["image"], roi_size=args.patch_size_hq, sw_batch_size=8,
40 | predictor=model, overlap=0.6, sw_device=device,
41 | device=device, mode=args.inference_mode, sigma_scale=0.125,
42 | padding_mode='constant', cval=0, progress=True)
43 |
44 | val_pred = [trans.val_invert_transform_hq(seg_pred)["pred"] for seg_pred in decollate_batch(data)]
45 |
46 | if full_prediction is None:
47 | full_prediction = val_pred[0].detach().cpu().numpy()
48 | else:
49 | full_prediction = full_prediction + val_pred[0].detach().cpu().numpy()
50 |
51 | full_prediction = full_prediction / len(args.checkpoint_hq)
52 | test_time = time.time() - start_time_testing
53 | print(f"Finished inference HQ: {test_time:.2f}s")
54 | return full_prediction
55 |
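
A usage sketch for the ensemble loop above. The checkpoint path and the `args` fields are hypothetical here; in the real container they come from `src/arg_parser.py` via `process.py` (note `in_channels_hq=2`, matching the GCR-as-second-channel design described in the README):

    from types import SimpleNamespace
    import torch

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    args = SimpleNamespace(
        checkpoint_hq=["checkpoints/hq_fold0.pt"],   # one entry per ensemble member
        model_name_hq="ResUnet18", in_channels_hq=2, classes=2,
        norm="instance", patch_size_hq=(128, 128, 128),
        inference_mode="gaussian",
    )
    probs = inference_HQ("case.npy", "case_gcr.npy", args, device)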
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Tomasz_Szczepanski/docker/src/inference_LQ_coarse.py:
--------------------------------------------------------------------------------
1 | import time
2 |
3 | # TORCH
4 | import torch
5 | from monai.data import set_track_meta, decollate_batch, Dataset, DataLoader
6 | from monai.inferers import sliding_window_inference
7 |
8 | # external modules
9 | from src.data_augmentation import Transforms
10 | from src.models.resunet import ResUNet
11 |
12 | import numpy as np
13 |
14 |
15 | def inference_LQ(img_numpy_path, args, device):
16 | print("Starting inference LQ...")
17 | start_time_testing = time.time()
18 | full_prediction = None
19 | for checkpoint_path in args.checkpoint_lq:
20 |
21 | if args.model_name_lq == "ResUnet18":
22 | model = ResUNet(spatial_dims=3, in_channels=args.in_channels_lq, out_channels=args.classes, act='relu',
23 | norm=args.norm,
24 | backbone_name='resnet18', bias=False, big_decoder=True)
25 | elif args.model_name_lq == "ResUnet50":
26 | model = ResUNet(spatial_dims=3, in_channels=args.in_channels_lq, out_channels=args.classes, act='relu',
27 | norm=args.norm,
28 | backbone_name='resnet50', bias=False)
29 |
30 | model.load_state_dict(torch.load(checkpoint_path, map_location=device)['model_state_dict'], strict=False)
31 | model = model.to(device)
32 | model.eval()
33 |
34 | trans = Transforms(args, device, inference_mode="LQ")
35 | set_track_meta(True)
36 |
37 | dataset = Dataset([{"image": img_numpy_path}], trans.val_transform_lq)
38 | loader = DataLoader(dataset, batch_size=1)
39 | data = next(iter(loader))
40 |
41 | data["pred"] = sliding_window_inference(data["image"], roi_size=args.patch_size_lq, sw_batch_size=8,
42 | predictor=model,
43 | overlap=0.6, sw_device=device,
44 | device=device, mode=args.inference_mode, sigma_scale=0.125,
45 | padding_mode='constant', cval=0, progress=True)
46 |
47 | val_pred = [trans.val_invert_transform_lq(seg_pred)["pred"] for seg_pred in decollate_batch(data)]
48 |
49 | if full_prediction is None:
50 | full_prediction = val_pred[0].detach().cpu().numpy()
51 | else:
52 | full_prediction = full_prediction+val_pred[0].detach().cpu().numpy()
53 |
54 | full_prediction = full_prediction/len(args.checkpoint_lq)
55 | test_time = time.time() - start_time_testing
56 | print(f"Finished inference LQ: {test_time:.2f}s")
57 | return full_prediction
58 |
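
How the two stages fit together (a sketch; the actual glue code lives in `process.py`, which is not shown here): the coarse LQ probabilities serve as the global context reference (GCR) that `inference_HQ` consumes as its second input channel. File names are illustrative:

    import numpy as np

    lq_probs = inference_LQ("case.npy", args, device)   # coarse probability map
    np.save("case_gcr.npy", lq_probs)                   # persist as the GCR input
    hq_probs = inference_HQ("case.npy", "case_gcr.npy", args, device)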
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Tomasz_Szczepanski/docker/test/test.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | SCRIPTPATH="$( cd "$(dirname "$0")" ; pwd -P )"
4 |
5 | "$SCRIPTPATH/../build.sh"
6 |
7 | VOLUME_SUFFIX=$(dd if=/dev/urandom bs=32 count=1 | md5sum | cut --delimiter=' ' --fields=1)
8 | # Maximum is currently 30g, configurable in your algorithm image settings on grand challenge
9 | MEM_LIMIT="10g"
10 |
11 | docker volume create toothfairy_algorithm-output-$VOLUME_SUFFIX
12 |
13 | # Do not change any of the parameters to docker run, these are fixed
14 | # You are free to add --gpus all if you would like to locally test
15 | # your algorithm with your GPU hardware. In the grand-challenge
16 | # environment each container gets a single T4 with 16GB and is
17 | # run with that flag.
18 | docker run --rm \
19 | --memory="${MEM_LIMIT}" \
20 | --memory-swap="${MEM_LIMIT}" \
21 | --network="none" \
22 | --cap-drop="ALL" \
23 | --security-opt="no-new-privileges" \
24 | --shm-size="128m" \
25 | --pids-limit="256" \
26 | --gpus all \
27 | -v $SCRIPTPATH/test/:/input/ \
28 | -v toothfairy_algorithm-output-$VOLUME_SUFFIX:/output/ \
29 | toothfairy_algorithm
30 |
31 | docker run --rm \
32 | -v toothfairy_algorithm-output-$VOLUME_SUFFIX:/output/ \
33 | python:3.10-slim cat /output/results.json #| python -m json.tool
34 |
35 | docker run --rm \
36 | -v toothfairy_algorithm-output-$VOLUME_SUFFIX:/output/ \
37 | python:3.10-slim ls -lah /output/images/inferior-alveolar-canal/
38 |
39 | cp -r /var/lib/docker/volumes/toothfairy_algorithm-output-$VOLUME_SUFFIX/ output
40 | chown mg:mg -R output
41 |
42 | docker volume rm toothfairy_algorithm-output-$VOLUME_SUFFIX
43 |
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Tomasz_Szczepanski/docker/test/transform_to_mha.py:
--------------------------------------------------------------------------------
1 | import SimpleITK as sitk
2 | import os
3 | import numpy as np
4 |
5 | p_id = "P1"
6 | p = f"/home/tf/ToothFairy_data/ToothFairy_Dataset/Dataset/{p_id}/data.npy"
7 | p_out = f"test/images/cbct/{p_id}.mha"
8 |
9 | mask = np.load(p)
10 | mask_sitk = sitk.GetImageFromArray(mask)
11 | # input_array = sitk.GetArrayFromImage(mask_sitk)
12 | # input_array = input_array.astype(np.float32)
13 | # np.save(p_out, input_array)
14 | writer = sitk.ImageFileWriter()
15 | writer.SetFileName(p_out)
16 | writer.Execute(mask_sitk)
17 |
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Tomasz_Szczepanski/src/dummy_logger.py:
--------------------------------------------------------------------------------
1 | class DummyExperiment:
2 | def __init__(self):
3 | pass
4 |
5 | def __enter__(self):
6 | return self
7 |
8 | def __exit__(self, exc_type, exc_val, exc_tb):
9 | pass
10 |
11 | def log_parameters(self, *args, **kwargs):
12 | pass
13 |
14 | def log_metric(self, *args, **kwargs):
15 | pass
16 |
17 | def log_metrics(self, *args, **kwargs):
18 | pass
19 |
20 | def log_table(self, *args, **kwargs):
21 | pass
22 |
23 | def log_current_epoch(self, *args, **kwargs):
24 | pass
25 |
26 | def log_figure(self, *args, **kwargs):
27 | pass
28 |
29 | def log_image(self, *args, **kwargs):
30 | pass
31 |
32 | def log_scene(self, *args, **kwargs):
33 | pass
34 |
35 | def train(self):
36 | return self
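
This is a null-object stand-in for an experiment tracker: every logging call is a no-op, so training code can log unconditionally whether or not a real tracker is configured. A usage sketch:

    exp = DummyExperiment()
    exp.log_metric("dice", 0.91)   # silently ignored
    with exp.train():              # train() returns self, so this works
        exp.log_current_epoch(1)   # also a no-op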
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Yannick_Kirchhoff/dataset.json:
--------------------------------------------------------------------------------
1 | {
2 | "channel_names": {
3 | "0": "CT"
4 | },
5 | "description": "toothfairy dataset",
6 | "file_ending": ".npy",
7 | "labels": {
8 | "background": 0,
9 | "foreground": 1
10 | },
11 | "overwrite_image_reader_writer": "NumpyIO",
12 | "name": "Dataset902_toothfairy_onlyct",
13 | "numTraining": 428
14 | }
15 |
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AImageLab-zip/ToothFairy/8486f68c65b37ee4398fb02276558ccf467583ea/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/__init__.py
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/batch_running/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AImageLab-zip/ToothFairy/8486f68c65b37ee4398fb02276558ccf467583ea/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/batch_running/__init__.py
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/batch_running/benchmarking/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AImageLab-zip/ToothFairy/8486f68c65b37ee4398fb02276558ccf467583ea/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/batch_running/benchmarking/__init__.py
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/batch_running/benchmarking/generate_benchmarking_commands.py:
--------------------------------------------------------------------------------
1 | if __name__ == '__main__':
2 | """
3 | This code probably only works within the DKFZ infrastructure (using LSF). You will need to adapt it to your scheduler!
4 | """
5 | gpu_models = [#'NVIDIAA100_PCIE_40GB', 'NVIDIAGeForceRTX2080Ti', 'NVIDIATITANRTX', 'TeslaV100_SXM2_32GB',
6 | 'NVIDIAA100_SXM4_40GB']#, 'TeslaV100_PCIE_32GB']
7 | datasets = [2, 3, 4, 5]
8 | trainers = ['nnUNetTrainerBenchmark_5epochs', 'nnUNetTrainerBenchmark_5epochs_noDataLoading']
9 | plans = ['nnUNetPlans']
10 | configs = ['2d', '2d_bs3x', '2d_bs6x', '3d_fullres', '3d_fullres_bs3x', '3d_fullres_bs6x']
11 | num_gpus = 1
12 |
13 | benchmark_configurations = {d: configs for d in datasets}
14 |
15 | exclude_hosts = "-R \"select[hname!='e230-dgxa100-1']\""
16 | resources = "-R \"tensorcore\""
17 | queue = "-q gpu"
18 | preamble = "-L /bin/bash \"source ~/load_env_torch210.sh && "
19 | train_command = 'nnUNet_compile=False nnUNet_results=/dkfz/cluster/gpu/checkpoints/OE0441/isensee/nnUNet_results_remake_benchmark nnUNetv2_train'
20 |
21 | folds = (0, )
22 |
23 | use_these_modules = {
24 | tr: plans for tr in trainers
25 | }
26 |
27 | additional_arguments = f' -num_gpus {num_gpus}' # ''
28 |
29 | output_file = "/home/isensee/deleteme.txt"
30 | with open(output_file, 'w') as f:
31 | for g in gpu_models:
32 | gpu_requirements = f"-gpu num={num_gpus}:j_exclusive=yes:gmodel={g}"
33 | for tr in use_these_modules.keys():
34 | for p in use_these_modules[tr]:
35 | for dataset in benchmark_configurations.keys():
36 | for config in benchmark_configurations[dataset]:
37 | for fl in folds:
38 | command = f'bsub {exclude_hosts} {resources} {queue} {gpu_requirements} {preamble} {train_command} {dataset} {config} {fl} -tr {tr} -p {p}'
39 | if additional_arguments is not None and len(additional_arguments) > 0:
40 | command += f' {additional_arguments}'
41 | f.write(f'{command}\"\n')
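
For reference, one command this loop writes out (assembled from the settings above, with the quote fix applied to `exclude_hosts`; first GPU model, dataset 2, config `2d`, fold 0):

    bsub -R "select[hname!='e230-dgxa100-1']" -R "tensorcore" -q gpu -gpu num=1:j_exclusive=yes:gmodel=NVIDIAA100_SXM4_40GB -L /bin/bash "source ~/load_env_torch210.sh && nnUNet_compile=False nnUNet_results=/dkfz/cluster/gpu/checkpoints/OE0441/isensee/nnUNet_results_remake_benchmark nnUNetv2_train 2 2d 0 -tr nnUNetTrainerBenchmark_5epochs -p nnUNetPlans -num_gpus 1"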
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/batch_running/collect_results_custom_Decathlon_2d.py:
--------------------------------------------------------------------------------
1 | from batchgenerators.utilities.file_and_folder_operations import *
2 |
3 | from nnunetv2.batch_running.collect_results_custom_Decathlon import collect_results, summarize
4 | from nnunetv2.paths import nnUNet_results
5 |
6 | if __name__ == '__main__':
7 | use_these_trainers = {
8 | 'nnUNetTrainer': ('nnUNetPlans', ),
9 | }
10 | all_results_file = join(nnUNet_results, 'hrnet_results.csv')
11 | datasets = [2, 3, 4, 17, 20, 24, 27, 38, 55, 64, 82]
12 | collect_results(use_these_trainers, datasets, all_results_file)
13 |
14 | folds = (0, )
15 | configs = ('2d', )
16 | output_file = join(nnUNet_results, 'hrnet_results_summary_fold0.csv')
17 | summarize(all_results_file, output_file, folds, configs, datasets, use_these_trainers)
18 |
19 |
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/batch_running/release_trainings/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AImageLab-zip/ToothFairy/8486f68c65b37ee4398fb02276558ccf467583ea/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/batch_running/release_trainings/__init__.py
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/batch_running/release_trainings/nnunetv2_v1/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AImageLab-zip/ToothFairy/8486f68c65b37ee4398fb02276558ccf467583ea/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/batch_running/release_trainings/nnunetv2_v1/__init__.py
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/configuration.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | from nnunetv2.utilities.default_n_proc_DA import get_allowed_n_proc_DA
4 |
5 | default_num_processes = 8 if 'nnUNet_def_n_proc' not in os.environ else int(os.environ['nnUNet_def_n_proc'])
6 |
7 | ANISO_THRESHOLD = 3 # determines when a sample is considered anisotropic (3 means that the spacing in the low
8 | # resolution axis must be 3x as large as the next largest spacing)
9 |
10 | default_n_proc_DA = get_allowed_n_proc_DA()
11 |
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/dataset_conversion/Dataset115_EMIDEC.py:
--------------------------------------------------------------------------------
1 | import shutil
2 | from pathlib import Path
3 |
4 | from nnunetv2.dataset_conversion.Dataset027_ACDC import make_out_dirs
5 | from nnunetv2.dataset_conversion.generate_dataset_json import generate_dataset_json
6 |
7 |
8 | def copy_files(src_data_dir: Path, src_test_dir: Path, train_dir: Path, labels_dir: Path, test_dir: Path):
9 | """Copy files from the EMIDEC dataset to the nnUNet dataset folder. Returns the number of training cases."""
10 | patients_train = sorted([f for f in src_data_dir.iterdir() if f.is_dir()])
11 | patients_test = sorted([f for f in src_test_dir.iterdir() if f.is_dir()])
12 |
13 | # Copy training files and corresponding labels.
14 | for patient in patients_train:
15 | train_file = patient / "Images" / f"{patient.name}.nii.gz"
16 | label_file = patient / "Contours" / f"{patient.name}.nii.gz"
17 | shutil.copy(train_file, train_dir / f"{train_file.stem.split('.')[0]}_0000.nii.gz")
18 | shutil.copy(label_file, labels_dir)
19 |
20 | # Copy test files.
21 | for patient in patients_test:
22 | test_file = patient / "Images" / f"{patient.name}.nii.gz"
23 | shutil.copy(test_file, test_dir / f"{test_file.stem.split('.')[0]}_0000.nii.gz")
24 |
25 | return len(patients_train)
26 |
27 |
28 | def convert_emidec(src_data_dir: str, src_test_dir: str, dataset_id=27):
29 | out_dir, train_dir, labels_dir, test_dir = make_out_dirs(dataset_id=dataset_id, task_name="EMIDEC")
30 | num_training_cases = copy_files(Path(src_data_dir), Path(src_test_dir), train_dir, labels_dir, test_dir)
31 |
32 | generate_dataset_json(
33 | str(out_dir),
34 | channel_names={
35 | 0: "cineMRI",
36 | },
37 | labels={
38 | "background": 0,
39 | "cavity": 1,
40 | "normal_myocardium": 2,
41 | "myocardial_infarction": 3,
42 | "no_reflow": 4,
43 | },
44 | file_ending=".nii.gz",
45 | num_training_cases=num_training_cases,
46 | )
47 |
48 |
49 | if __name__ == "__main__":
50 | import argparse
51 |
52 | parser = argparse.ArgumentParser()
53 | parser.add_argument("-i", "--input_dir", type=str, help="The EMIDEC dataset directory.")
54 | parser.add_argument("-t", "--test_dir", type=str, help="The EMIDEC test set directory.")
55 | parser.add_argument(
56 | "-d", "--dataset_id", required=False, type=int, default=115, help="nnU-Net Dataset ID, default: 115"
57 | )
58 | args = parser.parse_args()
59 | print("Converting...")
60 | convert_emidec(args.input_dir, args.test_dir, args.dataset_id)
61 | print("Done!")
62 |
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/dataset_conversion/Dataset220_KiTS2023.py:
--------------------------------------------------------------------------------
1 | from batchgenerators.utilities.file_and_folder_operations import *
2 | import shutil
3 | from nnunetv2.dataset_conversion.generate_dataset_json import generate_dataset_json
4 | from nnunetv2.paths import nnUNet_raw
5 |
6 |
7 | def convert_kits2023(kits_base_dir: str, nnunet_dataset_id: int = 220):
8 | task_name = "KiTS2023"
9 |
10 | foldername = "Dataset%03.0d_%s" % (nnunet_dataset_id, task_name)
11 |
12 | # setting up nnU-Net folders
13 | out_base = join(nnUNet_raw, foldername)
14 | imagestr = join(out_base, "imagesTr")
15 | labelstr = join(out_base, "labelsTr")
16 | maybe_mkdir_p(imagestr)
17 | maybe_mkdir_p(labelstr)
18 |
19 | cases = subdirs(kits_base_dir, prefix='case_', join=False)
20 | for tr in cases:
21 | shutil.copy(join(kits_base_dir, tr, 'imaging.nii.gz'), join(imagestr, f'{tr}_0000.nii.gz'))
22 | shutil.copy(join(kits_base_dir, tr, 'segmentation.nii.gz'), join(labelstr, f'{tr}.nii.gz'))
23 |
24 | generate_dataset_json(out_base, {0: "CT"},
25 | labels={
26 | "background": 0,
27 | "kidney": (1, 2, 3),
28 | "masses": (2, 3),
29 | "tumor": 2
30 | },
31 | regions_class_order=(1, 3, 2),
32 | num_training_cases=len(cases), file_ending='.nii.gz',
33 | dataset_name=task_name, reference='none',
34 | release='prerelease',
35 | overwrite_image_reader_writer='NibabelIOWithReorient',
36 | description="KiTS2023")
37 |
38 |
39 | if __name__ == '__main__':
40 | import argparse
41 | parser = argparse.ArgumentParser()
42 | parser.add_argument('input_folder', type=str,
43 | help="The downloaded and extracted KiTS2023 dataset (must have case_XXXXX subfolders)")
44 | parser.add_argument('-d', required=False, type=int, default=220, help='nnU-Net Dataset ID, default: 220')
45 | args = parser.parse_args()
46 | convert_kits2023(args.input_folder, args.d)
48 |
49 | # /media/isensee/raw_data/raw_datasets/kits23/dataset
50 |
51 |
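
A note on the region-based labels above: each entry maps a region name to the union of raw label values, so the network predicts overlapping masks rather than mutually exclusive classes. A small sketch of that reading (hypothetical array):

    import numpy as np

    seg = np.array([0, 1, 2, 3])        # raw KiTS label values
    kidney = np.isin(seg, (1, 2, 3))    # whole kidney including masses
    masses = np.isin(seg, (2, 3))       # tumor + cyst
    tumor = seg == 2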
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/dataset_conversion/Dataset988_dummyDataset4.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | from batchgenerators.utilities.file_and_folder_operations import *
4 |
5 | from nnunetv2.paths import nnUNet_raw
6 | from nnunetv2.utilities.utils import get_filenames_of_train_images_and_targets
7 |
8 | if __name__ == '__main__':
9 | # creates a dummy dataset where there are no files in imagestr and labelstr
10 | source_dataset = 'Dataset004_Hippocampus'
11 |
12 | target_dataset = 'Dataset988_dummyDataset4'
13 | target_dataset_dir = join(nnUNet_raw, target_dataset)
14 | maybe_mkdir_p(target_dataset_dir)
15 |
16 | dataset = get_filenames_of_train_images_and_targets(join(nnUNet_raw, source_dataset))
17 |
18 | # the returned dataset will have absolute paths. We should use relative paths so that you can freely copy
19 | # datasets around between systems. As long as the source dataset is there, it will continue working even if
20 | # nnUNet_raw is in a different location
21 |
22 | # paths must be relative to target_dataset_dir!!!
23 | for k in dataset.keys():
24 | dataset[k]['label'] = os.path.relpath(dataset[k]['label'], target_dataset_dir)
25 | dataset[k]['images'] = [os.path.relpath(i, target_dataset_dir) for i in dataset[k]['images']]
26 |
27 | # load old dataset.json
28 | dataset_json = load_json(join(nnUNet_raw, source_dataset, 'dataset.json'))
29 | dataset_json['dataset'] = dataset
30 |
31 | # save
32 | save_json(dataset_json, join(target_dataset_dir, 'dataset.json'), sort_keys=False)
33 |
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/dataset_conversion/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AImageLab-zip/ToothFairy/8486f68c65b37ee4398fb02276558ccf467583ea/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/dataset_conversion/__init__.py
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/dataset_conversion/datasets_for_integration_tests/Dataset997_IntegrationTest_Hippocampus_regions.py:
--------------------------------------------------------------------------------
1 | import shutil
2 |
3 | from batchgenerators.utilities.file_and_folder_operations import isdir, join, load_json, save_json
4 |
5 | from nnunetv2.utilities.dataset_name_id_conversion import maybe_convert_to_dataset_name
6 | from nnunetv2.paths import nnUNet_raw
7 |
8 | if __name__ == '__main__':
9 | dataset_name = 'IntegrationTest_Hippocampus_regions'
10 | dataset_id = 997
11 | dataset_name = f"Dataset{dataset_id:03d}_{dataset_name}"
12 |
13 | try:
14 | existing_dataset_name = maybe_convert_to_dataset_name(dataset_id)
15 | if existing_dataset_name != dataset_name:
16 | raise FileExistsError(
17 | f"A different dataset with id {dataset_id} already exists :-(: {existing_dataset_name}. If "
18 | f"you intent to delete it, remember to also remove it in nnUNet_preprocessed and "
19 | f"nnUNet_results!")
20 | except RuntimeError:
21 | pass
22 |
23 | if isdir(join(nnUNet_raw, dataset_name)):
24 | shutil.rmtree(join(nnUNet_raw, dataset_name))
25 |
26 | source_dataset = maybe_convert_to_dataset_name(4)
27 | shutil.copytree(join(nnUNet_raw, source_dataset), join(nnUNet_raw, dataset_name))
28 |
29 | # additionally optimize entire hippocampus region, remove Posterior
30 | dj = load_json(join(nnUNet_raw, dataset_name, 'dataset.json'))
31 | dj['labels'] = {
32 | 'background': 0,
33 | 'hippocampus': (1, 2),
34 | 'anterior': 1
35 | }
36 | dj['regions_class_order'] = (2, 1)
37 | save_json(dj, join(nnUNet_raw, dataset_name, 'dataset.json'), sort_keys=False)
38 |
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/dataset_conversion/datasets_for_integration_tests/Dataset998_IntegrationTest_Hippocampus_ignore.py:
--------------------------------------------------------------------------------
1 | import shutil
2 |
3 | from batchgenerators.utilities.file_and_folder_operations import isdir, join, load_json, save_json
4 |
5 | from nnunetv2.utilities.dataset_name_id_conversion import maybe_convert_to_dataset_name
6 | from nnunetv2.paths import nnUNet_raw
7 |
8 |
9 | if __name__ == '__main__':
10 | dataset_name = 'IntegrationTest_Hippocampus_ignore'
11 | dataset_id = 998
12 | dataset_name = f"Dataset{dataset_id:03d}_{dataset_name}"
13 |
14 | try:
15 | existing_dataset_name = maybe_convert_to_dataset_name(dataset_id)
16 | if existing_dataset_name != dataset_name:
17 | raise FileExistsError(f"A different dataset with id {dataset_id} already exists :-(: {existing_dataset_name}. If "
18 | f"you intent to delete it, remember to also remove it in nnUNet_preprocessed and "
19 | f"nnUNet_results!")
20 | except RuntimeError:
21 | pass
22 |
23 | if isdir(join(nnUNet_raw, dataset_name)):
24 | shutil.rmtree(join(nnUNet_raw, dataset_name))
25 |
26 | source_dataset = maybe_convert_to_dataset_name(4)
27 | shutil.copytree(join(nnUNet_raw, source_dataset), join(nnUNet_raw, dataset_name))
28 |
29 | # set class 2 to ignore label
30 | dj = load_json(join(nnUNet_raw, dataset_name, 'dataset.json'))
31 | dj['labels']['ignore'] = 2
32 | del dj['labels']['Posterior']
33 | save_json(dj, join(nnUNet_raw, dataset_name, 'dataset.json'), sort_keys=False)
34 |
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/dataset_conversion/datasets_for_integration_tests/Dataset999_IntegrationTest_Hippocampus.py:
--------------------------------------------------------------------------------
1 | import shutil
2 |
3 | from batchgenerators.utilities.file_and_folder_operations import isdir, join
4 |
5 | from nnunetv2.utilities.dataset_name_id_conversion import maybe_convert_to_dataset_name
6 | from nnunetv2.paths import nnUNet_raw
7 |
8 |
9 | if __name__ == '__main__':
10 | dataset_name = 'IntegrationTest_Hippocampus'
11 | dataset_id = 999
12 | dataset_name = f"Dataset{dataset_id:03d}_{dataset_name}"
13 |
14 | try:
15 | existing_dataset_name = maybe_convert_to_dataset_name(dataset_id)
16 | if existing_dataset_name != dataset_name:
17 | raise FileExistsError(f"A different dataset with id {dataset_id} already exists :-(: {existing_dataset_name}. If "
18 | f"you intent to delete it, remember to also remove it in nnUNet_preprocessed and "
19 | f"nnUNet_results!")
20 | except RuntimeError:
21 | pass
22 |
23 | if isdir(join(nnUNet_raw, dataset_name)):
24 | shutil.rmtree(join(nnUNet_raw, dataset_name))
25 |
26 | source_dataset = maybe_convert_to_dataset_name(4)
27 | shutil.copytree(join(nnUNet_raw, source_dataset), join(nnUNet_raw, dataset_name))
28 |
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/dataset_conversion/datasets_for_integration_tests/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AImageLab-zip/ToothFairy/8486f68c65b37ee4398fb02276558ccf467583ea/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/dataset_conversion/datasets_for_integration_tests/__init__.py
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/ensembling/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AImageLab-zip/ToothFairy/8486f68c65b37ee4398fb02276558ccf467583ea/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/ensembling/__init__.py
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/evaluation/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AImageLab-zip/ToothFairy/8486f68c65b37ee4398fb02276558ccf467583ea/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/evaluation/__init__.py
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/experiment_planning/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AImageLab-zip/ToothFairy/8486f68c65b37ee4398fb02276558ccf467583ea/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/experiment_planning/__init__.py
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/experiment_planning/dataset_fingerprint/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AImageLab-zip/ToothFairy/8486f68c65b37ee4398fb02276558ccf467583ea/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/experiment_planning/dataset_fingerprint/__init__.py
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/experiment_planning/experiment_planners/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AImageLab-zip/ToothFairy/8486f68c65b37ee4398fb02276558ccf467583ea/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/experiment_planning/experiment_planners/__init__.py
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/experiment_planning/experiment_planners/readme.md:
--------------------------------------------------------------------------------
1 | What do experiment planners need to do? (These are notes for myself while rewriting nnU-Net; they are provided as is,
2 | without further explanation, and they also include new features.)
3 | - (done) preprocessor name should be configurable via cli
4 | - (done) gpu memory target should be configurable via cli
5 | - (done) plans name should be configurable via cli
6 | - (done) data name should be specified in plans (plans specify the data they want to use, this will allow us to manually
7 | edit plans files without having to copy the data folders)
8 | - plans must contain:
9 | - (done) transpose forward/backward
10 | - (done) preprocessor name (can differ for each config)
11 | - (done) spacing
12 | - (done) normalization scheme
13 | - (done) target spacing
14 | - (done) conv and pool op kernel sizes
15 | - (done) base num features for architecture
16 | - (done) data identifier
17 | - num conv per stage?
18 | - (done) use mask for norm
19 | - [NO. Handled by LabelManager & dataset.json] num segmentation outputs
20 | - [NO. Handled by LabelManager & dataset.json] ignore class
21 | - [NO. Handled by LabelManager & dataset.json] list of regions or classes
22 | - [NO. Handled by LabelManager & dataset.json] regions class order, if applicable
23 | - (done) resampling function to be used
24 | - (done) the image reader writer class that should be used
25 |
26 |
27 | dataset.json
28 | mandatory:
29 | - numTraining
30 | - labels (value 'ignore' has special meaning. Cannot have more than one ignore_label)
31 | - modalities
32 | - file_ending
33 |
34 | optional
35 | - overwrite_image_reader_writer (if absent, auto)
36 | - regions
37 | - region_class_order
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/experiment_planning/plans_for_pretraining/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AImageLab-zip/ToothFairy/8486f68c65b37ee4398fb02276558ccf467583ea/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/experiment_planning/plans_for_pretraining/__init__.py
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/imageio/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AImageLab-zip/ToothFairy/8486f68c65b37ee4398fb02276558ccf467583ea/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/imageio/__init__.py
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/imageio/npy_reader_writer.py:
--------------------------------------------------------------------------------
1 | # Copyright 2021 HIP Applied Computer Vision Lab, Division of Medical Image Computing, German Cancer Research Center
2 | # (DKFZ), Heidelberg, Germany
3 | #
4 | # Licensed under the Apache License, Version 2.0 (the "License");
5 | # you may not use this file except in compliance with the License.
6 | # You may obtain a copy of the License at
7 | #
8 | # http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS,
12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | # See the License for the specific language governing permissions and
14 | # limitations under the License.
15 |
16 | from typing import Tuple, Union, List
17 | import numpy as np
18 |
19 | from nnunetv2.imageio.base_reader_writer import BaseReaderWriter
20 |
21 |
22 | class NumpyIO(BaseReaderWriter):
23 | supported_file_endings = [
24 | '.npy',
25 | '.npz'
26 | ]
27 |
28 | def read_images(self, image_fnames: Union[List[str], Tuple[str, ...]], key="data") -> Tuple[np.ndarray, dict]:
29 | images = []
30 | for f in image_fnames:
31 | if f.endswith(".npy"):
32 | image = np.load(f)
33 | elif f.endswith(".npz"):
34 | with np.load(f) as file:
35 | image = file[key]
36 | assert len(image.shape) == 3, 'only 3d images are supported by NumpyIO'
37 | images.append(image[None])
38 |
39 | if not self._check_all_same([i.shape for i in images]):
40 | print('ERROR! Not all input images have the same shape!')
41 | print('Shapes:')
42 | print([i.shape for i in images])
43 | print('Image files:')
44 | print(image_fnames)
45 | raise RuntimeError()
46 |
47 | stacked_images = np.vstack(images)
48 | properties = {
49 | 'spacing': [1, 1, 1]
50 | }
51 | return stacked_images.astype(np.float32), properties
52 |
53 | def read_seg(self, seg_fname: str) -> Tuple[np.ndarray, dict]:
54 | return self.read_images((seg_fname,), "seg")
55 |
56 | def write_seg(self, seg: np.ndarray, output_fname: str, properties: dict) -> None:
57 | seg = seg.astype(np.uint8)
58 | if output_fname.endswith(".npy"):
59 | np.save(output_fname, seg)
60 | elif output_fname.endswith(".npz"):
61 | np.savez(output_fname, seg=seg)
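
A minimal usage sketch for the class above (hypothetical file name):

    import numpy as np

    np.save("case0_0000.npy", np.zeros((32, 64, 64), dtype=np.float32))
    io = NumpyIO()
    image, props = io.read_images(("case0_0000.npy",))
    print(image.shape, props["spacing"])   # (1, 32, 64, 64) [1, 1, 1]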
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/imageio/readme.md:
--------------------------------------------------------------------------------
1 | - Derive your adapter from `BaseReaderWriter`.
2 | - Reimplement all abstract methods.
3 | - Make sure to support 2d and 3d input images (or raise an informative error).
4 | - Place it in this folder or nnU-Net won't find it!
5 | - Add it to LIST_OF_IO_CLASSES in `reader_writer_registry.py`.
6 |
7 | Bam, you're done!
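
A minimal adapter along these lines (a sketch; the method signatures are inferred from `NumpyIO` in this folder, and the `.foo` format plus its `load_foo`/`save_foo` helpers are hypothetical):

    from typing import List, Tuple, Union
    import numpy as np
    from nnunetv2.imageio.base_reader_writer import BaseReaderWriter

    class MyFormatIO(BaseReaderWriter):
        supported_file_endings = ['.foo']   # hypothetical extension

        def read_images(self, image_fnames: Union[List[str], Tuple[str, ...]]) -> Tuple[np.ndarray, dict]:
            images = [load_foo(f)[None] for f in image_fnames]      # load_foo: your 3d decoder
            return np.vstack(images).astype(np.float32), {'spacing': [1, 1, 1]}

        def read_seg(self, seg_fname: str) -> Tuple[np.ndarray, dict]:
            return self.read_images((seg_fname,))

        def write_seg(self, seg: np.ndarray, output_fname: str, properties: dict) -> None:
            save_foo(seg.astype(np.uint8), output_fname)            # save_foo: your encoder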
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/inference/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AImageLab-zip/ToothFairy/8486f68c65b37ee4398fb02276558ccf467583ea/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/inference/__init__.py
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/model_sharing/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AImageLab-zip/ToothFairy/8486f68c65b37ee4398fb02276558ccf467583ea/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/model_sharing/__init__.py
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/model_sharing/model_download.py:
--------------------------------------------------------------------------------
1 | from typing import Optional
2 |
3 | import requests
4 | from batchgenerators.utilities.file_and_folder_operations import *
5 | from time import time
6 | from nnunetv2.model_sharing.model_import import install_model_from_zip_file
7 | from nnunetv2.paths import nnUNet_results
8 | from tqdm import tqdm
9 |
10 |
11 | def download_and_install_from_url(url):
12 | assert nnUNet_results is not None, "Cannot install model because nnUNet_results is not " \
13 | "set (nnUNet_results missing as an environment variable, see " \
14 | "installation instructions)"
15 | print('Downloading pretrained model from url:', url)
16 | import http.client
17 | http.client.HTTPConnection._http_vsn = 10
18 | http.client.HTTPConnection._http_vsn_str = 'HTTP/1.0'
19 |
20 | import os
21 | home = os.path.expanduser('~')
22 | random_number = int(time() * 1e7)
23 | temp_file = join(home, '.nnunetdownload_%s' % str(random_number))
24 |
25 | try:
26 | download_file(url=url, local_filename=temp_file, chunk_size=8192 * 16)
27 | print("Download finished. Extracting...")
28 | install_model_from_zip_file(temp_file)
29 | print("Done")
30 | except Exception as e:
31 | raise e
32 | finally:
33 | if isfile(temp_file):
34 | os.remove(temp_file)
35 |
36 |
37 | def download_file(url: str, local_filename: str, chunk_size: Optional[int] = 8192 * 16) -> str:
38 | # borrowed from https://stackoverflow.com/questions/16694907/download-large-file-in-python-with-requests
39 | # NOTE the stream=True parameter below
40 | with requests.get(url, stream=True, timeout=100) as r:
41 | r.raise_for_status()
42 | with tqdm.wrapattr(open(local_filename, 'wb'), "write", total=int(r.headers.get("Content-Length"))) as f:
43 | for chunk in r.iter_content(chunk_size=chunk_size):
44 | f.write(chunk)
45 | return local_filename
46 |
47 |
48 |
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/model_sharing/model_import.py:
--------------------------------------------------------------------------------
1 | import zipfile
2 |
3 | from nnunetv2.paths import nnUNet_results
4 |
5 |
6 | def install_model_from_zip_file(zip_file: str):
7 | with zipfile.ZipFile(zip_file, 'r') as zip_ref:
8 | zip_ref.extractall(nnUNet_results)
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/paths.py:
--------------------------------------------------------------------------------
1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | import os
16 |
17 | """
18 | PLEASE READ paths.md FOR INFORMATION ON HOW TO SET THIS UP
19 | """
20 |
21 | nnUNet_raw = os.environ.get('nnUNet_raw')
22 | nnUNet_preprocessed = os.environ.get('nnUNet_preprocessed')
23 | nnUNet_results = os.environ.get('nnUNet_results')
24 |
25 | if nnUNet_raw is None:
26 | print("nnUNet_raw is not defined and nnU-Net can only be used on data for which preprocessed files "
27 | "are already present on your system. nnU-Net cannot be used for experiment planning and preprocessing like "
28 | "this. If this is not intended, please read documentation/setting_up_paths.md for information on how to set "
29 | "this up properly.")
30 |
31 | if nnUNet_preprocessed is None:
32 | print("nnUNet_preprocessed is not defined and nnU-Net can not be used for preprocessing "
33 | "or training. If this is not intended, please read documentation/setting_up_paths.md for information on how "
34 | "to set this up.")
35 |
36 | if nnUNet_results is None:
37 | print("nnUNet_results is not defined and nnU-Net cannot be used for training or "
38 | "inference. If this is not intended behavior, please read documentation/setting_up_paths.md for information "
39 | "on how to set this up.")
40 |
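
One way to point these variables somewhere before importing nnunetv2 (a sketch; the directory locations are placeholders):

    import os

    os.environ["nnUNet_raw"] = "/data/nnUNet_raw"
    os.environ["nnUNet_preprocessed"] = "/data/nnUNet_preprocessed"
    os.environ["nnUNet_results"] = "/data/nnUNet_results"

    from nnunetv2.paths import nnUNet_raw   # now resolves to /data/nnUNet_raw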
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/postprocessing/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AImageLab-zip/ToothFairy/8486f68c65b37ee4398fb02276558ccf467583ea/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/postprocessing/__init__.py
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/preprocessing/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AImageLab-zip/ToothFairy/8486f68c65b37ee4398fb02276558ccf467583ea/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/preprocessing/__init__.py
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/preprocessing/cropping/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AImageLab-zip/ToothFairy/8486f68c65b37ee4398fb02276558ccf467583ea/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/preprocessing/cropping/__init__.py
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/preprocessing/cropping/cropping.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 |
4 | # Hello! crop_to_nonzero is the function you are looking for. Ignore the rest.
5 | from acvl_utils.cropping_and_padding.bounding_boxes import get_bbox_from_mask, crop_to_bbox, bounding_box_to_slice
6 |
7 |
8 | def create_nonzero_mask(data):
9 | """
10 |
11 | :param data:
12 | :return: the mask is True where the data is nonzero
13 | """
14 | from scipy.ndimage import binary_fill_holes
15 | assert len(data.shape) == 4 or len(data.shape) == 3, "data must have shape (C, X, Y, Z) or shape (C, X, Y)"
16 | nonzero_mask = np.zeros(data.shape[1:], dtype=bool)
17 | for c in range(data.shape[0]):
18 | this_mask = data[c] != 0
19 | nonzero_mask = nonzero_mask | this_mask
20 | nonzero_mask = binary_fill_holes(nonzero_mask)
21 | return nonzero_mask
22 |
23 |
24 | def crop_to_nonzero(data, seg=None, nonzero_label=-1):
25 | """
26 |
27 | :param data:
28 | :param seg:
29 | :param nonzero_label: this will be written into the segmentation map
30 | :return:
31 | """
32 | nonzero_mask = create_nonzero_mask(data)
33 | bbox = get_bbox_from_mask(nonzero_mask)
34 |
35 | slicer = bounding_box_to_slice(bbox)
36 | data = data[tuple([slice(None), *slicer])]
37 |
38 | if seg is not None:
39 | seg = seg[tuple([slice(None), *slicer])]
40 |
41 | nonzero_mask = nonzero_mask[slicer][None]
42 | if seg is not None:
43 | seg[(seg == 0) & (~nonzero_mask)] = nonzero_label
44 | else:
45 | nonzero_mask = nonzero_mask.astype(np.int8)
46 | nonzero_mask[nonzero_mask == 0] = nonzero_label
47 | nonzero_mask[nonzero_mask > 0] = 0
48 | seg = nonzero_mask
49 | return data, seg, bbox
50 |
51 |
52 |
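
A usage sketch for `crop_to_nonzero` (synthetic volume; channel-first layout, as asserted in `create_nonzero_mask` above):

    import numpy as np

    data = np.zeros((1, 10, 10, 10), dtype=np.float32)
    data[0, 2:8, 2:8, 2:8] = 1.0                  # nonzero core
    cropped, seg, bbox = crop_to_nonzero(data)
    print(cropped.shape)    # (1, 6, 6, 6)
    print(np.unique(seg))   # [0] here; -1 would mark voxels outside the nonzero mask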
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/preprocessing/normalization/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AImageLab-zip/ToothFairy/8486f68c65b37ee4398fb02276558ccf467583ea/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/preprocessing/normalization/__init__.py
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/preprocessing/normalization/map_channel_name_to_normalization.py:
--------------------------------------------------------------------------------
1 | from typing import Type
2 |
3 | from nnunetv2.preprocessing.normalization.default_normalization_schemes import CTNormalization, NoNormalization, \
4 | ZScoreNormalization, RescaleTo01Normalization, RGBTo01Normalization, ImageNormalization
5 |
6 | channel_name_to_normalization_mapping = {
7 | 'CT': CTNormalization,
8 | 'noNorm': NoNormalization,
9 | 'zscore': ZScoreNormalization,
10 | 'rescale_to_0_1': RescaleTo01Normalization,
11 | 'rgb_to_0_1': RGBTo01Normalization
12 | }
13 |
14 |
15 | def get_normalization_scheme(channel_name: str) -> Type[ImageNormalization]:
16 | """
17 |     If channel_name is found in channel_name_to_normalization_mapping, return the corresponding normalization. If it is
18 |     not found, use the default (ZScoreNormalization).
19 | """
20 | norm_scheme = channel_name_to_normalization_mapping.get(channel_name)
21 | if norm_scheme is None:
22 | norm_scheme = ZScoreNormalization
23 | # print('Using %s for image normalization' % norm_scheme.__name__)
24 | return norm_scheme
25 |
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/preprocessing/normalization/readme.md:
--------------------------------------------------------------------------------
1 | The channel_names entry in dataset.json only determines the normalization scheme. So if you want to use something different,
2 | you can just
3 | - create a new subclass of ImageNormalization
4 | - map your custom channel identifier to that subclass in channel_name_to_normalization_mapping
5 | - run plan and preprocess again with your custom normalization scheme
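For example, a sketch of such a subclass (assuming the ImageNormalization interface from default_normalization_schemes, whose run(image, seg) method returns the normalized image; the identifier and the percentile scheme below are hypothetical):

```python
import numpy as np

from nnunetv2.preprocessing.normalization.default_normalization_schemes import ImageNormalization


class ClipTo0595Normalization(ImageNormalization):
    """Hypothetical scheme: clip to the 5th/95th percentile, then z-score."""
    leaves_pixels_in_range = False

    def run(self, image: np.ndarray, seg: np.ndarray = None) -> np.ndarray:
        image = image.astype(self.target_dtype)
        lower, upper = np.percentile(image, (5, 95))
        image = np.clip(image, lower, upper)
        return (image - image.mean()) / max(image.std(), 1e-8)
```

Registering it is then one extra entry in channel_name_to_normalization_mapping ('clip_0595': ClipTo0595Normalization) plus using "clip_0595" as the channel name in dataset.json.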
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/preprocessing/preprocessors/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AImageLab-zip/ToothFairy/8486f68c65b37ee4398fb02276558ccf467583ea/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/preprocessing/preprocessors/__init__.py
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/preprocessing/resampling/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AImageLab-zip/ToothFairy/8486f68c65b37ee4398fb02276558ccf467583ea/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/preprocessing/resampling/__init__.py
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/preprocessing/resampling/utils.py:
--------------------------------------------------------------------------------
1 | from typing import Callable
2 |
3 | import nnunetv2
4 | from batchgenerators.utilities.file_and_folder_operations import join
5 | from nnunetv2.utilities.find_class_by_name import recursive_find_python_class
6 |
7 |
8 | def recursive_find_resampling_fn_by_name(resampling_fn: str) -> Callable:
9 | ret = recursive_find_python_class(join(nnunetv2.__path__[0], "preprocessing", "resampling"), resampling_fn,
10 | 'nnunetv2.preprocessing.resampling')
11 | if ret is None:
12 | raise RuntimeError("Unable to find resampling function named '%s'. Please make sure this fn is located in the "
13 | "nnunetv2.preprocessing.resampling module." % resampling_fn)
14 | else:
15 | return ret
16 |
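A sketch of how this lookup is typically used; resample_data_or_seg_to_shape is the name of the default resampling function shipped with nnU-Net, standing in here for whatever name the plans file stores:

```python
fn = recursive_find_resampling_fn_by_name("resample_data_or_seg_to_shape")
# fn is now the callable found under nnunetv2.preprocessing.resampling, so
# resampling functions can be stored in and restored from nnUNetPlans.json by name alone
```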
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/run/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AImageLab-zip/ToothFairy/8486f68c65b37ee4398fb02276558ccf467583ea/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/run/__init__.py
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/tests/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AImageLab-zip/ToothFairy/8486f68c65b37ee4398fb02276558ccf467583ea/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/tests/__init__.py
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/tests/integration_tests/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AImageLab-zip/ToothFairy/8486f68c65b37ee4398fb02276558ccf467583ea/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/tests/integration_tests/__init__.py
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/tests/integration_tests/add_lowres_and_cascade.py:
--------------------------------------------------------------------------------
1 | from batchgenerators.utilities.file_and_folder_operations import *
2 |
3 | from nnunetv2.paths import nnUNet_preprocessed
4 | from nnunetv2.utilities.dataset_name_id_conversion import maybe_convert_to_dataset_name
5 |
6 | if __name__ == '__main__':
7 | import argparse
8 |
9 | parser = argparse.ArgumentParser()
10 | parser.add_argument('-d', nargs='+', type=int, help='List of dataset ids')
11 | args = parser.parse_args()
12 |
13 | for d in args.d:
14 | dataset_name = maybe_convert_to_dataset_name(d)
15 | plans = load_json(join(nnUNet_preprocessed, dataset_name, 'nnUNetPlans.json'))
16 | plans['configurations']['3d_lowres'] = {
17 | "data_identifier": "nnUNetPlans_3d_lowres", # do not be a dumbo and forget this. I was a dumbo. And I paid dearly with ~10 min debugging time
18 | 'inherits_from': '3d_fullres',
19 | "patch_size": [20, 28, 20],
20 | "median_image_size_in_voxels": [18.0, 25.0, 18.0],
21 | "spacing": [2.0, 2.0, 2.0],
22 | "n_conv_per_stage_encoder": [2, 2, 2],
23 | "n_conv_per_stage_decoder": [2, 2],
24 | "num_pool_per_axis": [2, 2, 2],
25 | "pool_op_kernel_sizes": [[1, 1, 1], [2, 2, 2], [2, 2, 2]],
26 | "conv_kernel_sizes": [[3, 3, 3], [3, 3, 3], [3, 3, 3]],
27 | "next_stage": "3d_cascade_fullres"
28 | }
29 | plans['configurations']['3d_cascade_fullres'] = {
30 | 'inherits_from': '3d_fullres',
31 | "previous_stage": "3d_lowres"
32 | }
33 | save_json(plans, join(nnUNet_preprocessed, dataset_name, 'nnUNetPlans.json'), sort_keys=False)
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/tests/integration_tests/cleanup_integration_test.py:
--------------------------------------------------------------------------------
1 | import shutil
2 |
3 | from batchgenerators.utilities.file_and_folder_operations import isdir, join
4 |
5 | from nnunetv2.paths import nnUNet_raw, nnUNet_results, nnUNet_preprocessed
6 |
7 | if __name__ == '__main__':
8 | # deletes everything!
9 | dataset_names = [
10 | 'Dataset996_IntegrationTest_Hippocampus_regions_ignore',
11 | 'Dataset997_IntegrationTest_Hippocampus_regions',
12 | 'Dataset998_IntegrationTest_Hippocampus_ignore',
13 | 'Dataset999_IntegrationTest_Hippocampus',
14 | ]
15 | for fld in [nnUNet_raw, nnUNet_preprocessed, nnUNet_results]:
16 | for d in dataset_names:
17 | if isdir(join(fld, d)):
18 | shutil.rmtree(join(fld, d))
19 |
20 |
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/tests/integration_tests/lsf_commands.sh:
--------------------------------------------------------------------------------
1 | bsub -q gpu.legacy -gpu num=1:j_exclusive=yes:gmem=1G -L /bin/bash ". /home/isensee/load_env_cluster4.sh && cd /home/isensee/git_repos/nnunet_remake && export nnUNet_keep_files_open=True && . nnunetv2/tests/integration_tests/run_integration_test.sh 996"
2 | bsub -q gpu.legacy -gpu num=1:j_exclusive=yes:gmem=1G -L /bin/bash ". /home/isensee/load_env_cluster4.sh && cd /home/isensee/git_repos/nnunet_remake && export nnUNet_keep_files_open=True && . nnunetv2/tests/integration_tests/run_integration_test.sh 997"
3 | bsub -q gpu.legacy -gpu num=1:j_exclusive=yes:gmem=1G -L /bin/bash ". /home/isensee/load_env_cluster4.sh && cd /home/isensee/git_repos/nnunet_remake && export nnUNet_keep_files_open=True && . nnunetv2/tests/integration_tests/run_integration_test.sh 998"
4 | bsub -q gpu.legacy -gpu num=1:j_exclusive=yes:gmem=1G -L /bin/bash ". /home/isensee/load_env_cluster4.sh && cd /home/isensee/git_repos/nnunet_remake && export nnUNet_keep_files_open=True && . nnunetv2/tests/integration_tests/run_integration_test.sh 999"
5 |
6 |
7 | bsub -q gpu.legacy -gpu num=2:j_exclusive=yes:gmem=1G -L /bin/bash ". /home/isensee/load_env_cluster4.sh && cd /home/isensee/git_repos/nnunet_remake && export nnUNet_keep_files_open=True && . nnunetv2/tests/integration_tests/run_integration_test_trainingOnly_DDP.sh 996"
8 | bsub -q gpu.legacy -gpu num=2:j_exclusive=yes:gmem=1G -L /bin/bash ". /home/isensee/load_env_cluster4.sh && cd /home/isensee/git_repos/nnunet_remake && export nnUNet_keep_files_open=True && . nnunetv2/tests/integration_tests/run_integration_test_trainingOnly_DDP.sh 997"
9 | bsub -q gpu.legacy -gpu num=2:j_exclusive=yes:gmem=1G -L /bin/bash ". /home/isensee/load_env_cluster4.sh && cd /home/isensee/git_repos/nnunet_remake && export nnUNet_keep_files_open=True && . nnunetv2/tests/integration_tests/run_integration_test_trainingOnly_DDP.sh 998"
10 | bsub -q gpu.legacy -gpu num=2:j_exclusive=yes:gmem=1G -L /bin/bash ". /home/isensee/load_env_cluster4.sh && cd /home/isensee/git_repos/nnunet_remake && export nnUNet_keep_files_open=True && . nnunetv2/tests/integration_tests/run_integration_test_trainingOnly_DDP.sh 999"
11 |
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/tests/integration_tests/prepare_integration_tests.sh:
--------------------------------------------------------------------------------
1 | # assumes you are in the nnunet repo!
2 |
3 | # prepare raw datasets
4 | python nnunetv2/dataset_conversion/datasets_for_integration_tests/Dataset999_IntegrationTest_Hippocampus.py
5 | python nnunetv2/dataset_conversion/datasets_for_integration_tests/Dataset998_IntegrationTest_Hippocampus_ignore.py
6 | python nnunetv2/dataset_conversion/datasets_for_integration_tests/Dataset997_IntegrationTest_Hippocampus_regions.py
7 | python nnunetv2/dataset_conversion/datasets_for_integration_tests/Dataset996_IntegrationTest_Hippocampus_regions_ignore.py
8 |
9 | # now run experiment planning without preprocessing
10 | nnUNetv2_plan_and_preprocess -d 996 997 998 999 --no_pp
11 |
12 | # now add 3d lowres and cascade
13 | python nnunetv2/tests/integration_tests/add_lowres_and_cascade.py -d 996 997 998 999
14 |
15 | # now preprocess everything
16 | nnUNetv2_preprocess -d 996 997 998 999 -c 2d 3d_lowres 3d_fullres -np 8 8 8 # no need to preprocess cascade as it's the same data as 3d_fullres
17 |
18 | # done
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/tests/integration_tests/readme.md:
--------------------------------------------------------------------------------
1 | # Preface
2 |
3 | I am just a mortal with many tasks and limited time. Ain't nobody got time for unit tests.
4 |
5 | HOWEVER, at least some integration tests should be performed testing nnU-Net from start to finish.
6 |
7 | # Introduction - What the heck is happening?
8 | This test covers all possible labeling scenarios (standard labels, regions, ignore labels and regions with
9 | ignore labels). It runs the entire nnU-Net pipeline from start to finish:
10 |
11 | - fingerprint extraction
12 | - experiment planning
13 | - preprocessing
14 | - train all 4 configurations (2d, 3d_lowres, 3d_fullres, 3d_cascade_fullres) as 5-fold CV
15 | - automatically find the best model or ensemble
16 | - determine the postprocessing used for this
17 | - predict some test set
18 | - apply postprocessing to the test set
19 |
20 | To speed things up, we do the following:
21 | - pick Dataset004_Hippocampus because it is quadratisch, praktisch, gut (square, practical, good): the MNIST of medical image segmentation
22 | - by default this dataset does not have 3d_lowres or cascade. We just manually add them (cool new feature, eh?). See `add_lowres_and_cascade.py` to learn more!
23 | - we use nnUNetTrainer_5epochs for a short training
24 |
25 | # How to run it?
26 |
27 | Set your pwd to be the nnunet repo folder (the one where the `nnunetv2` folder and the `setup.py` are located!)
28 |
29 | Now generate the 4 dummy datasets (ids 996, 997, 998, 999) from dataset 4. This will crash if you don't have Dataset004!
30 | ```commandline
31 | bash nnunetv2/tests/integration_tests/prepare_integration_tests.sh
32 | ```
33 |
34 | Now you can run the integration test for each of the datasets:
35 | ```commandline
36 | bash nnunetv2/tests/integration_tests/run_integration_test.sh DATASET_ID
37 | ```
38 | Use DATASET_ID 996, 997, 998 and 999. You can run these independently on different GPUs/systems to speed things up.
39 | This will take, I dunno, like 10-30 minutes!?
40 |
41 | Also run
42 | ```commandline
43 | bash nnunetv2/tests/integration_tests/run_integration_test_trainingOnly_DDP.sh DATASET_ID
44 | ```
45 | to verify DDP is working (needs 2 GPUs!)
46 |
47 | # How to check if the test was successful?
48 | If I were not as lazy as I am, I would have programmed some automation that checks whether Dice scores etc. are in an acceptable range.
49 | So you need to do the following:
50 | 1) check that none of your runs crashed (duh)
51 | 2) for each run, navigate to `nnUNet_results/DATASET_NAME` and take a look at the `inference_information.json` file.
52 | Does it make sense? If so: NICE!
53 |
54 | Once the integration test is completed you can delete all the temporary files associated with it by running:
55 |
56 | ```commandline
57 | python nnunetv2/tests/integration_tests/cleanup_integration_test.py
58 | ```
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/tests/integration_tests/run_integration_test.sh:
--------------------------------------------------------------------------------
1 |
2 |
3 | nnUNetv2_train $1 3d_fullres 0 -tr nnUNetTrainer_5epochs --npz
4 | nnUNetv2_train $1 3d_fullres 1 -tr nnUNetTrainer_5epochs --npz
5 | nnUNetv2_train $1 3d_fullres 2 -tr nnUNetTrainer_5epochs --npz
6 | nnUNetv2_train $1 3d_fullres 3 -tr nnUNetTrainer_5epochs --npz
7 | nnUNetv2_train $1 3d_fullres 4 -tr nnUNetTrainer_5epochs --npz
8 |
9 | nnUNetv2_train $1 2d 0 -tr nnUNetTrainer_5epochs --npz
10 | nnUNetv2_train $1 2d 1 -tr nnUNetTrainer_5epochs --npz
11 | nnUNetv2_train $1 2d 2 -tr nnUNetTrainer_5epochs --npz
12 | nnUNetv2_train $1 2d 3 -tr nnUNetTrainer_5epochs --npz
13 | nnUNetv2_train $1 2d 4 -tr nnUNetTrainer_5epochs --npz
14 |
15 | nnUNetv2_train $1 3d_lowres 0 -tr nnUNetTrainer_5epochs --npz
16 | nnUNetv2_train $1 3d_lowres 1 -tr nnUNetTrainer_5epochs --npz
17 | nnUNetv2_train $1 3d_lowres 2 -tr nnUNetTrainer_5epochs --npz
18 | nnUNetv2_train $1 3d_lowres 3 -tr nnUNetTrainer_5epochs --npz
19 | nnUNetv2_train $1 3d_lowres 4 -tr nnUNetTrainer_5epochs --npz
20 |
21 | nnUNetv2_train $1 3d_cascade_fullres 0 -tr nnUNetTrainer_5epochs --npz
22 | nnUNetv2_train $1 3d_cascade_fullres 1 -tr nnUNetTrainer_5epochs --npz
23 | nnUNetv2_train $1 3d_cascade_fullres 2 -tr nnUNetTrainer_5epochs --npz
24 | nnUNetv2_train $1 3d_cascade_fullres 3 -tr nnUNetTrainer_5epochs --npz
25 | nnUNetv2_train $1 3d_cascade_fullres 4 -tr nnUNetTrainer_5epochs --npz
26 |
27 | python nnunetv2/tests/integration_tests/run_integration_test_bestconfig_inference.py -d $1
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/tests/integration_tests/run_integration_test_trainingOnly_DDP.sh:
--------------------------------------------------------------------------------
1 | nnUNetv2_train $1 3d_fullres 0 -tr nnUNetTrainer_10epochs -num_gpus 2
2 |
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/training/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AImageLab-zip/ToothFairy/8486f68c65b37ee4398fb02276558ccf467583ea/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/training/__init__.py
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/training/data_augmentation/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AImageLab-zip/ToothFairy/8486f68c65b37ee4398fb02276558ccf467583ea/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/training/data_augmentation/__init__.py
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/training/data_augmentation/compute_initial_patch_size.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 |
4 | def get_patch_size(final_patch_size, rot_x, rot_y, rot_z, scale_range):
5 | if isinstance(rot_x, (tuple, list)):
6 | rot_x = max(np.abs(rot_x))
7 | if isinstance(rot_y, (tuple, list)):
8 | rot_y = max(np.abs(rot_y))
9 | if isinstance(rot_z, (tuple, list)):
10 | rot_z = max(np.abs(rot_z))
11 | rot_x = min(90 / 360 * 2. * np.pi, rot_x)
12 | rot_y = min(90 / 360 * 2. * np.pi, rot_y)
13 | rot_z = min(90 / 360 * 2. * np.pi, rot_z)
14 | from batchgenerators.augmentations.utils import rotate_coords_3d, rotate_coords_2d
15 | coords = np.array(final_patch_size)
16 | final_shape = np.copy(coords)
17 | if len(coords) == 3:
18 | final_shape = np.max(np.vstack((np.abs(rotate_coords_3d(coords, rot_x, 0, 0)), final_shape)), 0)
19 | final_shape = np.max(np.vstack((np.abs(rotate_coords_3d(coords, 0, rot_y, 0)), final_shape)), 0)
20 | final_shape = np.max(np.vstack((np.abs(rotate_coords_3d(coords, 0, 0, rot_z)), final_shape)), 0)
21 | elif len(coords) == 2:
22 | final_shape = np.max(np.vstack((np.abs(rotate_coords_2d(coords, rot_x)), final_shape)), 0)
23 | final_shape /= min(scale_range)
24 | return final_shape.astype(int)
25 |
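A usage sketch with illustrative values: for a target 3D patch of 128³, rotations of up to ±30° (≈0.52 rad) per axis and a scale range of (0.7, 1.4), the function returns the enlarged patch size that must be sampled so that rotation and scaling never read outside the sampled region:

```python
import numpy as np

rot = (-30. / 360 * 2. * np.pi, 30. / 360 * 2. * np.pi)  # +-30 degrees in radians
initial = get_patch_size((128, 128, 128), rot, rot, rot, scale_range=(0.7, 1.4))
print(initial)  # elementwise well above 128: enlarged for rotation, then divided by min(scale_range)=0.7
```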
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/training/data_augmentation/custom_transforms/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AImageLab-zip/ToothFairy/8486f68c65b37ee4398fb02276558ccf467583ea/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/training/data_augmentation/custom_transforms/__init__.py
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/training/data_augmentation/custom_transforms/deep_supervision_donwsampling.py:
--------------------------------------------------------------------------------
1 | from typing import Tuple, Union, List
2 |
3 | from batchgenerators.augmentations.utils import resize_segmentation
4 | from batchgenerators.transforms.abstract_transforms import AbstractTransform
5 | import numpy as np
6 |
7 |
8 | class DownsampleSegForDSTransform2(AbstractTransform):
9 | '''
10 |     data_dict[output_key] will be a list of segmentations scaled according to ds_scales
11 | '''
12 | def __init__(self, ds_scales: Union[List, Tuple],
13 | order: int = 0, input_key: str = "seg",
14 | output_key: str = "seg", axes: Tuple[int] = None):
15 | """
16 |         Downscales data_dict[input_key] according to ds_scales. Each entry in ds_scales specifies one deep supervision
17 | output and its resolution relative to the original data, for example 0.25 specifies 1/4 of the original shape.
18 | ds_scales can also be a tuple of tuples, for example ((1, 1, 1), (0.5, 0.5, 0.5)) to specify the downsampling
19 | for each axis independently
20 | """
21 | self.axes = axes
22 | self.output_key = output_key
23 | self.input_key = input_key
24 | self.order = order
25 | self.ds_scales = ds_scales
26 |
27 | def __call__(self, **data_dict):
28 | if self.axes is None:
29 | axes = list(range(2, len(data_dict[self.input_key].shape)))
30 | else:
31 | axes = self.axes
32 |
33 | output = []
34 | for s in self.ds_scales:
35 | if not isinstance(s, (tuple, list)):
36 | s = [s] * len(axes)
37 | else:
38 | assert len(s) == len(axes), f'If ds_scales is a tuple for each resolution (one downsampling factor ' \
39 |                                             f'for each axis) then the number of entries in that tuple (here ' \
40 | f'{len(s)}) must be the same as the number of axes (here {len(axes)}).'
41 |
42 | if all([i == 1 for i in s]):
43 | output.append(data_dict[self.input_key])
44 | else:
45 | new_shape = np.array(data_dict[self.input_key].shape).astype(float)
46 | for i, a in enumerate(axes):
47 | new_shape[a] *= s[i]
48 | new_shape = np.round(new_shape).astype(int)
49 | out_seg = np.zeros(new_shape, dtype=data_dict[self.input_key].dtype)
50 | for b in range(data_dict[self.input_key].shape[0]):
51 | for c in range(data_dict[self.input_key].shape[1]):
52 | out_seg[b, c] = resize_segmentation(data_dict[self.input_key][b, c], new_shape[2:], self.order)
53 | output.append(out_seg)
54 | data_dict[self.output_key] = output
55 | return data_dict
56 |
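A sketch of what the transform produces, assuming a (b, c, x, y, z) segmentation and three deep supervision resolutions:

```python
import numpy as np

t = DownsampleSegForDSTransform2(ds_scales=(1.0, 0.5, 0.25))
seg = np.zeros((2, 1, 64, 64, 64), dtype=np.int16)
out = t(seg=seg)['seg']
print([o.shape for o in out])
# [(2, 1, 64, 64, 64), (2, 1, 32, 32, 32), (2, 1, 16, 16, 16)]
```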
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/training/data_augmentation/custom_transforms/limited_length_multithreaded_augmenter.py:
--------------------------------------------------------------------------------
1 | from batchgenerators.dataloading.nondet_multi_threaded_augmenter import NonDetMultiThreadedAugmenter
2 |
3 |
4 | class LimitedLenWrapper(NonDetMultiThreadedAugmenter):
5 | def __init__(self, my_imaginary_length, *args, **kwargs):
6 | super().__init__(*args, **kwargs)
7 | self.len = my_imaginary_length
8 |
9 | def __len__(self):
10 | return self.len
11 |
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/training/data_augmentation/custom_transforms/manipulating_data_dict.py:
--------------------------------------------------------------------------------
1 | from batchgenerators.transforms.abstract_transforms import AbstractTransform
2 |
3 |
4 | class RemoveKeyTransform(AbstractTransform):
5 | def __init__(self, key_to_remove: str):
6 | self.key_to_remove = key_to_remove
7 |
8 | def __call__(self, **data_dict):
9 | _ = data_dict.pop(self.key_to_remove, None)
10 | return data_dict
11 |
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/training/data_augmentation/custom_transforms/masking.py:
--------------------------------------------------------------------------------
1 | from typing import List
2 |
3 | from batchgenerators.transforms.abstract_transforms import AbstractTransform
4 |
5 |
6 | class MaskTransform(AbstractTransform):
7 | def __init__(self, apply_to_channels: List[int], mask_idx_in_seg: int = 0, set_outside_to: int = 0,
8 | data_key: str = "data", seg_key: str = "seg"):
9 | """
10 |         Sets everything outside the mask to set_outside_to (default 0). CAREFUL! Outside is defined as seg < 0, not seg == 0!!!
11 | """
12 | self.apply_to_channels = apply_to_channels
13 | self.seg_key = seg_key
14 | self.data_key = data_key
15 | self.set_outside_to = set_outside_to
16 | self.mask_idx_in_seg = mask_idx_in_seg
17 |
18 | def __call__(self, **data_dict):
19 | mask = data_dict[self.seg_key][:, self.mask_idx_in_seg] < 0
20 | for c in self.apply_to_channels:
21 | data_dict[self.data_key][:, c][mask] = self.set_outside_to
22 | return data_dict
23 |
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/training/data_augmentation/custom_transforms/region_based_training.py:
--------------------------------------------------------------------------------
1 | from typing import List, Tuple, Union
2 |
3 | from batchgenerators.transforms.abstract_transforms import AbstractTransform
4 | import numpy as np
5 |
6 |
7 | class ConvertSegmentationToRegionsTransform(AbstractTransform):
8 | def __init__(self, regions: Union[List, Tuple],
9 | seg_key: str = "seg", output_key: str = "seg", seg_channel: int = 0):
10 | """
11 | regions are tuple of tuples where each inner tuple holds the class indices that are merged into one region,
12 | example:
13 | regions= ((1, 2), (2, )) will result in 2 regions: one covering the region of labels 1&2 and the other just 2
14 | :param regions:
15 | :param seg_key:
16 | :param output_key:
17 | """
18 | self.seg_channel = seg_channel
19 | self.output_key = output_key
20 | self.seg_key = seg_key
21 | self.regions = regions
22 |
23 | def __call__(self, **data_dict):
24 | seg = data_dict.get(self.seg_key)
25 | num_regions = len(self.regions)
26 | if seg is not None:
27 | seg_shp = seg.shape
28 | output_shape = list(seg_shp)
29 | output_shape[1] = num_regions
30 | region_output = np.zeros(output_shape, dtype=seg.dtype)
31 | for b in range(seg_shp[0]):
32 | for region_id, region_source_labels in enumerate(self.regions):
33 | if not isinstance(region_source_labels, (list, tuple)):
34 | region_source_labels = (region_source_labels, )
35 | for label_value in region_source_labels:
36 | region_output[b, region_id][seg[b, self.seg_channel] == label_value] = 1
37 | data_dict[self.output_key] = region_output
38 | return data_dict
39 |
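A numeric sketch of the docstring's example: with regions=((1, 2), (2,)), a segmentation containing labels 0/1/2 becomes a two-channel binary map, where channel 0 is "label 1 or 2" and channel 1 is "label 2 only":

```python
import numpy as np

t = ConvertSegmentationToRegionsTransform(regions=((1, 2), (2,)))
seg = np.array([[[0, 1, 2]]])          # shape (b=1, c=1, 3)
out = t(seg=seg)['seg']
print(out.shape)   # (1, 2, 3): one channel per region
print(out[0, 0])   # [0 1 1] -> labels 1 and 2 merged
print(out[0, 1])   # [0 0 1] -> label 2 only
```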
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/training/data_augmentation/custom_transforms/transforms_for_dummy_2d.py:
--------------------------------------------------------------------------------
1 | from typing import Tuple, Union, List
2 |
3 | from batchgenerators.transforms.abstract_transforms import AbstractTransform
4 |
5 |
6 | class Convert3DTo2DTransform(AbstractTransform):
7 | def __init__(self, apply_to_keys: Union[List[str], Tuple[str]] = ('data', 'seg')):
8 | """
9 | Transforms a 5D array (b, c, x, y, z) to a 4D array (b, c * x, y, z) by overloading the color channel
10 | """
11 | self.apply_to_keys = apply_to_keys
12 |
13 | def __call__(self, **data_dict):
14 | for k in self.apply_to_keys:
15 | shp = data_dict[k].shape
16 | assert len(shp) == 5, 'This transform only works on 3D data, so expects 5D tensor (b, c, x, y, z) as input.'
17 | data_dict[k] = data_dict[k].reshape((shp[0], shp[1] * shp[2], shp[3], shp[4]))
18 | shape_key = f'orig_shape_{k}'
19 | assert shape_key not in data_dict.keys(), f'Convert3DTo2DTransform needs to store the original shape. ' \
20 | f'It does that using the {shape_key} key. That key is ' \
21 | f'already taken. Bummer.'
22 | data_dict[shape_key] = shp
23 | return data_dict
24 |
25 |
26 | class Convert2DTo3DTransform(AbstractTransform):
27 | def __init__(self, apply_to_keys: Union[List[str], Tuple[str]] = ('data', 'seg')):
28 | """
29 | Reverts Convert3DTo2DTransform by transforming a 4D array (b, c * x, y, z) back to 5D (b, c, x, y, z)
30 | """
31 | self.apply_to_keys = apply_to_keys
32 |
33 | def __call__(self, **data_dict):
34 | for k in self.apply_to_keys:
35 | shape_key = f'orig_shape_{k}'
36 | assert shape_key in data_dict.keys(), f'Did not find key {shape_key} in data_dict. Shitty. ' \
37 | f'Convert2DTo3DTransform only works in tandem with ' \
38 | f'Convert3DTo2DTransform and you probably forgot to add ' \
39 | f'Convert3DTo2DTransform to your pipeline. (Convert3DTo2DTransform ' \
40 | f'is where the missing key is generated)'
41 | original_shape = data_dict[shape_key]
42 | current_shape = data_dict[k].shape
43 | data_dict[k] = data_dict[k].reshape((original_shape[0], original_shape[1], original_shape[2],
44 | current_shape[-2], current_shape[-1]))
45 | return data_dict
46 |
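A shape round-trip sketch of the two transforms (the "dummy 2D" trick folds the first spatial axis into the channel axis so 2D transforms can run on 3D data):

```python
import numpy as np

d = {'data': np.zeros((2, 1, 8, 16, 16)), 'seg': np.zeros((2, 1, 8, 16, 16))}
d = Convert3DTo2DTransform()(**d)
print(d['data'].shape)  # (2, 8, 16, 16): c * x folded into the channel axis
# ... 2D spatial transforms would run here ...
d = Convert2DTo3DTransform()(**d)
print(d['data'].shape)  # (2, 1, 8, 16, 16): restored from orig_shape_data
```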
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/training/dataloading/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AImageLab-zip/ToothFairy/8486f68c65b37ee4398fb02276558ccf467583ea/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/training/dataloading/__init__.py
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/training/dataloading/utils.py:
--------------------------------------------------------------------------------
1 | import multiprocessing
2 | import os
3 | from multiprocessing import Pool
4 | from typing import List
5 |
6 | import numpy as np
7 | from batchgenerators.utilities.file_and_folder_operations import isfile, subfiles
8 | from nnunetv2.configuration import default_num_processes
9 |
10 |
11 | def _convert_to_npy(npz_file: str, unpack_segmentation: bool = True, overwrite_existing: bool = False) -> None:
12 | try:
13 | a = np.load(npz_file) # inexpensive, no compression is done here. This just reads metadata
14 | if overwrite_existing or not isfile(npz_file[:-3] + "npy"):
15 | np.save(npz_file[:-3] + "npy", a['data'])
16 | if unpack_segmentation and (overwrite_existing or not isfile(npz_file[:-4] + "_seg.npy")):
17 | np.save(npz_file[:-4] + "_seg.npy", a['seg'])
18 | except KeyboardInterrupt:
19 | if isfile(npz_file[:-3] + "npy"):
20 | os.remove(npz_file[:-3] + "npy")
21 | if isfile(npz_file[:-4] + "_seg.npy"):
22 | os.remove(npz_file[:-4] + "_seg.npy")
23 | raise KeyboardInterrupt
24 |
25 |
26 | def unpack_dataset(folder: str, unpack_segmentation: bool = True, overwrite_existing: bool = False,
27 | num_processes: int = default_num_processes):
28 | """
29 | all npz files in this folder belong to the dataset, unpack them all
30 | """
31 | with multiprocessing.get_context("spawn").Pool(num_processes) as p:
32 | npz_files = subfiles(folder, True, None, ".npz", True)
33 | p.starmap(_convert_to_npy, zip(npz_files,
34 | [unpack_segmentation] * len(npz_files),
35 | [overwrite_existing] * len(npz_files))
36 | )
37 |
38 |
39 | def get_case_identifiers(folder: str) -> List[str]:
40 | """
41 | finds all npz files in the given folder and reconstructs the training case names from them
42 | """
43 | case_identifiers = [i[:-4] for i in os.listdir(folder) if i.endswith("npz") and (i.find("segFromPrevStage") == -1)]
44 | return case_identifiers
45 |
46 |
47 | if __name__ == '__main__':
48 | unpack_dataset('/media/fabian/data/nnUNet_preprocessed/Dataset002_Heart/2d')
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/training/logging/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AImageLab-zip/ToothFairy/8486f68c65b37ee4398fb02276558ccf467583ea/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/training/logging/__init__.py
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/training/loss/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AImageLab-zip/ToothFairy/8486f68c65b37ee4398fb02276558ccf467583ea/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/training/loss/__init__.py
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/training/loss/deep_supervision.py:
--------------------------------------------------------------------------------
1 | from torch import nn
2 |
3 |
4 | class DeepSupervisionWrapper(nn.Module):
5 | def __init__(self, loss, weight_factors=None):
6 | """
7 | Wraps a loss function so that it can be applied to multiple outputs. Forward accepts an arbitrary number of
8 | inputs. Each input is expected to be a tuple/list. Each tuple/list must have the same length. The loss is then
9 | applied to each entry like this:
10 | l = w0 * loss(input0[0], input1[0], ...) + w1 * loss(input0[1], input1[1], ...) + ...
11 | If weights are None, all w will be 1.
12 | """
13 | super(DeepSupervisionWrapper, self).__init__()
14 | self.weight_factors = weight_factors
15 | self.loss = loss
16 |
17 | def forward(self, *args):
18 | for i in args:
19 | assert isinstance(i, (tuple, list)), "all args must be either tuple or list, got %s" % type(i)
20 | # we could check for equal lengths here as well but we really shouldn't overdo it with checks because
21 | # this code is executed a lot of times!
22 |
23 | if self.weight_factors is None:
24 | weights = [1] * len(args[0])
25 | else:
26 | weights = self.weight_factors
27 |
28 | # we initialize the loss like this instead of 0 to ensure it sits on the correct device, not sure if that's
29 | # really necessary
30 | l = weights[0] * self.loss(*[j[0] for j in args])
31 | for i, inputs in enumerate(zip(*args)):
32 | if i == 0:
33 | continue
34 | l += weights[i] * self.loss(*inputs)
35 | return l
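A minimal sketch of the weighting formula from the docstring, using a plain MSE loss, two resolutions and arbitrarily chosen weights:

```python
import torch
from torch import nn

ds_loss = DeepSupervisionWrapper(nn.MSELoss(), weight_factors=[1.0, 0.5])
outputs = [torch.zeros(2, 3, 8, 8), torch.zeros(2, 3, 4, 4)]  # two resolutions
targets = [torch.ones(2, 3, 8, 8), torch.ones(2, 3, 4, 4)]
loss = ds_loss(outputs, targets)
# 1.0 * mse(outputs[0], targets[0]) + 0.5 * mse(outputs[1], targets[1])
print(loss.item())  # 1.5, since each MSE term equals 1
```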
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/training/lr_scheduler/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AImageLab-zip/ToothFairy/8486f68c65b37ee4398fb02276558ccf467583ea/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/training/lr_scheduler/__init__.py
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/training/lr_scheduler/polylr.py:
--------------------------------------------------------------------------------
1 | from torch.optim.lr_scheduler import _LRScheduler
2 |
3 |
4 | class PolyLRScheduler(_LRScheduler):
5 | def __init__(self, optimizer, initial_lr: float, max_steps: int, exponent: float = 0.9, current_step: int = None):
6 | self.optimizer = optimizer
7 | self.initial_lr = initial_lr
8 | self.max_steps = max_steps
9 | self.exponent = exponent
10 | self.ctr = 0
11 | super().__init__(optimizer, current_step if current_step is not None else -1, False)
12 |
13 | def step(self, current_step=None):
14 | if current_step is None or current_step == -1:
15 | current_step = self.ctr
16 | self.ctr += 1
17 |
18 | new_lr = self.initial_lr * (1 - current_step / self.max_steps) ** self.exponent
19 | for param_group in self.optimizer.param_groups:
20 | param_group['lr'] = new_lr
21 |
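The schedule follows lr = initial_lr * (1 - step / max_steps)^exponent. A quick sketch of the resulting values (the linear layer is just a source of dummy parameters):

```python
import torch

net = torch.nn.Linear(4, 2)
opt = torch.optim.SGD(net.parameters(), lr=1e-2)
sched = PolyLRScheduler(opt, initial_lr=1e-2, max_steps=1000)
for step in [0, 500, 999]:
    sched.step(step)
    print(step, opt.param_groups[0]['lr'])
# 0 -> 1e-2, 500 -> ~5.36e-3, 999 -> ~2.0e-5
```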
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/training/nnUNetTrainer/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AImageLab-zip/ToothFairy/8486f68c65b37ee4398fb02276558ccf467583ea/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/training/nnUNetTrainer/__init__.py
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/training/nnUNetTrainer/variants/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AImageLab-zip/ToothFairy/8486f68c65b37ee4398fb02276558ccf467583ea/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/training/nnUNetTrainer/variants/__init__.py
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/training/nnUNetTrainer/variants/benchmarking/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AImageLab-zip/ToothFairy/8486f68c65b37ee4398fb02276558ccf467583ea/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/training/nnUNetTrainer/variants/benchmarking/__init__.py
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/training/nnUNetTrainer/variants/benchmarking/nnUNetTrainerBenchmark_5epochs_noDataLoading.py:
--------------------------------------------------------------------------------
1 | import torch
2 |
3 | from nnunetv2.training.nnUNetTrainer.variants.benchmarking.nnUNetTrainerBenchmark_5epochs import \
4 | nnUNetTrainerBenchmark_5epochs
5 | from nnunetv2.utilities.label_handling.label_handling import determine_num_input_channels
6 |
7 |
8 | class nnUNetTrainerBenchmark_5epochs_noDataLoading(nnUNetTrainerBenchmark_5epochs):
9 | def __init__(self, plans: dict, configuration: str, fold: int, dataset_json: dict, unpack_dataset: bool = True,
10 | device: torch.device = torch.device('cuda')):
11 | super().__init__(plans, configuration, fold, dataset_json, unpack_dataset, device)
12 | self._set_batch_size_and_oversample()
13 | num_input_channels = determine_num_input_channels(self.plans_manager, self.configuration_manager,
14 | self.dataset_json)
15 | patch_size = self.configuration_manager.patch_size
16 | dummy_data = torch.rand((self.batch_size, num_input_channels, *patch_size), device=self.device)
17 | dummy_target = [
18 | torch.round(
19 | torch.rand((self.batch_size, 1, *[int(i * j) for i, j in zip(patch_size, k)]), device=self.device) *
20 | max(self.label_manager.all_labels)
21 | ) for k in self._get_deep_supervision_scales()]
22 | self.dummy_batch = {'data': dummy_data, 'target': dummy_target}
23 |
24 | def get_dataloaders(self):
25 | return None, None
26 |
27 | def run_training(self):
28 | try:
29 | self.on_train_start()
30 |
31 | for epoch in range(self.current_epoch, self.num_epochs):
32 | self.on_epoch_start()
33 |
34 | self.on_train_epoch_start()
35 | train_outputs = []
36 | for batch_id in range(self.num_iterations_per_epoch):
37 | train_outputs.append(self.train_step(self.dummy_batch))
38 | self.on_train_epoch_end(train_outputs)
39 |
40 | with torch.no_grad():
41 | self.on_validation_epoch_start()
42 | val_outputs = []
43 | for batch_id in range(self.num_val_iterations_per_epoch):
44 | val_outputs.append(self.validation_step(self.dummy_batch))
45 | self.on_validation_epoch_end(val_outputs)
46 |
47 | self.on_epoch_end()
48 |
49 | self.on_train_end()
50 | except RuntimeError:
51 | self.crashed_with_runtime_error = True
52 |
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/training/nnUNetTrainer/variants/data_augmentation/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AImageLab-zip/ToothFairy/8486f68c65b37ee4398fb02276558ccf467583ea/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/training/nnUNetTrainer/variants/data_augmentation/__init__.py
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/training/nnUNetTrainer/variants/data_augmentation/nnUNetTrainerNoDA.py:
--------------------------------------------------------------------------------
1 | from typing import Union, Tuple, List
2 |
3 | from batchgenerators.transforms.abstract_transforms import AbstractTransform
4 |
5 | from nnunetv2.training.nnUNetTrainer.nnUNetTrainer import nnUNetTrainer
6 | import numpy as np
7 |
8 |
9 | class nnUNetTrainerNoDA(nnUNetTrainer):
10 | @staticmethod
11 | def get_training_transforms(patch_size: Union[np.ndarray, Tuple[int]],
12 | rotation_for_DA: dict,
13 | deep_supervision_scales: Union[List, Tuple],
14 | mirror_axes: Tuple[int, ...],
15 | do_dummy_2d_data_aug: bool,
16 | order_resampling_data: int = 1,
17 | order_resampling_seg: int = 0,
18 | border_val_seg: int = -1,
19 | use_mask_for_norm: List[bool] = None,
20 | is_cascaded: bool = False,
21 | foreground_labels: Union[Tuple[int, ...], List[int]] = None,
22 | regions: List[Union[List[int], Tuple[int, ...], int]] = None,
23 | ignore_label: int = None) -> AbstractTransform:
24 | return nnUNetTrainer.get_validation_transforms(deep_supervision_scales, is_cascaded, foreground_labels,
25 | regions, ignore_label)
26 |
27 | def get_plain_dataloaders(self, initial_patch_size: Tuple[int, ...], dim: int):
28 | return super().get_plain_dataloaders(
29 | initial_patch_size=self.configuration_manager.patch_size,
30 | dim=dim
31 | )
32 |
33 | def configure_rotation_dummyDA_mirroring_and_inital_patch_size(self):
34 |         # we need to disable mirroring here so that no mirroring will be applied in inference!
35 | rotation_for_DA, do_dummy_2d_data_aug, initial_patch_size, mirror_axes = \
36 | super().configure_rotation_dummyDA_mirroring_and_inital_patch_size()
37 | mirror_axes = None
38 | self.inference_allowed_mirroring_axes = None
39 | return rotation_for_DA, do_dummy_2d_data_aug, initial_patch_size, mirror_axes
40 |
41 |
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/training/nnUNetTrainer/variants/data_augmentation/nnUNetTrainerNoMirroring.py:
--------------------------------------------------------------------------------
1 | from nnunetv2.training.nnUNetTrainer.nnUNetTrainer import nnUNetTrainer
2 |
3 |
4 | class nnUNetTrainerNoMirroring(nnUNetTrainer):
5 | def configure_rotation_dummyDA_mirroring_and_inital_patch_size(self):
6 | rotation_for_DA, do_dummy_2d_data_aug, initial_patch_size, mirror_axes = \
7 | super().configure_rotation_dummyDA_mirroring_and_inital_patch_size()
8 | mirror_axes = None
9 | self.inference_allowed_mirroring_axes = None
10 | return rotation_for_DA, do_dummy_2d_data_aug, initial_patch_size, mirror_axes
11 |
12 |
13 | class nnUNetTrainer_onlyMirror01(nnUNetTrainer):
14 | """
15 | Only mirrors along spatial axes 0 and 1 for 3D and 0 for 2D
16 | """
17 | def configure_rotation_dummyDA_mirroring_and_inital_patch_size(self):
18 | rotation_for_DA, do_dummy_2d_data_aug, initial_patch_size, mirror_axes = \
19 | super().configure_rotation_dummyDA_mirroring_and_inital_patch_size()
20 | patch_size = self.configuration_manager.patch_size
21 | dim = len(patch_size)
22 | if dim == 2:
23 | mirror_axes = (0, )
24 | else:
25 | mirror_axes = (0, 1)
26 | self.inference_allowed_mirroring_axes = mirror_axes
27 | return rotation_for_DA, do_dummy_2d_data_aug, initial_patch_size, mirror_axes
28 |
29 |
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/training/nnUNetTrainer/variants/loss/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AImageLab-zip/ToothFairy/8486f68c65b37ee4398fb02276558ccf467583ea/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/training/nnUNetTrainer/variants/loss/__init__.py
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/training/nnUNetTrainer/variants/loss/nnUNetTrainerCELoss.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from nnunetv2.training.loss.deep_supervision import DeepSupervisionWrapper
3 | from nnunetv2.training.nnUNetTrainer.nnUNetTrainer import nnUNetTrainer
4 | from nnunetv2.training.loss.robust_ce_loss import RobustCrossEntropyLoss
5 | import numpy as np
6 |
7 |
8 | class nnUNetTrainerCELoss(nnUNetTrainer):
9 | def _build_loss(self):
10 | assert not self.label_manager.has_regions, 'regions not supported by this trainer'
11 | loss = RobustCrossEntropyLoss(weight=None,
12 | ignore_index=self.label_manager.ignore_label if self.label_manager.has_ignore_label else -100)
13 |
14 | deep_supervision_scales = self._get_deep_supervision_scales()
15 |
16 | # we give each output a weight which decreases exponentially (division by 2) as the resolution decreases
17 | # this gives higher resolution outputs more weight in the loss
18 | weights = np.array([1 / (2 ** i) for i in range(len(deep_supervision_scales))])
19 | weights[-1] = 0
20 |
21 | # we don't use the lowest 2 outputs. Normalize weights so that they sum to 1
22 | weights = weights / weights.sum()
23 | # now wrap the loss
24 | loss = DeepSupervisionWrapper(loss, weights)
25 | return loss
26 |
27 |
28 | class nnUNetTrainerCELoss_5epochs(nnUNetTrainerCELoss):
29 | def __init__(self, plans: dict, configuration: str, fold: int, dataset_json: dict, unpack_dataset: bool = True,
30 | device: torch.device = torch.device('cuda')):
31 | """used for debugging plans etc"""
32 | super().__init__(plans, configuration, fold, dataset_json, unpack_dataset, device)
33 | self.num_epochs = 5
34 |
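A worked example of the deep supervision weighting used by this trainer and the loss variants that follow: with five outputs the raw weights are 1, 1/2, 1/4, 1/8, 1/16, the last weight is zeroed, and the rest are normalized to sum to 1:

```python
import numpy as np

weights = np.array([1 / (2 ** i) for i in range(5)])  # [1, 0.5, 0.25, 0.125, 0.0625]
weights[-1] = 0                                       # zero out the lowest-resolution output
weights = weights / weights.sum()                     # normalize to sum to 1
print(weights.round(4))  # [0.5333 0.2667 0.1333 0.0667 0.    ]
```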
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/training/nnUNetTrainer/variants/loss/nnUNetTrainerDiceLoss.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import torch
3 |
4 | from nnunetv2.training.loss.compound_losses import DC_and_BCE_loss, DC_and_CE_loss
5 | from nnunetv2.training.loss.deep_supervision import DeepSupervisionWrapper
6 | from nnunetv2.training.loss.dice import MemoryEfficientSoftDiceLoss
7 | from nnunetv2.training.nnUNetTrainer.nnUNetTrainer import nnUNetTrainer
8 | from nnunetv2.utilities.helpers import softmax_helper_dim1
9 |
10 |
11 | class nnUNetTrainerDiceLoss(nnUNetTrainer):
12 | def _build_loss(self):
13 | loss = MemoryEfficientSoftDiceLoss(**{'batch_dice': self.configuration_manager.batch_dice,
14 | 'do_bg': self.label_manager.has_regions, 'smooth': 1e-5, 'ddp': self.is_ddp},
15 | apply_nonlin=torch.sigmoid if self.label_manager.has_regions else softmax_helper_dim1)
16 |
17 | deep_supervision_scales = self._get_deep_supervision_scales()
18 |
19 | # we give each output a weight which decreases exponentially (division by 2) as the resolution decreases
20 | # this gives higher resolution outputs more weight in the loss
21 | weights = np.array([1 / (2 ** i) for i in range(len(deep_supervision_scales))])
22 | weights[-1] = 0
23 |
24 | # we don't use the lowest 2 outputs. Normalize weights so that they sum to 1
25 | weights = weights / weights.sum()
26 | # now wrap the loss
27 | loss = DeepSupervisionWrapper(loss, weights)
28 | return loss
29 |
30 |
31 | class nnUNetTrainerDiceCELoss_noSmooth(nnUNetTrainer):
32 | def _build_loss(self):
33 | # set smooth to 0
34 | if self.label_manager.has_regions:
35 | loss = DC_and_BCE_loss({},
36 | {'batch_dice': self.configuration_manager.batch_dice,
37 | 'do_bg': True, 'smooth': 0, 'ddp': self.is_ddp},
38 | use_ignore_label=self.label_manager.ignore_label is not None,
39 | dice_class=MemoryEfficientSoftDiceLoss)
40 | else:
41 | loss = DC_and_CE_loss({'batch_dice': self.configuration_manager.batch_dice,
42 | 'smooth': 0, 'do_bg': False, 'ddp': self.is_ddp}, {}, weight_ce=1, weight_dice=1,
43 | ignore_label=self.label_manager.ignore_label,
44 | dice_class=MemoryEfficientSoftDiceLoss)
45 |
46 | deep_supervision_scales = self._get_deep_supervision_scales()
47 |
48 | # we give each output a weight which decreases exponentially (division by 2) as the resolution decreases
49 | # this gives higher resolution outputs more weight in the loss
50 | weights = np.array([1 / (2 ** i) for i in range(len(deep_supervision_scales))])
51 | weights[-1] = 0
52 |
53 | # we don't use the lowest 2 outputs. Normalize weights so that they sum to 1
54 | weights = weights / weights.sum()
55 | # now wrap the loss
56 | loss = DeepSupervisionWrapper(loss, weights)
57 | return loss
58 |
59 |
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/training/nnUNetTrainer/variants/loss/nnUNetTrainerFocalLoss.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import numpy as np
3 |
4 | from nnunetv2.training.loss.compound_losses import DC_and_Focal_loss
5 | from nnunetv2.training.loss.dice import MemoryEfficientSoftDiceLoss
6 | from nnunetv2.training.loss.deep_supervision import DeepSupervisionWrapper
7 | from nnunetv2.training.nnUNetTrainer.nnUNetTrainer import nnUNetTrainer
8 |
9 |
10 | class nnUNetTrainerFocalLoss(nnUNetTrainer):
11 | def __init__(self, plans: dict, configuration: str, fold: int, dataset_json: dict, unpack_dataset: bool = True,
12 | device: torch.device = torch.device('cuda')):
13 | super().__init__(plans, configuration, fold, dataset_json, unpack_dataset, device)
14 | if self.label_manager.has_regions:
15 | raise NotImplementedError("trainer not implemented for regions")
16 | self.gamma = 2
17 |
18 | def _build_loss(self):
19 | assert not self.label_manager.has_regions, 'regions not supported by this trainer'
20 | loss = DC_and_Focal_loss({'batch_dice': self.configuration_manager.batch_dice, 'smooth': 1e-5, 'do_bg': False,
21 | 'ddp': self.is_ddp}, {"gamma": self.gamma}, weight_ce=1, weight_dice=1,
22 | ignore_label=self.label_manager.ignore_label, dice_class=MemoryEfficientSoftDiceLoss)
23 |
24 | deep_supervision_scales = self._get_deep_supervision_scales()
25 |
26 | # we give each output a weight which decreases exponentially (division by 2) as the resolution decreases
27 | # this gives higher resolution outputs more weight in the loss
28 | weights = np.array([1 / (2 ** i) for i in range(len(deep_supervision_scales))])
29 | weights[-1] = 0
30 |
31 | # we don't use the lowest 2 outputs. Normalize weights so that they sum to 1
32 | weights = weights / weights.sum()
33 | # now wrap the loss
34 | loss = DeepSupervisionWrapper(loss, weights)
35 | return loss
36 |
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/training/nnUNetTrainer/variants/loss/nnUNetTrainerHDFocalLoss.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import numpy as np
3 |
4 | from nnunetv2.training.loss.compound_losses import DC_and_Focal_loss, Seg_and_HD_loss
5 | from nnunetv2.training.loss.hausdorff_loss import HausdorffDTLoss
6 | from nnunetv2.training.loss.dice import MemoryEfficientSoftDiceLoss
7 | from nnunetv2.training.loss.deep_supervision import DeepSupervisionWrapper
8 | from nnunetv2.training.nnUNetTrainer.nnUNetTrainer import nnUNetTrainer
9 |
10 |
11 | class nnUNetTrainerHDFocalLoss(nnUNetTrainer):
12 | def __init__(self, plans: dict, configuration: str, fold: int, dataset_json: dict, unpack_dataset: bool = True,
13 | device: torch.device = torch.device('cuda')):
14 | super().__init__(plans, configuration, fold, dataset_json, unpack_dataset, device)
15 | self.gamma = 2
16 |
17 | def _build_loss(self):
18 | loss = Seg_and_HD_loss(DC_and_Focal_loss, {"soft_dice_kwargs": {'batch_dice': self.configuration_manager.batch_dice, 'smooth': 1e-5, 'do_bg': False,
19 | 'ddp': self.is_ddp}, "ce_kwargs": {"gamma": self.gamma}, "weight_ce": 1,
20 | "weight_dice": 1, "ignore_label": self.label_manager.ignore_label,
21 | "dice_class": MemoryEfficientSoftDiceLoss},
22 | HausdorffDTLoss, {"max_dt": 10})
23 |
24 | deep_supervision_scales = self._get_deep_supervision_scales()
25 |
26 | # we give each output a weight which decreases exponentially (division by 2) as the resolution decreases
27 | # this gives higher resolution outputs more weight in the loss
28 | weights = np.array([1 / (2 ** i) for i in range(len(deep_supervision_scales))])
29 | weights[-1] = 0
30 |
31 | # we don't use the lowest 2 outputs. Normalize weights so that they sum to 1
32 | weights = weights / weights.sum()
33 | # now wrap the loss
34 | loss = DeepSupervisionWrapper(loss, weights)
35 | return loss
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/training/nnUNetTrainer/variants/loss/nnUNetTrainerHDLoss.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import numpy as np
3 |
4 | from nnunetv2.training.loss.compound_losses import DC_and_CE_loss, Seg_and_HD_loss
5 | from nnunetv2.training.loss.hausdorff_loss import HausdorffDTLoss
6 | from nnunetv2.training.loss.dice import MemoryEfficientSoftDiceLoss
7 | from nnunetv2.training.loss.deep_supervision import DeepSupervisionWrapper
8 | from nnunetv2.training.nnUNetTrainer.nnUNetTrainer import nnUNetTrainer
9 |
10 |
11 | class nnUNetTrainerHDLoss(nnUNetTrainer):
12 | def __init__(self, plans: dict, configuration: str, fold: int, dataset_json: dict, unpack_dataset: bool = True,
13 | device: torch.device = torch.device('cuda')):
14 | super().__init__(plans, configuration, fold, dataset_json, unpack_dataset, device)
15 | if self.label_manager.has_regions:
16 | raise NotImplementedError("trainer not implemented for regions")
17 |
18 | def _build_loss(self):
19 | loss = Seg_and_HD_loss(DC_and_CE_loss, {"soft_dice_kwargs": {'batch_dice': self.configuration_manager.batch_dice, 'smooth': 1e-5,
20 | 'do_bg': False, 'ddp': self.is_ddp}, "ce_kwargs": {}, "weight_ce": 1,
21 | "weight_dice": 1, "ignore_label": self.label_manager.ignore_label,
22 | "dice_class": MemoryEfficientSoftDiceLoss},
23 | HausdorffDTLoss, {"max_dt": 10})
24 |
25 | deep_supervision_scales = self._get_deep_supervision_scales()
26 |
27 | # we give each output a weight which decreases exponentially (division by 2) as the resolution decreases
28 | # this gives higher resolution outputs more weight in the loss
29 | weights = np.array([1 / (2 ** i) for i in range(len(deep_supervision_scales))])
30 | weights[-1] = 0
31 |
32 | # we don't use the lowest 2 outputs. Normalize weights so that they sum to 1
33 | weights = weights / weights.sum()
34 | # now wrap the loss
35 | loss = DeepSupervisionWrapper(loss, weights)
36 | return loss
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/training/nnUNetTrainer/variants/loss/nnUNetTrainerSkelDice.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import torch
3 |
4 | from nnunetv2.training.loss.compound_losses import DC_SDC_and_CE_loss
5 | from nnunetv2.training.loss.deep_supervision import DeepSupervisionWrapper
6 | from nnunetv2.training.loss.dice import MemoryEfficientSoftDiceLoss
7 | from nnunetv2.training.nnUNetTrainer.nnUNetTrainer import nnUNetTrainer
8 |
9 |
10 | class nnUNetTrainerSkelDice(nnUNetTrainer):
11 | def __init__(self, plans: dict, configuration: str, fold: int, dataset_json: dict, unpack_dataset: bool = True,
12 | device: torch.device = torch.device('cuda')):
13 | super().__init__(plans, configuration, fold, dataset_json, unpack_dataset, device)
14 |
15 | def _build_loss(self):
16 | if self.label_manager.has_regions:
17 | raise NotImplementedError("Skeleton Dice not available for regions")
18 | else:
19 | loss = DC_SDC_and_CE_loss({'batch_dice': self.configuration_manager.batch_dice, 'smooth': 1e-5, 'do_bg': False,
20 | 'ddp': self.is_ddp}, {}, weight_ce=1, weight_dice=1, weight_sdc=1,
21 | ignore_label=self.label_manager.ignore_label, dice_class=MemoryEfficientSoftDiceLoss)
22 |
23 | deep_supervision_scales = self._get_deep_supervision_scales()
24 |
25 | # we give each output a weight which decreases exponentially (division by 2) as the resolution decreases
26 | # this gives higher resolution outputs more weight in the loss
27 | weights = np.array([1 / (2 ** i) for i in range(len(deep_supervision_scales))])
28 | weights[-1] = 0
29 |
30 | # we don't use the lowest 2 outputs. Normalize weights so that they sum to 1
31 | weights = weights / weights.sum()
32 | # now wrap the loss
33 | loss = DeepSupervisionWrapper(loss, weights)
34 | return loss
35 |
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/training/nnUNetTrainer/variants/lr_schedule/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AImageLab-zip/ToothFairy/8486f68c65b37ee4398fb02276558ccf467583ea/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/training/nnUNetTrainer/variants/lr_schedule/__init__.py
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/training/nnUNetTrainer/variants/lr_schedule/nnUNetTrainerCosAnneal.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from torch.optim.lr_scheduler import CosineAnnealingLR
3 |
4 | from nnunetv2.training.nnUNetTrainer.nnUNetTrainer import nnUNetTrainer
5 |
6 |
7 | class nnUNetTrainerCosAnneal(nnUNetTrainer):
8 | def configure_optimizers(self):
9 | optimizer = torch.optim.SGD(self.network.parameters(), self.initial_lr, weight_decay=self.weight_decay,
10 | momentum=0.99, nesterov=True)
11 | lr_scheduler = CosineAnnealingLR(optimizer, T_max=self.num_epochs)
12 | return optimizer, lr_scheduler
13 |
14 |
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/training/nnUNetTrainer/variants/network_architecture/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AImageLab-zip/ToothFairy/8486f68c65b37ee4398fb02276558ccf467583ea/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/training/nnUNetTrainer/variants/network_architecture/__init__.py
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/training/nnUNetTrainer/variants/optimizer/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AImageLab-zip/ToothFairy/8486f68c65b37ee4398fb02276558ccf467583ea/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/training/nnUNetTrainer/variants/optimizer/__init__.py
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/training/nnUNetTrainer/variants/optimizer/nnUNetTrainerAdam.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from torch.optim import Adam, AdamW
3 |
4 | from nnunetv2.training.lr_scheduler.polylr import PolyLRScheduler
5 | from nnunetv2.training.nnUNetTrainer.nnUNetTrainer import nnUNetTrainer
6 |
7 |
8 | class nnUNetTrainerAdam(nnUNetTrainer):
9 | def configure_optimizers(self):
10 | optimizer = AdamW(self.network.parameters(),
11 | lr=self.initial_lr,
12 | weight_decay=self.weight_decay,
13 | amsgrad=True)
14 | # optimizer = torch.optim.SGD(self.network.parameters(), self.initial_lr, weight_decay=self.weight_decay,
15 | # momentum=0.99, nesterov=True)
16 | lr_scheduler = PolyLRScheduler(optimizer, self.initial_lr, self.num_epochs)
17 | return optimizer, lr_scheduler
18 |
19 |
20 | class nnUNetTrainerVanillaAdam(nnUNetTrainer):
21 | def configure_optimizers(self):
22 | optimizer = Adam(self.network.parameters(),
23 | lr=self.initial_lr,
24 | weight_decay=self.weight_decay)
25 | # optimizer = torch.optim.SGD(self.network.parameters(), self.initial_lr, weight_decay=self.weight_decay,
26 | # momentum=0.99, nesterov=True)
27 | lr_scheduler = PolyLRScheduler(optimizer, self.initial_lr, self.num_epochs)
28 | return optimizer, lr_scheduler
29 |
30 |
31 | class nnUNetTrainerVanillaAdam1en3(nnUNetTrainerVanillaAdam):
32 | def __init__(self, plans: dict, configuration: str, fold: int, dataset_json: dict, unpack_dataset: bool = True,
33 | device: torch.device = torch.device('cuda')):
34 | super().__init__(plans, configuration, fold, dataset_json, unpack_dataset, device)
35 | self.initial_lr = 1e-3
36 |
37 |
38 | class nnUNetTrainerVanillaAdam3en4(nnUNetTrainerVanillaAdam):
39 | # https://twitter.com/karpathy/status/801621764144971776?lang=en
40 | def __init__(self, plans: dict, configuration: str, fold: int, dataset_json: dict, unpack_dataset: bool = True,
41 | device: torch.device = torch.device('cuda')):
42 | super().__init__(plans, configuration, fold, dataset_json, unpack_dataset, device)
43 | self.initial_lr = 3e-4
44 |
45 |
46 | class nnUNetTrainerAdam1en3(nnUNetTrainerAdam):
47 | def __init__(self, plans: dict, configuration: str, fold: int, dataset_json: dict, unpack_dataset: bool = True,
48 | device: torch.device = torch.device('cuda')):
49 | super().__init__(plans, configuration, fold, dataset_json, unpack_dataset, device)
50 | self.initial_lr = 1e-3
51 |
52 |
53 | class nnUNetTrainerAdam3en4(nnUNetTrainerAdam):
54 | # https://twitter.com/karpathy/status/801621764144971776?lang=en
55 | def __init__(self, plans: dict, configuration: str, fold: int, dataset_json: dict, unpack_dataset: bool = True,
56 | device: torch.device = torch.device('cuda')):
57 | super().__init__(plans, configuration, fold, dataset_json, unpack_dataset, device)
58 | self.initial_lr = 3e-4
59 |
--------------------------------------------------------------------------------
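All of these variants keep the `PolyLRScheduler` and only swap the optimizer or the initial learning rate. As a reference for what that schedule does, a minimal re-implementation sketch of polynomial decay, assuming the default exponent of 0.9 used by nnU-Net's `PolyLRScheduler` (worth verifying against `nnunetv2/training/lr_scheduler/polylr.py`):

```
def poly_lr(initial_lr: float, epoch: int, max_epochs: int,
            exponent: float = 0.9) -> float:
    # learning rate decays polynomially from initial_lr towards 0
    return initial_lr * (1 - epoch / max_epochs) ** exponent

print(poly_lr(1e-3, 0, 1000))    # 0.001 at the first epoch
print(poly_lr(1e-3, 500, 1000))  # ~0.000536 halfway through
```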
/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/training/nnUNetTrainer/variants/sampling/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AImageLab-zip/ToothFairy/8486f68c65b37ee4398fb02276558ccf467583ea/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/training/nnUNetTrainer/variants/sampling/__init__.py
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/training/nnUNetTrainer/variants/training_length/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AImageLab-zip/ToothFairy/8486f68c65b37ee4398fb02276558ccf467583ea/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/training/nnUNetTrainer/variants/training_length/__init__.py
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/utilities/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AImageLab-zip/ToothFairy/8486f68c65b37ee4398fb02276558ccf467583ea/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/utilities/__init__.py
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/utilities/collate_outputs.py:
--------------------------------------------------------------------------------
1 | from typing import List
2 |
3 | import numpy as np
4 |
5 |
6 | def collate_outputs(outputs: List[dict]):
7 | """
8 | used to collate default train_step and validation_step outputs. If you want something different then you gotta
9 | extend this
10 |
11 | we expect outputs to be a list of dictionaries where each of the dict has the same set of keys
12 | """
13 | collated = {}
14 | for k in outputs[0].keys():
15 | if np.isscalar(outputs[0][k]):
16 | collated[k] = [o[k] for o in outputs]
17 | elif isinstance(outputs[0][k], np.ndarray):
18 | collated[k] = np.vstack([o[k][None] for o in outputs])
19 | elif isinstance(outputs[0][k], list):
20 | collated[k] = [item for o in outputs for item in o[k]]
21 | else:
22 | raise ValueError(f'Cannot collate input of type {type(outputs[0][k])}. '
23 | f'Modify collate_outputs to add this functionality')
24 | return collated
--------------------------------------------------------------------------------
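A small usage example (assuming `collate_outputs` from the file above is importable; the keys are hypothetical stand-ins for what `train_step`/`validation_step` return):

```
import numpy as np

outputs = [
    {'loss': 0.7, 'tp_hard': np.array([10, 5]), 'names': ['a']},
    {'loss': 0.5, 'tp_hard': np.array([12, 6]), 'names': ['b', 'c']},
]
collated = collate_outputs(outputs)
print(collated['loss'])     # [0.7, 0.5]          (scalars -> list)
print(collated['tp_hard'])  # [[10  5] [12  6]]   (arrays -> stacked rows)
print(collated['names'])    # ['a', 'b', 'c']     (lists -> concatenated)
```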
/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/utilities/ddp_allgather.py:
--------------------------------------------------------------------------------
1 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | from typing import Any, Optional, Tuple
15 |
16 | import torch
17 | from torch import distributed
18 |
19 |
20 | def print_if_rank0(*args):
21 | if distributed.get_rank() == 0:
22 | print(*args)
23 |
24 |
25 | class AllGatherGrad(torch.autograd.Function):
26 | # stolen from pytorch lightning
27 | @staticmethod
28 | def forward(
29 | ctx: Any,
30 | tensor: torch.Tensor,
31 | group: Optional["torch.distributed.ProcessGroup"] = None,
32 | ) -> torch.Tensor:
33 | ctx.group = group
34 |
35 | gathered_tensor = [torch.zeros_like(tensor) for _ in range(torch.distributed.get_world_size())]
36 |
37 | torch.distributed.all_gather(gathered_tensor, tensor, group=group)
38 | gathered_tensor = torch.stack(gathered_tensor, dim=0)
39 |
40 | return gathered_tensor
41 |
42 | @staticmethod
43 | def backward(ctx: Any, *grad_output: torch.Tensor) -> Tuple[torch.Tensor, None]:
44 | grad_output = torch.cat(grad_output)
45 |
46 | torch.distributed.all_reduce(grad_output, op=torch.distributed.ReduceOp.SUM, async_op=False, group=ctx.group)
47 |
48 | return grad_output[torch.distributed.get_rank()], None
49 |
50 |
--------------------------------------------------------------------------------
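`AllGatherGrad` needs an initialized process group. A minimal sketch that runs as a single-process gloo "world" purely for demonstration (in real DDP training the launcher sets up the group):

```
import os
import torch
import torch.distributed as dist

# single-process group so the example runs without multiple GPUs/nodes
os.environ.setdefault('MASTER_ADDR', 'localhost')
os.environ.setdefault('MASTER_PORT', '29500')
dist.init_process_group('gloo', rank=0, world_size=1)

x = torch.ones(3, requires_grad=True)
gathered = AllGatherGrad.apply(x)  # shape (world_size, 3)
gathered.sum().backward()          # gradients flow back through the all_gather
print(gathered.shape, x.grad)

dist.destroy_process_group()
```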
/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/utilities/default_n_proc_DA.py:
--------------------------------------------------------------------------------
1 | import subprocess
2 | import os
3 |
4 |
5 | def get_allowed_n_proc_DA():
6 | """
7 |     This function is used to set the number of processes used on different systems. It is specific to our cluster
8 | infrastructure at DKFZ. You can modify it to suit your needs. Everything is allowed.
9 |
10 | IMPORTANT: if the environment variable nnUNet_n_proc_DA is set it will overwrite anything in this script
11 | (see first line).
12 |
13 | Interpret the output as the number of processes used for data augmentation PER GPU.
14 |
15 | The way it is implemented here is simply a look up table. We know the hostnames, CPU and GPU configurations of our
16 | systems and set the numbers accordingly. For example, a system with 4 GPUs and 48 threads can use 12 threads per
17 | GPU without overloading the CPU (technically 11 because we have a main process as well), so that's what we use.
18 | """
19 |
20 | if 'nnUNet_n_proc_DA' in os.environ.keys():
21 | use_this = int(os.environ['nnUNet_n_proc_DA'])
22 | else:
23 |         hostname = subprocess.getoutput('hostname')
24 | if hostname in ['Fabian', ]:
25 | use_this = 12
26 | elif hostname in ['hdf19-gpu16', 'hdf19-gpu17', 'hdf19-gpu18', 'hdf19-gpu19', 'e230-AMDworkstation']:
27 | use_this = 16
28 | elif hostname.startswith('e230-dgx1'):
29 | use_this = 10
30 | elif hostname.startswith('hdf18-gpu') or hostname.startswith('e132-comp'):
31 | use_this = 16
32 | elif hostname.startswith('e230-dgx2'):
33 | use_this = 6
34 | elif hostname.startswith('e230-dgxa100-'):
35 | use_this = 28
36 | elif hostname.startswith('lsf22-gpu'):
37 | use_this = 28
38 | elif hostname.startswith('hdf19-gpu') or hostname.startswith('e071-gpu'):
39 | use_this = 12
40 | else:
41 | use_this = 12 # default value
42 |
43 | use_this = min(use_this, os.cpu_count())
44 | return use_this
45 |
--------------------------------------------------------------------------------
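The environment variable takes precedence over the hostname lookup table, so on any machine you can force the number of augmentation workers per GPU (assuming the function above is importable):

```
import os

os.environ['nnUNet_n_proc_DA'] = '4'
print(get_allowed_n_proc_DA())  # 4, still capped at os.cpu_count()
```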
/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/utilities/find_class_by_name.py:
--------------------------------------------------------------------------------
1 | import importlib
2 | import pkgutil
3 |
4 | from batchgenerators.utilities.file_and_folder_operations import *
5 |
6 |
7 | def recursive_find_python_class(folder: str, class_name: str, current_module: str):
8 | tr = None
9 | for importer, modname, ispkg in pkgutil.iter_modules([folder]):
10 | # print(modname, ispkg)
11 | if not ispkg:
12 | m = importlib.import_module(current_module + "." + modname)
13 | if hasattr(m, class_name):
14 | tr = getattr(m, class_name)
15 | break
16 |
17 | if tr is None:
18 | for importer, modname, ispkg in pkgutil.iter_modules([folder]):
19 | if ispkg:
20 | next_current_module = current_module + "." + modname
21 | tr = recursive_find_python_class(join(folder, modname), class_name, current_module=next_current_module)
22 | if tr is not None:
23 | break
24 | return tr
--------------------------------------------------------------------------------
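Typical usage, locating a trainer class by name somewhere under a package (this sketch assumes the `nnunetv2` package from this repository is importable; the class name is one of the variants shown earlier):

```
from os.path import dirname, join

import nnunetv2

folder = join(dirname(nnunetv2.__file__), 'training')
cls = recursive_find_python_class(folder, 'nnUNetTrainerCosAnneal',
                                  current_module='nnunetv2.training')
print(cls)  # the class object, or None if no module defines that name
```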
/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/utilities/helpers.py:
--------------------------------------------------------------------------------
1 | import torch
2 |
3 |
4 | def softmax_helper_dim0(x: torch.Tensor) -> torch.Tensor:
5 | return torch.softmax(x, 0)
6 |
7 |
8 | def softmax_helper_dim1(x: torch.Tensor) -> torch.Tensor:
9 | return torch.softmax(x, 1)
10 |
11 |
12 | def empty_cache(device: torch.device):
13 | if device.type == 'cuda':
14 | torch.cuda.empty_cache()
15 | elif device.type == 'mps':
16 | from torch import mps
17 | mps.empty_cache()
18 | else:
19 | pass
20 |
21 |
22 | class dummy_context(object):
23 | def __enter__(self):
24 | pass
25 |
26 | def __exit__(self, exc_type, exc_val, exc_tb):
27 | pass
28 |
--------------------------------------------------------------------------------
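`dummy_context` is a no-op stand-in for a real context manager, e.g. to toggle autocast on and off without duplicating the forward pass (a sketch, assuming the helpers above are importable):

```
import torch

use_amp = torch.cuda.is_available()
ctx = torch.autocast('cuda') if use_amp else dummy_context()
with ctx:
    y = softmax_helper_dim1(torch.randn(2, 3, 4))
print(y.shape)  # torch.Size([2, 3, 4])
```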
/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/utilities/json_export.py:
--------------------------------------------------------------------------------
1 | from collections.abc import Iterable
2 |
3 | import numpy as np
4 | import torch
5 |
6 |
7 | def recursive_fix_for_json_export(my_dict: dict):
8 | # json is stupid. 'cannot serialize object of type bool_/int64/float64'. Come on bro.
9 | keys = list(my_dict.keys()) # cannot iterate over keys() if we change keys....
10 | for k in keys:
11 | if isinstance(k, (np.int64, np.int32, np.int8, np.uint8)):
12 | tmp = my_dict[k]
13 | del my_dict[k]
14 | my_dict[int(k)] = tmp
15 | del tmp
16 | k = int(k)
17 |
18 | if isinstance(my_dict[k], dict):
19 | recursive_fix_for_json_export(my_dict[k])
20 | elif isinstance(my_dict[k], np.ndarray):
21 | assert len(my_dict[k].shape) == 1, 'only 1d arrays are supported'
22 | my_dict[k] = fix_types_iterable(my_dict[k], output_type=list)
23 | elif isinstance(my_dict[k], (np.bool_,)):
24 | my_dict[k] = bool(my_dict[k])
25 | elif isinstance(my_dict[k], (np.int64, np.int32, np.int8, np.uint8)):
26 | my_dict[k] = int(my_dict[k])
27 | elif isinstance(my_dict[k], (np.float32, np.float64, np.float16)):
28 | my_dict[k] = float(my_dict[k])
29 | elif isinstance(my_dict[k], list):
30 | my_dict[k] = fix_types_iterable(my_dict[k], output_type=type(my_dict[k]))
31 | elif isinstance(my_dict[k], tuple):
32 | my_dict[k] = fix_types_iterable(my_dict[k], output_type=tuple)
33 | elif isinstance(my_dict[k], torch.device):
34 | my_dict[k] = str(my_dict[k])
35 | else:
36 | pass # pray it can be serialized
37 |
38 |
39 | def fix_types_iterable(iterable, output_type):
40 |     # this sh!t is hacky as hell and will break if you use it for anything outside nnunet. Keep your hands off of this.
41 | out = []
42 | for i in iterable:
43 | if type(i) in (np.int64, np.int32, np.int8, np.uint8):
44 | out.append(int(i))
45 | elif isinstance(i, dict):
46 | recursive_fix_for_json_export(i)
47 | out.append(i)
48 | elif type(i) in (np.float32, np.float64, np.float16):
49 | out.append(float(i))
50 | elif type(i) in (np.bool_,):
51 | out.append(bool(i))
52 | elif isinstance(i, str):
53 | out.append(i)
54 | elif isinstance(i, Iterable):
55 | # print('recursive call on', i, type(i))
56 | out.append(fix_types_iterable(i, type(i)))
57 | else:
58 | out.append(i)
59 | return output_type(out)
60 |
--------------------------------------------------------------------------------
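A usage example (the dictionary contents are hypothetical): after `recursive_fix_for_json_export` the dict serializes cleanly.

```
import json

import numpy as np
import torch

d = {np.int64(3): {'mean': np.float32(0.5),
                   'shape': np.array([2, 3]),
                   'ok': np.bool_(True),
                   'device': torch.device('cpu')}}
recursive_fix_for_json_export(d)
print(json.dumps(d))
# {"3": {"mean": 0.5, "shape": [2, 3], "ok": true, "device": "cpu"}}
```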
/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/utilities/label_handling/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AImageLab-zip/ToothFairy/8486f68c65b37ee4398fb02276558ccf467583ea/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/utilities/label_handling/__init__.py
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/utilities/network_initialization.py:
--------------------------------------------------------------------------------
1 | from torch import nn
2 |
3 |
4 | class InitWeights_He(object):
5 | def __init__(self, neg_slope=1e-2):
6 | self.neg_slope = neg_slope
7 |
8 | def __call__(self, module):
9 |         if isinstance(module, (nn.Conv3d, nn.Conv2d, nn.ConvTranspose2d, nn.ConvTranspose3d)):
10 | module.weight = nn.init.kaiming_normal_(module.weight, a=self.neg_slope)
11 | if module.bias is not None:
12 | module.bias = nn.init.constant_(module.bias, 0)
13 |
--------------------------------------------------------------------------------
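`InitWeights_He` is meant to be passed to `torch.nn.Module.apply`, which calls it on every submodule; a minimal sketch with a hypothetical network:

```
import torch
from torch import nn

net = nn.Sequential(nn.Conv3d(1, 8, 3), nn.LeakyReLU(1e-2), nn.Conv3d(8, 2, 1))
net.apply(InitWeights_He(neg_slope=1e-2))  # re-initializes every conv layer
```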
/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/utilities/plans_handling/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AImageLab-zip/ToothFairy/8486f68c65b37ee4398fb02276558ccf467583ea/ToothFairy/algorithms/Yannick_Kirchhoff/nnunetv2/utilities/plans_handling/__init__.py
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Yannick_Kirchhoff/readme.txt:
--------------------------------------------------------------------------------
1 | - To use nnUNet with the ToothFairy data you will need to bring it into the nnUNet format, as specified in the nnUNet documentation. I attached the dataset.json file I used; the NumpyIO reader/writer specified there lets nnUNet work with .npy files directly, so you don't need to convert the data to NIfTI or any other file format (a hypothetical sketch of such a file follows after this readme).
2 | - All loss functions and trainers used can be found in the directories nnunetv2/training/loss and nnunetv2/training/nnUNetTrainer/variants/loss, respectively.
--------------------------------------------------------------------------------
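A hypothetical sketch of such a dataset.json, written from Python. The field names follow the nnU-Net v2 convention; every value here is illustrative and not copied from the attached file:

```
import json

dataset = {
    "channel_names": {"0": "CBCT"},              # illustrative
    "labels": {"background": 0, "canal": 1},     # illustrative
    "numTraining": 443,                          # hypothetical count
    "file_ending": ".npy",
    "overwrite_image_reader_writer": "NumpyIO",  # as mentioned in the readme
}
with open("dataset.json", "w") as f:
    json.dump(dataset, f, indent=4)
```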
/ToothFairy/algorithms/Yannick_Kirchhoff/setup.py:
--------------------------------------------------------------------------------
1 | import setuptools
2 |
3 | if __name__ == "__main__":
4 | setuptools.setup()
5 |
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Yusheng_Liu/build.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | SCRIPTPATH="$( cd "$(dirname "$0")" ; pwd -P )"
3 |
4 | docker build -t toothfairy_generic:v1.0 "$SCRIPTPATH"
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Yusheng_Liu/dockerfile:
--------------------------------------------------------------------------------
1 | FROM pytorch/pytorch
2 |
3 | RUN groupadd -r user && useradd -m --no-log-init -r -g user user
4 |
5 | RUN mkdir -p /workspace /input /output /workspace/networks /workspace/weight\
6 | && chown user:user /workspace /input /output /workspace/networks /workspace/weight
7 | # RUN mkdir -p /output/images/inferior-alveolar-canal
8 |
9 | USER user
10 | WORKDIR /workspace
11 |
12 | ENV PATH="/home/user/.local/bin:${PATH}"
13 |
14 | RUN python -m pip install --user -U pip && python -m pip install --user pip-tools
15 |
16 | #COPY --chown=user:user ./ /workspace
17 |
18 | # RUN pip install pip -U
19 | RUN pip install pip -U
20 | RUN pip config set global.index-url https://pypi.mirrors.ustc.edu.cn/simple/ --user
21 |
22 | COPY --chown=user:user requirements.txt /workspace
23 | RUN python -m pip install --user -r requirements.txt
24 |
25 |
26 | COPY --chown=user:user test_3D.py /workspace
27 | #COPY --chown=user:user predict.sh /workspace
28 | #COPY --chown=user:user name_config.py /workspace
29 | COPY --chown=user:user test_3D_util_mirror.py /workspace
30 | #COPY --chown=user:user spacing_config.py /workspace
31 | COPY --chown=user:user /weight /workspace/weight
32 | COPY --chown=user:user /networks /workspace/networks
33 |
34 | #CMD ["/workspace/predict.sh"]
35 | #ENTRYPOINT ["sh"]
36 |
37 | ENTRYPOINT [ "python", "test_3D.py" ]
38 |
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Yusheng_Liu/framework.JPG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AImageLab-zip/ToothFairy/8486f68c65b37ee4398fb02276558ccf467583ea/ToothFairy/algorithms/Yusheng_Liu/framework.JPG
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Yusheng_Liu/name_config.py:
--------------------------------------------------------------------------------
1 | import SimpleITK as sitk
2 | import numpy as np
3 | import sys
4 | import os
5 | import random
6 | from multiprocessing import Pool, cpu_count
7 |
8 | def Normalize(Image, LowerBound, UpperBound):
9 | Spacing = Image.GetSpacing()
10 | Origin = Image.GetOrigin()
11 | Direction = Image.GetDirection()
12 | Array = sitk.GetArrayFromImage(Image)
13 |
14 | Array[Array < LowerBound] = LowerBound
15 | Array[Array > UpperBound] = UpperBound
16 | # Array = (Array - np.mean(Array )) / np.std(Array )
17 | Array = (Array.astype(np.float64) - LowerBound) / (UpperBound - LowerBound)
18 | Array = (Array * 255).astype(np.uint8)
19 | Image = sitk.GetImageFromArray(Array)
20 | Image.SetSpacing(Spacing)
21 | Image.SetOrigin(Origin)
22 | Image.SetDirection(Direction)
23 | return Image
24 |
25 | def Resample(Image, NewSpacing, Label, Size = None):
26 | Spacing = Image.GetSpacing()
27 | Origin = Image.GetOrigin()
28 | Direction = Image.GetDirection()
29 | Array = sitk.GetArrayFromImage(Image)
30 | if not Size:
31 | NewSize = [int(Array.shape[2] * Spacing[0] / NewSpacing[0]), int(Array.shape[1] * Spacing[1] / NewSpacing[1]),
32 | int(Array.shape[0] * Spacing[2] / NewSpacing[2])]
33 | else:
34 | NewSize = Size
35 | Resample = sitk.ResampleImageFilter()
36 | Resample.SetOutputDirection(Direction)
37 | Resample.SetOutputOrigin(Origin)
38 | Resample.SetSize(NewSize)
39 | if Label:
40 | Resample.SetInterpolator(sitk.sitkNearestNeighbor)
41 | else:
42 | Resample.SetInterpolator(sitk.sitkLinear)
43 | Resample.SetOutputSpacing(NewSpacing)
44 |
45 | NewImage = Resample.Execute(Image)
46 |
47 | return NewImage
48 |
49 | def main(img_path,case):
50 | Image = sitk.ReadImage(os.path.join(img_path,case))
51 |     if np.max(sitk.GetArrayFromImage(Image)) > 256:
52 |         print('Doing Preprocessing')
53 | Image = Normalize(Image, -750, 3000)
54 | Image.SetSpacing((0.3,0.3,0.3))
55 | sitk.WriteImage(Image,os.path.join(img_path,case))
56 | # os.rename(os.path.join(img_path,case),os.path.join(img_path,case))
57 |
58 | if __name__ == "__main__":
59 | img_path = '/input/images/cbct'
60 | pool = Pool(int(cpu_count() / 2))
61 | print('pool count',int(cpu_count() / 2))
62 | for case in os.listdir(img_path):
63 | print('***************')
64 | print(case)
65 | if '_0000' in case:
66 | continue
67 | try:
68 | pool.apply_async(main, (img_path, case))
69 | except Exception as err:
70 | print('Outer single copy throws exception %s, with case name %s!' % (err, case))
71 |
72 | pool.close()
73 | pool.join()
74 |
75 |
76 |
--------------------------------------------------------------------------------
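Usage sketch for `Normalize` above, with a synthetic volume (the intensity bounds and spacing match the values used in `main`):

```
import numpy as np
import SimpleITK as sitk

arr = np.linspace(-1000, 4000, 27, dtype=np.float32).reshape(3, 3, 3)
img = sitk.GetImageFromArray(arr)
img.SetSpacing((0.3, 0.3, 0.3))

out = Normalize(img, -750, 3000)  # clamp to [-750, 3000], rescale to uint8
out_arr = sitk.GetArrayFromImage(out)
print(out_arr.min(), out_arr.max())  # 0 255
```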
/ToothFairy/algorithms/Yusheng_Liu/networks/net_factory_3d.py:
--------------------------------------------------------------------------------
1 | from networks.generic_UNet import initialize_nnunet
2 | from networks.generic_UNetv2 import initialize_nnunetv2
3 | from networks.generic_UNet_small import initialize_nnunet_small
4 |
5 | def net_factory_3d(net_type="unet_3D", in_chns=1, class_num=2):
6 | if net_type == "nnUNet":
7 | net = initialize_nnunet(num_classes=class_num).cuda()
8 | # net = initialize_network(num_classes=class_num).cuda()
9 | elif net_type == "nnUNetv2":
10 | net = initialize_nnunetv2(num_classes=class_num).cuda()
11 | elif net_type == "nnUNet_small":
12 | net = initialize_nnunet_small(num_classes=class_num).cuda()
13 | else:
14 | net = None
15 | return net
16 |
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Yusheng_Liu/output/results.json:
--------------------------------------------------------------------------------
1 | [{"outputs": [{"type": "metaio_image", "filename": "P23.mha"}], "inputs": [{"type": "metaio_image", "filename": "P23.mha"}], "error_messages": []}]
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Yusheng_Liu/predict.sh:
--------------------------------------------------------------------------------
1 | python name_config.py
2 |
3 | python test_3D.py
4 |
5 | python spacing_config.py
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Yusheng_Liu/process/Check_Component.py:
--------------------------------------------------------------------------------
1 | import SimpleITK as sitk
2 | import numpy as np
3 | import sys
4 | import os
5 | import shutil
6 | from utils import connected_domain_check
7 | dataset_dir = r'E:\CBCT_Project\IAN\Tooth_Fairy\ToothFairy_Dataset_new\dataset\DenseMask'
8 | badcase_dir = r'E:\CBCT_Project\IAN\Tooth_Fairy\ToothFairy_Dataset_new\Dense_Mask\bad_case'
9 | finecase_dir = r'E:\CBCT_Project\IAN\Tooth_Fairy\ToothFairy_Dataset_new\Dense_Mask\fine_case'
10 | bad_case = []
11 | for case_name in os.listdir(dataset_dir):
12 | print('*' * 25)
13 | print(case_name)
14 | mask = sitk.ReadImage(os.path.join(dataset_dir,case_name))
15 | mask_array = sitk.GetArrayFromImage(mask)
16 |
17 | num_con = connected_domain_check(mask_array)
18 |
19 | if num_con != 2:
20 | bad_case.append(case_name)
21 | shutil.copy(os.path.join(dataset_dir,case_name),os.path.join(badcase_dir,case_name))
22 | else:
23 | shutil.copy(os.path.join(dataset_dir, case_name), os.path.join(finecase_dir, case_name))
24 |
25 | print('Bad Case Num:',len(bad_case))
26 | print('Bad Case:',bad_case)
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Yusheng_Liu/process/data_read.py:
--------------------------------------------------------------------------------
1 | import SimpleITK as sitk
2 | import numpy as np
3 | import sys
4 | import os
5 | import matplotlib.pyplot as pl
6 | from PIL import Image as Img
7 | import random
8 | from glob import glob
9 |
10 | imgList = sorted(glob(r'E:\CBCT_Project\IAN\Tooth_Fairy\ToothFairy_Dataset_new\ToothFairy_Dataset\Dataset\*\data.npy'))
11 | DenseMaskList = sorted(glob(r'E:\CBCT_Project\IAN\Tooth_Fairy\ToothFairy_Dataset_new\ToothFairy_Dataset\Dataset\*\gt_alpha.npy'))
12 | SparseMaskList = sorted(glob(r'E:\CBCT_Project\IAN\Tooth_Fairy\ToothFairy_Dataset_new\ToothFairy_Dataset\Dataset\*\gt_sparse.npy'))
13 | save_path = r'E:\CBCT_Project\IAN\Tooth_Fairy\ToothFairy_Dataset_new\dataset'
14 | List = dict()
15 | List['img'] = imgList
16 | List['DenseMask'] = DenseMaskList
17 | List['SparseMask'] = SparseMaskList
18 | for phase in ['img','SparseMask','DenseMask']:
19 | for case in List[phase]:
20 | data = np.load(case,allow_pickle=True)
21 | data = np.flip(data,axis=0)
22 |
23 | data = sitk.GetImageFromArray(data)
24 | data.SetSpacing((0.3,0.3,0.3))
25 | path = os.path.join(save_path,phase)
26 | if not os.path.exists(path):
27 | os.mkdir(path)
28 | sitk.WriteImage(data,os.path.join(path,case.split('\\')[-2]+'.nii.gz'))
29 |
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Yusheng_Liu/process/transforms/compose.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | import collections
3 |
4 | from Common.utils.registry import build_from_cfg, Registry
5 |
6 |
7 | PIPELINES = Registry('pipeline')
8 |
9 |
10 | @PIPELINES.register()
11 | class Compose(object):
12 | """Compose multiple transforms sequentially.
13 |
14 | Args:
15 | transforms (Sequence[dict | callable]): Sequence of transform object or
16 | config dict to be composed.
17 | """
18 |
19 | def __init__(self, transforms):
20 | assert isinstance(transforms, collections.abc.Sequence)
21 | self.transforms = []
22 | for transform in transforms:
23 | if isinstance(transform, dict):
24 | transform = build_from_cfg(transform, PIPELINES)
25 | self.transforms.append(transform)
26 | elif callable(transform):
27 | self.transforms.append(transform)
28 | else:
29 | raise TypeError('transform must be callable or a dict')
30 |
31 | def __call__(self, data):
32 | """Call function to apply transforms sequentially.
33 |
34 | Args:
35 | data (dict): A result dict contains the data to transform.
36 |
37 | Returns:
38 | dict: Transformed data.
39 | """
40 |
41 | for t in self.transforms:
42 | data = t(data)
43 | if data is None:
44 | return None
45 | return data
46 |
47 | def __repr__(self):
48 | format_string = self.__class__.__name__ + '('
49 | for t in self.transforms:
50 | format_string += '\n'
51 | format_string += f' {t}'
52 | format_string += '\n)'
53 | return format_string
54 |
--------------------------------------------------------------------------------
/ToothFairy/algorithms/Yusheng_Liu/process/transforms/connected_component_labeling.py:
--------------------------------------------------------------------------------
1 | import cc3d
2 | import fastremap
3 | import numpy as np
4 |
5 | from typing import List, Tuple
6 |
7 |
8 | class ConnectedComponentLabeling:
9 | """Connected component labelling"""
10 | merge_mask = None
11 |
12 | def __init__(self):
13 | pass
14 |
15 | @staticmethod
16 | def keep_multi_topk_target(mask: np.ndarray):
17 | pass
18 |
19 | @classmethod
20 |     def extract_non_zeros_mask(cls, masks: np.ndarray, area_least: int) -> Tuple[list, List[np.ndarray]]:
21 | """
22 | Extract the non-zeros mask from multi-channel masks.
23 | :param masks: multi-channel masks.
24 | :param area_least: the least area of connected region.
25 | :return:
26 | """
27 | mask_shape = masks.shape
28 | merge_mask = np.zeros(mask_shape[1:], np.uint8)
29 | out_idx = []
30 | out_masks = []
31 | for i in range(mask_shape[0]):
32 | t_mask = masks[i].copy()
33 | if np.sum(t_mask) < area_least:
34 | continue
35 | merge_mask[t_mask != 0] = i + 1
36 | out_idx.append(i + 1)
37 | out_masks.append(t_mask)
38 | cls.merge_mask = merge_mask
39 |
40 | return out_idx, out_masks
41 |
42 | @staticmethod
43 | def keep_topk_target(mask: np.ndarray, k: int, area_least: int, out_mask: np.ndarray, out_label: int = 1) -> None:
44 | """Keep the topK largest connected region from single channel mask.
45 | :param mask: single channel mask.
46 | :param k: top k.
47 | :param area_least: the least area of connected region.
48 | :param out_mask: return target mask in place.
49 | :param out_label: target label.
50 | :return: None
51 | """
52 | labeled_mask = cc3d.connected_components(mask, connectivity=26)
53 | areas = {}
54 | for label, extracted in cc3d.each(labeled_mask, binary=True, in_place=True):
55 | areas[label] = fastremap.foreground(extracted)
56 | candidates = sorted(areas.items(), key=lambda item: item[1], reverse=True)
57 |
58 | k = max(1, k)
59 | for i in range(min(k, len(candidates))):
60 | if candidates[i][1] > area_least:
61 | out_mask[labeled_mask == int(candidates[i][0])] = out_label
62 |
63 |
64 |
--------------------------------------------------------------------------------
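Usage sketch for `keep_topk_target` with a synthetic mask containing two components of different sizes; only the largest one is copied to `out_mask`:

```
import numpy as np

mask = np.zeros((1, 10, 10), dtype=np.uint8)
mask[0, 1:5, 1:5] = 1  # 16-voxel component
mask[0, 7:9, 7:9] = 1  # 4-voxel component

out = np.zeros_like(mask)
ConnectedComponentLabeling.keep_topk_target(mask, k=1, area_least=5,
                                            out_mask=out, out_label=1)
print(out.sum())  # 16 -- only the larger component survives
```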
/ToothFairy/algorithms/Yusheng_Liu/requirements.txt:
--------------------------------------------------------------------------------
1 | #
2 | # This file is autogenerated by pip-compile with Python 3.10
3 | # by the following command:
4 | #
5 | # pip-compile --resolver=backtracking
6 | #
7 | arrow==1.2.3
8 | # via jinja2-time
9 | binaryornot==0.4.4
10 | # via cookiecutter
11 | build==0.10.0
12 | # via pip-tools
13 | batchgenerators>=0.25
14 |
15 | certifi==2022.12.7
16 | # via requests
17 | chardet==5.1.0
18 | # via binaryornot
19 | charset-normalizer==3.1.0
20 | # via requests
21 | click==8.1.3
22 | # via
23 | # cookiecutter
24 | # evalutils
25 | # pip-tools
26 | cookiecutter==2.1.1
27 | # via evalutils
28 | dynamic_network_architectures==0.2
29 | evalutils==0.4.0
30 | # via -r requirements.in
31 | idna==3.4
32 | # via requests
33 | imageio[tifffile]==2.26.0
34 | # via evalutils
35 | jinja2==3.1.2
36 | # via
37 | # cookiecutter
38 | # jinja2-time
39 | jinja2-time==0.2.0
40 | # via cookiecutter
41 | joblib==1.2.0
42 | # via scikit-learn
43 | markupsafe==2.1.2
44 | # via jinja2
45 | medpy==0.4.0
46 | numpy==1.24.2
47 | # via
48 | # evalutils
49 | # imageio
50 | # pandas
51 | # scikit-learn
52 | # scipy
53 | # tifffile
54 | packaging==23.0
55 | # via build
56 | pandas==1.5.3
57 | # via evalutils
58 | pillow==9.4.0
59 | # via imageio
60 | pip-tools==6.12.3
61 | # via evalutils
62 | pyproject-hooks==1.0.0
63 | # via build
64 | python-dateutil==2.8.2
65 | # via
66 | # arrow
67 | # pandas
68 | python-slugify==8.0.1
69 | # via cookiecutter
70 | pytz==2022.7.1
71 | # via pandas
72 | pyyaml==6.0
73 | # via cookiecutter
74 | requests==2.28.2
75 | # via cookiecutter
76 | scikit-learn==1.2.2
77 | # via evalutils
78 | scikit-image==0.19.3
79 | scipy==1.10.1
80 | # via
81 | # evalutils
82 | # scikit-learn
83 | simpleitk==2.2.1
84 | # via evalutils
85 | six==1.16.0
86 | # via python-dateutil
87 | text-unidecode==1.3
88 | # via python-slugify
89 | threadpoolctl==3.1.0
90 | # via scikit-learn
91 | tifffile==2023.3.15
92 | # via imageio
93 | tomli==2.0.1
94 | # via
95 | # build
96 | # pyproject-hooks
97 | urllib3==1.26.15
98 | # via requests
99 | wheel==0.40.0
100 | # via pip-tools
101 | torchio==0.18.91
102 |
103 | # The following packages are considered to be unsafe in a requirements file:
104 | # pip
105 | # setuptools
106 |
--------------------------------------------------------------------------------
/ToothFairy/evaluation/.dockerignore:
--------------------------------------------------------------------------------
1 | test/
2 | .git/
3 | *.tar.gz
4 |
--------------------------------------------------------------------------------
/ToothFairy/evaluation/.gitattributes:
--------------------------------------------------------------------------------
1 | ground-truth/* filter=lfs diff=lfs merge=lfs -text
2 | test/* filter=lfs diff=lfs merge=lfs -text
3 |
--------------------------------------------------------------------------------
/ToothFairy/evaluation/.github/workflows/ci.yml:
--------------------------------------------------------------------------------
1 | name: CI
2 |
3 | on: [push, pull_request]
4 |
5 | env:
6 | PYTHON_VERSION: '3.10'
7 |
8 | jobs:
9 |
10 | tests:
11 | runs-on: ubuntu-latest
12 | steps:
13 | - name: Install Python ${{ env.PYTHON_VERSION }}
14 | uses: actions/setup-python@v4
15 | with:
16 | python-version: ${{ env.PYTHON_VERSION }}
17 | - uses: actions/checkout@v3
18 | - name: Build the containers
19 | run: |
20 | ./build.sh
21 | - name: Run the tests
22 | run: |
23 | ./test.sh
24 |
--------------------------------------------------------------------------------
/ToothFairy/evaluation/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | env/
12 | build/
13 | develop-eggs/
14 | dist/
15 | downloads/
16 | eggs/
17 | .eggs/
18 | lib/
19 | lib64/
20 | parts/
21 | sdist/
22 | var/
23 | wheels/
24 | *.egg-info/
25 | .installed.cfg
26 | *.egg
27 |
28 | # PyInstaller
29 | # Usually these files are written by a python script from a template
30 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
31 | *.manifest
32 | *.spec
33 |
34 | # Installer logs
35 | pip-log.txt
36 | pip-delete-this-directory.txt
37 |
38 | # Unit test / coverage reports
39 | htmlcov/
40 | .tox/
41 | .coverage
42 | .coverage.*
43 | .cache
44 | nosetests.xml
45 | coverage.xml
46 | *.cover
47 | .hypothesis/
48 | .pytest_cache/
49 |
50 | # Translations
51 | *.mo
52 | *.pot
53 |
54 | # Django stuff:
55 | *.log
56 | local_settings.py
57 |
58 | # Flask stuff:
59 | instance/
60 | .webassets-cache
61 |
62 | # Scrapy stuff:
63 | .scrapy
64 |
65 | # Sphinx documentation
66 | docs/_build/
67 |
68 | # PyBuilder
69 | target/
70 |
71 | # Jupyter Notebook
72 | .ipynb_checkpoints
73 |
74 | # pyenv
75 | .python-version
76 |
77 | # celery beat schedule file
78 | celerybeat-schedule
79 |
80 | # SageMath parsed files
81 | *.sage.py
82 |
83 | # dotenv
84 | .env
85 |
86 | # virtualenv
87 | .venv
88 | venv/
89 | ENV/
90 |
91 | # Spyder project settings
92 | .spyderproject
93 | .spyproject
94 |
95 | # Rope project settings
96 | .ropeproject
97 |
98 | # mkdocs documentation
99 | /site
100 |
101 | # mypy
102 | .mypy_cache/
103 |
104 | # Pycharm
105 | .idea/
106 |
--------------------------------------------------------------------------------
/ToothFairy/evaluation/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM python:3.10-slim
2 |
3 | RUN groupadd -r user && useradd -m --no-log-init -r -g user user
4 |
5 | RUN mkdir -p /opt/app /input /output \
6 | && chown user:user /opt/app /input /output
7 |
8 | USER user
9 | WORKDIR /opt/app
10 |
11 | ENV PATH="/home/user/.local/bin:${PATH}"
12 |
13 | RUN python -m pip install --user -U pip && python -m pip install --user pip-tools
14 |
15 |
16 |
17 | COPY --chown=user:user requirements.txt /opt/app/
18 | RUN python -m piptools sync requirements.txt
19 |
20 | COPY --chown=user:user ground-truth /opt/app/ground-truth
21 | COPY --chown=user:user evaluation.py /opt/app/
22 |
23 | ENTRYPOINT [ "python", "-m", "evaluation" ]
24 |
25 |
26 |
--------------------------------------------------------------------------------
/ToothFairy/evaluation/README.md:
--------------------------------------------------------------------------------
1 | # ToothFairy Evaluation
2 |
3 | This is the source code for the evaluation of the submitted algorithms on the
4 | Grand-Challenge platform. The code for computing the Dice coefficient and the
5 | 95th-percentile Hausdorff distance (HD95) can be found inside the `evaluation.py` file, under the
6 | `compute_dice()` and `compute_hd95()` functions.
7 |
8 | If you find any problem related to these functions, please report it to us by
9 | opening an issue in this repository.
10 |
--------------------------------------------------------------------------------
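The actual implementations live in `evaluation.py`; for orientation, a minimal sketch of one common way to compute these two metrics (the function names match the README, the bodies are illustrative and skip edge cases such as empty masks):

```
import numpy as np
from scipy import ndimage

def compute_dice(pred: np.ndarray, gt: np.ndarray) -> float:
    pred, gt = pred.astype(bool), gt.astype(bool)
    return 2.0 * np.logical_and(pred, gt).sum() / (pred.sum() + gt.sum())

def compute_hd95(pred: np.ndarray, gt: np.ndarray,
                 spacing=(1.0, 1.0, 1.0)) -> float:
    pred, gt = pred.astype(bool), gt.astype(bool)
    # boundary voxels = mask minus its erosion
    s_pred = pred & ~ndimage.binary_erosion(pred)
    s_gt = gt & ~ndimage.binary_erosion(gt)
    # distance of every voxel to the nearest boundary voxel of the other mask
    dt_gt = ndimage.distance_transform_edt(~s_gt, sampling=spacing)
    dt_pred = ndimage.distance_transform_edt(~s_pred, sampling=spacing)
    # symmetric 95th percentile over both directed surface distances
    return float(np.percentile(np.hstack((dt_gt[s_pred], dt_pred[s_gt])), 95))
```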
/ToothFairy/evaluation/build.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | SCRIPTPATH="$( cd "$(dirname "$0")" ; pwd -P )"
3 |
4 | docker build -t toothfairy_evaluation "$SCRIPTPATH"
5 |
--------------------------------------------------------------------------------
/ToothFairy/evaluation/export.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | ./build.sh
4 |
5 | docker save toothfairy_evaluation | gzip -c > ToothFairy_Evaluation.tar.gz
6 |
--------------------------------------------------------------------------------
/ToothFairy/evaluation/requirements.txt:
--------------------------------------------------------------------------------
1 | #
2 | # This file is autogenerated by pip-compile with Python 3.10
3 | # by the following command:
4 | #
5 | # pip-compile --resolver=backtracking
6 | #
7 | arrow==1.2.3
8 | # via jinja2-time
9 | binaryornot==0.4.4
10 | # via cookiecutter
11 | build==0.10.0
12 | # via pip-tools
13 | certifi==2022.12.7
14 | # via requests
15 | chardet==5.1.0
16 | # via binaryornot
17 | charset-normalizer==3.1.0
18 | # via requests
19 | click==8.1.3
20 | # via
21 | # cookiecutter
22 | # evalutils
23 | # pip-tools
24 | cookiecutter==2.1.1
25 | # via evalutils
26 | evalutils==0.4.0
27 | # via -r requirements.in
28 | idna==3.4
29 | # via requests
30 | imageio[tifffile]==2.26.0
31 | # via evalutils
32 | jinja2==3.1.2
33 | # via
34 | # cookiecutter
35 | # jinja2-time
36 | jinja2-time==0.2.0
37 | # via cookiecutter
38 | joblib==1.2.0
39 | # via scikit-learn
40 | markupsafe==2.1.2
41 | # via jinja2
42 | numpy==1.24.2
43 | # via
44 | # evalutils
45 | # imageio
46 | # pandas
47 | # scikit-learn
48 | # scipy
49 | # tifffile
50 | packaging==23.0
51 | # via build
52 | pandas==1.5.3
53 | # via evalutils
54 | pillow==9.4.0
55 | # via imageio
56 | pip-tools==6.12.3
57 | # via evalutils
58 | pyproject-hooks==1.0.0
59 | # via build
60 | python-dateutil==2.8.2
61 | # via
62 | # arrow
63 | # pandas
64 | python-slugify==8.0.1
65 | # via cookiecutter
66 | pytz==2022.7.1
67 | # via pandas
68 | pyyaml==6.0
69 | # via cookiecutter
70 | requests==2.28.2
71 | # via cookiecutter
72 | scikit-image==0.22.0
73 | scikit-learn==1.2.2
74 | # via evalutils
75 | scipy==1.10.1
76 | # via
77 | # evalutils
78 | # scikit-learn
79 | simpleitk==2.2.1
80 | # via evalutils
81 | six==1.16.0
82 | # via python-dateutil
83 | text-unidecode==1.3
84 | # via python-slugify
85 | threadpoolctl==3.1.0
86 | # via scikit-learn
87 | tifffile==2023.3.15
88 | # via imageio
89 | tomli==2.0.1
90 | # via
91 | # build
92 | # pyproject-hooks
93 | urllib3==1.26.15
94 | # via requests
95 | wheel==0.40.0
96 | # via pip-tools
97 |
98 | # The following packages are considered to be unsafe in a requirements file:
99 | # pip
100 | # setuptools
101 |
--------------------------------------------------------------------------------
/ToothFairy/evaluation/test.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | SCRIPTPATH="$( cd "$(dirname "$0")" ; pwd -P )"
4 |
5 | ./build.sh
6 |
7 | VOLUME_SUFFIX=$(dd if=/dev/urandom bs=32 count=1 | md5sum | cut --delimiter=' ' --fields=1)
8 |
9 | MEM_LIMIT="4g"
10 |
11 | docker volume create toothfairy_evaluation-output-$VOLUME_SUFFIX
12 |
13 | # Do not change any of the parameters to docker run, these are fixed
14 | docker run --rm \
15 | --memory="${MEM_LIMIT}" \
16 | --memory-swap="${MEM_LIMIT}" \
17 | --network="none" \
18 | --cap-drop="ALL" \
19 | --security-opt="no-new-privileges" \
20 | --shm-size="128m" \
21 | --pids-limit="256" \
22 | -v $SCRIPTPATH/test:/input/ \
23 | -v toothfairy_evaluation-output-$VOLUME_SUFFIX:/output/ \
24 | toothfairy_evaluation
25 |
26 | docker run --rm \
27 | -v toothfairy_evaluation-output-$VOLUME_SUFFIX:/output/ \
28 | python:3.10-slim cat /output/metrics.json | python -m json.tool
29 |
30 | docker volume rm toothfairy_evaluation-output-$VOLUME_SUFFIX
31 |
--------------------------------------------------------------------------------
/ToothFairy/evaluation/test/predictions.json:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:78fbb12ac51324d304a9c29bd4555f1134da6deabec224058feea47d785869f5
3 | size 4297
4 |
--------------------------------------------------------------------------------
/ToothFairy2/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | # ToothFairy2 Challenge (MICCAI 2024) - Multi-Structure Segmentation in CBCT Volumes
6 |
7 | This is the second edition of the ToothFairy challenge, organized by the University of Modena and Reggio Emilia in collaboration with Radboud University. The challenge is hosted on Grand Challenge and is part of MICCAI 2024.
8 |
9 | For any information, please visit the [challenge page](https://toothfairy2.grand-challenge.org/) or the [Ditto website](https://ditto.ing.unimore.it/toothfairy2/).
10 |
11 | We are currently working on an initial example algorithm as a starting point for your submissions. In the meantime, you can check the template from the previous edition, which is largely the same.
12 |
--------------------------------------------------------------------------------
/ToothFairy2/algorithm/.dockerignore:
--------------------------------------------------------------------------------
1 | test/
2 | .git/
3 | *.tar.gz
4 |
--------------------------------------------------------------------------------
/ToothFairy2/algorithm/.gitattributes:
--------------------------------------------------------------------------------
1 | ground-truth/* filter=lfs diff=lfs merge=lfs -text
2 | test/* filter=lfs diff=lfs merge=lfs -text
3 |
--------------------------------------------------------------------------------
/ToothFairy2/algorithm/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM pytorch/pytorch
2 |
3 | RUN groupadd -r user && useradd -m --no-log-init -r -g user user
4 |
5 | RUN mkdir -p /opt/app /input /output \
6 | && chown user:user /opt/app /input /output
7 |
8 | USER user
9 | WORKDIR /opt/app
10 |
11 | ENV PATH="/home/user/.local/bin:${PATH}"
12 |
13 | RUN python -m pip install --user -U pip && python -m pip install --user pip-tools
14 |
15 |
16 |
17 | COPY --chown=user:user requirements.txt /opt/app/
18 | COPY --chown=user:user resources /opt/app/
19 | # RUN python -m piptools sync requirements.txt
20 | RUN python -m pip install --user -r requirements.txt
21 |
22 |
23 | COPY --chown=user:user process.py /opt/app/
24 | # COPY --chown=user:user PosPadUNet3D.py /opt/app/
25 | # COPY --chown=user:user checkpoints.pth /opt/app/
26 |
27 | ENTRYPOINT [ "python", "-m", "process" ]
28 |
--------------------------------------------------------------------------------
/ToothFairy2/algorithm/README.md:
--------------------------------------------------------------------------------
1 | # ToothFairy Algorithm
2 | This is a template that you can use to develop and test your algorithm.
3 |
4 | To run it, you'll need to install [docker](https://docs.docker.com/engine/install/) and [git](https://git-scm.com/book/en/v2/Getting-Started-Installing-Git)
5 |
6 | First of all, you have to clone this repository and `cd` in the algorithm directory:
7 | ```
8 | git clone https://github.com/AImageLab-zip/ToothFairy.git
9 | cd ToothFairy/algorithm
10 | ```
11 |
12 | All the code has been tested on Linux (6.2.8-arch1-1)
13 |
14 | ## Testing Your Algorithm
15 | To test your algorithm, you can use the samples provided in the `test` folder,
16 | which are already converted to the `.mha` format that grand-challenge uses
17 | behind the scenes. If you wish to load more test samples, you will have to
18 | convert all the `data.npy` files to `.mha`. This conversion can be done with
19 | the SimpleITK library for Python (see the sketch after this file).
20 |
21 | Inside the `process.py` file you have to
22 | add all the steps required by your algorithm. A simple example is already
23 | provided: a `SimpleNet` is declared (a `torch.nn.Module`) and inside the
24 | `predict()` function I've already taken care of converting the `SimpleITK.Image`
25 | input to a `torch.Tensor`, and the output from a `torch.Tensor` back to a
26 | `SimpleITK.Image`. Feel free to modify this script, but keep in mind that
27 | GrandChallenge will give you *a single image* as input and wants *a single
28 | image* as output, both as a `SimpleITK.Image`.
29 |
30 | When you are ready, check that everything works properly by running `./test.sh`.
31 |
32 |
33 | ## Submit Your Algorithm
34 | Once you have checked that everything works properly using `test.sh`, you are ready to export your algorithm into a docker container using `./export.sh` and ship it to Grand-Challenge from the [submission page](https://toothfairy.grand-challenge.org/evaluation/challenge/submissions/create/) of the challenge. Be careful because you have a limited amount of submissions: 15 for the *Preliminary Test Phase*, 2 for the *Final Test Phase*.
35 |
36 |
37 |
--------------------------------------------------------------------------------
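A conversion sketch for the step described in "Testing Your Algorithm" above (the case name `P1` and the paths are hypothetical; the 0.3 mm isotropic spacing matches the values used elsewhere in this repository):

```
import numpy as np
import SimpleITK as sitk

volume = np.load('P1/data.npy')  # hypothetical input path
image = sitk.GetImageFromArray(volume.astype(np.float32))
image.SetSpacing((0.3, 0.3, 0.3))
sitk.WriteImage(image, 'test/images/cbct/P1.mha')
```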
/ToothFairy2/algorithm/build.ps1:
--------------------------------------------------------------------------------
1 | $ErrorActionPreference = "Stop"
2 | $SCRIPT_DIR = Split-Path -Parent $MyInvocation.MyCommand.Path
3 | $DOCKER_TAG = "toothfairy2-example-algorithm"
4 |
5 | docker build $SCRIPT_DIR `
6 | --platform=linux/amd64 `
7 | --quiet `
8 | --tag $DOCKER_TAG
9 |
--------------------------------------------------------------------------------
/ToothFairy2/algorithm/build.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | SCRIPTPATH="$( cd "$(dirname "$0")" ; pwd -P )"
3 |
4 | # docker build --no-cache -t toothfairy_algorithm "$SCRIPTPATH"
5 | docker build -t toothfairy_algorithm "$SCRIPTPATH"
6 |
--------------------------------------------------------------------------------
/ToothFairy2/algorithm/export.ps1:
--------------------------------------------------------------------------------
1 | # Define the Docker tag and construct the output file name
2 | $DOCKER_TAG = "toothfairy2-example-algorithm"
3 | $OUTPUT_FILE = "$DOCKER_TAG.tar.gz"
4 |
5 | # Save the Docker image to a tar file
6 | $TEMP_TAR = "$DOCKER_TAG.tar"
7 | docker save $DOCKER_TAG -o $TEMP_TAR
8 |
9 | # Path to the 7-Zip executable
10 | $SevenZipPath = "C:\Program Files\7-Zip\7z.exe"
11 |
12 | # Check if the 7-Zip executable exists
13 | if (-Not (Test-Path $SevenZipPath)) {
14 | Write-Error "7z.exe not found at path $SevenZipPath. Please check the installation."
15 | exit 1
16 | }
17 |
18 | # Compress the tar file to a .gz file using 7-Zip
19 | & $SevenZipPath a -tgzip $OUTPUT_FILE $TEMP_TAR
20 |
21 | # Remove the temporary tar file
22 | Remove-Item $TEMP_TAR
23 |
24 | Write-Host "Docker image saved and compressed to $OUTPUT_FILE"
25 |
--------------------------------------------------------------------------------
/ToothFairy2/algorithm/export.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | ./build.sh
4 |
5 | docker save toothfairy_algorithm | gzip -c > ToothFairy_Algorithm.tar.gz
6 |
--------------------------------------------------------------------------------
/ToothFairy2/algorithm/output/toothfairy_algorithm-output-aef7d44fa8239aa6ee100726344ed4c6/_data/results.json:
--------------------------------------------------------------------------------
1 | [{"outputs": [{"type": "metaio_image", "filename": "052ce942-40dd-4a15-95a5-cc632c4a3033.mha"}], "inputs": [{"type": "metaio_image", "filename": "052ce942-40dd-4a15-95a5-cc632c4a3033.mha"}], "error_messages": []}]
--------------------------------------------------------------------------------
/ToothFairy2/algorithm/process.py:
--------------------------------------------------------------------------------
1 | from pathlib import Path
2 | import SimpleITK as sitk
3 | import numpy as np
4 | import torch
5 |
6 | from evalutils import SegmentationAlgorithm
7 | from evalutils.validators import (
8 | UniquePathIndicesValidator,
9 | UniqueImagesValidator,
10 | )
11 |
12 |
13 | def get_default_device():
14 | if torch.cuda.is_available():
15 | return torch.device('cuda')
16 | return torch.device('cpu')
17 |
18 |
19 | def your_awesome_algorithm(input_tensor):
20 | # Just as example, provide your algorithm logic here
21 | output_tensor = (input_tensor > 1500)
22 |
23 | assert input_tensor.shape == output_tensor.shape, f"Your output tensor should have the same shape of the input tensor! {input_tensor.shape=} != {output_tensor.shape=}"
24 | return output_tensor.detach().cpu().numpy().squeeze().astype(np.uint8)
25 |
26 |
27 | class Toothfairy2_algorithm(SegmentationAlgorithm):
28 | def __init__(self):
29 | super().__init__(
30 | input_path=Path('/input/images/cbct/'),
31 | output_path=Path('/output/images/oral-pharyngeal-segmentation/'),
32 | validators=dict(
33 | input_image=(
34 | UniqueImagesValidator(),
35 | UniquePathIndicesValidator(),
36 | )
37 | ),
38 | )
39 | if not self._output_path.exists():
40 | self._output_path.mkdir(parents=True)
41 |
42 | @torch.no_grad()
43 | def predict(self, *, input_image: sitk.Image):
44 | input_array = sitk.GetArrayFromImage(input_image)
45 | input_tensor = torch.from_numpy(input_array.astype(np.float32))
46 | input_tensor = input_tensor[None, ...].to(get_default_device())
47 |
48 | output = your_awesome_algorithm(input_tensor)
49 |
50 | output = sitk.GetImageFromArray(output)
51 | return output
52 |
53 |
54 | if __name__ == "__main__":
55 | Toothfairy2_algorithm().process()
56 |
--------------------------------------------------------------------------------
/ToothFairy2/algorithm/requirements.txt:
--------------------------------------------------------------------------------
1 | #
2 | # This file is autogenerated by pip-compile with Python 3.10
3 | # by the following command:
4 | #
5 | # pip-compile --resolver=backtracking
6 | #
7 | arrow==1.2.3
8 | # via jinja2-time
9 | binaryornot==0.4.4
10 | # via cookiecutter
11 | build==0.10.0
12 | # via pip-tools
13 | certifi==2022.12.7
14 | # via requests
15 | chardet==5.1.0
16 | # via binaryornot
17 | charset-normalizer==3.1.0
18 | # via requests
19 | click==8.1.3
20 | # via
21 | # cookiecutter
22 | # evalutils
23 | # pip-tools
24 | cookiecutter==2.1.1
25 | # via evalutils
26 | evalutils==0.4.0
27 | # via -r requirements.in
28 | idna==3.4
29 | # via requests
30 | imageio[tifffile]==2.26.0
31 | # via evalutils
32 | jinja2==3.1.2
33 | # via
34 | # cookiecutter
35 | # jinja2-time
36 | jinja2-time==0.2.0
37 | # via cookiecutter
38 | joblib==1.2.0
39 | # via scikit-learn
40 | markupsafe==2.1.2
41 | # via jinja2
42 | numpy==1.24.2
43 | # via
44 | # evalutils
45 | # imageio
46 | # pandas
47 | # scikit-learn
48 | # scipy
49 | # tifffile
50 | packaging==23.0
51 | # via build
52 | pandas==1.5.3
53 | # via evalutils
54 | pillow==9.4.0
55 | # via imageio
56 | pip-tools==6.12.3
57 | # via evalutils
58 | pyproject-hooks==1.0.0
59 | # via build
60 | python-dateutil==2.8.2
61 | # via
62 | # arrow
63 | # pandas
64 | python-slugify==8.0.1
65 | # via cookiecutter
66 | pytz==2022.7.1
67 | # via pandas
68 | pyyaml==6.0
69 | # via cookiecutter
70 | requests==2.28.2
71 | # via cookiecutter
72 | scikit-learn==1.2.2
73 | # via evalutils
74 | scipy==1.10.1
75 | # via
76 | # evalutils
77 | # scikit-learn
78 | simpleitk==2.2.1
79 | # via evalutils
80 | six==1.16.0
81 | # via python-dateutil
82 | text-unidecode==1.3
83 | # via python-slugify
84 | threadpoolctl==3.1.0
85 | # via scikit-learn
86 | tifffile==2023.3.15
87 | # via imageio
88 | tomli==2.0.1
89 | # via
90 | # build
91 | # pyproject-hooks
92 | urllib3==1.26.15
93 | # via requests
94 | wheel==0.40.0
95 | # via pip-tools
96 | torchio==0.18.92
97 |
98 | # The following packages are considered to be unsafe in a requirements file:
99 | # pip
100 | # setuptools
101 |
--------------------------------------------------------------------------------
/ToothFairy2/algorithm/resources/README.md:
--------------------------------------------------------------------------------
1 | You can add to this folder anything you need to include in the Docker image,
2 | e.g., model weights or additional Python files.
3 |
--------------------------------------------------------------------------------
/ToothFairy2/algorithm/test.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | SCRIPTPATH="$( cd "$(dirname "$0")" ; pwd -P )"
4 |
5 | ./build.sh
6 |
7 | VOLUME_SUFFIX=$(dd if=/dev/urandom bs=32 count=1 | md5sum | cut --delimiter=' ' --fields=1)
8 | # Maximum is currently 30g, configurable in your algorithm image settings on grand challenge
9 | MEM_LIMIT="10g"
10 |
11 | docker volume create toothfairy_algorithm-output-$VOLUME_SUFFIX
12 |
13 | # Do not change any of the parameters to docker run, these are fixed
14 | # You are free to add --gpus all if you would like to locally test
15 | # your algorithm with your GPU hardware. In the grand-challenge container
16 | # all the docker will have a single T4 with 16GB and they will be
17 | # run using such flag
18 | docker run --rm \
19 | --memory="${MEM_LIMIT}" \
20 | --memory-swap="${MEM_LIMIT}" \
21 | --network="none" \
22 | --cap-drop="ALL" \
23 | --security-opt="no-new-privileges" \
24 | --shm-size="128m" \
25 | --pids-limit="256" \
26 | -v $SCRIPTPATH/test/:/input/ \
27 | -v toothfairy_algorithm-output-$VOLUME_SUFFIX:/output/ \
28 | toothfairy_algorithm
29 |
30 | docker run --rm \
31 | -v toothfairy_algorithm-output-$VOLUME_SUFFIX:/output/ \
32 | python:3.10-slim cat /output/results.json | python -m json.tool
33 | #
34 | # docker run --rm \
35 | # -v toothfairy_algorithm-output-$VOLUME_SUFFIX:/output/ \
36 | # python:3.10-slim ls -lah /output/images/inferior-alveolar-canal/
37 |
38 | cp -r /var/lib/docker/volumes/toothfairy_algorithm-output-$VOLUME_SUFFIX/ output
39 | chown llumetti:llumetti -R output
40 |
41 | docker volume rm toothfairy_algorithm-output-$VOLUME_SUFFIX
42 |
--------------------------------------------------------------------------------
/ToothFairy2/algorithm/test/.gitignore:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:ac016a87246c27aef2d546151a4b76a66d9a84326dbbd156f1ae4a30c558a18f
3 | size 9
4 |
--------------------------------------------------------------------------------
/ToothFairy2/evaluation/.dockerignore:
--------------------------------------------------------------------------------
1 | test/
2 | .git/
3 | *.tar.gz
4 |
--------------------------------------------------------------------------------
/ToothFairy2/evaluation/.gitattributes:
--------------------------------------------------------------------------------
1 | ground-truth/* filter=lfs diff=lfs merge=lfs -text
2 | test/* filter=lfs diff=lfs merge=lfs -text
3 |
--------------------------------------------------------------------------------
/ToothFairy2/evaluation/.github/workflows/ci.yml:
--------------------------------------------------------------------------------
1 | name: CI
2 |
3 | on: [push, pull_request]
4 |
5 | env:
6 | PYTHON_VERSION: '3.10'
7 |
8 | jobs:
9 |
10 | tests:
11 | runs-on: ubuntu-latest
12 | steps:
13 | - name: Install Python ${{ env.PYTHON_VERSION }}
14 | uses: actions/setup-python@v4
15 | with:
16 | python-version: ${{ env.PYTHON_VERSION }}
17 | - uses: actions/checkout@v3
18 | - name: Build the containers
19 | run: |
20 | ./build.sh
21 | - name: Run the tests
22 | run: |
23 | ./test.sh
24 |
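One caveat with this workflow: `actions/checkout@v3` does not fetch Git LFS objects by default, so the LFS-tracked `ground-truth/` and `test/` files arrive as pointer stubs. If the build step needs the real blobs, they can be pulled explicitly before `./build.sh` (a sketch, assuming `git-lfs` is available on the runner):

```bash
# Fetch the real LFS blobs after a plain checkout:
git lfs install
git lfs pull
```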
--------------------------------------------------------------------------------
/ToothFairy2/evaluation/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | env/
12 | build/
13 | develop-eggs/
14 | dist/
15 | downloads/
16 | eggs/
17 | .eggs/
18 | lib/
19 | lib64/
20 | parts/
21 | sdist/
22 | var/
23 | wheels/
24 | *.egg-info/
25 | .installed.cfg
26 | *.egg
27 |
28 | # PyInstaller
29 | # Usually these files are written by a python script from a template
30 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
31 | *.manifest
32 | *.spec
33 |
34 | # Installer logs
35 | pip-log.txt
36 | pip-delete-this-directory.txt
37 |
38 | # Unit test / coverage reports
39 | htmlcov/
40 | .tox/
41 | .coverage
42 | .coverage.*
43 | .cache
44 | nosetests.xml
45 | coverage.xml
46 | *.cover
47 | .hypothesis/
48 | .pytest_cache/
49 |
50 | # Translations
51 | *.mo
52 | *.pot
53 |
54 | # Django stuff:
55 | *.log
56 | local_settings.py
57 |
58 | # Flask stuff:
59 | instance/
60 | .webassets-cache
61 |
62 | # Scrapy stuff:
63 | .scrapy
64 |
65 | # Sphinx documentation
66 | docs/_build/
67 |
68 | # PyBuilder
69 | target/
70 |
71 | # Jupyter Notebook
72 | .ipynb_checkpoints
73 |
74 | # pyenv
75 | .python-version
76 |
77 | # celery beat schedule file
78 | celerybeat-schedule
79 |
80 | # SageMath parsed files
81 | *.sage.py
82 |
83 | # dotenv
84 | .env
85 |
86 | # virtualenv
87 | .venv
88 | venv/
89 | ENV/
90 |
91 | # Spyder project settings
92 | .spyderproject
93 | .spyproject
94 |
95 | # Rope project settings
96 | .ropeproject
97 |
98 | # mkdocs documentation
99 | /site
100 |
101 | # mypy
102 | .mypy_cache/
103 |
104 | # Pycharm
105 | .idea/
106 |
--------------------------------------------------------------------------------
/ToothFairy2/evaluation/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM python:3.10-slim
2 |
3 | RUN groupadd -r user && useradd -m --no-log-init -r -g user user
4 |
5 | RUN mkdir -p /opt/app /input /output \
6 | && chown user:user /opt/app /input /output
7 |
8 | USER user
9 | WORKDIR /opt/app
10 |
11 | ENV PATH="/home/user/.local/bin:${PATH}"
12 |
13 | RUN python -m pip install --user -U pip && python -m pip install --user pip-tools
14 |
15 |
16 |
17 | COPY --chown=user:user requirements.txt /opt/app/
18 | RUN python -m pip install --user -r requirements.txt
19 |
20 | COPY --chown=user:user ground-truth /opt/app/ground-truth
21 | COPY --chown=user:user evaluation.py /opt/app/
22 |
23 | ENTRYPOINT [ "python", "-m", "evaluation" ]
24 |
25 |
26 |
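A quick way to sanity-check the built image is to import the heavyweight dependencies through the image's own Python (a sketch; the `toothfairy_evaluation` tag comes from `build.sh` below):

```bash
# Verify that the pinned requirements resolved inside the image:
docker run --rm --entrypoint python toothfairy_evaluation \
  -c "import SimpleITK, medpy, evalutils; print('ok')"
```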
--------------------------------------------------------------------------------
/ToothFairy2/evaluation/README.md:
--------------------------------------------------------------------------------
1 | # ToothFairy Evaluation
2 |
3 | This is the source code for the evaluation of the algorithms submitted to the
4 | Grand-Challenge platform. The code that computes the Dice coefficient and the
5 | 95th percentile Hausdorff distance (HD95) can be found in the `evaluation.py`
6 | file, under the `compute_dice()` and `compute_hd95()` functions.
7 |
8 | If you find any problem with these metrics, please report it to us by opening
9 | an issue in this repository.
10 |
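For reference, a minimal sketch of how such metrics can be computed with MedPy and SimpleITK (both pinned in `requirements.txt`); the file names below are placeholders, not the actual paths used by `evaluation.py`:

```bash
python -c "
import SimpleITK as sitk
from medpy.metric.binary import dc, hd95

gt   = sitk.GetArrayFromImage(sitk.ReadImage('ground-truth/case.mha'))
pred = sitk.GetArrayFromImage(sitk.ReadImage('prediction/case.mha'))

# Dice coefficient and 95th percentile Hausdorff distance for label 1
print('dice:', dc(pred == 1, gt == 1))
print('hd95:', hd95(pred == 1, gt == 1))
"
```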
--------------------------------------------------------------------------------
/ToothFairy2/evaluation/build.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | SCRIPTPATH="$( cd "$(dirname "$0")" ; pwd -P )"
3 |
4 | docker build -t toothfairy_evaluation "$SCRIPTPATH"
5 |
--------------------------------------------------------------------------------
/ToothFairy2/evaluation/export.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | ./build.sh
4 |
5 | docker save toothfairy_evaluation | gzip -c > ToothFairy_Evaluation.tar.gz
6 |
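The resulting archive can be imported on another machine with `docker load`, which transparently handles the gzip compression:

```bash
docker load < ToothFairy_Evaluation.tar.gz
```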
--------------------------------------------------------------------------------
/ToothFairy2/evaluation/ground-truth/ToothFairy2F_012_0000.mha:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:2cf96b86e9a4de5c8fbda3294a9e714a800afb8ddbeb6f7713b7d75566557bbf
3 | size 45555558
4 |
--------------------------------------------------------------------------------
/ToothFairy2/evaluation/ground-truth/ToothFairy2F_027_0000.mha:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:799b1ee5261b6dee2bad696b08f1d84ac76a474e4857a2e67dd31702453c7569
3 | size 46059858
4 |
--------------------------------------------------------------------------------
/ToothFairy2/evaluation/ground-truth/ToothFairy2F_056_0000.mha:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:1fdbf9c1350c3c9faa8fa778d69eab43efa5995047b658cf2a47e494dbb7d1ed
3 | size 46059858
4 |
--------------------------------------------------------------------------------
/ToothFairy2/evaluation/ground-truth/ToothFairy2F_065_0000.mha:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:bf729c127c30b22a88ceb8f24eee3482d23fbb77b759bdfd90cef87f69240fee
3 | size 44210758
4 |
--------------------------------------------------------------------------------
/ToothFairy2/evaluation/requirements.txt:
--------------------------------------------------------------------------------
1 | #
2 | # This file is autogenerated by pip-compile with Python 3.10
3 | # by the following command:
4 | #
5 | # pip-compile --resolver=backtracking
6 | #
7 | arrow==1.2.3
8 | # via jinja2-time
9 | binaryornot==0.4.4
10 | # via cookiecutter
11 | build==0.10.0
12 | # via pip-tools
13 | certifi==2022.12.7
14 | # via requests
15 | chardet==5.1.0
16 | # via binaryornot
17 | charset-normalizer==3.1.0
18 | # via requests
19 | click==8.1.3
20 | # via
21 | # cookiecutter
22 | # evalutils
23 | # pip-tools
24 | cookiecutter==2.1.1
25 | # via evalutils
26 | evalutils==0.4.0
27 | # via -r requirements.in
28 | idna==3.4
29 | # via requests
30 | imageio[tifffile]
31 | # via evalutils
32 | jinja2==3.1.2
33 | # via
34 | # cookiecutter
35 | # jinja2-time
36 | jinja2-time==0.2.0
37 | # via cookiecutter
38 | joblib==1.2.0
39 | # via scikit-learn
40 | markupsafe==2.1.2
41 | # via jinja2
42 | numpy==1.24.2
43 | # via
44 | # evalutils
45 | # imageio
46 | # pandas
47 | # scikit-learn
48 | # scipy
49 | # tifffile
50 | packaging==23.0
51 | # via build
52 | pandas==1.5.3
53 | # via evalutils
54 | pillow==9.4.0
55 | # via imageio
56 | pip-tools==6.12.3
57 | # via evalutils
58 | pyproject-hooks==1.0.0
59 | # via build
60 | python-dateutil==2.8.2
61 | # via
62 | # arrow
63 | # pandas
64 | python-slugify==8.0.1
65 | # via cookiecutter
66 | pytz==2022.7.1
67 | # via pandas
68 | pyyaml==6.0
69 | # via cookiecutter
70 | requests==2.28.2
71 | # via cookiecutter
72 | scikit-image==0.22.0
73 | scikit-learn==1.2.2
74 | # via evalutils
75 | scipy==1.10.1
76 | # via
77 | # evalutils
78 | # scikit-learn
79 | simpleitk==2.2.1
80 | # via evalutils
81 | six==1.16.0
82 | # via python-dateutil
83 | text-unidecode==1.3
84 | # via python-slugify
85 | threadpoolctl==3.1.0
86 | # via scikit-learn
87 | tifffile==2023.3.15
88 | # via imageio
89 | tomli==2.0.1
90 | # via
91 | # build
92 | # pyproject-hooks
93 | urllib3==1.26.15
94 | # via requests
95 | wheel==0.40.0
96 | # via pip-tools
97 | MedPy==0.5.1
98 |
99 | # The following packages are considered to be unsafe in a requirements file:
100 | # pip
101 | # setuptools
102 |
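Note that `scikit-image` and `MedPy` carry no `# via` annotation, which suggests they were appended by hand; re-running pip-compile would drop them unless they are first added to `requirements.in`. Regeneration follows the command recorded in the file header:

```bash
# Re-lock dependencies from requirements.in (as recorded in the header above):
pip-compile --resolver=backtracking
```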
--------------------------------------------------------------------------------
/ToothFairy2/evaluation/test.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | SCRIPTPATH="$( cd "$(dirname "$0")" ; pwd -P )"
4 |
5 | ./build.sh
6 |
7 | VOLUME_SUFFIX=$(dd if=/dev/urandom bs=32 count=1 | md5sum | cut --delimiter=' ' --fields=1)
8 |
9 | MEM_LIMIT="4g"
10 |
11 | docker volume create toothfairy_evaluation-output-$VOLUME_SUFFIX
12 |
13 | # Do not change any of the parameters to docker run; these are fixed.
14 | docker run --rm \
15 | --memory="${MEM_LIMIT}" \
16 | --memory-swap="${MEM_LIMIT}" \
17 | --network="none" \
18 | --cap-drop="ALL" \
19 | --security-opt="no-new-privileges" \
20 | --shm-size="128m" \
21 | --pids-limit="256" \
22 | -v $SCRIPTPATH/test:/input/ \
23 | -v toothfairy_evaluation-output-$VOLUME_SUFFIX:/output/ \
24 | toothfairy_evaluation
25 |
26 | docker run --rm \
27 | -v toothfairy_evaluation-output-$VOLUME_SUFFIX:/output/ \
28 | python:3.10-slim cat /output/metrics.json | python -m json.tool
29 |
30 | docker volume rm toothfairy_evaluation-output-$VOLUME_SUFFIX
31 |
--------------------------------------------------------------------------------
/ToothFairy2/evaluation/test/F012/output/images/oral-pharyngeal-segmentation/README.md:
--------------------------------------------------------------------------------
1 | Place your algorithm output here and name it F012.mha
2 |
--------------------------------------------------------------------------------
/ToothFairy2/evaluation/test/F027/output/images/oral-pharyngeal-segmentation/README.md:
--------------------------------------------------------------------------------
1 | Place your algorithm output here and name it F027.mha
2 |
--------------------------------------------------------------------------------
/ToothFairy2/evaluation/test/F056/output/images/oral-pharyngeal-segmentation/README.md:
--------------------------------------------------------------------------------
1 | Place your algorithm output here and name it F056.mha
2 |
--------------------------------------------------------------------------------
/ToothFairy2/evaluation/test/F065/output/images/oral-pharyngeal-segmentation/README.md:
--------------------------------------------------------------------------------
1 | Place your algorithm output here and name it F065.mha
2 |
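The four case folders above mirror the output layout the algorithm container writes under `/output/`. Staging a local prediction for one case before running `./test.sh` then looks like this (the source path is a placeholder):

```bash
cp /path/to/my/prediction.mha \
   test/F065/output/images/oral-pharyngeal-segmentation/F065.mha
```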
--------------------------------------------------------------------------------
/ToothFairy2/evaluation/test/predictions.json:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:3a52c4972f8b29276123b2f21ff2601a3ff305b921735c7c6c166efc1584ce1c
3 | size 40043
4 |
--------------------------------------------------------------------------------