├── .github ├── ISSUE_TEMPLATE │ ├── bug_report.md │ ├── feature_request.md │ └── question.md ├── pull_request_template.md └── workflows │ ├── blossom-ci.yml │ ├── chatops.yml │ ├── code-format-check.yml │ ├── premerge-cpu.yml │ ├── update-model-info.yml │ └── weekly-tests-cpu.yml ├── .gitignore ├── .pre-commit-config.yaml ├── CONTRIBUTING.md ├── LICENSE ├── README.md ├── ci ├── bundle_custom_data.py ├── download_latest_bundle.py ├── get_bundle_list.py ├── get_bundle_requirements.py ├── get_changed_bundle.py ├── get_required_resources.py ├── get_required_resources.sh ├── install_scripts │ └── install_maisi_ct_generative_dependency.sh ├── prepare_schema.py ├── run_premerge_cpu.sh ├── run_premerge_gpu.sh ├── run_regular_tests_cpu.sh ├── unit_tests │ ├── runner.py │ ├── test_brain_image_synthesis_latent_diffusion_model.py │ ├── test_brats_mri_axial_slices_generative_diffusion.py │ ├── test_brats_mri_axial_slices_generative_diffusion_dist.py │ ├── test_brats_mri_generative_diffusion.py │ ├── test_brats_mri_generative_diffusion_dist.py │ ├── test_brats_mri_segmentation.py │ ├── test_brats_mri_segmentation_dist.py │ ├── test_cxr_image_synthesis_latent_diffusion_model.py │ ├── test_endoscopic_inbody_classification.py │ ├── test_endoscopic_inbody_classification_dist.py │ ├── test_endoscopic_tool_segmentation.py │ ├── test_endoscopic_tool_segmentation_dist.py │ ├── test_lung_nodule_ct_detection.py │ ├── test_maisi_ct_generative.py │ ├── test_maisi_ct_generative_dist.py │ ├── test_pancreas_ct_dints_segmentation.py │ ├── test_pancreas_ct_dints_segmentation_dist.py │ ├── test_pathology_nuclei_classification.py │ ├── test_pathology_nuclei_classification_dist.py │ ├── test_pathology_nuclei_segmentation_classification.py │ ├── test_pathology_nuclei_segmentation_classification_dist.py │ ├── test_pathology_nuclick_annotation.py │ ├── test_pathology_nuclick_annotation_dist.py │ ├── test_pathology_tumor_detection.py │ ├── test_pathology_tumor_detection_dist.py │ ├── 
test_renalStructures_CECT_segmentation.py │ ├── test_spleen_ct_segmentation.py │ ├── test_spleen_ct_segmentation_dist.py │ ├── test_spleen_deepedit_annotation.py │ ├── test_spleen_deepedit_annotation_dist.py │ ├── test_swin_unetr_btcv_segmentation.py │ ├── test_swin_unetr_btcv_segmentation_dist.py │ ├── test_vista2d.py │ ├── test_vista2d_dist.py │ ├── test_vista3d.py │ ├── test_vista3d_dist.py │ ├── test_wholeBody_ct_segmentation.py │ ├── test_wholeBody_ct_segmentation_dist.py │ └── utils.py ├── update_model_info.py ├── update_model_info_deparate.py ├── update_model_info_deparate_ngc.py ├── utils.py ├── utils_deparate.py ├── utils_deparate_ngc.py ├── verify_bundle.py ├── verify_hf_model.py └── verify_tensorrt.py ├── docs └── readme_template.md ├── hf_models ├── README.md ├── ct_chat │ ├── LICENSE │ ├── README.md │ └── metadata.json ├── exaonepath-crc-msi-predictor │ ├── LICENSE │ ├── README.md │ └── metadata.json ├── exaonepath │ ├── LICENSE │ ├── README.md │ └── metadata.json ├── llama3_vila_m3_13b │ ├── LICENSE │ ├── README.md │ └── metadata.json ├── llama3_vila_m3_3b │ ├── LICENSE │ ├── README.md │ └── metadata.json └── llama3_vila_m3_8b │ ├── LICENSE │ ├── README.md │ └── metadata.json ├── models ├── brain_image_synthesis_latent_diffusion_model │ ├── LICENSE │ ├── configs │ │ ├── inference.json │ │ ├── logging.conf │ │ └── metadata.json │ ├── docs │ │ ├── README.md │ │ └── figure_1.png │ ├── large_files.yml │ └── scripts │ │ ├── __init__.py │ │ └── sampler.py ├── brats_mri_axial_slices_generative_diffusion │ ├── LICENSE │ ├── configs │ │ ├── inference.json │ │ ├── inference_autoencoder.json │ │ ├── inference_trt.json │ │ ├── logging.conf │ │ ├── metadata.json │ │ ├── multi_gpu_train_autoencoder.json │ │ ├── multi_gpu_train_diffusion.json │ │ ├── train_autoencoder.json │ │ └── train_diffusion.json │ ├── docs │ │ ├── README.md │ │ └── data_license.txt │ ├── large_files.yml │ └── scripts │ │ ├── __init__.py │ │ ├── ldm_sampler.py │ │ ├── ldm_trainer.py │ │ ├── 
losses.py │ │ └── utils.py ├── brats_mri_generative_diffusion │ ├── LICENSE │ ├── configs │ │ ├── inference.json │ │ ├── inference_autoencoder.json │ │ ├── inference_trt.json │ │ ├── logging.conf │ │ ├── metadata.json │ │ ├── multi_gpu_train_autoencoder.json │ │ ├── multi_gpu_train_diffusion.json │ │ ├── train_autoencoder.json │ │ └── train_diffusion.json │ ├── docs │ │ ├── README.md │ │ └── data_license.txt │ ├── large_files.yml │ └── scripts │ │ ├── __init__.py │ │ ├── ldm_sampler.py │ │ ├── ldm_trainer.py │ │ ├── losses.py │ │ └── utils.py ├── brats_mri_segmentation │ ├── LICENSE │ ├── configs │ │ ├── evaluate.json │ │ ├── inference.json │ │ ├── inference_trt.json │ │ ├── logging.conf │ │ ├── metadata.json │ │ ├── multi_gpu_train.json │ │ └── train.json │ ├── docs │ │ ├── README.md │ │ └── data_license.txt │ ├── large_files.yml │ └── scripts │ │ └── prepare_datalist.py ├── breast_density_classification │ ├── LICENSE │ ├── configs │ │ ├── inference.json │ │ ├── logging.conf │ │ └── metadata.json │ ├── docs │ │ └── README.md │ ├── large_files.yml │ ├── sample_data │ │ ├── A │ │ │ ├── sample_A1.jpg │ │ │ ├── sample_A2.jpg │ │ │ ├── sample_A3.jpg │ │ │ └── sample_A4.jpg │ │ ├── B │ │ │ ├── sample_B1.jpg │ │ │ ├── sample_B2.jpg │ │ │ ├── sample_B3.jpg │ │ │ └── sample_B4.jpg │ │ ├── C │ │ │ ├── sample_C1.jpg │ │ │ ├── sample_C2.jpg │ │ │ ├── sample_C3.jpg │ │ │ └── sample_C4.jpg │ │ └── D │ │ │ ├── sample_D1.jpg │ │ │ ├── sample_D2.jpg │ │ │ ├── sample_D3.jpg │ │ │ └── sample_D4.jpg │ └── scripts │ │ ├── __init__.py │ │ ├── createList.py │ │ ├── create_dataset.py │ │ └── script.py ├── classification_template │ ├── LICENSE │ ├── configs │ │ ├── evaluate.yaml │ │ ├── inference.yaml │ │ ├── logging.conf │ │ ├── metadata.json │ │ ├── multi_gpu_train.yaml │ │ └── train.yaml │ ├── docs │ │ ├── README.md │ │ └── generate_data.ipynb │ └── large_files.yml ├── cxr_image_synthesis_latent_diffusion_model │ ├── LICENSE │ ├── configs │ │ ├── inference.json │ │ ├── logging.conf │ │ 
└── metadata.json │ ├── docs │ │ ├── README.md │ │ └── figure_1.png │ ├── large_files.yml │ └── scripts │ │ ├── __init__.py │ │ ├── sampler.py │ │ └── saver.py ├── endoscopic_inbody_classification │ ├── LICENSE │ ├── configs │ │ ├── evaluate.json │ │ ├── inference.json │ │ ├── inference_trt.json │ │ ├── logging.conf │ │ ├── metadata.json │ │ ├── multi_gpu_train.json │ │ └── train.json │ ├── docs │ │ ├── README.md │ │ └── data_license.txt │ ├── large_files.yml │ └── scripts │ │ ├── data_process.py │ │ └── export_to_onnx.py ├── endoscopic_tool_segmentation │ ├── LICENSE │ ├── configs │ │ ├── evaluate.json │ │ ├── inference.json │ │ ├── inference_trt.json │ │ ├── logging.conf │ │ ├── metadata.json │ │ ├── multi_gpu_evaluate.json │ │ ├── multi_gpu_train.json │ │ └── train.json │ ├── docs │ │ ├── README.md │ │ └── data_license.txt │ ├── large_files.yml │ └── scripts │ │ └── export_to_onnx.py ├── lung_nodule_ct_detection │ ├── LICENSE │ ├── configs │ │ ├── evaluate.json │ │ ├── inference.json │ │ ├── inference_trt.json │ │ ├── logging.conf │ │ ├── metadata.json │ │ └── train.json │ ├── docs │ │ ├── README.md │ │ └── data_license.txt │ ├── large_files.yml │ └── scripts │ │ ├── __init__.py │ │ ├── cocometric_ignite.py │ │ ├── detection_inferer.py │ │ ├── detection_saver.py │ │ ├── evaluator.py │ │ ├── trainer.py │ │ ├── utils.py │ │ └── warmup_scheduler.py ├── maisi_ct_generative │ ├── LICENSE │ ├── configs │ │ ├── image_median_statistics.json │ │ ├── inference.json │ │ ├── inference_trt.json │ │ ├── integration_test_masks.json │ │ ├── logging.conf │ │ ├── metadata.json │ │ ├── multi_gpu_train.json │ │ └── train.json │ ├── docs │ │ ├── README.md │ │ └── data_license.txt │ ├── large_files.yml │ └── scripts │ │ ├── __init__.py │ │ ├── augmentation.py │ │ ├── find_masks.py │ │ ├── quality_check.py │ │ ├── rectified_flow.py │ │ ├── sample.py │ │ ├── trainer.py │ │ └── utils.py ├── mednist_ddpm │ ├── LICENSE │ ├── configs │ │ ├── inference.yaml │ │ ├── logging.conf │ │ ├── 
metadata.json │ │ ├── train.yaml │ │ └── train_multigpu.yaml │ ├── docs │ │ ├── 2d_ddpm_bundle_tutorial.ipynb │ │ ├── README.md │ │ ├── sub_train.sh │ │ └── sub_train_multigpu.sh │ ├── large_files.yml │ └── scripts │ │ └── __init__.py ├── mednist_gan │ ├── LICENSE │ ├── configs │ │ ├── inference.json │ │ ├── logging.conf │ │ ├── metadata.json │ │ └── train.json │ ├── docs │ │ ├── README.md │ │ └── data_license.txt │ ├── models │ │ └── model.pt │ └── scripts │ │ ├── __init__.py │ │ └── losses.py ├── mednist_reg │ ├── LICENSE │ ├── configs │ │ ├── inference.yaml │ │ ├── logging.conf │ │ ├── metadata.json │ │ └── train.yaml │ ├── docs │ │ ├── README.md │ │ ├── data_license.txt │ │ └── examples │ │ │ ├── 008501_fixed_7.png │ │ │ ├── 008502_fixed_6.png │ │ │ ├── 008502_moving_6.png │ │ │ ├── 008502_pred_6.png │ │ │ ├── 008504_moving_7.png │ │ │ └── 008504_pred_7.png │ ├── large_files.yml │ └── scripts │ │ ├── __init__.py │ │ └── net.py ├── model_info.json ├── multi_organ_segmentation │ ├── LICENSE │ ├── configs │ │ ├── dataset_0.json │ │ ├── evaluate.yaml │ │ ├── inference.yaml │ │ ├── inference_trt.yaml │ │ ├── logging.conf │ │ ├── metadata.json │ │ ├── multi_gpu_train.yaml │ │ ├── search.yaml │ │ └── train.yaml │ ├── docs │ │ ├── README.md │ │ └── output_example.png │ └── large_files.yml ├── pancreas_ct_dints_segmentation │ ├── LICENSE │ ├── configs │ │ ├── evaluate.yaml │ │ ├── inference.yaml │ │ ├── inference_trt.yaml │ │ ├── logging.conf │ │ ├── metadata.json │ │ ├── multi_gpu_train.yaml │ │ ├── search.yaml │ │ └── train.yaml │ ├── docs │ │ ├── README.md │ │ └── data_license.txt │ ├── large_files.yml │ └── scripts │ │ ├── __init__.py │ │ ├── prepare_datalist.py │ │ └── search.py ├── pathology_nuclei_classification │ ├── LICENSE │ ├── configs │ │ ├── evaluate.json │ │ ├── inference.json │ │ ├── inference_trt.json │ │ ├── logging.conf │ │ ├── metadata.json │ │ ├── multi_gpu_evaluate.json │ │ ├── multi_gpu_train.json │ │ └── train.json │ ├── docs │ │ ├── README.md │ │ 
└── data_license.txt │ ├── large_files.yml │ └── scripts │ │ ├── __init__.py │ │ ├── data_process.py │ │ ├── dataset.py │ │ ├── handlers.py │ │ └── writer.py ├── pathology_nuclei_segmentation_classification │ ├── LICENSE │ ├── configs │ │ ├── evaluate.json │ │ ├── inference.json │ │ ├── inference_trt.json │ │ ├── logging.conf │ │ ├── metadata.json │ │ ├── multi_gpu_train.json │ │ └── train.json │ ├── docs │ │ ├── README.md │ │ └── data_license.txt │ ├── large_files.yml │ └── scripts │ │ └── prepare_patches.py ├── pathology_nuclick_annotation │ ├── LICENSE │ ├── configs │ │ ├── evaluate.json │ │ ├── inference.json │ │ ├── inference_trt.json │ │ ├── logging.conf │ │ ├── metadata.json │ │ ├── multi_gpu_evaluate.json │ │ ├── multi_gpu_train.json │ │ └── train.json │ ├── docs │ │ ├── README.md │ │ └── data_license.txt │ ├── large_files.yml │ └── scripts │ │ ├── __init__.py │ │ ├── data_process.py │ │ ├── dataset.py │ │ └── handlers.py ├── pathology_tumor_detection │ ├── LICENSE │ ├── configs │ │ ├── inference.json │ │ ├── inference_trt.json │ │ ├── logging.conf │ │ ├── metadata.json │ │ ├── multi_gpu_train.json │ │ └── train.json │ ├── docs │ │ ├── README.md │ │ └── data_license.txt │ ├── large_files.yml │ └── scripts │ │ ├── evaluate_froc.sh │ │ └── lesion_froc.py ├── pediatric_abdominal_ct_segmentation │ ├── LICENSE │ ├── configs │ │ ├── TS_test.json │ │ ├── evaluate-standalone-parallel.yaml │ │ ├── evaluate-standalone.yaml │ │ ├── evaluate.yaml │ │ ├── inference.yaml │ │ ├── inference_segresnet.yaml │ │ ├── inference_swinunetr.yaml │ │ ├── inference_trt.yaml │ │ ├── logging.conf │ │ ├── metadata.json │ │ ├── train-multigpu.yaml │ │ └── train.yaml │ ├── docs │ │ ├── README.md │ │ └── data_license.txt │ ├── large_files.yaml │ └── scripts │ │ ├── __init__.py │ │ ├── compute_metric.py │ │ ├── lr_scheduler.py │ │ ├── monai_utils.py │ │ ├── prepare_datalist_monailabel.py │ │ └── utils.py ├── prostate_mri_anatomy │ ├── LICENSE │ ├── configs │ │ ├── inference.json │ │ ├── 
logging.conf │ │ ├── metadata.json │ │ └── train.json │ ├── docs │ │ └── README.md │ ├── large_files.yaml │ └── scripts │ │ └── center_crop.py ├── renalStructures_CECT_segmentation │ ├── LICENSE │ ├── configs │ │ ├── evaluate.json │ │ ├── inference.json │ │ ├── logging.conf │ │ ├── metadata.json │ │ └── train.json │ ├── docs │ │ ├── README.md │ │ └── data_license.txt │ ├── large_files.yml │ └── scripts │ │ ├── __init__.py │ │ ├── download_data.py │ │ └── my_transforms.py ├── renalStructures_UNEST_segmentation │ ├── LICENSE │ ├── configs │ │ ├── inference.json │ │ ├── logging.conf │ │ ├── metadata.json │ │ ├── multi_gpu_train.json │ │ └── train.json │ ├── docs │ │ ├── README.md │ │ ├── demos.png │ │ ├── renal.png │ │ ├── unest.png │ │ └── val_dice.png │ ├── large_files.yml │ └── scripts │ │ ├── __init__.py │ │ └── networks │ │ ├── __init__.py │ │ ├── nest │ │ ├── __init__.py │ │ └── utils.py │ │ ├── nest_transformer_3D.py │ │ ├── patchEmbed3D.py │ │ ├── unest.py │ │ └── unest_block.py ├── segmentation_template │ ├── LICENSE │ ├── configs │ │ ├── inference.yaml │ │ ├── logging.conf │ │ ├── metadata.json │ │ ├── multi_gpu_train.yaml │ │ ├── test.yaml │ │ └── train.yaml │ ├── docs │ │ ├── README.md │ │ ├── generate_data.ipynb │ │ ├── inference.sh │ │ ├── run_monailabel.sh │ │ ├── test.sh │ │ ├── train.sh │ │ ├── train_multigpu.sh │ │ └── visualise_inference.ipynb │ └── large_files.yml ├── spleen_ct_segmentation │ ├── LICENSE │ ├── configs │ │ ├── evaluate.json │ │ ├── inference.json │ │ ├── inference_trt.json │ │ ├── logging.conf │ │ ├── metadata.json │ │ ├── multi_gpu_evaluate.json │ │ ├── multi_gpu_train.json │ │ └── train.json │ ├── docs │ │ ├── README.md │ │ └── data_license.txt │ └── large_files.yml ├── spleen_deepedit_annotation │ ├── LICENSE │ ├── configs │ │ ├── evaluate.json │ │ ├── inference.json │ │ ├── inference_trt.json │ │ ├── logging.conf │ │ ├── metadata.json │ │ ├── multi_gpu_train.json │ │ └── train.json │ ├── docs │ │ ├── README.md │ │ └── 
data_license.txt │ ├── large_files.yml │ └── scripts │ │ ├── __init__.py │ │ ├── early_stop_score_function.py │ │ └── transforms.py ├── swin_unetr_btcv_segmentation │ ├── LICENSE │ ├── configs │ │ ├── evaluate.json │ │ ├── inference.json │ │ ├── inference_trt.json │ │ ├── logging.conf │ │ ├── metadata.json │ │ ├── multi_gpu_train.json │ │ └── train.json │ ├── docs │ │ ├── README.md │ │ └── data_license.txt │ └── large_files.yml ├── valve_landmarks │ ├── LICENSE │ ├── configs │ │ ├── inference.json │ │ ├── metadata.json │ │ └── train.json │ ├── docs │ │ ├── README.md │ │ ├── prediction_example.png │ │ └── view_results.ipynb │ ├── large_files.yml │ └── scripts │ │ ├── __init__.py │ │ └── valve_landmarks.py ├── ventricular_short_axis_3label │ ├── LICENSE │ ├── configs │ │ ├── inference.json │ │ ├── logging.conf │ │ ├── metadata.json │ │ └── train.json │ ├── docs │ │ ├── README.md │ │ └── visualise.ipynb │ └── large_files.yml ├── vista2d │ ├── LICENSE │ ├── configs │ │ ├── hyper_parameters.yaml │ │ ├── inference.json │ │ ├── inference_trt.json │ │ └── metadata.json │ ├── docs │ │ ├── README.md │ │ └── data_license.txt │ ├── download_preprocessor │ │ ├── all_file_downloader.py │ │ ├── cellpose_agreement.png │ │ ├── cellpose_links.png │ │ ├── data_tree.png │ │ ├── generate_json.py │ │ ├── kaggle_download.png │ │ ├── omnipose_download.png │ │ ├── process_data.py │ │ ├── readme.md │ │ ├── tissuenet_download.png │ │ ├── tissuenet_login.png │ │ └── urls.txt │ ├── large_files.yml │ └── scripts │ │ ├── __init__.py │ │ ├── cell_distributed_weighted_sampler.py │ │ ├── components.py │ │ ├── utils.py │ │ └── workflow.py ├── vista3d │ ├── LICENSE │ ├── configs │ │ ├── batch_inference.json │ │ ├── evaluate.json │ │ ├── inference.json │ │ ├── inference_trt.json │ │ ├── logging.conf │ │ ├── metadata.json │ │ ├── mgpu_evaluate.json │ │ ├── msd_task09_spleen_folds.json │ │ ├── multi_gpu_train.json │ │ ├── train.json │ │ └── train_continual.json │ ├── docs │ │ ├── README.md │ │ ├── 
data.md │ │ ├── data_license.txt │ │ ├── finetune.md │ │ └── inference.md │ ├── large_files.yml │ └── scripts │ │ ├── __init__.py │ │ ├── early_stop_score_function.py │ │ ├── evaluator.py │ │ ├── inferer.py │ │ └── trainer.py ├── wholeBody_ct_segmentation │ ├── LICENSE │ ├── configs │ │ ├── evaluate.json │ │ ├── inference.json │ │ ├── inference_trt.json │ │ ├── logging.conf │ │ ├── metadata.json │ │ ├── multi_gpu_evaluate.json │ │ ├── multi_gpu_train.json │ │ └── train.json │ ├── docs │ │ ├── README.md │ │ └── data_license.txt │ └── large_files.yml └── wholeBrainSeg_Large_UNEST_segmentation │ ├── LICENSE │ ├── configs │ ├── inference.json │ ├── logging.conf │ ├── metadata.json │ ├── multi_gpu_train.json │ └── train.json │ ├── docs │ ├── 3DSlicer_use.png │ ├── README.md │ ├── demo.png │ ├── training.png │ ├── unest.png │ └── wholebrain.png │ ├── large_files.yml │ └── scripts │ ├── __init__.py │ └── networks │ ├── __init__.py │ ├── nest │ ├── __init__.py │ └── utils.py │ ├── nest_transformer_3D.py │ ├── patchEmbed3D.py │ ├── unest_base_patch_4.py │ └── unest_block.py ├── pyproject.toml ├── requirements-dev.txt ├── requirements-update-model.txt ├── requirements.txt ├── runtests.sh └── setup.cfg /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Describe the bug** 11 | A clear and concise description of what the bug is. 12 | 13 | **To Reproduce** 14 | Steps to reproduce the behavior: 15 | 1. Go to '...' 16 | 2. Install '....' 17 | 3. Run commands '....' 18 | 19 | **Expected behavior** 20 | A clear and concise description of what you expected to happen. 21 | 22 | **Screenshots** 23 | If applicable, add screenshots to help explain your problem. 
24 | 25 | **Environment** 26 | 27 | Ensuring you use the relevant python executable, please paste the output of: 28 | 29 | ``` 30 | python -c 'import monai; monai.config.print_debug_info()' 31 | ``` 32 | 33 | **Additional context** 34 | Add any other context about the problem here. 35 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for this project 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Is your feature request related to a problem? Please describe.** 11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] 12 | 13 | **Describe the solution you'd like** 14 | A clear and concise description of what you want to happen. 15 | 16 | **Describe alternatives you've considered** 17 | A clear and concise description of any alternative solutions or features you've considered. 18 | 19 | **Additional context** 20 | Add any other context or screenshots about the feature request here. 21 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/question.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Question (please use the Discussion tab) 3 | about: https://github.com/Project-MONAI/model-zoo/discussions 4 | title: 'Please use MONAI Model Zoo Discussion tab for questions' 5 | labels: '' 6 | assignees: '' 7 | --- 8 | 9 | **Please use MONAI Model Zoo's Discussions tab** 10 | For questions relating to MONAI usage, please do not create an issue. 11 | 12 | Instead, use [MONAI Model Zoo's GitHub Discussions tab](https://github.com/Project-MONAI/model-zoo/discussions). This can be found next to Issues and Pull Requests along the top of our repository. 
13 | -------------------------------------------------------------------------------- /.github/pull_request_template.md: -------------------------------------------------------------------------------- 1 | Fixes # . 2 | 3 | ### Description 4 | A few sentences describing the changes proposed in this pull request. 5 | 6 | ### Status 7 | **Ready/Work in progress/Hold** 8 | 9 | ### Please ensure all the checkboxes: 10 | 11 | - [x] Codeformat tests passed locally by running `./runtests.sh --codeformat`. 12 | - [ ] In-line docstrings updated. 13 | - [ ] Update `version` and `changelog` in `metadata.json` if changing an existing bundle. 14 | - [ ] Please ensure the naming rules in config files meet our requirements (please refer to: `CONTRIBUTING.md`). 15 | - [ ] Ensure versions of packages such as `monai`, `pytorch` and `numpy` are correct in `metadata.json`. 16 | - [ ] Descriptions should be consistent with the content, such as `eval_metrics` of the provided weights and TorchScript modules. 17 | - [ ] Files larger than 25MB are excluded and replaced by providing download links in `large_file.yml`. 18 | - [ ] Avoid using path that contains personal information within config files (such as use `/home/your_name/` for `"bundle_root"`). 
19 | -------------------------------------------------------------------------------- /.github/workflows/chatops.yml: -------------------------------------------------------------------------------- 1 | name: chatops 2 | 3 | # currently dispatches /black command to project-monai/monai-code-formatter 4 | on: 5 | issue_comment: 6 | types: [created, edited] 7 | jobs: 8 | dispatch_command: 9 | runs-on: ubuntu-latest 10 | steps: 11 | - name: dispatch 12 | uses: peter-evans/slash-command-dispatch@v1.2.0 13 | with: 14 | token: ${{ secrets.PR_MAINTAIN }} 15 | reaction-token: ${{ secrets.GITHUB_TOKEN }} 16 | reactions: false 17 | config: > 18 | [ 19 | { 20 | "command": "black", 21 | "permission": "none", 22 | "issue_type": "pull-request", 23 | "allow_edits": true, 24 | "repository": "project-monai/monai-code-formatter" 25 | } 26 | ] 27 | -------------------------------------------------------------------------------- /.github/workflows/code-format-check.yml: -------------------------------------------------------------------------------- 1 | name: code-format-check 2 | 3 | on: 4 | # code format check for pull requests and the dev branch 5 | push: 6 | branches: 7 | - dev 8 | pull_request: 9 | 10 | concurrency: 11 | # automatically cancel the previously triggered workflows when there's a newer version 12 | group: build-${{ github.event.pull_request.number || github.ref }} 13 | cancel-in-progress: true 14 | 15 | jobs: 16 | flake8-py3: 17 | runs-on: ubuntu-latest 18 | steps: 19 | - uses: actions/checkout@v4 20 | - name: Set up Python 3.10 21 | uses: actions/setup-python@v4 22 | with: 23 | python-version: 3.10.14 24 | - name: cache weekly timestamp 25 | id: pip-cache 26 | run: | 27 | echo "::set-output name=datew::$(date '+%Y-%V')" 28 | - name: cache for pip 29 | uses: actions/cache@v4 30 | id: cache 31 | with: 32 | path: ~/.cache/pip 33 | key: ${{ runner.os }}-pip-${{ steps.pip-cache.outputs.datew }} 34 | - name: Install dependencies 35 | run: | 36 | python -m pip install 
--upgrade pip wheel 37 | python -m pip install --upgrade setuptools 38 | python -m pip install -r requirements-dev.txt 39 | - name: type check 40 | run: | 41 | # clean up temporary files 42 | $(pwd)/runtests.sh --clean 43 | # Git hub actions have 2 cores, so parallize pytype 44 | $(pwd)/runtests.sh --codeformat -j 2 45 | -------------------------------------------------------------------------------- /.github/workflows/premerge-cpu.yml: -------------------------------------------------------------------------------- 1 | name: premerge-cpu-check 2 | 3 | on: 4 | # code format check for pull requests and the dev branch 5 | push: 6 | branches: 7 | - dev 8 | pull_request: 9 | 10 | concurrency: 11 | # automatically cancel the previously triggered workflows when there's a newer version 12 | group: premerge-${{ github.event.pull_request.number || github.ref }} 13 | cancel-in-progress: true 14 | 15 | jobs: 16 | premerge-cpu: 17 | runs-on: ubuntu-latest 18 | steps: 19 | - uses: actions/checkout@v4 20 | - name: Set up Python 3.10 21 | uses: actions/setup-python@v4 22 | with: 23 | python-version: 3.10.14 24 | - name: cache weekly timestamp 25 | id: pip-cache 26 | run: | 27 | echo "::set-output name=datew::$(date '+%Y-%V')" 28 | - name: cache for pip 29 | uses: actions/cache@v4 30 | id: cache 31 | with: 32 | path: ~/.cache/pip 33 | key: ${{ runner.os }}-pip-${{ steps.pip-cache.outputs.datew }} 34 | - name: Install dependencies 35 | run: | 36 | python -m pip install --upgrade pip wheel 37 | python -m pip install --upgrade setuptools 38 | - name: check 39 | run: | 40 | # clean up temporary files 41 | $(pwd)/runtests.sh --clean 42 | df -h 43 | bash $(pwd)/ci/run_premerge_cpu.sh changed 44 | shell: bash 45 | -------------------------------------------------------------------------------- /.github/workflows/update-model-info.yml: -------------------------------------------------------------------------------- 1 | name: update-model-info 2 | 3 | on: 4 | # schedule: 5 | # - cron: "0 
10 * * *" # 10:00, everyday 6 | # Allows you to run this workflow manually from the Actions tab 7 | workflow_dispatch: 8 | 9 | jobs: 10 | build: 11 | if: github.ref == 'refs/heads/dev' 12 | permissions: write-all 13 | runs-on: ubuntu-latest 14 | steps: 15 | - uses: actions/checkout@v2 16 | with: 17 | fetch-depth: 2 18 | - name: update model info 19 | run: | 20 | python -m pip install -r requirements-dev.txt 21 | changes=$(git diff --name-only HEAD^..HEAD -- models) 22 | if [ ! -z "$changes" ]; then 23 | python $(pwd)/ci/update_model_info_deparate.py --f "$changes" 24 | fi 25 | env: 26 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 27 | username: ${{ secrets.UPDATE_MODEL_USERNAME }} 28 | email: ${{ secrets.UPDATE_MODEL_EMAIL }} 29 | -------------------------------------------------------------------------------- /ci/download_latest_bundle.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) MONAI Consortium 2 | # Licensed under the Apache License, Version 2.0 (the "License"); 3 | # you may not use this file except in compliance with the License. 4 | # You may obtain a copy of the License at 5 | # http://www.apache.org/licenses/LICENSE-2.0 6 | # Unless required by applicable law or agreed to in writing, software 7 | # distributed under the License is distributed on an "AS IS" BASIS, 8 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 | # See the License for the specific language governing permissions and 10 | # limitations under the License. 
11 | 12 | 13 | import argparse 14 | import os 15 | 16 | from monai.bundle import download 17 | from utils import get_latest_version 18 | 19 | 20 | def download_latest_bundle(bundle_name: str, models_path: str, download_path: str): 21 | model_info_path = os.path.join(models_path, "model_info.json") 22 | version = get_latest_version(bundle_name=bundle_name, model_info_path=model_info_path) 23 | download(name=bundle_name, source="monaihosting", version=version, bundle_dir=download_path) 24 | 25 | 26 | if __name__ == "__main__": 27 | parser = argparse.ArgumentParser(description="") 28 | parser.add_argument("-b", "--b", type=str, help="bundle name.") 29 | parser.add_argument("-models_path", "--models_path", type=str, help="models path.") 30 | parser.add_argument("-p", "--p", type=str, help="download path.") 31 | args = parser.parse_args() 32 | bundle_name = args.b 33 | models_path = args.models_path 34 | download_path = args.p 35 | download_latest_bundle(bundle_name, models_path, download_path) 36 | -------------------------------------------------------------------------------- /ci/get_bundle_list.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) MONAI Consortium 2 | # Licensed under the Apache License, Version 2.0 (the "License"); 3 | # you may not use this file except in compliance with the License. 4 | # You may obtain a copy of the License at 5 | # http://www.apache.org/licenses/LICENSE-2.0 6 | # Unless required by applicable law or agreed to in writing, software 7 | # distributed under the License is distributed on an "AS IS" BASIS, 8 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 | # See the License for the specific language governing permissions and 10 | # limitations under the License. 
11 | 12 | 13 | import argparse 14 | 15 | from utils import get_sub_folders 16 | 17 | # new added bundles should temporarily be added to this list, and remove until they can be downloaded successfully 18 | EXCLUDE_LIST = [] 19 | 20 | 21 | def main(models_path): 22 | bundle_list = get_sub_folders(root_dir=models_path) 23 | bundle_list = [b for b in bundle_list if b not in EXCLUDE_LIST] 24 | print(bundle_list) 25 | 26 | 27 | if __name__ == "__main__": 28 | parser = argparse.ArgumentParser(description="") 29 | parser.add_argument("-models_path", "--models_path", type=str, help="models path.") 30 | args = parser.parse_args() 31 | models_path = args.models_path 32 | main(models_path) 33 | -------------------------------------------------------------------------------- /ci/get_changed_bundle.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) MONAI Consortium 2 | # Licensed under the Apache License, Version 2.0 (the "License"); 3 | # you may not use this file except in compliance with the License. 4 | # You may obtain a copy of the License at 5 | # http://www.apache.org/licenses/LICENSE-2.0 6 | # Unless required by applicable law or agreed to in writing, software 7 | # distributed under the License is distributed on an "AS IS" BASIS, 8 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 | # See the License for the specific language governing permissions and 10 | # limitations under the License. 11 | 12 | 13 | import argparse 14 | 15 | from utils import get_changed_bundle_list 16 | 17 | 18 | def get_changed_bundle(changed_dirs): 19 | """ 20 | This function is used to get all changed bundles, a string which 21 | contains all bundle names will be printed, and can be used in shell scripts. 
22 | """ 23 | bundle_names = "" 24 | root_path = "models" 25 | bundle_list = get_changed_bundle_list(changed_dirs, root_path=root_path) 26 | 27 | for bundle in bundle_list: 28 | bundle_names += f"{bundle} " 29 | print(bundle_names) 30 | 31 | 32 | def get_changed_hf_model(changed_dirs): 33 | """ 34 | This function is used to get all changed hf models, a string which 35 | contains all hf model names will be printed, and can be used in shell scripts. 36 | """ 37 | hf_model_names = "" 38 | root_path = "hf_models" 39 | hf_model_list = get_changed_bundle_list(changed_dirs, root_path=root_path) 40 | for hf_model in hf_model_list: 41 | hf_model_names += f"{hf_model} " 42 | print(hf_model_names) 43 | 44 | 45 | if __name__ == "__main__": 46 | parser = argparse.ArgumentParser(description="") 47 | parser.add_argument("-f", "--f", type=str, help="changed files.") 48 | parser.add_argument("--hf_model", type=bool, default=False, help="if true, get changed hf models.") 49 | args = parser.parse_args() 50 | changed_dirs = args.f.splitlines() 51 | if args.hf_model: 52 | get_changed_hf_model(changed_dirs) 53 | else: 54 | get_changed_bundle(changed_dirs) 55 | -------------------------------------------------------------------------------- /ci/get_required_resources.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# Compare the current commit against origin/dev and, if anything under
# models/ changed, ask get_required_resources.py which resource flags the
# changed bundles need. Prints the flags (e.g. "False False") to stdout.

head_ref=$(git rev-parse HEAD)
# Quote expansions so unexpected characters in refs/paths cannot word-split.
git fetch origin dev "$head_ref"
changes=$(git diff --name-only "$head_ref" origin/dev -- models)
if [ -n "$changes" ]
then
    flags=$(python "$(pwd)/ci/get_required_resources.py" --f "$changes")
else
    # Plain quoting: the previous $"..." form is bash locale-translation
    # quoting and was not intended here.
    flags="False False"
fi
# Intentionally unquoted: collapses the flags onto one whitespace-separated line.
echo $flags
def main(bundle_list, models_path):
    """Prepare the metadata schema for the given bundles.

    Args:
        bundle_list: names of the bundles to process.
        models_path: root directory containing the bundles; paths containing
            "hf_models" are treated as Hugging Face models and validated
            against the HF-specific schema.
    """
    hf_model = "hf_models" in models_path
    prepare_schema(bundle_list, root_path=models_path, hf_model=hf_model)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="")
    parser.add_argument("-l", "--l", type=str, help="bundle list.")
    parser.add_argument("-p", "--p", type=str, default="models", help="models path.")
    args = parser.parse_args()
    # The bundle list arrives space-separated; drop empty tokens.
    names = [name for name in args.l.split(" ") if name]
    main(names, args.p)
TEST_CASE_1 = [  # inference
    {
        "bundle_root": "models/brain_image_synthesis_latent_diffusion_model",
        "gender": 1.0,
        "age": 0.7,
        "ventricular_vol": 0.7,
        "brain_vol": 0.5,
    }
]


class BrainImageSynthesisLatentDiffusionModel(unittest.TestCase):
    """Smoke test: build and run the bundle's inference ConfigWorkflow."""

    @parameterized.expand([TEST_CASE_1])
    def test_inference(self, params):
        """Instantiate the inference workflow with *params* and run it."""
        root = params["bundle_root"]
        workflow = ConfigWorkflow(
            workflow_type="inference",
            config_file=os.path.join(root, "configs/inference.json"),
            logging_file=os.path.join(root, "configs/logging.conf"),
            meta_file=os.path.join(root, "configs/metadata.json"),
            **params,
        )
        # check_properties=True also validates the workflow's required properties.
        check_workflow(workflow, check_properties=True)


if __name__ == "__main__":
    loader = unittest.TestLoader()
    unittest.main(testLoader=loader)
TEST_CASE_1 = [  # inference
    {
        "bundle_root": "models/cxr_image_synthesis_latent_diffusion_model",
        "prompt": "Big right-sided pleural effusion. Normal left lung.",
        "guidance_scale": 7.0,
    }
]


class TestCXRLatentDiffusionInference(unittest.TestCase):
    """Smoke test for the CXR latent diffusion bundle's inference workflow."""

    @parameterized.expand([TEST_CASE_1])
    def test_inference(self, params):
        """Build the bundle's inference ConfigWorkflow from its configs and run it."""
        bundle_root = params["bundle_root"]
        inference_file = os.path.join(bundle_root, "configs/inference.json")
        trainer = ConfigWorkflow(
            workflow_type="inference",
            config_file=inference_file,
            logging_file=os.path.join(bundle_root, "configs/logging.conf"),
            meta_file=os.path.join(bundle_root, "configs/metadata.json"),
            **params,
        )
        # check_properties=True also validates the workflow's required properties.
        check_workflow(trainer, check_properties=True)


if __name__ == "__main__":
    loader = unittest.TestLoader()
    unittest.main(testLoader=loader)
For complete usage instructions and examples, please visit the corresponding Hugging Face model repository linked below. 14 | 15 | ### Available Models 16 | 17 | | Model | Description | HF Repository | 18 | |-------|-------------|--------------| 19 | | exaonepath | EXAONEPath is a patch-level pathology pretrained model with 86 million parameters | [LGAI-EXAONE/EXAONEPath](https://huggingface.co/LGAI-EXAONE/EXAONEPath) | 20 | | exaonepath-crc-msi-predictor | MSI classification of CRC tumors using EXAONEPath 1.0.0 Patch-level Foundation Model for Pathology | [LGAI-EXAONE/EXAONEPath-CRC-MSI-Predictor](https://huggingface.co/LGAI-EXAONE/EXAONEPath-CRC-MSI-Predictor) | 21 | | llama3_vila_m3_3b | Lightweight medical vision language model that enhances VLMs with medical expert knowledge (3B parameters) | [MONAI/Llama3-VILA-M3-3B](https://huggingface.co/MONAI/Llama3-VILA-M3-3B) | 22 | | llama3_vila_m3_8b | Medical vision language model that utilizes domain-expert models to improve precision in medical imaging tasks (8B parameters) | [MONAI/Llama3-VILA-M3-8B](https://huggingface.co/MONAI/Llama3-VILA-M3-8B) | 23 | | llama3_vila_m3_13b | Enhanced medical vision language model with improved capabilities for various medical imaging tasks (13B parameters) | [MONAI/Llama3-VILA-M3-13B](https://huggingface.co/MONAI/Llama3-VILA-M3-13B) | 24 | | ct_chat | Vision-language foundational chat model for 3D chest CT volumes | [ibrahimhamamci/CT-RATE](https://huggingface.co/datasets/ibrahimhamamci/CT-RATE) | 25 | -------------------------------------------------------------------------------- /hf_models/ct_chat/LICENSE: -------------------------------------------------------------------------------- 1 | Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License (CC BY-NC-SA 4.0) 2 | 3 | This is a human-readable summary of (and not a substitute for) the license. 
See the full license text at: https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode 4 | 5 | You are free to: 6 | - Share — copy and redistribute the material in any medium or format 7 | - Adapt — remix, transform, and build upon the material 8 | 9 | Under the following terms: 10 | - Attribution — You must give appropriate credit, provide a link to the license, and indicate if changes were made. You may do so in any reasonable manner, but not in any way that suggests the licensor endorses you or your use. 11 | - NonCommercial — You may not use the material for commercial purposes. 12 | - ShareAlike — If you remix, transform, or build upon the material, you must distribute your contributions under the same license as the original. 13 | 14 | No additional restrictions — You may not apply legal terms or technological measures that legally restrict others from doing anything the license permits. 15 | 16 | Notices: 17 | You do not have to comply with the license for elements of the material in the public domain or where your use is permitted by an applicable exception or limitation. 18 | No warranties are given. The license may not give you all of the permissions necessary for your intended use. For example, other rights such as publicity, privacy, or moral rights may limit how you use the material. 
19 | -------------------------------------------------------------------------------- /hf_models/exaonepath-crc-msi-predictor/metadata.json: -------------------------------------------------------------------------------- 1 | { 2 | "schema": "https://github.com/Project-MONAI/MONAI-extra-test-data/releases/download/0.8.1/meta_schema_hf_20250321.json", 3 | "version": "1.0.0", 4 | "changelog": { 5 | "1.0.0": "initial release of EXAONEPath CRC MSI Predictor" 6 | }, 7 | "monai_version": "1.4.0", 8 | "pytorch_version": "2.4.0", 9 | "numpy_version": "1.24.4", 10 | "required_packages_version": { 11 | "torch": "2.4.0", 12 | "torchvision": "0.15.0", 13 | "torchstain": "1.3.0", 14 | "pillow": "10.0.0", 15 | "huggingface_hub": "0.24.2", 16 | "transformers": "4.43.3" 17 | }, 18 | "supported_apps": { 19 | "exaonepath-crc-msi-predictor": "" 20 | }, 21 | "name": "EXAONEPath-CRC-MSI-Predictor", 22 | "task": "MSI classification of CRC tumors using EXAONEPath model", 23 | "description": "MSI classification of CRC tumors using EXAONEPath - a patch-level foundation model for pathology.", 24 | "authors": "LG AI Research", 25 | "copyright": "LG AI Research", 26 | "data_source": "LG AI Research", 27 | "data_type": "WSI patches", 28 | "image_classes": "RGB pathology image patches", 29 | "huggingface_model_id": "LGAI-EXAONE/EXAONEPath-CRC-MSI-Predictor", 30 | "huggingface_url": "https://huggingface.co/LGAI-EXAONE/EXAONEPath-CRC-MSI-Predictor", 31 | "intended_use": "Research and clinical support for pathology image analysis", 32 | "references": [ 33 | "Yun, Juseung, et al. 'EXAONEPath 1.0 Patch-level Foundation Model for Pathology', arXiv preprint arXiv:2408.00380 (2024)." 
34 | ] 35 | } 36 | -------------------------------------------------------------------------------- /hf_models/exaonepath/metadata.json: -------------------------------------------------------------------------------- 1 | { 2 | "schema": "https://github.com/Project-MONAI/MONAI-extra-test-data/releases/download/0.8.1/meta_schema_hf_20250321.json", 3 | "version": "1.0.0", 4 | "changelog": { 5 | "1.0.0": "initial release of EXAONEPath 1.0" 6 | }, 7 | "monai_version": "1.4.0", 8 | "pytorch_version": "2.4.0", 9 | "numpy_version": "1.24.4", 10 | "required_packages_version": { 11 | "torch": "2.4.0", 12 | "torchvision": "0.15.0", 13 | "torchstain": "1.3.0", 14 | "pillow": "10.0.0", 15 | "huggingface_hub": "0.24.2", 16 | "transformers": "4.43.3" 17 | }, 18 | "supported_apps": { 19 | "exaonepath": "" 20 | }, 21 | "name": "EXAONEPath", 22 | "task": "Pathology foundation model", 23 | "description": "EXAONEPath is a patch-level pathology pretrained model with 86 million parameters, pretrained on 285,153,903 patches extracted from 34,795 WSIs.", 24 | "authors": "LG AI Research", 25 | "copyright": "LG AI Research", 26 | "data_source": "LG AI Research", 27 | "data_type": "WSI patches", 28 | "image_classes": "RGB pathology image patches", 29 | "huggingface_model_id": "LGAI-EXAONE/EXAONEPath", 30 | "huggingface_url": "https://huggingface.co/LGAI-EXAONE/EXAONEPath", 31 | "intended_use": "Research and clinical support for pathology image analysis", 32 | "references": [ 33 | "Yun, Juseung, et al. 'EXAONEPath 1.0 Patch-level Foundation Model for Pathology', arXiv preprint arXiv:2408.00380 (2024)." 
34 | ] 35 | } 36 | -------------------------------------------------------------------------------- /hf_models/llama3_vila_m3_13b/LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2025 NVIDIA Corporation 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /hf_models/llama3_vila_m3_13b/README.md: -------------------------------------------------------------------------------- 1 | # VILA_M3_13B 2 | 3 | VILA_M3 is a medical vision language model that enhances VLMs with medical expert knowledge, utilizing domain-expert models to improve precision in medical imaging tasks. 
4 | 5 | This model is available at: [MONAI/Llama3-VILA-M3-13B](https://huggingface.co/MONAI/Llama3-VILA-M3-13B) 6 | 7 | ## Citation 8 | 9 | ``` 10 | @article{nath2025vila, 11 | title={VILA_M3: Enhancing Vision-Language Models with Medical Expert Knowledge}, 12 | author={Nath, Vishwesh and Li, Wenqi and Yang, Dong and Myronenko, Andriy and Zheng, Mingxin and Lu, Yao and Liu, Zhijian and Yin, Hongxu and Tang, Yucheng and Guo, Pengfei and Zhao, Can and Xu, Ziyue and He, Yufan and Law, Yee Man and Simon, Benjamin and Harmon, Stephanie and Heinrich, Greg and Aylward, Stephen and Edgar, Marc and Zephyr, Michael and Han, Song and Molchanov, Pavlo and Turkbey, Baris and Roth, Holger and Xu, Daguang}, 13 | journal={arXiv preprint arXiv:2411.12915}, 14 | year={2025} 15 | } 16 | ``` 17 | -------------------------------------------------------------------------------- /hf_models/llama3_vila_m3_13b/metadata.json: -------------------------------------------------------------------------------- 1 | { 2 | "schema": "https://github.com/Project-MONAI/MONAI-extra-test-data/releases/download/0.8.1/meta_schema_hf_20250321.json", 3 | "version": "1.0.0", 4 | "changelog": { 5 | "1.0.0": "initial release of VILA_M3_13B model" 6 | }, 7 | "monai_version": "1.4.0", 8 | "pytorch_version": "2.4.0", 9 | "numpy_version": "1.24.4", 10 | "required_packages_version": { 11 | "torch": "2.4.0", 12 | "huggingface_hub": "0.24.2", 13 | "transformers": "4.43.3" 14 | }, 15 | "name": "VILA_M3_13B", 16 | "task": "Medical vision-language model", 17 | "description": "VILA_M3 is a medical vision language model that enhances VLMs with medical expert knowledge, utilizing domain-expert models to improve precision in medical imaging tasks.", 18 | "authors": "Vishwesh Nath, Wenqi Li, Dong Yang, Andriy Myronenko, et al. 
from NVIDIA, SingHealth, and NIH", 19 | "copyright": "NVIDIA", 20 | "data_source": "NVIDIA", 21 | "data_type": "Medical images and text", 22 | "image_classes": "Various medical imaging modalities", 23 | "huggingface_model_id": "MONAI/Llama3-VILA-M3-13B", 24 | "huggingface_url": "https://huggingface.co/MONAI/Llama3-VILA-M3-13B", 25 | "intended_use": "Research in medical vision-language tasks", 26 | "references": [ 27 | "Nath, Vishwesh, et al. 'VILA_M3: Enhancing Vision-Language Models with Medical Expert Knowledge', arXiv preprint arXiv:2411.12915 (2025)." 28 | ] 29 | } 30 | -------------------------------------------------------------------------------- /hf_models/llama3_vila_m3_3b/LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2025 NVIDIA Corporation 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /hf_models/llama3_vila_m3_3b/README.md: -------------------------------------------------------------------------------- 1 | # VILA_M3_3B 2 | 3 | VILA_M3 is a medical vision language model that enhances VLMs with medical expert knowledge, utilizing domain-expert models to improve precision in medical imaging tasks. 4 | 5 | This model is available at: [MONAI/Llama3-VILA-M3-3B](https://huggingface.co/MONAI/Llama3-VILA-M3-3B) 6 | 7 | ## Citation 8 | 9 | ``` 10 | @article{nath2025vila, 11 | title={VILA_M3: Enhancing Vision-Language Models with Medical Expert Knowledge}, 12 | author={Nath, Vishwesh and Li, Wenqi and Yang, Dong and Myronenko, Andriy and Zheng, Mingxin and Lu, Yao and Liu, Zhijian and Yin, Hongxu and Tang, Yucheng and Guo, Pengfei and Zhao, Can and Xu, Ziyue and He, Yufan and Law, Yee Man and Simon, Benjamin and Harmon, Stephanie and Heinrich, Greg and Aylward, Stephen and Edgar, Marc and Zephyr, Michael and Han, Song and Molchanov, Pavlo and Turkbey, Baris and Roth, Holger and Xu, Daguang}, 13 | journal={arXiv preprint arXiv:2411.12915}, 14 | year={2025} 15 | } 16 | ``` 17 | -------------------------------------------------------------------------------- /hf_models/llama3_vila_m3_3b/metadata.json: -------------------------------------------------------------------------------- 1 | { 2 | "schema": "https://github.com/Project-MONAI/MONAI-extra-test-data/releases/download/0.8.1/meta_schema_hf_20250321.json", 3 | "version": "1.0.0", 4 | "changelog": { 5 | "1.0.0": "initial release of VILA_M3_3B model" 6 | }, 7 | "monai_version": "1.4.0", 8 | "pytorch_version": "2.4.0", 9 | "numpy_version": "1.24.4", 10 | "required_packages_version": { 11 | "torch": "2.4.0", 12 | "huggingface_hub": "0.24.2", 13 | "transformers": "4.43.3" 14 | }, 15 | "name": "VILA_M3_3B", 16 | "task": "Medical vision-language model", 17 | "description": "VILA_M3 is a medical vision language model that 
enhances VLMs with medical expert knowledge, utilizing domain-expert models to improve precision in medical imaging tasks.", 18 | "authors": "Vishwesh Nath, Wenqi Li, Dong Yang, Andriy Myronenko, et al. from NVIDIA, SingHealth, and NIH", 19 | "copyright": "NVIDIA", 20 | "data_source": "NVIDIA", 21 | "data_type": "Medical images and text", 22 | "image_classes": "Various medical imaging modalities", 23 | "huggingface_model_id": "MONAI/Llama3-VILA-M3-3B", 24 | "huggingface_url": "https://huggingface.co/MONAI/Llama3-VILA-M3-3B", 25 | "intended_use": "Research in medical vision-language tasks", 26 | "references": [ 27 | "Nath, Vishwesh, et al. 'VILA_M3: Enhancing Vision-Language Models with Medical Expert Knowledge', arXiv preprint arXiv:2411.12915 (2025)." 28 | ] 29 | } 30 | -------------------------------------------------------------------------------- /hf_models/llama3_vila_m3_8b/LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2025 NVIDIA Corporation 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /hf_models/llama3_vila_m3_8b/README.md: -------------------------------------------------------------------------------- 1 | # VILA_M3_8B 2 | 3 | VILA_M3 is a medical vision language model that enhances VLMs with medical expert knowledge, utilizing domain-expert models to improve precision in medical imaging tasks. 4 | 5 | This model is available at: [MONAI/Llama3-VILA-M3-8B](https://huggingface.co/MONAI/Llama3-VILA-M3-8B) 6 | 7 | ## Citation 8 | 9 | ``` 10 | @article{nath2025vila, 11 | title={VILA_M3: Enhancing Vision-Language Models with Medical Expert Knowledge}, 12 | author={Nath, Vishwesh and Li, Wenqi and Yang, Dong and Myronenko, Andriy and Zheng, Mingxin and Lu, Yao and Liu, Zhijian and Yin, Hongxu and Tang, Yucheng and Guo, Pengfei and Zhao, Can and Xu, Ziyue and He, Yufan and Law, Yee Man and Simon, Benjamin and Harmon, Stephanie and Heinrich, Greg and Aylward, Stephen and Edgar, Marc and Zephyr, Michael and Han, Song and Molchanov, Pavlo and Turkbey, Baris and Roth, Holger and Xu, Daguang}, 13 | journal={arXiv preprint arXiv:2411.12915}, 14 | year={2025} 15 | } 16 | ``` 17 | -------------------------------------------------------------------------------- /hf_models/llama3_vila_m3_8b/metadata.json: -------------------------------------------------------------------------------- 1 | { 2 | "schema": "https://github.com/Project-MONAI/MONAI-extra-test-data/releases/download/0.8.1/meta_schema_hf_20250321.json", 3 | "version": "1.0.0", 4 | "changelog": { 5 | "1.0.0": "initial release of VILA_M3_8B model" 6 | }, 7 | "monai_version": "1.4.0", 8 | "pytorch_version": "2.4.0", 9 | "numpy_version": "1.24.4", 
10 | "required_packages_version": { 11 | "torch": "2.4.0", 12 | "huggingface_hub": "0.24.2", 13 | "transformers": "4.43.3" 14 | }, 15 | "name": "VILA_M3_8B", 16 | "task": "Medical vision-language model", 17 | "description": "VILA_M3 is a medical vision language model that enhances VLMs with medical expert knowledge, utilizing domain-expert models to improve precision in medical imaging tasks.", 18 | "authors": "Vishwesh Nath, Wenqi Li, Dong Yang, Andriy Myronenko, et al. from NVIDIA, SingHealth, and NIH", 19 | "copyright": "NVIDIA", 20 | "data_source": "NVIDIA", 21 | "data_type": "Medical images and text", 22 | "image_classes": "Various medical imaging modalities", 23 | "huggingface_model_id": "MONAI/Llama3-VILA-M3-8B", 24 | "huggingface_url": "https://huggingface.co/MONAI/Llama3-VILA-M3-8B", 25 | "intended_use": "Research in medical vision-language tasks", 26 | "references": [ 27 | "Nath, Vishwesh, et al. 'VILA_M3: Enhancing Vision-Language Models with Medical Expert Knowledge', arXiv preprint arXiv:2411.12915 (2025)." 
28 | ] 29 | } 30 | -------------------------------------------------------------------------------- /models/brain_image_synthesis_latent_diffusion_model/configs/logging.conf: -------------------------------------------------------------------------------- 1 | [loggers] 2 | keys=root 3 | 4 | [handlers] 5 | keys=consoleHandler 6 | 7 | [formatters] 8 | keys=fullFormatter 9 | 10 | [logger_root] 11 | level=INFO 12 | handlers=consoleHandler 13 | 14 | [handler_consoleHandler] 15 | class=StreamHandler 16 | level=INFO 17 | formatter=fullFormatter 18 | args=(sys.stdout,) 19 | 20 | [formatter_fullFormatter] 21 | format=%(asctime)s - %(name)s - %(levelname)s - %(message)s 22 | -------------------------------------------------------------------------------- /models/brain_image_synthesis_latent_diffusion_model/docs/figure_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Project-MONAI/model-zoo/f3ae2588766a08a82f88bce1c345aeec4d4c4298/models/brain_image_synthesis_latent_diffusion_model/docs/figure_1.png -------------------------------------------------------------------------------- /models/brain_image_synthesis_latent_diffusion_model/large_files.yml: -------------------------------------------------------------------------------- 1 | large_files: 2 | - path: "models/autoencoder.pt" 3 | url: "https://drive.google.com/uc?export=download&id=1CZHwxHJWybOsDavipD0EorDPOo_mzNeX" 4 | hash_val: "329e97b3085643ff235f11f049856242" 5 | hash_type: "md5" 6 | - path: "models/model.pt" 7 | url: "https://drive.google.com/uc?export=download&id=1XO-ak93ZuOcGTCpgRtqgIeZq3dG5ExN6" 8 | hash_val: "21c3047556fb671caf0556f1cce6ef22" 9 | hash_type: "md5" 10 | -------------------------------------------------------------------------------- /models/brain_image_synthesis_latent_diffusion_model/scripts/__init__.py: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Project-MONAI/model-zoo/f3ae2588766a08a82f88bce1c345aeec4d4c4298/models/brain_image_synthesis_latent_diffusion_model/scripts/__init__.py -------------------------------------------------------------------------------- /models/brain_image_synthesis_latent_diffusion_model/scripts/sampler.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | import torch 4 | import torch.nn as nn 5 | from monai.utils import optional_import 6 | from torch.cuda.amp import autocast 7 | 8 | tqdm, has_tqdm = optional_import("tqdm", name="tqdm") 9 | 10 | 11 | class Sampler: 12 | def __init__(self) -> None: 13 | super().__init__() 14 | 15 | @torch.no_grad() 16 | def sampling_fn( 17 | self, 18 | input_noise: torch.Tensor, 19 | autoencoder_model: nn.Module, 20 | diffusion_model: nn.Module, 21 | scheduler: nn.Module, 22 | conditioning: torch.Tensor, 23 | ) -> torch.Tensor: 24 | if has_tqdm: 25 | progress_bar = tqdm(scheduler.timesteps) 26 | else: 27 | progress_bar = iter(scheduler.timesteps) 28 | 29 | image = input_noise 30 | cond_concat = conditioning.squeeze(1).unsqueeze(-1).unsqueeze(-1).unsqueeze(-1) 31 | cond_concat = cond_concat.expand(list(cond_concat.shape[0:2]) + list(input_noise.shape[2:])) 32 | for t in progress_bar: 33 | with torch.no_grad(): 34 | model_output = diffusion_model( 35 | torch.cat((image, cond_concat), dim=1), 36 | timesteps=torch.Tensor((t,)).to(input_noise.device).long(), 37 | context=conditioning, 38 | ) 39 | image, _ = scheduler.step(model_output, t, image) 40 | 41 | with torch.no_grad(): 42 | with autocast(): 43 | sample = autoencoder_model.decode_stage_2_outputs(image) 44 | 45 | return sample 46 | -------------------------------------------------------------------------------- /models/brats_mri_axial_slices_generative_diffusion/configs/inference_trt.json: -------------------------------------------------------------------------------- 1 | { 2 | 
"+imports": [ 3 | "$from monai.networks import trt_compile" 4 | ], 5 | "diffusion": "$trt_compile(@network_def.to(@device), @load_diffusion_path)", 6 | "autoencoder": "$trt_compile(@autoencoder_def.to(@device), @load_autoencoder_path)" 7 | } 8 | -------------------------------------------------------------------------------- /models/brats_mri_axial_slices_generative_diffusion/configs/logging.conf: -------------------------------------------------------------------------------- 1 | [loggers] 2 | keys=root 3 | 4 | [handlers] 5 | keys=consoleHandler 6 | 7 | [formatters] 8 | keys=fullFormatter 9 | 10 | [logger_root] 11 | level=INFO 12 | handlers=consoleHandler 13 | 14 | [handler_consoleHandler] 15 | class=StreamHandler 16 | level=INFO 17 | formatter=fullFormatter 18 | args=(sys.stdout,) 19 | 20 | [formatter_fullFormatter] 21 | format=%(asctime)s - %(name)s - %(levelname)s - %(message)s 22 | -------------------------------------------------------------------------------- /models/brats_mri_axial_slices_generative_diffusion/configs/multi_gpu_train_autoencoder.json: -------------------------------------------------------------------------------- 1 | { 2 | "device": "$torch.device('cuda:' + os.environ['LOCAL_RANK'])", 3 | "gnetwork": { 4 | "_target_": "torch.nn.parallel.DistributedDataParallel", 5 | "module": "$@autoencoder_def.to(@device)", 6 | "device_ids": [ 7 | "@device" 8 | ], 9 | "find_unused_parameters": true 10 | }, 11 | "dnetwork": { 12 | "_target_": "torch.nn.parallel.DistributedDataParallel", 13 | "module": "$@discriminator_def.to(@device)", 14 | "device_ids": [ 15 | "@device" 16 | ], 17 | "find_unused_parameters": true 18 | }, 19 | "train#sampler": { 20 | "_target_": "DistributedSampler", 21 | "dataset": "@train#dataset", 22 | "even_divisible": true, 23 | "shuffle": true 24 | }, 25 | "train#dataloader#sampler": "@train#sampler", 26 | "train#dataloader#shuffle": false, 27 | "train#trainer#train_handlers": "$@train#handlers[: -2 if dist.get_rank() > 0 else None]", 
28 | "initialize": [ 29 | "$import torch.distributed as dist", 30 | "$import os", 31 | "$dist.is_initialized() or dist.init_process_group(backend='nccl')", 32 | "$torch.cuda.set_device(@device)", 33 | "$monai.utils.set_determinism(seed=123)", 34 | "$import logging", 35 | "$@train#trainer.logger.setLevel(logging.WARNING if dist.get_rank() > 0 else logging.INFO)" 36 | ], 37 | "run": [ 38 | "$@train#trainer.run()" 39 | ], 40 | "finalize": [ 41 | "$dist.is_initialized() and dist.destroy_process_group()" 42 | ] 43 | } 44 | -------------------------------------------------------------------------------- /models/brats_mri_axial_slices_generative_diffusion/configs/multi_gpu_train_diffusion.json: -------------------------------------------------------------------------------- 1 | { 2 | "diffusion": { 3 | "_target_": "torch.nn.parallel.DistributedDataParallel", 4 | "module": "$@network_def.to(@device)", 5 | "device_ids": [ 6 | "@device" 7 | ], 8 | "find_unused_parameters": true 9 | }, 10 | "run": [ 11 | "@load_autoencoder", 12 | "$@autoencoder.eval()", 13 | "$print('scale factor:',@scale_factor)", 14 | "$@train#trainer.run()" 15 | ] 16 | } 17 | -------------------------------------------------------------------------------- /models/brats_mri_axial_slices_generative_diffusion/large_files.yml: -------------------------------------------------------------------------------- 1 | large_files: 2 | - path: "models/model_autoencoder.pt" 3 | url: "https://developer.download.nvidia.com/assets/Clara/monai/tutorials/model_zoo/model_autoencoder_brats_mri_axial_slices_generative_diffusion_v1.pt" 4 | hash_val: "847a61ad13a68ebfca9c0a8fa6d0d6bd" 5 | hash_type: "md5" 6 | - path: "models/model.pt" 7 | url: "https://developer.download.nvidia.com/assets/Clara/monai/tutorials/model_zoo/model_brats_mri_axial_slices_generative_diffusion_v1.pt" 8 | hash_val: "93a19ea3eaafd9781b4140286b121f37" 9 | hash_type: "md5" 10 | -------------------------------------------------------------------------------- 
/models/brats_mri_axial_slices_generative_diffusion/scripts/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) MONAI Consortium 2 | # Licensed under the Apache License, Version 2.0 (the "License"); 3 | # you may not use this file except in compliance with the License. 4 | # You may obtain a copy of the License at 5 | # http://www.apache.org/licenses/LICENSE-2.0 6 | # Unless required by applicable law or agreed to in writing, software 7 | # distributed under the License is distributed on an "AS IS" BASIS, 8 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 | # See the License for the specific language governing permissions and 10 | # limitations under the License. 11 | 12 | from . import ldm_sampler, ldm_trainer, losses, utils 13 | -------------------------------------------------------------------------------- /models/brats_mri_axial_slices_generative_diffusion/scripts/utils.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) MONAI Consortium 2 | # Licensed under the Apache License, Version 2.0 (the "License"); 3 | # you may not use this file except in compliance with the License. 4 | # You may obtain a copy of the License at 5 | # http://www.apache.org/licenses/LICENSE-2.0 6 | # Unless required by applicable law or agreed to in writing, software 7 | # distributed under the License is distributed on an "AS IS" BASIS, 8 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#     http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import numpy as np
import torch


def compute_scale_factor(autoencoder, train_loader, device):
    """Return ``1 / std`` of the latents of the first training batch.

    Encodes one batch with the (frozen) autoencoder so the diffusion model
    can be trained on approximately unit-variance latents.

    Args:
        autoencoder: model providing ``encode_stage_2_inputs``.
        train_loader: iterable of batches with an ``"image"`` entry.
        device: torch device the batch is moved to before encoding.
    """
    # Imported lazily so the numpy-only helpers below stay usable
    # without monai installed.
    from monai.utils import first

    with torch.no_grad():
        check_data = first(train_loader)
        z = autoencoder.encode_stage_2_inputs(check_data["image"].to(device))
        scale_factor = 1 / torch.std(z)
    return scale_factor.item()


def normalize_image_to_uint8(image):
    """Normalize an image to uint8 in ``[0, 255]`` without mutating the input.

    Negative values are clipped to 0 and, when the maximum exceeds 0.1, the
    image is rescaled so its maximum maps to 255.  The original version
    clipped and scaled the caller's array in place; a copy is taken here so
    the input is left untouched.

    Args:
        image: numpy array of float values.
    """
    # np.array copies by default — protects the caller's buffer.
    draw_img = np.array(image)
    if np.amin(draw_img) < 0:
        draw_img[draw_img < 0] = 0
    # Skip rescaling for nearly-black images to avoid amplifying noise.
    if np.amax(draw_img) > 0.1:
        draw_img /= np.amax(draw_img)
    return (255 * draw_img).astype(np.uint8)


def visualize_2d_image(image):
    """Prepare a 2D image for visualization as an (H, W, 3) uint8 array.

    Args:
        image: image array or tensor, sized (H, W).
    """
    # Imported lazily; see note in compute_scale_factor.
    from monai.utils.type_conversion import convert_to_numpy

    image = convert_to_numpy(image)
    draw_img = normalize_image_to_uint8(image)
    # Replicate the single channel into RGB for viewers expecting 3 channels.
    return np.stack([draw_img, draw_img, draw_img], axis=-1)
| 7 | [formatters] 8 | keys=fullFormatter 9 | 10 | [logger_root] 11 | level=INFO 12 | handlers=consoleHandler 13 | 14 | [handler_consoleHandler] 15 | class=StreamHandler 16 | level=INFO 17 | formatter=fullFormatter 18 | args=(sys.stdout,) 19 | 20 | [formatter_fullFormatter] 21 | format=%(asctime)s - %(name)s - %(levelname)s - %(message)s 22 | -------------------------------------------------------------------------------- /models/brats_mri_generative_diffusion/configs/multi_gpu_train_autoencoder.json: -------------------------------------------------------------------------------- 1 | { 2 | "device": "$torch.device('cuda:' + os.environ['LOCAL_RANK'])", 3 | "gnetwork": { 4 | "_target_": "torch.nn.parallel.DistributedDataParallel", 5 | "module": "$@autoencoder_def.to(@device)", 6 | "device_ids": [ 7 | "@device" 8 | ] 9 | }, 10 | "dnetwork": { 11 | "_target_": "torch.nn.parallel.DistributedDataParallel", 12 | "module": "$@discriminator_def.to(@device)", 13 | "device_ids": [ 14 | "@device" 15 | ] 16 | }, 17 | "train#sampler": { 18 | "_target_": "DistributedSampler", 19 | "dataset": "@train#dataset", 20 | "even_divisible": true, 21 | "shuffle": true 22 | }, 23 | "train#dataloader#sampler": "@train#sampler", 24 | "train#dataloader#shuffle": false, 25 | "train#trainer#train_handlers": "$@train#handlers[: -2 if dist.get_rank() > 0 else None]", 26 | "initialize": [ 27 | "$import torch.distributed as dist", 28 | "$import os", 29 | "$dist.is_initialized() or dist.init_process_group(backend='nccl')", 30 | "$torch.cuda.set_device(@device)", 31 | "$monai.utils.set_determinism(seed=123)", 32 | "$import logging", 33 | "$@train#trainer.logger.setLevel(logging.WARNING if dist.get_rank() > 0 else logging.INFO)" 34 | ], 35 | "run": [ 36 | "$@train#trainer.run()" 37 | ], 38 | "finalize": [ 39 | "$dist.is_initialized() and dist.destroy_process_group()" 40 | ] 41 | } 42 | -------------------------------------------------------------------------------- 
/models/brats_mri_generative_diffusion/configs/multi_gpu_train_diffusion.json: -------------------------------------------------------------------------------- 1 | { 2 | "diffusion": { 3 | "_target_": "torch.nn.parallel.DistributedDataParallel", 4 | "module": "$@network_def.to(@device)", 5 | "device_ids": [ 6 | "@device" 7 | ], 8 | "find_unused_parameters": true 9 | }, 10 | "run": [ 11 | "@load_autoencoder", 12 | "$@autoencoder.eval()", 13 | "$print('scale factor:',@scale_factor)", 14 | "$@train#trainer.run()" 15 | ] 16 | } 17 | -------------------------------------------------------------------------------- /models/brats_mri_generative_diffusion/large_files.yml: -------------------------------------------------------------------------------- 1 | large_files: 2 | - path: "models/model_autoencoder.pt" 3 | url: "https://developer.download.nvidia.com/assets/Clara/monai/tutorials/model_zoo/model_autoencoder_brats_mri_generative_diffusion_v1.pt" 4 | hash_val: "9e6df4cc9a2decf49ab3332606b32c55" 5 | hash_type: "md5" 6 | - path: "models/model.pt" 7 | url: "https://developer.download.nvidia.com/assets/Clara/monai/tutorials/model_zoo/model_brats_mri_generative_diffusion_v1.pt" 8 | hash_val: "35258b1112f701f3d485676d33141a55" 9 | hash_type: "md5" 10 | -------------------------------------------------------------------------------- /models/brats_mri_generative_diffusion/scripts/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) MONAI Consortium 2 | # Licensed under the Apache License, Version 2.0 (the "License"); 3 | # you may not use this file except in compliance with the License. 4 | # You may obtain a copy of the License at 5 | # http://www.apache.org/licenses/LICENSE-2.0 6 | # Unless required by applicable law or agreed to in writing, software 7 | # distributed under the License is distributed on an "AS IS" BASIS, 8 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#     http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import monai
import torch


def compute_scale_factor(autoencoder, train_loader, device):
    """Compute the latent-space scaling factor for LDM training.

    Encodes the first batch from ``train_loader`` with the autoencoder and
    returns ``1 / std`` of the resulting latents, so the diffusion model
    operates on roughly unit-variance inputs.
    """
    with torch.no_grad():
        first_batch = monai.utils.first(train_loader)
        latents = autoencoder.encode_stage_2_inputs(first_batch["image"].to(device))
        return (1 / torch.std(latents)).item()
-------------------------------------------------------------------------------- 1 | [loggers] 2 | keys=root 3 | 4 | [handlers] 5 | keys=consoleHandler 6 | 7 | [formatters] 8 | keys=fullFormatter 9 | 10 | [logger_root] 11 | level=INFO 12 | handlers=consoleHandler 13 | 14 | [handler_consoleHandler] 15 | class=StreamHandler 16 | level=INFO 17 | formatter=fullFormatter 18 | args=(sys.stdout,) 19 | 20 | [formatter_fullFormatter] 21 | format=%(asctime)s - %(name)s - %(levelname)s - %(message)s 22 | -------------------------------------------------------------------------------- /models/brats_mri_segmentation/configs/multi_gpu_train.json: -------------------------------------------------------------------------------- 1 | { 2 | "device": "$torch.device('cuda:' + os.environ['LOCAL_RANK'])", 3 | "network": { 4 | "_target_": "torch.nn.parallel.DistributedDataParallel", 5 | "module": "$@network_def.to(@device)", 6 | "device_ids": [ 7 | "@device" 8 | ] 9 | }, 10 | "train#sampler": { 11 | "_target_": "DistributedSampler", 12 | "dataset": "@train#dataset", 13 | "even_divisible": true, 14 | "shuffle": true 15 | }, 16 | "train#dataloader#sampler": "@train#sampler", 17 | "train#dataloader#shuffle": false, 18 | "train#trainer#train_handlers": "$@train#handlers[: -2 if dist.get_rank() > 0 else None]", 19 | "validate#sampler": { 20 | "_target_": "DistributedSampler", 21 | "dataset": "@validate#dataset", 22 | "even_divisible": false, 23 | "shuffle": false 24 | }, 25 | "validate#dataloader#sampler": "@validate#sampler", 26 | "validate#evaluator#val_handlers": "$None if dist.get_rank() > 0 else @validate#handlers", 27 | "initialize": [ 28 | "$import torch.distributed as dist", 29 | "$dist.is_initialized() or dist.init_process_group(backend='nccl')", 30 | "$torch.cuda.set_device(@device)", 31 | "$monai.utils.set_determinism(seed=123)", 32 | "$setattr(torch.backends.cudnn, 'benchmark', True)" 33 | ], 34 | "run": [ 35 | "$@train#trainer.run()" 36 | ], 37 | "finalize": [ 38 | 
"$dist.is_initialized() and dist.destroy_process_group()" 39 | ] 40 | } 41 | -------------------------------------------------------------------------------- /models/brats_mri_segmentation/large_files.yml: -------------------------------------------------------------------------------- 1 | large_files: 2 | - path: "models/model.pt" 3 | url: "https://developer.download.nvidia.com/assets/Clara/monai/tutorials/model_zoo/model_brats_mri_segmentation.pt" 4 | hash_val: "870e677b782a5184cbc48db1456b78e8" 5 | hash_type: "md5" 6 | - path: "models/model.ts" 7 | url: "https://developer.download.nvidia.com/assets/Clara/monai/tutorials/model_zoo/model_brats_mri_segmentation.ts" 8 | hash_val: "c82f693c8f671e9899d21c2f241892f0" 9 | hash_type: "md5" 10 | -------------------------------------------------------------------------------- /models/breast_density_classification/configs/logging.conf: -------------------------------------------------------------------------------- 1 | [loggers] 2 | keys=root 3 | 4 | [handlers] 5 | keys=consoleHandler 6 | 7 | [formatters] 8 | keys=fullFormatter 9 | 10 | [logger_root] 11 | level=INFO 12 | handlers=consoleHandler 13 | 14 | [handler_consoleHandler] 15 | class=StreamHandler 16 | level=INFO 17 | formatter=fullFormatter 18 | args=(sys.stdout,) 19 | 20 | [formatter_fullFormatter] 21 | format=%(asctime)s - %(name)s - %(levelname)s - %(message)s 22 | -------------------------------------------------------------------------------- /models/breast_density_classification/large_files.yml: -------------------------------------------------------------------------------- 1 | large_files: 2 | - path: "models/model.pt" 3 | url: "https://drive.google.com/file/d/1rmIW3Cnv_Ss6raAIfB5WDXywJFmU_BKa/view?usp=sharing" 4 | hash_val: "4a474db59cc1cc85411cb4bdcbc3efd7" 5 | hash_type: "md5" 6 | -------------------------------------------------------------------------------- /models/breast_density_classification/sample_data/A/sample_A1.jpg: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/Project-MONAI/model-zoo/f3ae2588766a08a82f88bce1c345aeec4d4c4298/models/breast_density_classification/sample_data/A/sample_A1.jpg -------------------------------------------------------------------------------- /models/breast_density_classification/sample_data/A/sample_A2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Project-MONAI/model-zoo/f3ae2588766a08a82f88bce1c345aeec4d4c4298/models/breast_density_classification/sample_data/A/sample_A2.jpg -------------------------------------------------------------------------------- /models/breast_density_classification/sample_data/A/sample_A3.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Project-MONAI/model-zoo/f3ae2588766a08a82f88bce1c345aeec4d4c4298/models/breast_density_classification/sample_data/A/sample_A3.jpg -------------------------------------------------------------------------------- /models/breast_density_classification/sample_data/A/sample_A4.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Project-MONAI/model-zoo/f3ae2588766a08a82f88bce1c345aeec4d4c4298/models/breast_density_classification/sample_data/A/sample_A4.jpg -------------------------------------------------------------------------------- /models/breast_density_classification/sample_data/B/sample_B1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Project-MONAI/model-zoo/f3ae2588766a08a82f88bce1c345aeec4d4c4298/models/breast_density_classification/sample_data/B/sample_B1.jpg -------------------------------------------------------------------------------- /models/breast_density_classification/sample_data/B/sample_B2.jpg: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/Project-MONAI/model-zoo/f3ae2588766a08a82f88bce1c345aeec4d4c4298/models/breast_density_classification/sample_data/B/sample_B2.jpg -------------------------------------------------------------------------------- /models/breast_density_classification/sample_data/B/sample_B3.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Project-MONAI/model-zoo/f3ae2588766a08a82f88bce1c345aeec4d4c4298/models/breast_density_classification/sample_data/B/sample_B3.jpg -------------------------------------------------------------------------------- /models/breast_density_classification/sample_data/B/sample_B4.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Project-MONAI/model-zoo/f3ae2588766a08a82f88bce1c345aeec4d4c4298/models/breast_density_classification/sample_data/B/sample_B4.jpg -------------------------------------------------------------------------------- /models/breast_density_classification/sample_data/C/sample_C1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Project-MONAI/model-zoo/f3ae2588766a08a82f88bce1c345aeec4d4c4298/models/breast_density_classification/sample_data/C/sample_C1.jpg -------------------------------------------------------------------------------- /models/breast_density_classification/sample_data/C/sample_C2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Project-MONAI/model-zoo/f3ae2588766a08a82f88bce1c345aeec4d4c4298/models/breast_density_classification/sample_data/C/sample_C2.jpg -------------------------------------------------------------------------------- /models/breast_density_classification/sample_data/C/sample_C3.jpg: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/Project-MONAI/model-zoo/f3ae2588766a08a82f88bce1c345aeec4d4c4298/models/breast_density_classification/sample_data/C/sample_C3.jpg -------------------------------------------------------------------------------- /models/breast_density_classification/sample_data/C/sample_C4.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Project-MONAI/model-zoo/f3ae2588766a08a82f88bce1c345aeec4d4c4298/models/breast_density_classification/sample_data/C/sample_C4.jpg -------------------------------------------------------------------------------- /models/breast_density_classification/sample_data/D/sample_D1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Project-MONAI/model-zoo/f3ae2588766a08a82f88bce1c345aeec4d4c4298/models/breast_density_classification/sample_data/D/sample_D1.jpg -------------------------------------------------------------------------------- /models/breast_density_classification/sample_data/D/sample_D2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Project-MONAI/model-zoo/f3ae2588766a08a82f88bce1c345aeec4d4c4298/models/breast_density_classification/sample_data/D/sample_D2.jpg -------------------------------------------------------------------------------- /models/breast_density_classification/sample_data/D/sample_D3.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Project-MONAI/model-zoo/f3ae2588766a08a82f88bce1c345aeec4d4c4298/models/breast_density_classification/sample_data/D/sample_D3.jpg -------------------------------------------------------------------------------- /models/breast_density_classification/sample_data/D/sample_D4.jpg: 
import json


class CreateImageLabelList:
    """Load an image/label datalist from a JSON file.

    The JSON file is expected to map group names (e.g. ``"Test"``) to lists
    of ``{"image": ..., "label": ...}`` entries.
    """

    def __init__(self, filename):
        self.filename = filename
        # Context manager closes the handle promptly; the original left the
        # file object open until garbage collection.
        with open(self.filename, "r", encoding="utf-8") as fid:
            self.json_dict = json.load(fid)

    def create_dataset(self, grp):
        """Return ``(image_list, label_list)`` for group ``grp``.

        Raises:
            KeyError: if ``grp`` is not present in the JSON file.
        """
        entries = self.json_dict[grp]
        image_list = [entry["image"] for entry in entries]
        label_list = [entry["label"] for entry in entries]
        return image_list, label_list
import os

from monai.bundle.config_parser import ConfigParser

# Pin GPU ordering/visibility before any CUDA context is created.
# Fixes a typo in the original ("CUDA_DEVICE_IRDER"), which made the
# device-order setting a silent no-op.
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0"

# Parse the bundle inference config and resolve the referenced components.
parser = ConfigParser()
parser.read_config("../configs/inference.json")
data = parser.get_parsed_content("data")
device = parser.get_parsed_content("device")
network = parser.get_parsed_content("network_def")

# Run inference once; the original ran the evaluator a second time at the
# end of the script, which was redundant debug residue.
inference = parser.get_parsed_content("evaluator")
inference.run()

print(type(network))

datalist = parser.get_parsed_content("test_imagelist")
print(datalist)
files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /models/classification_template/configs/evaluate.yaml: -------------------------------------------------------------------------------- 1 | # This implements the workflow for applying the network to a directory of images and measuring network performance with metrics. 
2 | 3 | # these transforms are used for inference to load and regularise inputs 4 | transforms: 5 | - _target_: AsDiscreted 6 | keys: ['@pred', '@label'] 7 | argmax: [true, false] 8 | to_onehot: '@num_classes' 9 | - _target_: ToTensord 10 | keys: ['@pred', '@label'] 11 | device: '@device' 12 | 13 | postprocessing: 14 | _target_: Compose 15 | transforms: $@transforms 16 | 17 | # inference handlers to load checkpoint, gather statistics 18 | val_handlers: 19 | - _target_: CheckpointLoader 20 | _disabled_: $not os.path.exists(@ckpt_path) 21 | load_path: '@ckpt_path' 22 | load_dict: 23 | model: '@network' 24 | - _target_: StatsHandler 25 | name: null # use engine.logger as the Logger object to log to 26 | output_transform: '$lambda x: None' 27 | - _target_: MetricsSaver 28 | save_dir: '@output_dir' 29 | metrics: ['val_accuracy'] 30 | metric_details: ['val_accuracy'] 31 | batch_transform: "$lambda x: [xx['image'].meta for xx in x]" 32 | summary_ops: "*" 33 | 34 | initialize: 35 | - "$monai.utils.set_determinism(seed=123)" 36 | - "$setattr(torch.backends.cudnn, 'benchmark', True)" 37 | run: 38 | - $@evaluator.run() 39 | -------------------------------------------------------------------------------- /models/classification_template/configs/logging.conf: -------------------------------------------------------------------------------- 1 | [loggers] 2 | keys=root 3 | 4 | [handlers] 5 | keys=consoleHandler 6 | 7 | [formatters] 8 | keys=fullFormatter 9 | 10 | [logger_root] 11 | level=INFO 12 | handlers=consoleHandler 13 | 14 | [handler_consoleHandler] 15 | class=StreamHandler 16 | level=INFO 17 | formatter=fullFormatter 18 | args=(sys.stdout,) 19 | 20 | [formatter_fullFormatter] 21 | format=%(asctime)s - %(name)s - %(levelname)s - %(message)s 22 | -------------------------------------------------------------------------------- /models/classification_template/configs/multi_gpu_train.yaml: -------------------------------------------------------------------------------- 1 | # This 
file contains the changes to implement DDP training with the train.yaml config. 2 | 3 | device: "$torch.device('cuda:' + os.environ['LOCAL_RANK'])" # assumes GPU # matches rank # 4 | 5 | # wrap the network in a DistributedDataParallel instance, moving it to the chosen device for this process 6 | network: 7 | _target_: torch.nn.parallel.DistributedDataParallel 8 | module: $@network_def.to(@device) 9 | device_ids: ['@device'] 10 | find_unused_parameters: true 11 | 12 | train_sampler: 13 | _target_: DistributedSampler 14 | dataset: '@train_dataset' 15 | even_divisible: true 16 | shuffle: true 17 | 18 | train_dataloader#sampler: '@train_sampler' 19 | train_dataloader#shuffle: false 20 | 21 | val_sampler: 22 | _target_: DistributedSampler 23 | dataset: '@val_dataset' 24 | even_divisible: false 25 | shuffle: false 26 | 27 | val_dataloader#sampler: '@val_sampler' 28 | 29 | initialize: 30 | - $import torch.distributed as dist 31 | - $dist.init_process_group(backend='nccl') 32 | - $torch.cuda.set_device(@device) 33 | - $monai.utils.set_determinism(seed=123) # may want to choose a different seed or not do this here 34 | run: 35 | - '$@trainer.run()' 36 | finalize: 37 | - '$dist.is_initialized() and dist.destroy_process_group()' 38 | -------------------------------------------------------------------------------- /models/classification_template/large_files.yml: -------------------------------------------------------------------------------- 1 | large_files: 2 | - path: "models/model.pt" 3 | url: "https://developer.download.nvidia.com/assets/Clara/monai/tutorials/model_key_metric=0.9500.pt" 4 | hash_val: "915f54538655e9e6091c5d09dfdee621" 5 | hash_type: "md5" 6 | -------------------------------------------------------------------------------- /models/cxr_image_synthesis_latent_diffusion_model/configs/logging.conf: -------------------------------------------------------------------------------- 1 | [loggers] 2 | keys=root 3 | 4 | [handlers] 5 | keys=consoleHandler 6 | 7 | 
[formatters] 8 | keys=fullFormatter 9 | 10 | [logger_root] 11 | level=INFO 12 | handlers=consoleHandler 13 | 14 | [handler_consoleHandler] 15 | class=StreamHandler 16 | level=INFO 17 | formatter=fullFormatter 18 | args=(sys.stdout,) 19 | 20 | [formatter_fullFormatter] 21 | format=%(asctime)s - %(name)s - %(levelname)s - %(message)s 22 | -------------------------------------------------------------------------------- /models/cxr_image_synthesis_latent_diffusion_model/docs/figure_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Project-MONAI/model-zoo/f3ae2588766a08a82f88bce1c345aeec4d4c4298/models/cxr_image_synthesis_latent_diffusion_model/docs/figure_1.png -------------------------------------------------------------------------------- /models/cxr_image_synthesis_latent_diffusion_model/large_files.yml: -------------------------------------------------------------------------------- 1 | large_files: 2 | - path: "models/autoencoder.pt" 3 | url: "https://drive.google.com/uc?export=download&id=1paDN1m-Q_Oy8d_BanPkRTi3RlNB_Sv_h" 4 | hash_val: "7f579cb789597db7bb5de1488f54bc6c" 5 | hash_type: "md5" 6 | - path: "models/model.pt" 7 | url: "https://drive.google.com/uc?export=download&id=1CjcmiPu5_QWr-f7wDJsXrCCcVeczneGT" 8 | hash_val: "c3fd4c8e38cd1d7250a8903cca935823" 9 | hash_type: "md5" 10 | -------------------------------------------------------------------------------- /models/cxr_image_synthesis_latent_diffusion_model/scripts/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Project-MONAI/model-zoo/f3ae2588766a08a82f88bce1c345aeec4d4c4298/models/cxr_image_synthesis_latent_diffusion_model/scripts/__init__.py -------------------------------------------------------------------------------- /models/cxr_image_synthesis_latent_diffusion_model/scripts/sampler.py: 
from __future__ import annotations

import torch
import torch.nn as nn
from monai.utils import optional_import
# NOTE(review): torch.cuda.amp.autocast is deprecated in newer torch in
# favor of torch.amp.autocast("cuda") — consider updating when the minimum
# torch version allows.
from torch.cuda.amp import autocast

# tqdm is optional; has_tqdm tells us whether a progress bar is available.
tqdm, has_tqdm = optional_import("tqdm", name="tqdm")


class Sampler:
    """Latent-diffusion sampler using classifier-free guidance.

    Denoises an initial latent with the diffusion model/scheduler under
    text conditioning, then decodes the final latent with the autoencoder.
    """

    def __init__(self) -> None:
        super().__init__()

    @torch.no_grad()
    def sampling_fn(
        self,
        noise: torch.Tensor,
        autoencoder_model: nn.Module,
        diffusion_model: nn.Module,
        scheduler: nn.Module,
        prompt_embeds: torch.Tensor,
        guidance_scale: float = 7.0,
        scale_factor: float = 0.3,
    ) -> torch.Tensor:
        """Run the reverse-diffusion loop and decode the resulting latent.

        Args:
            noise: initial latent noise tensor.
            autoencoder_model: model providing ``decode_stage_2_outputs``.
            diffusion_model: noise-prediction network.
            scheduler: diffusion scheduler exposing ``timesteps`` and ``step``.
            prompt_embeds: text-conditioning embeddings; presumably stacked
                as [unconditional, conditional] to match the doubled batch
                below — TODO confirm against the caller.
            guidance_scale: classifier-free guidance weight.
            scale_factor: latents are divided by this before decoding
                (inverse of the training-time latent scaling).
        """
        if has_tqdm:
            progress_bar = tqdm(scheduler.timesteps)
        else:
            progress_bar = iter(scheduler.timesteps)

        for t in progress_bar:
            # Double the batch so a single forward pass yields both the
            # unconditional and the text-conditioned noise prediction.
            noise_input = torch.cat([noise] * 2)
            model_output = diffusion_model(
                noise_input, timesteps=torch.Tensor((t,)).to(noise.device).long(), context=prompt_embeds
            )
            noise_pred_uncond, noise_pred_text = model_output.chunk(2)
            # Classifier-free guidance: move the prediction toward the
            # text-conditioned direction by guidance_scale.
            noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
            noise, _ = scheduler.step(noise_pred, t, noise)

        with autocast():
            # Undo the latent scaling before decoding back to image space.
            sample = autoencoder_model.decode_stage_2_outputs(noise / scale_factor)

        return sample
image_data = (image_data * 255).astype(np.uint8) 16 | im = Image.fromarray(image_data[0, 0]) 17 | im.save(self.output_dir + "/" + file_name + ".jpg") 18 | -------------------------------------------------------------------------------- /models/endoscopic_inbody_classification/configs/evaluate.json: -------------------------------------------------------------------------------- 1 | { 2 | "validate#postprocessing": { 3 | "_target_": "Compose", 4 | "transforms": [ 5 | { 6 | "_target_": "AsDiscreted", 7 | "keys": [ 8 | "pred", 9 | "label" 10 | ], 11 | "argmax": [ 12 | true, 13 | false 14 | ], 15 | "to_onehot": 2 16 | } 17 | ] 18 | }, 19 | "validate#handlers": [ 20 | { 21 | "_target_": "CheckpointLoader", 22 | "load_path": "$@ckpt_dir + '/model.pt'", 23 | "load_dict": { 24 | "model": "@network" 25 | } 26 | }, 27 | { 28 | "_target_": "StatsHandler", 29 | "iteration_log": false 30 | }, 31 | { 32 | "_target_": "MetricsSaver", 33 | "save_dir": "@output_dir", 34 | "metrics": [ 35 | "val_accu" 36 | ], 37 | "metric_details": [ 38 | "val_accu" 39 | ], 40 | "batch_transform": "$lambda x: [xx['image'].meta for xx in x]", 41 | "summary_ops": "*" 42 | } 43 | ], 44 | "run": [ 45 | "$@validate#evaluator.run()" 46 | ] 47 | } 48 | -------------------------------------------------------------------------------- /models/endoscopic_inbody_classification/configs/inference_trt.json: -------------------------------------------------------------------------------- 1 | { 2 | "imports": [ 3 | "$import os", 4 | "$import json", 5 | "$import torch_tensorrt" 6 | ], 7 | "network_def": "$torch.jit.load(@bundle_root + '/models/model_trt.ts')", 8 | "evaluator#amp": false, 9 | "initialize": [ 10 | "$monai.utils.set_determinism(seed=123)" 11 | ] 12 | } 13 | -------------------------------------------------------------------------------- /models/endoscopic_inbody_classification/configs/logging.conf: -------------------------------------------------------------------------------- 1 | [loggers] 2 | 
keys=root 3 | 4 | [handlers] 5 | keys=consoleHandler 6 | 7 | [formatters] 8 | keys=fullFormatter 9 | 10 | [logger_root] 11 | level=INFO 12 | handlers=consoleHandler 13 | 14 | [handler_consoleHandler] 15 | class=StreamHandler 16 | level=INFO 17 | formatter=fullFormatter 18 | args=(sys.stdout,) 19 | 20 | [formatter_fullFormatter] 21 | format=%(asctime)s - %(name)s - %(levelname)s - %(message)s 22 | -------------------------------------------------------------------------------- /models/endoscopic_inbody_classification/configs/multi_gpu_train.json: -------------------------------------------------------------------------------- 1 | { 2 | "device": "$torch.device('cuda:' + os.environ['LOCAL_RANK'])", 3 | "network": { 4 | "_target_": "torch.nn.parallel.DistributedDataParallel", 5 | "module": "$@network_def.to(@device)", 6 | "device_ids": [ 7 | "@device" 8 | ] 9 | }, 10 | "train#sampler": { 11 | "_target_": "DistributedSampler", 12 | "dataset": "@train#dataset", 13 | "even_divisible": true, 14 | "shuffle": true 15 | }, 16 | "train#dataloader#sampler": "@train#sampler", 17 | "train#dataloader#shuffle": false, 18 | "train#trainer#train_handlers": "$@train#handlers[: -2 if dist.get_rank() > 0 else None]", 19 | "validate#sampler": { 20 | "_target_": "DistributedSampler", 21 | "dataset": "@validate#dataset", 22 | "even_divisible": false, 23 | "shuffle": false 24 | }, 25 | "validate#dataloader#sampler": "@validate#sampler", 26 | "validate#evaluator#val_handlers": "$None if dist.get_rank() > 0 else @validate#handlers", 27 | "initialize": [ 28 | "$import torch.distributed as dist", 29 | "$dist.is_initialized() or dist.init_process_group(backend='nccl')", 30 | "$torch.cuda.set_device(@device)", 31 | "$monai.utils.set_determinism(seed=123)" 32 | ], 33 | "run": [ 34 | "$@train#trainer.run()" 35 | ], 36 | "finalize": [ 37 | "$dist.is_initialized() and dist.destroy_process_group()", 38 | "$@train_fp.close()", 39 | "$@val_fp.close()" 40 | ] 41 | } 42 | 
-------------------------------------------------------------------------------- /models/endoscopic_inbody_classification/docs/data_license.txt: -------------------------------------------------------------------------------- 1 | Datasets used in this work were provided by Activ Surgical. 2 | -------------------------------------------------------------------------------- /models/endoscopic_inbody_classification/large_files.yml: -------------------------------------------------------------------------------- 1 | large_files: 2 | - path: "models/model.pt" 3 | url: "https://developer.download.nvidia.com/assets/Clara/monai/tutorials/model_zoo/model_endoscopic_inbody_classification_v1.pt" 4 | hash_val: "e373019524bda213ccbce2251f057a5f" 5 | hash_type: "md5" 6 | - path: "models/model.ts" 7 | url: "https://developer.download.nvidia.com/assets/Clara/monai/tutorials/model_zoo/model_endoscopic_inbody_classification_v1.ts" 8 | hash_val: "8596d8a288edc28a34384a1c3f464110" 9 | hash_type: "md5" 10 | -------------------------------------------------------------------------------- /models/endoscopic_tool_segmentation/configs/inference_trt.json: -------------------------------------------------------------------------------- 1 | { 2 | "imports": [ 3 | "$import glob", 4 | "$import os", 5 | "$import torch_tensorrt" 6 | ], 7 | "network_def": "$torch.jit.load(@bundle_root + '/models/model_trt.ts')", 8 | "evaluator#amp": false, 9 | "initialize": [ 10 | "$monai.utils.set_determinism(seed=123)" 11 | ] 12 | } 13 | -------------------------------------------------------------------------------- /models/endoscopic_tool_segmentation/configs/logging.conf: -------------------------------------------------------------------------------- 1 | [loggers] 2 | keys=root 3 | 4 | [handlers] 5 | keys=consoleHandler 6 | 7 | [formatters] 8 | keys=fullFormatter 9 | 10 | [logger_root] 11 | level=INFO 12 | handlers=consoleHandler 13 | 14 | [handler_consoleHandler] 15 | class=StreamHandler 16 | level=INFO 17 
| formatter=fullFormatter 18 | args=(sys.stdout,) 19 | 20 | [formatter_fullFormatter] 21 | format=%(asctime)s - %(name)s - %(levelname)s - %(message)s 22 | -------------------------------------------------------------------------------- /models/endoscopic_tool_segmentation/configs/multi_gpu_evaluate.json: -------------------------------------------------------------------------------- 1 | { 2 | "device": "$torch.device('cuda:' + os.environ['LOCAL_RANK'])", 3 | "network": { 4 | "_target_": "torch.nn.parallel.DistributedDataParallel", 5 | "module": "$@network_def.to(@device)", 6 | "device_ids": [ 7 | "@device" 8 | ] 9 | }, 10 | "validate#sampler": { 11 | "_target_": "DistributedSampler", 12 | "dataset": "@validate#dataset", 13 | "even_divisible": false, 14 | "shuffle": false 15 | }, 16 | "validate#dataloader#sampler": "@validate#sampler", 17 | "validate#handlers#1#_disabled_": "$dist.get_rank() > 0", 18 | "initialize": [ 19 | "$import torch.distributed as dist", 20 | "$dist.is_initialized() or dist.init_process_group(backend='nccl')", 21 | "$torch.cuda.set_device(@device)", 22 | "$import logging", 23 | "$@validate#evaluator.logger.setLevel(logging.WARNING if dist.get_rank() > 0 else logging.INFO)" 24 | ], 25 | "run": [ 26 | "$@validate#evaluator.run()" 27 | ], 28 | "finalize": [ 29 | "$dist.is_initialized() and dist.destroy_process_group()" 30 | ] 31 | } 32 | -------------------------------------------------------------------------------- /models/endoscopic_tool_segmentation/configs/multi_gpu_train.json: -------------------------------------------------------------------------------- 1 | { 2 | "device": "$torch.device('cuda:' + os.environ['LOCAL_RANK'])", 3 | "network": { 4 | "_target_": "torch.nn.parallel.DistributedDataParallel", 5 | "module": "$@network_def.to(@device)", 6 | "device_ids": [ 7 | "@device" 8 | ], 9 | "find_unused_parameters": true 10 | }, 11 | "train#sampler": { 12 | "_target_": "DistributedSampler", 13 | "dataset": "@train#dataset", 14 | 
"even_divisible": true, 15 | "shuffle": true 16 | }, 17 | "train#dataloader#sampler": "@train#sampler", 18 | "train#dataloader#shuffle": false, 19 | "train#trainer#train_handlers": "$@train#handlers[: -2 if dist.get_rank() > 0 else None]", 20 | "validate#sampler": { 21 | "_target_": "DistributedSampler", 22 | "dataset": "@validate#dataset", 23 | "even_divisible": false, 24 | "shuffle": false 25 | }, 26 | "validate#dataloader#sampler": "@validate#sampler", 27 | "validate#evaluator#val_handlers": "$None if dist.get_rank() > 0 else @validate#handlers", 28 | "initialize": [ 29 | "$import torch.distributed as dist", 30 | "$dist.is_initialized() or dist.init_process_group(backend='nccl')", 31 | "$torch.cuda.set_device(@device)", 32 | "$monai.utils.set_determinism(seed=123)", 33 | "$import logging", 34 | "$@train#trainer.logger.setLevel(logging.WARNING if dist.get_rank() > 0 else logging.INFO)", 35 | "$@validate#evaluator.logger.setLevel(logging.WARNING if dist.get_rank() > 0 else logging.INFO)" 36 | ], 37 | "run": [ 38 | "$@train#trainer.run()" 39 | ], 40 | "finalize": [ 41 | "$dist.is_initialized() and dist.destroy_process_group()" 42 | ] 43 | } 44 | -------------------------------------------------------------------------------- /models/endoscopic_tool_segmentation/docs/data_license.txt: -------------------------------------------------------------------------------- 1 | Datasets used in this work were provided by Activ Surgical. 
2 | -------------------------------------------------------------------------------- /models/endoscopic_tool_segmentation/large_files.yml: -------------------------------------------------------------------------------- 1 | large_files: 2 | - path: "models/model.pt" 3 | url: "https://developer.download.nvidia.com/assets/Clara/monai/tutorials/model_zoo/model_endoscopic_tool_segmentation.pt" 4 | hash_val: "7ca94167f05ac9f62a16b1898155ff71" 5 | hash_type: "md5" 6 | - path: "models/model.ts" 7 | url: "https://developer.download.nvidia.com/assets/Clara/monai/tutorials/model_zoo/model_endoscopic_tool_segmentation.ts" 8 | hash_val: "889a9659f5ab245bd5c4a0a36c80728f" 9 | hash_type: "md5" 10 | -------------------------------------------------------------------------------- /models/lung_nodule_ct_detection/configs/evaluate.json: -------------------------------------------------------------------------------- 1 | { 2 | "test_datalist": "$monai.data.load_decathlon_datalist(@data_list_file_path, is_segmentation=True, data_list_key='validation', base_dir=@dataset_dir)", 3 | "validate#dataset": { 4 | "_target_": "Dataset", 5 | "data": "$@test_datalist", 6 | "transform": "@validate#preprocessing" 7 | }, 8 | "validate#key_metric": { 9 | "val_coco": { 10 | "_target_": "scripts.cocometric_ignite.IgniteCocoMetric", 11 | "coco_metric_monai": "$monai.apps.detection.metrics.coco.COCOMetric(classes=['nodule'], iou_list=[0.1], max_detection=[100])", 12 | "output_transform": "$monai.handlers.from_engine(['pred', 'label'])", 13 | "box_key": "box", 14 | "label_key": "label", 15 | "pred_score_key": "label_scores", 16 | "reduce_scalar": false 17 | } 18 | }, 19 | "validate#handlers": [ 20 | { 21 | "_target_": "CheckpointLoader", 22 | "load_path": "$@ckpt_dir + '/model.pt'", 23 | "load_dict": { 24 | "model": "@network" 25 | } 26 | }, 27 | { 28 | "_target_": "StatsHandler", 29 | "iteration_log": false 30 | }, 31 | { 32 | "_target_": "MetricsSaver", 33 | "save_dir": "@output_dir", 34 | "metrics": [ 
35 | "val_coco" 36 | ], 37 | "metric_details": [ 38 | "val_coco" 39 | ], 40 | "batch_transform": "$lambda x: [xx['image'].meta for xx in x]", 41 | "summary_ops": "*" 42 | } 43 | ], 44 | "initialize": [ 45 | "$setattr(torch.backends.cudnn, 'benchmark', True)" 46 | ], 47 | "run": [ 48 | "$@validate#evaluator.run()" 49 | ] 50 | } 51 | -------------------------------------------------------------------------------- /models/lung_nodule_ct_detection/configs/inference_trt.json: -------------------------------------------------------------------------------- 1 | { 2 | "imports": [ 3 | "$import glob", 4 | "$import os", 5 | "$import torch_tensorrt" 6 | ], 7 | "force_sliding_window": true, 8 | "network_def": "$torch.jit.load(@bundle_root + '/models/model_trt.ts')", 9 | "evaluator#amp": false, 10 | "initialize": [ 11 | "$setattr(torch.backends.cudnn, 'benchmark', True)" 12 | ] 13 | } 14 | -------------------------------------------------------------------------------- /models/lung_nodule_ct_detection/configs/logging.conf: -------------------------------------------------------------------------------- 1 | [loggers] 2 | keys=root 3 | 4 | [handlers] 5 | keys=consoleHandler 6 | 7 | [formatters] 8 | keys=fullFormatter 9 | 10 | [logger_root] 11 | level=INFO 12 | handlers=consoleHandler 13 | 14 | [handler_consoleHandler] 15 | class=StreamHandler 16 | level=INFO 17 | formatter=fullFormatter 18 | args=(sys.stdout,) 19 | 20 | [formatter_fullFormatter] 21 | format=%(asctime)s - %(name)s - %(levelname)s - %(message)s 22 | -------------------------------------------------------------------------------- /models/lung_nodule_ct_detection/docs/data_license.txt: -------------------------------------------------------------------------------- 1 | Third Party Licenses 2 | ----------------------------------------------------------------------- 3 | 4 | /*********************************************************************/ 5 | i. 
LUng Nodule Analysis 2016 6 | https://luna16.grand-challenge.org/Home/ 7 | https://creativecommons.org/licenses/by/4.0/ 8 | 9 | ii. Lung Image Database Consortium image collection (LIDC-IDRI) 10 | https://wiki.cancerimagingarchive.net/display/Public/LIDC-IDRI 11 | https://creativecommons.org/licenses/by/3.0/ 12 | -------------------------------------------------------------------------------- /models/lung_nodule_ct_detection/large_files.yml: -------------------------------------------------------------------------------- 1 | large_files: 2 | - path: "models/model.pt" 3 | url: "https://developer.download.nvidia.com/assets/Clara/monai/tutorials/model_zoo/model_lung_nodule_ct_detection.pt" 4 | hash_val: "b2e732a7c2786e2f8398fa45293b1e73" 5 | hash_type: "md5" 6 | - path: "models/model.ts" 7 | url: "https://developer.download.nvidia.com/assets/Clara/monai/tutorials/model_zoo/model_lung_nodule_ct_detection.ts" 8 | hash_val: "f5567e1c74b41964b2a0b19ea855f3b0" 9 | hash_type: "md5" 10 | -------------------------------------------------------------------------------- /models/lung_nodule_ct_detection/scripts/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) MONAI Consortium 2 | # Licensed under the Apache License, Version 2.0 (the "License"); 3 | # you may not use this file except in compliance with the License. 4 | # You may obtain a copy of the License at 5 | # http://www.apache.org/licenses/LICENSE-2.0 6 | # Unless required by applicable law or agreed to in writing, software 7 | # distributed under the License is distributed on an "AS IS" BASIS, 8 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 | # See the License for the specific language governing permissions and 10 | # limitations under the License. 
11 | 12 | # from .evaluator import EnsembleEvaluator, Evaluator, SupervisedEvaluator 13 | # from .multi_gpu_supervised_trainer import create_multigpu_supervised_evaluator, create_multigpu_supervised_trainer 14 | from .trainer import DetectionTrainer 15 | -------------------------------------------------------------------------------- /models/lung_nodule_ct_detection/scripts/utils.py: -------------------------------------------------------------------------------- 1 | from typing import Dict, List, Union 2 | 3 | import numpy as np 4 | import torch 5 | 6 | 7 | def detach_to_numpy(data: Union[List, Dict, torch.Tensor]) -> Union[List, Dict, torch.Tensor]: 8 | """ 9 | Recursively detach elements in data 10 | """ 11 | if isinstance(data, torch.Tensor): 12 | return data.cpu().detach().numpy() # pytype: disable=attribute-error 13 | 14 | elif isinstance(data, np.ndarray): 15 | return data 16 | 17 | elif isinstance(data, list): 18 | return [detach_to_numpy(d) for d in data] 19 | 20 | elif isinstance(data, dict): 21 | for k in data.keys(): 22 | data[k] = detach_to_numpy(data[k]) 23 | return data 24 | 25 | else: 26 | raise ValueError("data should be tensor, numpy array, dict, or list.") 27 | -------------------------------------------------------------------------------- /models/maisi_ct_generative/configs/inference_trt.json: -------------------------------------------------------------------------------- 1 | { 2 | "+imports": [ 3 | "$from monai.networks import trt_compile" 4 | ], 5 | "c_trt_args": { 6 | "export_args": { 7 | "dynamo": "$False", 8 | "report": "$True" 9 | }, 10 | "output_lists": [ 11 | [ 12 | -1 13 | ], 14 | [] 15 | ] 16 | }, 17 | "controlnet": "$trt_compile(@controlnet_def.to(@device), @trained_controlnet_path, @c_trt_args)", 18 | "diffusion_unet": "$trt_compile(@diffusion_unet_def.to(@device), @trained_diffusion_path)" 19 | } 20 | -------------------------------------------------------------------------------- 
/models/maisi_ct_generative/configs/integration_test_masks.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "bottom_region_index": [ 4 | 0, 5 | 0, 6 | 0, 7 | 1 8 | ], 9 | "dim": [ 10 | 512, 11 | 512, 12 | 512 13 | ], 14 | "label_list": [ 15 | 1, 16 | 3, 17 | 4, 18 | 5, 19 | 6, 20 | 7, 21 | 8, 22 | 9, 23 | 10, 24 | 11, 25 | 12, 26 | 13, 27 | 14, 28 | 15, 29 | 17, 30 | 19, 31 | 25, 32 | 28, 33 | 29, 34 | 31, 35 | 32, 36 | 33, 37 | 34, 38 | 35, 39 | 36, 40 | 37, 41 | 38, 42 | 39, 43 | 40, 44 | 41, 45 | 42, 46 | 58, 47 | 59, 48 | 60, 49 | 61, 50 | 62, 51 | 69, 52 | 70, 53 | 71, 54 | 72, 55 | 73, 56 | 74, 57 | 81, 58 | 82, 59 | 83, 60 | 84, 61 | 85, 62 | 86, 63 | 93, 64 | 94, 65 | 95, 66 | 96, 67 | 97, 68 | 98, 69 | 99, 70 | 100, 71 | 101, 72 | 102, 73 | 103, 74 | 104, 75 | 105, 76 | 106, 77 | 107, 78 | 114, 79 | 115, 80 | 118, 81 | 121, 82 | 122, 83 | 127 84 | ], 85 | "pseudo_label_filename": "./IntegrationTest-AbdomenCT.nii.gz", 86 | "spacing": [ 87 | 1.0, 88 | 1.0, 89 | 1.0 90 | ], 91 | "top_region_index": [ 92 | 0, 93 | 1, 94 | 0, 95 | 0 96 | ] 97 | } 98 | ] 99 | -------------------------------------------------------------------------------- /models/maisi_ct_generative/configs/logging.conf: -------------------------------------------------------------------------------- 1 | [loggers] 2 | keys=root 3 | 4 | [handlers] 5 | keys=consoleHandler 6 | 7 | [formatters] 8 | keys=fullFormatter 9 | 10 | [logger_root] 11 | level=INFO 12 | handlers=consoleHandler 13 | 14 | [handler_consoleHandler] 15 | class=StreamHandler 16 | level=INFO 17 | formatter=fullFormatter 18 | args=(sys.stdout,) 19 | 20 | [formatter_fullFormatter] 21 | format=%(asctime)s - %(name)s - %(levelname)s - %(message)s 22 | -------------------------------------------------------------------------------- /models/maisi_ct_generative/configs/multi_gpu_train.json: -------------------------------------------------------------------------------- 1 | { 2 | 
"device": "$torch.device('cuda:' + os.environ['LOCAL_RANK'])", 3 | "use_tensorboard": "$dist.get_rank() == 0", 4 | "controlnet": { 5 | "_target_": "torch.nn.parallel.DistributedDataParallel", 6 | "module": "$@controlnet_def.to(@device)", 7 | "find_unused_parameters": true, 8 | "device_ids": [ 9 | "@device" 10 | ] 11 | }, 12 | "load_controlnet": "$@controlnet.module.load_state_dict(@checkpoint_controlnet['controlnet_state_dict'], strict=True)", 13 | "train#sampler": { 14 | "_target_": "DistributedSampler", 15 | "dataset": "@train#dataset", 16 | "even_divisible": true, 17 | "shuffle": true 18 | }, 19 | "train#dataloader#sampler": "@train#sampler", 20 | "train#dataloader#shuffle": false, 21 | "train#trainer#train_handlers": "$@train#handlers[: -1 if dist.get_rank() > 0 else None]", 22 | "initialize": [ 23 | "$import torch.distributed as dist", 24 | "$dist.is_initialized() or dist.init_process_group(backend='nccl')", 25 | "$torch.cuda.set_device(@device)", 26 | "$monai.utils.set_determinism(seed=123)" 27 | ], 28 | "run": [ 29 | "$@train#trainer.run()" 30 | ], 31 | "finalize": [ 32 | "$dist.is_initialized() and dist.destroy_process_group()" 33 | ] 34 | } 35 | -------------------------------------------------------------------------------- /models/maisi_ct_generative/scripts/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) MONAI Consortium 2 | # Licensed under the Apache License, Version 2.0 (the "License"); 3 | # you may not use this file except in compliance with the License. 4 | # You may obtain a copy of the License at 5 | # http://www.apache.org/licenses/LICENSE-2.0 6 | # Unless required by applicable law or agreed to in writing, software 7 | # distributed under the License is distributed on an "AS IS" BASIS, 8 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 | # See the License for the specific language governing permissions and 10 | # limitations under the License. 
11 | 12 | from . import sample, utils 13 | -------------------------------------------------------------------------------- /models/mednist_ddpm/LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 MONAI Consortium 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /models/mednist_ddpm/configs/logging.conf: -------------------------------------------------------------------------------- 1 | [loggers] 2 | keys=root 3 | 4 | [handlers] 5 | keys=consoleHandler 6 | 7 | [formatters] 8 | keys=fullFormatter 9 | 10 | [logger_root] 11 | level=INFO 12 | handlers=consoleHandler 13 | 14 | [handler_consoleHandler] 15 | class=StreamHandler 16 | level=INFO 17 | formatter=fullFormatter 18 | args=(sys.stdout,) 19 | 20 | [formatter_fullFormatter] 21 | format=%(asctime)s - %(name)s - %(levelname)s - %(message)s 22 | -------------------------------------------------------------------------------- /models/mednist_ddpm/docs/README.md: -------------------------------------------------------------------------------- 1 | 2 | # MedNIST DDPM Example Bundle 3 | 4 | This implements roughly equivalent code to the "Denoising Diffusion Probabilistic Models with MedNIST Dataset" 5 | example notebook. This includes scripts for training with single or multiple GPUs and a visualisation notebook. 6 | 7 | 8 | The files included here demonstrate how to use the bundle: 9 | * [2d_ddpm_bundle_tutorial.ipynb](./2d_ddpm_bundle_tutorial.ipynb) - demonstrates command line and in-code invocation of the bundle's training and inference scripts 10 | * [sub_train.sh](sub_train.sh) - SLURM submission script example for training 11 | * [sub_train_multigpu.sh](sub_train_multigpu.sh) - SLURM submission script example for training with multiple GPUs 12 | -------------------------------------------------------------------------------- /models/mednist_ddpm/docs/sub_train.sh: -------------------------------------------------------------------------------- 1 | #! 
/bin/bash 2 | #SBATCH --nodes=1 3 | #SBATCH -J mednist_train 4 | #SBATCH -c 4 5 | #SBATCH --gres=gpu:1 6 | #SBATCH --time=2:00:00 7 | #SBATCH -p small 8 | 9 | set -v 10 | 11 | # change this if run submitted from a different directory 12 | export BUNDLE="$(pwd)/.." 13 | 14 | # change this to load a checkpoint instead of started from scratch 15 | CKPT=none 16 | 17 | CONFIG="$BUNDLE/configs/train.yaml" 18 | 19 | # change this to point to where MedNIST is located 20 | DATASET="$(pwd)" 21 | 22 | # it's useful to include the configuration in the log file 23 | cat "$BUNDLE/configs/train.yaml" 24 | 25 | python -m monai.bundle run training \ 26 | --meta_file "$BUNDLE/configs/metadata.json" \ 27 | --config_file "$CONFIG" \ 28 | --logging_file "$BUNDLE/configs/logging.conf" \ 29 | --bundle_root "$BUNDLE" \ 30 | --dataset_dir "$DATASET" 31 | -------------------------------------------------------------------------------- /models/mednist_ddpm/docs/sub_train_multigpu.sh: -------------------------------------------------------------------------------- 1 | #! /bin/bash 2 | #SBATCH --nodes=1 3 | #SBATCH -J mednist_train 4 | #SBATCH -c 4 5 | #SBATCH --gres=gpu:2 6 | #SBATCH --time=2:00:00 7 | #SBATCH -p big 8 | 9 | set -v 10 | 11 | # change this if run submitted from a different directory 12 | export BUNDLE="$(pwd)/.." 
13 | 14 | # change this to load a checkpoint instead of started from scratch 15 | CKPT=none 16 | 17 | CONFIG="'$BUNDLE/configs/train.yaml', '$BUNDLE/configs/train_multigpu.yaml'" 18 | 19 | # change this to point to where MedNIST is located 20 | DATASET="$(pwd)" 21 | 22 | # it's useful to include the configuration in the log file 23 | cat "$BUNDLE/configs/train.yaml" 24 | cat "$BUNDLE/configs/train_multigpu.yaml" 25 | 26 | # remember to change arguments to match how many nodes and GPUs you have 27 | torchrun --standalone --nnodes=1 --nproc_per_node=2 -m monai.bundle run training \ 28 | --meta_file "$BUNDLE/configs/metadata.json" \ 29 | --config_file "$CONFIG" \ 30 | --logging_file "$BUNDLE/configs/logging.conf" \ 31 | --bundle_root "$BUNDLE" \ 32 | --dataset_dir "$DATASET" 33 | -------------------------------------------------------------------------------- /models/mednist_ddpm/large_files.yml: -------------------------------------------------------------------------------- 1 | large_files: 2 | - path: "models/model.pt" 3 | url: "https://github.com/Project-MONAI/MONAI-extra-test-data/releases/download/0.8.1/mednist_ddpm.pt" 4 | hash_val: "02fd8c8e8ed5f7cda5deeed72b69f4f1" 5 | hash_type: "md5" 6 | -------------------------------------------------------------------------------- /models/mednist_ddpm/scripts/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | 4 | def inv_metric_cmp_fn(current_metric: float, prev_best: float) -> bool: 5 | """ 6 | This inverts comparison for those metrics which reduce like loss values, such that the lower one is better. 7 | 8 | Args: 9 | current_metric: metric value of current round computation. 10 | prev_best: the best metric value of previous rounds to compare with. 
11 | """ 12 | return current_metric < prev_best 13 | -------------------------------------------------------------------------------- /models/mednist_gan/configs/logging.conf: -------------------------------------------------------------------------------- 1 | [loggers] 2 | keys=root 3 | 4 | [handlers] 5 | keys=consoleHandler 6 | 7 | [formatters] 8 | keys=fullFormatter 9 | 10 | [logger_root] 11 | level=INFO 12 | handlers=consoleHandler 13 | 14 | [handler_consoleHandler] 15 | class=StreamHandler 16 | level=INFO 17 | formatter=fullFormatter 18 | args=(sys.stdout,) 19 | 20 | [formatter_fullFormatter] 21 | format=%(asctime)s - %(name)s - %(levelname)s - %(message)s 22 | -------------------------------------------------------------------------------- /models/mednist_gan/docs/data_license.txt: -------------------------------------------------------------------------------- 1 | Copyright 2022 MONAI Consortium 2 | Licensed under the Apache License, Version 2.0 (the "License"); 3 | you may not use this file except in compliance with the License. 4 | You may obtain a copy of the License at 5 | http://www.apache.org/licenses/LICENSE-2.0 6 | Unless required by applicable law or agreed to in writing, software 7 | distributed under the License is distributed on an "AS IS" BASIS, 8 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 | See the License for the specific language governing permissions and 10 | limitations under the License. 11 | 12 | Third Party Licenses 13 | ----------------------------------------------------------------------- 14 | 15 | /*********************************************************************/ 16 | i. MedNIST Dataset 17 | The dataset is kindly made available by Dr. Bradley J. Erickson M.D., Ph.D. (https://www.mayo.edu/research/labs/radiology-informatics/overview), Department of Radiology, Mayo Clinic under the Creative Commons [CC BY-SA 4.0 license](https://creativecommons.org/licenses/by-sa/4.0/). 
18 | 19 | The MedNIST dataset was gathered from several sets from: 20 | * TCIA (https://wiki.cancerimagingarchive.net/display/Public/Data+Usage+Policies+and+Restrictions) 21 | * the RSNA Bone Age Challenge (http://rsnachallenges.cloudapp.net/competitions/4), 22 | * the NIH Chest X-ray dataset (https://cloud.google.com/healthcare/docs/resources/public-datasets/nih-chest). 23 | 24 | If you use the MedNIST dataset, please acknowledge the source. For the license and usage conditions of the source datasets, please see their respective sites. 25 | -------------------------------------------------------------------------------- /models/mednist_gan/models/model.pt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Project-MONAI/model-zoo/f3ae2588766a08a82f88bce1c345aeec4d4c4298/models/mednist_gan/models/model.pt -------------------------------------------------------------------------------- /models/mednist_gan/scripts/__init__.py: -------------------------------------------------------------------------------- 1 | from . 
import losses 2 | -------------------------------------------------------------------------------- /models/mednist_gan/scripts/losses.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | disc_loss_criterion = torch.nn.BCELoss() 4 | gen_loss_criterion = torch.nn.BCELoss() 5 | real_label = 1 6 | fake_label = 0 7 | 8 | 9 | def discriminator_loss(gen_images, real_images, disc_net): 10 | real = real_images.new_full((real_images.shape[0], 1), real_label) 11 | gen = gen_images.new_full((gen_images.shape[0], 1), fake_label) 12 | 13 | realloss = disc_loss_criterion(disc_net(real_images), real) 14 | genloss = disc_loss_criterion(disc_net(gen_images.detach()), gen) 15 | 16 | return (genloss + realloss) / 2 17 | 18 | 19 | def generator_loss(gen_images, disc_net): 20 | output = disc_net(gen_images) 21 | cats = output.new_full(output.shape, real_label) 22 | return gen_loss_criterion(output, cats) 23 | -------------------------------------------------------------------------------- /models/mednist_reg/configs/logging.conf: -------------------------------------------------------------------------------- 1 | [loggers] 2 | keys=root 3 | 4 | [handlers] 5 | keys=consoleHandler 6 | 7 | [formatters] 8 | keys=fullFormatter 9 | 10 | [logger_root] 11 | level=INFO 12 | handlers=consoleHandler 13 | 14 | [handler_consoleHandler] 15 | class=StreamHandler 16 | level=INFO 17 | formatter=fullFormatter 18 | args=(sys.stdout,) 19 | 20 | [formatter_fullFormatter] 21 | format=%(asctime)s - %(name)s - %(levelname)s - %(message)s 22 | -------------------------------------------------------------------------------- /models/mednist_reg/docs/data_license.txt: -------------------------------------------------------------------------------- 1 | Copyright 2022 MONAI Consortium 2 | Licensed under the Apache License, Version 2.0 (the "License"); 3 | you may not use this file except in compliance with the License. 
4 | You may obtain a copy of the License at 5 | http://www.apache.org/licenses/LICENSE-2.0 6 | Unless required by applicable law or agreed to in writing, software 7 | distributed under the License is distributed on an "AS IS" BASIS, 8 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 | See the License for the specific language governing permissions and 10 | limitations under the License. 11 | 12 | Third Party Licenses 13 | ----------------------------------------------------------------------- 14 | 15 | /*********************************************************************/ 16 | i. MedNIST Dataset 17 | The dataset is kindly made available by Dr. Bradley J. Erickson M.D., Ph.D. (https://www.mayo.edu/research/labs/radiology-informatics/overview), Department of Radiology, Mayo Clinic under the Creative Commons [CC BY-SA 4.0 license](https://creativecommons.org/licenses/by-sa/4.0/). 18 | 19 | The MedNIST dataset was gathered from several sets from: 20 | * TCIA (https://wiki.cancerimagingarchive.net/display/Public/Data+Usage+Policies+and+Restrictions) 21 | * the RSNA Bone Age Challenge (http://rsnachallenges.cloudapp.net/competitions/4), 22 | * the NIH Chest X-ray dataset (https://cloud.google.com/healthcare/docs/resources/public-datasets/nih-chest). 23 | 24 | If you use the MedNIST dataset, please acknowledge the source. For the license and usage conditions of the source datasets, please see their respective sites. 
25 | -------------------------------------------------------------------------------- /models/mednist_reg/docs/examples/008501_fixed_7.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Project-MONAI/model-zoo/f3ae2588766a08a82f88bce1c345aeec4d4c4298/models/mednist_reg/docs/examples/008501_fixed_7.png -------------------------------------------------------------------------------- /models/mednist_reg/docs/examples/008502_fixed_6.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Project-MONAI/model-zoo/f3ae2588766a08a82f88bce1c345aeec4d4c4298/models/mednist_reg/docs/examples/008502_fixed_6.png -------------------------------------------------------------------------------- /models/mednist_reg/docs/examples/008502_moving_6.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Project-MONAI/model-zoo/f3ae2588766a08a82f88bce1c345aeec4d4c4298/models/mednist_reg/docs/examples/008502_moving_6.png -------------------------------------------------------------------------------- /models/mednist_reg/docs/examples/008502_pred_6.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Project-MONAI/model-zoo/f3ae2588766a08a82f88bce1c345aeec4d4c4298/models/mednist_reg/docs/examples/008502_pred_6.png -------------------------------------------------------------------------------- /models/mednist_reg/docs/examples/008504_moving_7.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Project-MONAI/model-zoo/f3ae2588766a08a82f88bce1c345aeec4d4c4298/models/mednist_reg/docs/examples/008504_moving_7.png -------------------------------------------------------------------------------- /models/mednist_reg/docs/examples/008504_pred_7.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/Project-MONAI/model-zoo/f3ae2588766a08a82f88bce1c345aeec4d4c4298/models/mednist_reg/docs/examples/008504_pred_7.png -------------------------------------------------------------------------------- /models/mednist_reg/large_files.yml: -------------------------------------------------------------------------------- 1 | large_files: 2 | - path: "models/model.pt" 3 | url: "https://developer.download.nvidia.com/assets/Clara/monai/tutorials/model_key_metric=-0.0065.pt" 4 | hash_val: "7970f6df5daaa2dff272afd448ea944e" 5 | hash_type: "md5" 6 | -------------------------------------------------------------------------------- /models/mednist_reg/scripts/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) MONAI Consortium 2 | # Licensed under the Apache License, Version 2.0 (the "License"); 3 | # you may not use this file except in compliance with the License. 4 | # You may obtain a copy of the License at 5 | # http://www.apache.org/licenses/LICENSE-2.0 6 | # Unless required by applicable law or agreed to in writing, software 7 | # distributed under the License is distributed on an "AS IS" BASIS, 8 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 | # See the License for the specific language governing permissions and 10 | # limitations under the License. 11 | -------------------------------------------------------------------------------- /models/mednist_reg/scripts/net.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) MONAI Consortium 2 | # Licensed under the Apache License, Version 2.0 (the "License"); 3 | # you may not use this file except in compliance with the License. 
4 | # You may obtain a copy of the License at 5 | # http://www.apache.org/licenses/LICENSE-2.0 6 | # Unless required by applicable law or agreed to in writing, software 7 | # distributed under the License is distributed on an "AS IS" BASIS, 8 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 | # See the License for the specific language governing permissions and 10 | # limitations under the License. 11 | 12 | import torch.nn as nn 13 | from monai.networks.blocks import Warp 14 | from monai.networks.nets import resnet18 15 | from monai.networks.nets.regunet import AffineHead 16 | 17 | 18 | class RegResNet(nn.Module): 19 | def __init__( 20 | self, 21 | image_size=(64, 64), 22 | spatial_dims=2, 23 | mod=None, 24 | mode="bilinear", 25 | padding_mode="border", 26 | features=400, # feature dimension of `mod` 27 | ): 28 | super().__init__() 29 | self.features = resnet18(n_input_channels=2, spatial_dims=spatial_dims) if mod is None else mod 30 | self.affine_head = AffineHead( 31 | spatial_dims=spatial_dims, image_size=image_size, decode_size=[1] * spatial_dims, in_channels=features 32 | ) 33 | self.warp = Warp(mode=mode, padding_mode=padding_mode) 34 | self.image_size = image_size 35 | 36 | def forward(self, x): 37 | self.features.to(device=x.device) 38 | self.affine_head.to(device=x.device) 39 | out = self.features(x) 40 | ddf = self.affine_head([out], self.image_size) 41 | f = self.warp(x[:, :1], ddf) # warp the first channel 42 | return f 43 | -------------------------------------------------------------------------------- /models/multi_organ_segmentation/LICENSE: -------------------------------------------------------------------------------- 1 | Private Use, Non-Commercial, Non-Reverse-Engineering License 2 | 3 | The Licensee is not allowed to distribute or make the model to any third party, either for free or for a fee. Reverse engineering of the model is not allowed. 
This includes, but is not limited to, providing the model as part of a commercial offering, sharing the model on a public or private network, or making the model available for download on the Internet. 4 | -------------------------------------------------------------------------------- /models/multi_organ_segmentation/configs/evaluate.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | validate#postprocessing: 3 | _target_: Compose 4 | transforms: 5 | - _target_: Activationsd 6 | keys: pred 7 | softmax: true 8 | - _target_: Invertd 9 | keys: 10 | - pred 11 | - label 12 | transform: "@validate#preprocessing" 13 | orig_keys: image 14 | meta_key_postfix: meta_dict 15 | nearest_interp: 16 | - false 17 | - true 18 | to_tensor: true 19 | - _target_: AsDiscreted 20 | keys: 21 | - pred 22 | - label 23 | argmax: 24 | - true 25 | - false 26 | to_onehot: 8 27 | - _target_: CopyItemsd 28 | keys: "pred" 29 | times: 1 30 | names: "pred_save" 31 | - _target_: AsDiscreted 32 | keys: 33 | - pred_save 34 | argmax: 35 | - true 36 | - _target_: SaveImaged 37 | keys: pred_save 38 | meta_keys: pred_meta_dict 39 | output_dir: "@output_dir" 40 | resample: false 41 | squeeze_end_dims: true 42 | validate#dataset: 43 | _target_: Dataset 44 | data: "@val_datalist" 45 | transform: "@validate#preprocessing" 46 | validate#handlers: 47 | - _target_: CheckpointLoader 48 | load_path: "$@ckpt_dir + '/model.pt'" 49 | load_dict: 50 | model: "@network" 51 | - _target_: StatsHandler 52 | iteration_log: false 53 | - _target_: MetricsSaver 54 | save_dir: "@output_dir" 55 | metrics: 56 | - val_mean_dice 57 | - val_acc 58 | metric_details: 59 | - val_mean_dice 60 | batch_transform: "$monai.handlers.from_engine(['image_meta_dict'])" 61 | summary_ops: "*" 62 | initialize: 63 | - "$setattr(torch.backends.cudnn, 'benchmark', True)" 64 | run: 65 | - "$@validate#evaluator.run()" 66 | -------------------------------------------------------------------------------- 
/models/multi_organ_segmentation/configs/inference_trt.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | imports: 3 | - "$import glob" 4 | - "$import os" 5 | - "$import torch_tensorrt" 6 | handlers#0#_disabled_: true 7 | network_def: "$torch.jit.load(@bundle_root + '/models/model.ts')" 8 | evaluator#amp: false 9 | -------------------------------------------------------------------------------- /models/multi_organ_segmentation/configs/logging.conf: -------------------------------------------------------------------------------- 1 | [loggers] 2 | keys=root 3 | 4 | [handlers] 5 | keys=consoleHandler 6 | 7 | [formatters] 8 | keys=fullFormatter 9 | 10 | [logger_root] 11 | level=INFO 12 | handlers=consoleHandler 13 | 14 | [handler_consoleHandler] 15 | class=StreamHandler 16 | level=INFO 17 | formatter=fullFormatter 18 | args=(sys.stdout,) 19 | 20 | [formatter_fullFormatter] 21 | format=%(asctime)s - %(name)s - %(levelname)s - %(message)s 22 | -------------------------------------------------------------------------------- /models/multi_organ_segmentation/docs/output_example.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Project-MONAI/model-zoo/f3ae2588766a08a82f88bce1c345aeec4d4c4298/models/multi_organ_segmentation/docs/output_example.png -------------------------------------------------------------------------------- /models/multi_organ_segmentation/large_files.yml: -------------------------------------------------------------------------------- 1 | large_files: 2 | - path: "models/search_code_18590.pt" 3 | url: "https://drive.google.com/uc?export=download&id=1G2YZo1HKkvf5sMvaaRQeZp_YA9GDlvL-" 4 | hash_val: "55dc7fe4bee4a93d25a1dd329dfdd159" 5 | hash_type: "md5" 6 | - path: "models/model.pt" 7 | url: "https://drive.google.com/uc?export=download&id=1kH0yTyiXUNqdYXpnSXI2p5-vFwDYoCzl" 8 | hash_val: "e21d663b33d29a8ca4e28d60a14a3d66" 9 | hash_type: "md5" 
10 | - path: "models/model.ts" 11 | url: "https://drive.google.com/uc?export=download&id=1qwV99IfYvLzpjsHgfrlciqDp8yQ5uLXs" 12 | hash_val: "7c08cb505b719914015d9fae746bea7c" 13 | hash_type: "md5" 14 | -------------------------------------------------------------------------------- /models/pancreas_ct_dints_segmentation/configs/evaluate.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | validate#postprocessing: 3 | _target_: Compose 4 | transforms: 5 | - _target_: Activationsd 6 | keys: pred 7 | softmax: true 8 | - _target_: Invertd 9 | keys: 10 | - pred 11 | - label 12 | transform: "@validate#preprocessing" 13 | orig_keys: image 14 | meta_key_postfix: meta_dict 15 | nearest_interp: 16 | - false 17 | - true 18 | to_tensor: true 19 | - _target_: AsDiscreted 20 | keys: 21 | - pred 22 | - label 23 | argmax: 24 | - true 25 | - false 26 | to_onehot: 3 27 | - _target_: CopyItemsd 28 | keys: "pred" 29 | times: 1 30 | names: "pred_save" 31 | - _target_: AsDiscreted 32 | keys: 33 | - pred_save 34 | argmax: 35 | - true 36 | - _target_: SaveImaged 37 | keys: pred_save 38 | meta_keys: pred_meta_dict 39 | output_dir: "@output_dir" 40 | resample: false 41 | squeeze_end_dims: true 42 | validate#dataset: 43 | _target_: Dataset 44 | data: "@val_datalist" 45 | transform: "@validate#preprocessing" 46 | validate#handlers: 47 | - _target_: CheckpointLoader 48 | load_path: "$@ckpt_dir + '/model.pt'" 49 | load_dict: 50 | model: "@network" 51 | - _target_: StatsHandler 52 | iteration_log: false 53 | - _target_: MetricsSaver 54 | save_dir: "@output_dir" 55 | metrics: 56 | - val_mean_dice 57 | - val_acc 58 | metric_details: 59 | - val_mean_dice 60 | batch_transform: "$monai.handlers.from_engine(['image_meta_dict'])" 61 | summary_ops: "*" 62 | initialize: 63 | - "$setattr(torch.backends.cudnn, 'benchmark', True)" 64 | run: 65 | - "$@validate#evaluator.run()" 66 | -------------------------------------------------------------------------------- 
/models/pancreas_ct_dints_segmentation/configs/inference_trt.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | imports: 3 | - "$import glob" 4 | - "$import os" 5 | - "$import torch_tensorrt" 6 | network_def: "$torch.jit.load(@bundle_root + '/models/model_trt.ts')" 7 | evaluator#amp: false 8 | initialize: 9 | - "$setattr(torch.backends.cudnn, 'benchmark', True)" 10 | -------------------------------------------------------------------------------- /models/pancreas_ct_dints_segmentation/configs/logging.conf: -------------------------------------------------------------------------------- 1 | [loggers] 2 | keys=root 3 | 4 | [handlers] 5 | keys=consoleHandler 6 | 7 | [formatters] 8 | keys=fullFormatter 9 | 10 | [logger_root] 11 | level=INFO 12 | handlers=consoleHandler 13 | 14 | [handler_consoleHandler] 15 | class=StreamHandler 16 | level=INFO 17 | formatter=fullFormatter 18 | args=(sys.stdout,) 19 | 20 | [formatter_fullFormatter] 21 | format=%(asctime)s - %(name)s - %(levelname)s - %(message)s 22 | -------------------------------------------------------------------------------- /models/pancreas_ct_dints_segmentation/docs/data_license.txt: -------------------------------------------------------------------------------- 1 | Third Party Licenses 2 | ----------------------------------------------------------------------- 3 | 4 | /*********************************************************************/ 5 | i. 
Medical Segmentation Decathlon 6 | http://medicaldecathlon.com/ 7 | -------------------------------------------------------------------------------- /models/pancreas_ct_dints_segmentation/large_files.yml: -------------------------------------------------------------------------------- 1 | large_files: 2 | - path: "models/search_code_18590.pt" 3 | url: "https://developer.download.nvidia.com/assets/Clara/monai/tutorials/model_zoo/model_pancreas_ct_dints_segmentation_search_code.pt" 4 | hash_val: "2ff53724aa9d80d9025034d36785cdb8" 5 | hash_type: "md5" 6 | - path: "models/model.pt" 7 | url: "https://developer.download.nvidia.com/assets/Clara/monai/tutorials/model_zoo/model_pancreas_ct_dints_segmentation.pt" 8 | hash_val: "ce8531b5143b417f65d5b481f72bc911" 9 | hash_type: "md5" 10 | - path: "models/model.ts" 11 | url: "https://developer.download.nvidia.com/assets/Clara/monai/tutorials/model_zoo/model_pancreas_ct_dints_segmentation.ts" 12 | hash_val: "b42221639099a5548c1a0e27772252ec" 13 | hash_type: "md5" 14 | -------------------------------------------------------------------------------- /models/pancreas_ct_dints_segmentation/scripts/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) MONAI Consortium 2 | # Licensed under the Apache License, Version 2.0 (the "License"); 3 | # you may not use this file except in compliance with the License. 4 | # You may obtain a copy of the License at 5 | # http://www.apache.org/licenses/LICENSE-2.0 6 | # Unless required by applicable law or agreed to in writing, software 7 | # distributed under the License is distributed on an "AS IS" BASIS, 8 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 | # See the License for the specific language governing permissions and 10 | # limitations under the License. 
11 | -------------------------------------------------------------------------------- /models/pathology_nuclei_classification/configs/inference_trt.json: -------------------------------------------------------------------------------- 1 | { 2 | "+imports": [ 3 | "$from monai.networks import trt_compile" 4 | ], 5 | "network": "$trt_compile(@network_def.to(@device), @checkpoint)" 6 | } 7 | -------------------------------------------------------------------------------- /models/pathology_nuclei_classification/configs/logging.conf: -------------------------------------------------------------------------------- 1 | [loggers] 2 | keys=root 3 | 4 | [handlers] 5 | keys=consoleHandler 6 | 7 | [formatters] 8 | keys=fullFormatter 9 | 10 | [logger_root] 11 | level=INFO 12 | handlers=consoleHandler 13 | 14 | [handler_consoleHandler] 15 | class=StreamHandler 16 | level=INFO 17 | formatter=fullFormatter 18 | args=(sys.stdout,) 19 | 20 | [formatter_fullFormatter] 21 | format=%(asctime)s - %(name)s - %(levelname)s - %(message)s 22 | -------------------------------------------------------------------------------- /models/pathology_nuclei_classification/configs/multi_gpu_evaluate.json: -------------------------------------------------------------------------------- 1 | { 2 | "device": "$torch.device('cuda:' + os.environ['LOCAL_RANK'])", 3 | "network": { 4 | "_target_": "torch.nn.parallel.DistributedDataParallel", 5 | "module": "$@network_def.to(@device)", 6 | "device_ids": [ 7 | "@device" 8 | ] 9 | }, 10 | "validate#sampler": { 11 | "_target_": "DistributedSampler", 12 | "dataset": "@validate#dataset", 13 | "even_divisible": false, 14 | "shuffle": false 15 | }, 16 | "validate#dataloader#sampler": "@validate#sampler", 17 | "validate#handlers#1#_disabled_": "$dist.get_rank() > 0", 18 | "initialize": [ 19 | "$import sys", 20 | "$sys.path.append(@bundle_root)", 21 | "$import torch.distributed as dist", 22 | "$dist.is_initialized() or dist.init_process_group(backend='nccl')", 23 | 
"$torch.cuda.set_device(@device)", 24 | "$monai.utils.set_determinism(seed=123)", 25 | "$import logging", 26 | "$@validate#evaluator.logger.setLevel(logging.WARNING if dist.get_rank() > 0 else logging.INFO)", 27 | "$import scripts", 28 | "$monai.data.register_writer('json', scripts.ClassificationWriter)" 29 | ], 30 | "run": [ 31 | "$@validate#evaluator.run()" 32 | ], 33 | "finalize": [ 34 | "$dist.is_initialized() and dist.destroy_process_group()" 35 | ] 36 | } 37 | -------------------------------------------------------------------------------- /models/pathology_nuclei_classification/configs/multi_gpu_train.json: -------------------------------------------------------------------------------- 1 | { 2 | "device": "$torch.device('cuda:' + os.environ['LOCAL_RANK'])", 3 | "network": { 4 | "_target_": "torch.nn.parallel.DistributedDataParallel", 5 | "module": "$@network_def.to(@device)", 6 | "device_ids": [ 7 | "@device" 8 | ] 9 | }, 10 | "train#sampler": { 11 | "_target_": "DistributedSampler", 12 | "dataset": "@train#dataset", 13 | "even_divisible": true, 14 | "shuffle": true 15 | }, 16 | "train#dataloader#sampler": "@train#sampler", 17 | "train#dataloader#shuffle": false, 18 | "train#trainer#train_handlers": "$@train#handlers[: -2 if dist.get_rank() > 0 else None]", 19 | "validate#sampler": { 20 | "_target_": "DistributedSampler", 21 | "dataset": "@validate#dataset", 22 | "even_divisible": false, 23 | "shuffle": false 24 | }, 25 | "validate#dataloader#sampler": "@validate#sampler", 26 | "validate#evaluator#val_handlers": "$None if dist.get_rank() > 0 else @validate#handlers", 27 | "initialize": [ 28 | "$import sys", 29 | "$sys.path.append(@bundle_root)", 30 | "$import torch.distributed as dist", 31 | "$dist.is_initialized() or dist.init_process_group(backend='nccl')", 32 | "$torch.cuda.set_device(@device)", 33 | "$monai.utils.set_determinism(seed=123)", 34 | "$import logging", 35 | "$@train#trainer.logger.setLevel(logging.WARNING if dist.get_rank() > 0 else 
logging.INFO)", 36 | "$@validate#evaluator.logger.setLevel(logging.WARNING if dist.get_rank() > 0 else logging.INFO)" 37 | ], 38 | "run": [ 39 | "$@train#trainer.run()" 40 | ], 41 | "finalize": [ 42 | "$dist.is_initialized() and dist.destroy_process_group()" 43 | ] 44 | } 45 | -------------------------------------------------------------------------------- /models/pathology_nuclei_classification/docs/data_license.txt: -------------------------------------------------------------------------------- 1 | Third Party Licenses 2 | ----------------------------------------------------------------------- 3 | 4 | /*********************************************************************/ 5 | i. HoVer-Net: Simultaneous Segmentation and Classification of Nuclei in Multi-Tissue Histology Images 6 | https://warwick.ac.uk/fac/cross_fac/tia/data/hovernet/ 7 | -------------------------------------------------------------------------------- /models/pathology_nuclei_classification/large_files.yml: -------------------------------------------------------------------------------- 1 | large_files: 2 | - path: "models/model.pt" 3 | url: "https://developer.download.nvidia.com/assets/Clara/monai/tutorials/model_zoo/model_pathology_nuclei_classification.pt" 4 | hash_val: "066c6ef8739c4d86e167561b9ad8524d" 5 | hash_type: "md5" 6 | - path: "models/model.ts" 7 | url: "https://developer.download.nvidia.com/assets/Clara/monai/tutorials/model_zoo/model_pathology_nuclei_classification.ts" 8 | hash_val: "e6aceee58f55abafd0125b3dd6a6c1b8" 9 | hash_type: "md5" 10 | -------------------------------------------------------------------------------- /models/pathology_nuclei_classification/scripts/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) MONAI Consortium 2 | # Licensed under the Apache License, Version 2.0 (the "License"); 3 | # you may not use this file except in compliance with the License. 
4 | # You may obtain a copy of the License at 5 | # http://www.apache.org/licenses/LICENSE-2.0 6 | # Unless required by applicable law or agreed to in writing, software 7 | # distributed under the License is distributed on an "AS IS" BASIS, 8 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 | # See the License for the specific language governing permissions and 10 | # limitations under the License. 11 | 12 | from .handlers import TensorBoardImageHandler 13 | from .writer import ClassificationWriter 14 | -------------------------------------------------------------------------------- /models/pathology_nuclei_segmentation_classification/configs/inference_trt.json: -------------------------------------------------------------------------------- 1 | { 2 | "+imports": [ 3 | "$from monai.networks import trt_compile" 4 | ], 5 | "trt_args": { 6 | "output_names": "$@sub_keys", 7 | "dynamic_batchsize": "$[1, @sw_batch_size, @sw_batch_size]" 8 | }, 9 | "network": "$trt_compile(@network_def.to(@device), @bundle_root + '/models/model.pt', args=@trt_args)" 10 | } 11 | -------------------------------------------------------------------------------- /models/pathology_nuclei_segmentation_classification/configs/logging.conf: -------------------------------------------------------------------------------- 1 | [loggers] 2 | keys=root 3 | 4 | [handlers] 5 | keys=consoleHandler 6 | 7 | [formatters] 8 | keys=fullFormatter 9 | 10 | [logger_root] 11 | level=INFO 12 | handlers=consoleHandler 13 | 14 | [handler_consoleHandler] 15 | class=StreamHandler 16 | level=INFO 17 | formatter=fullFormatter 18 | args=(sys.stdout,) 19 | 20 | [formatter_fullFormatter] 21 | format=%(asctime)s - %(name)s - %(levelname)s - %(message)s 22 | -------------------------------------------------------------------------------- /models/pathology_nuclei_segmentation_classification/configs/multi_gpu_train.json: 
-------------------------------------------------------------------------------- 1 | { 2 | "device": "$torch.device('cuda:' + os.environ['LOCAL_RANK'])", 3 | "network": { 4 | "_target_": "torch.nn.parallel.DistributedDataParallel", 5 | "module": "$@network_def.to(@device)", 6 | "device_ids": [ 7 | "@device" 8 | ] 9 | }, 10 | "train#sampler": { 11 | "_target_": "DistributedSampler", 12 | "dataset": "@train#dataset", 13 | "even_divisible": true, 14 | "shuffle": true 15 | }, 16 | "train#dataloader#sampler": "@train#sampler", 17 | "train#dataloader#shuffle": false, 18 | "train#trainer#train_handlers": "$@train#train_handlers[: -3 if dist.get_rank() > 0 else None]", 19 | "validate#sampler": { 20 | "_target_": "DistributedSampler", 21 | "dataset": "@validate#dataset", 22 | "even_divisible": false, 23 | "shuffle": false 24 | }, 25 | "validate#dataloader#sampler": "@validate#sampler", 26 | "validate#evaluator#val_handlers": "$None if dist.get_rank() > 0 else @validate#handlers", 27 | "initialize": [ 28 | "$import torch.distributed as dist", 29 | "$dist.is_initialized() or dist.init_process_group(backend='nccl')", 30 | "$torch.cuda.set_device(@device)", 31 | "$monai.utils.set_determinism(seed=321)", 32 | "$setattr(torch.backends.cudnn, 'benchmark', True)" 33 | ], 34 | "run": [ 35 | "$@train#trainer.run()" 36 | ], 37 | "finalize": [ 38 | "$dist.is_initialized() and dist.destroy_process_group()" 39 | ] 40 | } 41 | -------------------------------------------------------------------------------- /models/pathology_nuclei_segmentation_classification/docs/data_license.txt: -------------------------------------------------------------------------------- 1 | Third Party Licenses 2 | ----------------------------------------------------------------------- 3 | 4 | /*********************************************************************/ 5 | i. 
CoNSeP dataset 6 | https://warwick.ac.uk/fac/cross_fac/tia/data/hovernet/ 7 | -------------------------------------------------------------------------------- /models/pathology_nuclei_segmentation_classification/large_files.yml: -------------------------------------------------------------------------------- 1 | large_files: 2 | - path: "models/model.pt" 3 | url: "https://developer.download.nvidia.com/assets/Clara/monai/tutorials/model_zoo/model_pathology_nuclei_segmentation_classification_v2.pt" 4 | hash_val: "0d704f4b938fec725e7fcc9ae5ea6aca" 5 | hash_type: "md5" 6 | - path: "models/stage0/model.pt" 7 | url: "https://developer.download.nvidia.com/assets/Clara/monai/tutorials/model_zoo/model_pathology_nuclei_segmentation_classification_v2_stage0.pt" 8 | hash_val: "50ae2aca76816c767a304269bb0bc003" 9 | hash_type: "md5" 10 | -------------------------------------------------------------------------------- /models/pathology_nuclick_annotation/configs/evaluate.json: -------------------------------------------------------------------------------- 1 | { 2 | "validate#dataset#cache_rate": 0, 3 | "validate#postprocessing": { 4 | "_target_": "Compose", 5 | "transforms": [ 6 | { 7 | "_target_": "Activationsd", 8 | "keys": "pred", 9 | "sigmoid": true 10 | }, 11 | { 12 | "_target_": "AsDiscreted", 13 | "keys": "pred", 14 | "threshold": 0.5 15 | }, 16 | { 17 | "_target_": "SaveImaged", 18 | "_disabled_": true, 19 | "keys": "pred", 20 | "meta_keys": "pred_meta_dict", 21 | "output_dir": "@output_dir", 22 | "output_ext": ".png" 23 | } 24 | ] 25 | }, 26 | "validate#handlers": [ 27 | { 28 | "_target_": "CheckpointLoader", 29 | "load_path": "$@ckpt_dir + '/model.pt'", 30 | "load_dict": { 31 | "model": "@network" 32 | } 33 | }, 34 | { 35 | "_target_": "StatsHandler", 36 | "iteration_log": false 37 | }, 38 | { 39 | "_target_": "MetricsSaver", 40 | "save_dir": "@output_dir", 41 | "metrics": [ 42 | "val_mean_dice", 43 | "val_accuracy" 44 | ], 45 | "metric_details": [ 46 | "val_mean_dice" 
47 | ], 48 | "batch_transform": "$monai.handlers.from_engine(['image_meta_dict'])", 49 | "summary_ops": "*" 50 | } 51 | ], 52 | "run": [ 53 | "$@validate#evaluator.run()" 54 | ] 55 | } 56 | -------------------------------------------------------------------------------- /models/pathology_nuclick_annotation/configs/inference_trt.json: -------------------------------------------------------------------------------- 1 | { 2 | "imports": [ 3 | "$import glob", 4 | "$import json", 5 | "$import pathlib", 6 | "$import os", 7 | "$import torch_tensorrt" 8 | ], 9 | "network_def": "$torch.jit.load(@bundle_root + '/models/model_trt.ts')", 10 | "evaluator#amp": false, 11 | "initialize": [ 12 | "$import sys", 13 | "$sys.path.append(@bundle_root)", 14 | "$monai.utils.set_determinism(seed=123)" 15 | ] 16 | } 17 | -------------------------------------------------------------------------------- /models/pathology_nuclick_annotation/configs/logging.conf: -------------------------------------------------------------------------------- 1 | [loggers] 2 | keys=root 3 | 4 | [handlers] 5 | keys=consoleHandler 6 | 7 | [formatters] 8 | keys=fullFormatter 9 | 10 | [logger_root] 11 | level=INFO 12 | handlers=consoleHandler 13 | 14 | [handler_consoleHandler] 15 | class=StreamHandler 16 | level=INFO 17 | formatter=fullFormatter 18 | args=(sys.stdout,) 19 | 20 | [formatter_fullFormatter] 21 | format=%(asctime)s - %(name)s - %(levelname)s - %(message)s 22 | -------------------------------------------------------------------------------- /models/pathology_nuclick_annotation/configs/multi_gpu_evaluate.json: -------------------------------------------------------------------------------- 1 | { 2 | "device": "$torch.device('cuda:' + os.environ['LOCAL_RANK'])", 3 | "network": { 4 | "_target_": "torch.nn.parallel.DistributedDataParallel", 5 | "module": "$@network_def.to(@device)", 6 | "device_ids": [ 7 | "@device" 8 | ] 9 | }, 10 | "validate#sampler": { 11 | "_target_": "DistributedSampler", 12 | 
"dataset": "@validate#dataset", 13 | "even_divisible": false, 14 | "shuffle": false 15 | }, 16 | "validate#dataloader#sampler": "@validate#sampler", 17 | "validate#handlers#1#_disabled_": "$dist.get_rank() > 0", 18 | "initialize": [ 19 | "$import sys", 20 | "$sys.path.append(@bundle_root)", 21 | "$import torch.distributed as dist", 22 | "$dist.is_initialized() or dist.init_process_group(backend='nccl')", 23 | "$torch.cuda.set_device(@device)", 24 | "$monai.utils.set_determinism(seed=123)", 25 | "$import logging", 26 | "$@validate#evaluator.logger.setLevel(logging.WARNING if dist.get_rank() > 0 else logging.INFO)" 27 | ], 28 | "run": [ 29 | "$@validate#evaluator.run()" 30 | ], 31 | "finalize": [ 32 | "$dist.is_initialized() and dist.destroy_process_group()" 33 | ] 34 | } 35 | -------------------------------------------------------------------------------- /models/pathology_nuclick_annotation/configs/multi_gpu_train.json: -------------------------------------------------------------------------------- 1 | { 2 | "device": "$torch.device('cuda:' + os.environ['LOCAL_RANK'])", 3 | "network": { 4 | "_target_": "torch.nn.parallel.DistributedDataParallel", 5 | "module": "$@network_def.to(@device)", 6 | "device_ids": [ 7 | "@device" 8 | ] 9 | }, 10 | "train#sampler": { 11 | "_target_": "DistributedSampler", 12 | "dataset": "@train#dataset", 13 | "even_divisible": true, 14 | "shuffle": true 15 | }, 16 | "train#dataloader#sampler": "@train#sampler", 17 | "train#dataloader#shuffle": false, 18 | "train#trainer#train_handlers": "$@train#handlers[: -2 if dist.get_rank() > 0 else None]", 19 | "validate#sampler": { 20 | "_target_": "DistributedSampler", 21 | "dataset": "@validate#dataset", 22 | "even_divisible": false, 23 | "shuffle": false 24 | }, 25 | "validate#dataloader#sampler": "@validate#sampler", 26 | "validate#evaluator#val_handlers": "$None if dist.get_rank() > 0 else @validate#handlers", 27 | "initialize": [ 28 | "$import sys", 29 | "$sys.path.append(@bundle_root)", 30 | 
"$import torch.distributed as dist", 31 | "$dist.is_initialized() or dist.init_process_group(backend='nccl')", 32 | "$torch.cuda.set_device(@device)", 33 | "$monai.utils.set_determinism(seed=123)", 34 | "$import logging", 35 | "$@train#trainer.logger.setLevel(logging.WARNING if dist.get_rank() > 0 else logging.INFO)", 36 | "$@validate#evaluator.logger.setLevel(logging.WARNING if dist.get_rank() > 0 else logging.INFO)" 37 | ], 38 | "run": [ 39 | "$@train#trainer.run()" 40 | ], 41 | "finalize": [ 42 | "$dist.is_initialized() and dist.destroy_process_group()" 43 | ] 44 | } 45 | -------------------------------------------------------------------------------- /models/pathology_nuclick_annotation/docs/data_license.txt: -------------------------------------------------------------------------------- 1 | Third Party Licenses 2 | ----------------------------------------------------------------------- 3 | 4 | /*********************************************************************/ 5 | i. HoVer-Net: Simultaneous Segmentation and Classification of Nuclei in Multi-Tissue Histology Images 6 | https://warwick.ac.uk/fac/cross_fac/tia/data/hovernet/ 7 | -------------------------------------------------------------------------------- /models/pathology_nuclick_annotation/large_files.yml: -------------------------------------------------------------------------------- 1 | large_files: 2 | - path: "models/model.pt" 3 | url: "https://developer.download.nvidia.com/assets/Clara/monai/tutorials/model_zoo/model_pathology_nuclick_annotation.pt" 4 | hash_val: "" 5 | hash_type: "" 6 | - path: "models/model.ts" 7 | url: "https://developer.download.nvidia.com/assets/Clara/monai/tutorials/model_zoo/model_pathology_nuclick_annotation.ts" 8 | hash_val: "" 9 | hash_type: "" 10 | -------------------------------------------------------------------------------- /models/pathology_nuclick_annotation/scripts/__init__.py: -------------------------------------------------------------------------------- 1 | # 
Copyright (c) MONAI Consortium 2 | # Licensed under the Apache License, Version 2.0 (the "License"); 3 | # you may not use this file except in compliance with the License. 4 | # You may obtain a copy of the License at 5 | # http://www.apache.org/licenses/LICENSE-2.0 6 | # Unless required by applicable law or agreed to in writing, software 7 | # distributed under the License is distributed on an "AS IS" BASIS, 8 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 | # See the License for the specific language governing permissions and 10 | # limitations under the License. 11 | 12 | from .handlers import TensorBoardImageHandler 13 | -------------------------------------------------------------------------------- /models/pathology_tumor_detection/configs/inference_trt.json: -------------------------------------------------------------------------------- 1 | { 2 | "imports": [ 3 | "$import glob", 4 | "$import os", 5 | "$import torch_tensorrt" 6 | ], 7 | "handlers#0#_disabled_": true, 8 | "network_def": "$torch.jit.load(@bundle_root + '/models/model_trt.ts')", 9 | "evaluator#amp": false 10 | } 11 | -------------------------------------------------------------------------------- /models/pathology_tumor_detection/configs/logging.conf: -------------------------------------------------------------------------------- 1 | [loggers] 2 | keys=root 3 | 4 | [handlers] 5 | keys=consoleHandler 6 | 7 | [formatters] 8 | keys=fullFormatter 9 | 10 | [logger_root] 11 | level=INFO 12 | handlers=consoleHandler 13 | 14 | [handler_consoleHandler] 15 | class=StreamHandler 16 | level=INFO 17 | formatter=fullFormatter 18 | args=(sys.stdout,) 19 | 20 | [formatter_fullFormatter] 21 | format=%(asctime)s - %(name)s - %(levelname)s - %(message)s 22 | -------------------------------------------------------------------------------- /models/pathology_tumor_detection/configs/multi_gpu_train.json: -------------------------------------------------------------------------------- 
1 | { 2 | "device": "$torch.device('cuda:' + os.environ['LOCAL_RANK'])", 3 | "network": { 4 | "_target_": "torch.nn.parallel.DistributedDataParallel", 5 | "module": "$@network_def.to(@device)", 6 | "device_ids": [ 7 | "@device" 8 | ] 9 | }, 10 | "train#sampler": { 11 | "_target_": "DistributedSampler", 12 | "dataset": "@train#dataset", 13 | "even_divisible": true, 14 | "shuffle": true 15 | }, 16 | "train#dataloader#sampler": "@train#sampler", 17 | "train#dataloader#shuffle": false, 18 | "train#trainer#train_handlers": "$@train#handlers[: -2 if dist.get_rank() > 0 else None]", 19 | "validate#sampler": { 20 | "_target_": "DistributedSampler", 21 | "dataset": "@validate#dataset", 22 | "even_divisible": false, 23 | "shuffle": false 24 | }, 25 | "validate#dataloader#sampler": "@validate#sampler", 26 | "validate#evaluator#val_handlers": "$None if dist.get_rank() > 0 else @validate#handlers", 27 | "initialize": [ 28 | "$import torch.distributed as dist", 29 | "$dist.is_initialized() or dist.init_process_group(backend='nccl')", 30 | "$torch.cuda.set_device(@device)", 31 | "$monai.utils.set_determinism(seed=123)", 32 | "$import logging", 33 | "$@train#trainer.logger.setLevel(logging.WARNING if dist.get_rank() > 0 else logging.INFO)", 34 | "$@validate#evaluator.logger.setLevel(logging.WARNING if dist.get_rank() > 0 else logging.INFO)" 35 | ], 36 | "run": [ 37 | "$@train#trainer.run()" 38 | ], 39 | "finalize": [ 40 | "$dist.destroy_process_group()" 41 | ] 42 | } 43 | -------------------------------------------------------------------------------- /models/pathology_tumor_detection/large_files.yml: -------------------------------------------------------------------------------- 1 | large_files: 2 | - path: "models/model.pt" 3 | url: "https://developer.download.nvidia.com/assets/Clara/monai/tutorials/model_zoo/model_pathology_tumor_detection_v1.pt" 4 | hash_val: "56b78ef17fd5e693a818dd947f6f232a" 5 | hash_type: "md5" 6 | - path: "training.csv" 7 | url: 
"https://developer.download.nvidia.com/assets/Clara/monai/tutorials/model_zoo/model_pathology_tumor_detection_training.csv" 8 | hash_val: "6e6696a5fb3cb76f15ad0d64d867e457" 9 | hash_type: "md5" 10 | - path: "validation.csv" 11 | url: "https://developer.download.nvidia.com/assets/Clara/monai/tutorials/model_zoo/model_pathology_tumor_detection_validation.csv" 12 | hash_val: "dbfa883358fdd931cee5e887047dbb0a" 13 | hash_type: "md5" 14 | - path: "testing.csv" 15 | url: "https://developer.download.nvidia.com/assets/Clara/monai/tutorials/model_zoo/model_pathology_tumor_detection_test.csv" 16 | hash_val: "b6f57855f6d7c38c6e4c859e8df60dd8" 17 | hash_type: "md5" 18 | -------------------------------------------------------------------------------- /models/pathology_tumor_detection/scripts/evaluate_froc.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | LEVEL=6 4 | SPACING=0.243 5 | READER=openslide 6 | EVAL_DIR=../eval 7 | GROUND_TRUTH_DIR=/workspace/data/medical/pathology/testing/ground_truths 8 | 9 | echo "=> Level= ${LEVEL}" 10 | echo "=> Spacing = ${SPACING}" 11 | echo "=> WSI Reader: ${READER}" 12 | echo "=> Evaluation output directory: ${EVAL_DIR}" 13 | echo "=> Ground truth directory: ${GROUND_TRUTH_DIR}" 14 | 15 | python3 ./lesion_froc.py \ 16 | --level $LEVEL \ 17 | --spacing $SPACING \ 18 | --reader $READER \ 19 | --eval-dir ${EVAL_DIR} \ 20 | --ground-truth-dir ${GROUND_TRUTH_DIR} 21 | -------------------------------------------------------------------------------- /models/pediatric_abdominal_ct_segmentation/configs/evaluate-standalone-parallel.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | imports: 3 | - "$import glob" 4 | - "$import json" 5 | - "$import os" 6 | - "$from scripts.compute_metric import compute_abdominal_ct_metrics" 7 | - "$from scripts.compute_metric import compute" 8 | workflow_type: evaluate 9 | spatial_dims: "$len(@spatial_size)" 
10 | bundle_root: "." 11 | output_dir: "$@bundle_root + '/eval/dynunet_FT_trt_32'" 12 | dataset_dir: "/processed/Public/CT_TotalSegmentator/TS_split/test/" 13 | data_list_file_path: "$@bundle_root + '/configs/TS_test.json'" 14 | datalist: "$monai.data.load_decathlon_datalist(@data_list_file_path, data_list_key='validation')" 15 | datalist_pred: "$[{**d, 'pred': os.path.join(@output_dir, d['label'].split('/')[-1].split('.')[0] + '_trans.nii.gz')} for d in @datalist]" 16 | run: 17 | #- "$compute_abdominal_ct_metrics(@datalist_pred, @output_dir)" 18 | - "$compute(@datalist_pred, @output_dir)" 19 | -------------------------------------------------------------------------------- /models/pediatric_abdominal_ct_segmentation/configs/evaluate-standalone.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | imports: 3 | - "$import glob" 4 | - "$import json" 5 | - "$import os" 6 | - "$from scripts.compute_metric import compute_abdominal_ct_metrics" 7 | - "$from scripts.compute_metric import compute" 8 | workflow_type: evaluate 9 | spatial_dims: "$len(@spatial_size)" 10 | bundle_root: "." 
11 | output_dir: "$@bundle_root + '/eval/dynunet_FT_trt_32'" 12 | dataset_dir: "/processed/Public/CT_TotalSegmentator/TS_split/test/" 13 | data_list_file_path: "$@bundle_root + '/configs/TS_test.json'" 14 | datalist: "$monai.data.load_decathlon_datalist(@data_list_file_path, data_list_key='validation')" 15 | datalist_pred: "$[{**d, 'pred': os.path.join(@output_dir, d['label'].split('/')[-1].split('.')[0] + '_trans.nii.gz')} for d in @datalist]" 16 | run: 17 | - "$compute_single_node(@datalist_pred, @output_dir)" 18 | -------------------------------------------------------------------------------- /models/pediatric_abdominal_ct_segmentation/configs/evaluate.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | validate#postprocessing: 3 | _target_: Compose 4 | transforms: 5 | - _target_: Activationsd 6 | keys: pred 7 | softmax: true 8 | - _target_: Invertd 9 | keys: 10 | - pred 11 | - label 12 | transform: "@validate#preprocessing" 13 | orig_keys: image 14 | meta_key_postfix: meta_dict 15 | nearest_interp: 16 | - false 17 | - true 18 | to_tensor: true 19 | - _target_: AsDiscreted 20 | keys: 21 | - pred 22 | - label 23 | argmax: 24 | - true 25 | - false 26 | to_onehot: 4 27 | - _target_: CopyItemsd 28 | keys: "pred" 29 | times: 1 30 | names: "pred_save" 31 | - _target_: AsDiscreted 32 | keys: 33 | - pred_save 34 | argmax: 35 | - true 36 | - _target_: SaveImaged 37 | keys: pred_save 38 | meta_keys: pred_meta_dict 39 | output_dir: "@output_dir" 40 | resample: false 41 | squeeze_end_dims: true 42 | validate#dataset: 43 | _target_: Dataset 44 | data: "@val_datalist" 45 | transform: "@validate#preprocessing" 46 | validate#handlers: 47 | - _target_: CheckpointLoader 48 | load_path: "$@ckpt_dir + '/dynunet_FT.pt'" 49 | load_dict: 50 | model: "@network" 51 | - _target_: StatsHandler 52 | iteration_log: false 53 | - _target_: MetricsSaver 54 | save_dir: "@output_dir" 55 | metrics: val_dice 56 | metric_details: 57 | - val_dice 58 | 
#batch_transform: "$monai.handlers.from_engine(['image_meta_dict'])" 59 | summary_ops: "*" 60 | initialize: 61 | - "$setattr(torch.backends.cudnn, 'benchmark', True)" 62 | run: 63 | - "$@validate#evaluator.run()" 64 | -------------------------------------------------------------------------------- /models/pediatric_abdominal_ct_segmentation/configs/inference_trt.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | imports: 3 | - "$import glob" 4 | - "$import os" 5 | - "$import torch_tensorrt" 6 | handlers#0#_disabled_: true 7 | network_def: "$torch.jit.load(@bundle_root + '/models/A100/dynunet_FT_trt_16.ts')" 8 | evaluator#amp: false 9 | -------------------------------------------------------------------------------- /models/pediatric_abdominal_ct_segmentation/configs/logging.conf: -------------------------------------------------------------------------------- 1 | [loggers] 2 | keys=root 3 | 4 | [handlers] 5 | keys=consoleHandler 6 | 7 | [formatters] 8 | keys=fullFormatter 9 | 10 | [logger_root] 11 | level=INFO 12 | handlers=consoleHandler 13 | 14 | [handler_consoleHandler] 15 | class=StreamHandler 16 | level=INFO 17 | formatter=fullFormatter 18 | args=(sys.stdout,) 19 | 20 | [formatter_fullFormatter] 21 | format=%(asctime)s - %(name)s - %(levelname)s - %(message)s 22 | -------------------------------------------------------------------------------- /models/pediatric_abdominal_ct_segmentation/large_files.yaml: -------------------------------------------------------------------------------- 1 | large_files: 2 | - path: "models/dynunet_FT.pt" 3 | url: "https://drive.google.com/file/d/1bR9iQjc5Qo_yD-XN6ZYf3eiXhX9SEIyM/view?usp=sharing" 4 | hash_val: "" 5 | hash_type: "" 6 | - path: "models/A100/dynunet_FT_trt_16.ts" 7 | url: "https://drive.google.com/file/d/1k44aoW-bXAuG5i9t2nw1qWTidcDsfelQ/view?usp=sharing" 8 | -------------------------------------------------------------------------------- 
/models/pediatric_abdominal_ct_segmentation/scripts/__init__.py: -------------------------------------------------------------------------------- 1 | import scripts.utils 2 | -------------------------------------------------------------------------------- /models/pediatric_abdominal_ct_segmentation/scripts/utils.py: -------------------------------------------------------------------------------- 1 | def test(): 2 | print("Test Function PRINTS") 3 | -------------------------------------------------------------------------------- /models/prostate_mri_anatomy/configs/logging.conf: -------------------------------------------------------------------------------- 1 | [loggers] 2 | keys=root 3 | 4 | [handlers] 5 | keys=consoleHandler 6 | 7 | [formatters] 8 | keys=fullFormatter 9 | 10 | [logger_root] 11 | level=INFO 12 | handlers=consoleHandler 13 | 14 | [handler_consoleHandler] 15 | class=StreamHandler 16 | level=INFO 17 | formatter=fullFormatter 18 | args=(sys.stdout,) 19 | 20 | [formatter_fullFormatter] 21 | format=%(asctime)s - %(name)s - %(levelname)s - %(message)s 22 | -------------------------------------------------------------------------------- /models/prostate_mri_anatomy/large_files.yaml: -------------------------------------------------------------------------------- 1 | large_files: 2 | - path: "models/model.pt" 3 | url: "https://zenodo.org/record/7040585/files/anatomy.pt?download=1" 4 | - path: "models/model.ts" 5 | url: "https://zenodo.org/record/7040585/files/anatomy.ts?download=1" 6 | -------------------------------------------------------------------------------- /models/renalStructures_CECT_segmentation/configs/logging.conf: -------------------------------------------------------------------------------- 1 | [loggers] 2 | keys=root 3 | 4 | [handlers] 5 | keys=consoleHandler 6 | 7 | [formatters] 8 | keys=fullFormatter 9 | 10 | [logger_root] 11 | level=INFO 12 | handlers=consoleHandler 13 | 14 | [handler_consoleHandler] 15 | class=StreamHandler 16 | 
level=INFO 17 | formatter=fullFormatter 18 | args=(sys.stdout,) 19 | 20 | [formatter_fullFormatter] 21 | format=%(asctime)s - %(name)s - %(levelname)s - %(message)s 22 | -------------------------------------------------------------------------------- /models/renalStructures_CECT_segmentation/large_files.yml: -------------------------------------------------------------------------------- 1 | large_files: 2 | - path: "models/model.pt" 3 | url: "https://drive.google.com/file/d/1Hm9293ypfWk9-uWHlxuolyxVA_FxfkVQ/view?usp=sharing" 4 | hash_val: "164752169d98459d30592d72a3fa86f4" 5 | hash_type: "md5" 6 | -------------------------------------------------------------------------------- /models/renalStructures_CECT_segmentation/scripts/__init__.py: -------------------------------------------------------------------------------- 1 | from .download_data import download_cect_data 2 | from .my_transforms import ConcatImages 3 | -------------------------------------------------------------------------------- /models/renalStructures_UNEST_segmentation/configs/logging.conf: -------------------------------------------------------------------------------- 1 | [loggers] 2 | keys=root 3 | 4 | [handlers] 5 | keys=consoleHandler 6 | 7 | [formatters] 8 | keys=fullFormatter 9 | 10 | [logger_root] 11 | level=INFO 12 | handlers=consoleHandler 13 | 14 | [handler_consoleHandler] 15 | class=StreamHandler 16 | level=INFO 17 | formatter=fullFormatter 18 | args=(sys.stdout,) 19 | 20 | [formatter_fullFormatter] 21 | format=%(asctime)s - %(name)s - %(levelname)s - %(message)s 22 | -------------------------------------------------------------------------------- /models/renalStructures_UNEST_segmentation/configs/multi_gpu_train.json: -------------------------------------------------------------------------------- 1 | { 2 | "device": "$torch.device(f'cuda:{dist.get_rank()}')", 3 | "network": { 4 | "_target_": "torch.nn.parallel.DistributedDataParallel", 5 | "module": "$@network_def.to(@device)", 6 | 
"device_ids": [ 7 | "@device" 8 | ] 9 | }, 10 | "train#sampler": { 11 | "_target_": "DistributedSampler", 12 | "dataset": "@train#dataset", 13 | "even_divisible": true, 14 | "shuffle": true 15 | }, 16 | "train#dataloader#sampler": "@train#sampler", 17 | "train#dataloader#shuffle": false, 18 | "train#trainer#train_handlers": "$@train#handlers[: -2 if dist.get_rank() > 0 else None]", 19 | "validate#sampler": { 20 | "_target_": "DistributedSampler", 21 | "dataset": "@validate#dataset", 22 | "even_divisible": false, 23 | "shuffle": false 24 | }, 25 | "validate#dataloader#sampler": "@validate#sampler", 26 | "validate#evaluator#val_handlers": "$None if dist.get_rank() > 0 else @validate#handlers", 27 | "training": [ 28 | "$import torch.distributed as dist", 29 | "$dist.init_process_group(backend='nccl')", 30 | "$torch.cuda.set_device(@device)", 31 | "$monai.utils.set_determinism(seed=123)", 32 | "$setattr(torch.backends.cudnn, 'benchmark', True)", 33 | "$@train#trainer.run()", 34 | "$dist.destroy_process_group()" 35 | ] 36 | } 37 | -------------------------------------------------------------------------------- /models/renalStructures_UNEST_segmentation/docs/demos.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Project-MONAI/model-zoo/f3ae2588766a08a82f88bce1c345aeec4d4c4298/models/renalStructures_UNEST_segmentation/docs/demos.png -------------------------------------------------------------------------------- /models/renalStructures_UNEST_segmentation/docs/renal.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Project-MONAI/model-zoo/f3ae2588766a08a82f88bce1c345aeec4d4c4298/models/renalStructures_UNEST_segmentation/docs/renal.png -------------------------------------------------------------------------------- /models/renalStructures_UNEST_segmentation/docs/unest.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/Project-MONAI/model-zoo/f3ae2588766a08a82f88bce1c345aeec4d4c4298/models/renalStructures_UNEST_segmentation/docs/unest.png -------------------------------------------------------------------------------- /models/renalStructures_UNEST_segmentation/docs/val_dice.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Project-MONAI/model-zoo/f3ae2588766a08a82f88bce1c345aeec4d4c4298/models/renalStructures_UNEST_segmentation/docs/val_dice.png -------------------------------------------------------------------------------- /models/renalStructures_UNEST_segmentation/large_files.yml: -------------------------------------------------------------------------------- 1 | large_files: 2 | - path: "models/model.pt" 3 | url: "https://developer.download.nvidia.com/assets/Clara/monai/tutorials/model_zoo/model_renalstructure_unest_segmentation.pt" 4 | hash_val: "5a89c036d87b5eaba23275f209744e2a" 5 | hash_type: "md5" 6 | -------------------------------------------------------------------------------- /models/renalStructures_UNEST_segmentation/scripts/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) MONAI Consortium 2 | # Licensed under the Apache License, Version 2.0 (the "License"); 3 | # you may not use this file except in compliance with the License. 4 | # You may obtain a copy of the License at 5 | # http://www.apache.org/licenses/LICENSE-2.0 6 | # Unless required by applicable law or agreed to in writing, software 7 | # distributed under the License is distributed on an "AS IS" BASIS, 8 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 | # See the License for the specific language governing permissions and 10 | # limitations under the License. 
11 | -------------------------------------------------------------------------------- /models/renalStructures_UNEST_segmentation/scripts/networks/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) MONAI Consortium 2 | # Licensed under the Apache License, Version 2.0 (the "License"); 3 | # you may not use this file except in compliance with the License. 4 | # You may obtain a copy of the License at 5 | # http://www.apache.org/licenses/LICENSE-2.0 6 | # Unless required by applicable law or agreed to in writing, software 7 | # distributed under the License is distributed on an "AS IS" BASIS, 8 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 | # See the License for the specific language governing permissions and 10 | # limitations under the License. 11 | -------------------------------------------------------------------------------- /models/renalStructures_UNEST_segmentation/scripts/networks/nest/__init__.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | from .utils import ( 3 | Conv3dSame, 4 | DropPath, 5 | Linear, 6 | Mlp, 7 | _assert, 8 | conv3d_same, 9 | create_conv3d, 10 | create_pool3d, 11 | get_padding, 12 | get_same_padding, 13 | pad_same, 14 | to_ntuple, 15 | trunc_normal_, 16 | ) 17 | -------------------------------------------------------------------------------- /models/segmentation_template/LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 MONAI Consortium 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit 
persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /models/segmentation_template/configs/logging.conf: -------------------------------------------------------------------------------- 1 | [loggers] 2 | keys=root 3 | 4 | [handlers] 5 | keys=consoleHandler 6 | 7 | [formatters] 8 | keys=fullFormatter 9 | 10 | [logger_root] 11 | level=INFO 12 | handlers=consoleHandler 13 | 14 | [handler_consoleHandler] 15 | class=StreamHandler 16 | level=INFO 17 | formatter=fullFormatter 18 | args=(sys.stdout,) 19 | 20 | [formatter_fullFormatter] 21 | format=%(asctime)s - %(name)s - %(levelname)s - %(message)s 22 | -------------------------------------------------------------------------------- /models/segmentation_template/configs/multi_gpu_train.yaml: -------------------------------------------------------------------------------- 1 | # This file contains the changes to implement DDP training with the train.yaml config. 
2 | 3 | is_dist: '$dist.is_initialized()' 4 | rank: '$dist.get_rank() if @is_dist else 0' 5 | device: '$torch.device(f"cuda:{@rank}" if torch.cuda.is_available() else "cpu")' # assumes GPU # matches rank # 6 | 7 | # wrap the network in a DistributedDataParallel instance, moving it to the chosen device for this process 8 | network: 9 | _target_: torch.nn.parallel.DistributedDataParallel 10 | module: $@network_def.to(@device) 11 | device_ids: ['@device'] 12 | find_unused_parameters: true 13 | 14 | train_sampler: 15 | _target_: DistributedSampler 16 | dataset: '@train_dataset' 17 | even_divisible: true 18 | shuffle: true 19 | 20 | train_dataloader#sampler: '@train_sampler' 21 | train_dataloader#shuffle: false 22 | 23 | val_sampler: 24 | _target_: DistributedSampler 25 | dataset: '@val_dataset' 26 | even_divisible: false 27 | shuffle: false 28 | 29 | val_dataloader#sampler: '@val_sampler' 30 | 31 | run: 32 | - $import torch.distributed as dist 33 | - $dist.init_process_group(backend='nccl') 34 | - $torch.cuda.set_device(@device) 35 | - $monai.utils.set_determinism(seed=123) # may want to choose a different seed or not do this here 36 | - $@trainer.run() 37 | - $dist.destroy_process_group() 38 | -------------------------------------------------------------------------------- /models/segmentation_template/docs/inference.sh: -------------------------------------------------------------------------------- 1 | #! /bin/bash 2 | 3 | eval "$(conda shell.bash hook)" 4 | conda activate monai 5 | 6 | homedir="$( cd -P "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" 7 | 8 | BUNDLE="$(cd "$homedir/.." 
&& pwd)" 9 | 10 | echo "Bundle root: $BUNDLE" 11 | 12 | export PYTHONPATH="$BUNDLE" 13 | 14 | python -m monai.bundle run \ 15 | --meta_file "$BUNDLE/configs/metadata.json" \ 16 | --config_file "$BUNDLE/configs/inference.yaml" \ 17 | --bundle_root "$BUNDLE" \ 18 | $@ 19 | -------------------------------------------------------------------------------- /models/segmentation_template/docs/run_monailabel.sh: -------------------------------------------------------------------------------- 1 | #! /bin/bash 2 | 3 | eval "$(conda shell.bash hook)" 4 | conda activate monailabel 5 | 6 | export CUDA_VISIBLE_DEVICES=0 7 | 8 | homedir="$( cd -P "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" 9 | 10 | BUNDLE="$(cd "$homedir/.." && pwd)" 11 | 12 | LABELDIR="$BUNDLE/monailabel" 13 | 14 | BUNDLENAME=$(basename "$BUNDLE") 15 | 16 | if [ ! -d "$LABELDIR" ] 17 | then 18 | mkdir "$LABELDIR" 19 | mkdir "$LABELDIR/datasets" 20 | cd "$LABELDIR" 21 | monailabel apps --download --name monaibundle 22 | mkdir "$LABELDIR/monaibundle/model" 23 | cd "$LABELDIR/monaibundle/model" 24 | ln -s "$BUNDLE" $BUNDLENAME 25 | fi 26 | 27 | cd "$LABELDIR" 28 | monailabel start_server --app monaibundle --studies datasets --conf models $BUNDLENAME $* 29 | -------------------------------------------------------------------------------- /models/segmentation_template/docs/test.sh: -------------------------------------------------------------------------------- 1 | #! /bin/bash 2 | 3 | eval "$(conda shell.bash hook)" 4 | conda activate monai 5 | 6 | homedir="$( cd -P "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" 7 | 8 | BUNDLE="$(cd "$homedir/.." 
&& pwd)" 9 | 10 | echo "Bundle root: $BUNDLE" 11 | 12 | export PYTHONPATH="$BUNDLE" 13 | 14 | python -m monai.bundle run \ 15 | --meta_file "$BUNDLE/configs/metadata.json" \ 16 | --config_file "$BUNDLE/configs/test.yaml" \ 17 | --bundle_root "$BUNDLE" \ 18 | $@ 19 | -------------------------------------------------------------------------------- /models/segmentation_template/docs/train.sh: -------------------------------------------------------------------------------- 1 | #! /bin/bash 2 | 3 | eval "$(conda shell.bash hook)" 4 | conda activate monai 5 | 6 | homedir="$( cd -P "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" 7 | 8 | BUNDLE="$(cd "$homedir/.." && pwd)" 9 | 10 | echo "Bundle root: $BUNDLE" 11 | 12 | export PYTHONPATH="$BUNDLE" 13 | 14 | python -m monai.bundle run \ 15 | --meta_file "$BUNDLE/configs/metadata.json" \ 16 | --config_file "$BUNDLE/configs/train.yaml" \ 17 | --bundle_root "$BUNDLE" \ 18 | $@ 19 | -------------------------------------------------------------------------------- /models/segmentation_template/docs/train_multigpu.sh: -------------------------------------------------------------------------------- 1 | #! /bin/bash 2 | 3 | set -v 4 | 5 | eval "$(conda shell.bash hook)" 6 | conda activate monai 7 | 8 | homedir="$( cd -P "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" 9 | 10 | BUNDLE="$(cd "$homedir/.." 
&& pwd)" 11 | 12 | echo "Bundle root: $BUNDLE" 13 | 14 | export PYTHONPATH="$BUNDLE" 15 | 16 | # set this to something else to use different numbered GPUs on your system 17 | export CUDA_VISIBLE_DEVICES="0,1" 18 | 19 | # seems to resolve some multiprocessing issues with certain libraries 20 | export OMP_NUM_THREADS=1 21 | 22 | CKPT=none 23 | 24 | # need to change this if you have multiple nodes or not 2 GPUs 25 | PYTHON="torchrun --standalone --nnodes=1 --nproc_per_node=2" 26 | 27 | CONFIG="['$BUNDLE/configs/train.yaml','$BUNDLE/configs/multi_gpu_train.yaml']" 28 | 29 | $PYTHON -m monai.bundle run \ 30 | --meta_file $BUNDLE/configs/metadata.json \ 31 | --logging_file $BUNDLE/configs/logging.conf \ 32 | --config_file "$CONFIG" \ 33 | --bundle_root $BUNDLE \ 34 | $@ 35 | -------------------------------------------------------------------------------- /models/segmentation_template/large_files.yml: -------------------------------------------------------------------------------- 1 | large_files: 2 | - path: "models/model.pt" 3 | url: "https://github.com/KCL-BMEIS/bundle-data/raw/main/segmentation_template/model.pt" 4 | hash_val: "b11405475d2d3da1f8a2aec9286f0548" 5 | hash_type: "md5" 6 | -------------------------------------------------------------------------------- /models/spleen_ct_segmentation/configs/inference_trt.json: -------------------------------------------------------------------------------- 1 | { 2 | "imports": [ 3 | "$import glob", 4 | "$import os", 5 | "$import torch_tensorrt" 6 | ], 7 | "network_def": "$torch.jit.load(@bundle_root + '/models/model_trt.ts')", 8 | "evaluator#amp": false, 9 | "initialize": [ 10 | "$monai.utils.set_determinism(seed=123)" 11 | ] 12 | } 13 | -------------------------------------------------------------------------------- /models/spleen_ct_segmentation/configs/logging.conf: -------------------------------------------------------------------------------- 1 | [loggers] 2 | keys=root 3 | 4 | [handlers] 5 | keys=consoleHandler 6 | 
7 | [formatters] 8 | keys=fullFormatter 9 | 10 | [logger_root] 11 | level=INFO 12 | handlers=consoleHandler 13 | 14 | [handler_consoleHandler] 15 | class=StreamHandler 16 | level=INFO 17 | formatter=fullFormatter 18 | args=(sys.stdout,) 19 | 20 | [formatter_fullFormatter] 21 | format=%(asctime)s - %(name)s - %(levelname)s - %(message)s 22 | -------------------------------------------------------------------------------- /models/spleen_ct_segmentation/configs/multi_gpu_evaluate.json: -------------------------------------------------------------------------------- 1 | { 2 | "device": "$torch.device('cuda:' + os.environ['LOCAL_RANK'])", 3 | "network": { 4 | "_target_": "torch.nn.parallel.DistributedDataParallel", 5 | "module": "$@network_def.to(@device)", 6 | "device_ids": [ 7 | "@device" 8 | ] 9 | }, 10 | "validate#sampler": { 11 | "_target_": "DistributedSampler", 12 | "dataset": "@validate#dataset", 13 | "even_divisible": false, 14 | "shuffle": false 15 | }, 16 | "validate#dataloader#sampler": "@validate#sampler", 17 | "validate#handlers#1#_disabled_": "$dist.get_rank() > 0", 18 | "initialize": [ 19 | "$import torch.distributed as dist", 20 | "$dist.is_initialized() or dist.init_process_group(backend='nccl')", 21 | "$torch.cuda.set_device(@device)", 22 | "$import logging", 23 | "$@validate#evaluator.logger.setLevel(logging.WARNING if dist.get_rank() > 0 else logging.INFO)" 24 | ], 25 | "run": [ 26 | "$@validate#evaluator.run()" 27 | ], 28 | "finalize": [ 29 | "$dist.is_initialized() and dist.destroy_process_group()" 30 | ] 31 | } 32 | -------------------------------------------------------------------------------- /models/spleen_ct_segmentation/configs/multi_gpu_train.json: -------------------------------------------------------------------------------- 1 | { 2 | "device": "$torch.device('cuda:' + os.environ['LOCAL_RANK'])", 3 | "network": { 4 | "_target_": "torch.nn.parallel.DistributedDataParallel", 5 | "module": "$@network_def.to(@device)", 6 | "device_ids": [ 7 
| "@device" 8 | ] 9 | }, 10 | "train#sampler": { 11 | "_target_": "DistributedSampler", 12 | "dataset": "@train#dataset", 13 | "even_divisible": true, 14 | "shuffle": true 15 | }, 16 | "train#dataloader#sampler": "@train#sampler", 17 | "train#dataloader#shuffle": false, 18 | "train#trainer#train_handlers": "$@train#handlers[: -2 if dist.get_rank() > 0 else None]", 19 | "validate#sampler": { 20 | "_target_": "DistributedSampler", 21 | "dataset": "@validate#dataset", 22 | "even_divisible": false, 23 | "shuffle": false 24 | }, 25 | "validate#dataloader#sampler": "@validate#sampler", 26 | "validate#evaluator#val_handlers": "$None if dist.get_rank() > 0 else @validate#handlers", 27 | "initialize": [ 28 | "$import torch.distributed as dist", 29 | "$dist.is_initialized() or dist.init_process_group(backend='nccl')", 30 | "$torch.cuda.set_device(@device)", 31 | "$monai.utils.set_determinism(seed=123)", 32 | "$import logging", 33 | "$@train#trainer.logger.setLevel(logging.WARNING if dist.get_rank() > 0 else logging.INFO)", 34 | "$@validate#evaluator.logger.setLevel(logging.WARNING if dist.get_rank() > 0 else logging.INFO)" 35 | ], 36 | "run": [ 37 | "$@train#trainer.run()" 38 | ], 39 | "finalize": [ 40 | "$dist.is_initialized() and dist.destroy_process_group()" 41 | ] 42 | } 43 | -------------------------------------------------------------------------------- /models/spleen_ct_segmentation/docs/data_license.txt: -------------------------------------------------------------------------------- 1 | Third Party Licenses 2 | ----------------------------------------------------------------------- 3 | 4 | /*********************************************************************/ 5 | i. 
Medical Segmentation Decathlon 6 | http://medicaldecathlon.com/ 7 | -------------------------------------------------------------------------------- /models/spleen_ct_segmentation/large_files.yml: -------------------------------------------------------------------------------- 1 | large_files: 2 | - path: "models/model.pt" 3 | url: "https://developer.download.nvidia.com/assets/Clara/monai/tutorials/model_zoo/model_spleen_ct_segmentation_v1.pt" 4 | hash_val: "be4fa95e236df91766147c112ee6fafd" 5 | hash_type: "md5" 6 | - path: "models/model.ts" 7 | url: "https://developer.download.nvidia.com/assets/Clara/monai/tutorials/model_zoo/model_spleen_ct_segmentation_v1.ts" 8 | hash_val: "3e163505054a1623956de555033b23db" 9 | hash_type: "md5" 10 | -------------------------------------------------------------------------------- /models/spleen_deepedit_annotation/configs/evaluate.json: -------------------------------------------------------------------------------- 1 | { 2 | "validate#dataset#cache_rate": 0, 3 | "validate#postprocessing": { 4 | "_target_": "Compose", 5 | "transforms": [ 6 | { 7 | "_target_": "Activationsd", 8 | "keys": "pred", 9 | "softmax": true 10 | }, 11 | { 12 | "_target_": "AsDiscreted", 13 | "keys": [ 14 | "pred", 15 | "label" 16 | ], 17 | "argmax": [ 18 | true, 19 | false 20 | ], 21 | "to_onehot": "$len(@label_names)+1" 22 | }, 23 | { 24 | "_target_": "SaveImaged", 25 | "_disabled_": true, 26 | "keys": "pred", 27 | "output_dir": "@output_dir", 28 | "resample": false, 29 | "squeeze_end_dims": true 30 | } 31 | ] 32 | }, 33 | "validate#handlers": [ 34 | { 35 | "_target_": "CheckpointLoader", 36 | "load_path": "$@ckpt_dir + '/model.pt'", 37 | "load_dict": { 38 | "model": "@network" 39 | } 40 | }, 41 | { 42 | "_target_": "StatsHandler", 43 | "iteration_log": false 44 | }, 45 | { 46 | "_target_": "MetricsSaver", 47 | "save_dir": "@output_dir", 48 | "metrics": [ 49 | "val_mean_dice", 50 | "val_acc" 51 | ], 52 | "metric_details": [ 53 | "val_mean_dice" 54 | ], 55 
| "batch_transform": "$lambda x: [xx['image'].meta for xx in x]", 56 | "summary_ops": "*" 57 | } 58 | ], 59 | "run": [ 60 | "$@validate#evaluator.run()" 61 | ] 62 | } 63 | -------------------------------------------------------------------------------- /models/spleen_deepedit_annotation/configs/inference_trt.json: -------------------------------------------------------------------------------- 1 | { 2 | "imports": [ 3 | "$import glob", 4 | "$import os", 5 | "$import ignite", 6 | "$import torch_tensorrt" 7 | ], 8 | "network_def": "$torch.jit.load(@bundle_root + '/models/model_trt.ts')", 9 | "evaluator#amp": false, 10 | "initialize": [ 11 | "$monai.utils.set_determinism(seed=123)" 12 | ] 13 | } 14 | -------------------------------------------------------------------------------- /models/spleen_deepedit_annotation/configs/logging.conf: -------------------------------------------------------------------------------- 1 | [loggers] 2 | keys=root 3 | 4 | [handlers] 5 | keys=consoleHandler 6 | 7 | [formatters] 8 | keys=fullFormatter 9 | 10 | [logger_root] 11 | level=INFO 12 | handlers=consoleHandler 13 | 14 | [handler_consoleHandler] 15 | class=StreamHandler 16 | level=INFO 17 | formatter=fullFormatter 18 | args=(sys.stdout,) 19 | 20 | [formatter_fullFormatter] 21 | format=%(asctime)s - %(name)s - %(levelname)s - %(message)s 22 | -------------------------------------------------------------------------------- /models/spleen_deepedit_annotation/configs/multi_gpu_train.json: -------------------------------------------------------------------------------- 1 | { 2 | "device": "$torch.device('cuda:' + os.environ['LOCAL_RANK'])", 3 | "network": { 4 | "_target_": "torch.nn.parallel.DistributedDataParallel", 5 | "module": "$@network_def.to(@device)", 6 | "device_ids": [ 7 | "@device" 8 | ] 9 | }, 10 | "train#sampler": { 11 | "_target_": "DistributedSampler", 12 | "dataset": "@train#dataset", 13 | "even_divisible": true, 14 | "shuffle": true 15 | }, 16 | "train#dataloader#sampler": 
"@train#sampler", 17 | "train#dataloader#shuffle": false, 18 | "train#trainer#train_handlers": "$@train#handlers[: -2 if dist.get_rank() > 0 else None]", 19 | "validate#sampler": { 20 | "_target_": "DistributedSampler", 21 | "dataset": "@validate#dataset", 22 | "even_divisible": false, 23 | "shuffle": false 24 | }, 25 | "validate#dataloader#sampler": "@validate#sampler", 26 | "validate#evaluator#val_handlers": "$@validate#handlers[: -3 if dist.get_rank() > 0 else None]", 27 | "initialize": [ 28 | "$import torch.distributed as dist", 29 | "$dist.is_initialized() or dist.init_process_group(backend='nccl')", 30 | "$torch.cuda.set_device(@device)", 31 | "$monai.utils.set_determinism(seed=123)" 32 | ], 33 | "run": [ 34 | "$@validate#handlers#0.set_trainer(trainer=@train#trainer) if @early_stop else None", 35 | "$@train#trainer.run()" 36 | ], 37 | "finalize": [ 38 | "$dist.is_initialized() and dist.destroy_process_group()" 39 | ] 40 | } 41 | -------------------------------------------------------------------------------- /models/spleen_deepedit_annotation/docs/data_license.txt: -------------------------------------------------------------------------------- 1 | Third Party Licenses 2 | ----------------------------------------------------------------------- 3 | 4 | /*********************************************************************/ 5 | i. 
def score_function(engine):
    """Early-stopping score: mean validation Dice, averaged over all ranks.

    In single-process runs the metric is returned as-is; under distributed
    training every rank contributes its local value via an all-reduce so that
    all ranks agree on the same score.
    """
    metric = engine.state.metrics["val_mean_dice"]
    if not dist.is_initialized():
        return metric
    # sum the per-rank metrics on this rank's GPU, then divide by world size
    local_device = torch.device("cuda:" + os.environ["LOCAL_RANK"])
    reduced = torch.tensor([metric]).to(local_device)
    dist.all_reduce(reduced, op=dist.ReduceOp.SUM)
    return (reduced / dist.get_world_size()).item()
class OrientationGuidanceMultipleLabelDeepEditd(Transform):
    """Reorient per-label guidance points into the transformed image space.

    For every key in ``label_names``, the stored guidance points (coordinates in
    the original image space) are mapped through
    ``inv(meta["affine"]) @ meta["original_affine"]`` of ``ref_image``, i.e. into
    the coordinate system of the image after preprocessing (typically RAS).
    """

    def __init__(self, ref_image="image", label_names=None):
        """
        Args:
            ref_image: key of the image whose ``meta`` provides ``affine`` and
                ``original_affine`` for the reorientation.
            label_names: mapping whose keys name the data-dict entries holding
                guidance points. ``None`` (or empty) means nothing to reorient.
        """
        self.ref_image = ref_image
        self.label_names = label_names

    def transform_points(self, point, affine):
        """Apply a 4x4 homogeneous ``affine`` to a batch of 3D points.

        Args:
            point: numpy array of shape ``[bs, N, 3]``.
            affine: ``[4, 4]`` homogeneous transform matrix.

        Returns:
            numpy array of shape ``[bs, N, 3]`` with transformed coordinates.
        """
        bs, n = point.shape[:2]
        # promote to homogeneous coordinates: [bs, N, 4]
        homogeneous = np.concatenate((point, np.ones((bs, n, 1))), axis=-1)
        # flatten to [4, bs*N] so one matrix multiply transforms every point
        flat = homogeneous.reshape(bs * n, -1).T
        transformed = affine @ flat
        # back to [bs, N, 3], dropping the homogeneous coordinate
        return transformed.T.reshape(bs, n, -1)[:, :, :3]

    def __call__(self, data):
        d: Dict = dict(data)
        # `or {}` guards against the default label_names=None (previously crashed)
        for key_label in self.label_names or {}:
            points = d.get(key_label, [])
            if len(points) < 1:
                continue
            # map from original image space into the current (preprocessed) space
            reoriented_points = self.transform_points(
                np.array(points)[None],
                np.linalg.inv(d[self.ref_image].meta["affine"].numpy()) @ d[self.ref_image].meta["original_affine"],
            )
            d[key_label] = reoriented_points[0]
        return d
keys=fullFormatter 9 | 10 | [logger_root] 11 | level=INFO 12 | handlers=consoleHandler 13 | 14 | [handler_consoleHandler] 15 | class=StreamHandler 16 | level=INFO 17 | formatter=fullFormatter 18 | args=(sys.stdout,) 19 | 20 | [formatter_fullFormatter] 21 | format=%(asctime)s - %(name)s - %(levelname)s - %(message)s 22 | -------------------------------------------------------------------------------- /models/swin_unetr_btcv_segmentation/configs/multi_gpu_train.json: -------------------------------------------------------------------------------- 1 | { 2 | "device": "$torch.device('cuda:' + os.environ['LOCAL_RANK'])", 3 | "network": { 4 | "_target_": "torch.nn.parallel.DistributedDataParallel", 5 | "module": "$@network_def.to(@device)", 6 | "device_ids": [ 7 | "@device" 8 | ] 9 | }, 10 | "train#sampler": { 11 | "_target_": "DistributedSampler", 12 | "dataset": "@train#dataset", 13 | "even_divisible": true, 14 | "shuffle": true 15 | }, 16 | "train#dataloader#sampler": "@train#sampler", 17 | "train#dataloader#shuffle": false, 18 | "train#trainer#train_handlers": "$@train#handlers[: -2 if dist.get_rank() > 0 else None]", 19 | "validate#sampler": { 20 | "_target_": "DistributedSampler", 21 | "dataset": "@validate#dataset", 22 | "even_divisible": false, 23 | "shuffle": false 24 | }, 25 | "validate#dataloader#sampler": "@validate#sampler", 26 | "validate#evaluator#val_handlers": "$None if dist.get_rank() > 0 else @validate#handlers", 27 | "initialize": [ 28 | "$import torch.distributed as dist", 29 | "$dist.is_initialized() or dist.init_process_group(backend='nccl')", 30 | "$torch.cuda.set_device(@device)", 31 | "$monai.utils.set_determinism(seed=123)" 32 | ], 33 | "run": [ 34 | "$@train#trainer.run()" 35 | ], 36 | "finalize": [ 37 | "$dist.is_initialized() and dist.destroy_process_group()" 38 | ] 39 | } 40 | -------------------------------------------------------------------------------- /models/swin_unetr_btcv_segmentation/docs/data_license.txt: 
-------------------------------------------------------------------------------- 1 | Third Party Licenses 2 | ----------------------------------------------------------------------- 3 | 4 | /*********************************************************************/ 5 | i. Medical Segmentation Decathlon 6 | http://medicaldecathlon.com/ 7 | -------------------------------------------------------------------------------- /models/swin_unetr_btcv_segmentation/large_files.yml: -------------------------------------------------------------------------------- 1 | large_files: 2 | - path: "models/model.pt" 3 | url: "https://developer.download.nvidia.com/assets/Clara/monai/tutorials/model_zoo/model_swin_unetr_btcv_segmentation_v1.pt" 4 | hash_val: "50dd67a01b28a1d5487fd9ac27e682fb" 5 | hash_type: "md5" 6 | -------------------------------------------------------------------------------- /models/valve_landmarks/LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2022 Eric Kerfoot 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /models/valve_landmarks/docs/prediction_example.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Project-MONAI/model-zoo/f3ae2588766a08a82f88bce1c345aeec4d4c4298/models/valve_landmarks/docs/prediction_example.png -------------------------------------------------------------------------------- /models/valve_landmarks/large_files.yml: -------------------------------------------------------------------------------- 1 | large_files: 2 | - path: "models/model.pt" 3 | url: "https://github.com/KCL-BMEIS/bundle-data/raw/main/valve_landmarks/model.pt" 4 | hash_val: "232556ebb8e3c20790c7e83d603edb2c" 5 | hash_type: "md5" 6 | - path: "docs/AMRGAtlas_0031.nii.gz" 7 | url: "https://github.com/KCL-BMEIS/bundle-data/raw/main/valve_landmarks/AMRGAtlas_0031.nii.gz" 8 | hash_val: "07acb0e23af37bb78b477c96b1f80971" 9 | hash_type: "md5" 10 | - path: "docs/AMRGAtlas_0031_key-pred.npy" 11 | url: "https://github.com/KCL-BMEIS/bundle-data/raw/main/valve_landmarks/AMRGAtlas_0031_key-pred.npy" 12 | hash_val: "c5ce17595f08f5c3bec5d2ab0b6a0d30" 13 | hash_type: "md5" 14 | -------------------------------------------------------------------------------- /models/valve_landmarks/scripts/__init__.py: -------------------------------------------------------------------------------- 1 | from . 
import valve_landmarks 2 | -------------------------------------------------------------------------------- /models/ventricular_short_axis_3label/LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2022 Eric Kerfoot 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /models/ventricular_short_axis_3label/configs/logging.conf: -------------------------------------------------------------------------------- 1 | [loggers] 2 | keys=root 3 | 4 | [handlers] 5 | keys=consoleHandler 6 | 7 | [formatters] 8 | keys=fullFormatter 9 | 10 | [logger_root] 11 | level=INFO 12 | handlers=consoleHandler 13 | 14 | [handler_consoleHandler] 15 | class=StreamHandler 16 | level=INFO 17 | formatter=fullFormatter 18 | args=(sys.stdout,) 19 | 20 | [formatter_fullFormatter] 21 | format=%(asctime)s - %(name)s - %(levelname)s - %(message)s 22 | -------------------------------------------------------------------------------- /models/ventricular_short_axis_3label/large_files.yml: -------------------------------------------------------------------------------- 1 | large_files: 2 | - path: "models/model.pt" 3 | url: "https://drive.google.com/uc?id=1ETezrTQyh5PdkBc8kKKdwWwJvTqFFz_f" 4 | hash_val: "0ffb4506fbfa4b3709a28a7f71122ec8" 5 | hash_type: "md5" 6 | - path: "models/model.ts" 7 | url: "https://drive.google.com/uc?id=1BMFFwiKLwTYevAZq7V5keNm8Zws4vHq4" 8 | hash_val: "9f8fb52a3092911a0b65aa0763248771" 9 | hash_type: "md5" 10 | - path: "docs/SC-N-2-3-0.npy" 11 | url: "https://drive.google.com/uc?id=158RMzaGtgMgY1hCneDXmqCx7u046RbZE" 12 | hash_val: "f834b8b9b662e5b7b0492f4a0f382591" 13 | hash_type: "md5" 14 | -------------------------------------------------------------------------------- /models/vista2d/configs/inference_trt.json: -------------------------------------------------------------------------------- 1 | { 2 | "imports": [ 3 | "$import numpy", 4 | "$from monai.networks import trt_compile" 5 | ], 6 | "trt_args": { 7 | "dynamic_batchsize": "$[1, @inferer#sw_batch_size, @inferer#sw_batch_size]" 8 | }, 9 | "network": "$trt_compile(@network_def.to(@device), @pretrained_ckpt_path, args=@trt_args)" 10 | } 11 | 
-------------------------------------------------------------------------------- /models/vista2d/download_preprocessor/cellpose_agreement.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Project-MONAI/model-zoo/f3ae2588766a08a82f88bce1c345aeec4d4c4298/models/vista2d/download_preprocessor/cellpose_agreement.png -------------------------------------------------------------------------------- /models/vista2d/download_preprocessor/cellpose_links.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Project-MONAI/model-zoo/f3ae2588766a08a82f88bce1c345aeec4d4c4298/models/vista2d/download_preprocessor/cellpose_links.png -------------------------------------------------------------------------------- /models/vista2d/download_preprocessor/data_tree.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Project-MONAI/model-zoo/f3ae2588766a08a82f88bce1c345aeec4d4c4298/models/vista2d/download_preprocessor/data_tree.png -------------------------------------------------------------------------------- /models/vista2d/download_preprocessor/kaggle_download.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Project-MONAI/model-zoo/f3ae2588766a08a82f88bce1c345aeec4d4c4298/models/vista2d/download_preprocessor/kaggle_download.png -------------------------------------------------------------------------------- /models/vista2d/download_preprocessor/omnipose_download.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Project-MONAI/model-zoo/f3ae2588766a08a82f88bce1c345aeec4d4c4298/models/vista2d/download_preprocessor/omnipose_download.png -------------------------------------------------------------------------------- 
/models/vista2d/download_preprocessor/tissuenet_download.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Project-MONAI/model-zoo/f3ae2588766a08a82f88bce1c345aeec4d4c4298/models/vista2d/download_preprocessor/tissuenet_download.png -------------------------------------------------------------------------------- /models/vista2d/download_preprocessor/tissuenet_login.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Project-MONAI/model-zoo/f3ae2588766a08a82f88bce1c345aeec4d4c4298/models/vista2d/download_preprocessor/tissuenet_login.png -------------------------------------------------------------------------------- /models/vista2d/large_files.yml: -------------------------------------------------------------------------------- 1 | large_files: 2 | - path: "models/sam_vit_b_01ec64.pth" 3 | url: "https://dl.fbaipublicfiles.com/segment_anything/sam_vit_b_01ec64.pth" 4 | hash_val: "01ec64d29a2fca3f0661936605ae66f8" 5 | hash_type: "md5" 6 | - path: "models/model.pt" 7 | url: "https://developer.download.nvidia.com/assets/Clara/monai/tutorials/model_zoo/model_vista2d_v1.pt" 8 | hash_val: "c8e2f4a6236fe63e75c9d1ba8294344c" 9 | hash_type: "md5" 10 | - path: "datalists.zip" 11 | url: "https://github.com/Project-MONAI/model-zoo/releases/download/model_zoo_bundle_data/vista2d_datalists.zip" 12 | hash_val: "1a07b7fe5e169f56a15e3c3931b40730" 13 | hash_type: "md5" 14 | -------------------------------------------------------------------------------- /models/vista2d/scripts/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) MONAI Consortium 2 | # Licensed under the Apache License, Version 2.0 (the "License"); 3 | # you may not use this file except in compliance with the License. 
4 | # You may obtain a copy of the License at 5 | # http://www.apache.org/licenses/LICENSE-2.0 6 | # Unless required by applicable law or agreed to in writing, software 7 | # distributed under the License is distributed on an "AS IS" BASIS, 8 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 | # See the License for the specific language governing permissions and 10 | # limitations under the License. 11 | -------------------------------------------------------------------------------- /models/vista3d/configs/batch_inference.json: -------------------------------------------------------------------------------- 1 | { 2 | "input_dir": "@bundle_root", 3 | "input_suffix": "*.nii.gz", 4 | "input_list": "$sorted(glob.glob(os.path.join(@input_dir, @input_suffix)))", 5 | "input_dicts": "$[{'image': x, 'label_prompt': @everything_labels} for x in @input_list]", 6 | "dataset#data": "@input_dicts" 7 | } 8 | -------------------------------------------------------------------------------- /models/vista3d/configs/inference_trt.json: -------------------------------------------------------------------------------- 1 | { 2 | "base_path": null, 3 | "+imports": [ 4 | "$from monai.networks import trt_compile" 5 | ], 6 | "max_prompt_size": 4, 7 | "head_trt_enabled": false, 8 | "network_trt_args": { 9 | "dynamic_batchsize": "$[1, @inferer#sw_batch_size, @inferer#sw_batch_size]" 10 | }, 11 | "network_dev": "$@network_def.to(@device)", 12 | "encoder": "$trt_compile(@network_dev, @bundle_root + '/models/model.pt' if not @base_path else @base_path, args=@network_trt_args, submodule=['image_encoder.encoder'])", 13 | "head_trt_args": { 14 | "dynamic_batchsize": "$[1, 1, @max_prompt_size]", 15 | "fallback": "$True" 16 | }, 17 | "head": "$trt_compile(@network_dev, @bundle_root + '/models/model.pt' if not @base_path else @base_path, args=@head_trt_args, submodule=['class_head']) if @head_trt_enabled else @network_dev", 18 | "network": "$None if @encoder is None else 
@head" 19 | } 20 | -------------------------------------------------------------------------------- /models/vista3d/configs/logging.conf: -------------------------------------------------------------------------------- 1 | [loggers] 2 | keys=root 3 | 4 | [handlers] 5 | keys=consoleHandler,fileHandler 6 | 7 | [formatters] 8 | keys=fullFormatter 9 | 10 | [logger_root] 11 | level=INFO 12 | handlers=consoleHandler,fileHandler 13 | 14 | [handler_consoleHandler] 15 | class=StreamHandler 16 | level=INFO 17 | formatter=fullFormatter 18 | args=(sys.stdout,) 19 | 20 | [handler_fileHandler] 21 | class=FileHandler 22 | level=INFO 23 | formatter=fullFormatter 24 | args=('training.log',) 25 | 26 | [formatter_fullFormatter] 27 | format=%(asctime)s - %(name)s - %(levelname)s - %(message)s 28 | -------------------------------------------------------------------------------- /models/vista3d/configs/mgpu_evaluate.json: -------------------------------------------------------------------------------- 1 | { 2 | "device": "$torch.device('cuda:' + os.environ['LOCAL_RANK'])", 3 | "network": { 4 | "_target_": "torch.nn.parallel.DistributedDataParallel", 5 | "module": "$@network_def.to(@device)", 6 | "device_ids": [ 7 | "@device" 8 | ] 9 | }, 10 | "validate#sampler": { 11 | "_target_": "DistributedSampler", 12 | "dataset": "@validate#dataset", 13 | "even_divisible": false, 14 | "shuffle": false 15 | }, 16 | "validate#dataloader#sampler": "@validate#sampler", 17 | "validate#handlers#1#_disabled_": "$dist.get_rank() > 0", 18 | "initialize": [ 19 | "$import torch.distributed as dist", 20 | "$dist.is_initialized() or dist.init_process_group(backend='nccl')", 21 | "$torch.cuda.set_device(@device)" 22 | ], 23 | "run": [ 24 | "$@validate#evaluator.run()" 25 | ], 26 | "finalize": [ 27 | "$dist.is_initialized() and dist.destroy_process_group()" 28 | ] 29 | } 30 | -------------------------------------------------------------------------------- /models/vista3d/configs/multi_gpu_train.json: 
-------------------------------------------------------------------------------- 1 | { 2 | "device": "$torch.device('cuda:' + os.environ['LOCAL_RANK'])", 3 | "use_tensorboard": "$dist.get_rank() == 0", 4 | "network": { 5 | "_target_": "torch.nn.parallel.DistributedDataParallel", 6 | "module": "$@network_def.to(@device)", 7 | "find_unused_parameters": true, 8 | "device_ids": [ 9 | "@device" 10 | ] 11 | }, 12 | "train#sampler": { 13 | "_target_": "DistributedSampler", 14 | "dataset": "@train#dataset", 15 | "even_divisible": true, 16 | "shuffle": true 17 | }, 18 | "train#dataloader#sampler": "@train#sampler", 19 | "train#dataloader#shuffle": false, 20 | "train#trainer#train_handlers": "$@train#handlers[: -1 if dist.get_rank() > 0 else None]", 21 | "validate#sampler": { 22 | "_target_": "DistributedSampler", 23 | "dataset": "@validate#dataset", 24 | "even_divisible": false, 25 | "shuffle": false 26 | }, 27 | "validate#dataloader#sampler": "@validate#sampler", 28 | "validate#evaluator#val_handlers": "$@validate#handlers[: -2 if dist.get_rank() > 0 else None]", 29 | "initialize": [ 30 | "$import torch.distributed as dist", 31 | "$dist.is_initialized() or dist.init_process_group(backend='nccl')", 32 | "$torch.cuda.set_device(@device)", 33 | "$monai.utils.set_determinism(seed=123)" 34 | ], 35 | "run": [ 36 | "$@validate#handlers#0.set_trainer(trainer=@train#trainer) if @early_stop else None", 37 | "$@train#trainer.run()" 38 | ], 39 | "finalize": [ 40 | "$dist.is_initialized() and dist.destroy_process_group()" 41 | ] 42 | } 43 | -------------------------------------------------------------------------------- /models/vista3d/docs/data.md: -------------------------------------------------------------------------------- 1 | ### Best practice to generate data list 2 | User can use monai to generate the 5-fold data lists. 
Full examples can be found in VISTA3D open source [codebase](https://github.com/Project-MONAI/VISTA/blob/main/vista3d/data/make_datalists.py) 3 | ```python 4 | from monai.data.utils import partition_dataset 5 | from monai.bundle import ConfigParser 6 | base_url = "/path_to_your_folder/" 7 | json_name = "./your_5_folds.json" 8 | # create matching image and label lists. 9 | # The code to generate the lists is based on your local data structure. 10 | # You can use glob.glob("**.nii.gz") etc. 11 | image_list = ['images/1.nii.gz', 'images/2.nii.gz', ...] 12 | label_list = ['labels/1.nii.gz', 'labels/2.nii.gz', ...] 13 | items = [{"image": img, "label": lab} for img, lab in zip(image_list, label_list)] 14 | # 80% for training, 20% for testing. 15 | train_test = partition_dataset(items, ratios=[0.8, 0.2], shuffle=True, seed=0) 16 | print(f"training: {len(train_test[0])}, testing: {len(train_test[1])}") 17 | # num_partitions-fold split for the training set. 18 | train_val = partition_dataset(train_test[0], num_partitions=5, shuffle=True, seed=0) 19 | print(f"training validation folds sizes: {[len(x) for x in train_val]}") 20 | # add the fold index to each training data.
21 | training = [] 22 | for f, x in enumerate(train_val): 23 | for item in x: 24 | item["fold"] = f 25 | training.append(item) 26 | # save json file 27 | parser = ConfigParser({}) 28 | parser["training"] = training 29 | parser["testing"] = train_test[1] 30 | print(f"writing {json_name}\n\n") 31 | if os.path.exists(json_name): 32 | logger.warning(f"rewrite existing datalist file: {json_name}") 33 | ConfigParser.export_config_file(parser.config, json_name, indent=4) 34 | ``` 35 | -------------------------------------------------------------------------------- /models/vista3d/docs/data_license.txt: -------------------------------------------------------------------------------- 1 | Third Party Licenses 2 | ----------------------------------------------------------------------- 3 | 4 | /*********************************************************************/ 5 | i. Medical Segmentation Decathlon 6 | http://medicaldecathlon.com/ 7 | -------------------------------------------------------------------------------- /models/vista3d/large_files.yml: -------------------------------------------------------------------------------- 1 | large_files: 2 | - path: "models/model.pt" 3 | url: "https://developer.download.nvidia.com/assets/Clara/monai/tutorials/model_zoo/model_vista3d.pt" 4 | hash_val: "6ce45a8edde4400c5d28d5e74d7b61d5" 5 | hash_type: "md5" 6 | - path: "docs/labels.json" 7 | url: "https://raw.githubusercontent.com/Project-MONAI/tutorials/e66be5955d2b4f5959884ca026932762954b19c5/vista_3d/label_dict.json" 8 | hash_val: "71ff501086a80fe973ab843d2f7974c2" 9 | hash_type: "md5" 10 | -------------------------------------------------------------------------------- /models/vista3d/scripts/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) MONAI Consortium 2 | # Licensed under the Apache License, Version 2.0 (the "License"); 3 | # you may not use this file except in compliance with the License. 
4 | # You may obtain a copy of the License at 5 | # http://www.apache.org/licenses/LICENSE-2.0 6 | # Unless required by applicable law or agreed to in writing, software 7 | # distributed under the License is distributed on an "AS IS" BASIS, 8 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 | # See the License for the specific language governing permissions and 10 | # limitations under the License. 11 | 12 | # from .evaluator import EnsembleEvaluator, Evaluator, SupervisedEvaluator 13 | # from .multi_gpu_supervised_trainer import create_multigpu_supervised_evaluator, create_multigpu_supervised_trainer 14 | 15 | from .early_stop_score_function import score_function 16 | -------------------------------------------------------------------------------- /models/vista3d/scripts/early_stop_score_function.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import torch 4 | import torch.distributed as dist 5 | 6 | 7 | def score_function(engine): 8 | val_metric = engine.state.metrics["val_mean_dice"] 9 | if dist.is_initialized(): 10 | device = torch.device("cuda:" + os.environ["LOCAL_RANK"]) 11 | val_metric = torch.tensor([val_metric]).to(device) 12 | dist.all_reduce(val_metric, op=dist.ReduceOp.SUM) 13 | val_metric /= dist.get_world_size() 14 | return val_metric.item() 15 | return val_metric 16 | -------------------------------------------------------------------------------- /models/wholeBody_ct_segmentation/configs/inference_trt.json: -------------------------------------------------------------------------------- 1 | { 2 | "imports": [ 3 | "$import glob", 4 | "$import os", 5 | "$import torch_tensorrt" 6 | ], 7 | "network_def": "$torch.jit.load(@bundle_root + '/models/model_trt.ts')", 8 | "evaluator#amp": false, 9 | "initialize": [ 10 | "$setattr(torch.backends.cudnn, 'benchmark', True)" 11 | ] 12 | } 13 | -------------------------------------------------------------------------------- 
/models/wholeBody_ct_segmentation/configs/logging.conf: -------------------------------------------------------------------------------- 1 | [loggers] 2 | keys=root 3 | 4 | [handlers] 5 | keys=consoleHandler 6 | 7 | [formatters] 8 | keys=fullFormatter 9 | 10 | [logger_root] 11 | level=INFO 12 | handlers=consoleHandler 13 | 14 | [handler_consoleHandler] 15 | class=StreamHandler 16 | level=INFO 17 | formatter=fullFormatter 18 | args=(sys.stdout,) 19 | 20 | [formatter_fullFormatter] 21 | format=%(asctime)s - %(name)s - %(levelname)s - %(message)s 22 | -------------------------------------------------------------------------------- /models/wholeBody_ct_segmentation/configs/multi_gpu_evaluate.json: -------------------------------------------------------------------------------- 1 | { 2 | "device": "$torch.device('cuda:' + os.environ['LOCAL_RANK'])", 3 | "network": { 4 | "_target_": "torch.nn.parallel.DistributedDataParallel", 5 | "module": "$@network_def.to(@device)", 6 | "device_ids": [ 7 | "@device" 8 | ] 9 | }, 10 | "validate#sampler": { 11 | "_target_": "DistributedSampler", 12 | "dataset": "@validate#dataset", 13 | "even_divisible": false, 14 | "shuffle": false 15 | }, 16 | "validate#dataloader#sampler": "@validate#sampler", 17 | "validate#handlers#1#_disabled_": "$dist.get_rank() > 0", 18 | "initialize": [ 19 | "$import torch.distributed as dist", 20 | "$dist.is_initialized() or dist.init_process_group(backend='nccl')", 21 | "$torch.cuda.set_device(@device)", 22 | "$setattr(torch.backends.cudnn, 'benchmark', True)", 23 | "$import logging", 24 | "$@validate#evaluator.logger.setLevel(logging.WARNING if dist.get_rank() > 0 else logging.INFO)" 25 | ], 26 | "run": [ 27 | "$@validate#evaluator.run()" 28 | ], 29 | "finalize": [ 30 | "$dist.is_initialized() and dist.destroy_process_group()" 31 | ] 32 | } 33 | -------------------------------------------------------------------------------- /models/wholeBody_ct_segmentation/configs/multi_gpu_train.json: 
-------------------------------------------------------------------------------- 1 | { 2 | "device": "$torch.device('cuda:' + os.environ['LOCAL_RANK'])", 3 | "network": { 4 | "_target_": "torch.nn.parallel.DistributedDataParallel", 5 | "module": "$@network_def.to(@device)", 6 | "device_ids": [ 7 | "@device" 8 | ] 9 | }, 10 | "train#sampler": { 11 | "_target_": "DistributedSampler", 12 | "dataset": "@train#dataset", 13 | "even_divisible": true, 14 | "shuffle": true 15 | }, 16 | "train#dataloader#sampler": "@train#sampler", 17 | "train#dataloader#shuffle": false, 18 | "train#trainer#train_handlers": "$@train#handlers[: -2 if dist.get_rank() > 0 else None]", 19 | "validate#sampler": { 20 | "_target_": "DistributedSampler", 21 | "dataset": "@validate#dataset", 22 | "even_divisible": false, 23 | "shuffle": false 24 | }, 25 | "validate#dataloader#sampler": "@validate#sampler", 26 | "validate#evaluator#val_handlers": "$None if dist.get_rank() > 0 else @validate#handlers", 27 | "initialize": [ 28 | "$import torch.distributed as dist", 29 | "$dist.is_initialized() or dist.init_process_group(backend='nccl')", 30 | "$torch.cuda.set_device(@device)", 31 | "$monai.utils.set_determinism(seed=123)", 32 | "$setattr(torch.backends.cudnn, 'benchmark', True)", 33 | "$import logging", 34 | "$@train#trainer.logger.setLevel(logging.WARNING if dist.get_rank() > 0 else logging.INFO)", 35 | "$@validate#evaluator.logger.setLevel(logging.WARNING if dist.get_rank() > 0 else logging.INFO)" 36 | ], 37 | "run": [ 38 | "$@train#trainer.run()" 39 | ], 40 | "finalize": [ 41 | "$dist.is_initialized() and dist.destroy_process_group()" 42 | ] 43 | } 44 | -------------------------------------------------------------------------------- /models/wholeBody_ct_segmentation/docs/data_license.txt: -------------------------------------------------------------------------------- 1 | Third Party Licenses 2 | ----------------------------------------------------------------------- 3 | 4 | 
/*********************************************************************/ 5 | i. TotalSegmentator 6 | https://zenodo.org/record/6802614#.Y9iTydLMJ6I 7 | -------------------------------------------------------------------------------- /models/wholeBody_ct_segmentation/large_files.yml: -------------------------------------------------------------------------------- 1 | large_files: 2 | - path: "models/model.pt" 3 | url: "https://developer.download.nvidia.com/assets/Clara/monai/tutorials/model_zoo/model_wholebody_ct_segmentation.pt" 4 | hash_val: "aded45dea52f9c00b37922bcd0a98502" 5 | hash_type: "md5" 6 | - path: "models/model_lowres.pt" 7 | url: "https://developer.download.nvidia.com/assets/Clara/monai/tutorials/model_zoo/model_lowres_wholebody_ct_segmentation.pt" 8 | hash_val: "dbdebeae650f4fb5ea7f4ad01e0d2431" 9 | hash_type: "md5" 10 | -------------------------------------------------------------------------------- /models/wholeBrainSeg_Large_UNEST_segmentation/configs/logging.conf: -------------------------------------------------------------------------------- 1 | [loggers] 2 | keys=root 3 | 4 | [handlers] 5 | keys=consoleHandler 6 | 7 | [formatters] 8 | keys=fullFormatter 9 | 10 | [logger_root] 11 | level=INFO 12 | handlers=consoleHandler 13 | 14 | [handler_consoleHandler] 15 | class=StreamHandler 16 | level=INFO 17 | formatter=fullFormatter 18 | args=(sys.stdout,) 19 | 20 | [formatter_fullFormatter] 21 | format=%(asctime)s - %(name)s - %(levelname)s - %(message)s 22 | -------------------------------------------------------------------------------- /models/wholeBrainSeg_Large_UNEST_segmentation/configs/multi_gpu_train.json: -------------------------------------------------------------------------------- 1 | { 2 | "device": "$torch.device(f'cuda:{dist.get_rank()}')", 3 | "network": { 4 | "_target_": "torch.nn.parallel.DistributedDataParallel", 5 | "module": "$@network_def.to(@device)", 6 | "device_ids": [ 7 | "@device" 8 | ] 9 | }, 10 | "train#sampler": { 11 | 
"_target_": "DistributedSampler", 12 | "dataset": "@train#dataset", 13 | "even_divisible": true, 14 | "shuffle": true 15 | }, 16 | "train#dataloader#sampler": "@train#sampler", 17 | "train#dataloader#shuffle": false, 18 | "train#trainer#train_handlers": "$@train#handlers[: -2 if dist.get_rank() > 0 else None]", 19 | "validate#sampler": { 20 | "_target_": "DistributedSampler", 21 | "dataset": "@validate#dataset", 22 | "even_divisible": false, 23 | "shuffle": false 24 | }, 25 | "validate#dataloader#sampler": "@validate#sampler", 26 | "validate#evaluator#val_handlers": "$None if dist.get_rank() > 0 else @validate#handlers", 27 | "training": [ 28 | "$import torch.distributed as dist", 29 | "$dist.init_process_group(backend='nccl')", 30 | "$torch.cuda.set_device(@device)", 31 | "$monai.utils.set_determinism(seed=123)", 32 | "$setattr(torch.backends.cudnn, 'benchmark', True)", 33 | "$@train#trainer.run()", 34 | "$dist.destroy_process_group()" 35 | ] 36 | } 37 | -------------------------------------------------------------------------------- /models/wholeBrainSeg_Large_UNEST_segmentation/docs/3DSlicer_use.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Project-MONAI/model-zoo/f3ae2588766a08a82f88bce1c345aeec4d4c4298/models/wholeBrainSeg_Large_UNEST_segmentation/docs/3DSlicer_use.png -------------------------------------------------------------------------------- /models/wholeBrainSeg_Large_UNEST_segmentation/docs/demo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Project-MONAI/model-zoo/f3ae2588766a08a82f88bce1c345aeec4d4c4298/models/wholeBrainSeg_Large_UNEST_segmentation/docs/demo.png -------------------------------------------------------------------------------- /models/wholeBrainSeg_Large_UNEST_segmentation/docs/training.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Project-MONAI/model-zoo/f3ae2588766a08a82f88bce1c345aeec4d4c4298/models/wholeBrainSeg_Large_UNEST_segmentation/docs/training.png -------------------------------------------------------------------------------- /models/wholeBrainSeg_Large_UNEST_segmentation/docs/unest.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Project-MONAI/model-zoo/f3ae2588766a08a82f88bce1c345aeec4d4c4298/models/wholeBrainSeg_Large_UNEST_segmentation/docs/unest.png -------------------------------------------------------------------------------- /models/wholeBrainSeg_Large_UNEST_segmentation/docs/wholebrain.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Project-MONAI/model-zoo/f3ae2588766a08a82f88bce1c345aeec4d4c4298/models/wholeBrainSeg_Large_UNEST_segmentation/docs/wholebrain.png -------------------------------------------------------------------------------- /models/wholeBrainSeg_Large_UNEST_segmentation/large_files.yml: -------------------------------------------------------------------------------- 1 | large_files: 2 | - path: "models/model.pt" 3 | url: "https://developer.download.nvidia.com/assets/Clara/monai/tutorials/model_zoo/model_wholebrainseg_large_unest_segmentation.pt" 4 | hash_val: "1bbb4b4d01ba785422a94f3ad2a28942" 5 | hash_type: "md5" 6 | -------------------------------------------------------------------------------- /models/wholeBrainSeg_Large_UNEST_segmentation/scripts/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) MONAI Consortium 2 | # Licensed under the Apache License, Version 2.0 (the "License"); 3 | # you may not use this file except in compliance with the License. 
4 | # You may obtain a copy of the License at 5 | # http://www.apache.org/licenses/LICENSE-2.0 6 | # Unless required by applicable law or agreed to in writing, software 7 | # distributed under the License is distributed on an "AS IS" BASIS, 8 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 | # See the License for the specific language governing permissions and 10 | # limitations under the License. 11 | -------------------------------------------------------------------------------- /models/wholeBrainSeg_Large_UNEST_segmentation/scripts/networks/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) MONAI Consortium 2 | # Licensed under the Apache License, Version 2.0 (the "License"); 3 | # you may not use this file except in compliance with the License. 4 | # You may obtain a copy of the License at 5 | # http://www.apache.org/licenses/LICENSE-2.0 6 | # Unless required by applicable law or agreed to in writing, software 7 | # distributed under the License is distributed on an "AS IS" BASIS, 8 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 | # See the License for the specific language governing permissions and 10 | # limitations under the License. 
11 | -------------------------------------------------------------------------------- /models/wholeBrainSeg_Large_UNEST_segmentation/scripts/networks/nest/__init__.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | from .utils import ( 3 | Conv3dSame, 4 | DropPath, 5 | Linear, 6 | Mlp, 7 | _assert, 8 | conv3d_same, 9 | create_conv3d, 10 | create_pool3d, 11 | get_padding, 12 | get_same_padding, 13 | pad_same, 14 | to_ntuple, 15 | trunc_normal_, 16 | ) 17 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [tool.black] 2 | line-length = 120 3 | target-version = ['py37', 'py38', 'py39'] 4 | include = '\.pyi?$' 5 | exclude = ''' 6 | ( 7 | /( 8 | # exclude a few common directories in the root of the project 9 | \.eggs 10 | | \.git 11 | | \.hg 12 | | \.mypy_cache 13 | | \.tox 14 | | \.venv 15 | | \.pytype 16 | | _build 17 | | buck-out 18 | | build 19 | | dist 20 | )/ 21 | # also separately exclude a file named versioneer.py 22 | | monai/_version.py 23 | ) 24 | ''' 25 | 26 | [tool.pytype] 27 | # Space-separated list of files or directories to process. 28 | inputs = ["models", "ci"] 29 | # Keep going past errors to analyze as many files as possible. 30 | keep_going = true 31 | # Run N jobs in parallel. 32 | jobs = 8 33 | # All pytype output goes here. 34 | output = ".pytype" 35 | # Paths to source code directories, separated by ':'. 36 | pythonpath = "." 37 | # Check attribute values against their annotations. 38 | check_attribute_types = true 39 | # Check container mutations against their annotations. 40 | check_container_types = true 41 | # Check parameter defaults and assignments against their annotations. 42 | check_parameter_types = true 43 | # Check variable values against their annotations. 
44 | check_variable_types = true 45 | # Comma or space separated list of error names to ignore. 46 | disable = ["pyi-error", "import-error"] 47 | # Report errors. 48 | report_errors = true 49 | # Experimental: Infer precise return types even for invalid function calls. 50 | precise_return = true 51 | # Experimental: solve unknown types to label with structural types. 52 | protocols = true 53 | # Experimental: Only load submodules that are explicitly imported. 54 | strict_import = false 55 | -------------------------------------------------------------------------------- /requirements-dev.txt: -------------------------------------------------------------------------------- 1 | # Full requirements for developments 2 | flake8>=3.8.1 3 | flake8-bugbear 4 | flake8-comprehensions 5 | flake8-executable 6 | pylint!=2.13 # https://github.com/PyCQA/pylint/issues/5969 7 | mccabe 8 | pep8-naming 9 | pycodestyle 10 | pyflakes 11 | black<25 12 | isort<6.0.0 13 | pytype>=2020.6.1; platform_system != "Windows" 14 | types-setuptools 15 | mypy>=0.790 16 | pre-commit 17 | fire 18 | pytorch-ignite>=0.4.9 19 | einops 20 | nibabel 21 | pyyaml 22 | jsonschema 23 | gdown>=4.5.4 24 | tensorboard 25 | parameterized 26 | monai>=1.2.0 27 | pillow!=8.3.0 # https://github.com/python-pillow/Pillow/issues/5571 28 | itk>=5.2 29 | scikit-learn 30 | pandas 31 | cucim==22.8.1; platform_system == "Linux" 32 | scikit-image>=0.19.0 33 | PyGithub 34 | huggingface_hub==0.29.3 35 | -------------------------------------------------------------------------------- /requirements-update-model.txt: -------------------------------------------------------------------------------- 1 | monai>=1.0.1 2 | gdown>=4.5.4 3 | PyGithub 4 | pyyaml 5 | huggingface_hub==0.29.3 6 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | monai>=1.0.1 2 | 
-------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [flake8] 2 | select = B,C,E,F,N,P,T4,W,B9 3 | max_line_length = 120 4 | # C408 ignored because we like the dict keyword argument syntax 5 | # E501 is not flexible enough, we're using B950 instead 6 | # N812 lowercase 'torch.nn.functional' imported as non lowercase 'F' 7 | # B023 https://github.com/Project-MONAI/MONAI/issues/4627 8 | # B028 https://github.com/Project-MONAI/MONAI/issues/5855 9 | # B907 https://github.com/Project-MONAI/MONAI/issues/5868 10 | ignore = 11 | E203 12 | E501 13 | E741 14 | W503 15 | W504 16 | C408 17 | N812 18 | B023 19 | B905 20 | B028 21 | B907 22 | C419 23 | per_file_ignores = __init__.py: F401, __main__.py: F401 24 | exclude = *.pyi,.git,.eggs,monai/_version.py,versioneer.py,venv,.venv,_version.py 25 | 26 | [isort] 27 | known_first_party = models 28 | profile = black 29 | line_length = 120 30 | skip = .git, .eggs, venv, .venv, versioneer.py, _version.py, conf.py 31 | skip_glob = *.pyi 32 | --------------------------------------------------------------------------------