├── UNETR └── BTCV │ ├── .gitkeep │ ├── runs │ └── __init__.py │ ├── utils │ ├── __init__.py │ └── utils.py │ ├── dataset │ └── __init__.py │ ├── networks │ └── __init__.py │ ├── optimizers │ └── __init__.py │ ├── pretrained_models │ └── __init__.py │ └── requirements.txt ├── SwinMM ├── WORD │ ├── runs │ │ └── __init__.py │ ├── dataset │ │ ├── __init__.py │ │ └── Download Finetune Jsons Here │ ├── outputs │ │ └── __init__.py │ ├── utils │ │ ├── __init__.py │ │ ├── view_ops.py │ │ ├── test_view_transforms.py │ │ ├── view_transforms.py │ │ ├── misc.py │ │ └── dataset_in_memory.py │ ├── optimizers │ │ └── __init__.py │ ├── pretrained_models │ │ └── __init__.py │ ├── requirements.txt │ ├── models │ │ └── __init__.py │ ├── run.sh │ └── README.md ├── Pretrain │ ├── jsons │ │ ├── __init__.py │ │ └── Download Pretrain Jsons Here │ ├── utils │ │ ├── __init__.py │ │ ├── view_transforms.py │ │ ├── dataset_in_memory.py │ │ ├── view_ops.py │ │ └── ops.py │ ├── optimizers │ │ └── __init__.py │ ├── run.sh │ └── losses │ │ └── loss.py ├── figures │ ├── ACDC.png │ ├── Result.png │ ├── finetune.png │ ├── pretrain.png │ └── SwinMMArch.png ├── scripts │ ├── setup_env.sh │ └── start_redis.sh ├── requirements.txt ├── TRAINING.md ├── README.md └── INSTALL.md ├── SwinUNETR ├── BTCV │ ├── runs │ │ └── __init__.py │ ├── utils │ │ ├── __init__.py │ │ └── utils.py │ ├── dataset │ │ └── __init__.py │ ├── optimizers │ │ └── __init__.py │ ├── outputs │ │ └── __init__.py │ ├── pretrained_models │ │ └── __init__.py │ ├── assets │ │ └── swin_unetr.png │ └── requirements.txt ├── BRATS21 │ ├── jsons │ │ └── __init__.py │ ├── outputs │ │ └── __init__.py │ ├── utils │ │ ├── __init__.py │ │ └── utils.py │ ├── optimizers │ │ ├── __init__.py │ │ └── lr_scheduler.py │ ├── pretrained_models │ │ └── __init__.py │ ├── assets │ │ ├── fig_brats21.png │ │ ├── swin_unetr.png │ │ └── swin_unetr_attention_.png │ └── requirements.txt └── Pretrain │ ├── jsons │ └── __init__.py │ ├── runs │ └── __init__.py │ ├── utils │ ├── __init__.py │ └── ops.py │ ├── optimizers │ └── __init__.py │ ├── assets │ ├── inpaint.gif │ ├── ssl_swin.png │ └── swin_unetr.png │ ├── requirements.txt │ └── losses │ └── loss.py ├── DAE ├── BTCV_Finetune │ ├── utils │ │ ├── __init__.py │ │ └── utils.py │ ├── optimizers │ │ └── __init__.py │ ├── requirements.txt │ ├── README.md │ └── mlp_new.py ├── Feta_Finetune │ ├── utils │ │ ├── __init__.py │ │ └── utils.py │ ├── optimizers │ │ └── __init__.py │ ├── requirements.txt │ ├── README.md │ └── mlp_new.py └── Pretrain_full_contrast │ ├── models │ ├── __init__.py │ └── build.py │ ├── requirements.txt │ ├── figs │ ├── dae_recon.png │ └── dae_overview.png │ ├── data │ └── __init__.py │ ├── run_me_local.sh │ ├── logger.py │ └── scripts │ └── ablation_m20.sh ├── SkullRec ├── output_monai │ └── README.md ├── figs │ ├── dataset.png │ ├── facial_rec.png │ ├── cranial_rec.png │ └── monai_results.png ├── pre_trained_weights │ └── readme.md ├── cranialDefects.py ├── skullfix_pretrained_weight │ └── README.md └── facialDefects.py ├── DiNTS ├── arch_code_cvpr.pth ├── run_install_monai.sh ├── run_ensemble.sh ├── download_msd_datasets.py ├── run_infer_multi-gpu.sh ├── configs │ ├── config_Task01_BrainTumour.yaml │ ├── config_Task04_Hippocampus.yaml │ ├── config_Task02_Heart.yaml │ ├── config_Task05_Prostate.yaml │ ├── config_Task03_Liver.yaml │ ├── config_Task10_Colon.yaml │ ├── config_Task06_Lung.yaml │ ├── config_Task07_Pancreas.yaml │ ├── config_Task08_HepaticVessel.yaml │ └── config_Task09_Spleen.yaml ├── run_train_4gpu.sh ├── 
run_train_multi-gpu.sh └── utils.py ├── prostate-mri-lesion-seg ├── imgs │ ├── organ_seg.png │ ├── lesion_mask.png │ ├── lesion_prob.png │ └── workflow-diagram.png ├── prostate_mri_lesion_seg_app │ ├── requirements.txt │ ├── app.yaml │ └── __main__.py ├── scripts │ ├── compare_output.sh │ ├── eval_dice.py │ ├── test_local.sh │ └── test_MAP.sh └── LICENSE ├── coplenet-pneumonia-lesion-segmentation ├── fig │ ├── img.png │ └── seg.png ├── __init__.py ├── README.md ├── test_coplenet.py └── run_inference.py ├── auto3dseg ├── algorithm_templates │ ├── dints │ │ ├── scripts │ │ │ ├── arch_code.pth │ │ │ └── __init__.py │ │ ├── configs │ │ │ ├── network_search.yaml │ │ │ ├── transforms_infer.yaml │ │ │ ├── network.yaml │ │ │ ├── transforms_validate.yaml │ │ │ ├── hyper_parameters_search.yaml │ │ │ ├── transforms_train.yaml │ │ │ └── hyper_parameters.yaml │ │ ├── __init__.py │ │ └── docs │ │ │ └── README.md │ ├── swinunetr │ │ ├── configs │ │ │ ├── network.yaml │ │ │ ├── transforms_infer.yaml │ │ │ ├── transforms_validate.yaml │ │ │ ├── transforms_train.yaml │ │ │ └── hyper_parameters.yaml │ │ ├── __init__.py │ │ ├── scripts │ │ │ └── __init__.py │ │ └── docs │ │ │ └── README.md │ ├── segresnet │ │ ├── __init__.py │ │ ├── scripts │ │ │ ├── __init__.py │ │ │ ├── train.py │ │ │ ├── validate.py │ │ │ └── infer.py │ │ ├── docs │ │ │ └── README.md │ │ └── configs │ │ │ └── hyper_parameters.yaml │ └── segresnet2d │ │ ├── __init__.py │ │ ├── scripts │ │ ├── __init__.py │ │ ├── train.py │ │ ├── validate.py │ │ └── infer.py │ │ ├── docs │ │ └── README.md │ │ └── configs │ │ └── hyper_parameters.yaml ├── configs │ └── metadata.json └── README.md ├── lamp-automated-model-parallelism ├── fig │ └── acc_speed_han_0_5hor.png ├── __init__.py ├── test_unet_pipe.py ├── README.md └── data_utils.py ├── setup.cfg ├── pyproject.toml ├── .github ├── ISSUE_TEMPLATE │ ├── feature_request.md │ └── bug_report.md └── workflows │ ├── chatops.yml │ ├── packaging-algo.yml │ └── integration.yml ├── README.md ├── .gitignore └── .pre-commit-config.yaml /UNETR/BTCV/.gitkeep: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /SwinMM/WORD/runs/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /UNETR/BTCV/runs/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /UNETR/BTCV/utils/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /SwinMM/Pretrain/jsons/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /SwinMM/Pretrain/utils/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /SwinMM/WORD/dataset/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /SwinMM/WORD/outputs/__init__.py: -------------------------------------------------------------------------------- 1 | 
-------------------------------------------------------------------------------- /SwinMM/WORD/utils/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /SwinUNETR/BTCV/runs/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /SwinUNETR/BTCV/utils/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /UNETR/BTCV/dataset/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /UNETR/BTCV/networks/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /UNETR/BTCV/optimizers/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /DAE/BTCV_Finetune/utils/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /DAE/Feta_Finetune/utils/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /SwinMM/Pretrain/optimizers/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /SwinMM/WORD/optimizers/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /SwinUNETR/BRATS21/jsons/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /SwinUNETR/BRATS21/outputs/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /SwinUNETR/BRATS21/utils/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /SwinUNETR/BTCV/dataset/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /SwinUNETR/BTCV/optimizers/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /SwinUNETR/BTCV/outputs/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /SwinUNETR/Pretrain/jsons/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- 
/SwinUNETR/Pretrain/runs/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /SwinUNETR/Pretrain/utils/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /DAE/BTCV_Finetune/optimizers/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /DAE/Feta_Finetune/optimizers/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /SwinMM/WORD/pretrained_models/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /SwinMM/WORD/requirements.txt: -------------------------------------------------------------------------------- 1 | timm>=0.6 2 | -------------------------------------------------------------------------------- /SwinUNETR/BRATS21/optimizers/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /SwinUNETR/Pretrain/optimizers/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /UNETR/BTCV/pretrained_models/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /SwinMM/WORD/dataset/Download Finetune Jsons Here: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /SwinUNETR/BRATS21/pretrained_models/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /SwinUNETR/BTCV/pretrained_models/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /SwinMM/Pretrain/jsons/Download Pretrain Jsons Here: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /SwinMM/WORD/models/__init__.py: -------------------------------------------------------------------------------- 1 | from .swin_unetr import * 2 | -------------------------------------------------------------------------------- /DAE/Pretrain_full_contrast/models/__init__.py: -------------------------------------------------------------------------------- 1 | from .build import build_model 2 | -------------------------------------------------------------------------------- /SwinMM/Pretrain/utils/view_transforms.py: -------------------------------------------------------------------------------- 1 | # ../../WORD/utils/view_transforms.py 2 | -------------------------------------------------------------------------------- /SwinMM/Pretrain/utils/dataset_in_memory.py: 
-------------------------------------------------------------------------------- 1 | # ../../WORD/utils/dataset_in_memory.py 2 | -------------------------------------------------------------------------------- /SkullRec/output_monai/README.md: -------------------------------------------------------------------------------- 1 | ### output folder for the predictions of the test data 2 | -------------------------------------------------------------------------------- /SwinMM/figures/ACDC.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Project-MONAI/research-contributions/HEAD/SwinMM/figures/ACDC.png -------------------------------------------------------------------------------- /DiNTS/arch_code_cvpr.pth: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Project-MONAI/research-contributions/HEAD/DiNTS/arch_code_cvpr.pth -------------------------------------------------------------------------------- /SkullRec/figs/dataset.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Project-MONAI/research-contributions/HEAD/SkullRec/figs/dataset.png -------------------------------------------------------------------------------- /SwinMM/figures/Result.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Project-MONAI/research-contributions/HEAD/SwinMM/figures/Result.png -------------------------------------------------------------------------------- /SkullRec/figs/facial_rec.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Project-MONAI/research-contributions/HEAD/SkullRec/figs/facial_rec.png -------------------------------------------------------------------------------- /SwinMM/figures/finetune.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Project-MONAI/research-contributions/HEAD/SwinMM/figures/finetune.png -------------------------------------------------------------------------------- /SwinMM/figures/pretrain.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Project-MONAI/research-contributions/HEAD/SwinMM/figures/pretrain.png -------------------------------------------------------------------------------- /SkullRec/figs/cranial_rec.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Project-MONAI/research-contributions/HEAD/SkullRec/figs/cranial_rec.png -------------------------------------------------------------------------------- /SwinMM/figures/SwinMMArch.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Project-MONAI/research-contributions/HEAD/SwinMM/figures/SwinMMArch.png -------------------------------------------------------------------------------- /SkullRec/figs/monai_results.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Project-MONAI/research-contributions/HEAD/SkullRec/figs/monai_results.png -------------------------------------------------------------------------------- /UNETR/BTCV/requirements.txt: -------------------------------------------------------------------------------- 1 | torch==1.9.1 2 
| monai==0.7.0 3 | nibabel==3.1.1 4 | tqdm==4.59.0 5 | einops==0.3.0 6 | tensorboardX==2.1 7 | -------------------------------------------------------------------------------- /DAE/BTCV_Finetune/requirements.txt: -------------------------------------------------------------------------------- 1 | monai==0.8.0 2 | nibabel==3.1.1 3 | tqdm==4.59.0 4 | einops==0.4.1 5 | tensorboardX==2.1 6 | scipy==1.10.0 7 | -------------------------------------------------------------------------------- /DAE/Feta_Finetune/requirements.txt: -------------------------------------------------------------------------------- 1 | monai==0.8.0 2 | nibabel==3.1.1 3 | tqdm==4.59.0 4 | einops==0.4.1 5 | tensorboardX==2.1 6 | scipy==1.10.0 7 | -------------------------------------------------------------------------------- /SwinUNETR/BTCV/assets/swin_unetr.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Project-MONAI/research-contributions/HEAD/SwinUNETR/BTCV/assets/swin_unetr.png -------------------------------------------------------------------------------- /SwinUNETR/Pretrain/assets/inpaint.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Project-MONAI/research-contributions/HEAD/SwinUNETR/Pretrain/assets/inpaint.gif -------------------------------------------------------------------------------- /SwinUNETR/Pretrain/assets/ssl_swin.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Project-MONAI/research-contributions/HEAD/SwinUNETR/Pretrain/assets/ssl_swin.png -------------------------------------------------------------------------------- /DAE/Pretrain_full_contrast/requirements.txt: -------------------------------------------------------------------------------- 1 | pyyaml 2 | scipy 3 | termcolor 4 | timm==0.4.12 5 | yacs 6 | einops 7 | tensorboardX 8 | monai==0.8.0 9 | -------------------------------------------------------------------------------- /SwinUNETR/BRATS21/assets/fig_brats21.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Project-MONAI/research-contributions/HEAD/SwinUNETR/BRATS21/assets/fig_brats21.png -------------------------------------------------------------------------------- /SwinUNETR/BRATS21/assets/swin_unetr.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Project-MONAI/research-contributions/HEAD/SwinUNETR/BRATS21/assets/swin_unetr.png -------------------------------------------------------------------------------- /SwinUNETR/Pretrain/assets/swin_unetr.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Project-MONAI/research-contributions/HEAD/SwinUNETR/Pretrain/assets/swin_unetr.png -------------------------------------------------------------------------------- /SwinUNETR/Pretrain/requirements.txt: -------------------------------------------------------------------------------- 1 | git+https://github.com/Project-MONAI/MONAI.git@a23c7f54 2 | nibabel==3.1.1 3 | einops==0.4.1 4 | tensorboardX==2.1 5 | -------------------------------------------------------------------------------- /prostate-mri-lesion-seg/imgs/organ_seg.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Project-MONAI/research-contributions/HEAD/prostate-mri-lesion-seg/imgs/organ_seg.png -------------------------------------------------------------------------------- /DAE/Pretrain_full_contrast/figs/dae_recon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Project-MONAI/research-contributions/HEAD/DAE/Pretrain_full_contrast/figs/dae_recon.png -------------------------------------------------------------------------------- /prostate-mri-lesion-seg/imgs/lesion_mask.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Project-MONAI/research-contributions/HEAD/prostate-mri-lesion-seg/imgs/lesion_mask.png -------------------------------------------------------------------------------- /prostate-mri-lesion-seg/imgs/lesion_prob.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Project-MONAI/research-contributions/HEAD/prostate-mri-lesion-seg/imgs/lesion_prob.png -------------------------------------------------------------------------------- /DAE/Pretrain_full_contrast/figs/dae_overview.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Project-MONAI/research-contributions/HEAD/DAE/Pretrain_full_contrast/figs/dae_overview.png -------------------------------------------------------------------------------- /SwinUNETR/BRATS21/assets/swin_unetr_attention_.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Project-MONAI/research-contributions/HEAD/SwinUNETR/BRATS21/assets/swin_unetr_attention_.png -------------------------------------------------------------------------------- /SwinUNETR/BRATS21/requirements.txt: -------------------------------------------------------------------------------- 1 | git+https://github.com/Project-MONAI/MONAI.git@07de215c 2 | nibabel==3.1.1 3 | tqdm==4.59.0 4 | einops==0.4.1 5 | tensorboardX==2.1 6 | -------------------------------------------------------------------------------- /coplenet-pneumonia-lesion-segmentation/fig/img.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Project-MONAI/research-contributions/HEAD/coplenet-pneumonia-lesion-segmentation/fig/img.png -------------------------------------------------------------------------------- /coplenet-pneumonia-lesion-segmentation/fig/seg.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Project-MONAI/research-contributions/HEAD/coplenet-pneumonia-lesion-segmentation/fig/seg.png -------------------------------------------------------------------------------- /prostate-mri-lesion-seg/imgs/workflow-diagram.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Project-MONAI/research-contributions/HEAD/prostate-mri-lesion-seg/imgs/workflow-diagram.png -------------------------------------------------------------------------------- /SwinUNETR/BTCV/requirements.txt: -------------------------------------------------------------------------------- 1 | git+https://github.com/Project-MONAI/MONAI.git@07de215c 2 | nibabel==3.1.1 3 | tqdm==4.59.0 4 | einops==0.4.1 5 | tensorboardX==2.1 6 | scipy==1.2.1 7 | 
-------------------------------------------------------------------------------- /auto3dseg/algorithm_templates/dints/scripts/arch_code.pth: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Project-MONAI/research-contributions/HEAD/auto3dseg/algorithm_templates/dints/scripts/arch_code.pth -------------------------------------------------------------------------------- /lamp-automated-model-parallelism/fig/acc_speed_han_0_5hor.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Project-MONAI/research-contributions/HEAD/lamp-automated-model-parallelism/fig/acc_speed_han_0_5hor.png -------------------------------------------------------------------------------- /SwinMM/scripts/setup_env.sh: -------------------------------------------------------------------------------- 1 | # Activate the corresponding conda env first 2 | conda install -c conda-forge redis redis-py 3 | pip install timm 4 | pip install bagua-cuda111 5 | pip install setuptools==59.5.0 6 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [isort] 2 | known_first_party = monai 3 | profile = black 4 | line_length = 120 5 | skip = .git, .eggs, venv, .venv, versioneer.py, _version.py, conf.py, monai/__init__.py 6 | skip_glob = *.pyi 7 | -------------------------------------------------------------------------------- /auto3dseg/algorithm_templates/swinunetr/configs/network.yaml: -------------------------------------------------------------------------------- 1 | network: 2 | _target_: SwinUNETR 3 | feature_size: 48 4 | in_channels: "@input_channels" 5 | out_channels: "@output_classes" 6 | spatial_dims: 3 7 | use_checkpoint: false 8 | use_v2: false 9 | -------------------------------------------------------------------------------- /prostate-mri-lesion-seg/prostate_mri_lesion_seg_app/requirements.txt: -------------------------------------------------------------------------------- 1 | torch==2.6.0 2 | monai==1.4.0 3 | holoscan==2.9.0 4 | holoscan-cli==2.9.0 5 | monai-deploy-app-sdk==2.0.0 6 | pydicom==3.0.1 7 | highdicom==0.25.1 8 | scikit-image==0.25.2 9 | SimpleITK==2.4.1 10 | nibabel==5.3.2 11 | -------------------------------------------------------------------------------- /DiNTS/run_install_monai.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | clear 3 | 4 | pip install pandas 5 | python -m pip install -U scikit-image 6 | pip install nibabel 7 | 8 | pip uninstall --yes monai 9 | pip install git+https://github.com/Project-MONAI/MONAI#egg=monai 10 | 11 | pip uninstall --yes tqdm 12 | pip install tqdm 13 | -------------------------------------------------------------------------------- /DAE/Pretrain_full_contrast/data/__init__.py: -------------------------------------------------------------------------------- 1 | from .data_finetune import build_loader_finetune 2 | from .data_pretrain import build_loader_simmim 3 | 4 | 5 | def build_loader(args, is_pretrain): 6 | if is_pretrain: 7 | return build_loader_simmim(args) 8 | else: 9 | return build_loader_finetune(args) 10 | -------------------------------------------------------------------------------- /prostate-mri-lesion-seg/prostate_mri_lesion_seg_app/app.yaml: -------------------------------------------------------------------------------- 1 | %YAML 1.2 2 | --- 3 | 
application: 4 | title: MONAI Deploy App Package - Simple Imaging App 5 | version: 1.0 6 | inputFormats: ["file"] 7 | outputFormats: ["file"] 8 | 9 | resources: 10 | cpu: 8 11 | gpu: 1 12 | memory: 7Gi 13 | gpuMemory: 7Gi 14 | -------------------------------------------------------------------------------- /SwinMM/WORD/run.sh: -------------------------------------------------------------------------------- 1 | python -m torch.distributed.launch --nproc_per_node=8 --master_port=11223 main.py --batch_size=2 \ 2 | --num_steps=30000 \ 3 | --lrdecay \ 4 | --eval_num=500 \ 5 | --lr=5e-4 \ 6 | --decay=0.1 \ 7 | --norm_pix_loss \ 8 | --redis_ports 39996 39997 39998 39999 \ 9 | --redis_compression zlib 10 | -------------------------------------------------------------------------------- /SwinMM/Pretrain/run.sh: -------------------------------------------------------------------------------- 1 | python -m torch.distributed.launch --nproc_per_node=8 --master_port=11223 main.py \ 2 | --batch_size=2 \ 3 | --num_steps=30000 \ 4 | --lrdecay \ 5 | --eval_num=500 \ 6 | --lr=5e-4 \ 7 | --decay=0.1 \ 8 | --norm_pix_loss \ 9 | --redis_ports 39996 39997 39998 39999 \ 10 | --redis_compression zlib 11 | -------------------------------------------------------------------------------- /SwinMM/scripts/start_redis.sh: -------------------------------------------------------------------------------- 1 | ports="39999 39998 39997 39996" 2 | for port in ${ports}; do 3 | echo "run redis at localhost:${port}" 4 | redis-server \ 5 | --daemonize yes \ 6 | --port ${port} \ 7 | --maxclients 100000 \ 8 | --maxmemory 0 \ 9 | --maxmemory-policy noeviction \ 10 | --appendonly no \ 11 | --save "" \ 12 | --protected-mode no 13 | done 14 | -------------------------------------------------------------------------------- /auto3dseg/algorithm_templates/dints/configs/network_search.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | searching_network: 3 | dints_space: 4 | _target_: TopologySearch 5 | channel_mul: 0.5 6 | num_blocks: 12 7 | num_depths: 4 8 | use_downsample: true 9 | device: "$torch.device('cuda')" 10 | network: 11 | _target_: DiNTS 12 | dints_space: "$@searching_network#dints_space" 13 | in_channels: "@searching#input_channels" 14 | num_classes: "@searching#output_classes" 15 | use_downsample: true 16 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [tool.black] 2 | line-length = 120 3 | target-version = ['py37', 'py38', 'py39'] 4 | include = '\.pyi?$' 5 | exclude = ''' 6 | ( 7 | /( 8 | # exclude a few common directories in the root of the project 9 | \.eggs 10 | | \.git 11 | | \.hg 12 | | \.mypy_cache 13 | | \.tox 14 | | \.venv 15 | | \.pytype 16 | | _build 17 | | buck-out 18 | | build 19 | | dist 20 | )/ 21 | # also separately exclude a file named versioneer.py 22 | | monai/_version.py 23 | ) 24 | ''' 25 | -------------------------------------------------------------------------------- /SwinMM/Pretrain/utils/view_ops.py: -------------------------------------------------------------------------------- 1 | """View operations.""" 2 | 3 | from typing import Tuple 4 | 5 | import numpy as np 6 | import torch 7 | from utils import view_transforms 8 | 9 | 10 | def rot_rand(xs: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: 11 | img_n = xs.size()[0] 12 | x_aug = xs.detach().clone() 13 | x_rot = torch.zeros(img_n, dtype=torch.int64, 
device=xs.device) 14 | for i in range(img_n): 15 | orientation = np.random.randint(0, 4) 16 | x_aug[i] = view_transforms.rotation_transforms[orientation](xs[i].unsqueeze(0)) 17 | x_rot[i] = orientation 18 | return x_aug, x_rot 19 | -------------------------------------------------------------------------------- /DAE/Pretrain_full_contrast/models/build.py: -------------------------------------------------------------------------------- 1 | from .simmim_advanced import build_simmim 2 | from .swin_transformer import build_swin 3 | from .vision_transformer import build_vit 4 | 5 | 6 | def build_model(args, is_pretrain=True): 7 | if is_pretrain: 8 | model = build_simmim(args) 9 | else: 10 | model_type = args.model_type 11 | if model_type == "swin": 12 | model = build_swin(args) 13 | elif model_type == "vit": 14 | model = build_vit(args) 15 | else: 16 | raise NotImplementedError(f"Unknown fine-tune model: {model_type}") 17 | 18 | return model 19 | -------------------------------------------------------------------------------- /auto3dseg/algorithm_templates/dints/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) MONAI Consortium 2 | # Licensed under the Apache License, Version 2.0 (the "License"); 3 | # you may not use this file except in compliance with the License. 4 | # You may obtain a copy of the License at 5 | # http://www.apache.org/licenses/LICENSE-2.0 6 | # Unless required by applicable law or agreed to in writing, software 7 | # distributed under the License is distributed on an "AS IS" BASIS, 8 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 | # See the License for the specific language governing permissions and 10 | # limitations under the License. 11 | -------------------------------------------------------------------------------- /lamp-automated-model-parallelism/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 MONAI Consortium 2 | # Licensed under the Apache License, Version 2.0 (the "License"); 3 | # you may not use this file except in compliance with the License. 4 | # You may obtain a copy of the License at 5 | # http://www.apache.org/licenses/LICENSE-2.0 6 | # Unless required by applicable law or agreed to in writing, software 7 | # distributed under the License is distributed on an "AS IS" BASIS, 8 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 | # See the License for the specific language governing permissions and 10 | # limitations under the License. 11 | -------------------------------------------------------------------------------- /auto3dseg/algorithm_templates/segresnet/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) MONAI Consortium 2 | # Licensed under the Apache License, Version 2.0 (the "License"); 3 | # you may not use this file except in compliance with the License. 4 | # You may obtain a copy of the License at 5 | # http://www.apache.org/licenses/LICENSE-2.0 6 | # Unless required by applicable law or agreed to in writing, software 7 | # distributed under the License is distributed on an "AS IS" BASIS, 8 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 | # See the License for the specific language governing permissions and 10 | # limitations under the License. 
11 | -------------------------------------------------------------------------------- /auto3dseg/algorithm_templates/segresnet2d/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) MONAI Consortium 2 | # Licensed under the Apache License, Version 2.0 (the "License"); 3 | # you may not use this file except in compliance with the License. 4 | # You may obtain a copy of the License at 5 | # http://www.apache.org/licenses/LICENSE-2.0 6 | # Unless required by applicable law or agreed to in writing, software 7 | # distributed under the License is distributed on an "AS IS" BASIS, 8 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 | # See the License for the specific language governing permissions and 10 | # limitations under the License. 11 | -------------------------------------------------------------------------------- /auto3dseg/algorithm_templates/swinunetr/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) MONAI Consortium 2 | # Licensed under the Apache License, Version 2.0 (the "License"); 3 | # you may not use this file except in compliance with the License. 4 | # You may obtain a copy of the License at 5 | # http://www.apache.org/licenses/LICENSE-2.0 6 | # Unless required by applicable law or agreed to in writing, software 7 | # distributed under the License is distributed on an "AS IS" BASIS, 8 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 | # See the License for the specific language governing permissions and 10 | # limitations under the License. 11 | -------------------------------------------------------------------------------- /coplenet-pneumonia-lesion-segmentation/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 MONAI Consortium 2 | # Licensed under the Apache License, Version 2.0 (the "License"); 3 | # you may not use this file except in compliance with the License. 4 | # You may obtain a copy of the License at 5 | # http://www.apache.org/licenses/LICENSE-2.0 6 | # Unless required by applicable law or agreed to in writing, software 7 | # distributed under the License is distributed on an "AS IS" BASIS, 8 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 | # See the License for the specific language governing permissions and 10 | # limitations under the License. 11 | -------------------------------------------------------------------------------- /auto3dseg/algorithm_templates/dints/scripts/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) MONAI Consortium 2 | # Licensed under the Apache License, Version 2.0 (the "License"); 3 | # you may not use this file except in compliance with the License. 4 | # You may obtain a copy of the License at 5 | # http://www.apache.org/licenses/LICENSE-2.0 6 | # Unless required by applicable law or agreed to in writing, software 7 | # distributed under the License is distributed on an "AS IS" BASIS, 8 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 | # See the License for the specific language governing permissions and 10 | # limitations under the License. 
11 | -------------------------------------------------------------------------------- /auto3dseg/algorithm_templates/segresnet/scripts/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) MONAI Consortium 2 | # Licensed under the Apache License, Version 2.0 (the "License"); 3 | # you may not use this file except in compliance with the License. 4 | # You may obtain a copy of the License at 5 | # http://www.apache.org/licenses/LICENSE-2.0 6 | # Unless required by applicable law or agreed to in writing, software 7 | # distributed under the License is distributed on an "AS IS" BASIS, 8 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 | # See the License for the specific language governing permissions and 10 | # limitations under the License. 11 | -------------------------------------------------------------------------------- /auto3dseg/algorithm_templates/swinunetr/scripts/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) MONAI Consortium 2 | # Licensed under the Apache License, Version 2.0 (the "License"); 3 | # you may not use this file except in compliance with the License. 4 | # You may obtain a copy of the License at 5 | # http://www.apache.org/licenses/LICENSE-2.0 6 | # Unless required by applicable law or agreed to in writing, software 7 | # distributed under the License is distributed on an "AS IS" BASIS, 8 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 | # See the License for the specific language governing permissions and 10 | # limitations under the License. 11 | -------------------------------------------------------------------------------- /auto3dseg/algorithm_templates/segresnet2d/scripts/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) MONAI Consortium 2 | # Licensed under the Apache License, Version 2.0 (the "License"); 3 | # you may not use this file except in compliance with the License. 4 | # You may obtain a copy of the License at 5 | # http://www.apache.org/licenses/LICENSE-2.0 6 | # Unless required by applicable law or agreed to in writing, software 7 | # distributed under the License is distributed on an "AS IS" BASIS, 8 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 | # See the License for the specific language governing permissions and 10 | # limitations under the License. 
11 | -------------------------------------------------------------------------------- /auto3dseg/algorithm_templates/swinunetr/configs/transforms_infer.yaml: -------------------------------------------------------------------------------- 1 | image_key: image 2 | transforms_infer: 3 | _target_: Compose 4 | transforms: 5 | - _target_: LoadImaged 6 | keys: "@image_key" 7 | image_only: True 8 | - _target_: EnsureChannelFirstd 9 | keys: "@image_key" 10 | - PLACEHOLDER_INTENSITY_NORMALIZATION 11 | - _target_: Orientationd 12 | keys: "@image_key" 13 | axcodes: RAS 14 | - _target_: Spacingd 15 | keys: "@image_key" 16 | pixdim: "$@transforms#resample_resolution" 17 | mode: bilinear 18 | align_corners: true 19 | - _target_: CastToTyped 20 | keys: "@image_key" 21 | dtype: "$torch.float32" 22 | -------------------------------------------------------------------------------- /auto3dseg/algorithm_templates/dints/configs/transforms_infer.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | image_key: image 3 | transforms_infer: 4 | _target_: Compose 5 | transforms: 6 | - _target_: LoadImaged 7 | keys: "@image_key" 8 | image_only: false 9 | - _target_: EnsureChannelFirstd 10 | keys: "@image_key" 11 | - PLACEHOLDER_INTENSITY_NORMALIZATION 12 | - _target_: Orientationd 13 | keys: "@image_key" 14 | axcodes: RAS 15 | - _target_: Spacingd 16 | keys: "@image_key" 17 | pixdim: "@training#transforms#resample_resolution" 18 | mode: bilinear 19 | align_corners: true 20 | - _target_: CastToTyped 21 | keys: "@image_key" 22 | dtype: "$torch.float32" 23 | -------------------------------------------------------------------------------- /SwinMM/requirements.txt: -------------------------------------------------------------------------------- 1 | apex==0.9.10dev 2 | batchgenerators==0.25 3 | caffe2==0.8.1 4 | colorama==0.4.6 5 | Flask==2.3.2 6 | gevent==23.9.0 7 | gorilla==0.4.0 8 | hypothesis==6.81.2 9 | lz4==4.3.2 10 | monai==1.2.0 11 | nibabel==5.1.0 12 | numba==0.57.1 13 | numpy==1.25.1 14 | pssh==2.3.1 15 | ptvsd==4.3.2 16 | pydantic==2.0.3 17 | pylzma==0.5.0 18 | pytest==7.4.0 19 | redis==4.6.0 20 | Requests==2.31.0 21 | scipy==1.11.1 22 | setuptools==65.6.3 23 | setuptools_rust==1.6.0 24 | tensorboardX==2.6.1 25 | tensorflow_datasets==4.9.2 26 | timm==0.9.2 27 | torchvision==0.15.2 28 | tqdm==4.65.0 29 | transformers==4.30.2 30 | typing_extensions==4.7.1 31 | urllib3==1.26.18 32 | xmlrunner==1.7.7 33 | xxhash==3.2.0 34 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for this project 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Is your feature request related to a problem? Please describe.** 11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] 12 | 13 | **Describe the solution you'd like** 14 | A clear and concise description of what you want to happen. 15 | 16 | **Describe alternatives you've considered** 17 | A clear and concise description of any alternative solutions or features you've considered. 18 | 19 | **Additional context** 20 | Add any other context or screenshots about the feature request here. 
21 | -------------------------------------------------------------------------------- /auto3dseg/configs/metadata.json: -------------------------------------------------------------------------------- 1 | { 2 | "version": "0.0.8", 3 | "changelog": { 4 | "0.0.8": "Update swin unetr pretrained weights link", 5 | "0.0.7": "Add support for MLFlow experiment name.", 6 | "0.0.6": "Move metadata.json under 'configs' to be consistent with bundles.", 7 | "0.0.5": "Enable support of mlflow in segresnet algorithm template for public server.", 8 | "0.0.4": "Enable support of mlflow in swinunetr algorithm template for public server.", 9 | "0.0.3": "update hyper-parameter naming and mlflow in swinunetr algorithm template.", 10 | "0.0.2": "update hyper-parameter naming in dints algorithm template.", 11 | "0.0.1": "this version is based on commit 03a6d4effb9223670f439c3a29198ef34938922f." 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /SwinMM/TRAINING.md: -------------------------------------------------------------------------------- 1 | # Training 2 | 3 | ## Launch Redis 4 | 5 | Launch the in-memory database (needed only once): 6 | 7 | ```bash 8 | # It launches redis at ports 39996-39999. 9 | bash ./scripts/start_redis.sh 10 | ``` 11 | 12 | **NOTE** 13 | 14 | - If the **data or preprocessing** has changed, run `pkill redis-server` before further experiments 15 | - Try `--workers` values from 8 to 32 for the best performance 16 | - The first epoch after launching the server can be slow; later epochs should be faster 17 | - Set `--redis_ports <ports>` according to your Redis setup. 18 | 19 | ## Pre-training 20 | 21 | ```bash 22 | cd Pretrain 23 | bash run.sh 24 | ``` 25 | 26 | ## Finetuning 27 | 28 | - Prepare pretrained models: copy the pretrained model into the `pretrained_models` directory in WORD 29 | 30 | ```bash 31 | cd WORD 32 | bash run.sh 33 | ``` 34 | -------------------------------------------------------------------------------- /auto3dseg/algorithm_templates/dints/configs/network.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | training_network: 3 | arch_ckpt_path: "$@bundle_root + '/scripts/arch_code.pth'" 4 | arch_ckpt: "$torch.load(@training_network#arch_ckpt_path, map_location=torch.device('cuda'), weights_only=False)" 5 | dints_space: 6 | _target_: TopologyInstance 7 | channel_mul: 1 8 | num_blocks: 12 9 | num_depths: 4 10 | use_downsample: true 11 | arch_code: 12 | - "$@training_network#arch_ckpt['code_a']" 13 | - "$@training_network#arch_ckpt['code_c']" 14 | device: "$torch.device('cuda')" 15 | network: 16 | _target_: DiNTS 17 | dints_space: "$@training_network#dints_space" 18 | in_channels: "@training#input_channels" 19 | num_classes: "@training#output_classes" 20 | use_downsample: true 21 | node_a: "$@training_network#arch_ckpt['node_a']" 22 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Describe the bug** 11 | A clear and concise description of what the bug is. 12 | 13 | **To Reproduce** 14 | Steps to reproduce the behavior: 15 | 1. Go to '...' 16 | 2. Install '....' 17 | 3. Run commands '....' 18 | 19 | **Expected behavior** 20 | A clear and concise description of what you expected to happen. 
21 | 22 | **Screenshots** 23 | If applicable, add screenshots to help explain your problem. 24 | 25 | **Environment (please complete the following information):** 26 | - OS 27 | - Python version 28 | - MONAI version [e.g. git commit hash] 29 | - CUDA/cuDNN version 30 | - GPU models and configuration 31 | 32 | **Additional context** 33 | Add any other context about the problem here. 34 | -------------------------------------------------------------------------------- /SwinMM/WORD/README.md: -------------------------------------------------------------------------------- 1 | # Note for SwinMM Finetuning 2 | 3 | ## Training 4 | 5 | ### FIXME(outdated) 6 | 7 | ```bash 8 | python main.py 9 | --feature_size=48 10 | --batch_size=1 11 | --logdir="swin_mm_test/" 12 | --roi_x=64 13 | --roi_y=64 14 | --roi_z=64 15 | --optim_lr=1e-4 16 | --lrschedule="warmup_cosine" 17 | --infer_overlap=0.5 18 | --save_checkpoint 19 | --data_dir="/dataset/dataset0/" 20 | --distributed 21 | --use_ssl_pretrained 22 | --pretrained_dir="./pretrained_models/" 23 | --pretrained_model_name="model_bestValRMSE.pt" 24 | ``` 25 | 26 | ## Testing 27 | 28 | ### FIXME(outdated) 29 | 30 | ```bash 31 | python test.py 32 | --feature_size=48 33 | --batch_size=1 34 | --exp_name="swin_mm_test/" 35 | --roi_x=64 36 | --roi_y=64 37 | --roi_z=64 38 | --infer_overlap=0.5 39 | --data_dir="/dataset/dataset0/" 40 | --pretrained_dir="./runs/multiview_101021/" 41 | --pretrained_model_name="model.pt" 42 | ``` 43 | -------------------------------------------------------------------------------- /auto3dseg/algorithm_templates/swinunetr/configs/transforms_validate.yaml: -------------------------------------------------------------------------------- 1 | image_key: image 2 | label_key: label 3 | transforms_validate: 4 | _target_: Compose 5 | transforms: 6 | - _target_: LoadImaged 7 | keys: ["@image_key", "@label_key"] 8 | image_only: true 9 | - _target_: EnsureChannelFirstd 10 | keys: ["@image_key", "@label_key"] 11 | - PLACEHOLDER_INTENSITY_NORMALIZATION 12 | - _target_: Orientationd 13 | keys: ["@image_key", "@label_key"] 14 | axcodes: RAS 15 | - _target_: Spacingd 16 | keys: ["@image_key", "@label_key"] 17 | pixdim: "$@transforms#resample_resolution" 18 | mode: [bilinear, nearest] 19 | align_corners: [true, true] 20 | - _target_: CastToTyped 21 | keys: ["@image_key", "@label_key"] 22 | dtype: ["$torch.float32", "$torch.uint8"] 23 | - _target_: EnsureTyped 24 | keys: ['@image_key', '@label_key'] 25 | track_meta: true 26 | -------------------------------------------------------------------------------- /.github/workflows/chatops.yml: -------------------------------------------------------------------------------- 1 | name: chatops 2 | 3 | # currently dispatches /black command to project-monai/monai-code-formatter 4 | on: 5 | issue_comment: 6 | types: [created, edited] 7 | jobs: 8 | dispatch_command: 9 | runs-on: ubuntu-latest 10 | steps: 11 | - name: dispatch 12 | uses: peter-evans/slash-command-dispatch@v3.0.0 13 | with: 14 | token: ${{ secrets.PR_MAINTAIN_BOT }} 15 | # reaction-token: ${{ secrets.GITHUB_TOKEN }} 16 | reactions: false 17 | config: > 18 | [ 19 | { 20 | "command": "black", 21 | "permission": "none", 22 | "issue_type": "pull-request", 23 | "allow_edits": true, 24 | "repository": "project-monai/monai-code-formatter" 25 | }, 26 | { 27 | "command": "integration-test", 28 | "permission": "none", 29 | "issue_type": "pull-request", 30 | "allow_edits": true 31 | } 32 | ] 33 | 
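For readers tracing how the `_target_` entries in the transforms YAML configs above (for example, the swinunetr `transforms_validate.yaml`) become runnable pipelines: Auto3DSeg resolves them through MONAI's config system, and the validate pipeline corresponds roughly to the Python sketch below. This is a hedged sketch only; the `pixdim` value is an illustrative assumption (the real value comes from the dataset-dependent `resample_resolution`), and `PLACEHOLDER_INTENSITY_NORMALIZATION` is substituted per dataset by the template.

```python
# Rough Python equivalent of the swinunetr transforms_validate.yaml above.
# Sketch only: the template actually instantiates this from YAML via MONAI's
# config parser, and resample_resolution is chosen per dataset.
import torch
from monai.transforms import (
    CastToTyped,
    Compose,
    EnsureChannelFirstd,
    EnsureTyped,
    LoadImaged,
    Orientationd,
    Spacingd,
)

keys = ["image", "label"]
transforms_validate = Compose(
    [
        LoadImaged(keys=keys, image_only=True),
        EnsureChannelFirstd(keys=keys),
        # PLACEHOLDER_INTENSITY_NORMALIZATION is filled in per dataset
        Orientationd(keys=keys, axcodes="RAS"),
        Spacingd(
            keys=keys,
            pixdim=(1.0, 1.0, 1.0),  # assumption; real value is resample_resolution
            mode=("bilinear", "nearest"),
            align_corners=(True, True),
        ),
        CastToTyped(keys=keys, dtype=(torch.float32, torch.uint8)),
        EnsureTyped(keys=keys, track_meta=True),
    ]
)
```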
-------------------------------------------------------------------------------- /auto3dseg/algorithm_templates/segresnet/scripts/train.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) MONAI Consortium 2 | # Licensed under the Apache License, Version 2.0 (the "License"); 3 | # you may not use this file except in compliance with the License. 4 | # You may obtain a copy of the License at 5 | # http://www.apache.org/licenses/LICENSE-2.0 6 | # Unless required by applicable law or agreed to in writing, software 7 | # distributed under the License is distributed on an "AS IS" BASIS, 8 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 | # See the License for the specific language governing permissions and 10 | # limitations under the License. 11 | 12 | from typing import Optional, Sequence, Union 13 | 14 | import fire 15 | 16 | if __package__ in (None, ""): 17 | from segmenter import run_segmenter 18 | else: 19 | from .segmenter import run_segmenter 20 | 21 | 22 | def run(config_file: Optional[Union[str, Sequence[str]]] = None, **override): 23 | run_segmenter(config_file=config_file, **override) 24 | 25 | 26 | if __name__ == "__main__": 27 | fire.Fire() 28 | -------------------------------------------------------------------------------- /auto3dseg/algorithm_templates/segresnet2d/scripts/train.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) MONAI Consortium 2 | # Licensed under the Apache License, Version 2.0 (the "License"); 3 | # you may not use this file except in compliance with the License. 4 | # You may obtain a copy of the License at 5 | # http://www.apache.org/licenses/LICENSE-2.0 6 | # Unless required by applicable law or agreed to in writing, software 7 | # distributed under the License is distributed on an "AS IS" BASIS, 8 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 | # See the License for the specific language governing permissions and 10 | # limitations under the License. 
11 | 12 | from typing import Optional, Sequence, Union 13 | 14 | import fire 15 | 16 | if __package__ in (None, ""): 17 | from segmenter_2d import run_segmenter 18 | else: 19 | from .segmenter_2d import run_segmenter 20 | 21 | 22 | def run(config_file: Optional[Union[str, Sequence[str]]] = None, **override): 23 | run_segmenter(config_file=config_file, **override) 24 | 25 | 26 | if __name__ == "__main__": 27 | fire.Fire() 28 | -------------------------------------------------------------------------------- /auto3dseg/algorithm_templates/dints/configs/transforms_validate.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | image_key: image 3 | label_key: label 4 | transforms_validate: 5 | _target_: Compose 6 | transforms: 7 | - _target_: Compose 8 | transforms: 9 | - _target_: LoadImaged 10 | keys: "@image_key" 11 | dtype: "$np.float32" 12 | image_only: false 13 | - _target_: LoadImaged 14 | keys: "@label_key" 15 | dtype: "$np.uint8" 16 | image_only: false 17 | - _target_: EnsureChannelFirstd 18 | keys: ["@image_key", "@label_key"] 19 | - PLACEHOLDER_INTENSITY_NORMALIZATION 20 | - _target_: Orientationd 21 | keys: ["@image_key", "@label_key"] 22 | axcodes: RAS 23 | - _target_: Spacingd 24 | keys: ["@image_key", "@label_key"] 25 | pixdim: "@training#transforms#resample_resolution" 26 | mode: [bilinear, nearest] 27 | align_corners: [true, true] 28 | - _target_: CastToTyped 29 | keys: ["@image_key", "@label_key"] 30 | dtype: ["$torch.float32", "$torch.uint8"] 31 | - _target_: EnsureTyped 32 | keys: ['@image_key', '@label_key'] 33 | track_meta: false 34 | -------------------------------------------------------------------------------- /prostate-mri-lesion-seg/scripts/compare_output.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Computes the dice scores for the organ mask and lesion mask outputs 4 | # 5 | # Usage: 6 | # ./compare_output.sh <reference_dir> <output_dir> 7 | 8 | # Check if the correct number of arguments were passed 9 | if [ "$#" -ne 2 ]; then 10 | echo "Usage: ./compare_output.sh <reference_dir> <output_dir>" 11 | exit 1 12 | fi 13 | 14 | # Check if the reference directory exists 15 | if [ ! -d "$1" ]; then 16 | echo "Reference directory does not exist" 17 | exit 1 18 | fi 19 | 20 | # Check if the output directory exists 21 | if [ ! -d "$2" ]; then 22 | echo "Output directory does not exist" 23 | exit 1 24 | fi 25 | 26 | # Evaluate the dice scores for the organ mask and lesion mask outputs 27 | SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )" 28 | echo "Evaluating Organ Mask..." 29 | python $SCRIPT_DIR/eval_dice.py $1/organ/organ.nii.gz $2/organ/organ.nii.gz 30 | 31 | echo "Evaluating Lesion Mask..." 32 | python $SCRIPT_DIR/eval_dice.py $1/lesion/lesion_mask.nii.gz $2/lesion/lesion_mask.nii.gz 33 | -------------------------------------------------------------------------------- /auto3dseg/algorithm_templates/segresnet/scripts/validate.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) MONAI Consortium 2 | # Licensed under the Apache License, Version 2.0 (the "License"); 3 | # you may not use this file except in compliance with the License. 
4 | # You may obtain a copy of the License at 5 | # http://www.apache.org/licenses/LICENSE-2.0 6 | # Unless required by applicable law or agreed to in writing, software 7 | # distributed under the License is distributed on an "AS IS" BASIS, 8 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 | # See the License for the specific language governing permissions and 10 | # limitations under the License. 11 | 12 | from typing import Optional, Sequence, Union 13 | 14 | import fire 15 | 16 | if __package__ in (None, ""): 17 | from segmenter import run_segmenter 18 | else: 19 | from .segmenter import run_segmenter 20 | 21 | 22 | def run(config_file: Optional[Union[str, Sequence[str]]] = None, **override): 23 | override["validate#enabled"] = True 24 | run_segmenter(config_file=config_file, **override) 25 | 26 | 27 | if __name__ == "__main__": 28 | fire.Fire() 29 | -------------------------------------------------------------------------------- /SkullRec/pre_trained_weights/readme.md: -------------------------------------------------------------------------------- 1 | ## Exemplary Skull Reconstruction Results Using the Pre-trained Model 2 | 3 | Download the pre-trained weights [here](https://github.com/Project-MONAI/MONAI-extra-test-data/releases/download/0.8.1/skull_rec_best_metric_model.pth).
4 | The (auto-encoder) network was trained on 350 of the 429 skull images (the remaining 79 images were held out for validation) from the [pre-processed dataset](https://files.icg.tugraz.at/f/9642058af1744b4b961b/?dl=1). 5 | 6 | ### cranial reconstruction (input-pred-gt) 7 | Cranial reconstruction is relatively easy, since the cranium contains no subtle or complex structures. 8 | 9 | dataset 10 | 11 | ### facial reconstruction (input-pred-gt) 12 | 13 | The results show that the subtle facial structures are difficult to reconstruct and recover.
14 | The pre-trained model serves only as a baseline.
15 |
16 | dataset 17 | -------------------------------------------------------------------------------- /auto3dseg/algorithm_templates/segresnet2d/scripts/validate.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) MONAI Consortium 2 | # Licensed under the Apache License, Version 2.0 (the "License"); 3 | # you may not use this file except in compliance with the License. 4 | # You may obtain a copy of the License at 5 | # http://www.apache.org/licenses/LICENSE-2.0 6 | # Unless required by applicable law or agreed to in writing, software 7 | # distributed under the License is distributed on an "AS IS" BASIS, 8 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 | # See the License for the specific language governing permissions and 10 | # limitations under the License. 11 | 12 | from typing import Optional, Sequence, Union 13 | 14 | import fire 15 | 16 | if __package__ in (None, ""): 17 | from segmenter_2d import run_segmenter 18 | else: 19 | from .segmenter_2d import run_segmenter 20 | 21 | 22 | def run(config_file: Optional[Union[str, Sequence[str]]] = None, **override): 23 | override["validate#enabled"] = True 24 | run_segmenter(config_file=config_file, **override) 25 | 26 | 27 | if __name__ == "__main__": 28 | fire.Fire() 29 | -------------------------------------------------------------------------------- /DiNTS/run_ensemble.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | clear 3 | 4 | ALGORITHM="AM" 5 | CONFIG="config.yaml" 6 | FOLDER_0="." 7 | INPUT_ROOT="${PWD}" 8 | OUTPUT_ROOT="${PWD}/${FOLDER_0}/Submission_${ALGORITHM}" 9 | 10 | NUM_GPUS_PER_NODE=4 11 | NUM_NODES=1 12 | 13 | if [ ${NUM_GPUS_PER_NODE} -eq 2 ] 14 | then 15 | export CUDA_VISIBLE_DEVICES=0,1 16 | elif [ ${NUM_GPUS_PER_NODE} -eq 4 ] 17 | then 18 | export CUDA_VISIBLE_DEVICES=0,1,2,3 19 | elif [ ${NUM_GPUS_PER_NODE} -eq 8 ] 20 | then 21 | export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 22 | fi 23 | 24 | python -m torch.distributed.launch --nproc_per_node=${NUM_GPUS_PER_NODE} \ 25 | ensemble_multi-gpu.py --algorithm ${ALGORITHM} \ 26 | --config=${CONFIG} \ 27 | --input_root ${INPUT_ROOT} \ 28 | --output_root ${OUTPUT_ROOT} \ 29 | --dir_list "${FOLDER_0}/Seg_Fold0_test" \ 30 | "${FOLDER_0}/Seg_Fold1_test" \ 31 | "${FOLDER_0}/Seg_Fold2_test" \ 32 | "${FOLDER_0}/Seg_Fold3_test" \ 33 | "${FOLDER_0}/Seg_Fold4_test" \ 34 | -------------------------------------------------------------------------------- /auto3dseg/algorithm_templates/segresnet/docs/README.md: -------------------------------------------------------------------------------- 1 | # Description 2 | 3 | A 3D neural network based algorithm for volumetric segmentation of 3D medical images. 4 | 5 | # Model Overview 6 | 7 | This is a template for training the state-of-the-art algorithm [1] of the "Brain Tumor Segmentation (BraTS) Challenge 2018". 8 | 9 | ## Training configuration 10 | 11 | The training was performed with at least 16GB-memory GPUs. 
12 | 13 | ## commands example 14 | 15 | Execute model training: 16 | 17 | ``` 18 | CUDA_VISIBLE_DEVICES=0 python scripts/train.py run --config_file=configs/hyper_parameters.yaml 19 | ``` 20 | 21 | Execute multi-GPU model training (recommended): 22 | 23 | ``` 24 | torchrun --nproc_per_node=gpu scripts/train.py run --config_file=configs/hyper_parameters.yaml 25 | ``` 26 | 27 | Execute validation: 28 | 29 | ``` 30 | python scripts/validate.py run --config_file=configs/hyper_parameters.yaml 31 | ``` 32 | 33 | Execute inference: 34 | 35 | ``` 36 | python scripts/infer.py run --config_file=configs/hyper_parameters.yaml 37 | ``` 38 | 39 | # References 40 | 41 | [1] Myronenko, A., 2018, September. 3D MRI brain tumor segmentation using autoencoder regularization. In International MICCAI Brainlesion Workshop (pp. 311-320). Springer, Cham. 42 | -------------------------------------------------------------------------------- /auto3dseg/algorithm_templates/segresnet2d/docs/README.md: -------------------------------------------------------------------------------- 1 | # Description 2 | 3 | A 2D neural network based algorithm for volumetric segmentation of 3D medical images. 4 | 5 | # Model Overview 6 | 7 | This is a template for training the 2D version of the state-of-the-art algorithm [1] of the "Brain Tumor Segmentation (BraTS) Challenge 2018". 8 | 9 | ## Training configuration 10 | 11 | The training was performed with at least 16GB-memory GPUs. 12 | 13 | ## commands example 14 | 15 | Execute model training: 16 | 17 | ``` 18 | CUDA_VISIBLE_DEVICES=0 python scripts/train.py run --config_file=configs/hyper_parameters.yaml 19 | ``` 20 | 21 | Execute multi-GPU model training (recommended): 22 | 23 | ``` 24 | torchrun --nproc_per_node=gpu scripts/train.py run --config_file=configs/hyper_parameters.yaml 25 | ``` 26 | 27 | Execute validation: 28 | 29 | ``` 30 | python scripts/validate.py run --config_file=configs/hyper_parameters.yaml 31 | ``` 32 | 33 | Execute inference: 34 | 35 | ``` 36 | python scripts/infer.py run --config_file=configs/hyper_parameters.yaml 37 | ``` 38 | 39 | # References 40 | 41 | [1] Myronenko, A., 2018, September. 3D MRI brain tumor segmentation using autoencoder regularization. In International MICCAI Brainlesion Workshop (pp. 311-320). Springer, Cham. 
42 | -------------------------------------------------------------------------------- /DAE/Pretrain_full_contrast/run_me_local.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | NPROC_PER_NODE=1 4 | EXP_NAME='upsample_vae' 5 | OUTPUT_PATH='./output/'${EXP_NAME} 6 | LOG_PATH='./logdir/'${EXP_NAME} 7 | BATCH_SIZE=3 8 | IMG_SIZE=96 9 | SW_BATCH_SIZE=1 10 | MASK_RATIO=0.6 11 | MASK_PATCH_SIZE=16 12 | EPOCHS=100 13 | WARMUP_EPOCHS=10 14 | BASE_LR=2e-4 15 | WARMUP_LR=1e-6 16 | MIN_LR=1e-5 17 | WEIGHT_DECAY=0.05 18 | SAVE_FREQ=5 19 | PRINT_FREQ=5 20 | CACHE_RATE=0.5 21 | DECODER='pixel_shuffle' 22 | LOSS_TYPE='l2' 23 | LOSS_TYPE='mask_only' 24 | DECODER='deconv' 25 | DECODER='swin' 26 | DECODER='vae2' 27 | MODEL_TYPE='swin_skip' 28 | 29 | python -m torch.distributed.launch --nproc_per_node ${NPROC_PER_NODE} main.py \ 30 | --batch_size=${BATCH_SIZE} --sw_batch_size=${SW_BATCH_SIZE} --mask_ratio=${MASK_RATIO} \ 31 | --epoch=${EPOCHS} --mask_patch_size=${MASK_PATCH_SIZE} --img_size=${IMG_SIZE} \ 32 | --min_lr=${MIN_LR} --warmpup_epoch=${WARMUP_EPOCHS} --decoder=${DECODER} --model_type=${MODEL_TYPE} --loss_type=${LOSS_TYPE} --base_lr=${BASE_LR} --warmup_lr=${WARMUP_LR} \ 33 | --weight_decay=${WEIGHT_DECAY} --save_freq=${SAVE_FREQ} --print_freq=${PRINT_FREQ} --log_dir=${LOG_PATH} --cache_dataset --output=${OUTPUT_PATH}\ 34 | --local --decoder_off\ 35 | 36 | # --use_grad_checkpoint 37 | # --thread_loader 38 | -------------------------------------------------------------------------------- /SwinMM/Pretrain/utils/ops.py: -------------------------------------------------------------------------------- 1 | from typing import Tuple 2 | 3 | import numpy as np 4 | import torch 5 | 6 | 7 | def mask_rand_patch( 8 | window_sizes: Tuple[int, int, int], input_sizes: Tuple[int, int, int], mask_ratio: float, samples: torch.Tensor 9 | ) -> Tuple[torch.Tensor, torch.Tensor]: 10 | """Patch-wise random masking.""" 11 | if len(window_sizes) != len(input_sizes) or any( 12 | [input_size % window_size != 0 for window_size, input_size in zip(window_sizes, input_sizes)] 13 | ): 14 | raise ValueError(f"{window_sizes} & {input_sizes} is not compatible.") 15 | 16 | mask_shape = [input_size // window_size for input_size, window_size in zip(input_sizes, window_sizes)] 17 | num_patches = np.prod(mask_shape).item() 18 | mask = np.ones(num_patches, dtype=bool) 19 | indices = np.random.choice(num_patches, round(num_patches * mask_ratio), replace=False) 20 | mask[indices] = False 21 | mask = mask.reshape(mask_shape) 22 | wh, ww, wd = window_sizes 23 | mask = np.logical_or(mask[:, None, :, None, :, None], np.zeros([1, wh, 1, ww, 1, wd], dtype=bool)).reshape( 24 | input_sizes 25 | ) 26 | mask = torch.from_numpy(mask).to(samples.device) 27 | 28 | res = samples.detach().clone() 29 | res[:, :, mask] = 0 30 | return res, mask 31 | -------------------------------------------------------------------------------- /DAE/Pretrain_full_contrast/logger.py: -------------------------------------------------------------------------------- 1 | import functools 2 | import logging 3 | import os 4 | import sys 5 | 6 | from termcolor import colored 7 | 8 | 9 | @functools.lru_cache() 10 | def create_logger(output_dir, dist_rank=0, name=""): 11 | # create logger 12 | logger = logging.getLogger(name) 13 | logger.setLevel(logging.DEBUG) 14 | logger.propagate = False 15 | 16 | # create formatter 17 | fmt = "[%(asctime)s %(name)s] (%(filename)s %(lineno)d): %(levelname)s %(message)s" 18 | color_fmt 
= ( 19 | colored("[%(asctime)s %(name)s]", "green") 20 | + colored("(%(filename)s %(lineno)d)", "yellow") 21 | + ": %(levelname)s %(message)s" 22 | ) 23 | 24 | # create console handlers for master process 25 | if dist_rank == 0: 26 | console_handler = logging.StreamHandler(sys.stdout) 27 | console_handler.setLevel(logging.DEBUG) 28 | console_handler.setFormatter(logging.Formatter(fmt=color_fmt, datefmt="%Y-%m-%d %H:%M:%S")) 29 | logger.addHandler(console_handler) 30 | 31 | # create file handlers 32 | file_handler = logging.FileHandler(os.path.join(output_dir, f"log_rank{dist_rank}.txt"), mode="a") 33 | file_handler.setLevel(logging.DEBUG) 34 | file_handler.setFormatter(logging.Formatter(fmt=fmt, datefmt="%Y-%m-%d %H:%M:%S")) 35 | logger.addHandler(file_handler) 36 | 37 | return logger 38 | -------------------------------------------------------------------------------- /DiNTS/download_msd_datasets.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 - 2021 MONAI Consortium 2 | # Licensed under the Apache License, Version 2.0 (the "License"); 3 | # you may not use this file except in compliance with the License. 4 | # You may obtain a copy of the License at 5 | # http://www.apache.org/licenses/LICENSE-2.0 6 | # Unless required by applicable law or agreed to in writing, software 7 | # distributed under the License is distributed on an "AS IS" BASIS, 8 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 | # See the License for the specific language governing permissions and 10 | # limitations under the License. 11 | 12 | import argparse 13 | import os 14 | 15 | from monai.apps import download_and_extract 16 | 17 | 18 | def main(): 19 | parser = argparse.ArgumentParser(description="training") 20 | parser.add_argument("--msd_task", action="store", default="Task07_Pancreas", help="msd task") 21 | parser.add_argument("--root", action="store", default="./data_msd", help="data root") 22 | args = parser.parse_args() 23 | 24 | resource = "https://msd-for-monai.s3-us-west-2.amazonaws.com/" + args.msd_task + ".tar" 25 | compressed_file = os.path.join(args.root, args.msd_task + ".tar") 26 | if not os.path.exists(args.root): 27 | download_and_extract(resource, compressed_file, args.root) 28 | 29 | 30 | if __name__ == "__main__": 31 | main() 32 | -------------------------------------------------------------------------------- /DiNTS/run_infer_multi-gpu.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | clear 3 | 4 | ARCH_CKPT="search_code_20000.pth" 5 | CONFIG="config.yaml" 6 | DATA_ROOT="/home/dongy/Data/MSD/Task09_Spleen" 7 | JSON_PATH="${DATA_ROOT}/dataset.json" 8 | NUM_FOLDS=5 9 | 10 | NUM_GPUS_PER_NODE=4 11 | NUM_NODES=1 12 | 13 | if [ ${NUM_GPUS_PER_NODE} -eq 2 ] 14 | then 15 | export CUDA_VISIBLE_DEVICES=0,1 16 | elif [ ${NUM_GPUS_PER_NODE} -eq 4 ] 17 | then 18 | export CUDA_VISIBLE_DEVICES=0,1,2,3 19 | elif [ ${NUM_GPUS_PER_NODE} -eq 8 ] 20 | then 21 | export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 22 | fi 23 | 24 | for FOLD in 0 1 2 3 4 25 | do 26 | CHECKPOINT="Fold${FOLD}/best_metric_model.pth" 27 | JSON_KEY="test" 28 | OUTPUT_ROOT="Seg_Fold${FOLD}_${JSON_KEY}" 29 | 30 | python -m torch.distributed.launch \ 31 | --nproc_per_node=${NUM_GPUS_PER_NODE} \ 32 | --nnodes=${NUM_NODES} \ 33 | --node_rank=0 \ 34 | --master_addr=localhost \ 35 | --master_port=1234 \ 36 | infer_multi-gpu.py --arch_ckpt=${ARCH_CKPT} \ 37 | --checkpoint=${CHECKPOINT} \ 38 | --config=${CONFIG} \ 
39 | --json=${JSON_PATH} \ 40 | --json_key=${JSON_KEY} \ 41 | --output_root=${OUTPUT_ROOT} \ 42 | --prob \ 43 | --root=${DATA_ROOT} 44 | done 45 | -------------------------------------------------------------------------------- /DiNTS/configs/config_Task01_BrainTumour.yaml: -------------------------------------------------------------------------------- 1 | augmentation_monai: 2 | aug_0: RandRotated|keys~image,label|range_x~0.3|range_y~0.3|range_z~0.3|mode~bilinear,nearest|prob~0.2 3 | aug_1: RandZoomd|keys~image,label|min_zoom~0.8|max_zoom~1.2|mode~trilinear,nearest|align_corners~True,None|prob~0.16 4 | aug_2: RandGaussianSmoothd|keys~image|sigma_x~0.5,1.15|sigma_y~0.5,1.15|sigma_z~0.5,1.15|prob~0.15 5 | aug_3: RandScaleIntensityd|keys~image|factors~0.3|prob~0.5 6 | aug_4: RandShiftIntensityd|keys~image|offsets~0.1|prob~0.5 7 | aug_5: RandGaussianNoised|keys~image|std~0.01|prob~0.15 8 | aug_6: RandFlipd|keys~image,label|spatial_axis~0|prob~0.5 9 | aug_7: RandFlipd|keys~image,label|spatial_axis~1|prob~0.5 10 | aug_8: RandFlipd|keys~image,label|spatial_axis~2|prob~0.5 11 | core: 12 | amp: true 13 | deterministic: false 14 | foreground_crop_margin: 0 15 | infer_num_sw_batch_size: 6 16 | infer_num_tta: 1 17 | infer_overlap_ratio: 0.625 18 | infer_patch_size: 96,96,96 19 | input_channels: 4 20 | intensity_norm: NormalizeIntensityd|keys~image|nonzero~True|channel_wise~True 21 | interpolation: Identityd|keys~image,label 22 | learning_rate: 0.025 23 | learning_rate_milestones: 0.2,0.4,0.6,0.8 24 | num_epochs: 1695 25 | num_epochs_per_validation: 80 26 | num_images_per_batch: 2 27 | num_patches_per_image: 1 28 | num_sw_batch_size: 6 29 | output_classes: 4 30 | overlap_ratio: 0.625 31 | patch_size: 96,96,96 32 | random_seed: 888 33 | -------------------------------------------------------------------------------- /DiNTS/configs/config_Task04_Hippocampus.yaml: -------------------------------------------------------------------------------- 1 | augmentation_monai: 2 | aug_0: RandRotated|keys~image,label|range_x~0.3|range_y~0.3|range_z~0.3|mode~bilinear,nearest|prob~0.2 3 | aug_1: RandZoomd|keys~image,label|min_zoom~0.8|max_zoom~1.2|mode~trilinear,nearest|align_corners~True,None|prob~0.16 4 | aug_2: RandGaussianSmoothd|keys~image|sigma_x~0.5,1.15|sigma_y~0.5,1.15|sigma_z~0.5,1.15|prob~0.15 5 | aug_3: RandScaleIntensityd|keys~image|factors~0.3|prob~0.5 6 | aug_4: RandShiftIntensityd|keys~image|offsets~0.1|prob~0.5 7 | aug_5: RandGaussianNoised|keys~image|std~0.01|prob~0.15 8 | aug_6: RandFlipd|keys~image,label|spatial_axis~0|prob~0.5 9 | aug_7: RandFlipd|keys~image,label|spatial_axis~1|prob~0.5 10 | aug_8: RandFlipd|keys~image,label|spatial_axis~2|prob~0.5 11 | core: 12 | amp: true 13 | deterministic: false 14 | foreground_crop_margin: 0 15 | infer_num_sw_batch_size: 6 16 | infer_num_tta: 1 17 | infer_overlap_ratio: 0.625 18 | infer_patch_size: 32,32,32 19 | input_channels: 1 20 | intensity_norm: NormalizeIntensityd|keys~image|nonzero~True|channel_wise~True 21 | interpolation: Identityd|keys~image,label 22 | learning_rate: 0.025 23 | learning_rate_milestones: 0.2,0.4,0.6,0.8 24 | num_epochs: 3200 25 | num_epochs_per_validation: 150 26 | num_images_per_batch: 2 27 | num_patches_per_image: 1 28 | num_sw_batch_size: 6 29 | output_classes: 3 30 | overlap_ratio: 0.625 31 | patch_size: 32,32,32 32 | random_seed: 888 33 | -------------------------------------------------------------------------------- /SwinMM/WORD/utils/view_ops.py: 
-------------------------------------------------------------------------------- 1 | """View operations.""" 2 | 3 | from typing import Sequence, Tuple 4 | 5 | import numpy as np 6 | import torch 7 | from utils import view_transforms 8 | 9 | PermuteType = view_transforms.PermuteType 10 | TransformFuncType = view_transforms.TransformFuncType 11 | 12 | 13 | def get_permute_transform(view_src: PermuteType, view_dst: PermuteType) -> TransformFuncType: 14 | """Gets transform function from view src to view dst.""" 15 | 16 | def transform(x: torch.Tensor) -> torch.Tensor: 17 | x_view_0 = view_transforms.permutation_inverse_transforms[view_src](x) 18 | return view_transforms.permutation_transforms[view_dst](x_view_0).contiguous() 19 | 20 | return transform 21 | 22 | 23 | def permute_inverse(xs: Sequence[torch.Tensor], views: Sequence[PermuteType]) -> Sequence[torch.Tensor]: 24 | """Transforms data back to origin view.""" 25 | return [get_permute_transform(view, 0)(x) for x, view in zip(xs, views)] 26 | 27 | 28 | def permute_rand(x: torch.Tensor, num_samples: int = 2) -> Tuple[Sequence[torch.Tensor], Sequence[PermuteType]]: 29 | """Samples different transforms of data.""" 30 | num_permutes = len(view_transforms.permutation_transforms) 31 | if num_samples > num_permutes: 32 | raise ValueError("Duplicate samples.") 33 | view_dsts = np.random.permutation(num_permutes)[:num_samples].tolist() 34 | return [get_permute_transform(0, view)(x) for view in view_dsts], view_dsts 35 | -------------------------------------------------------------------------------- /DiNTS/configs/config_Task02_Heart.yaml: -------------------------------------------------------------------------------- 1 | augmentation_monai: 2 | aug_0: RandRotated|keys~image,label|range_x~0.3|range_y~0.3|range_z~0.3|mode~bilinear,nearest|prob~0.2 3 | aug_1: RandZoomd|keys~image,label|min_zoom~0.8|max_zoom~1.2|mode~trilinear,nearest|align_corners~True,None|prob~0.16 4 | aug_2: RandGaussianSmoothd|keys~image|sigma_x~0.5,1.15|sigma_y~0.5,1.15|sigma_z~0.5,1.15|prob~0.15 5 | aug_3: RandScaleIntensityd|keys~image|factors~0.3|prob~0.5 6 | aug_4: RandShiftIntensityd|keys~image|offsets~0.1|prob~0.5 7 | aug_5: RandGaussianNoised|keys~image|std~0.01|prob~0.15 8 | aug_6: RandFlipd|keys~image,label|spatial_axis~0|prob~0.5 9 | aug_7: RandFlipd|keys~image,label|spatial_axis~1|prob~0.5 10 | aug_8: RandFlipd|keys~image,label|spatial_axis~2|prob~0.5 11 | core: 12 | amp: true 13 | deterministic: false 14 | foreground_crop_margin: 0 15 | infer_num_sw_batch_size: 6 16 | infer_num_tta: 1 17 | infer_overlap_ratio: 0.625 18 | infer_patch_size: 96,96,96 19 | input_channels: 1 20 | intensity_norm: NormalizeIntensityd|keys~image|nonzero~True|channel_wise~True 21 | interpolation: Spacingd|keys~image,label|pixdim~1.0,1.0,1.0|mode~bilinear,nearest|align_corners~True,True 22 | learning_rate: 0.025 23 | learning_rate_milestones: 0.2,0.4,0.6,0.8 24 | num_epochs: 20800 #4GPU 25 | num_epochs_per_validation: 1000 #4GPU 26 | num_images_per_batch: 2 27 | num_patches_per_image: 1 28 | num_sw_batch_size: 6 29 | output_classes: 2 30 | overlap_ratio: 0.625 31 | patch_size: 96,96,96 32 | random_seed: 888 33 | -------------------------------------------------------------------------------- /DiNTS/configs/config_Task05_Prostate.yaml: -------------------------------------------------------------------------------- 1 | augmentation_monai: 2 | aug_0: RandRotated|keys~image,label|range_x~0.3|range_y~0.3|range_z~0.3|mode~bilinear,nearest|prob~0.2 3 | aug_1: 
RandZoomd|keys~image,label|min_zoom~0.8|max_zoom~1.2|mode~trilinear,nearest|align_corners~True,None|prob~0.16 4 | aug_2: RandGaussianSmoothd|keys~image|sigma_x~0.5,1.15|sigma_y~0.5,1.15|sigma_z~0.5,1.15|prob~0.15 5 | aug_3: RandScaleIntensityd|keys~image|factors~0.3|prob~0.5 6 | aug_4: RandShiftIntensityd|keys~image|offsets~0.1|prob~0.5 7 | aug_5: RandGaussianNoised|keys~image|std~0.01|prob~0.15 8 | aug_6: RandFlipd|keys~image,label|spatial_axis~0|prob~0.5 9 | aug_7: RandFlipd|keys~image,label|spatial_axis~1|prob~0.5 10 | aug_8: RandFlipd|keys~image,label|spatial_axis~2|prob~0.5 11 | core: 12 | amp: true 13 | deterministic: false 14 | foreground_crop_margin: 0 15 | infer_num_sw_batch_size: 6 16 | infer_num_tta: 1 17 | infer_overlap_ratio: 0.625 18 | infer_patch_size: 96,96,32 19 | input_channels: 2 20 | intensity_norm: NormalizeIntensityd|keys~image|nonzero~True|channel_wise~True 21 | interpolation: Spacingd|keys~image,label|pixdim~1.0,1.0,1.0|mode~bilinear,nearest|align_corners~True,True 22 | learning_rate: 0.025 23 | learning_rate_milestones: 0.2,0.4,0.6,0.8 24 | num_epochs: 12800 # 4GPU 25 | num_epochs_per_validation: 625 # 4GPU 26 | num_images_per_batch: 2 27 | num_patches_per_image: 1 28 | num_sw_batch_size: 6 29 | output_classes: 3 30 | overlap_ratio: 0.625 31 | patch_size: 96,96,32 32 | random_seed: 888 33 | -------------------------------------------------------------------------------- /DiNTS/configs/config_Task03_Liver.yaml: -------------------------------------------------------------------------------- 1 | augmentation_monai: 2 | aug_0: RandRotated|keys~image,label|range_x~0.3|range_y~0.3|range_z~0.3|mode~bilinear,nearest|prob~0.2 3 | aug_1: RandZoomd|keys~image,label|min_zoom~0.8|max_zoom~1.2|mode~trilinear,nearest|align_corners~True,None|prob~0.16 4 | aug_2: RandGaussianSmoothd|keys~image|sigma_x~0.5,1.15|sigma_y~0.5,1.15|sigma_z~0.5,1.15|prob~0.15 5 | aug_3: RandScaleIntensityd|keys~image|factors~0.3|prob~0.5 6 | aug_4: RandShiftIntensityd|keys~image|offsets~0.1|prob~0.5 7 | aug_5: RandGaussianNoised|keys~image|std~0.01|prob~0.15 8 | aug_6: RandFlipd|keys~image,label|spatial_axis~0|prob~0.5 9 | aug_7: RandFlipd|keys~image,label|spatial_axis~1|prob~0.5 10 | aug_8: RandFlipd|keys~image,label|spatial_axis~2|prob~0.5 11 | core: 12 | amp: true 13 | deterministic: false 14 | foreground_crop_margin: 0 15 | infer_num_sw_batch_size: 6 16 | infer_num_tta: 1 17 | infer_overlap_ratio: 0.625 18 | infer_patch_size: 96,96,96 19 | input_channels: 1 20 | intensity_norm: ScaleIntensityRanged|keys~image|a_min~-21.0|a_max~189.0|b_min~0.0|b_max~1.0|clip~True 21 | interpolation: Spacingd|keys~image,label|pixdim~1.0,1.0,1.0|mode~bilinear,nearest|align_corners~True,True 22 | learning_rate: 0.025 23 | learning_rate_milestones: 0.2,0.4,0.6,0.8 24 | num_epochs: 6400 25 | num_epochs_per_validation: 305 26 | num_images_per_batch: 2 27 | num_patches_per_image: 1 28 | num_sw_batch_size: 6 29 | output_classes: 3 30 | overlap_ratio: 0.625 31 | patch_size: 96,96,96 32 | random_seed: 888 33 | -------------------------------------------------------------------------------- /DiNTS/configs/config_Task10_Colon.yaml: -------------------------------------------------------------------------------- 1 | augmentation_monai: 2 | aug_0: RandRotated|keys~image,label|range_x~0.3|range_y~0.3|range_z~0.3|mode~bilinear,nearest|prob~0.2 3 | aug_1: RandZoomd|keys~image,label|min_zoom~0.8|max_zoom~1.2|mode~trilinear,nearest|align_corners~True,None|prob~0.16 4 | aug_2: 
RandGaussianSmoothd|keys~image|sigma_x~0.5,1.15|sigma_y~0.5,1.15|sigma_z~0.5,1.15|prob~0.15 5 | aug_3: RandScaleIntensityd|keys~image|factors~0.3|prob~0.5 6 | aug_4: RandShiftIntensityd|keys~image|offsets~0.1|prob~0.5 7 | aug_5: RandGaussianNoised|keys~image|std~0.01|prob~0.15 8 | aug_6: RandFlipd|keys~image,label|spatial_axis~0|prob~0.5 9 | aug_7: RandFlipd|keys~image,label|spatial_axis~1|prob~0.5 10 | aug_8: RandFlipd|keys~image,label|spatial_axis~2|prob~0.5 11 | core: 12 | amp: true 13 | deterministic: false 14 | foreground_crop_margin: 0 15 | infer_num_sw_batch_size: 6 16 | infer_num_tta: 1 17 | infer_overlap_ratio: 0.625 18 | infer_patch_size: 96,96,96 19 | input_channels: 1 20 | intensity_norm: ScaleIntensityRanged|keys~image|a_min~-57.0|a_max~175.0|b_min~0.0|b_max~1.0|clip~True 21 | interpolation: Spacingd|keys~image,label|pixdim~1.0,1.0,1.0|mode~bilinear,nearest|align_corners~True,True 22 | learning_rate: 0.025 23 | learning_rate_milestones: 0.2,0.4,0.6,0.8 24 | num_epochs: 6500 25 | num_epochs_per_validation: 317 26 | num_images_per_batch: 2 27 | num_patches_per_image: 1 28 | num_sw_batch_size: 6 29 | output_classes: 2 30 | overlap_ratio: 0.625 31 | patch_size: 96,96,96 32 | random_seed: 888 33 | -------------------------------------------------------------------------------- /DiNTS/configs/config_Task06_Lung.yaml: -------------------------------------------------------------------------------- 1 | augmentation_monai: 2 | aug_0: RandRotated|keys~image,label|range_x~0.3|range_y~0.3|range_z~0.3|mode~bilinear,nearest|prob~0.2 3 | aug_1: RandZoomd|keys~image,label|min_zoom~0.8|max_zoom~1.2|mode~trilinear,nearest|align_corners~True,None|prob~0.16 4 | aug_2: RandGaussianSmoothd|keys~image|sigma_x~0.5,1.15|sigma_y~0.5,1.15|sigma_z~0.5,1.15|prob~0.15 5 | aug_3: RandScaleIntensityd|keys~image|factors~0.3|prob~0.5 6 | aug_4: RandShiftIntensityd|keys~image|offsets~0.1|prob~0.5 7 | aug_5: RandGaussianNoised|keys~image|std~0.01|prob~0.15 8 | aug_6: RandFlipd|keys~image,label|spatial_axis~0|prob~0.5 9 | aug_7: RandFlipd|keys~image,label|spatial_axis~1|prob~0.5 10 | aug_8: RandFlipd|keys~image,label|spatial_axis~2|prob~0.5 11 | core: 12 | amp: true 13 | deterministic: false 14 | foreground_crop_margin: 0 15 | infer_num_sw_batch_size: 6 16 | infer_num_tta: 1 17 | infer_overlap_ratio: 0.625 18 | infer_patch_size: 96,96,96 19 | input_channels: 1 20 | intensity_norm: ScaleIntensityRanged|keys~image|a_min~-1000.0|a_max~1000.0|b_min~0.0|b_max~1.0|clip~True 21 | interpolation: Spacingd|keys~image,label|pixdim~1.0,1.0,1.0|mode~bilinear,nearest|align_corners~True,True 22 | learning_rate: 0.025 23 | learning_rate_milestones: 0.2,0.4,0.6,0.8 24 | num_epochs: 12800 25 | num_epochs_per_validation: 625 26 | num_images_per_batch: 2 27 | num_patches_per_image: 1 28 | num_sw_batch_size: 6 29 | output_classes: 2 30 | overlap_ratio: 0.625 31 | patch_size: 96,96,96 32 | random_seed: 888 33 | -------------------------------------------------------------------------------- /DiNTS/configs/config_Task07_Pancreas.yaml: -------------------------------------------------------------------------------- 1 | augmentation_monai: 2 | aug_0: RandRotated|keys~image,label|range_x~0.3|range_y~0.3|range_z~0.3|mode~bilinear,nearest|prob~0.2 3 | aug_1: RandZoomd|keys~image,label|min_zoom~0.8|max_zoom~1.2|mode~trilinear,nearest|align_corners~True,None|prob~0.16 4 | aug_2: RandGaussianSmoothd|keys~image|sigma_x~0.5,1.15|sigma_y~0.5,1.15|sigma_z~0.5,1.15|prob~0.15 5 | aug_3: RandScaleIntensityd|keys~image|factors~0.3|prob~0.5 6 | aug_4: 
RandShiftIntensityd|keys~image|offsets~0.1|prob~0.5 7 | aug_5: RandGaussianNoised|keys~image|std~0.01|prob~0.15 8 | aug_6: RandFlipd|keys~image,label|spatial_axis~0|prob~0.5 9 | aug_7: RandFlipd|keys~image,label|spatial_axis~1|prob~0.5 10 | aug_8: RandFlipd|keys~image,label|spatial_axis~2|prob~0.5 11 | core: 12 | amp: true 13 | deterministic: false 14 | foreground_crop_margin: 0 15 | infer_num_sw_batch_size: 6 16 | infer_num_tta: 1 17 | infer_overlap_ratio: 0.625 18 | infer_patch_size: 96,96,96 19 | input_channels: 1 20 | intensity_norm: ScaleIntensityRanged|keys~image|a_min~-87.0|a_max~199.0|b_min~0.0|b_max~1.0|clip~True 21 | interpolation: Spacingd|keys~image,label|pixdim~1.0,1.0,1.0|mode~bilinear,nearest|align_corners~True,True 22 | learning_rate: 0.025 23 | learning_rate_milestones: 0.2,0.4,0.6,0.8 24 | num_epochs: 2920 25 | num_epochs_per_validation: 140 26 | num_images_per_batch: 2 27 | num_patches_per_image: 1 28 | num_sw_batch_size: 6 29 | output_classes: 3 30 | overlap_ratio: 0.625 31 | patch_size: 96,96,96 32 | random_seed: 888 33 | -------------------------------------------------------------------------------- /DiNTS/configs/config_Task08_HepaticVessel.yaml: -------------------------------------------------------------------------------- 1 | augmentation_monai: 2 | aug_0: RandRotated|keys~image,label|range_x~0.3|range_y~0.3|range_z~0.3|mode~bilinear,nearest|prob~0.2 3 | aug_1: RandZoomd|keys~image,label|min_zoom~0.8|max_zoom~1.2|mode~trilinear,nearest|align_corners~True,None|prob~0.16 4 | aug_2: RandGaussianSmoothd|keys~image|sigma_x~0.5,1.15|sigma_y~0.5,1.15|sigma_z~0.5,1.15|prob~0.15 5 | aug_3: RandScaleIntensityd|keys~image|factors~0.3|prob~0.5 6 | aug_4: RandShiftIntensityd|keys~image|offsets~0.1|prob~0.5 7 | aug_5: RandGaussianNoised|keys~image|std~0.01|prob~0.15 8 | aug_6: RandFlipd|keys~image,label|spatial_axis~0|prob~0.5 9 | aug_7: RandFlipd|keys~image,label|spatial_axis~1|prob~0.5 10 | aug_8: RandFlipd|keys~image,label|spatial_axis~2|prob~0.5 11 | core: 12 | amp: true 13 | deterministic: false 14 | foreground_crop_margin: 0 15 | infer_num_sw_batch_size: 6 16 | infer_num_tta: 1 17 | infer_overlap_ratio: 0.625 18 | infer_patch_size: 96,96,96 19 | input_channels: 1 20 | intensity_norm: ScaleIntensityRanged|keys~image|a_min~0.0|a_max~230.0|b_min~0.0|b_max~1.0|clip~True 21 | interpolation: Spacingd|keys~image,label|pixdim~1.0,1.0,1.0|mode~bilinear,nearest|align_corners~True,True 22 | learning_rate: 0.025 23 | learning_rate_milestones: 0.2,0.4,0.6,0.8 24 | num_epochs: 2720 25 | num_epochs_per_validation: 132 26 | num_images_per_batch: 2 27 | num_patches_per_image: 1 28 | num_sw_batch_size: 6 29 | output_classes: 3 30 | overlap_ratio: 0.625 31 | patch_size: 96,96,96 32 | random_seed: 888 33 | -------------------------------------------------------------------------------- /DiNTS/configs/config_Task09_Spleen.yaml: -------------------------------------------------------------------------------- 1 | augmentation_monai: 2 | aug_0: RandRotated|keys~image,label|range_x~0.3|range_y~0.3|range_z~0.3|mode~bilinear,nearest|prob~0.2 3 | aug_1: RandZoomd|keys~image,label|min_zoom~0.8|max_zoom~1.2|mode~trilinear,nearest|align_corners~True,None|prob~0.16 4 | aug_2: RandGaussianSmoothd|keys~image|sigma_x~0.5,1.15|sigma_y~0.5,1.15|sigma_z~0.5,1.15|prob~0.15 5 | aug_3: RandScaleIntensityd|keys~image|factors~0.3|prob~0.5 6 | aug_4: RandShiftIntensityd|keys~image|offsets~0.1|prob~0.5 7 | aug_5: RandGaussianNoised|keys~image|std~0.01|prob~0.15 8 | aug_6: 
RandFlipd|keys~image,label|spatial_axis~0|prob~0.5 9 | aug_7: RandFlipd|keys~image,label|spatial_axis~1|prob~0.5 10 | aug_8: RandFlipd|keys~image,label|spatial_axis~2|prob~0.5 11 | core: 12 | amp: true 13 | deterministic: false 14 | foreground_crop_margin: 0 15 | infer_num_sw_batch_size: 6 16 | infer_num_tta: 1 17 | infer_overlap_ratio: 0.625 18 | infer_patch_size: 96,96,96 19 | input_channels: 1 20 | intensity_norm: ScaleIntensityRanged|keys~image|a_min~-125.0|a_max~275.0|b_min~0.0|b_max~1.0|clip~True 21 | interpolation: Spacingd|keys~image,label|pixdim~1.0,1.0,1.0|mode~bilinear,nearest|align_corners~True,True 22 | learning_rate: 0.025 23 | learning_rate_milestones: 0.2,0.4,0.6,0.8 24 | num_epochs: 20000 25 | num_epochs_per_validation: 975 26 | num_images_per_batch: 2 27 | num_patches_per_image: 1 28 | num_sw_batch_size: 6 29 | output_classes: 2 30 | overlap_ratio: 0.625 31 | patch_size: 96,96,96 32 | random_seed: 888 33 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | **MONAI Research Contributions** is a platform built to showcase cutting-edge research utilizing MONAI. This enables the community to see MONAI “in action” and researchers to gain visibility for their MONAI-based work. The repository is regularly reviewed and selected contributions that have demonstrated their popularity or relevance can be integrated into MONAI components in a second step. Contributions are welcome! Simply follow the contribution guidelines stated below and file a pull request. 2 | 3 | **Contribution Guidelines:** 4 | 5 | 1. Contributions are required to be published and have successfully undergone peer-review (If the contributing person is not the author of the work, no approval from the authors of the paper is required, but the contribution needs to be clearly labeled as a “Third-party contribution”). 6 | 7 | 2. The implementation is required to feature MONAI components to a substantial extent. 8 | 9 | 3. The implementation is required to include a boilerplate shell script (to be published) allowing code-reviewers to execute the code and reproduce the paper’s results with one click. 10 | 11 | 4. Under the hood, code quality does not have to match standards of the MONAI main repository, as the Research Contribution repository aims at a fast track for code change proposals and demonstrating cutting-edge research ideas. 12 | 13 | 5. New contributions will be given a MONAI version tag (visible in the associated Readme) according to the utilized version of MONAI. This avoids the need to maintain compatibility of contributions for following MONAI releases. 
14 | -------------------------------------------------------------------------------- /DiNTS/run_train_4gpu.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | clear 3 | 4 | TASK=${1} 5 | 6 | # TASK="Task02_Heart" 7 | # TASK="Task05_Prostate" 8 | 9 | ARCH_CKPT="arch_code_cvpr.pth" 10 | CONFIG="configs/config_${TASK}.yaml" 11 | DATA_ROOT="/workspace/data_msd/${TASK}" 12 | JSON_PATH="${DATA_ROOT}/dataset.json" 13 | NUM_FOLDS=5 14 | 15 | NUM_GPUS_PER_NODE=4 16 | NUM_NODES=1 17 | 18 | if [ ${NUM_GPUS_PER_NODE} -eq 2 ] 19 | then 20 | export CUDA_VISIBLE_DEVICES=0,1 21 | elif [ ${NUM_GPUS_PER_NODE} -eq 4 ] 22 | then 23 | export CUDA_VISIBLE_DEVICES=0,1,2,3 24 | elif [ ${NUM_GPUS_PER_NODE} -eq 8 ] 25 | then 26 | export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 27 | fi 28 | 29 | for FOLD in 0 1 2 3 4 30 | do 31 | CHECKPOINT_ROOT="models/${TASK}/Fold${FOLD}" 32 | CHECKPOINT="${CHECKPOINT_ROOT}/best_metric_model.pth" 33 | JSON_KEY="training" 34 | 35 | python -m torch.distributed.launch \ 36 | --nproc_per_node=${NUM_GPUS_PER_NODE} \ 37 | --nnodes=${NUM_NODES} \ 38 | --node_rank=0 \ 39 | --master_addr=localhost \ 40 | --master_port=1234 \ 41 | train_multi-gpu.py --arch_ckpt=${ARCH_CKPT} \ 42 | --checkpoint=${CHECKPOINT} \ 43 | --config=${CONFIG} \ 44 | --fold=${FOLD} \ 45 | --json=${JSON_PATH} \ 46 | --json_key=${JSON_KEY} \ 47 | --num_folds=${NUM_FOLDS} \ 48 | --output_root=${CHECKPOINT_ROOT} \ 49 | --root=${DATA_ROOT} 50 | done 51 | -------------------------------------------------------------------------------- /prostate-mri-lesion-seg/scripts/eval_dice.py: -------------------------------------------------------------------------------- 1 | # Script that loads two nifti files and evaluates them using the DICE metric. 
2 | # Usage: python eval_dice.py <ground_truth> <prediction> 3 | 4 | import os 5 | import nibabel as nib 6 | import argparse 7 | import torch 8 | from monai.metrics import DiceMetric 9 | 10 | def main(): 11 | parser = argparse.ArgumentParser(description="Evaluate two nifti files using the DICE metric.") 12 | parser.add_argument("ground_truth", type=str, help="Path to the ground truth nifti file") 13 | parser.add_argument("prediction", type=str, help="Path to the prediction nifti file") 14 | 15 | args = parser.parse_args() 16 | 17 | gt_path = os.path.abspath(os.path.expanduser(os.path.expandvars(args.ground_truth))) 18 | pred_path = os.path.abspath(os.path.expanduser(os.path.expandvars(args.prediction))) 19 | 20 | gt = nib.load(gt_path).get_fdata() 21 | pred = nib.load(pred_path).get_fdata() 22 | 23 | # Print the dimensions of the loaded images 24 | print(f"Ground truth dimensions: {gt.shape}") 25 | print(f"Prediction dimensions: {pred.shape}") 26 | 27 | # Convert numpy arrays to PyTorch tensors and add batch and channel dimensions 28 | gt_tensor = torch.tensor(gt).unsqueeze(0).unsqueeze(0) 29 | pred_tensor = torch.tensor(pred).unsqueeze(0).unsqueeze(0) 30 | 31 | # Compute the DICE metric 32 | dice_metric = DiceMetric(include_background=True, reduction="mean") 33 | dice = dice_metric(pred_tensor, gt_tensor) 34 | print(f"DICE: {dice.item()}") 35 | 36 | if __name__ == "__main__": 37 | main() 38 | -------------------------------------------------------------------------------- /SkullRec/cranialDefects.py: -------------------------------------------------------------------------------- 1 | import random 2 | from glob import glob 3 | 4 | import nrrd 5 | import numpy as np 6 | from scipy.ndimage import zoom 7 | 8 | 9 | def generate_hole_implants(data, cube_dim): 10 | x_ = data.shape[0] 11 | y_ = data.shape[1] 12 | z_ = data.shape[2] 13 | full_masking = np.ones(shape=(x_, y_, z_)) 14 | x = random.randint(int(cube_dim / 2), x_ - int(cube_dim / 2)) 15 | y = random.randint(int(cube_dim / 2), y_ - int(cube_dim / 2)) 16 | z = int(z_ * (3 / 4)) 17 | cube_masking = np.zeros(shape=(cube_dim, cube_dim, z_ - z)) 18 | print(cube_masking.shape) 19 | full_masking[x - int(cube_dim / 2) : x + int(cube_dim / 2), y - int(cube_dim / 2) : y + int(cube_dim / 2), z:z_] = ( 20 | cube_masking 21 | ) 22 | return full_masking 23 | 24 | 25 | def generate_cube(size): 26 | for i in range(len(pair_list)): 27 | print("generating data:", pair_list[i]) 28 | temp, header = nrrd.read(pair_list[i]) 29 | 30 | full_masking = generate_hole_implants(temp, size) 31 | 32 | c_masking_1 = full_masking == 1 33 | c_masking_1 = c_masking_1 + 1 - 1 34 | 35 | defected_image = c_masking_1 * temp 36 | 37 | c_masking = full_masking == 0 38 | c_masking = c_masking + 1 - 1 39 | f1 = defected_dir + pair_list[i][-10:-5] + ".nrrd" 40 | f2 = implant_dir + pair_list[i][-10:-5] + ".nrrd" 41 | nrrd.write(f1, defected_image, header) 42 | nrrd.write(f2, c_masking * temp, header)  # assumption: save the removed cube region as the implant (f2 was computed but never written) 43 | 44 | 45 | if __name__ == "__main__": 46 | pair_list = glob("{}/*.nrrd".format("./complete_skull/")) 47 | 48 | defected_dir = "./defects_cranial/" 49 | implant_dir = "./implants_cranial/"  # assumption: output folder for the implant files referenced in generate_cube 50 | generate_cube(128)  # assumption: 128-voxel cube defects; adjust the size as needed 51 | -------------------------------------------------------------------------------- /SkullRec/skullfix_pretrained_weight/README.md: -------------------------------------------------------------------------------- 1 | 1. Download the pretrained weights on the SkullFix dataset [here](https://files.icg.tugraz.at/f/d6b9f18c422948a8b0f1/?dl=1).
2 | Download the Facial Defects SkullFix dataset [here](https://files.icg.tugraz.at/f/5b7f31c4465b437e996d/?dl=1). 3 | 4 | 2. The following Python snippet aligns the reconstruction results with the input using a similarity transformation: 5 | 6 | ```Python 7 | import ants 8 | import os 9 | from glob import glob 10 | 11 | 12 | baseDir1 = './input_defective_skull/' 13 | files1 = glob(baseDir1+'/*.nii.gz') 14 | 15 | 16 | baseDir2 = './reconstruction_results/' 17 | files2 = glob(baseDir2+'/*.nii.gz') 18 | 19 | 20 | for i in range(len(files1)): 21 | fixed = ants.image_read(files1[i]) 22 | moving = ants.image_read(files2[i]) 23 | outs = ants.registration(fixed, moving, type_of_transform='Similarity') 24 | warped_img = outs['warpedmovout'] 25 | warped_to_moving = outs['invtransforms'] 26 | NamePrefix = str(i).zfill(3) 27 | ants.image_write(warped_img, './registered_reconstruction_results/'+ NamePrefix +'.nii.gz') 28 | 29 | 30 | ``` 31 | 32 | 33 | 3. Facial reconstruction results on the SkullFix dataset. From the first to the last column: the axial view of the reconstruction (shown in brown) and input (shown in red) before and after alignment, and the reconstructed face and input in 3D. The recovered facial area can be obtained by subtracting the input from the aligned reconstruction results: 34 | dataset 35 | -------------------------------------------------------------------------------- /SwinMM/WORD/utils/test_view_transforms.py: -------------------------------------------------------------------------------- 1 | """Unit test for view transforms.""" 2 | 3 | import itertools 4 | import unittest 5 | 6 | import numpy as np 7 | import torch 8 | from utils import view_transforms 9 | 10 | 11 | class ViewTransformsTest(unittest.TestCase): 12 | def test_len(self): 13 | self.assertEqual(len(view_transforms.all_forward_transforms), len(view_transforms.all_backward_transforms)) 14 | 15 | def test_inverse_transforms(self): 16 | x = np.random.uniform(size=(2, 6, 3, 4, 5)) 17 | x_torch = torch.from_numpy(x) 18 | for group_name, transforms in view_transforms.all_forward_transforms.items(): 19 | inverse_transforms = view_transforms.all_backward_transforms[group_name] 20 | self.assertEqual(len(transforms), len(inverse_transforms)) 21 | for key in transforms: 22 | x_recon = inverse_transforms[key](transforms[key](x_torch)).numpy() 23 | np.testing.assert_allclose(x, x_recon) 24 | 25 | def test_get_transforms_func(self): 26 | x = np.random.uniform(size=(2, 6, 3, 4, 5)) 27 | x_torch = torch.from_numpy(x) 28 | 29 | for order in [view_transforms.DEFAULT_ORDER, view_transforms.DEFAULT_ORDER[::-1]]: 30 | views_all = itertools.product(*[view_transforms.all_forward_transforms[gn].keys() for gn in order]) 31 | for views in views_all: 32 | func, inv_func = [ 33 | view_transforms.get_transforms_func(views, order, inverse) for inverse in [False, True] 34 | ] 35 | np.testing.assert_allclose(x, inv_func(func(x_torch)).numpy()) 36 | 37 | 38 | if __name__ == "__main__": 39 | unittest.main() 40 | -------------------------------------------------------------------------------- /DiNTS/run_train_multi-gpu.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | clear 3 | 4 | TASK=${1} 5 | 6 | # TASK="Task01_BrainTumour" 7 | # TASK="Task03_Liver" 8 | # TASK="Task04_Hippocampus" 9 | # TASK="Task06_Lung" 10 | # TASK="Task07_Pancreas" 11 | # TASK="Task08_HepaticVessel" 12 | # TASK="Task09_Spleen" 13 | # TASK="Task10_Colon" 14 | 15 | ARCH_CKPT="arch_code_cvpr.pth" 16 |
CONFIG="configs/config_${TASK}.yaml" 17 | DATA_ROOT="/workspace/data_msd/${TASK}" 18 | JSON_PATH="${DATA_ROOT}/dataset.json" 19 | NUM_FOLDS=5 20 | 21 | NUM_GPUS_PER_NODE=8 22 | NUM_NODES=1 23 | 24 | if [ ${NUM_GPUS_PER_NODE} -eq 2 ] 25 | then 26 | export CUDA_VISIBLE_DEVICES=0,1 27 | elif [ ${NUM_GPUS_PER_NODE} -eq 4 ] 28 | then 29 | export CUDA_VISIBLE_DEVICES=0,1,2,3 30 | elif [ ${NUM_GPUS_PER_NODE} -eq 8 ] 31 | then 32 | export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 33 | fi 34 | 35 | for FOLD in 0 1 2 3 4 36 | do 37 | CHECKPOINT_ROOT="models/${TASK}/Fold${FOLD}" 38 | CHECKPOINT="${CHECKPOINT_ROOT}/best_metric_model.pth" 39 | JSON_KEY="training" 40 | 41 | python -m torch.distributed.launch \ 42 | --nproc_per_node=${NUM_GPUS_PER_NODE} \ 43 | --nnodes=${NUM_NODES} \ 44 | --node_rank=0 \ 45 | --master_addr=localhost \ 46 | --master_port=1234 \ 47 | train_multi-gpu.py --arch_ckpt=${ARCH_CKPT} \ 48 | --checkpoint=${CHECKPOINT} \ 49 | --config=${CONFIG} \ 50 | --fold=${FOLD} \ 51 | --json=${JSON_PATH} \ 52 | --json_key=${JSON_KEY} \ 53 | --num_folds=${NUM_FOLDS} \ 54 | --output_root=${CHECKPOINT_ROOT} \ 55 | --root=${DATA_ROOT} 56 | done 57 | -------------------------------------------------------------------------------- /coplenet-pneumonia-lesion-segmentation/README.md: -------------------------------------------------------------------------------- 1 | # COPLE-Net for COVID-19 Pneumonia Lesion Segmentation 2 | 3 |

4 | ![lung-ct](./fig/img.png) 5 | ![lung-ct-seg](./fig/seg.png) 6 |

7 | 8 | 9 | > If you use this work in your research, please cite the paper. 10 | 11 | A reimplementation of the COPLE-Net originally proposed by: 12 | 13 | G. Wang, X. Liu, C. Li, Z. Xu, J. Ruan, H. Zhu, T. Meng, K. Li, N. Huang, S. Zhang. (2020) 14 | "A Noise-robust Framework for Automatic Segmentation of COVID-19 Pneumonia Lesions from CT Images." 15 | IEEE Transactions on Medical Imaging. 2020. DOI: [10.1109/TMI.2020.3000314](https://doi.org/10.1109/TMI.2020.3000314) 16 | 17 | 18 | This research prototype is adapted from: 19 | - [The `HiLab-git/COPLE-Net` GitHub repo](https://github.com/HiLab-git/COPLE-Net/) 20 | - [PyMIC, a Pytorch-based toolkit for medical image computing.](https://github.com/HiLab-git/PyMIC) 21 | 22 | To run the inference demo: 23 | 24 | - Install MONAI 0.2.0: 25 | ```bash 26 | pip install "monai[nibabel]==0.2.0" 27 | ``` 28 | The rest of the steps assume that this repo is cloned to your local file system and the current directory is the folder of this README file. 29 | - download the input examples from [google drive folder](https://drive.google.com/drive/folders/1pIoSSc4Iq8R9_xXo0NzaOhIHZ3-PqqDC) to `./images`. 30 | - download the adapted pretrained model from this [link](https://developer.download.nvidia.com/assets/Clara/monai/research/coplenet_pretrained_monai_dict.pt) to `./model`. 31 | - run `python run_inference.py` and segmentation results will be saved at `./output`. 32 | 33 | _(To segment COVID-19 pneumonia lesions from your own images, make sure that the images have been cropped into the lung region, 34 | and the intensity has been normalized into [0, 1] using window width/level of 1500/-650.)_ 35 | -------------------------------------------------------------------------------- /SkullRec/facialDefects.py: -------------------------------------------------------------------------------- 1 | import random 2 | from glob import glob 3 | 4 | import cc3d 5 | import nrrd 6 | import numpy as np 7 | 8 | 9 | def bbox_cal(data): 10 | a = np.round(data) 11 | x0 = np.sum(a, axis=2) 12 | xx = np.sum(x0, axis=1) 13 | yy = np.sum(x0, axis=0) 14 | resx = next(x for x, val in enumerate(list(xx)) if val > 0) 15 | 16 | resxx = next(x for x, val in enumerate(list(xx)[::-1]) if val > 0) 17 | 18 | resy = next(x for x, val in enumerate(list(yy)) if val > 0) 19 | 20 | resyy = next(x for x, val in enumerate(list(yy)[::-1]) if val > 0) 21 | z0 = np.sum(a, axis=1) 22 | zz = np.sum(z0, axis=0) 23 | resz = next(x for x, val in enumerate(list(zz)) if val > 0) 24 | 25 | reszz = next(x for x, val in enumerate(list(zz)[::-1]) if val > 0) 26 | 27 | return resx, resxx, resy, resyy, resz, reszz 28 | 29 | 30 | data_list = glob("{}/*.nrrd".format("./complete_skull/")) 31 | defected_dir = "./defects_facial/" 32 | 33 | 34 | for i in range(len(data_list)): 35 | a, h = nrrd.read(data_list[i]) 36 | 37 | x_s, y_s, z_s = a.shape 38 | resx, resxx, resy, resyy, resz, reszz = bbox_cal(a) 39 | x_extend = random.randint(resx, resx + (x_s - resxx - resx)) 40 | y_extend = random.randint(resy, resy + (y_s - resyy - resy)) 41 | z_extend = random.randint(resz, resz + (z_s - reszz - resz)) 42 | a[resx : resx + x_extend, resy : resy + 98, resz : z_s - reszz] = 0 43 | fname1 = defected_dir + data_list[i][-10:-5] + ".nrrd" 44 | nrrd.write(fname1, a, h) 45 | 46 | 47 | """ 48 | 49 | from pathlib import Path 50 | import shutil 51 | pathlist = Path('./9616319/0_labelsTr').glob('**/*.nrrd') 52 | for path in pathlist: 53 | # because path is object not string 54 | path_in_str = str(path) 55 | shutil.copyfile(path_in_str, 
'./complete_nrrds/'+path_in_str[-10:-5]+'.nrrd') 56 | print(path_in_str) 57 | """ 58 | -------------------------------------------------------------------------------- /auto3dseg/algorithm_templates/segresnet/scripts/infer.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) MONAI Consortium 2 | # Licensed under the Apache License, Version 2.0 (the "License"); 3 | # you may not use this file except in compliance with the License. 4 | # You may obtain a copy of the License at 5 | # http://www.apache.org/licenses/LICENSE-2.0 6 | # Unless required by applicable law or agreed to in writing, software 7 | # distributed under the License is distributed on an "AS IS" BASIS, 8 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 | # See the License for the specific language governing permissions and 10 | # limitations under the License. 11 | 12 | import os 13 | import sys 14 | from typing import Optional, Sequence, Union 15 | 16 | import fire 17 | 18 | sys.path.insert(0, os.path.abspath(os.path.dirname(__file__))) 19 | 20 | if __package__ in (None, ""): 21 | from segmenter import Segmenter, dist_launched, run_segmenter 22 | else: 23 | from .segmenter import Segmenter, dist_launched, run_segmenter 24 | 25 | 26 | class InferClass: 27 | def __init__( 28 | self, config_file: Optional[Union[str, Sequence[str]]] = None, rank: int = 0, global_rank: int = 0, **override 29 | ): 30 | override["infer#enabled"] = True 31 | 32 | if dist_launched(): 33 | rank = int(os.getenv("LOCAL_RANK")) 34 | global_rank = int(os.getenv("RANK")) 35 | 36 | self.segmenter = Segmenter(config_file=config_file, rank=rank, global_rank=global_rank, config_dict=override) 37 | 38 | def infer(self, image_file): 39 | pred = self.segmenter.infer_image(image_file) 40 | return pred 41 | 42 | 43 | def run(config_file: Optional[Union[str, Sequence[str]]] = None, **override): 44 | override["infer#enabled"] = True 45 | run_segmenter(config_file=config_file, **override) 46 | 47 | 48 | if __name__ == "__main__": 49 | fire.Fire() 50 | -------------------------------------------------------------------------------- /auto3dseg/algorithm_templates/segresnet2d/scripts/infer.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) MONAI Consortium 2 | # Licensed under the Apache License, Version 2.0 (the "License"); 3 | # you may not use this file except in compliance with the License. 4 | # You may obtain a copy of the License at 5 | # http://www.apache.org/licenses/LICENSE-2.0 6 | # Unless required by applicable law or agreed to in writing, software 7 | # distributed under the License is distributed on an "AS IS" BASIS, 8 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 | # See the License for the specific language governing permissions and 10 | # limitations under the License. 
11 | 12 | import os 13 | import sys 14 | from typing import Optional, Sequence, Union 15 | 16 | import fire 17 | 18 | sys.path.insert(0, os.path.abspath(os.path.dirname(__file__))) 19 | 20 | if __package__ in (None, ""): 21 | from segmenter import dist_launched 22 | from segmenter_2d import Segmenter2D, dist_launched, run_segmenter 23 | else: 24 | from .segmenter import dist_launched 25 | from .segmenter_2d import Segmenter2D, dist_launched, run_segmenter 26 | 27 | 28 | class InferClass: 29 | def __init__( 30 | self, config_file: Optional[Union[str, Sequence[str]]] = None, rank: int = 0, global_rank: int = 0, **override 31 | ): 32 | override["infer#enabled"] = True 33 | 34 | if dist_launched(): 35 | rank = int(os.getenv("LOCAL_RANK")) 36 | global_rank = int(os.getenv("RANK")) 37 | 38 | self.segmenter = Segmenter2D(config_file=config_file, rank=rank, global_rank=global_rank, config_dict=override) 39 | 40 | def infer(self, image_file): 41 | pred = self.segmenter.infer_image(image_file) 42 | return pred 43 | 44 | 45 | def run(config_file: Optional[Union[str, Sequence[str]]] = None, **override): 46 | override["infer#enabled"] = True 47 | run_segmenter(config_file=config_file, **override) 48 | 49 | 50 | if __name__ == "__main__": 51 | fire.Fire() 52 | -------------------------------------------------------------------------------- /auto3dseg/README.md: -------------------------------------------------------------------------------- 1 | # Auto3DSeg algorithm templates 2 | 3 | ## Template testing 4 | 5 | A unit test script is provided to evaluate the integrity of all algorithm templates in `auto3dseg/algorithm_templates`. This includes a 2-epoch training and the inference of trained models on a single GPU ("cuda:0") in the testing process. 6 | 7 | 8 | ``` 9 | python auto3dseg/tests/test_algo_templates.py 10 | python auto3dseg/tests/test_gpu_customization.py 11 | ``` 12 | 13 | ## Version control 14 | 15 | If the folder `auto3dseg` is changed, a new `version` and the corresponding `changelog` should be added into the `metadata.json` file. 16 | 17 | ## Adding new templates 18 | 19 | ### Class/Folder naming convention 20 | 21 | - Folder name: name of the algorithm in lowercase 22 | - Class name: folder name with the first letter in upper case + "Algo". e.g. UnetAlgo, SegresnetAlgo, etc. 
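As a quick illustration of the convention (a sketch for orientation, not part of the template API), the mapping from folder name to class name can be expressed as:

```python
def algo_class_name(folder_name: str) -> str:
    """Derive the Algo class name from a template folder name per the convention above."""
    return folder_name[:1].upper() + folder_name[1:] + "Algo"


assert algo_class_name("segresnet") == "SegresnetAlgo"
assert algo_class_name("dints") == "DintsAlgo"
```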
23 | 24 | ## Testing with MONAI core 25 | 26 | Start a docker container: 27 | 28 | ```bash 29 | docker run --ipc=host --net=host --gpus all -ti --rm projectmonai/monai 30 | ``` 31 | 32 | To test the github fork branch (assuming `my_test_branch` of `https://github.com/my_github/research-contributions.git`), 33 | run the following script: 34 | ```bash 35 | cd /tmp/ 36 | git clone --depth 1 --branch dev https://github.com/project-monai/monai.git 37 | git clone --depth 1 --branch my_test_branch https://github.com/my_github/research-contributions.git 38 | cp -r research-contributions/auto3dseg/algorithm_templates/ monai/ 39 | cd monai/ 40 | export OMP_NUM_THREADS=4 MKL_NUM_THREADS=4 PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=python 41 | MONAI_TESTING_ALGO_TEMPLATE=algorithm_templates python -m unittest -vvv tests.test_auto3dseg_ensemble 42 | MONAI_TESTING_ALGO_TEMPLATE=algorithm_templates python -m unittest -vvv tests.test_auto3dseg_hpo 43 | MONAI_TESTING_ALGO_TEMPLATE=algorithm_templates python -m unittest -vvv tests.test_integration_autorunner 44 | MONAI_TESTING_ALGO_TEMPLATE=algorithm_templates python -m unittest -vvv tests.test_integration_gpu_customization 45 | ``` 46 | -------------------------------------------------------------------------------- /lamp-automated-model-parallelism/test_unet_pipe.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 MONAI Consortium 2 | # Licensed under the Apache License, Version 2.0 (the "License"); 3 | # you may not use this file except in compliance with the License. 4 | # You may obtain a copy of the License at 5 | # http://www.apache.org/licenses/LICENSE-2.0 6 | # Unless required by applicable law or agreed to in writing, software 7 | # distributed under the License is distributed on an "AS IS" BASIS, 8 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 | # See the License for the specific language governing permissions and 10 | # limitations under the License. 
11 | 12 | import unittest 13 | 14 | import torch 15 | from parameterized import parameterized 16 | from unet_pipe import UNetPipe 17 | 18 | TEST_CASES = [ 19 | [ # 1-channel 3D, batch 12 20 | {"spatial_dims": 3, "out_channels": 2, "in_channels": 1, "depth": 3, "n_feat": 8}, 21 | torch.randn(12, 1, 32, 64, 48), 22 | (12, 2, 32, 64, 48), 23 | ], 24 | [ # 1-channel 3D, batch 16 25 | {"spatial_dims": 3, "out_channels": 2, "in_channels": 1, "depth": 3}, 26 | torch.randn(16, 1, 32, 64, 48), 27 | (16, 2, 32, 64, 48), 28 | ], 29 | [ # 4-channel 3D, batch 16, batch normalisation 30 | {"spatial_dims": 3, "out_channels": 3, "in_channels": 2}, 31 | torch.randn(16, 2, 64, 64, 64), 32 | (16, 3, 64, 64, 64), 33 | ], 34 | ] 35 | 36 | 37 | class TestUNETPipe(unittest.TestCase): 38 | @parameterized.expand(TEST_CASES) 39 | def test_shape(self, input_param, input_data, expected_shape): 40 | net = UNetPipe(**input_param) 41 | if torch.cuda.is_available(): 42 | net = net.to(torch.device("cuda")) 43 | input_data = input_data.to(torch.device("cuda")) 44 | net.eval() 45 | with torch.no_grad(): 46 | result = net.forward(input_data.float()) 47 | self.assertEqual(result.shape, expected_shape) 48 | 49 | 50 | if __name__ == "__main__": 51 | unittest.main() 52 | -------------------------------------------------------------------------------- /auto3dseg/algorithm_templates/dints/configs/hyper_parameters_search.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | searching: 3 | # hyper-parameters 4 | amp: true 5 | arch_path: "$@bundle_root + '/arch_ram' + str(@searching#ram_cost_factor) + '_fold' + str(@fold)" 6 | determ: false 7 | input_channels: null 8 | sw_input_on_cpu: false 9 | learning_rate: 0.025 10 | learning_rate_arch: 0.001 11 | log_output_file: "$@bundle_root + '/arch_ram' + str(@searching#ram_cost_factor) + '_fold' + str(@fold) + '/searching.log'" 12 | num_images_per_batch: 2 13 | num_epochs: 1000 14 | num_epochs_per_validation: 20 15 | num_warmup_epochs: 500 16 | num_crops_per_image: 1 17 | num_sw_batch_size: 2 18 | num_workers: 6 19 | num_cache_workers: 8 20 | output_classes: null 21 | overlap_ratio: 0.625 22 | roi_size: null 23 | roi_size_valid: null 24 | ram_cost_factor: 0.8 25 | resample_resolution: null 26 | softmax: true 27 | cache_rate: 1 28 | train_cache_rate: "@searching#cache_rate" 29 | validate_cache_rate: "@searching#cache_rate" 30 | transforms: 31 | resample_resolution: "@searching#resample_resolution" 32 | # architecture searching 33 | loss: 34 | _target_: DiceFocalLoss 35 | include_background: true 36 | to_onehot_y: "@searching#softmax" 37 | softmax: "@searching#softmax" 38 | sigmoid: "$not @searching#softmax" 39 | squared_pred: true 40 | batch: true 41 | smooth_nr: 1.0e-05 42 | smooth_dr: 1.0e-05 43 | optimizer: 44 | _target_: torch.optim.SGD 45 | lr: "@searching#learning_rate" 46 | momentum: 0.9 47 | weight_decay: 4.0e-05 48 | arch_optimizer_a: 49 | _target_: torch.optim.Adam 50 | lr: "@searching#learning_rate_arch" 51 | betas: [0.5, 0.999] 52 | weight_decay: 0 53 | arch_optimizer_c: 54 | _target_: torch.optim.Adam 55 | lr: "@searching#learning_rate_arch" 56 | betas: [0.5, 0.999] 57 | weight_decay: 0 58 | lr_scheduler: 59 | _target_: torch.optim.lr_scheduler.StepLR 60 | step_size: "$max(int(float(@searching#num_epochs - @searching#num_warmup_epochs) * 0.4), 1)" 61 | gamma: 0.5 62 | -------------------------------------------------------------------------------- /lamp-automated-model-parallelism/README.md: 
-------------------------------------------------------------------------------- 1 | # LAMP: Large Deep Nets with Automated Model Parallelism for Image Segmentation 2 | 3 |

4 | *(figure: LAMP on Head and Neck Dataset)* 5 |

6 | 7 | 8 | > If you use this work in your research, please cite the paper. 9 | 10 | A reimplementation of the LAMP system originally proposed by: 11 | 12 | Wentao Zhu, Can Zhao, Wenqi Li, Holger Roth, Ziyue Xu, and Daguang Xu (2020) 13 | "LAMP: Large Deep Nets with Automated Model Parallelism for Image Segmentation." 14 | MICCAI 2020 (Early Accept, paper link: https://arxiv.org/abs/2006.12575) 15 | 16 | 17 | ## To run the demo: 18 | 19 | ### Prerequisites 20 | - `pip install monai==0.2.0` 21 | - `pip install torchgpipe` 22 | 23 | The rest of the steps assume that this repo is cloned to your local file system and the current directory is the folder of this README file. 24 | 25 | ### Data 26 | ```bash 27 | mkdir ./data; 28 | cd ./data; 29 | ``` 30 | 31 | Please download and unzip the Head and Neck CT dataset into the `./data` folder. 32 | 33 | - `HaN.zip`: https://drive.google.com/file/d/1A2zpVlR3CkvtkJPvtAF3-MH0nr1WZ2Mn/view?usp=sharing 34 | ```bash 35 | unzip HaN.zip; # unzipping can also be done with other external tools 36 | ``` 37 | 38 | Please find more details of the dataset at https://github.com/wentaozhu/AnatomyNet-for-anatomical-segmentation.git 39 | 40 | 41 | ### Minimal hardware requirements for full image training 42 | - U-Net (`n_feat=32`): 2x 16GB GPUs 43 | - U-Net (`n_feat=64`): 4x 16GB GPUs 44 | - U-Net (`n_feat=128`): 2x 32GB GPUs 45 | 46 | 47 | ### Commands 48 | The number of features in the first block (`--n_feat`) can be 32, 64, or 128. 49 | ```bash 50 | mkdir ./log; 51 | python train.py --n_feat=128 --crop_size='64,64,64' --bs=16 --ep=4800 --lr=0.001 > ./log/YOURLOG.log 52 | python train.py --n_feat=128 --crop_size='128,128,128' --bs=4 --ep=1200 --lr=0.001 --pretrain='./HaN_32_16_1200_64,64,64_0.001_*' > ./log/YOURLOG.log 53 | python train.py --n_feat=128 --crop_size='-1,-1,-1' --bs=1 --ep=300 --lr=0.001 --pretrain='./HaN_32_16_1200_64,64,64_0.001_*' > ./log/YOURLOG.log 54 | ``` 55 | -------------------------------------------------------------------------------- /auto3dseg/algorithm_templates/swinunetr/docs/README.md: -------------------------------------------------------------------------------- 1 | # Description 2 | 3 | A 3D Swin Transformer-based algorithm for volumetric segmentation of medical images, integrated with the Auto3DSeg pipeline. 4 | 5 | # Model Overview 6 | 7 | This is a template for training the state-of-the-art algorithm [1] of the Beyond the Cranial Vault (BTCV) Segmentation Challenge and the "Medical Segmentation Decathlon Challenge 2018". 8 | 9 | ## Training configuration 10 | 11 | Training was performed on GPUs with at least 16 GB of memory. 12 | 13 | It is recommended to use pretrained Swin Transformer encoder weights for training; the training code will automatically download the base Swin-T encoder weights.
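For reference, the pretrained-weight behaviour is controlled from `configs/hyper_parameters.yaml`; a minimal sketch of the two relevant keys, with the values used in this template's config, is:

```yaml
# Pretrained Swin-T encoder settings (sketch; see configs/hyper_parameters.yaml)
use_pretrain: true   # set to false to train the encoder from scratch
pretrained_path: $@bundle_root + '/pretrained_model' + '/swin_unetr.base_5000ep_f48_lr2e-4_pretrained.pt'
```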
14 | 15 | ## Command Examples 16 | 17 | Execute model training: 18 | 19 | ``` 20 | python -m scripts.train run --config_file "['configs/hyper_parameters.yaml','configs/network.yaml','configs/transforms_train.yaml','configs/transforms_validate.yaml','configs/transforms_infer.yaml']" 21 | ``` 22 | 23 | Execute multi-GPU model training (recommended): 24 | 25 | ``` 26 | torchrun --nnodes=1 --nproc_per_node=8 -m scripts.train run --config_file "['configs/hyper_parameters.yaml','configs/network.yaml','configs/transforms_train.yaml','configs/transforms_validate.yaml','configs/transforms_infer.yaml']" 27 | ``` 28 | 29 | Execute validation: 30 | 31 | ``` 32 | python -m scripts.validate run --config_file "['configs/hyper_parameters.yaml','configs/network.yaml','configs/transforms_infer.yaml']" 33 | ``` 34 | 35 | Execute inference: 36 | 37 | ``` 38 | python -m scripts.infer run --config_file "['configs/hyper_parameters.yaml','configs/network.yaml','configs/transforms_infer.yaml']" 39 | ``` 40 | 41 | # References 42 | 43 | [1]: Tang, Y., Yang, D., Li, W., Roth, H.R., Landman, B., Xu, D., Nath, V. and Hatamizadeh, A., 2022. Self-supervised pre-training of swin transformers for 3d medical image analysis. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (pp. 20730-20740). 44 | 45 | [2]: Hatamizadeh, A., Nath, V., Tang, Y., Yang, D., Roth, H. and Xu, D., 2022. Swin UNETR: Swin Transformers for Semantic Segmentation of Brain Tumors in MRI Images. arXiv preprint arXiv:2201.01266. 46 | -------------------------------------------------------------------------------- /SwinMM/WORD/utils/view_transforms.py: -------------------------------------------------------------------------------- 1 | """View operations. 2 | 3 | Input format: [B, C, X, Y, Z, ...] 4 | 5 | NOTE(meijieru): 0 is reserved for the identity transform. 6 | """ 7 | 8 | import enum 9 | from typing import Callable, Sequence, Union 10 | 11 | import torch 12 | 13 | RotateType = int 14 | PermuteType = int 15 | TransformFuncType = Callable[[torch.Tensor], torch.Tensor] 16 | # A composition of multiple view transforms.
17 | TransformsType = Sequence[Union[PermuteType, RotateType]] 18 | 19 | 20 | class GroupName(enum.Enum): 21 | ROTATE = 1 22 | PERMUTE = 2 23 | 24 | 25 | DEFAULT_ORDER = (GroupName.ROTATE, GroupName.PERMUTE) 26 | 27 | rotation_transforms = { 28 | 0: lambda x: x, 29 | 1: lambda x: x.rot90(1, (3, 4)), 30 | 2: lambda x: x.rot90(2, (3, 4)), 31 | 3: lambda x: x.rot90(3, (3, 4)), 32 | } 33 | rotation_inverse_transforms = { 34 | 0: lambda x: x, 35 | 1: lambda x: x.rot90(3, (3, 4)), 36 | 2: lambda x: x.rot90(2, (3, 4)), 37 | 3: lambda x: x.rot90(1, (3, 4)), 38 | } 39 | permutation_transforms = {0: lambda x: x, 1: lambda x: x.permute(0, 1, 3, 2, 4), 2: lambda x: x.permute(0, 1, 4, 3, 2)} 40 | permutation_inverse_transforms = { 41 | 0: lambda x: x, 42 | 1: lambda x: x.permute(0, 1, 3, 2, 4), 43 | 2: lambda x: x.permute(0, 1, 4, 3, 2), 44 | } 45 | 46 | all_forward_transforms = {GroupName.ROTATE: rotation_transforms, GroupName.PERMUTE: permutation_transforms} 47 | all_backward_transforms = { 48 | GroupName.ROTATE: rotation_inverse_transforms, 49 | GroupName.PERMUTE: permutation_inverse_transforms, 50 | } 51 | 52 | 53 | def get_transforms_func( 54 | views: TransformsType, orders: Sequence[GroupName] = DEFAULT_ORDER, inverse: bool = False 55 | ) -> TransformFuncType: 56 | """Gets sequential transform functions.""" 57 | if len(views) != len(orders): 58 | raise ValueError(f"'views' and 'orders' must have the same length, got {len(views)} and {len(orders)}.") 59 | 60 | all_transforms = all_forward_transforms if not inverse else all_backward_transforms 61 | funcs = [all_transforms[group_name][view] for view, group_name in zip(views, orders)] 62 | funcs = funcs if not inverse else funcs[::-1] 63 | 64 | def aux(val): 65 | for func in funcs: 66 | val = func(val) 67 | return val 68 | 69 | return aux 70 | -------------------------------------------------------------------------------- /coplenet-pneumonia-lesion-segmentation/test_coplenet.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 MONAI Consortium 2 | # Licensed under the Apache License, Version 2.0 (the "License"); 3 | # you may not use this file except in compliance with the License. 4 | # You may obtain a copy of the License at 5 | # http://www.apache.org/licenses/LICENSE-2.0 6 | # Unless required by applicable law or agreed to in writing, software 7 | # distributed under the License is distributed on an "AS IS" BASIS, 8 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 | # See the License for the specific language governing permissions and 10 | # limitations under the License.
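(Editor's note) A minimal usage sketch for `get_transforms_func` from `SwinMM/WORD/utils/view_transforms.py` above, assuming the module is importable and a 5D input in the documented `[B, C, X, Y, Z]` layout; composing a view with its inverse should round-trip the tensor:

```python
import torch

from view_transforms import get_transforms_func  # assumed importable from SwinMM/WORD/utils

x = torch.randn(2, 1, 8, 8, 8)                       # [B, C, X, Y, Z]
views = (1, 2)                                       # ROTATE variant 1, then PERMUTE variant 2
forward = get_transforms_func(views)                 # default order: (ROTATE, PERMUTE)
backward = get_transforms_func(views, inverse=True)  # inverse funcs, applied in reverse order
assert torch.equal(backward(forward(x)), x)          # the composition round-trips
```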
11 | 12 | import unittest 13 | 14 | import torch 15 | from coplenet import CopleNet 16 | from parameterized import parameterized 17 | 18 | TEST_CASES = [ 19 | # single channel 2D, batch 16, no residual 20 | [{"spatial_dims": 2}, torch.randn(16, 1, 32, 32), (16, 2, 32, 32)], 21 | [ 22 | {"spatial_dims": 2, "in_channels": 5, "out_channels": 4}, 23 | torch.randn(16, 5, 32, 32), 24 | (16, 4, 32, 32), 25 | ], # 5-channel 2D, batch 16 26 | # 1-channel 3D, batch 16 27 | [{"spatial_dims": 3}, torch.randn(16, 1, 32, 48, 48), (16, 2, 32, 48, 48)], 28 | [ 29 | {"spatial_dims": 3, "bilinear": False}, 30 | torch.randn(16, 1, 32, 64, 48), 31 | (16, 2, 32, 64, 48), 32 | ], # 1-channel 3D, batch 16, no bilinear upsampling 33 | [ 34 | {"spatial_dims": 3, "in_channels": 2, "out_channels": 3, "bilinear": False}, 35 | torch.randn(16, 2, 32, 64, 48), 36 | (16, 3, 32, 64, 48), 37 | ], # 2-channel 3D, batch 16 38 | ] 39 | 40 | 41 | class TestCopleNET(unittest.TestCase): 42 | @parameterized.expand(TEST_CASES) 43 | def test_shape(self, input_param, input_data, expected_shape): 44 | net = CopleNet(**input_param) 45 | if torch.cuda.is_available(): 46 | net = net.to(torch.device("cuda")) 47 | input_data = input_data.to(torch.device("cuda")) 48 | net.eval() 49 | with torch.no_grad(): 50 | result = net.forward(input_data.float()) 51 | self.assertEqual(result.shape, expected_shape) 52 | 53 | 54 | if __name__ == "__main__": 55 | unittest.main() 56 | -------------------------------------------------------------------------------- /prostate-mri-lesion-seg/scripts/test_local.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | error() { 4 | echo "Usage: test_local.sh -i <input_dir> -o <output_dir> -m <model_dir>" 5 | echo "-c flag can be included to run on CPU" 6 | exit 1 7 | } 8 | 9 | SCRIPT_DIR=$(dirname "$(readlink -f "$0")") 10 | APP_DIR=$SCRIPT_DIR/../prostate_mri_lesion_seg_app 11 | 12 | # Remove any existing output directory 13 | rm -rf $SCRIPT_DIR/../output/* 14 | 15 | # Set defaults for optional arguments 16 | OUTPUT_DIR=$SCRIPT_DIR/../output 17 | MODEL_DIR=$APP_DIR/models/ 18 | 19 | has_argument() { 20 | [[ ("$1" == *=* && -n ${1#*=}) || ( ! -z "$2" && "$2" != -*) ]]; 21 | } 22 | 23 | extract_argument() { 24 | echo "${2:-${1#*=}}" 25 | } 26 | 27 | # Function to handle options and arguments 28 | handle_options() { 29 | while [ $# -gt 0 ]; do 30 | case $1 in 31 | -i | --input) 32 | if ! has_argument $@; then 33 | echo "Error: No input data directory specified" && error 34 | fi 35 | DATA_DIR=$(extract_argument $@) 36 | shift 37 | ;; 38 | -o | --output) 39 | if ! has_argument $@; then 40 | echo "Error: No output directory specified" && error 41 | fi 42 | OUTPUT_DIR=$(extract_argument $@) 43 | shift 44 | ;; 45 | -m | --model) 46 | if ! has_argument $@; then 47 | echo "Error: No model directory specified" && error 48 | fi 49 | MODEL_DIR=$(extract_argument $@) 50 | shift 51 | ;; 52 | -c | --cpu) CPU_ARG=1 ;; 53 | *) echo "Invalid option: $1" >&2 && error ;; 54 | esac 55 | shift 56 | done 57 | } 58 | handle_options "$@" 59 | 60 | # Check if data dir exists 61 | if [ ! -d "$DATA_DIR" ]; then 62 | echo "Error: Input data directory does not exist" && error 63 | fi 64 | 65 | # Check if CPU flag is included 66 | if [ $CPU_ARG ]; then 67 | echo "Running local test on CPU..." 68 | time CUDA_VISIBLE_DEVICES='' python $APP_DIR -i $DATA_DIR -o $OUTPUT_DIR -m $MODEL_DIR 69 | else 70 | echo "Running local test on GPU..."
71 | time python $APP_DIR -i $DATA_DIR -o $OUTPUT_DIR -m $MODEL_DIR 72 | fi 73 | -------------------------------------------------------------------------------- /auto3dseg/algorithm_templates/swinunetr/configs/transforms_train.yaml: -------------------------------------------------------------------------------- 1 | image_key: image 2 | label_key: label 3 | transforms_train: 4 | _target_: Compose 5 | transforms: 6 | - _target_: LoadImaged 7 | keys: ["@image_key", "@label_key"] 8 | image_only: true 9 | - _target_: EnsureChannelFirstd 10 | keys: ["@image_key", "@label_key"] 11 | - PLACEHOLDER_INTENSITY_NORMALIZATION 12 | - _target_: Orientationd 13 | keys: ["@image_key", "@label_key"] 14 | axcodes: RAS 15 | - _target_: Spacingd 16 | keys: ["@image_key", "@label_key"] 17 | pixdim: "$@transforms#resample_resolution" 18 | mode: [bilinear, nearest] 19 | align_corners: [true, true] 20 | - _target_: CastToTyped 21 | keys: ["@image_key", "@label_key"] 22 | dtype: ["$torch.float32", "$torch.uint8"] 23 | - _target_: EnsureTyped 24 | keys: ["@image_key", "@label_key"] 25 | track_meta: true 26 | - _target_: SpatialPadd 27 | keys: ["@image_key", "@label_key"] 28 | spatial_size: "@roi_size" 29 | mode: [constant, constant] 30 | 31 | - _target_: IdentityD # make the label up to date (the next transform requires label_key input) 32 | keys: ["@label_key"] 33 | 34 | # data augmentation 35 | - _target_: RandCropByLabelClassesd 36 | keys: ["@image_key", "@label_key"] 37 | label_key: "@label_key" 38 | num_classes: "@output_classes" 39 | spatial_size: "@roi_size" 40 | num_samples: "@num_crops_per_image" 41 | warn: false 42 | 43 | - _target_: IdentityD # make image up-to-date, before this line the cropping hasn't been applied 44 | keys: ["@image_key"] 45 | 46 | - _target_: RandFlipd 47 | keys: ['@image_key', '@label_key'] 48 | prob: 0.2 49 | spatial_axis: 0 50 | - _target_: RandFlipd 51 | keys: ['@image_key', '@label_key'] 52 | prob: 0.2 53 | spatial_axis: 1 54 | - _target_: RandFlipd 55 | keys: ['@image_key', '@label_key'] 56 | prob: 0.2 57 | spatial_axis: 2 58 | - _target_: RandRotate90d 59 | keys: ['@image_key', '@label_key'] 60 | prob: 0.2 61 | max_k: 3 62 | - _target_: RandScaleIntensityd 63 | keys: ['@image_key'] 64 | prob: 0.1 65 | factors: 0.1 66 | - _target_: RandShiftIntensityd 67 | keys: ['@image_key'] 68 | prob: 0.1 69 | offsets: 0.1 70 | -------------------------------------------------------------------------------- /auto3dseg/algorithm_templates/dints/docs/README.md: -------------------------------------------------------------------------------- 1 | # Description 2 | 3 | A neural architecture search algorithm for volumetric segmentation of 3D medical images. 4 | 5 | # Model Overview 6 | 7 | This serves as a template for the state-of-the-art algorithm [1] of the "Medical Segmentation Decathlon Challenge 2018". 8 | 9 | ## Training Requirements 10 | 11 | Training was performed on GPUs with at least 16 GB of memory.
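If GPU memory is limited, the batch-related keys of the search configuration are the usual knobs to lower; a sketch of the relevant defaults from this template's `configs/hyper_parameters_search.yaml`:

```yaml
# Memory-related search defaults (sketch; see configs/hyper_parameters_search.yaml)
searching:
  num_images_per_batch: 2   # images per training batch
  num_sw_batch_size: 2      # sliding-window batch size
  num_crops_per_image: 1    # random crops sampled per image
```

Reducing these values trades searching/training throughput for a smaller memory footprint.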
12 | 13 | ## Command Examples 14 | 15 | Execute model searching: 16 | 17 | ``` 18 | python -m scripts.search run --config_file "['configs/hyper_parameters.yaml','configs/hyper_parameters_search.yaml','configs/network_search.yaml','configs/transforms_train.yaml','configs/transforms_validate.yaml']" 19 | ``` 20 | 21 | Execute multi-GPU model searching (recommended): 22 | 23 | ``` 24 | torchrun --nnodes=1 --nproc_per_node=8 -m scripts.search run --config_file "['configs/hyper_parameters.yaml','configs/hyper_parameters_search.yaml','configs/network_search.yaml','configs/transforms_train.yaml','configs/transforms_validate.yaml']" 25 | ``` 26 | 27 | Execute model training: 28 | 29 | ``` 30 | python -m scripts.train run --config_file "['configs/hyper_parameters.yaml','configs/network.yaml','configs/transforms_train.yaml','configs/transforms_validate.yaml','configs/transforms_infer.yaml']" 31 | ``` 32 | 33 | Execute multi-GPU model training (recommended): 34 | 35 | ``` 36 | torchrun --nnodes=1 --nproc_per_node=8 -m scripts.train run --config_file "['configs/hyper_parameters.yaml','configs/network.yaml','configs/transforms_train.yaml','configs/transforms_validate.yaml','configs/transforms_infer.yaml']" 37 | ``` 38 | 39 | Execute validation: 40 | 41 | ``` 42 | python -m scripts.validate run --config_file "['configs/hyper_parameters.yaml','configs/network.yaml','configs/transforms_infer.yaml']" 43 | ``` 44 | 45 | Execute inference: 46 | 47 | ``` 48 | python -m scripts.infer run --config_file "['configs/hyper_parameters.yaml','configs/network.yaml','configs/transforms_infer.yaml']" 49 | ``` 50 | 51 | # References 52 | 53 | [1] He, Y., Yang, D., Roth, H., Zhao, C. and Xu, D., 2021. DiNTS: Differentiable neural network topology search for 3d medical image segmentation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (pp. 5841-5850). 54 | -------------------------------------------------------------------------------- /DAE/BTCV_Finetune/README.md: -------------------------------------------------------------------------------- 1 | # Finetune on BTCV 2 | 3 | ### Installing Dependencies 4 | Dependencies can be installed using: 5 | ``` bash 6 | pip install -r requirements.txt 7 | ``` 8 | 9 | ### Main Finetuning Experiments 10 | 11 | The main finetuning experiments are based on the Swin UNETR architecture.
12 | 13 | All json files and pre-trained model weights can be downloaded from the links below: 14 | ([DAE_SSL_Weights](https://developer.download.nvidia.com/assets/Clara/monai/tutorials/dae_weights_midl_2024/DAE_SSL_WEIGHTS.zip)) 15 | ([Feta Json](https://developer.download.nvidia.com/assets/Clara/monai/tutorials/dae_weights_midl_2024/data_folds_feta_json.zip)) 16 | ([BTCV Json](https://developer.download.nvidia.com/assets/Clara/monai/tutorials/dae_weights_midl_2024/json_data_folds_btcv.zip)) 17 | ([Pretraining Json](https://developer.download.nvidia.com/assets/Clara/monai/tutorials/dae_weights_midl_2024/pretrain_jsons.zip)) 18 | 19 | Sample Code: 20 | 21 | To train models from scratch without any pre-trained weights: 22 | 23 | ``` bash 24 | 25 | python main_for_ngc.py --json_list=/json_files/can_be/found_in/data_folds/xxxx.json --data_dir=/data_root --feature_size=48 --pos_embed='perceptron' --roi_x=96 --roi_y=96 --roi_z=96 --use_checkpoint --batch_size=4 --max_epochs=1000 --save_checkpoint --model_name swin --logdir ./provide_a_path/for_tensorboard_logs --optim_lr 8e-4 --val_every 5 --set_determ True --seed 120 26 | 27 | ``` 28 | 29 | To train models with pre-trained weights: 30 | 31 | ```bash 32 | 33 | python main_for_ngc.py --json_list=/json_files/can_be/found_in/data_folds/xxxx.json --data_dir=/data_root --feature_size=48 --pos_embed='perceptron' --roi_x=96 --roi_y=96 --roi_z=96 --use_checkpoint --batch_size=4 --max_epochs=1000 --save_checkpoint --model_name swin --logdir ./provide_a_path/for_tensorboard_logs --optim_lr 8e-4 --val_every 5 --use_ssl_pretrained --finetune_choice both --load_dir /path/to/ssl_pretrained_checkpoint --set_determ True --seed 120 34 | 35 | ``` 36 | 37 | `--load_dir` specifies the folder to load pretrained models from. 38 | 39 | `--finetune_choice` specifies which part of the network the pretrained weights are loaded into. The default is both. If "encoder" is specified, only encoder weights will be copied from the pretrained model; if "decoder" is specified, only decoder weights will be copied. 40 | -------------------------------------------------------------------------------- /DAE/Feta_Finetune/README.md: -------------------------------------------------------------------------------- 1 | # Finetune on Feta Dataset 2 | 3 | ### Installing Dependencies 4 | Dependencies can be installed using: 5 | ``` bash 6 | pip install -r requirements.txt 7 | ``` 8 | 9 | ### Main Finetuning Experiments 10 | 11 | The main finetuning experiments are based on the Swin UNETR architecture.
12 | 13 | All json files and pre-trained model weights can be downloaded from the links below: 14 | ([DAE_SSL_Weights](https://developer.download.nvidia.com/assets/Clara/monai/tutorials/dae_weights_midl_2024/DAE_SSL_WEIGHTS.zip)) 15 | ([Feta Json](https://developer.download.nvidia.com/assets/Clara/monai/tutorials/dae_weights_midl_2024/data_folds_feta_json.zip)) 16 | ([BTCV Json](https://developer.download.nvidia.com/assets/Clara/monai/tutorials/dae_weights_midl_2024/json_data_folds_btcv.zip)) 17 | ([Pretraining Json](https://developer.download.nvidia.com/assets/Clara/monai/tutorials/dae_weights_midl_2024/pretrain_jsons.zip)) 18 | 19 | Sample Code: 20 | 21 | To train models from scratch without any pre-trained weights: 22 | ``` bash 23 | 24 | python main_for_ngc.py --json_list=/json_files/can_be/found_in/data_folds/xxxx.json --data_dir=/data_root --feature_size=48 --pos_embed='perceptron' --roi_x=96 --roi_y=96 --roi_z=96 --use_checkpoint --batch_size=4 --max_epochs=600 --save_checkpoint --model_name swin --logdir ./provide_a_path/for_tensorboard_logs --optim_lr 8e-4 --val_every 10 --set_determ True --seed 120 25 | 26 | ``` 27 | 28 | To train models with pre-trained weights: 29 | 30 | ```bash 31 | 32 | python main_for_ngc.py --json_list=/json_files/can_be/found_in/data_folds/xxxx.json --data_dir=/data_root --feature_size=48 --pos_embed='perceptron' --roi_x=96 --roi_y=96 --roi_z=96 --use_checkpoint --batch_size=4 --max_epochs=600 --save_checkpoint --model_name swin --logdir ./provide_a_path/for_tensorboard_logs --optim_lr 8e-4 --val_every 10 --use_ssl_pretrained --finetune_choice both --load_dir /path/to/ssl_pretrained_checkpoint --set_determ True --seed 120 33 | 34 | ``` 35 | 36 | `--load_dir` specifies the folder to load pretrained models from. 37 | 38 | `--finetune_choice` specifies which part of the network the pretrained weights are loaded into. The default is both. If "encoder" is specified, only encoder weights will be copied from the pretrained model; if "decoder" is specified, only decoder weights will be copied. 39 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | *.egg-info/ 24 | .installed.cfg 25 | *.egg 26 | MANIFEST 27 | 28 | # PyInstaller 29 | # Usually these files are written by a python script from a template 30 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
31 | *.manifest 32 | *.spec 33 | 34 | # Installer logs 35 | pip-log.txt 36 | pip-delete-this-directory.txt 37 | 38 | # Unit test / coverage reports 39 | htmlcov/ 40 | .tox/ 41 | .coverage 42 | .coverage.* 43 | .cache 44 | nosetests.xml 45 | coverage.xml 46 | *.cover 47 | .hypothesis/ 48 | .pytest_cache/ 49 | 50 | # Translations 51 | *.mo 52 | *.pot 53 | 54 | # Django stuff: 55 | *.log 56 | local_settings.py 57 | db.sqlite3 58 | 59 | # Flask stuff: 60 | instance/ 61 | .webassets-cache 62 | 63 | # Scrapy stuff: 64 | .scrapy 65 | 66 | # Sphinx documentation 67 | docs/_build/ 68 | 69 | # PyBuilder 70 | target/ 71 | 72 | # Jupyter Notebook 73 | .ipynb_checkpoints 74 | 75 | # pyenv 76 | .python-version 77 | 78 | # celery beat schedule file 79 | celerybeat-schedule 80 | 81 | # SageMath parsed files 82 | *.sage.py 83 | 84 | # Environments 85 | .env 86 | .venv 87 | env/ 88 | venv/ 89 | ENV/ 90 | env.bak/ 91 | venv.bak/ 92 | 93 | # Spyder project settings 94 | .spyderproject 95 | .spyproject 96 | 97 | # Rope project settings 98 | .ropeproject 99 | 100 | # mkdocs documentation 101 | /site 102 | 103 | # pytype cache 104 | .pytype/ 105 | 106 | # mypy 107 | .mypy_cache/ 108 | examples/scd_lvsegs.npz 109 | temp/ 110 | .idea/ 111 | 112 | *~ 113 | 114 | # Remove .pyre temporary config files 115 | .pyre 116 | .pyre_configuration 117 | 118 | # temporary editor files that should not be in git 119 | *.orig 120 | *.bak 121 | *.swp 122 | .DS_Store 123 | 124 | # temporary testing data MedNIST 125 | tests/testing_data/MedNIST* 126 | tests/testing_data/*Hippocampus* 127 | 128 | # clang format tool 129 | .clang-format-bin/ 130 | 131 | # VSCode 132 | .vscode/ 133 | 134 | # Temporary workspace 135 | tmp_*_dir/ 136 | -------------------------------------------------------------------------------- /prostate-mri-lesion-seg/scripts/test_MAP.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | error() { 4 | echo "Usage: test_MAP.sh -i <input_dir> -o <output_dir> -m <model_dir>" 5 | echo "-b flag can be included to build MAP" 6 | exit 1 7 | } 8 | 9 | SCRIPT_DIR=$(dirname "$(readlink -f "$0")") 10 | APP_DIR=$SCRIPT_DIR/../prostate_mri_lesion_seg_app 11 | 12 | # Remove any existing output directory 13 | rm -rf $SCRIPT_DIR/../output/* 14 | 15 | has_argument() { 16 | [[ ("$1" == *=* && -n ${1#*=}) || ( ! -z "$2" && "$2" != -*) ]]; 17 | } 18 | 19 | extract_argument() { 20 | echo "${2:-${1#*=}}" 21 | } 22 | 23 | # Set defaults for optional arguments 24 | GPU_FLAG="--gpus all" 25 | OUTPUT_DIR=$SCRIPT_DIR/../output 26 | MODEL_DIR=$APP_DIR/models/ 27 | 28 | # Function to handle options and arguments 29 | handle_options() { 30 | while [ $# -gt 0 ]; do 31 | case $1 in 32 | -i | --input) 33 | if ! has_argument $@; then 34 | echo "Error: No input data directory specified" && error 35 | fi 36 | DATA_DIR=$(extract_argument $@) 37 | shift 38 | ;; 39 | -o | --output) 40 | if ! has_argument $@; then 41 | echo "Error: No output directory specified" && error 42 | fi 43 | OUTPUT_DIR=$(extract_argument $@) 44 | shift 45 | ;; 46 | -m | --model) 47 | if ! has_argument $@; then 48 | echo "Error: No model directory specified" && error 49 | fi 50 | MODEL_DIR=$(extract_argument $@) 51 | shift 52 | ;; 53 | -b | --build) BUILD_ARG=1 ;; 54 | *) echo "Invalid option: $1" >&2 && error ;; 55 | esac 56 | shift 57 | done 58 | } 59 | handle_options "$@" 60 | 61 | # Check if data dir exists 62 | if [ !
-d "$DATA_DIR" ]; then 63 | echo "Error: Input data directory does not exist" && error 64 | fi 65 | 66 | # Build MAP if -b flag is included 67 | if [ $BUILD_ARG ]; then 68 | echo "Building MAP since -b flag is included..." 69 | monai-deploy package $APP_DIR -l DEBUG -t lesion_seg_workflow_app:1.0 -m $MODEL_DIR -c $APP_DIR/app.yaml --platform x64-workstation 70 | fi 71 | 72 | echo "Running MAP..." 73 | time monai-deploy run lesion_seg_workflow_app-x64-workstation-dgpu-linux-amd64:1.0 -i $DATA_DIR -o $OUTPUT_DIR 74 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | default_language_version: 2 | python: python3.10 3 | 4 | ci: 5 | autofix_prs: true 6 | autoupdate_commit_msg: '[pre-commit.ci] pre-commit suggestions' 7 | autoupdate_schedule: quarterly 8 | # submodules: true 9 | 10 | repos: 11 | - repo: https://github.com/pre-commit/pre-commit-hooks 12 | rev: v4.4.0 13 | hooks: 14 | - id: end-of-file-fixer 15 | - id: trailing-whitespace 16 | - id: check-yaml 17 | - id: check-toml 18 | - id: check-case-conflict 19 | - id: check-added-large-files 20 | args: ['--maxkb=1024'] 21 | - id: detect-private-key 22 | - id: forbid-new-submodules 23 | - id: pretty-format-json 24 | args: ['--autofix', '--no-sort-keys', '--indent=4'] 25 | - id: end-of-file-fixer 26 | - id: mixed-line-ending 27 | 28 | # - repo: https://github.com/asottile/pyupgrade 29 | # rev: v2.31.1 30 | # hooks: 31 | # - id: pyupgrade 32 | # args: [--py36-plus] 33 | # name: Upgrade code 34 | # exclude: | 35 | # (?x)^( 36 | # versioneer.py| 37 | # monai/_version.py 38 | # )$ 39 | 40 | - repo: https://github.com/asottile/yesqa 41 | rev: v1.5.0 42 | hooks: 43 | - id: yesqa 44 | name: Unused noqa 45 | additional_dependencies: 46 | - flake8>=3.8.1 47 | - flake8-bugbear 48 | - flake8-comprehensions 49 | - flake8-executable 50 | - flake8-pyi 51 | - pep8-naming 52 | exclude: | 53 | (?x)^( 54 | monai/__init__.py| 55 | docs/source/conf.py 56 | )$ 57 | 58 | #- repo: https://github.com/PyCQA/isort 59 | # rev: 5.9.3 60 | # hooks: 61 | # - id: isort 62 | # name: Format imports 63 | 64 | # - repo: https://github.com/psf/black 65 | # rev: 21.7b0 66 | # hooks: 67 | # - id: black 68 | # name: Format code 69 | 70 | #- repo: https://github.com/executablebooks/mdformat 71 | # rev: 0.7.8 72 | # hooks: 73 | # - id: mdformat 74 | # additional_dependencies: 75 | # - mdformat-gfm 76 | # - mdformat_frontmatter 77 | # exclude: CHANGELOG.md 78 | 79 | # - repo: https://github.com/PyCQA/flake8 80 | # rev: 3.9.2 81 | # hooks: 82 | # - id: flake8 83 | # name: Check PEP8 84 | -------------------------------------------------------------------------------- /lamp-automated-model-parallelism/data_utils.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 MONAI Consortium 2 | # Licensed under the Apache License, Version 2.0 (the "License"); 3 | # you may not use this file except in compliance with the License. 4 | # You may obtain a copy of the License at 5 | # http://www.apache.org/licenses/LICENSE-2.0 6 | # Unless required by applicable law or agreed to in writing, software 7 | # distributed under the License is distributed on an "AS IS" BASIS, 8 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 | # See the License for the specific language governing permissions and 10 | # limitations under the License. 
11 | 12 | import os 13 | 14 | import numpy as np 15 | 16 | from monai.transforms import DivisiblePad 17 | 18 | STRUCTURES = ( 19 | "BrainStem", 20 | "Chiasm", 21 | "Mandible", 22 | "OpticNerve_L", 23 | "OpticNerve_R", 24 | "Parotid_L", 25 | "Parotid_R", 26 | "Submandibular_L", 27 | "Submandibular_R", 28 | ) 29 | 30 | 31 | def get_filenames(path, maskname=STRUCTURES): 32 | """ 33 | Create file names according to the predefined folder structure. 34 | 35 | Args: 36 | path: data folder name 37 | maskname: target structure names 38 | """ 39 | maskfiles = [] 40 | for seg in maskname: 41 | if os.path.exists(os.path.join(path, "./structures/" + seg + "_crp_v2.npy")): 42 | maskfiles.append(os.path.join(path, "./structures/" + seg + "_crp_v2.npy")) 43 | else: 44 | # the corresponding mask is missing 45 | maskfiles.append(None) 46 | return os.path.join(path, "img_crp_v2.npy"), maskfiles 47 | 48 | 49 | def load_data_and_mask(data, mask_data): 50 | """ 51 | Load data filename and mask_data (list of file names) 52 | into a dictionary of {'image': array, "label": list of arrays, "name": str}. 53 | """ 54 | pad_xform = DivisiblePad(k=32) 55 | img = np.load(data) # z y x 56 | img = pad_xform(img[None])[0] 57 | item = dict(image=img, label=[]) 58 | for maskfnm in mask_data: 59 | if maskfnm is None: 60 | ms = np.zeros(img.shape, np.uint8) 61 | else: 62 | ms = np.load(maskfnm).astype(np.uint8) 63 | assert ms.min() == 0 and ms.max() == 1 64 | mask = pad_xform(ms[None])[0] 65 | item["label"].append(mask) 66 | assert len(item["label"]) == 9 67 | item["name"] = str(data) 68 | return item 69 | -------------------------------------------------------------------------------- /DAE/BTCV_Finetune/mlp_new.py: -------------------------------------------------------------------------------- 1 | # limitations under the License. 2 | 3 | from typing import Tuple, Union 4 | 5 | import torch.nn as nn 6 | 7 | from monai.networks.layers import get_act_layer 8 | from monai.utils import look_up_option 9 | 10 | SUPPORTED_DROPOUT_MODE = {"vit", "swin"} 11 | 12 | 13 | class MLPBlock(nn.Module): 14 | """ 15 | A multi-layer perceptron block, based on: "Dosovitskiy et al., 16 | An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale " 17 | """ 18 | 19 | def __init__( 20 | self, 21 | hidden_size: int, 22 | mlp_dim: int, 23 | dropout_rate: float = 0.0, 24 | act: Union[Tuple, str] = "GELU", 25 | dropout_mode="vit", 26 | ) -> None: 27 | """ 28 | Args: 29 | hidden_size: dimension of hidden layer. 30 | mlp_dim: dimension of feedforward layer. If 0, `hidden_size` will be used. 31 | dropout_rate: fraction of the input units to drop. 32 | act: activation type and arguments. Defaults to GELU. 33 | dropout_mode: dropout mode, can be "vit" or "swin".
34 | "vit" mode uses two dropout instances as implemented in 35 | https://github.com/google-research/vision_transformer/blob/main/vit_jax/models.py#L87 36 | "swin" corresponds to one instance as implemented in 37 | https://github.com/microsoft/Swin-Transformer/blob/main/models/swin_mlp.py#L23 38 | """ 39 | 40 | super().__init__() 41 | 42 | if not (0 <= dropout_rate <= 1): 43 | raise ValueError("dropout_rate should be between 0 and 1.") 44 | mlp_dim = mlp_dim or hidden_size 45 | self.linear1 = nn.Linear(hidden_size, mlp_dim) 46 | self.linear2 = nn.Linear(mlp_dim, hidden_size) 47 | self.fn = get_act_layer(act) 48 | self.drop1 = nn.Dropout(dropout_rate) 49 | dropout_opt = look_up_option(dropout_mode, SUPPORTED_DROPOUT_MODE) 50 | if dropout_opt == "vit": 51 | self.drop2 = nn.Dropout(dropout_rate) 52 | elif dropout_opt == "swin": 53 | self.drop2 = self.drop1 54 | else: 55 | raise ValueError(f"dropout_mode should be one of {SUPPORTED_DROPOUT_MODE}") 56 | 57 | def forward(self, x): 58 | x = self.fn(self.linear1(x)) 59 | x = self.drop1(x) 60 | x = self.linear2(x) 61 | x = self.drop2(x) 62 | return x 63 | -------------------------------------------------------------------------------- /DAE/Feta_Finetune/mlp_new.py: -------------------------------------------------------------------------------- 1 | # imitations under the License. 2 | 3 | from typing import Tuple, Union 4 | 5 | import torch.nn as nn 6 | 7 | from monai.networks.layers import get_act_layer 8 | from monai.utils import look_up_option 9 | 10 | SUPPORTED_DROPOUT_MODE = {"vit", "swin"} 11 | 12 | 13 | class MLPBlock(nn.Module): 14 | """ 15 | A multi-layer perceptron block, based on: "Dosovitskiy et al., 16 | An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale " 17 | """ 18 | 19 | def __init__( 20 | self, 21 | hidden_size: int, 22 | mlp_dim: int, 23 | dropout_rate: float = 0.0, 24 | act: Union[Tuple, str] = "GELU", 25 | dropout_mode="vit", 26 | ) -> None: 27 | """ 28 | Args: 29 | hidden_size: dimension of hidden layer. 30 | mlp_dim: dimension of feedforward layer. If 0, `hidden_size` will be used. 31 | dropout_rate: faction of the input units to drop. 32 | act: activation type and arguments. Defaults to GELU. 33 | dropout_mode: dropout mode, can be "vit" or "swin". 
34 | "vit" mode uses two dropout instances as implemented in 35 | https://github.com/google-research/vision_transformer/blob/main/vit_jax/models.py#L87 36 | "swin" corresponds to one instance as implemented in 37 | https://github.com/microsoft/Swin-Transformer/blob/main/models/swin_mlp.py#L23 38 | """ 39 | 40 | super().__init__() 41 | 42 | if not (0 <= dropout_rate <= 1): 43 | raise ValueError("dropout_rate should be between 0 and 1.") 44 | mlp_dim = mlp_dim or hidden_size 45 | self.linear1 = nn.Linear(hidden_size, mlp_dim) 46 | self.linear2 = nn.Linear(mlp_dim, hidden_size) 47 | self.fn = get_act_layer(act) 48 | self.drop1 = nn.Dropout(dropout_rate) 49 | dropout_opt = look_up_option(dropout_mode, SUPPORTED_DROPOUT_MODE) 50 | if dropout_opt == "vit": 51 | self.drop2 = nn.Dropout(dropout_rate) 52 | elif dropout_opt == "swin": 53 | self.drop2 = self.drop1 54 | else: 55 | raise ValueError(f"dropout_mode should be one of {SUPPORTED_DROPOUT_MODE}") 56 | 57 | def forward(self, x): 58 | x = self.fn(self.linear1(x)) 59 | x = self.drop1(x) 60 | x = self.linear2(x) 61 | x = self.drop2(x) 62 | return x 63 | -------------------------------------------------------------------------------- /DAE/Feta_Finetune/utils/utils.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import scipy.ndimage as ndimage 3 | import torch 4 | 5 | 6 | def resample_3d(img, target_size): 7 | imx, imy, imz = img.shape 8 | tx, ty, tz = target_size 9 | zoom_ratio = (float(tx) / float(imx), float(ty) / float(imy), float(tz) / float(imz)) 10 | img_resampled = ndimage.zoom(img, zoom_ratio, order=0, prefilter=False) 11 | return img_resampled 12 | 13 | 14 | def dice(x, y): 15 | intersect = np.sum(np.sum(np.sum(x * y))) 16 | y_sum = np.sum(np.sum(np.sum(y))) 17 | if y_sum == 0: 18 | return 0.0 19 | x_sum = np.sum(np.sum(np.sum(x))) 20 | return 2 * intersect / (x_sum + y_sum) 21 | 22 | 23 | class AverageMeter(object): 24 | def __init__(self): 25 | self.reset() 26 | 27 | def reset(self): 28 | self.val = 0 29 | self.avg = 0 30 | self.sum = 0 31 | self.count = 0 32 | 33 | def update(self, val, n=1): 34 | self.val = val 35 | self.sum += val * n 36 | self.count += n 37 | self.avg = np.where(self.count > 0, self.sum / self.count, self.sum) 38 | 39 | 40 | def distributed_all_gather( 41 | tensor_list, valid_batch_size=None, out_numpy=False, world_size=None, no_barrier=False, is_valid=None 42 | ): 43 | if world_size is None: 44 | world_size = torch.distributed.get_world_size() 45 | if valid_batch_size is not None: 46 | valid_batch_size = min(valid_batch_size, world_size) 47 | elif is_valid is not None: 48 | is_valid = torch.tensor(bool(is_valid), dtype=torch.bool, device=tensor_list[0].device) 49 | if not no_barrier: 50 | torch.distributed.barrier() 51 | tensor_list_out = [] 52 | with torch.no_grad(): 53 | if is_valid is not None: 54 | is_valid_list = [torch.zeros_like(is_valid) for _ in range(world_size)] 55 | torch.distributed.all_gather(is_valid_list, is_valid) 56 | is_valid = [x.item() for x in is_valid_list] 57 | for tensor in tensor_list: 58 | gather_list = [torch.zeros_like(tensor) for _ in range(world_size)] 59 | torch.distributed.all_gather(gather_list, tensor) 60 | if valid_batch_size is not None: 61 | gather_list = gather_list[:valid_batch_size] 62 | elif is_valid is not None: 63 | gather_list = [g for g, v in zip(gather_list, is_valid_list) if v] 64 | if out_numpy: 65 | gather_list = [t.cpu().numpy() for t in gather_list] 66 | tensor_list_out.append(gather_list) 67 | return 
tensor_list_out 68 | -------------------------------------------------------------------------------- /SwinUNETR/Pretrain/losses/loss.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 - 2022 MONAI Consortium 2 | # Licensed under the Apache License, Version 2.0 (the "License"); 3 | # you may not use this file except in compliance with the License. 4 | # You may obtain a copy of the License at 5 | # http://www.apache.org/licenses/LICENSE-2.0 6 | # Unless required by applicable law or agreed to in writing, software 7 | # distributed under the License is distributed on an "AS IS" BASIS, 8 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 | # See the License for the specific language governing permissions and 10 | # limitations under the License. 11 | 12 | import torch 13 | from torch.nn import functional as F 14 | 15 | 16 | class Contrast(torch.nn.Module): 17 | def __init__(self, args, batch_size, temperature=0.5): 18 | super().__init__() 19 | device = torch.device(f"cuda:{args.local_rank}") 20 | self.batch_size = batch_size 21 | self.register_buffer("temp", torch.tensor(temperature).to(torch.device(f"cuda:{args.local_rank}"))) 22 | self.register_buffer("neg_mask", (~torch.eye(batch_size * 2, batch_size * 2, dtype=bool).to(device)).float()) 23 | 24 | def forward(self, x_i, x_j): 25 | z_i = F.normalize(x_i, dim=1) 26 | z_j = F.normalize(x_j, dim=1) 27 | z = torch.cat([z_i, z_j], dim=0) 28 | sim = F.cosine_similarity(z.unsqueeze(1), z.unsqueeze(0), dim=2) 29 | sim_ij = torch.diag(sim, self.batch_size) 30 | sim_ji = torch.diag(sim, -self.batch_size) 31 | pos = torch.cat([sim_ij, sim_ji], dim=0) 32 | nom = torch.exp(pos / self.temp) 33 | denom = self.neg_mask * torch.exp(sim / self.temp) 34 | return torch.sum(-torch.log(nom / torch.sum(denom, dim=1))) / (2 * self.batch_size) 35 | 36 | 37 | class Loss(torch.nn.Module): 38 | def __init__(self, batch_size, args): 39 | super().__init__() 40 | self.rot_loss = torch.nn.CrossEntropyLoss().cuda() 41 | self.recon_loss = torch.nn.L1Loss().cuda() 42 | self.contrast_loss = Contrast(args, batch_size).cuda() 43 | self.alpha1 = 1.0 44 | self.alpha2 = 1.0 45 | self.alpha3 = 1.0 46 | 47 | def __call__(self, output_rot, target_rot, output_contrastive, target_contrastive, output_recons, target_recons): 48 | rot_loss = self.alpha1 * self.rot_loss(output_rot, target_rot) 49 | contrast_loss = self.alpha2 * self.contrast_loss(output_contrastive, target_contrastive) 50 | recon_loss = self.alpha3 * self.recon_loss(output_recons, target_recons) 51 | total_loss = rot_loss + contrast_loss + recon_loss 52 | 53 | return total_loss, (rot_loss, contrast_loss, recon_loss) 54 | -------------------------------------------------------------------------------- /auto3dseg/algorithm_templates/segresnet/configs/hyper_parameters.yaml: -------------------------------------------------------------------------------- 1 | bundle_root: null # root folder of the fold 2 | ckpt_path: $@bundle_root + '/model' # location to save checkpoints and logs 3 | mlflow_tracking_uri: $@ckpt_path + '/mlruns/' 4 | mlflow_experiment_name: "Auto3DSeg" 5 | 6 | data_file_base_dir: null # location of the dataset 7 | data_list_file_path: null # location of the file with a list of files image/label in the dataset 8 | 9 | modality: mri # main image modality, must be one of mri, ct, pet 10 | fold: 0 11 | input_channels: null 12 | output_classes: null 13 | 14 | class_names: null 15 | class_index: null 16 | 17 | debug: false 18 | 
ckpt_save: true 19 | cache_rate: null 20 | roi_size: [224, 224, 144] 21 | 22 | 23 | auto_scale_allowed: true 24 | auto_scale_batch: true 25 | auto_scale_roi: false 26 | auto_scale_filters: false 27 | 28 | 29 | quick: false 30 | channels_last: true 31 | validate_final_original_res: true 32 | calc_val_loss: false 33 | amp: true 34 | log_output_file: null 35 | cache_class_indices: null 36 | early_stopping_fraction: 0.001 37 | determ: false 38 | orientation_ras: true 39 | crop_foreground: true 40 | 41 | learning_rate: 2.0e-4 42 | batch_size: '@num_images_per_batch' 43 | num_images_per_batch: 1 44 | num_epochs: 300 45 | num_warmup_epochs: 3 46 | sigmoid: false 47 | resample: false 48 | resample_resolution: [1, 1, 1] 49 | crop_mode: ratio 50 | normalize_mode: meanstd 51 | intensity_bounds: null 52 | 53 | num_epochs_per_validation: null 54 | num_epochs_per_saving: 1 55 | num_workers: 4 56 | num_steps_per_image: null 57 | num_crops_per_image: 1 58 | 59 | loss: 60 | _target_: DiceCELoss 61 | include_background: true 62 | squared_pred: true 63 | smooth_nr: 0 64 | smooth_dr: 1.0e-05 65 | softmax: $not @sigmoid 66 | sigmoid: $@sigmoid 67 | to_onehot_y: $not @sigmoid 68 | 69 | optimizer: 70 | _target_: torch.optim.AdamW 71 | lr: '@learning_rate' 72 | weight_decay: 1.e-5 73 | 74 | network: 75 | _target_: SegResNetDS 76 | init_filters: 32 77 | blocks_down: [1, 2, 2, 4, 4] 78 | norm: INSTANCE_NVFUSER 79 | in_channels: '@input_channels' 80 | out_channels: '@output_classes' 81 | dsdepth: 4 82 | 83 | 84 | finetune: 85 | enabled: false 86 | ckpt_name: $@bundle_root + '/model/model.pt' 87 | 88 | validate: 89 | enabled: false 90 | ckpt_name: $@bundle_root + '/model/model.pt' 91 | output_path: $@bundle_root + '/prediction_validation' 92 | save_mask: false 93 | invert: true 94 | 95 | infer: 96 | enabled: false 97 | ckpt_name: $@bundle_root + '/model/model.pt' 98 | output_path: $@bundle_root + '/prediction_' + @infer#data_list_key 99 | data_list_key: testing 100 | -------------------------------------------------------------------------------- /auto3dseg/algorithm_templates/segresnet2d/configs/hyper_parameters.yaml: -------------------------------------------------------------------------------- 1 | bundle_root: null # root folder of the fold 2 | ckpt_path: $@bundle_root + '/model' # location to save checkpoints and logs 3 | mlflow_tracking_uri: $@ckpt_path + '/mlruns/' 4 | mlflow_experiment_name: "Auto3DSeg" 5 | 6 | data_file_base_dir: null # location of the dataset 7 | data_list_file_path: null # location of the file with a list of files image/label in the dataset 8 | 9 | modality: mri # main image modality, must be one of mri, ct, pet 10 | fold: 0 11 | input_channels: null 12 | output_classes: null 13 | 14 | class_names: null 15 | class_index: null 16 | 17 | debug: false 18 | ckpt_save: true 19 | cache_rate: null 20 | roi_size: [448, 448, 32] 21 | 22 | 23 | auto_scale_allowed: true 24 | auto_scale_batch: true 25 | auto_scale_roi: false 26 | auto_scale_filters: false 27 | 28 | 29 | quick: false 30 | channels_last: true 31 | validate_final_original_res: true 32 | calc_val_loss: false 33 | amp: true 34 | log_output_file: null 35 | cache_class_indices: null 36 | early_stopping_fraction: 0.001 37 | determ: false 38 | stop_on_lowacc: false 39 | 40 | learning_rate: 2.0e-4 41 | batch_size: '@num_images_per_batch' 42 | num_images_per_batch: 1 43 | num_epochs: 300 44 | num_warmup_epochs: 3 45 | sigmoid: false 46 | resample: false 47 | resample_resolution: [1, 1, 1] 48 | crop_mode: ratio 49 | normalize_mode: meanstd 50 | 
intensity_bounds: null 51 | 52 | num_epochs_per_validation: null 53 | num_epochs_per_saving: 1 54 | num_workers: 4 55 | num_steps_per_image: null 56 | num_crops_per_image: 1 57 | 58 | loss: 59 | _target_: DiceCELoss 60 | include_background: true 61 | squared_pred: true 62 | smooth_nr: 0 63 | smooth_dr: 1.0e-05 64 | softmax: $not @sigmoid 65 | sigmoid: $@sigmoid 66 | to_onehot_y: $not @sigmoid 67 | batch: true 68 | 69 | optimizer: 70 | _target_: torch.optim.AdamW 71 | lr: '@learning_rate' 72 | weight_decay: 1.e-5 73 | 74 | network: 75 | _target_: SegResNetDS 76 | init_filters: 32 77 | blocks_down: [1, 2, 2, 4, 4] 78 | norm: BATCH 79 | in_channels: "@input_channels" 80 | out_channels: "@output_classes" 81 | dsdepth: 2 82 | spatial_dims: 2 83 | 84 | 85 | finetune: 86 | enabled: false 87 | ckpt_name: $@bundle_root + '/model/model.pt' 88 | 89 | validate: 90 | enabled: false 91 | ckpt_name: $@bundle_root + '/model/model.pt' 92 | output_path: $@bundle_root + '/prediction_validation' 93 | save_mask: false 94 | invert: true 95 | 96 | infer: 97 | enabled: false 98 | ckpt_name: $@bundle_root + '/model/model.pt' 99 | output_path: $@bundle_root + '/prediction_' + @infer#data_list_key 100 | data_list_key: testing 101 | -------------------------------------------------------------------------------- /UNETR/BTCV/utils/utils.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 - 2021 MONAI Consortium 2 | # Licensed under the Apache License, Version 2.0 (the "License"); 3 | # you may not use this file except in compliance with the License. 4 | # You may obtain a copy of the License at 5 | # http://www.apache.org/licenses/LICENSE-2.0 6 | # Unless required by applicable law or agreed to in writing, software 7 | # distributed under the License is distributed on an "AS IS" BASIS, 8 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 | # See the License for the specific language governing permissions and 10 | # limitations under the License. 
11 | 12 | import numpy as np 13 | import torch 14 | 15 | 16 | def dice(x, y): 17 | intersect = np.sum(np.sum(np.sum(x * y))) 18 | y_sum = np.sum(np.sum(np.sum(y))) 19 | if y_sum == 0: 20 | return 0.0 21 | x_sum = np.sum(np.sum(np.sum(x))) 22 | return 2 * intersect / (x_sum + y_sum) 23 | 24 | 25 | class AverageMeter(object): 26 | def __init__(self): 27 | self.reset() 28 | 29 | def reset(self): 30 | self.val = 0 31 | self.avg = 0 32 | self.sum = 0 33 | self.count = 0 34 | 35 | def update(self, val, n=1): 36 | self.val = val 37 | self.sum += val * n 38 | self.count += n 39 | self.avg = np.where(self.count > 0, self.sum / self.count, self.sum) 40 | 41 | 42 | def distributed_all_gather( 43 | tensor_list, valid_batch_size=None, out_numpy=False, world_size=None, no_barrier=False, is_valid=None 44 | ): 45 | if world_size is None: 46 | world_size = torch.distributed.get_world_size() 47 | if valid_batch_size is not None: 48 | valid_batch_size = min(valid_batch_size, world_size) 49 | elif is_valid is not None: 50 | is_valid = torch.tensor(bool(is_valid), dtype=torch.bool, device=tensor_list[0].device) 51 | if not no_barrier: 52 | torch.distributed.barrier() 53 | tensor_list_out = [] 54 | with torch.no_grad(): 55 | if is_valid is not None: 56 | is_valid_list = [torch.zeros_like(is_valid) for _ in range(world_size)] 57 | torch.distributed.all_gather(is_valid_list, is_valid) 58 | is_valid = [x.item() for x in is_valid_list] 59 | for tensor in tensor_list: 60 | gather_list = [torch.zeros_like(tensor) for _ in range(world_size)] 61 | torch.distributed.all_gather(gather_list, tensor) 62 | if valid_batch_size is not None: 63 | gather_list = gather_list[:valid_batch_size] 64 | elif is_valid is not None: 65 | gather_list = [g for g, v in zip(gather_list, is_valid_list) if v] 66 | if out_numpy: 67 | gather_list = [t.cpu().numpy() for t in gather_list] 68 | tensor_list_out.append(gather_list) 69 | return tensor_list_out 70 | -------------------------------------------------------------------------------- /SwinUNETR/BRATS21/utils/utils.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 - 2022 MONAI Consortium 2 | # Licensed under the Apache License, Version 2.0 (the "License"); 3 | # you may not use this file except in compliance with the License. 4 | # You may obtain a copy of the License at 5 | # http://www.apache.org/licenses/LICENSE-2.0 6 | # Unless required by applicable law or agreed to in writing, software 7 | # distributed under the License is distributed on an "AS IS" BASIS, 8 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 | # See the License for the specific language governing permissions and 10 | # limitations under the License. 
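(Editor's note) A minimal sketch of how the `dice` and `AverageMeter` helpers above are typically combined in a validation loop; the random arrays stand in for one-hot predictions and labels:

```python
import numpy as np

from utils import dice, AverageMeter  # assumed importable from the utils module above

rng = np.random.default_rng(0)
acc = AverageMeter()
for _ in range(4):  # stand-in for iterating over a validation loader
    pred = (rng.random((3, 8, 8, 8)) > 0.5).astype(np.uint8)   # (classes, z, y, x), one-hot
    label = (rng.random((3, 8, 8, 8)) > 0.5).astype(np.uint8)
    scores = [dice(pred[c], label[c]) for c in range(1, pred.shape[0])]  # skip background
    acc.update(np.mean(scores), n=1)
print(f"mean foreground Dice: {acc.avg:.4f}")
```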
11 | 12 | import numpy as np 13 | import torch 14 | 15 | 16 | def dice(x, y): 17 | intersect = np.sum(np.sum(np.sum(x * y))) 18 | y_sum = np.sum(np.sum(np.sum(y))) 19 | if y_sum == 0: 20 | return 0.0 21 | x_sum = np.sum(np.sum(np.sum(x))) 22 | return 2 * intersect / (x_sum + y_sum) 23 | 24 | 25 | class AverageMeter(object): 26 | def __init__(self): 27 | self.reset() 28 | 29 | def reset(self): 30 | self.val = 0 31 | self.avg = 0 32 | self.sum = 0 33 | self.count = 0 34 | 35 | def update(self, val, n=1): 36 | self.val = val 37 | self.sum += val * n 38 | self.count += n 39 | self.avg = np.where(self.count > 0, self.sum / self.count, self.sum) 40 | 41 | 42 | def distributed_all_gather( 43 | tensor_list, valid_batch_size=None, out_numpy=False, world_size=None, no_barrier=False, is_valid=None 44 | ): 45 | if world_size is None: 46 | world_size = torch.distributed.get_world_size() 47 | if valid_batch_size is not None: 48 | valid_batch_size = min(valid_batch_size, world_size) 49 | elif is_valid is not None: 50 | is_valid = torch.tensor(bool(is_valid), dtype=torch.bool, device=tensor_list[0].device) 51 | if not no_barrier: 52 | torch.distributed.barrier() 53 | tensor_list_out = [] 54 | with torch.no_grad(): 55 | if is_valid is not None: 56 | is_valid_list = [torch.zeros_like(is_valid) for _ in range(world_size)] 57 | torch.distributed.all_gather(is_valid_list, is_valid) 58 | is_valid = [x.item() for x in is_valid_list] 59 | for tensor in tensor_list: 60 | gather_list = [torch.zeros_like(tensor) for _ in range(world_size)] 61 | torch.distributed.all_gather(gather_list, tensor) 62 | if valid_batch_size is not None: 63 | gather_list = gather_list[:valid_batch_size] 64 | elif is_valid is not None: 65 | gather_list = [g for g, v in zip(gather_list, is_valid_list) if v] 66 | if out_numpy: 67 | gather_list = [t.cpu().numpy() for t in gather_list] 68 | tensor_list_out.append(gather_list) 69 | return tensor_list_out 70 | -------------------------------------------------------------------------------- /.github/workflows/packaging-algo.yml: -------------------------------------------------------------------------------- 1 | name: release 2 | # generating package artefacts from the main branch 3 | 4 | on: 5 | workflow_dispatch: 6 | inputs: 7 | tag_name: 8 | description: 'The tag to upload this asset into' 9 | required: true 10 | default: 'algo_templates' 11 | 12 | jobs: 13 | packaging-algo: 14 | if: github.repository == 'Project-MONAI/research-contributions' 15 | runs-on: ubuntu-latest 16 | steps: 17 | - uses: actions/checkout@v4 18 | with: 19 | fetch-depth: 0 20 | ref: main 21 | - name: Algo name 22 | id: name 23 | run: echo "sha_short=$(git rev-parse --short HEAD)" >> $GITHUB_OUTPUT 24 | - name: Build ${{ steps.name.outputs.sha_short }} 25 | env: 26 | release_version: ${{ steps.name.outputs.sha_short }} 27 | run: | 28 | echo $release_version 29 | cd auto3dseg/ 30 | tar -cvzf "$release_version".tar.gz algorithm_templates 31 | - name: Upload ${{ steps.name.outputs.sha_short }} 32 | uses: svenstaro/upload-release-action@v2 33 | with: 34 | tag: ${{ github.event.inputs.tag_name }} 35 | file: auto3dseg/${{ steps.name.outputs.sha_short }}.tar.gz 36 | asset_name: ${{ steps.name.outputs.sha_short }}.tar.gz 37 | overwrite: false 38 | - name: Artifact 39 | uses: actions/upload-artifact@v4 40 | with: 41 | name: ${{ steps.name.outputs.sha_short }} 42 | path: auto3dseg/${{ steps.name.outputs.sha_short }}.tar.gz 43 | - uses: actions/checkout@v4 44 | with: 45 | token: ${{ secrets.PR_MAINTAIN_BOT }} 46 | repository: 
Project-MONAI/MONAI 47 | ref: dev 48 | path: monai_src_dir 49 | - name: Modify MONAI core source code 50 | id: hash_code 51 | run: | 52 | cd monai_src_dir 53 | git config --global user.name 'monai-bot' 54 | git config --global user.email 'monai.miccai2019@gmail.com' 55 | 56 | # modify hash 57 | filename="monai/utils/misc.py" 58 | current_hash=$(grep -oE "[0-9a-f]{7}" $filename) 59 | sed -i s/$current_hash/${{ steps.name.outputs.sha_short }}/ $filename 60 | 61 | git add . 62 | git diff --cached | cat 63 | changes= 64 | if [ -n "$(git status --porcelain)" ]; then 65 | changes="true" 66 | fi 67 | echo "format=$changes" >> $GITHUB_OUTPUT 68 | shell: bash 69 | - name: make a PR 70 | if: steps.hash_code.outputs.format == 'true' 71 | run: | 72 | cd monai_src_dir 73 | git commit -sam "[MONAI] algo_template hash update" 74 | git diff @~1 75 | git checkout -b auto-update-hash 76 | git push -f --set-upstream origin auto-update-hash 77 | gh pr create --fill --title "Auto3DSeg algo_template hash update" --base dev --head "auto-update-hash" 78 | shell: bash 79 | env: 80 | GITHUB_TOKEN: ${{ secrets.PR_MAINTAIN_BOT }} 81 | -------------------------------------------------------------------------------- /auto3dseg/algorithm_templates/dints/configs/transforms_train.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | image_key: image 3 | label_key: label 4 | transforms_train: 5 | _target_: Compose 6 | 7 | transforms: 8 | - _target_: Compose 9 | transforms: 10 | - _target_: LoadImaged 11 | keys: "@image_key" 12 | dtype: "$np.float32" 13 | image_only: false 14 | - _target_: LoadImaged 15 | keys: "@label_key" 16 | dtype: "$np.uint8" 17 | image_only: false 18 | - _target_: EnsureChannelFirstd 19 | keys: ["@image_key", "@label_key"] 20 | - PLACEHOLDER_INTENSITY_NORMALIZATION 21 | - _target_: Orientationd 22 | keys: ["@image_key", "@label_key"] 23 | axcodes: RAS 24 | - _target_: Spacingd 25 | keys: ["@image_key", "@label_key"] 26 | pixdim: "@training#transforms#resample_resolution" 27 | mode: [bilinear, nearest] 28 | align_corners: [true, true] 29 | - _target_: CastToTyped 30 | keys: ["@image_key", "@label_key"] 31 | dtype: ["$torch.float32", "$torch.uint8"] 32 | - _target_: EnsureTyped 33 | keys: ["@image_key", "@label_key"] 34 | track_meta: true 35 | - _target_: SpatialPadd 36 | keys: ["@image_key", "@label_key"] 37 | spatial_size: "@training#roi_size" 38 | mode: [constant, constant] 39 | 40 | - _target_: IdentityD # make the label up to date (the next transform requires label_key input) 41 | keys: ["@label_key"] 42 | 43 | # data augmentation 44 | - _target_: RandCropByLabelClassesd 45 | keys: ["@image_key", "@label_key"] 46 | label_key: "@label_key" 47 | num_classes: "@training#output_classes" 48 | spatial_size: "@training#roi_size" 49 | num_samples: "@training#num_crops_per_image" 50 | warn: false 51 | - _target_: RandRotated 52 | keys: ["@image_key", "@label_key"] 53 | range_x: 0.3 54 | range_y: 0.3 55 | range_z: 0.3 56 | mode: [bilinear, nearest] 57 | prob: 0.2 58 | - _target_: RandZoomd 59 | keys: ["@image_key", "@label_key"] 60 | min_zoom: 0.8 61 | max_zoom: 1.2 62 | mode: [trilinear, nearest] 63 | prob: 0.16 64 | 65 | - _target_: IdentityD # make image up-to-date, before this line the cropping hasn't been applied 66 | keys: ["@image_key", "@label_key"] 67 | 68 | - _target_: RandGaussianSmoothd 69 | keys: "@image_key" 70 | sigma_x: [0.5, 1.15] 71 | sigma_y: [0.5, 1.15] 72 | sigma_z: [0.5, 1.15] 73 | prob: 0.15 74 | - _target_: RandScaleIntensityd 75 | keys:
"@image_key" 76 | factors: 0.3 77 | prob: 0.5 78 | - _target_: RandShiftIntensityd 79 | keys: "@image_key" 80 | offsets: 0.1 81 | prob: 0.5 82 | - _target_: RandGaussianNoised 83 | keys: "@image_key" 84 | std: 0.01 85 | prob: 0.15 86 | - _target_: RandFlipd 87 | keys: ["@image_key", "@label_key"] 88 | spatial_axis: 0 89 | prob: 0.5 90 | - _target_: RandFlipd 91 | keys: ["@image_key", "@label_key"] 92 | spatial_axis: 1 93 | prob: 0.5 94 | - _target_: RandFlipd 95 | keys: ["@image_key", "@label_key"] 96 | spatial_axis: 2 97 | prob: 0.5 98 | -------------------------------------------------------------------------------- /auto3dseg/algorithm_templates/swinunetr/configs/hyper_parameters.yaml: -------------------------------------------------------------------------------- 1 | bundle_root: null 2 | ckpt_path: "$@bundle_root + '/model_fold' + str(@fold)" 3 | data_file_base_dir: null 4 | data_list_file_path: null 5 | fold: 0 6 | 7 | transforms: 8 | resample_resolution: "$@resample_resolution" 9 | lazy_resampling: false 10 | cache_rate: 0 11 | train_cache_rate: "$@cache_rate" 12 | validate_cache_rate: "$@cache_rate" 13 | show_cache_progress: false 14 | 15 | auto_scale_allowed: true 16 | amp: true 17 | input_channels: null 18 | learning_rate: 0.0004 19 | log_output_file: "$@bundle_root + '/model_fold' + str(@fold) + '/training.log'" 20 | mlflow_tracking_uri: "$@bundle_root + '/model_fold' + str(@fold) + '/mlruns/'" 21 | mlflow_experiment_name: "Auto3DSeg" 22 | num_images_per_batch: 2 23 | num_epochs: null 24 | auto_scale_max_epochs: 1000 25 | num_epochs_per_validation: 5 26 | num_crops_per_image: null 27 | num_patches_per_iter: null 28 | num_sw_batch_size: "$@num_patches_per_iter" 29 | num_workers: 8 30 | num_workers_validation: 2 31 | num_cache_workers: 8 32 | output_classes: null 33 | n_cases: null 34 | overlap_ratio: 0.125 35 | overlap_ratio_final: 0.625 36 | roi_size: null 37 | roi_size_valid: null 38 | random_seed: 0 39 | resample_resolution: null 40 | sw_input_on_cpu: false 41 | softmax: true 42 | valid_at_orig_resolution_at_last: true 43 | valid_at_orig_resolution_only: false 44 | use_pretrain: true 45 | pretrained_path: $@bundle_root + '/pretrained_model' + '/swin_unetr.base_5000ep_f48_lr2e-4_pretrained.pt' 46 | adapt_valid_mode: true 47 | adapt_valid_progress_percentages: [10, 40, 70] 48 | adapt_valid_num_epochs_per_validation: [5, 5, 5] 49 | 50 | early_stop_mode: true 51 | early_stop_delta: 0 52 | early_stop_patience: 5 53 | 54 | loss: 55 | _target_: DiceCELoss 56 | include_background: true 57 | squared_pred: true 58 | smooth_nr: 0 59 | smooth_dr: 1.0e-05 60 | softmax: $@softmax 61 | sigmoid: $not @softmax 62 | to_onehot_y: $@softmax 63 | 64 | optimizer: 65 | _target_: torch.optim.AdamW 66 | lr: "@learning_rate" 67 | weight_decay: 1.0e-05 68 | lr_scheduler: 69 | _target_: monai.optimizers.WarmupCosineSchedule 70 | optimizer: "$@optimizer" 71 | warmup_steps: $@num_epochs//100 72 | t_total: '$@num_epochs // @num_epochs_per_validation + 1' 73 | warmup_multiplier: 0.1 74 | 75 | # fine-tuning 76 | finetune: 77 | activate: false 78 | pretrained_ckpt_name: "$@bundle_root + '/model_fold' + str(@fold) + '/best_metric_model.pt'" 79 | 80 | # validation 81 | validate: 82 | ckpt_name: "$@bundle_root + '/model_fold' + str(@fold) + '/best_metric_model.pt'" 83 | save_mask: true 84 | log_output_file: "$@bundle_root + '/model_fold' + str(@fold) + '/validation.log'" 85 | output_path: "$@bundle_root + '/prediction_fold' + str(@fold)" 86 | 87 | # inference 88 | infer: 89 | ckpt_name: "$@bundle_root + 
'/model_fold' + str(@fold) + '/best_metric_model.pt'" 90 | fast: false 91 | data_list_key: testing 92 | log_output_file: "$@bundle_root + '/model_fold' + str(@fold) + '/inference.log'" 93 | output_path: "$@bundle_root + '/prediction_' + @infer#data_list_key" 94 | -------------------------------------------------------------------------------- /prostate-mri-lesion-seg/LICENSE: -------------------------------------------------------------------------------- 1 | Prostate-MRI_Lesion_Detection, v3.0 (Release date: September 17, 2024) 2 | DEFINITIONS: AUTHOR(S) NVIDIA Corp. and National Cancer Institute, NIH 3 | 4 | PROVIDER: the National Cancer Institute (NCI), a participating institute of the 5 | National Institutes of Health (NIH), and an agency of the United States Government. 6 | 7 | SOFTWARE: the machine readable, binary, object code form, 8 | and the related documentation for the modules of the Prostate-MRI_Lesion_Detection, v2.0 9 | software package, which is a collection of operators which accept (T2, ADC, and High 10 | b-value DICOM images) and produce prostate organ and lesion segmentation files 11 | 12 | RECIPIENT: the party that downloads the software. 13 | 14 | By downloading or otherwise receiving the SOFTWARE, RECIPIENT may 15 | use and/or redistribute the SOFTWARE, with or without modification, 16 | subject to RECIPIENT’s agreement to the following terms: 17 | 18 | 1. THE SOFTWARE SHALL NOT BE USED IN THE TREATMENT OR DIAGNOSIS 19 | OF HUMAN SUBJECTS. RECIPIENT is responsible for 20 | compliance with all laws and regulations applicable to the use 21 | of the SOFTWARE. 22 | 23 | 2. THE SOFTWARE is distributed for NON-COMMERCIAL RESEARCH PURPOSES ONLY. RECIPIENT is 24 | responsible for appropriate-use compliance. 25 | 26 | 3. RECIPIENT agrees to acknowledge PROVIDER’s contribution and 27 | the name of the author of the SOFTWARE in all written publications 28 | containing any data or information regarding or resulting from use 29 | of the SOFTWARE. 30 | 31 | 4. THE SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED 32 | WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 33 | OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT 34 | ARE DISCLAIMED. IN NO EVENT SHALL THE PROVIDER OR THE INDIVIDUAL DEVELOPERS 35 | BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 36 | CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 37 | SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 38 | INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 39 | CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 40 | ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 41 | THE POSSIBILITY OF SUCH DAMAGE. 42 | 43 | 5. RECIPIENT agrees not to use any trademarks, service marks, trade names, 44 | logos or product names of NVIDIA, NCI or NIH to endorse or promote products derived 45 | from the SOFTWARE without specific, prior and written permission. 46 | 47 | 6. For sake of clarity, and not by way of limitation, RECIPIENT may add its 48 | own copyright statement to its modifications or derivative works of the SOFTWARE 49 | and may provide additional or different license terms and conditions in its 50 | sublicenses of modifications or derivative works of the SOFTWARE provided that 51 | RECIPIENT’s use, reproduction, and distribution of the SOFTWARE otherwise complies 52 | with the conditions stated in this Agreement. 
Whenever Recipient distributes or 53 | redistributes the SOFTWARE, a copy of this Agreement must be included with 54 | each copy of the SOFTWARE. 55 | -------------------------------------------------------------------------------- /SwinMM/WORD/utils/misc.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 - 2022 MONAI Consortium 2 | # Licensed under the Apache License, Version 2.0 (the "License"); 3 | # you may not use this file except in compliance with the License. 4 | # You may obtain a copy of the License at 5 | # http://www.apache.org/licenses/LICENSE-2.0 6 | # Unless required by applicable law or agreed to in writing, software 7 | # distributed under the License is distributed on an "AS IS" BASIS, 8 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 | # See the License for the specific language governing permissions and 10 | # limitations under the License. 11 | 12 | import numpy as np 13 | import scipy.ndimage as ndimage 14 | import torch 15 | 16 | 17 | def resample_3d(img, target_size): 18 | imx, imy, imz = img.shape 19 | tx, ty, tz = target_size 20 | zoom_ratio = (float(tx) / float(imx), float(ty) / float(imy), float(tz) / float(imz)) 21 | img_resampled = ndimage.zoom(img, zoom_ratio, order=0, prefilter=False) 22 | return img_resampled 23 | 24 | 25 | class AverageMeter(object): 26 | def __init__(self): 27 | self.reset() 28 | 29 | def reset(self): 30 | self.val = 0 31 | self.avg = 0 32 | self.sum = 0 33 | self.count = 0 34 | 35 | def update(self, val, n=1): 36 | self.val = val 37 | self.sum += val * n 38 | self.count += n 39 | self.avg = np.where(self.count > 0, self.sum / self.count, self.sum) 40 | 41 | 42 | def distributed_all_gather( 43 | tensor_list, valid_batch_size=None, out_numpy=False, world_size=None, no_barrier=False, is_valid=None 44 | ): 45 | if world_size is None: 46 | world_size = torch.distributed.get_world_size() 47 | if valid_batch_size is not None: 48 | valid_batch_size = min(valid_batch_size, world_size) 49 | elif is_valid is not None: 50 | is_valid = torch.tensor(bool(is_valid), dtype=torch.bool, device=tensor_list[0].device) 51 | if not no_barrier: 52 | torch.distributed.barrier() 53 | tensor_list_out = [] 54 | with torch.no_grad(): 55 | if is_valid is not None: 56 | is_valid_list = [torch.zeros_like(is_valid) for _ in range(world_size)] 57 | torch.distributed.all_gather(is_valid_list, is_valid) 58 | is_valid = [x.item() for x in is_valid_list] 59 | for tensor in tensor_list: 60 | gather_list = [torch.zeros_like(tensor) for _ in range(world_size)] 61 | torch.distributed.all_gather(gather_list, tensor) 62 | if valid_batch_size is not None: 63 | gather_list = gather_list[:valid_batch_size] 64 | elif is_valid is not None: 65 | gather_list = [g for g, v in zip(gather_list, is_valid_list) if v] 66 | if out_numpy: 67 | gather_list = [t.cpu().numpy() for t in gather_list] 68 | tensor_list_out.append(gather_list) 69 | return tensor_list_out 70 | 71 | 72 | def dice(x, y): 73 | intersect = np.sum(np.sum(np.sum(x * y))) 74 | y_sum = np.sum(np.sum(np.sum(y))) 75 | if y_sum == 0: 76 | return 0.0 77 | x_sum = np.sum(np.sum(np.sum(x))) 78 | return 2 * intersect / (x_sum + y_sum) 79 | -------------------------------------------------------------------------------- /DAE/BTCV_Finetune/utils/utils.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 - 2022 MONAI Consortium 2 | # Licensed under the Apache License, Version 2.0 (the "License"); 3 | 
# you may not use this file except in compliance with the License. 4 | # You may obtain a copy of the License at 5 | # http://www.apache.org/licenses/LICENSE-2.0 6 | # Unless required by applicable law or agreed to in writing, software 7 | # distributed under the License is distributed on an "AS IS" BASIS, 8 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 | # See the License for the specific language governing permissions and 10 | # limitations under the License. 11 | 12 | import numpy as np 13 | import scipy.ndimage as ndimage 14 | import torch 15 | 16 | 17 | def resample_3d(img, target_size): 18 | imx, imy, imz = img.shape 19 | tx, ty, tz = target_size 20 | zoom_ratio = (float(tx) / float(imx), float(ty) / float(imy), float(tz) / float(imz)) 21 | img_resampled = ndimage.zoom(img, zoom_ratio, order=0, prefilter=False) 22 | return img_resampled 23 | 24 | 25 | def dice(x, y): 26 | intersect = np.sum(np.sum(np.sum(x * y))) 27 | y_sum = np.sum(np.sum(np.sum(y))) 28 | if y_sum == 0: 29 | return 0.0 30 | x_sum = np.sum(np.sum(np.sum(x))) 31 | return 2 * intersect / (x_sum + y_sum) 32 | 33 | 34 | class AverageMeter(object): 35 | def __init__(self): 36 | self.reset() 37 | 38 | def reset(self): 39 | self.val = 0 40 | self.avg = 0 41 | self.sum = 0 42 | self.count = 0 43 | 44 | def update(self, val, n=1): 45 | self.val = val 46 | self.sum += val * n 47 | self.count += n 48 | self.avg = np.where(self.count > 0, self.sum / self.count, self.sum) 49 | 50 | 51 | def distributed_all_gather( 52 | tensor_list, valid_batch_size=None, out_numpy=False, world_size=None, no_barrier=False, is_valid=None 53 | ): 54 | if world_size is None: 55 | world_size = torch.distributed.get_world_size() 56 | if valid_batch_size is not None: 57 | valid_batch_size = min(valid_batch_size, world_size) 58 | elif is_valid is not None: 59 | is_valid = torch.tensor(bool(is_valid), dtype=torch.bool, device=tensor_list[0].device) 60 | if not no_barrier: 61 | torch.distributed.barrier() 62 | tensor_list_out = [] 63 | with torch.no_grad(): 64 | if is_valid is not None: 65 | is_valid_list = [torch.zeros_like(is_valid) for _ in range(world_size)] 66 | torch.distributed.all_gather(is_valid_list, is_valid) 67 | is_valid = [x.item() for x in is_valid_list] 68 | for tensor in tensor_list: 69 | gather_list = [torch.zeros_like(tensor) for _ in range(world_size)] 70 | torch.distributed.all_gather(gather_list, tensor) 71 | if valid_batch_size is not None: 72 | gather_list = gather_list[:valid_batch_size] 73 | elif is_valid is not None: 74 | gather_list = [g for g, v in zip(gather_list, is_valid_list) if v] 75 | if out_numpy: 76 | gather_list = [t.cpu().numpy() for t in gather_list] 77 | tensor_list_out.append(gather_list) 78 | return tensor_list_out 79 | -------------------------------------------------------------------------------- /SwinUNETR/BTCV/utils/utils.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 - 2022 MONAI Consortium 2 | # Licensed under the Apache License, Version 2.0 (the "License"); 3 | # you may not use this file except in compliance with the License. 4 | # You may obtain a copy of the License at 5 | # http://www.apache.org/licenses/LICENSE-2.0 6 | # Unless required by applicable law or agreed to in writing, software 7 | # distributed under the License is distributed on an "AS IS" BASIS, 8 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
9 | # See the License for the specific language governing permissions and 10 | # limitations under the License. 11 | 12 | import numpy as np 13 | import scipy.ndimage as ndimage 14 | import torch 15 | 16 | 17 | def resample_3d(img, target_size): 18 | imx, imy, imz = img.shape 19 | tx, ty, tz = target_size 20 | zoom_ratio = (float(tx) / float(imx), float(ty) / float(imy), float(tz) / float(imz)) 21 | img_resampled = ndimage.zoom(img, zoom_ratio, order=0, prefilter=False) 22 | return img_resampled 23 | 24 | 25 | def dice(x, y): 26 | intersect = np.sum(np.sum(np.sum(x * y))) 27 | y_sum = np.sum(np.sum(np.sum(y))) 28 | if y_sum == 0: 29 | return 0.0 30 | x_sum = np.sum(np.sum(np.sum(x))) 31 | return 2 * intersect / (x_sum + y_sum) 32 | 33 | 34 | class AverageMeter(object): 35 | def __init__(self): 36 | self.reset() 37 | 38 | def reset(self): 39 | self.val = 0 40 | self.avg = 0 41 | self.sum = 0 42 | self.count = 0 43 | 44 | def update(self, val, n=1): 45 | self.val = val 46 | self.sum += val * n 47 | self.count += n 48 | self.avg = np.where(self.count > 0, self.sum / self.count, self.sum) 49 | 50 | 51 | def distributed_all_gather( 52 | tensor_list, valid_batch_size=None, out_numpy=False, world_size=None, no_barrier=False, is_valid=None 53 | ): 54 | if world_size is None: 55 | world_size = torch.distributed.get_world_size() 56 | if valid_batch_size is not None: 57 | valid_batch_size = min(valid_batch_size, world_size) 58 | elif is_valid is not None: 59 | is_valid = torch.tensor(bool(is_valid), dtype=torch.bool, device=tensor_list[0].device) 60 | if not no_barrier: 61 | torch.distributed.barrier() 62 | tensor_list_out = [] 63 | with torch.no_grad(): 64 | if is_valid is not None: 65 | is_valid_list = [torch.zeros_like(is_valid) for _ in range(world_size)] 66 | torch.distributed.all_gather(is_valid_list, is_valid) 67 | is_valid = [x.item() for x in is_valid_list] 68 | for tensor in tensor_list: 69 | gather_list = [torch.zeros_like(tensor) for _ in range(world_size)] 70 | torch.distributed.all_gather(gather_list, tensor) 71 | if valid_batch_size is not None: 72 | gather_list = gather_list[:valid_batch_size] 73 | elif is_valid is not None: 74 | gather_list = [g for g, v in zip(gather_list, is_valid_list) if v] 75 | if out_numpy: 76 | gather_list = [t.cpu().numpy() for t in gather_list] 77 | tensor_list_out.append(gather_list) 78 | return tensor_list_out 79 | -------------------------------------------------------------------------------- /coplenet-pneumonia-lesion-segmentation/run_inference.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 MONAI Consortium 2 | # Licensed under the Apache License, Version 2.0 (the "License"); 3 | # you may not use this file except in compliance with the License. 4 | # You may obtain a copy of the License at 5 | # http://www.apache.org/licenses/LICENSE-2.0 6 | # Unless required by applicable law or agreed to in writing, software 7 | # distributed under the License is distributed on an "AS IS" BASIS, 8 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 | # See the License for the specific language governing permissions and 10 | # limitations under the License. 
11 | 12 | import os 13 | from glob import glob 14 | 15 | import numpy as np 16 | import torch 17 | from coplenet import CopleNet 18 | 19 | import monai 20 | from monai.data import NiftiSaver 21 | from monai.inferers import sliding_window_inference 22 | from monai.transforms import AddChanneld, Compose, LoadNiftid, Orientationd, ToTensord 23 | 24 | IMAGE_FOLDER = os.path.join(".", "images") 25 | MODEL_FILE = os.path.join(".", "model", "coplenet_pretrained_monai_dict.pt") 26 | # writer will create this folder if it doesn't exist. 27 | OUTPUT_FOLDER = os.path.join(".", "output") 28 | 29 | 30 | def main(): 31 | images = sorted(glob(os.path.join(IMAGE_FOLDER, "case*.nii.gz"))) 32 | val_files = [{"img": img} for img in images] 33 | 34 | # define transforms for image and segmentation 35 | infer_transforms = Compose( 36 | [ 37 | LoadNiftid("img"), 38 | AddChanneld("img"), 39 | # coplenet works on the plane defined by the last two axes 40 | Orientationd("img", "SPL"), 41 | ToTensord("img"), 42 | ] 43 | ) 44 | test_ds = monai.data.Dataset(data=val_files, transform=infer_transforms) 45 | # sliding window inference need to input 1 image in every iteration 46 | data_loader = torch.utils.data.DataLoader( 47 | test_ds, batch_size=1, num_workers=0, pin_memory=torch.cuda.is_available() 48 | ) 49 | 50 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu") 51 | model = CopleNet().to(device) 52 | 53 | model.load_state_dict(torch.load(MODEL_FILE)["model_state_dict"]) 54 | model.eval() 55 | 56 | with torch.no_grad(): 57 | saver = NiftiSaver(output_dir=OUTPUT_FOLDER) 58 | for idx, val_data in enumerate(data_loader): 59 | print(f"Inference on {idx+1} of {len(data_loader)}") 60 | val_images = val_data["img"].to(device) 61 | # define sliding window size and batch size for windows inference 62 | slice_shape = np.ceil(np.asarray(val_images.shape[3:]) / 32) * 32 63 | roi_size = (20, int(slice_shape[0]), int(slice_shape[1])) 64 | sw_batch_size = 2 65 | val_outputs = sliding_window_inference( 66 | val_images, roi_size, sw_batch_size, model, 0.0, padding_mode="circular" 67 | ) 68 | # val_outputs = (val_outputs.sigmoid() >= 0.5).float() 69 | val_outputs = val_outputs.argmax(dim=1, keepdim=True) 70 | saver.save_batch(val_outputs, val_data["img_meta_dict"]) 71 | 72 | 73 | if __name__ == "__main__": 74 | main() 75 | -------------------------------------------------------------------------------- /SwinUNETR/Pretrain/utils/ops.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 - 2022 MONAI Consortium 2 | # Licensed under the Apache License, Version 2.0 (the "License"); 3 | # you may not use this file except in compliance with the License. 4 | # You may obtain a copy of the License at 5 | # http://www.apache.org/licenses/LICENSE-2.0 6 | # Unless required by applicable law or agreed to in writing, software 7 | # distributed under the License is distributed on an "AS IS" BASIS, 8 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 | # See the License for the specific language governing permissions and 10 | # limitations under the License. 
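# This module implements the randomized volume corruptions used by the Swin
# UNETR self-supervised pretraining pipeline:
#   - patch_rand_drop: cuts random 3D blocks out of a volume and fills them
#     with min-max normalized noise, or with the matching region of another
#     volume when x_rep is given,
#   - rot_rand: rotates each volume by a random multiple of 90 degrees in a
#     fixed plane and returns the rotation class as a prediction target,
#   - aug_rand: applies patch_rand_drop across a batch, occasionally pasting
#     in blocks from another randomly chosen sample of the same batch.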
11 | 12 | import numpy as np 13 | import torch 14 | from numpy.random import randint 15 | 16 | 17 | def patch_rand_drop(args, x, x_rep=None, max_drop=0.3, max_block_sz=0.25, tolr=0.05): 18 | c, h, w, z = x.size() 19 | n_drop_pix = np.random.uniform(0, max_drop) * h * w * z 20 | mx_blk_height = int(h * max_block_sz) 21 | mx_blk_width = int(w * max_block_sz) 22 | mx_blk_slices = int(z * max_block_sz) 23 | tolr = (int(tolr * h), int(tolr * w), int(tolr * z)) 24 | total_pix = 0 25 | while total_pix < n_drop_pix: 26 | rnd_r = randint(0, h - tolr[0]) 27 | rnd_c = randint(0, w - tolr[1]) 28 | rnd_s = randint(0, z - tolr[2]) 29 | rnd_h = min(randint(tolr[0], mx_blk_height) + rnd_r, h) 30 | rnd_w = min(randint(tolr[1], mx_blk_width) + rnd_c, w) 31 | rnd_z = min(randint(tolr[2], mx_blk_slices) + rnd_s, z) 32 | if x_rep is None: 33 | x_uninitialized = torch.empty( 34 | (c, rnd_h - rnd_r, rnd_w - rnd_c, rnd_z - rnd_s), dtype=x.dtype, device=args.local_rank 35 | ).normal_() 36 | x_uninitialized = (x_uninitialized - torch.min(x_uninitialized)) / ( 37 | torch.max(x_uninitialized) - torch.min(x_uninitialized) 38 | ) 39 | x[:, rnd_r:rnd_h, rnd_c:rnd_w, rnd_s:rnd_z] = x_uninitialized 40 | else: 41 | x[:, rnd_r:rnd_h, rnd_c:rnd_w, rnd_s:rnd_z] = x_rep[:, rnd_r:rnd_h, rnd_c:rnd_w, rnd_s:rnd_z] 42 | total_pix = total_pix + (rnd_h - rnd_r) * (rnd_w - rnd_c) * (rnd_z - rnd_s) 43 | return x 44 | 45 | 46 | def rot_rand(args, x_s): 47 | img_n = x_s.size()[0] 48 | x_aug = x_s.detach().clone() 49 | device = torch.device(f"cuda:{args.local_rank}") 50 | x_rot = torch.zeros(img_n).long().to(device) 51 | for i in range(img_n): 52 | x = x_s[i] 53 | orientation = np.random.randint(0, 4) 54 | if orientation == 0: 55 | pass 56 | elif orientation == 1: 57 | x = x.rot90(1, (2, 3)) 58 | elif orientation == 2: 59 | x = x.rot90(2, (2, 3)) 60 | elif orientation == 3: 61 | x = x.rot90(3, (2, 3)) 62 | x_aug[i] = x 63 | x_rot[i] = orientation 64 | return x_aug, x_rot 65 | 66 | 67 | def aug_rand(args, samples): 68 | img_n = samples.size()[0] 69 | x_aug = samples.detach().clone() 70 | for i in range(img_n): 71 | x_aug[i] = patch_rand_drop(args, x_aug[i]) 72 | idx_rnd = randint(0, img_n) 73 | if idx_rnd != i: 74 | x_aug[i] = patch_rand_drop(args, x_aug[i], x_aug[idx_rnd]) 75 | return x_aug 76 | -------------------------------------------------------------------------------- /prostate-mri-lesion-seg/prostate_mri_lesion_seg_app/__main__.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Prostate-MRI_Lesion_Detection, v3.0 (Release date: September 17, 2024) 3 | DEFINITIONS: AUTHOR(S) NVIDIA Corp. and National Cancer Institute, NIH 4 | 5 | PROVIDER: the National Cancer Institute (NCI), a participating institute of the 6 | National Institutes of Health (NIH), and an agency of the United States Government. 7 | 8 | SOFTWARE: the machine readable, binary, object code form, 9 | and the related documentation for the modules of the Prostate-MRI_Lesion_Detection, v2.0 10 | software package, which is a collection of operators which accept (T2, ADC, and High 11 | b-value DICOM images) and produce prostate organ and lesion segmentation files 12 | 13 | RECIPIENT: the party that downloads the software. 14 | 15 | By downloading or otherwise receiving the SOFTWARE, RECIPIENT may 16 | use and/or redistribute the SOFTWARE, with or without modification, 17 | subject to RECIPIENT’s agreement to the following terms: 18 | 19 | 1. THE SOFTWARE SHALL NOT BE USED IN THE TREATMENT OR DIAGNOSIS 20 | OF HUMAN SUBJECTS. 
RECIPIENT is responsible for 21 | compliance with all laws and regulations applicable to the use 22 | of the SOFTWARE. 23 | 24 | 2. THE SOFTWARE is distributed for NON-COMMERCIAL RESEARCH PURPOSES ONLY. RECIPIENT is 25 | responsible for appropriate-use compliance. 26 | 27 | 3. RECIPIENT agrees to acknowledge PROVIDER’s contribution and 28 | the name of the author of the SOFTWARE in all written publications 29 | containing any data or information regarding or resulting from use 30 | of the SOFTWARE. 31 | 32 | 4. THE SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED 33 | WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 34 | OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT 35 | ARE DISCLAIMED. IN NO EVENT SHALL THE PROVIDER OR THE INDIVIDUAL DEVELOPERS 36 | BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 37 | CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 38 | SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 39 | INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 40 | CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 41 | ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 42 | THE POSSIBILITY OF SUCH DAMAGE. 43 | 44 | 5. RECIPIENT agrees not to use any trademarks, service marks, trade names, 45 | logos or product names of NVIDIA, NCI or NIH to endorse or promote products derived 46 | from the SOFTWARE without specific, prior and written permission. 47 | 48 | 6. For sake of clarity, and not by way of limitation, RECIPIENT may add its 49 | own copyright statement to its modifications or derivative works of the SOFTWARE 50 | and may provide additional or different license terms and conditions in its 51 | sublicenses of modifications or derivative works of the SOFTWARE provided that 52 | RECIPIENT’s use, reproduction, and distribution of the SOFTWARE otherwise complies 53 | with the conditions stated in this Agreement. Whenever Recipient distributes or 54 | redistributes the SOFTWARE, a copy of this Agreement must be included with 55 | each copy of the SOFTWARE.''' 56 | 57 | from app import AIProstateLesionSegApp 58 | 59 | if __name__ == "__main__": 60 | AIProstateLesionSegApp().run() 61 | -------------------------------------------------------------------------------- /DiNTS/utils.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 - 2021 MONAI Consortium 2 | # Licensed under the Apache License, Version 2.0 (the "License"); 3 | # you may not use this file except in compliance with the License. 4 | # You may obtain a copy of the License at 5 | # http://www.apache.org/licenses/LICENSE-2.0 6 | # Unless required by applicable law or agreed to in writing, software 7 | # distributed under the License is distributed on an "AS IS" BASIS, 8 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 | # See the License for the specific language governing permissions and 10 | # limitations under the License. 
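# Helper utilities for the DiNTS scripts:
#   - check_number / check_list_tuple: parse command-line strings into Python
#     scalars, booleans, None, lists, or tuples,
#   - parse_monai_specs: decode "Name|key~value|key~v1,v2" component strings
#     into a component name plus a keyword-argument dict,
#   - keep_largest_cc: keep only the largest connected component of a mask,
#   - resize_volume: wrap skimage.transform.resize with the value range
#     preserved.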
11 | 12 | import numpy as np 13 | import torch 14 | import torch.nn as nn 15 | import torch.nn.functional as F 16 | from skimage import measure 17 | from skimage.transform import resize 18 | 19 | from monai.metrics.utils import do_metric_reduction, ignore_background 20 | 21 | 22 | def check_number(a): 23 | try: 24 | a = float(a) 25 | if np.abs(a) < np.finfo(np.float32).eps or int(a) / a == 1: 26 | # print("This is Integer") 27 | return int(a) 28 | else: 29 | # print("This is Float") 30 | return float(a) 31 | except ValueError: 32 | # print("This value is String") 33 | if a.lower() == "true": 34 | return True 35 | elif a.lower() == "false": 36 | return False 37 | elif a.lower() == "none": 38 | return None 39 | 40 | return str(a) 41 | 42 | 43 | def check_list_tuple(a): 44 | if not isinstance(a, str): 45 | return a 46 | 47 | a = a.replace(" ", "") 48 | if a[0] == "(" and a[-1] == ")": 49 | part_split = a[1:-1].split(",") 50 | out = [] 51 | for _s in range(len(part_split)): 52 | out.append(check_number(part_split[_s])) 53 | out = tuple(_i for _i in out) 54 | return out 55 | elif a[0] == "[" and a[-1] == "]": 56 | part_split = a[1:-1].split(",") 57 | out = [] 58 | for _s in range(len(part_split)): 59 | out.append(check_number(part_split[_s])) 60 | return out 61 | 62 | return a 63 | 64 | 65 | def parse_monai_specs(component_string): 66 | string_parts = component_string.split("|") 67 | component_name = string_parts[0] 68 | 69 | component_dict = {} 70 | for _k in range(1, len(string_parts)): 71 | part = string_parts[_k] 72 | part_split = part.split("~") 73 | _key = part_split[0] 74 | _val = part_split[1] 75 | 76 | _val_parts = _val.split(",") 77 | if len(_val_parts) == 1: 78 | component_dict[_key] = check_number(_val) 79 | else: 80 | component_dict[_key] = [check_number(_item) for _item in _val_parts] 81 | 82 | return component_name, component_dict 83 | 84 | 85 | def keep_largest_cc(nda): 86 | labels = measure.label(nda > 0) 87 | if labels.max() != 0: 88 | largestCC = labels == np.argmax(np.bincount(labels.flat)[1:]) + 1 89 | largestCC = largestCC.astype(nda.dtype) 90 | return largestCC 91 | 92 | return nda 93 | 94 | 95 | def resize_volume(nda, output_shape, order=1, preserve_range=True, anti_aliasing=False): 96 | return resize(nda, output_shape, order=order, preserve_range=preserve_range, anti_aliasing=anti_aliasing) 97 | -------------------------------------------------------------------------------- /.github/workflows/integration.yml: -------------------------------------------------------------------------------- 1 | # manually trigger integration 2 | name: integration 3 | 4 | on: 5 | repository_dispatch: 6 | type: [integration-test-command] 7 | 8 | jobs: 9 | integration-py3: 10 | container: 11 | image: nvcr.io/nvidia/pytorch:22.04-py3 # CUDA 11.6 py38 12 | options: --gpus "device=3" --ipc host # shm-size 4g works fine 13 | runs-on: [self-hosted, linux, x64, research] 14 | steps: 15 | # checkout the pull request branch 16 | - uses: actions/checkout@v3 17 | with: 18 | repository: ${{ github.event.client_payload.pull_request.head.repo.full_name }} 19 | ref: ${{ github.event.client_payload.pull_request.head.ref }} 20 | path: research-contributions 21 | - uses: actions/checkout@v3 22 | with: 23 | repository: Project-MONAI/MONAI 24 | path: core 25 | - name: Cache weekly timestamp 26 | id: pip-cache 27 | run: | 28 | echo "datew=$(date '+%Y-%V')" >> $GITHUB_OUTPUT 29 | - name: Cache for pip 30 | uses: actions/cache@v3 31 | id: cache 32 | with: 33 | path: | 34 | ~/.cache/pip 35 | ~/.cache/torch 36 | 
key: docker-py3-pip-${{ steps.pip-cache.outputs.datew }} 37 | - name: Install the dependencies 38 | run: | 39 | which python 40 | pwd 41 | ls -al . 42 | python -m pip install --upgrade pip wheel 43 | pip uninstall -y monai 44 | pip uninstall -y monai 45 | pip uninstall -y monai-weekly 46 | pip uninstall -y monai-weekly 47 | cd core 48 | BUILD_MONAI=0 ./runtests.sh -b 49 | python -m pip install -r requirements-dev.txt 50 | python -m pip install --upgrade torch torchvision torchaudio 51 | rm -rf /github/home/.cache/torch/hub/mmars/ 52 | 53 | - name: Verify install 54 | run: | 55 | python -m pip list 56 | nvidia-smi 57 | python -c "import torch; print(torch.__version__); print('{} of GPUs available'.format(torch.cuda.device_count()))" 58 | python -c 'import torch; print(torch.rand(5,3, device=torch.device("cuda:0")))' 59 | 60 | - name: Auto3dseg latest algo 61 | shell: bash 62 | env: 63 | BUILD_MONAI: 0 64 | run: | 65 | echo "test latest algo" 66 | ls research-contributions/ 67 | cp -r research-contributions/auto3dseg/algorithm_templates core/ 68 | cd research-contributions && git log -1 && cd ../core 69 | pwd 70 | ls -ll 71 | export OMP_NUM_THREADS=4 72 | export MKL_NUM_THREADS=4 73 | export MONAI_TESTING_ALGO_TEMPLATE=algorithm_templates 74 | export PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=python 75 | python -m unittest -vvv tests.test_auto3dseg_ensemble 76 | python -m unittest -vvv tests.test_auto3dseg_hpo 77 | python -m unittest -vvv tests.test_integration_autorunner 78 | python -m unittest -vvv tests.test_integration_gpu_customization 79 | 80 | - name: Clean directory 81 | run: | 82 | rm -rf research-contributions 83 | rm -rf core 84 | 85 | - name: Add reaction 86 | uses: peter-evans/create-or-update-comment@v1 87 | with: 88 | token: ${{ secrets.PR_MAINTAIN_BOT }} 89 | repository: ${{ github.event.client_payload.github.payload.repository.full_name }} 90 | comment-id: ${{ github.event.client_payload.github.payload.comment.id }} 91 | reaction-type: rocket 92 | -------------------------------------------------------------------------------- /SwinMM/README.md: -------------------------------------------------------------------------------- 1 | # SwinMM: Masked Multi-view with Swin Transformers for 3D Medical Image Segmentation 2 | 3 |

4 | 5 |

6 | 7 | ## What is SwinMM? 8 | 9 | Masked Multi-view with Swin Transformers, dubbed SwinMM, is the first comprehensive multi-view pipeline for self-supervised medical image analysis. SwinMM yields competitive performance, significantly lower training costs, and higher data efficiency compared to recent state-of-the-art models. SwinMM consists of two key components. 10 | 11 | ### Pretrain 12 | 13 | In the pre-training stage, we introduce a masked multi-view encoder that is trained simultaneously on masked multi-view observations with a diverse set of proxy tasks. These tasks include image reconstruction, rotation prediction, contrastive learning, and a mutual learning paradigm that comprehensively leverages hidden multi-view information from 3D medical data by maximizing the consistency between predictions from different views. 14 | 15 |
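As a rough sketch of the mutual-learning idea (simplified shapes and an assumed helper, not the repository's exact code; the real term is `MutualLoss` in `Pretrain/losses/loss.py`), the consistency objective penalizes disagreement between two views' masked reconstructions of the same volume:

```python
import torch.nn.functional as F

def mutual_consistency(rec_a, rec_b_in_a, mask, mask_ratio=0.5):
    """Sketch: MSE between two views' reconstructions on masked voxels only.

    rec_a:       reconstruction from view A, shape (B, C, H, W, D)
    rec_b_in_a:  view B's reconstruction, already mapped back into A's frame
    mask:        binary mask of the voxels hidden from the encoder
    """
    m = mask.to(rec_a.dtype)
    # Restrict the penalty to masked voxels and rescale by the mask ratio,
    # mirroring the division by mask_ratio in the repository's losses.
    return F.mse_loss(rec_a * m, rec_b_in_a * m) / mask_ratio
```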

16 | 17 |

18 | 19 | ### Finetune 20 | 21 | In the fine-tuning stage, a cross-view decoder is developed to aggregate the multi-view information using a novel cross-view attention block. 22 | 23 |
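As a minimal sketch (assumed names and shapes, not the repository's implementation), a cross-view attention block lets tokens from one view query the features of another view and fuses the result residually:

```python
import torch.nn as nn

class CrossViewAttentionSketch(nn.Module):
    """Illustrative only: queries from view A attend to keys/values of view B."""

    def __init__(self, dim: int, num_heads: int = 8):
        super().__init__()
        self.attn = nn.MultiheadAttention(dim, num_heads, batch_first=True)
        self.norm = nn.LayerNorm(dim)

    def forward(self, feats_a, feats_b):
        # feats_a, feats_b: (batch, num_tokens, dim) token sequences per view
        fused, _ = self.attn(query=feats_a, key=feats_b, value=feats_b)
        return self.norm(feats_a + fused)  # residual fusion of the two views
```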

24 | 25 |

26 | 27 | ## Pre-trained Models 28 | 29 | We present two checkpoints here: 30 | 31 | - [pretrained_ckpt.pt](https://drive.google.com/file/d/1VFT1Oz5UGjAaXLdWAAdeD0mVeLyCQ0ry/view?usp=sharing) 32 | - [WORD_finetuned_ckpt.pt](https://drive.google.com/file/d/1VFT1Oz5UGjAaXLdWAAdeD0mVeLyCQ0ry/view?usp=sharing) 33 | 34 | Here is a sample testing result on WORD: 35 | 36 |

37 | 38 |

39 | 40 | ## Installation 41 | 42 | Please check [INSTALL.md](INSTALL.md) for installation instructions. 43 | 44 | ## Evaluation 45 | 46 | Testing can be done using the following scripts. Please change `pretrained_dir` and `pretrained_model_name` according to the path of the checkpoint you would like to test, and change `data_dir` and `json_list` according to the datasets. 47 | 48 | ```bash 49 | cd WORD 50 | python test_parrallel.py --pretrained_dir ./runs/multiview_101616/ \ 51 | --pretrained_model_name model.pt \ 52 | --distributed \ 53 | --data_dir ./dataset/dataset12_WORD/ \ 54 | --json_list dataset12_WORD.json 55 | ``` 56 | 57 | ## Training 58 | 59 | Please check [TRAINING.md](TRAINING.md) for training instructions. 60 | 61 | ## Acknowledgment 62 | 63 | This work is partially supported by Google Cloud Research Credits program. 64 | This Repo is based on [SwinUNETR](https://github.com/Project-MONAI/research-contributions/tree/main/SwinUNETR), [MONAI](https://monai.io/) and [bagua](https://github.com/BaguaSys/bagua). 65 | 66 | ## Citation 67 | 68 | If you find this repository helpful, please consider citing: 69 | 70 | ``` 71 | @inproceedings{wang2023SwinMM, 72 | title = {SwinMM: Masked Multi-view with Swin Transformers for 3D Medical Image Segmentation}, 73 | author = {Wang, Yiqing and Li, Zihan and Mei, Jieru and Wei, Zihao and Liu, Li and Wang, Chen and Sang, Shengtian and Yuille, Alan and Xie, Cihang and Zhou, Yuyin}, 74 | booktitle = {MICCAI}, 75 | year = {2023} 76 | } 77 | 78 | @article{cardoso2022monai, 79 | title={Monai: An open-source framework for deep learning in healthcare}, 80 | author={Cardoso, M Jorge and Li, Wenqi and Brown, Richard and Ma, Nic and Kerfoot, Eric and Wang, Yiheng and Murrey, Benjamin and Myronenko, Andriy and Zhao, Can and Yang, Dong and others}, 81 | journal={arXiv preprint arXiv:2211.02701}, 82 | year={2022} 83 | } 84 | ``` 85 | -------------------------------------------------------------------------------- /auto3dseg/algorithm_templates/dints/configs/hyper_parameters.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | bundle_root: null 3 | ckpt_path: "$@bundle_root + '/model_fold' + str(@fold)" 4 | data_file_base_dir: null 5 | data_list_file_path: null 6 | fold: 0 7 | 8 | mlflow_tracking_uri: "$@bundle_root + '/model_fold' + str(@fold) + '/mlruns/'" 9 | mlflow_experiment_name: "Auto3DSeg" 10 | show_cache_progress: false 11 | 12 | training: 13 | # hyper-parameters 14 | amp: true 15 | auto_scale_allowed: true 16 | data_list_key: null 17 | epoch_divided_factor: 36 18 | input_channels: null 19 | learning_rate: 0.2 20 | log_output_file: "$@bundle_root + '/model_fold' + str(@fold) + '/training.log'" 21 | num_images_per_batch: 2 22 | num_epochs: 200 23 | num_epochs_per_validation: 2 24 | num_crops_per_image: 1 25 | num_patches_per_iter: 1 26 | num_sw_batch_size: null 27 | num_workers: 8 28 | num_workers_validation: 2 29 | num_cache_workers: 8 30 | output_classes: null 31 | overlap_ratio_train: 0.125 32 | overlap_ratio: 0.625 33 | roi_size: null 34 | roi_size_valid: null 35 | random_seed: 0 36 | resample_resolution: null 37 | sw_input_on_cpu: false 38 | softmax: true 39 | valid_at_orig_resolution_at_last: true 40 | valid_at_orig_resolution_only: false 41 | 42 | adapt_valid_mode: true 43 | adapt_valid_progress_percentages: [10, 40, 70] 44 | adapt_valid_num_epochs_per_validation: [2, 4, 2] 45 | 46 | early_stop_mode: true 47 | early_stop_delta: 0 48 | early_stop_patience: 20 49 | 50 | cache_rate: 0 51 | 
train_cache_rate: "@training#cache_rate" 52 | validate_cache_rate: "@training#cache_rate" 53 | transforms: 54 | resample_resolution: "@training#resample_resolution" 55 | 56 | loss: 57 | _target_: DiceFocalLoss 58 | include_background: true 59 | to_onehot_y: "@training#softmax" 60 | softmax: "@training#softmax" 61 | sigmoid: "$not @training#softmax" 62 | squared_pred: true 63 | batch: true 64 | smooth_nr: 1.0e-05 65 | smooth_dr: 1.0e-05 66 | 67 | optimizer: 68 | _target_: torch.optim.SGD 69 | lr: "@training#learning_rate" 70 | momentum: 0.9 71 | weight_decay: 4.0e-05 72 | 73 | lr_scheduler: 74 | _target_: torch.optim.lr_scheduler.PolynomialLR 75 | optimizer: "@training#optimizer" 76 | power: 0.5 77 | total_iters: '$@training#num_epochs // @training#num_epochs_per_validation + 1' 78 | 79 | # fine-tuning 80 | finetune: 81 | activate_finetune: false 82 | pretrained_ckpt_name: "$@bundle_root + '/model_fold' + str(@fold) + '/best_metric_model.pt'" 83 | 84 | overwrite: 85 | learning_rate: 0.001 86 | lr_scheduler: 87 | _target_: torch.optim.lr_scheduler.ConstantLR 88 | optimizer: "@training#optimizer" 89 | factor: 1.0 90 | total_iters: '$@training#num_epochs // @training#num_epochs_per_validation + 1' 91 | adapt_valid_mode: false 92 | early_stop_mode: false 93 | num_epochs: 20 94 | num_epochs_per_validation: 1 95 | 96 | # validation 97 | validate: 98 | ckpt_name: "$@bundle_root + '/model_fold' + str(@fold) + '/best_metric_model.pt'" 99 | log_output_file: "$@bundle_root + '/model_fold' + str(@fold) + '/validation.log'" 100 | save_mask: true 101 | data_list_key: null 102 | output_path: "$@bundle_root + '/prediction_fold' + str(@fold)" 103 | 104 | # inference 105 | infer: 106 | ckpt_name: "$@bundle_root + '/model_fold' + str(@fold) + '/best_metric_model.pt'" 107 | save_prob: false 108 | fast: true 109 | data_list_key: testing 110 | log_output_file: "$@bundle_root + '/model_fold' + str(@fold) + '/inference.log'" 111 | output_path: "$@bundle_root + '/prediction_' + @infer#data_list_key" 112 | -------------------------------------------------------------------------------- /SwinMM/WORD/utils/dataset_in_memory.py: -------------------------------------------------------------------------------- 1 | """Cache the data using redis. 2 | 3 | TODO(meijieru): zeromp may be better. 
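Deterministic transforms are applied once per item and cached (pickled and
optionally compressed) in Redis through bagua's CacheLoader; randomizable
transforms still run on every __getitem__.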
4 | """ 5 | 6 | from typing import Callable, MutableMapping, Optional, Sequence, Union 7 | 8 | import bagua.torch_api.contrib.cache_loader as bagua_cache_loader 9 | import torch 10 | import torch.utils.data.dataset as torch_dataset 11 | 12 | import monai.data as monai_data 13 | import monai.transforms as monai_transforms 14 | 15 | _ALL_DATASET_NAMES = set() 16 | _SERIALIZATION_HIJACKED = False 17 | 18 | 19 | def hijack_bagua_serialization(method: str): 20 | """Replace bagua serialization.""" 21 | global _SERIALIZATION_HIJACKED 22 | if _SERIALIZATION_HIJACKED: 23 | raise RuntimeError("Already hijacked.") 24 | 25 | import pickle 26 | 27 | if method == "lz4": 28 | import lz4 29 | 30 | compress, decompress = lz4.frame.compress, lz4.frame.decompress 31 | elif method == "lzma": 32 | import pylzma as lzma 33 | 34 | compress, decompress = lzma.compress, lzma.decompress 35 | elif method == "zlib": 36 | import zlib 37 | 38 | compress, decompress = zlib.compress, zlib.decompress 39 | else: 40 | raise ValueError(f"Unknown compress method: {method}") 41 | 42 | bagua_cache_loader.serialize = lambda val: compress(pickle.dumps(val)) 43 | bagua_cache_loader.deserialize = lambda val: pickle.loads(decompress(val)) 44 | _SERIALIZATION_HIJACKED = True 45 | 46 | 47 | def is_deterministic_transform(transform) -> bool: 48 | return not ( 49 | isinstance(transform, monai_transforms.Randomizable) or not isinstance(transform, monai_transforms.Transform) 50 | ) 51 | 52 | 53 | class CachedDataset(torch_dataset.Dataset): 54 | def __init__( 55 | self, 56 | data: Sequence, 57 | transform: Optional[Union[Sequence[Callable], Callable]] = None, 58 | as_contiguous: bool = True, 59 | backend: str = "redis", 60 | hosts: Optional[Sequence[MutableMapping[str, str]]] = None, 61 | dataset_name: str = "", 62 | writer_buffer_size: int = 20, 63 | **kwargs, 64 | ) -> None: 65 | super().__init__() 66 | 67 | if hosts is None: 68 | raise ValueError("We don't init bagua, have to manually launch redis") 69 | 70 | # NOTE(meijieru): check if the dataset name is unique, to avoid 71 | # potential confliction. 
72 | if not dataset_name or dataset_name in _ALL_DATASET_NAMES: 73 | raise ValueError("Must have an unique name for each dataset.") 74 | _ALL_DATASET_NAMES.add(dataset_name) 75 | 76 | self._dataset = monai_data.Dataset(data=data) 77 | self._cache_loader = bagua_cache_loader.CacheLoader( 78 | backend, dataset_name, writer_buffer_size, hosts=hosts, **kwargs 79 | ) 80 | self.transform = transform 81 | self.as_contiguous = as_contiguous 82 | 83 | def __len__(self): 84 | return len(self._dataset) 85 | 86 | def _apply_non_deterministic_transform(self, item): 87 | for trans in self.transform.transforms: # type:ignore 88 | if not is_deterministic_transform(trans): 89 | item = monai_transforms.apply_transform(trans, item) 90 | return item 91 | 92 | def _apply_deterministic_transform(self, item): 93 | for trans in self.transform.transforms: # type:ignore 94 | # execute all the deterministic transforms 95 | if not is_deterministic_transform(trans): 96 | break 97 | item = monai_transforms.apply_transform(trans, item) 98 | if self.as_contiguous: 99 | item = monai_transforms.convert_to_contiguous(item, memory_format=torch.contiguous_format) 100 | return item 101 | 102 | def _load_item(self, index: int): 103 | return self._apply_deterministic_transform(self._dataset[index]) 104 | 105 | def __getitem__(self, item): 106 | cached_item = self._cache_loader.get(item, self._load_item) 107 | return self._apply_non_deterministic_transform(cached_item) 108 | -------------------------------------------------------------------------------- /SwinMM/Pretrain/losses/loss.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 - 2022 MONAI Consortium 2 | # Licensed under the Apache License, Version 2.0 (the "License"); 3 | # you may not use this file except in compliance with the License. 4 | # You may obtain a copy of the License at 5 | # http://www.apache.org/licenses/LICENSE-2.0 6 | # Unless required by applicable law or agreed to in writing, software 7 | # distributed under the License is distributed on an "AS IS" BASIS, 8 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 | # See the License for the specific language governing permissions and 10 | # limitations under the License. 
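# Pretraining objectives for SwinMM:
#   - ContrastLoss: an NT-Xent style contrastive term over paired crops
#     (temperature-scaled cosine similarity with a negative-pair mask),
#   - MutualLoss: masked MSE between reconstructions of the same volume from
#     two different views, rescaled by the mask ratio,
#   - Loss: rotation cross-entropy + contrastive + masked reconstruction,
#     with optional per-sample pixel normalization (norm_pix_loss).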
11 | 12 | import torch 13 | from torch.nn import functional as F 14 | 15 | 16 | class ContrastLoss(torch.nn.Module): 17 | def __init__(self, args, batch_size, temperature=0.5): 18 | super().__init__() 19 | device = torch.device(f"cuda:{args.local_rank}") 20 | self.batch_size = batch_size 21 | self.register_buffer("temp", torch.tensor(temperature).to(torch.device(f"cuda:{args.local_rank}"))) 22 | self.register_buffer("neg_mask", (~torch.eye(batch_size * 2, batch_size * 2, dtype=bool).to(device)).float()) 23 | 24 | def forward(self, x_i, x_j): 25 | z_i = F.normalize(x_i, dim=1) 26 | z_j = F.normalize(x_j, dim=1) 27 | z = torch.cat([z_i, z_j], dim=0) 28 | sim = F.cosine_similarity(z.unsqueeze(1), z.unsqueeze(0), dim=2) 29 | sim_ij = torch.diag(sim, self.batch_size) 30 | sim_ji = torch.diag(sim, -self.batch_size) 31 | pos = torch.cat([sim_ij, sim_ji], dim=0) 32 | nom = torch.exp(pos / self.temp) 33 | denom = self.neg_mask * torch.exp(sim / self.temp) 34 | return torch.sum(-torch.log(nom / torch.sum(denom, dim=1))) / (2 * self.batch_size) 35 | 36 | 37 | class MutualLoss(torch.nn.Module): 38 | def __init__(self, args): 39 | super().__init__() 40 | self.alpha = 1.0 41 | self.mask_ratio = args.mask_ratio 42 | self.recon_loss_2 = torch.nn.MSELoss().cuda() 43 | 44 | def __call__(self, rec1, rec2, mask): 45 | mask = mask.to(dtype=rec1.dtype) 46 | rec1, rec2 = [val * mask for val in [rec1, rec2]] 47 | 48 | recon_loss = self.recon_loss_2(rec1, rec2) / self.mask_ratio 49 | return self.alpha * recon_loss 50 | 51 | 52 | class Loss(torch.nn.Module): 53 | def __init__(self, batch_size, args): 54 | super().__init__() 55 | self.rot_loss = torch.nn.CrossEntropyLoss().cuda() 56 | self.recon_loss = torch.nn.L1Loss().cuda() 57 | self.recon_loss_2 = torch.nn.MSELoss().cuda() 58 | self.contrast_loss = ContrastLoss(args, batch_size).cuda() 59 | self.alpha1 = 1.0 60 | self.alpha2 = 1.0 61 | self.alpha3 = 1.0 62 | self.norm_pix_loss = args.norm_pix_loss 63 | self.mask_ratio = args.mask_ratio 64 | 65 | def __call__( 66 | self, 67 | output_rot, 68 | target_rot, 69 | output_contrastive, 70 | target_contrastive, 71 | output_recons, 72 | target_recons, 73 | mask, 74 | only_mae=False, 75 | ): 76 | B, C, H, W, D = output_recons.shape 77 | target_recons = target_recons.reshape(B, C, -1) 78 | 79 | if self.norm_pix_loss: 80 | mean = target_recons.mean(dim=-1, keepdim=True) 81 | var = target_recons.var(dim=-1, keepdim=True) 82 | target_recons = (target_recons - mean) / (var + 1.0e-6) ** 0.5 83 | target_recons = target_recons.reshape(B, C, H, W, D) 84 | # masked voxels. 85 | mask = mask.to(dtype=target_recons.dtype)[None, ...] 86 | target_recons, output_recons = [val * mask for val in [target_recons, output_recons]] 87 | recon_loss = self.recon_loss_2(output_recons, target_recons) / self.mask_ratio 88 | recon_loss = self.alpha3 * recon_loss 89 | if only_mae: 90 | return recon_loss 91 | contrast_loss = self.alpha2 * self.contrast_loss(output_contrastive, target_contrastive) 92 | rot_loss = self.alpha1 * self.rot_loss(output_rot, target_rot) 93 | total_loss = rot_loss + contrast_loss + recon_loss 94 | 95 | return total_loss, (rot_loss, contrast_loss, recon_loss) 96 | -------------------------------------------------------------------------------- /SwinMM/INSTALL.md: -------------------------------------------------------------------------------- 1 | # Installation 2 | 3 | We provide installation instructions here. 
4 | 5 | ## Setup 6 | 7 | ### Using Docker 8 | 9 | The simplest way to use SwinMM is through our docker image [`swinmm`](https://drive.google.com/file/d/1EGSoqN-HphyMV_gKUq-g7_BSwTTg35oA/view?usp=sharing), which contains all the needed dependencies. Download `swinmm.tar` into the `SwinMM` directory and run the following commands: 10 | 11 | ```bash 12 | cd SwinMM 13 | docker import - swinmm < swinmm.tar 14 | docker run --runtime=nvidia --gpus=all -m="800g" --shm-size="32g" -itd --name swinmm -v ./:/volume swinmm /bin/bash 15 | docker exec -it swinmm /bin/bash 16 | conda activate SwinMM 17 | ``` 18 | 19 | To use Docker, make sure you have installed `docker` and `nvidia-docker`. 20 | 21 | ### Manual 22 | 23 | For fast dataset loading, we require users to install the Redis database; for example, on Ubuntu: `sudo apt-get install redis` 24 | 25 | We also recommend installing PyTorch from the official website. 26 | 27 | Two packages should be installed manually because of their complicated dependencies: [bagua==0.9.2](https://github.com/BaguaSys/bagua), [monai==0.9.0](https://docs.monai.io/en/latest/installation.html#installing-the-recommended-dependencies) 28 | 29 | The remaining packages can be installed through `pip install -r requirements.txt` 30 | 31 | ## Datasets 32 | 33 | Our pre-training dataset includes 5833 volumes from 8 public datasets: 34 | 35 | - [AbdomenCT-1K](https://github.com/JunMa11/AbdomenCT-1K) 36 | - [BTCV](https://www.synapse.org/#!Synapse:syn3193805/wiki/217789) 37 | - [MSD](http://medicaldecathlon.com/) 38 | - [TCIACovid19](https://wiki.cancerimagingarchive.net/display/Public/CT+Images+in+COVID-19/) 39 | - [WORD](https://github.com/HiLab-git/WORD) 40 | - [TCIA-Colon](https://wiki.cancerimagingarchive.net/display/Public/CT+COLONOGRAPHY/) 41 | - [LiDC](https://wiki.cancerimagingarchive.net/display/Public/LIDC-IDRI/) 42 | - [HNSCC](https://wiki.cancerimagingarchive.net/display/Public/HNSCC) 43 | 44 | We choose two popular datasets to test the downstream segmentation performance: 45 | 46 | - [WORD](https://github.com/HiLab-git/WORD) (The Whole abdominal Organ Dataset) 47 | - [ACDC](https://www.creatis.insa-lyon.fr/Challenge/acdc/#challenge/584e75606a3c77492fe91bba) (Automated Cardiac Diagnosis Challenge) 48 | 49 | The json files can be downloaded from [pretrain_jsons](https://drive.google.com/file/d/1gJThxBvnJnc2_N1nFX7xywjFWFw7DSEY/view?usp=sharing) and [word_jsons](https://drive.google.com/file/d/1Td4T_k2QlEcTETz9TERGsVdOyebD5ULv/view?usp=sharing). 50 | 51 | The datasets are organized as below: 52 | 53 | ```text 54 | SwinMM 55 | ├── WORD 56 | │ └── dataset 57 | │ └── dataset12_WORD 58 | │ ├── imagesTr 59 | │ ├── imagesTs 60 | │ ├── imagesVal 61 | │ ├── labelsTr 62 | │ ├── labelsTs 63 | │ ├── labelsVal 64 | │ └── dataset12_WORD.json 65 | └── Pretrain 66 | ├── dataset 67 | │ ├── dataset00_BTCV 68 | │ ├── dataset02_Heart 69 | │ ├── dataset03_Liver 70 | │ ├── dataset04_Hippocampus 71 | │ ├── dataset06_Lung 72 | │ ├── dataset07_Pancreas 73 | │ ├── dataset08_HepaticVessel 74 | │ ├── dataset09_Spleen 75 | │ ├── dataset10_Colon 76 | │ ├── dataset11_TCIAcovid19 77 | │ ├── dataset12_WORD 78 | │ ├── dataset13_AbdomenCT-1K 79 | │ ├── dataset_HNSCC 80 | │ ├── dataset_TCIAcolon 81 | │ └── dataset_LIDC 82 | └── jsons 83 | ├── dataset00_BTCV.json 84 | ├── dataset01_BrainTumour.json 85 | ├── dataset02_Heart.json 86 | ├── dataset03_Liver.json 87 | ├── dataset04_Hippocampus.json 88 | ├── dataset05_Prostate.json 89 | ├── dataset06_Lung.json 90 | ├── dataset07_Pancreas.json 91 
| ├── dataset08_HepaticVessel.json 92 | ├── dataset09_Spleen.json 93 | ├── dataset10_Colon.json 94 | ├── dataset11_TCIAcovid19.json 95 | ├── dataset12_WORD.json 96 | ├── dataset13_AbdomenCT-1K.json 97 | ├── dataset_HNSCC.json 98 | ├── dataset_TCIAcolon.json 99 | └── dataset_LIDC.json 100 | 101 | ``` 102 | -------------------------------------------------------------------------------- /SwinUNETR/BRATS21/optimizers/lr_scheduler.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 - 2021 MONAI Consortium 2 | # Licensed under the Apache License, Version 2.0 (the "License"); 3 | # you may not use this file except in compliance with the License. 4 | # You may obtain a copy of the License at 5 | # http://www.apache.org/licenses/LICENSE-2.0 6 | # Unless required by applicable law or agreed to in writing, software 7 | # distributed under the License is distributed on an "AS IS" BASIS, 8 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 | # See the License for the specific language governing permissions and 10 | # limitations under the License. 11 | 12 | import math 13 | import warnings 14 | from typing import List 15 | 16 | from torch.optim import Adam, Optimizer 17 | from torch.optim.lr_scheduler import _LRScheduler 18 | 19 | 20 | class LinearWarmupCosineAnnealingLR(_LRScheduler): 21 | def __init__( 22 | self, 23 | optimizer: Optimizer, 24 | warmup_epochs: int, 25 | max_epochs: int, 26 | warmup_start_lr: float = 0.0, 27 | eta_min: float = 0.0, 28 | last_epoch: int = -1, 29 | ) -> None: 30 | """ 31 | Args: 32 | optimizer (Optimizer): Wrapped optimizer. 33 | warmup_epochs (int): Maximum number of iterations for linear warmup 34 | max_epochs (int): Maximum number of iterations 35 | warmup_start_lr (float): Learning rate to start the linear warmup. Default: 0. 36 | eta_min (float): Minimum learning rate. Default: 0. 37 | last_epoch (int): The index of last epoch. Default: -1. 
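        Example (illustrative usage, not taken from the repository):
            >>> import torch
            >>> net = torch.nn.Linear(4, 4)
            >>> optimizer = torch.optim.SGD(net.parameters(), lr=0.1)
            >>> scheduler = LinearWarmupCosineAnnealingLR(optimizer, warmup_epochs=10, max_epochs=100)
            >>> for epoch in range(100):
            ...     pass  # train for one epoch, then step the schedule
            ...     scheduler.step()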
38 | """ 39 | self.warmup_epochs = warmup_epochs 40 | self.max_epochs = max_epochs 41 | self.warmup_start_lr = warmup_start_lr 42 | self.eta_min = eta_min 43 | 44 | super(LinearWarmupCosineAnnealingLR, self).__init__(optimizer, last_epoch) 45 | 46 | def get_lr(self) -> List[float]: 47 | """ 48 | Compute learning rate using chainable form of the scheduler 49 | """ 50 | if not self._get_lr_called_within_step: 51 | warnings.warn( 52 | "To get the last learning rate computed by the scheduler, " "please use `get_last_lr()`.", UserWarning 53 | ) 54 | 55 | if self.last_epoch == 0: 56 | return [self.warmup_start_lr] * len(self.base_lrs) 57 | elif self.last_epoch < self.warmup_epochs: 58 | return [ 59 | group["lr"] + (base_lr - self.warmup_start_lr) / (self.warmup_epochs - 1) 60 | for base_lr, group in zip(self.base_lrs, self.optimizer.param_groups) 61 | ] 62 | elif self.last_epoch == self.warmup_epochs: 63 | return self.base_lrs 64 | elif (self.last_epoch - 1 - self.max_epochs) % (2 * (self.max_epochs - self.warmup_epochs)) == 0: 65 | return [ 66 | group["lr"] 67 | + (base_lr - self.eta_min) * (1 - math.cos(math.pi / (self.max_epochs - self.warmup_epochs))) / 2 68 | for base_lr, group in zip(self.base_lrs, self.optimizer.param_groups) 69 | ] 70 | 71 | return [ 72 | (1 + math.cos(math.pi * (self.last_epoch - self.warmup_epochs) / (self.max_epochs - self.warmup_epochs))) 73 | / ( 74 | 1 75 | + math.cos( 76 | math.pi * (self.last_epoch - self.warmup_epochs - 1) / (self.max_epochs - self.warmup_epochs) 77 | ) 78 | ) 79 | * (group["lr"] - self.eta_min) 80 | + self.eta_min 81 | for group in self.optimizer.param_groups 82 | ] 83 | 84 | def _get_closed_form_lr(self) -> List[float]: 85 | """ 86 | Called when epoch is passed as a param to the `step` function of the scheduler. 
87 | """ 88 | if self.last_epoch < self.warmup_epochs: 89 | return [ 90 | self.warmup_start_lr + self.last_epoch * (base_lr - self.warmup_start_lr) / (self.warmup_epochs - 1) 91 | for base_lr in self.base_lrs 92 | ] 93 | 94 | return [ 95 | self.eta_min 96 | + 0.5 97 | * (base_lr - self.eta_min) 98 | * (1 + math.cos(math.pi * (self.last_epoch - self.warmup_epochs) / (self.max_epochs - self.warmup_epochs))) 99 | for base_lr in self.base_lrs 100 | ] 101 | -------------------------------------------------------------------------------- /DAE/Pretrain_full_contrast/scripts/ablation_m20.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | ##################### Setting up hardware 4 | GPU_MEM=32g 5 | N_GPU=8 6 | NPROC_PER_NODE=8 7 | INSTANCE="dgxa100.40g.${N_GPU}.norm" 8 | INSTANCE="dgx1v.${GPU_MEM}.${N_GPU}.norm.beta" 9 | INSTANCE="dgx1v.${GPU_MEM}.${N_GPU}.norm" 10 | DOCKER_IMAGE="nvidian/dlmed/clara-train-sdk:v4.0" 11 | ##################### 12 | 13 | ##################### Datasets 14 | NGC_DATASET_ID1=81073 # luna 15 | NGC_DATASET_ID2=80863 # covid 16 | NGC_DATASET_ID3=81515 # HNSCC 17 | NGC_DATASET_ID4=81798 # colon 18 | NGC_DATASET_ID5=80329 # FLARE 2021 (id 80329) 19 | NGC_DATASET_ID6=75547 # LiTS 2017 (id 75547) 20 | NGC_DATASET_ID7=80490 # HECKTOR 2021 (id 80490) 21 | NGC_DATASET_ID8=60659 # LiDC 22 | NGC_DATASET_ID8=95352 # LiDC 23 | ##################### 24 | 25 | FOLDER_PATH="/workspace/Swin_UNETR/Pretrain_code/pretrain_framework" 26 | 27 | 28 | # Maximum batch sizes best on memory is specified below 29 | 30 | BATCH_SIZE=12 # for A100 w checkpoint 31 | BATCH_SIZE=10 # for V100 w checkpoint 32 | BATCH_SIZE=6 # for A100 33 | BATCH_SIZE=4 # for V100 34 | 35 | 36 | ##################### Important hyper-parameters 37 | # masking ratio in the image 38 | LOSS_TYPE='all_img' # this loss works on all pixels 39 | LOSS_TYPE='mask_only' # this loss only works on masked patches, which is default 40 | DECODER='deconv' # this decoder uses a series of deconv layers without anything else 41 | DECODER='pixel_shuffle' # this decoder only uses a deconv layer + pixel shuffling with stride 32 (maybe sub-optimal) 42 | DECODER='upsample' # this decoder uses unet decoder blocks (2 conv blocks) progressively but no skip connections (maybe computationally heavy) 43 | DECODER='vae2' # this decoder uses conv + upsamling layers progressively but no skip connections 44 | 45 | MODEL_TYPE='swin' # this is a swin transformer encoder but has no skip connections. 
Can be used with the different decoder types above 46 | MODEL_TYPE='swin_skip' # this is a new architecture which uses : (1) conv + upsampling layers for decoder (2) skip connections between encoder and decoder 47 | # when using this model_type, there is no need to choose another decoder type, since decoder is fixed 48 | 49 | LOSS_TYPE='mask_only' 50 | 51 | VERSION="new_b6_ep200_m50_p16_1node_lr2e4_vae_skip_v1_l2" 52 | VERSION="new_b6_ep200_m50_p16_1node_lr2e4_vae_skip_v1_l2_cache1" 53 | VERSION="new_b6_ep200_m50_p16_1node_lr2e4_vae_skip_v1_mask_only" 54 | 55 | VERSION="new_b6_ep800_m60_p16_1node_lr2e4_vae_skip_v1_mask_only_new_embed" 56 | VERSION="ablation_m20" 57 | 58 | EXP_NAME="pretrain.swin_unetr.${VERSION}" 59 | OUTPUT_PATH='./output/'${EXP_NAME} 60 | LOG_PATH='./logdir/'${EXP_NAME} 61 | NAME="ml-model.${EXP_NAME}.${N_GPU}GPU.${GPU_MEM}" 62 | 63 | 64 | IMG_SIZE=96 65 | SW_BATCH_SIZE=1 66 | EPOCHS=100 67 | BASE_LR=2e-4 68 | ##################### 69 | 70 | WARMUP_EPOCHS=10 71 | WARMUP_LR=1e-6 72 | MIN_LR=1e-5 73 | WEIGHT_DECAY=0.05 74 | SAVE_FREQ=5 75 | PRINT_FREQ=5 76 | CACHE_RATE=0.7 77 | 78 | MASK_PATCH_SIZE=16 79 | MASK_RATIO=0.20 80 | 81 | 82 | ngc batch run --name "${NAME}" \ 83 | --preempt RUNONCE --ace nv-us-west-2 --instance ${INSTANCE} \ 84 | --result /results \ 85 | --image ${DOCKER_IMAGE} \ 86 | --org nvidian --team "dlmed" \ 87 | --datasetid ${NGC_DATASET_ID1}:/dataset/dataset1\ 88 | --datasetid ${NGC_DATASET_ID2}:/dataset/dataset2\ 89 | --datasetid ${NGC_DATASET_ID3}:/dataset/dataset3\ 90 | --datasetid ${NGC_DATASET_ID4}:/dataset/dataset4\ 91 | --datasetid ${NGC_DATASET_ID5}:/dataset/dataset5\ 92 | --datasetid ${NGC_DATASET_ID6}:/dataset/dataset6\ 93 | --datasetid ${NGC_DATASET_ID7}:/dataset/dataset7\ 94 | --datasetid ${NGC_DATASET_ID8}:/dataset/dataset8\ 95 | --workspace pretrain-dlmed:/workspace:RW \ 96 | --commandline "cd ${FOLDER_PATH} ; pip install -r requirements.txt ; pip install monai==0.8.0; python -m torch.distributed.launch --nproc_per_node=$NPROC_PER_NODE \ 97 | main.py --batch_size=${BATCH_SIZE} --sw_batch_size=${SW_BATCH_SIZE} --mask_ratio=${MASK_RATIO} \ 98 | --epoch=${EPOCHS} --mask_patch_size=${MASK_PATCH_SIZE} --img_size=${IMG_SIZE} \ 99 | --min_lr=${MIN_LR} --warmpup_epoch=${WARMUP_EPOCHS} --decoder=${DECODER} --loss_type=${LOSS_TYPE} --base_lr=${BASE_LR} --warmup_lr=${WARMUP_LR} \ 100 | --weight_decay=${WEIGHT_DECAY} --cache_dataset --cache_rate=${CACHE_RATE} --model_type=${MODEL_TYPE} --save_freq=${SAVE_FREQ} \ 101 | --print_freq=${PRINT_FREQ} --log_dir=${LOG_PATH} --output=${OUTPUT_PATH} --thread_loader" 102 | 103 | # --use_grad_checkpoint for gradient checkpointing ( allows for increasing batch size ) 104 | #--iso_spacing for resampling isotropic spacing 105 | --------------------------------------------------------------------------------