├── LICENSE ├── README.md ├── configs ├── base_config.yaml ├── baseline.yaml ├── baseline │ ├── byol_100.yaml │ ├── byol_30.yaml │ ├── chexpert_100.yaml │ ├── chexpert_30.yaml │ ├── pixelpro_100.yaml │ ├── pixelpro_30.yaml │ ├── resnet50.yaml │ ├── simclr_100.yaml │ └── simclr_30.yaml ├── baseline_fine_tune.yaml ├── callback │ ├── checkpoint.yaml │ ├── early_stopping.yaml │ ├── eval_checkpoint.yaml │ ├── eval_early_stopping.yaml │ └── upload_config.yaml ├── dataset │ ├── covid_rural.yaml │ ├── mimic-cxr-img_ap-pa_find-impr.yaml │ ├── mimic-cxr-img_ap-pa_find-impr_03.yaml │ ├── mimic-cxr_ap-pa_find-impr.yaml │ ├── mimic-cxr_ap-pa_find-impr_03.yaml │ ├── nih-cxr.yaml │ ├── nih-cxr_seg.yaml │ ├── object-cxr.yaml │ ├── object-cxr_seg.yaml │ ├── rsna.yaml │ ├── rsna_001.yaml │ ├── rsna_01.yaml │ ├── rsna_seg.yaml │ ├── rsna_seg_001.yaml │ ├── rsna_seg_01.yaml │ └── siim_pneumothorax.yaml ├── evaluation_model │ ├── covid_linear_frozen.yaml │ ├── covid_unet_finetune.yaml │ ├── covid_unet_frozen.yaml │ ├── nih_seg_frozen.yaml │ ├── object_finetune.yaml │ ├── object_finetune_full.yaml │ ├── object_frozen.yaml │ ├── object_frozen_full.yaml │ ├── object_seg_frozen.yaml │ ├── object_seg_frozen_full.yaml │ ├── pneumothorax_unet_finetune.yaml │ ├── pneumothorax_unet_finetune_full.yaml │ ├── pneumothorax_unet_frozen.yaml │ ├── pneumothorax_unet_frozen_full.yaml │ ├── rsna_finetune.yaml │ ├── rsna_finetune_1.yaml │ ├── rsna_finetune_full.yaml │ ├── rsna_frozen.yaml │ ├── rsna_frozen_1.yaml │ ├── rsna_frozen_full.yaml │ ├── rsna_seg_frozen.yaml │ ├── rsna_seg_frozen_1.yaml │ └── rsna_seg_frozen_full.yaml ├── experiment │ ├── CLIP_100.yaml │ ├── CLIP_30.yaml │ ├── ConVIRT_100.yaml │ ├── ConVIRT_30.yaml │ ├── LoVT_100.yaml │ ├── LoVT_30.yaml │ ├── ablation │ │ ├── convirt_lr.yaml │ │ ├── g_only.yaml │ │ ├── g_single_sent_att.yaml │ │ ├── no_coslr.yaml │ │ ├── no_g.yaml │ │ ├── no_l.yaml │ │ ├── no_spatial.yaml │ │ ├── no_weight.yaml │ │ └── no_weight_no_att.yaml │ └── eval │ │ ├── baseline_random.yaml │ │ ├── baseline_random_chexpert_finetune.yaml │ │ ├── baseline_random_chexpert_linear.yaml │ │ ├── baseline_random_covid_lin_frozen.yaml │ │ ├── baseline_random_covid_unet_finetune.yaml │ │ ├── baseline_random_covid_unet_frozen.yaml │ │ ├── baseline_random_nih_seg_frozen.yaml │ │ ├── baseline_random_object_finetune_full.yaml │ │ ├── baseline_random_object_frozen_full.yaml │ │ ├── baseline_random_object_seg_frozen_full.yaml │ │ ├── baseline_random_pneumo_unet_finetune.yaml │ │ ├── baseline_random_pneumo_unet_finetune_full.yaml │ │ ├── baseline_random_pneumo_unet_frozen.yaml │ │ ├── baseline_random_pneumo_unet_frozen_full.yaml │ │ ├── baseline_random_rsna_finetune.yaml │ │ ├── baseline_random_rsna_finetune_1.yaml │ │ ├── baseline_random_rsna_finetune_full.yaml │ │ ├── baseline_random_rsna_frozen.yaml │ │ ├── baseline_random_rsna_frozen_1.yaml │ │ ├── baseline_random_rsna_frozen_full.yaml │ │ ├── baseline_random_rsna_seg_frozen.yaml │ │ ├── baseline_random_rsna_seg_frozen_1.yaml │ │ ├── baseline_random_rsna_seg_frozen_full.yaml │ │ ├── eval.yaml │ │ ├── eval_chexpert_finetune.yaml │ │ ├── eval_chexpert_linear.yaml │ │ ├── eval_covid_lin_frozen.yaml │ │ ├── eval_covid_unet_finetune.yaml │ │ ├── eval_covid_unet_frozen.yaml │ │ ├── eval_nih_finetune.yaml │ │ ├── eval_nih_frozen.yaml │ │ ├── eval_nih_seg_frozen.yaml │ │ ├── eval_object_finetune.yaml │ │ ├── eval_object_finetune_full.yaml │ │ ├── eval_object_frozen.yaml │ │ ├── eval_object_frozen_full.yaml │ │ ├── eval_object_seg_frozen.yaml │ │ ├── eval_object_seg_frozen_full.yaml │ │ ├── eval_pneumo_lin_frozen.yaml │ │ ├── eval_pneumo_lin_frozen_full.yaml │ │ ├── eval_pneumo_unet_finetune.yaml │ │ ├── eval_pneumo_unet_finetune_full.yaml │ │ ├── eval_pneumo_unet_frozen.yaml │ │ ├── eval_pneumo_unet_frozen_full.yaml │ │ ├── eval_rsna_finetune.yaml │ │ ├── eval_rsna_finetune_1.yaml │ │ ├── eval_rsna_finetune_full.yaml │ │ ├── eval_rsna_frozen.yaml │ │ ├── eval_rsna_frozen_1.yaml │ │ ├── eval_rsna_frozen_concat.yaml │ │ ├── eval_rsna_frozen_for_unet.yaml │ │ ├── eval_rsna_frozen_full.yaml │ │ ├── eval_rsna_frozen_up.yaml │ │ ├── eval_rsna_rcnn_frozen.yaml │ │ ├── eval_rsna_rcnnfpn_frozen.yaml │ │ ├── eval_rsna_seg_frozen.yaml │ │ ├── eval_rsna_seg_frozen_1.yaml │ │ ├── eval_rsna_seg_frozen_down.yaml │ │ ├── eval_rsna_seg_frozen_full.yaml │ │ ├── eval_scr_unet_finetune.yaml │ │ └── eval_scr_unet_frozen.yaml ├── fine_tune.yaml ├── logger │ └── wandb.yaml ├── objective │ ├── global_contrastive.yaml │ ├── report_sent_intra_contrastive.yaml │ └── scan_local_intra_contrastive.yaml ├── online_eval │ ├── chexpert_bin_joint.yaml │ ├── chexpert_bin_report.yaml │ └── chexpert_bin_scan.yaml ├── report_encoder │ ├── bioclinicalbert_sentatt.yaml │ └── bioclinicalbert_sentences.yaml ├── scan_encoder │ ├── resnet50_imagenet.yaml │ └── resnet50attention_imagenet.yaml ├── train_representation.yaml └── user_config.yml ├── datasets ├── NIH_CXR_pathology_detection │ ├── dataset_statistics.json │ ├── test.csv │ ├── train.csv │ └── validation.csv ├── Opacity-Segmentation-COVID │ └── opacity_segmentation_covid_chest_X_ray-master │ │ └── covid_rural_annot │ │ ├── dataset_statistics.json │ │ ├── test.csv │ │ ├── train.csv │ │ └── validation.csv ├── RSNA-Pneunomia-Detection │ ├── dataset_statistics.json │ ├── test.csv │ ├── train.csv │ └── validation.csv ├── object-CXR │ ├── dataset_statistics.json │ ├── test.csv │ ├── train.csv │ └── validation.csv └── siim-acr-pneumothorax-segmentation │ ├── dataset_statistics.json │ ├── test.csv │ ├── train.csv │ └── validation.csv ├── environment.yaml ├── results ├── LoVT.pdf ├── LoVT.png ├── generated │ ├── analysis-test.csv │ ├── covariance_y_a.pdf │ ├── covariance_y_b.pdf │ ├── downstream_covid_linear.csv │ ├── downstream_covid_unet_finetune.csv │ ├── downstream_covid_unet_frozen.csv │ ├── downstream_nih_seg_linear.csv │ ├── downstream_object_finetune_100.csv │ ├── downstream_object_frozen_100.csv │ ├── downstream_object_seg_linear_100.csv │ ├── downstream_pneumothorax_unet_finetune_100.csv │ ├── downstream_pneumothorax_unet_frozen_100.csv │ ├── downstream_rsna_finetune_1.csv │ ├── downstream_rsna_finetune_10.csv │ ├── downstream_rsna_finetune_100.csv │ ├── downstream_rsna_frozen_1.csv │ ├── downstream_rsna_frozen_10.csv │ ├── downstream_rsna_frozen_100.csv │ ├── downstream_rsna_seg_linear_1.csv │ ├── downstream_rsna_seg_linear_10.csv │ ├── downstream_rsna_seg_linear_100.csv │ ├── global_alignment.pdf │ ├── global_alignment_normalized.pdf │ ├── local_alignment.pdf │ ├── local_alignment_normalized.pdf │ ├── local_modality_assignment.pdf │ ├── results_table.png │ ├── results_table_rsna.png │ ├── std_y_a.pdf │ ├── std_y_b.pdf │ ├── uniformity_y_a.pdf │ └── uniformity_y_b.pdf └── models │ ├── LoVT_100 │ ├── scan_weights_heatmap_test.pdf │ ├── spatial_smoothness_yl_rsna_seg_test.png │ ├── spatial_smoothness_yl_test.pdf │ ├── tSNE_yg_a_chexpert.pdf │ ├── tSNE_yg_b_chexpert.pdf │ ├── tSNE_yg_downstream_class_g_rsna_seg.pdf │ ├── tSNE_yl_a_chexpert.pdf │ ├── tSNE_yl_b_chexpert.pdf │ ├── tSNE_yl_downstream_class_g_rsna_seg.pdf │ ├── tSNE_yl_downstream_class_probs_rsna_seg.pdf │ ├── tSNE_zg.pdf │ └── tSNE_zl.pdf │ ├── LoVT_30 │ ├── scan_weights_heatmap_test.pdf │ ├── spatial_smoothness_yl_rsna_seg_test.png │ ├── spatial_smoothness_yl_test.pdf │ ├── tSNE_yg_a_chexpert.pdf │ ├── tSNE_yg_b_chexpert.pdf │ ├── tSNE_yg_downstream_class_g_rsna_seg.pdf │ ├── tSNE_yl_a_chexpert.pdf │ ├── tSNE_yl_b_chexpert.pdf │ ├── tSNE_yl_downstream_class_g_rsna_seg.pdf │ ├── tSNE_yl_downstream_class_probs_rsna_seg.pdf │ ├── tSNE_zg.pdf │ └── tSNE_zl.pdf │ ├── ablation_g_only │ ├── spatial_smoothness_yl_rsna_seg_test.png │ ├── spatial_smoothness_yl_test.pdf │ ├── tSNE_yg_a_chexpert.pdf │ ├── tSNE_yg_b_chexpert.pdf │ ├── tSNE_yg_downstream_class_g_rsna_seg.pdf │ ├── tSNE_yl_a_chexpert.pdf │ ├── tSNE_yl_b_chexpert.pdf │ ├── tSNE_yl_downstream_class_g_rsna_seg.pdf │ ├── tSNE_yl_downstream_class_probs_rsna_seg.pdf │ ├── tSNE_zg.pdf │ └── tSNE_zl.pdf │ ├── ablation_no_g │ ├── spatial_smoothness_yl_rsna_seg_test.png │ ├── spatial_smoothness_yl_test.pdf │ ├── tSNE_yg_a_chexpert.pdf │ ├── tSNE_yg_b_chexpert.pdf │ ├── tSNE_yg_downstream_class_g_rsna_seg.pdf │ ├── tSNE_yl_a_chexpert.pdf │ ├── tSNE_yl_b_chexpert.pdf │ ├── tSNE_yl_downstream_class_g_rsna_seg.pdf │ ├── tSNE_yl_downstream_class_probs_rsna_seg.pdf │ ├── tSNE_zg.pdf │ └── tSNE_zl.pdf │ ├── ablation_no_l │ ├── spatial_smoothness_yl_rsna_seg_test.png │ ├── spatial_smoothness_yl_test.pdf │ ├── tSNE_yg_a_chexpert.pdf │ ├── tSNE_yg_b_chexpert.pdf │ ├── tSNE_yg_downstream_class_g_rsna_seg.pdf │ ├── tSNE_yl_a_chexpert.pdf │ ├── tSNE_yl_b_chexpert.pdf │ ├── tSNE_yl_downstream_class_g_rsna_seg.pdf │ ├── tSNE_yl_downstream_class_probs_rsna_seg.pdf │ ├── tSNE_zg.pdf │ └── tSNE_zl.pdf │ ├── ablation_no_l_report │ ├── scan_weights_heatmap_test.pdf │ ├── spatial_smoothness_yl_rsna_seg_test.pdf │ ├── spatial_smoothness_yl_test.pdf │ ├── tSNE_yg_a_chexpert.pdf │ ├── tSNE_yg_b_chexpert.pdf │ ├── tSNE_yg_downstream_class_g_rsna_seg.pdf │ ├── tSNE_yl_a_chexpert.pdf │ ├── tSNE_yl_b_chexpert.pdf │ ├── tSNE_yl_downstream_class_g_rsna_seg.pdf │ ├── tSNE_yl_downstream_class_probs_rsna_seg.pdf │ ├── tSNE_zg.pdf │ └── tSNE_zl.pdf │ └── ablation_no_l_scan │ ├── spatial_smoothness_yl_rsna_seg_test.pdf │ ├── spatial_smoothness_yl_test.pdf │ ├── tSNE_yg_a_chexpert.pdf │ ├── tSNE_yg_b_chexpert.pdf │ ├── tSNE_yg_downstream_class_g_rsna_seg.pdf │ ├── tSNE_yl_a_chexpert.pdf │ ├── tSNE_yl_b_chexpert.pdf │ ├── tSNE_yl_downstream_class_g_rsna_seg.pdf │ ├── tSNE_yl_downstream_class_probs_rsna_seg.pdf │ ├── tSNE_zg.pdf │ └── tSNE_zl.pdf └── src ├── analysis ├── __init__.py ├── data_exporter.py ├── downstream_embeddings.py ├── embedding_analysis.py ├── evaluation_job.py ├── postprocess_run.py └── visualization │ ├── __init__.py │ ├── downstream_plotter.py │ ├── emb_properties_vis.py │ ├── embedding_vis.py │ ├── heatmaps.py │ └── spatial_smoothness.py ├── baselines ├── __init__.py ├── baseline_utils.py ├── byol_baseline.py ├── simclr_baseline.py └── supervised_baseline.py ├── common ├── __init__.py ├── config_utils.py ├── dataclass_utils.py ├── script_utils.py ├── user_config.py └── wandb.py ├── data ├── __init__.py ├── dataloading_utils.py ├── datasets │ ├── COVID_rural │ │ ├── __init__.py │ │ └── covid_rural_dataset.py │ ├── __init__.py │ ├── base_dataset.py │ ├── chexpert │ │ ├── __init__.py │ │ └── chexpert_dataset.py │ ├── mimic_cxr │ │ ├── __init__.py │ │ ├── mimic_cxr_dataset.py │ │ └── section_parser.py │ ├── nih_cxr │ │ └── nih_cxr_dataset.py │ ├── object_cxr │ │ ├── __init__.py │ │ └── object_cxr_dataset.py │ ├── processing_utils.py │ ├── rsna_pneunomia_detection │ │ ├── __init__.py │ │ └── rsna_pneunomia_detection_dataset.py │ └── siim_acr_pneumothorax │ │ ├── __init__.py │ │ └── siim_acr_pneumothorax.py └── text_utils │ ├── __init__.py │ └── sentence_splitting.py ├── metrics ├── __init__.py ├── attention_metrics.py ├── bimodal_alignment_metrics.py ├── classification_metrics.py ├── detection_metrics.py ├── embedding_metrics.py └── retrieval_metrics.py ├── models ├── __init__.py ├── components │ ├── __init__.py │ ├── aggregation.py │ ├── attention.py │ ├── fc.py │ └── utils.py ├── downstream │ ├── __init__.py │ ├── classification.py │ ├── detection.py │ ├── downstream_evaluation.py │ ├── online_evaluation.py │ └── segmentation.py ├── image │ ├── __init__.py │ ├── resnet.py │ ├── resunet.py │ ├── scan_encoder.py │ ├── scan_transforms.py │ └── unet.py ├── objectives │ ├── __init__.py │ ├── global_alignment.py │ └── local_alignment.py ├── pretraining │ ├── __init__.py │ ├── bimodal_alignment_model.py │ └── pretraining_utils.py └── text │ ├── __init__.py │ ├── language_model_loader.py │ ├── report_encoder.py │ └── report_transforms.py └── scripts ├── __init__.py ├── imports.py ├── run_finetuning.py ├── run_finetuning_baseline.py └── run_training.py /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2021 Philip Müller 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /configs/base_config.yaml: -------------------------------------------------------------------------------- 1 | defaults: 2 | - logger: 3 | - wandb 4 | - callback: 5 | - upload_config 6 | 7 | name: 8 | 9 | seed: 12345 10 | trainer: 11 | precision: 16 12 | amp_level: 'O1' 13 | 14 | gpus: [0] 15 | num_dataloader_workers: 10 16 | 17 | debug: False 18 | print_config: True 19 | 20 | work_dir: ${hydra:runtime.cwd} 21 | 22 | hydra: 23 | output_subdir: hydra 24 | run: 25 | dir: logs/runs/${now:%Y-%m-%d}/${now:%H-%M-%S} 26 | sweep: 27 | dir: logs/multiruns/${now:%Y-%m-%d_%H-%M-%S} 28 | subdir: ${hydra.job.num} 29 | -------------------------------------------------------------------------------- /configs/baseline.yaml: -------------------------------------------------------------------------------- 1 | defaults: 2 | - baseline_config 3 | - base_config 4 | 5 | logger: 6 | wandb: 7 | tags: 8 | - baseline 9 | -------------------------------------------------------------------------------- /configs/baseline/byol_100.yaml: -------------------------------------------------------------------------------- 1 | defaults: 2 | - base_model_config 3 | - resnet50 4 | projection_size: 128 5 | ssl_method: BYOL 6 | 7 | dataset: mimic-cxr-img_ap-pa_find-impr 8 | batch_size: 64 9 | learning_rate: 1e-4 10 | weight_decay: 1e-4 11 | 12 | warmup_epochs: 1 13 | max_epochs: 100 -------------------------------------------------------------------------------- /configs/baseline/byol_30.yaml: -------------------------------------------------------------------------------- 1 | defaults: 2 | - base_model_config 3 | - resnet50 4 | projection_size: 128 5 | ssl_method: BYOL 6 | 7 | dataset: mimic-cxr-img_ap-pa_find-impr_03 8 | batch_size: 64 9 | learning_rate: 3e-5 10 | weight_decay: 1e-4 11 | 12 | warmup_epochs: 1 13 | max_epochs: 100 -------------------------------------------------------------------------------- /configs/baseline/chexpert_100.yaml: -------------------------------------------------------------------------------- 1 | defaults: 2 | - base_model_config 3 | - resnet50 4 | classifier_task: 'chexpert_binary' 5 | 6 | dataset: mimic-cxr-img_ap-pa_find-impr 7 | lr_reduce_patience: 3 8 | lr_reduce_factor: 0.5 9 | early_stop_patience: 10 10 | batch_size: 64 11 | learning_rate: 3e-4 12 | weight_decay: 1e-6 13 | max_epochs: 100 14 | -------------------------------------------------------------------------------- /configs/baseline/chexpert_30.yaml: -------------------------------------------------------------------------------- 1 | defaults: 2 | - base_model_config 3 | - resnet50 4 | classifier_task: 'chexpert_binary' 5 | 6 | dataset: mimic-cxr-img_ap-pa_find-impr_03 7 | lr_reduce_patience: 3 8 | lr_reduce_factor: 0.5 9 | early_stop_patience: 10 10 | batch_size: 64 11 | learning_rate: 1e-4 12 | weight_decay: 1e-6 13 | max_epochs: 100 14 | -------------------------------------------------------------------------------- /configs/baseline/pixelpro_100.yaml: -------------------------------------------------------------------------------- 1 | defaults: 2 | - base_model_config 3 | - resnet50 4 | projection_size: 512 5 | ssl_method: PixelPro 6 | 7 | dataset: mimic-cxr-img_ap-pa_find-impr 8 | batch_size: 64 9 | learning_rate: 1e-3 10 | weight_decay: 1e-5 11 | 12 | max_epochs: 100 -------------------------------------------------------------------------------- /configs/baseline/pixelpro_30.yaml: -------------------------------------------------------------------------------- 1 | defaults: 2 | - base_model_config 3 | - resnet50 4 | projection_size: 512 5 | ssl_method: PixelPro 6 | 7 | dataset: mimic-cxr-img_ap-pa_find-impr_03 8 | batch_size: 64 9 | learning_rate: 1e-3 10 | weight_decay: 1e-5 11 | 12 | max_epochs: 100 -------------------------------------------------------------------------------- /configs/baseline/resnet50.yaml: -------------------------------------------------------------------------------- 1 | backbone_architecture: 'resnet' 2 | backbone_model: ['pytorch/vision:v0.6.0', 'resnet50'] 3 | backbone_pretrained: true 4 | input_size: [224, 224] 5 | -------------------------------------------------------------------------------- /configs/baseline/simclr_100.yaml: -------------------------------------------------------------------------------- 1 | defaults: 2 | - base_model_config 3 | - resnet50 4 | projection_size: 128 5 | temperature: 0.1 6 | 7 | dataset: mimic-cxr-img_ap-pa_find-impr 8 | batch_size: 128 9 | learning_rate: 3e-4 10 | weight_decay: 1e-4 11 | 12 | warmup_epochs: 1 13 | max_epochs: 100 -------------------------------------------------------------------------------- /configs/baseline/simclr_30.yaml: -------------------------------------------------------------------------------- 1 | defaults: 2 | - base_model_config 3 | - resnet50 4 | projection_size: 128 5 | temperature: 0.1 6 | 7 | dataset: mimic-cxr-img_ap-pa_find-impr_03 8 | batch_size: 128 9 | learning_rate: 3e-4 10 | weight_decay: 1e-4 11 | 12 | warmup_epochs: 1 13 | max_epochs: 100 -------------------------------------------------------------------------------- /configs/baseline_fine_tune.yaml: -------------------------------------------------------------------------------- 1 | defaults: 2 | - baseline_fine_tune_config 3 | - base_config 4 | - callback: 5 | - eval_checkpoint 6 | - eval_early_stopping 7 | 8 | logger: 9 | wandb: 10 | tags: 11 | - downstream 12 | 13 | -------------------------------------------------------------------------------- /configs/callback/checkpoint.yaml: -------------------------------------------------------------------------------- 1 | model_checkpoint: 2 | _target_: pytorch_lightning.callbacks.ModelCheckpoint 3 | monitor: ${...monitor_metric} 4 | save_top_k: 1 # save k best models (determined by above metric) 5 | save_last: true # additionaly always save model from last epoch 6 | mode: ${...monitor_metric_mode} 7 | verbose: true 8 | dirpath: 'checkpoints/' 9 | filename: 'pretrain-{epoch:04d}' 10 | -------------------------------------------------------------------------------- /configs/callback/early_stopping.yaml: -------------------------------------------------------------------------------- 1 | early_stopping: 2 | _target_: pytorch_lightning.callbacks.EarlyStopping 3 | monitor: ${...monitor_metric} 4 | patience: 10 # 40 # how many epochs of not improving until training stops 5 | mode: ${...monitor_metric_mode} 6 | min_delta: 0.00001 # minimum change in the monitored metric needed to qualify as an improvement 7 | verbose: true -------------------------------------------------------------------------------- /configs/callback/eval_checkpoint.yaml: -------------------------------------------------------------------------------- 1 | defaults: 2 | - checkpoint 3 | 4 | model_checkpoint: 5 | filename: '${...evaluation_model.eval_name}-{epoch:04d}' 6 | -------------------------------------------------------------------------------- /configs/callback/eval_early_stopping.yaml: -------------------------------------------------------------------------------- 1 | defaults: 2 | - early_stopping 3 | 4 | early_stopping: 5 | patience: 10 6 | -------------------------------------------------------------------------------- /configs/callback/upload_config.yaml: -------------------------------------------------------------------------------- 1 | upload_config_to_wandb_as_artifact: 2 | _target_: imports.UploadConfigToWandbAsArtifact 3 | config_dir: hydra -------------------------------------------------------------------------------- /configs/dataset/covid_rural.yaml: -------------------------------------------------------------------------------- 1 | name: 'COVID Rural' 2 | loader: 'CovidRuralDataset' 3 | path: '/Opacity-Segmentation-COVID/opacity_segmentation_covid_chest_X_ray-master/covid_rural_annot' -------------------------------------------------------------------------------- /configs/dataset/mimic-cxr-img_ap-pa_find-impr.yaml: -------------------------------------------------------------------------------- 1 | name: 'MIMIC-CXR-IMG_ap/pa_find/impr' 2 | loader: 'MimicCxrImageDataset' 3 | path: '/MIMIC-CXR/mimic-cxr_ap-pa_dataset' -------------------------------------------------------------------------------- /configs/dataset/mimic-cxr-img_ap-pa_find-impr_03.yaml: -------------------------------------------------------------------------------- 1 | name: 'MIMIC-CXR-IMG_ap/pa_find/impr' 2 | loader: 'MimicCxrImageDataset' 3 | path: '/MIMIC-CXR/mimic-cxr_ap-pa_dataset' 4 | 5 | train_subset: 0.3 -------------------------------------------------------------------------------- /configs/dataset/mimic-cxr_ap-pa_find-impr.yaml: -------------------------------------------------------------------------------- 1 | name: 'MIMIC-CXR_ap/pa_find/impr' 2 | loader: 'MimicCxrDataset' 3 | path: '/MIMIC-CXR/mimic-cxr_ap-pa_dataset' -------------------------------------------------------------------------------- /configs/dataset/mimic-cxr_ap-pa_find-impr_03.yaml: -------------------------------------------------------------------------------- 1 | name: 'MIMIC-CXR_ap/pa_find/impr' 2 | loader: 'MimicCxrDataset' 3 | path: '/MIMIC-CXR/mimic-cxr_ap-pa_dataset' 4 | 5 | train_subset: 0.3 6 | 7 | -------------------------------------------------------------------------------- /configs/dataset/nih-cxr.yaml: -------------------------------------------------------------------------------- 1 | name: 'NIH_CXR_pathologies' 2 | loader: 'NihCxrDetectionDataset' 3 | path: '/NIH_CXR_pathology_detection' 4 | -------------------------------------------------------------------------------- /configs/dataset/nih-cxr_seg.yaml: -------------------------------------------------------------------------------- 1 | name: 'NIH_CXR_pathologies_seg' 2 | loader: 'NihCxrSegmentationDataset' 3 | path: '/NIH_CXR_pathology_detection' 4 | -------------------------------------------------------------------------------- /configs/dataset/object-cxr.yaml: -------------------------------------------------------------------------------- 1 | name: 'Object-CXR' 2 | loader: 'ObjectCxrDetectionDataset' 3 | path: '/object-CXR' 4 | 5 | 6 | -------------------------------------------------------------------------------- /configs/dataset/object-cxr_seg.yaml: -------------------------------------------------------------------------------- 1 | name: 'Object-CXR_seg' 2 | loader: 'ObjectCxrSegmentationDataset' 3 | path: '/object-CXR' 4 | 5 | 6 | -------------------------------------------------------------------------------- /configs/dataset/rsna.yaml: -------------------------------------------------------------------------------- 1 | name: 'RSNA' 2 | loader: 'RsnaPneunomiaDetectionDataset' 3 | path: '/RSNA-Pneunomia-Detection' 4 | 5 | val_subset: 0.1 6 | 7 | -------------------------------------------------------------------------------- /configs/dataset/rsna_001.yaml: -------------------------------------------------------------------------------- 1 | name: 'RSNA' 2 | loader: 'RsnaPneunomiaDetectionDataset' 3 | path: '/RSNA-Pneunomia-Detection' 4 | 5 | train_subset: 0.01 6 | val_subset: 0.1 7 | 8 | -------------------------------------------------------------------------------- /configs/dataset/rsna_01.yaml: -------------------------------------------------------------------------------- 1 | name: 'RSNA' 2 | loader: 'RsnaPneunomiaDetectionDataset' 3 | path: '/RSNA-Pneunomia-Detection' 4 | 5 | train_subset: 0.1 6 | val_subset: 0.1 7 | 8 | -------------------------------------------------------------------------------- /configs/dataset/rsna_seg.yaml: -------------------------------------------------------------------------------- 1 | name: 'RSNA_seg' 2 | loader: 'RsnaPneunomiaSegmentationDataset' 3 | path: '/RSNA-Pneunomia-Detection' 4 | 5 | val_subset: 0.1 6 | 7 | -------------------------------------------------------------------------------- /configs/dataset/rsna_seg_001.yaml: -------------------------------------------------------------------------------- 1 | name: 'RSNA_seg' 2 | loader: 'RsnaPneunomiaSegmentationDataset' 3 | path: '/RSNA-Pneunomia-Detection' 4 | 5 | train_subset: 0.01 6 | val_subset: 0.1 7 | 8 | -------------------------------------------------------------------------------- /configs/dataset/rsna_seg_01.yaml: -------------------------------------------------------------------------------- 1 | name: 'RSNA_seg' 2 | loader: 'RsnaPneunomiaSegmentationDataset' 3 | path: '/RSNA-Pneunomia-Detection' 4 | 5 | train_subset: 0.1 6 | val_subset: 0.1 7 | 8 | -------------------------------------------------------------------------------- /configs/dataset/siim_pneumothorax.yaml: -------------------------------------------------------------------------------- 1 | name: 'SIIM_pneumothorax' 2 | loader: 'SIIMSegmentationDataset' 3 | path: '/siim-acr-pneumothorax-segmentation' 4 | -------------------------------------------------------------------------------- /configs/evaluation_model/covid_linear_frozen.yaml: -------------------------------------------------------------------------------- 1 | defaults: 2 | - segmentation_eval 3 | 4 | dataset: covid_rural 5 | eval_name: 'covid_lin' 6 | task: 'covid_rural' 7 | data_augmentation: false 8 | loss_fn: dice 9 | segmentation_head: linear 10 | freeze_encoder: true 11 | 12 | batch_size: 8 13 | learning_rate: 3e-2 14 | weight_decay: 1e-6 15 | lr_reduce_patience: 3 16 | lr_reduce_factor: 0.5 17 | 18 | max_epochs: 100 19 | -------------------------------------------------------------------------------- /configs/evaluation_model/covid_unet_finetune.yaml: -------------------------------------------------------------------------------- 1 | defaults: 2 | - segmentation_eval 3 | 4 | dataset: covid_rural 5 | eval_name: 'covid_unet_finetune' 6 | task: 'covid_rural' 7 | data_augmentation: false 8 | loss_fn: dice 9 | segmentation_head: unet 10 | freeze_encoder: false 11 | frozen_warmup_steps: 20 12 | warmup_lr: 1e-3 13 | 14 | batch_size: 8 15 | learning_rate: 1e-4 16 | weight_decay: 1e-6 17 | lr_reduce_patience: 3 18 | lr_reduce_factor: 0.5 19 | 20 | max_epochs: 100 21 | -------------------------------------------------------------------------------- /configs/evaluation_model/covid_unet_frozen.yaml: -------------------------------------------------------------------------------- 1 | defaults: 2 | - segmentation_eval 3 | 4 | dataset: covid_rural 5 | eval_name: 'covid_unet_frozen' 6 | task: 'covid_rural' 7 | data_augmentation: false 8 | loss_fn: dice 9 | segmentation_head: unet 10 | freeze_encoder: true 11 | 12 | batch_size: 8 13 | learning_rate: 3e-4 14 | weight_decay: 1e-6 15 | lr_reduce_patience: 3 16 | lr_reduce_factor: 0.5 17 | 18 | max_epochs: 100 19 | -------------------------------------------------------------------------------- /configs/evaluation_model/nih_seg_frozen.yaml: -------------------------------------------------------------------------------- 1 | defaults: 2 | - segmentation_eval 3 | 4 | dataset: 'nih-cxr_seg' 5 | eval_name: 'nih_seg_lin' 6 | task: 'NIH_CXR_pathology_detection' 7 | data_augmentation: false 8 | loss_fn: dice 9 | segmentation_head: linear 10 | freeze_encoder: true 11 | 12 | batch_size: 64 13 | learning_rate: 1e-2 14 | weight_decay: 1e-6 15 | lr_reduce_patience: 3 16 | lr_reduce_factor: 0.5 17 | 18 | max_epochs: 100 19 | -------------------------------------------------------------------------------- /configs/evaluation_model/object_finetune.yaml: -------------------------------------------------------------------------------- 1 | defaults: 2 | - detection_eval 3 | 4 | dataset: object-cxr_01 5 | eval_name: 'object_finetune' 6 | task: 'object_cxr' 7 | data_augmentation: false 8 | 9 | detection_head: YOLOv3 10 | extracted_layers: ['conv3', 'conv4', 'conv5'] 11 | anchors: 12 | - [[64.64, 48.6], [84.24, 106.92], [201.42, 176.04]] 13 | - [[16.2, 32.94], [33.48, 24.3], [31.86, 64.26]] 14 | - [[5.4, 7.02], [8.64, 16.2], [17.82, 12.42]] 15 | 16 | freeze_encoder: false 17 | frozen_warmup_steps: 100 18 | warmup_lr: 1e-3 19 | 20 | batch_size: 64 21 | learning_rate: 3e-4 22 | weight_decay: 1e-6 23 | lr_reduce_patience: 3 24 | lr_reduce_factor: 0.5 25 | 26 | max_epochs: 100 27 | -------------------------------------------------------------------------------- /configs/evaluation_model/object_finetune_full.yaml: -------------------------------------------------------------------------------- 1 | defaults: 2 | - detection_eval 3 | 4 | dataset: object-cxr 5 | eval_name: 'object_finetune_full' 6 | task: 'object_cxr' 7 | data_augmentation: false 8 | 9 | detection_head: YOLOv3 10 | extracted_layers: ['conv3', 'conv4', 'conv5'] 11 | anchors: 12 | - [[64.64, 48.6], [84.24, 106.92], [201.42, 176.04]] 13 | - [[16.2, 32.94], [33.48, 24.3], [31.86, 64.26]] 14 | - [[5.4, 7.02], [8.64, 16.2], [17.82, 12.42]] 15 | 16 | freeze_encoder: false 17 | frozen_warmup_steps: 100 18 | warmup_lr: 1e-3 19 | 20 | batch_size: 64 21 | learning_rate: 3e-4 22 | weight_decay: 1e-6 23 | lr_reduce_patience: 3 24 | lr_reduce_factor: 0.5 25 | 26 | max_epochs: 100 27 | -------------------------------------------------------------------------------- /configs/evaluation_model/object_frozen.yaml: -------------------------------------------------------------------------------- 1 | defaults: 2 | - detection_eval 3 | 4 | dataset: object-cxr_01 5 | eval_name: 'object_frozen' 6 | task: 'object_cxr' 7 | data_augmentation: false 8 | 9 | detection_head: YOLOv3 10 | extracted_layers: ['conv3', 'conv4', 'conv5'] 11 | anchors: 12 | - [[64.64, 48.6], [84.24, 106.92], [201.42, 176.04]] 13 | - [[16.2, 32.94], [33.48, 24.3], [31.86, 64.26]] 14 | - [[5.4, 7.02], [8.64, 16.2], [17.82, 12.42]] 15 | 16 | freeze_encoder: true 17 | 18 | batch_size: 64 19 | learning_rate: 1e-3 20 | weight_decay: 1e-6 21 | lr_reduce_patience: 3 22 | lr_reduce_factor: 0.5 23 | 24 | max_epochs: 100 25 | -------------------------------------------------------------------------------- /configs/evaluation_model/object_frozen_full.yaml: -------------------------------------------------------------------------------- 1 | defaults: 2 | - detection_eval 3 | 4 | dataset: object-cxr 5 | eval_name: 'object_frozen_full' 6 | task: 'object_cxr' 7 | data_augmentation: false 8 | 9 | detection_head: YOLOv3 10 | extracted_layers: ['conv3', 'conv4', 'conv5'] 11 | anchors: 12 | - [[64.64, 48.6], [84.24, 106.92], [201.42, 176.04]] 13 | - [[16.2, 32.94], [33.48, 24.3], [31.86, 64.26]] 14 | - [[5.4, 7.02], [8.64, 16.2], [17.82, 12.42]] 15 | 16 | freeze_encoder: true 17 | 18 | batch_size: 64 19 | learning_rate: 1e-3 20 | weight_decay: 1e-6 21 | lr_reduce_patience: 3 22 | lr_reduce_factor: 0.5 23 | 24 | max_epochs: 100 25 | -------------------------------------------------------------------------------- /configs/evaluation_model/object_seg_frozen.yaml: -------------------------------------------------------------------------------- 1 | defaults: 2 | - segmentation_eval 3 | 4 | dataset: object-cxr_seg_01 5 | eval_name: 'object_seg_lin' 6 | task: 'object_cxr' 7 | data_augmentation: false 8 | loss_fn: dice 9 | segmentation_head: linear 10 | freeze_encoder: true 11 | 12 | batch_size: 64 13 | learning_rate: 1e-2 14 | weight_decay: 1e-6 15 | lr_reduce_patience: 3 16 | lr_reduce_factor: 0.5 17 | 18 | max_epochs: 100 19 | -------------------------------------------------------------------------------- /configs/evaluation_model/object_seg_frozen_full.yaml: -------------------------------------------------------------------------------- 1 | defaults: 2 | - segmentation_eval 3 | 4 | dataset: object-cxr_seg 5 | eval_name: 'object_seg_lin_full' 6 | task: 'object_cxr' 7 | data_augmentation: false 8 | loss_fn: dice 9 | segmentation_head: linear 10 | freeze_encoder: true 11 | 12 | batch_size: 64 13 | learning_rate: 1e-2 14 | weight_decay: 1e-6 15 | lr_reduce_patience: 3 16 | lr_reduce_factor: 0.5 17 | 18 | max_epochs: 100 19 | -------------------------------------------------------------------------------- /configs/evaluation_model/pneumothorax_unet_finetune.yaml: -------------------------------------------------------------------------------- 1 | defaults: 2 | - segmentation_eval 3 | 4 | dataset: siim_pneumothorax_01 5 | eval_name: 'pneumo_unet_finetune' 6 | task: 'SIIM_pneumothorax_segmentation' 7 | data_augmentation: false 8 | loss_fn: dice 9 | segmentation_head: unet 10 | freeze_encoder: false 11 | frozen_warmup_steps: 100 12 | warmup_lr: 1e-3 13 | 14 | batch_size: 64 15 | learning_rate: 1e-3 16 | weight_decay: 1e-6 17 | lr_reduce_patience: 3 18 | lr_reduce_factor: 0.5 19 | 20 | max_epochs: 100 21 | -------------------------------------------------------------------------------- /configs/evaluation_model/pneumothorax_unet_finetune_full.yaml: -------------------------------------------------------------------------------- 1 | defaults: 2 | - pneumothorax_unet_finetune 3 | 4 | dataset: siim_pneumothorax 5 | eval_name: 'pneumo_unet_finetune_full' 6 | loss_fn: dice 7 | batch_size: 64 8 | learning_rate: 1e-3 9 | lr_reduce_patience: 3 10 | 11 | max_epochs: 50 12 | -------------------------------------------------------------------------------- /configs/evaluation_model/pneumothorax_unet_frozen.yaml: -------------------------------------------------------------------------------- 1 | defaults: 2 | - segmentation_eval 3 | 4 | dataset: siim_pneumothorax_01 5 | eval_name: 'pneumo_unet_frozen' 6 | task: 'SIIM_pneumothorax_segmentation' 7 | data_augmentation: false 8 | loss_fn: dice 9 | segmentation_head: unet 10 | freeze_encoder: true 11 | 12 | batch_size: 64 13 | learning_rate: 1e-3 14 | weight_decay: 1e-6 15 | lr_reduce_patience: 3 16 | lr_reduce_factor: 0.5 17 | 18 | max_epochs: 100 19 | -------------------------------------------------------------------------------- /configs/evaluation_model/pneumothorax_unet_frozen_full.yaml: -------------------------------------------------------------------------------- 1 | defaults: 2 | - pneumothorax_unet_frozen 3 | 4 | dataset: siim_pneumothorax 5 | eval_name: 'pneumo_unet_frozen_full' 6 | loss_fn: dice 7 | batch_size: 64 8 | learning_rate: 1e-3 9 | lr_reduce_patience: 3 10 | 11 | max_epochs: 50 12 | -------------------------------------------------------------------------------- /configs/evaluation_model/rsna_finetune.yaml: -------------------------------------------------------------------------------- 1 | defaults: 2 | - detection_eval 3 | 4 | dataset: rsna_01 5 | eval_name: 'rsna_finetune' 6 | task: 'rsna_pneunomia_detection' 7 | data_augmentation: false 8 | 9 | detection_head: YOLOv3 10 | extracted_layers: ['conv3', 'conv4', 'conv5'] 11 | anchors: 12 | - [[64.64, 48.6], [84.24, 106.92], [201.42, 176.04]] 13 | - [[16.2, 32.94], [33.48, 24.3], [31.86, 64.26]] 14 | - [[5.4, 7.02], [8.64, 16.2], [17.82, 12.42]] 15 | 16 | freeze_encoder: false 17 | frozen_warmup_steps: 100 18 | warmup_lr: 1e-3 19 | 20 | batch_size: 64 21 | learning_rate: 3e-4 22 | weight_decay: 1e-6 23 | lr_reduce_patience: 3 24 | lr_reduce_factor: 0.5 25 | 26 | max_epochs: 100 27 | -------------------------------------------------------------------------------- /configs/evaluation_model/rsna_finetune_1.yaml: -------------------------------------------------------------------------------- 1 | defaults: 2 | - rsna_finetune 3 | 4 | dataset: rsna_001 5 | eval_name: 'rsna_finetune_1' 6 | 7 | batch_size: 8 8 | learning_rate: 1e-4 9 | lr_reduce_patience: 3 10 | 11 | max_epochs: 500 12 | -------------------------------------------------------------------------------- /configs/evaluation_model/rsna_finetune_full.yaml: -------------------------------------------------------------------------------- 1 | defaults: 2 | - rsna_finetune 3 | 4 | dataset: rsna 5 | eval_name: 'rsna_finetune_full' 6 | 7 | batch_size: 64 8 | learning_rate: 1e-4 9 | lr_reduce_patience: 3 10 | 11 | max_epochs: 50 12 | -------------------------------------------------------------------------------- /configs/evaluation_model/rsna_frozen.yaml: -------------------------------------------------------------------------------- 1 | defaults: 2 | - detection_eval 3 | 4 | dataset: rsna_01 5 | eval_name: 'rsna' 6 | task: 'rsna_pneunomia_detection' 7 | data_augmentation: false 8 | 9 | detection_head: YOLOv3 10 | extracted_layers: ['conv3', 'conv4', 'conv5'] 11 | anchors: 12 | - [[64.64, 48.6], [84.24, 106.92], [201.42, 176.04]] 13 | - [[16.2, 32.94], [33.48, 24.3], [31.86, 64.26]] 14 | - [[5.4, 7.02], [8.64, 16.2], [17.82, 12.42]] 15 | 16 | freeze_encoder: true 17 | 18 | batch_size: 64 # 8 19 | learning_rate: 1e-3 # lg_watt: 1e-3, old: 3e-4 20 | weight_decay: 1e-6 21 | lr_reduce_patience: 3 22 | lr_reduce_factor: 0.5 23 | 24 | max_epochs: 100 25 | -------------------------------------------------------------------------------- /configs/evaluation_model/rsna_frozen_1.yaml: -------------------------------------------------------------------------------- 1 | defaults: 2 | - rsna_frozen 3 | 4 | dataset: rsna_001 5 | eval_name: 'rsna_1' 6 | 7 | batch_size: 8 8 | learning_rate: 1e-3 9 | lr_reduce_patience: 3 10 | 11 | max_epochs: 500 12 | -------------------------------------------------------------------------------- /configs/evaluation_model/rsna_frozen_full.yaml: -------------------------------------------------------------------------------- 1 | defaults: 2 | - rsna_frozen 3 | 4 | dataset: rsna 5 | eval_name: 'rsna_full' 6 | 7 | batch_size: 64 8 | learning_rate: 1e-3 9 | lr_reduce_patience: 3 10 | 11 | max_epochs: 50 12 | -------------------------------------------------------------------------------- /configs/evaluation_model/rsna_seg_frozen.yaml: -------------------------------------------------------------------------------- 1 | defaults: 2 | - segmentation_eval 3 | 4 | dataset: rsna_seg_01 5 | eval_name: 'rsna_seg_lin' 6 | task: 'rsna_pneunomia_detection' 7 | data_augmentation: false 8 | loss_fn: dice 9 | segmentation_head: linear 10 | freeze_encoder: true 11 | 12 | batch_size: 64 # 8 13 | learning_rate: 3e-2 # lg_watt: 3e-2, old: 3e-3 for lcross and l, 3e-5 for global and lg (1e-5 is similar good) 14 | weight_decay: 1e-6 15 | lr_reduce_patience: 3 16 | lr_reduce_factor: 0.5 17 | 18 | max_epochs: 100 19 | -------------------------------------------------------------------------------- /configs/evaluation_model/rsna_seg_frozen_1.yaml: -------------------------------------------------------------------------------- 1 | defaults: 2 | - rsna_seg_frozen 3 | 4 | dataset: rsna_seg_001 5 | eval_name: 'rsna_seg_lin_1' 6 | loss_fn: dice 7 | batch_size: 8 8 | learning_rate: 1e-2 9 | lr_reduce_patience: 3 10 | 11 | max_epochs: 500 12 | -------------------------------------------------------------------------------- /configs/evaluation_model/rsna_seg_frozen_full.yaml: -------------------------------------------------------------------------------- 1 | defaults: 2 | - rsna_seg_frozen 3 | 4 | dataset: rsna_seg 5 | eval_name: 'rsna_seg_lin_full' 6 | loss_fn: dice 7 | batch_size: 64 8 | learning_rate: 1e-2 9 | lr_reduce_patience: 3 10 | 11 | max_epochs: 50 12 | -------------------------------------------------------------------------------- /configs/experiment/CLIP_100.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | defaults: 3 | - /online_eval: 4 | - chexpert_bin_scan 5 | - chexpert_bin_report 6 | - /objective@pretrain_model.g_alignment: global_contrastive 7 | - /scan_encoder@pretrain_model.encoder_a: resnet50attention_imagenet 8 | - /report_encoder@pretrain_model.encoder_b: bioclinicalbert_sentences 9 | 10 | name: 'CLIP_100' 11 | pretrain_dataset: mimic-cxr_ap-pa_find-impr 12 | 13 | num_dataloader_workers: 20 14 | trainer: 15 | accumulate_grad_batches: 16 16 | 17 | callback: 18 | early_stopping: 19 | patience: 10 20 | 21 | pretrain_model: 22 | loss_weights: 23 | global_a2b: 0.75 # scan with negatives from report 24 | global_b2a: 0.25 # report with negatives from scan 25 | 26 | 27 | optimizer: AdamW 28 | lr_scheduler: 29 | - cosine_annealing_per_epoch 30 | learning_rate: 1e-4 31 | weight_decay: 1e-6 32 | batch_size: 32 33 | max_epochs: 100 34 | 35 | projection_norm: batch 36 | encoder_b: 37 | data_augmentation: 38 | sentence_shuffling: null 39 | sentence_sampling: random_sentence 40 | duplicate_sentences: false 41 | -------------------------------------------------------------------------------- /configs/experiment/CLIP_30.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | defaults: 3 | - /online_eval: 4 | - chexpert_bin_scan 5 | - chexpert_bin_report 6 | - /objective@pretrain_model.g_alignment: global_contrastive 7 | - /scan_encoder@pretrain_model.encoder_a: resnet50attention_imagenet 8 | - /report_encoder@pretrain_model.encoder_b: bioclinicalbert_sentences 9 | 10 | name: 'CLIP_30' 11 | pretrain_dataset: mimic-cxr_ap-pa_find-impr_03 12 | 13 | num_dataloader_workers: 20 14 | trainer: 15 | accumulate_grad_batches: 16 16 | 17 | callback: 18 | early_stopping: 19 | patience: 10 20 | 21 | pretrain_model: 22 | loss_weights: 23 | global_a2b: 0.75 # scan with negatives from report 24 | global_b2a: 0.25 # report with negatives from scan 25 | 26 | 27 | optimizer: AdamW 28 | lr_scheduler: 29 | - cosine_annealing_per_epoch 30 | learning_rate: 1e-4 31 | weight_decay: 1e-6 32 | batch_size: 32 33 | max_epochs: 100 34 | 35 | projection_norm: batch 36 | encoder_b: 37 | data_augmentation: 38 | sentence_shuffling: null 39 | sentence_sampling: random_sentence 40 | duplicate_sentences: false 41 | -------------------------------------------------------------------------------- /configs/experiment/ConVIRT_100.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | defaults: 3 | - /online_eval: 4 | - chexpert_bin_scan 5 | - chexpert_bin_report 6 | - /objective@pretrain_model.g_alignment: global_contrastive 7 | - /scan_encoder@pretrain_model.encoder_a: resnet50_imagenet 8 | - /report_encoder@pretrain_model.encoder_b: bioclinicalbert_sentences 9 | 10 | name: 'ConVIRT_100' 11 | pretrain_dataset: mimic-cxr_ap-pa_find-impr 12 | 13 | callback: 14 | early_stopping: 15 | patience: 15 16 | 17 | pretrain_model: 18 | loss_weights: 19 | global_a2b: 0.75 # scan with negatives from report 20 | global_b2a: 0.25 # report with negatives from scan 21 | 22 | encoder_a: 23 | data_augmentation: 24 | augment: true 25 | random_horizontal_flip: true 26 | encoder_b: 27 | data_augmentation: 28 | augment: true 29 | sentence_shuffling: null 30 | sentence_sampling: random_sentence 31 | duplicate_sentences: false 32 | 33 | projection_norm: null 34 | batch_size: 32 35 | learning_rate: 1e-4 36 | weight_decay: 1e-6 37 | lr_scheduler: 38 | - reduce_on_plateau 39 | lr_reduce_patience: 12 40 | lr_reduce_factor: 0.5 41 | max_epochs: 500 42 | -------------------------------------------------------------------------------- /configs/experiment/ConVIRT_30.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | defaults: 3 | - /online_eval: 4 | - chexpert_bin_scan 5 | - chexpert_bin_report 6 | - /objective@pretrain_model.g_alignment: global_contrastive 7 | - /scan_encoder@pretrain_model.encoder_a: resnet50_imagenet 8 | - /report_encoder@pretrain_model.encoder_b: bioclinicalbert_sentences 9 | 10 | name: 'ConVIRT_30' 11 | pretrain_dataset: mimic-cxr_ap-pa_find-impr_03 12 | 13 | callback: 14 | early_stopping: 15 | patience: 15 16 | 17 | pretrain_model: 18 | loss_weights: 19 | global_a2b: 0.75 # scan with negatives from report 20 | global_b2a: 0.25 # report with negatives from scan 21 | 22 | encoder_a: 23 | data_augmentation: 24 | augment: true 25 | random_horizontal_flip: true 26 | encoder_b: 27 | data_augmentation: 28 | augment: true 29 | sentence_shuffling: null 30 | sentence_sampling: random_sentence 31 | duplicate_sentences: false 32 | 33 | projection_norm: null 34 | batch_size: 32 35 | learning_rate: 1e-5 36 | weight_decay: 1e-6 37 | lr_scheduler: 38 | - reduce_on_plateau 39 | lr_reduce_patience: 12 40 | lr_reduce_factor: 0.5 41 | max_epochs: 500 42 | -------------------------------------------------------------------------------- /configs/experiment/LoVT_100.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | defaults: 3 | - /online_eval: 4 | - chexpert_bin_scan 5 | - chexpert_bin_report 6 | - /objective@pretrain_model.g_alignment: global_contrastive 7 | - /objective@pretrain_model.ll_alignments_a.0: scan_local_intra_contrastive 8 | - /objective@pretrain_model.ll_alignments_b.0: report_sent_intra_contrastive 9 | - /scan_encoder@pretrain_model.encoder_a: resnet50attention_imagenet 10 | - /report_encoder@pretrain_model.encoder_b: bioclinicalbert_sentatt 11 | 12 | name: 'LoVT_100' 13 | pretrain_dataset: mimic-cxr_ap-pa_find-impr 14 | 15 | num_dataloader_workers: 20 16 | trainer: 17 | accumulate_grad_batches: 16 18 | 19 | callback: 20 | early_stopping: 21 | patience: 10 22 | 23 | pretrain_model: 24 | loss_weights: 25 | local_a_l2att: 0.375 26 | local_a_att2l: 0.375 27 | local_b_l2att: 0.375 28 | local_b_att2l: 0.375 29 | global_a2b: 0.75 # scan with negatives from report 30 | global_b2a: 0.25 # report with negatives from scan 31 | 32 | 33 | optimizer: AdamW 34 | lr_scheduler: 35 | - cosine_annealing_per_epoch 36 | learning_rate: 1e-4 37 | weight_decay: 1e-6 38 | batch_size: 32 39 | max_epochs: 100 40 | 41 | projection_norm: batch 42 | 43 | l_weights_a: from_aggregation 44 | l_weights_b: from_aggregation 45 | l_weights_stop_grad: true 46 | encoder_a: 47 | local_weights_from_aggregator: true 48 | encoder_b: 49 | local_weights_from_aggregator: true 50 | -------------------------------------------------------------------------------- /configs/experiment/LoVT_30.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | defaults: 3 | - /online_eval: 4 | - chexpert_bin_scan 5 | - chexpert_bin_report 6 | - /objective@pretrain_model.g_alignment: global_contrastive 7 | - /objective@pretrain_model.ll_alignments_a.0: scan_local_intra_contrastive 8 | - /objective@pretrain_model.ll_alignments_b.0: report_sent_intra_contrastive 9 | - /scan_encoder@pretrain_model.encoder_a: resnet50attention_imagenet 10 | - /report_encoder@pretrain_model.encoder_b: bioclinicalbert_sentatt 11 | 12 | name: 'LoVT_30' 13 | pretrain_dataset: mimic-cxr_ap-pa_find-impr_03 14 | 15 | num_dataloader_workers: 20 16 | trainer: 17 | accumulate_grad_batches: 16 18 | 19 | callback: 20 | early_stopping: 21 | patience: 10 22 | 23 | pretrain_model: 24 | loss_weights: 25 | local_a_l2att: 0.375 26 | local_a_att2l: 0.375 27 | local_b_l2att: 0.375 28 | local_b_att2l: 0.375 29 | global_a2b: 0.75 # scan with negatives from report 30 | global_b2a: 0.25 # report with negatives from scan 31 | 32 | 33 | optimizer: AdamW 34 | lr_scheduler: 35 | - cosine_annealing_per_epoch 36 | learning_rate: 1e-4 37 | weight_decay: 1e-6 38 | batch_size: 32 39 | max_epochs: 100 40 | 41 | projection_norm: batch 42 | 43 | l_weights_a: from_aggregation 44 | l_weights_b: from_aggregation 45 | l_weights_stop_grad: true 46 | encoder_a: 47 | local_weights_from_aggregator: true 48 | encoder_b: 49 | local_weights_from_aggregator: true 50 | -------------------------------------------------------------------------------- /configs/experiment/ablation/convirt_lr.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | defaults: 3 | - /online_eval: 4 | - chexpert_bin_scan 5 | - chexpert_bin_report 6 | - /objective@pretrain_model.g_alignment: global_contrastive 7 | - /scan_encoder@pretrain_model.encoder_a: resnet50_imagenet 8 | - /report_encoder@pretrain_model.encoder_b: bioclinicalbert_sentences 9 | 10 | name: 'ablation_convirt_lr' 11 | pretrain_dataset: mimic-cxr_ap-pa_find-impr_03 12 | 13 | num_dataloader_workers: 20 14 | trainer: 15 | accumulate_grad_batches: 16 16 | 17 | callback: 18 | early_stopping: 19 | patience: 10 20 | 21 | pretrain_model: 22 | loss_weights: 23 | global_a2b: 0.75 # scan with negatives from report 24 | global_b2a: 0.25 # report with negatives from scan 25 | 26 | encoder_b: 27 | data_augmentation: 28 | augment: true 29 | sentence_shuffling: null 30 | sentence_sampling: random_sentence 31 | duplicate_sentences: false 32 | 33 | projection_norm: null 34 | optimizer: AdamW 35 | lr_scheduler: 36 | - cosine_annealing_per_epoch 37 | learning_rate: 3e-5 38 | weight_decay: 1e-6 39 | batch_size: 32 40 | max_epochs: 100 41 | -------------------------------------------------------------------------------- /configs/experiment/ablation/g_only.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | defaults: 3 | - /online_eval: 4 | - chexpert_bin_scan 5 | - chexpert_bin_report 6 | - /objective@pretrain_model.g_alignment: global_contrastive 7 | - /scan_encoder@pretrain_model.encoder_a: resnet50_imagenet 8 | - /report_encoder@pretrain_model.encoder_b: bioclinicalbert_sentences 9 | 10 | name: 'ablation_g_only' 11 | pretrain_dataset: mimic-cxr_ap-pa_find-impr_03 12 | 13 | num_dataloader_workers: 20 14 | trainer: 15 | accumulate_grad_batches: 16 16 | 17 | callback: 18 | early_stopping: 19 | patience: 10 20 | 21 | pretrain_model: 22 | loss_weights: 23 | global_a2b: 0.75 # scan with negatives from report 24 | global_b2a: 0.25 # report with negatives from scan 25 | 26 | 27 | optimizer: AdamW 28 | lr_scheduler: 29 | - cosine_annealing_per_epoch 30 | learning_rate: 3e-4 31 | weight_decay: 1e-6 32 | batch_size: 32 33 | max_epochs: 100 34 | 35 | projection_norm: batch 36 | -------------------------------------------------------------------------------- /configs/experiment/ablation/g_single_sent_att.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | defaults: 3 | - /online_eval: 4 | - chexpert_bin_scan 5 | - chexpert_bin_report 6 | - /objective@pretrain_model.g_alignment: global_contrastive 7 | - /scan_encoder@pretrain_model.encoder_a: resnet50attention_imagenet 8 | - /report_encoder@pretrain_model.encoder_b: bioclinicalbert_sentences 9 | 10 | name: 'ablation_g_single_sent_att' 11 | pretrain_dataset: mimic-cxr_ap-pa_find-impr_03 12 | 13 | num_dataloader_workers: 20 14 | trainer: 15 | accumulate_grad_batches: 16 16 | 17 | callback: 18 | early_stopping: 19 | patience: 10 20 | 21 | pretrain_model: 22 | loss_weights: 23 | global_a2b: 0.75 # scan with negatives from report 24 | global_b2a: 0.25 # report with negatives from scan 25 | 26 | 27 | optimizer: AdamW 28 | lr_scheduler: 29 | - cosine_annealing_per_epoch 30 | learning_rate: 1e-4 31 | weight_decay: 1e-6 32 | batch_size: 32 33 | max_epochs: 100 34 | 35 | projection_norm: batch 36 | encoder_b: 37 | data_augmentation: 38 | sentence_shuffling: null 39 | sentence_sampling: random_sentence 40 | duplicate_sentences: false 41 | -------------------------------------------------------------------------------- /configs/experiment/ablation/no_coslr.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | defaults: 3 | - /online_eval: 4 | - chexpert_bin_scan 5 | - chexpert_bin_report 6 | - /objective@pretrain_model.g_alignment: global_contrastive 7 | - /objective@pretrain_model.ll_alignments_a.0: scan_local_intra_contrastive 8 | - /objective@pretrain_model.ll_alignments_b.0: report_sent_intra_contrastive 9 | - /scan_encoder@pretrain_model.encoder_a: resnet50attention_imagenet 10 | - /report_encoder@pretrain_model.encoder_b: bioclinicalbert_sentatt 11 | 12 | name: 'ablation_no_coslr' 13 | pretrain_dataset: mimic-cxr_ap-pa_find-impr_03 14 | 15 | num_dataloader_workers: 20 16 | trainer: 17 | accumulate_grad_batches: 16 18 | 19 | callback: 20 | early_stopping: 21 | patience: 10 22 | 23 | pretrain_model: 24 | loss_weights: 25 | local_a_l2att: 0.375 26 | local_a_att2l: 0.375 27 | local_b_l2att: 0.375 28 | local_b_att2l: 0.375 29 | global_a2b: 0.75 # scan with negatives from report 30 | global_b2a: 0.25 # report with negatives from scan 31 | 32 | 33 | optimizer: AdamW 34 | lr_scheduler: 35 | - reduce_on_plateau 36 | lr_reduce_patience: 12 37 | lr_reduce_factor: 0.5 38 | learning_rate: 3e-5 39 | weight_decay: 1e-6 40 | batch_size: 32 41 | max_epochs: 100 42 | 43 | projection_norm: batch 44 | 45 | l_weights_a: from_aggregation 46 | l_weights_b: from_aggregation 47 | l_weights_stop_grad: true 48 | encoder_a: 49 | local_weights_from_aggregator: true 50 | encoder_b: 51 | local_weights_from_aggregator: true 52 | -------------------------------------------------------------------------------- /configs/experiment/ablation/no_g.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | defaults: 3 | - /online_eval: 4 | - chexpert_bin_scan 5 | - chexpert_bin_report 6 | - /objective@pretrain_model.ll_alignments_a.0: scan_local_intra_contrastive 7 | - /objective@pretrain_model.ll_alignments_b.0: report_sent_intra_contrastive 8 | - /scan_encoder@pretrain_model.encoder_a: resnet50attention_imagenet 9 | - /report_encoder@pretrain_model.encoder_b: bioclinicalbert_sentences 10 | 11 | name: 'ablation_no_g' 12 | pretrain_dataset: mimic-cxr_ap-pa_find-impr_03 13 | 14 | num_dataloader_workers: 20 15 | trainer: 16 | accumulate_grad_batches: 16 17 | 18 | callback: 19 | early_stopping: 20 | patience: 10 21 | 22 | pretrain_model: 23 | loss_weights: 24 | local_a_l2att: 0.375 25 | local_a_att2l: 0.375 26 | local_b_l2att: 0.375 27 | local_b_att2l: 0.375 28 | 29 | 30 | optimizer: AdamW 31 | lr_scheduler: 32 | - cosine_annealing_per_epoch 33 | learning_rate: 1e-4 34 | weight_decay: 1e-6 35 | batch_size: 32 36 | max_epochs: 100 37 | 38 | projection_norm: batch 39 | 40 | -------------------------------------------------------------------------------- /configs/experiment/ablation/no_l.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | defaults: 3 | - /online_eval: 4 | - chexpert_bin_scan 5 | - chexpert_bin_report 6 | - /objective@pretrain_model.g_alignment: global_contrastive 7 | - /scan_encoder@pretrain_model.encoder_a: resnet50attention_imagenet 8 | - /report_encoder@pretrain_model.encoder_b: bioclinicalbert_sentatt 9 | 10 | name: 'ablation_no_l' 11 | pretrain_dataset: mimic-cxr_ap-pa_find-impr_03 12 | 13 | num_dataloader_workers: 20 14 | trainer: 15 | accumulate_grad_batches: 16 16 | 17 | callback: 18 | early_stopping: 19 | patience: 10 20 | 21 | pretrain_model: 22 | loss_weights: 23 | global_a2b: 0.75 # scan with negatives from report 24 | global_b2a: 0.25 # report with negatives from scan 25 | 26 | 27 | optimizer: AdamW 28 | lr_scheduler: 29 | - cosine_annealing_per_epoch 30 | learning_rate: 1e-4 31 | weight_decay: 1e-6 32 | batch_size: 32 33 | max_epochs: 100 34 | 35 | projection_norm: batch 36 | -------------------------------------------------------------------------------- /configs/experiment/ablation/no_spatial.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | defaults: 3 | - /online_eval: 4 | - chexpert_bin_scan 5 | - chexpert_bin_report 6 | - /objective@pretrain_model.g_alignment: global_contrastive 7 | - /objective@pretrain_model.ll_alignments_a.0: scan_local_intra_contrastive 8 | - /objective@pretrain_model.ll_alignments_b.0: report_sent_intra_contrastive 9 | - /scan_encoder@pretrain_model.encoder_a: resnet50attention_imagenet 10 | - /report_encoder@pretrain_model.encoder_b: bioclinicalbert_sentatt 11 | 12 | name: 'ablation_no_spatial' 13 | pretrain_dataset: mimic-cxr_ap-pa_find-impr_03 14 | 15 | num_dataloader_workers: 20 16 | trainer: 17 | accumulate_grad_batches: 16 18 | 19 | callback: 20 | early_stopping: 21 | patience: 10 22 | 23 | pretrain_model: 24 | loss_weights: 25 | local_a_l2att: 0.375 26 | local_a_att2l: 0.375 27 | local_b_l2att: 0.375 28 | local_b_att2l: 0.375 29 | global_a2b: 0.75 # scan with negatives from report 30 | global_b2a: 0.25 # report with negatives from scan 31 | ll_alignments_a: 32 | "0": 33 | smooth_weights: false 34 | 35 | optimizer: AdamW 36 | lr_scheduler: 37 | - cosine_annealing_per_epoch 38 | learning_rate: 1e-4 39 | weight_decay: 1e-6 40 | batch_size: 32 41 | max_epochs: 100 42 | 43 | projection_norm: batch 44 | 45 | l_weights_a: from_aggregation 46 | l_weights_b: from_aggregation 47 | l_weights_stop_grad: true 48 | encoder_a: 49 | local_weights_from_aggregator: true 50 | encoder_b: 51 | local_weights_from_aggregator: true 52 | -------------------------------------------------------------------------------- /configs/experiment/ablation/no_weight.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | defaults: 3 | - /online_eval: 4 | - chexpert_bin_scan 5 | - chexpert_bin_report 6 | - /objective@pretrain_model.g_alignment: global_contrastive 7 | - /objective@pretrain_model.ll_alignments_a.0: scan_local_intra_contrastive 8 | - /objective@pretrain_model.ll_alignments_b.0: report_sent_intra_contrastive 9 | - /scan_encoder@pretrain_model.encoder_a: resnet50attention_imagenet 10 | - /report_encoder@pretrain_model.encoder_b: bioclinicalbert_sentatt 11 | 12 | name: 'ablation_no_weight' 13 | pretrain_dataset: mimic-cxr_ap-pa_find-impr_03 14 | 15 | num_dataloader_workers: 20 16 | trainer: 17 | accumulate_grad_batches: 16 18 | 19 | callback: 20 | early_stopping: 21 | patience: 10 22 | 23 | pretrain_model: 24 | loss_weights: 25 | local_a_l2att: 0.375 26 | local_a_att2l: 0.375 27 | local_b_l2att: 0.375 28 | local_b_att2l: 0.375 29 | global_a2b: 0.75 # scan with negatives from report 30 | global_b2a: 0.25 # report with negatives from scan 31 | 32 | 33 | optimizer: AdamW 34 | lr_scheduler: 35 | - cosine_annealing_per_epoch 36 | learning_rate: 1e-4 37 | weight_decay: 1e-6 38 | batch_size: 32 39 | max_epochs: 100 40 | 41 | projection_norm: batch 42 | 43 | l_weights_a: null 44 | l_weights_b: null 45 | -------------------------------------------------------------------------------- /configs/experiment/ablation/no_weight_no_att.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | defaults: 3 | - /online_eval: 4 | - chexpert_bin_scan 5 | - chexpert_bin_report 6 | - /objective@pretrain_model.g_alignment: global_contrastive 7 | - /objective@pretrain_model.ll_alignments_a.0: scan_local_intra_contrastive 8 | - /objective@pretrain_model.ll_alignments_b.0: report_sent_intra_contrastive 9 | - /scan_encoder@pretrain_model.encoder_a: resnet50_imagenet 10 | - /report_encoder@pretrain_model.encoder_b: bioclinicalbert_sentences 11 | 12 | name: 'ablation_no_weight_no_att' 13 | pretrain_dataset: mimic-cxr_ap-pa_find-impr_03 14 | 15 | num_dataloader_workers: 20 16 | trainer: 17 | accumulate_grad_batches: 16 18 | 19 | callback: 20 | early_stopping: 21 | patience: 10 22 | 23 | pretrain_model: 24 | loss_weights: 25 | local_a_l2att: 0.375 26 | local_a_att2l: 0.375 27 | local_b_l2att: 0.375 28 | local_b_att2l: 0.375 29 | global_a2b: 0.75 # scan with negatives from report 30 | global_b2a: 0.25 # report with negatives from scan 31 | 32 | 33 | optimizer: AdamW 34 | lr_scheduler: 35 | - cosine_annealing_per_epoch 36 | learning_rate: 3e-5 37 | weight_decay: 1e-6 38 | batch_size: 32 39 | max_epochs: 100 40 | 41 | projection_norm: batch 42 | 43 | l_weights_a: null 44 | l_weights_b: null 45 | -------------------------------------------------------------------------------- /configs/experiment/eval/baseline_random.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | defaults: 3 | - /encoder: 4 | - base_scan_encoder 5 | 6 | encoder: 7 | backbone_architecture: 'resnet' 8 | backbone_model: [ 'pytorch/vision:v0.6.0', 'resnet50' ] 9 | backbone_pretrained: false 10 | input_size: [ 224, 224 ] 11 | region_feature_layer: 'conv5' 12 | global_feature_layer: 'conv5' # before avg pool and FC 13 | global_aggregator: 'avg' # as used in ResNet 14 | 15 | data_augmentation: 16 | augment: false 17 | 18 | name: ??? 19 | average_runs: 5 20 | -------------------------------------------------------------------------------- /configs/experiment/eval/baseline_random_chexpert_finetune.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | defaults: 3 | - /evaluation_model: 4 | - chexpert_finetune 5 | - /encoder: 6 | - base_scan_encoder 7 | 8 | encoder: 9 | backbone_architecture: 'resnet' 10 | backbone_model: [ 'pytorch/vision:v0.6.0', 'resnet50' ] 11 | backbone_pretrained: false 12 | input_size: [ 224, 224 ] 13 | region_feature_layer: 'conv3' 14 | global_feature_layer: 'conv5' # before avg pool and FC 15 | global_aggregator: 'avg' # as used in ResNet 16 | 17 | data_augmentation: 18 | augment: false 19 | 20 | monitor_metric: '${evaluation_model.eval_name}_val/auroc' 21 | monitor_metric_mode: 'max' 22 | 23 | name: 'baseline_random_chexpert_finetune' 24 | 25 | -------------------------------------------------------------------------------- /configs/experiment/eval/baseline_random_chexpert_linear.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | defaults: 3 | - /evaluation_model: 4 | - chexpert_linear 5 | - /encoder: 6 | - base_scan_encoder 7 | 8 | encoder: 9 | backbone_architecture: 'resnet' 10 | backbone_model: [ 'pytorch/vision:v0.6.0', 'resnet50' ] 11 | backbone_pretrained: false 12 | input_size: [ 224, 224 ] 13 | region_feature_layer: 'conv3' 14 | global_feature_layer: 'conv5' # before avg pool and FC 15 | global_aggregator: 'avg' # as used in ResNet 16 | 17 | data_augmentation: 18 | augment: false 19 | 20 | monitor_metric: '${evaluation_model.eval_name}_val/auroc' 21 | monitor_metric_mode: 'max' 22 | 23 | name: 'baseline_random_chexpert_linear' 24 | 25 | -------------------------------------------------------------------------------- /configs/experiment/eval/baseline_random_covid_lin_frozen.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | defaults: 3 | - /evaluation_model: covid_linear_frozen 4 | - /experiment/eval: baseline_random 5 | 6 | monitor_metric: ${evaluation_model.eval_name}_val/opacity_dice 7 | monitor_metric_mode: max 8 | 9 | callback: 10 | early_stopping: 11 | patience: 20 12 | -------------------------------------------------------------------------------- /configs/experiment/eval/baseline_random_covid_unet_finetune.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | defaults: 3 | - /evaluation_model: covid_unet_finetune 4 | - /experiment/eval: baseline_random 5 | 6 | monitor_metric: ${evaluation_model.eval_name}_val/opacity_dice 7 | monitor_metric_mode: max 8 | 9 | callback: 10 | early_stopping: 11 | patience: 20 12 | -------------------------------------------------------------------------------- /configs/experiment/eval/baseline_random_covid_unet_frozen.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | defaults: 3 | - /evaluation_model: covid_unet_frozen 4 | - /experiment/eval: baseline_random 5 | 6 | monitor_metric: ${evaluation_model.eval_name}_val/opacity_dice 7 | monitor_metric_mode: max 8 | 9 | callback: 10 | early_stopping: 11 | patience: 20 12 | -------------------------------------------------------------------------------- /configs/experiment/eval/baseline_random_nih_seg_frozen.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | defaults: 3 | - /evaluation_model: nih_seg_frozen 4 | - /experiment/eval: baseline_random 5 | 6 | monitor_metric: ${evaluation_model.eval_name}_val/avg_dice 7 | monitor_metric_mode: max 8 | 9 | callback: 10 | early_stopping: 11 | patience: 10 12 | -------------------------------------------------------------------------------- /configs/experiment/eval/baseline_random_object_finetune_full.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | defaults: 3 | - /evaluation_model: object_finetune_full 4 | - /experiment/eval: baseline_random 5 | 6 | monitor_metric: ${evaluation_model.eval_name}_val/froc 7 | monitor_metric_mode: max 8 | 9 | callback: 10 | early_stopping: 11 | patience: 10 12 | -------------------------------------------------------------------------------- /configs/experiment/eval/baseline_random_object_frozen_full.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | defaults: 3 | - /evaluation_model: object_frozen_full 4 | - /experiment/eval: baseline_random 5 | 6 | monitor_metric: ${evaluation_model.eval_name}_val/froc 7 | monitor_metric_mode: max 8 | 9 | callback: 10 | early_stopping: 11 | patience: 10 12 | -------------------------------------------------------------------------------- /configs/experiment/eval/baseline_random_object_seg_frozen_full.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | defaults: 3 | - /evaluation_model: object_seg_frozen_full 4 | - /experiment/eval: baseline_random 5 | 6 | monitor_metric: ${evaluation_model.eval_name}_val/avg_dice 7 | monitor_metric_mode: max 8 | 9 | callback: 10 | early_stopping: 11 | patience: 10 12 | -------------------------------------------------------------------------------- /configs/experiment/eval/baseline_random_pneumo_unet_finetune.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | defaults: 3 | - /evaluation_model: pneumothorax_unet_finetune 4 | - /experiment/eval: baseline_random 5 | 6 | monitor_metric: ${evaluation_model.eval_name}_val/pneumothorax_dice 7 | monitor_metric_mode: max 8 | 9 | callback: 10 | early_stopping: 11 | patience: 10 12 | -------------------------------------------------------------------------------- /configs/experiment/eval/baseline_random_pneumo_unet_finetune_full.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | defaults: 3 | - /evaluation_model: pneumothorax_unet_finetune_full 4 | - /experiment/eval: baseline_random 5 | 6 | monitor_metric: ${evaluation_model.eval_name}_val/pneumothorax_dice 7 | monitor_metric_mode: max 8 | 9 | callback: 10 | early_stopping: 11 | patience: 10 12 | 13 | -------------------------------------------------------------------------------- /configs/experiment/eval/baseline_random_pneumo_unet_frozen.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | defaults: 3 | - /evaluation_model: pneumothorax_unet_frozen 4 | - /experiment/eval: baseline_random 5 | 6 | monitor_metric: ${evaluation_model.eval_name}_val/pneumothorax_dice 7 | monitor_metric_mode: max 8 | 9 | callback: 10 | early_stopping: 11 | patience: 10 12 | -------------------------------------------------------------------------------- /configs/experiment/eval/baseline_random_pneumo_unet_frozen_full.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | defaults: 3 | - /evaluation_model: pneumothorax_unet_frozen_full 4 | - /experiment/eval: baseline_random 5 | 6 | monitor_metric: ${evaluation_model.eval_name}_val/pneumothorax_dice 7 | monitor_metric_mode: max 8 | 9 | callback: 10 | early_stopping: 11 | patience: 10 12 | -------------------------------------------------------------------------------- /configs/experiment/eval/baseline_random_rsna_finetune.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | defaults: 3 | - /evaluation_model: rsna_finetune 4 | - /experiment/eval: baseline_random 5 | 6 | monitor_metric: ${evaluation_model.eval_name}_val/mAP 7 | monitor_metric_mode: max 8 | 9 | callback: 10 | early_stopping: 11 | patience: 10 12 | -------------------------------------------------------------------------------- /configs/experiment/eval/baseline_random_rsna_finetune_1.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | defaults: 3 | - /evaluation_model: rsna_finetune_1 4 | - /experiment/eval: baseline_random 5 | 6 | monitor_metric: ${evaluation_model.eval_name}_val/mAP 7 | monitor_metric_mode: max 8 | 9 | callback: 10 | early_stopping: 11 | patience: 20 12 | -------------------------------------------------------------------------------- /configs/experiment/eval/baseline_random_rsna_finetune_full.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | defaults: 3 | - /evaluation_model: rsna_finetune_full 4 | - /experiment/eval: baseline_random 5 | 6 | monitor_metric: ${evaluation_model.eval_name}_val/mAP 7 | monitor_metric_mode: max 8 | 9 | callback: 10 | early_stopping: 11 | patience: 10 12 | -------------------------------------------------------------------------------- /configs/experiment/eval/baseline_random_rsna_frozen.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | defaults: 3 | - /evaluation_model: rsna_frozen 4 | - /experiment/eval: baseline_random 5 | 6 | monitor_metric: ${evaluation_model.eval_name}_val/mAP 7 | monitor_metric_mode: max 8 | 9 | callback: 10 | early_stopping: 11 | patience: 10 12 | -------------------------------------------------------------------------------- /configs/experiment/eval/baseline_random_rsna_frozen_1.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | defaults: 3 | - /evaluation_model: rsna_frozen_1 4 | - /experiment/eval: baseline_random 5 | 6 | monitor_metric: ${evaluation_model.eval_name}_val/mAP 7 | monitor_metric_mode: max 8 | 9 | callback: 10 | early_stopping: 11 | patience: 20 12 | -------------------------------------------------------------------------------- /configs/experiment/eval/baseline_random_rsna_frozen_full.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | defaults: 3 | - /evaluation_model: rsna_frozen_full 4 | - /experiment/eval: baseline_random 5 | 6 | monitor_metric: ${evaluation_model.eval_name}_val/mAP 7 | monitor_metric_mode: max 8 | 9 | callback: 10 | early_stopping: 11 | patience: 10 12 | -------------------------------------------------------------------------------- /configs/experiment/eval/baseline_random_rsna_seg_frozen.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | defaults: 3 | - /evaluation_model: rsna_seg_frozen 4 | - /experiment/eval: baseline_random 5 | 6 | monitor_metric: ${evaluation_model.eval_name}_val/opacity_dice 7 | monitor_metric_mode: max 8 | 9 | callback: 10 | early_stopping: 11 | patience: 10 12 | -------------------------------------------------------------------------------- /configs/experiment/eval/baseline_random_rsna_seg_frozen_1.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | defaults: 3 | - /evaluation_model: rsna_seg_frozen_1 4 | - /experiment/eval: baseline_random 5 | 6 | monitor_metric: ${evaluation_model.eval_name}_val/opacity_dice 7 | monitor_metric_mode: max 8 | 9 | callback: 10 | early_stopping: 11 | patience: 20 12 | -------------------------------------------------------------------------------- /configs/experiment/eval/baseline_random_rsna_seg_frozen_full.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | defaults: 3 | - /evaluation_model: rsna_seg_frozen_full 4 | - /experiment/eval: baseline_random 5 | 6 | monitor_metric: ${evaluation_model.eval_name}_val/opacity_dice 7 | monitor_metric_mode: max 8 | 9 | callback: 10 | early_stopping: 11 | patience: 10 12 | -------------------------------------------------------------------------------- /configs/experiment/eval/eval.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | evaluated_encoder: a 3 | name: ??? 4 | 5 | callback: 6 | early_stopping: 7 | patience: 10 8 | 9 | average_runs: 5 10 | -------------------------------------------------------------------------------- /configs/experiment/eval/eval_chexpert_finetune.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | defaults: 3 | - /evaluation_model: 4 | - chexpert_finetune 5 | 6 | evaluated_encoder: a 7 | name: 'eval_chexpert_finetune' 8 | 9 | monitor_metric: '${evaluation_model.eval_name}_val/auroc' 10 | monitor_metric_mode: max 11 | 12 | -------------------------------------------------------------------------------- /configs/experiment/eval/eval_chexpert_linear.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | defaults: 3 | - /evaluation_model: 4 | - chexpert_linear 5 | 6 | evaluated_encoder: a 7 | name: 'eval_chexpert_linear' 8 | 9 | monitor_metric: '${evaluation_model.eval_name}_val/auroc' 10 | monitor_metric_mode: max 11 | 12 | -------------------------------------------------------------------------------- /configs/experiment/eval/eval_covid_lin_frozen.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | defaults: 3 | - /evaluation_model: 4 | - covid_linear_frozen 5 | 6 | evaluated_encoder: a 7 | name: ??? 8 | 9 | monitor_metric: ${evaluation_model.eval_name}_val/opacity_dice 10 | monitor_metric_mode: max 11 | 12 | callback: 13 | early_stopping: 14 | patience: 20 15 | 16 | average_runs: 5 17 | 18 | 19 | 20 | -------------------------------------------------------------------------------- /configs/experiment/eval/eval_covid_unet_finetune.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | defaults: 3 | - /evaluation_model: 4 | - covid_unet_finetune 5 | 6 | evaluated_encoder: a 7 | name: ??? 8 | 9 | monitor_metric: ${evaluation_model.eval_name}_val/opacity_dice 10 | monitor_metric_mode: max 11 | 12 | callback: 13 | early_stopping: 14 | patience: 20 15 | 16 | average_runs: 5 17 | 18 | 19 | 20 | -------------------------------------------------------------------------------- /configs/experiment/eval/eval_covid_unet_frozen.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | defaults: 3 | - /evaluation_model: 4 | - covid_unet_frozen 5 | 6 | evaluated_encoder: a 7 | name: ??? 8 | 9 | monitor_metric: ${evaluation_model.eval_name}_val/opacity_dice 10 | monitor_metric_mode: max 11 | 12 | callback: 13 | early_stopping: 14 | patience: 20 15 | 16 | average_runs: 5 17 | 18 | 19 | 20 | -------------------------------------------------------------------------------- /configs/experiment/eval/eval_nih_finetune.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | defaults: 3 | - /evaluation_model: 4 | - nih_finetune 5 | 6 | evaluated_encoder: a 7 | name: ??? 8 | 9 | monitor_metric: ${evaluation_model.eval_name}_val/mAP 10 | monitor_metric_mode: max 11 | 12 | callback: 13 | early_stopping: 14 | patience: 10 15 | 16 | average_runs: 5 17 | -------------------------------------------------------------------------------- /configs/experiment/eval/eval_nih_frozen.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | defaults: 3 | - /evaluation_model: 4 | - nih_frozen 5 | 6 | evaluated_encoder: a 7 | name: ??? 8 | 9 | monitor_metric: ${evaluation_model.eval_name}_val/mAP 10 | monitor_metric_mode: max 11 | 12 | callback: 13 | early_stopping: 14 | patience: 10 15 | 16 | average_runs: 5 17 | -------------------------------------------------------------------------------- /configs/experiment/eval/eval_nih_seg_frozen.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | defaults: 3 | - /evaluation_model: 4 | - nih_seg_frozen 5 | 6 | evaluated_encoder: a 7 | name: ??? 8 | 9 | monitor_metric: ${evaluation_model.eval_name}_val/avg_dice 10 | monitor_metric_mode: max 11 | 12 | callback: 13 | early_stopping: 14 | patience: 10 15 | 16 | average_runs: 5 17 | -------------------------------------------------------------------------------- /configs/experiment/eval/eval_object_finetune.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | defaults: 3 | - /evaluation_model: 4 | - object_finetune 5 | 6 | evaluated_encoder: a 7 | name: ??? 8 | 9 | monitor_metric: ${evaluation_model.eval_name}_val/froc 10 | monitor_metric_mode: max 11 | 12 | callback: 13 | early_stopping: 14 | patience: 10 15 | 16 | average_runs: 5 17 | -------------------------------------------------------------------------------- /configs/experiment/eval/eval_object_finetune_full.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | defaults: 3 | - /evaluation_model: 4 | - object_finetune_full 5 | 6 | evaluated_encoder: a 7 | name: ??? 8 | 9 | monitor_metric: ${evaluation_model.eval_name}_val/froc 10 | monitor_metric_mode: max 11 | 12 | callback: 13 | early_stopping: 14 | patience: 10 15 | 16 | average_runs: 5 17 | -------------------------------------------------------------------------------- /configs/experiment/eval/eval_object_frozen.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | defaults: 3 | - /evaluation_model: 4 | - object_frozen 5 | 6 | evaluated_encoder: a 7 | name: ??? 8 | 9 | monitor_metric: ${evaluation_model.eval_name}_val/froc 10 | monitor_metric_mode: max 11 | 12 | callback: 13 | early_stopping: 14 | patience: 10 15 | 16 | average_runs: 5 17 | -------------------------------------------------------------------------------- /configs/experiment/eval/eval_object_frozen_full.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | defaults: 3 | - /evaluation_model: 4 | - object_frozen_full 5 | 6 | evaluated_encoder: a 7 | name: ??? 8 | 9 | monitor_metric: ${evaluation_model.eval_name}_val/froc 10 | monitor_metric_mode: max 11 | 12 | callback: 13 | early_stopping: 14 | patience: 10 15 | 16 | average_runs: 5 17 | -------------------------------------------------------------------------------- /configs/experiment/eval/eval_object_seg_frozen.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | defaults: 3 | - /evaluation_model: 4 | - object_seg_frozen 5 | 6 | evaluated_encoder: a 7 | name: ??? 8 | 9 | monitor_metric: ${evaluation_model.eval_name}_val/avg_dice 10 | monitor_metric_mode: max 11 | 12 | callback: 13 | early_stopping: 14 | patience: 10 15 | 16 | average_runs: 5 17 | -------------------------------------------------------------------------------- /configs/experiment/eval/eval_object_seg_frozen_full.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | defaults: 3 | - /evaluation_model: 4 | - object_seg_frozen_full 5 | 6 | evaluated_encoder: a 7 | name: ??? 8 | 9 | monitor_metric: ${evaluation_model.eval_name}_val/avg_dice 10 | monitor_metric_mode: max 11 | 12 | callback: 13 | early_stopping: 14 | patience: 10 15 | 16 | average_runs: 5 17 | -------------------------------------------------------------------------------- /configs/experiment/eval/eval_pneumo_lin_frozen.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | defaults: 3 | - /evaluation_model: 4 | - pneumothorax_linear_frozen 5 | 6 | evaluated_encoder: a 7 | name: ??? 8 | 9 | monitor_metric: ${evaluation_model.eval_name}_val/pneumothorax_dice 10 | monitor_metric_mode: max 11 | 12 | callback: 13 | early_stopping: 14 | patience: 10 15 | 16 | average_runs: 5 17 | -------------------------------------------------------------------------------- /configs/experiment/eval/eval_pneumo_lin_frozen_full.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | defaults: 3 | - /evaluation_model: 4 | - pneumothorax_linear_frozen_full 5 | 6 | evaluated_encoder: a 7 | name: ??? 8 | 9 | monitor_metric: ${evaluation_model.eval_name}_val/pneumothorax_dice 10 | monitor_metric_mode: max 11 | 12 | callback: 13 | early_stopping: 14 | patience: 10 15 | 16 | average_runs: 5 17 | -------------------------------------------------------------------------------- /configs/experiment/eval/eval_pneumo_unet_finetune.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | defaults: 3 | - /evaluation_model: 4 | - pneumothorax_unet_finetune 5 | 6 | evaluated_encoder: a 7 | name: ??? 8 | 9 | monitor_metric: ${evaluation_model.eval_name}_val/pneumothorax_dice 10 | monitor_metric_mode: max 11 | 12 | callback: 13 | early_stopping: 14 | patience: 10 15 | 16 | average_runs: 5 17 | -------------------------------------------------------------------------------- /configs/experiment/eval/eval_pneumo_unet_finetune_full.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | defaults: 3 | - /evaluation_model: 4 | - pneumothorax_unet_finetune_full 5 | 6 | evaluated_encoder: a 7 | name: ??? 8 | 9 | monitor_metric: ${evaluation_model.eval_name}_val/pneumothorax_dice 10 | monitor_metric_mode: max 11 | 12 | callback: 13 | early_stopping: 14 | patience: 10 15 | 16 | average_runs: 5 17 | -------------------------------------------------------------------------------- /configs/experiment/eval/eval_pneumo_unet_frozen.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | defaults: 3 | - /evaluation_model: 4 | - pneumothorax_unet_frozen 5 | 6 | evaluated_encoder: a 7 | name: ??? 8 | 9 | monitor_metric: ${evaluation_model.eval_name}_val/pneumothorax_dice 10 | monitor_metric_mode: max 11 | 12 | callback: 13 | early_stopping: 14 | patience: 10 15 | 16 | average_runs: 5 17 | -------------------------------------------------------------------------------- /configs/experiment/eval/eval_pneumo_unet_frozen_full.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | defaults: 3 | - /evaluation_model: 4 | - pneumothorax_unet_frozen_full 5 | 6 | evaluated_encoder: a 7 | name: ??? 8 | 9 | monitor_metric: ${evaluation_model.eval_name}_val/pneumothorax_dice 10 | monitor_metric_mode: max 11 | 12 | callback: 13 | early_stopping: 14 | patience: 10 15 | 16 | average_runs: 5 17 | -------------------------------------------------------------------------------- /configs/experiment/eval/eval_rsna_finetune.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | defaults: 3 | - /evaluation_model: 4 | - rsna_finetune 5 | 6 | evaluated_encoder: a 7 | name: ??? 8 | 9 | monitor_metric: ${evaluation_model.eval_name}_val/mAP 10 | monitor_metric_mode: max 11 | 12 | callback: 13 | early_stopping: 14 | patience: 10 15 | 16 | average_runs: 5 17 | -------------------------------------------------------------------------------- /configs/experiment/eval/eval_rsna_finetune_1.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | defaults: 3 | - /evaluation_model: 4 | - rsna_finetune_1 5 | 6 | evaluated_encoder: a 7 | name: ??? 8 | 9 | monitor_metric: ${evaluation_model.eval_name}_val/mAP 10 | monitor_metric_mode: max 11 | 12 | callback: 13 | early_stopping: 14 | patience: 20 15 | 16 | average_runs: 5 17 | -------------------------------------------------------------------------------- /configs/experiment/eval/eval_rsna_finetune_full.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | defaults: 3 | - /evaluation_model: 4 | - rsna_finetune_full 5 | 6 | evaluated_encoder: a 7 | name: ??? 8 | 9 | monitor_metric: ${evaluation_model.eval_name}_val/mAP 10 | monitor_metric_mode: max 11 | 12 | callback: 13 | early_stopping: 14 | patience: 10 15 | 16 | average_runs: 5 17 | -------------------------------------------------------------------------------- /configs/experiment/eval/eval_rsna_frozen.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | defaults: 3 | - /evaluation_model: 4 | - rsna_frozen 5 | 6 | evaluated_encoder: a 7 | name: ??? 8 | 9 | reported_metrics: 10 | - "rsna_test/mAP__runs_mean" 11 | - "rsna_test/mAP__runs_std" 12 | 13 | monitor_metric: ${evaluation_model.eval_name}_val/mAP 14 | monitor_metric_mode: max 15 | 16 | callback: 17 | early_stopping: 18 | patience: 10 19 | 20 | average_runs: 5 21 | 22 | #evaluation_model: 23 | # batch_size: 8 24 | # learning_rate: 3e-4 25 | -------------------------------------------------------------------------------- /configs/experiment/eval/eval_rsna_frozen_1.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | defaults: 3 | - /evaluation_model: 4 | - rsna_frozen_1 5 | 6 | evaluated_encoder: a 7 | name: ??? 8 | 9 | monitor_metric: ${evaluation_model.eval_name}_val/mAP 10 | monitor_metric_mode: max 11 | 12 | callback: 13 | early_stopping: 14 | patience: 20 15 | 16 | average_runs: 5 17 | -------------------------------------------------------------------------------- /configs/experiment/eval/eval_rsna_frozen_concat.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | defaults: 3 | - /evaluation_model: 4 | - rsna_frozen_concat 5 | 6 | evaluated_encoder: a 7 | name: ??? 8 | 9 | reported_metrics: 10 | - "rsna_test/mAP__runs_mean" 11 | - "rsna_test/mAP__runs_std" 12 | - "rsna_test/mAP@0.75__runs_mean" 13 | - "rsna_test/mAP@0.75__runs_std" 14 | - "rsna_test/mAP@0.5__runs_mean" 15 | - "rsna_test/mAP@0.5__runs_std" 16 | - "rsna_val/mAP__runs_mean" 17 | - "rsna_val/mAP__runs_std" 18 | - "rsna_val/mAP@0.75__runs_mean" 19 | - "rsna_val/mAP@0.75__runs_std" 20 | - "rsna_val/mAP@0.5__runs_mean" 21 | - "rsna_val/mAP@0.5__runs_std" 22 | 23 | monitor_metric: ${evaluation_model.eval_name}_val/mAP 24 | monitor_metric_mode: max 25 | 26 | evaluation_model: 27 | dataset: rsna_01 28 | batch_size: 8 29 | learning_rate: 3e-4 30 | 31 | average_runs: 5 32 | 33 | callback: 34 | early_stopping: 35 | patience: 10 # 20 36 | 37 | 38 | 39 | -------------------------------------------------------------------------------- /configs/experiment/eval/eval_rsna_frozen_for_unet.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | defaults: 3 | - /evaluation_model: 4 | - rsna_frozen_for_unet 5 | 6 | evaluated_encoder: a 7 | name: ??? 8 | 9 | reported_metrics: 10 | - "rsna_test/mAP__runs_mean" 11 | - "rsna_test/mAP__runs_std" 12 | 13 | monitor_metric: ${evaluation_model.eval_name}_val/mAP 14 | monitor_metric_mode: max 15 | 16 | callback: 17 | early_stopping: 18 | patience: 10 19 | 20 | average_runs: 5 21 | 22 | #evaluation_model: 23 | # batch_size: 8 24 | # learning_rate: 3e-4 25 | -------------------------------------------------------------------------------- /configs/experiment/eval/eval_rsna_frozen_full.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | defaults: 3 | - /evaluation_model: 4 | - rsna_frozen_full 5 | 6 | evaluated_encoder: a 7 | name: ??? 8 | 9 | monitor_metric: ${evaluation_model.eval_name}_val/mAP 10 | monitor_metric_mode: max 11 | 12 | callback: 13 | early_stopping: 14 | patience: 10 15 | 16 | average_runs: 5 17 | -------------------------------------------------------------------------------- /configs/experiment/eval/eval_rsna_frozen_up.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | defaults: 3 | - /evaluation_model: 4 | - rsna_frozen_up 5 | 6 | evaluated_encoder: a 7 | name: ??? 8 | 9 | reported_metrics: 10 | - "rsna_test/mAP__runs_mean" 11 | - "rsna_test/mAP__runs_std" 12 | - "rsna_test/mAP@0.75__runs_mean" 13 | - "rsna_test/mAP@0.75__runs_std" 14 | - "rsna_test/mAP@0.5__runs_mean" 15 | - "rsna_test/mAP@0.5__runs_std" 16 | - "rsna_val/mAP__runs_mean" 17 | - "rsna_val/mAP__runs_std" 18 | - "rsna_val/mAP@0.75__runs_mean" 19 | - "rsna_val/mAP@0.75__runs_std" 20 | - "rsna_val/mAP@0.5__runs_mean" 21 | - "rsna_val/mAP@0.5__runs_std" 22 | 23 | monitor_metric: ${evaluation_model.eval_name}_val/mAP 24 | monitor_metric_mode: max 25 | 26 | evaluation_model: 27 | dataset: rsna_01 28 | batch_size: 8 29 | learning_rate: 3e-4 30 | 31 | average_runs: 5 32 | 33 | callback: 34 | early_stopping: 35 | patience: 10 # 20 36 | 37 | 38 | 39 | -------------------------------------------------------------------------------- /configs/experiment/eval/eval_rsna_rcnn_frozen.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | defaults: 3 | - /evaluation_model: 4 | - rsna_RCNN_frozen 5 | 6 | evaluated_encoder: a 7 | name: ??? 8 | 9 | reported_metrics: 10 | - "rsna_test/mAP__runs_mean" 11 | - "rsna_test/mAP__runs_std" 12 | - "rsna_test/mAP@0.75__runs_mean" 13 | - "rsna_test/mAP@0.75__runs_std" 14 | - "rsna_test/mAP@0.5__runs_mean" 15 | - "rsna_test/mAP@0.5__runs_std" 16 | - "rsna_val/mAP__runs_mean" 17 | - "rsna_val/mAP__runs_std" 18 | - "rsna_val/mAP@0.75__runs_mean" 19 | - "rsna_val/mAP@0.75__runs_std" 20 | - "rsna_val/mAP@0.5__runs_mean" 21 | - "rsna_val/mAP@0.5__runs_std" 22 | 23 | monitor_metric: ${evaluation_model.eval_name}_val/mAP 24 | monitor_metric_mode: max 25 | 26 | evaluation_model: 27 | dataset: rsna_01 28 | batch_size: 8 29 | learning_rate: 3e-4 30 | 31 | average_runs: 5 32 | 33 | callback: 34 | early_stopping: 35 | patience: 10 # 20 36 | 37 | 38 | 39 | -------------------------------------------------------------------------------- /configs/experiment/eval/eval_rsna_rcnnfpn_frozen.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | defaults: 3 | - /evaluation_model: 4 | - rsna_RCNN_FPN_frozen 5 | 6 | evaluated_encoder: a 7 | name: ??? 8 | 9 | reported_metrics: 10 | - "rsna_test/mAP__runs_mean" 11 | - "rsna_test/mAP__runs_std" 12 | - "rsna_test/mAP@0.75__runs_mean" 13 | - "rsna_test/mAP@0.75__runs_std" 14 | - "rsna_test/mAP@0.5__runs_mean" 15 | - "rsna_test/mAP@0.5__runs_std" 16 | - "rsna_val/mAP__runs_mean" 17 | - "rsna_val/mAP__runs_std" 18 | - "rsna_val/mAP@0.75__runs_mean" 19 | - "rsna_val/mAP@0.75__runs_std" 20 | - "rsna_val/mAP@0.5__runs_mean" 21 | - "rsna_val/mAP@0.5__runs_std" 22 | 23 | monitor_metric: ${evaluation_model.eval_name}_val/mAP 24 | monitor_metric_mode: max 25 | 26 | evaluation_model: 27 | dataset: rsna_01 28 | batch_size: 8 29 | learning_rate: 3e-4 30 | 31 | average_runs: 5 32 | 33 | callback: 34 | early_stopping: 35 | patience: 10 # 20 36 | 37 | 38 | 39 | -------------------------------------------------------------------------------- /configs/experiment/eval/eval_rsna_seg_frozen.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | defaults: 3 | - /evaluation_model: 4 | - rsna_seg_frozen 5 | 6 | evaluated_encoder: a 7 | name: ??? 8 | 9 | monitor_metric: ${evaluation_model.eval_name}_val/opacity_dice 10 | monitor_metric_mode: max 11 | 12 | callback: 13 | early_stopping: 14 | patience: 10 15 | 16 | average_runs: 5 17 | 18 | #evaluation_model: 19 | # batch_size: 8 20 | # learning_rate: 3e-3 #3e-3 for lcross and l, 3e-5 for global and lg (1e-5 is similar good) 21 | -------------------------------------------------------------------------------- /configs/experiment/eval/eval_rsna_seg_frozen_1.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | defaults: 3 | - /evaluation_model: 4 | - rsna_seg_frozen_1 5 | 6 | evaluated_encoder: a 7 | name: ??? 8 | 9 | monitor_metric: ${evaluation_model.eval_name}_val/opacity_dice 10 | monitor_metric_mode: max 11 | 12 | callback: 13 | early_stopping: 14 | patience: 20 15 | 16 | average_runs: 5 17 | -------------------------------------------------------------------------------- /configs/experiment/eval/eval_rsna_seg_frozen_down.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | defaults: 3 | - /evaluation_model: 4 | - rsna_seg_frozen_down 5 | 6 | evaluated_encoder: a 7 | name: ??? 8 | 9 | monitor_metric: ${evaluation_model.eval_name}_val/opacity_dice 10 | monitor_metric_mode: max 11 | 12 | evaluation_model: 13 | dataset: rsna_seg_01 14 | batch_size: 8 15 | learning_rate: 3e-3 #3e-3 for lcross and l, 3e-5 for global and lg (1e-5 is similar good) 16 | callback: 17 | early_stopping: 18 | patience: 20 19 | 20 | 21 | 22 | -------------------------------------------------------------------------------- /configs/experiment/eval/eval_rsna_seg_frozen_full.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | defaults: 3 | - /evaluation_model: 4 | - rsna_seg_frozen_full 5 | 6 | evaluated_encoder: a 7 | name: ??? 8 | 9 | monitor_metric: ${evaluation_model.eval_name}_val/opacity_dice 10 | monitor_metric_mode: max 11 | 12 | callback: 13 | early_stopping: 14 | patience: 10 15 | 16 | average_runs: 5 17 | -------------------------------------------------------------------------------- /configs/experiment/eval/eval_scr_unet_finetune.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | defaults: 3 | - /evaluation_model: 4 | - scr_unet_finetune 5 | 6 | evaluated_encoder: a 7 | name: 'scr_finetune_l025_01' 8 | 9 | monitor_metric: '${evaluation_model.eval_name}_val/loss' 10 | monitor_metric_mode: min 11 | 12 | trainer: 13 | limit_train_batches: 0.1 14 | evaluation_model: 15 | frozen_warmup_steps: 100 #200 # 100 #50 #10 # 200 16 | warmup_lr: 3e-4 17 | batch_size: 8 18 | learning_rate: 3e-5 #3e-4 # more? 19 | lr_reduce_patience: 5 20 | 21 | -------------------------------------------------------------------------------- /configs/experiment/eval/eval_scr_unet_frozen.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | defaults: 3 | - /evaluation_model: 4 | - scr_unet_frozen 5 | 6 | evaluated_encoder: a 7 | name: 'scr_frozen_la025_01' 8 | 9 | monitor_metric: '${evaluation_model.eval_name}_val/loss' 10 | monitor_metric_mode: min 11 | 12 | trainer: 13 | limit_train_batches: 0.1 14 | evaluation_model: 15 | batch_size: 8 16 | learning_rate: 3e-4 17 | lr_reduce_patience: 5 18 | 19 | 20 | -------------------------------------------------------------------------------- /configs/fine_tune.yaml: -------------------------------------------------------------------------------- 1 | defaults: 2 | - fine_tune_config 3 | - base_config 4 | - callback: 5 | - eval_checkpoint 6 | - eval_early_stopping 7 | 8 | logger: 9 | wandb: 10 | tags: 11 | - downstream -------------------------------------------------------------------------------- /configs/logger/wandb.yaml: -------------------------------------------------------------------------------- 1 | wandb: 2 | _target_: pytorch_lightning.loggers.wandb.WandbLogger 3 | project: "lovt" 4 | job_type: "train" 5 | group: "" 6 | save_dir: "." 7 | name: ${name} 8 | log_model: true 9 | reinit: true -------------------------------------------------------------------------------- /configs/objective/global_contrastive.yaml: -------------------------------------------------------------------------------- 1 | defaults: 2 | - base_global_contrastive 3 | 4 | similarity_temperature: 0.1 5 | negatives_from_same_modality: false 6 | -------------------------------------------------------------------------------- /configs/objective/report_sent_intra_contrastive.yaml: -------------------------------------------------------------------------------- 1 | defaults: 2 | - local_intra_contrastive 3 | 4 | similarity_temperature: 0.3 5 | distance_threshold: 0 6 | threshold_type: absolute 7 | negatives_from_same_modality: false 8 | normalize_by_num_negatives: false 9 | -------------------------------------------------------------------------------- /configs/objective/scan_local_intra_contrastive.yaml: -------------------------------------------------------------------------------- 1 | defaults: 2 | - local_intra_contrastive 3 | 4 | similarity_temperature: 0.3 5 | distance_threshold: 0.5 6 | threshold_type: relative 7 | negatives_from_same_modality: false 8 | normalize_by_num_negatives: false 9 | 10 | smooth_weights: true 11 | smooth_lambda: 1.0 12 | -------------------------------------------------------------------------------- /configs/online_eval/chexpert_bin_joint.yaml: -------------------------------------------------------------------------------- 1 | chexpert_bin_joint: 2 | task: 'chexpert_binary' 3 | source_modality: 'a&b' 4 | 5 | nonlinear: false 6 | dropout_prob: 0.2 7 | -------------------------------------------------------------------------------- /configs/online_eval/chexpert_bin_report.yaml: -------------------------------------------------------------------------------- 1 | chexpert_bin_report: 2 | task: 'chexpert_binary' 3 | source_modality: 'b' 4 | 5 | nonlinear: false 6 | dropout_prob: 0.2 7 | 8 | -------------------------------------------------------------------------------- /configs/online_eval/chexpert_bin_scan.yaml: -------------------------------------------------------------------------------- 1 | chexpert_bin_scan: 2 | task: 'chexpert_binary' 3 | source_modality: 'a' 4 | 5 | nonlinear: false 6 | dropout_prob: 0.2 7 | -------------------------------------------------------------------------------- /configs/report_encoder/bioclinicalbert_sentatt.yaml: -------------------------------------------------------------------------------- 1 | defaults: 2 | - base_report_encoder 3 | 4 | language_encoder: 'emilyalsentzer/Bio_ClinicalBERT' 5 | max_length: 512 6 | freeze_embeddings: true 7 | freeze_encoder_layers: 6 # freeze first 6 encoder layers (see ConVIRT) 8 | 9 | # options: 'max', 'avg', 'token_0' 10 | global_aggregator: avgpool_attention 11 | global_from_local: true 12 | 13 | # options: 'tokens' or 'sentences' 14 | local_from: sentences 15 | # options: 'max', 'avg' 16 | sentence_aggregator: max 17 | 18 | data_augmentation: 19 | augment: true 20 | sentence_shuffling: random_swaps 21 | sentence_sampling: null 22 | duplicate_sentences: false 23 | -------------------------------------------------------------------------------- /configs/report_encoder/bioclinicalbert_sentences.yaml: -------------------------------------------------------------------------------- 1 | defaults: 2 | - base_report_encoder 3 | 4 | language_encoder: 'emilyalsentzer/Bio_ClinicalBERT' 5 | max_length: 512 6 | freeze_embeddings: true 7 | freeze_encoder_layers: 6 # freeze first 6 encoder layers (see ConVIRT) 8 | 9 | # options: 'max', 'avg', 'avgpool_attention' 10 | global_aggregator: max 11 | 12 | # options: 'tokens' or 'sentences' 13 | local_from: sentences 14 | # options: 'max', 'avg' 15 | sentence_aggregator: max 16 | 17 | data_augmentation: 18 | augment: true 19 | sentence_shuffling: random_swaps 20 | sentence_sampling: null 21 | duplicate_sentences: false 22 | -------------------------------------------------------------------------------- /configs/scan_encoder/resnet50_imagenet.yaml: -------------------------------------------------------------------------------- 1 | defaults: 2 | - base_scan_encoder 3 | 4 | backbone_architecture: 'resnet' 5 | backbone_model: ['pytorch/vision:v0.6.0', 'resnet50'] 6 | backbone_pretrained: true 7 | input_size: [224, 224] 8 | region_feature_layer: 'conv5' # conv5=7x7, conv4=14x14, conv3=28x28 9 | global_feature_layer: 'local' 10 | global_aggregator: 'avg' # as used in ResNet 11 | -------------------------------------------------------------------------------- /configs/scan_encoder/resnet50attention_imagenet.yaml: -------------------------------------------------------------------------------- 1 | defaults: 2 | - base_scan_encoder 3 | 4 | backbone_architecture: 'resnet' 5 | backbone_model: ['pytorch/vision:v0.6.0', 'resnet50'] 6 | backbone_pretrained: true 7 | input_size: [224, 224] 8 | region_feature_layer: 'conv5' # conv5=7x7, conv4=14x14, conv3=28x28 9 | global_feature_layer: 'local' 10 | global_aggregator: 'avgpool_attention' 11 | -------------------------------------------------------------------------------- /configs/train_representation.yaml: -------------------------------------------------------------------------------- 1 | defaults: 2 | - train_representation_config 3 | - base_config 4 | - pretrain_model: base_model 5 | - callback: 6 | - checkpoint 7 | - early_stopping 8 | 9 | logger: 10 | wandb: 11 | tags: 12 | - pretrain 13 | 14 | 15 | monitor_metric: 'val/total_loss' 16 | monitor_metric_mode: 'min' 17 | -------------------------------------------------------------------------------- /configs/user_config.yml: -------------------------------------------------------------------------------- 1 | models: 2 | base_path: ??? 3 | pretrained_models_folder: pretraining 4 | 5 | wandb: 6 | user: ??? 7 | project: lovt 8 | -------------------------------------------------------------------------------- /datasets/Opacity-Segmentation-COVID/opacity_segmentation_covid_chest_X_ray-master/covid_rural_annot/dataset_statistics.json: -------------------------------------------------------------------------------- 1 | {"train": {"num_samples": 133, "scan": {"pixel_mean": 0.5081052428896399, "pixel_std": 0.2906311866244445}, "segmentation_pos_weights": {"opacity": 0.942548394203186}, "patient_ids": ["COVID-19-AR-16406541", "COVID-19-AR-16434361", "COVID-19-AR-16439190", "COVID-19-AR-16434371", "COVID-19-AR-16406496", "COVID-19-AR-16434399", "COVID-19-AR-16434391", "COVID-19-AR-16406503", "COVID-19-AR-16424115", "COVID-19-AR-16434383", "COVID-19-AR-16406491", "COVID-19-AR-16407183", "COVID-19-AR-16424072", "COVID-19-AR-16406500", "COVID-19-AR-16406533", "COVID-19-AR-16406559", "COVID-19-AR-16434356", "COVID-19-AR-16434370", "COVID-19-AR-16406494", "COVID-19-AR-16424079", "COVID-19-AR-16424120", "COVID-19-AR-16407177", "COVID-19-AR-16434369", "COVID-19-AR-16434346", "COVID-19-AR-16424113", "COVID-19-AR-16406531", "COVID-19-AR-16406508", "COVID-19-AR-16424108", "COVID-19-AR-16406502", "COVID-19-AR-16445122", "COVID-19-AR-16406512", "COVID-19-AR-16439188", "COVID-19-AR-16434411", "COVID-19-AR-16439191", "COVID-19-AR-16406529", "COVID-19-AR-16424070", "COVID-19-AR-16439195", "COVID-19-AR-16406488", "COVID-19-AR-16434347", "COVID-19-AR-16445167", "COVID-19-AR-16434363", "COVID-19-AR-16434366", "COVID-19-AR-16424103", "COVID-19-AR-16445151", "COVID-19-AR-16434358", "COVID-19-AR-16424075", "COVID-19-AR-16407176", "COVID-19-AR-16406561", "COVID-19-AR-16406513", "COVID-19-AR-16424095", "COVID-19-AR-16406521", "COVID-19-AR-16424074", "COVID-19-AR-16434344", "COVID-19-AR-16424104", "COVID-19-AR-16424116", "COVID-19-AR-16406498", "COVID-19-AR-16406522", "COVID-19-AR-16439200", "COVID-19-AR-16424105", "COVID-19-AR-16434453", "COVID-19-AR-16424071", "COVID-19-AR-16406492", "COVID-19-AR-16434444"]}, "validation": {"num_samples": 44, "scan": {"pixel_mean": 0.505572644462122, "pixel_std": 0.2891823915075215}, "segmentation_pos_weights": {"opacity": 0.9167999029159546}, "patient_ids": ["COVID-19-AR-16424076", "COVID-19-AR-16445168", "COVID-19-AR-16445143", "COVID-19-AR-16439216", "COVID-19-AR-16439183", "COVID-19-AR-16434378", "COVID-19-AR-16439192", "COVID-19-AR-16406490", "COVID-19-AR-16424106", "COVID-19-AR-16434381", "COVID-19-AR-16434380", "COVID-19-AR-16434396", "COVID-19-AR-16424077", "COVID-19-AR-16406489", "COVID-19-AR-16406542", "COVID-19-AR-16424081", "COVID-19-AR-16424129", "COVID-19-AR-16424082", "COVID-19-AR-16424083", "COVID-19-AR-16439194", "COVID-19-AR-16445138", "COVID-19-AR-16434349", "COVID-19-AR-16406526"]}, "test": {"num_samples": 44, "scan": {"pixel_mean": 0.5067166371575283, "pixel_std": 0.2878761703394194}, "segmentation_pos_weights": {"opacity": 0.9640106558799744}, "patient_ids": ["COVID-19-AR-16406505", "COVID-19-AR-16406524", "COVID-19-AR-16434409", "COVID-19-AR-16445149", "COVID-19-AR-16439186", "COVID-19-AR-16424118", "COVID-19-AR-16434350", "COVID-19-AR-16407187", "COVID-19-AR-16424111", "COVID-19-AR-16445144", "COVID-19-AR-16434452", "COVID-19-AR-16406579", "COVID-19-AR-16406545", "COVID-19-AR-16424093", "COVID-19-AR-16434395", "COVID-19-AR-16406517", "COVID-19-AR-16406504", "COVID-19-AR-16407173"]}} -------------------------------------------------------------------------------- /datasets/Opacity-Segmentation-COVID/opacity_segmentation_covid_chest_X_ray-master/covid_rural_annot/test.csv: -------------------------------------------------------------------------------- 1 | ID 2 | COVID-19-AR-16406505_01-14-2012-XR 3 | COVID-19-AR-16406505_01-09-2012-XR 4 | COVID-19-AR-16406524_01-21-2012-XR 5 | COVID-19-AR-16406524_01-20-2012-XR 6 | COVID-19-AR-16434409_02-18-2012-XR 7 | COVID-19-AR-16445149_03-13-2012-XR 8 | COVID-19-AR-16439186_03-08-2012-XR 9 | COVID-19-AR-16424118_02-04-2012-XR 10 | COVID-19-AR-16434350_02-16-2012-XR 11 | COVID-19-AR-16407187_01-31-2012-XR 12 | COVID-19-AR-16407187_01-29-2012-XR 13 | COVID-19-AR-16407187_01-27-2012-XR 14 | COVID-19-AR-16424111_02-11-2012-XR 15 | COVID-19-AR-16424111_02-18-2012-XR 16 | COVID-19-AR-16424111_02-09-2012-XR 17 | COVID-19-AR-16424111_02-21-2012-XR 18 | COVID-19-AR-16424111_02-10-2012-XR 19 | COVID-19-AR-16424111_02-20-2012-XR 20 | COVID-19-AR-16424111_02-17-2012-XR 21 | COVID-19-AR-16424111_02-14-2012-XR 22 | COVID-19-AR-16424111_02-16-2012-XR 23 | COVID-19-AR-16424111_02-13-2012-XR 24 | COVID-19-AR-16424111_02-15-2012-XR 25 | COVID-19-AR-16424111_02-08-2012-XR 26 | COVID-19-AR-16424111_02-12-2012-XR 27 | COVID-19-AR-16424111_02-19-2012-XR 28 | COVID-19-AR-16445144_03-11-2012-XR 29 | COVID-19-AR-16434452_02-19-2012-XR 30 | COVID-19-AR-16434452_02-25-2012-XR 31 | COVID-19-AR-16406579_01-06-2012-XR 32 | COVID-19-AR-16406545_01-17-2012-XR 33 | COVID-19-AR-16424093_02-10-2012-XR 34 | COVID-19-AR-16434395_03-04-2012-XR 35 | COVID-19-AR-16434395_03-08-2012-XR 36 | COVID-19-AR-16434395_03-03-2012-XR 37 | COVID-19-AR-16434395_02-26-2012-XR 38 | COVID-19-AR-16406517_01-08-2012-XR 39 | COVID-19-AR-16406517_01-01-2012-XR 40 | COVID-19-AR-16406517_01-05-2012-XR 41 | COVID-19-AR-16406504_01-06-2012-XR 42 | COVID-19-AR-16406504_01-16-2012-XR 43 | COVID-19-AR-16407173_01-30-2012-XR 44 | COVID-19-AR-16407173_01-28-2012-XR 45 | COVID-19-AR-16407173_02-02-2012-XR 46 | -------------------------------------------------------------------------------- /datasets/Opacity-Segmentation-COVID/opacity_segmentation_covid_chest_X_ray-master/covid_rural_annot/validation.csv: -------------------------------------------------------------------------------- 1 | ID 2 | COVID-19-AR-16424076_02-03-2012-XR 3 | COVID-19-AR-16424076_02-02-2012-XR 4 | COVID-19-AR-16424076_02-06-2012-XR 5 | COVID-19-AR-16445168_03-03-2012-XR 6 | COVID-19-AR-16445143_03-14-2012-XR 7 | COVID-19-AR-16439216_03-05-2012-XR 8 | COVID-19-AR-16439216_03-09-2012-XR 9 | COVID-19-AR-16439183_03-07-2012-XR 10 | COVID-19-AR-16434378_02-09-2012-XR 11 | COVID-19-AR-16439192_03-10-2012-XR 12 | COVID-19-AR-16439192_03-15-2012-XR 13 | COVID-19-AR-16439192_03-08-2012-XR 14 | COVID-19-AR-16439192_03-06-2012-XR 15 | COVID-19-AR-16439192_03-02-2012-XR 16 | COVID-19-AR-16439192_03-11-2012-XR 17 | COVID-19-AR-16406490_01-11-2012-XR 18 | COVID-19-AR-16424106_02-12-2012-XR 19 | COVID-19-AR-16434381_01-20-2012-XR 20 | COVID-19-AR-16434381_01-18-2012-XR 21 | COVID-19-AR-16434380_02-09-2012-XR 22 | COVID-19-AR-16434396_01-09-2012-XR 23 | COVID-19-AR-16424077_02-09-2012-XR 24 | COVID-19-AR-16406489_01-26-2012-XR 25 | COVID-19-AR-16406542_02-02-2012-XR 26 | COVID-19-AR-16406542_01-17-2012-XR 27 | COVID-19-AR-16406542_01-23-2012-XR 28 | COVID-19-AR-16406542_01-13-2012-XR 29 | COVID-19-AR-16406542_01-29-2012-XR 30 | COVID-19-AR-16406542_01-12-2012-XR 31 | COVID-19-AR-16424081_02-05-2012-XR 32 | COVID-19-AR-16424081_02-06-2012-XR 33 | COVID-19-AR-16424129_02-14-2012-XR 34 | COVID-19-AR-16424129_02-13-2012-XR 35 | COVID-19-AR-16424129_02-16-2012-XR 36 | COVID-19-AR-16424082_02-07-2012-XR 37 | COVID-19-AR-16424082_02-10-2012-XR 38 | COVID-19-AR-16424082_02-09-2012-XR 39 | COVID-19-AR-16424083_02-07-2012-XR 40 | COVID-19-AR-16439194_03-03-2012-XR 41 | COVID-19-AR-16439194_03-04-2012-XR 42 | COVID-19-AR-16445138_03-13-2012-XR 43 | COVID-19-AR-16434349_02-27-2012-XR 44 | COVID-19-AR-16406526_01-20-2012-XR 45 | COVID-19-AR-16406526_01-25-2012-XR 46 | -------------------------------------------------------------------------------- /datasets/object-CXR/dataset_statistics.json: -------------------------------------------------------------------------------- 1 | {"train": {"num_samples": 6400, "scan": {"pixel_mean": 0.505339195588568, "pixel_std": 0.2899103954490179}, "patient_ids": null}, "validation": {"num_samples": 1600, "scan": {"pixel_mean": 0.5051676659318466, "pixel_std": 0.28975788147648557}, "patient_ids": null}, "test": {"num_samples": 1000, "scan": {"pixel_mean": 0.5051608751029176, "pixel_std": 0.2899181829964151}, "patient_ids": null}} -------------------------------------------------------------------------------- /datasets/siim-acr-pneumothorax-segmentation/dataset_statistics.json: -------------------------------------------------------------------------------- 1 | {"train": {"num_samples": 7229, "scan": {"pixel_mean": 0.5043012020616566, "pixel_std": 0.28800468898118353}, "segmentation_pos_weights": {"pneumothorax": 327.50465563430885}}, "validation": {"num_samples": 2409, "scan": {"pixel_mean": 0.5042212459526761, "pixel_std": 0.28803788144362635}, "segmentation_pos_weights": {"pneumothorax": 332.73377341105777}}, "test": {"num_samples": 2409, "scan": {"pixel_mean": 0.5045960545751641, "pixel_std": 0.2878340935994556}, "segmentation_pos_weights": {"pneumothorax": 331.07501419129846}}} -------------------------------------------------------------------------------- /environment.yaml: -------------------------------------------------------------------------------- 1 | name: lovt 2 | channels: 3 | - pytorch 4 | - huggingface 5 | - stanfordnlp 6 | - conda-forge 7 | - defaults 8 | 9 | dependencies: 10 | - python=3.9 11 | - pip 12 | - pylint 13 | 14 | - numpy>=1.20 15 | - pandas>=1.2.3 16 | - pyarrow=5.0.0 17 | - matplotlib>=3.3.4 18 | - pytorch=1.10.0 19 | - cudatoolkit=10.2 20 | 21 | - torchvision=0.11.1 22 | - pytorch-lightning=1.4.9 23 | - datasets>=1.2.1 24 | - tokenizers>=0.10.1 25 | - transformers>=4.4.0 26 | - nltk>=3.4.4 27 | - stanza>=1.2 28 | - pillow=8.2 29 | 30 | - scikit-image 31 | 32 | - click>=7.1 33 | - tqdm 34 | 35 | - wandb>=0.10.20 36 | 37 | - pip: 38 | - hydra-core==1.1.1 39 | - omegaconf==2.1.1 40 | - lightning-bolts==0.4.0 41 | - segmentation_models_pytorch 42 | - seaborn 43 | - pydicom 44 | - jupyter 45 | 46 | # for pl-bolts 47 | - gym 48 | - sklearn 49 | - opencv-python 50 | - git+https://github.com/bes-dev/mean_average_precision.git 51 | - deepdiff 52 | 53 | # baselines 54 | - byol-pytorch 55 | - pixel-level-contrastive-learning 56 | - simclr 57 | -------------------------------------------------------------------------------- /results/LoVT.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/results/LoVT.pdf -------------------------------------------------------------------------------- /results/LoVT.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/results/LoVT.png -------------------------------------------------------------------------------- /results/generated/covariance_y_a.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/results/generated/covariance_y_a.pdf -------------------------------------------------------------------------------- /results/generated/covariance_y_b.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/results/generated/covariance_y_b.pdf -------------------------------------------------------------------------------- /results/generated/downstream_covid_linear.csv: -------------------------------------------------------------------------------- 1 | name,covid_lin_test/opacity_dice_run_results,covid_lin_test/opacity_dice_mean,covid_lin_test/opacity_dice_std,covid_lin_test/opacity_dice_num_runs,covid_lin_test/opacity_dice_95_interval 2 | LoVT_100,0.4280567765235901;0.4429672658443451;0.4477905333042145;0.4484729468822479;0.43489035964012146,0.4404355764389038,0.0087904533738114,5.0,0.0077051721083788 3 | LoVT_30,0.48687979578971863;0.4938634932041168;0.4934031665325165;0.49221986532211304;0.4926016628742218,0.4917935967445374,0.0028220250640942,5.0,0.0024736140319889 4 | -------------------------------------------------------------------------------- /results/generated/downstream_covid_unet_finetune.csv: -------------------------------------------------------------------------------- 1 | name,covid_unet_finetune_test/opacity_dice_run_results,covid_unet_finetune_test/opacity_dice_mean,covid_unet_finetune_test/opacity_dice_std,covid_unet_finetune_test/opacity_dice_num_runs,covid_unet_finetune_test/opacity_dice_95_interval 2 | LoVT_100,0.5115025639533997;0.4852478504180908;0.48323023319244385;0.5308707356452942;0.5504631996154785,0.5122629165649414,0.0290637282316316,5.0,0.0254754810261592 3 | LoVT_30,0.5074538588523865;0.4869449734687805;0.5133095979690552;0.48319706320762634;0.48377642035484314,0.4949363827705383,0.0143220219569267,5.0,0.0125538057510056 4 | -------------------------------------------------------------------------------- /results/generated/downstream_covid_unet_frozen.csv: -------------------------------------------------------------------------------- 1 | name,covid_unet_frozen_test/opacity_dice_run_results,covid_unet_frozen_test/opacity_dice_mean,covid_unet_frozen_test/opacity_dice_std,covid_unet_frozen_test/opacity_dice_num_runs,covid_unet_frozen_test/opacity_dice_95_interval 2 | LoVT_100,0.4799465835094452;0.41766583919525146;0.48299333453178406;0.4535195827484131;0.4777686297893524,0.4623787939548492,0.0276051059886558,5.0,0.0241969422585546 3 | LoVT_30,0.49403777718544006;0.5450481176376343;0.4397992193698883;0.5432164072990417;0.43641534447669983,0.4917033731937408,0.053045996993414,5.0,0.046496866442917 4 | -------------------------------------------------------------------------------- /results/generated/downstream_nih_seg_linear.csv: -------------------------------------------------------------------------------- 1 | name,nih_seg_lin_test/avg_dice_run_results,nih_seg_lin_test/avg_dice_mean,nih_seg_lin_test/avg_dice_std,nih_seg_lin_test/avg_dice_num_runs,nih_seg_lin_test/avg_dice_95_interval,nih_seg_lin_test/Atelectasis_dice_run_results,nih_seg_lin_test/Atelectasis_dice_mean,nih_seg_lin_test/Atelectasis_dice_std,nih_seg_lin_test/Atelectasis_dice_num_runs,nih_seg_lin_test/Atelectasis_dice_95_interval,nih_seg_lin_test/Cardiomegaly_dice_run_results,nih_seg_lin_test/Cardiomegaly_dice_mean,nih_seg_lin_test/Cardiomegaly_dice_std,nih_seg_lin_test/Cardiomegaly_dice_num_runs,nih_seg_lin_test/Cardiomegaly_dice_95_interval,nih_seg_lin_test/Effusion_dice_run_results,nih_seg_lin_test/Effusion_dice_mean,nih_seg_lin_test/Effusion_dice_std,nih_seg_lin_test/Effusion_dice_num_runs,nih_seg_lin_test/Effusion_dice_95_interval,nih_seg_lin_test/Infiltrate_dice_run_results,nih_seg_lin_test/Infiltrate_dice_mean,nih_seg_lin_test/Infiltrate_dice_std,nih_seg_lin_test/Infiltrate_dice_num_runs,nih_seg_lin_test/Infiltrate_dice_95_interval,nih_seg_lin_test/Mass_dice_run_results,nih_seg_lin_test/Mass_dice_mean,nih_seg_lin_test/Mass_dice_std,nih_seg_lin_test/Mass_dice_num_runs,nih_seg_lin_test/Mass_dice_95_interval,nih_seg_lin_test/Nodule_dice_run_results,nih_seg_lin_test/Nodule_dice_mean,nih_seg_lin_test/Nodule_dice_std,nih_seg_lin_test/Nodule_dice_num_runs,nih_seg_lin_test/Nodule_dice_95_interval,nih_seg_lin_test/Pneumonia_dice_run_results,nih_seg_lin_test/Pneumonia_dice_mean,nih_seg_lin_test/Pneumonia_dice_std,nih_seg_lin_test/Pneumonia_dice_num_runs,nih_seg_lin_test/Pneumonia_dice_95_interval,nih_seg_lin_test/Pneumothorax_dice_run_results,nih_seg_lin_test/Pneumothorax_dice_mean,nih_seg_lin_test/Pneumothorax_dice_std,nih_seg_lin_test/Pneumothorax_dice_num_runs,nih_seg_lin_test/Pneumothorax_dice_95_interval 2 | LoVT_100,0.10478605329990388;0.09121674299240112;0.09191980212926865;0.09089145064353944;0.09186932444572447,0.0941366747021675,0.0059691319739177,5.0,0.0052321748652562,0.0;0.0;0.0;0.0;0.0,0.0,0.0,5.0,0.0,0.4416803419589996;0.4649796485900879;0.46072128415107727;0.4621145129203797;0.46316561102867126,0.4585322797298431,0.0095478828250652,5.0,0.008369088294915,0.08766679465770721;0.09250584989786148;0.10024109482765198;0.09131526947021484;0.09812106937170029,0.0939700156450271,0.0051351663692455,5.0,0.0045011717823422,0.16826847195625305;0.02950582280755043;0.031366847455501556;0.0306959580630064;0.030539238825440407,0.0580752678215503,0.0616034792849776,5.0,0.0539978303940304,0.0;0.0;0.0;0.0;0.0,0.0,0.0,5.0,0.0,0.0;0.0;0.0;0.0;0.0,0.0,0.0,5.0,0.0,0.14067280292510986;0.14274261891841888;0.14302916824817655;0.1430058777332306;0.1431286782026291,0.142515829205513,0.0010401235346875,5.0,0.000911708475995,0.0;0.0;0.0;0.0;0.0,0.0,0.0,5.0,0.0 3 | LoVT_30,0.12909027934074402;0.12400106340646744;0.13239052891731262;0.13823115825653076;0.1478696167469025,0.1343165293335914,0.0091711845189438,5.0,0.0080388976712724,0.0;0.0;0.0;0.0;0.0,0.0,0.0,5.0,0.0,0.6079469919204712;0.5992507338523865;0.644733190536499;0.5604463219642639;0.6055771112442017,0.6035908699035645,0.0299829130446908,5.0,0.0262811820387064,0.16292141377925873;0.17036500573158264;0.16125069558620453;0.02000272274017334;0.15872050821781158,0.1346520692110061,0.0642374871175353,5.0,0.0563066400562418,0.1635270714759827;0.21381138265132904;0.17009694874286652;0.2280120551586151;0.26771974563598633,0.2086334407329559,0.0430499098986017,5.0,0.0377349097837377,0.0;0.0;0.0;0.0;0.0,0.0,0.0,5.0,0.0,0.0;0.0;0.0;0.0;0.0,0.0,0.0,5.0,0.0,0.09832679480314256;0.008581378497183323;0.08304334431886673;0.18889841437339783;0.15093955397605896,0.1059578971937298,0.0688621370702424,5.0,0.0603603244694683,0.0;0.0;0.0;0.10848970711231232;0.0,0.0216979414224624,0.0485180719924345,5.0,0.0425279651880264 4 | -------------------------------------------------------------------------------- /results/generated/downstream_object_finetune_100.csv: -------------------------------------------------------------------------------- 1 | name,object_finetune_full_test/froc_run_results,object_finetune_full_test/froc_mean,object_finetune_full_test/froc_std,object_finetune_full_test/froc_num_runs,object_finetune_full_test/froc_95_interval,object_finetune_full_test/mAP_run_results,object_finetune_full_test/mAP_mean,object_finetune_full_test/mAP_std,object_finetune_full_test/mAP_num_runs,object_finetune_full_test/mAP_95_interval,object_finetune_full_test/mAP@0.5_run_results,object_finetune_full_test/mAP@0.5_mean,object_finetune_full_test/mAP@0.5_std,object_finetune_full_test/mAP@0.5_num_runs,object_finetune_full_test/mAP@0.5_95_interval 2 | LoVT_100,0.6228008270263672;0.6150597929954529;0.6268824934959412;0.6150597929954529;0.6268824934959412,0.6213370800018311,0.0059677139952004,5.0,0.0052309319521096,0.15500454604625702;0.1975771188735962;0.16049739718437195;0.1975771188735962;0.16049739718437195,0.1742307156324386,0.02142990175401,5.0,0.0187841370926585,0.22746479511260984;0.2850914001464844;0.2461266815662384;0.2850914001464844;0.2461266815662384,0.2579801917076111,0.0258951543764732,5.0,0.022698103585669 3 | LoVT_30,0.6301196217536926;0.5950738787651062;0.6019704341888428;0.6019704341888428;0.6216748952865601,0.6101618528366088,0.0149391124829285,5.0,0.0130947094458548,0.16017349064350128;0.18194133043289185;0.18171587586402893;0.18171587586402893;0.14236558973789215,0.1695824325084686,0.0178638459650218,5.0,0.0156583513756106,0.2307395190000534;0.25770071148872375;0.2577812373638153;0.2577812373638153;0.20061922073364255,0.24092438519001,0.0253869545025693,5.0,0.0222526467557002 4 | -------------------------------------------------------------------------------- /results/generated/downstream_object_frozen_100.csv: -------------------------------------------------------------------------------- 1 | name,object_frozen_full_test/froc_run_results,object_frozen_full_test/froc_mean,object_frozen_full_test/froc_std,object_frozen_full_test/froc_num_runs,object_frozen_full_test/froc_95_interval,object_frozen_full_test/mAP_run_results,object_frozen_full_test/mAP_mean,object_frozen_full_test/mAP_std,object_frozen_full_test/mAP_num_runs,object_frozen_full_test/mAP_95_interval,object_frozen_full_test/mAP@0.5_run_results,object_frozen_full_test/mAP@0.5_mean,object_frozen_full_test/mAP@0.5_std,object_frozen_full_test/mAP@0.5_num_runs,object_frozen_full_test/mAP@0.5_95_interval 2 | LoVT_100,0.5683321356773376;0.5835327506065369;0.5721322894096375;0.5721322894096375;0.5721322894096375,0.5736523509025574,0.0057632184840554,5.0,0.0050516837334162,0.10445944964885712;0.1000540778040886;0.09926311671733856;0.09926311671733856;0.09926311671733856,0.1004605755209922,0.0022615236812654,5.0,0.0019823129081417,0.15056367218494415;0.148638978600502;0.14450038969516754;0.14450038969516754;0.14450038969516754,0.1465407639741897,0.0028755728423095,5.0,0.0025205507290652 3 | LoVT_30,0.5373680591583252;0.5674876570701599;0.5596058964729309;0.5674876570701599;0.5596058964729309,0.5583110332489014,0.0123529587023738,5.0,0.0108278457096481,0.09346145391464232;0.10885148495435716;0.09819545596837996;0.10885148495435716;0.09819545596837996,0.1015110671520233,0.0069739926752768,5.0,0.0061129741050298,0.12725898623466492;0.15171347558498385;0.142166405916214;0.15171347558498385;0.142166405916214,0.1430037498474121,0.0100127233531511,5.0,0.0087765389825579 4 | -------------------------------------------------------------------------------- /results/generated/downstream_object_seg_linear_100.csv: -------------------------------------------------------------------------------- 1 | name,object_seg_lin_full_test/avg_dice_run_results,object_seg_lin_full_test/avg_dice_mean,object_seg_lin_full_test/avg_dice_std,object_seg_lin_full_test/avg_dice_num_runs,object_seg_lin_full_test/avg_dice_95_interval 2 | LoVT_100,0.3989646136760712;0.399026483297348;0.39888495206832886;0.3988021910190582;0.3988736867904663,0.3989103853702545,8.677426759019395e-05,5.0,7.606099912353677e-05 3 | LoVT_30,0.3772648274898529;0.3769781291484833;0.3772738873958587;0.37429991364479065;0.3735426068305969,0.3758718729019165,0.0018045987327803,5.0,0.0015818005319338 4 | -------------------------------------------------------------------------------- /results/generated/downstream_pneumothorax_unet_finetune_100.csv: -------------------------------------------------------------------------------- 1 | name,pneumo_unet_finetune_full_test/pneumothorax_dice_run_results,pneumo_unet_finetune_full_test/pneumothorax_dice_mean,pneumo_unet_finetune_full_test/pneumothorax_dice_std,pneumo_unet_finetune_full_test/pneumothorax_dice_num_runs,pneumo_unet_finetune_full_test/pneumothorax_dice_95_interval 2 | LoVT_100,0.4379163980484009;0.4371230900287628;0.44350701570510864;0.4399798810482025;0.4451691508293152,0.440739107131958,0.0034966518105562,5.0,0.0030649504476841 3 | LoVT_30,0.4342937469482422;0.4322644174098969;0.44247153401374817;0.43902352452278137;0.4209962487220764,0.433809894323349,0.0082008851325691,5.0,0.00718839275978 4 | -------------------------------------------------------------------------------- /results/generated/downstream_pneumothorax_unet_frozen_100.csv: -------------------------------------------------------------------------------- 1 | name,pneumo_unet_frozen_full_test/pneumothorax_dice_run_results,pneumo_unet_frozen_full_test/pneumothorax_dice_mean,pneumo_unet_frozen_full_test/pneumothorax_dice_std,pneumo_unet_frozen_full_test/pneumothorax_dice_num_runs,pneumo_unet_frozen_full_test/pneumothorax_dice_95_interval 2 | LoVT_100,0.4437149167060852;0.43634358048439026;0.4484308660030365;0.43761515617370605;0.42661139369010925,0.4385431826114654,0.0082550012999365,5.0,0.0072358276719148 3 | LoVT_30,0.4290328919887543;0.4204568862915039;0.43860092759132385;0.4289343059062958;0.437399685382843,0.4308849394321442,0.0073816957954032,5.0,0.0064703416463964 4 | -------------------------------------------------------------------------------- /results/generated/downstream_rsna_finetune_1.csv: -------------------------------------------------------------------------------- 1 | name,rsna_finetune_1_test/mAP_run_results,rsna_finetune_1_test/mAP_mean,rsna_finetune_1_test/mAP_std,rsna_finetune_1_test/mAP_num_runs,rsna_finetune_1_test/mAP_95_interval,rsna_finetune_1_test/mAP@0.5_run_results,rsna_finetune_1_test/mAP@0.5_mean,rsna_finetune_1_test/mAP@0.5_std,rsna_finetune_1_test/mAP@0.5_num_runs,rsna_finetune_1_test/mAP@0.5_95_interval,rsna_finetune_1_test/mAP@0.75_run_results,rsna_finetune_1_test/mAP@0.75_mean,rsna_finetune_1_test/mAP@0.75_std,rsna_finetune_1_test/mAP@0.75_num_runs,rsna_finetune_1_test/mAP@0.75_95_interval 2 | LoVT_100,0.07348094135522842;0.09525048732757568;0.07585161179304123;0.09242334961891174;0.0856296494603157,0.0845272079110145,0.0096932853758431,5.0,0.0084965392500704,0.1024528369307518;0.1417117565870285;0.10650170594453812;0.1384551227092743;0.12408187240362167,0.1226406589150428,0.0179157408951369,5.0,0.0157038392874492,0.0019772499799728394;0.012603874318301678;0.002409450942650438;0.003249094355851412;0.001966913463547826,0.0044413166120648,0.004592698434014,5.0,0.004025677672256 3 | LoVT_30,0.06012103706598282;0.08720920979976654;0.06997986137866974;0.08307070285081863;0.08260791003704071,0.0765977442264556,0.0112461408967254,5.0,0.0098576771276104,0.08057138323783875;0.124265156686306;0.1014936938881874;0.12123771011829376;0.12629257142543793,0.1107721030712127,0.0195617529894421,5.0,0.0171466325018333,0.0014153228839859366;0.0027265348471701145;0.003272818401455879;0.001898297923617065;0.0020969801116734743,0.0022819908335804,0.0007266225293285,5.0,0.0006369127288681 4 | -------------------------------------------------------------------------------- /results/generated/downstream_rsna_finetune_10.csv: -------------------------------------------------------------------------------- 1 | name,rsna_finetune_test/mAP_run_results,rsna_finetune_test/mAP_mean,rsna_finetune_test/mAP_std,rsna_finetune_test/mAP_num_runs,rsna_finetune_test/mAP_95_interval,rsna_finetune_test/mAP@0.5_run_results,rsna_finetune_test/mAP@0.5_mean,rsna_finetune_test/mAP@0.5_std,rsna_finetune_test/mAP@0.5_num_runs,rsna_finetune_test/mAP@0.5_95_interval,rsna_finetune_test/mAP@0.75_run_results,rsna_finetune_test/mAP@0.75_mean,rsna_finetune_test/mAP@0.75_std,rsna_finetune_test/mAP@0.75_num_runs,rsna_finetune_test/mAP@0.75_95_interval 2 | LoVT_100,0.12380903214216232;0.1269942671060562;0.13860294222831726;0.13762642443180084;0.1313585340976715,0.1316782400012016,0.0064671988764576,5.0,0.0056687497542136,0.17594413459300995;0.1814885437488556;0.19613900780677795;0.20655226707458496;0.1925512403249741,0.1905350387096405,0.0121115099915982,5.0,0.0106162065833416,0.011274042539298534;0.00867873802781105;0.02194744534790516;0.01005813479423523;0.016470346599817276,0.0136857414618134,0.0054784961917411,5.0,0.0048021136404891 3 | LoVT_30,0.1195145547389984;0.11023660004138948;0.1220771297812462;0.11023660004138948;0.1220771297812462,0.1168284028768539,0.0061077289008181,5.0,0.0053536604280648,0.18163222074508667;0.15842796862125397;0.18012593686580658;0.15842796862125397;0.18012593686580658,0.1717480063438415,0.0121750148239523,5.0,0.0106718710231826,0.006453335285186768;0.009486761875450613;0.010739944875240326;0.009486761875450613;0.010739944875240326,0.0093813497573137,0.0017526446849827,5.0,0.0015362608011618 4 | -------------------------------------------------------------------------------- /results/generated/downstream_rsna_finetune_100.csv: -------------------------------------------------------------------------------- 1 | name,rsna_finetune_full_test/mAP_run_results,rsna_finetune_full_test/mAP_mean,rsna_finetune_full_test/mAP_std,rsna_finetune_full_test/mAP_num_runs,rsna_finetune_full_test/mAP_95_interval,rsna_finetune_full_test/mAP@0.5_run_results,rsna_finetune_full_test/mAP@0.5_mean,rsna_finetune_full_test/mAP@0.5_std,rsna_finetune_full_test/mAP@0.5_num_runs,rsna_finetune_full_test/mAP@0.5_95_interval,rsna_finetune_full_test/mAP@0.75_run_results,rsna_finetune_full_test/mAP@0.75_mean,rsna_finetune_full_test/mAP@0.75_std,rsna_finetune_full_test/mAP@0.75_num_runs,rsna_finetune_full_test/mAP@0.75_95_interval 2 | LoVT_100,0.1993439495563507;0.14192064106464386;0.21182042360305783;0.14192064106464386;0.21182042360305783,0.1813652157783508,0.0363662727820806,5.0,0.0318764435473808,0.2812964916229248;0.2084351032972336;0.3010258674621582;0.2084351032972336;0.3010258674621582,0.2600436866283416,0.0477955326333353,5.0,0.0418946315156673,0.02746404893696308;0.011933096684515476;0.034310728311538696;0.011933096684515476;0.034310728311538696,0.0239903397858142,0.0113560745453503,5.0,0.0099540382192557 3 | LoVT_30,0.19872282445430756;0.16564463078975675;0.16564463078975675;0.16564463078975675;0.16564463078975675,0.1722602695226669,0.0147930179213676,5.0,0.0129666519165039,0.28405866026878357;0.23654493689537048;0.23654493689537048;0.23654493689537048;0.23654493689537048,0.2460476815700531,0.0212487830654144,5.0,0.0186253795623779,0.01933719217777252;0.015415357425808908;0.015415357425808908;0.015415357425808908;0.015415357425808908,0.0161997243762016,0.0017538978203823,5.0,0.0015373592227697 4 | -------------------------------------------------------------------------------- /results/generated/downstream_rsna_frozen_1.csv: -------------------------------------------------------------------------------- 1 | name,rsna_1_test/mAP_run_results,rsna_1_test/mAP_mean,rsna_1_test/mAP_std,rsna_1_test/mAP_num_runs,rsna_1_test/mAP_95_interval,rsna_1_test/mAP@0.5_run_results,rsna_1_test/mAP@0.5_mean,rsna_1_test/mAP@0.5_std,rsna_1_test/mAP@0.5_num_runs,rsna_1_test/mAP@0.5_95_interval,rsna_1_test/mAP@0.75_run_results,rsna_1_test/mAP@0.75_mean,rsna_1_test/mAP@0.75_std,rsna_1_test/mAP@0.75_num_runs,rsna_1_test/mAP@0.75_95_interval 2 | LoVT_100,0.07714593410491943;0.11359652131795885;0.0919799730181694;0.10089953243732452;0.09764502197504044,0.0962533965706825,0.0133009816037826,5.0,0.0116588244211446,0.11503361910581587;0.17151185870170593;0.13385123014450073;0.14710545539855957;0.13432647287845612,0.1403657272458076,0.0208334611193978,5.0,0.0182613338256723,0.0011228705989196897;0.0035747280344367027;0.0024382194969803095;0.00617566891014576;0.010862289927899836,0.0048347553936764,0.003848323128313,5.0,0.0033732039488027 3 | LoVT_30,0.08981052786111832;0.06117516756057739;0.095982164144516;0.1051708459854126;0.07805899530649185,0.0860395401716232,0.0170367918300856,5.0,0.0149334064630291,0.13209150731563568;0.085035040974617;0.1436612755060196;0.16267287731170654;0.11312687397003174,0.1273175150156021,0.0297030850958638,5.0,0.0260359020269984,0.00545099750161171;0.0037953255232423544;0.003152664750814438;0.0024482300505042076;0.0025945084635168314,0.0034883452579379,0.0012185756003981,5.0,0.0010681286082594 4 | -------------------------------------------------------------------------------- /results/generated/downstream_rsna_frozen_100.csv: -------------------------------------------------------------------------------- 1 | name,rsna_full_test/mAP_run_results,rsna_full_test/mAP_mean,rsna_full_test/mAP_std,rsna_full_test/mAP_num_runs,rsna_full_test/mAP_95_interval,rsna_full_test/mAP@0.5_run_results,rsna_full_test/mAP@0.5_mean,rsna_full_test/mAP@0.5_std,rsna_full_test/mAP@0.5_num_runs,rsna_full_test/mAP@0.5_95_interval,rsna_full_test/mAP@0.75_run_results,rsna_full_test/mAP@0.75_mean,rsna_full_test/mAP@0.75_std,rsna_full_test/mAP@0.75_num_runs,rsna_full_test/mAP@0.75_95_interval 2 | LoVT_100,0.22437512874603271;0.2051118165254593;0.1977258622646332;0.1977258622646332;0.1977258622646332,0.2045329064130783,0.0115440079061092,5.0,0.0101187690730552,0.31034860014915466;0.2904259264469147;0.2790074944496155;0.2790074944496155;0.2790074944496155,0.2875594019889831,0.0136653748845134,5.0,0.0119782292144778,0.033469200134277344;0.029404759407043457;0.021288510411977768;0.021288510411977768;0.021288510411977768,0.0253478981554508,0.005741288087214,5.0,0.0050324608930367 3 | LoVT_30,0.1772325485944748;0.18093115091323853;0.18093115091323853;0.18093115091323853;0.18093115091323853,0.1801914304494857,0.0016540652412988,5.0,0.0014498521089553,0.2531190812587738;0.2508377432823181;0.2508377432823181;0.2508377432823181;0.2508377432823181,0.2512940108776093,0.0010202453590013,5.0,0.0008942844867706,0.01939980313181877;0.026390498504042625;0.026390498504042625;0.026390498504042625;0.026390498504042625,0.0249923594295978,0.0031263340124571,5.0,0.0027403525859117 4 | -------------------------------------------------------------------------------- /results/generated/downstream_rsna_seg_linear_1.csv: -------------------------------------------------------------------------------- 1 | name,rsna_seg_lin_1_test/opacity_dice_run_results,rsna_seg_lin_1_test/opacity_dice_mean,rsna_seg_lin_1_test/opacity_dice_std,rsna_seg_lin_1_test/opacity_dice_num_runs,rsna_seg_lin_1_test/opacity_dice_95_interval 2 | LoVT_100,0.4637266993522644;0.4632646441459656;0.4631454646587372;0.4631621539592743;0.4631358385086059,0.4632869601249695,0.000251149298144,5.0,0.0002201420660353 3 | LoVT_30,0.4602607190608978;0.4593919515609741;0.4594123959541321;0.4593825340270996;0.4594292938709259,0.4595753788948059,0.0003835460031759,5.0,0.000336192894755 4 | -------------------------------------------------------------------------------- /results/generated/downstream_rsna_seg_linear_10.csv: -------------------------------------------------------------------------------- 1 | name,rsna_seg_lin_test/opacity_dice_run_results,rsna_seg_lin_test/opacity_dice_mean,rsna_seg_lin_test/opacity_dice_std,rsna_seg_lin_test/opacity_dice_num_runs,rsna_seg_lin_test/opacity_dice_95_interval 2 | LoVT_100,0.5007181763648987;0.5014671087265015;0.5014009475708008;0.5014087557792664;0.5013766884803772,0.5012743353843689,0.0003126699781538,5.0,0.0002740673196647 3 | LoVT_30,0.49421873688697815;0.4942434430122375;0.4942416548728943;0.494200736284256;0.4942348599433899,0.4942278861999511,1.80350472054392e-05,5.0,1.580841587928162e-05 4 | -------------------------------------------------------------------------------- /results/generated/downstream_rsna_seg_linear_100.csv: -------------------------------------------------------------------------------- 1 | name,rsna_seg_lin_full_test/opacity_dice_run_results,rsna_seg_lin_full_test/opacity_dice_mean,rsna_seg_lin_full_test/opacity_dice_std,rsna_seg_lin_full_test/opacity_dice_num_runs,rsna_seg_lin_full_test/opacity_dice_95_interval 2 | LoVT_100,0.5182268023490906;0.5180647373199463;0.5180938839912415;0.5180387496948242;0.5180655121803284,0.5180979371070862,7.463147770303289e-05,5.0,6.541737450285462e-05 3 | LoVT_30,0.5154538750648499;0.5154972672462463;0.5154891014099121;0.5154907703399658;0.515488862991333,0.5154839754104614,1.716856930981381e-05,5.0,1.5048914516838844e-05 4 | -------------------------------------------------------------------------------- /results/generated/global_alignment.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/results/generated/global_alignment.pdf -------------------------------------------------------------------------------- /results/generated/global_alignment_normalized.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/results/generated/global_alignment_normalized.pdf -------------------------------------------------------------------------------- /results/generated/local_alignment.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/results/generated/local_alignment.pdf -------------------------------------------------------------------------------- /results/generated/local_alignment_normalized.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/results/generated/local_alignment_normalized.pdf -------------------------------------------------------------------------------- /results/generated/local_modality_assignment.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/results/generated/local_modality_assignment.pdf -------------------------------------------------------------------------------- /results/generated/results_table.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/results/generated/results_table.png -------------------------------------------------------------------------------- /results/generated/results_table_rsna.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/results/generated/results_table_rsna.png -------------------------------------------------------------------------------- /results/generated/std_y_a.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/results/generated/std_y_a.pdf -------------------------------------------------------------------------------- /results/generated/std_y_b.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/results/generated/std_y_b.pdf -------------------------------------------------------------------------------- /results/generated/uniformity_y_a.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/results/generated/uniformity_y_a.pdf -------------------------------------------------------------------------------- /results/generated/uniformity_y_b.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/results/generated/uniformity_y_b.pdf -------------------------------------------------------------------------------- /results/models/LoVT_100/scan_weights_heatmap_test.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/results/models/LoVT_100/scan_weights_heatmap_test.pdf -------------------------------------------------------------------------------- /results/models/LoVT_100/spatial_smoothness_yl_rsna_seg_test.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/results/models/LoVT_100/spatial_smoothness_yl_rsna_seg_test.png -------------------------------------------------------------------------------- /results/models/LoVT_100/spatial_smoothness_yl_test.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/results/models/LoVT_100/spatial_smoothness_yl_test.pdf -------------------------------------------------------------------------------- /results/models/LoVT_100/tSNE_yg_a_chexpert.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/results/models/LoVT_100/tSNE_yg_a_chexpert.pdf -------------------------------------------------------------------------------- /results/models/LoVT_100/tSNE_yg_b_chexpert.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/results/models/LoVT_100/tSNE_yg_b_chexpert.pdf -------------------------------------------------------------------------------- /results/models/LoVT_100/tSNE_yg_downstream_class_g_rsna_seg.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/results/models/LoVT_100/tSNE_yg_downstream_class_g_rsna_seg.pdf -------------------------------------------------------------------------------- /results/models/LoVT_100/tSNE_yl_a_chexpert.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/results/models/LoVT_100/tSNE_yl_a_chexpert.pdf -------------------------------------------------------------------------------- /results/models/LoVT_100/tSNE_yl_b_chexpert.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/results/models/LoVT_100/tSNE_yl_b_chexpert.pdf -------------------------------------------------------------------------------- /results/models/LoVT_100/tSNE_yl_downstream_class_g_rsna_seg.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/results/models/LoVT_100/tSNE_yl_downstream_class_g_rsna_seg.pdf -------------------------------------------------------------------------------- /results/models/LoVT_100/tSNE_yl_downstream_class_probs_rsna_seg.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/results/models/LoVT_100/tSNE_yl_downstream_class_probs_rsna_seg.pdf -------------------------------------------------------------------------------- /results/models/LoVT_100/tSNE_zg.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/results/models/LoVT_100/tSNE_zg.pdf -------------------------------------------------------------------------------- /results/models/LoVT_100/tSNE_zl.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/results/models/LoVT_100/tSNE_zl.pdf -------------------------------------------------------------------------------- /results/models/LoVT_30/scan_weights_heatmap_test.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/results/models/LoVT_30/scan_weights_heatmap_test.pdf -------------------------------------------------------------------------------- /results/models/LoVT_30/spatial_smoothness_yl_rsna_seg_test.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/results/models/LoVT_30/spatial_smoothness_yl_rsna_seg_test.png -------------------------------------------------------------------------------- /results/models/LoVT_30/spatial_smoothness_yl_test.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/results/models/LoVT_30/spatial_smoothness_yl_test.pdf -------------------------------------------------------------------------------- /results/models/LoVT_30/tSNE_yg_a_chexpert.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/results/models/LoVT_30/tSNE_yg_a_chexpert.pdf -------------------------------------------------------------------------------- /results/models/LoVT_30/tSNE_yg_b_chexpert.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/results/models/LoVT_30/tSNE_yg_b_chexpert.pdf -------------------------------------------------------------------------------- /results/models/LoVT_30/tSNE_yg_downstream_class_g_rsna_seg.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/results/models/LoVT_30/tSNE_yg_downstream_class_g_rsna_seg.pdf -------------------------------------------------------------------------------- /results/models/LoVT_30/tSNE_yl_a_chexpert.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/results/models/LoVT_30/tSNE_yl_a_chexpert.pdf -------------------------------------------------------------------------------- /results/models/LoVT_30/tSNE_yl_b_chexpert.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/results/models/LoVT_30/tSNE_yl_b_chexpert.pdf -------------------------------------------------------------------------------- /results/models/LoVT_30/tSNE_yl_downstream_class_g_rsna_seg.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/results/models/LoVT_30/tSNE_yl_downstream_class_g_rsna_seg.pdf -------------------------------------------------------------------------------- /results/models/LoVT_30/tSNE_yl_downstream_class_probs_rsna_seg.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/results/models/LoVT_30/tSNE_yl_downstream_class_probs_rsna_seg.pdf -------------------------------------------------------------------------------- /results/models/LoVT_30/tSNE_zg.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/results/models/LoVT_30/tSNE_zg.pdf -------------------------------------------------------------------------------- /results/models/LoVT_30/tSNE_zl.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/results/models/LoVT_30/tSNE_zl.pdf -------------------------------------------------------------------------------- /results/models/ablation_g_only/spatial_smoothness_yl_rsna_seg_test.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/results/models/ablation_g_only/spatial_smoothness_yl_rsna_seg_test.png -------------------------------------------------------------------------------- /results/models/ablation_g_only/spatial_smoothness_yl_test.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/results/models/ablation_g_only/spatial_smoothness_yl_test.pdf -------------------------------------------------------------------------------- /results/models/ablation_g_only/tSNE_yg_a_chexpert.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/results/models/ablation_g_only/tSNE_yg_a_chexpert.pdf -------------------------------------------------------------------------------- /results/models/ablation_g_only/tSNE_yg_b_chexpert.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/results/models/ablation_g_only/tSNE_yg_b_chexpert.pdf -------------------------------------------------------------------------------- /results/models/ablation_g_only/tSNE_yg_downstream_class_g_rsna_seg.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/results/models/ablation_g_only/tSNE_yg_downstream_class_g_rsna_seg.pdf -------------------------------------------------------------------------------- /results/models/ablation_g_only/tSNE_yl_a_chexpert.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/results/models/ablation_g_only/tSNE_yl_a_chexpert.pdf -------------------------------------------------------------------------------- /results/models/ablation_g_only/tSNE_yl_b_chexpert.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/results/models/ablation_g_only/tSNE_yl_b_chexpert.pdf -------------------------------------------------------------------------------- /results/models/ablation_g_only/tSNE_yl_downstream_class_g_rsna_seg.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/results/models/ablation_g_only/tSNE_yl_downstream_class_g_rsna_seg.pdf -------------------------------------------------------------------------------- /results/models/ablation_g_only/tSNE_yl_downstream_class_probs_rsna_seg.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/results/models/ablation_g_only/tSNE_yl_downstream_class_probs_rsna_seg.pdf -------------------------------------------------------------------------------- /results/models/ablation_g_only/tSNE_zg.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/results/models/ablation_g_only/tSNE_zg.pdf -------------------------------------------------------------------------------- /results/models/ablation_g_only/tSNE_zl.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/results/models/ablation_g_only/tSNE_zl.pdf -------------------------------------------------------------------------------- /results/models/ablation_no_g/spatial_smoothness_yl_rsna_seg_test.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/results/models/ablation_no_g/spatial_smoothness_yl_rsna_seg_test.png -------------------------------------------------------------------------------- /results/models/ablation_no_g/spatial_smoothness_yl_test.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/results/models/ablation_no_g/spatial_smoothness_yl_test.pdf -------------------------------------------------------------------------------- /results/models/ablation_no_g/tSNE_yg_a_chexpert.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/results/models/ablation_no_g/tSNE_yg_a_chexpert.pdf -------------------------------------------------------------------------------- /results/models/ablation_no_g/tSNE_yg_b_chexpert.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/results/models/ablation_no_g/tSNE_yg_b_chexpert.pdf -------------------------------------------------------------------------------- /results/models/ablation_no_g/tSNE_yg_downstream_class_g_rsna_seg.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/results/models/ablation_no_g/tSNE_yg_downstream_class_g_rsna_seg.pdf -------------------------------------------------------------------------------- /results/models/ablation_no_g/tSNE_yl_a_chexpert.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/results/models/ablation_no_g/tSNE_yl_a_chexpert.pdf -------------------------------------------------------------------------------- /results/models/ablation_no_g/tSNE_yl_b_chexpert.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/results/models/ablation_no_g/tSNE_yl_b_chexpert.pdf -------------------------------------------------------------------------------- /results/models/ablation_no_g/tSNE_yl_downstream_class_g_rsna_seg.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/results/models/ablation_no_g/tSNE_yl_downstream_class_g_rsna_seg.pdf -------------------------------------------------------------------------------- /results/models/ablation_no_g/tSNE_yl_downstream_class_probs_rsna_seg.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/results/models/ablation_no_g/tSNE_yl_downstream_class_probs_rsna_seg.pdf -------------------------------------------------------------------------------- /results/models/ablation_no_g/tSNE_zg.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/results/models/ablation_no_g/tSNE_zg.pdf -------------------------------------------------------------------------------- /results/models/ablation_no_g/tSNE_zl.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/results/models/ablation_no_g/tSNE_zl.pdf -------------------------------------------------------------------------------- /results/models/ablation_no_l/spatial_smoothness_yl_rsna_seg_test.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/results/models/ablation_no_l/spatial_smoothness_yl_rsna_seg_test.png -------------------------------------------------------------------------------- /results/models/ablation_no_l/spatial_smoothness_yl_test.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/results/models/ablation_no_l/spatial_smoothness_yl_test.pdf -------------------------------------------------------------------------------- /results/models/ablation_no_l/tSNE_yg_a_chexpert.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/results/models/ablation_no_l/tSNE_yg_a_chexpert.pdf -------------------------------------------------------------------------------- /results/models/ablation_no_l/tSNE_yg_b_chexpert.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/results/models/ablation_no_l/tSNE_yg_b_chexpert.pdf -------------------------------------------------------------------------------- /results/models/ablation_no_l/tSNE_yg_downstream_class_g_rsna_seg.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/results/models/ablation_no_l/tSNE_yg_downstream_class_g_rsna_seg.pdf -------------------------------------------------------------------------------- /results/models/ablation_no_l/tSNE_yl_a_chexpert.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/results/models/ablation_no_l/tSNE_yl_a_chexpert.pdf -------------------------------------------------------------------------------- /results/models/ablation_no_l/tSNE_yl_b_chexpert.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/results/models/ablation_no_l/tSNE_yl_b_chexpert.pdf -------------------------------------------------------------------------------- /results/models/ablation_no_l/tSNE_yl_downstream_class_g_rsna_seg.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/results/models/ablation_no_l/tSNE_yl_downstream_class_g_rsna_seg.pdf -------------------------------------------------------------------------------- /results/models/ablation_no_l/tSNE_yl_downstream_class_probs_rsna_seg.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/results/models/ablation_no_l/tSNE_yl_downstream_class_probs_rsna_seg.pdf -------------------------------------------------------------------------------- /results/models/ablation_no_l/tSNE_zg.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/results/models/ablation_no_l/tSNE_zg.pdf -------------------------------------------------------------------------------- /results/models/ablation_no_l/tSNE_zl.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/results/models/ablation_no_l/tSNE_zl.pdf -------------------------------------------------------------------------------- /results/models/ablation_no_l_report/scan_weights_heatmap_test.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/results/models/ablation_no_l_report/scan_weights_heatmap_test.pdf -------------------------------------------------------------------------------- /results/models/ablation_no_l_report/spatial_smoothness_yl_rsna_seg_test.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/results/models/ablation_no_l_report/spatial_smoothness_yl_rsna_seg_test.pdf -------------------------------------------------------------------------------- /results/models/ablation_no_l_report/spatial_smoothness_yl_test.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/results/models/ablation_no_l_report/spatial_smoothness_yl_test.pdf -------------------------------------------------------------------------------- /results/models/ablation_no_l_report/tSNE_yg_a_chexpert.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/results/models/ablation_no_l_report/tSNE_yg_a_chexpert.pdf -------------------------------------------------------------------------------- /results/models/ablation_no_l_report/tSNE_yg_b_chexpert.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/results/models/ablation_no_l_report/tSNE_yg_b_chexpert.pdf -------------------------------------------------------------------------------- /results/models/ablation_no_l_report/tSNE_yg_downstream_class_g_rsna_seg.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/results/models/ablation_no_l_report/tSNE_yg_downstream_class_g_rsna_seg.pdf -------------------------------------------------------------------------------- /results/models/ablation_no_l_report/tSNE_yl_a_chexpert.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/results/models/ablation_no_l_report/tSNE_yl_a_chexpert.pdf -------------------------------------------------------------------------------- /results/models/ablation_no_l_report/tSNE_yl_b_chexpert.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/results/models/ablation_no_l_report/tSNE_yl_b_chexpert.pdf -------------------------------------------------------------------------------- /results/models/ablation_no_l_report/tSNE_yl_downstream_class_g_rsna_seg.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/results/models/ablation_no_l_report/tSNE_yl_downstream_class_g_rsna_seg.pdf -------------------------------------------------------------------------------- /results/models/ablation_no_l_report/tSNE_yl_downstream_class_probs_rsna_seg.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/results/models/ablation_no_l_report/tSNE_yl_downstream_class_probs_rsna_seg.pdf -------------------------------------------------------------------------------- /results/models/ablation_no_l_report/tSNE_zg.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/results/models/ablation_no_l_report/tSNE_zg.pdf -------------------------------------------------------------------------------- /results/models/ablation_no_l_report/tSNE_zl.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/results/models/ablation_no_l_report/tSNE_zl.pdf -------------------------------------------------------------------------------- /results/models/ablation_no_l_scan/spatial_smoothness_yl_rsna_seg_test.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/results/models/ablation_no_l_scan/spatial_smoothness_yl_rsna_seg_test.pdf -------------------------------------------------------------------------------- /results/models/ablation_no_l_scan/spatial_smoothness_yl_test.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/results/models/ablation_no_l_scan/spatial_smoothness_yl_test.pdf -------------------------------------------------------------------------------- /results/models/ablation_no_l_scan/tSNE_yg_a_chexpert.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/results/models/ablation_no_l_scan/tSNE_yg_a_chexpert.pdf -------------------------------------------------------------------------------- /results/models/ablation_no_l_scan/tSNE_yg_b_chexpert.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/results/models/ablation_no_l_scan/tSNE_yg_b_chexpert.pdf -------------------------------------------------------------------------------- /results/models/ablation_no_l_scan/tSNE_yg_downstream_class_g_rsna_seg.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/results/models/ablation_no_l_scan/tSNE_yg_downstream_class_g_rsna_seg.pdf -------------------------------------------------------------------------------- /results/models/ablation_no_l_scan/tSNE_yl_a_chexpert.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/results/models/ablation_no_l_scan/tSNE_yl_a_chexpert.pdf -------------------------------------------------------------------------------- /results/models/ablation_no_l_scan/tSNE_yl_b_chexpert.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/results/models/ablation_no_l_scan/tSNE_yl_b_chexpert.pdf -------------------------------------------------------------------------------- /results/models/ablation_no_l_scan/tSNE_yl_downstream_class_g_rsna_seg.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/results/models/ablation_no_l_scan/tSNE_yl_downstream_class_g_rsna_seg.pdf -------------------------------------------------------------------------------- /results/models/ablation_no_l_scan/tSNE_yl_downstream_class_probs_rsna_seg.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/results/models/ablation_no_l_scan/tSNE_yl_downstream_class_probs_rsna_seg.pdf -------------------------------------------------------------------------------- /results/models/ablation_no_l_scan/tSNE_zg.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/results/models/ablation_no_l_scan/tSNE_zg.pdf -------------------------------------------------------------------------------- /results/models/ablation_no_l_scan/tSNE_zl.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/results/models/ablation_no_l_scan/tSNE_zl.pdf -------------------------------------------------------------------------------- /src/analysis/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/src/analysis/__init__.py -------------------------------------------------------------------------------- /src/analysis/embedding_analysis.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import os 3 | import sys 4 | from pathlib import Path 5 | from pprint import pformat 6 | from typing import Union 7 | 8 | import click 9 | from tqdm import tqdm 10 | 11 | from analysis.data_exporter import update_results 12 | 13 | if __name__ == '__main__': 14 | scripts_path = Path(os.path.realpath(__file__)).absolute().parent 15 | root_path = scripts_path.parent 16 | sys.path.append(str(root_path)) 17 | 18 | log = logging.getLogger(__name__) 19 | 20 | from analysis.postprocess_run import PretrainingRun, BaselineRun 21 | from metrics.embedding_metrics import EmbeddingMetrics, DownstreamEmbeddingMetrics 22 | 23 | 24 | def run_embedding_analysis(run: PretrainingRun, data='test'): 25 | log.info('Running (basic) embedding analysis...') 26 | embedding_metrics = EmbeddingMetrics(compute_cov=True) 27 | for batch in tqdm(run.iter_model_data_batches(data=data, load_inputs=False, load_attentions=False)): 28 | embedding_metrics(batch.embeddings) 29 | log.info('Computing metrics...') 30 | results = embedding_metrics.compute() 31 | log.info(f'Results: {pformat(results)}') 32 | log.info('Saving results to run...') 33 | run.update_summary(results, analysis_name=f'analysis-{data}/emb') 34 | log.info('Done') 35 | 36 | 37 | def run_downstream_embedding_analysis(run: Union[BaselineRun, PretrainingRun], dataset, data='test'): 38 | log.info('Running baseline embedding analysis...') 39 | embedding_metrics = DownstreamEmbeddingMetrics(compute_cov=True) 40 | for batch in tqdm(run.iter_downstream_data_batches(dataset=dataset, data=data)): 41 | embedding_metrics(batch) 42 | log.info('Computing metrics...') 43 | results = embedding_metrics.compute() 44 | log.info(f'Results: {pformat(results)}') 45 | log.info('Saving results...') 46 | 47 | update_results(f'analysis-{dataset}', run.baseline_name, **results) 48 | log.info('Done') 49 | -------------------------------------------------------------------------------- /src/analysis/visualization/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/src/analysis/visualization/__init__.py -------------------------------------------------------------------------------- /src/analysis/visualization/downstream_plotter.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import matplotlib.pyplot as plt 4 | import matplotlib.patches as mpatches 5 | import numpy as np 6 | 7 | from analysis.data_exporter import get_paper_data, load_downstream_results, RESULTS_PATH 8 | 9 | 10 | def plot_downstream_results(downstream_task, metric, ylabel, category=None): 11 | data = get_paper_data(category=category).join(load_downstream_results(downstream_task)) 12 | 13 | colors = ['tab:red' if is_baseline else 'tab:blue' for is_baseline in data['baseline']] 14 | run_paper_names = data['paper_name'] 15 | x_pos = np.arange(len(run_paper_names)) 16 | means = data[f'{metric}_mean'] 17 | conf_intervals = data[f'{metric}_95_interval'] 18 | 19 | fig, ax = plt.subplots() 20 | ax.bar(x_pos, 100 * means, yerr=100 * conf_intervals, color=colors, align='center', alpha=0.5, ecolor='black', capsize=8) 21 | ax.set_ylabel(ylabel) 22 | ax.set_xticks(x_pos) 23 | ax.set_xticklabels(run_paper_names, rotation=45, fontsize=8, ha='right', rotation_mode='anchor') 24 | ax.yaxis.grid(True) 25 | 26 | baseline_patch = mpatches.Patch(color='tab:red', alpha=0.5, label='Baseline') 27 | ours_patch = mpatches.Patch(color='tab:blue', alpha=0.5, label='Ours') 28 | ax.legend(handles=[baseline_patch, ours_patch]) 29 | 30 | plt.tight_layout() 31 | plt.savefig(os.path.join(RESULTS_PATH, 'generated', f'downstream_{downstream_task}_{metric.replace("/", "_")}.pdf')) 32 | -------------------------------------------------------------------------------- /src/analysis/visualization/heatmaps.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import os 3 | import sys 4 | from pathlib import Path 5 | 6 | import torch 7 | from tqdm import tqdm 8 | import matplotlib.pyplot as plt 9 | import seaborn as sns 10 | 11 | log = logging.getLogger(__name__) 12 | 13 | from analysis.postprocess_run import PretrainingRun 14 | 15 | 16 | def plot_scan_region_weights_heatmap(run: PretrainingRun, data='test'): 17 | all_weights = [] 18 | size = None 19 | 20 | log.info('Collecting weights...') 21 | mean_image = None 22 | num_batches = 0 23 | for batch in tqdm(run.iter_model_data_batches(data, load_inputs=True)): 24 | batch_mean_image = batch.inputs.x_a.scan.float().mean(dim=0) # H x W 25 | if mean_image is None: 26 | mean_image = batch_mean_image 27 | else: 28 | mean_image = mean_image + batch_mean_image 29 | num_batches += 1 30 | 31 | all_weights.append(batch.embeddings.weights_a) # (B x N) 32 | new_size = batch.embeddings.local_size_a 33 | assert size is None or size == new_size, f'{size} != {new_size}' 34 | size = new_size 35 | mean_image = mean_image / num_batches 36 | mean_image = mean_image - mean_image.min() 37 | mean_image = mean_image / mean_image.max() 38 | 39 | avg_weights = torch.cat(all_weights, dim=0).mean(0) # (N) 40 | avg_weights = avg_weights.view(*size) 41 | 42 | log.info('Plotting...') 43 | fig, ax = plt.subplots() 44 | ax = sns.heatmap(avg_weights, linewidth=0.5, 45 | alpha=0.3, 46 | zorder=2, 47 | ax=ax) 48 | ax.imshow(mean_image, 49 | cmap='gray', 50 | aspect=ax.get_aspect(), 51 | extent=ax.get_xlim() + ax.get_ylim(), 52 | zorder=1) 53 | plt.tight_layout() 54 | plot_folder = os.path.join(run.run_path, 'plots') 55 | os.makedirs(plot_folder, exist_ok=True) 56 | path = os.path.join(plot_folder, f'scan_weights_heatmap_{data}.pdf') 57 | plt.savefig(path) 58 | log.info(f'Plot saved to {path}') 59 | -------------------------------------------------------------------------------- /src/analysis/visualization/spatial_smoothness.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import math 3 | import os 4 | import sys 5 | from pathlib import Path 6 | 7 | import torch 8 | import torch.nn.functional as F 9 | from datasets import tqdm 10 | import matplotlib.pyplot as plt 11 | import pandas as pd 12 | import seaborn as sns 13 | 14 | log = logging.getLogger(__name__) 15 | from analysis.postprocess_run import PretrainingRun 16 | 17 | 18 | def plot_spatial_smoothness(run: PretrainingRun, dataset=None, data='test', plot_zl=False, limit_samples=None): 19 | log.info('Computing distances for spatial smoothness...') 20 | cos_emb = [] 21 | coord_dists = [] 22 | batch_iterator = run.iter_model_data_batches(data, load_attentions=False) if dataset is None \ 23 | else run.iter_downstream_data_batches(dataset, data) 24 | num_samples = 0 25 | for batch in tqdm(batch_iterator): 26 | if dataset is None: 27 | if plot_zl: 28 | emb = batch.embeddings.zl_a 29 | else: 30 | emb = batch.embeddings.yl_a # (B x N_a x d) 31 | H, W = batch.inputs.x_a.local_regions_shape 32 | else: 33 | emb = batch.yl 34 | H, W = batch.x.local_regions_shape 35 | 36 | num_samples += emb.shape[0] 37 | if limit_samples is not None and num_samples > limit_samples: 38 | emb = emb[:limit_samples - num_samples] 39 | 40 | B, N_a, _ = emb.size() 41 | emb = F.normalize(emb, dim=-1) 42 | coordinates = torch.meshgrid(torch.arange(H), torch.arange(W)) 43 | coordinates = torch.stack(coordinates).float() # (2 x H x W) 44 | coordinates = coordinates.view(2, -1).T # ((H*W) x 2) 45 | coordinates /= math.sqrt(H ** 2 + W ** 2) 46 | distances = torch.cdist(coordinates, coordinates) # (N_a x N_a) 47 | assert distances.size() == (N_a, N_a) 48 | coord_dists.append(distances[None, :, :].expand(B, -1, -1)) 49 | cos_emb.append(torch.bmm(emb, emb.transpose(-1, -2))) # (B x N_a x N_a) 50 | 51 | if limit_samples is not None and num_samples >= limit_samples: 52 | break 53 | coord_dists = torch.cat(coord_dists, dim=0).flatten().numpy() 54 | cos_emb = torch.cat(cos_emb, dim=0).flatten().numpy() 55 | 56 | non_rounded_df = pd.DataFrame({'coord_dist': coord_dists, 'cos_sim': cos_emb}) 57 | df = non_rounded_df.copy().sort_values('coord_dist') 58 | df['coord_dist'] = df['coord_dist'].round(1).astype('category') 59 | 60 | log.info('Plotting...') 61 | fig, ax = plt.subplots() 62 | sns.violinplot(data=df, x='coord_dist', y='cos_sim', positions=[0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9], 63 | color='tab:blue', width=0.8, scale='count', inner='quartiles', ax=ax, alpha=0.8) 64 | ax.set_xlabel('Spatial Distance of Region Pair') 65 | ax.set_ylabel('Cosine Similarity of Region Embeddings Pair') 66 | ax.set_xticklabels(['0.0', '0.1', '0.2', '0.3', '0.4', '0.5', '0.6', '0.7', '0.8', '0.9']) 67 | plt.tight_layout() 68 | plot_folder = os.path.join(run.run_path, 'plots') 69 | os.makedirs(plot_folder, exist_ok=True) 70 | emb_name = 'zl' if plot_zl else 'yl' 71 | dataset_infix = dataset + '_' if dataset is not None else '' 72 | path = os.path.join(plot_folder, f'spatial_smoothness_{emb_name}_{dataset_infix}{data}.pdf') 73 | plt.savefig(path, dpi=1000) 74 | log.info(f'Plot saved to {path}') 75 | -------------------------------------------------------------------------------- /src/baselines/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/src/baselines/__init__.py -------------------------------------------------------------------------------- /src/baselines/simclr_baseline.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import os 3 | import sys 4 | from dataclasses import dataclass 5 | from pathlib import Path 6 | 7 | import hydra 8 | import pytorch_lightning as pl 9 | import torch 10 | from hydra.core.config_store import ConfigStore 11 | from pl_bolts.optimizers import LinearWarmupCosineAnnealingLR 12 | from pytorch_lightning.callbacks import ModelCheckpoint 13 | from simclr import SimCLR 14 | from simclr.modules import NT_Xent 15 | from torch.optim import Adam 16 | from torch.utils.data import DataLoader 17 | 18 | 19 | if __name__ == '__main__': 20 | baselines_path = Path(os.path.realpath(__file__)).absolute().parent 21 | root_path = baselines_path.parent 22 | sys.path.insert(0, str(root_path)) 23 | sys.path.append(str(os.path.join(root_path, 'scripts'))) 24 | 25 | from data.dataloading_utils import load_dataset 26 | from common.script_utils import setup_training, init_trainer, log_hyperparameters 27 | from common.wandb import finish_run 28 | from baselines.baseline_utils import BaselineModelConfig, load_backbone, prepare_transform, \ 29 | TwoImageTransformsWrapper, BaselineExperimentConfig, export_pretrained_weights 30 | 31 | log = logging.getLogger(__name__) 32 | 33 | @dataclass 34 | class SimCLRConfig(BaselineModelConfig): 35 | projection_size: int = 128 36 | temperature: float = 0.1 37 | 38 | 39 | class SimCLRModule(pl.LightningModule): 40 | def __init__(self, config: SimCLRConfig, num_workers): 41 | super().__init__() 42 | self.save_hyperparameters('config', 'num_workers') 43 | self.config = config 44 | self.num_workers = num_workers 45 | 46 | model = load_backbone(config) 47 | 48 | img_size, self.transform = prepare_transform(config, mode='SimCLR') 49 | self.simclr_model = SimCLR(model, config.projection_size, n_features=model.fc.in_features) 50 | self.criterion = NT_Xent(config.batch_size, config.temperature, world_size=1) 51 | 52 | dataset = load_dataset(self.config.dataset) 53 | self.train_dataset = dataset['train'] 54 | 55 | @property 56 | def model(self): 57 | return self.simclr_model.encoder 58 | 59 | def training_step(self, batch, batch_idx): 60 | x_i, x_j = batch 61 | h_i, h_j, z_i, z_j = self.simclr_model(x_i, x_j) 62 | loss = self.criterion(z_i, z_j) 63 | self.log(f'train/loss', loss, prog_bar=True) 64 | return loss 65 | 66 | def configure_optimizers(self): 67 | optimizer = Adam(self.parameters(), lr=self.config.learning_rate, weight_decay=self.config.weight_decay) 68 | scheduler = LinearWarmupCosineAnnealingLR( 69 | optimizer, warmup_epochs=self.config.warmup_epochs, max_epochs=self.config.max_epochs 70 | ) 71 | return [optimizer], [scheduler] 72 | 73 | def train_dataloader(self): 74 | return DataLoader(TwoImageTransformsWrapper(self.train_dataset, self.transform), 75 | batch_size=self.config.batch_size, 76 | num_workers=self.num_workers, pin_memory=True, drop_last=True, shuffle=True) 77 | 78 | 79 | def pretrain_simclr_baseline(config: BaselineExperimentConfig): 80 | setup_training(config) 81 | 82 | log.info(f"----- Initializing Model -----") 83 | model = SimCLRModule(config.model_config, config.num_dataloader_workers) 84 | 85 | log.info(f"----- Initializing Trainer -----") 86 | logger, trainer = init_trainer( 87 | model, 88 | trainer_config=config.trainer, 89 | gpus=config.gpus, 90 | logger_configs=config.logger, 91 | callback_configs=config.callback, 92 | callbacks=[ 93 | ModelCheckpoint(save_last=True, verbose=True, dirpath='checkpoints/', filename='checkpoint-last') 94 | ] 95 | ) 96 | 97 | log_hyperparameters(config, model, trainer, logger) 98 | 99 | # Train the model 100 | log.info(f"----- Starting Training -----") 101 | trainer.fit(model=model) 102 | 103 | log.info(f"----- Completed training -----") 104 | finish_run(trainer) 105 | export_pretrained_weights(config, model, trainer, last_model=True) 106 | 107 | 108 | cs = ConfigStore.instance() 109 | cs.store(name="baseline_config", node=BaselineExperimentConfig) 110 | cs.store(group="baseline", name="base_model_config", node=SimCLRConfig) 111 | 112 | 113 | @hydra.main(config_path="../../configs/", config_name="baseline") 114 | def main(config: BaselineExperimentConfig) -> None: 115 | pretrain_simclr_baseline(config) 116 | 117 | 118 | if __name__ == "__main__": 119 | main() 120 | 121 | 122 | -------------------------------------------------------------------------------- /src/common/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/src/common/__init__.py -------------------------------------------------------------------------------- /src/common/config_utils.py: -------------------------------------------------------------------------------- 1 | from deepdiff import DeepDiff 2 | from omegaconf import OmegaConf 3 | 4 | 5 | def prepare_config(config, config_cls, log): 6 | # make it possible to init this class with different types of configs (dataclass, omegaconf, dict) 7 | config = OmegaConf.create(config) 8 | # fill defaults, which is required if "deprecated" configs are used (e.g. when loading old checkpoints) 9 | config_defaults = OmegaConf.structured(config_cls) 10 | new_config = OmegaConf.merge(config_defaults, config) 11 | diff = DeepDiff(config, new_config, verbose_level=2) 12 | if len(diff) > 0: 13 | log.info(f'Defaults have been added to the config: {diff}') 14 | return new_config 15 | -------------------------------------------------------------------------------- /src/common/dataclass_utils.py: -------------------------------------------------------------------------------- 1 | import dataclasses 2 | from typing import Callable 3 | 4 | import torch 5 | 6 | 7 | class TensorDataclassMixin: 8 | def __init__(self): 9 | super(TensorDataclassMixin, self).__init__() 10 | assert dataclasses.is_dataclass(self), f'{type(self)} has to be a dataclass to use TensorDataclassMixin' 11 | 12 | def apply(self, tensor_fn: Callable[[torch.Tensor], torch.Tensor], ignore=None): 13 | def apply_to_value(value): 14 | if value is None: 15 | return None 16 | elif isinstance(value, torch.Tensor): 17 | return tensor_fn(value) 18 | elif isinstance(value, list): 19 | return [apply_to_value(el) for el in value] 20 | elif isinstance(value, tuple): 21 | return tuple(apply_to_value(el) for el in value) 22 | elif isinstance(value, dict): 23 | return {key: apply_to_value(el) for key, el in value.items()} 24 | elif isinstance(value, TensorDataclassMixin): 25 | return value.apply(tensor_fn) 26 | else: 27 | return value 28 | 29 | def apply_to_field(field: dataclasses.Field): 30 | value = getattr(self, field.name) 31 | if ignore is not None and field.name in ignore: 32 | return value 33 | else: 34 | return apply_to_value(value) 35 | 36 | return self.__class__(**{field.name: apply_to_field(field) for field in dataclasses.fields(self)}) 37 | 38 | def to(self, device, *args, non_blocking=True, **kwargs): 39 | return self.apply(lambda x: x.to(device, *args, non_blocking=non_blocking, **kwargs)) 40 | 41 | def view(self, *args): 42 | return self.apply(lambda x: x.view(*args)) 43 | 44 | def detach(self): 45 | return self.apply(lambda x: x.detach()) 46 | 47 | def unsqueeze(self, dim): 48 | return self.apply(lambda x: x.unsqueeze(dim)) 49 | 50 | def squeeze(self, dim): 51 | return self.apply(lambda x: x.squeeze(dim)) 52 | 53 | def __getitem__(self, *args): 54 | return self.apply(lambda x: x.__getitem__(*args)) 55 | 56 | def to_dict(self): 57 | return dataclasses.asdict(self) 58 | -------------------------------------------------------------------------------- /src/common/user_config.py: -------------------------------------------------------------------------------- 1 | import os 2 | from dataclasses import dataclass 3 | from pathlib import Path 4 | 5 | from omegaconf import MISSING, OmegaConf 6 | 7 | 8 | @dataclass 9 | class WandbConfig: 10 | user: str = MISSING 11 | project: str = MISSING 12 | 13 | 14 | @dataclass 15 | class ModelsFolderConfig: 16 | base_path: str = MISSING 17 | pretrained_models_folder: str = MISSING 18 | 19 | 20 | @dataclass 21 | class UserConfig: 22 | wandb: WandbConfig = MISSING 23 | models: ModelsFolderConfig = MISSING 24 | 25 | 26 | def load_user_config(): 27 | src_path = Path(os.path.realpath(__file__)).absolute().parent.parent 28 | user_config_path = src_path.parent.joinpath('configs', 'user_config.yml') 29 | return OmegaConf.load(user_config_path) 30 | 31 | 32 | USER_CONFIG: UserConfig = load_user_config() 33 | -------------------------------------------------------------------------------- /src/common/wandb.py: -------------------------------------------------------------------------------- 1 | import glob 2 | import logging 3 | import os 4 | 5 | import wandb 6 | from pytorch_lightning import Callback, LightningModule, Trainer 7 | from pytorch_lightning.loggers import WandbLogger 8 | import pytorch_lightning as pl 9 | 10 | log = logging.getLogger(__name__) 11 | 12 | 13 | def get_wandb_logger(trainer: pl.Trainer) -> WandbLogger: 14 | logger = None 15 | for lg in trainer.logger: 16 | if isinstance(lg, WandbLogger): 17 | logger = lg 18 | 19 | if not logger: 20 | raise Exception( 21 | "You are using wandb related callback," 22 | "but WandbLogger was not found for some reason..." 23 | ) 24 | 25 | return logger 26 | 27 | 28 | def finish_run(trainer): 29 | logger = None 30 | for lg in trainer.logger: 31 | if isinstance(lg, WandbLogger): 32 | logger = lg 33 | if logger is not None: 34 | path = logger.experiment.path 35 | dir = logger.experiment.dir 36 | logger.experiment.finish() 37 | run_api = wandb.Api().run(path) 38 | return run_api, dir 39 | else: 40 | return None, None 41 | 42 | 43 | class UploadCodeToWandbAsArtifact(Callback): 44 | """Upload all *.py files to wandb as an artifact at the beginning of the run.""" 45 | 46 | def __init__(self, code_dir: str): 47 | self.code_dir = code_dir 48 | 49 | def on_train_start(self, trainer, pl_module): 50 | logger = get_wandb_logger(trainer=trainer) 51 | experiment = logger.experiment 52 | 53 | code = wandb.Artifact("project-source", type="code") 54 | for path in glob.glob(os.path.join(self.code_dir, "**/*.py"), recursive=True): 55 | code.add_file(path) 56 | 57 | experiment.use_artifact(code) 58 | 59 | 60 | class UploadConfigToWandbAsArtifact(Callback): 61 | def __init__(self, config_dir: str): 62 | self.config_dir = config_dir 63 | 64 | def on_train_start(self, trainer, pl_module): 65 | logger = get_wandb_logger(trainer=trainer) 66 | experiment = logger.experiment 67 | 68 | run_config = wandb.Artifact("run-config", type="config") 69 | for path in glob.glob(os.path.join(self.config_dir, "*.yaml")): 70 | log.info(f'Uploading config {path}') 71 | run_config.add_file(path) 72 | 73 | experiment.use_artifact(run_config) 74 | 75 | 76 | class UploadCheckpointsToWandbAsArtifact(Callback): 77 | """Upload checkpoints to wandb as an artifact, at the end of training.""" 78 | 79 | def __init__(self, ckpt_dir: str = "checkpoints/", upload_best_only: bool = False): 80 | self.ckpt_dir = ckpt_dir 81 | self.upload_best_only = upload_best_only 82 | 83 | def on_train_end(self, trainer, pl_module): 84 | logger = get_wandb_logger(trainer=trainer) 85 | experiment = logger.experiment 86 | 87 | ckpts = wandb.Artifact("experiment-ckpts", type="checkpoints") 88 | 89 | if self.upload_best_only: 90 | ckpts.add_file(trainer.checkpoint_callback.best_model_path) 91 | else: 92 | for path in glob.glob( 93 | os.path.join(self.ckpt_dir, "**/*.ckpt"), recursive=True 94 | ): 95 | ckpts.add_file(path) 96 | 97 | experiment.use_artifact(ckpts) 98 | 99 | 100 | class SaveBestCheckpointPathToWandbSummary(Callback): 101 | def __init__(self, prefix: str = None): 102 | self.prefix = '' if prefix is None else prefix + '_' 103 | 104 | def on_epoch_end(self, trainer: Trainer, pl_module: LightningModule): 105 | logger = get_wandb_logger(trainer=trainer) 106 | logger.experiment.summary[self.prefix + 'best_checkpoint'] = trainer.checkpoint_callback.best_model_path 107 | 108 | 109 | class WatchModelWithWandb(Callback): 110 | """Make WandbLogger watch model at the beginning of the run.""" 111 | 112 | def __init__(self, log: str = "gradients", log_freq: int = 100): 113 | self.log = log 114 | self.log_freq = log_freq 115 | 116 | def on_train_start(self, trainer, pl_module): 117 | logger = get_wandb_logger(trainer=trainer) 118 | logger.watch(model=trainer.model, log=self.log, log_freq=self.log_freq) 119 | -------------------------------------------------------------------------------- /src/data/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/src/data/__init__.py -------------------------------------------------------------------------------- /src/data/dataloading_utils.py: -------------------------------------------------------------------------------- 1 | import os 2 | from dataclasses import dataclass, field 3 | from pathlib import Path 4 | from typing import Union, Optional, Dict, Any 5 | 6 | from omegaconf import MISSING, OmegaConf 7 | from torch.utils.data import Dataset, Subset 8 | 9 | from data.datasets.COVID_rural.covid_rural_dataset import CovidRuralDataset 10 | from data.datasets.chexpert.chexpert_dataset import ChexpertDataset 11 | from data.datasets.mimic_cxr.mimic_cxr_dataset import MimicCxrDataset, MimicCxrImageDataset 12 | from data.datasets.nih_cxr.nih_cxr_dataset import NihCxrDetectionDataset, NihCxrSegmentationDataset 13 | from data.datasets.object_cxr.object_cxr_dataset import ObjectCxrDetectionDataset, ObjectCxrSegmentationDataset 14 | from data.datasets.rsna_pneunomia_detection.rsna_pneunomia_detection_dataset import RsnaPneunomiaDetectionDataset, \ 15 | RsnaPneunomiaSegmentationDataset 16 | from data.datasets.siim_acr_pneumothorax.siim_acr_pneumothorax import SIIMSegmentationDataset 17 | 18 | 19 | class DatasetTransformWrapper(Dataset): 20 | def __init__(self, dataset: Dataset, transform) -> None: 21 | self.dataset = dataset 22 | self.transform = transform 23 | 24 | def __getitem__(self, idx): 25 | return self.transform(self.dataset[idx]) 26 | 27 | def __len__(self): 28 | return len(self.dataset) 29 | 30 | 31 | @dataclass 32 | class DatasetConfig: 33 | name: str = MISSING 34 | path: str = MISSING 35 | loader: str = MISSING 36 | 37 | train_subset: Optional[float] = None 38 | val_subset: Optional[float] = None 39 | test_subset: Optional[float] = None 40 | 41 | arguments: Dict[str, Any] = field(default_factory=dict) 42 | 43 | 44 | def load_dataset_config(name: str): 45 | src_path = Path(os.path.realpath(__file__)).absolute().parent.parent 46 | dataset_configs_path = src_path.parent.joinpath('configs', 'dataset') 47 | config_path = dataset_configs_path.joinpath(f'{name}.yaml') 48 | return OmegaConf.merge(DatasetConfig, OmegaConf.load(config_path)) 49 | 50 | 51 | DATASET_LOADERS = { 52 | 'MimicCxrDataset': MimicCxrDataset, 53 | 'MimicCxrImageDataset': MimicCxrImageDataset, 54 | 'ChexpertDataset': ChexpertDataset, 55 | 'CovidRuralDataset': CovidRuralDataset, 56 | 'RsnaPneunomiaDetectionDataset': RsnaPneunomiaDetectionDataset, 57 | 'RsnaPneunomiaSegmentationDataset': RsnaPneunomiaSegmentationDataset, 58 | 'NihCxrDetectionDataset': NihCxrDetectionDataset, 59 | 'NihCxrSegmentationDataset': NihCxrSegmentationDataset, 60 | 'SIIMSegmentationDataset': SIIMSegmentationDataset, 61 | 'ObjectCxrDetectionDataset': ObjectCxrDetectionDataset, 62 | 'ObjectCxrSegmentationDataset': ObjectCxrSegmentationDataset 63 | } 64 | 65 | 66 | def load_dataset(config: Union[str, DatasetConfig]): 67 | if isinstance(config, str): 68 | config = load_dataset_config(config) 69 | if config.loader not in DATASET_LOADERS: 70 | raise ValueError(config.loader) 71 | datasets = DATASET_LOADERS[config.loader].load_from_disk(config.path, **config.arguments) 72 | 73 | datasets['train'] = apply_subset(datasets['train'], config.train_subset) 74 | datasets['validation'] = apply_subset(datasets['validation'], config.val_subset) 75 | datasets['test'] = apply_subset(datasets['test'], config.test_subset) 76 | 77 | return datasets 78 | 79 | 80 | def apply_subset(dataset, subset: Optional[float]): 81 | if subset is None: 82 | return dataset 83 | else: 84 | num_samples = round(len(dataset) * subset) 85 | indices = list(range(num_samples)) 86 | sub_dataset = Subset(dataset, indices) 87 | if hasattr(dataset, 'stats'): 88 | sub_dataset.stats = dataset.stats 89 | return sub_dataset 90 | -------------------------------------------------------------------------------- /src/data/datasets/COVID_rural/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/src/data/datasets/COVID_rural/__init__.py -------------------------------------------------------------------------------- /src/data/datasets/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/src/data/datasets/__init__.py -------------------------------------------------------------------------------- /src/data/datasets/base_dataset.py: -------------------------------------------------------------------------------- 1 | import csv 2 | import json 3 | import logging 4 | import os 5 | from abc import abstractmethod 6 | from itertools import islice 7 | from typing import List, Tuple, Dict, Any 8 | 9 | import PIL 10 | from torch.utils.data import Dataset 11 | import numpy as np 12 | 13 | from data.datasets.processing_utils import PIXEL_STATS_FILE_NAME 14 | 15 | 16 | log = logging.getLogger(__name__) 17 | 18 | 19 | class DetectionDataset(Dataset): 20 | def __init__(self, root_path, csv_path, stats): 21 | self.root_path = root_path 22 | with open(csv_path, newline='') as csvfile: 23 | reader = csv.reader(csvfile, delimiter=',') 24 | self.samples = list(islice(reader, 1, None)) 25 | self.stats = stats 26 | 27 | @classmethod 28 | def load_from_disk(cls, dataset_path: str, **kwargs): 29 | with open(os.path.join(dataset_path, PIXEL_STATS_FILE_NAME), "r") as f: 30 | stats = json.load(f) 31 | 32 | return { 33 | split: cls(dataset_path, os.path.join(dataset_path, f'{split}.csv'), split_stats, **kwargs) 34 | for split, split_stats in stats.items() 35 | } 36 | 37 | @abstractmethod 38 | def _get_img_path(self, id): 39 | raise NotImplementedError 40 | 41 | @classmethod 42 | def _extract_id_boxes_classes(cls, csv_row: List[str]) -> Tuple[str, str, str]: 43 | id, boxes, classes, *_ = csv_row 44 | return id, boxes, classes 45 | 46 | def __getitem__(self, item): 47 | id, boxes, classes = self._extract_id_boxes_classes(self.samples[item]) 48 | 49 | scan = PIL.Image.open(self._get_img_path(id)) 50 | boxes = [[float(coord) for coord in box.split(';')] for box in boxes.split('|') if len(box) > 0] 51 | boxes = np.array(boxes, dtype=float) if len(boxes) > 0 else np.zeros((0, 4)) 52 | assert boxes.shape[1] == 4, boxes.shape 53 | classes = np.array([0 for _ in boxes], dtype=int) # only 1 class => index = 0 54 | assert boxes.shape[0] == classes.shape[0] 55 | 56 | return { 57 | "id": id, 58 | "scan": scan, 59 | "detection_targets": {"boxes": boxes, "classes": classes} 60 | } 61 | 62 | def __len__(self): 63 | return len(self.samples) 64 | 65 | 66 | class SegmentationDataset(Dataset): 67 | def __init__(self, root_path, csv_path, stats): 68 | self.root_path = root_path 69 | with open(csv_path, newline='') as csvfile: 70 | reader = csv.reader(csvfile, delimiter=',') 71 | self.samples = list(islice(reader, 1, None)) 72 | self.stats = stats 73 | 74 | @classmethod 75 | def load_from_disk(cls, dataset_path: str, **kwargs): 76 | with open(os.path.join(dataset_path, PIXEL_STATS_FILE_NAME), "r") as f: 77 | stats = json.load(f) 78 | 79 | return { 80 | split: cls(dataset_path, os.path.join(dataset_path, f'{split}.csv'), split_stats, **kwargs) 81 | for split, split_stats in stats.items() 82 | } 83 | 84 | @abstractmethod 85 | def _get_img_path(self, id: Any) -> str: 86 | raise NotImplementedError 87 | 88 | @abstractmethod 89 | def _get_mask_paths(self, id: Any) -> Dict[str, str]: 90 | raise NotImplementedError 91 | 92 | @classmethod 93 | def _extract_id(cls, csv_row: List[str]) -> Any: 94 | return csv_row[0] 95 | 96 | def __getitem__(self, item): 97 | id = self._extract_id(self.samples[item]) 98 | 99 | scan = PIL.Image.open(self._get_img_path(id)) 100 | segmentation_masks = { 101 | mask_name: PIL.Image.open(mask_path) for mask_name, mask_path in self._get_mask_paths(id).items() 102 | } 103 | 104 | return { 105 | "id": id, 106 | "scan": scan, 107 | "segmentation_masks": segmentation_masks 108 | } 109 | 110 | def __len__(self): 111 | return len(self.samples) 112 | -------------------------------------------------------------------------------- /src/data/datasets/chexpert/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/src/data/datasets/chexpert/__init__.py -------------------------------------------------------------------------------- /src/data/datasets/mimic_cxr/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/src/data/datasets/mimic_cxr/__init__.py -------------------------------------------------------------------------------- /src/data/datasets/object_cxr/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/src/data/datasets/object_cxr/__init__.py -------------------------------------------------------------------------------- /src/data/datasets/processing_utils.py: -------------------------------------------------------------------------------- 1 | import csv 2 | import logging 3 | import math 4 | from typing import List, Tuple 5 | 6 | import PIL 7 | import datasets 8 | import numpy as np 9 | from pyarrow import compute 10 | 11 | 12 | log = logging.getLogger(__name__) 13 | PIXEL_STATS_FILE_NAME = 'dataset_statistics.json' 14 | 15 | 16 | def compute_pixel_stats(dataset: datasets.Dataset, pixel_mean_column='scan_pixel_mean', pixel_var_column='scan_pixel_var'): 17 | log.info('Computing pixel stats...') 18 | data = dataset.data 19 | if dataset._indices is not None: 20 | data = data.fast_gather(dataset._indices.column(0).to_pylist()) 21 | means = data.column(pixel_mean_column) 22 | variances = data.column(pixel_var_column) 23 | 24 | if len(dataset) > 0: 25 | mean = compute.mean(means).as_py() 26 | if len(dataset) > 1: 27 | var = compute.mean(variances).as_py() + compute.variance(means).as_py() 28 | else: 29 | var = compute.mean(variances).as_py() 30 | std = math.sqrt(var) 31 | else: 32 | mean = 0. 33 | std = 0. 34 | 35 | log.info(f'Pixel stats computed (mean={mean}, std={std})') 36 | return mean, std 37 | 38 | 39 | def write_detection_csv_file(target_path, samples: List[Tuple[str, List[Tuple[float, float, float, float]], List[int]]]): 40 | log.info('Writing csv file') 41 | with open(target_path, 'w', newline='') as csvfile: 42 | writer = csv.writer(csvfile, delimiter=',') 43 | writer.writerow(['ID', 'boxes', 'classes']) 44 | for sample_id, boxes, classes in samples: 45 | assert len(boxes) == len(classes) 46 | boxes = '|'.join(f'{box[0]};{box[1]};{box[2]};{box[3]}' for box in boxes) 47 | classes = '|'.join(str(int(cls)) for cls in classes) 48 | 49 | writer.writerow([sample_id, boxes, classes]) 50 | 51 | 52 | def write_segmentation_csv_file(target_path, samples: List[str]): 53 | log.info('Writing csv file') 54 | with open(target_path, 'w', newline='') as csvfile: 55 | writer = csv.writer(csvfile, delimiter=',') 56 | writer.writerow(['ID']) 57 | for sample_id in samples: 58 | writer.writerow([sample_id]) 59 | 60 | 61 | def compute_detection_dataset_stats(patient_ids, means, vars, boxes: List[Tuple[float, float, float, float]]): 62 | mean = np.mean(means) 63 | var = np.mean(vars) + np.var(means) 64 | 65 | return {'num_samples': len(means), 'scan': {'pixel_mean': float(mean), 'pixel_std': math.sqrt(float(var))}, 66 | 'patient_ids': patient_ids} 67 | 68 | 69 | def create_segmentation_mask(boxes, img_size, target_file): 70 | mask = np.zeros(img_size, dtype=np.uint8) 71 | for x, y, w, h in boxes: 72 | mask[round(y):round(y+h), round(x):round(x+w)] = 1. 73 | 74 | PIL.Image.fromarray(mask * 255).save(target_file, format='GIF') 75 | 76 | 77 | def random_split_indices(num_samples, test_percentage, val_percentage): 78 | indices = np.random.permutation(num_samples) 79 | num_test = round(test_percentage * num_samples) 80 | num_val = round(val_percentage * num_samples) 81 | test_indices = indices[:num_test] 82 | val_indices = indices[num_test:num_test + num_val] 83 | train_indices = indices[num_test + num_val:] 84 | return train_indices, val_indices, test_indices 85 | -------------------------------------------------------------------------------- /src/data/datasets/rsna_pneunomia_detection/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/src/data/datasets/rsna_pneunomia_detection/__init__.py -------------------------------------------------------------------------------- /src/data/datasets/siim_acr_pneumothorax/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/src/data/datasets/siim_acr_pneumothorax/__init__.py -------------------------------------------------------------------------------- /src/data/text_utils/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/src/data/text_utils/__init__.py -------------------------------------------------------------------------------- /src/data/text_utils/sentence_splitting.py: -------------------------------------------------------------------------------- 1 | from typing import Optional, List, Union, Tuple 2 | 3 | import stanza 4 | from nltk import PunktSentenceTokenizer, re 5 | from nltk.tokenize.punkt import PunktParameters, PunktTrainer 6 | from stanza import Document 7 | 8 | 9 | PATTERN_REPLACE_MULTILINES = re.compile(r'(?:[\t ]*(?:\r\n|\n)+)+', flags=re.MULTILINE) 10 | PATTERN_REPLACE_MULTISPACES = re.compile(r'[\t ]+') 11 | 12 | 13 | def clean_text(text: str, replace_newlines=False, newline_token=' ') -> str: 14 | text = PATTERN_REPLACE_MULTILINES.sub('\n', text) 15 | 16 | if replace_newlines: 17 | text = text.replace('\n', newline_token) 18 | text = PATTERN_REPLACE_MULTISPACES.sub(' ', text).strip() 19 | return text 20 | 21 | 22 | class SentenceSplitter: 23 | def __init__(self, lang: str = 'de', 24 | train_text: str = None, abbreviations: list = None, 25 | section_splitter=None, optional_sections: list = None): 26 | stanza.download(lang) 27 | self.stanza_tokenizer = stanza.Pipeline(lang, processors='tokenize', use_gpu=False) 28 | 29 | if train_text is not None: 30 | params = PunktTrainer(train_text).get_params() 31 | elif abbreviations: 32 | params = PunktParameters() 33 | else: 34 | params = None 35 | if abbreviations: 36 | for abbr in abbreviations: 37 | split_abbr = abbr.lower().split(' ') 38 | split_abbr = tuple(abbr_part[:-1] if abbr_part.endswith('.') else abbr_part for abbr_part in split_abbr) 39 | 40 | if len(split_abbr) == 1: 41 | params.abbrev_types.add(split_abbr[0]) 42 | elif len(split_abbr) == 2: 43 | params.collocations.add((tuple(split_abbr))) 44 | else: 45 | raise ValueError(abbr) 46 | 47 | self.punkt_tokenizer = PunktSentenceTokenizer(params) if params else None 48 | 49 | self.section_splitter = section_splitter 50 | self.optional_sections = optional_sections if optional_sections else [] 51 | 52 | def __call__(self, report_full_text: str, **kwargs): 53 | if self.section_splitter is None: 54 | return report_full_text, self._process_section(report_full_text) 55 | else: 56 | sections = self.section_splitter(report_full_text, **kwargs) 57 | if sections is None: 58 | return None 59 | return { 60 | key: (report_full_text, *self._process_section(text)) 61 | for key, text in sections.items() 62 | } 63 | 64 | def _process_section(self, section_txt: str) -> Tuple[List[str], int]: 65 | doc: Document = self.stanza_tokenizer(section_txt) 66 | 67 | sentences = [sent.text for sent in doc.sentences] 68 | if self.punkt_tokenizer: 69 | merged_sentences = [] 70 | i = 0 71 | while i < len(sentences)-1: 72 | sent_a, sent_b = sentences[i], sentences[i+1] 73 | split_pos = len(sent_a) + 1 74 | merged_sentence = sent_a + ' ' + sent_b 75 | 76 | punkt_spans = self.punkt_tokenizer.span_tokenize(merged_sentence) 77 | punkt_split_positions = [span[0] for span in punkt_spans] 78 | 79 | if split_pos not in punkt_split_positions: 80 | # punkt does not agree on split => ignore split 81 | merged_sentences.append(merged_sentence) 82 | i += 2 83 | else: 84 | merged_sentences.append(sent_a) 85 | i += 1 86 | sentences = merged_sentences 87 | 88 | sentences = [clean_text(sentence, replace_newlines=True) for sentence in sentences] 89 | 90 | return sentences, doc.num_tokens 91 | -------------------------------------------------------------------------------- /src/metrics/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/src/metrics/__init__.py -------------------------------------------------------------------------------- /src/metrics/classification_metrics.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from pytorch_lightning.metrics import Metric 3 | 4 | 5 | class TopKAccuracy(Metric): 6 | """ 7 | ref: 8 | - https://pytorch.org/docs/stable/generated/torch.topk.html 9 | - https://discuss.pytorch.org/t/imagenet-example-accuracy-calculation/7840 10 | - https://gist.github.com/weiaicunzai/2a5ae6eac6712c70bde0630f3e76b77b 11 | - https://discuss.pytorch.org/t/top-k-error-calculation/48815/2 12 | - https://stackoverflow.com/questions/59474987/how-to-get-top-k-accuracy-in-semantic-segmentation-using-pytorch 13 | """ 14 | def __init__(self, topk=(1,), name='top_{k}_acc', dist_sync_on_step=False): 15 | super().__init__(dist_sync_on_step=dist_sync_on_step) 16 | 17 | self.topk = topk 18 | self.maxk = max(topk) 19 | self.names = [name.format(k=k) for k in topk] 20 | self.add_state("correct", default=torch.zeros(len(topk)), dist_reduce_fx="sum") 21 | self.add_state("total", default=torch.tensor(0), dist_reduce_fx="sum") 22 | 23 | def update(self, logits: torch.Tensor, target: torch.Tensor): 24 | # get top maxk indicies that correspond to the most likely probability scores 25 | # (note _ means we don't care about the actual top maxk scores just their corresponding indicies/labels) 26 | maxk = min(self.maxk, int(logits.size(1))) 27 | _, y_pred = logits.topk(k=maxk, dim=1) # _, [B, n_classes] -> [B, maxk] 28 | y_pred = y_pred.t() # [B, maxk] -> [maxk, B] Expects input to be <= 2-D tensor and transposes dimensions 0 and 1. 29 | 30 | # - get the credit for each example if the models predictions is in maxk values (main crux of code) 31 | # for any example, the model will get credit if it's prediction matches the ground truth 32 | # for each example we compare if the model's best prediction matches the truth. If yes we get an entry of 1. 33 | # if the k'th top answer of the model matches the truth we get 1. 34 | # Note: this for any example in batch we can only ever get 1 match (so we never overestimate accuracy <1) 35 | target_reshaped = target.view(1, -1).expand_as(y_pred) # [B] -> [1, B] -> [maxk, B] 36 | # compare every topk's model prediction with the ground truth & give credit if any matches the ground truth 37 | correct = (y_pred == target_reshaped) # [maxk, B] were for each example we know which topk prediction matched truth 38 | # original: correct = pred.eq(target.view(1, -1).expand_as(pred)) 39 | 40 | # -- get topk accuracy 41 | for i, k in enumerate(self.topk): 42 | k = min(k, maxk) 43 | # get tensor of which topk answer was right 44 | ind_which_topk_matched_truth = correct[:k] # [maxk, B] -> [k, B] 45 | # flatten it to help compute if we got it correct for each example in batch 46 | flattened_indicator_which_topk_matched_truth = ind_which_topk_matched_truth.reshape( 47 | -1).float() # [k, B] -> [kB] 48 | # get if we got it right for any of our top k prediction for each example in batch 49 | tot_correct_topk = flattened_indicator_which_topk_matched_truth.float().sum(dim=0, 50 | keepdim=True) # [kB] -> [1] 51 | self.correct[i, None] += tot_correct_topk 52 | self.total += target.size(0) # batch size 53 | 54 | def compute(self): 55 | topk_accuracies = self.correct.float() / self.total # (len(topk)) 56 | return {name: acc for name, acc in zip(self.names, topk_accuracies)} -------------------------------------------------------------------------------- /src/models/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/src/models/__init__.py -------------------------------------------------------------------------------- /src/models/components/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/src/models/components/__init__.py -------------------------------------------------------------------------------- /src/models/components/aggregation.py: -------------------------------------------------------------------------------- 1 | from functools import partial 2 | from typing import Optional 3 | 4 | import torch 5 | from torch import nn 6 | from torch.nn import MultiheadAttention 7 | 8 | from models.components.utils import AttentionMask 9 | 10 | 11 | class GlobalMaxPool(nn.Module): 12 | def __init__(self, dim: int = 1, d=None): 13 | super(GlobalMaxPool, self).__init__() 14 | self.dim = dim 15 | 16 | def forward(self, x: torch.Tensor, mask: Optional[AttentionMask] = None): 17 | """ 18 | 19 | :param x: (B x N x d) 20 | :param mask: (B x N) 21 | :return: 22 | """ 23 | if mask is not None: 24 | x = x + mask.additive_mask[:, :, None] 25 | return torch.max(x, dim=self.dim)[0] 26 | 27 | 28 | class GlobalAvgPool(nn.Module): 29 | def __init__(self, dim: int = 1, d=None): 30 | super(GlobalAvgPool, self).__init__() 31 | self.dim = dim 32 | 33 | def forward(self, x, mask: Optional[AttentionMask] = None): 34 | if mask is not None: 35 | x = torch.masked_fill(x, mask.inverted_binary_mask[:, :, None], 0.) 36 | return torch.mean(x, dim=self.dim) 37 | 38 | 39 | class GlobalAvgAttentionAggregator(nn.Module): 40 | def __init__(self, d, num_heads=8): 41 | super(GlobalAvgAttentionAggregator, self).__init__() 42 | 43 | self.avg_pool = GlobalAvgPool(dim=1) 44 | self.attention = MultiheadAttention(d, num_heads=num_heads, batch_first=True) 45 | 46 | def forward(self, x: torch.Tensor, mask: Optional[AttentionMask] = None, return_weights=False): 47 | """ 48 | 49 | :param x: (B x N x d) 50 | :param mask: 51 | :return: 52 | """ 53 | query = self.avg_pool(x, mask=mask) # (B x d) 54 | bool_mask = mask.inverted_binary_mask if mask is not None else None 55 | # (B x 1 x d), (B x 1 x N) 56 | attn_output, attn_output_weights = self.attention(query[:, None, :], key=x, value=x, key_padding_mask=bool_mask) 57 | attn_output = attn_output.squeeze(dim=1) # (B x d) 58 | attn_output_weights = attn_output_weights.squeeze(1) # (B x N) 59 | 60 | if return_weights: 61 | return attn_output, attn_output_weights 62 | else: 63 | return attn_output 64 | 65 | 66 | class GlobalTokenAggregator(nn.Module): 67 | def __init__(self, global_index=0, d=None): 68 | super(GlobalTokenAggregator, self).__init__() 69 | self.global_index = global_index 70 | 71 | def forward(self, x: torch.Tensor, mask: Optional[AttentionMask] = None): 72 | return x[:, self.global_index, :] 73 | 74 | 75 | AGGREGATOR_DICT = { 76 | 'max': GlobalMaxPool, 77 | 'avg': GlobalAvgPool, 78 | 'avgpool_attention': GlobalAvgAttentionAggregator, 79 | 'token_0': partial(GlobalTokenAggregator, global_index=0), 80 | } 81 | 82 | 83 | def get_aggregator(aggregator, d: int, **kwargs): 84 | """ 85 | 86 | :param aggregator: 87 | :param d: 88 | :param dim: 89 | :param kwargs: 90 | :return: Aggegrator: (B x N x d) -> (B x d) 91 | """ 92 | if isinstance(aggregator, str): 93 | return AGGREGATOR_DICT[aggregator](d=d, **kwargs) 94 | else: 95 | return aggregator 96 | -------------------------------------------------------------------------------- /src/models/downstream/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/src/models/downstream/__init__.py -------------------------------------------------------------------------------- /src/models/downstream/classification.py: -------------------------------------------------------------------------------- 1 | from typing import List 2 | 3 | import torch 4 | from torch.nn import functional as F 5 | from torch import nn 6 | 7 | from data.datasets.chexpert.chexpert_dataset import chexpert_labels 8 | 9 | 10 | class ClassificationHead(nn.Module): 11 | def __init__(self, d_y: int, classifier_task: str = 'chexpert_binary', 12 | d_hidden=512, dropout_prob=0.2, nonlinear=False, dataset_stats=None): 13 | super(ClassificationHead, self).__init__() 14 | if classifier_task == 'chexpert_binary': 15 | classifier_loss = MultiTaskBinaryClassifierLoss(chexpert_labels()) 16 | self.labels_name = 'chexpert_bin_labels' 17 | self.labels_converter = None 18 | elif classifier_task == 'chexpert_binary_weighted': 19 | assert dataset_stats is not None 20 | classifier_loss = MultiTaskBinaryClassifierLoss(chexpert_labels(), 21 | pos_weights=dataset_stats.get('chexpert_bin_pos_weights')) 22 | self.labels_name = 'chexpert_bin_labels' 23 | self.labels_converter = None 24 | else: 25 | raise ValueError(classifier_task) 26 | 27 | self.nonlinear = nonlinear 28 | if nonlinear: 29 | assert d_hidden is not None 30 | self.project = nn.Linear(d_y, d_hidden, bias=False) 31 | self.relu = nn.ReLU(inplace=True) 32 | self.bn = nn.BatchNorm1d(d_hidden) 33 | else: 34 | d_hidden = d_y 35 | 36 | self.dropout = nn.Dropout(p=dropout_prob) 37 | self.classifier = nn.Linear(d_hidden, classifier_loss.num_logits, bias=True) 38 | self.classifier_loss = classifier_loss 39 | self.num_labels = len(self.classifier_loss.tasks) 40 | 41 | def get_labels(self, batch): 42 | labels = batch[self.labels_name] 43 | if self.labels_converter: 44 | labels = self.labels_converter(labels) 45 | return labels 46 | 47 | def forward(self, yg, labels=None, return_probs=True): 48 | """ 49 | 50 | :param x: (B x d_y) 51 | :return: probs, labels, loss 52 | """ 53 | x = yg 54 | if self.nonlinear: 55 | x = self.dropout(x) 56 | x = self.project(x) 57 | x = self.bn(x) 58 | x = self.relu(x) 59 | x = self.dropout(x) 60 | x = self.classifier(x) 61 | return self.classifier_loss(x, labels=labels, return_probs=return_probs) 62 | 63 | 64 | class MultiTaskBinaryClassifierLoss(nn.Module): 65 | def __init__(self, tasks: List[str], pos_weights: dict = None): 66 | super(MultiTaskBinaryClassifierLoss, self).__init__() 67 | self.num_logits = len(tasks) 68 | self.tasks = tasks 69 | if pos_weights is not None: 70 | self.register_buffer('pos_weights', torch.tensor([pos_weights[task] for task in tasks])) 71 | else: 72 | self.pos_weights = None 73 | 74 | def stacked_to_dict(self, stacked_tensor: torch.Tensor) -> dict: 75 | return {task: stacked_tensor[:, i] for i, task in enumerate(self.tasks)} 76 | 77 | def forward(self, logits, labels=None, return_probs=True): 78 | """ 79 | 80 | :param logits: B x N_tasks 81 | :param labels: dict { task_name: (B) } or B x N_tasks 82 | :param return_probs: If True return the prediction as probs if false return predictions as logits 83 | :return: 84 | """ 85 | 86 | if labels is not None: 87 | # B x N_tasks 88 | if isinstance(labels, dict): 89 | labels = torch.stack([labels[task] for task in self.tasks], dim=-1) 90 | else: 91 | assert isinstance(labels, torch.Tensor) 92 | loss = F.binary_cross_entropy_with_logits(logits, labels.type_as(logits), pos_weight=self.pos_weights) 93 | predictions = torch.sigmoid(logits) if return_probs else logits 94 | 95 | if labels is not None: 96 | return predictions, labels, loss 97 | else: 98 | return predictions 99 | 100 | -------------------------------------------------------------------------------- /src/models/image/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/src/models/image/__init__.py -------------------------------------------------------------------------------- /src/models/image/resnet.py: -------------------------------------------------------------------------------- 1 | from collections import OrderedDict 2 | from itertools import islice 3 | from typing import List 4 | 5 | from torch import nn 6 | from torchvision.models import ResNet 7 | 8 | 9 | class ResNetFeatureExtractor(nn.Module): 10 | LAYERS = {'input', 'conv1', 'conv2', 'conv3', 'conv4', 'conv5'} 11 | def __init__(self, backbone: ResNet, extracted_layers: List[str]): 12 | super(ResNetFeatureExtractor, self).__init__() 13 | assert len(extracted_layers) > 0 14 | 15 | self.d = { 16 | 'input': 3, 17 | 'conv1': backbone.conv1.out_channels, # 64, 18 | 'conv2': 256, 19 | 'conv3': 512, 20 | 'conv4': 1024, 21 | 'conv5': 2048 22 | } 23 | self.downscale_factors = { 24 | 'input': 1, 25 | 'conv1': 2, 26 | 'conv2': 4, 27 | 'conv3': 8, 28 | 'conv4': 16, 29 | 'conv5': 32 30 | } 31 | 32 | self.layer_name_to_index = { 33 | 'input': -1, 34 | 'conv1': 2, # 0: conv1, 1: bn1, 2: relu 35 | 'conv2': 4, # 3: maxpool, 4: layer1 36 | 'conv3': 5, # layer2 37 | 'conv4': 6, # layer3 38 | 'conv5': 7 # layer4 39 | } 40 | 41 | self.backbone_layers = nn.ModuleDict(list(backbone.named_children())[:-2]) # avg pool and final fc layers are never used 42 | self.set_extracted_feature_layers(extracted_layers) 43 | 44 | def set_extracted_feature_layers(self, extracted_layers: List[str]): 45 | self.extracted_layers_by_index = { 46 | self.layer_name_to_index[extracted_name]: extracted_name for extracted_name in extracted_layers 47 | } 48 | self.num_encoder_layers = max(self.layer_name_to_index[layer_name] for layer_name in extracted_layers) + 1 49 | 50 | def add_extracted_feature_layer(self, feature_layer): 51 | if feature_layer not in self.extracted_layers_by_index.values(): 52 | layer_index = self.layer_name_to_index[feature_layer] 53 | self.num_encoder_layers = max(self.num_encoder_layers, layer_index + 1) 54 | self.extracted_layers_by_index[layer_index] = feature_layer 55 | 56 | def forward(self, x): 57 | extracted_features = OrderedDict() 58 | 59 | # save input feature map if required (index = -1) 60 | extracted_name = self.extracted_layers_by_index.get(-1) 61 | if extracted_name is not None: 62 | extracted_features[extracted_name] = x 63 | 64 | for index, layer in enumerate(islice(self.backbone_layers.values(), self.num_encoder_layers)): 65 | # apply layer 66 | x = layer(x) 67 | 68 | # extract the output feature map of this layer if required 69 | extracted_name = self.extracted_layers_by_index.get(index) 70 | 71 | if extracted_name is not None: 72 | extracted_features[extracted_name] = x 73 | 74 | return extracted_features -------------------------------------------------------------------------------- /src/models/image/unet.py: -------------------------------------------------------------------------------- 1 | from collections import OrderedDict 2 | from typing import List, Collection 3 | 4 | from pl_bolts.models.vision import UNet 5 | from torch import nn 6 | 7 | 8 | class UNetFeatureExtractor(nn.Module): 9 | def __init__(self, extracted_layers: List[str], backbone: UNet = None): 10 | super(UNetFeatureExtractor, self).__init__() 11 | 12 | if backbone is None: 13 | backbone = UNet(num_classes=1) # classifier layer will not be used 14 | self.layers = backbone.layers[:-1] # remove classifier layer 15 | self.num_layers = backbone.num_layers 16 | 17 | self.d = { 18 | 'input': 3, 19 | 'conv': 64, 20 | 'down1': 128, 21 | 'down2': 256, 22 | 'down3': 512, 23 | 'down4': 1024, 24 | 'up1': 512, 25 | 'up2': 256, 26 | 'up3': 128, 27 | 'up4': 64, 28 | } 29 | self.downscale_factors = { 30 | 'input': 1, 31 | 'conv': 1, 32 | 'down1': 2, 33 | 'down2': 4, 34 | 'down3': 8, 35 | 'down4': 16, 36 | 'up1': 8, 37 | 'up2': 4, 38 | 'up3': 2, 39 | 'up4': 1, 40 | } 41 | 42 | assert all(layer in set(self.d.keys()) for layer in extracted_layers) 43 | self.extracted_layers = set(extracted_layers) 44 | 45 | def set_extracted_feature_layers(self, extracted_layers: Collection[str]): 46 | self.extracted_layers = set(extracted_layers) 47 | 48 | def forward(self, x): 49 | extracted_features = OrderedDict() 50 | if 'input' in self.extracted_layers: 51 | extracted_features['input'] = x 52 | 53 | xi = [self.layers[0](x)] 54 | if 'conv' in self.extracted_layers: 55 | extracted_features['conv'] = xi[0] 56 | 57 | # Down path 58 | for i, layer in enumerate(self.layers[1:self.num_layers]): 59 | xi.append(layer(xi[-1])) 60 | if f'down{i+1}' in self.extracted_layers: 61 | extracted_features[f'down{i+1}'] = xi[-1] 62 | 63 | # Up path 64 | for i, layer in enumerate(self.layers[self.num_layers:]): 65 | xi[-1] = layer(xi[-1], xi[-2 - i]) 66 | if f'up{i+1}' in self.extracted_layers: 67 | extracted_features[f'up{i+1}'] = xi[-1] 68 | 69 | return extracted_features 70 | -------------------------------------------------------------------------------- /src/models/objectives/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/src/models/objectives/__init__.py -------------------------------------------------------------------------------- /src/models/pretraining/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/src/models/pretraining/__init__.py -------------------------------------------------------------------------------- /src/models/text/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/src/models/text/__init__.py -------------------------------------------------------------------------------- /src/models/text/language_model_loader.py: -------------------------------------------------------------------------------- 1 | from enum import Enum 2 | from typing import Tuple 3 | 4 | from transformers import PretrainedConfig, BertForMaskedLM, BertModel, \ 5 | PreTrainedModel, PreTrainedTokenizerBase, BertTokenizerFast 6 | 7 | 8 | class ModelTask(Enum): 9 | MASKED_LM = 1 10 | 11 | 12 | MODEL_MAPPINGS = { 13 | 'bert': { 14 | None: BertModel, 15 | ModelTask.MASKED_LM: BertForMaskedLM 16 | } 17 | } 18 | 19 | TOKENIZER_MAPPINGS = { 20 | 'bert': BertTokenizerFast, 21 | } 22 | 23 | 24 | def load_language_model(pretrained_model: str, pretrained_tokenizer: str = None, model_task: ModelTask = None, 25 | do_load_tokenizer=True, 26 | do_load_model=True) -> Tuple[PreTrainedModel, PreTrainedTokenizerBase]: 27 | if pretrained_tokenizer is None: 28 | pretrained_tokenizer = pretrained_model 29 | config_dict, _ = PretrainedConfig.get_config_dict(pretrained_model) 30 | model_type = config_dict["model_type"] 31 | 32 | if do_load_model: 33 | model_class = MODEL_MAPPINGS[model_type][model_task] 34 | model = model_class.from_pretrained(pretrained_model) 35 | else: 36 | model = None 37 | 38 | if do_load_tokenizer: 39 | tokenizer = TOKENIZER_MAPPINGS[model_type].from_pretrained(pretrained_tokenizer) 40 | else: 41 | tokenizer = None 42 | return model, tokenizer 43 | -------------------------------------------------------------------------------- /src/scripts/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philip-mueller/lovt/f548985d0e56989107948df983a7d508fe510878/src/scripts/__init__.py -------------------------------------------------------------------------------- /src/scripts/imports.py: -------------------------------------------------------------------------------- 1 | # imports required for instantiation by hydra !!! 2 | from common.wandb import * 3 | -------------------------------------------------------------------------------- /src/scripts/run_finetuning.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import os 3 | import sys 4 | from dataclasses import dataclass, field 5 | from pathlib import Path 6 | from typing import Optional, List 7 | 8 | import hydra 9 | from hydra.core.config_store import ConfigStore 10 | from omegaconf import MISSING, OmegaConf 11 | 12 | if __name__ == '__main__': 13 | scripts_path = Path(os.path.realpath(__file__)).absolute().parent 14 | root_path = scripts_path.parent 15 | sys.path.insert(0, str(root_path)) 16 | 17 | 18 | from analysis.postprocess_run import update_best_epoch, average_runs, PretrainingRun 19 | from common.wandb import finish_run, get_wandb_logger 20 | from models.downstream.downstream_evaluation import ClassificationEvaluationModelConfig, ClassificationEvaluator, \ 21 | SegmentationEvaluationModelConfig, EvaluationModelConfig, create_downstream_evaluator_for_bimodal_model, \ 22 | DetectionEvaluationModelConfig 23 | from common.script_utils import BaseExperimentConfig, init_trainer, log_hyperparameters, setup_training 24 | 25 | log = logging.getLogger(__name__) 26 | 27 | 28 | @dataclass 29 | class CheckpointFinetuningExperimentConfig(BaseExperimentConfig): 30 | pretrained_model: str = MISSING 31 | evaluated_encoder: str = 'a' 32 | evaluation_model: EvaluationModelConfig = MISSING 33 | feature_layer: Optional[str] = None 34 | 35 | reported_metrics: List[str] = field(default_factory=list) 36 | 37 | average_runs: int = 1 38 | 39 | 40 | cs = ConfigStore.instance() 41 | cs.store(name="fine_tune_config", node=CheckpointFinetuningExperimentConfig) 42 | cs.store(group="evaluation_model", name="classification_eval", node=ClassificationEvaluationModelConfig) 43 | cs.store(group="evaluation_model", name="segmentation_eval", node=SegmentationEvaluationModelConfig) 44 | cs.store(group="evaluation_model", name="detection_eval", node=DetectionEvaluationModelConfig) 45 | 46 | 47 | def run_finetuning(config: CheckpointFinetuningExperimentConfig): 48 | setup_training(config) 49 | run_base_name = config.name 50 | pretrain_run = PretrainingRun.from_run_path(config.pretrained_model) 51 | wandb_runs = [] 52 | for run_i in range(config.average_runs): 53 | if config.average_runs > 1: 54 | config.name = f'{run_base_name}[{run_i+1}]' 55 | log.info(f"----- Initializing Model -----") 56 | model, pretrain_config, pretrain_dataset = create_downstream_evaluator_for_bimodal_model( 57 | checkpoint_path=pretrain_run.best_checkpoint_path, 58 | encoder=config.evaluated_encoder, 59 | eval_config=config.evaluation_model, 60 | num_workers=config.num_dataloader_workers, 61 | feature_layer=config.feature_layer 62 | ) 63 | 64 | log.info(f"----- Initializing Trainer -----") 65 | logger, trainer = init_trainer( 66 | model, 67 | trainer_config=config.trainer, 68 | gpus=config.gpus, 69 | logger_configs=config.logger, 70 | callback_configs=config.callback 71 | ) 72 | 73 | OmegaConf.set_struct(config, False) 74 | config.pretrain_model = pretrain_config 75 | config.pretrain_dataset = pretrain_dataset 76 | 77 | log_hyperparameters(config, model, trainer, logger) 78 | 79 | # Train the model 80 | log.info(f"----- Starting Finetuning -----") 81 | trainer.fit(model=model) 82 | 83 | log.info(f"----- Completed Finetuning -----") 84 | 85 | log.info(f"----- Testing -----") 86 | trainer.test() 87 | log.info(f"----- Done -----") 88 | 89 | run_api, dir = finish_run(trainer) 90 | update_best_epoch(run_api, config.monitor_metric, config.monitor_metric_mode) 91 | wandb_runs.append(run_api) 92 | 93 | avg_summary = average_runs(wandb_runs) 94 | for run in wandb_runs: 95 | run.summary.update(avg_summary) 96 | log.info(f'Reported metrics: {config.reported_metrics}') 97 | pretrain_run.update_summary(avg_summary, config.evaluation_model.eval_name, include_keys=config.reported_metrics + ['run_ids']) 98 | 99 | return avg_summary 100 | 101 | 102 | @hydra.main(config_path="../../configs/", config_name="fine_tune") 103 | def main(config: CheckpointFinetuningExperimentConfig): 104 | return run_finetuning(config) 105 | 106 | 107 | if __name__ == "__main__": 108 | main() 109 | -------------------------------------------------------------------------------- /src/scripts/run_finetuning_baseline.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import os 3 | import sys 4 | from dataclasses import dataclass 5 | from pathlib import Path 6 | 7 | import hydra 8 | from hydra.core.config_store import ConfigStore 9 | from omegaconf import MISSING 10 | 11 | 12 | if __name__ == '__main__': 13 | scripts_path = Path(os.path.realpath(__file__)).absolute().parent 14 | root_path = scripts_path.parent 15 | sys.path.insert(0, str(root_path)) 16 | 17 | 18 | from baselines.baseline_utils import export_pretrained_weights 19 | from analysis.postprocess_run import update_best_epoch, average_runs 20 | from common.wandb import finish_run 21 | from models.image.scan_encoder import ScanEncoderConfig 22 | from models.text.report_encoder import ReportEncoderConfig 23 | from models.components.utils import EncoderConfig 24 | from models.downstream.downstream_evaluation import ClassificationEvaluationModelConfig, \ 25 | SegmentationEvaluationModelConfig, EvaluationModelConfig, create_downstream_evaluator_for_encoder, \ 26 | DetectionEvaluationModelConfig 27 | from common.script_utils import BaseExperimentConfig, init_trainer, log_hyperparameters, setup_training 28 | 29 | log = logging.getLogger(__name__) 30 | 31 | 32 | @dataclass 33 | class EncoderFinetuningExperimentConfig(BaseExperimentConfig): 34 | encoder: EncoderConfig = MISSING 35 | evaluation_model: EvaluationModelConfig = MISSING 36 | 37 | average_runs: int = 1 38 | export_backbone_weights: bool = False 39 | 40 | 41 | cs = ConfigStore.instance() 42 | cs.store(name="baseline_fine_tune_config", node=EncoderFinetuningExperimentConfig) 43 | cs.store(group="encoder", name="base_scan_encoder", node=ScanEncoderConfig) 44 | cs.store(group="encoder", name="base_report_encoder", node=ReportEncoderConfig) 45 | cs.store(group="evaluation_model", name="classification_eval", node=ClassificationEvaluationModelConfig) 46 | cs.store(group="evaluation_model", name="segmentation_eval", node=SegmentationEvaluationModelConfig) 47 | cs.store(group="evaluation_model", name="detection_eval", node=DetectionEvaluationModelConfig) 48 | 49 | 50 | def run_finetuning(config: EncoderFinetuningExperimentConfig): 51 | setup_training(config) 52 | run_base_name = config.name 53 | wandb_runs = [] 54 | for run_i in range(config.average_runs): 55 | if config.average_runs > 1: 56 | config.name = f'{run_base_name}[{run_i + 1}]' 57 | log.info(f"----- Initializing Model -----") 58 | model = create_downstream_evaluator_for_encoder( 59 | encoder_config=config.encoder, 60 | eval_config=config.evaluation_model, 61 | num_workers=config.num_dataloader_workers 62 | ) 63 | 64 | log.info(f"----- Initializing Trainer -----") 65 | logger, trainer = init_trainer( 66 | model, 67 | trainer_config=config.trainer, 68 | gpus=config.gpus, 69 | logger_configs=config.logger, 70 | callback_configs=config.callback 71 | ) 72 | 73 | log_hyperparameters(config, model, trainer, logger) 74 | 75 | # Train the model 76 | log.info(f"----- Starting Finetuning -----") 77 | trainer.fit(model=model) 78 | log.info(f"----- Completed Finetuning -----") 79 | 80 | log.info(f"----- Testing -----") 81 | trainer.test() 82 | log.info(f"----- Done -----") 83 | 84 | if config.export_backbone_weights: 85 | # segmentation_model.backbone 86 | export_pretrained_weights(config, model, trainer, last_model=False, 87 | model_extraction_fn=lambda x: x.backbone) 88 | 89 | run_api, dir = finish_run(trainer) 90 | update_best_epoch(run_api, config.monitor_metric, config.monitor_metric_mode) 91 | wandb_runs.append(run_api) 92 | avg_summary = average_runs(wandb_runs) 93 | for run in wandb_runs: 94 | run.summary.update(avg_summary) 95 | return avg_summary 96 | 97 | 98 | @hydra.main(config_path="../../configs/", config_name="baseline_fine_tune") 99 | def main(config: EncoderFinetuningExperimentConfig): 100 | return run_finetuning(config) 101 | 102 | 103 | if __name__ == "__main__": 104 | main() 105 | -------------------------------------------------------------------------------- /src/scripts/run_training.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import os 3 | import sys 4 | import time 5 | from pathlib import Path 6 | 7 | from omegaconf import OmegaConf 8 | import hydra 9 | 10 | if __name__ == '__main__': 11 | scripts_path = Path(os.path.realpath(__file__)).absolute().parent 12 | root_path = scripts_path.parent 13 | sys.path.insert(0, str(root_path)) 14 | 15 | from analysis.postprocess_run import update_best_epoch, export_pretrain_run 16 | from common.wandb import finish_run 17 | from models.downstream.downstream_evaluation import ClassificationEvaluator 18 | from common.script_utils import log_hyperparameters, init_trainer, setup_training, PreTrainExperimentConfig, \ 19 | init_config_store 20 | from models.downstream.online_evaluation import instantiate_online_evaluator 21 | from models.pretraining.bimodal_alignment_model import BiModalModelRepresentationLearner 22 | 23 | log = logging.getLogger(__name__) 24 | 25 | init_config_store() 26 | 27 | 28 | def run_training(config: PreTrainExperimentConfig): 29 | setup_training(config) 30 | 31 | log.info(f"----- Initializing Model -----") 32 | model = BiModalModelRepresentationLearner(OmegaConf.to_container(config.pretrain_model), 33 | dataset=config.pretrain_dataset, 34 | num_workers=config.num_dataloader_workers) 35 | 36 | log.info(f"----- Initializing Trainer -----") 37 | online_eval_callbacks = [] 38 | for name, oe_conf in config.online_eval.items(): 39 | log.info(f"Instantiating online_evaluator <{oe_conf.task}> for {name}") 40 | online_eval_callbacks.append(instantiate_online_evaluator(oe_conf, name=name)) 41 | 42 | logger, trainer = init_trainer( 43 | model, 44 | trainer_config=config.trainer, 45 | gpus=config.gpus, 46 | auto_lr_find=config.auto_lr_find, 47 | logger_configs=config.logger, 48 | callback_configs=config.callback, 49 | callbacks=online_eval_callbacks 50 | ) 51 | 52 | log_hyperparameters(config, model, trainer, logger) 53 | 54 | if config.auto_lr_find: 55 | log.info(f"----- Tuning LR -----") 56 | trainer.tune(model) 57 | log.info(f"----- Completed LR Tuning -----") 58 | 59 | # Train the model 60 | log.info(f"----- Starting Pretraining -----") 61 | trainer.fit(model=model) 62 | 63 | log.info(f"----- Completed Pretraining -----") 64 | 65 | run_api, _ = finish_run(trainer) 66 | run_path = os.getcwd() 67 | os.chdir('..') 68 | new_path = export_pretrain_run(run_path) 69 | os.chdir(new_path) 70 | 71 | 72 | @hydra.main(config_path="../../configs/", config_name="train_representation") 73 | def main(config: PreTrainExperimentConfig): 74 | return run_training(config) 75 | 76 | 77 | if __name__ == "__main__": 78 | main() 79 | --------------------------------------------------------------------------------