├── Slide-level multi-class subtyping task ├── splits │ ├── BRCA_subtyping2 │ │ ├── splits_0_descriptor.csv │ │ ├── splits_1_descriptor.csv │ │ ├── splits_2_descriptor.csv │ │ ├── splits_3_descriptor.csv │ │ ├── splits_4_descriptor.csv │ │ ├── splits_5_descriptor.csv │ │ ├── splits_6_descriptor.csv │ │ ├── splits_7_descriptor.csv │ │ ├── splits_8_descriptor.csv │ │ └── splits_9_descriptor.csv │ ├── NSCLC_subtyping2 │ │ ├── splits_0_descriptor.csv │ │ ├── splits_1_descriptor.csv │ │ ├── splits_2_descriptor.csv │ │ ├── splits_3_descriptor.csv │ │ ├── splits_4_descriptor.csv │ │ ├── splits_5_descriptor.csv │ │ ├── splits_6_descriptor.csv │ │ ├── splits_7_descriptor.csv │ │ ├── splits_8_descriptor.csv │ │ └── splits_9_descriptor.csv │ ├── Camelyon16_subtyping2 │ │ ├── splits_0_descriptor.csv │ │ ├── splits_1_descriptor.csv │ │ ├── splits_2_descriptor.csv │ │ ├── splits_3_descriptor.csv │ │ ├── splits_4_descriptor.csv │ │ ├── splits_5_descriptor.csv │ │ ├── splits_6_descriptor.csv │ │ ├── splits_7_descriptor.csv │ │ ├── splits_8_descriptor.csv │ │ ├── splits_9_descriptor.csv │ │ ├── splits_0.csv │ │ ├── splits_1.csv │ │ ├── splits_2.csv │ │ ├── splits_3.csv │ │ ├── splits_4.csv │ │ ├── splits_5.csv │ │ ├── splits_6.csv │ │ ├── splits_7.csv │ │ ├── splits_8.csv │ │ ├── splits_9.csv │ │ └── splits_0_bool.csv │ ├── PANDA_subtyping2 │ │ ├── splits_0_descriptor.csv │ │ ├── splits_1_descriptor.csv │ │ ├── splits_2_descriptor.csv │ │ ├── splits_3_descriptor.csv │ │ └── splits_4_descriptor.csv │ └── RCC_subtyping3 │ │ ├── splits_0_descriptor.csv │ │ ├── splits_1_descriptor.csv │ │ ├── splits_2_descriptor.csv │ │ ├── splits_3_descriptor.csv │ │ ├── splits_4_descriptor.csv │ │ ├── splits_5_descriptor.csv │ │ ├── splits_6_descriptor.csv │ │ ├── splits_7_descriptor.csv │ │ ├── splits_8_descriptor.csv │ │ └── splits_9_descriptor.csv ├── utils │ ├── __pycache__ │ │ ├── utils.cpython-37.pyc │ │ ├── utils.cpython-38.pyc │ │ ├── utils.cpython-39.pyc │ │ ├── core_utils.cpython-37.pyc │ │ ├── core_utils.cpython-38.pyc │ │ ├── core_utils.cpython-39.pyc │ │ ├── eval_utils.cpython-37.pyc │ │ ├── eval_utils.cpython-38.pyc │ │ ├── file_utils.cpython-37.pyc │ │ ├── file_utils.cpython-38.pyc │ │ └── file_utils.cpython-39.pyc │ ├── file_utils.py │ ├── eval_utils.py │ └── utils.py ├── models │ ├── __pycache__ │ │ ├── model_clam.cpython-37.pyc │ │ ├── model_clam.cpython-38.pyc │ │ ├── model_clam.cpython-39.pyc │ │ ├── model_mil.cpython-37.pyc │ │ ├── model_mil.cpython-38.pyc │ │ ├── model_mil.cpython-39.pyc │ │ └── resnet_custom.cpython-37.pyc │ ├── model_mil.py │ ├── resnet_custom.py │ └── model_clam.py ├── datasets │ ├── __pycache__ │ │ ├── dataset_h5.cpython-37.pyc │ │ ├── wsi_dataset.cpython-37.pyc │ │ ├── dataset_generic.cpython-37.pyc │ │ ├── dataset_generic.cpython-38.pyc │ │ └── dataset_generic.cpython-39.pyc │ ├── dataset_h5.py │ └── wsi_dataset.py ├── wsi_core │ ├── __pycache__ │ │ ├── wsi_utils.cpython-37.pyc │ │ ├── util_classes.cpython-37.pyc │ │ ├── WholeSlideImage.cpython-37.pyc │ │ └── batch_process_utils.cpython-37.pyc │ ├── batch_process_utils.py │ └── util_classes.py ├── vis_utils │ ├── __pycache__ │ │ └── heatmap_utils.cpython-37.pyc │ └── heatmap_utils.py ├── presets │ ├── bwh_biopsy.csv │ ├── tcga.csv │ └── bwh_resection.csv ├── args │ └── experiment_task_2_tumor_subtyping_brca.txt ├── feature_extract │ ├── datasets.py │ ├── extract_features.py │ └── genmodel.py ├── eval.py └── train.py ├── Docs └── INSTALLATION.md ├── environment.yaml └── genmodel.py /Slide-level multi-class subtyping 
task/splits/BRCA_subtyping2/splits_0_descriptor.csv: -------------------------------------------------------------------------------- 1 | ,train,val,test 2 | IDC,620,77,77 3 | ILC,159,20,20 4 | -------------------------------------------------------------------------------- /Slide-level multi-class subtyping task/splits/BRCA_subtyping2/splits_1_descriptor.csv: -------------------------------------------------------------------------------- 1 | ,train,val,test 2 | IDC,620,77,77 3 | ILC,159,20,20 4 | -------------------------------------------------------------------------------- /Slide-level multi-class subtyping task/splits/BRCA_subtyping2/splits_2_descriptor.csv: -------------------------------------------------------------------------------- 1 | ,train,val,test 2 | IDC,620,77,77 3 | ILC,159,20,20 4 | -------------------------------------------------------------------------------- /Slide-level multi-class subtyping task/splits/BRCA_subtyping2/splits_3_descriptor.csv: -------------------------------------------------------------------------------- 1 | ,train,val,test 2 | IDC,620,77,77 3 | ILC,159,20,20 4 | -------------------------------------------------------------------------------- /Slide-level multi-class subtyping task/splits/BRCA_subtyping2/splits_4_descriptor.csv: -------------------------------------------------------------------------------- 1 | ,train,val,test 2 | IDC,620,77,77 3 | ILC,159,20,20 4 | -------------------------------------------------------------------------------- /Slide-level multi-class subtyping task/splits/BRCA_subtyping2/splits_5_descriptor.csv: -------------------------------------------------------------------------------- 1 | ,train,val,test 2 | IDC,620,77,77 3 | ILC,159,20,20 4 | -------------------------------------------------------------------------------- /Slide-level multi-class subtyping task/splits/BRCA_subtyping2/splits_6_descriptor.csv: -------------------------------------------------------------------------------- 1 | ,train,val,test 2 | IDC,620,77,77 3 | ILC,159,20,20 4 | -------------------------------------------------------------------------------- /Slide-level multi-class subtyping task/splits/BRCA_subtyping2/splits_7_descriptor.csv: -------------------------------------------------------------------------------- 1 | ,train,val,test 2 | IDC,620,77,77 3 | ILC,159,20,20 4 | -------------------------------------------------------------------------------- /Slide-level multi-class subtyping task/splits/BRCA_subtyping2/splits_8_descriptor.csv: -------------------------------------------------------------------------------- 1 | ,train,val,test 2 | IDC,620,77,77 3 | ILC,159,20,20 4 | -------------------------------------------------------------------------------- /Slide-level multi-class subtyping task/splits/BRCA_subtyping2/splits_9_descriptor.csv: -------------------------------------------------------------------------------- 1 | ,train,val,test 2 | IDC,620,77,77 3 | ILC,159,20,20 4 | -------------------------------------------------------------------------------- /Slide-level multi-class subtyping task/splits/NSCLC_subtyping2/splits_0_descriptor.csv: -------------------------------------------------------------------------------- 1 | ,train,val,test 2 | LUAD,396,53,51 3 | LUSC,399,48,53 4 | -------------------------------------------------------------------------------- /Slide-level multi-class subtyping task/splits/NSCLC_subtyping2/splits_1_descriptor.csv: -------------------------------------------------------------------------------- 1 | 
,train,val,test 2 | LUAD,398,53,49 3 | LUSC,397,53,50 4 | -------------------------------------------------------------------------------- /Slide-level multi-class subtyping task/splits/NSCLC_subtyping2/splits_2_descriptor.csv: -------------------------------------------------------------------------------- 1 | ,train,val,test 2 | LUAD,397,51,52 3 | LUSC,402,51,47 4 | -------------------------------------------------------------------------------- /Slide-level multi-class subtyping task/splits/NSCLC_subtyping2/splits_3_descriptor.csv: -------------------------------------------------------------------------------- 1 | ,train,val,test 2 | LUAD,404,45,51 3 | LUSC,402,51,47 4 | -------------------------------------------------------------------------------- /Slide-level multi-class subtyping task/splits/NSCLC_subtyping2/splits_4_descriptor.csv: -------------------------------------------------------------------------------- 1 | ,train,val,test 2 | LUAD,396,53,51 3 | LUSC,404,48,48 4 | -------------------------------------------------------------------------------- /Slide-level multi-class subtyping task/splits/NSCLC_subtyping2/splits_5_descriptor.csv: -------------------------------------------------------------------------------- 1 | ,train,val,test 2 | LUAD,394,55,51 3 | LUSC,401,48,51 4 | -------------------------------------------------------------------------------- /Slide-level multi-class subtyping task/splits/NSCLC_subtyping2/splits_6_descriptor.csv: -------------------------------------------------------------------------------- 1 | ,train,val,test 2 | LUAD,400,49,51 3 | LUSC,403,51,46 4 | -------------------------------------------------------------------------------- /Slide-level multi-class subtyping task/splits/NSCLC_subtyping2/splits_7_descriptor.csv: -------------------------------------------------------------------------------- 1 | ,train,val,test 2 | LUAD,397,50,53 3 | LUSC,401,50,49 4 | -------------------------------------------------------------------------------- /Slide-level multi-class subtyping task/splits/NSCLC_subtyping2/splits_8_descriptor.csv: -------------------------------------------------------------------------------- 1 | ,train,val,test 2 | LUAD,402,49,49 3 | LUSC,400,52,48 4 | -------------------------------------------------------------------------------- /Slide-level multi-class subtyping task/splits/NSCLC_subtyping2/splits_9_descriptor.csv: -------------------------------------------------------------------------------- 1 | ,train,val,test 2 | LUAD,399,49,52 3 | LUSC,401,49,50 4 | -------------------------------------------------------------------------------- /Slide-level multi-class subtyping task/splits/Camelyon16_subtyping2/splits_0_descriptor.csv: -------------------------------------------------------------------------------- 1 | ,train,val,test 2 | normal,143,16,80 3 | tumor,100,11,49 4 | -------------------------------------------------------------------------------- /Slide-level multi-class subtyping task/splits/Camelyon16_subtyping2/splits_1_descriptor.csv: -------------------------------------------------------------------------------- 1 | ,train,val,test 2 | normal,143,16,80 3 | tumor,100,11,49 4 | -------------------------------------------------------------------------------- /Slide-level multi-class subtyping task/splits/Camelyon16_subtyping2/splits_2_descriptor.csv: -------------------------------------------------------------------------------- 1 | ,train,val,test 2 | normal,143,16,80 3 | tumor,100,11,49 4 | 
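Every `splits_k_descriptor.csv` in these directories shares one schema: rows are class labels, columns are train/val/test slide counts. A minimal sketch for loading and sanity-checking one descriptor (the relative path is assumed to be resolved from the task directory, matching the `split_dir` convention in the args file later in this dump):

```python
import pandas as pd

# rows: class labels; columns: slide counts per partition
desc = pd.read_csv('splits/Camelyon16_subtyping2/splits_0_descriptor.csv', index_col=0)
print(desc)              # normal/tumor counts as listed above
print(desc.sum(axis=0))  # totals per partition: train 243, val 27, test 129
```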
-------------------------------------------------------------------------------- /Slide-level multi-class subtyping task/splits/Camelyon16_subtyping2/splits_3_descriptor.csv: -------------------------------------------------------------------------------- 1 | ,train,val,test 2 | normal,143,16,80 3 | tumor,100,11,49 4 | -------------------------------------------------------------------------------- /Slide-level multi-class subtyping task/splits/Camelyon16_subtyping2/splits_4_descriptor.csv: -------------------------------------------------------------------------------- 1 | ,train,val,test 2 | normal,143,16,80 3 | tumor,100,11,49 4 | -------------------------------------------------------------------------------- /Slide-level multi-class subtyping task/splits/Camelyon16_subtyping2/splits_5_descriptor.csv: -------------------------------------------------------------------------------- 1 | ,train,val,test 2 | normal,143,16,80 3 | tumor,100,11,49 4 | -------------------------------------------------------------------------------- /Slide-level multi-class subtyping task/splits/Camelyon16_subtyping2/splits_6_descriptor.csv: -------------------------------------------------------------------------------- 1 | ,train,val,test 2 | normal,143,16,80 3 | tumor,100,11,49 4 | -------------------------------------------------------------------------------- /Slide-level multi-class subtyping task/splits/Camelyon16_subtyping2/splits_7_descriptor.csv: -------------------------------------------------------------------------------- 1 | ,train,val,test 2 | normal,143,16,80 3 | tumor,100,11,49 4 | -------------------------------------------------------------------------------- /Slide-level multi-class subtyping task/splits/Camelyon16_subtyping2/splits_8_descriptor.csv: -------------------------------------------------------------------------------- 1 | ,train,val,test 2 | normal,143,16,80 3 | tumor,100,11,49 4 | -------------------------------------------------------------------------------- /Slide-level multi-class subtyping task/splits/Camelyon16_subtyping2/splits_9_descriptor.csv: -------------------------------------------------------------------------------- 1 | ,train,val,test 2 | normal,143,16,80 3 | tumor,100,11,49 4 | -------------------------------------------------------------------------------- /Slide-level multi-class subtyping task/splits/PANDA_subtyping2/splits_0_descriptor.csv: -------------------------------------------------------------------------------- 1 | ,train,val,test 2 | grades0,2024,289,578 3 | grades1,5407,772,1545 4 | -------------------------------------------------------------------------------- /Slide-level multi-class subtyping task/splits/PANDA_subtyping2/splits_1_descriptor.csv: -------------------------------------------------------------------------------- 1 | ,train,val,test 2 | grades0,2024,289,578 3 | grades1,5407,772,1545 4 | -------------------------------------------------------------------------------- /Slide-level multi-class subtyping task/splits/PANDA_subtyping2/splits_2_descriptor.csv: -------------------------------------------------------------------------------- 1 | ,train,val,test 2 | grades0,2024,289,578 3 | grades1,5407,772,1545 4 | -------------------------------------------------------------------------------- /Slide-level multi-class subtyping task/splits/PANDA_subtyping2/splits_3_descriptor.csv: -------------------------------------------------------------------------------- 1 | ,train,val,test 2 | grades0,2024,289,578 3 | grades1,5407,772,1545 4 | 
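The PANDA split above is noticeably imbalanced (2,024 vs 5,407 training slides), which is why the training configuration shown later in this dump sets `'weighted_sample': True`. A sketch of the corresponding inverse-frequency sampling weights, computed from the descriptor's train column (a sketch of the idea, not the repository's own sampler code):

```python
# Class counts from the PANDA train column above.
train_counts = {'grades0': 2024, 'grades1': 5407}
total = sum(train_counts.values())  # 7431 training slides

# Weight each class inversely to its frequency so both contribute
# equally in expectation when sampled.
weights = {cls: total / n for cls, n in train_counts.items()}
print(weights)  # {'grades0': ~3.67, 'grades1': ~1.37}
```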
-------------------------------------------------------------------------------- /Slide-level multi-class subtyping task/splits/PANDA_subtyping2/splits_4_descriptor.csv: -------------------------------------------------------------------------------- 1 | ,train,val,test 2 | grades0,2024,289,578 3 | grades1,5407,772,1545 4 | -------------------------------------------------------------------------------- /Slide-level multi-class subtyping task/splits/RCC_subtyping3/splits_0_descriptor.csv: -------------------------------------------------------------------------------- 1 | ,train,val,test 2 | CCRCC,414,52,52 3 | CHRCC,72,9,9 4 | PRCC,230,29,29 5 | -------------------------------------------------------------------------------- /Slide-level multi-class subtyping task/splits/RCC_subtyping3/splits_1_descriptor.csv: -------------------------------------------------------------------------------- 1 | ,train,val,test 2 | CCRCC,414,52,52 3 | CHRCC,72,9,9 4 | PRCC,230,29,29 5 | -------------------------------------------------------------------------------- /Slide-level multi-class subtyping task/splits/RCC_subtyping3/splits_2_descriptor.csv: -------------------------------------------------------------------------------- 1 | ,train,val,test 2 | CCRCC,414,52,52 3 | CHRCC,72,9,9 4 | PRCC,230,29,29 5 | -------------------------------------------------------------------------------- /Slide-level multi-class subtyping task/splits/RCC_subtyping3/splits_3_descriptor.csv: -------------------------------------------------------------------------------- 1 | ,train,val,test 2 | CCRCC,414,52,52 3 | CHRCC,72,9,9 4 | PRCC,230,29,29 5 | -------------------------------------------------------------------------------- /Slide-level multi-class subtyping task/splits/RCC_subtyping3/splits_4_descriptor.csv: -------------------------------------------------------------------------------- 1 | ,train,val,test 2 | CCRCC,414,52,52 3 | CHRCC,72,9,9 4 | PRCC,230,29,29 5 | -------------------------------------------------------------------------------- /Slide-level multi-class subtyping task/splits/RCC_subtyping3/splits_5_descriptor.csv: -------------------------------------------------------------------------------- 1 | ,train,val,test 2 | CCRCC,414,52,52 3 | CHRCC,72,9,9 4 | PRCC,230,29,29 5 | -------------------------------------------------------------------------------- /Slide-level multi-class subtyping task/splits/RCC_subtyping3/splits_6_descriptor.csv: -------------------------------------------------------------------------------- 1 | ,train,val,test 2 | CCRCC,414,52,52 3 | CHRCC,72,9,9 4 | PRCC,230,29,29 5 | -------------------------------------------------------------------------------- /Slide-level multi-class subtyping task/splits/RCC_subtyping3/splits_7_descriptor.csv: -------------------------------------------------------------------------------- 1 | ,train,val,test 2 | CCRCC,414,52,52 3 | CHRCC,72,9,9 4 | PRCC,230,29,29 5 | -------------------------------------------------------------------------------- /Slide-level multi-class subtyping task/splits/RCC_subtyping3/splits_8_descriptor.csv: -------------------------------------------------------------------------------- 1 | ,train,val,test 2 | CCRCC,414,52,52 3 | CHRCC,72,9,9 4 | PRCC,230,29,29 5 | -------------------------------------------------------------------------------- /Slide-level multi-class subtyping task/splits/RCC_subtyping3/splits_9_descriptor.csv: -------------------------------------------------------------------------------- 1 | 
,train,val,test 2 | CCRCC,414,52,52 3 | CHRCC,72,9,9 4 | PRCC,230,29,29 5 | -------------------------------------------------------------------------------- /Slide-level multi-class subtyping task/utils/__pycache__/utils.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openmedlab/BROW/HEAD/Slide-level multi-class subtyping task/utils/__pycache__/utils.cpython-37.pyc -------------------------------------------------------------------------------- /Slide-level multi-class subtyping task/utils/__pycache__/utils.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openmedlab/BROW/HEAD/Slide-level multi-class subtyping task/utils/__pycache__/utils.cpython-38.pyc -------------------------------------------------------------------------------- /Slide-level multi-class subtyping task/utils/__pycache__/utils.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openmedlab/BROW/HEAD/Slide-level multi-class subtyping task/utils/__pycache__/utils.cpython-39.pyc -------------------------------------------------------------------------------- /Slide-level multi-class subtyping task/models/__pycache__/model_clam.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openmedlab/BROW/HEAD/Slide-level multi-class subtyping task/models/__pycache__/model_clam.cpython-37.pyc -------------------------------------------------------------------------------- /Slide-level multi-class subtyping task/models/__pycache__/model_clam.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openmedlab/BROW/HEAD/Slide-level multi-class subtyping task/models/__pycache__/model_clam.cpython-38.pyc -------------------------------------------------------------------------------- /Slide-level multi-class subtyping task/models/__pycache__/model_clam.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openmedlab/BROW/HEAD/Slide-level multi-class subtyping task/models/__pycache__/model_clam.cpython-39.pyc -------------------------------------------------------------------------------- /Slide-level multi-class subtyping task/models/__pycache__/model_mil.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openmedlab/BROW/HEAD/Slide-level multi-class subtyping task/models/__pycache__/model_mil.cpython-37.pyc -------------------------------------------------------------------------------- /Slide-level multi-class subtyping task/models/__pycache__/model_mil.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openmedlab/BROW/HEAD/Slide-level multi-class subtyping task/models/__pycache__/model_mil.cpython-38.pyc -------------------------------------------------------------------------------- /Slide-level multi-class subtyping task/models/__pycache__/model_mil.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openmedlab/BROW/HEAD/Slide-level multi-class subtyping task/models/__pycache__/model_mil.cpython-39.pyc 
-------------------------------------------------------------------------------- /Slide-level multi-class subtyping task/utils/__pycache__/core_utils.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openmedlab/BROW/HEAD/Slide-level multi-class subtyping task/utils/__pycache__/core_utils.cpython-37.pyc -------------------------------------------------------------------------------- /Slide-level multi-class subtyping task/utils/__pycache__/core_utils.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openmedlab/BROW/HEAD/Slide-level multi-class subtyping task/utils/__pycache__/core_utils.cpython-38.pyc -------------------------------------------------------------------------------- /Slide-level multi-class subtyping task/utils/__pycache__/core_utils.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openmedlab/BROW/HEAD/Slide-level multi-class subtyping task/utils/__pycache__/core_utils.cpython-39.pyc -------------------------------------------------------------------------------- /Slide-level multi-class subtyping task/utils/__pycache__/eval_utils.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openmedlab/BROW/HEAD/Slide-level multi-class subtyping task/utils/__pycache__/eval_utils.cpython-37.pyc -------------------------------------------------------------------------------- /Slide-level multi-class subtyping task/utils/__pycache__/eval_utils.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openmedlab/BROW/HEAD/Slide-level multi-class subtyping task/utils/__pycache__/eval_utils.cpython-38.pyc -------------------------------------------------------------------------------- /Slide-level multi-class subtyping task/utils/__pycache__/file_utils.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openmedlab/BROW/HEAD/Slide-level multi-class subtyping task/utils/__pycache__/file_utils.cpython-37.pyc -------------------------------------------------------------------------------- /Slide-level multi-class subtyping task/utils/__pycache__/file_utils.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openmedlab/BROW/HEAD/Slide-level multi-class subtyping task/utils/__pycache__/file_utils.cpython-38.pyc -------------------------------------------------------------------------------- /Slide-level multi-class subtyping task/utils/__pycache__/file_utils.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openmedlab/BROW/HEAD/Slide-level multi-class subtyping task/utils/__pycache__/file_utils.cpython-39.pyc -------------------------------------------------------------------------------- /Slide-level multi-class subtyping task/datasets/__pycache__/dataset_h5.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openmedlab/BROW/HEAD/Slide-level multi-class subtyping task/datasets/__pycache__/dataset_h5.cpython-37.pyc -------------------------------------------------------------------------------- 
/Slide-level multi-class subtyping task/datasets/__pycache__/wsi_dataset.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openmedlab/BROW/HEAD/Slide-level multi-class subtyping task/datasets/__pycache__/wsi_dataset.cpython-37.pyc -------------------------------------------------------------------------------- /Slide-level multi-class subtyping task/models/__pycache__/resnet_custom.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openmedlab/BROW/HEAD/Slide-level multi-class subtyping task/models/__pycache__/resnet_custom.cpython-37.pyc -------------------------------------------------------------------------------- /Slide-level multi-class subtyping task/wsi_core/__pycache__/wsi_utils.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openmedlab/BROW/HEAD/Slide-level multi-class subtyping task/wsi_core/__pycache__/wsi_utils.cpython-37.pyc -------------------------------------------------------------------------------- /Slide-level multi-class subtyping task/wsi_core/__pycache__/util_classes.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openmedlab/BROW/HEAD/Slide-level multi-class subtyping task/wsi_core/__pycache__/util_classes.cpython-37.pyc -------------------------------------------------------------------------------- /Slide-level multi-class subtyping task/datasets/__pycache__/dataset_generic.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openmedlab/BROW/HEAD/Slide-level multi-class subtyping task/datasets/__pycache__/dataset_generic.cpython-37.pyc -------------------------------------------------------------------------------- /Slide-level multi-class subtyping task/datasets/__pycache__/dataset_generic.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openmedlab/BROW/HEAD/Slide-level multi-class subtyping task/datasets/__pycache__/dataset_generic.cpython-38.pyc -------------------------------------------------------------------------------- /Slide-level multi-class subtyping task/datasets/__pycache__/dataset_generic.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openmedlab/BROW/HEAD/Slide-level multi-class subtyping task/datasets/__pycache__/dataset_generic.cpython-39.pyc -------------------------------------------------------------------------------- /Slide-level multi-class subtyping task/vis_utils/__pycache__/heatmap_utils.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openmedlab/BROW/HEAD/Slide-level multi-class subtyping task/vis_utils/__pycache__/heatmap_utils.cpython-37.pyc -------------------------------------------------------------------------------- /Slide-level multi-class subtyping task/wsi_core/__pycache__/WholeSlideImage.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openmedlab/BROW/HEAD/Slide-level multi-class subtyping task/wsi_core/__pycache__/WholeSlideImage.cpython-37.pyc 
-------------------------------------------------------------------------------- /Slide-level multi-class subtyping task/wsi_core/__pycache__/batch_process_utils.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openmedlab/BROW/HEAD/Slide-level multi-class subtyping task/wsi_core/__pycache__/batch_process_utils.cpython-37.pyc
-------------------------------------------------------------------------------- /Slide-level multi-class subtyping task/presets/bwh_biopsy.csv: -------------------------------------------------------------------------------- 1 | seg_level,sthresh,mthresh,close,use_otsu,a_t,a_h,max_n_holes,vis_level,line_thickness,white_thresh,black_thresh,use_padding,contour_fn,keep_ids,exclude_ids 2 | -1,15,11,2,FALSE,1,1,2,-1,50,5,50,TRUE,four_pt,none,none
-------------------------------------------------------------------------------- /Slide-level multi-class subtyping task/presets/tcga.csv: -------------------------------------------------------------------------------- 1 | seg_level,sthresh,mthresh,close,use_otsu,a_t,a_h,max_n_holes,vis_level,line_thickness,white_thresh,black_thresh,use_padding,contour_fn,keep_ids,exclude_ids 2 | -1,8,7,4,FALSE,16,4,8,-1,100,5,50,TRUE,four_pt,none,none 3 |
-------------------------------------------------------------------------------- /Slide-level multi-class subtyping task/presets/bwh_resection.csv: -------------------------------------------------------------------------------- 1 | seg_level,sthresh,mthresh,close,use_otsu,a_t,a_h,max_n_holes,vis_level,line_thickness,white_thresh,black_thresh,use_padding,contour_fn,keep_ids,exclude_ids 2 | -1,15,11,4,FALSE,100,16,8,-1,150,5,50,TRUE,four_pt,none,none
-------------------------------------------------------------------------------- /Slide-level multi-class subtyping task/args/experiment_task_2_tumor_subtyping_brca.txt: -------------------------------------------------------------------------------- 1 | {'num_splits': 10, 2 | 'k_start': -1, 3 | 'k_end': -1, 4 | 'task': 'task_2_tumor_subtyping', 5 | 'max_epochs': 100, 6 | 'results_dir': './results', 7 | 'lr': 0.0001, 8 | 'experiment': 'task_2_tumor_subtyping_brca', 9 | 'reg': 0.0001, 10 | 'label_frac': 1.0, 11 | 'bag_loss': 'ce', 12 | 'seed': 1, 13 | 'model_type': 'clam_sb', 14 | 'model_size': 'small', 15 | 'use_drop_out': True, 16 | 'weighted_sample': True, 17 | 'opt': 'adam', 18 | 'bag_weight': 0.7, 19 | 'inst_loss': 'svm', 20 | 'B': 8, 21 | 'n_classes':2, 22 | 'split_dir': 'splits/BRCA_subtyping2'} 23 |
-------------------------------------------------------------------------------- /Docs/INSTALLATION.md: -------------------------------------------------------------------------------- 1 | Installation Guide 2 | =========== 3 | After setting up Anaconda, you can create and activate a `BROW` conda environment using the provided environment definition `environment.yaml`: 4 | ```bash 5 | conda env create -f environment.yaml 6 | conda activate BROW 7 | ``` 8 | Or you can set up the environment step by step: 9 | 10 | ```bash 11 | conda create -n BROW python=3.9.16 12 | conda activate BROW 13 | conda install pytorch==1.12.0 torchvision==0.13.0 torchaudio==0.12.0 -c pytorch 14 | ``` 15 | Then install the required packages for pre-training: 16 | ```bash 17 | pip install pandas 18 | pip install timm 19 | pip install opencv-python 20 | pip install openslide-python 21 | pip install matplotlib 22 | pip install h5py 23 | ``` 24 | At this point, the environment is ready for model pre-training.
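A quick sanity check after either route (a minimal sketch; the expected versions mirror the pins above and in `environment.yaml`, and note that `openslide-python` additionally requires the system OpenSlide library, which pip does not provide):

```python
# Run inside the activated BROW environment to verify the core stack imports.
import torch
import torchvision
import openslide
import h5py

print(torch.__version__)              # expected: 1.12.0
print(torchvision.__version__)        # expected: 0.13.0
print(torch.cuda.is_available())      # True if the CUDA 11.3 build sees a GPU
print(openslide.__library_version__)  # version of the system OpenSlide library
```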
25 | 26 | To reproduce the results of slide-level subtyping tasks, some extra packages are needed: 27 | ```bash 28 | pip install scipy 29 | pip install scikit-learn 30 | pip install tensorboard 31 | pip install future 32 | ``` 33 | Please note that the package smooth-topk is installed from source: 34 | ```bash 35 | git clone https://github.com/oval-group/smooth-topk.git 36 | cd smooth-topk 37 | python setup.py install 38 | ``` 39 |
-------------------------------------------------------------------------------- /Slide-level multi-class subtyping task/utils/file_utils.py: -------------------------------------------------------------------------------- 1 | import pickle 2 | import h5py 3 | 4 | def save_pkl(filename, save_object): 5 | writer = open(filename,'wb') 6 | pickle.dump(save_object, writer) 7 | writer.close() 8 | 9 | def load_pkl(filename): 10 | loader = open(filename,'rb') 11 | file = pickle.load(loader) 12 | loader.close() 13 | return file 14 | 15 | 16 | def save_hdf5(output_path, asset_dict, attr_dict= None, mode='a'): 17 | file = h5py.File(output_path, mode) 18 | for key, val in asset_dict.items(): 19 | data_shape = val.shape 20 | if key not in file: 21 | data_type = val.dtype 22 | chunk_shape = (1, ) + data_shape[1:] 23 | maxshape = (None, ) + data_shape[1:] 24 | dset = file.create_dataset(key, shape=data_shape, maxshape=maxshape, chunks=chunk_shape, dtype=data_type) 25 | dset[:] = val 26 | if attr_dict is not None: 27 | if key in attr_dict.keys(): 28 | for attr_key, attr_val in attr_dict[key].items(): 29 | dset.attrs[attr_key] = attr_val 30 | else: 31 | dset = file[key] 32 | dset.resize(len(dset) + data_shape[0], axis=0) 33 | dset[-data_shape[0]:] = val 34 | file.close() 35 | return output_path
-------------------------------------------------------------------------------- /Slide-level multi-class subtyping task/feature_extract/datasets.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | import openslide 3 | import numpy as np 4 | 5 | 6 | import torch 7 | from torch.utils.data import Dataset, DataLoader 8 | from torchvision import transforms 9 | 10 | 11 | 12 | def build_transform(): 13 | mean,std = [0.485, 0.456, 0.406], [0.229, 0.224, 0.225] 14 | 15 | return transforms.Compose([ 16 | # RandomResizedCrop(args.input_size, interpolation=3), 17 | transforms.CenterCrop((224,224)), 18 | transforms.ToTensor(), 19 | transforms.Normalize(mean=mean, std=std)], 20 | ) 21 | 22 | # Sub-dataset over the patches of a single slide 23 | class SubDataset(Dataset): 24 | def __init__(self, slidedir): 25 | ext = slidedir.split('.')[-1] 26 | len_ext = len(ext)+1 27 | coords_path = slidedir[:-len_ext] + '.npy' 28 | self.coords_arr = np.load(coords_path) 29 | self.len_coords = self.coords_arr.shape[0] 30 | self.slidedir = slidedir 31 | self.trans = build_transform() 32 | def __len__(self): 33 | return self.len_coords 34 | 35 | def __getitem__(self, index): 36 | coords = self.coords_arr[index] 37 | patch = openslide.open_slide(self.slidedir).read_region(coords, 0, (256,256)).convert('RGB') 38 | patch = self.trans(patch) 39 | return patch 40 |
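`SubDataset` assumes each slide sits next to a `.npy` file with the same stem holding an `(N, 2)` array of level-0 patch coordinates; indexing reads one 256×256 region and applies the transform above. A minimal sketch of standalone use (paths hypothetical, run from the `feature_extract` directory):

```python
from torch.utils.data import DataLoader
from datasets import SubDataset

# expects /data/slides/slide_001.npy next to the slide file
ds = SubDataset('/data/slides/slide_001.svs')
loader = DataLoader(ds, batch_size=64, shuffle=False, num_workers=4)
for patches in loader:
    print(patches.shape)  # (64, 3, 224, 224) after CenterCrop + ToTensor + Normalize
    break
```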
-------------------------------------------------------------------------------- /Slide-level multi-class subtyping task/feature_extract/extract_features.py: -------------------------------------------------------------------------------- 1 | import os 2 | import argparse 3 | import torch 4 | import torch.nn as nn 5 | from torch.utils.data import Dataset, DataLoader 6 | 7 | import numpy as np 8 | import pandas as pd 9 | from datasets import SubDataset 10 | from tqdm import tqdm 11 | 12 | from genmodel import genmodel 13 | 14 | 15 | 16 | 17 | def get_args(): 18 | parser = argparse.ArgumentParser('extract features script for whole slide image classification', add_help=False) 19 | parser.add_argument('--sub_batch_size', default=128, type=int) 20 | parser.add_argument('--device', default='cuda', help='device to use for training / testing') 21 | parser.add_argument('--input_size', default=224, type=int, help='images input size') 22 | parser.add_argument('--data_root_path', type=str, default='', required=True) 23 | parser.add_argument('--save_pt_path', type=str, default='', required=True) 24 | parser.add_argument('--pin_mem', action='store_true', default=True, help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.') 25 | parser.add_argument('--num_workers', default=8, type=int, help='Number of data loading workers per GPU.') 26 | parser.add_argument('--modelpath', default='', required=True, help='Dir to the checkpoints') 27 | parser.add_argument('--dataset', choices=['BRCA', 'RCC', 'CAM16', 'PANDA', 'NSCLC'], required=True, help='select the dataset') 28 | parser.add_argument('--file_ext', type=str, default='.svs', help='setting the file extension') 29 | 30 | 31 | return parser.parse_args() 32 | 33 | 34 | def extract_features(args): 35 | device = torch.device(args.device) 36 | model = genmodel(ckpt=args.modelpath) 37 | model = nn.DataParallel(model) 38 | model.to(device) 39 | model.eval() 40 | csv_path = f'./csv_files/{args.dataset}.csv' 41 | 42 | df = pd.read_csv(csv_path) 43 | wsidirs = df['slide_id'].to_list() 44 | for i in range(len(wsidirs)): 45 | print(f'working on {i} / {len(wsidirs)}') 46 | wsidir = f'{args.data_root_path}/{wsidirs[i]}' + args.file_ext 47 | sub_dataset = SubDataset(wsidir) 48 | 49 | # create the dataloader over this slide's patches 50 | sub_dataloader = DataLoader(sub_dataset, batch_size=args.sub_batch_size, shuffle=False, drop_last=False, num_workers=args.num_workers) 51 | name = wsidir.split('/')[-1] 52 | ext = name.split('.')[-1] 53 | lenext = len(ext) + 1 54 | name = name[:-lenext] 55 | savedir = f'{args.save_pt_path}/{name}.pt' 56 | if os.path.exists(savedir): 57 | continue 58 | # run every patch of the slide through the frozen model and stack the features 59 | out = [] 60 | for patch in tqdm(sub_dataloader): 61 | patch = patch.to(device) 62 | 63 | with torch.no_grad(): 64 | output = model(patch) 65 | features = output.cpu().detach() 66 | # print(features.shape) 67 | out.append(features) 68 | out = torch.cat(out, dim=0) 69 | torch.save(out, savedir) 70 | 71 | 72 | if __name__ == '__main__': 73 | args = get_args() 74 | os.makedirs(args.save_pt_path, exist_ok=True) 75 | extract_features(args) 76 |
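A hypothetical invocation of the script above; every flag comes from `get_args()`, while the paths, the checkpoint name, and the contents of `./csv_files/BRCA.csv` are assumptions. One `<slide_id>.pt` feature tensor is written per listed slide:

```bash
python extract_features.py \
    --data_root_path /data/TCGA-BRCA/slides \
    --save_pt_path ./features/BRCA \
    --modelpath ./checkpoints/brow_pretrained.pth \
    --dataset BRCA \
    --file_ext .svs \
    --sub_batch_size 128
```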
-------------------------------------------------------------------------------- /Slide-level multi-class subtyping task/wsi_core/batch_process_utils.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | import numpy as np 3 | import pdb 4 | 5 | ''' 6 | initiate a pandas df describing a list of slides to process 7 | args: 8 | slides (df or array-like): 9 | array-like structure containing list of slide ids; if df, these ids are assumed to be 10 | stored under the 'slide_id' column 11 | seg_params (dict): segmentation parameters 12 | filter_params (dict): filter parameters 13 | vis_params (dict): visualization parameters 14 | patch_params (dict): patching parameters 15 | use_heatmap_args (bool): whether to include heatmap arguments such as ROI coordinates 16 | ''' 17 | def initialize_df(slides, seg_params, filter_params, vis_params, patch_params, 18 | use_heatmap_args=False, save_patches=False): 19 | 20 | total = len(slides) 21 | if isinstance(slides, pd.DataFrame): 22 | slide_ids = slides.slide_id.values 23 | else: 24 | slide_ids = slides 25 | default_df_dict = {'slide_id': slide_ids, 'process': np.full((total), 1, dtype=np.uint8)} 26 | 27 | # initiate empty labels in case not provided 28 | if use_heatmap_args: 29 | default_df_dict.update({'label': np.full((total), -1)}) 30 | 31 | default_df_dict.update({ 32 | 'status': np.full((total), 'tbp'), 33 | # seg params 34 | 'seg_level': np.full((total), int(seg_params['seg_level']), dtype=np.int8), 35 | 'sthresh': np.full((total), int(seg_params['sthresh']), dtype=np.uint8), 36 | 'mthresh': np.full((total), int(seg_params['mthresh']), dtype=np.uint8), 37 | 'close': np.full((total), int(seg_params['close']), dtype=np.uint32), 38 | 'use_otsu': np.full((total), bool(seg_params['use_otsu']), dtype=bool), 39 | 'keep_ids': np.full((total), seg_params['keep_ids']), 40 | 'exclude_ids': np.full((total), seg_params['exclude_ids']), 41 | 42 | # filter params 43 | 'a_t': np.full((total), int(filter_params['a_t']), dtype=np.float32), 44 | 'a_h': np.full((total), int(filter_params['a_h']), dtype=np.float32), 45 | 'max_n_holes': np.full((total), int(filter_params['max_n_holes']), dtype=np.uint32), 46 | 47 | # vis params 48 | 'vis_level': np.full((total), int(vis_params['vis_level']), dtype=np.int8), 49 | 'line_thickness': np.full((total), int(vis_params['line_thickness']), dtype=np.uint32), 50 | 51 | # patching params 52 | 'use_padding': np.full((total), bool(patch_params['use_padding']), dtype=bool), 53 | 'contour_fn': np.full((total), patch_params['contour_fn']) 54 | }) 55 | 56 | if save_patches: 57 | default_df_dict.update({ 58 | 'white_thresh': np.full((total), int(patch_params['white_thresh']), dtype=np.uint8), 59 | 'black_thresh': np.full((total), int(patch_params['black_thresh']), dtype=np.uint8)}) 60 | 61 | if use_heatmap_args: 62 | # initiate x,y coordinates as NaN in case not provided (np.empty(...).fill(...) returns None, so use np.full) 63 | default_df_dict.update({'x1': np.full((total), np.nan), 64 | 'x2': np.full((total), np.nan), 65 | 'y1': np.full((total), np.nan), 66 | 'y2': np.full((total), np.nan)}) 67 | 68 | 69 | if isinstance(slides, pd.DataFrame): 70 | temp_copy = pd.DataFrame(default_df_dict) # temporary dataframe w/ default params 71 | # find key in provided df 72 | # if it exists, fill empty fields w/ default values; else insert the default values as a new column 73 | for key in default_df_dict.keys(): 74 | if key in slides.columns: 75 | mask = slides[key].isna() 76 | slides.loc[mask, key] = temp_copy.loc[mask, key] 77 | else: 78 | slides.insert(len(slides.columns), key, default_df_dict[key]) 79 | else: 80 | slides = pd.DataFrame(default_df_dict) 81 | 82 | return slides
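A sketch of calling `initialize_df` directly, with parameter dictionaries carrying exactly the keys the function reads; the values mirror the `tcga.csv` preset shown earlier, and the slide ids are hypothetical:

```python
from wsi_core.batch_process_utils import initialize_df

seg_params = {'seg_level': -1, 'sthresh': 8, 'mthresh': 7, 'close': 4,
              'use_otsu': False, 'keep_ids': 'none', 'exclude_ids': 'none'}
filter_params = {'a_t': 16, 'a_h': 4, 'max_n_holes': 8}
vis_params = {'vis_level': -1, 'line_thickness': 100}
patch_params = {'use_padding': True, 'contour_fn': 'four_pt'}

df = initialize_df(['slide_001', 'slide_002'], seg_params, filter_params,
                   vis_params, patch_params)
print(df[['slide_id', 'process', 'status', 'seg_level']])
```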
-------------------------------------------------------------------------------- /Slide-level multi-class subtyping task/vis_utils/heatmap_utils.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch 3 | import torch.nn as nn 4 | import torch.nn.functional as F 5 | import pdb 6 | import os 7 | import pandas as pd 8 | from utils.utils import * 9 | from PIL import Image 10 | from math import floor 11 | import matplotlib.pyplot as plt 12 | from datasets.wsi_dataset import Wsi_Region 13 | import h5py 14 | from wsi_core.WholeSlideImage import WholeSlideImage 15 | from scipy.stats import percentileofscore 16 | import math 17 | from utils.file_utils import save_hdf5 18 | 19 | 20 | device=torch.device("cuda" if torch.cuda.is_available() else "cpu") 21 | 22 | def score2percentile(score, ref): 23 | percentile = percentileofscore(ref, score) 24 | return percentile 25 | 26 | def drawHeatmap(scores, coords, slide_path=None, wsi_object=None, vis_level = -1, **kwargs): 27 | if wsi_object is None: 28 | wsi_object = WholeSlideImage(slide_path) 29 | print(wsi_object.name) 30 | 31 | wsi = wsi_object.getOpenSlide() 32 | if vis_level < 0: 33 | vis_level = wsi.get_best_level_for_downsample(32) 34 | 35 | heatmap = wsi_object.visHeatmap(scores=scores, coords=coords, vis_level=vis_level, **kwargs) 36 | return heatmap 37 | 38 | def initialize_wsi(wsi_path, seg_mask_path=None, seg_params=None, filter_params=None): 39 | wsi_object = WholeSlideImage(wsi_path) 40 | if seg_params['seg_level'] < 0: 41 | best_level = wsi_object.wsi.get_best_level_for_downsample(32) 42 | seg_params['seg_level'] = best_level 43 | 44 | wsi_object.segmentTissue(**seg_params, filter_params=filter_params) 45 | wsi_object.saveSegmentation(seg_mask_path) 46 | return wsi_object 47 | 48 | def compute_from_patches(wsi_object, clam_pred=None, model=None, feature_extractor=None, batch_size=512, 49 | attn_save_path=None, ref_scores=None, feat_save_path=None, **wsi_kwargs): 50 | top_left = wsi_kwargs['top_left'] 51 | bot_right = wsi_kwargs['bot_right'] 52 | patch_size = wsi_kwargs['patch_size'] 53 | 54 | roi_dataset = Wsi_Region(wsi_object, **wsi_kwargs) 55 | roi_loader = get_simple_loader(roi_dataset, batch_size=batch_size, num_workers=8) 56 | print('total number of patches to process: ', len(roi_dataset)) 57 | num_batches = len(roi_loader) 58 | print('number of batches: ', len(roi_loader)) 59 | mode = "w" 60 | for idx, (roi, coords) in enumerate(roi_loader): 61 | roi = roi.to(device) 62 | coords = coords.numpy() 63 | 64 | with torch.no_grad(): 65 | features = feature_extractor(roi) 66 | 67 | if attn_save_path is not None: 68 | A = model(features, attention_only=True) 69 | 70 | if A.size(0) > 1: #CLAM multi-branch attention 71 | A = A[clam_pred] 72 | 73 | A = A.view(-1, 1).cpu().numpy() 74 | 75 | if ref_scores is not None: 76 | for score_idx in range(len(A)): 77 | A[score_idx] = score2percentile(A[score_idx], ref_scores) 78 | 79 | asset_dict = {'attention_scores': A, 'coords': coords} 80 | save_path = save_hdf5(attn_save_path, asset_dict, mode=mode) 81 | 82 | if idx % math.ceil(num_batches * 0.05) == 0: 83 | print('processed {} / {}'.format(idx, num_batches)) 84 | 85 | if feat_save_path is not None: 86 | asset_dict = {'features': features.cpu().numpy(), 'coords': coords} 87 | save_hdf5(feat_save_path, asset_dict, mode=mode) 88 | 89 | mode = "a" 90 | return attn_save_path, feat_save_path, wsi_object
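`drawHeatmap` needs only per-patch attention scores, their coordinates, and a slide path (or an already-built `WholeSlideImage`); everything else is forwarded to `visHeatmap`. A hypothetical call (the `.npy` file names are assumptions, as is `visHeatmap` returning a PIL image, which holds in the CLAM codebase this module mirrors):

```python
import numpy as np
from vis_utils.heatmap_utils import drawHeatmap

scores = np.load('attention_scores.npy')  # (N,) one attention score per patch
coords = np.load('patch_coords.npy')      # (N, 2) level-0 patch coordinates
heatmap = drawHeatmap(scores, coords, slide_path='/data/slides/slide_001.svs')
heatmap.save('slide_001_heatmap.png')
```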
-------------------------------------------------------------------------------- /Slide-level multi-class subtyping task/models/model_mil.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import torch.nn.functional as F 4 | from utils.utils import initialize_weights 5 | import numpy as np 6 | 7 | class MIL_fc(nn.Module): 8 | def __init__(self, gate = True, size_arg = "small", dropout = False, n_classes = 2, top_k=1): 9 | super(MIL_fc, self).__init__() 10 | assert n_classes == 2 11 | self.size_dict = {"small": [1024, 512]} 12 | size = self.size_dict[size_arg] 13 | fc = [nn.Linear(size[0], size[1]), nn.ReLU()] 14 | if dropout: 15 | fc.append(nn.Dropout(0.25)) 16 | 17 | fc.append(nn.Linear(size[1], n_classes)) 18 | self.classifier = nn.Sequential(*fc) 19 | initialize_weights(self) 20 | self.top_k=top_k 21 | 22 | def relocate(self): 23 | device=torch.device("cuda" if torch.cuda.is_available() else "cpu") 24 | self.classifier.to(device) 25 | 26 | def forward(self, h, return_features=False): 27 | if return_features: 28 | h = self.classifier[:-1](h) # classifier is a plain nn.Sequential here (no DataParallel .module); [:-1]/[-1] stay correct with or without dropout 29 | logits = self.classifier[-1](h) 30 | else: 31 | logits = self.classifier(h) # K x n_classes 32 | 33 | y_probs = F.softmax(logits, dim = 1) 34 | top_instance_idx = torch.topk(y_probs[:, 1], self.top_k, dim=0)[1].view(1,) 35 | top_instance = torch.index_select(logits, dim=0, index=top_instance_idx) 36 | Y_hat = torch.topk(top_instance, 1, dim = 1)[1] 37 | Y_prob = F.softmax(top_instance, dim = 1) 38 | results_dict = {} 39 | 40 | if return_features: 41 | top_features = torch.index_select(h, dim=0, index=top_instance_idx) 42 | results_dict.update({'features': top_features}) 43 | return top_instance, Y_prob, Y_hat, y_probs, results_dict 44 | 45 | 46 | class MIL_fc_mc(nn.Module): 47 | def __init__(self, gate = True, size_arg = "small", dropout = False, n_classes = 2, top_k=1): 48 | super(MIL_fc_mc, self).__init__() 49 | assert n_classes > 2 50 | self.size_dict = {"small": [1024, 512]} 51 | size = self.size_dict[size_arg] 52 | fc = [nn.Linear(size[0], size[1]), nn.ReLU()] 53 | if dropout: 54 | fc.append(nn.Dropout(0.25)) 55 | self.fc = nn.Sequential(*fc) 56 | 57 | self.classifiers = nn.ModuleList([nn.Linear(size[1], 1) for i in range(n_classes)]) 58 | initialize_weights(self) 59 | self.top_k=top_k 60 | self.n_classes = n_classes 61 | assert self.top_k == 1 62 | 63 | def relocate(self): 64 | device=torch.device("cuda" if torch.cuda.is_available() else "cpu") 65 | self.fc = self.fc.to(device) 66 | self.classifiers = self.classifiers.to(device) 67 | 68 | def forward(self, h, return_features=False): 69 | device = h.device 70 | 71 | h = self.fc(h) 72 | logits = torch.empty(h.size(0), self.n_classes).float().to(device) 73 | 74 | for c in range(self.n_classes): 75 | if isinstance(self.classifiers, nn.DataParallel): 76 | logits[:, c] = self.classifiers.module[c](h).squeeze(1) 77 | else: 78 | logits[:, c] = self.classifiers[c](h).squeeze(1) 79 | 80 | y_probs = F.softmax(logits, dim = 1) 81 | m = y_probs.view(1, -1).argmax(1) 82 | top_indices = torch.cat(((m // self.n_classes).view(-1, 1), (m % self.n_classes).view(-1, 1)), dim=1).view(-1, 1) 83 | top_instance = logits[top_indices[0]] 84 | 85 | Y_hat = top_indices[1] 86 | Y_prob = y_probs[top_indices[0]] 87 | 88 | results_dict = {} 89 | 90 | if return_features: 91 | top_features = torch.index_select(h, dim=0, index=top_indices[0]) 92 | results_dict.update({'features': top_features}) 93 | return top_instance, Y_prob, Y_hat, y_probs, results_dict 94 | 95 | 96 | 97 |
-------------------------------------------------------------------------------- /environment.yaml: -------------------------------------------------------------------------------- 1 | name: BROW 2 | channels: 3 | - pytorch 4 | - defaults 5 | dependencies: 6 | - _libgcc_mutex=0.1=main 7 | - _openmp_mutex=5.1=1_gnu 8 | - blas=1.0=mkl 9 | - bottleneck=1.3.5=py39h7deecbd_0 10 | - brotlipy=0.7.0=py39h27cfd23_1003 11 | - bzip2=1.0.8=h7b6447c_0 12 | - ca-certificates=2023.05.30=h06a4308_0 13 | - certifi=2023.5.7=py39h06a4308_0 14 | - cffi=1.15.1=py39h5eee18b_3 15 | - charset-normalizer=2.0.4=pyhd3eb1b0_0 16 | - cryptography=39.0.1=py39h9ce1e76_2 17 | - cudatoolkit=11.3.1=h2bc3f7f_2 18 | - ffmpeg=4.3=hf484d3e_0 19 | - 
freetype=2.12.1=h4a9f257_0 20 | - giflib=5.2.1=h5eee18b_3 21 | - gmp=6.2.1=h295c915_3 22 | - gnutls=3.6.15=he1e5248_0 23 | - idna=3.4=py39h06a4308_0 24 | - intel-openmp=2023.1.0=hdb19cb5_46305 25 | - jpeg=9e=h5eee18b_1 26 | - lame=3.100=h7b6447c_0 27 | - lcms2=2.12=h3be6417_0 28 | - ld_impl_linux-64=2.38=h1181459_1 29 | - lerc=3.0=h295c915_0 30 | - libdeflate=1.17=h5eee18b_0 31 | - libffi=3.4.4=h6a678d5_0 32 | - libgcc-ng=11.2.0=h1234567_1 33 | - libgomp=11.2.0=h1234567_1 34 | - libiconv=1.16=h7f8727e_2 35 | - libidn2=2.3.4=h5eee18b_0 36 | - libpng=1.6.39=h5eee18b_0 37 | - libstdcxx-ng=11.2.0=h1234567_1 38 | - libtasn1=4.19.0=h5eee18b_0 39 | - libtiff=4.5.0=h6a678d5_2 40 | - libunistring=0.9.10=h27cfd23_0 41 | - libwebp=1.2.4=h11a3e52_1 42 | - libwebp-base=1.2.4=h5eee18b_1 43 | - lz4-c=1.9.4=h6a678d5_0 44 | - mkl=2023.1.0=h6d00ec8_46342 45 | - mkl-service=2.4.0=py39h5eee18b_1 46 | - mkl_fft=1.3.6=py39h417a72b_1 47 | - mkl_random=1.2.2=py39h417a72b_1 48 | - ncurses=6.4=h6a678d5_0 49 | - nettle=3.7.3=hbbd107a_1 50 | - numexpr=2.8.4=py39hc78ab66_1 51 | - numpy=1.24.3=py39hf6e8229_1 52 | - numpy-base=1.24.3=py39h060ed82_1 53 | - openh264=2.1.1=h4ff587b_0 54 | - openssl=3.0.8=h7f8727e_0 55 | - pandas=1.5.3=py39h417a72b_0 56 | - pillow=9.4.0=py39h6a678d5_0 57 | - pip=23.1.2=py39h06a4308_0 58 | - pycparser=2.21=pyhd3eb1b0_0 59 | - pyopenssl=23.0.0=py39h06a4308_0 60 | - pysocks=1.7.1=py39h06a4308_0 61 | - python=3.9.16=h955ad1f_3 62 | - python-dateutil=2.8.2=pyhd3eb1b0_0 63 | - pytorch=1.12.0=py3.9_cuda11.3_cudnn8.3.2_0 64 | - pytorch-mutex=1.0=cuda 65 | - pytz=2022.7=py39h06a4308_0 66 | - readline=8.2=h5eee18b_0 67 | - requests=2.29.0=py39h06a4308_0 68 | - setuptools=67.8.0=py39h06a4308_0 69 | - six=1.16.0=pyhd3eb1b0_1 70 | - sqlite=3.41.2=h5eee18b_0 71 | - tbb=2021.8.0=hdb19cb5_0 72 | - tk=8.6.12=h1ccaba5_0 73 | - torchaudio=0.12.0=py39_cu113 74 | - torchvision=0.13.0=py39_cu113 75 | - typing_extensions=4.6.3=py39h06a4308_0 76 | - tzdata=2023c=h04d1e81_0 77 | - urllib3=1.26.16=py39h06a4308_0 78 | - wheel=0.38.4=py39h06a4308_0 79 | - xz=5.4.2=h5eee18b_0 80 | - zlib=1.2.13=h5eee18b_0 81 | - zstd=1.5.5=hc292b87_0 82 | - pip: 83 | - absl-py==1.4.0 84 | - cachetools==5.3.0 85 | - contourpy==1.0.7 86 | - cycler==0.11.0 87 | - filelock==3.12.0 88 | - fonttools==4.40.0 89 | - fsspec==2023.5.0 90 | - future==0.18.3 91 | - google-auth==2.20.0 92 | - google-auth-oauthlib==1.0.0 93 | - grpcio==1.54.2 94 | - h5py==3.8.0 95 | - huggingface-hub==0.14.1 96 | - importlib-metadata==6.6.0 97 | - importlib-resources==5.12.0 98 | - joblib==1.2.0 99 | - kiwisolver==1.4.4 100 | - markdown==3.4.3 101 | - markupsafe==2.1.2 102 | - matplotlib==3.7.1 103 | - oauthlib==3.2.2 104 | - opencv-python==4.7.0.72 105 | - openslide-python==1.2.0 106 | - packaging==23.1 107 | - protobuf==4.23.2 108 | - pyasn1==0.5.0 109 | - pyasn1-modules==0.3.0 110 | - pyparsing==3.0.9 111 | - pyyaml==6.0 112 | - requests-oauthlib==1.3.1 113 | - rsa==4.9 114 | - safetensors==0.3.1 115 | - scikit-learn==1.2.2 116 | - scipy==1.10.1 117 | - tensorboard==2.13.0 118 | - tensorboard-data-server==0.7.0 119 | - threadpoolctl==3.1.0 120 | - timm==0.9.2 121 | - tqdm==4.65.0 122 | - werkzeug==2.3.6 123 | - zipp==3.15.0 124 | -------------------------------------------------------------------------------- /Slide-level multi-class subtyping task/wsi_core/util_classes.py: -------------------------------------------------------------------------------- 1 | import os 2 | import numpy as np 3 | from PIL import Image 4 | import pdb 5 | import cv2 6 | class 
Mosaic_Canvas(object): 7 | def __init__(self,patch_size=256, n=100, downscale=4, n_per_row=10, bg_color=(0,0,0), alpha=-1): 8 | self.patch_size = patch_size 9 | self.downscaled_patch_size = int(np.ceil(patch_size/downscale)) 10 | self.n_rows = int(np.ceil(n / n_per_row)) 11 | self.n_cols = n_per_row 12 | w = self.n_cols * self.downscaled_patch_size 13 | h = self.n_rows * self.downscaled_patch_size 14 | if alpha < 0: 15 | canvas = Image.new(size=(w,h), mode="RGB", color=bg_color) 16 | else: 17 | canvas = Image.new(size=(w,h), mode="RGBA", color=bg_color + (int(255 * alpha),)) 18 | 19 | self.canvas = canvas 20 | self.dimensions = np.array([w, h]) 21 | self.reset_coord() 22 | 23 | def reset_coord(self): 24 | self.coord = np.array([0, 0]) 25 | 26 | def increment_coord(self): 27 | #print('current coord: {} x {} / {} x {}'.format(self.coord[0], self.coord[1], self.dimensions[0], self.dimensions[1])) 28 | assert np.all(self.coord<=self.dimensions) 29 | if self.coord[0] + self.downscaled_patch_size <=self.dimensions[0] - self.downscaled_patch_size: 30 | self.coord[0]+=self.downscaled_patch_size 31 | else: 32 | self.coord[0] = 0 33 | self.coord[1]+=self.downscaled_patch_size 34 | 35 | 36 | def save(self, save_path, **kwargs): 37 | self.canvas.save(save_path, **kwargs) 38 | 39 | def paste_patch(self, patch): 40 | assert patch.size[0] == self.patch_size 41 | assert patch.size[1] == self.patch_size 42 | self.canvas.paste(patch.resize(tuple([self.downscaled_patch_size, self.downscaled_patch_size])), tuple(self.coord)) 43 | self.increment_coord() 44 | 45 | def get_painting(self): 46 | return self.canvas 47 | 48 | class Contour_Checking_fn(object): 49 | # Defining __call__ method 50 | def __call__(self, pt): 51 | raise NotImplementedError 52 | 53 | class isInContourV1(Contour_Checking_fn): 54 | def __init__(self, contour): 55 | self.cont = contour 56 | 57 | def __call__(self, pt): 58 | return 1 if cv2.pointPolygonTest(self.cont, tuple(np.array(pt).astype(float)), False) >= 0 else 0 59 | 60 | class isInContourV2(Contour_Checking_fn): 61 | def __init__(self, contour, patch_size): 62 | self.cont = contour 63 | self.patch_size = patch_size 64 | 65 | def __call__(self, pt): 66 | pt = np.array((pt[0]+self.patch_size//2, pt[1]+self.patch_size//2)).astype(float) 67 | return 1 if cv2.pointPolygonTest(self.cont, tuple(np.array(pt).astype(float)), False) >= 0 else 0 68 | 69 | # Easy version of 4pt contour checking function - 1 of 4 points need to be in the contour for test to pass 70 | class isInContourV3_Easy(Contour_Checking_fn): 71 | def __init__(self, contour, patch_size, center_shift=0.5): 72 | self.cont = contour 73 | self.patch_size = patch_size 74 | self.shift = int(patch_size//2*center_shift) 75 | def __call__(self, pt): 76 | center = (pt[0]+self.patch_size//2, pt[1]+self.patch_size//2) 77 | if self.shift > 0: 78 | all_points = [(center[0]-self.shift, center[1]-self.shift), 79 | (center[0]+self.shift, center[1]+self.shift), 80 | (center[0]+self.shift, center[1]-self.shift), 81 | (center[0]-self.shift, center[1]+self.shift) 82 | ] 83 | else: 84 | all_points = [center] 85 | 86 | for points in all_points: 87 | if cv2.pointPolygonTest(self.cont, tuple(np.array(points).astype(float)), False) >= 0: 88 | return 1 89 | return 0 90 | 91 | # Hard version of 4pt contour checking function - all 4 points need to be in the contour for test to pass 92 | class isInContourV3_Hard(Contour_Checking_fn): 93 | def __init__(self, contour, patch_size, center_shift=0.5): 94 | self.cont = contour 95 | self.patch_size = patch_size 
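# hard variant: every one of the four corner probes built in __call__ below must lie inside the contour, unlike isInContourV3_Easy above where a single passing probe suffices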
96 | self.shift = int(patch_size//2*center_shift) 97 | def __call__(self, pt): 98 | center = (pt[0]+self.patch_size//2, pt[1]+self.patch_size//2) 99 | if self.shift > 0: 100 | all_points = [(center[0]-self.shift, center[1]-self.shift), 101 | (center[0]+self.shift, center[1]+self.shift), 102 | (center[0]+self.shift, center[1]-self.shift), 103 | (center[0]-self.shift, center[1]+self.shift) 104 | ] 105 | else: 106 | all_points = [center] 107 | 108 | for points in all_points: 109 | if cv2.pointPolygonTest(self.cont, tuple(np.array(points).astype(float)), False) < 0: 110 | return 0 111 | return 1 112 | 113 | 114 | 115 | -------------------------------------------------------------------------------- /Slide-level multi-class subtyping task/utils/eval_utils.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | import torch 4 | import torch.nn as nn 5 | import torch.nn.functional as F 6 | from models.model_mil import MIL_fc, MIL_fc_mc 7 | from models.model_clam import CLAM_SB, CLAM_MB 8 | import pdb 9 | import os 10 | import pandas as pd 11 | from utils.utils import * 12 | from utils.core_utils import Accuracy_Logger 13 | from sklearn.metrics import roc_auc_score, roc_curve, auc 14 | from sklearn.preprocessing import label_binarize 15 | import matplotlib.pyplot as plt 16 | 17 | def initiate_model(args, ckpt_path): 18 | print('Init Model') 19 | model_dict = {"dropout": args.drop_out, 'n_classes': args.n_classes} 20 | 21 | if args.model_size is not None and args.model_type in ['clam_sb', 'clam_mb']: 22 | model_dict.update({"size_arg": args.model_size}) 23 | 24 | if args.model_type =='clam_sb': 25 | model = CLAM_SB(**model_dict) 26 | elif args.model_type =='clam_mb': 27 | model = CLAM_MB(**model_dict) 28 | else: # args.model_type == 'mil' 29 | if args.n_classes > 2: 30 | model = MIL_fc_mc(**model_dict) 31 | else: 32 | model = MIL_fc(**model_dict) 33 | 34 | print_network(model) 35 | print(ckpt_path) 36 | ckpt = torch.load(ckpt_path) 37 | ckpt_clean = {} 38 | for key in ckpt.keys(): 39 | if 'instance_loss_fn' in key or 'classifiers1' in key: 40 | continue 41 | ckpt_clean.update({key.replace('.module', ''):ckpt[key]}) 42 | model.load_state_dict(ckpt_clean, strict=True) 43 | 44 | model.relocate() 45 | model.eval() 46 | return model 47 | 48 | def eval(dataset, args, ckpt_path): 49 | print('***************', len(dataset)) 50 | model = initiate_model(args, ckpt_path) 51 | 52 | print('Init Loaders') 53 | loader = get_simple_loader(dataset) 54 | patient_results, test_error, auc, df, _ = summary(model, loader, args) 55 | print('test_error: ', test_error) 56 | print('auc: ', auc) 57 | return model, patient_results, test_error, auc, df 58 | 59 | def summary(model, loader, args): 60 | acc_logger = Accuracy_Logger(n_classes=args.n_classes) 61 | model.eval() 62 | test_loss = 0. 63 | test_error = 0. 
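# accumulators sized one row per slide: the loader is assumed to yield a single bag per batch, which the label.item() and Y_hat.item() calls below rely on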
64 | 65 | all_probs = np.zeros((len(loader), args.n_classes)) 66 | all_labels = np.zeros(len(loader)) 67 | all_preds = np.zeros(len(loader)) 68 | 69 | slide_ids = loader.dataset.slide_data['slide_id'] 70 | patient_results = {} 71 | for batch_idx, (data, label) in enumerate(loader): 72 | data, label = data.to(device), label.to(device) 73 | slide_id = slide_ids.iloc[batch_idx] 74 | with torch.no_grad(): 75 | logits, Y_prob, Y_hat, _, results_dict = model(data) 76 | 77 | acc_logger.log(Y_hat, label) 78 | 79 | probs = Y_prob.cpu().numpy() 80 | 81 | all_probs[batch_idx] = probs 82 | all_labels[batch_idx] = label.item() 83 | all_preds[batch_idx] = Y_hat.item() 84 | 85 | patient_results.update({slide_id: {'slide_id': np.array(slide_id), 'prob': probs, 'label': label.item()}}) 86 | 87 | error = calculate_error(Y_hat, label) 88 | test_error += error 89 | 90 | del data 91 | test_error /= len(loader) 92 | 93 | aucs = [] 94 | if len(np.unique(all_labels)) == 1: 95 | auc_score = -1 96 | 97 | else: 98 | if args.n_classes == 2: 99 | auc_score = roc_auc_score(all_labels, all_probs[:, 1]) 100 | else: 101 | binary_labels = label_binarize(all_labels, classes=[i for i in range(args.n_classes)]) 102 | for class_idx in range(args.n_classes): 103 | if class_idx in all_labels: 104 | fpr, tpr, _ = roc_curve(binary_labels[:, class_idx], all_probs[:, class_idx]) 105 | aucs.append(auc(fpr, tpr)) 106 | else: 107 | aucs.append(float('nan')) 108 | if args.micro_average: 109 | binary_labels = label_binarize(all_labels, classes=[i for i in range(args.n_classes)]) 110 | fpr, tpr, _ = roc_curve(binary_labels.ravel(), all_probs.ravel()) 111 | auc_score = auc(fpr, tpr) 112 | else: 113 | auc_score = np.nanmean(np.array(aucs)) 114 | 115 | results_dict = {'slide_id': slide_ids, 'Y': all_labels, 'Y_hat': all_preds} 116 | for c in range(args.n_classes): 117 | results_dict.update({'p_{}'.format(c): all_probs[:,c]}) 118 | df = pd.DataFrame(results_dict) 119 | return patient_results, test_error, auc_score, df, acc_logger 120 | -------------------------------------------------------------------------------- /Slide-level multi-class subtyping task/models/resnet_custom.py: -------------------------------------------------------------------------------- 1 | # modified from Pytorch official resnet.py 2 | import torch.nn as nn 3 | import torch.utils.model_zoo as model_zoo 4 | import torch 5 | import torch.nn.functional as F 6 | 7 | __all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101', 8 | 'resnet152'] 9 | 10 | model_urls = { 11 | 'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth', 12 | 'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth', 13 | 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth', 14 | 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth', 15 | 'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth', 16 | } 17 | 18 | class Bottleneck_Baseline(nn.Module): 19 | expansion = 4 20 | 21 | def __init__(self, inplanes, planes, stride=1, downsample=None): 22 | super(Bottleneck_Baseline, self).__init__() 23 | self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False) 24 | self.bn1 = nn.BatchNorm2d(planes) 25 | self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, 26 | padding=1, bias=False) 27 | self.bn2 = nn.BatchNorm2d(planes) 28 | self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1, bias=False) 29 | self.bn3 = nn.BatchNorm2d(planes * self.expansion) 30 | self.relu = 
nn.ReLU(inplace=True) 31 | self.downsample = downsample 32 | self.stride = stride 33 | 34 | def forward(self, x): 35 | residual = x 36 | 37 | out = self.conv1(x) 38 | out = self.bn1(out) 39 | out = self.relu(out) 40 | 41 | out = self.conv2(out) 42 | out = self.bn2(out) 43 | out = self.relu(out) 44 | 45 | out = self.conv3(out) 46 | out = self.bn3(out) 47 | 48 | if self.downsample is not None: 49 | residual = self.downsample(x) 50 | 51 | out += residual 52 | out = self.relu(out) 53 | 54 | return out 55 | 56 | class ResNet_Baseline(nn.Module): 57 | 58 | def __init__(self, block, layers): 59 | self.inplanes = 64 60 | super(ResNet_Baseline, self).__init__() 61 | self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, 62 | bias=False) 63 | self.bn1 = nn.BatchNorm2d(64) 64 | self.relu = nn.ReLU(inplace=True) 65 | self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) 66 | self.layer1 = self._make_layer(block, 64, layers[0]) 67 | self.layer2 = self._make_layer(block, 128, layers[1], stride=2) 68 | self.layer3 = self._make_layer(block, 256, layers[2], stride=2) 69 | self.avgpool = nn.AdaptiveAvgPool2d(1) 70 | 71 | for m in self.modules(): 72 | if isinstance(m, nn.Conv2d): 73 | nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') 74 | elif isinstance(m, nn.BatchNorm2d): 75 | nn.init.constant_(m.weight, 1) 76 | nn.init.constant_(m.bias, 0) 77 | 78 | def _make_layer(self, block, planes, blocks, stride=1): 79 | downsample = None 80 | if stride != 1 or self.inplanes != planes * block.expansion: 81 | downsample = nn.Sequential( 82 | nn.Conv2d(self.inplanes, planes * block.expansion, 83 | kernel_size=1, stride=stride, bias=False), 84 | nn.BatchNorm2d(planes * block.expansion), 85 | ) 86 | 87 | layers = [] 88 | layers.append(block(self.inplanes, planes, stride, downsample)) 89 | self.inplanes = planes * block.expansion 90 | for i in range(1, blocks): 91 | layers.append(block(self.inplanes, planes)) 92 | 93 | return nn.Sequential(*layers) 94 | 95 | def forward(self, x): 96 | x = self.conv1(x) 97 | x = self.bn1(x) 98 | x = self.relu(x) 99 | x = self.maxpool(x) 100 | 101 | x = self.layer1(x) 102 | x = self.layer2(x) 103 | x = self.layer3(x) 104 | 105 | x = self.avgpool(x) 106 | x = x.view(x.size(0), -1) 107 | 108 | return x 109 | 110 | def resnet50_baseline(pretrained=False): 111 | """Constructs a Modified ResNet-50 model. 
112 | Args: 113 | pretrained (bool): If True, returns a model pre-trained on ImageNet 114 | """ 115 | model = ResNet_Baseline(Bottleneck_Baseline, [3, 4, 6, 3]) 116 | if pretrained: 117 | model = load_pretrained_weights(model, 'resnet50') 118 | return model 119 | 120 | def load_pretrained_weights(model, name): 121 | pretrained_dict = model_zoo.load_url(model_urls[name]) 122 | model.load_state_dict(pretrained_dict, strict=False) 123 | return model 124 | 125 | 126 | -------------------------------------------------------------------------------- /Slide-level multi-class subtyping task/datasets/dataset_h5.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function, division 2 | import os 3 | import torch 4 | import numpy as np 5 | import pandas as pd 6 | import math 7 | import re 8 | import pdb 9 | import pickle 10 | 11 | from torch.utils.data import Dataset, DataLoader, sampler 12 | from torchvision import transforms, utils, models 13 | import torch.nn.functional as F 14 | 15 | from PIL import Image 16 | import h5py 17 | 18 | from random import randrange 19 | 20 | def eval_transforms(pretrained=False): 21 | if pretrained: 22 | mean = (0.485, 0.456, 0.406) 23 | std = (0.229, 0.224, 0.225) 24 | 25 | else: 26 | mean = (0.5,0.5,0.5) 27 | std = (0.5,0.5,0.5) 28 | 29 | trnsfrms_val = transforms.Compose( 30 | [ 31 | transforms.ToTensor(), 32 | transforms.Normalize(mean = mean, std = std) 33 | ] 34 | ) 35 | 36 | return trnsfrms_val 37 | 38 | class Whole_Slide_Bag(Dataset): 39 | def __init__(self, 40 | file_path, 41 | pretrained=False, 42 | custom_transforms=None, 43 | target_patch_size=-1, 44 | ): 45 | """ 46 | Args: 47 | file_path (string): Path to the .h5 file containing patched data. 48 | pretrained (bool): Use ImageNet transforms 49 | custom_transforms (callable, optional): Optional transform to be applied on a sample 50 | """ 51 | self.pretrained=pretrained 52 | if target_patch_size > 0: 53 | self.target_patch_size = (target_patch_size, target_patch_size) 54 | else: 55 | self.target_patch_size = None 56 | 57 | if not custom_transforms: 58 | self.roi_transforms = eval_transforms(pretrained=pretrained) 59 | else: 60 | self.roi_transforms = custom_transforms 61 | 62 | self.file_path = file_path 63 | 64 | with h5py.File(self.file_path, "r") as f: 65 | dset = f['imgs'] 66 | self.length = len(dset) 67 | 68 | self.summary() 69 | 70 | def __len__(self): 71 | return self.length 72 | 73 | def summary(self): 74 | hdf5_file = h5py.File(self.file_path, "r") 75 | dset = hdf5_file['imgs'] 76 | for name, value in dset.attrs.items(): 77 | print(name, value) 78 | 79 | print('pretrained:', self.pretrained) 80 | print('transformations:', self.roi_transforms) 81 | if self.target_patch_size is not None: 82 | print('target_size: ', self.target_patch_size) 83 | 84 | def __getitem__(self, idx): 85 | with h5py.File(self.file_path,'r') as hdf5_file: 86 | img = hdf5_file['imgs'][idx] 87 | coord = hdf5_file['coords'][idx] 88 | 89 | img = Image.fromarray(img) 90 | if self.target_patch_size is not None: 91 | img = img.resize(self.target_patch_size) 92 | img = self.roi_transforms(img).unsqueeze(0) 93 | return img, coord 94 | 95 | class Whole_Slide_Bag_FP(Dataset): 96 | def __init__(self, 97 | file_path, 98 | wsi, 99 | pretrained=False, 100 | custom_transforms=None, 101 | custom_downsample=1, 102 | target_patch_size=-1 103 | ): 104 | """ 105 | Args: 106 | file_path (string): Path to the .h5 file containing patched data. 
107 | pretrained (bool): Use ImageNet transforms 108 | custom_transforms (callable, optional): Optional transform to be applied on a sample 109 | custom_downsample (int): Custom defined downscale factor (overruled by target_patch_size) 110 | target_patch_size (int): Custom defined image size before embedding 111 | """ 112 | self.pretrained=pretrained 113 | self.wsi = wsi 114 | if not custom_transforms: 115 | self.roi_transforms = eval_transforms(pretrained=pretrained) 116 | else: 117 | self.roi_transforms = custom_transforms 118 | 119 | self.file_path = file_path 120 | 121 | with h5py.File(self.file_path, "r") as f: 122 | dset = f['coords'] 123 | self.patch_level = f['coords'].attrs['patch_level'] 124 | self.patch_size = f['coords'].attrs['patch_size'] 125 | self.length = len(dset) 126 | if target_patch_size > 0: 127 | self.target_patch_size = (target_patch_size, ) * 2 128 | elif custom_downsample > 1: 129 | self.target_patch_size = (self.patch_size // custom_downsample, ) * 2 130 | else: 131 | self.target_patch_size = None 132 | self.summary() 133 | 134 | def __len__(self): 135 | return self.length 136 | 137 | def summary(self): 138 | hdf5_file = h5py.File(self.file_path, "r") 139 | dset = hdf5_file['coords'] 140 | for name, value in dset.attrs.items(): 141 | print(name, value) 142 | 143 | print('\nfeature extraction settings') 144 | print('target patch size: ', self.target_patch_size) 145 | print('pretrained: ', self.pretrained) 146 | print('transformations: ', self.roi_transforms) 147 | 148 | def __getitem__(self, idx): 149 | with h5py.File(self.file_path,'r') as hdf5_file: 150 | coord = hdf5_file['coords'][idx] 151 | img = self.wsi.read_region(coord, self.patch_level, (self.patch_size, self.patch_size)).convert('RGB') 152 | 153 | if self.target_patch_size is not None: 154 | img = img.resize(self.target_patch_size) 155 | img = self.roi_transforms(img).unsqueeze(0) 156 | return img, coord 157 | 158 | class Dataset_All_Bags(Dataset): 159 | 160 | def __init__(self, csv_path): 161 | self.df = pd.read_csv(csv_path) 162 | 163 | def __len__(self): 164 | return len(self.df) 165 | 166 | def __getitem__(self, idx): 167 | return self.df['slide_id'][idx] 168 | 169 | 170 | 171 | 172 | -------------------------------------------------------------------------------- /Slide-level multi-class subtyping task/datasets/wsi_dataset.py: -------------------------------------------------------------------------------- 1 | from torchvision import transforms 2 | import pandas as pd 3 | import numpy as np 4 | import time 5 | import pdb 6 | import PIL.Image as Image 7 | import h5py 8 | from torch.utils.data import Dataset 9 | import torch 10 | from wsi_core.util_classes import Contour_Checking_fn, isInContourV1, isInContourV2, isInContourV3_Easy, isInContourV3_Hard 11 | 12 | def default_transforms(mean = (0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)): 13 | t = transforms.Compose( 14 | [transforms.ToTensor(), 15 | transforms.Normalize(mean = mean, std = std)]) 16 | return t 17 | 18 | def get_contour_check_fn(contour_fn='four_pt_hard', cont=None, ref_patch_size=None, center_shift=None): 19 | if contour_fn == 'four_pt_hard': 20 | cont_check_fn = isInContourV3_Hard(contour=cont, patch_size=ref_patch_size, center_shift=center_shift) 21 | elif contour_fn == 'four_pt_easy': 22 | cont_check_fn = isInContourV3_Easy(contour=cont, patch_size=ref_patch_size, center_shift=0.5) 23 | elif contour_fn == 'center': 24 | cont_check_fn = isInContourV2(contour=cont, patch_size=ref_patch_size) 25 | elif contour_fn == 'basic': 26 | 
        cont_check_fn = isInContourV1(contour=cont)
27 |     else:
28 |         raise NotImplementedError
29 |     return cont_check_fn
30 | 
31 | 
32 | 
33 | class Wsi_Region(Dataset):
34 |     '''
35 |     args:
36 |         wsi_object: instance of WholeSlideImage wrapper over a WSI
37 |         top_left: tuple of coordinates representing the top left corner of the WSI region (Default: None)
38 |         bot_right: tuple of coordinates representing the bottom right corner of the WSI region (Default: None)
39 |         level: downsample level at which to process the WSI region
40 |         patch_size: tuple of width, height representing the patch size
41 |         step_size: tuple of w_step, h_step representing the step size
42 |         contour_fn (str): 
43 |             contour checking fn to use
44 |             choice of ['four_pt_hard', 'four_pt_easy', 'center', 'basic'] (Default: 'four_pt_hard')
45 |         t: custom torchvision transformation to apply
46 |         custom_downsample (int): additional downscale factor to apply
47 |         use_center_shift: for the 'four_pt' contour checks, whether to shift the 4 test points outward from the patch center
48 |     '''
49 |     def __init__(self, wsi_object, top_left=None, bot_right=None, level=0, 
50 |                  patch_size = (256, 256), step_size=(256, 256), 
51 |                  contour_fn='four_pt_hard',
52 |                  t=None, custom_downsample=1, use_center_shift=False):
53 | 
54 |         self.custom_downsample = custom_downsample
55 | 
56 |         # downscale factor in reference to level 0
57 |         self.ref_downsample = wsi_object.level_downsamples[level]
58 |         # patch size in reference to level 0
59 |         self.ref_size = tuple((np.array(patch_size) * np.array(self.ref_downsample)).astype(int))
60 | 
61 |         if self.custom_downsample > 1:
62 |             self.target_patch_size = patch_size
63 |             patch_size = tuple((np.array(patch_size) * np.array(self.ref_downsample) * custom_downsample).astype(int))
64 |             step_size = tuple((np.array(step_size) * custom_downsample).astype(int))
65 |             self.ref_size = patch_size
66 |         else:
67 |             step_size = tuple((np.array(step_size)).astype(int))
68 |             self.ref_size = tuple((np.array(patch_size) * np.array(self.ref_downsample)).astype(int))
69 | 
70 |         self.wsi = wsi_object.wsi
71 |         self.level = level
72 |         self.patch_size = patch_size
73 | 
74 |         if not use_center_shift:
75 |             center_shift = 0.
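# Illustrative note (not from the original source): the overlap heuristic in
# the else-branch below maps patch overlap to how far the four test points are
# pushed out, e.g. patch_size=256 with step_size=256 gives
# overlap = 1 - 256/256 = 0, hence center_shift = 0.375, while step_size=64
# gives overlap = 0.75, hence center_shift = 0.5. get_contour_check_fn
# (defined above) can also be driven directly; `tissue_cont` here is a
# hypothetical cv2 contour array:
#
#   fn = get_contour_check_fn('four_pt_hard', cont=tissue_cont,
#                             ref_patch_size=256, center_shift=0.5)
#   keep = fn((x, y))  # 1 if the candidate patch anchored at (x, y) passes, else 0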
76 | else: 77 | overlap = 1 - float(step_size[0] / patch_size[0]) 78 | if overlap < 0.25: 79 | center_shift = 0.375 80 | elif overlap >= 0.25 and overlap < 0.75: 81 | center_shift = 0.5 82 | elif overlap >=0.75 and overlap < 0.95: 83 | center_shift = 0.5 84 | else: 85 | center_shift = 0.625 86 | #center_shift = 0.375 # 25% overlap 87 | #center_shift = 0.625 #50%, 75% overlap 88 | #center_shift = 1.0 #95% overlap 89 | 90 | filtered_coords = [] 91 | #iterate through tissue contours for valid patch coordinates 92 | for cont_idx, contour in enumerate(wsi_object.contours_tissue): 93 | print('processing {}/{} contours'.format(cont_idx, len(wsi_object.contours_tissue))) 94 | cont_check_fn = get_contour_check_fn(contour_fn, contour, self.ref_size[0], center_shift) 95 | coord_results, _ = wsi_object.process_contour(contour, wsi_object.holes_tissue[cont_idx], level, '', 96 | patch_size = patch_size[0], step_size = step_size[0], contour_fn=cont_check_fn, 97 | use_padding=True, top_left = top_left, bot_right = bot_right) 98 | if len(coord_results) > 0: 99 | filtered_coords.append(coord_results['coords']) 100 | 101 | coords=np.vstack(filtered_coords) 102 | 103 | self.coords = coords 104 | print('filtered a total of {} coordinates'.format(len(self.coords))) 105 | 106 | # apply transformation 107 | if t is None: 108 | self.transforms = default_transforms() 109 | else: 110 | self.transforms = t 111 | 112 | def __len__(self): 113 | return len(self.coords) 114 | 115 | def __getitem__(self, idx): 116 | coord = self.coords[idx] 117 | patch = self.wsi.read_region(tuple(coord), self.level, self.patch_size).convert('RGB') 118 | if self.custom_downsample > 1: 119 | patch = patch.resize(self.target_patch_size) 120 | patch = self.transforms(patch).unsqueeze(0) 121 | return patch, coord 122 | -------------------------------------------------------------------------------- /Slide-level multi-class subtyping task/utils/utils.py: -------------------------------------------------------------------------------- 1 | import pickle 2 | import torch 3 | import numpy as np 4 | import torch.nn as nn 5 | import pdb 6 | 7 | import torch 8 | import numpy as np 9 | import torch.nn as nn 10 | from torchvision import transforms 11 | from torch.utils.data import DataLoader, Sampler, WeightedRandomSampler, RandomSampler, SequentialSampler, sampler 12 | import torch.optim as optim 13 | import pdb 14 | import torch.nn.functional as F 15 | import math 16 | from itertools import islice 17 | import collections 18 | device=torch.device("cuda" if torch.cuda.is_available() else "cpu") 19 | 20 | class SubsetSequentialSampler(Sampler): 21 | """Samples elements sequentially from a given list of indices, without replacement. 
22 | 
23 |     Arguments:
24 |         indices (sequence): a sequence of indices
25 |     """
26 |     def __init__(self, indices):
27 |         self.indices = indices
28 | 
29 |     def __iter__(self):
30 |         return iter(self.indices)
31 | 
32 |     def __len__(self):
33 |         return len(self.indices)
34 | 
35 | def collate_MIL(batch):
36 |     img = torch.cat([item[0] for item in batch], dim = 0)
37 |     label = torch.LongTensor([item[1] for item in batch])
38 |     return [img, label]
39 | 
40 | def collate_features(batch):
41 |     img = torch.cat([item[0] for item in batch], dim = 0)
42 |     coords = np.vstack([item[1] for item in batch])
43 |     return [img, coords]
44 | 
45 | 
46 | def get_simple_loader(dataset, batch_size=1, num_workers=1):
47 |     kwargs = {'num_workers': num_workers, 'pin_memory': False} if device.type == "cuda" else {}
48 |     loader = DataLoader(dataset, batch_size=batch_size, sampler = sampler.SequentialSampler(dataset), collate_fn = collate_MIL, **kwargs)
49 |     return loader
50 | 
51 | def get_split_loader(split_dataset, training = False, testing = False, weighted = False):
52 |     """
53 |     return either the training, validation or testing loader
54 |     """
55 |     kwargs = {'num_workers': 4} if device.type == "cuda" else {}
56 |     if not testing:
57 |         if training:
58 |             if weighted:
59 |                 weights = make_weights_for_balanced_classes_split(split_dataset)
60 |                 loader = DataLoader(split_dataset, batch_size=1, sampler = WeightedRandomSampler(weights, len(weights)), collate_fn = collate_MIL, **kwargs)
61 |             else:
62 |                 loader = DataLoader(split_dataset, batch_size=1, sampler = RandomSampler(split_dataset), collate_fn = collate_MIL, **kwargs)
63 |         else:
64 |             loader = DataLoader(split_dataset, batch_size=1, sampler = SequentialSampler(split_dataset), collate_fn = collate_MIL, **kwargs)
65 | 
66 |     else:
67 |         ids = np.random.choice(np.arange(len(split_dataset)), int(len(split_dataset)*0.1), replace = False)
68 |         loader = DataLoader(split_dataset, batch_size=1, sampler = SubsetSequentialSampler(ids), collate_fn = collate_MIL, **kwargs)
69 | 
70 |     return loader
71 | 
72 | def get_optim(model, args):
73 |     if args.opt == "adam":
74 |         optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=args.lr, weight_decay=args.reg)
75 |     elif args.opt == 'sgd':
76 |         optimizer = optim.SGD(filter(lambda p: p.requires_grad, model.parameters()), lr=args.lr, momentum=0.9, weight_decay=args.reg)
77 |     else:
78 |         raise NotImplementedError
79 |     return optimizer
80 | 
81 | def print_network(net):
82 |     num_params = 0
83 |     num_params_train = 0
84 |     print(net)
85 | 
86 |     for param in net.parameters():
87 |         n = param.numel()
88 |         num_params += n
89 |         if param.requires_grad:
90 |             num_params_train += n
91 | 
92 |     print('Total number of parameters: %d' % num_params)
93 |     print('Total number of trainable parameters: %d' % num_params_train)
94 | 
95 | 
96 | def generate_split(cls_ids, val_num, test_num, samples, n_splits = 5,
97 |     seed = 7, label_frac = 1.0, custom_test_ids = None):
98 |     indices = np.arange(samples).astype(int)
99 | 
100 |     if custom_test_ids is not None:
101 |         indices = np.setdiff1d(indices, custom_test_ids)
102 | 
103 |     np.random.seed(seed)
104 |     for i in range(n_splits):
105 |         all_val_ids = []
106 |         all_test_ids = []
107 |         sampled_train_ids = []
108 | 
109 |         if custom_test_ids is not None: # pre-built test split, do not need to sample
110 |             all_test_ids.extend(custom_test_ids)
111 | 
112 |         for c in range(len(val_num)):
113 |             possible_indices = np.intersect1d(cls_ids[c], indices) #all indices of this class
114 |             val_ids = np.random.choice(possible_indices, val_num[c], replace = False) # validation ids
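# Illustrative sketch (not from the original source): picking the k-th split
# out of this generator is what the nth helper below is used for. The
# arguments here are hypothetical; cls_ids would be a list of per-class index
# arrays for a 2-class problem of 500 samples.
#
#   splits = generate_split(cls_ids, val_num=[20, 20], test_num=[20, 20], samples=500)
#   train_ids, val_ids_k, test_ids = nth(splits, 3)  # the 4th generated split (k=3)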
115 | 
116 |             remaining_ids = np.setdiff1d(possible_indices, val_ids) #indices of this class left after validation
117 |             all_val_ids.extend(val_ids)
118 | 
119 |             if custom_test_ids is None: # sample test split
120 | 
121 |                 test_ids = np.random.choice(remaining_ids, test_num[c], replace = False)
122 |                 remaining_ids = np.setdiff1d(remaining_ids, test_ids)
123 |                 all_test_ids.extend(test_ids)
124 | 
125 |             if label_frac == 1:
126 |                 sampled_train_ids.extend(remaining_ids)
127 | 
128 |             else:
129 |                 sample_num = math.ceil(len(remaining_ids) * label_frac)
130 |                 slice_ids = np.arange(sample_num)
131 |                 sampled_train_ids.extend(remaining_ids[slice_ids])
132 | 
133 |         yield sampled_train_ids, all_val_ids, all_test_ids
134 | 
135 | 
136 | def nth(iterator, n, default=None): # n=None consumes the iterator; otherwise returns its n-th item
137 |     if n is None:
138 |         return collections.deque(iterator, maxlen=0)
139 |     else:
140 |         return next(islice(iterator, n, None), default)
141 | 
142 | def calculate_error(Y_hat, Y):
143 |     error = 1. - Y_hat.float().eq(Y.float()).float().mean().item()
144 | 
145 |     return error
146 | 
147 | def make_weights_for_balanced_classes_split(dataset):
148 |     N = float(len(dataset))
149 |     weight_per_class = [N/len(dataset.slide_cls_ids[c]) for c in range(len(dataset.slide_cls_ids))]
150 |     weight = [0] * int(N)
151 |     for idx in range(len(dataset)):
152 |         y = dataset.getlabel(idx)
153 |         weight[idx] = weight_per_class[y]
154 | 
155 |     return torch.DoubleTensor(weight)
156 | 
157 | def initialize_weights(module):
158 |     for m in module.modules():
159 |         if isinstance(m, nn.Linear):
160 |             nn.init.xavier_normal_(m.weight)
161 |             m.bias.data.zero_()
162 | 
163 |         elif isinstance(m, nn.BatchNorm1d):
164 |             nn.init.constant_(m.weight, 1)
165 |             nn.init.constant_(m.bias, 0)
166 | 
167 | 
-------------------------------------------------------------------------------- /Slide-level multi-class subtyping task/eval.py: --------------------------------------------------------------------------------
1 | from __future__ import print_function
2 | 
3 | import numpy as np
4 | 
5 | import argparse
6 | import torch
7 | import torch.nn as nn
8 | import pdb
9 | import os
10 | import pandas as pd
11 | from utils.utils import *
12 | from math import floor
13 | import matplotlib.pyplot as plt
14 | from datasets.dataset_generic import Generic_MIL_Dataset, save_splits
15 | import h5py
16 | from utils.eval_utils import *
17 | 
18 | parser = argparse.ArgumentParser(description='CLAM Evaluation Script')
19 | 
20 | parser.add_argument('--dataset', type=str, default='', required=True, 
21 |                     choices=['BRCA', 'RCC', 'CAM16', 'PANDA', 'NSCLC'], help='dataset to evaluate')
22 | parser.add_argument('--data_root_dir', type=str, default='', required=True, 
23 |                     help='data directory')
24 | parser.add_argument('--models_exp_code', type=str, default='', required=True, 
25 |                     help='experiment code to load trained models (directory under results_dir containing model checkpoints)')
26 | parser.add_argument('--save_exp_code', type=str, default='', required=True, 
27 |                     help='experiment code to save eval results')
28 | parser.add_argument('--labelcsv_dir', type=str, default='', required=True, 
29 |                     help='path to the label csv, which can be found at ./dataset_csv')
30 | parser.add_argument('--splits_dir', type=str, default='', required=True, 
31 |                     help='splits directory, which can be found at ./splits')
32 | parser.add_argument('--k', type=int, default=10, required=True, help='number of folds (default: 10)')
33 | 
34 | 
35 | 
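# Illustrative usage (not from the original source): a typical invocation of
# this script with the required flags above plus the optional ones below.
# Every path here is a hypothetical placeholder.
#
#   python eval.py --dataset BRCA --data_root_dir FEATURES_DIR \
#       --models_exp_code RESULTS_DIR/brca_run --save_exp_code brca_eval \
#       --labelcsv_dir ./dataset_csv/brca_labels.csv \
#       --splits_dir SPLITS_DIR/BRCA_subtyping2 --k 10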
36 | parser.add_argument('--results_dir', type=str, default='./results',
37 |                     help='relative path to results folder, i.e. '+
38 |                     'the directory containing models_exp_code relative to project root (default: ./results)')
39 | parser.add_argument('--model_size', type=str, choices=['small', 'big', 'cl'], default='small', 
40 |                     help='size of model (default: small)')
41 | parser.add_argument('--model_type', type=str, choices=['clam_sb', 'clam_mb', 'mil'], default='clam_sb', 
42 |                     help='type of model (default: clam_sb)')
43 | parser.add_argument('--drop_out', action='store_true', default=True, 
44 |                     help='whether model uses dropout')
45 | parser.add_argument('--k_start', type=int, default=-1, help='start fold (default: -1, evaluate from the first fold)')
46 | parser.add_argument('--k_end', type=int, default=-1, help='end fold (default: -1, evaluate through the last fold)')
47 | parser.add_argument('--fold', type=int, default=-1, help='single fold to evaluate')
48 | parser.add_argument('--micro_average', action='store_true', default=False, 
49 |                     help='use micro_average instead of macro_average for multiclass AUC')
50 | parser.add_argument('--split', type=str, choices=['train', 'val', 'test', 'all'], default='test')
51 | parser.add_argument('--task', type=str, default='task_2_tumor_subtyping', choices=['task_1_tumor_vs_normal', 'task_2_tumor_subtyping'])
52 | args = parser.parse_args()
53 | 
54 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
55 | 
56 | # args.save_dir = os.path.join('./eval_results', 'EVAL_' + str(args.save_exp_code))
57 | # args.models_dir = os.path.join(args.results_dir, str(args.models_exp_code))
58 | args.save_dir = os.path.join('./eval_results', str(args.save_exp_code))
59 | args.models_dir = str(args.models_exp_code)
60 | 
61 | os.makedirs(args.save_dir, exist_ok=True)
62 | 
63 | if args.splits_dir is None:
64 |     args.splits_dir = args.models_dir
65 | 
66 | assert os.path.isdir(args.models_dir)
67 | assert os.path.isdir(args.splits_dir)
68 | 
69 | settings = {'task': args.task,
70 |             'split': args.split,
71 |             'save_dir': args.save_dir, 
72 |             'models_dir': args.models_dir,
73 |             'model_type': args.model_type,
74 |             'drop_out': args.drop_out,
75 |             'model_size': args.model_size}
76 | 
77 | 
78 | print(settings)
79 | if args.task == 'task_1_tumor_vs_normal':
80 |     raise NotImplementedError
81 | elif args.task == 'task_2_tumor_subtyping':
82 |     datasetdict = {
83 |         'BRCA':[2, {'IDC':0, 'ILC':1}],
84 |         'RCC':[3, {'CCRCC':0, 'CHRCC':1, 'PRCC':2}],
85 |         'NSCLC':[2, {'LUAD':0, 'LUSC':1}],
86 |         'CAM16':[2, {'normal':0, 'tumor':1}],
87 |         'PANDA':[2, {'grades0':0, 'grades1':1}],
88 |     }
89 |     args.n_classes = datasetdict[args.dataset][0]
90 |     dataset = Generic_MIL_Dataset(csv_path = args.labelcsv_dir,
91 |                             data_dir= args.data_root_dir,
92 |                             shuffle = False, 
93 |                             print_info = True,
94 |                             label_dict = datasetdict[args.dataset][1],
95 |                             patient_strat= False,
96 |                             ignore=[])
97 | 
98 | else:
99 |     raise NotImplementedError
100 | 
101 | if args.k_start == -1:
102 |     start = 0
103 | else:
104 |     start = args.k_start
105 | if args.k_end == -1:
106 |     end = args.k
107 | else:
108 |     end = args.k_end
109 | 
110 | if args.fold == -1:
111 |     folds = range(start, end)
112 | else:
113 |     folds = range(args.fold, args.fold+1)
114 | ckpt_paths = [os.path.join(args.models_dir, 's_{}_best_checkpoint.pt'.format(fold)) for fold in folds]
115 | datasets_id = {'train': 0, 'val': 1, 'test': 2, 'all': -1}
116 | 
117 | if __name__ == "__main__":
118 |     all_results = []
119 |     all_auc = []
120 |     all_acc = []
121 | 
122 |     for ckpt_idx in range(len(ckpt_paths)):
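# Illustrative note (not from the original source): with --k 10 and the
# defaults above, folds = range(0, 10), so checkpoints s_0_best_checkpoint.pt
# through s_9_best_checkpoint.pt are evaluated in turn; passing --fold 3
# instead evaluates only s_3_best_checkpoint.pt.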
123 |         if datasets_id[args.split] < 0:
124 |             split_dataset = dataset
125 |         else:
126 |             csv_path = '{}/splits_{}.csv'.format(args.splits_dir, folds[ckpt_idx])
127 |             datasets = dataset.return_splits(from_id=False, csv_path=csv_path)
128 |             split_dataset = datasets[datasets_id[args.split]]
129 |         model, patient_results, test_error, auc, df = eval(split_dataset, args, ckpt_paths[ckpt_idx])
130 |         all_results.append(patient_results)
131 |         all_auc.append(auc)
132 |         all_acc.append(1-test_error)
133 |         df.to_csv(os.path.join(args.save_dir, 'fold_{}.csv'.format(folds[ckpt_idx])), index=False)
134 | 
135 |     final_df = pd.DataFrame({'folds': folds, 'test_auc': all_auc, 'test_acc': all_acc})
136 |     if len(folds) != args.k:
137 |         save_name = 'summary_partial_{}_{}.csv'.format(folds[0], folds[-1])
138 |     else:
139 |         save_name = 'summary.csv'
140 |     final_df.to_csv(os.path.join(args.save_dir, save_name))
141 | 
-------------------------------------------------------------------------------- /Slide-level multi-class subtyping task/splits/Camelyon16_subtyping2/splits_0.csv: --------------------------------------------------------------------------------
1 | train,val,test
2 | normal_001,normal_030,test_104
3 | normal_003,normal_043,test_083
4 | normal_004,normal_015,test_096
5 | normal_005,normal_093,test_094
6 | normal_006,normal_082,test_084
7 | normal_007,normal_020,test_106
8 | normal_008,normal_045,test_079
9 | normal_009,normal_012,test_109
10 | normal_010,normal_041,test_057
11 | normal_011,normal_099,test_012
12 | normal_016,normal_091,test_053
13 | normal_017,normal_096,test_121
14 | normal_018,normal_074,test_038
15 | normal_019,normal_107,test_046
16 | normal_021,normal_060,test_098
17 | normal_023,normal_092,test_009
18 | normal_024,tumor_020,test_125
19 | normal_025,tumor_060,test_007
20 | normal_026,tumor_016,test_089
21 | normal_027,tumor_100,test_054
22 | normal_028,tumor_061,test_036
23 | normal_029,tumor_064,test_043
24 | normal_031,tumor_088,test_017
25 | normal_033,tumor_011,test_016
26 | normal_034,tumor_021,test_033
27 | normal_035,tumor_106,test_061
28 | normal_036,tumor_008,test_058
29 | normal_037,,test_095
30 | normal_038,,test_014
31 | normal_039,,test_110
32 | normal_040,,test_031
33 | normal_044,,test_003
34 | normal_046,,test_059
35 | normal_047,,test_117
36 | normal_048,,test_047
37 | normal_049,,test_082
38 | normal_050,,test_093
39 | normal_051,,test_091
40 | normal_052,,test_029
41 | normal_053,,test_019
42 | normal_054,,test_072
43 | normal_055,,test_122
44 | normal_056,,test_006
45 | normal_057,,test_052
46 | normal_058,,test_076
47 | normal_059,,test_103
48 | normal_061,,test_028
49 | normal_062,,test_086
50 | normal_063,,test_090
51 | normal_064,,test_074
52 | normal_065,,test_099
53 | normal_066,,test_129
54 | normal_067,,test_070
55 | normal_068,,test_008
56 | normal_069,,test_105
57 | normal_070,,test_119
58 | normal_072,,test_035
59 | normal_073,,test_037
60 | normal_075,,test_002
61 | normal_076,,test_024
62 | normal_077,,test_018
63 | normal_078,,test_060
64 | normal_079,,test_078
65 | normal_080,,test_034
66 | normal_083,,test_023
67 | normal_084,,test_127
68 | normal_085,,test_001
69 | normal_087,,test_004
70 | normal_088,,test_042
71 | normal_089,,test_075
72 | normal_090,,test_066
73 | normal_094,,test_118
74 | normal_095,,test_056
75 | normal_097,,test_116
76 | normal_098,,test_015
77 | normal_100,,test_048
78 | normal_101,,test_071
79 | normal_103,,test_051
80 | normal_104,,test_067
81 | normal_106,,test_100
82 | normal_108,,test_114
83 | 
normal_110,,test_097 84 | normal_111,,test_081 85 | normal_112,,test_032 86 | normal_113,,test_092 87 | normal_114,,test_108 88 | normal_115,,test_062 89 | normal_117,,test_120 90 | normal_118,,test_073 91 | normal_119,,test_102 92 | normal_120,,test_045 93 | normal_121,,test_128 94 | normal_122,,test_011 95 | normal_123,,test_085 96 | normal_124,,test_064 97 | normal_125,,test_010 98 | normal_126,,test_055 99 | normal_127,,test_101 100 | normal_129,,test_130 101 | normal_130,,test_005 102 | normal_131,,test_025 103 | normal_133,,test_107 104 | normal_134,,test_087 105 | normal_135,,test_020 106 | normal_136,,test_068 107 | normal_137,,test_080 108 | normal_138,,test_040 109 | normal_139,,test_069 110 | normal_140,,test_123 111 | normal_141,,test_041 112 | normal_142,,test_021 113 | normal_143,,test_065 114 | normal_144,,test_111 115 | normal_145,,test_022 116 | normal_146,,test_027 117 | normal_147,,test_077 118 | normal_148,,test_126 119 | normal_149,,test_063 120 | normal_151,,test_113 121 | normal_152,,test_039 122 | normal_153,,test_026 123 | normal_154,,test_044 124 | normal_155,,test_112 125 | normal_156,,test_115 126 | normal_157,,test_124 127 | normal_159,,test_013 128 | normal_160,,test_050 129 | tumor_002,,test_030 130 | tumor_003,,test_088 131 | tumor_004,, 132 | tumor_005,, 133 | tumor_006,, 134 | tumor_007,, 135 | tumor_009,, 136 | tumor_010,, 137 | tumor_012,, 138 | tumor_013,, 139 | tumor_014,, 140 | tumor_015,, 141 | tumor_017,, 142 | tumor_018,, 143 | tumor_019,, 144 | tumor_022,, 145 | tumor_023,, 146 | tumor_024,, 147 | tumor_025,, 148 | tumor_026,, 149 | tumor_027,, 150 | tumor_028,, 151 | tumor_029,, 152 | tumor_030,, 153 | tumor_031,, 154 | tumor_032,, 155 | tumor_033,, 156 | tumor_034,, 157 | tumor_035,, 158 | tumor_036,, 159 | tumor_037,, 160 | tumor_038,, 161 | tumor_039,, 162 | tumor_040,, 163 | tumor_041,, 164 | tumor_042,, 165 | tumor_044,, 166 | tumor_046,, 167 | tumor_047,, 168 | tumor_048,, 169 | tumor_049,, 170 | tumor_050,, 171 | tumor_051,, 172 | tumor_053,, 173 | tumor_054,, 174 | tumor_055,, 175 | tumor_056,, 176 | tumor_057,, 177 | tumor_058,, 178 | tumor_059,, 179 | tumor_062,, 180 | tumor_063,, 181 | tumor_065,, 182 | tumor_066,, 183 | tumor_067,, 184 | tumor_068,, 185 | tumor_069,, 186 | tumor_070,, 187 | tumor_071,, 188 | tumor_072,, 189 | tumor_073,, 190 | tumor_074,, 191 | tumor_075,, 192 | tumor_076,, 193 | tumor_078,, 194 | tumor_079,, 195 | tumor_080,, 196 | tumor_081,, 197 | tumor_082,, 198 | tumor_083,, 199 | tumor_084,, 200 | tumor_085,, 201 | tumor_086,, 202 | tumor_090,, 203 | tumor_091,, 204 | tumor_092,, 205 | tumor_094,, 206 | tumor_095,, 207 | tumor_097,, 208 | tumor_098,, 209 | tumor_099,, 210 | tumor_101,, 211 | tumor_103,, 212 | tumor_104,, 213 | tumor_107,, 214 | tumor_108,, 215 | tumor_109,, 216 | tumor_110,, 217 | tumor_111,, 218 | normal_150,, 219 | normal_002,, 220 | normal_132,, 221 | normal_071,, 222 | normal_128,, 223 | normal_032,, 224 | normal_109,, 225 | normal_042,, 226 | normal_102,, 227 | normal_116,, 228 | normal_105,, 229 | normal_013,, 230 | normal_158,, 231 | normal_081,, 232 | normal_014,, 233 | normal_022,, 234 | tumor_102,, 235 | tumor_096,, 236 | tumor_043,, 237 | tumor_001,, 238 | tumor_077,, 239 | tumor_045,, 240 | tumor_052,, 241 | tumor_105,, 242 | tumor_089,, 243 | tumor_087,, 244 | tumor_093,, 245 | ,, 246 | ,, 247 | ,, 248 | ,, 249 | ,, 250 | ,, 251 | ,, 252 | ,, 253 | ,, 254 | ,, 255 | ,, 256 | ,, 257 | ,, 258 | ,, 259 | ,, 260 | ,, 261 | ,, 262 | ,, 263 | ,, 264 | ,, 265 | ,, 266 | ,, 267 | ,, 268 | 
,, 269 | ,, 270 | ,, 271 | ,, 272 | ,, 273 | ,, 274 | ,, 275 | ,, 276 | ,, 277 | ,, 278 | ,, 279 | ,, 280 | ,, 281 | ,, 282 | ,, 283 | ,, 284 | ,, 285 | ,, 286 | ,, 287 | ,, 288 | ,, 289 | ,, 290 | ,, 291 | ,, 292 | ,, 293 | ,, 294 | ,, 295 | ,, 296 | ,, 297 | ,, 298 | ,, 299 | ,, 300 | ,, 301 | ,, 302 | ,, 303 | ,, 304 | ,, 305 | ,, 306 | ,, 307 | ,, 308 | ,, 309 | ,, 310 | ,, 311 | ,, 312 | ,, 313 | ,, 314 | ,, 315 | ,, 316 | ,, 317 | ,, 318 | ,, 319 | ,, 320 | ,, 321 | ,, 322 | ,, 323 | ,, 324 | ,, 325 | ,, 326 | ,, 327 | ,, 328 | ,, 329 | ,, 330 | ,, 331 | ,, 332 | ,, 333 | ,, 334 | ,, 335 | ,, 336 | ,, 337 | ,, 338 | ,, 339 | ,, 340 | ,, 341 | ,, 342 | ,, 343 | ,, 344 | ,, 345 | ,, 346 | ,, 347 | ,, 348 | ,, 349 | ,, 350 | ,, 351 | ,, 352 | ,, 353 | ,, 354 | ,, 355 | ,, 356 | ,, 357 | ,, 358 | ,, 359 | ,, 360 | ,, 361 | ,, 362 | ,, 363 | ,, 364 | ,, 365 | ,, 366 | ,, 367 | ,, 368 | ,, 369 | ,, 370 | ,, 371 | ,, 372 | ,, 373 | ,, 374 | ,, 375 | ,, 376 | ,, 377 | ,, 378 | ,, 379 | ,, 380 | ,, 381 | ,, 382 | ,, 383 | ,, 384 | ,, 385 | ,, 386 | ,, 387 | ,, 388 | ,, 389 | ,, 390 | ,, 391 | ,, 392 | ,, 393 | ,, 394 | ,, 395 | ,, 396 | ,, 397 | ,, 398 | ,, 399 | ,, 400 | ,, 401 | ,, 402 | ,, 403 | ,, 404 | ,, 405 | ,, 406 | ,, 407 | ,, 408 | ,, 409 | ,, 410 | ,, 411 | ,, 412 | ,, 413 | ,, 414 | ,, 415 | ,, 416 | ,, 417 | ,, 418 | ,, 419 | ,, 420 | ,, 421 | ,, 422 | ,, 423 | ,, 424 | ,, 425 | ,, 426 | ,, 427 | ,, 428 | ,, 429 | ,, 430 | ,, 431 | ,, 432 | ,, 433 | ,, 434 | -------------------------------------------------------------------------------- /Slide-level multi-class subtyping task/splits/Camelyon16_subtyping2/splits_1.csv: -------------------------------------------------------------------------------- 1 | train,val,test 2 | normal_001,normal_025,test_104 3 | normal_002,normal_096,test_083 4 | normal_003,normal_155,test_096 5 | normal_004,normal_038,test_094 6 | normal_005,normal_056,test_084 7 | normal_008,normal_007,test_106 8 | normal_009,normal_147,test_079 9 | normal_012,normal_076,test_109 10 | normal_013,normal_121,test_057 11 | normal_015,normal_135,test_012 12 | normal_016,normal_010,test_053 13 | normal_017,normal_136,test_121 14 | normal_018,normal_138,test_038 15 | normal_019,normal_081,test_046 16 | normal_021,normal_087,test_098 17 | normal_022,normal_133,test_009 18 | normal_023,tumor_017,test_125 19 | normal_024,tumor_080,test_007 20 | normal_027,tumor_051,test_089 21 | normal_029,tumor_021,test_054 22 | normal_030,tumor_025,test_036 23 | normal_031,tumor_016,test_043 24 | normal_032,tumor_101,test_017 25 | normal_033,tumor_086,test_016 26 | normal_034,tumor_039,test_033 27 | normal_035,tumor_111,test_061 28 | normal_036,tumor_109,test_058 29 | normal_037,,test_095 30 | normal_039,,test_014 31 | normal_040,,test_110 32 | normal_041,,test_031 33 | normal_042,,test_003 34 | normal_043,,test_059 35 | normal_044,,test_117 36 | normal_045,,test_047 37 | normal_046,,test_082 38 | normal_047,,test_093 39 | normal_048,,test_091 40 | normal_049,,test_029 41 | normal_050,,test_019 42 | normal_051,,test_072 43 | normal_052,,test_122 44 | normal_053,,test_006 45 | normal_054,,test_052 46 | normal_055,,test_076 47 | normal_057,,test_103 48 | normal_058,,test_028 49 | normal_059,,test_086 50 | normal_060,,test_090 51 | normal_061,,test_074 52 | normal_062,,test_099 53 | normal_063,,test_129 54 | normal_064,,test_070 55 | normal_065,,test_008 56 | normal_066,,test_105 57 | normal_067,,test_119 58 | normal_068,,test_035 59 | normal_069,,test_037 60 | normal_071,,test_002 61 | 
normal_072,,test_024 62 | normal_073,,test_018 63 | normal_074,,test_060 64 | normal_075,,test_078 65 | normal_077,,test_034 66 | normal_078,,test_023 67 | normal_080,,test_127 68 | normal_082,,test_001 69 | normal_083,,test_004 70 | normal_084,,test_042 71 | normal_085,,test_075 72 | normal_089,,test_066 73 | normal_090,,test_118 74 | normal_091,,test_056 75 | normal_092,,test_116 76 | normal_093,,test_015 77 | normal_094,,test_048 78 | normal_095,,test_071 79 | normal_097,,test_051 80 | normal_099,,test_067 81 | normal_101,,test_100 82 | normal_102,,test_114 83 | normal_103,,test_097 84 | normal_104,,test_081 85 | normal_105,,test_032 86 | normal_106,,test_092 87 | normal_107,,test_108 88 | normal_108,,test_062 89 | normal_109,,test_120 90 | normal_110,,test_073 91 | normal_111,,test_102 92 | normal_112,,test_045 93 | normal_113,,test_128 94 | normal_114,,test_011 95 | normal_115,,test_085 96 | normal_116,,test_064 97 | normal_117,,test_010 98 | normal_118,,test_055 99 | normal_122,,test_101 100 | normal_123,,test_130 101 | normal_125,,test_005 102 | normal_126,,test_025 103 | normal_127,,test_107 104 | normal_128,,test_087 105 | normal_129,,test_020 106 | normal_130,,test_068 107 | normal_131,,test_080 108 | normal_132,,test_040 109 | normal_134,,test_069 110 | normal_137,,test_123 111 | normal_139,,test_041 112 | normal_140,,test_021 113 | normal_141,,test_065 114 | normal_142,,test_111 115 | normal_143,,test_022 116 | normal_144,,test_027 117 | normal_145,,test_077 118 | normal_148,,test_126 119 | normal_149,,test_063 120 | normal_150,,test_113 121 | normal_151,,test_039 122 | normal_152,,test_026 123 | normal_153,,test_044 124 | normal_154,,test_112 125 | normal_156,,test_115 126 | normal_158,,test_124 127 | normal_159,,test_013 128 | normal_160,,test_050 129 | tumor_001,,test_030 130 | tumor_002,,test_088 131 | tumor_003,, 132 | tumor_004,, 133 | tumor_005,, 134 | tumor_006,, 135 | tumor_007,, 136 | tumor_008,, 137 | tumor_009,, 138 | tumor_010,, 139 | tumor_011,, 140 | tumor_012,, 141 | tumor_013,, 142 | tumor_015,, 143 | tumor_018,, 144 | tumor_020,, 145 | tumor_022,, 146 | tumor_023,, 147 | tumor_026,, 148 | tumor_027,, 149 | tumor_028,, 150 | tumor_029,, 151 | tumor_030,, 152 | tumor_031,, 153 | tumor_032,, 154 | tumor_033,, 155 | tumor_034,, 156 | tumor_035,, 157 | tumor_036,, 158 | tumor_037,, 159 | tumor_038,, 160 | tumor_040,, 161 | tumor_041,, 162 | tumor_043,, 163 | tumor_044,, 164 | tumor_045,, 165 | tumor_046,, 166 | tumor_047,, 167 | tumor_048,, 168 | tumor_049,, 169 | tumor_052,, 170 | tumor_053,, 171 | tumor_054,, 172 | tumor_055,, 173 | tumor_056,, 174 | tumor_057,, 175 | tumor_058,, 176 | tumor_059,, 177 | tumor_060,, 178 | tumor_061,, 179 | tumor_062,, 180 | tumor_064,, 181 | tumor_065,, 182 | tumor_066,, 183 | tumor_068,, 184 | tumor_069,, 185 | tumor_070,, 186 | tumor_071,, 187 | tumor_072,, 188 | tumor_073,, 189 | tumor_075,, 190 | tumor_076,, 191 | tumor_077,, 192 | tumor_078,, 193 | tumor_079,, 194 | tumor_081,, 195 | tumor_082,, 196 | tumor_083,, 197 | tumor_085,, 198 | tumor_087,, 199 | tumor_088,, 200 | tumor_089,, 201 | tumor_090,, 202 | tumor_091,, 203 | tumor_092,, 204 | tumor_093,, 205 | tumor_094,, 206 | tumor_096,, 207 | tumor_097,, 208 | tumor_098,, 209 | tumor_099,, 210 | tumor_100,, 211 | tumor_102,, 212 | tumor_103,, 213 | tumor_104,, 214 | tumor_105,, 215 | tumor_107,, 216 | tumor_108,, 217 | tumor_110,, 218 | normal_011,, 219 | normal_014,, 220 | normal_119,, 221 | normal_146,, 222 | normal_020,, 223 | normal_070,, 224 | normal_098,, 225 | 
normal_079,, 226 | normal_157,, 227 | normal_088,, 228 | normal_026,, 229 | normal_120,, 230 | normal_100,, 231 | normal_028,, 232 | normal_006,, 233 | normal_124,, 234 | tumor_067,, 235 | tumor_084,, 236 | tumor_063,, 237 | tumor_014,, 238 | tumor_095,, 239 | tumor_019,, 240 | tumor_106,, 241 | tumor_042,, 242 | tumor_024,, 243 | tumor_050,, 244 | tumor_074,, 245 | ,, 246 | ,, 247 | ,, 248 | ,, 249 | ,, 250 | ,, 251 | ,, 252 | ,, 253 | ,, 254 | ,, 255 | ,, 256 | ,, 257 | ,, 258 | ,, 259 | ,, 260 | ,, 261 | ,, 262 | ,, 263 | ,, 264 | ,, 265 | ,, 266 | ,, 267 | ,, 268 | ,, 269 | ,, 270 | ,, 271 | ,, 272 | ,, 273 | ,, 274 | ,, 275 | ,, 276 | ,, 277 | ,, 278 | ,, 279 | ,, 280 | ,, 281 | ,, 282 | ,, 283 | ,, 284 | ,, 285 | ,, 286 | ,, 287 | ,, 288 | ,, 289 | ,, 290 | ,, 291 | ,, 292 | ,, 293 | ,, 294 | ,, 295 | ,, 296 | ,, 297 | ,, 298 | ,, 299 | ,, 300 | ,, 301 | ,, 302 | ,, 303 | ,, 304 | ,, 305 | ,, 306 | ,, 307 | ,, 308 | ,, 309 | ,, 310 | ,, 311 | ,, 312 | ,, 313 | ,, 314 | ,, 315 | ,, 316 | ,, 317 | ,, 318 | ,, 319 | ,, 320 | ,, 321 | ,, 322 | ,, 323 | ,, 324 | ,, 325 | ,, 326 | ,, 327 | ,, 328 | ,, 329 | ,, 330 | ,, 331 | ,, 332 | ,, 333 | ,, 334 | ,, 335 | ,, 336 | ,, 337 | ,, 338 | ,, 339 | ,, 340 | ,, 341 | ,, 342 | ,, 343 | ,, 344 | ,, 345 | ,, 346 | ,, 347 | ,, 348 | ,, 349 | ,, 350 | ,, 351 | ,, 352 | ,, 353 | ,, 354 | ,, 355 | ,, 356 | ,, 357 | ,, 358 | ,, 359 | ,, 360 | ,, 361 | ,, 362 | ,, 363 | ,, 364 | ,, 365 | ,, 366 | ,, 367 | ,, 368 | ,, 369 | ,, 370 | ,, 371 | ,, 372 | ,, 373 | ,, 374 | ,, 375 | ,, 376 | ,, 377 | ,, 378 | ,, 379 | ,, 380 | ,, 381 | ,, 382 | ,, 383 | ,, 384 | ,, 385 | ,, 386 | ,, 387 | ,, 388 | ,, 389 | ,, 390 | ,, 391 | ,, 392 | ,, 393 | ,, 394 | ,, 395 | ,, 396 | ,, 397 | ,, 398 | ,, 399 | ,, 400 | ,, 401 | ,, 402 | ,, 403 | ,, 404 | ,, 405 | ,, 406 | ,, 407 | ,, 408 | ,, 409 | ,, 410 | ,, 411 | ,, 412 | ,, 413 | ,, 414 | ,, 415 | ,, 416 | ,, 417 | ,, 418 | ,, 419 | ,, 420 | ,, 421 | ,, 422 | ,, 423 | ,, 424 | ,, 425 | ,, 426 | ,, 427 | ,, 428 | ,, 429 | ,, 430 | ,, 431 | ,, 432 | ,, 433 | ,, 434 | -------------------------------------------------------------------------------- /Slide-level multi-class subtyping task/splits/Camelyon16_subtyping2/splits_2.csv: -------------------------------------------------------------------------------- 1 | train,val,test 2 | normal_001,normal_126,test_104 3 | normal_002,normal_013,test_083 4 | normal_003,normal_056,test_096 5 | normal_004,normal_059,test_094 6 | normal_005,normal_054,test_084 7 | normal_007,normal_006,test_106 8 | normal_008,normal_046,test_079 9 | normal_009,normal_050,test_109 10 | normal_010,normal_034,test_057 11 | normal_011,normal_040,test_012 12 | normal_012,normal_154,test_053 13 | normal_014,normal_085,test_121 14 | normal_015,normal_100,test_038 15 | normal_016,normal_057,test_046 16 | normal_017,normal_041,test_098 17 | normal_018,normal_073,test_009 18 | normal_019,tumor_071,test_125 19 | normal_020,tumor_024,test_007 20 | normal_021,tumor_023,test_089 21 | normal_022,tumor_026,test_054 22 | normal_023,tumor_040,test_036 23 | normal_024,tumor_054,test_043 24 | normal_025,tumor_044,test_017 25 | normal_026,tumor_049,test_016 26 | normal_027,tumor_059,test_033 27 | normal_028,tumor_096,test_061 28 | normal_029,tumor_009,test_058 29 | normal_030,,test_095 30 | normal_032,,test_014 31 | normal_035,,test_110 32 | normal_037,,test_031 33 | normal_038,,test_003 34 | normal_039,,test_059 35 | normal_042,,test_117 36 | normal_044,,test_047 37 | normal_045,,test_082 38 | normal_047,,test_093 39 | 
normal_048,,test_091 40 | normal_049,,test_029 41 | normal_052,,test_019 42 | normal_053,,test_072 43 | normal_055,,test_122 44 | normal_058,,test_006 45 | normal_061,,test_052 46 | normal_062,,test_076 47 | normal_063,,test_103 48 | normal_064,,test_028 49 | normal_066,,test_086 50 | normal_067,,test_090 51 | normal_068,,test_074 52 | normal_069,,test_099 53 | normal_070,,test_129 54 | normal_071,,test_070 55 | normal_072,,test_008 56 | normal_074,,test_105 57 | normal_075,,test_119 58 | normal_076,,test_035 59 | normal_079,,test_037 60 | normal_080,,test_002 61 | normal_081,,test_024 62 | normal_082,,test_018 63 | normal_084,,test_060 64 | normal_087,,test_078 65 | normal_088,,test_034 66 | normal_089,,test_023 67 | normal_090,,test_127 68 | normal_091,,test_001 69 | normal_092,,test_004 70 | normal_093,,test_042 71 | normal_094,,test_075 72 | normal_095,,test_066 73 | normal_096,,test_118 74 | normal_097,,test_056 75 | normal_098,,test_116 76 | normal_099,,test_015 77 | normal_101,,test_048 78 | normal_102,,test_071 79 | normal_103,,test_051 80 | normal_104,,test_067 81 | normal_105,,test_100 82 | normal_106,,test_114 83 | normal_108,,test_097 84 | normal_109,,test_081 85 | normal_110,,test_032 86 | normal_111,,test_092 87 | normal_112,,test_108 88 | normal_113,,test_062 89 | normal_114,,test_120 90 | normal_115,,test_073 91 | normal_116,,test_102 92 | normal_117,,test_045 93 | normal_118,,test_128 94 | normal_119,,test_011 95 | normal_120,,test_085 96 | normal_121,,test_064 97 | normal_122,,test_010 98 | normal_123,,test_055 99 | normal_124,,test_101 100 | normal_125,,test_130 101 | normal_127,,test_005 102 | normal_128,,test_025 103 | normal_129,,test_107 104 | normal_130,,test_087 105 | normal_131,,test_020 106 | normal_132,,test_068 107 | normal_133,,test_080 108 | normal_135,,test_040 109 | normal_136,,test_069 110 | normal_137,,test_123 111 | normal_138,,test_041 112 | normal_139,,test_021 113 | normal_140,,test_065 114 | normal_141,,test_111 115 | normal_142,,test_022 116 | normal_143,,test_027 117 | normal_145,,test_077 118 | normal_147,,test_126 119 | normal_148,,test_063 120 | normal_150,,test_113 121 | normal_151,,test_039 122 | normal_152,,test_026 123 | normal_153,,test_044 124 | normal_155,,test_112 125 | normal_156,,test_115 126 | normal_157,,test_124 127 | normal_158,,test_013 128 | normal_159,,test_050 129 | tumor_001,,test_030 130 | tumor_003,,test_088 131 | tumor_004,, 132 | tumor_005,, 133 | tumor_006,, 134 | tumor_007,, 135 | tumor_008,, 136 | tumor_010,, 137 | tumor_011,, 138 | tumor_012,, 139 | tumor_015,, 140 | tumor_016,, 141 | tumor_017,, 142 | tumor_018,, 143 | tumor_019,, 144 | tumor_020,, 145 | tumor_021,, 146 | tumor_022,, 147 | tumor_025,, 148 | tumor_027,, 149 | tumor_030,, 150 | tumor_031,, 151 | tumor_032,, 152 | tumor_033,, 153 | tumor_034,, 154 | tumor_035,, 155 | tumor_036,, 156 | tumor_037,, 157 | tumor_038,, 158 | tumor_039,, 159 | tumor_041,, 160 | tumor_042,, 161 | tumor_043,, 162 | tumor_045,, 163 | tumor_046,, 164 | tumor_047,, 165 | tumor_048,, 166 | tumor_050,, 167 | tumor_051,, 168 | tumor_052,, 169 | tumor_055,, 170 | tumor_056,, 171 | tumor_057,, 172 | tumor_058,, 173 | tumor_060,, 174 | tumor_061,, 175 | tumor_062,, 176 | tumor_063,, 177 | tumor_065,, 178 | tumor_066,, 179 | tumor_067,, 180 | tumor_068,, 181 | tumor_069,, 182 | tumor_070,, 183 | tumor_072,, 184 | tumor_073,, 185 | tumor_074,, 186 | tumor_075,, 187 | tumor_076,, 188 | tumor_077,, 189 | tumor_078,, 190 | tumor_079,, 191 | tumor_080,, 192 | tumor_081,, 193 | tumor_083,, 194 
| tumor_084,, 195 | tumor_085,, 196 | tumor_086,, 197 | tumor_087,, 198 | tumor_088,, 199 | tumor_089,, 200 | tumor_090,, 201 | tumor_091,, 202 | tumor_092,, 203 | tumor_093,, 204 | tumor_095,, 205 | tumor_097,, 206 | tumor_098,, 207 | tumor_099,, 208 | tumor_100,, 209 | tumor_101,, 210 | tumor_102,, 211 | tumor_103,, 212 | tumor_104,, 213 | tumor_105,, 214 | tumor_106,, 215 | tumor_108,, 216 | tumor_109,, 217 | tumor_110,, 218 | normal_107,, 219 | normal_077,, 220 | normal_060,, 221 | normal_083,, 222 | normal_031,, 223 | normal_134,, 224 | normal_160,, 225 | normal_146,, 226 | normal_065,, 227 | normal_043,, 228 | normal_149,, 229 | normal_033,, 230 | normal_051,, 231 | normal_144,, 232 | normal_036,, 233 | normal_078,, 234 | tumor_013,, 235 | tumor_028,, 236 | tumor_002,, 237 | tumor_029,, 238 | tumor_107,, 239 | tumor_082,, 240 | tumor_014,, 241 | tumor_094,, 242 | tumor_064,, 243 | tumor_053,, 244 | tumor_111,, 245 | ,, 246 | ,, 247 | ,, 248 | ,, 249 | ,, 250 | ,, 251 | ,, 252 | ,, 253 | ,, 254 | ,, 255 | ,, 256 | ,, 257 | ,, 258 | ,, 259 | ,, 260 | ,, 261 | ,, 262 | ,, 263 | ,, 264 | ,, 265 | ,, 266 | ,, 267 | ,, 268 | ,, 269 | ,, 270 | ,, 271 | ,, 272 | ,, 273 | ,, 274 | ,, 275 | ,, 276 | ,, 277 | ,, 278 | ,, 279 | ,, 280 | ,, 281 | ,, 282 | ,, 283 | ,, 284 | ,, 285 | ,, 286 | ,, 287 | ,, 288 | ,, 289 | ,, 290 | ,, 291 | ,, 292 | ,, 293 | ,, 294 | ,, 295 | ,, 296 | ,, 297 | ,, 298 | ,, 299 | ,, 300 | ,, 301 | ,, 302 | ,, 303 | ,, 304 | ,, 305 | ,, 306 | ,, 307 | ,, 308 | ,, 309 | ,, 310 | ,, 311 | ,, 312 | ,, 313 | ,, 314 | ,, 315 | ,, 316 | ,, 317 | ,, 318 | ,, 319 | ,, 320 | ,, 321 | ,, 322 | ,, 323 | ,, 324 | ,, 325 | ,, 326 | ,, 327 | ,, 328 | ,, 329 | ,, 330 | ,, 331 | ,, 332 | ,, 333 | ,, 334 | ,, 335 | ,, 336 | ,, 337 | ,, 338 | ,, 339 | ,, 340 | ,, 341 | ,, 342 | ,, 343 | ,, 344 | ,, 345 | ,, 346 | ,, 347 | ,, 348 | ,, 349 | ,, 350 | ,, 351 | ,, 352 | ,, 353 | ,, 354 | ,, 355 | ,, 356 | ,, 357 | ,, 358 | ,, 359 | ,, 360 | ,, 361 | ,, 362 | ,, 363 | ,, 364 | ,, 365 | ,, 366 | ,, 367 | ,, 368 | ,, 369 | ,, 370 | ,, 371 | ,, 372 | ,, 373 | ,, 374 | ,, 375 | ,, 376 | ,, 377 | ,, 378 | ,, 379 | ,, 380 | ,, 381 | ,, 382 | ,, 383 | ,, 384 | ,, 385 | ,, 386 | ,, 387 | ,, 388 | ,, 389 | ,, 390 | ,, 391 | ,, 392 | ,, 393 | ,, 394 | ,, 395 | ,, 396 | ,, 397 | ,, 398 | ,, 399 | ,, 400 | ,, 401 | ,, 402 | ,, 403 | ,, 404 | ,, 405 | ,, 406 | ,, 407 | ,, 408 | ,, 409 | ,, 410 | ,, 411 | ,, 412 | ,, 413 | ,, 414 | ,, 415 | ,, 416 | ,, 417 | ,, 418 | ,, 419 | ,, 420 | ,, 421 | ,, 422 | ,, 423 | ,, 424 | ,, 425 | ,, 426 | ,, 427 | ,, 428 | ,, 429 | ,, 430 | ,, 431 | ,, 432 | ,, 433 | ,, 434 | -------------------------------------------------------------------------------- /Slide-level multi-class subtyping task/splits/Camelyon16_subtyping2/splits_3.csv: -------------------------------------------------------------------------------- 1 | train,val,test 2 | normal_001,normal_139,test_104 3 | normal_002,normal_071,test_083 4 | normal_003,normal_062,test_096 5 | normal_004,normal_160,test_094 6 | normal_005,normal_143,test_084 7 | normal_006,normal_138,test_106 8 | normal_007,normal_158,test_079 9 | normal_008,normal_135,test_109 10 | normal_009,normal_036,test_057 11 | normal_010,normal_149,test_012 12 | normal_011,normal_056,test_053 13 | normal_012,normal_048,test_121 14 | normal_013,normal_129,test_038 15 | normal_014,normal_105,test_046 16 | normal_015,normal_108,test_098 17 | normal_016,normal_111,test_009 18 | normal_017,tumor_088,test_125 19 | normal_019,tumor_036,test_007 20 | 
normal_021,tumor_105,test_089 21 | normal_022,tumor_100,test_054 22 | normal_023,tumor_085,test_036 23 | normal_026,tumor_049,test_043 24 | normal_027,tumor_069,test_017 25 | normal_028,tumor_101,test_016 26 | normal_029,tumor_029,test_033 27 | normal_030,tumor_044,test_061 28 | normal_031,tumor_014,test_058 29 | normal_032,,test_095 30 | normal_033,,test_014 31 | normal_034,,test_110 32 | normal_035,,test_031 33 | normal_037,,test_003 34 | normal_038,,test_059 35 | normal_039,,test_117 36 | normal_040,,test_047 37 | normal_041,,test_082 38 | normal_042,,test_093 39 | normal_044,,test_091 40 | normal_045,,test_029 41 | normal_046,,test_019 42 | normal_047,,test_072 43 | normal_049,,test_122 44 | normal_050,,test_006 45 | normal_051,,test_052 46 | normal_053,,test_076 47 | normal_054,,test_103 48 | normal_055,,test_028 49 | normal_057,,test_086 50 | normal_058,,test_090 51 | normal_059,,test_074 52 | normal_060,,test_099 53 | normal_061,,test_129 54 | normal_064,,test_070 55 | normal_065,,test_008 56 | normal_066,,test_105 57 | normal_067,,test_119 58 | normal_068,,test_035 59 | normal_069,,test_037 60 | normal_070,,test_002 61 | normal_072,,test_024 62 | normal_073,,test_018 63 | normal_074,,test_060 64 | normal_075,,test_078 65 | normal_076,,test_034 66 | normal_077,,test_023 67 | normal_078,,test_127 68 | normal_079,,test_001 69 | normal_080,,test_004 70 | normal_081,,test_042 71 | normal_082,,test_075 72 | normal_083,,test_066 73 | normal_084,,test_118 74 | normal_085,,test_056 75 | normal_087,,test_116 76 | normal_088,,test_015 77 | normal_089,,test_048 78 | normal_090,,test_071 79 | normal_091,,test_051 80 | normal_092,,test_067 81 | normal_094,,test_100 82 | normal_095,,test_114 83 | normal_096,,test_097 84 | normal_097,,test_081 85 | normal_099,,test_032 86 | normal_100,,test_092 87 | normal_101,,test_108 88 | normal_102,,test_062 89 | normal_104,,test_120 90 | normal_106,,test_073 91 | normal_107,,test_102 92 | normal_110,,test_045 93 | normal_113,,test_128 94 | normal_114,,test_011 95 | normal_116,,test_085 96 | normal_117,,test_064 97 | normal_118,,test_010 98 | normal_119,,test_055 99 | normal_120,,test_101 100 | normal_121,,test_130 101 | normal_122,,test_005 102 | normal_123,,test_025 103 | normal_124,,test_107 104 | normal_125,,test_087 105 | normal_126,,test_020 106 | normal_127,,test_068 107 | normal_128,,test_080 108 | normal_130,,test_040 109 | normal_131,,test_069 110 | normal_133,,test_123 111 | normal_134,,test_041 112 | normal_136,,test_021 113 | normal_137,,test_065 114 | normal_141,,test_111 115 | normal_142,,test_022 116 | normal_144,,test_027 117 | normal_145,,test_077 118 | normal_146,,test_126 119 | normal_147,,test_063 120 | normal_148,,test_113 121 | normal_150,,test_039 122 | normal_151,,test_026 123 | normal_152,,test_044 124 | normal_153,,test_112 125 | normal_154,,test_115 126 | normal_155,,test_124 127 | normal_156,,test_013 128 | normal_157,,test_050 129 | tumor_001,,test_030 130 | tumor_002,,test_088 131 | tumor_003,, 132 | tumor_005,, 133 | tumor_006,, 134 | tumor_007,, 135 | tumor_008,, 136 | tumor_010,, 137 | tumor_012,, 138 | tumor_013,, 139 | tumor_015,, 140 | tumor_016,, 141 | tumor_018,, 142 | tumor_019,, 143 | tumor_020,, 144 | tumor_022,, 145 | tumor_023,, 146 | tumor_024,, 147 | tumor_025,, 148 | tumor_026,, 149 | tumor_028,, 150 | tumor_030,, 151 | tumor_031,, 152 | tumor_032,, 153 | tumor_034,, 154 | tumor_035,, 155 | tumor_037,, 156 | tumor_038,, 157 | tumor_039,, 158 | tumor_040,, 159 | tumor_041,, 160 | tumor_042,, 161 | tumor_043,, 162 | 
tumor_045,, 163 | tumor_047,, 164 | tumor_050,, 165 | tumor_051,, 166 | tumor_052,, 167 | tumor_053,, 168 | tumor_054,, 169 | tumor_055,, 170 | tumor_056,, 171 | tumor_057,, 172 | tumor_058,, 173 | tumor_059,, 174 | tumor_060,, 175 | tumor_061,, 176 | tumor_062,, 177 | tumor_063,, 178 | tumor_064,, 179 | tumor_065,, 180 | tumor_066,, 181 | tumor_067,, 182 | tumor_068,, 183 | tumor_070,, 184 | tumor_071,, 185 | tumor_073,, 186 | tumor_074,, 187 | tumor_075,, 188 | tumor_076,, 189 | tumor_077,, 190 | tumor_078,, 191 | tumor_079,, 192 | tumor_080,, 193 | tumor_081,, 194 | tumor_082,, 195 | tumor_083,, 196 | tumor_084,, 197 | tumor_086,, 198 | tumor_087,, 199 | tumor_089,, 200 | tumor_090,, 201 | tumor_091,, 202 | tumor_092,, 203 | tumor_093,, 204 | tumor_094,, 205 | tumor_095,, 206 | tumor_096,, 207 | tumor_098,, 208 | tumor_099,, 209 | tumor_102,, 210 | tumor_103,, 211 | tumor_104,, 212 | tumor_106,, 213 | tumor_107,, 214 | tumor_108,, 215 | tumor_109,, 216 | tumor_110,, 217 | tumor_111,, 218 | normal_020,, 219 | normal_159,, 220 | normal_132,, 221 | normal_018,, 222 | normal_063,, 223 | normal_052,, 224 | normal_093,, 225 | normal_103,, 226 | normal_043,, 227 | normal_140,, 228 | normal_112,, 229 | normal_109,, 230 | normal_098,, 231 | normal_024,, 232 | normal_115,, 233 | normal_025,, 234 | tumor_033,, 235 | tumor_072,, 236 | tumor_021,, 237 | tumor_048,, 238 | tumor_004,, 239 | tumor_097,, 240 | tumor_009,, 241 | tumor_046,, 242 | tumor_027,, 243 | tumor_011,, 244 | tumor_017,, 245 | ,, 246 | ,, 247 | ,, 248 | ,, 249 | ,, 250 | ,, 251 | ,, 252 | ,, 253 | ,, 254 | ,, 255 | ,, 256 | ,, 257 | ,, 258 | ,, 259 | ,, 260 | ,, 261 | ,, 262 | ,, 263 | ,, 264 | ,, 265 | ,, 266 | ,, 267 | ,, 268 | ,, 269 | ,, 270 | ,, 271 | ,, 272 | ,, 273 | ,, 274 | ,, 275 | ,, 276 | ,, 277 | ,, 278 | ,, 279 | ,, 280 | ,, 281 | ,, 282 | ,, 283 | ,, 284 | ,, 285 | ,, 286 | ,, 287 | ,, 288 | ,, 289 | ,, 290 | ,, 291 | ,, 292 | ,, 293 | ,, 294 | ,, 295 | ,, 296 | ,, 297 | ,, 298 | ,, 299 | ,, 300 | ,, 301 | ,, 302 | ,, 303 | ,, 304 | ,, 305 | ,, 306 | ,, 307 | ,, 308 | ,, 309 | ,, 310 | ,, 311 | ,, 312 | ,, 313 | ,, 314 | ,, 315 | ,, 316 | ,, 317 | ,, 318 | ,, 319 | ,, 320 | ,, 321 | ,, 322 | ,, 323 | ,, 324 | ,, 325 | ,, 326 | ,, 327 | ,, 328 | ,, 329 | ,, 330 | ,, 331 | ,, 332 | ,, 333 | ,, 334 | ,, 335 | ,, 336 | ,, 337 | ,, 338 | ,, 339 | ,, 340 | ,, 341 | ,, 342 | ,, 343 | ,, 344 | ,, 345 | ,, 346 | ,, 347 | ,, 348 | ,, 349 | ,, 350 | ,, 351 | ,, 352 | ,, 353 | ,, 354 | ,, 355 | ,, 356 | ,, 357 | ,, 358 | ,, 359 | ,, 360 | ,, 361 | ,, 362 | ,, 363 | ,, 364 | ,, 365 | ,, 366 | ,, 367 | ,, 368 | ,, 369 | ,, 370 | ,, 371 | ,, 372 | ,, 373 | ,, 374 | ,, 375 | ,, 376 | ,, 377 | ,, 378 | ,, 379 | ,, 380 | ,, 381 | ,, 382 | ,, 383 | ,, 384 | ,, 385 | ,, 386 | ,, 387 | ,, 388 | ,, 389 | ,, 390 | ,, 391 | ,, 392 | ,, 393 | ,, 394 | ,, 395 | ,, 396 | ,, 397 | ,, 398 | ,, 399 | ,, 400 | ,, 401 | ,, 402 | ,, 403 | ,, 404 | ,, 405 | ,, 406 | ,, 407 | ,, 408 | ,, 409 | ,, 410 | ,, 411 | ,, 412 | ,, 413 | ,, 414 | ,, 415 | ,, 416 | ,, 417 | ,, 418 | ,, 419 | ,, 420 | ,, 421 | ,, 422 | ,, 423 | ,, 424 | ,, 425 | ,, 426 | ,, 427 | ,, 428 | ,, 429 | ,, 430 | ,, 431 | ,, 432 | ,, 433 | ,, 434 | -------------------------------------------------------------------------------- /Slide-level multi-class subtyping task/splits/Camelyon16_subtyping2/splits_4.csv: -------------------------------------------------------------------------------- 1 | train,val,test 2 | normal_001,normal_043,test_104 3 | normal_003,normal_093,test_083 4 | 
normal_004,normal_002,test_096 5 | normal_005,normal_123,test_094 6 | normal_006,normal_088,test_084 7 | normal_008,normal_110,test_106 8 | normal_009,normal_078,test_079 9 | normal_010,normal_138,test_109 10 | normal_011,normal_035,test_057 11 | normal_013,normal_063,test_012 12 | normal_014,normal_007,test_053 13 | normal_015,normal_023,test_121 14 | normal_016,normal_111,test_038 15 | normal_017,normal_057,test_046 16 | normal_018,normal_090,test_098 17 | normal_019,normal_052,test_009 18 | normal_022,tumor_095,test_125 19 | normal_024,tumor_029,test_007 20 | normal_025,tumor_081,test_089 21 | normal_026,tumor_091,test_054 22 | normal_027,tumor_064,test_036 23 | normal_028,tumor_052,test_043 24 | normal_029,tumor_006,test_017 25 | normal_030,tumor_012,test_016 26 | normal_031,tumor_100,test_033 27 | normal_032,tumor_073,test_061 28 | normal_034,tumor_104,test_058 29 | normal_036,,test_095 30 | normal_037,,test_014 31 | normal_038,,test_110 32 | normal_039,,test_031 33 | normal_040,,test_003 34 | normal_041,,test_059 35 | normal_042,,test_117 36 | normal_044,,test_047 37 | normal_046,,test_082 38 | normal_047,,test_093 39 | normal_049,,test_091 40 | normal_050,,test_029 41 | normal_051,,test_019 42 | normal_053,,test_072 43 | normal_054,,test_122 44 | normal_055,,test_006 45 | normal_056,,test_052 46 | normal_058,,test_076 47 | normal_059,,test_103 48 | normal_060,,test_028 49 | normal_062,,test_086 50 | normal_064,,test_090 51 | normal_065,,test_074 52 | normal_066,,test_099 53 | normal_067,,test_129 54 | normal_068,,test_070 55 | normal_069,,test_008 56 | normal_070,,test_105 57 | normal_071,,test_119 58 | normal_073,,test_035 59 | normal_074,,test_037 60 | normal_075,,test_002 61 | normal_076,,test_024 62 | normal_077,,test_018 63 | normal_079,,test_060 64 | normal_080,,test_078 65 | normal_081,,test_034 66 | normal_082,,test_023 67 | normal_083,,test_127 68 | normal_084,,test_001 69 | normal_085,,test_004 70 | normal_087,,test_042 71 | normal_089,,test_075 72 | normal_091,,test_066 73 | normal_094,,test_118 74 | normal_095,,test_056 75 | normal_096,,test_116 76 | normal_097,,test_015 77 | normal_098,,test_048 78 | normal_099,,test_071 79 | normal_100,,test_051 80 | normal_101,,test_067 81 | normal_102,,test_100 82 | normal_103,,test_114 83 | normal_105,,test_097 84 | normal_106,,test_081 85 | normal_107,,test_032 86 | normal_108,,test_092 87 | normal_109,,test_108 88 | normal_112,,test_062 89 | normal_113,,test_120 90 | normal_114,,test_073 91 | normal_116,,test_102 92 | normal_117,,test_045 93 | normal_118,,test_128 94 | normal_119,,test_011 95 | normal_120,,test_085 96 | normal_121,,test_064 97 | normal_122,,test_010 98 | normal_124,,test_055 99 | normal_125,,test_101 100 | normal_126,,test_130 101 | normal_127,,test_005 102 | normal_128,,test_025 103 | normal_130,,test_107 104 | normal_131,,test_087 105 | normal_132,,test_020 106 | normal_133,,test_068 107 | normal_134,,test_080 108 | normal_135,,test_040 109 | normal_136,,test_069 110 | normal_137,,test_123 111 | normal_139,,test_041 112 | normal_141,,test_021 113 | normal_142,,test_065 114 | normal_143,,test_111 115 | normal_144,,test_022 116 | normal_145,,test_027 117 | normal_146,,test_077 118 | normal_147,,test_126 119 | normal_148,,test_063 120 | normal_149,,test_113 121 | normal_151,,test_039 122 | normal_153,,test_026 123 | normal_154,,test_044 124 | normal_155,,test_112 125 | normal_156,,test_115 126 | normal_157,,test_124 127 | normal_158,,test_013 128 | normal_160,,test_050 129 | tumor_001,,test_030 130 | 
tumor_002,,test_088 131 | tumor_003,, 132 | tumor_004,, 133 | tumor_005,, 134 | tumor_007,, 135 | tumor_008,, 136 | tumor_009,, 137 | tumor_010,, 138 | tumor_011,, 139 | tumor_013,, 140 | tumor_014,, 141 | tumor_015,, 142 | tumor_016,, 143 | tumor_017,, 144 | tumor_018,, 145 | tumor_019,, 146 | tumor_020,, 147 | tumor_021,, 148 | tumor_022,, 149 | tumor_023,, 150 | tumor_024,, 151 | tumor_025,, 152 | tumor_027,, 153 | tumor_028,, 154 | tumor_030,, 155 | tumor_031,, 156 | tumor_032,, 157 | tumor_033,, 158 | tumor_034,, 159 | tumor_035,, 160 | tumor_036,, 161 | tumor_037,, 162 | tumor_038,, 163 | tumor_039,, 164 | tumor_040,, 165 | tumor_041,, 166 | tumor_042,, 167 | tumor_043,, 168 | tumor_044,, 169 | tumor_045,, 170 | tumor_046,, 171 | tumor_048,, 172 | tumor_050,, 173 | tumor_051,, 174 | tumor_053,, 175 | tumor_054,, 176 | tumor_055,, 177 | tumor_056,, 178 | tumor_057,, 179 | tumor_058,, 180 | tumor_059,, 181 | tumor_060,, 182 | tumor_061,, 183 | tumor_062,, 184 | tumor_063,, 185 | tumor_066,, 186 | tumor_067,, 187 | tumor_068,, 188 | tumor_069,, 189 | tumor_071,, 190 | tumor_072,, 191 | tumor_074,, 192 | tumor_075,, 193 | tumor_076,, 194 | tumor_077,, 195 | tumor_078,, 196 | tumor_079,, 197 | tumor_080,, 198 | tumor_082,, 199 | tumor_083,, 200 | tumor_084,, 201 | tumor_085,, 202 | tumor_086,, 203 | tumor_088,, 204 | tumor_089,, 205 | tumor_090,, 206 | tumor_092,, 207 | tumor_093,, 208 | tumor_094,, 209 | tumor_096,, 210 | tumor_097,, 211 | tumor_098,, 212 | tumor_101,, 213 | tumor_102,, 214 | tumor_106,, 215 | tumor_108,, 216 | tumor_109,, 217 | tumor_111,, 218 | normal_129,, 219 | normal_159,, 220 | normal_150,, 221 | normal_045,, 222 | normal_021,, 223 | normal_092,, 224 | normal_020,, 225 | normal_012,, 226 | normal_072,, 227 | normal_033,, 228 | normal_061,, 229 | normal_140,, 230 | normal_104,, 231 | normal_115,, 232 | normal_048,, 233 | normal_152,, 234 | tumor_087,, 235 | tumor_070,, 236 | tumor_103,, 237 | tumor_105,, 238 | tumor_065,, 239 | tumor_110,, 240 | tumor_047,, 241 | tumor_099,, 242 | tumor_107,, 243 | tumor_049,, 244 | tumor_026,, 245 | ,, 246 | ,, 247 | ,, 248 | ,, 249 | ,, 250 | ,, 251 | ,, 252 | ,, 253 | ,, 254 | ,, 255 | ,, 256 | ,, 257 | ,, 258 | ,, 259 | ,, 260 | ,, 261 | ,, 262 | ,, 263 | ,, 264 | ,, 265 | ,, 266 | ,, 267 | ,, 268 | ,, 269 | ,, 270 | ,, 271 | ,, 272 | ,, 273 | ,, 274 | ,, 275 | ,, 276 | ,, 277 | ,, 278 | ,, 279 | ,, 280 | ,, 281 | ,, 282 | ,, 283 | ,, 284 | ,, 285 | ,, 286 | ,, 287 | ,, 288 | ,, 289 | ,, 290 | ,, 291 | ,, 292 | ,, 293 | ,, 294 | ,, 295 | ,, 296 | ,, 297 | ,, 298 | ,, 299 | ,, 300 | ,, 301 | ,, 302 | ,, 303 | ,, 304 | ,, 305 | ,, 306 | ,, 307 | ,, 308 | ,, 309 | ,, 310 | ,, 311 | ,, 312 | ,, 313 | ,, 314 | ,, 315 | ,, 316 | ,, 317 | ,, 318 | ,, 319 | ,, 320 | ,, 321 | ,, 322 | ,, 323 | ,, 324 | ,, 325 | ,, 326 | ,, 327 | ,, 328 | ,, 329 | ,, 330 | ,, 331 | ,, 332 | ,, 333 | ,, 334 | ,, 335 | ,, 336 | ,, 337 | ,, 338 | ,, 339 | ,, 340 | ,, 341 | ,, 342 | ,, 343 | ,, 344 | ,, 345 | ,, 346 | ,, 347 | ,, 348 | ,, 349 | ,, 350 | ,, 351 | ,, 352 | ,, 353 | ,, 354 | ,, 355 | ,, 356 | ,, 357 | ,, 358 | ,, 359 | ,, 360 | ,, 361 | ,, 362 | ,, 363 | ,, 364 | ,, 365 | ,, 366 | ,, 367 | ,, 368 | ,, 369 | ,, 370 | ,, 371 | ,, 372 | ,, 373 | ,, 374 | ,, 375 | ,, 376 | ,, 377 | ,, 378 | ,, 379 | ,, 380 | ,, 381 | ,, 382 | ,, 383 | ,, 384 | ,, 385 | ,, 386 | ,, 387 | ,, 388 | ,, 389 | ,, 390 | ,, 391 | ,, 392 | ,, 393 | ,, 394 | ,, 395 | ,, 396 | ,, 397 | ,, 398 | ,, 399 | ,, 400 | ,, 401 | ,, 402 | ,, 403 | ,, 404 | ,, 405 | ,, 406 | ,, 407 | ,, 
408 | ,, 409 | ,, 410 | ,, 411 | ,, 412 | ,, 413 | ,, 414 | ,, 415 | ,, 416 | ,, 417 | ,, 418 | ,, 419 | ,, 420 | ,, 421 | ,, 422 | ,, 423 | ,, 424 | ,, 425 | ,, 426 | ,, 427 | ,, 428 | ,, 429 | ,, 430 | ,, 431 | ,, 432 | ,, 433 | ,, 434 | -------------------------------------------------------------------------------- /Slide-level multi-class subtyping task/splits/Camelyon16_subtyping2/splits_5.csv: -------------------------------------------------------------------------------- 1 | train,val,test 2 | normal_001,normal_116,test_104 3 | normal_005,normal_095,test_083 4 | normal_006,normal_084,test_096 5 | normal_007,normal_002,test_094 6 | normal_009,normal_044,test_084 7 | normal_010,normal_149,test_106 8 | normal_013,normal_060,test_079 9 | normal_014,normal_118,test_109 10 | normal_016,normal_150,test_057 11 | normal_017,normal_103,test_012 12 | normal_019,normal_003,test_053 13 | normal_020,normal_146,test_121 14 | normal_021,normal_075,test_038 15 | normal_024,normal_066,test_046 16 | normal_025,normal_159,test_098 17 | normal_026,normal_004,test_009 18 | normal_027,tumor_091,test_125 19 | normal_028,tumor_073,test_007 20 | normal_029,tumor_057,test_089 21 | normal_030,tumor_081,test_054 22 | normal_031,tumor_015,test_036 23 | normal_032,tumor_044,test_043 24 | normal_033,tumor_042,test_017 25 | normal_034,tumor_093,test_016 26 | normal_035,tumor_037,test_033 27 | normal_036,tumor_027,test_061 28 | normal_037,tumor_092,test_058 29 | normal_038,,test_095 30 | normal_039,,test_014 31 | normal_040,,test_110 32 | normal_041,,test_031 33 | normal_042,,test_003 34 | normal_043,,test_059 35 | normal_045,,test_117 36 | normal_047,,test_047 37 | normal_048,,test_082 38 | normal_049,,test_093 39 | normal_051,,test_091 40 | normal_052,,test_029 41 | normal_053,,test_019 42 | normal_054,,test_072 43 | normal_055,,test_122 44 | normal_056,,test_006 45 | normal_057,,test_052 46 | normal_058,,test_076 47 | normal_059,,test_103 48 | normal_062,,test_028 49 | normal_063,,test_086 50 | normal_064,,test_090 51 | normal_065,,test_074 52 | normal_067,,test_099 53 | normal_068,,test_129 54 | normal_069,,test_070 55 | normal_070,,test_008 56 | normal_072,,test_105 57 | normal_073,,test_119 58 | normal_074,,test_035 59 | normal_076,,test_037 60 | normal_077,,test_002 61 | normal_078,,test_024 62 | normal_079,,test_018 63 | normal_080,,test_060 64 | normal_081,,test_078 65 | normal_082,,test_034 66 | normal_085,,test_023 67 | normal_087,,test_127 68 | normal_088,,test_001 69 | normal_089,,test_004 70 | normal_090,,test_042 71 | normal_091,,test_075 72 | normal_092,,test_066 73 | normal_093,,test_118 74 | normal_094,,test_056 75 | normal_096,,test_116 76 | normal_097,,test_015 77 | normal_098,,test_048 78 | normal_099,,test_071 79 | normal_100,,test_051 80 | normal_101,,test_067 81 | normal_102,,test_100 82 | normal_104,,test_114 83 | normal_105,,test_097 84 | normal_106,,test_081 85 | normal_108,,test_032 86 | normal_110,,test_092 87 | normal_111,,test_108 88 | normal_112,,test_062 89 | normal_113,,test_120 90 | normal_114,,test_073 91 | normal_115,,test_102 92 | normal_117,,test_045 93 | normal_119,,test_128 94 | normal_120,,test_011 95 | normal_121,,test_085 96 | normal_122,,test_064 97 | normal_123,,test_010 98 | normal_124,,test_055 99 | normal_125,,test_101 100 | normal_126,,test_130 101 | normal_127,,test_005 102 | normal_128,,test_025 103 | normal_129,,test_107 104 | normal_130,,test_087 105 | normal_131,,test_020 106 | normal_132,,test_068 107 | normal_133,,test_080 108 | normal_135,,test_040 109 | 
normal_136,,test_069 110 | normal_137,,test_123 111 | normal_138,,test_041 112 | normal_139,,test_021 113 | normal_140,,test_065 114 | normal_142,,test_111 115 | normal_143,,test_022 116 | normal_144,,test_027 117 | normal_145,,test_077 118 | normal_147,,test_126 119 | normal_148,,test_063 120 | normal_151,,test_113 121 | normal_152,,test_039 122 | normal_153,,test_026 123 | normal_154,,test_044 124 | normal_155,,test_112 125 | normal_156,,test_115 126 | normal_157,,test_124 127 | normal_158,,test_013 128 | normal_160,,test_050 129 | tumor_001,,test_030 130 | tumor_002,,test_088 131 | tumor_003,, 132 | tumor_004,, 133 | tumor_005,, 134 | tumor_006,, 135 | tumor_007,, 136 | tumor_009,, 137 | tumor_010,, 138 | tumor_011,, 139 | tumor_012,, 140 | tumor_013,, 141 | tumor_014,, 142 | tumor_016,, 143 | tumor_017,, 144 | tumor_018,, 145 | tumor_019,, 146 | tumor_020,, 147 | tumor_021,, 148 | tumor_023,, 149 | tumor_025,, 150 | tumor_026,, 151 | tumor_028,, 152 | tumor_029,, 153 | tumor_030,, 154 | tumor_031,, 155 | tumor_032,, 156 | tumor_033,, 157 | tumor_034,, 158 | tumor_035,, 159 | tumor_036,, 160 | tumor_039,, 161 | tumor_040,, 162 | tumor_041,, 163 | tumor_043,, 164 | tumor_045,, 165 | tumor_046,, 166 | tumor_047,, 167 | tumor_048,, 168 | tumor_049,, 169 | tumor_050,, 170 | tumor_051,, 171 | tumor_052,, 172 | tumor_053,, 173 | tumor_054,, 174 | tumor_055,, 175 | tumor_056,, 176 | tumor_058,, 177 | tumor_059,, 178 | tumor_060,, 179 | tumor_062,, 180 | tumor_063,, 181 | tumor_064,, 182 | tumor_066,, 183 | tumor_067,, 184 | tumor_069,, 185 | tumor_070,, 186 | tumor_071,, 187 | tumor_072,, 188 | tumor_074,, 189 | tumor_075,, 190 | tumor_076,, 191 | tumor_077,, 192 | tumor_078,, 193 | tumor_080,, 194 | tumor_082,, 195 | tumor_083,, 196 | tumor_084,, 197 | tumor_085,, 198 | tumor_086,, 199 | tumor_088,, 200 | tumor_089,, 201 | tumor_090,, 202 | tumor_094,, 203 | tumor_095,, 204 | tumor_096,, 205 | tumor_097,, 206 | tumor_098,, 207 | tumor_099,, 208 | tumor_101,, 209 | tumor_102,, 210 | tumor_104,, 211 | tumor_105,, 212 | tumor_106,, 213 | tumor_107,, 214 | tumor_108,, 215 | tumor_109,, 216 | tumor_110,, 217 | tumor_111,, 218 | normal_061,, 219 | normal_071,, 220 | normal_018,, 221 | normal_008,, 222 | normal_050,, 223 | normal_046,, 224 | normal_083,, 225 | normal_141,, 226 | normal_134,, 227 | normal_109,, 228 | normal_011,, 229 | normal_015,, 230 | normal_022,, 231 | normal_107,, 232 | normal_023,, 233 | normal_012,, 234 | tumor_087,, 235 | tumor_079,, 236 | tumor_022,, 237 | tumor_100,, 238 | tumor_024,, 239 | tumor_065,, 240 | tumor_068,, 241 | tumor_038,, 242 | tumor_103,, 243 | tumor_008,, 244 | tumor_061,, 245 | ,, 246 | ,, 247 | ,, 248 | ,, 249 | ,, 250 | ,, 251 | ,, 252 | ,, 253 | ,, 254 | ,, 255 | ,, 256 | ,, 257 | ,, 258 | ,, 259 | ,, 260 | ,, 261 | ,, 262 | ,, 263 | ,, 264 | ,, 265 | ,, 266 | ,, 267 | ,, 268 | ,, 269 | ,, 270 | ,, 271 | ,, 272 | ,, 273 | ,, 274 | ,, 275 | ,, 276 | ,, 277 | ,, 278 | ,, 279 | ,, 280 | ,, 281 | ,, 282 | ,, 283 | ,, 284 | ,, 285 | ,, 286 | ,, 287 | ,, 288 | ,, 289 | ,, 290 | ,, 291 | ,, 292 | ,, 293 | ,, 294 | ,, 295 | ,, 296 | ,, 297 | ,, 298 | ,, 299 | ,, 300 | ,, 301 | ,, 302 | ,, 303 | ,, 304 | ,, 305 | ,, 306 | ,, 307 | ,, 308 | ,, 309 | ,, 310 | ,, 311 | ,, 312 | ,, 313 | ,, 314 | ,, 315 | ,, 316 | ,, 317 | ,, 318 | ,, 319 | ,, 320 | ,, 321 | ,, 322 | ,, 323 | ,, 324 | ,, 325 | ,, 326 | ,, 327 | ,, 328 | ,, 329 | ,, 330 | ,, 331 | ,, 332 | ,, 333 | ,, 334 | ,, 335 | ,, 336 | ,, 337 | ,, 338 | ,, 339 | ,, 340 | ,, 341 | ,, 342 | ,, 343 | ,, 344 | ,, 
345 | ,, 346 | ,, 347 | ,, 348 | ,, 349 | ,, 350 | ,, 351 | ,, 352 | ,, 353 | ,, 354 | ,, 355 | ,, 356 | ,, 357 | ,, 358 | ,, 359 | ,, 360 | ,, 361 | ,, 362 | ,, 363 | ,, 364 | ,, 365 | ,, 366 | ,, 367 | ,, 368 | ,, 369 | ,, 370 | ,, 371 | ,, 372 | ,, 373 | ,, 374 | ,, 375 | ,, 376 | ,, 377 | ,, 378 | ,, 379 | ,, 380 | ,, 381 | ,, 382 | ,, 383 | ,, 384 | ,, 385 | ,, 386 | ,, 387 | ,, 388 | ,, 389 | ,, 390 | ,, 391 | ,, 392 | ,, 393 | ,, 394 | ,, 395 | ,, 396 | ,, 397 | ,, 398 | ,, 399 | ,, 400 | ,, 401 | ,, 402 | ,, 403 | ,, 404 | ,, 405 | ,, 406 | ,, 407 | ,, 408 | ,, 409 | ,, 410 | ,, 411 | ,, 412 | ,, 413 | ,, 414 | ,, 415 | ,, 416 | ,, 417 | ,, 418 | ,, 419 | ,, 420 | ,, 421 | ,, 422 | ,, 423 | ,, 424 | ,, 425 | ,, 426 | ,, 427 | ,, 428 | ,, 429 | ,, 430 | ,, 431 | ,, 432 | ,, 433 | ,, 434 | -------------------------------------------------------------------------------- /Slide-level multi-class subtyping task/splits/Camelyon16_subtyping2/splits_6.csv: -------------------------------------------------------------------------------- 1 | train,val,test 2 | normal_001,normal_044,test_104 3 | normal_002,normal_011,test_083 4 | normal_003,normal_065,test_096 5 | normal_004,normal_121,test_094 6 | normal_005,normal_060,test_084 7 | normal_006,normal_091,test_106 8 | normal_007,normal_066,test_079 9 | normal_008,normal_160,test_109 10 | normal_009,normal_058,test_057 11 | normal_012,normal_031,test_012 12 | normal_013,normal_083,test_053 13 | normal_014,normal_023,test_121 14 | normal_015,normal_144,test_038 15 | normal_016,normal_073,test_046 16 | normal_017,normal_114,test_098 17 | normal_018,normal_040,test_009 18 | normal_020,tumor_056,test_125 19 | normal_021,tumor_006,test_007 20 | normal_024,tumor_108,test_089 21 | normal_025,tumor_052,test_054 22 | normal_027,tumor_057,test_036 23 | normal_028,tumor_077,test_043 24 | normal_029,tumor_103,test_017 25 | normal_030,tumor_109,test_016 26 | normal_032,tumor_027,test_033 27 | normal_033,tumor_039,test_061 28 | normal_034,tumor_073,test_058 29 | normal_035,,test_095 30 | normal_036,,test_014 31 | normal_037,,test_110 32 | normal_038,,test_031 33 | normal_039,,test_003 34 | normal_041,,test_059 35 | normal_042,,test_117 36 | normal_043,,test_047 37 | normal_045,,test_082 38 | normal_046,,test_093 39 | normal_048,,test_091 40 | normal_049,,test_029 41 | normal_050,,test_019 42 | normal_051,,test_072 43 | normal_052,,test_122 44 | normal_053,,test_006 45 | normal_055,,test_052 46 | normal_056,,test_076 47 | normal_059,,test_103 48 | normal_061,,test_028 49 | normal_062,,test_086 50 | normal_064,,test_090 51 | normal_067,,test_074 52 | normal_068,,test_099 53 | normal_069,,test_129 54 | normal_070,,test_070 55 | normal_071,,test_008 56 | normal_072,,test_105 57 | normal_074,,test_119 58 | normal_075,,test_035 59 | normal_076,,test_037 60 | normal_077,,test_002 61 | normal_078,,test_024 62 | normal_079,,test_018 63 | normal_080,,test_060 64 | normal_081,,test_078 65 | normal_082,,test_034 66 | normal_084,,test_023 67 | normal_085,,test_127 68 | normal_087,,test_001 69 | normal_088,,test_004 70 | normal_089,,test_042 71 | normal_090,,test_075 72 | normal_092,,test_066 73 | normal_093,,test_118 74 | normal_094,,test_056 75 | normal_095,,test_116 76 | normal_097,,test_015 77 | normal_098,,test_048 78 | normal_099,,test_071 79 | normal_100,,test_051 80 | normal_101,,test_067 81 | normal_102,,test_100 82 | normal_103,,test_114 83 | normal_105,,test_097 84 | normal_106,,test_081 85 | normal_107,,test_032 86 | normal_109,,test_092 87 | 
normal_110,,test_108 88 | normal_111,,test_062 89 | normal_112,,test_120 90 | normal_113,,test_073 91 | normal_115,,test_102 92 | normal_116,,test_045 93 | normal_117,,test_128 94 | normal_119,,test_011 95 | normal_120,,test_085 96 | normal_122,,test_064 97 | normal_123,,test_010 98 | normal_124,,test_055 99 | normal_125,,test_101 100 | normal_126,,test_130 101 | normal_127,,test_005 102 | normal_128,,test_025 103 | normal_129,,test_107 104 | normal_130,,test_087 105 | normal_131,,test_020 106 | normal_132,,test_068 107 | normal_133,,test_080 108 | normal_134,,test_040 109 | normal_135,,test_069 110 | normal_137,,test_123 111 | normal_138,,test_041 112 | normal_139,,test_021 113 | normal_140,,test_065 114 | normal_141,,test_111 115 | normal_142,,test_022 116 | normal_143,,test_027 117 | normal_145,,test_077 118 | normal_147,,test_126 119 | normal_148,,test_063 120 | normal_150,,test_113 121 | normal_151,,test_039 122 | normal_152,,test_026 123 | normal_154,,test_044 124 | normal_155,,test_112 125 | normal_156,,test_115 126 | normal_157,,test_124 127 | normal_158,,test_013 128 | normal_159,,test_050 129 | tumor_001,,test_030 130 | tumor_003,,test_088 131 | tumor_004,, 132 | tumor_005,, 133 | tumor_007,, 134 | tumor_008,, 135 | tumor_009,, 136 | tumor_010,, 137 | tumor_011,, 138 | tumor_012,, 139 | tumor_013,, 140 | tumor_014,, 141 | tumor_015,, 142 | tumor_016,, 143 | tumor_017,, 144 | tumor_019,, 145 | tumor_020,, 146 | tumor_021,, 147 | tumor_022,, 148 | tumor_023,, 149 | tumor_024,, 150 | tumor_026,, 151 | tumor_028,, 152 | tumor_029,, 153 | tumor_030,, 154 | tumor_031,, 155 | tumor_032,, 156 | tumor_033,, 157 | tumor_034,, 158 | tumor_035,, 159 | tumor_036,, 160 | tumor_037,, 161 | tumor_038,, 162 | tumor_040,, 163 | tumor_041,, 164 | tumor_042,, 165 | tumor_043,, 166 | tumor_044,, 167 | tumor_045,, 168 | tumor_046,, 169 | tumor_047,, 170 | tumor_048,, 171 | tumor_049,, 172 | tumor_050,, 173 | tumor_051,, 174 | tumor_053,, 175 | tumor_054,, 176 | tumor_055,, 177 | tumor_058,, 178 | tumor_059,, 179 | tumor_060,, 180 | tumor_061,, 181 | tumor_063,, 182 | tumor_064,, 183 | tumor_066,, 184 | tumor_067,, 185 | tumor_068,, 186 | tumor_072,, 187 | tumor_074,, 188 | tumor_076,, 189 | tumor_078,, 190 | tumor_079,, 191 | tumor_081,, 192 | tumor_082,, 193 | tumor_083,, 194 | tumor_084,, 195 | tumor_085,, 196 | tumor_086,, 197 | tumor_087,, 198 | tumor_088,, 199 | tumor_089,, 200 | tumor_090,, 201 | tumor_091,, 202 | tumor_092,, 203 | tumor_093,, 204 | tumor_094,, 205 | tumor_095,, 206 | tumor_097,, 207 | tumor_098,, 208 | tumor_099,, 209 | tumor_100,, 210 | tumor_101,, 211 | tumor_102,, 212 | tumor_104,, 213 | tumor_105,, 214 | tumor_106,, 215 | tumor_107,, 216 | tumor_110,, 217 | tumor_111,, 218 | normal_019,, 219 | normal_108,, 220 | normal_054,, 221 | normal_057,, 222 | normal_022,, 223 | normal_063,, 224 | normal_047,, 225 | normal_096,, 226 | normal_136,, 227 | normal_146,, 228 | normal_153,, 229 | normal_149,, 230 | normal_118,, 231 | normal_010,, 232 | normal_104,, 233 | normal_026,, 234 | tumor_071,, 235 | tumor_025,, 236 | tumor_018,, 237 | tumor_062,, 238 | tumor_069,, 239 | tumor_080,, 240 | tumor_002,, 241 | tumor_070,, 242 | tumor_096,, 243 | tumor_065,, 244 | tumor_075,, 245 | ,, 246 | ,, 247 | ,, 248 | ,, 249 | ,, 250 | ,, 251 | ,, 252 | ,, 253 | ,, 254 | ,, 255 | ,, 256 | ,, 257 | ,, 258 | ,, 259 | ,, 260 | ,, 261 | ,, 262 | ,, 263 | ,, 264 | ,, 265 | ,, 266 | ,, 267 | ,, 268 | ,, 269 | ,, 270 | ,, 271 | ,, 272 | ,, 273 | ,, 274 | ,, 275 | ,, 276 | ,, 277 | ,, 278 | ,, 279 | ,, 280 
| ,, 281 | ,, 282 | ,, 283 | ,, 284 | ,, 285 | ,, 286 | ,, 287 | ,, 288 | ,, 289 | ,, 290 | ,, 291 | ,, 292 | ,, 293 | ,, 294 | ,, 295 | ,, 296 | ,, 297 | ,, 298 | ,, 299 | ,, 300 | ,, 301 | ,, 302 | ,, 303 | ,, 304 | ,, 305 | ,, 306 | ,, 307 | ,, 308 | ,, 309 | ,, 310 | ,, 311 | ,, 312 | ,, 313 | ,, 314 | ,, 315 | ,, 316 | ,, 317 | ,, 318 | ,, 319 | ,, 320 | ,, 321 | ,, 322 | ,, 323 | ,, 324 | ,, 325 | ,, 326 | ,, 327 | ,, 328 | ,, 329 | ,, 330 | ,, 331 | ,, 332 | ,, 333 | ,, 334 | ,, 335 | ,, 336 | ,, 337 | ,, 338 | ,, 339 | ,, 340 | ,, 341 | ,, 342 | ,, 343 | ,, 344 | ,, 345 | ,, 346 | ,, 347 | ,, 348 | ,, 349 | ,, 350 | ,, 351 | ,, 352 | ,, 353 | ,, 354 | ,, 355 | ,, 356 | ,, 357 | ,, 358 | ,, 359 | ,, 360 | ,, 361 | ,, 362 | ,, 363 | ,, 364 | ,, 365 | ,, 366 | ,, 367 | ,, 368 | ,, 369 | ,, 370 | ,, 371 | ,, 372 | ,, 373 | ,, 374 | ,, 375 | ,, 376 | ,, 377 | ,, 378 | ,, 379 | ,, 380 | ,, 381 | ,, 382 | ,, 383 | ,, 384 | ,, 385 | ,, 386 | ,, 387 | ,, 388 | ,, 389 | ,, 390 | ,, 391 | ,, 392 | ,, 393 | ,, 394 | ,, 395 | ,, 396 | ,, 397 | ,, 398 | ,, 399 | ,, 400 | ,, 401 | ,, 402 | ,, 403 | ,, 404 | ,, 405 | ,, 406 | ,, 407 | ,, 408 | ,, 409 | ,, 410 | ,, 411 | ,, 412 | ,, 413 | ,, 414 | ,, 415 | ,, 416 | ,, 417 | ,, 418 | ,, 419 | ,, 420 | ,, 421 | ,, 422 | ,, 423 | ,, 424 | ,, 425 | ,, 426 | ,, 427 | ,, 428 | ,, 429 | ,, 430 | ,, 431 | ,, 432 | ,, 433 | ,, 434 | -------------------------------------------------------------------------------- /Slide-level multi-class subtyping task/splits/Camelyon16_subtyping2/splits_7.csv: -------------------------------------------------------------------------------- 1 | train,val,test 2 | normal_001,normal_049,test_104 3 | normal_002,normal_041,test_083 4 | normal_003,normal_042,test_096 5 | normal_004,normal_137,test_094 6 | normal_005,normal_073,test_084 7 | normal_006,normal_028,test_106 8 | normal_007,normal_157,test_079 9 | normal_008,normal_153,test_109 10 | normal_009,normal_063,test_057 11 | normal_012,normal_104,test_012 12 | normal_013,normal_155,test_053 13 | normal_014,normal_044,test_121 14 | normal_015,normal_010,test_038 15 | normal_016,normal_141,test_046 16 | normal_017,normal_140,test_098 17 | normal_018,normal_147,test_009 18 | normal_019,tumor_018,test_125 19 | normal_021,tumor_016,test_007 20 | normal_022,tumor_005,test_089 21 | normal_023,tumor_090,test_054 22 | normal_024,tumor_023,test_036 23 | normal_025,tumor_088,test_043 24 | normal_026,tumor_073,test_017 25 | normal_027,tumor_045,test_016 26 | normal_029,tumor_062,test_033 27 | normal_030,tumor_037,test_061 28 | normal_031,tumor_003,test_058 29 | normal_032,,test_095 30 | normal_033,,test_014 31 | normal_035,,test_110 32 | normal_036,,test_031 33 | normal_037,,test_003 34 | normal_038,,test_059 35 | normal_039,,test_117 36 | normal_040,,test_047 37 | normal_043,,test_082 38 | normal_045,,test_093 39 | normal_047,,test_091 40 | normal_048,,test_029 41 | normal_050,,test_019 42 | normal_051,,test_072 43 | normal_052,,test_122 44 | normal_053,,test_006 45 | normal_054,,test_052 46 | normal_055,,test_076 47 | normal_057,,test_103 48 | normal_059,,test_028 49 | normal_060,,test_086 50 | normal_061,,test_090 51 | normal_062,,test_074 52 | normal_064,,test_099 53 | normal_065,,test_129 54 | normal_066,,test_070 55 | normal_067,,test_008 56 | normal_068,,test_105 57 | normal_069,,test_119 58 | normal_070,,test_035 59 | normal_071,,test_037 60 | normal_072,,test_002 61 | normal_074,,test_024 62 | normal_075,,test_018 63 | normal_076,,test_060 64 | normal_077,,test_078 65 | 
normal_078,,test_034 66 | normal_079,,test_023 67 | normal_080,,test_127 68 | normal_081,,test_001 69 | normal_082,,test_004 70 | normal_084,,test_042 71 | normal_085,,test_075 72 | normal_087,,test_066 73 | normal_088,,test_118 74 | normal_089,,test_056 75 | normal_090,,test_116 76 | normal_091,,test_015 77 | normal_092,,test_048 78 | normal_093,,test_071 79 | normal_094,,test_051 80 | normal_095,,test_067 81 | normal_099,,test_100 82 | normal_100,,test_114 83 | normal_101,,test_097 84 | normal_102,,test_081 85 | normal_103,,test_032 86 | normal_105,,test_092 87 | normal_106,,test_108 88 | normal_107,,test_062 89 | normal_108,,test_120 90 | normal_109,,test_073 91 | normal_111,,test_102 92 | normal_112,,test_045 93 | normal_113,,test_128 94 | normal_115,,test_011 95 | normal_116,,test_085 96 | normal_117,,test_064 97 | normal_118,,test_010 98 | normal_119,,test_055 99 | normal_120,,test_101 100 | normal_122,,test_130 101 | normal_123,,test_005 102 | normal_124,,test_025 103 | normal_125,,test_107 104 | normal_126,,test_087 105 | normal_127,,test_020 106 | normal_128,,test_068 107 | normal_129,,test_080 108 | normal_131,,test_040 109 | normal_132,,test_069 110 | normal_133,,test_123 111 | normal_134,,test_041 112 | normal_136,,test_021 113 | normal_138,,test_065 114 | normal_139,,test_111 115 | normal_142,,test_022 116 | normal_143,,test_027 117 | normal_144,,test_077 118 | normal_145,,test_126 119 | normal_146,,test_063 120 | normal_148,,test_113 121 | normal_149,,test_039 122 | normal_150,,test_026 123 | normal_151,,test_044 124 | normal_152,,test_112 125 | normal_154,,test_115 126 | normal_156,,test_124 127 | normal_158,,test_013 128 | normal_160,,test_050 129 | tumor_001,,test_030 130 | tumor_002,,test_088 131 | tumor_004,, 132 | tumor_006,, 133 | tumor_007,, 134 | tumor_008,, 135 | tumor_009,, 136 | tumor_010,, 137 | tumor_011,, 138 | tumor_012,, 139 | tumor_013,, 140 | tumor_014,, 141 | tumor_015,, 142 | tumor_017,, 143 | tumor_019,, 144 | tumor_020,, 145 | tumor_021,, 146 | tumor_024,, 147 | tumor_025,, 148 | tumor_026,, 149 | tumor_027,, 150 | tumor_028,, 151 | tumor_029,, 152 | tumor_030,, 153 | tumor_031,, 154 | tumor_032,, 155 | tumor_033,, 156 | tumor_034,, 157 | tumor_035,, 158 | tumor_036,, 159 | tumor_038,, 160 | tumor_041,, 161 | tumor_042,, 162 | tumor_043,, 163 | tumor_044,, 164 | tumor_046,, 165 | tumor_047,, 166 | tumor_048,, 167 | tumor_049,, 168 | tumor_050,, 169 | tumor_051,, 170 | tumor_052,, 171 | tumor_053,, 172 | tumor_054,, 173 | tumor_055,, 174 | tumor_056,, 175 | tumor_057,, 176 | tumor_058,, 177 | tumor_059,, 178 | tumor_060,, 179 | tumor_061,, 180 | tumor_063,, 181 | tumor_064,, 182 | tumor_065,, 183 | tumor_067,, 184 | tumor_068,, 185 | tumor_070,, 186 | tumor_071,, 187 | tumor_072,, 188 | tumor_075,, 189 | tumor_076,, 190 | tumor_077,, 191 | tumor_078,, 192 | tumor_079,, 193 | tumor_080,, 194 | tumor_081,, 195 | tumor_082,, 196 | tumor_083,, 197 | tumor_084,, 198 | tumor_085,, 199 | tumor_086,, 200 | tumor_087,, 201 | tumor_089,, 202 | tumor_092,, 203 | tumor_093,, 204 | tumor_095,, 205 | tumor_097,, 206 | tumor_098,, 207 | tumor_099,, 208 | tumor_100,, 209 | tumor_101,, 210 | tumor_102,, 211 | tumor_103,, 212 | tumor_104,, 213 | tumor_106,, 214 | tumor_107,, 215 | tumor_108,, 216 | tumor_109,, 217 | tumor_110,, 218 | normal_098,, 219 | normal_056,, 220 | normal_083,, 221 | normal_121,, 222 | normal_130,, 223 | normal_159,, 224 | normal_046,, 225 | normal_034,, 226 | normal_096,, 227 | normal_011,, 228 | normal_135,, 229 | normal_020,, 230 | normal_114,, 
231 | normal_110,, 232 | normal_097,, 233 | normal_058,, 234 | tumor_111,, 235 | tumor_039,, 236 | tumor_091,, 237 | tumor_069,, 238 | tumor_096,, 239 | tumor_105,, 240 | tumor_094,, 241 | tumor_022,, 242 | tumor_040,, 243 | tumor_066,, 244 | tumor_074,, 245 | ,, 246 | ,, 247 | ,, 248 | ,, 249 | ,, 250 | ,, 251 | ,, 252 | ,, 253 | ,, 254 | ,, 255 | ,, 256 | ,, 257 | ,, 258 | ,, 259 | ,, 260 | ,, 261 | ,, 262 | ,, 263 | ,, 264 | ,, 265 | ,, 266 | ,, 267 | ,, 268 | ,, 269 | ,, 270 | ,, 271 | ,, 272 | ,, 273 | ,, 274 | ,, 275 | ,, 276 | ,, 277 | ,, 278 | ,, 279 | ,, 280 | ,, 281 | ,, 282 | ,, 283 | ,, 284 | ,, 285 | ,, 286 | ,, 287 | ,, 288 | ,, 289 | ,, 290 | ,, 291 | ,, 292 | ,, 293 | ,, 294 | ,, 295 | ,, 296 | ,, 297 | ,, 298 | ,, 299 | ,, 300 | ,, 301 | ,, 302 | ,, 303 | ,, 304 | ,, 305 | ,, 306 | ,, 307 | ,, 308 | ,, 309 | ,, 310 | ,, 311 | ,, 312 | ,, 313 | ,, 314 | ,, 315 | ,, 316 | ,, 317 | ,, 318 | ,, 319 | ,, 320 | ,, 321 | ,, 322 | ,, 323 | ,, 324 | ,, 325 | ,, 326 | ,, 327 | ,, 328 | ,, 329 | ,, 330 | ,, 331 | ,, 332 | ,, 333 | ,, 334 | ,, 335 | ,, 336 | ,, 337 | ,, 338 | ,, 339 | ,, 340 | ,, 341 | ,, 342 | ,, 343 | ,, 344 | ,, 345 | ,, 346 | ,, 347 | ,, 348 | ,, 349 | ,, 350 | ,, 351 | ,, 352 | ,, 353 | ,, 354 | ,, 355 | ,, 356 | ,, 357 | ,, 358 | ,, 359 | ,, 360 | ,, 361 | ,, 362 | ,, 363 | ,, 364 | ,, 365 | ,, 366 | ,, 367 | ,, 368 | ,, 369 | ,, 370 | ,, 371 | ,, 372 | ,, 373 | ,, 374 | ,, 375 | ,, 376 | ,, 377 | ,, 378 | ,, 379 | ,, 380 | ,, 381 | ,, 382 | ,, 383 | ,, 384 | ,, 385 | ,, 386 | ,, 387 | ,, 388 | ,, 389 | ,, 390 | ,, 391 | ,, 392 | ,, 393 | ,, 394 | ,, 395 | ,, 396 | ,, 397 | ,, 398 | ,, 399 | ,, 400 | ,, 401 | ,, 402 | ,, 403 | ,, 404 | ,, 405 | ,, 406 | ,, 407 | ,, 408 | ,, 409 | ,, 410 | ,, 411 | ,, 412 | ,, 413 | ,, 414 | ,, 415 | ,, 416 | ,, 417 | ,, 418 | ,, 419 | ,, 420 | ,, 421 | ,, 422 | ,, 423 | ,, 424 | ,, 425 | ,, 426 | ,, 427 | ,, 428 | ,, 429 | ,, 430 | ,, 431 | ,, 432 | ,, 433 | ,, 434 | -------------------------------------------------------------------------------- /Slide-level multi-class subtyping task/splits/Camelyon16_subtyping2/splits_8.csv: -------------------------------------------------------------------------------- 1 | train,val,test 2 | normal_001,normal_057,test_104 3 | normal_002,normal_040,test_083 4 | normal_006,normal_073,test_096 5 | normal_007,normal_050,test_094 6 | normal_009,normal_134,test_084 7 | normal_010,normal_091,test_106 8 | normal_012,normal_083,test_079 9 | normal_013,normal_065,test_109 10 | normal_014,normal_132,test_057 11 | normal_017,normal_150,test_012 12 | normal_018,normal_103,test_053 13 | normal_019,normal_154,test_121 14 | normal_020,normal_123,test_038 15 | normal_021,normal_149,test_046 16 | normal_022,normal_082,test_098 17 | normal_023,normal_079,test_009 18 | normal_024,tumor_036,test_125 19 | normal_025,tumor_067,test_007 20 | normal_026,tumor_029,test_089 21 | normal_027,tumor_039,test_054 22 | normal_028,tumor_064,test_036 23 | normal_029,tumor_024,test_043 24 | normal_030,tumor_059,test_017 25 | normal_031,tumor_075,test_016 26 | normal_032,tumor_030,test_033 27 | normal_033,tumor_087,test_061 28 | normal_034,tumor_048,test_058 29 | normal_035,,test_095 30 | normal_036,,test_014 31 | normal_037,,test_110 32 | normal_038,,test_031 33 | normal_039,,test_003 34 | normal_041,,test_059 35 | normal_042,,test_117 36 | normal_043,,test_047 37 | normal_046,,test_082 38 | normal_047,,test_093 39 | normal_048,,test_091 40 | normal_049,,test_029 41 | normal_051,,test_019 42 | normal_052,,test_072 43 | 
normal_053,,test_122 44 | normal_054,,test_006 45 | normal_056,,test_052 46 | normal_059,,test_076 47 | normal_060,,test_103 48 | normal_061,,test_028 49 | normal_062,,test_086 50 | normal_063,,test_090 51 | normal_064,,test_074 52 | normal_066,,test_099 53 | normal_067,,test_129 54 | normal_068,,test_070 55 | normal_069,,test_008 56 | normal_071,,test_105 57 | normal_072,,test_119 58 | normal_074,,test_035 59 | normal_075,,test_037 60 | normal_076,,test_002 61 | normal_078,,test_024 62 | normal_080,,test_018 63 | normal_081,,test_060 64 | normal_084,,test_078 65 | normal_085,,test_034 66 | normal_087,,test_023 67 | normal_088,,test_127 68 | normal_090,,test_001 69 | normal_092,,test_004 70 | normal_093,,test_042 71 | normal_094,,test_075 72 | normal_095,,test_066 73 | normal_096,,test_118 74 | normal_097,,test_056 75 | normal_098,,test_116 76 | normal_099,,test_015 77 | normal_100,,test_048 78 | normal_101,,test_071 79 | normal_102,,test_051 80 | normal_104,,test_067 81 | normal_105,,test_100 82 | normal_106,,test_114 83 | normal_107,,test_097 84 | normal_108,,test_081 85 | normal_109,,test_032 86 | normal_110,,test_092 87 | normal_111,,test_108 88 | normal_112,,test_062 89 | normal_113,,test_120 90 | normal_114,,test_073 91 | normal_115,,test_102 92 | normal_116,,test_045 93 | normal_117,,test_128 94 | normal_118,,test_011 95 | normal_119,,test_085 96 | normal_120,,test_064 97 | normal_121,,test_010 98 | normal_122,,test_055 99 | normal_124,,test_101 100 | normal_125,,test_130 101 | normal_126,,test_005 102 | normal_127,,test_025 103 | normal_128,,test_107 104 | normal_129,,test_087 105 | normal_130,,test_020 106 | normal_131,,test_068 107 | normal_133,,test_080 108 | normal_135,,test_040 109 | normal_136,,test_069 110 | normal_137,,test_123 111 | normal_138,,test_041 112 | normal_139,,test_021 113 | normal_140,,test_065 114 | normal_141,,test_111 115 | normal_142,,test_022 116 | normal_144,,test_027 117 | normal_145,,test_077 118 | normal_146,,test_126 119 | normal_147,,test_063 120 | normal_148,,test_113 121 | normal_151,,test_039 122 | normal_152,,test_026 123 | normal_153,,test_044 124 | normal_155,,test_112 125 | normal_156,,test_115 126 | normal_157,,test_124 127 | normal_158,,test_013 128 | normal_159,,test_050 129 | tumor_001,,test_030 130 | tumor_002,,test_088 131 | tumor_003,, 132 | tumor_004,, 133 | tumor_005,, 134 | tumor_006,, 135 | tumor_007,, 136 | tumor_008,, 137 | tumor_009,, 138 | tumor_010,, 139 | tumor_011,, 140 | tumor_012,, 141 | tumor_013,, 142 | tumor_014,, 143 | tumor_015,, 144 | tumor_016,, 145 | tumor_017,, 146 | tumor_018,, 147 | tumor_019,, 148 | tumor_020,, 149 | tumor_021,, 150 | tumor_022,, 151 | tumor_023,, 152 | tumor_025,, 153 | tumor_027,, 154 | tumor_031,, 155 | tumor_032,, 156 | tumor_033,, 157 | tumor_034,, 158 | tumor_035,, 159 | tumor_037,, 160 | tumor_038,, 161 | tumor_040,, 162 | tumor_041,, 163 | tumor_043,, 164 | tumor_045,, 165 | tumor_046,, 166 | tumor_047,, 167 | tumor_049,, 168 | tumor_050,, 169 | tumor_051,, 170 | tumor_052,, 171 | tumor_053,, 172 | tumor_054,, 173 | tumor_055,, 174 | tumor_057,, 175 | tumor_058,, 176 | tumor_060,, 177 | tumor_061,, 178 | tumor_062,, 179 | tumor_063,, 180 | tumor_065,, 181 | tumor_066,, 182 | tumor_068,, 183 | tumor_069,, 184 | tumor_070,, 185 | tumor_071,, 186 | tumor_072,, 187 | tumor_073,, 188 | tumor_074,, 189 | tumor_078,, 190 | tumor_079,, 191 | tumor_080,, 192 | tumor_081,, 193 | tumor_082,, 194 | tumor_083,, 195 | tumor_084,, 196 | tumor_085,, 197 | tumor_086,, 198 | tumor_088,, 199 | tumor_089,, 
200 | tumor_090,, 201 | tumor_091,, 202 | tumor_092,, 203 | tumor_094,, 204 | tumor_095,, 205 | tumor_097,, 206 | tumor_098,, 207 | tumor_099,, 208 | tumor_100,, 209 | tumor_101,, 210 | tumor_102,, 211 | tumor_103,, 212 | tumor_104,, 213 | tumor_105,, 214 | tumor_108,, 215 | tumor_109,, 216 | tumor_110,, 217 | tumor_111,, 218 | normal_015,, 219 | normal_045,, 220 | normal_070,, 221 | normal_058,, 222 | normal_077,, 223 | normal_004,, 224 | normal_005,, 225 | normal_011,, 226 | normal_008,, 227 | normal_055,, 228 | normal_003,, 229 | normal_016,, 230 | normal_160,, 231 | normal_044,, 232 | normal_089,, 233 | normal_143,, 234 | tumor_028,, 235 | tumor_042,, 236 | tumor_026,, 237 | tumor_077,, 238 | tumor_056,, 239 | tumor_096,, 240 | tumor_106,, 241 | tumor_044,, 242 | tumor_076,, 243 | tumor_107,, 244 | tumor_093,, 245 | ,, 246 | ,, 247 | ,, 248 | ,, 249 | ,, 250 | ,, 251 | ,, 252 | ,, 253 | ,, 254 | ,, 255 | ,, 256 | ,, 257 | ,, 258 | ,, 259 | ,, 260 | ,, 261 | ,, 262 | ,, 263 | ,, 264 | ,, 265 | ,, 266 | ,, 267 | ,, 268 | ,, 269 | ,, 270 | ,, 271 | ,, 272 | ,, 273 | ,, 274 | ,, 275 | ,, 276 | ,, 277 | ,, 278 | ,, 279 | ,, 280 | ,, 281 | ,, 282 | ,, 283 | ,, 284 | ,, 285 | ,, 286 | ,, 287 | ,, 288 | ,, 289 | ,, 290 | ,, 291 | ,, 292 | ,, 293 | ,, 294 | ,, 295 | ,, 296 | ,, 297 | ,, 298 | ,, 299 | ,, 300 | ,, 301 | ,, 302 | ,, 303 | ,, 304 | ,, 305 | ,, 306 | ,, 307 | ,, 308 | ,, 309 | ,, 310 | ,, 311 | ,, 312 | ,, 313 | ,, 314 | ,, 315 | ,, 316 | ,, 317 | ,, 318 | ,, 319 | ,, 320 | ,, 321 | ,, 322 | ,, 323 | ,, 324 | ,, 325 | ,, 326 | ,, 327 | ,, 328 | ,, 329 | ,, 330 | ,, 331 | ,, 332 | ,, 333 | ,, 334 | ,, 335 | ,, 336 | ,, 337 | ,, 338 | ,, 339 | ,, 340 | ,, 341 | ,, 342 | ,, 343 | ,, 344 | ,, 345 | ,, 346 | ,, 347 | ,, 348 | ,, 349 | ,, 350 | ,, 351 | ,, 352 | ,, 353 | ,, 354 | ,, 355 | ,, 356 | ,, 357 | ,, 358 | ,, 359 | ,, 360 | ,, 361 | ,, 362 | ,, 363 | ,, 364 | ,, 365 | ,, 366 | ,, 367 | ,, 368 | ,, 369 | ,, 370 | ,, 371 | ,, 372 | ,, 373 | ,, 374 | ,, 375 | ,, 376 | ,, 377 | ,, 378 | ,, 379 | ,, 380 | ,, 381 | ,, 382 | ,, 383 | ,, 384 | ,, 385 | ,, 386 | ,, 387 | ,, 388 | ,, 389 | ,, 390 | ,, 391 | ,, 392 | ,, 393 | ,, 394 | ,, 395 | ,, 396 | ,, 397 | ,, 398 | ,, 399 | ,, 400 | ,, 401 | ,, 402 | ,, 403 | ,, 404 | ,, 405 | ,, 406 | ,, 407 | ,, 408 | ,, 409 | ,, 410 | ,, 411 | ,, 412 | ,, 413 | ,, 414 | ,, 415 | ,, 416 | ,, 417 | ,, 418 | ,, 419 | ,, 420 | ,, 421 | ,, 422 | ,, 423 | ,, 424 | ,, 425 | ,, 426 | ,, 427 | ,, 428 | ,, 429 | ,, 430 | ,, 431 | ,, 432 | ,, 433 | ,, 434 | -------------------------------------------------------------------------------- /Slide-level multi-class subtyping task/splits/Camelyon16_subtyping2/splits_9.csv: -------------------------------------------------------------------------------- 1 | train,val,test 2 | normal_002,normal_137,test_104 3 | normal_003,normal_030,test_083 4 | normal_004,normal_078,test_096 5 | normal_005,normal_076,test_094 6 | normal_006,normal_121,test_084 7 | normal_007,normal_012,test_106 8 | normal_008,normal_034,test_079 9 | normal_011,normal_010,test_109 10 | normal_013,normal_156,test_057 11 | normal_014,normal_009,test_012 12 | normal_015,normal_087,test_053 13 | normal_016,normal_096,test_121 14 | normal_017,normal_040,test_038 15 | normal_018,normal_093,test_046 16 | normal_019,normal_125,test_098 17 | normal_020,normal_062,test_009 18 | normal_022,tumor_075,test_125 19 | normal_023,tumor_053,test_007 20 | normal_024,tumor_110,test_089 21 | normal_025,tumor_027,test_054 22 | normal_026,tumor_095,test_036 23 | 
normal_027,tumor_082,test_043 24 | normal_028,tumor_096,test_017 25 | normal_029,tumor_097,test_016 26 | normal_031,tumor_098,test_033 27 | normal_032,tumor_050,test_061 28 | normal_033,tumor_068,test_058 29 | normal_035,,test_095 30 | normal_036,,test_014 31 | normal_037,,test_110 32 | normal_039,,test_031 33 | normal_041,,test_003 34 | normal_042,,test_059 35 | normal_043,,test_117 36 | normal_044,,test_047 37 | normal_046,,test_082 38 | normal_047,,test_093 39 | normal_048,,test_091 40 | normal_049,,test_029 41 | normal_051,,test_019 42 | normal_052,,test_072 43 | normal_053,,test_122 44 | normal_054,,test_006 45 | normal_055,,test_052 46 | normal_056,,test_076 47 | normal_058,,test_103 48 | normal_059,,test_028 49 | normal_060,,test_086 50 | normal_061,,test_090 51 | normal_063,,test_074 52 | normal_064,,test_099 53 | normal_065,,test_129 54 | normal_066,,test_070 55 | normal_067,,test_008 56 | normal_068,,test_105 57 | normal_070,,test_119 58 | normal_071,,test_035 59 | normal_072,,test_037 60 | normal_073,,test_002 61 | normal_074,,test_024 62 | normal_075,,test_018 63 | normal_077,,test_060 64 | normal_080,,test_078 65 | normal_081,,test_034 66 | normal_082,,test_023 67 | normal_084,,test_127 68 | normal_085,,test_001 69 | normal_088,,test_004 70 | normal_089,,test_042 71 | normal_090,,test_075 72 | normal_091,,test_066 73 | normal_092,,test_118 74 | normal_094,,test_056 75 | normal_095,,test_116 76 | normal_097,,test_015 77 | normal_098,,test_048 78 | normal_099,,test_071 79 | normal_100,,test_051 80 | normal_101,,test_067 81 | normal_102,,test_100 82 | normal_103,,test_114 83 | normal_104,,test_097 84 | normal_105,,test_081 85 | normal_106,,test_032 86 | normal_107,,test_092 87 | normal_108,,test_108 88 | normal_109,,test_062 89 | normal_111,,test_120 90 | normal_112,,test_073 91 | normal_113,,test_102 92 | normal_114,,test_045 93 | normal_115,,test_128 94 | normal_116,,test_011 95 | normal_117,,test_085 96 | normal_118,,test_064 97 | normal_119,,test_010 98 | normal_120,,test_055 99 | normal_122,,test_101 100 | normal_123,,test_130 101 | normal_124,,test_005 102 | normal_126,,test_025 103 | normal_127,,test_107 104 | normal_129,,test_087 105 | normal_130,,test_020 106 | normal_132,,test_068 107 | normal_133,,test_080 108 | normal_134,,test_040 109 | normal_135,,test_069 110 | normal_136,,test_123 111 | normal_138,,test_041 112 | normal_139,,test_021 113 | normal_140,,test_065 114 | normal_141,,test_111 115 | normal_142,,test_022 116 | normal_143,,test_027 117 | normal_144,,test_077 118 | normal_145,,test_126 119 | normal_147,,test_063 120 | normal_149,,test_113 121 | normal_150,,test_039 122 | normal_152,,test_026 123 | normal_154,,test_044 124 | normal_155,,test_112 125 | normal_157,,test_115 126 | normal_158,,test_124 127 | normal_159,,test_013 128 | normal_160,,test_050 129 | tumor_001,,test_030 130 | tumor_002,,test_088 131 | tumor_003,, 132 | tumor_004,, 133 | tumor_006,, 134 | tumor_007,, 135 | tumor_008,, 136 | tumor_009,, 137 | tumor_010,, 138 | tumor_012,, 139 | tumor_013,, 140 | tumor_014,, 141 | tumor_015,, 142 | tumor_016,, 143 | tumor_017,, 144 | tumor_018,, 145 | tumor_019,, 146 | tumor_020,, 147 | tumor_021,, 148 | tumor_022,, 149 | tumor_023,, 150 | tumor_024,, 151 | tumor_025,, 152 | tumor_028,, 153 | tumor_029,, 154 | tumor_030,, 155 | tumor_031,, 156 | tumor_032,, 157 | tumor_033,, 158 | tumor_034,, 159 | tumor_035,, 160 | tumor_036,, 161 | tumor_037,, 162 | tumor_039,, 163 | tumor_040,, 164 | tumor_042,, 165 | tumor_043,, 166 | tumor_045,, 167 | tumor_046,, 
168 | tumor_047,, 169 | tumor_048,, 170 | tumor_049,, 171 | tumor_051,, 172 | tumor_052,, 173 | tumor_054,, 174 | tumor_055,, 175 | tumor_056,, 176 | tumor_057,, 177 | tumor_058,, 178 | tumor_060,, 179 | tumor_061,, 180 | tumor_062,, 181 | tumor_063,, 182 | tumor_064,, 183 | tumor_065,, 184 | tumor_066,, 185 | tumor_067,, 186 | tumor_069,, 187 | tumor_070,, 188 | tumor_072,, 189 | tumor_073,, 190 | tumor_074,, 191 | tumor_076,, 192 | tumor_078,, 193 | tumor_079,, 194 | tumor_080,, 195 | tumor_081,, 196 | tumor_083,, 197 | tumor_084,, 198 | tumor_085,, 199 | tumor_086,, 200 | tumor_087,, 201 | tumor_088,, 202 | tumor_089,, 203 | tumor_090,, 204 | tumor_091,, 205 | tumor_092,, 206 | tumor_093,, 207 | tumor_094,, 208 | tumor_099,, 209 | tumor_100,, 210 | tumor_101,, 211 | tumor_102,, 212 | tumor_103,, 213 | tumor_104,, 214 | tumor_105,, 215 | tumor_107,, 216 | tumor_108,, 217 | tumor_111,, 218 | normal_045,, 219 | normal_083,, 220 | normal_148,, 221 | normal_110,, 222 | normal_131,, 223 | normal_128,, 224 | normal_001,, 225 | normal_153,, 226 | normal_057,, 227 | normal_038,, 228 | normal_021,, 229 | normal_069,, 230 | normal_079,, 231 | normal_146,, 232 | normal_050,, 233 | normal_151,, 234 | tumor_059,, 235 | tumor_071,, 236 | tumor_038,, 237 | tumor_106,, 238 | tumor_011,, 239 | tumor_077,, 240 | tumor_044,, 241 | tumor_109,, 242 | tumor_026,, 243 | tumor_005,, 244 | tumor_041,, 245 | ,, 246 | ,, 247 | ,, 248 | ,, 249 | ,, 250 | ,, 251 | ,, 252 | ,, 253 | ,, 254 | ,, 255 | ,, 256 | ,, 257 | ,, 258 | ,, 259 | ,, 260 | ,, 261 | ,, 262 | ,, 263 | ,, 264 | ,, 265 | ,, 266 | ,, 267 | ,, 268 | ,, 269 | ,, 270 | ,, 271 | ,, 272 | ,, 273 | ,, 274 | ,, 275 | ,, 276 | ,, 277 | ,, 278 | ,, 279 | ,, 280 | ,, 281 | ,, 282 | ,, 283 | ,, 284 | ,, 285 | ,, 286 | ,, 287 | ,, 288 | ,, 289 | ,, 290 | ,, 291 | ,, 292 | ,, 293 | ,, 294 | ,, 295 | ,, 296 | ,, 297 | ,, 298 | ,, 299 | ,, 300 | ,, 301 | ,, 302 | ,, 303 | ,, 304 | ,, 305 | ,, 306 | ,, 307 | ,, 308 | ,, 309 | ,, 310 | ,, 311 | ,, 312 | ,, 313 | ,, 314 | ,, 315 | ,, 316 | ,, 317 | ,, 318 | ,, 319 | ,, 320 | ,, 321 | ,, 322 | ,, 323 | ,, 324 | ,, 325 | ,, 326 | ,, 327 | ,, 328 | ,, 329 | ,, 330 | ,, 331 | ,, 332 | ,, 333 | ,, 334 | ,, 335 | ,, 336 | ,, 337 | ,, 338 | ,, 339 | ,, 340 | ,, 341 | ,, 342 | ,, 343 | ,, 344 | ,, 345 | ,, 346 | ,, 347 | ,, 348 | ,, 349 | ,, 350 | ,, 351 | ,, 352 | ,, 353 | ,, 354 | ,, 355 | ,, 356 | ,, 357 | ,, 358 | ,, 359 | ,, 360 | ,, 361 | ,, 362 | ,, 363 | ,, 364 | ,, 365 | ,, 366 | ,, 367 | ,, 368 | ,, 369 | ,, 370 | ,, 371 | ,, 372 | ,, 373 | ,, 374 | ,, 375 | ,, 376 | ,, 377 | ,, 378 | ,, 379 | ,, 380 | ,, 381 | ,, 382 | ,, 383 | ,, 384 | ,, 385 | ,, 386 | ,, 387 | ,, 388 | ,, 389 | ,, 390 | ,, 391 | ,, 392 | ,, 393 | ,, 394 | ,, 395 | ,, 396 | ,, 397 | ,, 398 | ,, 399 | ,, 400 | ,, 401 | ,, 402 | ,, 403 | ,, 404 | ,, 405 | ,, 406 | ,, 407 | ,, 408 | ,, 409 | ,, 410 | ,, 411 | ,, 412 | ,, 413 | ,, 414 | ,, 415 | ,, 416 | ,, 417 | ,, 418 | ,, 419 | ,, 420 | ,, 421 | ,, 422 | ,, 423 | ,, 424 | ,, 425 | ,, 426 | ,, 427 | ,, 428 | ,, 429 | ,, 430 | ,, 431 | ,, 432 | ,, 433 | ,, 434 | -------------------------------------------------------------------------------- /Slide-level multi-class subtyping task/models/model_clam.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import torch.nn.functional as F 4 | from utils.utils import initialize_weights 5 | import numpy as np 6 | 7 | """ 8 | Attention Network without Gating (2 fc layers) 9 | args: 
10 | L: input feature dimension 11 | D: hidden layer dimension 12 | dropout: whether to use dropout (p = 0.25) 13 | n_classes: number of classes 14 | """ 15 | class Attn_Net(nn.Module): 16 | 17 | def __init__(self, L = 1024, D = 256, dropout = False, n_classes = 1): 18 | super(Attn_Net, self).__init__() 19 | self.module = [ 20 | nn.Linear(L, D), 21 | nn.Tanh()] 22 | 23 | if dropout: 24 | self.module.append(nn.Dropout(0.25)) 25 | 26 | self.module.append(nn.Linear(D, n_classes)) 27 | 28 | self.module = nn.Sequential(*self.module) 29 | 30 | def forward(self, x): 31 | return self.module(x), x # N x n_classes 32 | 33 | """ 34 | Attention Network with Sigmoid Gating (3 fc layers) 35 | args: 36 | L: input feature dimension 37 | D: hidden layer dimension 38 | dropout: whether to use dropout (p = 0.25) 39 | n_classes: number of classes 40 | """ 41 | class Attn_Net_Gated(nn.Module): 42 | def __init__(self, L = 1024, D = 256, dropout = False, n_classes = 1): 43 | super(Attn_Net_Gated, self).__init__() 44 | self.attention_a = [ 45 | nn.Linear(L, D), 46 | nn.Tanh()] 47 | 48 | self.attention_b = [nn.Linear(L, D), 49 | nn.Sigmoid()] 50 | if dropout: 51 | self.attention_a.append(nn.Dropout(0.25)) 52 | self.attention_b.append(nn.Dropout(0.25)) 53 | 54 | self.attention_a = nn.Sequential(*self.attention_a) 55 | self.attention_b = nn.Sequential(*self.attention_b) 56 | 57 | self.attention_c = nn.Linear(D, n_classes) 58 | 59 | def forward(self, x): 60 | a = self.attention_a(x) 61 | b = self.attention_b(x) 62 | A = a.mul(b) 63 | A = self.attention_c(A) # N x n_classes 64 | return A, x 65 | 66 | """ 67 | args: 68 | gate: whether to use gated attention network 69 | size_arg: config for network size 70 | dropout: whether to use dropout (p = 0.25) 71 | k_sample: number of positive/negative patches to sample for instance-level training 72 | n_classes: number of classes 73 | instance_loss_fn: loss function to supervise instance-level training 74 | subtyping: whether it's a subtyping problem 75 | """ 76 | 77 | class CLAM_SB(nn.Module): 78 | def __init__(self, gate = True, size_arg = "small", dropout = False, k_sample=8, n_classes=2, 79 | instance_loss_fn=nn.CrossEntropyLoss(), subtyping=False): 80 | super(CLAM_SB, self).__init__() 81 | self.size_dict = {"small": [768, 512, 256], "big": [1024, 512, 384], "cl": [512, 512, 256]} 82 | size = self.size_dict[size_arg] 83 | fc = [ nn.Linear(size[0], size[1]), nn.ReLU()] 84 | if dropout: 85 | fc.append(nn.Dropout(0.25)) 86 | if gate: 87 | attention_net = Attn_Net_Gated(L = size[1], D = size[2], dropout = dropout, n_classes = 1) 88 | else: 89 | attention_net = Attn_Net(L = size[1], D = size[2], dropout = dropout, n_classes = 1) 90 | fc.append(attention_net) 91 | self.attention_net = nn.Sequential(*fc) 92 | self.classifiers = nn.Linear(size[1], n_classes) 93 | instance_classifiers = [nn.Linear(size[1], 2) for i in range(n_classes)] 94 | self.instance_classifiers = nn.ModuleList(instance_classifiers) 95 | self.k_sample = k_sample 96 | self.instance_loss_fn = instance_loss_fn 97 | self.n_classes = n_classes 98 | self.subtyping = subtyping 99 | 100 | initialize_weights(self) 101 | 102 | def relocate(self): 103 | device=torch.device("cuda" if torch.cuda.is_available() else "cpu") 104 | self.attention_net = self.attention_net.to(device) 105 | self.classifiers = self.classifiers.to(device) 106 | self.instance_classifiers = self.instance_classifiers.to(device) 107 | 108 | @staticmethod 109 | def create_positive_targets(length, device): 110 | return
torch.full((length, ), 1, device=device).long() 111 | @staticmethod 112 | def create_negative_targets(length, device): 113 | return torch.full((length, ), 0, device=device).long() 114 | 115 | #instance-level evaluation for in-the-class attention branch 116 | def inst_eval(self, A, h, classifier): 117 | device=h.device 118 | if len(A.shape) == 1: 119 | A = A.view(1, -1) 120 | top_p_ids = torch.topk(A, self.k_sample)[1][-1] 121 | top_p = torch.index_select(h, dim=0, index=top_p_ids) 122 | top_n_ids = torch.topk(-A, self.k_sample, dim=1)[1][-1] 123 | top_n = torch.index_select(h, dim=0, index=top_n_ids) 124 | p_targets = self.create_positive_targets(self.k_sample, device) 125 | n_targets = self.create_negative_targets(self.k_sample, device) 126 | 127 | all_targets = torch.cat([p_targets, n_targets], dim=0) 128 | all_instances = torch.cat([top_p, top_n], dim=0) 129 | logits = classifier(all_instances) 130 | all_preds = torch.topk(logits, 1, dim = 1)[1].squeeze(1) 131 | instance_loss = self.instance_loss_fn(logits, all_targets) 132 | return instance_loss, all_preds, all_targets 133 | 134 | #instance-level evaluation for out-of-the-class attention branch 135 | def inst_eval_out(self, A, h, classifier): 136 | device=h.device 137 | if len(A.shape) == 1: 138 | A = A.view(1, -1) 139 | top_p_ids = torch.topk(A, self.k_sample)[1][-1] 140 | top_p = torch.index_select(h, dim=0, index=top_p_ids) 141 | p_targets = self.create_negative_targets(self.k_sample, device) 142 | logits = classifier(top_p) 143 | p_preds = torch.topk(logits, 1, dim = 1)[1].squeeze(1) 144 | instance_loss = self.instance_loss_fn(logits, p_targets) 145 | return instance_loss, p_preds, p_targets 146 | 147 | def forward(self, h, label=None, instance_eval=False, return_features=False, attention_only=False): 148 | device = h.device 149 | A, h = self.attention_net(h) # NxK 150 | A = torch.transpose(A, 1, 0) # KxN 151 | if attention_only: 152 | return A 153 | A_raw = A 154 | A = F.softmax(A, dim=1) # softmax over N 155 | 156 | if instance_eval: 157 | total_inst_loss = 0.0 158 | all_preds = [] 159 | all_targets = [] 160 | inst_labels = F.one_hot(label, num_classes=self.n_classes).squeeze() #binarize label 161 | for i in range(len(self.instance_classifiers)): 162 | inst_label = inst_labels[i].item() 163 | classifier = self.instance_classifiers[i] 164 | if inst_label == 1: #in-the-class: 165 | instance_loss, preds, targets = self.inst_eval(A, h, classifier) 166 | all_preds.extend(preds.cpu().numpy()) 167 | all_targets.extend(targets.cpu().numpy()) 168 | else: #out-of-the-class 169 | if self.subtyping: 170 | instance_loss, preds, targets = self.inst_eval_out(A, h, classifier) 171 | all_preds.extend(preds.cpu().numpy()) 172 | all_targets.extend(targets.cpu().numpy()) 173 | else: 174 | continue 175 | total_inst_loss += instance_loss 176 | 177 | if self.subtyping: 178 | total_inst_loss /= len(self.instance_classifiers) 179 | 180 | M = torch.mm(A, h) 181 | logits = self.classifiers(M) 182 | Y_hat = torch.topk(logits, 1, dim = 1)[1] 183 | Y_prob = F.softmax(logits, dim = 1) 184 | if instance_eval: 185 | results_dict = {'instance_loss': total_inst_loss, 'inst_labels': np.array(all_targets), 186 | 'inst_preds': np.array(all_preds)} 187 | else: 188 | results_dict = {} 189 | if return_features: 190 | results_dict.update({'features': M}) 191 | return logits, Y_prob, Y_hat, A_raw, results_dict 192 | 193 | class CLAM_MB(CLAM_SB): 194 | def __init__(self, gate = True, size_arg = "small", dropout = False, k_sample=8, n_classes=2, 195 | 
instance_loss_fn=nn.CrossEntropyLoss(), subtyping=False): 196 | nn.Module.__init__(self) 197 | self.size_dict = {"small": [1024, 512, 256], "big": [1024, 512, 384]} 198 | size = self.size_dict[size_arg] 199 | fc = [nn.Linear(size[0], size[1]), nn.ReLU()] 200 | if dropout: 201 | fc.append(nn.Dropout(0.25)) 202 | if gate: 203 | attention_net = Attn_Net_Gated(L = size[1], D = size[2], dropout = dropout, n_classes = n_classes) 204 | else: 205 | attention_net = Attn_Net(L = size[1], D = size[2], dropout = dropout, n_classes = n_classes) 206 | fc.append(attention_net) 207 | self.attention_net = nn.Sequential(*fc) 208 | bag_classifiers = [nn.Linear(size[1], 1) for i in range(n_classes)] # use an independent linear layer to predict each class 209 | self.classifiers = nn.ModuleList(bag_classifiers) 210 | instance_classifiers = [nn.Linear(size[1], 2) for i in range(n_classes)] 211 | self.instance_classifiers = nn.ModuleList(instance_classifiers) 212 | self.k_sample = k_sample 213 | self.instance_loss_fn = instance_loss_fn 214 | self.n_classes = n_classes 215 | self.subtyping = subtyping 216 | initialize_weights(self) 217 | 218 | def forward(self, h, label=None, instance_eval=False, return_features=False, attention_only=False): 219 | device = h.device 220 | A, h = self.attention_net(h) # NxK 221 | A = torch.transpose(A, 1, 0) # KxN 222 | if attention_only: 223 | return A 224 | A_raw = A 225 | A = F.softmax(A, dim=1) # softmax over N 226 | 227 | if instance_eval: 228 | total_inst_loss = 0.0 229 | all_preds = [] 230 | all_targets = [] 231 | inst_labels = F.one_hot(label, num_classes=self.n_classes).squeeze() #binarize label 232 | for i in range(len(self.instance_classifiers)): 233 | inst_label = inst_labels[i].item() 234 | classifier = self.instance_classifiers[i] 235 | if inst_label == 1: #in-the-class: 236 | instance_loss, preds, targets = self.inst_eval(A[i], h, classifier) 237 | all_preds.extend(preds.cpu().numpy()) 238 | all_targets.extend(targets.cpu().numpy()) 239 | else: #out-of-the-class 240 | if self.subtyping: 241 | instance_loss, preds, targets = self.inst_eval_out(A[i], h, classifier) 242 | all_preds.extend(preds.cpu().numpy()) 243 | all_targets.extend(targets.cpu().numpy()) 244 | else: 245 | continue 246 | total_inst_loss += instance_loss 247 | 248 | if self.subtyping: 249 | total_inst_loss /= len(self.instance_classifiers) 250 | 251 | M = torch.mm(A, h) 252 | logits = torch.empty(1, self.n_classes).float().to(device) 253 | for c in range(self.n_classes): 254 | logits[0, c] = self.classifiers[c](M[c]) 255 | Y_hat = torch.topk(logits, 1, dim = 1)[1] 256 | Y_prob = F.softmax(logits, dim = 1) 257 | if instance_eval: 258 | results_dict = {'instance_loss': total_inst_loss, 'inst_labels': np.array(all_targets), 259 | 'inst_preds': np.array(all_preds)} 260 | else: 261 | results_dict = {} 262 | if return_features: 263 | results_dict.update({'features': M}) 264 | return logits, Y_prob, Y_hat, A_raw, results_dict 265 | -------------------------------------------------------------------------------- /Slide-level multi-class subtyping task/train.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function 2 | 3 | import argparse 4 | import pdb 5 | import os 6 | import math 7 | 8 | # internal imports 9 | from utils.file_utils import save_pkl, load_pkl 10 | from utils.utils import * 11 | from utils.core_utils import train 12 | from datasets.dataset_generic import Generic_MIL_Dataset 13 | 14 | # pytorch imports 15 | import torch 16 | from
torch.utils.data import DataLoader, sampler 17 | import torch.nn as nn 18 | import torch.nn.functional as F 19 | 20 | import pandas as pd 21 | import numpy as np 22 | import ast 23 | ''' 24 | path_to_feature_filepath/ 25 | ├── slide_1.pt 26 | ├── slide_2.pt 27 | └── ... 28 | ''' 29 | 30 | 31 | # Generic training settings 32 | parser = argparse.ArgumentParser(description='Configurations for WSI Training') 33 | parser.add_argument('--dataset', type=str, default='', required=True, 34 | choices=['BRCA', 'RCC', 'CAM16', 'PANDA', 'NSCLC'], help='dataset select') 35 | parser.add_argument('--data_root_dir', type=str, default='path_to_feature_filepath', 36 | help='data directory') 37 | parser.add_argument('--csv_path', type=str, default='dataset_csv/PANDA_subtyping2.csv', help='csv file') 38 | parser.add_argument('--exp_info', type=str, default='experiment_task_2_tumor_subtyping_panda.txt', help='experiment info file') 39 | parser.add_argument('--label_dict', default="{'grades0':0, 'grades1':1}", help='label dict') 40 | parser.add_argument('--max_epochs', type=int, default=50, 41 | help='maximum number of epochs to train (default: 50)') 42 | parser.add_argument('--lr', type=float, default=1e-4, 43 | help='learning rate (default: 0.0001)') 44 | parser.add_argument('--label_frac', type=float, default=1.0, 45 | help='fraction of training labels (default: 1.0)') 46 | parser.add_argument('--reg', type=float, default=1e-5, 47 | help='weight decay (default: 1e-5)') 48 | parser.add_argument('--seed', type=int, default=1, 49 | help='random seed for reproducible experiment (default: 1)') 50 | parser.add_argument('--k', type=int, default=5, help='number of folds (default: 5)') 51 | parser.add_argument('--k_start', type=int, default=-1, help='start fold (default: -1, first fold)') 52 | parser.add_argument('--k_end', type=int, default=-1, help='end fold (default: -1, last fold)') 53 | parser.add_argument('--results_dir', default='./results', help='results directory (default: ./results)') 54 | parser.add_argument('--split_dir', type=str, default='PANDA_subtyping2', 55 | help='manually specify the set of splits to use, ' 56 | +'instead of inferring from the task and label_frac argument (default: PANDA_subtyping2)') 57 | parser.add_argument('--log_data', action='store_true', default=True, help='log data using tensorboard') 58 | parser.add_argument('--testing', action='store_true', default=False, help='debugging tool') 59 | parser.add_argument('--early_stopping', action='store_true', default=False, help='enable early stopping') 60 | parser.add_argument('--opt', type=str, choices = ['adam', 'sgd'], default='adam') 61 | parser.add_argument('--drop_out', action='store_true', default=True, help='enable dropout (p=0.25)') 62 | parser.add_argument('--bag_loss', type=str, choices=['svm', 'ce'], default='ce', 63 | help='slide-level classification loss function (default: ce)') 64 | parser.add_argument('--model_type', type=str, choices=['clam_sb', 'clam_mb', 'mil'], default='clam_sb', 65 | help='type of model (default: clam_sb, clam w/ single attention branch)') 66 | parser.add_argument('--exp_code', type=str,default='task_2_tumor_subtyping_panda', help='experiment code for saving results') 67 | parser.add_argument('--weighted_sample', action='store_true', default=True, help='enable weighted sampling') 68 | parser.add_argument('--model_size', type=str, choices=['small', 'big', 'cl'], default='small', help='size of model, does not affect mil') 69 | parser.add_argument('--task', type=str,default='task_2_tumor_subtyping',
choices=['task_1_tumor_vs_normal', 'task_2_tumor_subtyping']) 70 | ### CLAM specific options 71 | parser.add_argument('--no_inst_cluster', action='store_true', default=False, 72 | help='disable instance-level clustering') 73 | parser.add_argument('--inst_loss', type=str, choices=['svm', 'ce', None], default='svm', 74 | help='instance-level clustering loss function (default: svm)') 75 | parser.add_argument('--subtyping', action='store_true', default=True, 76 | help='treat the task as a subtyping problem') 77 | parser.add_argument('--bag_weight', type=float, default=0.7, 78 | help='clam: weight coefficient for bag-level loss (default: 0.7)') 79 | parser.add_argument('--B', type=int, default=8, help='number of positive/negative patches to sample for clam') 80 | parser.add_argument('--n_classes', type=int, default=2, help='number of classes') 81 | args = parser.parse_args() 82 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu") 83 | # print(f'the device is {device}') 84 | 85 | def seed_torch(seed=7): 86 | import random 87 | random.seed(seed) 88 | os.environ['PYTHONHASHSEED'] = str(seed) 89 | np.random.seed(seed) 90 | torch.manual_seed(seed) 91 | if device.type == 'cuda': 92 | torch.cuda.manual_seed(seed) 93 | torch.cuda.manual_seed_all(seed) # if you are using multi-GPU 94 | torch.backends.cudnn.benchmark = False 95 | torch.backends.cudnn.deterministic = True 96 | if os.path.exists(args.exp_info): 97 | print('load settings from file') 98 | with open(args.exp_info, 'r') as f: 99 | exp_info = f.read() 100 | exp_set_info = ast.literal_eval(exp_info) 101 | args.k = exp_set_info['num_splits'] 102 | args.k_start = exp_set_info['k_start'] 103 | args.k_end = exp_set_info['k_end'] 104 | args.task = exp_set_info['task'] 105 | args.max_epochs = exp_set_info['max_epochs'] 106 | args.results_dir = exp_set_info['results_dir'] 107 | args.lr = exp_set_info['lr'] 108 | args.reg = exp_set_info['reg'] 109 | args.label_frac = exp_set_info['label_frac'] 110 | args.bag_loss = exp_set_info['bag_loss'] 111 | args.seed = exp_set_info['seed'] 112 | args.model_type = exp_set_info['model_type'] 113 | args.model_size = exp_set_info['model_size'] 114 | args.drop_out = exp_set_info["use_drop_out"] 115 | args.weighted_sample = exp_set_info['weighted_sample'] 116 | args.opt = exp_set_info['opt'] 117 | args.bag_weight = exp_set_info['bag_weight'] 118 | args.inst_loss = exp_set_info['inst_loss'] 119 | args.B = exp_set_info['B'] 120 | args.n_classes = exp_set_info['n_classes'] 121 | # args.split_dir = exp_set_info['split_dir'] 122 | 123 | datasetdict = { 124 | 'BRCA': [2, {'IDC':0, 'ILC':1}], 125 | 'RCC': [3, {'CCRCC':0, 'CHRCC':1, 'PRCC':2}], 126 | 'NSCLC': [2, {'LUAD':0, 'LUSC':1}], 127 | 'CAM16': [2, {'normal':0, 'tumor':1}], 128 | 'PANDA': [2, {'grades0':0, 'grades1':1}], 129 | } 130 | 131 | seed_torch(args.seed) 132 | # encoding_size = 1024 133 | settings = {'num_splits': args.k, 134 | 'k_start': args.k_start, 135 | 'k_end': args.k_end, 136 | 'task': args.task, 137 | 'max_epochs': args.max_epochs, 138 | 'results_dir': args.results_dir, 139 | 'lr': args.lr, 140 | 'experiment': args.exp_code, 141 | 'reg': args.reg, 142 | 'label_frac': args.label_frac, 143 | 'bag_loss': args.bag_loss, 144 | 'seed': args.seed, 145 | 'model_type': args.model_type, 146 | 'model_size': args.model_size, 147 | "use_drop_out": args.drop_out, 148 | 'weighted_sample': args.weighted_sample, 149 | 'opt': args.opt} 150 | 151 | if args.model_type in ['clam_sb', 'clam_mb']: 152 | settings.update({'bag_weight': args.bag_weight, 153 | 'inst_loss': args.inst_loss,
154 | 'B': args.B}) 155 | 156 | print('\nLoad Dataset') 157 | 158 | if args.task == 'task_1_tumor_vs_normal': 159 | # args.n_classes=2 160 | # dataset = Generic_MIL_Dataset(csv_path = 'dataset_csv/tumor_vs_normal_dummy_clean.csv', 161 | # data_dir= os.path.join(args.data_root_dir, 'tumor_vs_normal_resnet_features'), 162 | # shuffle = False, 163 | # seed = args.seed, 164 | # print_info = True, 165 | # label_dict = {'normal_tissue':0, 'tumor_tissue':1}, 166 | # patient_strat=False, 167 | # ignore=[]) 168 | raise NotImplementedError 169 | 170 | elif args.task == 'task_2_tumor_subtyping': 171 | label_dict = datasetdict[args.dataset][1] 172 | # label_dict = ast.literal_eval(args.label_dict) 173 | dataset = Generic_MIL_Dataset(csv_path = args.csv_path, 174 | # data_dir= os.path.join(args.data_root_dir, 'data_pt'), 175 | data_dir = args.data_root_dir, 176 | shuffle = False, 177 | seed = args.seed, 178 | print_info = True, 179 | label_dict = label_dict, 180 | patient_strat = False, 181 | ignore=[]) 182 | 183 | if args.model_type in ['clam_sb', 'clam_mb']: 184 | assert args.subtyping 185 | 186 | else: 187 | raise NotImplementedError 188 | 189 | if not os.path.isdir(args.results_dir): 190 | os.mkdir(args.results_dir) 191 | 192 | args.results_dir = os.path.join(args.results_dir, str(args.exp_code) + '_s{}'.format(args.seed)) 193 | if not os.path.isdir(args.results_dir): 194 | os.mkdir(args.results_dir) 195 | 196 | if args.split_dir is None: 197 | args.split_dir = os.path.join('splits', args.task+'_{}'.format(int(args.label_frac*100))) 198 | else: 199 | # args.split_dir = os.path.join('splits', args.split_dir) 200 | args.split_dir = args.split_dir # use the provided split_dir path as-is 201 | 202 | print('split_dir: ', args.split_dir) 203 | assert os.path.isdir(args.split_dir) 204 | 205 | settings.update({'split_dir': args.split_dir}) 206 | 207 | 208 | with open(args.results_dir + '/experiment_{}.txt'.format(args.exp_code), 'w') as f: 209 | print(settings, file=f) 210 | # the with-block closes the file automatically 211 | 212 | print("################# Settings ###################") 213 | for key, val in settings.items(): 214 | print("{}: {}".format(key, val)) 215 | 216 | 217 | def main(args): 218 | # create results directory if necessary 219 | if not os.path.isdir(args.results_dir): 220 | os.mkdir(args.results_dir) 221 | 222 | if args.k_start == -1: 223 | start = 0 224 | else: 225 | start = args.k_start 226 | if args.k_end == -1: 227 | end = args.k 228 | else: 229 | end = args.k_end 230 | 231 | all_test_auc = [] 232 | all_val_auc = [] 233 | all_test_acc = [] 234 | all_val_acc = [] 235 | folds = np.arange(start, end) 236 | for i in folds: 237 | seed_torch(args.seed) 238 | train_dataset, val_dataset, test_dataset = dataset.return_splits(from_id=False, 239 | csv_path='{}/splits_{}.csv'.format(args.split_dir, i)) 240 | 241 | datasets = (train_dataset, val_dataset, test_dataset) 242 | results, test_auc, val_auc, test_acc, val_acc = train(datasets, i, args) 243 | all_test_auc.append(test_auc) 244 | all_val_auc.append(val_auc) 245 | all_test_acc.append(test_acc) 246 | all_val_acc.append(val_acc) 247 | # write results to pkl 248 | filename = os.path.join(args.results_dir, 'split_{}_results.pkl'.format(i)) 249 | save_pkl(filename, results) 250 | 251 | final_df = pd.DataFrame({'folds': folds, 'test_auc': all_test_auc, 252 | 'val_auc': all_val_auc, 'test_acc': all_test_acc, 'val_acc': all_val_acc}) 253 | 254 | if len(folds) != args.k: 255 | save_name = 'summary_partial_{}_{}.csv'.format(start, end) 256 | else: 257 | save_name = 'summary.csv' 258 |
final_df.to_csv(os.path.join(args.results_dir, save_name)) 259 | 260 | 261 | if __name__ == "__main__": 262 | results = main(args) 263 | print("finished!") 264 | print("end script") 265 | 266 | 267 | -------------------------------------------------------------------------------- /Slide-level multi-class subtyping task/feature_extract/genmodel.py: -------------------------------------------------------------------------------- 1 | import math 2 | from functools import partial 3 | import warnings 4 | import torch 5 | import torch.nn as nn 6 | 7 | 8 | def _no_grad_trunc_normal_(tensor, mean, std, a, b): 9 | # Cut & paste from PyTorch official master until it's in a few official releases - RW 10 | # Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf 11 | def norm_cdf(x): 12 | # Computes standard normal cumulative distribution function 13 | return (1. + math.erf(x / math.sqrt(2.))) / 2. 14 | 15 | if (mean < a - 2 * std) or (mean > b + 2 * std): 16 | warnings.warn("mean is more than 2 std from [a, b] in nn.init.trunc_normal_. " 17 | "The distribution of values may be incorrect.", 18 | stacklevel=2) 19 | 20 | with torch.no_grad(): 21 | # Values are generated by using a truncated uniform distribution and 22 | # then using the inverse CDF for the normal distribution. 23 | # Get upper and lower cdf values 24 | l = norm_cdf((a - mean) / std) 25 | u = norm_cdf((b - mean) / std) 26 | 27 | # Uniformly fill tensor with values from [l, u], then translate to 28 | # [2l-1, 2u-1]. 29 | tensor.uniform_(2 * l - 1, 2 * u - 1) 30 | 31 | # Use inverse cdf transform for normal distribution to get truncated 32 | # standard normal 33 | tensor.erfinv_() 34 | 35 | # Transform to proper mean, std 36 | tensor.mul_(std * math.sqrt(2.)) 37 | tensor.add_(mean) 38 | 39 | # Clamp to ensure it's in the proper range 40 | tensor.clamp_(min=a, max=b) 41 | return tensor 42 | 43 | 44 | def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.): 45 | # type: (Tensor, float, float, float, float) -> Tensor 46 | return _no_grad_trunc_normal_(tensor, mean, std, a, b) 47 | 48 | 49 | def drop_path(x, drop_prob: float = 0., training: bool = False): 50 | if drop_prob == 0. or not training: 51 | return x 52 | keep_prob = 1 - drop_prob 53 | shape = (x.shape[0],) + (1,) * (x.ndim - 1) # work with diff dim tensors, not just 2D ConvNets 54 | random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device) 55 | random_tensor.floor_() # binarize 56 | output = x.div(keep_prob) * random_tensor 57 | return output 58 | 59 | 60 | class DropPath(nn.Module): 61 | """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
62 | """ 63 | def __init__(self, drop_prob=None): 64 | super(DropPath, self).__init__() 65 | self.drop_prob = drop_prob 66 | 67 | def forward(self, x): 68 | return drop_path(x, self.drop_prob, self.training) 69 | 70 | 71 | class Mlp(nn.Module): 72 | def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.): 73 | super().__init__() 74 | out_features = out_features or in_features 75 | hidden_features = hidden_features or in_features 76 | self.fc1 = nn.Linear(in_features, hidden_features) 77 | self.act = act_layer() 78 | self.fc2 = nn.Linear(hidden_features, out_features) 79 | self.drop = nn.Dropout(drop) 80 | 81 | def forward(self, x): 82 | x = self.fc1(x) 83 | x = self.act(x) 84 | x = self.drop(x) 85 | x = self.fc2(x) 86 | x = self.drop(x) 87 | return x 88 | 89 | 90 | class Attention(nn.Module): 91 | def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.): 92 | super().__init__() 93 | self.num_heads = num_heads 94 | head_dim = dim // num_heads 95 | self.scale = qk_scale or head_dim ** -0.5 96 | 97 | self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) 98 | self.attn_drop = nn.Dropout(attn_drop) 99 | self.proj = nn.Linear(dim, dim) 100 | self.proj_drop = nn.Dropout(proj_drop) 101 | 102 | def forward(self, x): 103 | B, N, C = x.shape 104 | qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) 105 | q, k, v = qkv[0], qkv[1], qkv[2] 106 | 107 | attn = (q @ k.transpose(-2, -1)) * self.scale 108 | attn = attn.softmax(dim=-1) 109 | attn = self.attn_drop(attn) 110 | 111 | x = (attn @ v).transpose(1, 2).reshape(B, N, C) 112 | x = self.proj(x) 113 | x = self.proj_drop(x) 114 | return x, attn 115 | 116 | 117 | class Block(nn.Module): 118 | def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0., 119 | drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm): 120 | super().__init__() 121 | self.norm1 = norm_layer(dim) 122 | self.attn = Attention( 123 | dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop) 124 | self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() 125 | self.norm2 = norm_layer(dim) 126 | mlp_hidden_dim = int(dim * mlp_ratio) 127 | self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) 128 | 129 | def forward(self, x, return_attention=False): 130 | y, attn = self.attn(self.norm1(x)) 131 | if return_attention: 132 | return attn 133 | x = x + self.drop_path(y) 134 | x = x + self.drop_path(self.mlp(self.norm2(x))) 135 | return x 136 | 137 | 138 | class PatchEmbed(nn.Module): 139 | """ Image to Patch Embedding 140 | """ 141 | def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768): 142 | super().__init__() 143 | num_patches = (img_size // patch_size) * (img_size // patch_size) 144 | self.img_size = img_size 145 | self.patch_size = patch_size 146 | self.num_patches = num_patches 147 | 148 | self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size) 149 | 150 | def forward(self, x): 151 | B, C, H, W = x.shape 152 | x = self.proj(x).flatten(2).transpose(1, 2) 153 | return x 154 | 155 | 156 | class VisionTransformer(nn.Module): 157 | """ Vision Transformer """ 158 | def __init__(self, img_size=[224], patch_size=16, in_chans=3, num_classes=0, embed_dim=768, depth=12, 159 | num_heads=12, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop_rate=0., attn_drop_rate=0., 160 | drop_path_rate=0., norm_layer=nn.LayerNorm, **kwargs): 161 | super().__init__() 162 | self.num_features = self.embed_dim = embed_dim 163 | 164 | self.patch_embed = PatchEmbed( 165 | img_size=img_size[0], patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim) 166 | num_patches = self.patch_embed.num_patches 167 | 168 | self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) 169 | self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim)) 170 | self.pos_drop = nn.Dropout(p=drop_rate) 171 | 172 | dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule 173 | self.blocks = nn.ModuleList([ 174 | Block( 175 | dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, 176 | drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer) 177 | for i in range(depth)]) 178 | self.norm = norm_layer(embed_dim) 179 | 180 | # Classifier head 181 | self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity() 182 | 183 | trunc_normal_(self.pos_embed, std=.02) 184 | trunc_normal_(self.cls_token, std=.02) 185 | self.apply(self._init_weights) 186 | 187 | def _init_weights(self, m): 188 | if isinstance(m, nn.Linear): 189 | trunc_normal_(m.weight, std=.02) 190 | if isinstance(m, nn.Linear) and m.bias is not None: 191 | nn.init.constant_(m.bias, 0) 192 | elif isinstance(m, nn.LayerNorm): 193 | nn.init.constant_(m.bias, 0) 194 | nn.init.constant_(m.weight, 1.0) 195 | 196 | def interpolate_pos_encoding(self, x, w, h): 197 | npatch = x.shape[1] - 1 198 | N = self.pos_embed.shape[1] - 1 199 | if npatch == N and w == h: 200 | return self.pos_embed 201 | class_pos_embed = self.pos_embed[:, 0] 202 | patch_pos_embed = self.pos_embed[:, 1:] 203 | dim = x.shape[-1] 204 | w0 = w // self.patch_embed.patch_size 205 | h0 = h // self.patch_embed.patch_size 206 | # we add a small number to avoid floating point error in the interpolation 207 | # see discussion at https://github.com/facebookresearch/dino/issues/8 208 | w0, h0 = w0 + 0.1, h0 + 0.1 209 | patch_pos_embed = nn.functional.interpolate( 210 | patch_pos_embed.reshape(1, int(math.sqrt(N)), int(math.sqrt(N)), 
dim).permute(0, 3, 1, 2), 211 | scale_factor=(w0 / math.sqrt(N), h0 / math.sqrt(N)), 212 | mode='bicubic', 213 | ) 214 | assert int(w0) == patch_pos_embed.shape[-2] and int(h0) == patch_pos_embed.shape[-1] 215 | patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim) 216 | return torch.cat((class_pos_embed.unsqueeze(0), patch_pos_embed), dim=1) 217 | 218 | def prepare_tokens(self, x): 219 | B, nc, w, h = x.shape 220 | x = self.patch_embed(x) # patch linear embedding 221 | 222 | # add the [CLS] token to the embed patch tokens 223 | cls_tokens = self.cls_token.expand(B, -1, -1) 224 | x = torch.cat((cls_tokens, x), dim=1) 225 | 226 | # add positional encoding to each token 227 | x = x + self.interpolate_pos_encoding(x, w, h) 228 | 229 | return self.pos_drop(x) 230 | 231 | def forward(self, x): 232 | x = self.prepare_tokens(x) 233 | for blk in self.blocks: 234 | x = blk(x) 235 | x = self.norm(x) 236 | return x[:, 0] 237 | 238 | def get_last_selfattention(self, x): 239 | x = self.prepare_tokens(x) 240 | for i, blk in enumerate(self.blocks): 241 | if i < len(self.blocks) - 1: 242 | x = blk(x) 243 | else: 244 | # return attention of the last block 245 | return blk(x, return_attention=True) 246 | 247 | def get_intermediate_layers(self, x, n=1): 248 | x = self.prepare_tokens(x) 249 | # we return the output tokens from the `n` last blocks 250 | output = [] 251 | for i, blk in enumerate(self.blocks): 252 | x = blk(x) 253 | if len(self.blocks) - i <= n: 254 | output.append(self.norm(x)) 255 | return output 256 | 257 | 258 | def vit_tiny(patch_size=16, **kwargs): 259 | model = VisionTransformer( 260 | patch_size=patch_size, embed_dim=192, depth=12, num_heads=3, mlp_ratio=4, 261 | qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs) 262 | return model 263 | 264 | 265 | def vit_small(patch_size=16, **kwargs): 266 | model = VisionTransformer( 267 | patch_size=patch_size, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4, 268 | qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs) 269 | return model 270 | 271 | 272 | def vit_base(patch_size=16, **kwargs): 273 | model = VisionTransformer( 274 | patch_size=patch_size, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, 275 | qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs) 276 | return model 277 | 278 | def vit_large(patch_size=16, **kwargs): 279 | model = VisionTransformer( 280 | patch_size=patch_size, embed_dim=1024, depth=24, num_heads=16, mlp_ratio=4, 281 | qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs) 282 | return model 283 | 284 | def vit_huge(patch_size=16, **kwargs): 285 | model = VisionTransformer( 286 | patch_size=patch_size, embed_dim=1280, depth=32, num_heads=16, mlp_ratio=4, 287 | qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs) 288 | return model 289 | 290 | 291 | 292 | def genmodel(ckpt=None): # build a ViT-B/16 backbone and load pretrained weights from ckpt 293 | model = vit_base() 294 | ckptdir = ckpt 295 | weights = torch.load(ckptdir) # ckpt must point to a saved state_dict file 296 | model.load_state_dict(weights) 297 | return model -------------------------------------------------------------------------------- /genmodel.py: -------------------------------------------------------------------------------- 1 | import math 2 | from functools import partial 3 | import warnings 4 | import torch 5 | import torch.nn as nn 6 | 7 | 8 | def _no_grad_trunc_normal_(tensor, mean, std, a, b): 9 | # Cut & paste from PyTorch official master until it's in a few official releases - RW 10 | # Method based on
https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf 11 | def norm_cdf(x): 12 | # Computes standard normal cumulative distribution function 13 | return (1. + math.erf(x / math.sqrt(2.))) / 2. 14 | 15 | if (mean < a - 2 * std) or (mean > b + 2 * std): 16 | warnings.warn("mean is more than 2 std from [a, b] in nn.init.trunc_normal_. " 17 | "The distribution of values may be incorrect.", 18 | stacklevel=2) 19 | 20 | with torch.no_grad(): 21 | # Values are generated by using a truncated uniform distribution and 22 | # then using the inverse CDF for the normal distribution. 23 | # Get upper and lower cdf values 24 | l = norm_cdf((a - mean) / std) 25 | u = norm_cdf((b - mean) / std) 26 | 27 | # Uniformly fill tensor with values from [l, u], then translate to 28 | # [2l-1, 2u-1]. 29 | tensor.uniform_(2 * l - 1, 2 * u - 1) 30 | 31 | # Use inverse cdf transform for normal distribution to get truncated 32 | # standard normal 33 | tensor.erfinv_() 34 | 35 | # Transform to proper mean, std 36 | tensor.mul_(std * math.sqrt(2.)) 37 | tensor.add_(mean) 38 | 39 | # Clamp to ensure it's in the proper range 40 | tensor.clamp_(min=a, max=b) 41 | return tensor 42 | 43 | 44 | def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.): 45 | # type: (Tensor, float, float, float, float) -> Tensor 46 | return _no_grad_trunc_normal_(tensor, mean, std, a, b) 47 | 48 | 49 | def drop_path(x, drop_prob: float = 0., training: bool = False): 50 | if drop_prob == 0. or not training: 51 | return x 52 | keep_prob = 1 - drop_prob 53 | shape = (x.shape[0],) + (1,) * (x.ndim - 1) # work with diff dim tensors, not just 2D ConvNets 54 | random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device) 55 | random_tensor.floor_() # binarize 56 | output = x.div(keep_prob) * random_tensor 57 | return output 58 | 59 | 60 | class DropPath(nn.Module): 61 | """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). 
62 | """ 63 | def __init__(self, drop_prob=None): 64 | super(DropPath, self).__init__() 65 | self.drop_prob = drop_prob 66 | 67 | def forward(self, x): 68 | return drop_path(x, self.drop_prob, self.training) 69 | 70 | 71 | class Mlp(nn.Module): 72 | def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.): 73 | super().__init__() 74 | out_features = out_features or in_features 75 | hidden_features = hidden_features or in_features 76 | self.fc1 = nn.Linear(in_features, hidden_features) 77 | self.act = act_layer() 78 | self.fc2 = nn.Linear(hidden_features, out_features) 79 | self.drop = nn.Dropout(drop) 80 | 81 | def forward(self, x): 82 | x = self.fc1(x) 83 | x = self.act(x) 84 | x = self.drop(x) 85 | x = self.fc2(x) 86 | x = self.drop(x) 87 | return x 88 | 89 | 90 | class Attention(nn.Module): 91 | def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.): 92 | super().__init__() 93 | self.num_heads = num_heads 94 | head_dim = dim // num_heads 95 | self.scale = qk_scale or head_dim ** -0.5 96 | 97 | self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) 98 | self.attn_drop = nn.Dropout(attn_drop) 99 | self.proj = nn.Linear(dim, dim) 100 | self.proj_drop = nn.Dropout(proj_drop) 101 | 102 | def forward(self, x): 103 | B, N, C = x.shape 104 | qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) 105 | q, k, v = qkv[0], qkv[1], qkv[2] 106 | 107 | attn = (q @ k.transpose(-2, -1)) * self.scale 108 | attn = attn.softmax(dim=-1) 109 | attn = self.attn_drop(attn) 110 | 111 | x = (attn @ v).transpose(1, 2).reshape(B, N, C) 112 | x = self.proj(x) 113 | x = self.proj_drop(x) 114 | return x, attn 115 | 116 | 117 | class Block(nn.Module): 118 | def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0., 119 | drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm): 120 | super().__init__() 121 | self.norm1 = norm_layer(dim) 122 | self.attn = Attention( 123 | dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop) 124 | self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() 125 | self.norm2 = norm_layer(dim) 126 | mlp_hidden_dim = int(dim * mlp_ratio) 127 | self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) 128 | 129 | def forward(self, x, return_attention=False): 130 | y, attn = self.attn(self.norm1(x)) 131 | if return_attention: 132 | return attn 133 | x = x + self.drop_path(y) 134 | x = x + self.drop_path(self.mlp(self.norm2(x))) 135 | return x 136 | 137 | 138 | class PatchEmbed(nn.Module): 139 | """ Image to Patch Embedding 140 | """ 141 | def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768): 142 | super().__init__() 143 | num_patches = (img_size // patch_size) * (img_size // patch_size) 144 | self.img_size = img_size 145 | self.patch_size = patch_size 146 | self.num_patches = num_patches 147 | 148 | self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size) 149 | 150 | def forward(self, x): 151 | B, C, H, W = x.shape 152 | x = self.proj(x).flatten(2).transpose(1, 2) 153 | return x 154 | 155 | 156 | class VisionTransformer(nn.Module): 157 | """ Vision Transformer """ 158 | def __init__(self, img_size=[224], patch_size=16, in_chans=3, num_classes=0, embed_dim=768, depth=12, 159 | num_heads=12, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop_rate=0., attn_drop_rate=0., 160 | drop_path_rate=0., norm_layer=nn.LayerNorm, **kwargs): 161 | super().__init__() 162 | self.num_features = self.embed_dim = embed_dim 163 | 164 | self.patch_embed = PatchEmbed( 165 | img_size=img_size[0], patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim) 166 | num_patches = self.patch_embed.num_patches 167 | 168 | self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) 169 | self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim)) 170 | self.pos_drop = nn.Dropout(p=drop_rate) 171 | 172 | dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule 173 | self.blocks = nn.ModuleList([ 174 | Block( 175 | dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, 176 | drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer) 177 | for i in range(depth)]) 178 | self.norm = norm_layer(embed_dim) 179 | 180 | # Classifier head 181 | self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity() 182 | 183 | trunc_normal_(self.pos_embed, std=.02) 184 | trunc_normal_(self.cls_token, std=.02) 185 | self.apply(self._init_weights) 186 | 187 | def _init_weights(self, m): 188 | if isinstance(m, nn.Linear): 189 | trunc_normal_(m.weight, std=.02) 190 | if isinstance(m, nn.Linear) and m.bias is not None: 191 | nn.init.constant_(m.bias, 0) 192 | elif isinstance(m, nn.LayerNorm): 193 | nn.init.constant_(m.bias, 0) 194 | nn.init.constant_(m.weight, 1.0) 195 | 196 | def interpolate_pos_encoding(self, x, w, h): 197 | npatch = x.shape[1] - 1 198 | N = self.pos_embed.shape[1] - 1 199 | if npatch == N and w == h: 200 | return self.pos_embed 201 | class_pos_embed = self.pos_embed[:, 0] 202 | patch_pos_embed = self.pos_embed[:, 1:] 203 | dim = x.shape[-1] 204 | w0 = w // self.patch_embed.patch_size 205 | h0 = h // self.patch_embed.patch_size 206 | # we add a small number to avoid floating point error in the interpolation 207 | # see discussion at https://github.com/facebookresearch/dino/issues/8 208 | w0, h0 = w0 + 0.1, h0 + 0.1 209 | patch_pos_embed = nn.functional.interpolate( 210 | patch_pos_embed.reshape(1, int(math.sqrt(N)), int(math.sqrt(N)), 
dim).permute(0, 3, 1, 2), 211 | scale_factor=(w0 / math.sqrt(N), h0 / math.sqrt(N)), 212 | mode='bicubic', 213 | ) 214 | assert int(w0) == patch_pos_embed.shape[-2] and int(h0) == patch_pos_embed.shape[-1] 215 | patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim) 216 | return torch.cat((class_pos_embed.unsqueeze(0), patch_pos_embed), dim=1) 217 | 218 | def prepare_tokens(self, x): 219 | B, nc, w, h = x.shape 220 | x = self.patch_embed(x) # patch linear embedding 221 | 222 | # add the [CLS] token to the embed patch tokens 223 | cls_tokens = self.cls_token.expand(B, -1, -1) 224 | x = torch.cat((cls_tokens, x), dim=1) 225 | 226 | # add positional encoding to each token 227 | x = x + self.interpolate_pos_encoding(x, w, h) 228 | 229 | return self.pos_drop(x) 230 | 231 | def forward(self, x): 232 | x = self.prepare_tokens(x) 233 | for blk in self.blocks: 234 | x = blk(x) 235 | x = self.norm(x) 236 | return x[:, 0] 237 | 238 | def get_last_selfattention(self, x): 239 | x = self.prepare_tokens(x) 240 | for i, blk in enumerate(self.blocks): 241 | if i < len(self.blocks) - 1: 242 | x = blk(x) 243 | else: 244 | # return attention of the last block 245 | return blk(x, return_attention=True) 246 | 247 | def get_intermediate_layers(self, x, n=1): 248 | x = self.prepare_tokens(x) 249 | # we return the output tokens from the `n` last blocks 250 | output = [] 251 | for i, blk in enumerate(self.blocks): 252 | x = blk(x) 253 | if len(self.blocks) - i <= n: 254 | output.append(self.norm(x)) 255 | return output 256 | 257 | 258 | def vit_tiny(patch_size=16, **kwargs): 259 | model = VisionTransformer( 260 | patch_size=patch_size, embed_dim=192, depth=12, num_heads=3, mlp_ratio=4, 261 | qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs) 262 | return model 263 | 264 | 265 | def vit_small(patch_size=16, **kwargs): 266 | model = VisionTransformer( 267 | patch_size=patch_size, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4, 268 | qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs) 269 | return model 270 | 271 | 272 | def vit_base(patch_size=16, **kwargs): 273 | model = VisionTransformer( 274 | patch_size=patch_size, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, 275 | qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs) 276 | return model 277 | 278 | def vit_large(patch_size=16, **kwargs): 279 | model = VisionTransformer( 280 | patch_size=patch_size, embed_dim=1024, depth=24, num_heads=16, mlp_ratio=4, 281 | qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs) 282 | return model 283 | 284 | def vit_huge(patch_size=16, **kwargs): 285 | model = VisionTransformer( 286 | patch_size=patch_size, embed_dim=1280, depth=32, num_heads=16, mlp_ratio=4, 287 | qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs) 288 | return model 289 | 290 | 291 | 292 | def genmodel(ckpt=None): # build a ViT-B/16 backbone and load pretrained weights from ckpt 293 | model = vit_base() 294 | ckptdir = ckpt 295 | weights = torch.load(ckptdir) # ckpt must point to a saved state_dict file 296 | model.load_state_dict(weights) 297 | return model 298 | 299 | if __name__ == "__main__": 300 | model = genmodel('ckptdir') # 'ckptdir' is a placeholder; pass a real checkpoint path -------------------------------------------------------------------------------- /Slide-level multi-class subtyping task/splits/Camelyon16_subtyping2/splits_0_bool.csv: -------------------------------------------------------------------------------- 1 | Unnamed: 0,train,val,test 2 | normal_001,True,False,False 3 | normal_003,True,False,False 4 | normal_004,True,False,False 5 | normal_005,True,False,False 6 | normal_006,True,False,False 7 |
normal_007,True,False,False 8 | normal_008,True,False,False 9 | normal_009,True,False,False 10 | normal_010,True,False,False 11 | normal_011,True,False,False 12 | normal_016,True,False,False 13 | normal_017,True,False,False 14 | normal_018,True,False,False 15 | normal_019,True,False,False 16 | normal_021,True,False,False 17 | normal_023,True,False,False 18 | normal_024,True,False,False 19 | normal_025,True,False,False 20 | normal_026,True,False,False 21 | normal_027,True,False,False 22 | normal_028,True,False,False 23 | normal_029,True,False,False 24 | normal_031,True,False,False 25 | normal_033,True,False,False 26 | normal_034,True,False,False 27 | normal_035,True,False,False 28 | normal_036,True,False,False 29 | normal_037,True,False,False 30 | normal_038,True,False,False 31 | normal_039,True,False,False 32 | normal_040,True,False,False 33 | normal_044,True,False,False 34 | normal_046,True,False,False 35 | normal_047,True,False,False 36 | normal_048,True,False,False 37 | normal_049,True,False,False 38 | normal_050,True,False,False 39 | normal_051,True,False,False 40 | normal_052,True,False,False 41 | normal_053,True,False,False 42 | normal_054,True,False,False 43 | normal_055,True,False,False 44 | normal_056,True,False,False 45 | normal_057,True,False,False 46 | normal_058,True,False,False 47 | normal_059,True,False,False 48 | normal_061,True,False,False 49 | normal_062,True,False,False 50 | normal_063,True,False,False 51 | normal_064,True,False,False 52 | normal_065,True,False,False 53 | normal_066,True,False,False 54 | normal_067,True,False,False 55 | normal_068,True,False,False 56 | normal_069,True,False,False 57 | normal_070,True,False,False 58 | normal_072,True,False,False 59 | normal_073,True,False,False 60 | normal_075,True,False,False 61 | normal_076,True,False,False 62 | normal_077,True,False,False 63 | normal_078,True,False,False 64 | normal_079,True,False,False 65 | normal_080,True,False,False 66 | normal_083,True,False,False 67 | normal_084,True,False,False 68 | normal_085,True,False,False 69 | normal_087,True,False,False 70 | normal_088,True,False,False 71 | normal_089,True,False,False 72 | normal_090,True,False,False 73 | normal_094,True,False,False 74 | normal_095,True,False,False 75 | normal_097,True,False,False 76 | normal_098,True,False,False 77 | normal_100,True,False,False 78 | normal_101,True,False,False 79 | normal_103,True,False,False 80 | normal_104,True,False,False 81 | normal_106,True,False,False 82 | normal_108,True,False,False 83 | normal_110,True,False,False 84 | normal_111,True,False,False 85 | normal_112,True,False,False 86 | normal_113,True,False,False 87 | normal_114,True,False,False 88 | normal_115,True,False,False 89 | normal_117,True,False,False 90 | normal_118,True,False,False 91 | normal_119,True,False,False 92 | normal_120,True,False,False 93 | normal_121,True,False,False 94 | normal_122,True,False,False 95 | normal_123,True,False,False 96 | normal_124,True,False,False 97 | normal_125,True,False,False 98 | normal_126,True,False,False 99 | normal_127,True,False,False 100 | normal_129,True,False,False 101 | normal_130,True,False,False 102 | normal_131,True,False,False 103 | normal_133,True,False,False 104 | normal_134,True,False,False 105 | normal_135,True,False,False 106 | normal_136,True,False,False 107 | normal_137,True,False,False 108 | normal_138,True,False,False 109 | normal_139,True,False,False 110 | normal_140,True,False,False 111 | normal_141,True,False,False 112 | normal_142,True,False,False 113 | normal_143,True,False,False 114 | 
normal_144,True,False,False 115 | normal_145,True,False,False 116 | normal_146,True,False,False 117 | normal_147,True,False,False 118 | normal_148,True,False,False 119 | normal_149,True,False,False 120 | normal_151,True,False,False 121 | normal_152,True,False,False 122 | normal_153,True,False,False 123 | normal_154,True,False,False 124 | normal_155,True,False,False 125 | normal_156,True,False,False 126 | normal_157,True,False,False 127 | normal_159,True,False,False 128 | normal_160,True,False,False 129 | tumor_002,True,False,False 130 | tumor_003,True,False,False 131 | tumor_004,True,False,False 132 | tumor_005,True,False,False 133 | tumor_006,True,False,False 134 | tumor_007,True,False,False 135 | tumor_009,True,False,False 136 | tumor_010,True,False,False 137 | tumor_012,True,False,False 138 | tumor_013,True,False,False 139 | tumor_014,True,False,False 140 | tumor_015,True,False,False 141 | tumor_017,True,False,False 142 | tumor_018,True,False,False 143 | tumor_019,True,False,False 144 | tumor_022,True,False,False 145 | tumor_023,True,False,False 146 | tumor_024,True,False,False 147 | tumor_025,True,False,False 148 | tumor_026,True,False,False 149 | tumor_027,True,False,False 150 | tumor_028,True,False,False 151 | tumor_029,True,False,False 152 | tumor_030,True,False,False 153 | tumor_031,True,False,False 154 | tumor_032,True,False,False 155 | tumor_033,True,False,False 156 | tumor_034,True,False,False 157 | tumor_035,True,False,False 158 | tumor_036,True,False,False 159 | tumor_037,True,False,False 160 | tumor_038,True,False,False 161 | tumor_039,True,False,False 162 | tumor_040,True,False,False 163 | tumor_041,True,False,False 164 | tumor_042,True,False,False 165 | tumor_044,True,False,False 166 | tumor_046,True,False,False 167 | tumor_047,True,False,False 168 | tumor_048,True,False,False 169 | tumor_049,True,False,False 170 | tumor_050,True,False,False 171 | tumor_051,True,False,False 172 | tumor_053,True,False,False 173 | tumor_054,True,False,False 174 | tumor_055,True,False,False 175 | tumor_056,True,False,False 176 | tumor_057,True,False,False 177 | tumor_058,True,False,False 178 | tumor_059,True,False,False 179 | tumor_062,True,False,False 180 | tumor_063,True,False,False 181 | tumor_065,True,False,False 182 | tumor_066,True,False,False 183 | tumor_067,True,False,False 184 | tumor_068,True,False,False 185 | tumor_069,True,False,False 186 | tumor_070,True,False,False 187 | tumor_071,True,False,False 188 | tumor_072,True,False,False 189 | tumor_073,True,False,False 190 | tumor_074,True,False,False 191 | tumor_075,True,False,False 192 | tumor_076,True,False,False 193 | tumor_078,True,False,False 194 | tumor_079,True,False,False 195 | tumor_080,True,False,False 196 | tumor_081,True,False,False 197 | tumor_082,True,False,False 198 | tumor_083,True,False,False 199 | tumor_084,True,False,False 200 | tumor_085,True,False,False 201 | tumor_086,True,False,False 202 | tumor_090,True,False,False 203 | tumor_091,True,False,False 204 | tumor_092,True,False,False 205 | tumor_094,True,False,False 206 | tumor_095,True,False,False 207 | tumor_097,True,False,False 208 | tumor_098,True,False,False 209 | tumor_099,True,False,False 210 | tumor_101,True,False,False 211 | tumor_103,True,False,False 212 | tumor_104,True,False,False 213 | tumor_107,True,False,False 214 | tumor_108,True,False,False 215 | tumor_109,True,False,False 216 | tumor_110,True,False,False 217 | tumor_111,True,False,False 218 | normal_150,True,False,False 219 | normal_002,True,False,False 220 | normal_132,True,False,False 221 | 
normal_071,True,False,False 222 | normal_128,True,False,False 223 | normal_032,True,False,False 224 | normal_109,True,False,False 225 | normal_042,True,False,False 226 | normal_102,True,False,False 227 | normal_116,True,False,False 228 | normal_105,True,False,False 229 | normal_013,True,False,False 230 | normal_158,True,False,False 231 | normal_081,True,False,False 232 | normal_014,True,False,False 233 | normal_022,True,False,False 234 | tumor_102,True,False,False 235 | tumor_096,True,False,False 236 | tumor_043,True,False,False 237 | tumor_001,True,False,False 238 | tumor_077,True,False,False 239 | tumor_045,True,False,False 240 | tumor_052,True,False,False 241 | tumor_105,True,False,False 242 | tumor_089,True,False,False 243 | tumor_087,True,False,False 244 | tumor_093,True,False,False 245 | normal_030,False,True,False 246 | normal_043,False,True,False 247 | normal_015,False,True,False 248 | normal_093,False,True,False 249 | normal_082,False,True,False 250 | normal_020,False,True,False 251 | normal_045,False,True,False 252 | normal_012,False,True,False 253 | normal_041,False,True,False 254 | normal_099,False,True,False 255 | normal_091,False,True,False 256 | normal_096,False,True,False 257 | normal_074,False,True,False 258 | normal_107,False,True,False 259 | normal_060,False,True,False 260 | normal_092,False,True,False 261 | tumor_020,False,True,False 262 | tumor_060,False,True,False 263 | tumor_016,False,True,False 264 | tumor_100,False,True,False 265 | tumor_061,False,True,False 266 | tumor_064,False,True,False 267 | tumor_088,False,True,False 268 | tumor_011,False,True,False 269 | tumor_021,False,True,False 270 | tumor_106,False,True,False 271 | tumor_008,False,True,False 272 | test_104,False,False,True 273 | test_083,False,False,True 274 | test_096,False,False,True 275 | test_094,False,False,True 276 | test_084,False,False,True 277 | test_106,False,False,True 278 | test_079,False,False,True 279 | test_109,False,False,True 280 | test_057,False,False,True 281 | test_012,False,False,True 282 | test_053,False,False,True 283 | test_121,False,False,True 284 | test_038,False,False,True 285 | test_046,False,False,True 286 | test_098,False,False,True 287 | test_009,False,False,True 288 | test_125,False,False,True 289 | test_007,False,False,True 290 | test_089,False,False,True 291 | test_054,False,False,True 292 | test_036,False,False,True 293 | test_043,False,False,True 294 | test_017,False,False,True 295 | test_016,False,False,True 296 | test_033,False,False,True 297 | test_061,False,False,True 298 | test_058,False,False,True 299 | test_095,False,False,True 300 | test_014,False,False,True 301 | test_110,False,False,True 302 | test_031,False,False,True 303 | test_003,False,False,True 304 | test_059,False,False,True 305 | test_117,False,False,True 306 | test_047,False,False,True 307 | test_082,False,False,True 308 | test_093,False,False,True 309 | test_091,False,False,True 310 | test_029,False,False,True 311 | test_019,False,False,True 312 | test_072,False,False,True 313 | test_122,False,False,True 314 | test_006,False,False,True 315 | test_052,False,False,True 316 | test_076,False,False,True 317 | test_103,False,False,True 318 | test_028,False,False,True 319 | test_086,False,False,True 320 | test_090,False,False,True 321 | test_074,False,False,True 322 | test_099,False,False,True 323 | test_129,False,False,True 324 | test_070,False,False,True 325 | test_008,False,False,True 326 | test_105,False,False,True 327 | test_119,False,False,True 328 | test_035,False,False,True 329 | 
test_037,False,False,True 330 | test_002,False,False,True 331 | test_024,False,False,True 332 | test_018,False,False,True 333 | test_060,False,False,True 334 | test_078,False,False,True 335 | test_034,False,False,True 336 | test_023,False,False,True 337 | test_127,False,False,True 338 | test_001,False,False,True 339 | test_004,False,False,True 340 | test_042,False,False,True 341 | test_075,False,False,True 342 | test_066,False,False,True 343 | test_118,False,False,True 344 | test_056,False,False,True 345 | test_116,False,False,True 346 | test_015,False,False,True 347 | test_048,False,False,True 348 | test_071,False,False,True 349 | test_051,False,False,True 350 | test_067,False,False,True 351 | test_100,False,False,True 352 | test_114,False,False,True 353 | test_097,False,False,True 354 | test_081,False,False,True 355 | test_032,False,False,True 356 | test_092,False,False,True 357 | test_108,False,False,True 358 | test_062,False,False,True 359 | test_120,False,False,True 360 | test_073,False,False,True 361 | test_102,False,False,True 362 | test_045,False,False,True 363 | test_128,False,False,True 364 | test_011,False,False,True 365 | test_085,False,False,True 366 | test_064,False,False,True 367 | test_010,False,False,True 368 | test_055,False,False,True 369 | test_101,False,False,True 370 | test_130,False,False,True 371 | test_005,False,False,True 372 | test_025,False,False,True 373 | test_107,False,False,True 374 | test_087,False,False,True 375 | test_020,False,False,True 376 | test_068,False,False,True 377 | test_080,False,False,True 378 | test_040,False,False,True 379 | test_069,False,False,True 380 | test_123,False,False,True 381 | test_041,False,False,True 382 | test_021,False,False,True 383 | test_065,False,False,True 384 | test_111,False,False,True 385 | test_022,False,False,True 386 | test_027,False,False,True 387 | test_077,False,False,True 388 | test_126,False,False,True 389 | test_063,False,False,True 390 | test_113,False,False,True 391 | test_039,False,False,True 392 | test_026,False,False,True 393 | test_044,False,False,True 394 | test_112,False,False,True 395 | test_115,False,False,True 396 | test_124,False,False,True 397 | test_013,False,False,True 398 | test_050,False,False,True 399 | test_030,False,False,True 400 | test_088,False,False,True 401 | --------------------------------------------------------------------------------
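Taken together, the files above form a two-stage pipeline: genmodel.py builds a DINO-style ViT that turns slide patches into feature vectors, and train.py fits the attention-MIL head on the resulting .pt bags. Below is a minimal sketch of the feature-extraction stage, assuming a ViT-B/16 checkpoint compatible with the VisionTransformer above; the checkpoint path, patch batch, and output path are illustrative stand-ins (the repo's own pipeline lives in feature_extract/extract_features.py).

# Minimal sketch: embed one slide's patches and save them as a .pt feature bag.
import torch
from genmodel import genmodel

model = genmodel(ckpt='path_to_vit_checkpoint.pth')  # assumed checkpoint path
model.eval()

patches = torch.randn(32, 3, 224, 224)  # stand-in for a batch of 224x224 slide patches
with torch.no_grad():
    features = model(patches)  # (32, 768) CLS-token embeddings from forward()
torch.save(features, 'path_to_feature_filepath/slide_1.pt')  # layout documented in train.py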
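The multi-branch attention head excerpted from models/model_clam.py can be smoke-tested on a random bag; note that size_dict expects 1024-dimensional patch features for the "small" configuration. The class name CLAM_MB and the constructor defaults are assumptions, since only the tail of the __init__ signature is visible above:

# Hedged smoke test for the multi-branch CLAM head (class name CLAM_MB assumed).
import torch
from models.model_clam import CLAM_MB

model = CLAM_MB(n_classes=2, subtyping=True)  # remaining args left at their assumed defaults
model.eval()
h = torch.randn(500, 1024)  # a bag of 500 patch features, 1024-d as in size_dict["small"]
logits, Y_prob, Y_hat, A_raw, _ = model(h)
print(logits.shape, Y_prob.shape, Y_hat.shape, A_raw.shape)  # (1,2) (1,2) (1,1) (2,500)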
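Finally, a representative train.py invocation, using only flags defined in its argparse block; the data paths are placeholders to be replaced with real feature and CSV locations:

python train.py --dataset PANDA \
    --data_root_dir path_to_feature_filepath \
    --csv_path dataset_csv/PANDA_subtyping2.csv \
    --split_dir PANDA_subtyping2 \
    --exp_code task_2_tumor_subtyping_panda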