├── classification
│   ├── trained_model_DCM_classifier.joblib
│   ├── trained_model_HCM_classifier.joblib
│   ├── trained_model_MINF_classifier.joblib
│   ├── trained_model_RVA_classifier.joblib
│   ├── acdc_testing_set_prediction.txt
│   ├── acdc_testing_set_prediction_MINF_classifier.txt
│   ├── acdc_testing_set_prediction_HCM_classifier.txt
│   ├── acdc_testing_set_prediction_RVA_classifier.txt
│   ├── acdc_testing_set_prediction_DCM_classifier.txt
│   ├── classification_prediction.py
│   ├── data_classification.py
│   └── classifiers.py
├── config.py
├── download_weights.py
├── feature_extraction
│   ├── acdc_pixel_size.py
│   ├── acdc_base.py
│   ├── acdc_motion_index.py
│   ├── acdc_thickness.py
│   ├── acdc_zone_flow.py
│   └── acdc_volume.py
├── ROI
│   ├── data_roi_predict.py
│   ├── predict_roi_net.py
│   ├── module_roi_net.py
│   └── crop_according_to_roi.py
├── processing
│   ├── convert_nifti_to_2D.py
│   ├── acdc_info.py
│   └── acdc_gt_base.py
├── acdc_info
│   ├── acdc_gt_base.txt
│   ├── acdc_base.txt
│   ├── acdc_pixel_size.txt
│   ├── acdc_info.txt
│   └── acdc_motion_index.txt
├── flow
│   ├── data_apparentflow.py
│   ├── module_apparentflow_net.py
│   ├── predict_apparentflow_net.py
│   └── train_apparentflow_net.py
├── LICENSE.md
├── segmentation
│   ├── data_lvrv_segmentation_propagation_acdc.py
│   ├── predict_lvrv_net.py
│   ├── module_lvrv_net.py
│   └── finetune_lvrv_net.py
└── README.md

--------------------------------------------------------------------------------
/classification/trained_model_DCM_classifier.joblib:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/julien-zheng/CardiacMotionFlow/HEAD/classification/trained_model_DCM_classifier.joblib

--------------------------------------------------------------------------------
/classification/trained_model_HCM_classifier.joblib:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/julien-zheng/CardiacMotionFlow/HEAD/classification/trained_model_HCM_classifier.joblib

--------------------------------------------------------------------------------
/classification/trained_model_MINF_classifier.joblib:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/julien-zheng/CardiacMotionFlow/HEAD/classification/trained_model_MINF_classifier.joblib

--------------------------------------------------------------------------------
/classification/trained_model_RVA_classifier.joblib:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/julien-zheng/CardiacMotionFlow/HEAD/classification/trained_model_RVA_classifier.joblib

--------------------------------------------------------------------------------
/classification/acdc_testing_set_prediction.txt:
--------------------------------------------------------------------------------
patient101 DCM
patient102 NOR
patient103 MINF
patient104 HCM
patient105 HCM
patient106 RV
patient107 NOR
patient108 HCM
patient109 RV
patient110 NOR
patient111 HCM
patient112 MINF
patient113 DCM
patient114 HCM
patient115 MINF
patient116 HCM
patient117 DCM
patient118 MINF
patient119 RV
patient120 MINF
patient121 RV
patient122 DCM
patient123 NOR
patient124 RV
patient125 NOR
patient126 RV
patient127 RV
patient128 NOR
patient129 RV
patient130 NOR
patient131 DCM
patient132 DCM
patient133 DCM
patient134 HCM
patient135 DCM
patient136 DCM
patient137 MINF
patient138 HCM
patient139 NOR
patient140 RV
patient141 RV
patient142 HCM
patient143 MINF
patient144 NOR
patient145 MINF
patient146 HCM
patient147 RV
patient148 MINF
patient149 MINF
patient150 NOR
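
The file above is the final output of the pipeline: one line per ACDC test subject, giving the subject ID and the predicted diagnostic class (DCM, HCM, MINF, NOR, or RV). A minimal reading sketch (not part of the repository; the relative path is an assumption):

    def read_predictions(path='classification/acdc_testing_set_prediction.txt'):
        # Parse "patientXXX LABEL" lines into a {subject: label} dict.
        predictions = {}
        with open(path) as f:
            for line in f:
                subject, label = line.split()
                predictions[subject] = label
        return predictions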

--------------------------------------------------------------------------------
/config.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python


# Directory of the project
code_dir = '/home/qzheng/Programs/tensorflow/my_models/CardiacMotionFlow'


excluded_slice_ratio = 0.2



# For the ACDC data
acdc_data_dir = '/data/asclepios/user/qzheng/Data/MICCAI2017_ACDC_Challenge2/{}'

acdc_dilated_subjects = ['patient{}'.format(str(x).zfill(3)) for x in range(1, 21)]
acdc_hypertrophic_subjects = ['patient{}'.format(str(x).zfill(3)) for x in range(21, 41)]
acdc_infarct_subjects = ['patient{}'.format(str(x).zfill(3)) for x in range(41, 61)]
acdc_normal_subjects = ['patient{}'.format(str(x).zfill(3)) for x in range(61, 81)]
acdc_rv_subjects = ['patient{}'.format(str(x).zfill(3)) for x in range(81, 101)]
acdc_test_subjects = ['patient{}'.format(str(x).zfill(3)) for x in range(101, 151)]

acdc_seq_instants = 10



# ROI-net
roi_net_initial_lr = 1e-4
roi_net_decay_rate = 1.0
roi_net_batch_size = 16
roi_net_input_img_size = 128
roi_net_epochs = 50


# LVRV-net
lvrv_net_initial_lr = 1e-4
lvrv_net_decay_rate = 1.0
lvrv_net_batch_size = 16
lvrv_net_input_img_size = 192
lvrv_net_epochs = 1000


# ApparentFlow-net
apparentflow_net_initial_lr = 1e-4
apparentflow_net_decay_rate = 1.0
apparentflow_net_batch_size = 16
apparentflow_net_input_img_size = 128
apparentflow_net_epochs = 50
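
Note that `acdc_data_dir` ends with a `{}` placeholder: the scripts below call `.format(subject)` on it to obtain the per-subject directory, and the six subject lists split the 150 ACDC cases into the five training groups (20 cases each) and the 50 test cases. A minimal usage sketch (not part of the repository):

    import os
    import config

    subject = config.acdc_test_subjects[0]                # 'patient101'
    subject_dir = config.acdc_data_dir.format(subject)    # per-subject directory
    nifti_file = os.path.join(subject_dir, '{}_4d.nii.gz'.format(subject))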

--------------------------------------------------------------------------------
/classification/acdc_testing_set_prediction_MINF_classifier.txt:
--------------------------------------------------------------------------------
patient101 1.0 0.956506285536
patient102 0.0 0.0127365968698
patient103 1.0 0.940115226204
patient104 0.0 0.000816828325961
patient105 0.0 0.00433260931209
patient106 1.0 0.998479104765
patient107 0.0 0.113912381453
patient108 0.0 0.00182973775068
patient109 0.0 0.316284188652
patient110 0.0 0.0228126058844
patient111 0.0 0.0210944416922
patient112 1.0 0.62170850525
patient113 1.0 0.993086232861
patient114 0.0 0.0234847835105
patient115 1.0 0.837166792083
patient116 0.0 0.0503207134153
patient117 1.0 0.946281087282
patient118 1.0 0.947468144359
patient119 0.0 0.146962691241
patient120 1.0 0.908063977851
patient121 0.0 0.0427031436344
patient122 1.0 0.771581901727
patient123 0.0 0.0133821632454
patient124 0.0 0.19297948082
patient125 0.0 0.0753695193948
patient126 0.0 0.0763628195167
patient127 0.0 0.223668673828
patient128 0.0 0.236986878541
patient129 0.0 0.217521169276
patient130 0.0 0.0806755572299
patient131 1.0 0.996451162879
patient132 1.0 0.997053323306
patient133 1.0 0.996096001502
patient134 0.0 0.19624149397
patient135 1.0 0.989769640873
patient136 1.0 0.955920914329
patient137 1.0 0.85523066484
patient138 0.0 0.00825047042273
patient139 0.0 0.0549231546373
patient140 0.0 0.102463517629
patient141 0.0 0.0147602304161
patient142 0.0 0.00277259788637
patient143 1.0 0.979211906674
patient144 0.0 0.0111634937042
patient145 1.0 0.992892609347
patient146 0.0 0.00392578483445
patient147 0.0 0.0080645621594
patient148 1.0 0.954485738334
patient149 1.0 0.721965145857
patient150 0.0 0.0243039305882

--------------------------------------------------------------------------------
/classification/acdc_testing_set_prediction_HCM_classifier.txt:
--------------------------------------------------------------------------------
patient101 0.0 0.000583553521222
patient102 0.0 0.475907188174
patient103 0.0 0.0856026997006
patient104 1.0 0.981778268918
patient105 1.0 0.996763916849
patient106 0.0 0.00011107277206
patient107 0.0 0.00644585273948
patient108 1.0 0.996097335553
patient109 0.0 0.0102558064258
patient110 0.0 0.17124178908
patient111 1.0 0.964832341657
patient112 0.0 0.0413642460318
patient113 0.0 0.0012845873098
patient114 1.0 0.999992628219
patient115 0.0 0.00745724369589
patient116 1.0 0.936853201887
patient117 0.0 0.00216356109244
patient118 0.0 0.0239373956366
patient119 0.0 0.00558264871332
patient120 0.0 0.125483498085
patient121 0.0 0.0413143819967
patient122 0.0 0.021362047458
patient123 0.0 0.291812676083
patient124 0.0 0.0253972149032
patient125 0.0 0.0228003048837
patient126 0.0 0.00730190243135
patient127 0.0 0.00533488464298
patient128 0.0 0.014358838894
patient129 0.0 0.0135636406716
patient130 0.0 0.0410293254982
patient131 0.0 0.000493557786805
patient132 0.0 0.000658838760258
patient133 0.0 0.00377859293203
patient134 1.0 0.574765701742
patient135 0.0 0.00140396800314
patient136 0.0 0.00509689166215
patient137 0.0 0.0171586195761
patient138 1.0 0.990429985507
patient139 0.0 0.0136276498543
patient140 0.0 0.0173671546148
patient141 0.0 0.162367885903
patient142 1.0 0.976435332075
patient143 0.0 0.00762635040703
patient144 0.0 0.118600428163
patient145 0.0 0.126899161187
patient146 1.0 0.931987077869
patient147 1.0 0.68679386367
patient148 0.0 0.00538927980389
patient149 0.0 0.0169269448032
patient150 0.0 0.0367665937628

--------------------------------------------------------------------------------
/classification/acdc_testing_set_prediction_RVA_classifier.txt:
--------------------------------------------------------------------------------
patient101 0.0 0.00141642939646
patient102 0.0 0.0135932848657
patient103 0.0 7.07780093e-05
patient104 0.0 0.00100878401483
patient105 0.0 0.000814588200264
patient106 1.0 0.679076319727
patient107 0.0 0.122416479254
patient108 0.0 0.00677191283311
patient109 1.0 0.876963419085
patient110 0.0 0.190558687499
patient111 0.0 0.00085339770611
patient112 0.0 0.00454278547741
patient113 0.0 0.00282163822454
patient114 0.0 0.000255526603312
patient115 0.0 0.0120355869289
patient116 0.0 0.00268884268949
patient117 0.0 0.0015305466386
patient118 0.0 0.0157953968029
patient119 1.0 0.983641201624
patient120 0.0 0.000201892917603
patient121 1.0 0.805091578439
patient122 0.0 0.000768829200072
patient123 0.0 0.0241042443386
patient124 1.0 0.981135592
patient125 0.0 0.108075183099
patient126 1.0 0.998151814949
patient127 1.0 0.956273399248
patient128 0.0 0.062068003518
patient129 1.0 0.894493989336
patient130 0.0 0.0967726117535
patient131 0.0 0.210141197931
patient132 0.0 0.0157156443365
patient133 0.0 0.00166100087663
patient134 0.0 0.0123527795623
patient135 0.0 0.00230631340564
patient136 0.0 0.00151952817428
patient137 0.0 0.00616674842247
patient138 0.0 0.00374601483709
patient139 0.0 0.0495402572139
patient140 1.0 0.999997489289
patient141 1.0 0.999962157597
patient142 0.0 0.00973084421008
patient143 0.0 0.00102902772472
patient144 0.0 0.00483116833515
patient145 0.0 0.000241314198373
patient146 0.0 0.00472360102658
patient147 1.0 0.99961732982
patient148 0.0 0.00562370588994
patient149 0.0 0.0262033771338
patient150 0.0 0.0291331149663

--------------------------------------------------------------------------------
/classification/acdc_testing_set_prediction_DCM_classifier.txt:
--------------------------------------------------------------------------------
patient101 1.0 0.855596691475
patient102 0.0 0.00242119102148
patient103 0.0 0.000108960043846
patient104 0.0 1.69324511864e-05
patient105 0.0 1.90796081051e-06
patient106 1.0 0.989206259226
patient107 0.0 0.00887324209068
patient108 0.0 5.85386072984e-07
patient109 0.0 0.00270922232121
patient110 0.0 0.00173986891075
patient111 0.0 0.0291209060798
patient112 0.0 0.00860272932411
patient113 1.0 0.970256094412
patient114 0.0 1.62422664778e-05
patient115 0.0 0.00687679046422
patient116 0.0 6.54031604393e-07
patient117 1.0 0.997514852699
patient118 0.0 0.0110675866681
patient119 0.0 5.95160713179e-07
patient120 0.0 0.00501147857945
patient121 0.0 0.00107314035682
patient122 1.0 0.546531183421
patient123 0.0 0.00245310066593
patient124 0.0 0.00030663275174
patient125 0.0 2.75061427673e-05
patient126 0.0 7.25675512696e-06
patient127 0.0 0.000121887487967
patient128 0.0 0.0163973371154
patient129 0.0 0.0304305681889
patient130 0.0 3.50604484816e-05
patient131 1.0 0.958198607317
patient132 1.0 0.997953005996
patient133 1.0 0.975219910857
patient134 0.0 0.00277072119589
patient135 1.0 0.943544205879
patient136 1.0 0.650700514626
patient137 0.0 0.00152998121075
patient138 0.0 0.0199515001854
patient139 0.0 0.000441854638556
patient140 0.0 1.93462545899e-08
patient141 0.0 7.15532610966e-05
patient142 0.0 0.00217432851802
patient143 0.0 0.176986109646
patient144 0.0 4.3951670784e-05
patient145 0.0 0.133490592826
patient146 0.0 1.946591109e-05
patient147 0.0 0.00153012824656
patient148 0.0 0.060648799657
patient149 0.0 0.0382454928001
patient150 0.0 0.00118909249457
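
Each of the four classifier files above contains one line per test subject: the subject ID, the binary decision of the corresponding one-vs-rest classifier (1.0 = positive), and the predicted positive-class probability. `classification_prediction.py` (further below) merges the four decisions with a fixed precedence; a condensed restatement of that rule (hypothetical helper, not part of the repository):

    def assemble_label(rva, hcm, dcm, minf):
        # Binary decisions (1.0/0.0) of the four one-vs-rest classifiers,
        # applied in the same precedence order as classification_prediction.py.
        if rva == 1:
            return 'RV'
        if hcm == 1:
            return 'HCM'
        if dcm == 1:
            return 'DCM'
        if minf == 1:
            return 'MINF'
        return 'NOR'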

--------------------------------------------------------------------------------
/download_weights.py:
--------------------------------------------------------------------------------
"""Download the pretrained weights of the networks"""

import os
import sys

import config

def download_weights():
    if sys.version_info >= (3, 0):
        import urllib.request as urltool
    else:
        import urllib as urltool

    code_dir = config.code_dir


    # ROI-net
    print("Downloading pretrained ROI-net")
    roi_net_source = 'http://www-sop.inria.fr/members/Qiao.Zheng/CardiacMotionFlow/ROI/model_roi_net_epoch050.h5'
    roi_net_destination = os.path.join(code_dir, 'ROI', 'model_roi_net_epoch050.h5')
    urltool.urlretrieve(roi_net_source, roi_net_destination)


    # LVRV-net
    print("Downloading pretrained LVRV-net")

    lvrv_net_source = 'http://www-sop.inria.fr/members/Qiao.Zheng/CardiacMotionFlow/segmentation/model_lvrv_net_epoch080.h5'
    lvrv_net_destination = os.path.join(code_dir, 'segmentation', 'model_lvrv_net_epoch080.h5')
    urltool.urlretrieve(lvrv_net_source, lvrv_net_destination)

    for f in range(0, 6):
        lvrv_net_source = 'http://www-sop.inria.fr/members/Qiao.Zheng/CardiacMotionFlow/segmentation/model_lvrv_net_finetune_fold{}_epoch1000.h5'.format(f)
        lvrv_net_destination = os.path.join(code_dir, 'segmentation', 'model_lvrv_net_finetune_fold{}_epoch1000.h5'.format(f))
        urltool.urlretrieve(lvrv_net_source, lvrv_net_destination)


    # ApparentFlow-net
    print("Downloading pretrained ApparentFlow-net")
    for f in range(0, 6):
        apparentflow_net_source = 'http://www-sop.inria.fr/members/Qiao.Zheng/CardiacMotionFlow/flow/model_apparentflow_net_fold{}_epoch050.h5'.format(f)
        apparentflow_net_destination = os.path.join(code_dir, 'flow', 'model_apparentflow_net_fold{}_epoch050.h5'.format(f))
        urltool.urlretrieve(apparentflow_net_source, apparentflow_net_destination)


if __name__ == '__main__':
    download_weights()
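
The script fetches one ROI-net checkpoint, one LVRV-net checkpoint plus its six fine-tuned folds, and the six ApparentFlow-net folds into the matching subdirectories of `code_dir`, so it should be run once before any inference script. A usage sketch (not part of the repository):

    # run from the directory configured as config.code_dir
    from download_weights import download_weights
    download_weights()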

--------------------------------------------------------------------------------
/feature_extraction/acdc_pixel_size.py:
--------------------------------------------------------------------------------

import sys
sys.path.append('..')

import os
import math
from scipy import misc
import nibabel as nib

import config

def acdc_pixel_size():
    data_dir = config.acdc_data_dir
    code_dir = config.code_dir

    new_img_size = config.apparentflow_net_input_img_size

    dilated_subjects = config.acdc_dilated_subjects
    hypertrophic_subjects = config.acdc_hypertrophic_subjects
    infarct_subjects = config.acdc_infarct_subjects
    normal_subjects = config.acdc_normal_subjects
    rv_subjects = config.acdc_rv_subjects
    test_subjects = config.acdc_test_subjects

    all_subjects = dilated_subjects + hypertrophic_subjects + infarct_subjects + normal_subjects + rv_subjects + test_subjects

    pixel_size_info = open(os.path.join(code_dir, 'acdc_info', 'acdc_pixel_size.txt'), 'w')

    for subject in all_subjects:
        print(subject)
        subject_dir = data_dir.format(subject)
        subject_file = os.path.join(subject_dir, '{}_4d.nii.gz'.format(subject))
        subject_img = nib.load(subject_file)
        header = subject_img.header
        #print(header.get_zooms())
        pixel_size = header.get_zooms()[0]
        slice_thickness = header.get_zooms()[2]

        predict_dir = os.path.join(subject_dir, 'predict_2D')
        a_prediction_file = ''
        for f in os.listdir(predict_dir):
            if f.startswith('predict_lvrv2_') and f.endswith('png'):
                a_prediction_file = f
                break
        a_prediction_file_full = os.path.join(predict_dir, a_prediction_file)
        a_prediction = misc.imread(a_prediction_file_full)
        #print(a_prediction.shape)
        roi_size = a_prediction.shape[0]

        # The square ROI crop (roi_size pixels wide) is resized to new_img_size,
        # so the effective pixel size scales by roi_size / new_img_size
        new_pixel_size = pixel_size * roi_size / new_img_size

        written = '{} {} {} {} {} {}\n'.format(subject, pixel_size, roi_size, new_pixel_size, new_img_size, slice_thickness)

        pixel_size_info.write(written)

    pixel_size_info.close()

if __name__ == '__main__':
    acdc_pixel_size()
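
Each output line records the original in-plane pixel size, the side length of the square ROI crop, and the effective pixel size after the crop is resized to the ApparentFlow-net input resolution. A worked example with hypothetical values:

    pixel_size = 1.5      # mm, original in-plane resolution (hypothetical)
    roi_size = 192        # px, side length of the ROI crop (hypothetical)
    new_img_size = 128    # px, network input size from config
    new_pixel_size = pixel_size * roi_size / new_img_size    # 2.25 mm/px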

--------------------------------------------------------------------------------
/ROI/data_roi_predict.py:
--------------------------------------------------------------------------------
""" A function to generate the lists of files for ROI-net inference"""

import sys
sys.path.append('..')

import os

import config

def data_roi_predict():
    data_dir = config.acdc_data_dir
    code_dir = config.code_dir

    info_file = os.path.join(code_dir, 'acdc_info', 'acdc_info.txt')
    with open(info_file) as in_file:
        subject_info = in_file.readlines()

    subject_info = [x.strip() for x in subject_info]
    subject_info = [ y.split()[0:2] + [float(z) for z in y.split()[2:]] for y in subject_info]

    predict_img_list = []
    predict_gt_list = []

    all_subjects = ['patient{}'.format(str(x).zfill(3)) for x in range(1, 151)]
    for subject in all_subjects:
        subject_dir = data_dir.format(subject)
        subject_predict_dir = os.path.join(subject_dir, 'mask_original_2D')
        if not os.path.exists(subject_predict_dir):
            os.makedirs(subject_predict_dir)
        #subject_predict_file = os.path.join(subject_predict_dir, 'mask_original_2D_{}_{}.png')

        instants = int([x for x in subject_info if x[0] == subject][0][2])
        ed_instant = int([x for x in subject_info if x[0] == subject][0][3])
        es_instant = int([x for x in subject_info if x[0] == subject][0][4])
        slices = int([x for x in subject_info if x[0] == subject][0][5])

        original_2D_path = os.path.join(subject_dir, 'original_2D')

        # Prediction on the ED stacks only
        used_instants = [ed_instant]

        for idx, t in enumerate(used_instants):
            for s in range(int(round(slices * 0.1 + 0.001)), int(round(slices * 0.5 + 0.001))):
                s_t_image_file = os.path.join(original_2D_path, 'original_2D_{}_{}.png'.format(str(s).zfill(2), str(t).zfill(2)) )
                # The adapted ground-truth
                s_t_image_gt_file = ''

                predict_img_list.append(s_t_image_file)
                predict_gt_list.append(s_t_image_gt_file)

    print('predict_image_count = {}'.format(len(predict_img_list)) )

    return predict_img_list, predict_gt_list
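
ROI-net inference therefore only looks at the end-diastolic frame, and only at the slices lying between 10% and 50% of the stack depth. For example, with a 10-slice stack the loop covers slices 1 to 4:

    slices = 10                                # hypothetical stack depth
    start = int(round(slices * 0.1 + 0.001))   # 1
    end = int(round(slices * 0.5 + 0.001))     # 5 (exclusive)
    used_slices = list(range(start, end))      # [1, 2, 3, 4]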

--------------------------------------------------------------------------------
/processing/convert_nifti_to_2D.py:
--------------------------------------------------------------------------------
""" Convert the NIfTI images and ground-truth to groups of 2D PNG files """

import sys
sys.path.append('..')

import os
import numpy as np
from PIL import Image
from scipy import interpolate
import nibabel as nib

import config


def convert_nifti_to_2D():
    data_dir = config.acdc_data_dir
    code_dir = config.code_dir

    subjects = ['patient{}'.format(str(x).zfill(3)) for x in range(1, 151)]

    print('There are {} subjects in total'.format(len(subjects)))

    # For each case
    for subject in subjects:
        print('Processing {}'.format(subject) )

        # Define the paths
        subject_dir = data_dir.format(subject)
        subject_original_2D_dir = os.path.join(subject_dir, 'original_2D')

        if not os.path.exists(subject_original_2D_dir):
            os.makedirs(subject_original_2D_dir)

        sa_zip_file = os.path.join(subject_dir, '{}_4d.nii.gz'.format(subject))

        # If the short-axis image file exists, read the data and perform the conversion
        if os.path.isfile(sa_zip_file):
            img = nib.load(sa_zip_file)
            data = img.get_data()
            data_np = np.array(data)

            max_pixel_value = data_np.max()

            if max_pixel_value > 0:
                multiplier = 255.0 / max_pixel_value
            else:
                multiplier = 1.0

            print('max_pixel_value = {}, multiplier = {}'.format(max_pixel_value, multiplier))

            rows = data.shape[0]
            columns = data.shape[1]
            slices = data.shape[2]
            times = data.shape[3]

            for t in range(times):
                for s in range(slices):
                    s_t_image_file = os.path.join(subject_original_2D_dir, 'original_2D_{}_{}.png'.format(str(s).zfill(2), str(t).zfill(2)) )
                    Image.fromarray((np.rot90(data[:, ::-1, s, t], 1) * multiplier).astype('uint8')).save(s_t_image_file)

        else:
            print('There is no SA image file for {}'.format(subject))


if __name__ == '__main__':
    convert_nifti_to_2D()
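
Each 4D volume of shape (rows, columns, slices, times) is rescaled to the 8-bit range, mirrored and rotated, and written out frame by frame; the slice and time indices are zero-padded to two digits in the file name. For example (hypothetical indices):

    s, t = 3, 7
    name = 'original_2D_{}_{}.png'.format(str(s).zfill(2), str(t).zfill(2))
    # -> 'original_2D_03_07.png'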

--------------------------------------------------------------------------------
/acdc_info/acdc_gt_base.txt:
--------------------------------------------------------------------------------
patient001 1 9 2 9
patient002 0 9 1 9
patient003 1 9 1 9
patient004 0 9 0 9
patient005 1 9 2 9
patient006 2 10 3 10
patient007 1 9 2 9
patient008 0 9 1 9
patient009 0 9 0 9
patient010 0 9 1 9
patient011 1 8 1 8
patient012 0 9 1 9
patient013 0 9 0 9
patient014 0 9 1 9
patient015 0 7 0 8
patient016 0 9 1 9
patient017 0 8 0 8
patient018 0 7 0 7
patient019 0 9 0 10
patient020 0 7 0 7
patient021 1 8 2 9
patient022 0 6 0 6
patient023 1 6 1 8
patient024 0 6 0 7
patient025 0 8 2 8
patient026 1 8 2 9
patient027 1 8 2 9
patient028 0 6 1 9
patient029 0 9 2 10
patient030 1 8 2 9
patient031 0 7 1 9
patient032 0 7 2 9
patient033 1 8 2 9
patient034 0 8 1 9
patient035 0 11 2 12
patient036 0 7 1 7
patient037 1 5 1 6
patient038 0 7 0 7
patient039 0 7 1 8
patient040 0 9 1 9
patient041 1 5 1 5
patient042 0 8 2 8
patient043 0 11 2 11
patient044 0 8 1 8
patient045 1 7 1 7
patient046 0 8 1 8
patient047 0 7 1 8
patient048 0 7 1 7
patient049 1 6 2 6
patient050 1 9 2 9
patient051 0 9 1 9
patient052 0 7 0 7
patient053 0 6 0 6
patient054 0 6 1 7
patient055 0 8 0 8
patient056 0 7 1 8
patient057 0 6 1 7
patient058 0 8 1 8
patient059 0 8 1 8
patient060 1 8 2 8
patient061 0 7 2 8
patient062 0 8 1 9
patient063 0 6 2 7
patient064 0 8 1 9
patient065 0 7 2 7
patient066 1 8 2 8
patient067 0 8 1 9
patient068 0 6 1 6
patient069 0 6 1 6
patient070 0 5 1 5
patient071 0 8 2 9
patient072 0 6 1 7
patient073 0 6 1 6
patient074 0 7 2 7
patient075 0 13 1 13
patient076 0 6 2 7
patient077 0 6 1 7
patient078 0 7 1 7
patient079 0 7 1 8
patient080 0 5 1 5
patient081 0 16 2 16
patient082 0 15 2 15
patient083 0 5 0 5
patient084 0 10 0 11
patient085 0 13 0 14
patient086 0 5 0 6
patient087 0 7 0 7
patient088 1 14 4 15
patient089 0 5 0 5
patient090 0 5 1 6
patient091 0 6 1 7
patient092 0 12 2 14
patient093 1 8 2 9
patient094 0 9 0 9
patient095 0 12 0 13
patient096 0 17 2 17
patient097 0 7 1 7
patient098 0 6 0 6
patient099 0 14 2 15
patient100 0 6 1 7

--------------------------------------------------------------------------------
/processing/acdc_info.py:
--------------------------------------------------------------------------------

import sys
sys.path.append('..')

import os
import math
import numpy as np
import nibabel as nib

import config

def acdc_info():
    data_dir = config.acdc_data_dir
    code_dir = config.code_dir

    dilated_subjects = config.acdc_dilated_subjects
    hypertrophic_subjects = config.acdc_hypertrophic_subjects
    infarct_subjects = config.acdc_infarct_subjects
    normal_subjects = config.acdc_normal_subjects
    rv_subjects = config.acdc_rv_subjects
    test_subjects = config.acdc_test_subjects

    train_subjects = dilated_subjects + hypertrophic_subjects + infarct_subjects + normal_subjects + rv_subjects

    bsa_info = open(os.path.join(code_dir, 'acdc_info', 'acdc_info.txt'), 'w')

    for subject in train_subjects:
        print(subject)
        subject_dir = data_dir.format(subject)
        subject_info_file = os.path.join(subject_dir, 'Info.cfg')
        with open(subject_info_file) as s_file:
            subject_info = s_file.readlines()

        subject_info = [x.strip() for x in subject_info]
        # The ED and ES frame indices are 1-based in Info.cfg; convert to 0-based
        ED = int(subject_info[0][4:]) - 1
        ES = int(subject_info[1][4:]) - 1
        group = subject_info[2][7:]
        height = float(subject_info[3][8:])
        num_frame = int(subject_info[4][9:])
        weight = float(subject_info[5][8:])

        # Body surface area (Mosteller formula), with height in cm and weight in kg
        bsa = math.sqrt(weight * height / 3600)

        sa_zip_file = os.path.join(subject_dir, '{}_4d.nii.gz'.format(subject))
        img = nib.load(sa_zip_file)
        data = img.get_data()
        data_np = np.array(data)
        slices = data.shape[2]

        written = '{} {} {} {} {} {} {} {} {}\n'.format(subject, group, num_frame, ED, ES, slices, height, weight, bsa)

        bsa_info.write(written)

    for subject in test_subjects:
        print(subject)
        subject_dir = data_dir.format(subject)
        subject_info_file = os.path.join(subject_dir, 'Info.cfg')
        with open(subject_info_file) as s_file:
            subject_info = s_file.readlines()

        subject_info = [x.strip() for x in subject_info]
        # The Info.cfg of a test subject has no Group field, hence the shifted offsets
        ED = int(subject_info[0][4:]) - 1
        ES = int(subject_info[1][4:]) - 1
        group = 'TEST'
        height = float(subject_info[2][8:])
        num_frame = int(subject_info[3][9:])
        weight = float(subject_info[4][8:])

        bsa = math.sqrt(weight * height / 3600)

        sa_zip_file = os.path.join(subject_dir, '{}_4d.nii.gz'.format(subject))
        img = nib.load(sa_zip_file)
        data = img.get_data()
        data_np = np.array(data)
        slices = data.shape[2]

        written = '{} {} {} {} {} {} {} {} {}\n'.format(subject, group, num_frame, ED, ES, slices, height, weight, bsa)

        bsa_info.write(written)

    bsa_info.close()

if __name__ == '__main__':
    acdc_info()
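
The body surface area written in the last column uses the Mosteller formula with height in centimeters and weight in kilograms. A worked example with hypothetical values:

    import math
    height, weight = 170.0, 72.0               # cm, kg (hypothetical)
    bsa = math.sqrt(weight * height / 3600)    # about 1.84 (m^2)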

--------------------------------------------------------------------------------
/classification/classification_prediction.py:
--------------------------------------------------------------------------------
import sys
sys.path.append('..')

import os
import numpy as np

import config

from classifiers import classifiers


def classification_prediction():
    code_path = config.code_dir

    classifier_names = ['RVA_classifier', 'HCM_classifier', 'DCM_classifier', 'MINF_classifier']
    data_class_nums = [5, 4, 3, 2]


    ###############################################
    # 5-fold cross validation on ACDC training set
    ###############################################
    print('\n5-fold cross validation on ACDC training set.')
    for k in range(4):
        print(classifier_names[k] + ' errors:')
        error_list = []
        for f in range(1, 6):
            error_list.append(classifiers(f, data_class_nums[k]))
        print(error_list)


    #################################
    # Prediction on ACDC testing set
    #################################
    print('\nPrediction on ACDC testing set.\n')

    # Predictions by the 4 binary classifiers
    for k in range(4):
        classifiers(0, data_class_nums[k], classifier_names[k])


    # Assemble the results
    test_subjects = config.acdc_test_subjects

    all_results = [[] for k in range(4)]
    for k in range(4):
        classifier_result_file = os.path.join(code_path, 'classification', 'acdc_testing_set_prediction_{}.txt'.format(classifier_names[k]))
        with open(classifier_result_file) as f:
            classifier_result_lines = f.readlines()
        results = [x.strip() for x in classifier_result_lines]
        results = [ [y.split()[0]] + [float(z) for z in y.split()[1:]] for y in results]
        all_results[k] = results


    rva_classifier_results = all_results[0]
    hyp_classifier_results = all_results[1]
    dcm_classifier_results = all_results[2]
    minf_classifier_results = all_results[3]


    record_file = os.path.join(code_path, 'classification', 'acdc_testing_set_prediction.txt')
    record = open(record_file, 'w')


    for subject in test_subjects:
        rva_classifier_prediction = [x for x in rva_classifier_results if x[0] == subject][0][1]
        hyp_classifier_prediction = [x for x in hyp_classifier_results if x[0] == subject][0][1]
        dcm_classifier_prediction = [x for x in dcm_classifier_results if x[0] == subject][0][1]
        minf_classifier_prediction = [x for x in minf_classifier_results if x[0] == subject][0][1]
        if rva_classifier_prediction == 1:
            final_prediction = 'RV'
        elif hyp_classifier_prediction == 1:
            final_prediction = 'HCM'
        elif dcm_classifier_prediction == 1:
            final_prediction = 'DCM'
        elif minf_classifier_prediction == 1:
            final_prediction = 'MINF'
        else:
            final_prediction = 'NOR'

        written = '{} {}\n'.format(subject, final_prediction)
        record.write(written)

    record.close()


if __name__ == '__main__':
    classification_prediction()

--------------------------------------------------------------------------------
/acdc_info/acdc_base.txt:
--------------------------------------------------------------------------------
patient001 1 9 2 9
patient002 0 9 1 9
patient003 1 9 1 9
patient004 0 9 0 9
patient005 1 9 1 9
patient006 2 10 2 10
patient007 1 9 2 9
patient008 0 9 1 9
patient009 0 9 0 9
patient010 0 9 1 9
patient011 0 8 0 8
patient012 2 9 0 9
patient013 0 9 0 9
patient014 1 9 1 9
patient015 0 7 0 8
patient016 0 9 0 9
patient017 0 8 0 8
patient018 0 7 0 7
patient019 0 9 0 10
patient020 0 7 0 7
patient021 1 8 3 9
patient022 0 5 0 6
patient023 0 6 1 8
patient024 0 6 0 7
patient025 0 8 1 8
patient026 1 8 1 9
patient027 2 8 2 9
patient028 0 7 1 9
patient029 1 8 2 10
patient030 1 8 1 9
patient031 0 8 1 9
patient032 1 7 2 9
patient033 1 8 2 9
patient034 0 8 1 9
patient035 0 11 1 12
patient036 0 7 1 7
patient037 1 5 2 6
patient038 0 7 0 7
patient039 0 6 1 8
patient040 0 9 1 9
patient041 1 5 1 5
patient042 0 8 1 8
patient043 0 11 1 11
patient044 0 7 1 8
patient045 1 7 1 7
patient046 0 8 1 8
patient047 0 8 1 8
patient048 0 6 1 7
patient049 1 6 2 6
patient050 1 9 2 9
patient051 0 9 1 9
patient052 0 7 1 7
patient053 0 6 0 6
patient054 0 6 1 7
patient055 0 8 1 8
patient056 0 8 1 8
patient057 0 6 0 7
patient058 0 8 1 8
patient059 0 8 1 8
patient060 0 8 1 8
patient061 0 7 2 8
patient062 0 8 0 9
patient063 0 6 1 7
patient064 0 8 1 9
patient065 0 7 2 7
patient066 1 8 2 8
patient067 0 8 0 9
patient068 0 6 1 6
patient069 0 6 1 6
patient070 0 5 0 5
patient071 0 8 2 9
patient072 0 6 1 7
patient073 0 6 0 6
patient074 0 6 2 7
patient075 0 13 0 13
patient076 1 7 2 7
patient077 0 6 1 7
patient078 0 7 1 7
patient079 0 8 1 8
patient080 0 5 1 5
patient081 0 16 1 16
patient082 0 15 2 15
patient083 0 5 0 5
patient084 1 9 1 11
patient085 0 14 1 14
patient086 0 5 0 6
patient087 0 7 1 7
patient088 1 14 4 15
patient089 0 5 0 5
patient090 0 6 0 6
patient091 0 6 1 7
patient092 0 12 1 14
patient093 0 8 1 9
patient094 0 9 2 9
patient095 0 12 0 13
patient096 0 17 2 17
patient097 0 7 1 7
patient098 0 6 0 6
patient099 0 14 2 15
patient100 0 7 0 7
patient101 1 9 2 9
patient102 0 6 2 7
patient103 0 8 1 8
patient104 0 8 2 8
patient105 1 8 2 9
patient106 0 8 1 8
patient107 0 7 1 8
patient108 0 7 2 9
patient109 0 7 1 7
patient110 0 8 1 8
patient111 1 5 1 5
patient112 0 9 2 9
patient113 0 9 1 9
patient114 0 9 1 10
patient115 0 9 1 9
patient116 0 8 1 9
patient117 1 9 2 9
patient118 0 8 1 8
patient119 0 15 2 16
patient120 1 6 1 8
patient121 0 7 1 7
patient122 0 8 1 8
patient123 1 7 2 7
patient124 1 18 1 20
patient125 0 15 2 16
patient126 2 13 4 15
patient127 1 16 2 18
patient128 0 7 1 8
patient129 1 15 3 16
patient130 0 9 2 9
patient131 0 9 0 9
patient132 0 8 0 9
patient133 0 9 1 9
patient134 3 10 4 10
patient135 1 9 2 9
patient136 1 8 2 9
patient137 1 8 2 8
patient138 1 7 1 8
patient139 0 15 2 15
patient140 1 17 3 19
patient141 1 8 3 9
patient142 1 6 2 7
patient143 1 9 2 9
patient144 1 6 2 7
patient145 1 8 2 9
patient146 1 7 2 8
patient147 0 11 2 14
patient148 0 6 0 7
patient149 0 8 2 9
patient150 0 7 1 7

--------------------------------------------------------------------------------
/processing/acdc_gt_base.py:
--------------------------------------------------------------------------------

import sys
sys.path.append('..')

import os
import math
import numpy as np
from PIL import Image

import config

def acdc_gt_base():
    data_dir = config.acdc_data_dir
    code_dir = config.code_dir

    dilated_subjects = config.acdc_dilated_subjects
    hypertrophic_subjects = config.acdc_hypertrophic_subjects
    infarct_subjects = config.acdc_infarct_subjects
    normal_subjects = config.acdc_normal_subjects
    rv_subjects = config.acdc_rv_subjects
    test_subjects = config.acdc_test_subjects

    all_subjects = dilated_subjects + hypertrophic_subjects + infarct_subjects + normal_subjects + rv_subjects

    info_file = os.path.join(code_dir, 'acdc_info', 'acdc_info.txt')
    with open(info_file) as in_file:
        subject_info = in_file.readlines()

    subject_info = [x.strip() for x in subject_info]
    subject_info = [ y.split()[0:2] + [float(z) for z in y.split()[2:]] for y in subject_info]

    base_info = open(os.path.join(code_dir, 'acdc_info', 'acdc_gt_base.txt'), 'w')

    for subject in all_subjects:
        subject_dir = data_dir.format(subject)
        subject_predict_dir = os.path.join(subject_dir, 'crop_2D')
        subject_predict_file = os.path.join(subject_predict_dir, 'crop_2D_gt_{}_{}.png')

        instants = int([x for x in subject_info if x[0] == subject][0][2])
        ed_instant = int([x for x in subject_info if x[0] == subject][0][3])
        es_instant = int([x for x in subject_info if x[0] == subject][0][4])
        slices = int([x for x in subject_info if x[0] == subject][0][5])

        # Basal slice at ED: first slice (from the top of the stack) whose
        # ground-truth contains the RV label (gray value 150); if the RV never
        # appears, fall back to the LV label (gray value 50)
        base_slice = 0
        have_rv = False
        for i in range(slices):
            img_file = subject_predict_file.format(str(i).zfill(2), str(ed_instant).zfill(2))
            img = Image.open(img_file)
            img.load()
            data = np.array(img)
            if 150 in data:
                base_slice = i
                have_rv = True
                break
        if not have_rv:
            for i in range(slices):
                img_file = subject_predict_file.format(str(i).zfill(2), str(ed_instant).zfill(2))
                img = Image.open(img_file)
                img.load()
                data = np.array(img)
                if 50 in data:
                    base_slice = i
                    break

        # Apical slice at ED: last slice containing the LV label
        apex_slice = slices-1
        for j in range(slices-1, -1, -1):
            img_file = subject_predict_file.format(str(j).zfill(2), str(ed_instant).zfill(2))
            img = Image.open(img_file)
            img.load()
            data = np.array(img)
            if 50 in data:
                apex_slice = j
                break

        # Same search at ES
        es_base_slice = 0
        have_rv = False
        for i in range(slices):
            img_file = subject_predict_file.format(str(i).zfill(2), str(es_instant).zfill(2))
            img = Image.open(img_file)
            img.load()
            data = np.array(img)
            if 150 in data:
                es_base_slice = i
                have_rv = True
                break
        if not have_rv:
            for i in range(slices):
                img_file = subject_predict_file.format(str(i).zfill(2), str(es_instant).zfill(2))
                img = Image.open(img_file)
                img.load()
                data = np.array(img)
                if 50 in data:
                    es_base_slice = i
                    break

        es_apex_slice = slices-1
        for j in range(slices-1, -1, -1):
            img_file = subject_predict_file.format(str(j).zfill(2), str(es_instant).zfill(2))
            img = Image.open(img_file)
            img.load()
            data = np.array(img)
            if 50 in data:
                es_apex_slice = j
                break

        print(subject, base_slice, apex_slice, es_base_slice, es_apex_slice)
        written = '{} {} {} {} {}\n'.format(subject, base_slice, apex_slice, es_base_slice, es_apex_slice)
        base_info.write(written)

    base_info.close()

if __name__ == '__main__':
    acdc_gt_base()
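
In the cropped ground-truth PNGs the structures are encoded as gray levels: the script treats 150 as the right-ventricle label and 50 as the left-ventricle label, scanning from the top of the stack for the basal slice and from the bottom for the apical slice. A condensed restatement of one such scan (hypothetical helper, not part of the repository):

    def first_slice_with_label(load_slice, n_slices, label):
        # load_slice(i) returns the 2-D label array of slice i;
        # returns the first slice index containing the gray value `label`.
        for i in range(n_slices):
            if label in load_slice(i):
                return i
        return None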

--------------------------------------------------------------------------------
/feature_extraction/acdc_base.py:
--------------------------------------------------------------------------------

import sys
sys.path.append('..')

import os
import math
import numpy as np
from PIL import Image

import config

def acdc_base():
    data_dir = config.acdc_data_dir
    code_dir = config.code_dir

    dilated_subjects = config.acdc_dilated_subjects
    hypertrophic_subjects = config.acdc_hypertrophic_subjects
    infarct_subjects = config.acdc_infarct_subjects
    normal_subjects = config.acdc_normal_subjects
    rv_subjects = config.acdc_rv_subjects
    test_subjects = config.acdc_test_subjects

    all_subjects = dilated_subjects + hypertrophic_subjects + infarct_subjects + normal_subjects + rv_subjects + test_subjects

    info_file = os.path.join(code_dir, 'acdc_info', 'acdc_info.txt')
    with open(info_file) as in_file:
        subject_info = in_file.readlines()

    subject_info = [x.strip() for x in subject_info]
    subject_info = [ y.split()[0:2] + [float(z) for z in y.split()[2:]] for y in subject_info]

    base_info = open(os.path.join(code_dir, 'acdc_info', 'acdc_base.txt'), 'w')

    for subject in all_subjects:
        subject_dir = data_dir.format(subject)
        subject_predict_dir = os.path.join(subject_dir, 'predict_2D')
        subject_predict_file = os.path.join(subject_predict_dir, 'predict_lvrv2_{}_{}.png')

        instants = int([x for x in subject_info if x[0] == subject][0][2])
        ed_instant = int([x for x in subject_info if x[0] == subject][0][3])
        es_instant = int([x for x in subject_info if x[0] == subject][0][4])
        slices = int([x for x in subject_info if x[0] == subject][0][5])

        # Same base/apex search as acdc_gt_base.py, but on the predicted
        # segmentations instead of the ground-truth
        base_slice = 0
        have_rv = False
        for i in range(slices):
            img_file = subject_predict_file.format(str(i).zfill(2), str(ed_instant).zfill(2))
            img = Image.open(img_file)
            img.load()
            data = np.array(img)
            if 150 in data:
                base_slice = i
                have_rv = True
                break
        if not have_rv:
            for i in range(slices):
                img_file = subject_predict_file.format(str(i).zfill(2), str(ed_instant).zfill(2))
                img = Image.open(img_file)
                img.load()
                data = np.array(img)
                if 50 in data:
                    base_slice = i
                    break

        apex_slice = slices-1
        for j in range(slices-1, -1, -1):
            img_file = subject_predict_file.format(str(j).zfill(2), str(ed_instant).zfill(2))
            img = Image.open(img_file)
            img.load()
            data = np.array(img)
            if 50 in data:
                apex_slice = j
                break

        es_base_slice = 0
        have_rv = False
        for i in range(slices):
            img_file = subject_predict_file.format(str(i).zfill(2), str(es_instant).zfill(2))
            img = Image.open(img_file)
            img.load()
            data = np.array(img)
            if 150 in data:
                es_base_slice = i
                have_rv = True
                break
        if not have_rv:
            for i in range(slices):
                img_file = subject_predict_file.format(str(i).zfill(2), str(es_instant).zfill(2))
                img = Image.open(img_file)
                img.load()
                data = np.array(img)
                if 50 in data:
                    es_base_slice = i
                    break

        es_apex_slice = slices-1
        for j in range(slices-1, -1, -1):
            img_file = subject_predict_file.format(str(j).zfill(2), str(es_instant).zfill(2))
            img = Image.open(img_file)
            img.load()
            data = np.array(img)
            if 50 in data:
                es_apex_slice = j
                break

        print(subject, base_slice, apex_slice, es_base_slice, es_apex_slice)
        written = '{} {} {} {} {}\n'.format(subject, base_slice, apex_slice, es_base_slice, es_apex_slice)
        base_info.write(written)

    base_info.close()

if __name__ == '__main__':
    acdc_base()

--------------------------------------------------------------------------------
/flow/data_apparentflow.py:
--------------------------------------------------------------------------------
""" A function to generate the lists of files for ApparentFlow-net """

import sys
sys.path.append('..')

import os
import math

import config

def data_apparentflow(mode='all', fold = 1):

    data_dir = config.acdc_data_dir
    code_dir = config.code_dir

    dilated_subjects = config.acdc_dilated_subjects
    hypertrophic_subjects = config.acdc_hypertrophic_subjects
    infarct_subjects = config.acdc_infarct_subjects
    normal_subjects = config.acdc_normal_subjects
    rv_subjects = config.acdc_rv_subjects
    test_subjects = config.acdc_test_subjects

    all_subjects = dilated_subjects + hypertrophic_subjects + infarct_subjects + normal_subjects + rv_subjects

    if mode == 'all':
        subjects = all_subjects
    elif mode == 'train':
        subjects = [x for i,x in enumerate(all_subjects) if (i % 5) != (fold % 5)]
    elif mode == 'val':
        subjects = [x for i,x in enumerate(all_subjects) if (i % 5) == (fold % 5)]
    elif mode == 'predict':
        subjects = test_subjects
    else:
        print('Incorrect mode')

    print(subjects)

    excluded_slice_ratio = config.excluded_slice_ratio

    seq_instants = config.acdc_seq_instants

    info_file = os.path.join(code_dir, 'acdc_info', 'acdc_info.txt')
    with open(info_file) as in_file:
        subject_info = in_file.readlines()

    subject_info = [x.strip() for x in subject_info]
    subject_info = [ y.split()[0:2] + [float(z) for z in y.split()[2:]] for y in subject_info]

    gt_base_file = os.path.join(code_dir, 'acdc_info', 'acdc_gt_base.txt')

    with open(gt_base_file) as g_file:
        gt_base_info = g_file.readlines()

    gt_base_info = [x.strip() for x in gt_base_info]
    gt_base_info = [ [y.split()[0]] + [int(z) for z in y.split()[1:]] for y in gt_base_info]

    print('There will be {} used subjects'.format(len(subjects)) )

    img_list0 = []
    img_list1 = []
    seg_list0 = []
    seg_list1 = []

    segmented_pair_count = 0
    unsegmented_pair_count = 0
    for subject in subjects:
        #print(subject)
        instants = int([x for x in subject_info if x[0] == subject][0][2])
        slices = int([x for x in subject_info if x[0] == subject][0][5])
        ed_instant = int([x for x in subject_info if x[0] == subject][0][3])
        es_instant = int([x for x in subject_info if x[0] == subject][0][4])
        subject_dir = data_dir.format(subject)

        if mode in ['test', 'predict']:
            start_slice = 0
            end_slice = slices
        else:
            base_slice = int([x for x in gt_base_info if x[0] == subject][0][1])
            apex_slice = int([x for x in gt_base_info if x[0] == subject][0][2])
            es_base_slice = int([x for x in gt_base_info if x[0] == subject][0][3])
            es_apex_slice = int([x for x in gt_base_info if x[0] == subject][0][4])

            # The start_slice is smaller than the end_slice
            start_slice = base_slice + int(round((apex_slice + 1 - base_slice) * excluded_slice_ratio))
            end_slice = apex_slice + 1 - int(round((apex_slice + 1 - base_slice) * excluded_slice_ratio))

        for i in range(start_slice, end_slice):

            for t in range(0, instants):

                img0 = os.path.join(subject_dir, 'crop_2D', 'crop_2D_{}_{}.png'.format(str(i).zfill(2), str(ed_instant).zfill(2)) )
                img1 = os.path.join(subject_dir, 'crop_2D', 'crop_2D_{}_{}.png'.format(str(i).zfill(2), str(t).zfill(2)) )
                if t == es_instant:
                    seg0 = os.path.join(subject_dir, 'crop_2D', 'crop_2D_gt_{}_{}.png'.format(str(i).zfill(2), str(ed_instant).zfill(2)) )
                    seg1 = os.path.join(subject_dir, 'crop_2D', 'crop_2D_gt_{}_{}.png'.format(str(i).zfill(2), str(t).zfill(2)) )
                    segmented_pair_count += 1
                else:
                    seg0 = os.path.join(subject_dir, 'crop_2D', 'crop_2D_gt_{}_{}.png'.format(str(i).zfill(2), str(-1).zfill(2)) )
                    seg1 = os.path.join(subject_dir, 'crop_2D', 'crop_2D_gt_{}_{}.png'.format(str(i).zfill(2), str(-1).zfill(2)) )
                    unsegmented_pair_count += 1
                img_list0.append(img0)
                img_list1.append(img1)
                seg_list0.append(seg0)
                seg_list1.append(seg1)

    print('pair count = {}'.format(len(img_list0)) )
    print('segmented_pair_count = {}'.format(segmented_pair_count), 'unsegmented_pair_count = {}'.format(unsegmented_pair_count))

    return img_list0, img_list1, seg_list0, seg_list1
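
The fold split relies on the ordering of `all_subjects`: since the 100 training cases are listed group by group in blocks of 20, taking every fifth subject (`i % 5 == fold % 5`) as validation yields 20 validation subjects with exactly 4 from each pathology group. A sketch, assuming `all_subjects` is the ordered list built above:

    fold = 1
    train = [x for i, x in enumerate(all_subjects) if (i % 5) != (fold % 5)]
    val   = [x for i, x in enumerate(all_subjects) if (i % 5) == (fold % 5)]
    # 80 training and 20 validation subjects, 4 per pathology group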

--------------------------------------------------------------------------------
/LICENSE.md:
--------------------------------------------------------------------------------
***

SOFTWARE LICENSE AGREEMENT


Software CardiacMotionFlow ©Inria – 2018, all rights reserved, hereinafter "the Software".

This software has been developed by researchers of the EPIONE project team of Inria (Institut National de Recherche en Informatique et Automatique).

Inria, Domaine de Voluceau, Rocquencourt - BP 105
78153 Le Chesnay Cedex, FRANCE


Inria holds all the ownership rights on the Software.

The Software has been registered with the Agence pour la Protection des Programmes (APP) under IDDN.FR.XXX.

The Software is still being developed. It is Inria's aim for the Software to be used by the scientific community so as to test it and evaluate it, so that Inria may improve it.

For these reasons Inria has decided to distribute the Software.

Inria grants to the academic user, free of charge and without the right to sublicense, the non-exclusive right to use the Software for research purposes for a period of one (1) year from the date of the download of the source code. Any other use without prior consent of Inria is prohibited.

The academic user explicitly acknowledges having received from Inria all information allowing him to appreciate the adequacy between the Software and his needs and to undertake all necessary precautions for its execution and use.

In case of using the Software for a publication or other results obtained through the use of the Software, the user should cite the Software as follows:

@misc{
}


Every user of the Software may communicate to the developers at the following address (qiao.zheng@inria.fr) his or her remarks as to the use of the Software.

THE USER CANNOT USE, EXPLOIT OR COMMERCIALLY DISTRIBUTE THE SOFTWARE WITHOUT PRIOR AND EXPLICIT CONSENT OF INRIA (stip-sam@inria.fr). ANY SUCH ACTION WILL CONSTITUTE A FORGERY.

THIS SOFTWARE IS PROVIDED "AS IS" WITHOUT ANY WARRANTIES OF ANY NATURE AND ANY EXPRESS OR IMPLIED WARRANTIES, WITH REGARDS TO COMMERCIAL USE, PROFESSIONAL USE, LEGAL OR NOT, OR OTHER, OR COMMERCIALISATION OR ADAPTATION.

UNLESS EXPLICITLY PROVIDED BY LAW, IN NO EVENT SHALL INRIA OR THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES, LOSS OF USE, DATA, OR PROFITS OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.



***
CONTRAT DE LICENCE DE LOGICIEL

Logiciel CardiacMotionFlow ©Inria 2018, tout droit réservé, ci-après dénommé "le Logiciel".

Le Logiciel a été conçu et réalisé par des chercheurs de l'équipe-projet EPIONE d'Inria (Institut National de Recherche en Informatique et Automatique).

Inria, Domaine de Voluceau, Rocquencourt - BP 105
78153 Le Chesnay Cedex, France

Inria détient tous les droits de propriété sur le Logiciel.

Le Logiciel a été déposé auprès de l'Agence pour la Protection des Programmes (APP) sous le numéro IDDN.FR.XXX

Le Logiciel est en cours de développement et Inria souhaite qu'il soit utilisé par la communauté scientifique de façon à le tester et l'évaluer, et afin qu'Inria puisse le cas échéant le faire évoluer.

A cette fin, Inria a décidé de distribuer le Logiciel.

Inria concède à l'utilisateur académique, gratuitement, sans droit de sous-licence, pour une période de un (1) an à compter du téléchargement du code source, le droit non-exclusif d'utiliser le Logiciel à fins de recherche. Toute autre utilisation sans l'accord préalable d'Inria est exclue.

L'utilisateur académique reconnaît expressément avoir reçu d'Inria toutes les informations lui permettant d'apprécier l'adéquation du Logiciel à ses besoins et de prendre toutes les précautions utiles pour sa mise en œuvre et son utilisation.

Si le Logiciel est utilisé pour la publication de résultats, l'utilisateur devra citer le Logiciel de la façon suivante :

@misc{
}


Tout utilisateur du Logiciel pourra communiquer ses remarques d'utilisation du Logiciel aux développeurs à l'adresse suivante : qiao.zheng@inria.fr


L'UTILISATEUR NE PEUT FAIRE NI UTILISATION NI EXPLOITATION NI DISTRIBUTION COMMERCIALE DU LOGICIEL SANS L'ACCORD EXPRÈS PRÉALABLE D'INRIA (stip-sam@inria.fr).
TOUT ACTE CONTRAIRE CONSTITUERAIT UNE CONTREFAÇON.

LE LOGICIEL EST FOURNI "TEL QU'EN L'ÉTAT" SANS AUCUNE GARANTIE DE QUELQUE NATURE, IMPLICITE OU EXPLICITE, QUANT À SON UTILISATION COMMERCIALE, PROFESSIONNELLE, LÉGALE OU NON, OU AUTRE, SA COMMERCIALISATION OU SON ADAPTATION.

SAUF LORSQU'EXPLICITEMENT PRÉVU PAR LA LOI, INRIA NE POURRA ÊTRE TENU POUR RESPONSABLE DE TOUT DOMMAGE OU PRÉJUDICE DIRECT, INDIRECT, (PERTES FINANCIÈRES DUES AU MANQUE À GAGNER, À L'INTERRUPTION D'ACTIVITÉS OU À LA PERTE DE DONNÉES, ETC...) DÉCOULANT DE L'UTILISATION DE TOUT OU PARTIE DU LOGICIEL OU DE L'IMPOSSIBILITÉ D'UTILISER CELUI-CI.

--------------------------------------------------------------------------------
/feature_extraction/acdc_motion_index.py:
--------------------------------------------------------------------------------
import numpy as np
import os

import config


def acdc_motion_index():

    data_dir = config.acdc_data_dir
    code_dir = config.code_dir

    excluded_slice_ratio = config.excluded_slice_ratio
    seq_instants = config.acdc_seq_instants

    dilated_subjects = config.acdc_dilated_subjects
    hypertrophic_subjects = config.acdc_hypertrophic_subjects
    infarct_subjects = config.acdc_infarct_subjects
    normal_subjects = config.acdc_normal_subjects
    rv_subjects = config.acdc_rv_subjects
    test_subjects = config.acdc_test_subjects

    all_subjects = dilated_subjects + hypertrophic_subjects + infarct_subjects + normal_subjects + rv_subjects + test_subjects
    subjects = all_subjects

    info_file = os.path.join(code_dir, 'acdc_info', 'acdc_info.txt')
    with open(info_file) as in_file:
        subject_info = in_file.readlines()

    subject_info = [x.strip() for x in subject_info]
    subject_info = [ y.split()[0:2] + [float(z) for z in y.split()[2:]] for y in subject_info]

    pixel_file = os.path.join(code_dir, 'acdc_info', 'acdc_pixel_size.txt')
    with open(pixel_file) as p_file:
        pixel_size_info = p_file.readlines()

    pixel_size_info = [x.strip() for x in pixel_size_info]
    pixel_size_info = [ [y.split()[0]] + [float(z) for z in y.split()[1:]] for y in pixel_size_info]

    base_file = os.path.join(code_dir, 'acdc_info', 'acdc_base.txt')
    with open(base_file) as b_file:
        base_info = b_file.readlines()

    base_info = [x.strip() for x in base_info]
    base_info = [ [y.split()[0]] + [float(z) for z in y.split()[1:]] for y in base_info]

    zfill_num = 2

    motion_info = open(os.path.join(code_dir, 'acdc_info', 'acdc_motion_index.txt'), 'w')

    for subject in subjects:
        print('\n'+subject)
        instants = int([x for x in subject_info if x[0] == subject][0][2])
        slices = int([x for x in subject_info if x[0] == subject][0][5])
        base_slice = int([x for x in base_info if x[0] == subject][0][1])
        apex_slice = int([x for x in base_info if x[0] == subject][0][2])
        es_base_slice = int([x for x in base_info if x[0] == subject][0][3])
        es_apex_slice = int([x for x in base_info if x[0] == subject][0][4])
        ed_instant = int([x for x in subject_info if x[0] == subject][0][3])
        es_instant = int([x for x in subject_info if x[0] == subject][0][4])
        bsa = [x for x in subject_info if x[0] == subject][0][8]
        pixel_size = [x for x in pixel_size_info if x[0] == subject][0][3]
        slice_thickness = [x for x in pixel_size_info if x[0] == subject][0][5]

        subject_dir = data_dir.format(subject)
        folder = subject_dir + '/predict_2D/'

        slice_range = range(base_slice + int(round((apex_slice + 1 - base_slice)*excluded_slice_ratio*0.5)), apex_slice + 1 - int(round((apex_slice + 1 - base_slice)*excluded_slice_ratio*1)))

        all_radius_flow = np.zeros((1, 0, seq_instants))

        asyn_thickness = 0.0
        thickness_diff = -10.0
        for slice_idx in slice_range:
            zone_avg_inner_border_normalized_flow = np.load(folder + 'radius_flow_{}.npy'.format(str(slice_idx).zfill(zfill_num)))
            zone_avg_myo_thickness_flow = np.load(folder + 'thickness_flow_{}.npy'.format(str(slice_idx).zfill(zfill_num)))

            all_radius_flow = np.concatenate((all_radius_flow, zone_avg_inner_border_normalized_flow), axis=1)

            # Thickness asynchrony: spread of the segment thicknesses at each
            # instant, normalized by the thinnest segment at the first instant
            scale_thickness = zone_avg_myo_thickness_flow[0, :, 0].min()
            slice_asyn_thickness = (zone_avg_myo_thickness_flow[0, :, :].max(axis=0) - zone_avg_myo_thickness_flow[0, :, :].min(axis=0)) / scale_thickness
            asyn_thickness = max(asyn_thickness, slice_asyn_thickness.max())

            segment_thickness_diff = zone_avg_myo_thickness_flow[0, :, :].max(axis=1) - zone_avg_myo_thickness_flow[0, :, 0]
            thickness_diff = max(thickness_diff, segment_thickness_diff.max())

        # Relative reduction of each segment's inner-border radius with respect
        # to the first instant (ED)
        for i in range(all_radius_flow.shape[1]):
            all_radius_flow[0, i, :] = (all_radius_flow[0, i, 0] - all_radius_flow[0, i, :]) / all_radius_flow[0, i, 0]

        asyn_radius = (all_radius_flow[0, :, :].max(axis=0) - all_radius_flow[0, :, :].min(axis=0)).max()

        written = '{} {} {} {}\n'.format(subject, asyn_radius, asyn_thickness, thickness_diff)
        motion_info.write(written)

    motion_info.close()


if __name__ == '__main__':
    acdc_motion_index()
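
In terms of the arrays above, each segment's radius curve is first turned into a relative reduction with respect to end-diastole, and the radial asynchrony index is the largest spread of that reduction across segments at any instant. An equivalent vectorized sketch (not part of the repository), assuming `radius` is a (segments, instants) array of inner-border radii with instant 0 at ED:

    import numpy as np

    radius = np.array([[24.0, 20.0, 18.0],
                       [23.0, 22.0, 21.0]])            # hypothetical radii (mm)
    shrink = (radius[:, :1] - radius) / radius[:, :1]  # relative reduction vs ED
    asyn_radius = (shrink.max(axis=0) - shrink.min(axis=0)).max()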

--------------------------------------------------------------------------------
/segmentation/data_lvrv_segmentation_propagation_acdc.py:
--------------------------------------------------------------------------------
""" A function to generate the lists of files for LVRV-net inference """

import sys
sys.path.append('..')

import os
import math

import config

def data_lvrv_segmentation_propagation_acdc(mode='all', fold = 1):

    data_dir = config.acdc_data_dir
    code_dir = config.code_dir

    dilated_subjects = config.acdc_dilated_subjects
    hypertrophic_subjects = config.acdc_hypertrophic_subjects
    infarct_subjects = config.acdc_infarct_subjects
    normal_subjects = config.acdc_normal_subjects
    rv_subjects = config.acdc_rv_subjects
    test_subjects = config.acdc_test_subjects

    excluded_slice_ratio = config.excluded_slice_ratio

    seq_instants = config.acdc_seq_instants

    all_subjects = dilated_subjects + hypertrophic_subjects + infarct_subjects + normal_subjects + rv_subjects

    if mode == 'all':
        subjects = all_subjects
    elif mode == 'train':
        subjects = [x for i,x in enumerate(all_subjects) if (i % 5) != (fold % 5)]
    elif mode == 'val' or mode == 'val_predict':
        subjects = [x for i,x in enumerate(all_subjects) if (i % 5) == (fold % 5)]
    elif mode == 'predict':
        subjects = test_subjects
    else:
        print('Incorrect mode')

    print(subjects)

    info_file = os.path.join(code_dir, 'acdc_info', 'acdc_info.txt')
    with open(info_file) as in_file:
        subject_info = in_file.readlines()

    subject_info = [x.strip() for x in subject_info]
    subject_info = [ y.split()[0:2] + [float(z) for z in y.split()[2:]] for y in subject_info]

    print('There will be {} used subjects'.format(len(subjects)) )

    seq_context_imgs = []
    seq_context_segs = []
    seq_imgs = []
    seq_segs = []

    seq_context_imgs_no_group = []
    seq_context_segs_no_group = []
    seq_imgs_no_group = []
    seq_segs_no_group = []

    for subject in subjects:
        instants = int([x for x in subject_info if x[0] == subject][0][2])
        slices = int([x for x in subject_info if x[0] == subject][0][5])
        ed_instant = int([x for x in subject_info if x[0] == subject][0][3])
        es_instant = int([x for x in subject_info if x[0] == subject][0][4])
        subject_dir = data_dir.format(subject)

        start_slice = 0
        end_slice = slices

        if not os.path.exists(os.path.join(subject_dir, 'predict_2D')):
            os.makedirs(os.path.join(subject_dir, 'predict_2D'))

        for t in [ed_instant, es_instant]:
            context_imgs = []
            context_segs = []
            imgs = []
            segs = []

            for i in range(start_slice, end_slice):
                if i == start_slice:
                    i_minus = -1
                else:
                    i_minus = i - 1

                context_img = os.path.join(subject_dir, 'crop_2D', 'crop_2D_{}_{}.png'.format(str(i_minus).zfill(2), str(t).zfill(2)) )
                if mode in ['all', 'train', 'val']:
                    context_seg = os.path.join(subject_dir, 'crop_2D', 'crop_2D_gt_{}_{}.png'.format(str(i_minus).zfill(2), str(t).zfill(2)) )
                elif mode in ['predict', 'val_predict']:
                    context_seg = os.path.join(subject_dir, 'predict_2D', 'predict_lvrv2_{}_{}.png'.format(str(i_minus).zfill(2), str(t).zfill(2)) )

                img = os.path.join(subject_dir, 'crop_2D', 'crop_2D_{}_{}.png'.format(str(i).zfill(2), str(t).zfill(2)) )
                if mode in ['all', 'train', 'val']:
                    seg = os.path.join(subject_dir, 'crop_2D', 'crop_2D_gt_{}_{}.png'.format(str(i).zfill(2), str(t).zfill(2)) )
                elif mode in ['predict', 'val_predict']:
                    seg = os.path.join(subject_dir, 'predict_2D', 'predict_lvrv2_{}_{}.png'.format(str(i).zfill(2), str(t).zfill(2)) )

                seq_context_imgs_no_group.append(context_img)
                seq_context_segs_no_group.append(context_seg)
                seq_imgs_no_group.append(img)
                seq_segs_no_group.append(seg)

                context_imgs.append(context_img)
                context_segs.append(context_seg)
                imgs.append(img)
                segs.append(seg)

            seq_context_imgs.append(context_imgs)
            seq_context_segs.append(context_segs)
            seq_imgs.append(imgs)
            seq_segs.append(segs)

    if mode in ['all', 'train', 'val']:
        return seq_context_imgs_no_group, seq_context_segs_no_group, seq_imgs_no_group, seq_segs_no_group
    elif mode in ['predict', 'val_predict']:
        return seq_context_imgs, seq_context_segs, seq_imgs, seq_segs
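
For each of the ED and ES instants the slices are listed from base to apex, and each slice is paired with the slice above it as spatial context (index -1, i.e. a non-existent file, for the first slice). In `predict` mode the context segmentation points into `predict_2D`, so each slice appears to be segmented using the network's own prediction for its upper neighbour, propagating the segmentation down the stack. The context index in short:

    i_minus = -1 if i == start_slice else i - 1   # context slice for slice i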
subject][0][2]) 70 | slices = int([x for x in subject_info if x[0] == subject][0][5]) 71 | ed_instant = int([x for x in subject_info if x[0] == subject][0][3]) 72 | es_instant = int([x for x in subject_info if x[0] == subject][0][4]) 73 | subject_dir = data_dir.format(subject) 74 | 75 | start_slice = 0 76 | end_slice = slices 77 | 78 | if not os.path.exists(os.path.join(subject_dir, 'predict_2D')): 79 | os.makedirs(os.path.join(subject_dir, 'predict_2D')) 80 | 81 | 82 | for t in [ed_instant, es_instant]: 83 | context_imgs = [] 84 | context_segs = [] 85 | imgs = [] 86 | segs = [] 87 | 88 | for i in range(start_slice, end_slice): 89 | if i == start_slice: 90 | i_minus = -1 91 | else: 92 | i_minus = i - 1 93 | 94 | 95 | context_img = os.path.join(subject_dir, 'crop_2D', 'crop_2D_{}_{}.png'.format(str(i_minus).zfill(2), str(t).zfill(2)) ) 96 | if mode in ['all', 'train', 'val']: 97 | context_seg = os.path.join(subject_dir, 'crop_2D', 'crop_2D_gt_{}_{}.png'.format(str(i_minus).zfill(2), str(t).zfill(2)) ) 98 | elif mode in ['predict', 'val_predict']: 99 | context_seg = os.path.join(subject_dir, 'predict_2D', 'predict_lvrv2_{}_{}.png'.format(str(i_minus).zfill(2), str(t).zfill(2)) ) 100 | 101 | 102 | 103 | img = os.path.join(subject_dir, 'crop_2D', 'crop_2D_{}_{}.png'.format(str(i).zfill(2), str(t).zfill(2)) ) 104 | if mode in ['all', 'train', 'val']: 105 | seg = os.path.join(subject_dir, 'crop_2D', 'crop_2D_gt_{}_{}.png'.format(str(i).zfill(2), str(t).zfill(2)) ) 106 | elif mode in ['predict', 'val_predict']: 107 | seg = os.path.join(subject_dir, 'predict_2D', 'predict_lvrv2_{}_{}.png'.format(str(i).zfill(2), str(t).zfill(2)) ) 108 | 109 | seq_context_imgs_no_group.append(context_img) 110 | seq_context_segs_no_group.append(context_seg) 111 | seq_imgs_no_group.append(img) 112 | seq_segs_no_group.append(seg) 113 | 114 | 115 | context_imgs.append(context_img) 116 | context_segs.append(context_seg) 117 | imgs.append(img) 118 | segs.append(seg) 119 | 120 | seq_context_imgs.append(context_imgs) 121 | seq_context_segs.append(context_segs) 122 | seq_imgs.append(imgs) 123 | seq_segs.append(segs) 124 | 125 | 126 | if mode in ['all', 'train', 'val']: 127 | return seq_context_imgs_no_group, seq_context_segs_no_group, seq_imgs_no_group, seq_segs_no_group 128 | elif mode in ['predict', 'val_predict']: 129 | return seq_context_imgs, seq_context_segs, seq_imgs, seq_segs 130 | 131 | 132 | -------------------------------------------------------------------------------- /ROI/predict_roi_net.py: -------------------------------------------------------------------------------- 1 | """ The main file to launch the inference of ROI-net """ 2 | 3 | 4 | import os 5 | import numpy as np 6 | import scipy 7 | import math 8 | from PIL import Image as pil_image 9 | import tensorflow as tf 10 | 11 | from keras.models import ( 12 | Model, 13 | load_model 14 | ) 15 | from keras.optimizers import Adam 16 | from keras import backend as K 17 | 18 | K.set_image_data_format('channels_last') # TF dimension ordering in this code 19 | 20 | from helpers import ( 21 | dice_coef2, 22 | dice_coef2_loss, 23 | mean_variance_normalization5 24 | ) 25 | from image2 import ( 26 | array_to_img, 27 | ImageDataGenerator2 28 | ) 29 | from data_roi_predict import data_roi_predict 30 | 31 | from module_roi_net import net_module 32 | 33 | import config 34 | 35 | 36 | 37 | def predict_roi_net(): 38 | 39 | code_path = config.code_dir 40 | 41 | initial_lr = config.roi_net_initial_lr 42 | batch_size = config.roi_net_batch_size 43 | input_img_size = 
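# config.py (at the repository root) supplies the ROI-net hyper-parameters
# read above.  A minimal sketch of the relevant entries -- the values shown
# are placeholders for illustration only; the authoritative numbers live in
# config.py:
#
#     roi_net_initial_lr = 1e-4      # Adam learning rate
#     roi_net_batch_size = 16        # inference batch size
#     roi_net_input_img_size = 128   # square network input, in pixels
#     roi_net_epochs = 50            # selects model_roi_net_epoch050.h5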
config.roi_net_input_img_size 44 | 45 | epochs = config.roi_net_epochs 46 | 47 | ########### 48 | # The model 49 | model = net_module(input_shape=(input_img_size, input_img_size, 1), num_outputs=1) 50 | 51 | model.load_weights(filepath=os.path.join(code_path, 'ROI', 'model_roi_net_epoch{}.h5'.format(str(epochs).zfill(3))) ) 52 | 53 | model.compile(optimizer=Adam(lr=initial_lr), loss=dice_coef2_loss, 54 | metrics=[dice_coef2]) 55 | 56 | ###### 57 | # Data 58 | predict_img_list, predict_gt_list = data_roi_predict() 59 | 60 | 61 | predict_img_list = sorted(predict_img_list) 62 | predict_gt_list = sorted(predict_gt_list) 63 | 64 | predict_sample = len(predict_img_list) 65 | 66 | # we create two instances with the same arguments 67 | data_gen_args = dict(featurewise_center=False, 68 | samplewise_center=False, 69 | featurewise_std_normalization=False, 70 | samplewise_std_normalization=False, 71 | zca_whitening=False, 72 | zca_epsilon=1e-6, 73 | rotation_range=0.0, 74 | width_shift_range=0.0, 75 | height_shift_range=0.0, 76 | shear_range=0., 77 | zoom_range=0.0, 78 | channel_shift_range=0., 79 | fill_mode='constant', 80 | cval=0., 81 | horizontal_flip=False, 82 | vertical_flip=False, 83 | rescale=None, 84 | preprocessing_function=mean_variance_normalization5, 85 | data_format=K.image_data_format()) 86 | 87 | 88 | ########################### 89 | # Generators for predicting 90 | image_datagen = ImageDataGenerator2(**data_gen_args) 91 | 92 | seed = 1 93 | image_datagen.fit(np.zeros((1,1,1,1)), augment=False, rounds=0, seed=seed) 94 | 95 | image_generator = image_datagen.flow_from_path_list( 96 | path_list=predict_img_list, 97 | target_size=(input_img_size, input_img_size), 98 | pad_to_square=True, 99 | resize_mode='nearest', 100 | histogram_based_preprocessing=True, 101 | clahe=True, 102 | color_mode='grayscale', 103 | class_list=None, 104 | class_mode=None, 105 | batch_size=batch_size, 106 | shuffle=False, 107 | seed=seed, 108 | save_to_dir=None, 109 | save_prefix='', 110 | save_format='png', 111 | save_period=100, 112 | follow_links=False) 113 | 114 | 115 | print('Start prediction') 116 | print('There will be {} batches with batch-size {}'.format(int(math.ceil(float(predict_sample) / batch_size)), batch_size) ) 117 | 118 | for i in range(int(math.ceil(float(predict_sample) / batch_size)) ): 119 | print('batch {}'.format(i) ) 120 | start_idx = i * batch_size 121 | end_idx = min((i + 1) * batch_size, predict_sample) 122 | img_list_batch = predict_img_list[start_idx:end_idx] 123 | 124 | batch_img = next(image_generator) 125 | 126 | predict_masks = model.predict(batch_img, 127 | batch_size=batch_size, 128 | verbose=0) 129 | binarized_predict_masks = np.where(predict_masks >= 0.5, 1.0, 0.0) 130 | 131 | for j in range(len(img_list_batch)): 132 | img_path = img_list_batch[j] 133 | #print(img_path) 134 | img_size = pil_image.open(img_path).size 135 | h = img_size[0] 136 | w = img_size[1] 137 | size = max(h, w) 138 | 139 | # reshape and crop the predicted mask to the original size 140 | mask = np.reshape(binarized_predict_masks[j], newshape=(input_img_size, input_img_size)) 141 | resized_mask = scipy.misc.imresize(mask, size=(size, size), interp='nearest')/255.0 142 | cropped_resized_mask = resized_mask[((size-w)//2):((size-w)//2 + w), 143 | ((size-h)//2):((size-h)//2 + h)] 144 | cropped_resized_mask = np.reshape(cropped_resized_mask, newshape=(w, h, 1)) 145 | 146 | predicted_mask_path = img_path.replace('original_2D', 'mask_original_2D', 2) 147 | 148 | # save txt file 149 | predicted_mask_txt_path = 
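# scipy.misc.imresize (used a few lines above) was deprecated in SciPy 1.0
# and removed in 1.3.  On newer environments the same nearest-neighbour
# resize can be reproduced with PIL alone -- a drop-in sketch under that
# assumption (the function name is ours, not the repository's):
def imresize_nearest(mask, size):
    # mask: 2-D array with values in [0, 1]; returns the same value range.
    import numpy as np
    from PIL import Image as pil_image
    img = pil_image.fromarray((mask * 255.0).astype('uint8'))
    return np.array(img.resize((size, size), resample=pil_image.NEAREST)) / 255.0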
predicted_mask_path.replace('.png', '.txt', 1) 150 | np.savetxt(predicted_mask_txt_path, cropped_resized_mask, fmt='%.6f') 151 | 152 | # save image 153 | cropped_resized_mask_img = array_to_img(cropped_resized_mask, 154 | data_format=None, 155 | scale=True) 156 | cropped_resized_mask_img.save(predicted_mask_path) 157 | 158 | 159 | 160 | if __name__ == '__main__': 161 | predict_roi_net() 162 | 163 | 164 | -------------------------------------------------------------------------------- /feature_extraction/acdc_thickness.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import os 3 | 4 | from helpers import myo_mask_max_min_mean_thickness 5 | from image2 import load_img2 6 | 7 | import config 8 | 9 | 10 | 11 | def acdc_thickness(): 12 | 13 | 14 | data_dir = config.acdc_data_dir 15 | code_dir = config.code_dir 16 | 17 | excluded_slice_ratio = config.excluded_slice_ratio 18 | seq_instants = config.acdc_seq_instants 19 | 20 | dilated_subjects = config.acdc_dilated_subjects 21 | hypertrophic_subjects = config.acdc_hypertrophic_subjects 22 | infarct_subjects = config.acdc_infarct_subjects 23 | normal_subjects = config.acdc_normal_subjects 24 | rv_subjects = config.acdc_rv_subjects 25 | test_subjects = config.acdc_test_subjects 26 | 27 | all_subjects = dilated_subjects + hypertrophic_subjects + infarct_subjects + normal_subjects + rv_subjects + test_subjects 28 | subjects = all_subjects 29 | 30 | 31 | 32 | info_file = os.path.join(code_dir, 'acdc_info', 'acdc_info.txt') 33 | with open(info_file) as in_file: 34 | subject_info = in_file.readlines() 35 | 36 | subject_info = [x.strip() for x in subject_info] 37 | subject_info = [ y.split()[0:2] + [float(z) for z in y.split()[2:]] for y in subject_info] 38 | 39 | 40 | pixel_file = os.path.join(code_dir, 'acdc_info', 'acdc_pixel_size.txt') 41 | with open(pixel_file) as p_file: 42 | pixel_size_info = p_file.readlines() 43 | 44 | pixel_size_info = [x.strip() for x in pixel_size_info] 45 | pixel_size_info = [ [y.split()[0]] + [float(z) for z in y.split()[1:]] for y in pixel_size_info] 46 | 47 | 48 | base_file = os.path.join(code_dir, 'acdc_info', 'acdc_base.txt') 49 | with open(base_file) as b_file: 50 | base_info = b_file.readlines() 51 | 52 | base_info = [x.strip() for x in base_info] 53 | base_info = [ [y.split()[0]] + [float(z) for z in y.split()[1:]] for y in base_info] 54 | 55 | 56 | 57 | zfill_num = 2 58 | img_size = config.apparentflow_net_input_img_size 59 | shape = (img_size, img_size ,2) 60 | 61 | 62 | 63 | thickness_info = open(os.path.join(code_dir, 'acdc_info', 'acdc_thickness.txt'), 'w') 64 | 65 | for subject in subjects: 66 | print('\n'+subject) 67 | instants = int([x for x in subject_info if x[0] == subject][0][2]) 68 | slices = int([x for x in subject_info if x[0] == subject][0][5]) 69 | base_slice = int([x for x in base_info if x[0] == subject][0][1]) 70 | apex_slice = int([x for x in base_info if x[0] == subject][0][2]) 71 | es_base_slice = int([x for x in base_info if x[0] == subject][0][3]) 72 | es_apex_slice = int([x for x in base_info if x[0] == subject][0][4]) 73 | ed_instant = int([x for x in subject_info if x[0] == subject][0][3]) 74 | es_instant = int([x for x in subject_info if x[0] == subject][0][4]) 75 | bsa = [x for x in subject_info if x[0] == subject][0][8] 76 | pixel_size = [x for x in pixel_size_info if x[0] == subject][0][3] 77 | slice_thickness = [x for x in pixel_size_info if x[0] == subject][0][5] 78 | 79 | subject_dir = data_dir.format(subject) 80 | folder 
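# myo_mask_max_min_mean_thickness (imported from helpers) is not included in
# this dump.  An illustrative approximation of what such a helper can
# compute -- wall thickness in pixels from a labelled mask in which the
# myocardium is coded 2.0 -- via a Euclidean distance transform (a sketch
# under that assumption, not the repository's implementation):
def approx_myo_thickness(mask, myo_value=2.0):
    import numpy as np
    from scipy import ndimage
    myo = (np.asarray(mask) == myo_value)
    if not myo.any():
        # Negative values signal an unused slice, matching the
        # `if ed_max_thick >= 0.0` checks below.
        return -1.0, -1.0, -1.0
    # Distance of each myocardial pixel to the nearest non-myocardial pixel;
    # doubling it approximates the local wall thickness.
    thickness = 2.0 * ndimage.distance_transform_edt(myo)[myo]
    return thickness.max(), thickness.min(), thickness.mean()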
= subject_dir + '/predict_2D/' 81 | 82 | normalize_term = pixel_size / (bsa**(1.0/2)) 83 | 84 | idx_range = [(int(round(float(instants) * t / seq_instants)) + ed_instant) % instants for t in range(0, seq_instants)] 85 | 86 | 87 | 88 | start_slice = base_slice 89 | end_slice = apex_slice + 1 90 | 91 | es_start_slice = es_base_slice 92 | es_end_slice = es_apex_slice + 1 93 | 94 | 95 | ed_max = 0.0 96 | ed_min = 10000.0 97 | ed_sum = 0.0 98 | ed_used_slice_count = 0 99 | es_max = 0.0 100 | es_min = 10000.0 101 | es_sum = 0.0 102 | es_used_slice_count = 0 103 | 104 | for slice_idx in range(start_slice, end_slice): 105 | mask_file = folder + 'predict_lvrv2_{}_{}.png'.format(str(slice_idx).zfill(zfill_num), str(ed_instant).zfill(zfill_num)) 106 | mask = load_img2(mask_file, grayscale=True, 107 | target_size=(shape[0], 108 | shape[1]), 109 | pad_to_square=True, resize_mode='nearest') 110 | ed_max_thick, ed_min_thick, ed_mean_thick = myo_mask_max_min_mean_thickness(np.array(mask)/50.0) 111 | if ed_max_thick >= 0.0: 112 | ed_max = max(ed_max, ed_max_thick) 113 | ed_min = min(ed_min, ed_min_thick) 114 | ed_sum += ed_mean_thick 115 | ed_used_slice_count += 1 116 | ed_mean = ed_sum / ed_used_slice_count 117 | 118 | 119 | for slice_idx in range(es_start_slice, es_end_slice): 120 | es_mask_file = folder + 'predict_lvrv2_{}_{}.png'.format(str(slice_idx).zfill(zfill_num), str(es_instant).zfill(zfill_num)) 121 | es_mask = load_img2(es_mask_file, grayscale=True, 122 | target_size=(shape[0], 123 | shape[1]), 124 | pad_to_square=True, resize_mode='nearest') 125 | es_max_thick, es_min_thick, es_mean_thick = myo_mask_max_min_mean_thickness(np.array(es_mask)/50.0) 126 | if es_max_thick >= 0.0: 127 | es_max = max(es_max, es_max_thick) 128 | es_min = min(es_min, es_min_thick) 129 | es_sum += es_mean_thick 130 | es_used_slice_count += 1 131 | es_mean = es_sum / es_used_slice_count 132 | 133 | ed_max *= pixel_size 134 | ed_min *= pixel_size 135 | es_max *= pixel_size 136 | es_min *= pixel_size 137 | ed_mean *= pixel_size 138 | es_mean *= pixel_size 139 | 140 | print(ed_max, ed_min, es_max, es_min, ed_mean, es_mean) 141 | 142 | 143 | written = '{} {} {} {} {} {} {}\n'.format(subject, ed_max, ed_min, es_max, es_min, ed_mean, es_mean) 144 | thickness_info.write(written) 145 | 146 | thickness_info.close() 147 | 148 | 149 | 150 | 151 | 152 | 153 | 154 | 155 | if __name__ == '__main__': 156 | acdc_thickness() 157 | 158 | 159 | 160 | 161 | 162 | 163 | 164 | 165 | 166 | 167 | 168 | -------------------------------------------------------------------------------- /feature_extraction/acdc_zone_flow.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import os 3 | 4 | from helpers import ( 5 | mask_barycenter2, 6 | masked_flow_transform2, 7 | flow_by_zone3, 8 | enlarge_mask4 9 | ) 10 | from image2 import load_img2 11 | import config 12 | 13 | 14 | 15 | def acdc_zone_flow(): 16 | 17 | 18 | data_dir = config.acdc_data_dir 19 | code_dir = config.code_dir 20 | 21 | excluded_slice_ratio = config.excluded_slice_ratio 22 | seq_instants = config.acdc_seq_instants 23 | 24 | dilated_subjects = config.acdc_dilated_subjects 25 | hypertrophic_subjects = config.acdc_hypertrophic_subjects 26 | infarct_subjects = config.acdc_infarct_subjects 27 | normal_subjects = config.acdc_normal_subjects 28 | rv_subjects = config.acdc_rv_subjects 29 | test_subjects = config.acdc_test_subjects 30 | 31 | all_subjects = dilated_subjects + hypertrophic_subjects + infarct_subjects + normal_subjects + 
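# The feature-extraction scripts resample every cine sequence to a fixed
# number of instants (config.acdc_seq_instants), starting at ED and wrapping
# modulo the sequence length:
#   idx_range = [(int(round(float(instants) * t / seq_instants)) + ed_instant)
#                % instants for t in range(seq_instants)]
# Worked example (values assumed): instants=28, seq_instants=20,
# ed_instant=0 gives
#   [0, 1, 3, 4, 6, 7, 8, 10, 11, 13, 14, 15, 17, 18, 20, 21, 22, 24, 25, 27]
# i.e. 20 frames spread evenly over the 28-frame cycle.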
rv_subjects + test_subjects 32 | subjects = all_subjects 33 | 34 | 35 | 36 | 37 | info_file = os.path.join(code_dir, 'acdc_info', 'acdc_info.txt') 38 | with open(info_file) as in_file: 39 | subject_info = in_file.readlines() 40 | 41 | subject_info = [x.strip() for x in subject_info] 42 | subject_info = [ y.split()[0:2] + [float(z) for z in y.split()[2:]] for y in subject_info] 43 | 44 | 45 | pixel_file = os.path.join(code_dir, 'acdc_info', 'acdc_pixel_size.txt') 46 | with open(pixel_file) as p_file: 47 | pixel_size_info = p_file.readlines() 48 | 49 | pixel_size_info = [x.strip() for x in pixel_size_info] 50 | pixel_size_info = [ [y.split()[0]] + [float(z) for z in y.split()[1:]] for y in pixel_size_info] 51 | 52 | 53 | base_file = os.path.join(code_dir, 'acdc_info', 'acdc_base.txt') 54 | with open(base_file) as b_file: 55 | base_info = b_file.readlines() 56 | 57 | base_info = [x.strip() for x in base_info] 58 | base_info = [ [y.split()[0]] + [float(z) for z in y.split()[1:]] for y in base_info] 59 | 60 | 61 | num_zone = 6 62 | zfill_num = 2 63 | img_size = config.apparentflow_net_input_img_size 64 | shape = (img_size, img_size ,2) 65 | 66 | 67 | 68 | 69 | for subject in subjects: 70 | print('\n'+subject) 71 | instants = int([x for x in subject_info if x[0] == subject][0][2]) 72 | slices = int([x for x in subject_info if x[0] == subject][0][5]) 73 | base_slice = int([x for x in base_info if x[0] == subject][0][1]) 74 | apex_slice = int([x for x in base_info if x[0] == subject][0][2]) 75 | es_base_slice = int([x for x in base_info if x[0] == subject][0][3]) 76 | es_apex_slice = int([x for x in base_info if x[0] == subject][0][4]) 77 | ed_instant = int([x for x in subject_info if x[0] == subject][0][3]) 78 | es_instant = int([x for x in subject_info if x[0] == subject][0][4]) 79 | bsa = [x for x in subject_info if x[0] == subject][0][8] 80 | pixel_size = [x for x in pixel_size_info if x[0] == subject][0][3] 81 | slice_thickness = [x for x in pixel_size_info if x[0] == subject][0][5] 82 | 83 | subject_dir = data_dir.format(subject) 84 | folder = subject_dir + '/predict_2D/' 85 | 86 | normalize_term = pixel_size / (bsa**(1.0/2)) 87 | 88 | idx_range = [(int(round(float(instants) * t / seq_instants)) + ed_instant) % instants for t in range(0, seq_instants)] 89 | 90 | 91 | 92 | start_slice = base_slice 93 | end_slice = apex_slice + 1 94 | 95 | for slice_idx in range(start_slice, end_slice): 96 | print('slice #{}'.format(slice_idx)) 97 | 98 | # Get the mask 99 | mask_file = folder + 'predict_lvrv2_{}_{}.png'.format(str(slice_idx).zfill(zfill_num), str(ed_instant).zfill(zfill_num)) 100 | mask = load_img2(mask_file, grayscale=True, 101 | target_size=(shape[0], 102 | shape[1]), 103 | pad_to_square=True, resize_mode='nearest') 104 | mask = np.reshape(np.array(mask)/50.0, (1, shape[0], shape[1], 1)) 105 | mask = enlarge_mask4(mask, width=1, enlarge_value=2.0, neighbor_values=[1.0]) 106 | 107 | 108 | # Get the flow 109 | flow = np.zeros((1, shape[0], shape[1], 0)) 110 | for idx in idx_range: 111 | flow_file = folder + 'flow2_{}_{}.npy'.format(str(slice_idx).zfill(zfill_num), str(idx).zfill(zfill_num)) 112 | if idx != idx_range[0] and os.path.isfile(flow_file): 113 | flow_idx = np.reshape(np.load(flow_file), (1, shape[0], shape[1], shape[2])) 114 | else: 115 | flow_idx = np.zeros((1, shape[0], shape[1], shape[2])) 116 | flow = np.concatenate((flow, flow_idx), axis=-1) 117 | 118 | 119 | # Compute the barycenter coordinates 120 | lv_mask = np.where(np.logical_or(\ 121 | np.equal(mask, 2.0 * 
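# Label encoding of the predicted PNG masks: the 8-bit images store
# 0/50/100/150, so after the division by 50.0 above the classes are
#   0.0 background, 1.0 LV cavity, 2.0 LV myocardium, 3.0 RV cavity.
# The np.where expressions below just build binary maps; a compact
# equivalent (sketch only):
def binary_mask(mask, values):
    # 1.0 where `mask` takes any of `values`, 0.0 elsewhere.
    import numpy as np
    return np.isin(mask, values).astype(np.float64)
# lv_mask ~ binary_mask(mask, [1.0, 2.0]); rv_mask ~ binary_mask(mask, [3.0])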
np.ones_like(mask)), 122 | np.equal(mask, 1.0 * np.ones_like(mask)) ), 123 | np.ones_like(mask), np.zeros_like(mask)) 124 | rv_mask = np.where(np.equal(mask, 3.0 * np.ones_like(mask)), 125 | np.ones_like(mask), np.zeros_like(mask)) 126 | barycenters = mask_barycenter2(flow, mask, mask_value=1.0) 127 | rv_barycenters = mask_barycenter2(flow, mask, mask_value=3.0) 128 | 129 | 130 | # Transform the flow 131 | transformed_flow, angles, distance_flows, norms, boundary_pixels = \ 132 | masked_flow_transform2(flow, mask, barycenters, lvm_value = 2.0, lvc_value = 1.0) 133 | 134 | # Average the transformed flow by zone 135 | zone_avg_flow, zone_std_original_flow, \ 136 | zone_avg_inner_border_normalized_flow, zone_avg_outer_border_normalized_flow, \ 137 | zone_avg_myo_thickness_flow, zone_map = \ 138 | flow_by_zone3(transformed_flow, flow, angles, distance_flows, norms, boundary_pixels, num_zone, start_random = False, barycenters = barycenters, rv_barycenters = rv_barycenters) 139 | 140 | # Normalize the flow 141 | zone_avg_inner_border_normalized_flow *= normalize_term 142 | zone_avg_myo_thickness_flow *= normalize_term 143 | 144 | np.save(folder + 'radius_flow_{}.npy'.format(str(slice_idx).zfill(zfill_num)), zone_avg_inner_border_normalized_flow) 145 | np.save(folder + 'thickness_flow_{}.npy'.format(str(slice_idx).zfill(zfill_num)), zone_avg_myo_thickness_flow) 146 | 147 | 148 | 149 | if __name__ == '__main__': 150 | acdc_zone_flow() 151 | 152 | 153 | 154 | 155 | 156 | 157 | 158 | 159 | 160 | 161 | 162 | -------------------------------------------------------------------------------- /feature_extraction/acdc_volume.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import os 3 | 4 | from helpers import volume_calculation_given_slice_area 5 | from image2 import load_img2 6 | 7 | import config 8 | 9 | 10 | 11 | def acdc_volume(): 12 | 13 | 14 | data_dir = config.acdc_data_dir 15 | code_dir = config.code_dir 16 | 17 | excluded_slice_ratio = config.excluded_slice_ratio 18 | seq_instants = config.acdc_seq_instants 19 | 20 | dilated_subjects = config.acdc_dilated_subjects 21 | hypertrophic_subjects = config.acdc_hypertrophic_subjects 22 | infarct_subjects = config.acdc_infarct_subjects 23 | normal_subjects = config.acdc_normal_subjects 24 | rv_subjects = config.acdc_rv_subjects 25 | test_subjects = config.acdc_test_subjects 26 | 27 | all_subjects = dilated_subjects + hypertrophic_subjects + infarct_subjects + normal_subjects + rv_subjects + test_subjects 28 | subjects = all_subjects 29 | 30 | 31 | 32 | info_file = os.path.join(code_dir, 'acdc_info', 'acdc_info.txt') 33 | with open(info_file) as in_file: 34 | subject_info = in_file.readlines() 35 | 36 | subject_info = [x.strip() for x in subject_info] 37 | subject_info = [ y.split()[0:2] + [float(z) for z in y.split()[2:]] for y in subject_info] 38 | 39 | 40 | pixel_file = os.path.join(code_dir, 'acdc_info', 'acdc_pixel_size.txt') 41 | with open(pixel_file) as p_file: 42 | pixel_size_info = p_file.readlines() 43 | 44 | pixel_size_info = [x.strip() for x in pixel_size_info] 45 | pixel_size_info = [ [y.split()[0]] + [float(z) for z in y.split()[1:]] for y in pixel_size_info] 46 | 47 | 48 | base_file = os.path.join(code_dir, 'acdc_info', 'acdc_base.txt') 49 | with open(base_file) as b_file: 50 | base_info = b_file.readlines() 51 | 52 | base_info = [x.strip() for x in base_info] 53 | base_info = [ [y.split()[0]] + [float(z) for z in y.split()[1:]] for y in base_info] 54 | 55 | 56 | 57 | zfill_num 
= 2 58 | img_size = config.apparentflow_net_input_img_size 59 | shape = (img_size, img_size ,2) 60 | 61 | 62 | 63 | volume_info = open(os.path.join(code_dir, 'acdc_info', 'acdc_volume.txt'), 'w') 64 | 65 | for subject in subjects: 66 | print('\n'+subject) 67 | instants = int([x for x in subject_info if x[0] == subject][0][2]) 68 | slices = int([x for x in subject_info if x[0] == subject][0][5]) 69 | base_slice = int([x for x in base_info if x[0] == subject][0][1]) 70 | apex_slice = int([x for x in base_info if x[0] == subject][0][2]) 71 | es_base_slice = int([x for x in base_info if x[0] == subject][0][3]) 72 | es_apex_slice = int([x for x in base_info if x[0] == subject][0][4]) 73 | ed_instant = int([x for x in subject_info if x[0] == subject][0][3]) 74 | es_instant = int([x for x in subject_info if x[0] == subject][0][4]) 75 | bsa = [x for x in subject_info if x[0] == subject][0][8] 76 | pixel_size = [x for x in pixel_size_info if x[0] == subject][0][3] 77 | slice_thickness = [x for x in pixel_size_info if x[0] == subject][0][5] 78 | 79 | subject_dir = data_dir.format(subject) 80 | folder = subject_dir + '/predict_2D/' 81 | 82 | normalize_term = pixel_size / (bsa**(1.0/2)) 83 | 84 | idx_range = [(int(round(float(instants) * t / seq_instants)) + ed_instant) % instants for t in range(0, seq_instants)] 85 | 86 | 87 | 88 | start_slice = 0 89 | end_slice = slices 90 | 91 | 92 | lv_area_ed = [] 93 | lv_area_es = [] 94 | lvm_area_ed = [] 95 | lvm_area_es = [] 96 | rv_area_ed = [] 97 | rv_area_es = [] 98 | 99 | for slice_idx in range(start_slice, end_slice): 100 | #print('slice #{}'.format(slice_idx)) 101 | 102 | 103 | # Get the segmentation 104 | #print('Get the segmentation') 105 | seg = np.zeros((1, shape[0], shape[1], 0)) 106 | for idx in [ed_instant, es_instant]: 107 | seg_file = folder + 'predict_lvrv2_{}_{}.png'.format(str(slice_idx).zfill(zfill_num), str(idx).zfill(zfill_num)) 108 | 109 | seg_idx = load_img2(seg_file, grayscale=True, 110 | target_size=(shape[0], 111 | shape[1]), 112 | pad_to_square=True, resize_mode='nearest') 113 | seg_idx = np.reshape(np.array(seg_idx)/50.0, (1, shape[0], shape[1], 1)) 114 | seg = np.concatenate((seg, seg_idx), axis=-1) 115 | 116 | seg_rv = np.where(np.equal(seg, 3.0 * np.ones_like(seg)), 117 | np.ones_like(seg), np.zeros_like(seg)) 118 | seg_rv_area = np.sum(seg_rv, axis=(1,2)) 119 | seg_rv_area *= ((normalize_term**2)/1000) 120 | 121 | seg_lv = np.where(np.equal(seg, 1.0 * np.ones_like(seg)), 122 | np.ones_like(seg), np.zeros_like(seg)) 123 | seg_lv_area = np.sum(seg_lv, axis=(1,2)) 124 | seg_lv_area *= ((normalize_term**2)/1000) 125 | 126 | seg_lvm = np.where(np.equal(seg, 2.0 * np.ones_like(seg)), 127 | np.ones_like(seg), np.zeros_like(seg)) 128 | seg_lvm_area = np.sum(seg_lvm, axis=(1,2)) 129 | seg_lvm_area *= ((normalize_term**2)/1000) 130 | 131 | 132 | lv_area_ed.append(seg_lv_area[0, 0]) 133 | lv_area_es.append(seg_lv_area[0, 1]) 134 | lvm_area_ed.append(seg_lvm_area[0, 0]) 135 | lvm_area_es.append(seg_lvm_area[0, 1]) 136 | rv_area_ed.append(seg_rv_area[0, 0]) 137 | rv_area_es.append(seg_rv_area[0, 1]) 138 | 139 | lv_volume_ed = volume_calculation_given_slice_area(lv_area_ed, slice_thickness) 140 | lv_volume_es = volume_calculation_given_slice_area(lv_area_es, slice_thickness) 141 | lvm_volume_ed = volume_calculation_given_slice_area(lvm_area_ed, slice_thickness) 142 | lvm_volume_es = volume_calculation_given_slice_area(lvm_area_es, slice_thickness) 143 | rv_volume_ed = volume_calculation_given_slice_area(rv_area_ed, slice_thickness) 144 | 
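# volume_calculation_given_slice_area (from helpers) is not included in this
# dump; with the (normalize_term**2)/1000 scaling applied to the areas
# above, a Simpson-style sum of area x slice_thickness (in mm) yields
# body-surface-area-indexed volumes in millilitres.  A sketch under that
# assumption (not the repository's implementation):
def simpson_volume(slice_areas, slice_thickness):
    # slice_areas: per-slice areas already scaled by (pixel size^2)/1000.
    return sum(area * slice_thickness for area in slice_areas)
# The ratios computed just below are standard clinical indices, e.g.
# lv_ratio = 1 - ES/ED is the LV ejection fraction and lvrv_ratio compares
# RV and LV end-diastolic volumes.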
rv_volume_es = volume_calculation_given_slice_area(rv_area_es, slice_thickness) 145 | 146 | 147 | lv_ratio = 1.0 - lv_volume_es / lv_volume_ed 148 | lvm_ratio = lvm_volume_es / lvm_volume_ed 149 | rv_ratio = 1.0 - rv_volume_es / rv_volume_ed 150 | lvrv_ratio = rv_volume_ed / lv_volume_ed 151 | lvmrv_ratio = rv_volume_ed / (lv_volume_ed + lvm_volume_ed) 152 | lvmlv_ratio = lvm_volume_ed / lv_volume_ed 153 | print(lv_volume_ed, rv_volume_ed, lvm_volume_ed, lv_ratio, rv_ratio, lvm_ratio, lvrv_ratio, lvmrv_ratio, lvmlv_ratio) 154 | 155 | 156 | 157 | written = '{} {} {} {} {} {} {} {} {} {}\n'.format(subject, lv_volume_ed, rv_volume_ed, lvm_volume_ed, lv_ratio, rv_ratio, lvm_ratio, lvrv_ratio, lvmrv_ratio, lvmlv_ratio) 158 | volume_info.write(written) 159 | 160 | volume_info.close() 161 | 162 | 163 | 164 | 165 | if __name__ == '__main__': 166 | acdc_volume() 167 | 168 | 169 | 170 | 171 | -------------------------------------------------------------------------------- /flow/module_apparentflow_net.py: -------------------------------------------------------------------------------- 1 | """ The module of ApparentFlow-net """ 2 | 3 | import sys 4 | sys.path.append('..') 5 | 6 | from keras.models import Model 7 | from keras.layers import ( 8 | Input, 9 | Activation, 10 | UpSampling2D 11 | ) 12 | from keras.layers.convolutional import ( 13 | Conv2D, 14 | MaxPooling2D 15 | ) 16 | from keras.layers.core import ( 17 | Reshape, 18 | Lambda 19 | ) 20 | from keras.layers.merge import ( 21 | Add, 22 | Concatenate 23 | ) 24 | from keras import backend as K 25 | 26 | 27 | from helpers import ( 28 | conv_bn_leakyrelu_repetition_block, 29 | handle_dim_ordering, 30 | one_hot 31 | ) 32 | 33 | 34 | def net_module(input_shape, num_outputs): 35 | """Builds a net architecture. 36 | Args: 37 | input_shape: The input shape in the form (nb_rows, nb_cols, nb_channels) 38 | num_outputs: The number of outputs at final softmax layer 39 | Returns: 40 | The keras `Model`. 
41 | """ 42 | CHANNEL_AXIS = 3 43 | handle_dim_ordering() 44 | if len(input_shape) != 3: 45 | raise Exception("Input shape should be a tuple (nb_rows, nb_cols, nb_channels)") 46 | 47 | # Permute dimension order if necessary 48 | if K.image_dim_ordering() != 'tf': 49 | input_shape = (input_shape[2], input_shape[0], input_shape[1]) 50 | 51 | input_img0 = Input(shape=input_shape, name="input_img0") 52 | 53 | input_img1 = Input(shape=input_shape, name="input_img1") 54 | 55 | 56 | concatenate = Concatenate(axis=CHANNEL_AXIS, name="concatenate")([input_img0, 57 | input_img1]) 58 | 59 | 60 | base_channel = 24 61 | 62 | 63 | 64 | 65 | block_conv_1 = conv_bn_leakyrelu_repetition_block(filters=1*base_channel, kernel_size=(3,3), 66 | repetitions=2, first_layer_down_size=False, alpha=0.0, 67 | name="conv_block1")(concatenate) 68 | 69 | 70 | block_pool_2 = MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='valid', 71 | data_format=None, name="pool_block2")(block_conv_1) 72 | 73 | block_conv_2 = conv_bn_leakyrelu_repetition_block(filters=2*base_channel, kernel_size=(3,3), 74 | repetitions=2, first_layer_down_size=False, alpha=0.0, 75 | name="conv_block2")(block_pool_2) 76 | 77 | 78 | block_pool_4 = MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='valid', 79 | data_format=None, name="pool_block4")(block_conv_2) 80 | 81 | block_conv_4 = conv_bn_leakyrelu_repetition_block(filters=4*base_channel, kernel_size=(3,3), 82 | repetitions=2, first_layer_down_size=False, alpha=0.0, 83 | name="conv_block4")(block_pool_4) 84 | 85 | 86 | block_pool_8 = MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='valid', 87 | data_format=None, name="pool_block8")(block_conv_4) 88 | 89 | block_conv_8 = conv_bn_leakyrelu_repetition_block(filters=8*base_channel, kernel_size=(3,3), 90 | repetitions=2, first_layer_down_size=False, alpha=0.0, 91 | name="conv_block8")(block_pool_8) 92 | 93 | 94 | block_pool_16 = MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='valid', 95 | data_format=None, name="pool_block16")(block_conv_8) 96 | 97 | block_conv_16 = conv_bn_leakyrelu_repetition_block(filters=16*base_channel, kernel_size=(3,3), 98 | repetitions=2, first_layer_down_size=False, alpha=0.0, 99 | name="conv_block16")(block_pool_16) 100 | 101 | 102 | 103 | 104 | 105 | block_up_8 = UpSampling2D(size=(2,2), name="up_block8")(block_conv_16) 106 | 107 | block_concat_8 = Concatenate(axis=CHANNEL_AXIS, name="concat8")([block_up_8, block_conv_8]) 108 | 109 | block_expan_conv_8 = conv_bn_leakyrelu_repetition_block(filters=8*base_channel, kernel_size=(3,3), 110 | repetitions=2, first_layer_down_size=False, alpha=0.0, 111 | name="expan_conv_block8")(block_concat_8) 112 | 113 | 114 | block_up_4 = UpSampling2D(size=(2,2), name="up_block4")(block_expan_conv_8) 115 | 116 | block_concat_4 = Concatenate(axis=CHANNEL_AXIS, name="concat4")([block_up_4, block_conv_4]) 117 | 118 | block_expan_conv_4 = conv_bn_leakyrelu_repetition_block(filters=4*base_channel, kernel_size=(3,3), 119 | repetitions=2, first_layer_down_size=False, alpha=0.0, 120 | name="expan_conv_block4")(block_concat_4) 121 | 122 | 123 | block_up_2 = UpSampling2D(size=(2,2), name="up_block2")(block_expan_conv_4) 124 | 125 | block_concat_2 = Concatenate(axis=CHANNEL_AXIS, name="concat2")([block_up_2, block_conv_2]) 126 | 127 | block_expan_conv_2 = conv_bn_leakyrelu_repetition_block(filters=2*base_channel, kernel_size=(3,3), 128 | repetitions=2, first_layer_down_size=False, alpha=0.0, 129 | name="expan_conv_block2")(block_concat_2) 130 | 131 | 132 | block_up_1 = 
UpSampling2D(size=(2,2), name="up_block1")(block_expan_conv_2) 133 | 134 | block_concat_1 = Concatenate(axis=CHANNEL_AXIS, name="concat1")([block_up_1, block_conv_1]) 135 | 136 | block_expan_conv_1 = conv_bn_leakyrelu_repetition_block(filters=1*base_channel, kernel_size=(3,3), 137 | repetitions=2, first_layer_down_size=False, alpha=0.0, 138 | name="expan_conv_block1")(block_concat_1) 139 | 140 | 141 | 142 | 143 | 144 | 145 | block_seg_4 = Conv2D(filters=num_outputs, kernel_size=(1,1), strides=(1,1), 146 | padding="same", data_format=None, dilation_rate=(1, 1), activation=None, 147 | use_bias=True, kernel_initializer="he_normal", bias_initializer="zeros", 148 | kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, 149 | kernel_constraint=None, bias_constraint=None, 150 | name="seg_block4")(block_expan_conv_4) 151 | 152 | block_seg_2 = Conv2D(filters=num_outputs, kernel_size=(1,1), strides=(1,1), 153 | padding="same", data_format=None, dilation_rate=(1, 1), activation=None, 154 | use_bias=True, kernel_initializer="he_normal", bias_initializer="zeros", 155 | kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, 156 | kernel_constraint=None, bias_constraint=None, 157 | name="seg_block2")(block_expan_conv_2) 158 | 159 | block_seg_1 = Conv2D(filters=num_outputs, kernel_size=(1,1), strides=(1,1), 160 | padding="same", data_format=None, dilation_rate=(1, 1), activation=None, 161 | use_bias=True, kernel_initializer="he_normal", bias_initializer="zeros", 162 | kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, 163 | kernel_constraint=None, bias_constraint=None, 164 | name="seg_block1")(block_expan_conv_1) 165 | 166 | block_seg_up_2 = UpSampling2D(size=(2,2), name="seg_up_block2")(block_seg_4) 167 | 168 | block_add_2 = Add(name="add_block2")([block_seg_up_2, block_seg_2]) 169 | 170 | block_seg_up_1 = UpSampling2D(size=(2,2), name="seg_up_block1")(block_add_2) 171 | 172 | output = Add(name="output")([block_seg_up_1, block_seg_1]) 173 | 174 | 175 | 176 | model = Model(inputs=[input_img0, input_img1], outputs=output) 177 | 178 | return model 179 | 180 | 181 | 182 | 183 | 184 | 185 | 186 | 187 | -------------------------------------------------------------------------------- /ROI/module_roi_net.py: -------------------------------------------------------------------------------- 1 | """ The module of ROI-net """ 2 | 3 | import sys 4 | sys.path.append('..') 5 | 6 | from keras.models import Model 7 | from keras.layers import ( 8 | Input, 9 | Activation 10 | ) 11 | from keras.layers.convolutional import ( 12 | Conv2D, 13 | MaxPooling2D, 14 | UpSampling2D 15 | ) 16 | from keras.layers.merge import ( 17 | Add, 18 | Concatenate 19 | ) 20 | from keras import backend as K 21 | 22 | from helpers import ( 23 | conv_bn_leakyrelu_repetition_block, 24 | handle_dim_ordering 25 | ) 26 | 27 | 28 | def net_module(input_shape, num_outputs): 29 | """Builds a net architecture. 30 | Args: 31 | input_shape: The input shape in the form (nb_rows, nb_cols, nb_channels) 32 | num_outputs: The number of outputs at final softmax layer 33 | Returns: 34 | The keras `Model`. 
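# Compared with ApparentFlow-net above, ROI-net is one pooling stage deeper
# (inputs must be divisible by 32 instead of 16), uses LeakyReLU with
# alpha=0.1 rather than 0.0 (plain ReLU), and ends in a sigmoid so that the
# single output channel is a foreground probability map:
#   model = net_module(input_shape=(128, 128, 1), num_outputs=1)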
35 | """ 36 | CHANNEL_AXIS = 3 37 | handle_dim_ordering() 38 | if len(input_shape) != 3: 39 | raise Exception("Input shape should be a tuple (nb_rows, nb_cols, nb_channels)") 40 | 41 | # Permute dimension order if necessary 42 | if K.image_dim_ordering() != 'tf': 43 | input_shape = (input_shape[2], input_shape[0], input_shape[1]) 44 | 45 | input = Input(shape=input_shape, name="input") 46 | 47 | base_channel = 24 48 | 49 | 50 | 51 | 52 | block_conv_1 = conv_bn_leakyrelu_repetition_block(filters=base_channel, kernel_size=(3,3), 53 | repetitions=2, first_layer_down_size=False, alpha=0.1, 54 | name="conv_block1")(input) 55 | 56 | 57 | block_pool_2 = MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='valid', 58 | data_format=None, name="pool_block2")(block_conv_1) 59 | 60 | block_conv_2 = conv_bn_leakyrelu_repetition_block(filters=2*base_channel, kernel_size=(3,3), 61 | repetitions=2, first_layer_down_size=False, alpha=0.1, 62 | name="conv_block2")(block_pool_2) 63 | 64 | 65 | block_pool_4 = MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='valid', 66 | data_format=None, name="pool_block4")(block_conv_2) 67 | 68 | block_conv_4 = conv_bn_leakyrelu_repetition_block(filters=4*base_channel, kernel_size=(3,3), 69 | repetitions=2, first_layer_down_size=False, alpha=0.1, 70 | name="conv_block4")(block_pool_4) 71 | 72 | 73 | block_pool_8 = MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='valid', 74 | data_format=None, name="pool_block8")(block_conv_4) 75 | 76 | block_conv_8 = conv_bn_leakyrelu_repetition_block(filters=8*base_channel, kernel_size=(3,3), 77 | repetitions=2, first_layer_down_size=False, alpha=0.1, 78 | name="conv_block8")(block_pool_8) 79 | 80 | 81 | block_pool_16 = MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='valid', 82 | data_format=None, name="pool_block16")(block_conv_8) 83 | 84 | block_conv_16 = conv_bn_leakyrelu_repetition_block(filters=16*base_channel, kernel_size=(3,3), 85 | repetitions=2, first_layer_down_size=False, alpha=0.1, 86 | name="conv_block16")(block_pool_16) 87 | 88 | 89 | block_pool_32 = MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='valid', 90 | data_format=None, name="pool_block32")(block_conv_16) 91 | 92 | block_conv_32 = conv_bn_leakyrelu_repetition_block(filters=32*base_channel, kernel_size=(3,3), 93 | repetitions=2, first_layer_down_size=False, alpha=0.1, 94 | name="conv_block32")(block_pool_32) 95 | 96 | 97 | 98 | 99 | block_up_16 = UpSampling2D(size=(2,2), name="up_block16")(block_conv_32) 100 | 101 | block_concat_16 = Concatenate(axis=CHANNEL_AXIS, name="concat16")([block_up_16, block_conv_16]) 102 | 103 | block_expan_conv_16 = conv_bn_leakyrelu_repetition_block(filters=16*base_channel, kernel_size=(3,3), 104 | repetitions=2, first_layer_down_size=False, alpha=0.1, 105 | name="expan_conv_block16")(block_concat_16) 106 | 107 | 108 | 109 | block_up_8 = UpSampling2D(size=(2,2), name="up_block8")(block_expan_conv_16) 110 | 111 | block_concat_8 = Concatenate(axis=CHANNEL_AXIS, name="concat8")([block_up_8, block_conv_8]) 112 | 113 | block_expan_conv_8 = conv_bn_leakyrelu_repetition_block(filters=8*base_channel, kernel_size=(3,3), 114 | repetitions=2, first_layer_down_size=False, alpha=0.1, 115 | name="expan_conv_block8")(block_concat_8) 116 | 117 | 118 | block_up_4 = UpSampling2D(size=(2,2), name="up_block4")(block_expan_conv_8) 119 | 120 | block_concat_4 = Concatenate(axis=CHANNEL_AXIS, name="concat4")([block_up_4, block_conv_4]) 121 | 122 | block_expan_conv_4 = conv_bn_leakyrelu_repetition_block(filters=4*base_channel, 
kernel_size=(3,3), 123 | repetitions=2, first_layer_down_size=False, alpha=0.1, 124 | name="expan_conv_block4")(block_concat_4) 125 | 126 | 127 | block_up_2 = UpSampling2D(size=(2,2), name="up_block2")(block_expan_conv_4) 128 | 129 | block_concat_2 = Concatenate(axis=CHANNEL_AXIS, name="concat2")([block_up_2, block_conv_2]) 130 | 131 | block_expan_conv_2 = conv_bn_leakyrelu_repetition_block(filters=2*base_channel, kernel_size=(3,3), 132 | repetitions=2, first_layer_down_size=False, alpha=0.1, 133 | name="expan_conv_block2")(block_concat_2) 134 | 135 | 136 | block_up_1 = UpSampling2D(size=(2,2), name="up_block1")(block_expan_conv_2) 137 | 138 | block_concat_1 = Concatenate(axis=CHANNEL_AXIS, name="concat1")([block_up_1, block_conv_1]) 139 | 140 | block_expan_conv_1 = conv_bn_leakyrelu_repetition_block(filters=base_channel, kernel_size=(3,3), 141 | repetitions=2, first_layer_down_size=False, alpha=0.1, 142 | name="expan_conv_block1")(block_concat_1) 143 | 144 | 145 | 146 | 147 | 148 | 149 | block_seg_4 = Conv2D(filters=num_outputs, kernel_size=(1,1), strides=(1,1), 150 | padding="same", data_format=None, dilation_rate=(1, 1), activation=None, 151 | use_bias=True, kernel_initializer="he_normal", bias_initializer="zeros", 152 | kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, 153 | kernel_constraint=None, bias_constraint=None, 154 | name="seg_block4")(block_expan_conv_4) 155 | 156 | block_seg_2 = Conv2D(filters=num_outputs, kernel_size=(1,1), strides=(1,1), 157 | padding="same", data_format=None, dilation_rate=(1, 1), activation=None, 158 | use_bias=True, kernel_initializer="he_normal", bias_initializer="zeros", 159 | kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, 160 | kernel_constraint=None, bias_constraint=None, 161 | name="seg_block2")(block_expan_conv_2) 162 | 163 | block_seg_1 = Conv2D(filters=num_outputs, kernel_size=(1,1), strides=(1,1), 164 | padding="same", data_format=None, dilation_rate=(1, 1), activation=None, 165 | use_bias=True, kernel_initializer="he_normal", bias_initializer="zeros", 166 | kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, 167 | kernel_constraint=None, bias_constraint=None, 168 | name="seg_block1")(block_expan_conv_1) 169 | 170 | block_seg_up_2 = UpSampling2D(size=(2,2), name="seg_up_block2")(block_seg_4) 171 | 172 | block_add_2 = Add(name="add_block2")([block_seg_up_2, block_seg_2]) 173 | 174 | block_seg_up_1 = UpSampling2D(size=(2,2), name="seg_up_block1")(block_add_2) 175 | 176 | prediction = Add(name="prediction")([block_seg_up_1, block_seg_1]) 177 | 178 | 179 | 180 | output = Activation("sigmoid", name="output")(prediction) 181 | 182 | 183 | 184 | model = Model(inputs=input, outputs=output) 185 | 186 | return model 187 | 188 | 189 | 190 | 191 | 192 | 193 | 194 | 195 | -------------------------------------------------------------------------------- /acdc_info/acdc_pixel_size.txt: -------------------------------------------------------------------------------- 1 | patient001 1.5625 124 1.513671875 128 10.0 2 | patient002 1.3671875 132 1.40991210938 128 10.0 3 | patient003 1.5625 128 1.5625 128 10.0 4 | patient004 1.3671875 128 1.3671875 128 10.0 5 | patient005 1.40625 138 1.51611328125 128 10.0 6 | patient006 1.7578125 128 1.7578125 128 10.0 7 | patient007 1.875 121 1.7724609375 128 10.0 8 | patient008 1.5625 132 1.611328125 128 10.0 9 | patient009 1.3671900034 118 1.26037828438 128 10.0 10 | patient010 1.5625 128 1.5625 128 10.0 11 | patient011 1.48438000679 124 1.43799313158 128 
10.0 12 | patient012 1.5625 122 1.4892578125 128 10.0 13 | patient013 1.4453099966 138 1.55822484009 128 10.0 14 | patient014 1.3671900034 176 1.87988625467 128 10.0 15 | patient015 1.3671900034 122 1.30310297199 128 10.0 16 | patient016 1.4453099966 138 1.55822484009 128 10.0 17 | patient017 1.5625 134 1.6357421875 128 10.0 18 | patient018 1.3671900034 134 1.4312770348 128 10.0 19 | patient019 1.4453099966 132 1.490475934 128 10.0 20 | patient020 1.7578099966 106 1.45568640344 128 10.0 21 | patient021 1.6796875 122 1.60095214844 128 10.0 22 | patient022 1.40625 102 1.12060546875 128 10.0 23 | patient023 1.4453125 100 1.12915039062 128 10.0 24 | patient024 1.5625 112 1.3671875 128 10.0 25 | patient025 1.3671875 144 1.5380859375 128 10.0 26 | patient026 1.3671875 132 1.40991210938 128 10.0 27 | patient027 1.3671875 122 1.30310058594 128 10.0 28 | patient028 1.5625 112 1.3671875 128 10.0 29 | patient029 1.3671875 122 1.30310058594 128 10.0 30 | patient030 1.4453125 92 1.03881835938 128 10.0 31 | patient031 1.484375 124 1.43798828125 128 10.0 32 | patient032 1.5625 122 1.4892578125 128 10.0 33 | patient033 1.5625 108 1.318359375 128 10.0 34 | patient034 1.5625 108 1.318359375 128 10.0 35 | patient035 1.6796900034 106 1.39099328406 128 5.0 36 | patient036 1.4453099966 112 1.26464624703 128 10.0 37 | patient037 1.875 84 1.23046875 128 10.0 38 | patient038 0.833333015442 186 1.21093703806 128 10.0 39 | patient039 1.64061999321 108 1.38427311927 128 10.0 40 | patient040 1.48438000679 124 1.43799313158 128 10.0 41 | patient041 1.78570997715 91 1.26952818688 128 10.0 42 | patient042 1.7578125 112 1.5380859375 128 10.0 43 | patient043 1.91963994503 86 1.28975808807 128 6.5 44 | patient044 1.3671875 108 1.15356445312 128 10.0 45 | patient045 1.5625 118 1.4404296875 128 10.0 46 | patient046 1.5625 128 1.5625 128 10.0 47 | patient047 1.6796900034 112 1.46972875297 128 10.0 48 | patient048 1.5625 100 1.220703125 128 10.0 49 | patient049 1.3671875 124 1.32446289062 128 10.0 50 | patient050 1.3671875 122 1.30310058594 128 10.0 51 | patient051 1.64061999321 108 1.38427311927 128 10.0 52 | patient052 1.48438000679 118 1.36841281876 128 10.0 53 | patient053 1.3671900034 124 1.32446531579 128 10.0 54 | patient054 1.48438000679 118 1.36841281876 128 10.0 55 | patient055 1.48438000679 116 1.34521938115 128 10.0 56 | patient056 1.3671900034 122 1.30310297199 128 10.0 57 | patient057 0.703125 262 1.43920898438 128 10.0 58 | patient058 1.40625 116 1.2744140625 128 10.0 59 | patient059 1.3671900034 118 1.26037828438 128 10.0 60 | patient060 1.5625 106 1.2939453125 128 10.0 61 | patient061 1.3671875 128 1.3671875 128 10.0 62 | patient062 1.3671875 132 1.40991210938 128 10.0 63 | patient063 1.3671875 106 1.13220214844 128 10.0 64 | patient064 1.4453125 116 1.30981445312 128 10.0 65 | patient065 1.875 96 1.40625 128 10.0 66 | patient066 1.3671875 132 1.40991210938 128 10.0 67 | patient067 1.3671875 122 1.30310058594 128 10.0 68 | patient068 1.5625 100 1.220703125 128 10.0 69 | patient069 1.5625 92 1.123046875 128 10.0 70 | patient070 1.5625 92 1.123046875 128 10.0 71 | patient071 1.5625 108 1.318359375 128 10.0 72 | patient072 1.5625 108 1.318359375 128 10.0 73 | patient073 1.640625 106 1.35864257812 128 10.0 74 | patient074 1.5625 128 1.5625 128 10.0 75 | patient075 1.78570997715 90 1.25557732768 128 5.0 76 | patient076 1.5625 128 1.5625 128 10.0 77 | patient077 1.40625 122 1.34033203125 128 10.0 78 | patient078 1.3671900034 128 1.3671900034 128 10.0 79 | patient079 1.3671900034 118 1.26037828438 128 10.0 80 | 
patient080 1.7578099966 90 1.23596015386 128 10.0 81 | patient081 1.65179002285 97 1.25174712669 128 5.0 82 | patient082 1.78570997715 111 1.54854537081 128 5.0 83 | patient083 1.5625 106 1.2939453125 128 10.0 84 | patient084 1.3671900034 102 1.08947953396 128 5.0 85 | patient085 1.215280056 129 1.22477443144 128 5.0 86 | patient086 1.5625 116 1.416015625 128 10.0 87 | patient087 1.4453099966 134 1.5130589027 128 10.0 88 | patient088 1.65179002285 112 1.44531626999 128 5.0 89 | patient089 1.45833003521 111 1.26464557741 128 10.0 90 | patient090 1.78570997715 86 1.1997738909 128 10.0 91 | patient091 1.5625 96 1.171875 128 10.0 92 | patient092 1.68269002438 111 1.45920775551 128 5.0 93 | patient093 1.5625 121 1.47705078125 128 7.0 94 | patient094 1.68269002438 91 1.1962874392 128 5.0 95 | patient095 1.5625 113 1.37939453125 128 5.0 96 | patient096 1.68269002438 105 1.38033166062 128 5.0 97 | patient097 1.48438000679 118 1.36841281876 128 10.0 98 | patient098 1.25 132 1.2890625 128 10.0 99 | patient099 1.78570997715 101 1.40903677884 128 5.0 100 | patient100 1.31579005718 111 1.14103669021 128 10.0 101 | patient101 1.64061999321 108 1.38427311927 128 10.0 102 | patient102 1.5625 92 1.123046875 128 10.0 103 | patient103 1.5625 106 1.2939453125 128 10.0 104 | patient104 1.3671900034 134 1.4312770348 128 10.0 105 | patient105 1.48438000679 112 1.29883250594 128 10.0 106 | patient106 1.5625 132 1.611328125 128 10.0 107 | patient107 1.3671900034 118 1.26037828438 128 10.0 108 | patient108 1.5625 116 1.416015625 128 10.0 109 | patient109 1.5625 118 1.4404296875 128 10.0 110 | patient110 1.5625 106 1.2939453125 128 10.0 111 | patient111 1.40625 112 1.23046875 128 10.0 112 | patient112 1.3671900034 134 1.4312770348 128 10.0 113 | patient113 1.68269002438 123 1.6169599453 128 10.0 114 | patient114 1.3671900034 138 1.47400172241 128 10.0 115 | patient115 1.3671900034 128 1.3671900034 128 10.0 116 | patient116 1.5625 106 1.2939453125 128 10.0 117 | patient117 1.3671900034 150 1.60217578523 128 10.0 118 | patient118 1.48438000679 118 1.36841281876 128 10.0 119 | patient119 1.68269002438 101 1.32774759736 128 5.0 120 | patient120 1.40625 106 1.16455078125 128 10.0 121 | patient121 1.66666996479 121 1.57552395109 128 10.0 122 | patient122 1.69642996788 112 1.4843762219 128 10.0 123 | patient123 1.48438000679 112 1.29883250594 128 10.0 124 | patient124 1.68269002438 96 1.26201751828 128 5.0 125 | patient125 1.68269002438 92 1.20943345502 128 5.0 126 | patient126 1.68269002438 92 1.20943345502 128 5.0 127 | patient127 1.68269002438 105 1.38033166062 128 5.0 128 | patient128 1.48438000679 124 1.43799313158 128 10.0 129 | patient129 1.65179002285 116 1.49693470821 128 5.0 130 | patient130 1.3671900034 134 1.4312770348 128 10.0 131 | patient131 1.40625 144 1.58203125 128 10.0 132 | patient132 1.3671900034 154 1.64490047283 128 10.0 133 | patient133 1.5625 122 1.4892578125 128 10.0 134 | patient134 1.4453125 116 1.30981445312 128 10.0 135 | patient135 1.95311999321 108 1.64794499427 128 10.0 136 | patient136 1.64061999321 112 1.43554249406 128 10.0 137 | patient137 1.3671900034 144 1.53808875382 128 10.0 138 | patient138 1.3671900034 124 1.32446531579 128 10.0 139 | patient139 1.68269002438 105 1.38033166062 128 5.0 140 | patient140 1.68269002438 124 1.63010596111 128 5.0 141 | patient141 1.3671900034 132 1.409914691 128 10.0 142 | patient142 1.3671900034 140 1.49536406621 128 10.0 143 | patient143 1.3671900034 144 1.53808875382 128 10.0 144 | patient144 1.40625 116 1.2744140625 128 10.0 145 | patient145 
1.7578099966 122 1.67541265301 128 10.0 146 | patient146 1.48438000679 134 1.55396031961 128 10.0 147 | patient147 1.736109972 127 1.72254661284 128 5.0 148 | patient148 1.48438000679 122 1.41479969397 128 10.0 149 | patient149 1.40625 128 1.40625 128 10.0 150 | patient150 1.3671900034 106 1.13220422156 128 10.0 151 | -------------------------------------------------------------------------------- /acdc_info/acdc_info.txt: -------------------------------------------------------------------------------- 1 | patient001 DCM 30 0 11 10 184.0 95.0 2.20353251747 2 | patient002 DCM 30 0 11 10 160.0 70.0 1.76383420738 3 | patient003 DCM 30 0 14 10 165.0 77.0 1.87860764043 4 | patient004 DCM 28 0 14 10 159.0 46.0 1.42536545022 5 | patient005 DCM 30 0 12 10 165.0 77.0 1.87860764043 6 | patient006 DCM 28 0 15 11 180.0 70.0 1.87082869339 7 | patient007 DCM 16 0 6 10 173.0 107.0 2.26758559804 8 | patient008 DCM 28 0 12 10 180.0 100.0 2.2360679775 9 | patient009 DCM 35 0 12 10 153.0 61.0 1.61012421881 10 | patient010 DCM 28 0 12 10 170.0 68.0 1.79195734076 11 | patient011 DCM 15 0 7 9 180.0 70.0 1.87082869339 12 | patient012 DCM 30 0 12 10 160.0 59.0 1.61932770687 13 | patient013 DCM 30 0 13 10 175.0 57.0 1.66458202962 14 | patient014 DCM 30 0 12 10 175.0 75.0 1.90940653956 15 | patient015 DCM 21 0 9 9 158.0 57.0 1.58166578855 16 | patient016 DCM 30 0 11 10 173.0 54.0 1.61090036936 17 | patient017 DCM 19 0 8 9 178.0 85.0 2.05006774956 18 | patient018 DCM 13 0 9 8 161.0 79.0 1.87964240098 19 | patient019 DCM 30 0 10 11 191.0 97.0 2.26856538122 20 | patient020 DCM 20 0 10 8 182.0 106.0 2.31492740467 21 | patient021 HCM 30 0 12 10 192.0 110.0 2.42212028328 22 | patient022 HCM 28 0 10 7 165.0 42.0 1.38744369255 23 | patient023 HCM 25 0 8 9 166.0 74.0 1.84722013367 24 | patient024 HCM 28 0 8 8 175.0 85.0 2.03271848628 25 | patient025 HCM 25 0 8 9 174.0 82.0 1.99081222955 26 | patient026 HCM 28 0 11 10 170.0 70.0 1.81811868577 27 | patient027 HCM 30 0 10 10 158.0 60.0 1.62275485929 28 | patient028 HCM 28 0 8 10 177.0 76.0 1.93304595565 29 | patient029 HCM 30 0 11 11 163.0 55.0 1.57806139861 30 | patient030 HCM 35 0 11 10 150.0 54.0 1.5 31 | patient031 HCM 30 0 9 10 176.0 89.0 2.08593171295 32 | patient032 HCM 30 0 11 10 186.0 123.0 2.52091253319 33 | patient033 HCM 30 0 13 10 165.0 89.0 2.0196946964 34 | patient034 HCM 30 0 15 10 172.0 79.0 1.94279294945 35 | patient035 HCM 28 0 10 13 170.0 80.0 1.94365063162 36 | patient036 HCM 35 0 11 8 160.0 80.0 1.88561808316 37 | patient037 HCM 30 0 11 7 169.0 105.0 2.22017266596 38 | patient038 HCM 24 0 10 8 160.0 59.0 1.61932770687 39 | patient039 HCM 28 0 9 9 175.0 100.0 2.20479275922 40 | patient040 HCM 35 0 12 10 172.0 95.0 2.13046682417 41 | patient041 MINF 17 0 10 6 170.0 70.0 1.81811868577 42 | patient042 MINF 35 0 15 9 168.0 99.0 2.14941852602 43 | patient043 MINF 13 0 6 12 165.0 76.0 1.86636902389 44 | patient044 MINF 30 0 10 9 180.0 54.0 1.64316767252 45 | patient045 MINF 35 0 12 8 174.0 68.0 1.81291661878 46 | patient046 MINF 30 0 9 9 186.0 74.0 1.95533458347 47 | patient047 MINF 22 0 8 9 180.0 82.0 2.02484567313 48 | patient048 MINF 28 0 7 8 167.0 73.0 1.84021435949 49 | patient049 MINF 35 0 10 7 171.0 72.0 1.84932420089 50 | patient050 MINF 30 0 11 10 162.0 57.0 1.6015617378 51 | patient051 MINF 30 0 10 10 170.0 91.0 2.07297424543 52 | patient052 MINF 20 0 8 8 161.0 85.0 1.9497150789 53 | patient053 MINF 30 0 11 7 176.0 77.0 1.94021762811 54 | patient054 MINF 30 0 11 8 176.0 94.0 2.14372469211 55 | patient055 MINF 25 0 9 9 180.0 82.0 2.02484567313 56 | 
patient056 MINF 30 0 11 9 170.0 62.0 1.71107503569 57 | patient057 MINF 25 0 8 8 175.0 80.0 1.97202659437 58 | patient058 MINF 30 0 13 9 168.0 68.0 1.78138522878 59 | patient059 MINF 30 0 8 9 173.0 68.0 1.80769958173 60 | patient060 MINF 30 0 13 9 172.0 51.0 1.56098259653 61 | patient061 NOR 30 0 9 9 187.0 95.0 2.22142346756 62 | patient062 NOR 30 0 8 10 172.0 74.0 1.88030730349 63 | patient063 NOR 35 0 15 8 165.0 49.0 1.49861046751 64 | patient064 NOR 35 0 11 10 175.0 97.0 2.1714690368 65 | patient065 NOR 30 0 13 8 180.0 92.0 2.14476105895 66 | patient066 NOR 30 0 10 9 175.0 80.0 1.97202659437 67 | patient067 NOR 30 0 9 10 178.0 60.0 1.72240142437 68 | patient068 NOR 28 0 11 7 154.0 64.0 1.6546231528 69 | patient069 NOR 30 0 11 7 160.0 53.0 1.53478192443 70 | patient070 NOR 28 0 9 6 158.0 79.0 1.86204785712 71 | patient071 NOR 30 0 8 10 169.0 61.0 1.69222076311 72 | patient072 NOR 30 0 10 8 160.0 78.0 1.8618986725 73 | patient073 NOR 35 0 9 7 185.0 96.0 2.22111083319 74 | patient074 NOR 30 0 11 8 175.0 87.0 2.05649377988 75 | patient075 NOR 14 0 5 14 179.0 93.0 2.15038756197 76 | patient076 NOR 30 0 11 8 175.0 87.0 2.05649377988 77 | patient077 NOR 25 0 8 8 180.0 75.0 1.9364916731 78 | patient078 NOR 30 0 8 8 185.0 84.0 2.07765893897 79 | patient079 NOR 30 0 10 9 178.0 80.0 1.98885785202 80 | patient080 NOR 21 0 9 6 155.0 74.0 1.78496809807 81 | patient081 RV 16 0 6 17 177.0 70.0 1.85517294791 82 | patient082 RV 17 0 6 16 183.0 82.0 2.04164965979 83 | patient083 RV 17 0 7 6 160.0 58.0 1.60554594384 84 | patient084 RV 35 0 9 12 140.0 35.0 1.16666666667 85 | patient085 RV 30 0 8 15 172.0 61.0 1.70717440364 86 | patient086 RV 19 0 7 7 165.0 70.0 1.79118210502 87 | patient087 RV 28 0 9 8 170.0 64.0 1.73845397472 88 | patient088 RV 25 0 11 16 180.0 70.0 1.87082869339 89 | patient089 RV 30 0 9 6 170.0 103.0 2.20542261004 90 | patient090 RV 12 3 10 7 150.0 66.0 1.65831239518 91 | patient091 RV 16 0 8 8 162.0 53.0 1.54434452115 92 | patient092 RV 15 0 5 15 170.0 69.0 1.80508540887 93 | patient093 RV 30 0 13 10 165.0 65.0 1.72602626477 94 | patient094 RV 14 0 6 10 164.0 45.0 1.43178210633 95 | patient095 RV 30 0 11 14 165.0 76.0 1.86636902389 96 | patient096 RV 15 0 7 18 186.0 76.0 1.98158185969 97 | patient097 RV 30 0 10 8 187.0 82.0 2.06384215589 98 | patient098 RV 22 0 8 7 167.0 50.0 1.52297224021 99 | patient099 RV 19 0 8 16 180.0 80.0 2.0 100 | patient100 RV 34 0 12 8 165.0 63.0 1.6992645468 101 | patient101 TEST 30 0 13 10 169.0 79.0 1.92577545709 102 | patient102 TEST 30 0 12 8 156.0 75.0 1.80277563773 103 | patient103 TEST 30 0 10 9 175.0 107.0 2.2806553639 104 | patient104 TEST 30 0 10 9 180.0 74.0 1.92353840617 105 | patient105 TEST 30 0 9 10 173.0 84.0 2.00914575546 106 | patient106 TEST 30 0 12 9 181.0 91.0 2.1389898966 107 | patient107 TEST 30 0 9 9 155.0 47.0 1.42253685756 108 | patient108 TEST 30 0 8 10 170.0 90.0 2.06155281281 109 | patient109 TEST 30 0 9 8 180.0 60.0 1.73205080757 110 | patient110 TEST 30 0 10 9 167.0 116.0 2.31972220559 111 | patient111 TEST 15 0 6 6 172.0 80.0 1.95505043982 112 | patient112 TEST 30 0 11 10 172.0 90.0 2.07364413533 113 | patient113 TEST 15 0 7 10 183.0 95.0 2.19753649951 114 | patient114 TEST 30 0 10 11 172.0 80.0 1.95505043982 115 | patient115 TEST 30 0 12 10 165.0 92.0 2.0534523775 116 | patient116 TEST 30 0 8 10 165.0 83.0 1.95042730361 117 | patient117 TEST 30 0 12 10 180.0 92.0 2.14476105895 118 | patient118 TEST 25 0 9 9 176.0 80.0 1.97765292989 119 | patient119 TEST 15 0 8 17 162.0 60.0 1.64316767252 120 | patient120 TEST 25 0 7 9 161.0 
55.0 1.56835015931 121 | patient121 TEST 35 0 9 8 183.0 75.0 1.95256241898 122 | patient122 TEST 14 0 5 9 165.0 65.0 1.72602626477 123 | patient123 TEST 30 0 10 8 170.0 70.0 1.81811868577 124 | patient124 TEST 15 0 6 21 174.0 64.0 1.7587874611 125 | patient125 TEST 15 0 6 17 160.0 46.0 1.42984070597 126 | patient126 TEST 15 0 6 16 137.0 35.0 1.15409897515 127 | patient127 TEST 18 0 6 19 165.0 54.0 1.57321327226 128 | patient128 TEST 30 0 10 9 170.0 74.0 1.86934331904 129 | patient129 TEST 16 0 7 17 183.0 75.0 1.95256241898 130 | patient130 TEST 30 0 10 10 185.0 95.0 2.20951226393 131 | patient131 TEST 25 0 8 10 164.0 64.0 1.70749979665 132 | patient132 TEST 30 0 14 10 163.0 80.0 1.90321365648 133 | patient133 TEST 14 0 9 10 185.0 121.0 2.4936029266 134 | patient134 TEST 35 0 14 11 185.0 104.0 2.31180545125 135 | patient135 TEST 20 0 9 10 184.0 95.0 2.20353251747 136 | patient136 TEST 24 0 11 10 167.0 89.0 2.03189840078 137 | patient137 TEST 30 0 10 9 170.0 68.0 1.79195734076 138 | patient138 TEST 30 0 9 9 174.0 90.0 2.08566536146 139 | patient139 TEST 15 0 7 16 175.0 75.0 1.90940653956 140 | patient140 TEST 15 0 8 20 165.0 68.0 1.76540835692 141 | patient141 TEST 19 0 10 10 170.0 75.0 1.88193163177 142 | patient142 TEST 30 0 11 8 160.0 98.0 2.086996779 143 | patient143 TEST 30 0 11 10 161.0 85.0 1.9497150789 144 | patient144 TEST 25 0 8 8 157.0 88.0 1.95902470066 145 | patient145 TEST 30 0 12 10 171.0 80.0 1.94935886896 146 | patient146 TEST 30 0 9 9 170.0 172.0 2.84995126664 147 | patient147 TEST 19 0 8 15 172.0 104.0 2.22910046631 148 | patient148 TEST 35 0 9 8 170.0 70.0 1.81811868577 149 | patient149 TEST 25 0 11 10 173.0 70.0 1.83409075263 150 | patient150 TEST 30 0 11 8 158.0 56.0 1.56773013551 151 | -------------------------------------------------------------------------------- /classification/data_classification.py: -------------------------------------------------------------------------------- 1 | """ A function to generate the lists of features for classification """ 2 | 3 | import sys 4 | sys.path.append('..') 5 | 6 | import os 7 | 8 | import config 9 | 10 | def data_classification(mode='all', fold = 1, data_class_num = 5, normalization=True): 11 | 12 | data_dir = config.acdc_data_dir 13 | code_dir = config.code_dir 14 | 15 | dilated_subjects = config.acdc_dilated_subjects 16 | hypertrophic_subjects = config.acdc_hypertrophic_subjects 17 | infarct_subjects = config.acdc_infarct_subjects 18 | normal_subjects = config.acdc_normal_subjects 19 | rv_subjects = config.acdc_rv_subjects 20 | test_subjects = config.acdc_test_subjects 21 | 22 | 23 | if data_class_num == 2: 24 | all_subjects = infarct_subjects + normal_subjects 25 | elif data_class_num == 3: 26 | all_subjects = dilated_subjects + infarct_subjects + normal_subjects 27 | elif data_class_num == 4: 28 | all_subjects = dilated_subjects + hypertrophic_subjects + infarct_subjects + normal_subjects 29 | elif data_class_num == 5: 30 | all_subjects = dilated_subjects + hypertrophic_subjects + infarct_subjects + normal_subjects + rv_subjects 31 | 32 | 33 | if mode == 'all': 34 | subjects = all_subjects 35 | elif mode == 'train': 36 | subjects = [x for i,x in enumerate(all_subjects) if (i % 5) != (fold % 5)] 37 | elif mode == 'val': 38 | subjects = [x for i,x in enumerate(all_subjects) if (i % 5) == (fold % 5)] 39 | elif mode == 'predict': 40 | subjects = test_subjects 41 | else: 42 | print('Incorrect mode') 43 | 44 | #print(subjects) 45 | 46 | excluded_slice_ratio = config.excluded_slice_ratio 47 | 48 | seq_instants = 
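# Column layout of the text files parsed below, inferred from the indices
# used throughout this repository:
#   acdc_info.txt:       name class instants ed_instant es_instant slices
#                        height(cm) weight(kg) bsa(m^2)
#   acdc_pixel_size.txt: name orig_pixel_size(mm) orig_size(px)
#                        pixel_size_at_128(mm) 128 slice_thickness(mm)
# Sanity check: for patient001 the stored BSA 2.20353 equals the Mosteller
# estimate sqrt(height * weight / 3600) = sqrt(184 * 95 / 3600), and
# 1.5625 * 124 / 128 = 1.51367 reproduces the resampled pixel size.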
config.acdc_seq_instants 49 | 50 | zfill_num = 2 51 | 52 | 53 | 54 | info_file = os.path.join(code_dir, 'acdc_info', 'acdc_info.txt') 55 | 56 | with open(info_file) as in_file: 57 | subject_info = in_file.readlines() 58 | 59 | subject_info = [x.strip() for x in subject_info] 60 | subject_info = [ y.split()[0:2] + [float(z) for z in y.split()[2:]] for y in subject_info] 61 | 62 | 63 | pixel_file = os.path.join(code_dir, 'acdc_info', 'acdc_pixel_size.txt') 64 | 65 | with open(pixel_file) as p_file: 66 | pixel_size_info = p_file.readlines() 67 | 68 | pixel_size_info = [x.strip() for x in pixel_size_info] 69 | pixel_size_info = [ [y.split()[0]] + [float(z) for z in y.split()[1:]] for y in pixel_size_info] 70 | 71 | 72 | base_file = os.path.join(code_dir, 'acdc_info', 'acdc_base.txt') 73 | 74 | with open(base_file) as b_file: 75 | base_info = b_file.readlines() 76 | 77 | base_info = [x.strip() for x in base_info] 78 | base_info = [ [y.split()[0]] + [int(z) for z in y.split()[1:]] for y in base_info] 79 | 80 | 81 | volume_file = os.path.join(code_dir, 'acdc_info', 'acdc_volume.txt') 82 | 83 | with open(volume_file) as v_file: 84 | volume_info = v_file.readlines() 85 | 86 | volume_info = [x.strip() for x in volume_info] 87 | volume_info = [ [y.split()[0]] + [float(z) for z in y.split()[1:]] for y in volume_info] 88 | 89 | 90 | thickness_file = os.path.join(code_dir, 'acdc_info', 'acdc_thickness.txt') 91 | 92 | with open(thickness_file) as t_file: 93 | thickness_info = t_file.readlines() 94 | 95 | thickness_info = [x.strip() for x in thickness_info] 96 | thickness_info = [ [y.split()[0]] + [float(z) for z in y.split()[1:]] for y in thickness_info] 97 | 98 | 99 | motion_file = os.path.join(code_dir, 'acdc_info', 'acdc_motion_index.txt') 100 | 101 | with open(motion_file) as m_file: 102 | motion_info = m_file.readlines() 103 | 104 | motion_info = [x.strip() for x in motion_info] 105 | motion_info = [ [y.split()[0]] + [float(z) for z in y.split()[1:]] for y in motion_info] 106 | 107 | 108 | 109 | 110 | #print('There will be {} used subjects'.format(len(subjects)) ) 111 | 112 | list_subject_idx = [] 113 | list_lv_volume = [] 114 | list_rv_volume = [] 115 | list_lv_ratio = [] 116 | list_rv_ratio = [] 117 | list_lvmrv_ratio = [] 118 | list_lvmlv_ratio = [] 119 | list_lvmlv_mass = [] 120 | list_thickness = [] 121 | list_thickness_diff = [] 122 | list_asyn_radius = [] 123 | list_asyn_thickness = [] 124 | list_gt = [] 125 | 126 | 127 | 128 | for subject in subjects: 129 | subject_idx = int(subject[-3:]) 130 | instants = int([x for x in subject_info if x[0] == subject][0][2]) 131 | slices = int([x for x in subject_info if x[0] == subject][0][5]) 132 | base_slice = int([x for x in base_info if x[0] == subject][0][1]) 133 | apex_slice = int([x for x in base_info if x[0] == subject][0][2]) 134 | ed_instant = int([x for x in subject_info if x[0] == subject][0][3]) 135 | es_instant = int([x for x in subject_info if x[0] == subject][0][4]) 136 | bsa = [x for x in subject_info if x[0] == subject][0][8] 137 | pixel_size = [x for x in pixel_size_info if x[0] == subject][0][3] 138 | slice_thickness = [x for x in pixel_size_info if x[0] == subject][0][5] 139 | 140 | subject_dir = data_dir.format(subject) 141 | folder = subject_dir + '/predict_2D/' 142 | 143 | slice_range = range(base_slice + int(round((apex_slice + 1 - base_slice)*excluded_slice_ratio)), apex_slice + 1 - int(round((apex_slice + 1 - base_slice)*2*excluded_slice_ratio))) 144 | 145 | normalize_term = pixel_size / (bsa**(1.0/2)) 146 | 147 | lv_volume = [x 
for x in volume_info if x[0] == subject][0][1] 148 | rv_volume = [x for x in volume_info if x[0] == subject][0][2] 149 | lvm_volume = [x for x in volume_info if x[0] == subject][0][3] 150 | lv_ratio = [x for x in volume_info if x[0] == subject][0][4] 151 | rv_ratio = [x for x in volume_info if x[0] == subject][0][5] 152 | lvm_ratio = [x for x in volume_info if x[0] == subject][0][6] 153 | lvmrv_ratio = [x for x in volume_info if x[0] == subject][0][8] 154 | lvmlv_ratio = [x for x in volume_info if x[0] == subject][0][9] 155 | thickness = [x for x in thickness_info if x[0] == subject][0][1] 156 | es_thickness = [x for x in thickness_info if x[0] == subject][0][3] 157 | 158 | if not normalization: 159 | lv_volume *= bsa 160 | rv_volume *= bsa 161 | lvm_volume *= bsa 162 | 163 | lvmlv_mass = 1.06 * (lv_volume + lvm_volume) 164 | 165 | 166 | lv_volume_es = (1.0 - lv_ratio) * lv_volume 167 | rv_volume_es = (1.0 - rv_ratio) * rv_volume 168 | lvm_volume_es = lvm_ratio * lvm_volume 169 | lvmlv_ratio_es = lvm_volume_es / lv_volume_es 170 | lvmrv_ratio_es = rv_volume_es / (lv_volume_es + lvm_volume_es) 171 | 172 | 173 | 174 | asyn_radius = [x for x in motion_info if x[0] == subject][0][1] 175 | asyn_thickness = [x for x in motion_info if x[0] == subject][0][2] 176 | thickness_diff = [x for x in motion_info if x[0] == subject][0][3] 177 | 178 | 179 | if subject in dilated_subjects: 180 | gt = 0 181 | elif subject in hypertrophic_subjects: 182 | gt = 1 183 | elif subject in infarct_subjects: 184 | gt = 2 185 | elif subject in normal_subjects: 186 | gt = 3 187 | elif subject in rv_subjects: 188 | gt = 4 189 | elif subject in test_subjects: 190 | gt = -1 191 | 192 | 193 | 194 | 195 | list_subject_idx.append(subject_idx) 196 | list_lv_volume.append(lv_volume_es) 197 | list_rv_volume.append(rv_volume) 198 | list_lv_ratio.append(lv_ratio) 199 | list_rv_ratio.append(rv_ratio) 200 | list_lvmrv_ratio.append(lvmrv_ratio) 201 | list_lvmlv_ratio.append(lvmlv_ratio) 202 | list_lvmlv_mass.append(lvmlv_mass) 203 | list_thickness.append(thickness) 204 | list_thickness_diff.append(thickness_diff) 205 | list_asyn_radius.append(asyn_radius) 206 | list_asyn_thickness.append(asyn_thickness) 207 | list_gt.append(gt) 208 | 209 | 210 | return list_subject_idx, list_lv_volume, list_rv_volume, list_lv_ratio, list_rv_ratio, list_lvmrv_ratio, list_lvmlv_ratio, list_lvmlv_mass, list_thickness, list_thickness_diff, list_asyn_radius, list_asyn_thickness, list_gt 211 | 212 | 213 | 214 | -------------------------------------------------------------------------------- /flow/predict_apparentflow_net.py: -------------------------------------------------------------------------------- 1 | """ The main file to launch the inference of ApparentFlow-net """ 2 | 3 | import sys 4 | sys.path.append('..') 5 | 6 | import os 7 | import copy 8 | import math 9 | import numpy as np 10 | from PIL import Image as pil_image 11 | from scipy.misc import imresize 12 | from itertools import izip 13 | import tensorflow as tf 14 | 15 | from keras.models import Model 16 | from keras.optimizers import Adam 17 | from keras.utils import plot_model 18 | from keras import backend as K 19 | 20 | K.set_image_data_format('channels_last') # TF dimension ordering in this code 21 | 22 | from helpers import ( 23 | warp_array_according_to_flow, 24 | flow_warped_gt_comparison_dice_loss_lvc, 25 | flow_warped_gt_comparison_dice_loss_lvm, 26 | flow_warped_gt_comparison_dice_loss_rvc, 27 | flow_combined_loss3, 28 | mean_variance_normalization5, 29 | elementwise_multiplication 
30 | ) 31 | 32 | from image2 import ( 33 | array_to_img, 34 | ImageDataGenerator2 35 | ) 36 | from data_apparentflow import data_apparentflow 37 | 38 | from module_apparentflow_net import net_module 39 | 40 | import config 41 | 42 | 43 | 44 | def predict_apparentflow_net(): 45 | 46 | code_path = config.code_dir 47 | 48 | fold = int(sys.argv[1]) 49 | print('fold = {}'.format(fold)) 50 | if fold == 0: 51 | mode = 'predict' 52 | elif fold in range(1,6): 53 | mode = 'val' 54 | else: 55 | print('Incorrect fold') 56 | 57 | initial_lr = config.apparentflow_net_initial_lr 58 | decay_rate = config.apparentflow_net_decay_rate 59 | batch_size = config.apparentflow_net_batch_size 60 | input_img_size = config.apparentflow_net_input_img_size 61 | epochs = config.apparentflow_net_epochs 62 | 63 | 64 | ########### 65 | # The model 66 | model = net_module(input_shape=(input_img_size, input_img_size, 1), num_outputs=2) 67 | print('Loading model') 68 | model.load_weights(filepath=os.path.join(code_path, 'flow', 'model_apparentflow_net_fold{}_epoch{}.h5'.format(str(fold), str(epochs).zfill(3))) ) 69 | 70 | 71 | model.compile(optimizer=Adam(lr=initial_lr), loss=flow_combined_loss3, 72 | metrics=[flow_warped_gt_comparison_dice_loss_lvc, flow_warped_gt_comparison_dice_loss_lvm, flow_warped_gt_comparison_dice_loss_rvc]) 73 | 74 | print('This model has {} parameters'.format(model.count_params()) ) 75 | 76 | 77 | 78 | # Load data lists 79 | img_list0, img_list1, seg_list0, seg_list1 = data_apparentflow(mode=mode, fold = fold) 80 | 81 | predict_sample = len(img_list0) 82 | predict_img_list = [img_list0, img_list1, seg_list0, seg_list1] 83 | 84 | # we create two instances with the same arguments for random transformation 85 | img_data_gen_args = dict(featurewise_center=False, 86 | samplewise_center=False, 87 | featurewise_std_normalization=False, 88 | samplewise_std_normalization=False, 89 | zca_whitening=False, 90 | zca_epsilon=1e-6, 91 | rotation_range=0., 92 | width_shift_range=0., 93 | height_shift_range=0., 94 | shear_range=0., 95 | zoom_range=0., 96 | channel_shift_range=0., 97 | fill_mode='constant', 98 | cval=0., 99 | horizontal_flip=False, 100 | vertical_flip=False, 101 | rescale=None, 102 | preprocessing_function=mean_variance_normalization5, 103 | data_format=K.image_data_format()) 104 | 105 | # deep copy is necessary 106 | mask_data_gen_args = copy.deepcopy(img_data_gen_args) 107 | mask_data_gen_args['preprocessing_function'] = elementwise_multiplication 108 | 109 | ######################### 110 | # Generators for prediction 111 | print('Creating generators for prediction') 112 | seed = 1 113 | generators = [] 114 | # The generators for the 2 inputs 115 | for k in range(0, 2): 116 | img_datagen_k = ImageDataGenerator2(**img_data_gen_args) 117 | img_datagen_k.fit(np.zeros((1,1,1,1)), augment=False, rounds=0, seed=seed) 118 | img_generator_k = img_datagen_k.flow_from_path_list( 119 | path_list=predict_img_list[k], 120 | target_size=(input_img_size, input_img_size), 121 | pad_to_square=True, 122 | resize_mode='nearest', 123 | histogram_based_preprocessing=False, 124 | clahe=False, 125 | color_mode='grayscale', 126 | class_list=None, 127 | class_mode=None, 128 | batch_size=batch_size, 129 | shuffle=False, 130 | seed=seed, 131 | save_to_dir=None, 132 | save_prefix='', 133 | save_format='png', 134 | save_period=500, 135 | follow_links=False) 136 | generators.append(img_generator_k) 137 | 138 | 139 | for k in range(2, 4): 140 | seg_datagen = ImageDataGenerator2(**mask_data_gen_args) 141 | 
seg_datagen.fit(np.zeros((1,1,1,1)), augment=False, rounds=0, seed=seed)
142 | seg_generator = seg_datagen.flow_from_path_list(
143 | path_list=predict_img_list[k],
144 | target_size=(input_img_size, input_img_size),
145 | pad_to_square=True,
146 | resize_mode='nearest',
147 | histogram_based_preprocessing=False,
148 | clahe=False,
149 | color_mode='grayscale',
150 | class_list=None,
151 | class_mode=None,
152 | batch_size=batch_size,
153 | shuffle=False,
154 | seed=seed,
155 | save_to_dir=None,
156 | save_prefix='',
157 | save_format='png',
158 | save_period=500,
159 | follow_links=False)
160 | generators.append(seg_generator)
161 |
162 | # Combine generators into one which yields image and masks
163 | predict_generator = izip(*tuple(generators))
164 |
165 |
166 | ###############
167 | # Predict with the model
168 | print('Start prediction')
169 | print('There will be {} forwards'.format( int(math.ceil(float(predict_sample)/batch_size)) ) )
170 |
171 |
172 | for j in range( int(math.ceil(float(predict_sample)/batch_size)) ):
173 | paths = predict_img_list[1][j*batch_size : min((j+1)*batch_size, predict_sample)]
174 | predict_batch = next(predict_generator)
175 | # flows: frame t -> ED; flows2: ED -> frame t
176 | flows = model.predict([predict_batch[1], predict_batch[0]],
177 | batch_size=batch_size, verbose=0)
178 | flows2 = model.predict([predict_batch[0], predict_batch[1]],
179 | batch_size=batch_size, verbose=0)
180 |
181 |
182 | warped_seg = warp_array_according_to_flow(predict_batch[2], flows, mode = 'nearest')
183 | warped_seg2 = warp_array_according_to_flow(predict_batch[3], flows2, mode = 'nearest')
184 |
185 |
186 | # Save flow2
187 | for i in range(predict_batch[0].shape[0]):
188 | path = paths[i]
189 | save_path = path.replace('/crop_2D/', '/predict_2D/', 1)
190 | save_path = save_path.replace('/crop_2D_', '/flow2_', 1)
191 | save_path = save_path.replace('.png', '.npy', 1)
192 | np.save(save_path, flows2[i])
193 |
194 |
195 |
196 | # Resize and save the warped segmentation mask2
197 | for i in range(predict_batch[0].shape[0]):
198 | original_img_size = pil_image.open(paths[i]).size
199 | original_size = original_img_size[0]
200 |
201 | path = paths[i]
202 | warped_seg_resized2 = np.zeros((original_size, original_size, 1))
203 | warped_seg_resized2[:, :, 0] = imresize(warped_seg2[i, :, :, 0], (original_size, original_size), interp = 'nearest', mode = 'F')
204 |
205 | warped_seg_resized2 = np.rint(warped_seg_resized2)
206 | warped_save_path2 = path.replace('/crop_2D/', '/predict_2D/', 1)
207 | warped_save_path2 = warped_save_path2.replace('/crop_2D_', '/predict_flow_warp2_', 1)
208 | warped_seg_mask2 = array_to_img(warped_seg_resized2 * 50.0, data_format=None, scale=False)
209 | warped_seg_mask2.save(warped_save_path2)
210 |
211 |
212 |
213 |
214 |
215 | K.clear_session()
216 |
217 | print('Prediction is done!')
218 |
219 |
220 | if __name__ == '__main__':
221 | predict_apparentflow_net()
222 |
223 |
224 |
225 |
226 |
--------------------------------------------------------------------------------
/acdc_info/acdc_motion_index.txt:
--------------------------------------------------------------------------------
1 | patient001 0.227336259733 0.666683497399 3.6694427998
2 | patient002 0.429941158181 1.10288817531 5.83374466241
3 | patient003 0.202157745384 0.559407625264 3.00344218059
4 | patient004 0.351934989934 0.858862241466 5.08174438212
5 | patient005 0.456086629851 0.940641123305 3.81869183303
6 | patient006 0.351270366424 0.819480687254 3.91435325294
7 | patient007 0.273457420074
0.998825098272 2.09083751543 8 | patient008 0.242957823858 0.646725316681 2.67714398473 9 | patient009 0.401603574359 1.43733904931 4.00367464811 10 | patient010 0.169459567615 0.73810046816 2.68404488021 11 | patient011 0.30014727746 0.881537912281 2.52914268412 12 | patient012 0.322762514542 1.06441240055 4.84492297404 13 | patient013 0.312633548698 0.54759091444 4.60630057115 14 | patient014 0.204208028792 0.503495104261 2.31235564064 15 | patient015 0.245919035696 0.714618269978 3.82359813261 16 | patient016 0.191049568932 0.948403917614 2.37072176255 17 | patient017 0.179613879019 0.760444194046 2.38095417049 18 | patient018 0.163418621853 0.73594623903 3.12576060046 19 | patient019 0.320848160402 1.01105761769 5.11514197717 20 | patient020 0.363994087253 0.990216858975 4.03691485032 21 | patient021 0.300088655671 0.975784009938 6.69060338315 22 | patient022 0.652723819447 1.44322715957 7.27893582593 23 | patient023 0.56760315512 0.985101284547 12.4935092153 24 | patient024 0.533644623318 1.16547012673 7.82414520211 25 | patient025 0.502414937047 2.03336722203 8.36027257871 26 | patient026 0.51415977928 1.03736168471 8.88537106023 27 | patient027 0.427183275052 0.929357448626 9.14198040109 28 | patient028 0.607953248828 0.985123845461 11.0836646598 29 | patient029 0.510316262539 1.5748075177 10.0533781245 30 | patient030 0.581414039917 0.78069063239 7.62447958727 31 | patient031 0.515165985536 1.38111448776 9.82869873535 32 | patient032 0.489812835093 0.711052284546 7.92456995762 33 | patient033 0.357385284192 2.34134804015 8.79158958218 34 | patient034 0.557479117844 1.57823515394 9.64743753428 35 | patient035 0.617550381021 1.17410152229 6.59109703357 36 | patient036 0.317621467743 1.79412216189 6.58789336033 37 | patient037 0.350957105895 0.575097591763 8.15528448668 38 | patient038 0.414634501795 1.57272195289 7.5783822673 39 | patient039 0.301733296348 1.30943730807 7.68983034672 40 | patient040 0.668161982655 0.634303059778 8.38172489238 41 | patient041 0.379008573006 2.56866955441 6.38859206216 42 | patient042 0.371715814953 2.76193000153 4.76070782232 43 | patient043 0.489663470256 0.836492353641 4.92106364651 44 | patient044 0.525212934265 1.67902308817 5.26958326939 45 | patient045 0.656962319124 2.00268452858 4.25100781474 46 | patient046 0.304204810621 0.848866063035 5.6788203208 47 | patient047 0.615562249758 1.09023905991 4.93548629492 48 | patient048 0.576566534961 1.56005260473 8.12975772865 49 | patient049 0.471642199073 1.05440809488 3.6358993713 50 | patient050 0.244505142652 1.17321117564 2.51265741518 51 | patient051 0.480824300071 1.88874082087 5.87634641099 52 | patient052 0.43416663791 2.34712140805 6.42353034188 53 | patient053 0.518200151901 1.17851720081 8.10373332726 54 | patient054 0.415515379336 1.15841365796 5.30096621403 55 | patient055 0.653570714597 1.22966234934 5.27587531695 56 | patient056 0.457388945362 1.9933673548 5.60320500373 57 | patient057 0.74425217734 1.26436081245 6.39390898063 58 | patient058 0.576006945339 1.60969323392 5.82844552307 59 | patient059 0.455090894965 2.45982858454 5.67137390588 60 | patient060 0.316079008757 1.24550767419 6.55157249633 61 | patient061 0.382946255222 0.781876697029 5.00786511882 62 | patient062 0.368970028957 0.638977440244 5.81377582665 63 | patient063 0.297177766369 1.42494036127 7.53943416138 64 | patient064 0.38443100327 0.944889327814 4.95923589644 65 | patient065 0.326417712239 1.07518602254 5.74322691212 66 | patient066 0.424996411475 0.740898664529 7.21128764778 67 | patient067 0.478351173835 
1.51975614272 6.34417570631 68 | patient068 0.280977431948 1.02324590117 6.6669456579 69 | patient069 0.525859972725 0.671253179783 4.57302851767 70 | patient070 0.310104961128 0.848404329566 6.15105754546 71 | patient071 0.719190326488 1.47781146004 6.20242040576 72 | patient072 0.579767865347 0.780178524883 6.31819715513 73 | patient073 0.472343689019 1.47837523119 8.73400616053 74 | patient074 0.441269981846 1.19960341418 5.01161095299 75 | patient075 0.35971664699 0.847509197658 4.95221192636 76 | patient076 0.386256345166 1.31730885538 6.11364829625 77 | patient077 0.401247036793 0.922300677603 6.90858629152 78 | patient078 0.376610969528 0.842530575126 6.85624892272 79 | patient079 0.508812929855 1.01484171756 6.69904491201 80 | patient080 0.277231047302 0.89237289524 5.83865073976 81 | patient081 0.497196426619 1.34555502565 6.66862206226 82 | patient082 0.470693970142 1.28776671186 6.77102491352 83 | patient083 0.747610393526 0.713612693646 4.80619485717 84 | patient084 0.621344507284 1.50585628208 3.94032943182 85 | patient085 0.653743802844 0.988568296081 4.47218813546 86 | patient086 0.470264110472 2.07838360384 5.66117400924 87 | patient087 0.286764753673 0.675776048983 5.54161781894 88 | patient088 0.430570086685 1.22697490562 6.15080883235 89 | patient089 0.494749119523 1.30895308212 3.84598192915 90 | patient090 0.688984215785 1.72532696654 7.55418623014 91 | patient091 0.442994384318 1.6062347204 7.03053317416 92 | patient092 0.589424201272 1.28066562024 6.25062268652 93 | patient093 0.694466761194 1.92420156763 11.2258188067 94 | patient094 0.607716544063 1.10675656493 3.80195024038 95 | patient095 0.605737676009 1.27016469957 5.82511438436 96 | patient096 0.439149830447 1.05689687037 5.55501116867 97 | patient097 0.411861127466 1.08985837237 5.63851731399 98 | patient098 0.819893000651 1.5200748913 8.32960100008 99 | patient099 0.322944530856 1.29121379345 4.32635827245 100 | patient100 0.354926291595 0.650141066714 5.36010407114 101 | patient101 0.243130548472 0.71584684121 2.34917390213 102 | patient102 0.426174561572 0.827551763856 6.40353761471 103 | patient103 0.497671525453 1.72115557325 7.76047255548 104 | patient104 0.534676318369 1.41602844821 10.4995828359 105 | patient105 0.459100887042 1.78934896848 8.40433365372 106 | patient106 0.157621446508 0.828040818742 2.2214001604 107 | patient107 0.331118968735 0.978045545705 5.45111651046 108 | patient108 0.577106862757 1.90823285084 8.16986281308 109 | patient109 0.289777419006 1.22338983129 6.38598834556 110 | patient110 0.469690098502 0.935266285758 4.47313174411 111 | patient111 0.417137600472 0.515374741414 5.30095069536 112 | patient112 0.450863921215 1.22822612041 5.69222521826 113 | patient113 0.282677011776 0.882736884185 4.3246413003 114 | patient114 0.368281779232 1.73970827935 9.85569024611 115 | patient115 0.479994146073 1.22725297974 6.35132866878 116 | patient116 0.401106514877 2.06896676974 7.36991794881 117 | patient117 0.23911303137 0.701911247982 3.42415228445 118 | patient118 0.595686274819 1.13816988405 4.67015378332 119 | patient119 0.414201391986 2.20969616376 11.3126609849 120 | patient120 0.338040706883 1.30893494105 3.55799407389 121 | patient121 0.321976069112 1.15723188266 6.27651629626 122 | patient122 0.306074131262 0.850760647019 5.70586471232 123 | patient123 0.343907407328 0.925085298917 6.86924306843 124 | patient124 0.616967569488 1.31425603733 7.55476404287 125 | patient125 0.533468752811 1.62592819563 6.10088772256 126 | patient126 0.55003005063 1.73211173912 6.20429949292 127 | 
patient127 0.442461819345 1.58820558074 7.20610946496 128 | patient128 0.269888024345 0.947358853366 7.31433518431 129 | patient129 0.383378347611 0.88959202973 6.18573680914 130 | patient130 0.552211728159 1.62905634622 6.14749978707 131 | patient131 0.160804094525 0.999793078729 2.40048533191 132 | patient132 0.207341830662 0.572125392519 2.92211258969 133 | patient133 0.225830568122 1.05561992485 3.73889182509 134 | patient134 0.337056470049 0.977125013129 4.73458548366 135 | patient135 0.300625174751 0.872324119592 4.695701571 136 | patient136 0.360248282323 0.992694089632 5.05035025864 137 | patient137 0.477177155407 1.64839426414 6.31321691001 138 | patient138 0.627962458804 0.577269756898 8.22207851151 139 | patient139 0.458784453313 1.24330276185 6.45229581498 140 | patient140 0.641880176354 2.60871143718 11.1269096061 141 | patient141 0.508615252567 1.36197926083 9.50557399363 142 | patient142 0.336399454372 0.817319070394 7.56657219221 143 | patient143 0.425768629145 1.27611698773 3.31993324782 144 | patient144 0.40167015968 1.37904695667 7.07885297588 145 | patient145 0.434662293512 1.22290770193 5.59120771759 146 | patient146 0.478679275261 1.4308382756 6.12174425282 147 | patient147 0.523259770147 0.899098054885 7.16261216298 148 | patient148 0.448079318588 0.914680792312 4.82250549223 149 | patient149 0.427392440048 1.13695343702 4.5241523731 150 | patient150 0.296731671103 1.06990925685 7.69707328934 151 | -------------------------------------------------------------------------------- /segmentation/predict_lvrv_net.py: -------------------------------------------------------------------------------- 1 | """ The main file to launch the inference of LVRV-net """ 2 | 3 | import sys 4 | sys.path.append('..') 5 | 6 | import os 7 | import copy 8 | import numpy as np 9 | from itertools import izip 10 | from scipy.misc import imresize 11 | from PIL import Image as pil_image 12 | import tensorflow as tf 13 | 14 | from keras.models import Model 15 | from keras.optimizers import Adam 16 | from keras.utils import plot_model 17 | from keras import backend as K 18 | 19 | K.set_image_data_format('channels_last') # TF dimension ordering in this code 20 | 21 | from helpers import ( 22 | dice_coef5_0, 23 | dice_coef5_1, 24 | dice_coef5_2, 25 | dice_coef5_3, 26 | dice_coef5, 27 | dice_coef5_loss, 28 | mean_variance_normalization5, 29 | elementwise_multiplication, 30 | keep_largest_components, 31 | touch_length_count, 32 | number_of_components, 33 | second_largest_component_count 34 | ) 35 | 36 | from image2 import ( 37 | array_to_img, 38 | ImageDataGenerator2 39 | ) 40 | 41 | from data_lvrv_segmentation_propagation_acdc import data_lvrv_segmentation_propagation_acdc 42 | 43 | from module_lvrv_net import net_module 44 | 45 | import config 46 | 47 | 48 | 49 | def predict_lvrv_net(): 50 | 51 | code_path = config.code_dir 52 | 53 | fold = int(sys.argv[1]) 54 | print('fold = {}'.format(fold)) 55 | if fold == 0: 56 | mode = 'predict' 57 | elif fold in range(1,6): 58 | mode = 'val_predict' 59 | else: 60 | print('Incorrect fold') 61 | 62 | initial_lr = config.lvrv_net_initial_lr 63 | input_img_size = config.lvrv_net_input_img_size 64 | epochs = config.lvrv_net_epochs 65 | batch_size = 1 66 | 67 | ########### 68 | # The model 69 | model = net_module(input_shape=(input_img_size, input_img_size, 1), num_outputs=4) 70 | print('Loading model') 71 | 72 | 73 | model.load_weights(filepath=os.path.join(code_path, 'segmentation', 'model_lvrv_net_finetune_fold{}_epoch{}.h5'.format(str(fold), 
str(epochs).zfill(3))) )
74 |
75 |
76 | model.compile(optimizer=Adam(lr=initial_lr), loss=dice_coef5_loss,
77 | metrics=[dice_coef5, dice_coef5_0, dice_coef5_1, dice_coef5_2, dice_coef5_3])
78 |
79 | print('This model has {} parameters'.format(model.count_params()) )
80 |
81 |
82 | seq_context_imgs, seq_context_segs, seq_imgs, seq_segs = data_lvrv_segmentation_propagation_acdc(mode = mode, fold = fold)
83 |
84 |
85 |
86 | predict_sequence = len(seq_imgs)
87 |
88 | # we create two instances with the same arguments for random transformation
89 | img_data_gen_args = dict(featurewise_center=False,
90 | samplewise_center=False,
91 | featurewise_std_normalization=False,
92 | samplewise_std_normalization=False,
93 | zca_whitening=False,
94 | zca_epsilon=1e-6,
95 | rotation_range=0.,
96 | width_shift_range=0.,
97 | height_shift_range=0.,
98 | shear_range=0.,
99 | zoom_range=0.,
100 | channel_shift_range=0.,
101 | fill_mode='constant',
102 | cval=0.,
103 | horizontal_flip=False,
104 | vertical_flip=False,
105 | rescale=None,
106 | preprocessing_function=mean_variance_normalization5,
107 | data_format=K.image_data_format())
108 |
109 | # deep copy is necessary
110 | mask_data_gen_args = copy.deepcopy(img_data_gen_args)
111 | mask_data_gen_args['preprocessing_function'] = elementwise_multiplication
112 |
113 | #########################
114 | # Generators for prediction
115 | print('Creating generators for prediction')
116 | image_context_datagen = ImageDataGenerator2(**img_data_gen_args)
117 | image_datagen = ImageDataGenerator2(**img_data_gen_args)
118 | mask_context_datagen = ImageDataGenerator2(**mask_data_gen_args)
119 |
120 | # Provide the same seed and keyword arguments to the fit and flow methods
121 | seed = 1
122 | image_context_datagen.fit(np.zeros((1,1,1,1)), augment=False, rounds=0, seed=seed)
123 | image_datagen.fit(np.zeros((1,1,1,1)), augment=False, rounds=0, seed=seed)
124 | mask_context_datagen.fit(np.zeros((1,1,1,1)), augment=False, rounds=0, seed=seed)
125 |
126 |
127 | print('Start prediction')
128 | print('There will be {} sequences'.format(predict_sequence) )
129 |
130 |
131 | for i in range(predict_sequence):
132 | print('Sequence # {}'.format(i) )
133 |
134 | # The lists for the sequence
135 | context_imgs = seq_context_imgs[i]
136 | context_segs = seq_context_segs[i]
137 | imgs = seq_imgs[i]
138 | segs = seq_segs[i]
139 |
140 |
141 | image_context_generator = image_context_datagen.flow_from_path_list(
142 | path_list=context_imgs,
143 | target_size=(input_img_size, input_img_size),
144 | pad_to_square=True,
145 | resize_mode='nearest',
146 | histogram_based_preprocessing=False,
147 | clahe=False,
148 | color_mode='grayscale',
149 | class_list=None,
150 | class_mode=None,
151 | batch_size=batch_size,
152 | shuffle=False,
153 | seed=seed,
154 | save_to_dir=None,
155 | save_prefix='',
156 | save_format='png',
157 | save_period=500,
158 | follow_links=False)
159 |
160 | image_generator = image_datagen.flow_from_path_list(
161 | path_list=imgs,
162 | target_size=(input_img_size, input_img_size),
163 | pad_to_square=True,
164 | resize_mode='nearest',
165 | histogram_based_preprocessing=False,
166 | clahe=False,
167 | color_mode='grayscale',
168 | class_list=None,
169 | class_mode=None,
170 | batch_size=batch_size,
171 | shuffle=False,
172 | seed=seed,
173 | save_to_dir=None,
174 | save_prefix='',
175 | save_format='png',
176 | save_period=500,
177 | follow_links=False)
178 |
179 | mask_context_generator = mask_context_datagen.flow_from_path_list(
180 | path_list=context_segs,
181 |
target_size=(input_img_size, input_img_size), 182 | pad_to_square=True, 183 | resize_mode='nearest', 184 | histogram_based_preprocessing=False, 185 | clahe=False, 186 | color_mode='grayscale', 187 | class_list=None, 188 | class_mode=None, 189 | batch_size=batch_size, 190 | shuffle=False, 191 | seed=seed, 192 | save_to_dir=None, 193 | save_prefix='', 194 | save_format='png', 195 | save_period=500, 196 | follow_links=False) 197 | 198 | 199 | # Combine generators into one which yields image and masks 200 | predict_generator = izip(image_context_generator, image_generator, mask_context_generator) 201 | 202 | 203 | img_size = pil_image.open(imgs[0]).size 204 | size = img_size[0] 205 | 206 | 207 | 208 | for j in range(len(imgs)): 209 | 210 | img_context, img, mask_context = next(predict_generator) 211 | masks = model.predict([img_context, img, mask_context], 212 | batch_size=batch_size, verbose=0) 213 | 214 | masks = np.reshape(masks, newshape=(input_img_size, input_img_size, 4)) 215 | masks_resized = np.zeros((size, size, 4)) 216 | for c in range(4): 217 | masks_resized[:, :, c] = imresize(masks[:, :, c], (size, size), interp='bilinear') 218 | prediction_resized = np.argmax(masks_resized, axis=-1) 219 | prediction_resized = np.reshape(prediction_resized, newshape=(size, size, 1)) 220 | 221 | # Check whether the prediction is successful 222 | have_lvc = (1 in prediction_resized) 223 | have_lvm = (2 in prediction_resized) 224 | lvc_touch_background_length = touch_length_count(prediction_resized, size, size, 1, 0) 225 | lvc_touch_lvm_length = touch_length_count(prediction_resized, size, size, 1, 2) 226 | lvc_touch_rvc_length = touch_length_count(prediction_resized, size, size, 1, 3) 227 | 228 | lvc_second_largest_component_count = second_largest_component_count(prediction_resized, 1) 229 | lvm_second_largest_component_count = second_largest_component_count(prediction_resized, 2) 230 | rvc_second_largest_component_count = second_largest_component_count(prediction_resized, 3) 231 | 232 | 233 | success = have_lvm and \ 234 | ((lvc_touch_background_length + lvc_touch_rvc_length) <= 0.5 * lvc_touch_lvm_length) 235 | 236 | 237 | 238 | if not success: 239 | prediction_resized = 0 * prediction_resized 240 | print('Unsuccessful segmentation for {}'.format(imgs[j])) 241 | else: 242 | prediction_resized = keep_largest_components(prediction_resized, keep_values=[1, 2, 3], values=[1, 2, 3]) 243 | 244 | 245 | # save txt file 246 | prediction_path = segs[j] 247 | prediction_txt_path = prediction_path.replace('.png', '.txt', 1) 248 | np.savetxt(prediction_txt_path, prediction_resized, fmt='%.6f') 249 | 250 | # save image 251 | prediction_img = array_to_img(prediction_resized * 50.0, 252 | data_format=None, 253 | scale=False) 254 | prediction_img.save(prediction_path) 255 | 256 | 257 | 258 | K.clear_session() 259 | 260 | 261 | 262 | if __name__ == '__main__': 263 | predict_lvrv_net() 264 | 265 | 266 | 267 | 268 | -------------------------------------------------------------------------------- /ROI/crop_according_to_roi.py: -------------------------------------------------------------------------------- 1 | """ The main file to launch ROI cropping according to the prediction of ROI-net """ 2 | 3 | import os 4 | import sys 5 | sys.path.append('..') 6 | 7 | import numpy as np 8 | import scipy 9 | from scipy import ndimage 10 | import math 11 | import nibabel as nib 12 | from PIL import Image 13 | 14 | import multiprocessing.pool 15 | from functools import partial 16 | 17 | import config 18 | 19 | 20 | # Auxiliary 
function
21 | def determine_rectangle_roi(img_path):
22 | img = Image.open(img_path)
23 | columns, rows = img.size
24 | roi_c_min = columns
25 | roi_c_max = -1
26 | roi_r_min = rows
27 | roi_r_max = -1
28 | box = img.getbbox()
29 | if box:
30 | roi_r_min = box[0]
31 | roi_c_min = box[1]
32 | roi_r_max = box[2] - 1
33 | roi_c_max = box[3] - 1
34 | return [roi_c_min, roi_c_max, roi_r_min, roi_r_max]
35 |
36 | # Auxiliary function
37 | def determine_rectangle_roi2(img_path):
38 | img = Image.open(img_path)
39 | img_array = np.array(img)
40 | connected_components, num_connected_components = ndimage.label(img_array)
41 | if (num_connected_components > 1):
42 | unique, counts = np.unique(connected_components, return_counts=True)
43 | max_idx = np.where(counts == max(counts[1:]))[0][0]
44 | single_component = connected_components * (connected_components == max_idx)
45 | img = Image.fromarray(single_component)
46 |
47 | columns, rows = img.size
48 | roi_c_min = columns
49 | roi_c_max = -1
50 | roi_r_min = rows
51 | roi_r_max = -1
52 | box = img.getbbox()
53 | if box:
54 | roi_r_min = box[0]
55 | roi_c_min = box[1]
56 | roi_r_max = box[2] - 1
57 | roi_c_max = box[3] - 1
58 | return [roi_c_min, roi_c_max, roi_r_min, roi_r_max]
59 |
60 |
61 | def change_array_values(array):
62 | output = array
63 | for u in range(output.shape[0]):
64 | for v in range(output.shape[1]):
65 | if output[u,v] == 1:
66 | output[u,v] = 3
67 | elif output[u,v] == 3:
68 | output[u,v] = 1
69 | return output
70 |
71 |
72 | def crop_according_to_roi():
73 | # The ratio that determines the width of the margin
74 | pixel_margin_ratio = 0.3
75 |
76 | # If for a case there are non-zero pixels on the border of the ROI, the case is stored in
77 | # this list for further examination. This list is eventually empty for UK Biobank cases.
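# A worked example with hypothetical numbers (not taken from ACDC): for
# pixel_margin_ratio = 0.3 and an ROI of side roi_length = 100 pixels,
# pixel_margin = int(round(0.3 * 100 + 0.001)) = 30 pixels on each side,
# so the crop window built below spans roi_length + 2 * pixel_margin = 160 x 160 pixels.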
78 | border_problem_subject = [] 79 | 80 | 81 | 82 | data_dir = config.acdc_data_dir 83 | code_dir = config.code_dir 84 | 85 | dilated_subjects = config.acdc_dilated_subjects 86 | hypertrophic_subjects = config.acdc_hypertrophic_subjects 87 | infarct_subjects = config.acdc_infarct_subjects 88 | normal_subjects = config.acdc_normal_subjects 89 | rv_subjects = config.acdc_rv_subjects 90 | test_subjects = config.acdc_test_subjects 91 | 92 | train_subjects = dilated_subjects + hypertrophic_subjects + infarct_subjects + normal_subjects + rv_subjects 93 | 94 | all_subjects = train_subjects + test_subjects 95 | 96 | 97 | 98 | info_file = os.path.join(code_dir, 'acdc_info', 'acdc_info.txt') 99 | with open(info_file) as in_file: 100 | subject_info = in_file.readlines() 101 | 102 | subject_info = [x.strip() for x in subject_info] 103 | subject_info = [ y.split()[0:2] + [float(z) for z in y.split()[2:]] for y in subject_info] 104 | 105 | 106 | predict_img_list = [] 107 | predict_gt_list = [] 108 | 109 | 110 | for subject in all_subjects: 111 | print(subject) 112 | subject_dir = data_dir.format(subject) 113 | subject_mask_original_dir = os.path.join(subject_dir, 'mask_original_2D') 114 | crop_2D_path = os.path.join(subject_dir, 'crop_2D') 115 | if not os.path.exists(crop_2D_path): 116 | os.makedirs(crop_2D_path) 117 | 118 | 119 | instants = int([x for x in subject_info if x[0] == subject][0][2]) 120 | ed_instant = int([x for x in subject_info if x[0] == subject][0][3]) 121 | es_instant = int([x for x in subject_info if x[0] == subject][0][4]) 122 | slices = int([x for x in subject_info if x[0] == subject][0][5]) 123 | 124 | used_instants_roi = [ed_instant] 125 | img_path_list = [] 126 | for t in used_instants_roi: 127 | for s in range(int(round(slices * 0.1 + 0.001)), int(round(slices * 0.5 + 0.001))): 128 | s_t_mask_image_file = os.path.join(subject_mask_original_dir, 'mask_original_2D_{}_{}.png'.format(str(s).zfill(2), str(t).zfill(2)) ) 129 | img_path_list.append(s_t_mask_image_file) 130 | 131 | 132 | 133 | # Multithread 134 | pool = multiprocessing.pool.ThreadPool() 135 | function_partial = partial(determine_rectangle_roi2) 136 | roi_results = pool.map(function_partial, (img_path for img_path in img_path_list)) 137 | roi_c_min = min([res[0] for res in roi_results]) 138 | roi_c_max = max([res[1] for res in roi_results]) 139 | roi_r_min = min([res[2] for res in roi_results]) 140 | roi_r_max = max([res[3] for res in roi_results]) 141 | pool.close() 142 | pool.join() 143 | 144 | # ROI size (without adding margin) 145 | roi_c_length = roi_c_max - roi_c_min + 1 146 | roi_r_length = roi_r_max - roi_r_min + 1 147 | roi_length = max(roi_c_length, roi_r_length) 148 | print('roi_length = {}'.format(roi_length) ) 149 | 150 | written = '{0} {1} {2} {3} {4} {5}\n'.format(subject, roi_c_min, roi_c_max, roi_r_min, roi_r_max, roi_length) 151 | 152 | 153 | # The size of margin, determined by the ratio we defined above 154 | pixel_margin = int(round(pixel_margin_ratio * roi_length + 0.001)) 155 | 156 | crop_c_min = ((roi_c_min + roi_c_max) // 2) - (roi_length // 2) - pixel_margin 157 | crop_c_max = crop_c_min + pixel_margin + roi_length - 1 + pixel_margin 158 | crop_r_min = ((roi_r_min + roi_r_max) // 2) - (roi_length // 2) - pixel_margin 159 | crop_r_max = crop_r_min + pixel_margin + roi_length - 1 + pixel_margin 160 | 161 | 162 | # Crop the original images 163 | image_file = os.path.join(subject_dir, '{}_4d.nii.gz'.format(subject)) 164 | image_load = nib.load(image_file) 165 | image_data = image_load.get_data() 
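# Note: the crop window computed above may extend beyond the image borders; the
# lines below clamp it to valid pixel indices and paste the in-bounds region into
# a zero-filled array, so any out-of-bounds margin is implicitly zero-padded.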
166 | original_r_min = max(0, crop_r_min) 167 | original_r_max = min(image_data.shape[0]-1, crop_r_max) 168 | original_c_min = max(0, crop_c_min) 169 | original_c_max = min(image_data.shape[1]-1, crop_c_max) 170 | crop_image_data = np.zeros((roi_length + 2 * pixel_margin, roi_length + 2 * pixel_margin, 171 | image_data.shape[2], image_data.shape[3])) 172 | crop_image_data[(original_r_min - crop_r_min):(original_r_max - crop_r_min + 1), 173 | (original_c_min - crop_c_min):(original_c_max - crop_c_min + 1), 174 | :, 175 | :] = \ 176 | image_data[original_r_min:(original_r_max + 1), 177 | original_c_min:(original_c_max + 1), 178 | :, 179 | :] 180 | crop_image_data = crop_image_data[::-1, ::-1, :, :] 181 | crop_image_file = os.path.join(subject_dir, 'crop_{}_4d.nii.gz'.format(subject)) 182 | nib.save(nib.Nifti1Image(crop_image_data, np.eye(4)), crop_image_file) 183 | 184 | # Crop the original labels 185 | if subject in train_subjects: 186 | for i in [ed_instant+1, es_instant+1]: 187 | label_file = os.path.join(subject_dir, '{}_frame{}_gt.nii.gz'.format(subject,str(i).zfill(2))) 188 | label_load = nib.load(label_file) 189 | label_data = label_load.get_data() 190 | crop_label_data = np.zeros((roi_length + 2 * pixel_margin, 191 | roi_length + 2 * pixel_margin, 192 | image_data.shape[2])) 193 | crop_label_data[(original_r_min - crop_r_min):(original_r_max - crop_r_min + 1), 194 | (original_c_min - crop_c_min):(original_c_max - crop_c_min + 1), 195 | :] = \ 196 | label_data[original_r_min:(original_r_max + 1), 197 | original_c_min:(original_c_max + 1), 198 | :] 199 | crop_label_data = crop_label_data[::-1, ::-1, :] 200 | crop_label_file = os.path.join(subject_dir, 201 | 'crop_{}_frame{}_gt.nii.gz'.format(subject,str(i).zfill(2))) 202 | nib.save(nib.Nifti1Image(crop_label_data, np.eye(4)), crop_label_file) 203 | 204 | 205 | 206 | # Save cropped 2D images 207 | crop_image_data = nib.load(crop_image_file).get_data() 208 | 209 | max_pixel_value = crop_image_data.max() 210 | 211 | if max_pixel_value > 0: 212 | multiplier = 255.0 / max_pixel_value 213 | else: 214 | multiplier = 1.0 215 | 216 | print('max_pixel_value = {}, multiplier = {}'.format(max_pixel_value, multiplier) ) 217 | 218 | for s in range(slices): 219 | for t in range(instants): 220 | s_t_image_file = os.path.join(crop_2D_path, 'crop_2D_{}_{}.png'.format(str(s).zfill(2), str(t).zfill(2)) ) 221 | Image.fromarray((np.rot90(crop_image_data[:, ::-1, s, t], 3) * multiplier).astype('uint8')).save(s_t_image_file) 222 | 223 | 224 | # Save cropped 2D labels 225 | if subject in train_subjects: 226 | for s in range(slices): 227 | for t in [ed_instant, es_instant]: 228 | crop_label_file = os.path.join(subject_dir, 229 | 'crop_{}_frame{}_gt.nii.gz'.format(subject,str(t+1).zfill(2))) 230 | crop_label_data = nib.load(crop_label_file).get_data() 231 | s_t_label_file = os.path.join(crop_2D_path, 'crop_2D_gt_{}_{}.png'.format(str(s).zfill(2), str(t).zfill(2)) ) 232 | Image.fromarray((np.rot90(change_array_values(crop_label_data[:, ::-1, s]), 3) * 50).astype('uint8')).save(s_t_label_file) 233 | 234 | 235 | 236 | 237 | print('Done!') 238 | 239 | 240 | 241 | if __name__ == '__main__': 242 | crop_according_to_roi() 243 | 244 | 245 | 246 | 247 | -------------------------------------------------------------------------------- /flow/train_apparentflow_net.py: -------------------------------------------------------------------------------- 1 | """ The main file to launch the training of ApparentFlow-net """ 2 | 3 | import sys 4 | sys.path.append('..') 5 | 6 | import 
os 7 | import copy 8 | import math 9 | import numpy as np 10 | from itertools import izip 11 | import tensorflow as tf 12 | 13 | from keras.models import Model 14 | from keras.optimizers import Adam 15 | from keras.utils import plot_model 16 | from keras import backend as K 17 | 18 | K.set_image_data_format('channels_last') # TF dimension ordering in this code 19 | 20 | from helpers import ( 21 | flow_warped_img_comparison_loss, 22 | flow_warped_gt_comparison_dice_loss, 23 | flow_diffeomorphism_loss, 24 | flow_combined_loss3, 25 | mean_variance_normalization5, 26 | elementwise_multiplication 27 | ) 28 | 29 | from image2 import ImageDataGenerator2 30 | 31 | 32 | 33 | from data_apparentflow import data_apparentflow 34 | 35 | from module_apparentflow_net import net_module 36 | 37 | import config 38 | 39 | save_period = 10000 40 | 41 | def train_apparentflow_net(): 42 | 43 | code_path = config.code_dir 44 | 45 | fold = int(sys.argv[1]) 46 | print('fold = {}'.format(fold)) 47 | if fold == 0: 48 | mode_train = 'all' 49 | mode_val = 'all' 50 | elif fold in range(1,6): 51 | mode_train = 'train' 52 | mode_val = 'val' 53 | else: 54 | print('Incorrect fold') 55 | 56 | 57 | initial_lr = config.apparentflow_net_initial_lr 58 | decay_rate = config.apparentflow_net_decay_rate 59 | batch_size = config.apparentflow_net_batch_size 60 | input_img_size = config.apparentflow_net_input_img_size 61 | epochs = config.apparentflow_net_epochs 62 | 63 | current_epoch = 0 64 | new_start_epoch = current_epoch 65 | 66 | ########### 67 | # The model 68 | model = net_module(input_shape=(input_img_size, input_img_size, 1), num_outputs=2) 69 | # Train from scratch 70 | if current_epoch == 0: 71 | print('Building model') 72 | # Finetune 73 | else: 74 | print('Loading model') 75 | model.load_weights(filepath=os.path.join(code_path, 'flow', 'model_apparentflow_net_fold{}_epoch{}.h5'.format(str(fold), str(current_epoch).zfill(3))) ) 76 | 77 | 78 | model.compile(optimizer=Adam(lr=initial_lr), loss=flow_combined_loss3, 79 | metrics=[flow_warped_img_comparison_loss, flow_warped_gt_comparison_dice_loss, flow_diffeomorphism_loss]) 80 | 81 | 82 | print('This model has {} parameters'.format(model.count_params()) ) 83 | 84 | # Load data lists 85 | train_img_list0, train_img_list1, train_gt_list0, train_gt_list1 = data_apparentflow(mode=mode_train, fold = fold) 86 | test_img_list0, test_img_list1, test_gt_list0, test_gt_list1 = data_apparentflow(mode=mode_val, fold = fold) 87 | 88 | train_sample = len(train_img_list0) 89 | val_sample = len(test_img_list0) 90 | 91 | train_img_list = [train_img_list0, train_img_list1, train_gt_list0, train_gt_list1] 92 | val_img_list = [test_img_list0, test_img_list1, test_gt_list0, test_gt_list1] 93 | 94 | # we create two instances with the same arguments for random transformation 95 | img_data_gen_args = dict(featurewise_center=False, 96 | samplewise_center=False, 97 | featurewise_std_normalization=False, 98 | samplewise_std_normalization=False, 99 | zca_whitening=False, 100 | zca_epsilon=1e-6, 101 | rotation_range=180., 102 | width_shift_range=0.15, 103 | height_shift_range=0.15, 104 | shear_range=0., 105 | zoom_range=0.15, 106 | channel_shift_range=0., 107 | fill_mode='constant', 108 | cval=0., 109 | horizontal_flip=True, 110 | vertical_flip=True, 111 | rescale=None, 112 | preprocessing_function=mean_variance_normalization5, 113 | data_format=K.image_data_format()) 114 | 115 | # deep copy is necessary 116 | mask_data_gen_args = copy.deepcopy(img_data_gen_args) 117 | 
mask_data_gen_args['preprocessing_function'] = elementwise_multiplication 118 | 119 | ######################### 120 | # Generators for training 121 | print('Creating generators for training') 122 | seed = 1 123 | generators = [] 124 | # The generators for the 2 inputs 125 | for k in range(0, 2): 126 | seg_datagen_k = ImageDataGenerator2(**img_data_gen_args) 127 | seg_datagen_k.fit(np.zeros((1,1,1,1)), augment=False, rounds=0, seed=seed) 128 | seg_generator_k = seg_datagen_k.flow_from_path_list( 129 | path_list=train_img_list[k], 130 | target_size=(input_img_size, input_img_size), 131 | pad_to_square=True, 132 | resize_mode='nearest', 133 | histogram_based_preprocessing=False, 134 | clahe=False, 135 | color_mode='grayscale', 136 | class_list=None, 137 | class_mode=None, 138 | batch_size=batch_size, 139 | shuffle=True, 140 | seed=seed, 141 | save_to_dir=None, 142 | save_prefix='', 143 | save_format='png', 144 | save_period=save_period, 145 | follow_links=False) 146 | generators.append(seg_generator_k) 147 | # The generators for the 2 masks 148 | for k in range(2, 4): 149 | seg_datagen_k = ImageDataGenerator2(**mask_data_gen_args) 150 | seg_datagen_k.fit(np.zeros((1,1,1,1)), augment=False, rounds=0, seed=seed) 151 | seg_generator_k = seg_datagen_k.flow_from_path_list( 152 | path_list=train_img_list[k], 153 | target_size=(input_img_size, input_img_size), 154 | pad_to_square=True, 155 | resize_mode='nearest', 156 | histogram_based_preprocessing=False, 157 | clahe=False, 158 | color_mode='grayscale', 159 | class_list=None, 160 | class_mode=None, 161 | batch_size=batch_size, 162 | shuffle=True, 163 | seed=seed, 164 | save_to_dir=None, 165 | save_prefix='', 166 | save_format='png', 167 | save_period=save_period, 168 | follow_links=False) 169 | generators.append(seg_generator_k) 170 | 171 | # Combine generators into one which yields image and masks 172 | train_generator = izip(*tuple(generators)) 173 | 174 | 175 | ########################### 176 | # Generators for validation 177 | print('Creating generators for validation') 178 | val_seed = 2 179 | generators2 = [] 180 | # The generators for the inputs 181 | for k in range(0, 2): 182 | seg_datagen_k = ImageDataGenerator2(**img_data_gen_args) 183 | seg_datagen_k.fit(np.zeros((1,1,1,1)), augment=False, rounds=0, seed=seed) 184 | seg_generator_k = seg_datagen_k.flow_from_path_list( 185 | path_list=val_img_list[k], 186 | target_size=(input_img_size, input_img_size), 187 | pad_to_square=True, 188 | resize_mode='nearest', 189 | histogram_based_preprocessing=False, 190 | clahe=False, 191 | color_mode='grayscale', 192 | class_list=None, 193 | class_mode=None, 194 | batch_size=batch_size, 195 | shuffle=True, 196 | seed=seed, 197 | save_to_dir=None, 198 | save_prefix='val_', 199 | save_format='png', 200 | save_period=save_period, 201 | follow_links=False) 202 | generators2.append(seg_generator_k) 203 | # The generators for the 2 masks 204 | for k in range(2, 4): 205 | seg_datagen_k = ImageDataGenerator2(**mask_data_gen_args) 206 | seg_datagen_k.fit(np.zeros((1,1,1,1)), augment=False, rounds=0, seed=seed) 207 | seg_generator_k = seg_datagen_k.flow_from_path_list( 208 | path_list=val_img_list[k], 209 | target_size=(input_img_size, input_img_size), 210 | pad_to_square=True, 211 | resize_mode='nearest', 212 | histogram_based_preprocessing=False, 213 | clahe=False, 214 | color_mode='grayscale', 215 | class_list=None, 216 | class_mode=None, 217 | batch_size=batch_size, 218 | shuffle=True, 219 | seed=seed, 220 | save_to_dir=None, 221 | save_prefix='val_', 222 | 
save_format='png', 223 | save_period=save_period, 224 | follow_links=False) 225 | generators2.append(seg_generator_k) 226 | 227 | # Combine generators into one which yields image and masks 228 | val_generator = izip(*tuple(generators2)) 229 | 230 | 231 | 232 | ############### 233 | # Train the model 234 | print('Start training') 235 | steps = int(math.ceil(float(train_sample) / batch_size)) 236 | print('There will be {} epochs with {} steps in each epoch'.format(epochs, steps) ) 237 | 238 | 239 | total_step = 0 240 | for epoch in range(new_start_epoch + 1, new_start_epoch + epochs + 1): 241 | print('\n\n##########\nEpoch {}\n##########'.format(epoch) ) 242 | 243 | for step in range(steps): 244 | print('\n****** Epoch {} Step {} ******'.format(epoch, step) ) 245 | train_batch = next(train_generator) 246 | 247 | print(model.train_on_batch([train_batch[0], train_batch[1]], 248 | np.concatenate((train_batch[1], train_batch[0], train_batch[3], train_batch[2]), axis=-1), 249 | sample_weight=None, class_weight=None)) 250 | 251 | 252 | 253 | 254 | # perform test 255 | if (total_step % save_period == 0): 256 | val_batch = next(val_generator) 257 | print('test:') 258 | print(model.test_on_batch([val_batch[0], val_batch[1]], 259 | np.concatenate((val_batch[1], val_batch[0], val_batch[3], val_batch[2]), axis=-1), sample_weight=None)) 260 | 261 | 262 | total_step += 1 263 | 264 | 265 | # adjust learning rate 266 | if (epoch % 10 == 0): 267 | old_lr = float(K.get_value(model.optimizer.lr)) 268 | new_lr = initial_lr * (decay_rate**(epoch//10)) 269 | K.set_value(model.optimizer.lr, new_lr) 270 | print("learning rate is reset to %.8f" % (new_lr)) 271 | 272 | # save the model 273 | if (epoch % 50 == 0): 274 | model.save_weights(os.path.join(code_path, 'flow', 'model_apparentflow_net_fold{}_epoch{}.h5'.format(str(fold), str(epoch).zfill(3))) ) 275 | 276 | 277 | 278 | print('Training is done!') 279 | 280 | 281 | if __name__ == '__main__': 282 | train_apparentflow_net() 283 | 284 | 285 | 286 | 287 | -------------------------------------------------------------------------------- /segmentation/module_lvrv_net.py: -------------------------------------------------------------------------------- 1 | """ The module of LVRV-net """ 2 | 3 | import sys 4 | sys.path.append('..') 5 | 6 | from keras.models import Model 7 | from keras.layers import ( 8 | Input, 9 | Activation, 10 | UpSampling2D 11 | ) 12 | from keras.layers.convolutional import ( 13 | Conv2D, 14 | MaxPooling2D 15 | ) 16 | from keras.layers.core import ( 17 | Reshape, 18 | Lambda 19 | ) 20 | from keras.layers.merge import ( 21 | Add, 22 | Concatenate 23 | ) 24 | from keras import backend as K 25 | 26 | from helpers import ( 27 | conv_bn_leakyrelu_repetition_block, 28 | handle_dim_ordering, 29 | one_hot 30 | ) 31 | 32 | 33 | def net_module(input_shape, num_outputs): 34 | """Builds a net architecture. 35 | Args: 36 | input_shape: The input shape in the form (nb_rows, nb_cols, nb_channels) 37 | num_outputs: The number of outputs at final softmax layer 38 | Returns: 39 | The keras `Model`. 
40 | """ 41 | CHANNEL_AXIS = 3 42 | handle_dim_ordering() 43 | if len(input_shape) != 3: 44 | raise Exception("Input shape should be a tuple (nb_rows, nb_cols, nb_channels)") 45 | 46 | # Permute dimension order if necessary 47 | if K.image_dim_ordering() != 'tf': 48 | input_shape = (input_shape[2], input_shape[0], input_shape[1]) 49 | 50 | input_img0 = Input(shape=input_shape, name="input_img0") 51 | 52 | input_img1 = Input(shape=input_shape, name="input_img1") 53 | 54 | input_mask0 = Input(shape=input_shape, name="input_mask0") 55 | 56 | input_mask0_one_hot = Lambda(one_hot, 57 | arguments={'num_classes': num_outputs})(input_mask0) 58 | 59 | concatenate = Concatenate(axis=CHANNEL_AXIS, name="concatenate")([input_img0, 60 | input_mask0_one_hot]) 61 | 62 | base_channel = 32 63 | 64 | 65 | 66 | 67 | block_conv_1 = conv_bn_leakyrelu_repetition_block(filters=1*base_channel, kernel_size=(3,3), 68 | repetitions=2, first_layer_down_size=False, alpha=0.0, 69 | name="conv_block1")(input_img1) 70 | 71 | 72 | block_pool_2 = MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='valid', 73 | data_format=None, name="pool_block2")(block_conv_1) 74 | 75 | block_conv_2 = conv_bn_leakyrelu_repetition_block(filters=2*base_channel, kernel_size=(3,3), 76 | repetitions=2, first_layer_down_size=False, alpha=0.0, 77 | name="conv_block2")(block_pool_2) 78 | 79 | 80 | block_pool_4 = MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='valid', 81 | data_format=None, name="pool_block4")(block_conv_2) 82 | 83 | block_conv_4 = conv_bn_leakyrelu_repetition_block(filters=4*base_channel, kernel_size=(3,3), 84 | repetitions=2, first_layer_down_size=False, alpha=0.0, 85 | name="conv_block4")(block_pool_4) 86 | 87 | 88 | block_pool_8 = MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='valid', 89 | data_format=None, name="pool_block8")(block_conv_4) 90 | 91 | block_conv_8 = conv_bn_leakyrelu_repetition_block(filters=8*base_channel, kernel_size=(3,3), 92 | repetitions=2, first_layer_down_size=False, alpha=0.0, 93 | name="conv_block8")(block_pool_8) 94 | 95 | 96 | block_pool_16 = MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='valid', 97 | data_format=None, name="pool_block16")(block_conv_8) 98 | 99 | block_conv_16 = conv_bn_leakyrelu_repetition_block(filters=16*base_channel, kernel_size=(3,3), 100 | repetitions=2, first_layer_down_size=False, alpha=0.0, 101 | name="conv_block16")(block_pool_16) 102 | 103 | 104 | block_pool_32 = MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='valid', 105 | data_format=None, name="pool_block32")(block_conv_16) 106 | 107 | block_conv_32 = conv_bn_leakyrelu_repetition_block(filters=32*base_channel, kernel_size=(3,3), 108 | repetitions=1, first_layer_down_size=False, alpha=0.0, 109 | name="conv_block32")(block_pool_32) 110 | 111 | 112 | 113 | 114 | 115 | 116 | 117 | block_context_conv_1 = conv_bn_leakyrelu_repetition_block(filters=1*base_channel, kernel_size=(3,3), 118 | repetitions=2, first_layer_down_size=False, alpha=0.0, 119 | name="context_conv_block1")(concatenate) 120 | 121 | 122 | block_context_pool_2 = MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='valid', 123 | data_format=None, name="context_pool_block2")(block_context_conv_1) 124 | 125 | block_context_conv_2 = conv_bn_leakyrelu_repetition_block(filters=2*base_channel, kernel_size=(3,3), 126 | repetitions=2, first_layer_down_size=False, alpha=0.0, 127 | name="context_conv_block2")(block_context_pool_2) 128 | 129 | 130 | block_context_pool_4 = MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='valid', 131 | 
data_format=None, name="context_pool_block4")(block_context_conv_2) 132 | 133 | block_context_conv_4 = conv_bn_leakyrelu_repetition_block(filters=4*base_channel, kernel_size=(3,3), 134 | repetitions=2, first_layer_down_size=False, alpha=0.0, 135 | name="context_conv_block4")(block_context_pool_4) 136 | 137 | 138 | block_context_pool_8 = MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='valid', 139 | data_format=None, name="context_pool_block8")(block_context_conv_4) 140 | 141 | block_context_conv_8 = conv_bn_leakyrelu_repetition_block(filters=8*base_channel, kernel_size=(3,3), 142 | repetitions=2, first_layer_down_size=False, alpha=0.0, 143 | name="context_conv_block8")(block_context_pool_8) 144 | 145 | 146 | block_context_pool_16 = MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='valid', 147 | data_format=None, name="context_pool_block16")(block_context_conv_8) 148 | 149 | block_context_conv_16 = conv_bn_leakyrelu_repetition_block(filters=16*base_channel, kernel_size=(3,3), 150 | repetitions=2, first_layer_down_size=False, alpha=0.0, 151 | name="context_conv_block16")(block_context_pool_16) 152 | 153 | 154 | block_context_pool_32 = MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='valid', 155 | data_format=None, name="context_pool_block32")(block_context_conv_16) 156 | 157 | block_context_conv_32 = conv_bn_leakyrelu_repetition_block(filters=32*base_channel, kernel_size=(3,3), 158 | repetitions=1, first_layer_down_size=False, alpha=0.0, 159 | name="context_conv_block32")(block_context_pool_32) 160 | 161 | 162 | 163 | 164 | 165 | 166 | block_concat_32 = Concatenate(axis=CHANNEL_AXIS, name="concat32")([block_conv_32, block_context_conv_32]) 167 | 168 | block_expan_conv_32 = conv_bn_leakyrelu_repetition_block(filters=32*base_channel, kernel_size=(3,3), 169 | repetitions=2, first_layer_down_size=False, alpha=0.0, 170 | name="expan_conv_block32")(block_concat_32) 171 | 172 | 173 | block_up_16 = UpSampling2D(size=(2,2), name="up_block16")(block_expan_conv_32) 174 | 175 | block_concat_16 = Concatenate(axis=CHANNEL_AXIS, name="concat16")([block_up_16, block_conv_16]) 176 | 177 | block_expan_conv_16 = conv_bn_leakyrelu_repetition_block(filters=16*base_channel, kernel_size=(3,3), 178 | repetitions=2, first_layer_down_size=False, alpha=0.0, 179 | name="expan_conv_block16")(block_concat_16) 180 | 181 | 182 | block_up_8 = UpSampling2D(size=(2,2), name="up_block8")(block_expan_conv_16) 183 | 184 | block_concat_8 = Concatenate(axis=CHANNEL_AXIS, name="concat8")([block_up_8, block_conv_8]) 185 | 186 | block_expan_conv_8 = conv_bn_leakyrelu_repetition_block(filters=8*base_channel, kernel_size=(3,3), 187 | repetitions=2, first_layer_down_size=False, alpha=0.0, 188 | name="expan_conv_block8")(block_concat_8) 189 | 190 | 191 | block_up_4 = UpSampling2D(size=(2,2), name="up_block4")(block_expan_conv_8) 192 | 193 | block_concat_4 = Concatenate(axis=CHANNEL_AXIS, name="concat4")([block_up_4, block_conv_4]) 194 | 195 | block_expan_conv_4 = conv_bn_leakyrelu_repetition_block(filters=4*base_channel, kernel_size=(3,3), 196 | repetitions=2, first_layer_down_size=False, alpha=0.0, 197 | name="expan_conv_block4")(block_concat_4) 198 | 199 | 200 | block_up_2 = UpSampling2D(size=(2,2), name="up_block2")(block_expan_conv_4) 201 | 202 | block_concat_2 = Concatenate(axis=CHANNEL_AXIS, name="concat2")([block_up_2, block_conv_2]) 203 | 204 | block_expan_conv_2 = conv_bn_leakyrelu_repetition_block(filters=2*base_channel, kernel_size=(3,3), 205 | repetitions=2, first_layer_down_size=False, alpha=0.0, 206 | 
name="expan_conv_block2")(block_concat_2) 207 | 208 | 209 | block_up_1 = UpSampling2D(size=(2,2), name="up_block1")(block_expan_conv_2) 210 | 211 | block_concat_1 = Concatenate(axis=CHANNEL_AXIS, name="concat1")([block_up_1, block_conv_1]) 212 | 213 | block_expan_conv_1 = conv_bn_leakyrelu_repetition_block(filters=1*base_channel, kernel_size=(3,3), 214 | repetitions=2, first_layer_down_size=False, alpha=0.0, 215 | name="expan_conv_block1")(block_concat_1) 216 | 217 | 218 | 219 | 220 | 221 | # deep supervision: 1x1-convolution segmentation heads at three scales, merged below by upsampling and addition 222 | block_seg_4 = Conv2D(filters=num_outputs, kernel_size=(1,1), strides=(1,1), 223 | padding="same", data_format=None, dilation_rate=(1, 1), activation=None, 224 | use_bias=True, kernel_initializer="he_normal", bias_initializer="zeros", 225 | kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, 226 | kernel_constraint=None, bias_constraint=None, 227 | name="seg_block4")(block_expan_conv_4) 228 | 229 | block_seg_2 = Conv2D(filters=num_outputs, kernel_size=(1,1), strides=(1,1), 230 | padding="same", data_format=None, dilation_rate=(1, 1), activation=None, 231 | use_bias=True, kernel_initializer="he_normal", bias_initializer="zeros", 232 | kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, 233 | kernel_constraint=None, bias_constraint=None, 234 | name="seg_block2")(block_expan_conv_2) 235 | 236 | block_seg_1 = Conv2D(filters=num_outputs, kernel_size=(1,1), strides=(1,1), 237 | padding="same", data_format=None, dilation_rate=(1, 1), activation=None, 238 | use_bias=True, kernel_initializer="he_normal", bias_initializer="zeros", 239 | kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, 240 | kernel_constraint=None, bias_constraint=None, 241 | name="seg_block1")(block_expan_conv_1) 242 | 243 | block_seg_up_2 = UpSampling2D(size=(2,2), name="seg_up_block2")(block_seg_4) 244 | 245 | block_add_2 = Add(name="add_block2")([block_seg_up_2, block_seg_2]) 246 | 247 | block_seg_up_1 = UpSampling2D(size=(2,2), name="seg_up_block1")(block_add_2) 248 | 249 | prediction = Add(name="prediction")([block_seg_up_1, block_seg_1]) 250 | 251 | 252 | 253 | 254 | 255 | # softmax over the class channel, applied through a flatten/reshape round trip 256 | reshape1 = Reshape((input_shape[0]*input_shape[1], input_shape[2] * num_outputs), 257 | name="reshape1")(prediction) 258 | 259 | prediction_softmax = Activation("softmax", name="softmax")(reshape1) 260 | 261 | reshape2 = Reshape((input_shape[0], input_shape[1], input_shape[2] * num_outputs), 262 | name="output")(prediction_softmax) 263 | 264 | model = Model(inputs=[input_img0, input_img1, input_mask0], outputs=reshape2) 265 | 266 | return model 267 | 268 | 269 | 270 | 271 | 272 | 273 | 274 | 275 | -------------------------------------------------------------------------------- /classification/classifiers.py: -------------------------------------------------------------------------------- 1 | """ The file of the binary classifiers """ 2 | 3 | import sys 4 | sys.path.append('..') 5 | sys.path.append('.') 6 | 7 | import os 8 | 9 | from sklearn import svm, linear_model, ensemble, neural_network 10 | try: import joblib # standalone package in recent environments 11 | except ImportError: from sklearn.externals import joblib # sklearn.externals.joblib was removed in scikit-learn 0.23 12 | 13 | from data_classification import data_classification 14 | 15 | 16 | import config 17 | 18 | 19 | 20 | def classifiers(fold, data_class_num, classifier_name=None): 21 | code_path = config.code_dir 22 | 23 | #print('fold = {}'.format(fold)) 24 | if fold == 0: 25 | mode_train = 'all' 26 | mode_val = 'predict' 27 | elif fold in range(1,6): 28 | mode_train = 'train' 29 | mode_val = 'val' 30 | else: 31 | raise ValueError('Incorrect fold: {}'.format(fold)) # fail fast: mode_train/mode_val would otherwise be undefined below 32 | 33 | 34 | model_type = 'lr'
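# note: only the logistic-regression branch ('lr') is implemented below;
# the sklearn imports above (svm, ensemble, neural_network) suggest that
# other model types could be dispatched on model_type in the same way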
35 | 36 | normalization = True 37 | 38 | train_list_subject_idx, train_list_lv_volume, train_list_rv_volume, train_list_lv_ratio, train_list_rv_ratio, train_list_lvmrv_ratio, train_list_lvmlv_ratio, train_list_lvmlv_mass, train_list_thickness, train_list_thickness_diff, train_list_asyn_radius, train_list_asyn_thickness, train_list_gt = data_classification(mode = mode_train, fold=fold, data_class_num=data_class_num, normalization=normalization) 39 | train_sample = len(train_list_subject_idx) 40 | 41 | val_list_subject_idx, val_list_lv_volume, val_list_rv_volume, val_list_lv_ratio, val_list_rv_ratio, val_list_lvmrv_ratio, val_list_lvmlv_ratio, val_list_lvmlv_mass, val_list_thickness, val_list_thickness_diff, val_list_asyn_radius, val_list_asyn_thickness, val_list_gt = data_classification(mode = mode_val, fold=fold, data_class_num=data_class_num, normalization=normalization) 42 | val_sample = len(val_list_subject_idx) 43 | 44 | 45 | 46 | train_id = [0 for x in range(train_sample)] 47 | train_list = [[] for x in range(train_sample)] 48 | train_gt = [0. for x in range(train_sample)] 49 | 50 | val_id = [0 for x in range(val_sample)] 51 | val_list = [[] for x in range(val_sample)] 52 | val_gt = [0. for x in range(val_sample)] 53 | 54 | 55 | 56 | # assemble the feature vectors and the binary ground truth of the selected classifier 57 | if data_class_num == 5: 58 | for i in range(train_sample): 59 | train_id[i] = train_list_subject_idx[i] 60 | train_list[i].append(train_list_rv_volume[i]) 61 | train_list[i].append(train_list_rv_ratio[i]) 62 | train_list[i].append(train_list_lvmrv_ratio[i]) 63 | train_gt[i] = float(train_list_gt[i] == 4) 64 | for i in range(val_sample): 65 | val_id[i] = val_list_subject_idx[i] 66 | val_list[i].append(val_list_rv_volume[i]) 67 | val_list[i].append(val_list_rv_ratio[i]) 68 | val_list[i].append(val_list_lvmrv_ratio[i]) 69 | val_gt[i] = float(val_list_gt[i] == 4) 70 | elif data_class_num == 4: 71 | for i in range(train_sample): 72 | train_id[i] = train_list_subject_idx[i] 73 | train_list[i].append(train_list_lv_ratio[i]) 74 | train_list[i].append(train_list_lvmlv_ratio[i]) 75 | train_list[i].append(train_list_thickness[i]) 76 | train_gt[i] = float(train_list_gt[i] == 1) 77 | for i in range(val_sample): 78 | val_id[i] = val_list_subject_idx[i] 79 | val_list[i].append(val_list_lv_ratio[i]) 80 | val_list[i].append(val_list_lvmlv_ratio[i]) 81 | val_list[i].append(val_list_thickness[i]) 82 | val_gt[i] = float(val_list_gt[i] == 1) 83 | elif data_class_num == 3: 84 | for i in range(train_sample): 85 | train_id[i] = train_list_subject_idx[i] 86 | train_list[i].append(train_list_lv_volume[i]) 87 | train_list[i].append(train_list_asyn_radius[i]) 88 | train_list[i].append(train_list_asyn_thickness[i]) 89 | train_gt[i] = float(train_list_gt[i] == 0) 90 | for i in range(val_sample): 91 | val_id[i] = val_list_subject_idx[i] 92 | val_list[i].append(val_list_lv_volume[i]) 93 | val_list[i].append(val_list_asyn_radius[i]) 94 | val_list[i].append(val_list_asyn_thickness[i]) 95 | val_gt[i] = float(val_list_gt[i] == 0) 96 | elif data_class_num == 2: 97 | for i in range(train_sample): 98 | train_id[i] = train_list_subject_idx[i] 99 | train_list[i].append(train_list_lv_ratio[i]) 100 | train_gt[i] = float(train_list_gt[i] == 2) 101 | for i in range(val_sample): 102 | val_id[i] = val_list_subject_idx[i] 103 | val_list[i].append(val_list_lv_ratio[i]) 104 | val_gt[i] = float(val_list_gt[i] == 2) 105 | else: 106 | raise ValueError('Incorrect data_class_num: {}'.format(data_class_num)) 107 | 108 | 109 | # the block below is an earlier variant that feeds all the extracted features to every classifier; it is kept commented out for reference 110 | ''' 111 | if data_class_num == 5: 112 | for i in range(train_sample): 113 | train_id[i] = 
train_list_subject_idx[i] 114 | train_list[i].append(train_list_rv_volume[i]) 115 | train_list[i].append(train_list_rv_ratio[i]) 116 | train_list[i].append(train_list_lvmrv_ratio[i]) 117 | train_list[i].append(train_list_lv_ratio[i]) 118 | train_list[i].append(train_list_lvmlv_ratio[i]) 119 | train_list[i].append(train_list_thickness[i]) 120 | train_list[i].append(train_list_lv_volume[i]) 121 | train_list[i].append(train_list_asyn_radius[i]) 122 | train_list[i].append(train_list_asyn_thickness[i]) 123 | train_gt[i] = float(train_list_gt[i] == 4) 124 | for i in range(val_sample): 125 | val_id[i] = val_list_subject_idx[i] 126 | val_list[i].append(val_list_rv_volume[i]) 127 | val_list[i].append(val_list_rv_ratio[i]) 128 | val_list[i].append(val_list_lvmrv_ratio[i]) 129 | val_list[i].append(val_list_lv_ratio[i]) 130 | val_list[i].append(val_list_lvmlv_ratio[i]) 131 | val_list[i].append(val_list_thickness[i]) 132 | val_list[i].append(val_list_lv_volume[i]) 133 | val_list[i].append(val_list_asyn_radius[i]) 134 | val_list[i].append(val_list_asyn_thickness[i]) 135 | val_gt[i] = float(val_list_gt[i] == 4) 136 | elif data_class_num == 4: 137 | for i in range(train_sample): 138 | train_id[i] = train_list_subject_idx[i] 139 | train_list[i].append(train_list_rv_volume[i]) 140 | train_list[i].append(train_list_rv_ratio[i]) 141 | train_list[i].append(train_list_lvmrv_ratio[i]) 142 | train_list[i].append(train_list_lv_ratio[i]) 143 | train_list[i].append(train_list_lvmlv_ratio[i]) 144 | train_list[i].append(train_list_thickness[i]) 145 | train_list[i].append(train_list_lv_volume[i]) 146 | train_list[i].append(train_list_asyn_radius[i]) 147 | train_list[i].append(train_list_asyn_thickness[i]) 148 | train_gt[i] = float(train_list_gt[i] == 1) 149 | for i in range(val_sample): 150 | val_id[i] = val_list_subject_idx[i] 151 | val_list[i].append(val_list_rv_volume[i]) 152 | val_list[i].append(val_list_rv_ratio[i]) 153 | val_list[i].append(val_list_lvmrv_ratio[i]) 154 | val_list[i].append(val_list_lv_ratio[i]) 155 | val_list[i].append(val_list_lvmlv_ratio[i]) 156 | val_list[i].append(val_list_thickness[i]) 157 | val_list[i].append(val_list_lv_volume[i]) 158 | val_list[i].append(val_list_asyn_radius[i]) 159 | val_list[i].append(val_list_asyn_thickness[i]) 160 | val_gt[i] = float(val_list_gt[i] == 1) 161 | elif data_class_num == 3: 162 | for i in range(train_sample): 163 | train_id[i] = train_list_subject_idx[i] 164 | train_list[i].append(train_list_rv_volume[i]) 165 | train_list[i].append(train_list_rv_ratio[i]) 166 | train_list[i].append(train_list_lvmrv_ratio[i]) 167 | train_list[i].append(train_list_lv_ratio[i]) 168 | train_list[i].append(train_list_lvmlv_ratio[i]) 169 | train_list[i].append(train_list_thickness[i]) 170 | train_list[i].append(train_list_lv_volume[i]) 171 | train_list[i].append(train_list_asyn_radius[i]) 172 | train_list[i].append(train_list_asyn_thickness[i]) 173 | train_gt[i] = float(train_list_gt[i] == 0) 174 | for i in range(val_sample): 175 | val_id[i] = val_list_subject_idx[i] 176 | val_list[i].append(val_list_rv_volume[i]) 177 | val_list[i].append(val_list_rv_ratio[i]) 178 | val_list[i].append(val_list_lvmrv_ratio[i]) 179 | val_list[i].append(val_list_lv_ratio[i]) 180 | val_list[i].append(val_list_lvmlv_ratio[i]) 181 | val_list[i].append(val_list_thickness[i]) 182 | val_list[i].append(val_list_lv_volume[i]) 183 | val_list[i].append(val_list_asyn_radius[i]) 184 | val_list[i].append(val_list_asyn_thickness[i]) 185 | val_gt[i] = float(val_list_gt[i] == 0) 186 | elif data_class_num == 2: 187 
| for i in range(train_sample): 188 | train_id[i] = train_list_subject_idx[i] 189 | train_list[i].append(train_list_rv_volume[i]) 190 | train_list[i].append(train_list_rv_ratio[i]) 191 | train_list[i].append(train_list_lvmrv_ratio[i]) 192 | train_list[i].append(train_list_lv_ratio[i]) 193 | train_list[i].append(train_list_lvmlv_ratio[i]) 194 | train_list[i].append(train_list_thickness[i]) 195 | train_list[i].append(train_list_lv_volume[i]) 196 | train_list[i].append(train_list_asyn_radius[i]) 197 | train_list[i].append(train_list_asyn_thickness[i]) 198 | train_gt[i] = float(train_list_gt[i] == 2) 199 | for i in range(val_sample): 200 | val_id[i] = val_list_subject_idx[i] 201 | val_list[i].append(val_list_rv_volume[i]) 202 | val_list[i].append(val_list_rv_ratio[i]) 203 | val_list[i].append(val_list_lvmrv_ratio[i]) 204 | val_list[i].append(val_list_lv_ratio[i]) 205 | val_list[i].append(val_list_lvmlv_ratio[i]) 206 | val_list[i].append(val_list_thickness[i]) 207 | val_list[i].append(val_list_lv_volume[i]) 208 | val_list[i].append(val_list_asyn_radius[i]) 209 | val_list[i].append(val_list_asyn_thickness[i]) 210 | val_gt[i] = float(val_list_gt[i] == 2) 211 | else: 212 | print('Incorrect data_class_num') 213 | ''' 214 | 215 | 216 | 217 | if model_type == 'lr': 218 | clf = linear_model.LogisticRegression(C=50., solver='liblinear', random_state=0) 219 | clf.fit(train_list, train_gt) 220 | #print(clf.fit(train_list, train_gt)) 221 | 222 | 223 | prediction = clf.predict(val_list) 224 | prediction_proba = clf.predict_proba(val_list) 225 | 226 | if fold in range(1,6): 227 | error = [x for i,x in enumerate(val_id) if prediction[i] != val_gt[i]] 228 | return error 229 | 230 | elif fold == 0: 231 | # Save the classifier model 232 | joblib.dump(clf, os.path.join(code_path, 'classification', 'trained_model_{}.joblib'.format(classifier_name))) 233 | 234 | #print(clf.coef_) 235 | #print(clf.intercept_) 236 | 237 | # Save the predicted results 238 | record_file = os.path.join(code_path, 'classification', 'acdc_testing_set_prediction_{}.txt'.format(classifier_name)) 239 | record = open(record_file, 'w') 240 | for i in range(len(val_id)): 241 | subject = 'patient{}'.format(str(val_id[i]).zfill(3)) 242 | prediction_result = prediction[i] 243 | proba = prediction_proba[i][1] 244 | written = '{} {} {}\n'.format(subject, prediction_result, proba) 245 | record.write(written) 246 | 247 | record.close() 248 | return 249 | 250 | 251 | 252 | 253 | 254 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Explainable cardiac pathology classification on cine MRI with motion characterization by semi-supervised learning of apparent flow 2 | 3 | This is an implementation of the models in the following paper, published in the **Medical Image Analysis** journal: 4 | 5 | Explainable cardiac pathology classification on cine MRI with motion characterization by semi-supervised learning of apparent flow 6 | Qiao Zheng, Hervé Delingette, Nicholas Ayache 7 | 8 | 9 | 10 | **If you find the code useful, please consider giving appropriate credit by citing the paper above.** 11 | 12 | ``` 13 | @ARTICLE{Qiao:2018:2, 14 | Author = {Zheng, Q and Delingette, H and Ayache, N}, 15 | Journal = {Medical Image Analysis}, 16 | Title = {Explainable cardiac pathology classification on cine MRI with motion characterization by semi-supervised learning of apparent flow}, 17 | Volume = {56}, 18 | Pages = 
{80-95}, 19 | Year = {2019} 20 | } 21 | 22 | ``` 23 | 24 | ``` 25 | DOI: 10.1016/j.media.2019.06.001 26 | ``` 27 | 28 | ## Requirements 29 | The code should work with both Python 2 and Python 3, as compatibility with both has been taken into consideration. It depends on the following external libraries, which should be installed: 30 | - tensorflow 31 | - keras 32 | - numpy and scipy 33 | - PIL (Pillow) 34 | - cv2 (OpenCV) 35 | - nibabel 36 | - six 37 | - h5py 38 | - sklearn (scikit-learn) 39 | 40 | The other imported modules (math, itertools, copy, threading, warnings, multiprocessing, functools, urllib) are part of the Python standard library and do not need to be installed separately. 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | On the other hand, **to apply this package to any data, the images should be named, formatted, and arranged accordingly.** Please refer to Section VIII for more details. 49 | 50 | 51 | ## Usage 52 | The steps of the cardiac pathology classification method presented in the paper are described below, along with the corresponding files. **First, modify `code_dir` and `acdc_data_dir` in *config.py* to the paths of the code root directory and the ACDC data root directory.** This is the very first step in the use of the software, as these paths are necessary for the other scripts. The default values of the other variables in *config.py* are those used in the paper. Then, read the following sections according to your application scenario: 53 | - If you want to train and test the model yourself on the ACDC dataset as we have done, read from Section I to Section VI. 54 | - If you are only interested in applying the pretrained model to the ACDC testing set, you may mainly focus on Sections I, II, III.2, IV.2, V and VI. 55 | - If you want to train and/or test the model using another dataset instead of the ACDC dataset, useful details are provided in Section VIII. 56 | 57 | Depending on the version of this package, it may or may not contain the trained weights of the networks (the .h5 files for ROI-net, LVRV-net and ApparentFlow-net). For instance, due to constraints on file size, the version on GitHub does not contain the trained weights. **If the trained weights are not included in the package, they can be downloaded by running**: 58 | ``` 59 | python download_weights.py 60 | ``` 61 | 62 | ### I. Preprocessing of ACDC Data 63 | In this section, the preprocessing of the ACDC data is presented step by step. However, it is also possible to preprocess other datasets accordingly and then use them to train or test our model (more details are presented in Section VIII). 64 | 65 | #### I.1 Download the data 66 | Download the data to a directory (and adapt `acdc_data_dir` in *config.py* accordingly). Make sure that in this directory, the data of each of the 150 cases, including 100 in the training set and 50 in the testing set, is in a sub-directory named "patientxxx" (e.g. "patient001", "patient068", "patient124") following the convention of ACDC. 67 | 68 | #### I.2 Save as 2D images 69 | ``` 70 | python processing/convert_nifti_to_2D.py 71 | ``` 72 | To accelerate the reading of the image of a short-axis slice at a given instant, the short-axis images, as well as their ground-truth segmentations (if they exist), are converted to PNG format and saved. The following file is for this step: 73 | - processing/convert_nifti_to_2D.py 74 | 75 | #### I.3 Basic information 76 | ``` 77 | python processing/acdc_info.py 78 | ``` 79 | Save the useful basic information (e.g. the number of slices and frames) of each subject to a file. The following file is for this step: 80 | - processing/acdc_info.py 81 | 82 | The resulting information is saved to *acdc_info/acdc_info.txt*.
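A hedged sketch of the kind of lookup this step performs (an illustration rather than the actual implementation; it assumes the standard ACDC file naming, e.g. *patient001/patient001_4d.nii.gz*):
```
# illustration only: read the number of slices and frames of a subject
# from its ACDC 4D cine NIfTI file
import nibabel as nib

img = nib.load('patient001/patient001_4d.nii.gz')
nx, ny, n_slices, n_frames = img.shape
print('patient001 {} {}'.format(n_slices, n_frames))
```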
83 | 84 | #### I.4 Determination of key slices based on ground-truth segmentation 85 | ``` 86 | python processing/acdc_gt_base.py 87 | ``` 88 | Some key slices (e.g. the first slice below the base and the last slice above the apex) are determined based on the ground-truth segmentation using: 89 | - processing/acdc_gt_base.py 90 | 91 | The resulting information is saved to *acdc_info/acdc_gt_base.txt*. This information is used in the training process only. 92 | 93 | 94 | ### II. Region of Interest (ROI) Determination Using ROI-net 95 | ROI-net, the network for ROI determination presented and trained in our previous paper "3D Consistent & Robust Segmentation of Cardiac Images by Deep Learning with Spatial Propagation", is applied to the ACDC data. 96 | 97 | #### II.1 Prediction 98 | ``` 99 | python ROI/predict_roi_net.py 100 | ``` 101 | - ROI/predict_roi_net.py: the main file to launch the prediction 102 | - ROI/data_roi_predict.py: a function to generate lists of files for prediction 103 | - ROI/module_roi_net.py: definition of the network 104 | - ROI/model_roi_net_epoch050.h5: the weights of the trained network 105 | 106 | #### II.2 ROI cropping 107 | ``` 108 | python ROI/crop_according_to_roi.py 109 | ``` 110 | - ROI/crop_according_to_roi.py: crop and save the determined ROIs 111 | 112 | 113 | 114 | ### III. Apparent Flow Generation Using ApparentFlow-net 115 | ApparentFlow-net is trained on the whole ACDC training set for prediction on the ACDC testing set (this version is called "fold 0" in this project); it is also trained for a 5-fold cross-validation on the ACDC training set (called folds 1, 2, 3, 4 and 5 respectively). 116 | #### III.1 Training 117 | For fold `f` = 0, 1, 2, 3, 4 and 5, run: 118 | ``` 119 | python flow/train_apparentflow_net.py f 120 | ``` 121 | - flow/train_apparentflow_net.py: the main file to launch the training 122 | - flow/data_apparentflow.py: a function to generate lists of files for training and prediction 123 | - flow/module_apparentflow_net.py: the file that defines ApparentFlow-net 124 | - flow/model_apparentflow_net_fold`f`_epoch050.h5: the weights of the trained network for fold `f` = 0, 1, 2, 3, 4 and 5 125 | 126 | #### III.2 Prediction 127 | For fold `f` = 0, 1, 2, 3, 4 and 5, run: 128 | ``` 129 | python flow/predict_apparentflow_net.py f 130 | ``` 131 | - flow/predict_apparentflow_net.py: the main file to launch the prediction 132 | 133 | 134 | 135 | ### IV. Cardiac Segmentation Using LVRV-net 136 | LVRV-net, the network for cardiac segmentation presented and trained in our previous paper "3D Consistent & Robust Segmentation of Cardiac Images by Deep Learning with Spatial Propagation", is finetuned and then applied to the ACDC data.
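The finetuning below follows the same fold convention as the training of ApparentFlow-net in Section III. A minimal sketch of the mapping between the fold argument `f` and the data modes, as it appears in *segmentation/finetune_lvrv_net.py*:
```
# fold 0: train on the whole ACDC training set (for prediction on the testing set)
# folds 1-5: the five cross-validation splits of the ACDC training set
if fold == 0:
    mode_train, mode_val = 'all', 'all'
elif fold in range(1, 6):
    mode_train, mode_val = 'train', 'val'
else:
    raise ValueError('Incorrect fold: {}'.format(fold))
```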
137 | 138 | #### IV.1 Finetuning 139 | For fold `f` = 0, 1, 2, 3, 4 and 5, run: 140 | ``` 141 | python segmentation/finetune_lvrv_net.py f 142 | ``` 143 | - segmentation/finetune_lvrv_net.py: the main file to launch the finetuning 144 | - segmentation/data_lvrv_segmentation_propagation_acdc.py: a function to generate lists of files for finetuning and prediction 145 | - segmentation/module_lvrv_net.py: the file that defines LVRV-net 146 | - segmentation/model_lvrv_net_epoch080.h5: the weights of the network trained on UK Biobank 147 | - segmentation/model_lvrv_net_finetune_fold`f`_epoch1000.h5: the weights of the network trained on UK Biobank and then finetuned on ACDC for fold `f` = 0, 1, 2, 3, 4 and 5 148 | 149 | #### IV.2 Prediction 150 | For fold `f` = 0, 1, 2, 3, 4 and 5, run: 151 | ``` 152 | python segmentation/predict_lvrv_net.py f 153 | ``` 154 | - segmentation/predict_lvrv_net.py: the main file to launch the prediction 155 | 156 | 157 | 158 | ### V. Feature Extraction 159 | #### V.1 Extraction of Shape-Related Features 160 | ``` 161 | python feature_extraction/acdc_base.py 162 | python feature_extraction/acdc_pixel_size.py 163 | python feature_extraction/acdc_thickness.py 164 | python feature_extraction/acdc_volume.py 165 | ``` 166 | - feature_extraction/acdc_base.py: determination of the key slices (e.g. the first slice below the base and the last slice above the apex) based on the segmentation predicted by LVRV-net. The resulting information is saved to *acdc_info/acdc_base.txt* 167 | - feature_extraction/acdc_pixel_size.py: extract the pixel size of the images. The resulting information is saved to *acdc_info/acdc_pixel_size.txt* 168 | - feature_extraction/acdc_thickness.py: extract the thickness of the myocardium based on the segmentation predicted by LVRV-net. The resulting information is saved to *acdc_info/acdc_thickness.txt* 169 | - feature_extraction/acdc_volume.py: extract the cardiac volumes based on the segmentation predicted by LVRV-net. The resulting information is saved to *acdc_info/acdc_volume.txt* 170 | 171 | #### V.2 Extraction of Motion-Characteristic Features 172 | ``` 173 | python feature_extraction/acdc_zone_flow.py 174 | python feature_extraction/acdc_motion_index.py 175 | ``` 176 | - feature_extraction/acdc_zone_flow.py: extract the time series characterizing cardiac segmental motion based on the apparent flow predicted by ApparentFlow-net. The resulting time series are saved to the corresponding sub-directories in `acdc_data_dir` 177 | - feature_extraction/acdc_motion_index.py: extract the features characterizing cardiac motion based on the apparent flow predicted by ApparentFlow-net. The resulting information is saved to *acdc_info/acdc_motion_index.txt* 178 | 179 | 
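The shape and motion features extracted above are the inputs of the binary classifiers of the next section. For reference, the feature subsets actually consumed by each classifier can be read from *classification/classifiers.py*; note that the mapping of `data_class_num` to pathology below is inferred from the filenames of the trained models and should be treated as an assumption:
```
# feature subsets per binary classifier, as read from classification/classifiers.py
# (the data_class_num -> pathology mapping is inferred from the .joblib filenames)
CLASSIFIER_FEATURES = {
    5: ['rv_volume', 'rv_ratio', 'lvmrv_ratio'],        # RVA
    4: ['lv_ratio', 'lvmlv_ratio', 'thickness'],        # HCM
    3: ['lv_volume', 'asyn_radius', 'asyn_thickness'],  # MINF
    2: ['lv_ratio'],                                    # DCM
}
```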
180 | ### VI. Classification 181 | ``` 182 | python classification/classification_prediction.py 183 | ``` 184 | - classification/classification_prediction.py: the main file to launch the classification 185 | - classification/data_classification.py: a function to generate lists of files for classification 186 | - classification/classifiers.py: the file that defines the binary classifiers and their respective training and prediction processes 187 | - classification/trained_model_`XXX`_classifier.joblib: the 4 binary classifiers trained on the ACDC training set, for `XXX` = RVA, HCM, DCM and MINF 188 | - classification/acdc_testing_set_prediction_`XXX`_classifier.txt: the prediction on the ACDC testing set by the 4 binary classifiers trained on the ACDC training set, for `XXX` = RVA, HCM, DCM and MINF 189 | - classification/acdc_testing_set_prediction.txt: the prediction on the ACDC testing set by the overall classification model (which consists of the 4 binary classifiers) trained on the ACDC training set 190 | 191 | 192 | 193 | ### VII. Auxiliary functions 194 | Just for information, the auxiliary functions are defined in the following files: 195 | - helpers.py 196 | - image2.py 197 | 198 | In particular, image2.py is used for real-time data augmentation. It is adapted from the code in a file of the Keras project (https://github.com/keras-team/keras/blob/master/keras/preprocessing/image.py). 199 | 200 | 201 | 202 | ### VIII. Using Other Datasets 203 | Instead of using the ACDC data, it is also possible to preprocess another dataset accordingly and then use it to train and/or test our model, as long as the following conventions on the naming and format of the preprocessed short-axis images are met. 204 | 205 | In the data directory, the path of a 2D image, which is identified by its case name string `C` (e.g. 'patient007'), the two-digit slice index `S` (e.g. 02) in the stack, and the two-digit instant index `I` (e.g. 06) in the temporal sequence, should be the following: 206 | - *`'{C}/original_2D/original_2D_{S}_{I}.png'`* (e.g. *'patient007/original_2D/original_2D_02_06.png'*) 207 | 208 | The corresponding ground truth should have the path: 209 | - *`'{C}/original_2D/original_gt_2D_{S}_{I}.png'`* (e.g. *'patient007/original_2D/original_gt_2D_02_06.png'*) 210 | 
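A hedged sketch of a helper that builds such paths (illustrative only; `paths_2d` is not part of the package):
```
# illustrative only: build the expected image and ground-truth paths
def paths_2d(case, slice_idx, instant_idx):
    s, i = str(slice_idx).zfill(2), str(instant_idx).zfill(2)
    img = '{}/original_2D/original_2D_{}_{}.png'.format(case, s, i)
    gt = '{}/original_2D/original_gt_2D_{}_{}.png'.format(case, s, i)
    return img, gt

# paths_2d('patient007', 2, 6) returns
# ('patient007/original_2D/original_2D_02_06.png',
#  'patient007/original_2D/original_gt_2D_02_06.png')
```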
**Please note that the two-digit slice index `S` in the stack should be arranged to increment slice by slice from the base to the apex. This is essential, as it ensures that the propagation is performed in the correct base-to-apex order.** 212 | 213 | 214 | 215 | 216 | 217 | 218 | 219 | 220 | 221 | 222 | 223 | 224 | 225 | -------------------------------------------------------------------------------- /segmentation/finetune_lvrv_net.py: -------------------------------------------------------------------------------- 1 | """ The main file to launch the finetuning of LVRV-net """ 2 | 3 | import sys 4 | sys.path.append('..') 5 | 6 | import os 7 | import copy 8 | import math 9 | import numpy as np 10 | try: from itertools import izip # Python 2 11 | except ImportError: izip = zip # Python 3: zip already returns an iterator 12 | import tensorflow as tf 13 | from keras.models import Model 14 | from keras.optimizers import Adam 15 | from keras.utils import plot_model 16 | from keras import backend as K 17 | 18 | K.set_image_data_format('channels_last') # TF dimension ordering in this code 19 | 20 | from helpers import ( 21 | dice_coef5, 22 | dice_coef5_loss, 23 | dice_coef5_0, 24 | dice_coef5_1, 25 | dice_coef5_2, 26 | dice_coef5_3, 27 | mean_variance_normalization5, 28 | elementwise_multiplication 29 | ) 30 | 31 | from image2 import ImageDataGenerator2 32 | 33 | 34 | from data_lvrv_segmentation_propagation_acdc import data_lvrv_segmentation_propagation_acdc 35 | 36 | from module_lvrv_net import net_module 37 | 38 | import config 39 | 40 | save_period = 10000 41 | 42 | def finetune_lvrv_net(): 43 | 44 | code_path = config.code_dir 45 | 46 | fold = int(sys.argv[1]) 47 | print('fold = {}'.format(fold)) 48 | if fold == 0: 49 | mode_train = 'all' 50 | mode_val = 'all' 51 | elif fold in range(1,6): 52 | mode_train = 'train' 53 | mode_val = 'val' 54 | else: 55 | raise ValueError('Incorrect fold: {}'.format(fold)) # fail fast instead of continuing with undefined modes 56 | 57 | initial_lr = config.lvrv_net_initial_lr 58 | decay_rate = config.lvrv_net_decay_rate 59 | batch_size = config.lvrv_net_batch_size 60 | input_img_size = config.lvrv_net_input_img_size 61 | epochs = config.lvrv_net_epochs 62 | 63 | current_epoch = 80 64 | new_start_epoch = current_epoch 65 | 66 | ########### 67 | # The model 68 | model = net_module(input_shape=(input_img_size, input_img_size, 1), num_outputs=4) 69 | 70 | # Finetune 71 | print('Loading model') 72 | model.load_weights(filepath=os.path.join(code_path, 'segmentation', 'model_lvrv_net_epoch{}.h5'.format(str(current_epoch).zfill(3))) ) 73 | 74 | model.compile(optimizer=Adam(lr=initial_lr), loss=dice_coef5_loss, 75 | metrics=[dice_coef5, dice_coef5_0, dice_coef5_1, dice_coef5_2, dice_coef5_3]) 76 | 77 | 78 | print('This model has {} parameters'.format(model.count_params()) ) 79 | 80 | 81 | # Load data lists 82 | train_img_list0, train_gt_list0, train_img_list1, train_gt_list1 = \ 83 | data_lvrv_segmentation_propagation_acdc(mode = mode_train, fold = fold) 84 | 85 | test_img_list0, test_gt_list0, test_img_list1, test_gt_list1 = \ 86 | data_lvrv_segmentation_propagation_acdc(mode = mode_val, fold = fold) 87 | 88 | training_sample = len(train_img_list0) 89 | 90 | # we create two instances with the same arguments for random transformation 91 | img_data_gen_args = dict(featurewise_center=False, 92 | samplewise_center=False, 93 | featurewise_std_normalization=False, 94 | samplewise_std_normalization=False, 95 | zca_whitening=False, 96 | zca_epsilon=1e-6, 97 | rotation_range=180., 98 | width_shift_range=0.15, 99 | height_shift_range=0.15, 100 | shear_range=0., 101 | zoom_range=0.15, 102 | channel_shift_range=0., 103 | fill_mode='constant', 104 | cval=0., 105 | horizontal_flip=True, 106 | vertical_flip=True, 107 | rescale=None, 108 |
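# each image is normalized on the fly by mean_variance_normalization5 (imported from helpers.py)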
preprocessing_function=mean_variance_normalization5, 109 | data_format=K.image_data_format()) 110 | 111 | # deep copy is necessary 112 | mask_data_gen_args = copy.deepcopy(img_data_gen_args) 113 | mask_data_gen_args['preprocessing_function'] = elementwise_multiplication 114 | 115 | 116 | ######################### 117 | # Generators for training 118 | print('Creating generators for training') 119 | image_datagen0 = ImageDataGenerator2(**img_data_gen_args) 120 | image_datagen1 = ImageDataGenerator2(**img_data_gen_args) 121 | mask_datagen0 = ImageDataGenerator2(**mask_data_gen_args) 122 | mask_datagen1 = ImageDataGenerator2(**mask_data_gen_args) 123 | 124 | # Provide the same seed and keyword arguments to the fit and flow methods 125 | seed = 1 126 | image_datagen0.fit(np.zeros((1,1,1,1)), augment=False, rounds=0, seed=seed) 127 | image_datagen1.fit(np.zeros((1,1,1,1)), augment=False, rounds=0, seed=seed) 128 | mask_datagen0.fit(np.zeros((1,1,1,1)), augment=False, rounds=0, seed=seed) 129 | mask_datagen1.fit(np.zeros((1,1,1,1)), augment=False, rounds=0, seed=seed) 130 | 131 | image_generator0 = image_datagen0.flow_from_path_list( 132 | path_list=train_img_list0, 133 | target_size=(input_img_size, input_img_size), 134 | pad_to_square=True, 135 | resize_mode='nearest', 136 | histogram_based_preprocessing=False, 137 | clahe=False, 138 | color_mode='grayscale', 139 | class_list=None, 140 | class_mode=None, 141 | batch_size=batch_size, 142 | shuffle=True, 143 | seed=seed, 144 | save_to_dir=None, 145 | save_prefix='img0_', 146 | save_format='png', 147 | save_period=save_period, 148 | follow_links=False) 149 | 150 | image_generator1 = image_datagen1.flow_from_path_list( 151 | path_list=train_img_list1, 152 | target_size=(input_img_size, input_img_size), 153 | pad_to_square=True, 154 | resize_mode='nearest', 155 | histogram_based_preprocessing=False, 156 | clahe=False, 157 | color_mode='grayscale', 158 | class_list=None, 159 | class_mode=None, 160 | batch_size=batch_size, 161 | shuffle=True, 162 | seed=seed, 163 | save_to_dir=None, 164 | save_prefix='img1_', 165 | save_format='png', 166 | save_period=save_period, 167 | follow_links=False) 168 | 169 | mask_generator0 = mask_datagen0.flow_from_path_list( 170 | path_list=train_gt_list0, 171 | target_size=(input_img_size, input_img_size), 172 | pad_to_square=True, 173 | resize_mode='nearest', 174 | histogram_based_preprocessing=False, 175 | clahe=False, 176 | color_mode='grayscale', 177 | class_list=None, 178 | class_mode=None, 179 | batch_size=batch_size, 180 | shuffle=True, 181 | seed=seed, 182 | save_to_dir=None, 183 | save_prefix='mask0_', 184 | save_format='png', 185 | save_period=save_period, 186 | follow_links=False) 187 | 188 | mask_generator1 = mask_datagen1.flow_from_path_list( 189 | path_list=train_gt_list1, 190 | target_size=(input_img_size, input_img_size), 191 | pad_to_square=True, 192 | resize_mode='nearest', 193 | histogram_based_preprocessing=False, 194 | clahe=False, 195 | color_mode='grayscale', 196 | class_list=None, 197 | class_mode=None, 198 | batch_size=batch_size, 199 | shuffle=True, 200 | seed=seed, 201 | save_to_dir=None, 202 | save_prefix='mask1_', 203 | save_format='png', 204 | save_period=save_period, 205 | follow_links=False) 206 | 207 | # Combine generators into one which yields image and masks 208 | train_generator = izip(image_generator0, image_generator1, 209 | mask_generator0, mask_generator1) 210 | 211 | 212 | ########################### 213 | # Generators for validation 214 | print('Creating generators for 
validation') 215 | val_image_datagen0 = ImageDataGenerator2(**img_data_gen_args) 216 | val_image_datagen1 = ImageDataGenerator2(**img_data_gen_args) 217 | val_mask_datagen0 = ImageDataGenerator2(**mask_data_gen_args) 218 | val_mask_datagen1 = ImageDataGenerator2(**mask_data_gen_args) 219 | 220 | # Provide the same seed and keyword arguments to the fit and flow methods 221 | val_seed = 2 222 | val_image_datagen0.fit(np.zeros((1,1,1,1)), augment=False, rounds=0, seed=val_seed) 223 | val_image_datagen1.fit(np.zeros((1,1,1,1)), augment=False, rounds=0, seed=val_seed) 224 | val_mask_datagen0.fit(np.zeros((1,1,1,1)), augment=False, rounds=0, seed=val_seed) 225 | val_mask_datagen1.fit(np.zeros((1,1,1,1)), augment=False, rounds=0, seed=val_seed) 226 | 227 | val_image_generator0 = val_image_datagen0.flow_from_path_list( 228 | path_list=test_img_list0, 229 | target_size=(input_img_size, input_img_size), 230 | pad_to_square=True, 231 | resize_mode='nearest', 232 | histogram_based_preprocessing=False, 233 | clahe=False, 234 | color_mode='grayscale', 235 | class_list=None, 236 | class_mode=None, 237 | batch_size=batch_size, 238 | shuffle=True, 239 | seed=val_seed, 240 | save_to_dir=None, 241 | save_prefix='img0_', 242 | save_format='png', 243 | save_period=1, 244 | follow_links=False) 245 | 246 | val_image_generator1 = val_image_datagen1.flow_from_path_list( 247 | path_list=test_img_list1, 248 | target_size=(input_img_size, input_img_size), 249 | pad_to_square=True, 250 | resize_mode='nearest', 251 | histogram_based_preprocessing=False, 252 | clahe=False, 253 | color_mode='grayscale', 254 | class_list=None, 255 | class_mode=None, 256 | batch_size=batch_size, 257 | shuffle=True, 258 | seed=val_seed, 259 | save_to_dir=None, 260 | save_prefix='img1_', 261 | save_format='png', 262 | save_period=1, 263 | follow_links=False) 264 | 265 | val_mask_generator0 = val_mask_datagen0.flow_from_path_list( 266 | path_list=test_gt_list0, 267 | target_size=(input_img_size, input_img_size), 268 | pad_to_square=True, 269 | resize_mode='nearest', 270 | histogram_based_preprocessing=False, 271 | clahe=False, 272 | color_mode='grayscale', 273 | class_list=None, 274 | class_mode=None, 275 | batch_size=batch_size, 276 | shuffle=True, 277 | seed=val_seed, 278 | save_to_dir=None, 279 | save_prefix='mask0_', 280 | save_format='png', 281 | save_period=1, 282 | follow_links=False) 283 | 284 | val_mask_generator1 = val_mask_datagen1.flow_from_path_list( 285 | path_list=test_gt_list1, 286 | target_size=(input_img_size, input_img_size), 287 | pad_to_square=True, 288 | resize_mode='nearest', 289 | histogram_based_preprocessing=False, 290 | clahe=False, 291 | color_mode='grayscale', 292 | class_list=None, 293 | class_mode=None, 294 | batch_size=batch_size, 295 | shuffle=True, 296 | seed=val_seed, 297 | save_to_dir=None, 298 | save_prefix='mask1_', 299 | save_format='png', 300 | save_period=1, 301 | follow_links=False) 302 | 303 | 304 | # Combine generators into one which yields image and masks 305 | validation_generator = izip(val_image_generator0, val_image_generator1, 306 | val_mask_generator0, val_mask_generator1) 307 | 308 | 309 | ############### 310 | # Train the model 311 | print('Start training') 312 | steps = int(math.ceil(float(training_sample) / batch_size)) 313 | print('There will be {} epochs with {} steps in each epoch'.format(epochs, steps) ) 314 | 315 | 316 | total_step = 0 317 | for epoch in range(new_start_epoch + 1, epochs + 1): 318 | print('\n\n##########\nEpoch {}\n##########'.format(epoch) ) 319 | 320 | for step in 
range(steps): 321 | print('\n****** Epoch {} Step {} ******'.format(epoch, step) ) 322 | batch_img0, batch_img1, batch_mask0, batch_mask1 = next(train_generator) 323 | print(model.train_on_batch([batch_img0, batch_img1, batch_mask0], 324 | batch_mask1, sample_weight=None, class_weight=None)) 325 | 326 | 327 | # perform test 328 | if (total_step % save_period == 0): 329 | val_batch_img0, val_batch_img1, \ 330 | val_batch_mask0, val_batch_mask1 = next(validation_generator) 331 | print('test:') 332 | print(model.test_on_batch([val_batch_img0, val_batch_img1, val_batch_mask0], 333 | val_batch_mask1, sample_weight=None)) 334 | 335 | total_step += 1 336 | 337 | 338 | # adjust learning rate 339 | if (epoch % 10 == 0): 340 | old_lr = float(K.get_value(model.optimizer.lr)) 341 | new_lr = initial_lr * (decay_rate**(epoch//10)) 342 | K.set_value(model.optimizer.lr, new_lr) 343 | print('learning rate is reset to %.8f' % (new_lr)) 344 | 345 | # save the weights of the model 346 | if (epoch % 1000 == 0): 347 | model.save_weights(os.path.join(code_path, 'segmentation', 'model_lvrv_net_finetune_fold{}_epoch{}.h5'.format(str(fold), str(epoch).zfill(3)) ) ) 348 | 349 | 350 | print('Training is done!') 351 | 352 | 353 | if __name__ == '__main__': 354 | finetune_lvrv_net() 355 | 356 | 357 | 358 | 359 | --------------------------------------------------------------------------------