├── .DS_Store
├── README.md
├── data
│   ├── .DS_Store
│   ├── BraTS2Dpreprocessing-master
│   │   ├── .gitattributes
│   │   ├── 18hgg.csv
│   │   ├── 18lgg.csv
│   │   ├── 19hgg.csv
│   │   ├── 19lgg.csv
│   │   ├── GetTestingSetsFrom2019.ipynb
│   │   ├── GetTrainingSets.ipynb
│   │   └── README.md
│   └── BraTS3Dpreprocessing-master
│       ├── .gitattributes
│       ├── 18hgg.csv
│       ├── 18lgg.csv
│       ├── 19hgg.csv
│       ├── 19lgg.csv
│       ├── Get3Dtraintestdata.ipynb
│       └── README.md
├── model2D
│   ├── .DS_Store
│   ├── DenseUnet_BraTs-master
│   │   ├── .gitattributes
│   │   ├── README.md
│   │   ├── dataset.py
│   │   ├── denseUnet.py
│   │   ├── losses.py
│   │   ├── metrics.py
│   │   ├── test.py
│   │   ├── train.py
│   │   └── utils.py
│   ├── FCN2D_For_BraTs-master
│   │   ├── .gitattributes
│   │   ├── FCN.py
│   │   ├── README.md
│   │   ├── __pycache__
│   │   │   ├── FCN.cpython-36.pyc
│   │   │   ├── dataset.cpython-36.pyc
│   │   │   ├── losses.cpython-36.pyc
│   │   │   ├── metrics.cpython-36.pyc
│   │   │   └── utils.cpython-36.pyc
│   │   ├── dataset.py
│   │   ├── losses.py
│   │   ├── metrics.py
│   │   ├── test.py
│   │   ├── train.py
│   │   └── utils.py
│   ├── NestedUnet_ResBlock-master
│   │   ├── .gitattributes
│   │   ├── .vs
│   │   │   └── slnx.sqlite
│   │   ├── README.md
│   │   ├── __pycache__
│   │   │   ├── dataset.cpython-36.pyc
│   │   │   ├── losses.cpython-36.pyc
│   │   │   ├── metrics.cpython-36.pyc
│   │   │   ├── myresnet34unetplus.cpython-36.pyc
│   │   │   ├── myresnetunetplus.cpython-36.pyc
│   │   │   ├── resnet34unetplus.cpython-36.pyc
│   │   │   └── utils.cpython-36.pyc
│   │   ├── dataset.py
│   │   ├── losses.py
│   │   ├── metrics.py
│   │   ├── myresnetunetplus.py
│   │   ├── test.py
│   │   ├── train.py
│   │   └── utils.py
│   ├── UNet2D_BraTs-master
│   │   ├── .gitattributes
│   │   ├── README.md
│   │   ├── dataset.py
│   │   ├── losses.py
│   │   ├── metrics.py
│   │   ├── test.py
│   │   ├── train.py
│   │   ├── unet.py
│   │   └── utils.py
│   ├── deepresunet_brats-master
│   │   ├── dataset.py
│   │   ├── losses.py
│   │   ├── metrics.py
│   │   ├── mymodel.py
│   │   ├── test.py
│   │   ├── train.py
│   │   └── utils.py
│   ├── hybridresunet-master
│   │   ├── .gitattributes
│   │   ├── README.md
│   │   ├── dataset.py
│   │   ├── losses.py
│   │   ├── metrics.py
│   │   ├── mymodel.py
│   │   ├── test.py
│   │   ├── train.py
│   │   └── utils.py
│   ├── unet3P-master
│   │   ├── .gitattributes
│   │   ├── README.md
│   │   ├── UNet_3Plus.py
│   │   ├── dataset.py
│   │   ├── init_weights.py
│   │   ├── layers.py
│   │   ├── losses.py
│   │   ├── metrics.py
│   │   ├── test.py
│   │   ├── train.py
│   │   └── utils.py
│   └── vnet_code-master
│       ├── .gitattributes
│       ├── README.md
│       ├── dataset.py
│       ├── losses.py
│       ├── metrics.py
│       ├── test.py
│       ├── train.py
│       ├── utils.py
│       └── vnet.py
└── model3D
    ├── .DS_Store
    ├── Unet3D-master
    │   ├── .DS_Store
    │   ├── .gitattributes
    │   ├── .ipynb_checkpoints
    │   │   └── TEST-checkpoint.ipynb
    │   ├── README.md
    │   ├── __pycache__
    │   │   ├── dataset.cpython-36.pyc
    │   │   ├── losses.cpython-36.pyc
    │   │   ├── metrics.cpython-36.pyc
    │   │   ├── unet3d.cpython-36.pyc
    │   │   └── utils.cpython-36.pyc
    │   ├── dataset.py
    │   ├── losses.py
    │   ├── metrics.py
    │   ├── test.py
    │   ├── train.py
    │   ├── unet3d.py
    │   └── utils.py
    └── vnet_code
        ├── .DS_Store
        ├── README.md
        ├── dataset.py
        ├── losses.py
        ├── metrics.py
        ├── test.py
        ├── train.py
        ├── utils.py
        └── vnet.py
--------------------------------------------------------------------------------
/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Merofine/BrainTumorSegmentation/fa97d9268df32e37769a808a70fa257928475d8a/.DS_Store
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# BrainTumorSegmentation

Brain tumor segmentation, including commonly used networks such as U-Net and V-Net.
--------------------------------------------------------------------------------
/data/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Merofine/BrainTumorSegmentation/fa97d9268df32e37769a808a70fa257928475d8a/data/.DS_Store -------------------------------------------------------------------------------- /data/BraTS2Dpreprocessing-master/.gitattributes: -------------------------------------------------------------------------------- 1 | # Auto detect text files and perform LF normalization 2 | * text=auto 3 | -------------------------------------------------------------------------------- /data/BraTS2Dpreprocessing-master/18hgg.csv: -------------------------------------------------------------------------------- 1 | _2013_10_1 2 | _2013_11_1 3 | _2013_12_1 4 | _2013_13_1 5 | _2013_14_1 6 | _2013_17_1 7 | _2013_18_1 8 | _2013_19_1 9 | _2013_20_1 10 | _2013_21_1 11 | _2013_22_1 12 | _2013_23_1 13 | _2013_25_1 14 | _2013_26_1 15 | _2013_27_1 16 | _2013_2_1 17 | _2013_3_1 18 | _2013_4_1 19 | _2013_5_1 20 | _2013_7_1 21 | _CBICA_AAB_1 22 | _CBICA_AAG_1 23 | _CBICA_AAL_1 24 | _CBICA_AAP_1 25 | _CBICA_ABB_1 26 | _CBICA_ABE_1 27 | _CBICA_ABM_1 28 | _CBICA_ABN_1 29 | _CBICA_ABO_1 30 | _CBICA_ABY_1 31 | _CBICA_ALN_1 32 | _CBICA_ALU_1 33 | _CBICA_ALX_1 34 | _CBICA_AME_1 35 | _CBICA_AMH_1 36 | _CBICA_ANG_1 37 | _CBICA_ANI_1 38 | _CBICA_ANP_1 39 | _CBICA_ANZ_1 40 | _CBICA_AOD_1 41 | _CBICA_AOH_1 42 | _CBICA_AOO_1 43 | _CBICA_AOP_1 44 | _CBICA_AOZ_1 45 | _CBICA_APR_1 46 | _CBICA_APY_1 47 | _CBICA_APZ_1 48 | _CBICA_AQA_1 49 | _CBICA_AQD_1 50 | _CBICA_AQG_1 51 | _CBICA_AQJ_1 52 | _CBICA_AQN_1 53 | _CBICA_AQO_1 54 | _CBICA_AQP_1 55 | _CBICA_AQQ_1 56 | _CBICA_AQR_1 57 | _CBICA_AQT_1 58 | _CBICA_AQU_1 59 | _CBICA_AQV_1 60 | _CBICA_AQY_1 61 | _CBICA_AQZ_1 62 | _CBICA_ARF_1 63 | _CBICA_ARW_1 64 | _CBICA_ARZ_1 65 | _CBICA_ASA_1 66 | _CBICA_ASE_1 67 | _CBICA_ASG_1 68 | _CBICA_ASH_1 69 | _CBICA_ASK_1 70 | _CBICA_ASN_1 71 | _CBICA_ASO_1 72 | _CBICA_ASU_1 73 | _CBICA_ASV_1 74 | _CBICA_ASW_1 75 | _CBICA_ASY_1 76 | _CBICA_ATB_1 77 | _CBICA_ATD_1 78 | _CBICA_ATF_1 79 | _CBICA_ATP_1 80 | _CBICA_ATV_1 81 | _CBICA_ATX_1 82 | _CBICA_AUN_1 83 | _CBICA_AUQ_1 84 | _CBICA_AUR_1 85 | _CBICA_AVG_1 86 | _CBICA_AVJ_1 87 | _CBICA_AVV_1 88 | _CBICA_AWG_1 89 | _CBICA_AWH_1 90 | _CBICA_AWI_1 91 | _CBICA_AXJ_1 92 | _CBICA_AXL_1 93 | _CBICA_AXM_1 94 | _CBICA_AXN_1 95 | _CBICA_AXO_1 96 | _CBICA_AXQ_1 97 | _CBICA_AXW_1 98 | _CBICA_AYA_1 99 | _CBICA_AYI_1 100 | _CBICA_AYU_1 101 | _CBICA_AYW_1 102 | _CBICA_AZD_1 103 | _CBICA_AZH_1 104 | _CBICA_BFB_1 105 | _CBICA_BFP_1 106 | _CBICA_BHB_1 107 | _CBICA_BHK_1 108 | _CBICA_BHM_1 109 | _TCIA01_131_1 110 | _TCIA01_147_1 111 | _TCIA01_150_1 112 | _TCIA01_180_1 113 | _TCIA01_186_1 114 | _TCIA01_190_1 115 | _TCIA01_201_1 116 | _TCIA01_203_1 117 | _TCIA01_221_1 118 | _TCIA01_231_1 119 | _TCIA01_235_1 120 | _TCIA01_335_1 121 | _TCIA01_378_1 122 | _TCIA01_390_1 123 | _TCIA01_401_1 124 | _TCIA01_411_1 125 | _TCIA01_412_1 126 | _TCIA01_425_1 127 | _TCIA01_429_1 128 | _TCIA01_448_1 129 | _TCIA01_460_1 130 | _TCIA01_499_1 131 | _TCIA02_117_1 132 | _TCIA02_118_1 133 | _TCIA02_135_1 134 | _TCIA02_151_1 135 | _TCIA02_168_1 136 | _TCIA02_171_1 137 | _TCIA02_179_1 138 | _TCIA02_198_1 139 | _TCIA02_208_1 140 | _TCIA02_222_1 141 | _TCIA02_226_1 142 | _TCIA02_274_1 143 | _TCIA02_283_1 144 | _TCIA02_290_1 145 | _TCIA02_300_1 146 | _TCIA02_309_1 147 | _TCIA02_314_1 148 | _TCIA02_321_1 149 | _TCIA02_322_1 150 | _TCIA02_331_1 151 | _TCIA02_368_1 152 | _TCIA02_370_1 153 | _TCIA02_374_1 154 | _TCIA02_377_1 155 | _TCIA02_394_1 156 | _TCIA02_430_1 157 | _TCIA02_455_1 158 | _TCIA02_471_1 159 | _TCIA02_473_1 160 | 
_TCIA02_491_1 161 | _TCIA02_605_1 162 | _TCIA02_606_1 163 | _TCIA02_607_1 164 | _TCIA02_608_1 165 | _TCIA03_121_1 166 | _TCIA03_133_1 167 | _TCIA03_138_1 168 | _TCIA03_199_1 169 | _TCIA03_257_1 170 | _TCIA03_265_1 171 | _TCIA03_296_1 172 | _TCIA03_338_1 173 | _TCIA03_375_1 174 | _TCIA03_419_1 175 | _TCIA03_474_1 176 | _TCIA03_498_1 177 | _TCIA04_111_1 178 | _TCIA04_149_1 179 | _TCIA04_192_1 180 | _TCIA04_328_1 181 | _TCIA04_343_1 182 | _TCIA04_361_1 183 | _TCIA04_437_1 184 | _TCIA04_479_1 185 | _TCIA05_277_1 186 | _TCIA05_396_1 187 | _TCIA05_444_1 188 | _TCIA05_478_1 189 | _TCIA06_165_1 190 | _TCIA06_184_1 191 | _TCIA06_211_1 192 | _TCIA06_247_1 193 | _TCIA06_332_1 194 | _TCIA06_372_1 195 | _TCIA06_409_1 196 | _TCIA06_603_1 197 | _TCIA08_105_1 198 | _TCIA08_113_1 199 | _TCIA08_162_1 200 | _TCIA08_167_1 201 | _TCIA08_205_1 202 | _TCIA08_218_1 203 | _TCIA08_234_1 204 | _TCIA08_242_1 205 | _TCIA08_278_1 206 | _TCIA08_280_1 207 | _TCIA08_319_1 208 | _TCIA08_406_1 209 | _TCIA08_436_1 210 | _TCIA08_469_1 211 | -------------------------------------------------------------------------------- /data/BraTS2Dpreprocessing-master/18lgg.csv: -------------------------------------------------------------------------------- 1 | _2013_0_1 2 | _2013_15_1 3 | _2013_16_1 4 | _2013_1_1 5 | _2013_24_1 6 | _2013_28_1 7 | _2013_29_1 8 | _2013_6_1 9 | _2013_8_1 10 | _2013_9_1 11 | _TCIA09_141_1 12 | _TCIA09_177_1 13 | _TCIA09_254_1 14 | _TCIA09_255_1 15 | _TCIA09_312_1 16 | _TCIA09_402_1 17 | _TCIA09_428_1 18 | _TCIA09_451_1 19 | _TCIA09_462_1 20 | _TCIA09_493_1 21 | _TCIA09_620_1 22 | _TCIA10_103_1 23 | _TCIA10_109_1 24 | _TCIA10_130_1 25 | _TCIA10_152_1 26 | _TCIA10_175_1 27 | _TCIA10_202_1 28 | _TCIA10_241_1 29 | _TCIA10_261_1 30 | _TCIA10_266_1 31 | _TCIA10_276_1 32 | _TCIA10_282_1 33 | _TCIA10_299_1 34 | _TCIA10_307_1 35 | _TCIA10_310_1 36 | _TCIA10_325_1 37 | _TCIA10_330_1 38 | _TCIA10_346_1 39 | _TCIA10_351_1 40 | _TCIA10_387_1 41 | _TCIA10_393_1 42 | _TCIA10_408_1 43 | _TCIA10_410_1 44 | _TCIA10_413_1 45 | _TCIA10_420_1 46 | _TCIA10_442_1 47 | _TCIA10_449_1 48 | _TCIA10_490_1 49 | _TCIA10_625_1 50 | _TCIA10_628_1 51 | _TCIA10_629_1 52 | _TCIA10_632_1 53 | _TCIA10_637_1 54 | _TCIA10_639_1 55 | _TCIA10_640_1 56 | _TCIA10_644_1 57 | _TCIA12_101_1 58 | _TCIA12_249_1 59 | _TCIA12_298_1 60 | _TCIA12_466_1 61 | _TCIA12_470_1 62 | _TCIA12_480_1 63 | _TCIA13_615_1 64 | _TCIA13_618_1 65 | _TCIA13_621_1 66 | _TCIA13_623_1 67 | _TCIA13_624_1 68 | _TCIA13_630_1 69 | _TCIA13_633_1 70 | _TCIA13_634_1 71 | _TCIA13_642_1 72 | _TCIA13_645_1 73 | _TCIA13_650_1 74 | _TCIA13_653_1 75 | _TCIA13_654_1 76 | -------------------------------------------------------------------------------- /data/BraTS2Dpreprocessing-master/19hgg.csv: -------------------------------------------------------------------------------- 1 | _2013_10_1 2 | _2013_11_1 3 | _2013_12_1 4 | _2013_13_1 5 | _2013_14_1 6 | _2013_17_1 7 | _2013_18_1 8 | _2013_19_1 9 | _2013_20_1 10 | _2013_21_1 11 | _2013_22_1 12 | _2013_23_1 13 | _2013_25_1 14 | _2013_26_1 15 | _2013_27_1 16 | _2013_2_1 17 | _2013_3_1 18 | _2013_4_1 19 | _2013_5_1 20 | _2013_7_1 21 | _CBICA_AAB_1 22 | _CBICA_AAG_1 23 | _CBICA_AAL_1 24 | _CBICA_AAP_1 25 | _CBICA_ABB_1 26 | _CBICA_ABE_1 27 | _CBICA_ABM_1 28 | _CBICA_ABN_1 29 | _CBICA_ABO_1 30 | _CBICA_ABY_1 31 | _CBICA_ALN_1 32 | _CBICA_ALU_1 33 | _CBICA_ALX_1 34 | _CBICA_AME_1 35 | _CBICA_AMH_1 36 | _CBICA_ANG_1 37 | _CBICA_ANI_1 38 | _CBICA_ANP_1 39 | _CBICA_ANV_1 40 | _CBICA_ANZ_1 41 | _CBICA_AOC_1 42 | _CBICA_AOD_1 43 | _CBICA_AOH_1 44 | 
_CBICA_AOO_1 45 | _CBICA_AOP_1 46 | _CBICA_AOS_1 47 | _CBICA_AOZ_1 48 | _CBICA_APK_1 49 | _CBICA_APR_1 50 | _CBICA_APY_1 51 | _CBICA_APZ_1 52 | _CBICA_AQA_1 53 | _CBICA_AQD_1 54 | _CBICA_AQG_1 55 | _CBICA_AQJ_1 56 | _CBICA_AQN_1 57 | _CBICA_AQO_1 58 | _CBICA_AQP_1 59 | _CBICA_AQQ_1 60 | _CBICA_AQR_1 61 | _CBICA_AQT_1 62 | _CBICA_AQU_1 63 | _CBICA_AQV_1 64 | _CBICA_AQY_1 65 | _CBICA_AQZ_1 66 | _CBICA_ARF_1 67 | _CBICA_ARW_1 68 | _CBICA_ARZ_1 69 | _CBICA_ASA_1 70 | _CBICA_ASE_1 71 | _CBICA_ASF_1 72 | _CBICA_ASG_1 73 | _CBICA_ASH_1 74 | _CBICA_ASK_1 75 | _CBICA_ASN_1 76 | _CBICA_ASO_1 77 | _CBICA_ASR_1 78 | _CBICA_ASU_1 79 | _CBICA_ASV_1 80 | _CBICA_ASW_1 81 | _CBICA_ASY_1 82 | _CBICA_ATB_1 83 | _CBICA_ATD_1 84 | _CBICA_ATF_1 85 | _CBICA_ATN_1 86 | _CBICA_ATP_1 87 | _CBICA_ATV_1 88 | _CBICA_ATX_1 89 | _CBICA_AUA_1 90 | _CBICA_AUN_1 91 | _CBICA_AUQ_1 92 | _CBICA_AUR_1 93 | _CBICA_AUW_1 94 | _CBICA_AUX_1 95 | _CBICA_AVB_1 96 | _CBICA_AVF_1 97 | _CBICA_AVG_1 98 | _CBICA_AVJ_1 99 | _CBICA_AVT_1 100 | _CBICA_AVV_1 101 | _CBICA_AWG_1 102 | _CBICA_AWH_1 103 | _CBICA_AWI_1 104 | _CBICA_AWV_1 105 | _CBICA_AWX_1 106 | _CBICA_AXJ_1 107 | _CBICA_AXL_1 108 | _CBICA_AXM_1 109 | _CBICA_AXN_1 110 | _CBICA_AXO_1 111 | _CBICA_AXQ_1 112 | _CBICA_AXW_1 113 | _CBICA_AYA_1 114 | _CBICA_AYC_1 115 | _CBICA_AYG_1 116 | _CBICA_AYI_1 117 | _CBICA_AYU_1 118 | _CBICA_AYW_1 119 | _CBICA_AZD_1 120 | _CBICA_AZH_1 121 | _CBICA_BAN_1 122 | _CBICA_BAP_1 123 | _CBICA_BAX_1 124 | _CBICA_BBG_1 125 | _CBICA_BCF_1 126 | _CBICA_BCL_1 127 | _CBICA_BDK_1 128 | _CBICA_BEM_1 129 | _CBICA_BFB_1 130 | _CBICA_BFP_1 131 | _CBICA_BGE_1 132 | _CBICA_BGG_1 133 | _CBICA_BGN_1 134 | _CBICA_BGO_1 135 | _CBICA_BGR_1 136 | _CBICA_BGT_1 137 | _CBICA_BGW_1 138 | _CBICA_BGX_1 139 | _CBICA_BHB_1 140 | _CBICA_BHK_1 141 | _CBICA_BHM_1 142 | _CBICA_BHQ_1 143 | _CBICA_BHV_1 144 | _CBICA_BHZ_1 145 | _CBICA_BIC_1 146 | _CBICA_BJY_1 147 | _CBICA_BKV_1 148 | _CBICA_BLJ_1 149 | _CBICA_BNR_1 150 | _TCIA01_131_1 151 | _TCIA01_147_1 152 | _TCIA01_150_1 153 | _TCIA01_180_1 154 | _TCIA01_186_1 155 | _TCIA01_190_1 156 | _TCIA01_201_1 157 | _TCIA01_203_1 158 | _TCIA01_221_1 159 | _TCIA01_231_1 160 | _TCIA01_235_1 161 | _TCIA01_335_1 162 | _TCIA01_378_1 163 | _TCIA01_390_1 164 | _TCIA01_401_1 165 | _TCIA01_411_1 166 | _TCIA01_412_1 167 | _TCIA01_425_1 168 | _TCIA01_429_1 169 | _TCIA01_448_1 170 | _TCIA01_460_1 171 | _TCIA01_499_1 172 | _TCIA02_117_1 173 | _TCIA02_118_1 174 | _TCIA02_135_1 175 | _TCIA02_151_1 176 | _TCIA02_168_1 177 | _TCIA02_171_1 178 | _TCIA02_179_1 179 | _TCIA02_198_1 180 | _TCIA02_208_1 181 | _TCIA02_222_1 182 | _TCIA02_226_1 183 | _TCIA02_274_1 184 | _TCIA02_283_1 185 | _TCIA02_290_1 186 | _TCIA02_300_1 187 | _TCIA02_309_1 188 | _TCIA02_314_1 189 | _TCIA02_321_1 190 | _TCIA02_322_1 191 | _TCIA02_331_1 192 | _TCIA02_368_1 193 | _TCIA02_370_1 194 | _TCIA02_374_1 195 | _TCIA02_377_1 196 | _TCIA02_394_1 197 | _TCIA02_430_1 198 | _TCIA02_455_1 199 | _TCIA02_471_1 200 | _TCIA02_473_1 201 | _TCIA02_491_1 202 | _TCIA02_605_1 203 | _TCIA02_606_1 204 | _TCIA02_607_1 205 | _TCIA02_608_1 206 | _TCIA03_121_1 207 | _TCIA03_133_1 208 | _TCIA03_138_1 209 | _TCIA03_199_1 210 | _TCIA03_257_1 211 | _TCIA03_265_1 212 | _TCIA03_296_1 213 | _TCIA03_338_1 214 | _TCIA03_375_1 215 | _TCIA03_419_1 216 | _TCIA03_474_1 217 | _TCIA03_498_1 218 | _TCIA04_111_1 219 | _TCIA04_149_1 220 | _TCIA04_192_1 221 | _TCIA04_328_1 222 | _TCIA04_343_1 223 | _TCIA04_361_1 224 | _TCIA04_437_1 225 | _TCIA04_479_1 226 | _TCIA05_277_1 227 | _TCIA05_396_1 228 | _TCIA05_444_1 229 | _TCIA05_478_1 
230 | _TCIA06_165_1 231 | _TCIA06_184_1 232 | _TCIA06_211_1 233 | _TCIA06_247_1 234 | _TCIA06_332_1 235 | _TCIA06_372_1 236 | _TCIA06_409_1 237 | _TCIA06_603_1 238 | _TCIA08_105_1 239 | _TCIA08_113_1 240 | _TCIA08_162_1 241 | _TCIA08_167_1 242 | _TCIA08_205_1 243 | _TCIA08_218_1 244 | _TCIA08_234_1 245 | _TCIA08_242_1 246 | _TCIA08_278_1 247 | _TCIA08_280_1 248 | _TCIA08_319_1 249 | _TCIA08_406_1 250 | _TCIA08_436_1 251 | _TCIA08_469_1 252 | _TMC_06290_1 253 | _TMC_06643_1 254 | _TMC_11964_1 255 | _TMC_12866_1 256 | _TMC_15477_1 257 | _TMC_21360_1 258 | _TMC_27374_1 259 | _TMC_30014_1 260 | -------------------------------------------------------------------------------- /data/BraTS2Dpreprocessing-master/19lgg.csv: -------------------------------------------------------------------------------- 1 | _2013_0_1 2 | _2013_15_1 3 | _2013_16_1 4 | _2013_1_1 5 | _2013_24_1 6 | _2013_28_1 7 | _2013_29_1 8 | _2013_6_1 9 | _2013_8_1 10 | _2013_9_1 11 | _TCIA09_141_1 12 | _TCIA09_177_1 13 | _TCIA09_254_1 14 | _TCIA09_255_1 15 | _TCIA09_312_1 16 | _TCIA09_402_1 17 | _TCIA09_428_1 18 | _TCIA09_451_1 19 | _TCIA09_462_1 20 | _TCIA09_493_1 21 | _TCIA09_620_1 22 | _TCIA10_103_1 23 | _TCIA10_109_1 24 | _TCIA10_130_1 25 | _TCIA10_152_1 26 | _TCIA10_175_1 27 | _TCIA10_202_1 28 | _TCIA10_241_1 29 | _TCIA10_261_1 30 | _TCIA10_266_1 31 | _TCIA10_276_1 32 | _TCIA10_282_1 33 | _TCIA10_299_1 34 | _TCIA10_307_1 35 | _TCIA10_310_1 36 | _TCIA10_325_1 37 | _TCIA10_330_1 38 | _TCIA10_346_1 39 | _TCIA10_351_1 40 | _TCIA10_387_1 41 | _TCIA10_393_1 42 | _TCIA10_408_1 43 | _TCIA10_410_1 44 | _TCIA10_413_1 45 | _TCIA10_420_1 46 | _TCIA10_442_1 47 | _TCIA10_449_1 48 | _TCIA10_490_1 49 | _TCIA10_625_1 50 | _TCIA10_628_1 51 | _TCIA10_629_1 52 | _TCIA10_632_1 53 | _TCIA10_637_1 54 | _TCIA10_639_1 55 | _TCIA10_640_1 56 | _TCIA10_644_1 57 | _TCIA12_101_1 58 | _TCIA12_249_1 59 | _TCIA12_298_1 60 | _TCIA12_466_1 61 | _TCIA12_470_1 62 | _TCIA12_480_1 63 | _TCIA13_615_1 64 | _TCIA13_618_1 65 | _TCIA13_621_1 66 | _TCIA13_623_1 67 | _TCIA13_624_1 68 | _TCIA13_630_1 69 | _TCIA13_633_1 70 | _TCIA13_634_1 71 | _TCIA13_642_1 72 | _TCIA13_645_1 73 | _TCIA13_650_1 74 | _TCIA13_653_1 75 | _TCIA13_654_1 76 | _TMC_09043_1 77 | -------------------------------------------------------------------------------- /data/BraTS2Dpreprocessing-master/README.md: -------------------------------------------------------------------------------- 1 | # BraTS2Dpreprocessing 2 | 3 | -------------------------------------------------------------------------------- /data/BraTS3Dpreprocessing-master/.gitattributes: -------------------------------------------------------------------------------- 1 | # Auto detect text files and perform LF normalization 2 | * text=auto 3 | -------------------------------------------------------------------------------- /data/BraTS3Dpreprocessing-master/18hgg.csv: -------------------------------------------------------------------------------- 1 | _2013_10_1 2 | _2013_11_1 3 | _2013_12_1 4 | _2013_13_1 5 | _2013_14_1 6 | _2013_17_1 7 | _2013_18_1 8 | _2013_19_1 9 | _2013_20_1 10 | _2013_21_1 11 | _2013_22_1 12 | _2013_23_1 13 | _2013_25_1 14 | _2013_26_1 15 | _2013_27_1 16 | _2013_2_1 17 | _2013_3_1 18 | _2013_4_1 19 | _2013_5_1 20 | _2013_7_1 21 | _CBICA_AAB_1 22 | _CBICA_AAG_1 23 | _CBICA_AAL_1 24 | _CBICA_AAP_1 25 | _CBICA_ABB_1 26 | _CBICA_ABE_1 27 | _CBICA_ABM_1 28 | _CBICA_ABN_1 29 | _CBICA_ABO_1 30 | _CBICA_ABY_1 31 | _CBICA_ALN_1 32 | _CBICA_ALU_1 33 | _CBICA_ALX_1 34 | _CBICA_AME_1 35 | _CBICA_AMH_1 36 | _CBICA_ANG_1 37 | 
_CBICA_ANI_1 38 | _CBICA_ANP_1 39 | _CBICA_ANZ_1 40 | _CBICA_AOD_1 41 | _CBICA_AOH_1 42 | _CBICA_AOO_1 43 | _CBICA_AOP_1 44 | _CBICA_AOZ_1 45 | _CBICA_APR_1 46 | _CBICA_APY_1 47 | _CBICA_APZ_1 48 | _CBICA_AQA_1 49 | _CBICA_AQD_1 50 | _CBICA_AQG_1 51 | _CBICA_AQJ_1 52 | _CBICA_AQN_1 53 | _CBICA_AQO_1 54 | _CBICA_AQP_1 55 | _CBICA_AQQ_1 56 | _CBICA_AQR_1 57 | _CBICA_AQT_1 58 | _CBICA_AQU_1 59 | _CBICA_AQV_1 60 | _CBICA_AQY_1 61 | _CBICA_AQZ_1 62 | _CBICA_ARF_1 63 | _CBICA_ARW_1 64 | _CBICA_ARZ_1 65 | _CBICA_ASA_1 66 | _CBICA_ASE_1 67 | _CBICA_ASG_1 68 | _CBICA_ASH_1 69 | _CBICA_ASK_1 70 | _CBICA_ASN_1 71 | _CBICA_ASO_1 72 | _CBICA_ASU_1 73 | _CBICA_ASV_1 74 | _CBICA_ASW_1 75 | _CBICA_ASY_1 76 | _CBICA_ATB_1 77 | _CBICA_ATD_1 78 | _CBICA_ATF_1 79 | _CBICA_ATP_1 80 | _CBICA_ATV_1 81 | _CBICA_ATX_1 82 | _CBICA_AUN_1 83 | _CBICA_AUQ_1 84 | _CBICA_AUR_1 85 | _CBICA_AVG_1 86 | _CBICA_AVJ_1 87 | _CBICA_AVV_1 88 | _CBICA_AWG_1 89 | _CBICA_AWH_1 90 | _CBICA_AWI_1 91 | _CBICA_AXJ_1 92 | _CBICA_AXL_1 93 | _CBICA_AXM_1 94 | _CBICA_AXN_1 95 | _CBICA_AXO_1 96 | _CBICA_AXQ_1 97 | _CBICA_AXW_1 98 | _CBICA_AYA_1 99 | _CBICA_AYI_1 100 | _CBICA_AYU_1 101 | _CBICA_AYW_1 102 | _CBICA_AZD_1 103 | _CBICA_AZH_1 104 | _CBICA_BFB_1 105 | _CBICA_BFP_1 106 | _CBICA_BHB_1 107 | _CBICA_BHK_1 108 | _CBICA_BHM_1 109 | _TCIA01_131_1 110 | _TCIA01_147_1 111 | _TCIA01_150_1 112 | _TCIA01_180_1 113 | _TCIA01_186_1 114 | _TCIA01_190_1 115 | _TCIA01_201_1 116 | _TCIA01_203_1 117 | _TCIA01_221_1 118 | _TCIA01_231_1 119 | _TCIA01_235_1 120 | _TCIA01_335_1 121 | _TCIA01_378_1 122 | _TCIA01_390_1 123 | _TCIA01_401_1 124 | _TCIA01_411_1 125 | _TCIA01_412_1 126 | _TCIA01_425_1 127 | _TCIA01_429_1 128 | _TCIA01_448_1 129 | _TCIA01_460_1 130 | _TCIA01_499_1 131 | _TCIA02_117_1 132 | _TCIA02_118_1 133 | _TCIA02_135_1 134 | _TCIA02_151_1 135 | _TCIA02_168_1 136 | _TCIA02_171_1 137 | _TCIA02_179_1 138 | _TCIA02_198_1 139 | _TCIA02_208_1 140 | _TCIA02_222_1 141 | _TCIA02_226_1 142 | _TCIA02_274_1 143 | _TCIA02_283_1 144 | _TCIA02_290_1 145 | _TCIA02_300_1 146 | _TCIA02_309_1 147 | _TCIA02_314_1 148 | _TCIA02_321_1 149 | _TCIA02_322_1 150 | _TCIA02_331_1 151 | _TCIA02_368_1 152 | _TCIA02_370_1 153 | _TCIA02_374_1 154 | _TCIA02_377_1 155 | _TCIA02_394_1 156 | _TCIA02_430_1 157 | _TCIA02_455_1 158 | _TCIA02_471_1 159 | _TCIA02_473_1 160 | _TCIA02_491_1 161 | _TCIA02_605_1 162 | _TCIA02_606_1 163 | _TCIA02_607_1 164 | _TCIA02_608_1 165 | _TCIA03_121_1 166 | _TCIA03_133_1 167 | _TCIA03_138_1 168 | _TCIA03_199_1 169 | _TCIA03_257_1 170 | _TCIA03_265_1 171 | _TCIA03_296_1 172 | _TCIA03_338_1 173 | _TCIA03_375_1 174 | _TCIA03_419_1 175 | _TCIA03_474_1 176 | _TCIA03_498_1 177 | _TCIA04_111_1 178 | _TCIA04_149_1 179 | _TCIA04_192_1 180 | _TCIA04_328_1 181 | _TCIA04_343_1 182 | _TCIA04_361_1 183 | _TCIA04_437_1 184 | _TCIA04_479_1 185 | _TCIA05_277_1 186 | _TCIA05_396_1 187 | _TCIA05_444_1 188 | _TCIA05_478_1 189 | _TCIA06_165_1 190 | _TCIA06_184_1 191 | _TCIA06_211_1 192 | _TCIA06_247_1 193 | _TCIA06_332_1 194 | _TCIA06_372_1 195 | _TCIA06_409_1 196 | _TCIA06_603_1 197 | _TCIA08_105_1 198 | _TCIA08_113_1 199 | _TCIA08_162_1 200 | _TCIA08_167_1 201 | _TCIA08_205_1 202 | _TCIA08_218_1 203 | _TCIA08_234_1 204 | _TCIA08_242_1 205 | _TCIA08_278_1 206 | _TCIA08_280_1 207 | _TCIA08_319_1 208 | _TCIA08_406_1 209 | _TCIA08_436_1 210 | _TCIA08_469_1 211 | -------------------------------------------------------------------------------- /data/BraTS3Dpreprocessing-master/18lgg.csv: -------------------------------------------------------------------------------- 1 
| _2013_0_1 2 | _2013_15_1 3 | _2013_16_1 4 | _2013_1_1 5 | _2013_24_1 6 | _2013_28_1 7 | _2013_29_1 8 | _2013_6_1 9 | _2013_8_1 10 | _2013_9_1 11 | _TCIA09_141_1 12 | _TCIA09_177_1 13 | _TCIA09_254_1 14 | _TCIA09_255_1 15 | _TCIA09_312_1 16 | _TCIA09_402_1 17 | _TCIA09_428_1 18 | _TCIA09_451_1 19 | _TCIA09_462_1 20 | _TCIA09_493_1 21 | _TCIA09_620_1 22 | _TCIA10_103_1 23 | _TCIA10_109_1 24 | _TCIA10_130_1 25 | _TCIA10_152_1 26 | _TCIA10_175_1 27 | _TCIA10_202_1 28 | _TCIA10_241_1 29 | _TCIA10_261_1 30 | _TCIA10_266_1 31 | _TCIA10_276_1 32 | _TCIA10_282_1 33 | _TCIA10_299_1 34 | _TCIA10_307_1 35 | _TCIA10_310_1 36 | _TCIA10_325_1 37 | _TCIA10_330_1 38 | _TCIA10_346_1 39 | _TCIA10_351_1 40 | _TCIA10_387_1 41 | _TCIA10_393_1 42 | _TCIA10_408_1 43 | _TCIA10_410_1 44 | _TCIA10_413_1 45 | _TCIA10_420_1 46 | _TCIA10_442_1 47 | _TCIA10_449_1 48 | _TCIA10_490_1 49 | _TCIA10_625_1 50 | _TCIA10_628_1 51 | _TCIA10_629_1 52 | _TCIA10_632_1 53 | _TCIA10_637_1 54 | _TCIA10_639_1 55 | _TCIA10_640_1 56 | _TCIA10_644_1 57 | _TCIA12_101_1 58 | _TCIA12_249_1 59 | _TCIA12_298_1 60 | _TCIA12_466_1 61 | _TCIA12_470_1 62 | _TCIA12_480_1 63 | _TCIA13_615_1 64 | _TCIA13_618_1 65 | _TCIA13_621_1 66 | _TCIA13_623_1 67 | _TCIA13_624_1 68 | _TCIA13_630_1 69 | _TCIA13_633_1 70 | _TCIA13_634_1 71 | _TCIA13_642_1 72 | _TCIA13_645_1 73 | _TCIA13_650_1 74 | _TCIA13_653_1 75 | _TCIA13_654_1 76 | -------------------------------------------------------------------------------- /data/BraTS3Dpreprocessing-master/19hgg.csv: -------------------------------------------------------------------------------- 1 | _2013_10_1 2 | _2013_11_1 3 | _2013_12_1 4 | _2013_13_1 5 | _2013_14_1 6 | _2013_17_1 7 | _2013_18_1 8 | _2013_19_1 9 | _2013_20_1 10 | _2013_21_1 11 | _2013_22_1 12 | _2013_23_1 13 | _2013_25_1 14 | _2013_26_1 15 | _2013_27_1 16 | _2013_2_1 17 | _2013_3_1 18 | _2013_4_1 19 | _2013_5_1 20 | _2013_7_1 21 | _CBICA_AAB_1 22 | _CBICA_AAG_1 23 | _CBICA_AAL_1 24 | _CBICA_AAP_1 25 | _CBICA_ABB_1 26 | _CBICA_ABE_1 27 | _CBICA_ABM_1 28 | _CBICA_ABN_1 29 | _CBICA_ABO_1 30 | _CBICA_ABY_1 31 | _CBICA_ALN_1 32 | _CBICA_ALU_1 33 | _CBICA_ALX_1 34 | _CBICA_AME_1 35 | _CBICA_AMH_1 36 | _CBICA_ANG_1 37 | _CBICA_ANI_1 38 | _CBICA_ANP_1 39 | _CBICA_ANV_1 40 | _CBICA_ANZ_1 41 | _CBICA_AOC_1 42 | _CBICA_AOD_1 43 | _CBICA_AOH_1 44 | _CBICA_AOO_1 45 | _CBICA_AOP_1 46 | _CBICA_AOS_1 47 | _CBICA_AOZ_1 48 | _CBICA_APK_1 49 | _CBICA_APR_1 50 | _CBICA_APY_1 51 | _CBICA_APZ_1 52 | _CBICA_AQA_1 53 | _CBICA_AQD_1 54 | _CBICA_AQG_1 55 | _CBICA_AQJ_1 56 | _CBICA_AQN_1 57 | _CBICA_AQO_1 58 | _CBICA_AQP_1 59 | _CBICA_AQQ_1 60 | _CBICA_AQR_1 61 | _CBICA_AQT_1 62 | _CBICA_AQU_1 63 | _CBICA_AQV_1 64 | _CBICA_AQY_1 65 | _CBICA_AQZ_1 66 | _CBICA_ARF_1 67 | _CBICA_ARW_1 68 | _CBICA_ARZ_1 69 | _CBICA_ASA_1 70 | _CBICA_ASE_1 71 | _CBICA_ASF_1 72 | _CBICA_ASG_1 73 | _CBICA_ASH_1 74 | _CBICA_ASK_1 75 | _CBICA_ASN_1 76 | _CBICA_ASO_1 77 | _CBICA_ASR_1 78 | _CBICA_ASU_1 79 | _CBICA_ASV_1 80 | _CBICA_ASW_1 81 | _CBICA_ASY_1 82 | _CBICA_ATB_1 83 | _CBICA_ATD_1 84 | _CBICA_ATF_1 85 | _CBICA_ATN_1 86 | _CBICA_ATP_1 87 | _CBICA_ATV_1 88 | _CBICA_ATX_1 89 | _CBICA_AUA_1 90 | _CBICA_AUN_1 91 | _CBICA_AUQ_1 92 | _CBICA_AUR_1 93 | _CBICA_AUW_1 94 | _CBICA_AUX_1 95 | _CBICA_AVB_1 96 | _CBICA_AVF_1 97 | _CBICA_AVG_1 98 | _CBICA_AVJ_1 99 | _CBICA_AVT_1 100 | _CBICA_AVV_1 101 | _CBICA_AWG_1 102 | _CBICA_AWH_1 103 | _CBICA_AWI_1 104 | _CBICA_AWV_1 105 | _CBICA_AWX_1 106 | _CBICA_AXJ_1 107 | _CBICA_AXL_1 108 | _CBICA_AXM_1 109 | _CBICA_AXN_1 110 | _CBICA_AXO_1 111 | _CBICA_AXQ_1 112 | 
_CBICA_AXW_1 113 | _CBICA_AYA_1 114 | _CBICA_AYC_1 115 | _CBICA_AYG_1 116 | _CBICA_AYI_1 117 | _CBICA_AYU_1 118 | _CBICA_AYW_1 119 | _CBICA_AZD_1 120 | _CBICA_AZH_1 121 | _CBICA_BAN_1 122 | _CBICA_BAP_1 123 | _CBICA_BAX_1 124 | _CBICA_BBG_1 125 | _CBICA_BCF_1 126 | _CBICA_BCL_1 127 | _CBICA_BDK_1 128 | _CBICA_BEM_1 129 | _CBICA_BFB_1 130 | _CBICA_BFP_1 131 | _CBICA_BGE_1 132 | _CBICA_BGG_1 133 | _CBICA_BGN_1 134 | _CBICA_BGO_1 135 | _CBICA_BGR_1 136 | _CBICA_BGT_1 137 | _CBICA_BGW_1 138 | _CBICA_BGX_1 139 | _CBICA_BHB_1 140 | _CBICA_BHK_1 141 | _CBICA_BHM_1 142 | _CBICA_BHQ_1 143 | _CBICA_BHV_1 144 | _CBICA_BHZ_1 145 | _CBICA_BIC_1 146 | _CBICA_BJY_1 147 | _CBICA_BKV_1 148 | _CBICA_BLJ_1 149 | _CBICA_BNR_1 150 | _TCIA01_131_1 151 | _TCIA01_147_1 152 | _TCIA01_150_1 153 | _TCIA01_180_1 154 | _TCIA01_186_1 155 | _TCIA01_190_1 156 | _TCIA01_201_1 157 | _TCIA01_203_1 158 | _TCIA01_221_1 159 | _TCIA01_231_1 160 | _TCIA01_235_1 161 | _TCIA01_335_1 162 | _TCIA01_378_1 163 | _TCIA01_390_1 164 | _TCIA01_401_1 165 | _TCIA01_411_1 166 | _TCIA01_412_1 167 | _TCIA01_425_1 168 | _TCIA01_429_1 169 | _TCIA01_448_1 170 | _TCIA01_460_1 171 | _TCIA01_499_1 172 | _TCIA02_117_1 173 | _TCIA02_118_1 174 | _TCIA02_135_1 175 | _TCIA02_151_1 176 | _TCIA02_168_1 177 | _TCIA02_171_1 178 | _TCIA02_179_1 179 | _TCIA02_198_1 180 | _TCIA02_208_1 181 | _TCIA02_222_1 182 | _TCIA02_226_1 183 | _TCIA02_274_1 184 | _TCIA02_283_1 185 | _TCIA02_290_1 186 | _TCIA02_300_1 187 | _TCIA02_309_1 188 | _TCIA02_314_1 189 | _TCIA02_321_1 190 | _TCIA02_322_1 191 | _TCIA02_331_1 192 | _TCIA02_368_1 193 | _TCIA02_370_1 194 | _TCIA02_374_1 195 | _TCIA02_377_1 196 | _TCIA02_394_1 197 | _TCIA02_430_1 198 | _TCIA02_455_1 199 | _TCIA02_471_1 200 | _TCIA02_473_1 201 | _TCIA02_491_1 202 | _TCIA02_605_1 203 | _TCIA02_606_1 204 | _TCIA02_607_1 205 | _TCIA02_608_1 206 | _TCIA03_121_1 207 | _TCIA03_133_1 208 | _TCIA03_138_1 209 | _TCIA03_199_1 210 | _TCIA03_257_1 211 | _TCIA03_265_1 212 | _TCIA03_296_1 213 | _TCIA03_338_1 214 | _TCIA03_375_1 215 | _TCIA03_419_1 216 | _TCIA03_474_1 217 | _TCIA03_498_1 218 | _TCIA04_111_1 219 | _TCIA04_149_1 220 | _TCIA04_192_1 221 | _TCIA04_328_1 222 | _TCIA04_343_1 223 | _TCIA04_361_1 224 | _TCIA04_437_1 225 | _TCIA04_479_1 226 | _TCIA05_277_1 227 | _TCIA05_396_1 228 | _TCIA05_444_1 229 | _TCIA05_478_1 230 | _TCIA06_165_1 231 | _TCIA06_184_1 232 | _TCIA06_211_1 233 | _TCIA06_247_1 234 | _TCIA06_332_1 235 | _TCIA06_372_1 236 | _TCIA06_409_1 237 | _TCIA06_603_1 238 | _TCIA08_105_1 239 | _TCIA08_113_1 240 | _TCIA08_162_1 241 | _TCIA08_167_1 242 | _TCIA08_205_1 243 | _TCIA08_218_1 244 | _TCIA08_234_1 245 | _TCIA08_242_1 246 | _TCIA08_278_1 247 | _TCIA08_280_1 248 | _TCIA08_319_1 249 | _TCIA08_406_1 250 | _TCIA08_436_1 251 | _TCIA08_469_1 252 | _TMC_06290_1 253 | _TMC_06643_1 254 | _TMC_11964_1 255 | _TMC_12866_1 256 | _TMC_15477_1 257 | _TMC_21360_1 258 | _TMC_27374_1 259 | _TMC_30014_1 260 | -------------------------------------------------------------------------------- /data/BraTS3Dpreprocessing-master/19lgg.csv: -------------------------------------------------------------------------------- 1 | _2013_0_1 2 | _2013_15_1 3 | _2013_16_1 4 | _2013_1_1 5 | _2013_24_1 6 | _2013_28_1 7 | _2013_29_1 8 | _2013_6_1 9 | _2013_8_1 10 | _2013_9_1 11 | _TCIA09_141_1 12 | _TCIA09_177_1 13 | _TCIA09_254_1 14 | _TCIA09_255_1 15 | _TCIA09_312_1 16 | _TCIA09_402_1 17 | _TCIA09_428_1 18 | _TCIA09_451_1 19 | _TCIA09_462_1 20 | _TCIA09_493_1 21 | _TCIA09_620_1 22 | _TCIA10_103_1 23 | _TCIA10_109_1 24 | _TCIA10_130_1 25 | _TCIA10_152_1 26 
| _TCIA10_175_1 27 | _TCIA10_202_1 28 | _TCIA10_241_1 29 | _TCIA10_261_1 30 | _TCIA10_266_1 31 | _TCIA10_276_1 32 | _TCIA10_282_1 33 | _TCIA10_299_1 34 | _TCIA10_307_1 35 | _TCIA10_310_1 36 | _TCIA10_325_1 37 | _TCIA10_330_1 38 | _TCIA10_346_1 39 | _TCIA10_351_1 40 | _TCIA10_387_1 41 | _TCIA10_393_1 42 | _TCIA10_408_1 43 | _TCIA10_410_1 44 | _TCIA10_413_1 45 | _TCIA10_420_1 46 | _TCIA10_442_1 47 | _TCIA10_449_1 48 | _TCIA10_490_1 49 | _TCIA10_625_1 50 | _TCIA10_628_1 51 | _TCIA10_629_1 52 | _TCIA10_632_1 53 | _TCIA10_637_1 54 | _TCIA10_639_1 55 | _TCIA10_640_1 56 | _TCIA10_644_1 57 | _TCIA12_101_1 58 | _TCIA12_249_1 59 | _TCIA12_298_1 60 | _TCIA12_466_1 61 | _TCIA12_470_1 62 | _TCIA12_480_1 63 | _TCIA13_615_1 64 | _TCIA13_618_1 65 | _TCIA13_621_1 66 | _TCIA13_623_1 67 | _TCIA13_624_1 68 | _TCIA13_630_1 69 | _TCIA13_633_1 70 | _TCIA13_634_1 71 | _TCIA13_642_1 72 | _TCIA13_645_1 73 | _TCIA13_650_1 74 | _TCIA13_653_1 75 | _TCIA13_654_1 76 | _TMC_09043_1 77 |
--------------------------------------------------------------------------------
/data/BraTS3Dpreprocessing-master/README.md:
--------------------------------------------------------------------------------
# BraTS3Dpreprocessing
--------------------------------------------------------------------------------
/model2D/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Merofine/BrainTumorSegmentation/fa97d9268df32e37769a808a70fa257928475d8a/model2D/.DS_Store
--------------------------------------------------------------------------------
/model2D/DenseUnet_BraTs-master/.gitattributes:
--------------------------------------------------------------------------------
# Auto detect text files and perform LF normalization
* text=auto
--------------------------------------------------------------------------------
/model2D/DenseUnet_BraTs-master/README.md:
--------------------------------------------------------------------------------
# DenseUnet_BraTs
--------------------------------------------------------------------------------
/model2D/DenseUnet_BraTs-master/dataset.py:
--------------------------------------------------------------------------------
import numpy as np
import cv2  # https://www.jianshu.com/p/f2e88197e81d
import random

from skimage.io import imread
from skimage import color

import torch
import torch.utils.data
from torchvision import datasets, models, transforms


class Dataset(torch.utils.data.Dataset):

    def __init__(self, args, img_paths, mask_paths, aug=False):
        self.args = args
        self.img_paths = img_paths
        self.mask_paths = mask_paths
        self.aug = aug

    def __len__(self):
        return len(self.img_paths)

    def __getitem__(self, idx):
        img_path = self.img_paths[idx]
        mask_path = self.mask_paths[idx]
        # Reading numpy (.npy) data
        npimage = np.load(img_path)
        npmask = np.load(mask_path)
        npimage = npimage.transpose((2, 0, 1))

        # BraTS labels: 1 = necrotic/non-enhancing core, 2 = edema, 4 = enhancing tumor
        WT_Label = npmask.copy()          # whole tumor: labels 1, 2 and 4
        WT_Label[npmask == 1] = 1.
        WT_Label[npmask == 2] = 1.
        WT_Label[npmask == 4] = 1.
        TC_Label = npmask.copy()          # tumor core: labels 1 and 4
        TC_Label[npmask == 1] = 1.
        TC_Label[npmask == 2] = 0.
        TC_Label[npmask == 4] = 1.
        ET_Label = npmask.copy()          # enhancing tumor: label 4 only
        ET_Label[npmask == 1] = 0.
        ET_Label[npmask == 2] = 0.
        ET_Label[npmask == 4] = 1.
        nplabel = np.empty((160, 160, 3))
        nplabel[:, :, 0] = WT_Label
        nplabel[:, :, 1] = TC_Label
        nplabel[:, :, 2] = ET_Label
        nplabel = nplabel.transpose((2, 0, 1))

        nplabel = nplabel.astype("float32")
        npimage = npimage.astype("float32")

        return npimage, nplabel

        # Reading image files (e.g. jpg, png) instead of .npy arrays
        '''
        image = imread(img_path)
        mask = imread(mask_path)

        image = image.astype('float32') / 255
        mask = mask.astype('float32') / 255

        if self.aug:
            if random.uniform(0, 1) > 0.5:
                image = image[:, ::-1, :].copy()
                mask = mask[:, ::-1].copy()
            if random.uniform(0, 1) > 0.5:
                image = image[::-1, :, :].copy()
                mask = mask[::-1, :].copy()

        image = color.gray2rgb(image)
        #image = image[:,:,np.newaxis]
        image = image.transpose((2, 0, 1))
        mask = mask[:,:,np.newaxis]
        mask = mask.transpose((2, 0, 1))
        return image, mask
        '''
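The __getitem__ above converts the BraTS label convention (1 = necrotic/non-enhancing core, 2 = edema, 4 = enhancing tumor) into three overlapping binary channels: whole tumor (1, 2, 4), tumor core (1, 4) and enhancing tumor (4). A minimal standalone sketch of the same mapping on a toy mask, using np.isin rather than the copy-and-overwrite passes (the 2x2 mask is illustrative only, not real data):

    import numpy as np

    npmask = np.array([[0, 1], [2, 4]])                  # toy BraTS-style mask
    wt = np.isin(npmask, (1, 2, 4)).astype(np.float32)   # whole tumor
    tc = np.isin(npmask, (1, 4)).astype(np.float32)      # tumor core
    et = (npmask == 4).astype(np.float32)                # enhancing tumor
    nplabel = np.stack([wt, tc, et])                     # (3, H, W), the layout the Dataset returns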
--------------------------------------------------------------------------------
/model2D/DenseUnet_BraTs-master/denseUnet.py:
--------------------------------------------------------------------------------
import torch
import torch.nn as nn
import torch.nn.functional as F


class Single_level_densenet(nn.Module):
    def __init__(self, filters, num_conv=4):
        super(Single_level_densenet, self).__init__()
        self.num_conv = num_conv
        self.conv_list = nn.ModuleList()
        self.bn_list = nn.ModuleList()
        for i in range(self.num_conv):
            self.conv_list.append(nn.Conv2d(filters, filters, 3, padding=1))
            self.bn_list.append(nn.BatchNorm2d(filters))

    def forward(self, x):
        outs = []
        outs.append(x)
        for i in range(self.num_conv):
            temp_out = self.conv_list[i](outs[i])
            if i > 0:
                # dense connectivity: add all earlier feature maps
                for j in range(i):
                    temp_out += outs[j]
            outs.append(F.relu(self.bn_list[i](temp_out)))
        out_final = outs[-1]
        del outs
        return out_final


class Down_sample(nn.Module):
    def __init__(self, kernel_size=2, stride=2):
        super(Down_sample, self).__init__()
        self.down_sample_layer = nn.MaxPool2d(kernel_size, stride)

    def forward(self, x):
        y = self.down_sample_layer(x)
        return y, x  # pooled map plus the pre-pool map for the skip connection


class Upsample_n_Concat(nn.Module):
    def __init__(self, filters):
        super(Upsample_n_Concat, self).__init__()
        self.upsample_layer = nn.ConvTranspose2d(filters, filters, 4, padding=1, stride=2)
        self.conv = nn.Conv2d(2 * filters, filters, 3, padding=1)
        self.bn = nn.BatchNorm2d(filters)

    def forward(self, x, y):
        x = self.upsample_layer(x)
        x = torch.cat([x, y], dim=1)
        x = F.relu(self.bn(self.conv(x)))
        return x


class Dense_Unet(nn.Module):
    def __init__(self, args):
        self.args = args
        num_conv = 4
        filters = 64
        in_chan = 4
        out_chan = 3
        super(Dense_Unet, self).__init__()
        self.conv1 = nn.Conv2d(in_chan, filters, 1)
        self.d1 = Single_level_densenet(filters, num_conv)
        self.down1 = Down_sample()
        self.d2 = Single_level_densenet(filters, num_conv)
        self.down2 = Down_sample()
        self.d3 = Single_level_densenet(filters, num_conv)
        self.down3 = Down_sample()
        self.d4 = Single_level_densenet(filters, num_conv)
        self.down4 = Down_sample()
        self.bottom = Single_level_densenet(filters, num_conv)
        self.up4 = Upsample_n_Concat(filters)
        self.u4 = Single_level_densenet(filters, num_conv)
        self.up3 = Upsample_n_Concat(filters)
        self.u3 = Single_level_densenet(filters, num_conv)
        self.up2 = Upsample_n_Concat(filters)
        self.u2 = Single_level_densenet(filters, num_conv)
        self.up1 = Upsample_n_Concat(filters)
        self.u1 = Single_level_densenet(filters, num_conv)
        self.outconv = nn.Conv2d(filters, out_chan, 1)

        # self.outconvp1 = nn.Conv2d(filters,out_chan, 1)
        # self.outconvm1 = nn.Conv2d(filters,out_chan, 1)

    def forward(self, x):
        #bsz = x.shape[0]
        x = self.conv1(x)
        x, y1 = self.down1(self.d1(x))
        x, y2 = self.down2(self.d2(x))  # each level uses its own pooling layer
        x, y3 = self.down3(self.d3(x))
        x, y4 = self.down4(self.d4(x))
        x = self.bottom(x)
        x = self.u4(self.up4(x, y4))
        x = self.u3(self.up3(x, y3))
        x = self.u2(self.up2(x, y2))
        x = self.u1(self.up1(x, y1))
        x1 = self.outconv(x)
        # xm1 = self.outconvm1(x)
        # xp1 = self.outconvp1(x)

        return x1
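Each Down_sample halves the spatial size and each Upsample_n_Concat doubles it exactly (output = (in - 1) * 2 - 2 + 4 = 2 * in), so the input height and width must be divisible by 16; the 160x160 slices produced by the preprocessing qualify. A quick shape sanity check, assuming an empty stand-in for args since Dense_Unet only stores it:

    import argparse
    import torch
    from denseUnet import Dense_Unet

    model = Dense_Unet(argparse.Namespace())   # args is stored but never read
    x = torch.randn(1, 4, 160, 160)            # one slice, 4 MRI modalities
    with torch.no_grad():
        y = model(x)
    print(y.shape)                             # expected: torch.Size([1, 3, 160, 160])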
--------------------------------------------------------------------------------
/model2D/DenseUnet_BraTs-master/losses.py:
--------------------------------------------------------------------------------
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np

try:
    from LovaszSoftmax.pytorch.lovasz_losses import lovasz_hinge
except ImportError:
    pass


class BCEDiceLoss(nn.Module):
    def __init__(self):
        super(BCEDiceLoss, self).__init__()

    def forward(self, input, target):
        bce = F.binary_cross_entropy_with_logits(input, target)
        smooth = 1e-5
        input = torch.sigmoid(input)
        num = target.size(0)
        input = input.view(num, -1)
        target = target.view(num, -1)
        intersection = (input * target)
        dice = (2. * intersection.sum(1) + smooth) / (input.sum(1) + target.sum(1) + smooth)
        dice = 1 - dice.sum() / num
        return 0.5 * bce + dice


class LovaszHingeLoss(nn.Module):
    def __init__(self):
        super(LovaszHingeLoss, self).__init__()

    def forward(self, input, target):
        input = input.squeeze(1)
        target = target.squeeze(1)
        loss = lovasz_hinge(input, target, per_image=True)

        return loss
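BCEDiceLoss blends binary cross-entropy on the raw logits with a per-sample soft Dice term, returning 0.5 * BCE + (1 - mean Dice); the 1e-5 smoothing keeps the Dice ratio defined for slices that contain no tumor. A small sanity check on random tensors shaped like the three-channel WT/TC/ET targets (values are illustrative):

    import torch
    from losses import BCEDiceLoss

    criterion = BCEDiceLoss()
    logits = torch.randn(2, 3, 160, 160)                    # raw network outputs
    target = torch.randint(0, 2, (2, 3, 160, 160)).float()  # binary labels
    print(criterion(logits, target).item())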
--------------------------------------------------------------------------------
/model2D/DenseUnet_BraTs-master/metrics.py:
--------------------------------------------------------------------------------
import numpy as np

import torch
import torch.nn.functional as F


def mean_iou_per_image(y_true_in, y_pred_in, print_table=False):
    # per-image, object-level average precision over IoU thresholds 0.50-0.95
    if True:  # not np.sum(y_true_in.flatten()) == 0:
        labels = y_true_in
        y_pred = y_pred_in

        true_objects = 2
        pred_objects = 2

        intersection = np.histogram2d(labels.flatten(), y_pred.flatten(), bins=(true_objects, pred_objects))[0]

        # Compute areas (needed for finding the union between all objects)
        area_true = np.histogram(labels, bins=true_objects)[0]
        area_pred = np.histogram(y_pred, bins=pred_objects)[0]
        area_true = np.expand_dims(area_true, -1)
        area_pred = np.expand_dims(area_pred, 0)

        # Compute union
        union = area_true + area_pred - intersection

        # Exclude background from the analysis
        intersection = intersection[1:, 1:]
        union = union[1:, 1:]
        union[union == 0] = 1e-9

        # Compute the intersection over union
        iou = intersection / union

        # Precision helper function
        def precision_at(threshold, iou):
            matches = iou > threshold
            true_positives = np.sum(matches, axis=1) == 1   # Correct objects
            false_positives = np.sum(matches, axis=0) == 0  # Extra objects (unmatched predictions)
            false_negatives = np.sum(matches, axis=1) == 0  # Missed objects (unmatched ground truth)
            tp, fp, fn = np.sum(true_positives), np.sum(false_positives), np.sum(false_negatives)
            return tp, fp, fn

        # Loop over IoU thresholds
        prec = []
        if print_table:
            print("Thresh\tTP\tFP\tFN\tPrec.")
        for t in np.arange(0.5, 1.0, 0.05):
            tp, fp, fn = precision_at(t, iou)
            if (tp + fp + fn) > 0:
                p = tp / (tp + fp + fn)
            else:
                p = 0
            if print_table:
                print("{:1.3f}\t{}\t{}\t{}\t{:1.3f}".format(t, tp, fp, fn, p))
            prec.append(p)

        if print_table:
            print("AP\t-\t-\t-\t{:1.3f}".format(np.mean(prec)))
        return np.mean(prec)

    else:
        if np.sum(y_pred_in.flatten()) == 0:
            return 1
        else:
            return 0


def batch_iou(output, target):
    output = torch.sigmoid(output).data.cpu().numpy() > 0.5
    target = (target.data.cpu().numpy() > 0.5).astype('int')
    output = output[:, 0, :, :]
    target = target[:, 0, :, :]

    ious = []
    for i in range(output.shape[0]):
        ious.append(mean_iou_per_image(output[i], target[i]))

    return np.mean(ious)


def mean_iou(output, target):
    smooth = 1e-5

    output = torch.sigmoid(output).data.cpu().numpy()
    target = target.data.cpu().numpy()
    ious = []
    for t in np.arange(0.5, 1.0, 0.05):
        output_ = output > t
        target_ = target > t
        intersection = (output_ & target_).sum()
        union = (output_ | target_).sum()
        iou = (intersection + smooth) / (union + smooth)
        ious.append(iou)

    return np.mean(ious)


def iou_score(output, target):
    smooth = 1e-5

    if torch.is_tensor(output):
        output = torch.sigmoid(output).data.cpu().numpy()
    if torch.is_tensor(target):
        target = target.data.cpu().numpy()
    output_ = output > 0.5
    target_ = target > 0.5
    intersection = (output_ & target_).sum()
    union = (output_ | target_).sum()

    return (intersection + smooth) / (union + smooth)


def dice_coef(output, target):
    smooth = 1e-5

    if torch.is_tensor(output):
        output = torch.sigmoid(output).data.cpu().numpy()
    if torch.is_tensor(target):
        target = target.data.cpu().numpy()
    #output = torch.sigmoid(output).view(-1).data.cpu().numpy()
    #target = target.view(-1).data.cpu().numpy()

    intersection = (output * target).sum()

    return (2. * intersection + smooth) / \
        (output.sum() + target.sum() + smooth)


def accuracy(output, target):
    output = torch.sigmoid(output).view(-1).data.cpu().numpy()
    output = (np.round(output)).astype('int')
    target = target.view(-1).data.cpu().numpy()
    target = (np.round(target)).astype('int')

    return (output == target).sum() / len(output)


def ppv(output, target):
    smooth = 1e-5
    if torch.is_tensor(output):
        output = torch.sigmoid(output).data.cpu().numpy()
    if torch.is_tensor(target):
        target = target.data.cpu().numpy()
    intersection = (output * target).sum()
    return (intersection + smooth) / \
        (output.sum() + smooth)


def sensitivity(output, target):
    smooth = 1e-5

    if torch.is_tensor(output):
        output = torch.sigmoid(output).data.cpu().numpy()
    if torch.is_tensor(target):
        target = target.data.cpu().numpy()

    intersection = (output * target).sum()

    return (intersection + smooth) / \
        (target.sum() + smooth)
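Note that dice_coef, ppv and sensitivity operate on the soft sigmoid probabilities without thresholding, while iou_score binarizes at 0.5; these four accept either tensors of logits or already-converted numpy arrays. A quick check that a confident, perfectly matching prediction scores close to 1 on both Dice and IoU:

    import torch
    from metrics import dice_coef, iou_score

    target = torch.randint(0, 2, (1, 3, 160, 160)).float()
    logits = (target * 2 - 1) * 10    # large +/- logits matching the target exactly
    print(dice_coef(logits, target))  # ~1.0
    print(iou_score(logits, target))  # ~1.0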
--------------------------------------------------------------------------------
/model2D/DenseUnet_BraTs-master/utils.py:
--------------------------------------------------------------------------------
import argparse


def str2bool(v):
    # argparse passes strings, so compare against string literals
    if v.lower() in ['true', '1']:
        return True
    elif v.lower() in ['false', '0']:
        return False
    else:
        raise argparse.ArgumentTypeError('Boolean value expected.')


def count_params(model):
    return sum(p.numel() for p in model.parameters() if p.requires_grad)
--------------------------------------------------------------------------------
/model2D/FCN2D_For_BraTs-master/.gitattributes:
--------------------------------------------------------------------------------
# Auto detect text files and perform LF normalization
* text=auto
--------------------------------------------------------------------------------
/model2D/FCN2D_For_BraTs-master/README.md:
--------------------------------------------------------------------------------
# FCN2D
--------------------------------------------------------------------------------
/model2D/FCN2D_For_BraTs-master/__pycache__/FCN.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Merofine/BrainTumorSegmentation/fa97d9268df32e37769a808a70fa257928475d8a/model2D/FCN2D_For_BraTs-master/__pycache__/FCN.cpython-36.pyc
--------------------------------------------------------------------------------
/model2D/FCN2D_For_BraTs-master/__pycache__/dataset.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Merofine/BrainTumorSegmentation/fa97d9268df32e37769a808a70fa257928475d8a/model2D/FCN2D_For_BraTs-master/__pycache__/dataset.cpython-36.pyc
--------------------------------------------------------------------------------
/model2D/FCN2D_For_BraTs-master/__pycache__/losses.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Merofine/BrainTumorSegmentation/fa97d9268df32e37769a808a70fa257928475d8a/model2D/FCN2D_For_BraTs-master/__pycache__/losses.cpython-36.pyc
--------------------------------------------------------------------------------
/model2D/FCN2D_For_BraTs-master/__pycache__/metrics.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Merofine/BrainTumorSegmentation/fa97d9268df32e37769a808a70fa257928475d8a/model2D/FCN2D_For_BraTs-master/__pycache__/metrics.cpython-36.pyc
--------------------------------------------------------------------------------
/model2D/FCN2D_For_BraTs-master/__pycache__/utils.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Merofine/BrainTumorSegmentation/fa97d9268df32e37769a808a70fa257928475d8a/model2D/FCN2D_For_BraTs-master/__pycache__/utils.cpython-36.pyc
--------------------------------------------------------------------------------
/model2D/FCN2D_For_BraTs-master/dataset.py:
--------------------------------------------------------------------------------
import numpy as np
import cv2  # https://www.jianshu.com/p/f2e88197e81d
import random

from skimage.io import imread
from skimage import color

import torch
import torch.utils.data
from torchvision import datasets, models, transforms


class Dataset(torch.utils.data.Dataset):

    def __init__(self, args, img_paths, mask_paths, aug=False):
        self.args = args
        self.img_paths = img_paths
        self.mask_paths = mask_paths
        self.aug = aug

    def __len__(self):
        return len(self.img_paths)

    def __getitem__(self, idx):
        img_path = self.img_paths[idx]
        mask_path = self.mask_paths[idx]
        # Reading numpy (.npy) data
        npimage = np.load(img_path)
        npmask = np.load(mask_path)
        npimage = npimage.transpose((2, 0, 1))

        # BraTS labels: 1 = necrotic/non-enhancing core, 2 = edema, 4 = enhancing tumor
        WT_Label = npmask.copy()          # whole tumor: labels 1, 2 and 4
        WT_Label[npmask == 1] = 1.
        WT_Label[npmask == 2] = 1.
        WT_Label[npmask == 4] = 1.
        TC_Label = npmask.copy()          # tumor core: labels 1 and 4
        TC_Label[npmask == 1] = 1.
        TC_Label[npmask == 2] = 0.
        TC_Label[npmask == 4] = 1.
        ET_Label = npmask.copy()          # enhancing tumor: label 4 only
        ET_Label[npmask == 1] = 0.
        ET_Label[npmask == 2] = 0.
        ET_Label[npmask == 4] = 1.
        nplabel = np.empty((160, 160, 3))
        nplabel[:, :, 0] = WT_Label
        nplabel[:, :, 1] = TC_Label
        nplabel[:, :, 2] = ET_Label
        nplabel = nplabel.transpose((2, 0, 1))

        nplabel = nplabel.astype("float32")
        npimage = npimage.astype("float32")

        return npimage, nplabel

        # Reading image files (e.g. jpg, png) instead of .npy arrays
        '''
        image = imread(img_path)
        mask = imread(mask_path)

        image = image.astype('float32') / 255
        mask = mask.astype('float32') / 255

        if self.aug:
            if random.uniform(0, 1) > 0.5:
                image = image[:, ::-1, :].copy()
                mask = mask[:, ::-1].copy()
            if random.uniform(0, 1) > 0.5:
                image = image[::-1, :, :].copy()
                mask = mask[::-1, :].copy()

        image = color.gray2rgb(image)
        #image = image[:,:,np.newaxis]
        image = image.transpose((2, 0, 1))
        mask = mask[:,:,np.newaxis]
        mask = mask.transpose((2, 0, 1))
        return image, mask
        '''
--------------------------------------------------------------------------------
/model2D/FCN2D_For_BraTs-master/losses.py:
--------------------------------------------------------------------------------
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np

try:
    from LovaszSoftmax.pytorch.lovasz_losses import lovasz_hinge
except ImportError:
    pass


class BCEDiceLoss(nn.Module):
    def __init__(self):
        super(BCEDiceLoss, self).__init__()

    def forward(self, input, target):
        bce = F.binary_cross_entropy_with_logits(input, target)
        smooth = 1e-5
        input = torch.sigmoid(input)
        num = target.size(0)
        input = input.view(num, -1)
        target = target.view(num, -1)
        intersection = (input * target)
        dice = (2. * intersection.sum(1) + smooth) / (input.sum(1) + target.sum(1) + smooth)
        dice = 1 - dice.sum() / num
        return 0.5 * bce + dice


class LovaszHingeLoss(nn.Module):
    def __init__(self):
        super(LovaszHingeLoss, self).__init__()

    def forward(self, input, target):
        input = input.squeeze(1)
        target = target.squeeze(1)
        loss = lovasz_hinge(input, target, per_image=True)

        return loss
--------------------------------------------------------------------------------
/model2D/FCN2D_For_BraTs-master/metrics.py:
--------------------------------------------------------------------------------
import numpy as np

import torch
import torch.nn.functional as F


def mean_iou_per_image(y_true_in, y_pred_in, print_table=False):
    # per-image, object-level average precision over IoU thresholds 0.50-0.95
    if True:  # not np.sum(y_true_in.flatten()) == 0:
        labels = y_true_in
        y_pred = y_pred_in

        true_objects = 2
        pred_objects = 2

        intersection = np.histogram2d(labels.flatten(), y_pred.flatten(), bins=(true_objects, pred_objects))[0]

        # Compute areas (needed for finding the union between all objects)
        area_true = np.histogram(labels, bins=true_objects)[0]
        area_pred = np.histogram(y_pred, bins=pred_objects)[0]
        area_true = np.expand_dims(area_true, -1)
        area_pred = np.expand_dims(area_pred, 0)

        # Compute union
        union = area_true + area_pred - intersection

        # Exclude background from the analysis
        intersection = intersection[1:, 1:]
        union = union[1:, 1:]
        union[union == 0] = 1e-9

        # Compute the intersection over union
        iou = intersection / union

        # Precision helper function
        def precision_at(threshold, iou):
            matches = iou > threshold
            true_positives = np.sum(matches, axis=1) == 1   # Correct objects
            false_positives = np.sum(matches, axis=0) == 0  # Extra objects (unmatched predictions)
            false_negatives = np.sum(matches, axis=1) == 0  # Missed objects (unmatched ground truth)
            tp, fp, fn = np.sum(true_positives), np.sum(false_positives), np.sum(false_negatives)
            return tp, fp, fn

        # Loop over IoU thresholds
        prec = []
        if print_table:
            print("Thresh\tTP\tFP\tFN\tPrec.")
        for t in np.arange(0.5, 1.0, 0.05):
            tp, fp, fn = precision_at(t, iou)
            if (tp + fp + fn) > 0:
                p = tp / (tp + fp + fn)
            else:
                p = 0
            if print_table:
                print("{:1.3f}\t{}\t{}\t{}\t{:1.3f}".format(t, tp, fp, fn, p))
            prec.append(p)

        if print_table:
            print("AP\t-\t-\t-\t{:1.3f}".format(np.mean(prec)))
        return np.mean(prec)

    else:
        if np.sum(y_pred_in.flatten()) == 0:
            return 1
        else:
            return 0


def batch_iou(output, target):
    output = torch.sigmoid(output).data.cpu().numpy() > 0.5
    target = (target.data.cpu().numpy() > 0.5).astype('int')
    output = output[:, 0, :, :]
    target = target[:, 0, :, :]

    ious = []
    for i in range(output.shape[0]):
        ious.append(mean_iou_per_image(output[i], target[i]))

    return np.mean(ious)


def mean_iou(output, target):
    smooth = 1e-5

    output = torch.sigmoid(output).data.cpu().numpy()
    target = target.data.cpu().numpy()
    ious = []
    for t in np.arange(0.5, 1.0, 0.05):
        output_ = output > t
        target_ = target > t
        intersection = (output_ & target_).sum()
        union = (output_ | target_).sum()
        iou = (intersection + smooth) / (union + smooth)
        ious.append(iou)

    return np.mean(ious)


def iou_score(output, target):
    smooth = 1e-5

    if torch.is_tensor(output):
        output = torch.sigmoid(output).data.cpu().numpy()
    if torch.is_tensor(target):
        target = target.data.cpu().numpy()
    output_ = output > 0.5
    target_ = target > 0.5
    intersection = (output_ & target_).sum()
    union = (output_ | target_).sum()

    return (intersection + smooth) / (union + smooth)


def dice_coef(output, target):
    smooth = 1e-5

    if torch.is_tensor(output):
        output = torch.sigmoid(output).data.cpu().numpy()
    if torch.is_tensor(target):
        target = target.data.cpu().numpy()
    #output = torch.sigmoid(output).view(-1).data.cpu().numpy()
    #target = target.view(-1).data.cpu().numpy()

    intersection = (output * target).sum()

    return (2. * intersection + smooth) / \
        (output.sum() + target.sum() + smooth)


def accuracy(output, target):
    output = torch.sigmoid(output).view(-1).data.cpu().numpy()
    output = (np.round(output)).astype('int')
    target = target.view(-1).data.cpu().numpy()
    target = (np.round(target)).astype('int')

    return (output == target).sum() / len(output)


def ppv(output, target):
    smooth = 1e-5
    if torch.is_tensor(output):
        output = torch.sigmoid(output).data.cpu().numpy()
    if torch.is_tensor(target):
        target = target.data.cpu().numpy()
    intersection = (output * target).sum()
    return (intersection + smooth) / \
        (output.sum() + smooth)


def sensitivity(output, target):
    smooth = 1e-5

    if torch.is_tensor(output):
        output = torch.sigmoid(output).data.cpu().numpy()
    if torch.is_tensor(target):
        target = target.data.cpu().numpy()

    intersection = (output * target).sum()

    return (intersection + smooth) / \
        (target.sum() + smooth)
--------------------------------------------------------------------------------
/model2D/FCN2D_For_BraTs-master/train.py:
--------------------------------------------------------------------------------
# -*- coding: utf-8 -*-

import time
import os
import math
import argparse
from glob import glob
from collections import OrderedDict
import random
import warnings
from datetime import datetime

import numpy as np
from tqdm import tqdm

from sklearn.model_selection import train_test_split
import joblib  # sklearn.externals.joblib was removed in newer scikit-learn
from skimage.io import imread

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import torch.optim as optim
from torch.optim import lr_scheduler
from torch.utils.data import DataLoader
import torch.backends.cudnn as cudnn
import torchvision
from torchvision import datasets, models, transforms

from dataset import Dataset

from metrics import dice_coef, batch_iou, mean_iou, iou_score
import losses
from utils import str2bool, count_params
import pandas as pd
import FCN

arch_names = list(FCN.__dict__.keys())
loss_names = list(losses.__dict__.keys())
loss_names.append('BCEWithLogitsLoss')


def parse_args():
    parser = argparse.ArgumentParser()

    parser.add_argument('--name', default=None,
                        help='model name: (default: arch+timestamp)')
    parser.add_argument('--arch', '-a', metavar='ARCH', default='FCN8s',
                        choices=arch_names,
                        help='model architecture: ' +
                             ' | '.join(arch_names) +
                             ' (default: FCN8s)')
    parser.add_argument('--deepsupervision', default=False, type=str2bool)
    parser.add_argument('--dataset', default="jiu0Monkey",
                        help='dataset name')
    parser.add_argument('--input-channels', default=4, type=int,
                        help='input channels')
    parser.add_argument('--image-ext', default='png',
                        help='image file extension')
    parser.add_argument('--mask-ext', default='png',
                        help='mask file extension')
    parser.add_argument('--aug', default=False, type=str2bool)
    parser.add_argument('--loss', default='BCEDiceLoss',
                        choices=loss_names,
                        help='loss: ' +
                             ' | '.join(loss_names) +
                             ' (default: BCEDiceLoss)')
    parser.add_argument('--epochs', default=10000, type=int, metavar='N',
                        help='number of total epochs to run')
    parser.add_argument('--early-stop', default=20, type=int,
                        metavar='N', help='early stopping (default: 20)')
    parser.add_argument('-b', '--batch-size', default=18, type=int,
                        metavar='N', help='mini-batch size (default: 18)')
    parser.add_argument('--optimizer', default='Adam',
                        choices=['Adam', 'SGD'],
                        help='optimizer: ' +
                             ' | '.join(['Adam', 'SGD']) +
                             ' (default: Adam)')
    parser.add_argument('--lr', '--learning-rate', default=3e-4, type=float,
                        metavar='LR', help='initial learning rate')
    parser.add_argument('--momentum', default=0.9, type=float,
                        help='momentum')
    parser.add_argument('--weight-decay', default=1e-4, type=float,
                        help='weight decay')
    parser.add_argument('--nesterov', default=False, type=str2bool,
                        help='nesterov')

    args = parser.parse_args()

    return args

class AverageMeter(object):
    """Computes and stores the average and current value"""
    def __init__(self):
        self.reset()

    def reset(self):
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count


def train(args, train_loader, model, criterion, optimizer, epoch, scheduler=None):
    losses = AverageMeter()
    ious = AverageMeter()

    model.train()

    for i, (input, target) in tqdm(enumerate(train_loader), total=len(train_loader)):
        input = input.cuda()
        target = target.cuda()

        # compute output
        if args.deepsupervision:
            outputs = model(input)
            loss = 0
            for output in outputs:
                loss += criterion(output, target)
            loss /= len(outputs)
            iou = iou_score(outputs[-1], target)
        else:
            output = model(input)
            loss = criterion(output, target)
            iou = iou_score(output, target)

        losses.update(loss.item(), input.size(0))
        ious.update(iou, input.size(0))

        # compute gradient and do optimizing step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

    log = OrderedDict([
        ('loss', losses.avg),
        ('iou', ious.avg),
    ])

    return log


def validate(args, val_loader, model, criterion):
    losses = AverageMeter()
    ious = AverageMeter()

    # switch to evaluate mode
    model.eval()

    with torch.no_grad():
        for i, (input, target) in tqdm(enumerate(val_loader), total=len(val_loader)):
            input = input.cuda()
            target = target.cuda()

            # compute output
            if args.deepsupervision:
                outputs = model(input)
                loss = 0
                for output in outputs:
                    loss += criterion(output, target)
                loss /= len(outputs)
                iou = iou_score(outputs[-1], target)
            else:
                output = model(input)
                loss = criterion(output, target)
                iou = iou_score(output, target)

            losses.update(loss.item(), input.size(0))
            ious.update(iou, input.size(0))

    log = OrderedDict([
        ('loss', losses.avg),
        ('iou', ious.avg),
    ])

    return log
target.cuda() 161 | 162 | # compute output 163 | if args.deepsupervision: 164 | outputs = model(input) 165 | loss = 0 166 | for output in outputs: 167 | loss += criterion(output, target) 168 | loss /= len(outputs) 169 | iou = iou_score(outputs[-1], target) 170 | else: 171 | output = model(input) 172 | loss = criterion(output, target) 173 | iou = iou_score(output, target) 174 | 175 | losses.update(loss.item(), input.size(0)) 176 | ious.update(iou, input.size(0)) 177 | 178 | log = OrderedDict([ 179 | ('loss', losses.avg), 180 | ('iou', ious.avg), 181 | ]) 182 | 183 | return log 184 | 185 | 186 | def main(): 187 | args = parse_args() 188 | #args.dataset = "datasets" 189 | 190 | 191 | if args.name is None: 192 | if args.deepsupervision: 193 | args.name = '%s_%s_wDS' %(args.dataset, args.arch) 194 | else: 195 | args.name = '%s_%s_woDS' %(args.dataset, args.arch) 196 | if not os.path.exists('models/%s' %args.name): 197 | os.makedirs('models/%s' %args.name) 198 | 199 | print('Config -----') 200 | for arg in vars(args): 201 | print('%s: %s' %(arg, getattr(args, arg))) 202 | print('------------') 203 | 204 | with open('models/%s/args.txt' %args.name, 'w') as f: 205 | for arg in vars(args): 206 | print('%s: %s' %(arg, getattr(args, arg)), file=f) 207 | 208 | joblib.dump(args, 'models/%s/args.pkl' %args.name) 209 | 210 | # define loss function (criterion) 211 | if args.loss == 'BCEWithLogitsLoss': 212 | criterion = nn.BCEWithLogitsLoss().cuda() 213 | else: 214 | criterion = losses.__dict__[args.loss]().cuda() 215 | 216 | cudnn.benchmark = True 217 | 218 | # Data loading code 219 | img_paths = glob(r'D:\Project\CollegeDesign\dataset\Brats2018FoulModel2D\trainImage\*') 220 | mask_paths = glob(r'D:\Project\CollegeDesign\dataset\Brats2018FoulModel2D\trainMask\*') 221 | 222 | train_img_paths, val_img_paths, train_mask_paths, val_mask_paths = \ 223 | train_test_split(img_paths, mask_paths, test_size=0.2, random_state=41) 224 | print("train_num:%s"%str(len(train_img_paths))) 225 | print("val_num:%s"%str(len(val_img_paths))) 226 | 227 | 228 | # create model 229 | print("=> creating model %s" %args.arch) 230 | model = FCN.__dict__[args.arch](args) 231 | 232 | model = model.cuda() 233 | 234 | print(count_params(model)) 235 | 236 | if args.optimizer == 'Adam': 237 | optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=args.lr) 238 | elif args.optimizer == 'SGD': 239 | optimizer = optim.SGD(filter(lambda p: p.requires_grad, model.parameters()), lr=args.lr, 240 | momentum=args.momentum, weight_decay=args.weight_decay, nesterov=args.nesterov) 241 | 242 | train_dataset = Dataset(args, train_img_paths, train_mask_paths, args.aug) 243 | val_dataset = Dataset(args, val_img_paths, val_mask_paths) 244 | 245 | train_loader = torch.utils.data.DataLoader( 246 | train_dataset, 247 | batch_size=args.batch_size, 248 | shuffle=True, 249 | pin_memory=True, 250 | drop_last=True) 251 | val_loader = torch.utils.data.DataLoader( 252 | val_dataset, 253 | batch_size=args.batch_size, 254 | shuffle=False, 255 | pin_memory=True, 256 | drop_last=False) 257 | 258 | log = pd.DataFrame(index=[], columns=[ 259 | 'epoch', 'lr', 'loss', 'iou', 'val_loss', 'val_iou' 260 | ]) 261 | 262 | best_iou = 0 263 | trigger = 0 264 | for epoch in range(args.epochs): 265 | print('Epoch [%d/%d]' %(epoch, args.epochs)) 266 | 267 | # train for one epoch 268 | train_log = train(args, train_loader, model, criterion, optimizer, epoch) 269 | # evaluate on validation set 270 | val_log = validate(args, val_loader, model, criterion) 271 | 
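        # train() and validate() each return an OrderedDict of epoch-averaged
        # 'loss' and 'iou' (per-sample averages via AverageMeter); the values
        # are printed below and appended to models/<name>/log.csv every epoch.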
272 | print('loss %.4f - iou %.4f - val_loss %.4f - val_iou %.4f' 273 | %(train_log['loss'], train_log['iou'], val_log['loss'], val_log['iou'])) 274 | 275 | tmp = pd.Series([ 276 | epoch, 277 | args.lr, 278 | train_log['loss'], 279 | train_log['iou'], 280 | val_log['loss'], 281 | val_log['iou'], 282 | ], index=['epoch', 'lr', 'loss', 'iou', 'val_loss', 'val_iou']) 283 | 284 | log = log.append(tmp, ignore_index=True) 285 | log.to_csv('models/%s/log.csv' %args.name, index=False) 286 | 287 | trigger += 1 288 | 289 | if val_log['iou'] > best_iou: 290 | torch.save(model.state_dict(), 'models/%s/model.pth' %args.name) 291 | best_iou = val_log['iou'] 292 | print("=> saved best model") 293 | trigger = 0 294 | 295 | # early stopping 296 | if not args.early_stop is None: 297 | if trigger >= args.early_stop: 298 | print("=> early stopping") 299 | break 300 | 301 | torch.cuda.empty_cache() 302 | 303 | 304 | 305 | if __name__ == '__main__': 306 | main() 307 | -------------------------------------------------------------------------------- /model2D/FCN2D_For_BraTs-master/utils.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | 3 | def str2bool(v): 4 | if v.lower() in ['true', '1']: 5 | return True 6 | elif v.lower() in ['false', '0']: 7 | return False 8 | else: 9 | raise argparse.ArgumentTypeError('Boolean value expected.') 10 | 11 | 12 | def count_params(model): 13 | return sum(p.numel() for p in model.parameters() if p.requires_grad) 14 | -------------------------------------------------------------------------------- /model2D/NestedUnet_ResBlock-master/.gitattributes: -------------------------------------------------------------------------------- 1 | # Auto detect text files and perform LF normalization 2 | * text=auto 3 | -------------------------------------------------------------------------------- /model2D/NestedUnet_ResBlock-master/.vs/slnx.sqlite: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Merofine/BrainTumorSegmentation/fa97d9268df32e37769a808a70fa257928475d8a/model2D/NestedUnet_ResBlock-master/.vs/slnx.sqlite -------------------------------------------------------------------------------- /model2D/NestedUnet_ResBlock-master/README.md: -------------------------------------------------------------------------------- 1 | # NestedUnet_ResBlock 2 | 3 | -------------------------------------------------------------------------------- /model2D/NestedUnet_ResBlock-master/__pycache__/dataset.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Merofine/BrainTumorSegmentation/fa97d9268df32e37769a808a70fa257928475d8a/model2D/NestedUnet_ResBlock-master/__pycache__/dataset.cpython-36.pyc -------------------------------------------------------------------------------- /model2D/NestedUnet_ResBlock-master/__pycache__/losses.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Merofine/BrainTumorSegmentation/fa97d9268df32e37769a808a70fa257928475d8a/model2D/NestedUnet_ResBlock-master/__pycache__/losses.cpython-36.pyc -------------------------------------------------------------------------------- /model2D/NestedUnet_ResBlock-master/__pycache__/metrics.cpython-36.pyc: --------------------------------------------------------------------------------
https://raw.githubusercontent.com/Merofine/BrainTumorSegmentation/fa97d9268df32e37769a808a70fa257928475d8a/model2D/NestedUnet_ResBlock-master/__pycache__/metrics.cpython-36.pyc -------------------------------------------------------------------------------- /model2D/NestedUnet_ResBlock-master/__pycache__/myresnet34unetplus.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Merofine/BrainTumorSegmentation/fa97d9268df32e37769a808a70fa257928475d8a/model2D/NestedUnet_ResBlock-master/__pycache__/myresnet34unetplus.cpython-36.pyc -------------------------------------------------------------------------------- /model2D/NestedUnet_ResBlock-master/__pycache__/myresnetunetplus.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Merofine/BrainTumorSegmentation/fa97d9268df32e37769a808a70fa257928475d8a/model2D/NestedUnet_ResBlock-master/__pycache__/myresnetunetplus.cpython-36.pyc -------------------------------------------------------------------------------- /model2D/NestedUnet_ResBlock-master/__pycache__/resnet34unetplus.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Merofine/BrainTumorSegmentation/fa97d9268df32e37769a808a70fa257928475d8a/model2D/NestedUnet_ResBlock-master/__pycache__/resnet34unetplus.cpython-36.pyc -------------------------------------------------------------------------------- /model2D/NestedUnet_ResBlock-master/__pycache__/utils.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Merofine/BrainTumorSegmentation/fa97d9268df32e37769a808a70fa257928475d8a/model2D/NestedUnet_ResBlock-master/__pycache__/utils.cpython-36.pyc -------------------------------------------------------------------------------- /model2D/NestedUnet_ResBlock-master/dataset.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import cv2 #https://www.jianshu.com/p/f2e88197e81d 3 | import random 4 | 5 | from skimage.io import imread 6 | from skimage import color 7 | 8 | import torch 9 | import torch.utils.data 10 | from torchvision import datasets, models, transforms 11 | 12 | 13 | class Dataset(torch.utils.data.Dataset): 14 | 15 | def __init__(self, args, img_paths, mask_paths, aug=False): 16 | self.args = args 17 | self.img_paths = img_paths 18 | self.mask_paths = mask_paths 19 | self.aug = aug 20 | 21 | def __len__(self): 22 | return len(self.img_paths) 23 | 24 | def __getitem__(self, idx): 25 | img_path = self.img_paths[idx] 26 | mask_path = self.mask_paths[idx] 27 | # code for loading numpy (.npy) data 28 | npimage = np.load(img_path) 29 | npmask = np.load(mask_path) 30 | npimage = npimage.transpose((2, 0, 1)) 31 | 32 | WT_Label = npmask.copy() 33 | WT_Label[npmask == 1] = 1. 34 | WT_Label[npmask == 2] = 1. 35 | WT_Label[npmask == 4] = 1. 36 | TC_Label = npmask.copy() 37 | TC_Label[npmask == 1] = 1. 38 | TC_Label[npmask == 2] = 0. 39 | TC_Label[npmask == 4] = 1. 40 | ET_Label = npmask.copy() 41 | ET_Label[npmask == 1] = 0. 42 | ET_Label[npmask == 2] = 0. 43 | ET_Label[npmask == 4] = 1.
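        # BraTS label convention behind the three binary maps built above:
        #   WT (whole tumor)     = labels 1, 2 and 4
        #   TC (tumor core)      = labels 1 and 4
        #   ET (enhancing tumor) = label 4 only
        # Equivalent vectorized sketch (illustrative only, not used below):
        #   WT_Label = np.isin(npmask, [1, 2, 4]).astype('float32')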
44 | nplabel = np.empty((160, 160, 3)) 45 | nplabel[:, :, 0] = WT_Label 46 | nplabel[:, :, 1] = TC_Label 47 | nplabel[:, :, 2] = ET_Label 48 | nplabel = nplabel.transpose((2, 0, 1)) 49 | 50 | nplabel = nplabel.astype("float32") 51 | npimage = npimage.astype("float32") 52 | 53 | return npimage,nplabel 54 | 55 | 56 | # code for reading image files (e.g. jpg, png) 57 | ''' 58 | image = imread(img_path) 59 | mask = imread(mask_path) 60 | 61 | image = image.astype('float32') / 255 62 | mask = mask.astype('float32') / 255 63 | 64 | if self.aug: 65 | if random.uniform(0, 1) > 0.5: 66 | image = image[:, ::-1, :].copy() 67 | mask = mask[:, ::-1].copy() 68 | if random.uniform(0, 1) > 0.5: 69 | image = image[::-1, :, :].copy() 70 | mask = mask[::-1, :].copy() 71 | 72 | image = color.gray2rgb(image) 73 | #image = image[:,:,np.newaxis] 74 | image = image.transpose((2, 0, 1)) 75 | mask = mask[:,:,np.newaxis] 76 | mask = mask.transpose((2, 0, 1)) 77 | return image, mask 78 | ''' 79 | 80 | -------------------------------------------------------------------------------- /model2D/NestedUnet_ResBlock-master/losses.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import torch.nn.functional as F 4 | import numpy as np 5 | 6 | try: 7 | from LovaszSoftmax.pytorch.lovasz_losses import lovasz_hinge 8 | except ImportError: 9 | pass 10 | 11 | 12 | class BCEDiceLoss(nn.Module): 13 | def __init__(self): 14 | super(BCEDiceLoss, self).__init__() 15 | 16 | def forward(self, input, target): 17 | bce = F.binary_cross_entropy_with_logits(input, target) 18 | smooth = 1e-5 19 | input = torch.sigmoid(input) 20 | num = target.size(0) 21 | input = input.view(num, -1) 22 | target = target.view(num, -1) 23 | intersection = (input * target) 24 | dice = (2.
* intersection.sum(1) + smooth) / (input.sum(1) + target.sum(1) + smooth) 25 | dice = 1 - dice.sum() / num 26 | return 0.5 * bce + dice 27 | 28 | 29 | class LovaszHingeLoss(nn.Module): 30 | def __init__(self): 31 | super(LovaszHingeLoss, self).__init__() 32 | 33 | def forward(self, input, target): 34 | input = input.squeeze(1) 35 | target = target.squeeze(1) 36 | loss = lovasz_hinge(input, target, per_image=True) 37 | 38 | return loss 39 | -------------------------------------------------------------------------------- /model2D/NestedUnet_ResBlock-master/metrics.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | import torch 4 | import torch.nn.functional as F 5 | 6 | 7 | def mean_iou(y_true_in, y_pred_in, print_table=False): 8 | if True: #not np.sum(y_true_in.flatten()) == 0: 9 | labels = y_true_in 10 | y_pred = y_pred_in 11 | 12 | true_objects = 2 13 | pred_objects = 2 14 | 15 | intersection = np.histogram2d(labels.flatten(), y_pred.flatten(), bins=(true_objects, pred_objects))[0] 16 | 17 | # Compute areas (needed for finding the union between all objects) 18 | area_true = np.histogram(labels, bins = true_objects)[0] 19 | area_pred = np.histogram(y_pred, bins = pred_objects)[0] 20 | area_true = np.expand_dims(area_true, -1) 21 | area_pred = np.expand_dims(area_pred, 0) 22 | 23 | # Compute union 24 | union = area_true + area_pred - intersection 25 | 26 | # Exclude background from the analysis 27 | intersection = intersection[1:,1:] 28 | union = union[1:,1:] 29 | union[union == 0] = 1e-9 30 | 31 | # Compute the intersection over union 32 | iou = intersection / union 33 | 34 | # Precision helper function 35 | def precision_at(threshold, iou): 36 | matches = iou > threshold 37 | true_positives = np.sum(matches, axis=1) == 1 # Correct objects 38 | false_positives = np.sum(matches, axis=0) == 0 # Missed objects 39 | false_negatives = np.sum(matches, axis=1) == 0 # Extra objects 40 | tp, fp, fn = np.sum(true_positives), np.sum(false_positives), np.sum(false_negatives) 41 | return tp, fp, fn 42 | 43 | # Loop over IoU thresholds 44 | prec = [] 45 | if print_table: 46 | print("Thresh\tTP\tFP\tFN\tPrec.") 47 | for t in np.arange(0.5, 1.0, 0.05): 48 | tp, fp, fn = precision_at(t, iou) 49 | if (tp + fp + fn) > 0: 50 | p = tp / (tp + fp + fn) 51 | else: 52 | p = 0 53 | if print_table: 54 | print("{:1.3f}\t{}\t{}\t{}\t{:1.3f}".format(t, tp, fp, fn, p)) 55 | prec.append(p) 56 | 57 | if print_table: 58 | print("AP\t-\t-\t-\t{:1.3f}".format(np.mean(prec))) 59 | return np.mean(prec) 60 | 61 | else: 62 | if np.sum(y_pred_in.flatten()) == 0: 63 | return 1 64 | else: 65 | return 0 66 | 67 | 68 | def batch_iou(output, target): 69 | output = torch.sigmoid(output).data.cpu().numpy() > 0.5 70 | target = (target.data.cpu().numpy() > 0.5).astype('int') 71 | output = output[:,0,:,:] 72 | target = target[:,0,:,:] 73 | 74 | ious = [] 75 | for i in range(output.shape[0]): 76 | ious.append(mean_iou(output[i], target[i])) 77 | 78 | return np.mean(ious) 79 | 80 | 81 | def mean_iou(output, target): 82 | smooth = 1e-5 83 | 84 | output = torch.sigmoid(output).data.cpu().numpy() 85 | target = target.data.cpu().numpy() 86 | ious = [] 87 | for t in np.arange(0.5, 1.0, 0.05): 88 | output_ = output > t 89 | target_ = target > t 90 | intersection = (output_ & target_).sum() 91 | union = (output_ | target_).sum() 92 | iou = (intersection + smooth) / (union + smooth) 93 | ious.append(iou) 94 | 95 | return np.mean(ious) 96 | 97 | 98 | def iou_score(output, target): 99 | 
smooth = 1e-5 100 | 101 | if torch.is_tensor(output): 102 | output = torch.sigmoid(output).data.cpu().numpy() 103 | if torch.is_tensor(target): 104 | target = target.data.cpu().numpy() 105 | output_ = output > 0.5 106 | target_ = target > 0.5 107 | intersection = (output_ & target_).sum() 108 | union = (output_ | target_).sum() 109 | 110 | return (intersection + smooth) / (union + smooth) 111 | 112 | 113 | def dice_coef(output, target): 114 | smooth = 1e-5 115 | 116 | if torch.is_tensor(output): 117 | output = torch.sigmoid(output).data.cpu().numpy() 118 | if torch.is_tensor(target): 119 | target = target.data.cpu().numpy() 120 | #output = torch.sigmoid(output).view(-1).data.cpu().numpy() 121 | #target = target.view(-1).data.cpu().numpy() 122 | 123 | intersection = (output * target).sum() 124 | 125 | return (2. * intersection + smooth) / \ 126 | (output.sum() + target.sum() + smooth) 127 | 128 | 129 | def accuracy(output, target): 130 | output = torch.sigmoid(output).view(-1).data.cpu().numpy() 131 | output = (np.round(output)).astype('int') 132 | target = target.view(-1).data.cpu().numpy() 133 | target = (np.round(target)).astype('int') 134 | (output == target).sum() 135 | 136 | return (output == target).sum() / len(output) 137 | 138 | def ppv(output, target): 139 | smooth = 1e-5 140 | if torch.is_tensor(output): 141 | output = torch.sigmoid(output).data.cpu().numpy() 142 | if torch.is_tensor(target): 143 | target = target.data.cpu().numpy() 144 | intersection = (output * target).sum() 145 | return (intersection + smooth) / \ 146 | (output.sum() + smooth) 147 | 148 | def sensitivity(output, target): 149 | smooth = 1e-5 150 | 151 | if torch.is_tensor(output): 152 | output = torch.sigmoid(output).data.cpu().numpy() 153 | if torch.is_tensor(target): 154 | target = target.data.cpu().numpy() 155 | 156 | intersection = (output * target).sum() 157 | 158 | return (intersection + smooth) / \ 159 | (target.sum() + smooth) -------------------------------------------------------------------------------- /model2D/NestedUnet_ResBlock-master/myresnetunetplus.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | import numpy as np 3 | 4 | from torch import nn 5 | from torch.nn import functional as F 6 | import torch 7 | from torchvision import models 8 | import torchvision 9 | 10 | 11 | class VGGBlock(nn.Module): 12 | def __init__(self, in_channels, middle_channels, out_channels, act_func=nn.ReLU(inplace=True)): 13 | super(VGGBlock, self).__init__() 14 | self.act_func = act_func 15 | self.conv1 = nn.Conv2d(in_channels, middle_channels, 3, padding=1) 16 | self.bn1 = nn.BatchNorm2d(middle_channels) 17 | self.conv2 = nn.Conv2d(middle_channels, out_channels, 3, padding=1) 18 | self.bn2 = nn.BatchNorm2d(out_channels) 19 | 20 | def forward(self, x): 21 | out = self.conv1(x) 22 | out = self.bn1(out) 23 | out = self.act_func(out) 24 | 25 | out = self.conv2(out) 26 | out = self.bn2(out) 27 | out = self.act_func(out) 28 | 29 | return out 30 | 31 | class DoubleConv(nn.Module): 32 | def __init__(self, in_channels, out_channels): 33 | super(DoubleConv, self).__init__() 34 | self.double_conv = nn.Sequential( 35 | nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1), 36 | nn.BatchNorm2d(out_channels), 37 | nn.ReLU(inplace=True), 38 | nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1), 39 | nn.BatchNorm2d(out_channels), 40 | nn.ReLU(inplace=True), 41 | ) 42 | 43 | def forward(self, x): 44 | return self.double_conv(x) 45 | 46 | class 
ResBlock(nn.Module): 47 | def __init__(self, in_channels, out_channels): 48 | super(ResBlock, self).__init__() 49 | self.downsample = nn.Sequential( 50 | nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, bias=False), 51 | nn.BatchNorm2d(out_channels)) 52 | self.double_conv = DoubleConv(in_channels, out_channels) 53 | self.down_sample = nn.MaxPool2d(2) 54 | self.relu = nn.ReLU() 55 | 56 | def forward(self, x): 57 | identity = self.downsample(x) 58 | out = self.double_conv(x) 59 | out = self.relu(out + identity) 60 | return self.down_sample(out),out 61 | 62 | class ResNetUnetPlus(nn.Module): 63 | def __init__(self, args): 64 | super().__init__() 65 | self.args = args 66 | num_class = 3 67 | num_channels = 4 68 | nb_filter = [32, 64, 128, 256, 512] 69 | 70 | """ 71 | Backbone 72 | """ 73 | #resnet = models.resnet34(pretrained=False) 74 | self.resblock1 = ResBlock(num_channels,nb_filter[0]) 75 | self.resblock2 = ResBlock(nb_filter[0], nb_filter[1]) 76 | self.resblock3 = ResBlock(nb_filter[1], nb_filter[2]) 77 | self.resblock4 = ResBlock(nb_filter[2], nb_filter[3]) 78 | self.resblock5 = ResBlock(nb_filter[3], nb_filter[4]) 79 | #self.firstconv = nn.Conv2d(num_channels, 32, kernel_size=3, stride=1, padding=1, bias=False) 80 | #self.firstbn = nn.BatchNorm2d(32) 81 | #self.firstrelu = resnet.relu 82 | #self.firstmaxpool = resnet.maxpool 83 | 84 | #self.encoder1 = resnet.layer1 85 | #self.encoder2 = resnet.layer2 86 | #self.encoder3 = resnet.layer3 87 | #self.encoder4 = resnet.layer4 88 | 89 | #self.pool = nn.MaxPool2d(2, 2) 90 | self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True) 91 | 92 | #self.conv0_0 = VGGBlock(args.input_channels, nb_filter[0], nb_filter[0]) 93 | #self.conv1_0 = VGGBlock(nb_filter[0], nb_filter[1], nb_filter[1]) 94 | #self.conv2_0 = VGGBlock(nb_filter[1], nb_filter[2], nb_filter[2]) 95 | #self.conv3_0 = VGGBlock(nb_filter[2], nb_filter[3], nb_filter[3]) 96 | #self.conv4_0 = VGGBlock(nb_filter[3], nb_filter[4], nb_filter[4]) 97 | 98 | self.conv0_1 = VGGBlock(nb_filter[0]+nb_filter[1], nb_filter[0], nb_filter[0]) 99 | self.conv1_1 = VGGBlock(nb_filter[1]+nb_filter[2], nb_filter[1], nb_filter[1]) 100 | self.conv2_1 = VGGBlock(nb_filter[2]+nb_filter[3], nb_filter[2], nb_filter[2]) 101 | self.conv3_1 = VGGBlock(nb_filter[3]+nb_filter[4], nb_filter[3], nb_filter[3]) 102 | 103 | self.conv0_2 = VGGBlock(nb_filter[0]*2+nb_filter[1], nb_filter[0], nb_filter[0]) 104 | self.conv1_2 = VGGBlock(nb_filter[1]*2+nb_filter[2], nb_filter[1], nb_filter[1]) 105 | self.conv2_2 = VGGBlock(nb_filter[2]*2+nb_filter[3], nb_filter[2], nb_filter[2]) 106 | 107 | self.conv0_3 = VGGBlock(nb_filter[0]*3+nb_filter[1], nb_filter[0], nb_filter[0]) 108 | self.conv1_3 = VGGBlock(nb_filter[1]*3+nb_filter[2], nb_filter[1], nb_filter[1]) 109 | 110 | self.conv0_4 = VGGBlock(nb_filter[0]*4+nb_filter[1], nb_filter[0], nb_filter[0]) 111 | 112 | if self.args.deepsupervision: 113 | self.final1 = nn.Conv2d(nb_filter[0], 1, kernel_size=1) 114 | self.final2 = nn.Conv2d(nb_filter[0], 1, kernel_size=1) 115 | self.final3 = nn.Conv2d(nb_filter[0], 1, kernel_size=1) 116 | self.final4 = nn.Conv2d(nb_filter[0], 1, kernel_size=1) 117 | else: 118 | self.final = nn.Conv2d(nb_filter[0], 3, kernel_size=1) 119 | 120 | 121 | def forward(self, input): 122 | x,x0_0 = self.resblock1(input) 123 | x,x1_0 = self.resblock2(x) 124 | x, x2_0 = self.resblock3(x) 125 | x, x3_0 = self.resblock4(x) 126 | _, x4_0 = self.resblock5(x) 127 | #print (x0_0.shape)32, 160, 160 128 | #print (x1_0.shape)64, 80, 80 129 | #print
(x2_0.shape)128, 40, 40 130 | #print (x3_0.shape)256, 20, 20 131 | #print (x4_0.shape)512, 10, 10 132 | 133 | #input = self.firstconv(input) 134 | #input = self.firstbn(input) 135 | #input = F.relu(input) 136 | # Encoder 137 | #x = self.firstmaxpool(input) # 32 138 | #print(x.shape) 139 | #e1 = self.encoder1(x) # 64 140 | #e2 = self.encoder2(e1) # 128 141 | #e3 = self.encoder3(e2) # 256 142 | #e4 = self.encoder4(e3) # 512 143 | 144 | 145 | #Decoder 146 | #x0_0 = input # 64, 160, 160 147 | #print(x0_0.shape) 148 | #x1_0 = e1 # 64, 80, 80 149 | #print(x1_0.shape) 150 | #out = torch.cat([x0_0, self.up(x1_0)], 1) # 128, 160, 160 151 | #print(out.shape) 152 | x0_1 = self.conv0_1(torch.cat([x0_0, self.up(x1_0)], 1)) 153 | 154 | #x2_0 = e2 155 | x1_1 = self.conv1_1(torch.cat([x1_0, self.up(x2_0)], 1)) 156 | x0_2 = self.conv0_2(torch.cat([x0_0, x0_1, self.up(x1_1)], 1)) 157 | 158 | #x3_0 = e3 159 | x2_1 = self.conv2_1(torch.cat([x2_0, self.up(x3_0)], 1)) 160 | x1_2 = self.conv1_2(torch.cat([x1_0, x1_1, self.up(x2_1)], 1)) 161 | x0_3 = self.conv0_3(torch.cat([x0_0, x0_1, x0_2, self.up(x1_2)], 1)) 162 | 163 | #x4_0 = e4 164 | x3_1 = self.conv3_1(torch.cat([x3_0, self.up(x4_0)], 1)) 165 | x2_2 = self.conv2_2(torch.cat([x2_0, x2_1, self.up(x3_1)], 1)) 166 | x1_3 = self.conv1_3(torch.cat([x1_0, x1_1, x1_2, self.up(x2_2)], 1)) 167 | x0_4 = self.conv0_4(torch.cat([x0_0, x0_1, x0_2, x0_3, self.up(x1_3)], 1)) 168 | 169 | if self.args.deepsupervision: 170 | output1 = self.final1(x0_1) 171 | output2 = self.final2(x0_2) 172 | output3 = self.final3(x0_3) 173 | output4 = self.final4(x0_4) 174 | return [output1, output2, output3, output4] 175 | 176 | else: 177 | output = self.final(x0_4) 178 | return output 179 | -------------------------------------------------------------------------------- /model2D/NestedUnet_ResBlock-master/utils.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | 3 | def str2bool(v): 4 | if v.lower() in ['true', '1']: 5 | return True 6 | elif v.lower() in ['false', '0']: 7 | return False 8 | else: 9 | raise argparse.ArgumentTypeError('Boolean value expected.') 10 | 11 | 12 | def count_params(model): 13 | return sum(p.numel() for p in model.parameters() if p.requires_grad) 14 | -------------------------------------------------------------------------------- /model2D/UNet2D_BraTs-master/.gitattributes: -------------------------------------------------------------------------------- 1 | # Auto detect text files and perform LF normalization 2 | * text=auto 3 | -------------------------------------------------------------------------------- /model2D/UNet2D_BraTs-master/README.md: -------------------------------------------------------------------------------- 1 | # UNet2D_BraTs 2 | 3 | -------------------------------------------------------------------------------- /model2D/UNet2D_BraTs-master/dataset.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import cv2 #https://www.jianshu.com/p/f2e88197e81d 3 | import random 4 | 5 | from skimage.io import imread 6 | from skimage import color 7 | 8 | import torch 9 | import torch.utils.data 10 | from torchvision import datasets, models, transforms 11 | 12 | 13 | class Dataset(torch.utils.data.Dataset): 14 | 15 | def __init__(self, args, img_paths, mask_paths, aug=False): 16 | self.args = args 17 | self.img_paths = img_paths 18 | self.mask_paths = mask_paths 19 | self.aug = aug 20 | 21 | def __len__(self): 22 | return len(self.img_paths) 23 | 24 | def
__getitem__(self, idx): 25 | img_path = self.img_paths[idx] 26 | mask_path = self.mask_paths[idx] 27 | # code for loading numpy (.npy) data 28 | npimage = np.load(img_path) 29 | npmask = np.load(mask_path) 30 | npimage = npimage.transpose((2, 0, 1)) 31 | 32 | WT_Label = npmask.copy() 33 | WT_Label[npmask == 1] = 1. 34 | WT_Label[npmask == 2] = 1. 35 | WT_Label[npmask == 4] = 1. 36 | TC_Label = npmask.copy() 37 | TC_Label[npmask == 1] = 1. 38 | TC_Label[npmask == 2] = 0. 39 | TC_Label[npmask == 4] = 1. 40 | ET_Label = npmask.copy() 41 | ET_Label[npmask == 1] = 0. 42 | ET_Label[npmask == 2] = 0. 43 | ET_Label[npmask == 4] = 1. 44 | nplabel = np.empty((160, 160, 3)) 45 | nplabel[:, :, 0] = WT_Label 46 | nplabel[:, :, 1] = TC_Label 47 | nplabel[:, :, 2] = ET_Label 48 | nplabel = nplabel.transpose((2, 0, 1)) 49 | 50 | nplabel = nplabel.astype("float32") 51 | npimage = npimage.astype("float32") 52 | 53 | return npimage,nplabel 54 | 55 | 56 | # code for reading image files (e.g. jpg, png) 57 | ''' 58 | image = imread(img_path) 59 | mask = imread(mask_path) 60 | 61 | image = image.astype('float32') / 255 62 | mask = mask.astype('float32') / 255 63 | 64 | if self.aug: 65 | if random.uniform(0, 1) > 0.5: 66 | image = image[:, ::-1, :].copy() 67 | mask = mask[:, ::-1].copy() 68 | if random.uniform(0, 1) > 0.5: 69 | image = image[::-1, :, :].copy() 70 | mask = mask[::-1, :].copy() 71 | 72 | image = color.gray2rgb(image) 73 | #image = image[:,:,np.newaxis] 74 | image = image.transpose((2, 0, 1)) 75 | mask = mask[:,:,np.newaxis] 76 | mask = mask.transpose((2, 0, 1)) 77 | return image, mask 78 | ''' 79 | 80 | -------------------------------------------------------------------------------- /model2D/UNet2D_BraTs-master/losses.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import torch.nn.functional as F 4 | import numpy as np 5 | 6 | try: 7 | from LovaszSoftmax.pytorch.lovasz_losses import lovasz_hinge 8 | except ImportError: 9 | pass 10 | 11 | 12 | class BCEDiceLoss(nn.Module): 13 | def __init__(self): 14 | super(BCEDiceLoss, self).__init__() 15 | 16 | def forward(self, input, target): 17 | bce = F.binary_cross_entropy_with_logits(input, target) 18 | smooth = 1e-5 19 | input = torch.sigmoid(input) 20 | num = target.size(0) 21 | input = input.view(num, -1) 22 | target = target.view(num, -1) 23 | intersection = (input * target) 24 | dice = (2.
* intersection.sum(1) + smooth) / (input.sum(1) + target.sum(1) + smooth) 25 | dice = 1 - dice.sum() / num 26 | return 0.5 * bce + dice 27 | 28 | 29 | class LovaszHingeLoss(nn.Module): 30 | def __init__(self): 31 | super(LovaszHingeLoss, self).__init__() 32 | 33 | def forward(self, input, target): 34 | input = input.squeeze(1) 35 | target = target.squeeze(1) 36 | loss = lovasz_hinge(input, target, per_image=True) 37 | 38 | return loss 39 | -------------------------------------------------------------------------------- /model2D/UNet2D_BraTs-master/metrics.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | import torch 4 | import torch.nn.functional as F 5 | 6 | 7 | def mean_iou(y_true_in, y_pred_in, print_table=False): 8 | if True: #not np.sum(y_true_in.flatten()) == 0: 9 | labels = y_true_in 10 | y_pred = y_pred_in 11 | 12 | true_objects = 2 13 | pred_objects = 2 14 | 15 | intersection = np.histogram2d(labels.flatten(), y_pred.flatten(), bins=(true_objects, pred_objects))[0] 16 | 17 | # Compute areas (needed for finding the union between all objects) 18 | area_true = np.histogram(labels, bins = true_objects)[0] 19 | area_pred = np.histogram(y_pred, bins = pred_objects)[0] 20 | area_true = np.expand_dims(area_true, -1) 21 | area_pred = np.expand_dims(area_pred, 0) 22 | 23 | # Compute union 24 | union = area_true + area_pred - intersection 25 | 26 | # Exclude background from the analysis 27 | intersection = intersection[1:,1:] 28 | union = union[1:,1:] 29 | union[union == 0] = 1e-9 30 | 31 | # Compute the intersection over union 32 | iou = intersection / union 33 | 34 | # Precision helper function 35 | def precision_at(threshold, iou): 36 | matches = iou > threshold 37 | true_positives = np.sum(matches, axis=1) == 1 # Correct objects 38 | false_positives = np.sum(matches, axis=0) == 0 # Missed objects 39 | false_negatives = np.sum(matches, axis=1) == 0 # Extra objects 40 | tp, fp, fn = np.sum(true_positives), np.sum(false_positives), np.sum(false_negatives) 41 | return tp, fp, fn 42 | 43 | # Loop over IoU thresholds 44 | prec = [] 45 | if print_table: 46 | print("Thresh\tTP\tFP\tFN\tPrec.") 47 | for t in np.arange(0.5, 1.0, 0.05): 48 | tp, fp, fn = precision_at(t, iou) 49 | if (tp + fp + fn) > 0: 50 | p = tp / (tp + fp + fn) 51 | else: 52 | p = 0 53 | if print_table: 54 | print("{:1.3f}\t{}\t{}\t{}\t{:1.3f}".format(t, tp, fp, fn, p)) 55 | prec.append(p) 56 | 57 | if print_table: 58 | print("AP\t-\t-\t-\t{:1.3f}".format(np.mean(prec))) 59 | return np.mean(prec) 60 | 61 | else: 62 | if np.sum(y_pred_in.flatten()) == 0: 63 | return 1 64 | else: 65 | return 0 66 | 67 | 68 | def batch_iou(output, target): 69 | output = torch.sigmoid(output).data.cpu().numpy() > 0.5 70 | target = (target.data.cpu().numpy() > 0.5).astype('int') 71 | output = output[:,0,:,:] 72 | target = target[:,0,:,:] 73 | 74 | ious = [] 75 | for i in range(output.shape[0]): 76 | ious.append(mean_iou(output[i], target[i])) 77 | 78 | return np.mean(ious) 79 | 80 | 81 | def mean_iou(output, target): 82 | smooth = 1e-5 83 | 84 | output = torch.sigmoid(output).data.cpu().numpy() 85 | target = target.data.cpu().numpy() 86 | ious = [] 87 | for t in np.arange(0.5, 1.0, 0.05): 88 | output_ = output > t 89 | target_ = target > t 90 | intersection = (output_ & target_).sum() 91 | union = (output_ | target_).sum() 92 | iou = (intersection + smooth) / (union + smooth) 93 | ious.append(iou) 94 | 95 | return np.mean(ious) 96 | 97 | 98 | def iou_score(output, target): 99 | smooth 
= 1e-5 100 | 101 | if torch.is_tensor(output): 102 | output = torch.sigmoid(output).data.cpu().numpy() 103 | if torch.is_tensor(target): 104 | target = target.data.cpu().numpy() 105 | output_ = output > 0.5 106 | target_ = target > 0.5 107 | intersection = (output_ & target_).sum() 108 | union = (output_ | target_).sum() 109 | 110 | return (intersection + smooth) / (union + smooth) 111 | 112 | 113 | def dice_coef(output, target): 114 | smooth = 1e-5 115 | 116 | if torch.is_tensor(output): 117 | output = torch.sigmoid(output).data.cpu().numpy() 118 | if torch.is_tensor(target): 119 | target = target.data.cpu().numpy() 120 | #output = torch.sigmoid(output).view(-1).data.cpu().numpy() 121 | #target = target.view(-1).data.cpu().numpy() 122 | 123 | intersection = (output * target).sum() 124 | 125 | return (2. * intersection + smooth) / \ 126 | (output.sum() + target.sum() + smooth) 127 | 128 | 129 | def accuracy(output, target): 130 | output = torch.sigmoid(output).view(-1).data.cpu().numpy() 131 | output = (np.round(output)).astype('int') 132 | target = target.view(-1).data.cpu().numpy() 133 | target = (np.round(target)).astype('int') 134 | (output == target).sum() 135 | 136 | return (output == target).sum() / len(output) 137 | 138 | def ppv(output, target): 139 | smooth = 1e-5 140 | if torch.is_tensor(output): 141 | output = torch.sigmoid(output).data.cpu().numpy() 142 | if torch.is_tensor(target): 143 | target = target.data.cpu().numpy() 144 | intersection = (output * target).sum() 145 | return (intersection + smooth) / \ 146 | (output.sum() + smooth) 147 | 148 | def sensitivity(output, target): 149 | smooth = 1e-5 150 | 151 | if torch.is_tensor(output): 152 | output = torch.sigmoid(output).data.cpu().numpy() 153 | if torch.is_tensor(target): 154 | target = target.data.cpu().numpy() 155 | 156 | intersection = (output * target).sum() 157 | 158 | return (intersection + smooth) / \ 159 | (target.sum() + smooth) -------------------------------------------------------------------------------- /model2D/UNet2D_BraTs-master/train.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | import time 4 | import os 5 | import math 6 | import argparse 7 | from glob import glob 8 | from collections import OrderedDict 9 | import random 10 | import warnings 11 | from datetime import datetime 12 | 13 | import numpy as np 14 | from tqdm import tqdm 15 | 16 | from sklearn.model_selection import train_test_split 17 | from sklearn.externals import joblib 18 | from skimage.io import imread 19 | 20 | import torch 21 | import torch.nn as nn 22 | import torch.nn.functional as F 23 | from torch.autograd import Variable 24 | import torch.optim as optim 25 | from torch.optim import lr_scheduler 26 | from torch.utils.data import DataLoader 27 | import torch.backends.cudnn as cudnn 28 | import torchvision 29 | from torchvision import datasets, models, transforms 30 | 31 | from dataset import Dataset 32 | 33 | from metrics import dice_coef, batch_iou, mean_iou, iou_score 34 | import losses 35 | from utils import str2bool, count_params 36 | import pandas as pd 37 | import unet 38 | 39 | arch_names = list(unet.__dict__.keys()) 40 | loss_names = list(losses.__dict__.keys()) 41 | loss_names.append('BCEWithLogitsLoss') 42 | 43 | 44 | def parse_args(): 45 | parser = argparse.ArgumentParser() 46 | 47 | parser.add_argument('--name', default=None, 48 | help='model name: (default: arch+timestamp)') 49 | parser.add_argument('--arch', '-a', metavar='ARCH', 
default='Unet', 50 | choices=arch_names, 51 | help='model architecture: ' + 52 | ' | '.join(arch_names) + 53 | ' (default: Unet)') 54 | parser.add_argument('--deepsupervision', default=False, type=str2bool) 55 | parser.add_argument('--dataset', default="jiu0Monkey", 56 | help='dataset name') 57 | parser.add_argument('--input-channels', default=4, type=int, 58 | help='input channels') 59 | parser.add_argument('--image-ext', default='png', 60 | help='image file extension') 61 | parser.add_argument('--mask-ext', default='png', 62 | help='mask file extension') 63 | parser.add_argument('--aug', default=False, type=str2bool) 64 | parser.add_argument('--loss', default='BCEDiceLoss', 65 | choices=loss_names, 66 | help='loss: ' + 67 | ' | '.join(loss_names) + 68 | ' (default: BCEDiceLoss)') 69 | parser.add_argument('--epochs', default=10000, type=int, metavar='N', 70 | help='number of total epochs to run') 71 | parser.add_argument('--early-stop', default=20, type=int, 72 | metavar='N', help='early stopping (default: 20)') 73 | parser.add_argument('-b', '--batch-size', default=18, type=int, 74 | metavar='N', help='mini-batch size (default: 18)') 75 | parser.add_argument('--optimizer', default='Adam', 76 | choices=['Adam', 'SGD'], 77 | help='optimizer: ' + 78 | ' | '.join(['Adam', 'SGD']) + 79 | ' (default: Adam)') 80 | parser.add_argument('--lr', '--learning-rate', default=3e-4, type=float, 81 | metavar='LR', help='initial learning rate') 82 | parser.add_argument('--momentum', default=0.9, type=float, 83 | help='momentum') 84 | parser.add_argument('--weight-decay', default=1e-4, type=float, 85 | help='weight decay') 86 | parser.add_argument('--nesterov', default=False, type=str2bool, 87 | help='nesterov') 88 | 89 | args = parser.parse_args() 90 | 91 | return args 92 | 93 | class AverageMeter(object): 94 | """Computes and stores the average and current value""" 95 | def __init__(self): 96 | self.reset() 97 | 98 | def reset(self): 99 | self.val = 0 100 | self.avg = 0 101 | self.sum = 0 102 | self.count = 0 103 | 104 | def update(self, val, n=1): 105 | self.val = val 106 | self.sum += val * n 107 | self.count += n 108 | self.avg = self.sum / self.count 109 | 110 | 111 | def train(args, train_loader, model, criterion, optimizer, epoch, scheduler=None): 112 | losses = AverageMeter() 113 | ious = AverageMeter() 114 | 115 | model.train() 116 | 117 | for i, (input, target) in tqdm(enumerate(train_loader), total=len(train_loader)): 118 | input = input.cuda() 119 | target = target.cuda() 120 | 121 | # compute output 122 | if args.deepsupervision: 123 | outputs = model(input) 124 | loss = 0 125 | for output in outputs: 126 | loss += criterion(output, target) 127 | loss /= len(outputs) 128 | iou = iou_score(outputs[-1], target) 129 | else: 130 | output = model(input) 131 | loss = criterion(output, target) 132 | iou = iou_score(output, target) 133 | 134 | losses.update(loss.item(), input.size(0)) 135 | ious.update(iou, input.size(0)) 136 | 137 | # compute gradient and do optimizing step 138 | optimizer.zero_grad() 139 | loss.backward() 140 | optimizer.step() 141 | 142 | log = OrderedDict([ 143 | ('loss', losses.avg), 144 | ('iou', ious.avg), 145 | ]) 146 | 147 | return log 148 | 149 | 150 | def validate(args, val_loader, model, criterion): 151 | losses = AverageMeter() 152 | ious = AverageMeter() 153 | 154 | # switch to evaluate mode 155 | model.eval() 156 | 157 | with torch.no_grad(): 158 | for i, (input, target) in tqdm(enumerate(val_loader), total=len(val_loader)): 159 | input = input.cuda() 160 | target =
target.cuda() 161 | 162 | # compute output 163 | if args.deepsupervision: 164 | outputs = model(input) 165 | loss = 0 166 | for output in outputs: 167 | loss += criterion(output, target) 168 | loss /= len(outputs) 169 | iou = iou_score(outputs[-1], target) 170 | else: 171 | output = model(input) 172 | loss = criterion(output, target) 173 | iou = iou_score(output, target) 174 | 175 | losses.update(loss.item(), input.size(0)) 176 | ious.update(iou, input.size(0)) 177 | 178 | log = OrderedDict([ 179 | ('loss', losses.avg), 180 | ('iou', ious.avg), 181 | ]) 182 | 183 | return log 184 | 185 | 186 | def main(): 187 | args = parse_args() 188 | #args.dataset = "datasets" 189 | 190 | 191 | if args.name is None: 192 | if args.deepsupervision: 193 | args.name = '%s_%s_wDS' %(args.dataset, args.arch) 194 | else: 195 | args.name = '%s_%s_woDS' %(args.dataset, args.arch) 196 | if not os.path.exists('models/%s' %args.name): 197 | os.makedirs('models/%s' %args.name) 198 | 199 | print('Config -----') 200 | for arg in vars(args): 201 | print('%s: %s' %(arg, getattr(args, arg))) 202 | print('------------') 203 | 204 | with open('models/%s/args.txt' %args.name, 'w') as f: 205 | for arg in vars(args): 206 | print('%s: %s' %(arg, getattr(args, arg)), file=f) 207 | 208 | joblib.dump(args, 'models/%s/args.pkl' %args.name) 209 | 210 | # define loss function (criterion) 211 | if args.loss == 'BCEWithLogitsLoss': 212 | criterion = nn.BCEWithLogitsLoss().cuda() 213 | else: 214 | criterion = losses.__dict__[args.loss]().cuda() 215 | 216 | cudnn.benchmark = True 217 | 218 | # Data loading code 219 | img_paths = glob(r'D:\Project\CollegeDesign\dataset\Brats2018FoulModel2D\trainImage\*') 220 | mask_paths = glob(r'D:\Project\CollegeDesign\dataset\Brats2018FoulModel2D\trainMask\*') 221 | 222 | train_img_paths, val_img_paths, train_mask_paths, val_mask_paths = \ 223 | train_test_split(img_paths, mask_paths, test_size=0.2, random_state=41) 224 | print("train_num:%s"%str(len(train_img_paths))) 225 | print("val_num:%s"%str(len(val_img_paths))) 226 | 227 | 228 | # create model 229 | print("=> creating model %s" %args.arch) 230 | model = unet.__dict__[args.arch](args) 231 | 232 | model = model.cuda() 233 | 234 | print(count_params(model)) 235 | 236 | if args.optimizer == 'Adam': 237 | optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=args.lr) 238 | elif args.optimizer == 'SGD': 239 | optimizer = optim.SGD(filter(lambda p: p.requires_grad, model.parameters()), lr=args.lr, 240 | momentum=args.momentum, weight_decay=args.weight_decay, nesterov=args.nesterov) 241 | 242 | train_dataset = Dataset(args, train_img_paths, train_mask_paths, args.aug) 243 | val_dataset = Dataset(args, val_img_paths, val_mask_paths) 244 | 245 | train_loader = torch.utils.data.DataLoader( 246 | train_dataset, 247 | batch_size=args.batch_size, 248 | shuffle=True, 249 | pin_memory=True, 250 | drop_last=True) 251 | val_loader = torch.utils.data.DataLoader( 252 | val_dataset, 253 | batch_size=args.batch_size, 254 | shuffle=False, 255 | pin_memory=True, 256 | drop_last=False) 257 | 258 | log = pd.DataFrame(index=[], columns=[ 259 | 'epoch', 'lr', 'loss', 'iou', 'val_loss', 'val_iou' 260 | ]) 261 | 262 | best_iou = 0 263 | trigger = 0 264 | for epoch in range(args.epochs): 265 | print('Epoch [%d/%d]' %(epoch, args.epochs)) 266 | 267 | # train for one epoch 268 | train_log = train(args, train_loader, model, criterion, optimizer, epoch) 269 | # evaluate on validation set 270 | val_log = validate(args, val_loader, model, criterion) 271 
| 272 | print('loss %.4f - iou %.4f - val_loss %.4f - val_iou %.4f' 273 | %(train_log['loss'], train_log['iou'], val_log['loss'], val_log['iou'])) 274 | 275 | tmp = pd.Series([ 276 | epoch, 277 | args.lr, 278 | train_log['loss'], 279 | train_log['iou'], 280 | val_log['loss'], 281 | val_log['iou'], 282 | ], index=['epoch', 'lr', 'loss', 'iou', 'val_loss', 'val_iou']) 283 | 284 | log = log.append(tmp, ignore_index=True) 285 | log.to_csv('models/%s/log.csv' %args.name, index=False) 286 | 287 | trigger += 1 288 | 289 | if val_log['iou'] > best_iou: 290 | torch.save(model.state_dict(), 'models/%s/model.pth' %args.name) 291 | best_iou = val_log['iou'] 292 | print("=> saved best model") 293 | trigger = 0 294 | 295 | # early stopping 296 | if not args.early_stop is None: 297 | if trigger >= args.early_stop: 298 | print("=> early stopping") 299 | break 300 | 301 | torch.cuda.empty_cache() 302 | 303 | 304 | 305 | if __name__ == '__main__': 306 | main() 307 | -------------------------------------------------------------------------------- /model2D/UNet2D_BraTs-master/unet.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import torch.nn.functional as F 4 | 5 | 6 | class Downsample_block(nn.Module): 7 | def __init__(self, in_channels, out_channels): 8 | super(Downsample_block, self).__init__() 9 | self.conv1 = nn.Conv2d(in_channels, out_channels, 3, padding=1) 10 | self.bn1 = nn.BatchNorm2d(out_channels) 11 | self.conv2 = nn.Conv2d(out_channels, out_channels, 3, padding=1) 12 | self.bn2 = nn.BatchNorm2d(out_channels) 13 | 14 | def forward(self, x): 15 | x = F.relu(self.bn1(self.conv1(x))) 16 | y = F.relu(self.bn2(self.conv2(x))) 17 | x = F.max_pool2d(y, 2, stride=2) 18 | 19 | return x, y 20 | 21 | 22 | class Upsample_block(nn.Module): 23 | def __init__(self, in_channels, out_channels): 24 | super(Upsample_block, self).__init__() 25 | self.transconv = nn.ConvTranspose2d(in_channels, out_channels, 4, padding=1, stride=2) 26 | self.conv1 = nn.Conv2d(in_channels, out_channels, 3, padding=1) 27 | self.bn1 = nn.BatchNorm2d(out_channels) 28 | self.conv2 = nn.Conv2d(out_channels, out_channels, 3, padding=1) 29 | self.bn2 = nn.BatchNorm2d(out_channels) 30 | 31 | def forward(self, x, y): 32 | x = self.transconv(x) 33 | x = torch.cat((x, y), dim=1) 34 | x = F.relu(self.bn1(self.conv1(x))) 35 | x = F.relu(self.bn2(self.conv2(x))) 36 | 37 | return x 38 | 39 | 40 | class Unet(nn.Module): 41 | def __init__(self, args): 42 | in_chan = 4 43 | out_chan = 3 44 | super(Unet, self).__init__() 45 | self.down1 = Downsample_block(in_chan, 64) 46 | self.down2 = Downsample_block(64, 128) 47 | self.down3 = Downsample_block(128, 256) 48 | self.down4 = Downsample_block(256, 512) 49 | self.conv1 = nn.Conv2d(512, 1024, 3, padding=1) 50 | self.bn1 = nn.BatchNorm2d(1024) 51 | self.conv2 = nn.Conv2d(1024, 1024, 3, padding=1) 52 | self.bn2 = nn.BatchNorm2d(1024) 53 | self.up4 = Upsample_block(1024, 512) 54 | self.up3 = Upsample_block(512, 256) 55 | self.up2 = Upsample_block(256, 128) 56 | self.up1 = Upsample_block(128, 64) 57 | self.outconv = nn.Conv2d(64, out_chan, 1) 58 | self.outconvp1 = nn.Conv2d(64, out_chan, 1) 59 | self.outconvm1 = nn.Conv2d(64, out_chan, 1) 60 | 61 | def forward(self, x): 62 | x, y1 = self.down1(x) 63 | x, y2 = self.down2(x) 64 | x, y3 = self.down3(x) 65 | x, y4 = self.down4(x) 66 | x = F.dropout2d(F.relu(self.bn1(self.conv1(x)))) 67 | x = F.dropout2d(F.relu(self.bn2(self.conv2(x)))) 68 | x = self.up4(x, y4) 69 | x = self.up3(x, 
y3) 70 | x = self.up2(x, y2) 71 | x = self.up1(x, y1) 72 | x1 = self.outconv(x) 73 | 74 | return x1 -------------------------------------------------------------------------------- /model2D/UNet2D_BraTs-master/utils.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | 3 | def str2bool(v): 4 | if v.lower() in ['true', '1']: 5 | return True 6 | elif v.lower() in ['false', '0']: 7 | return False 8 | else: 9 | raise argparse.ArgumentTypeError('Boolean value expected.') 10 | 11 | 12 | def count_params(model): 13 | return sum(p.numel() for p in model.parameters() if p.requires_grad) 14 | -------------------------------------------------------------------------------- /model2D/deepresunet_brats-master/dataset.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import cv2 #https://www.jianshu.com/p/f2e88197e81d 3 | import random 4 | 5 | from skimage.io import imread 6 | from skimage import color 7 | 8 | import torch 9 | import torch.utils.data 10 | from torchvision import datasets, models, transforms 11 | 12 | 13 | class Dataset(torch.utils.data.Dataset): 14 | 15 | def __init__(self, args, img_paths, mask_paths, aug=False): 16 | self.args = args 17 | self.img_paths = img_paths 18 | self.mask_paths = mask_paths 19 | self.aug = aug 20 | 21 | def __len__(self): 22 | return len(self.img_paths) 23 | 24 | def __getitem__(self, idx): 25 | img_path = self.img_paths[idx] 26 | mask_path = self.mask_paths[idx] 27 | # code for loading numpy (.npy) data 28 | npimage = np.load(img_path) 29 | npmask = np.load(mask_path) 30 | npimage = npimage.transpose((2, 0, 1)) 31 | 32 | WT_Label = npmask.copy() 33 | WT_Label[npmask == 1] = 1. 34 | WT_Label[npmask == 2] = 1. 35 | WT_Label[npmask == 4] = 1. 36 | TC_Label = npmask.copy() 37 | TC_Label[npmask == 1] = 1. 38 | TC_Label[npmask == 2] = 0. 39 | TC_Label[npmask == 4] = 1. 40 | ET_Label = npmask.copy() 41 | ET_Label[npmask == 1] = 0. 42 | ET_Label[npmask == 2] = 0. 43 | ET_Label[npmask == 4] = 1.
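        # As in the other dataset.py copies, npmask holds raw BraTS labels
        # (1: necrotic/non-enhancing core, 2: peritumoral edema, 4: enhancing
        # tumor); the maps above collapse them into WT/TC/ET channels.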
44 | nplabel = np.empty((160, 160, 3)) 45 | nplabel[:, :, 0] = WT_Label 46 | nplabel[:, :, 1] = TC_Label 47 | nplabel[:, :, 2] = ET_Label 48 | nplabel = nplabel.transpose((2, 0, 1)) 49 | 50 | nplabel = nplabel.astype("float32") 51 | npimage = npimage.astype("float32") 52 | 53 | return npimage,nplabel 54 | 55 | 56 | # code for reading image files (e.g. jpg, png) 57 | ''' 58 | image = imread(img_path) 59 | mask = imread(mask_path) 60 | 61 | image = image.astype('float32') / 255 62 | mask = mask.astype('float32') / 255 63 | 64 | if self.aug: 65 | if random.uniform(0, 1) > 0.5: 66 | image = image[:, ::-1, :].copy() 67 | mask = mask[:, ::-1].copy() 68 | if random.uniform(0, 1) > 0.5: 69 | image = image[::-1, :, :].copy() 70 | mask = mask[::-1, :].copy() 71 | 72 | image = color.gray2rgb(image) 73 | #image = image[:,:,np.newaxis] 74 | image = image.transpose((2, 0, 1)) 75 | mask = mask[:,:,np.newaxis] 76 | mask = mask.transpose((2, 0, 1)) 77 | return image, mask 78 | ''' 79 | 80 | -------------------------------------------------------------------------------- /model2D/deepresunet_brats-master/losses.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import torch.nn.functional as F 4 | import numpy as np 5 | 6 | try: 7 | from LovaszSoftmax.pytorch.lovasz_losses import lovasz_hinge 8 | except ImportError: 9 | pass 10 | 11 | 12 | class BCEDiceLoss(nn.Module): 13 | def __init__(self): 14 | super(BCEDiceLoss, self).__init__() 15 | 16 | def forward(self, input, target): 17 | bce = F.binary_cross_entropy_with_logits(input, target) 18 | smooth = 1e-5 19 | input = torch.sigmoid(input) 20 | num = target.size(0) 21 | input = input.view(num, -1) 22 | target = target.view(num, -1) 23 | intersection = (input * target) 24 | dice = (2.
* intersection.sum(1) + smooth) / (input.sum(1) + target.sum(1) + smooth) 25 | dice = 1 - dice.sum() / num 26 | return 0.5 * bce + dice 27 | 28 | 29 | class LovaszHingeLoss(nn.Module): 30 | def __init__(self): 31 | super(LovaszHingeLoss, self).__init__() 32 | 33 | def forward(self, input, target): 34 | input = input.squeeze(1) 35 | target = target.squeeze(1) 36 | loss = lovasz_hinge(input, target, per_image=True) 37 | 38 | return loss 39 | -------------------------------------------------------------------------------- /model2D/deepresunet_brats-master/metrics.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | import torch 4 | import torch.nn.functional as F 5 | 6 | 7 | def mean_iou(y_true_in, y_pred_in, print_table=False): 8 | if True: #not np.sum(y_true_in.flatten()) == 0: 9 | labels = y_true_in 10 | y_pred = y_pred_in 11 | 12 | true_objects = 2 13 | pred_objects = 2 14 | 15 | intersection = np.histogram2d(labels.flatten(), y_pred.flatten(), bins=(true_objects, pred_objects))[0] 16 | 17 | # Compute areas (needed for finding the union between all objects) 18 | area_true = np.histogram(labels, bins = true_objects)[0] 19 | area_pred = np.histogram(y_pred, bins = pred_objects)[0] 20 | area_true = np.expand_dims(area_true, -1) 21 | area_pred = np.expand_dims(area_pred, 0) 22 | 23 | # Compute union 24 | union = area_true + area_pred - intersection 25 | 26 | # Exclude background from the analysis 27 | intersection = intersection[1:,1:] 28 | union = union[1:,1:] 29 | union[union == 0] = 1e-9 30 | 31 | # Compute the intersection over union 32 | iou = intersection / union 33 | 34 | # Precision helper function 35 | def precision_at(threshold, iou): 36 | matches = iou > threshold 37 | true_positives = np.sum(matches, axis=1) == 1 # Correct objects 38 | false_positives = np.sum(matches, axis=0) == 0 # Missed objects 39 | false_negatives = np.sum(matches, axis=1) == 0 # Extra objects 40 | tp, fp, fn = np.sum(true_positives), np.sum(false_positives), np.sum(false_negatives) 41 | return tp, fp, fn 42 | 43 | # Loop over IoU thresholds 44 | prec = [] 45 | if print_table: 46 | print("Thresh\tTP\tFP\tFN\tPrec.") 47 | for t in np.arange(0.5, 1.0, 0.05): 48 | tp, fp, fn = precision_at(t, iou) 49 | if (tp + fp + fn) > 0: 50 | p = tp / (tp + fp + fn) 51 | else: 52 | p = 0 53 | if print_table: 54 | print("{:1.3f}\t{}\t{}\t{}\t{:1.3f}".format(t, tp, fp, fn, p)) 55 | prec.append(p) 56 | 57 | if print_table: 58 | print("AP\t-\t-\t-\t{:1.3f}".format(np.mean(prec))) 59 | return np.mean(prec) 60 | 61 | else: 62 | if np.sum(y_pred_in.flatten()) == 0: 63 | return 1 64 | else: 65 | return 0 66 | 67 | 68 | def batch_iou(output, target): 69 | output = torch.sigmoid(output).data.cpu().numpy() > 0.5 70 | target = (target.data.cpu().numpy() > 0.5).astype('int') 71 | output = output[:,0,:,:] 72 | target = target[:,0,:,:] 73 | 74 | ious = [] 75 | for i in range(output.shape[0]): 76 | ious.append(mean_iou(output[i], target[i])) 77 | 78 | return np.mean(ious) 79 | 80 | 81 | def mean_iou(output, target): 82 | smooth = 1e-5 83 | 84 | output = torch.sigmoid(output).data.cpu().numpy() 85 | target = target.data.cpu().numpy() 86 | ious = [] 87 | for t in np.arange(0.5, 1.0, 0.05): 88 | output_ = output > t 89 | target_ = target > t 90 | intersection = (output_ & target_).sum() 91 | union = (output_ | target_).sum() 92 | iou = (intersection + smooth) / (union + smooth) 93 | ious.append(iou) 94 | 95 | return np.mean(ious) 96 | 97 | 98 | def iou_score(output, target): 99 | 
smooth = 1e-5 100 | 101 | if torch.is_tensor(output): 102 | output = torch.sigmoid(output).data.cpu().numpy() 103 | if torch.is_tensor(target): 104 | target = target.data.cpu().numpy() 105 | output_ = output > 0.5 106 | target_ = target > 0.5 107 | intersection = (output_ & target_).sum() 108 | union = (output_ | target_).sum() 109 | 110 | return (intersection + smooth) / (union + smooth) 111 | 112 | 113 | def dice_coef(output, target): 114 | smooth = 1e-5 115 | 116 | if torch.is_tensor(output): 117 | output = torch.sigmoid(output).data.cpu().numpy() 118 | if torch.is_tensor(target): 119 | target = target.data.cpu().numpy() 120 | #output = torch.sigmoid(output).view(-1).data.cpu().numpy() 121 | #target = target.view(-1).data.cpu().numpy() 122 | 123 | intersection = (output * target).sum() 124 | 125 | return (2. * intersection + smooth) / \ 126 | (output.sum() + target.sum() + smooth) 127 | 128 | 129 | def accuracy(output, target): 130 | output = torch.sigmoid(output).view(-1).data.cpu().numpy() 131 | output = (np.round(output)).astype('int') 132 | target = target.view(-1).data.cpu().numpy() 133 | target = (np.round(target)).astype('int') 134 | (output == target).sum() 135 | 136 | return (output == target).sum() / len(output) 137 | 138 | def ppv(output, target): 139 | smooth = 1e-5 140 | if torch.is_tensor(output): 141 | output = torch.sigmoid(output).data.cpu().numpy() 142 | if torch.is_tensor(target): 143 | target = target.data.cpu().numpy() 144 | intersection = (output * target).sum() 145 | return (intersection + smooth) / \ 146 | (output.sum() + smooth) 147 | 148 | def sensitivity(output, target): 149 | smooth = 1e-5 150 | 151 | if torch.is_tensor(output): 152 | output = torch.sigmoid(output).data.cpu().numpy() 153 | if torch.is_tensor(target): 154 | target = target.data.cpu().numpy() 155 | 156 | intersection = (output * target).sum() 157 | 158 | return (intersection + smooth) / \ 159 | (target.sum() + smooth) -------------------------------------------------------------------------------- /model2D/deepresunet_brats-master/mymodel.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | 4 | 5 | class PreActivateDoubleConv(nn.Module): 6 | def __init__(self, in_channels, out_channels): 7 | super(PreActivateDoubleConv, self).__init__() 8 | self.double_conv = nn.Sequential( 9 | nn.BatchNorm2d(in_channels), 10 | nn.ReLU(inplace=True), 11 | nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1), 12 | nn.BatchNorm2d(out_channels), 13 | nn.ReLU(inplace=True), 14 | nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1) 15 | ) 16 | 17 | def forward(self, x): 18 | return self.double_conv(x) 19 | 20 | class PreActivateResUpBlock(nn.Module): 21 | def __init__(self, in_channels, out_channels): 22 | super(PreActivateResUpBlock, self).__init__() 23 | self.ch_avg = nn.Sequential( 24 | nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, bias=False), 25 | nn.BatchNorm2d(out_channels)) 26 | self.up_sample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True) 27 | self.ch_avg = nn.Sequential( 28 | nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, bias=False), 29 | nn.BatchNorm2d(out_channels)) 30 | self.double_conv = PreActivateDoubleConv(in_channels, out_channels) 31 | 32 | def forward(self, down_input, skip_input): 33 | x = self.up_sample(down_input) 34 | x = torch.cat([x, skip_input], dim=1) 35 | return self.double_conv(x) + self.ch_avg(x) 36 | 37 | class 
PreActivateResBlock(nn.Module): 38 | def __init__(self, in_channels, out_channels): 39 | super(PreActivateResBlock, self).__init__() 40 | self.ch_avg = nn.Sequential( 41 | nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, bias=False), 42 | nn.BatchNorm2d(out_channels)) 43 | 44 | self.double_conv = PreActivateDoubleConv(in_channels, out_channels) 45 | self.down_sample = nn.MaxPool2d(2) 46 | 47 | def forward(self, x): 48 | identity = self.ch_avg(x) 49 | out = self.double_conv(x) 50 | out = out + identity 51 | return self.down_sample(out), out 52 | 53 | class DoubleConv(nn.Module): 54 | def __init__(self, in_channels, out_channels): 55 | super(DoubleConv, self).__init__() 56 | self.double_conv = nn.Sequential( 57 | nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1), 58 | nn.BatchNorm2d(out_channels), 59 | nn.ReLU(inplace=True), 60 | nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1), 61 | nn.BatchNorm2d(out_channels), 62 | nn.ReLU(inplace=True), 63 | ) 64 | 65 | def forward(self, x): 66 | return self.double_conv(x) 67 | 68 | class ResBlock(nn.Module): 69 | def __init__(self, in_channels, out_channels): 70 | super(ResBlock, self).__init__() 71 | self.downsample = nn.Sequential( 72 | nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, bias=False), 73 | nn.BatchNorm2d(out_channels)) 74 | self.double_conv = DoubleConv(in_channels, out_channels) 75 | self.down_sample = nn.MaxPool2d(2) 76 | self.relu = nn.ReLU() 77 | 78 | def forward(self, x): 79 | identity = self.downsample(x) 80 | out = self.double_conv(x) 81 | out = self.relu(out + identity) 82 | return self.down_sample(out), out 83 | 84 | class DownBlock(nn.Module): 85 | def __init__(self, in_channels, out_channels): 86 | super(DownBlock, self).__init__() 87 | self.double_conv = DoubleConv(in_channels, out_channels) 88 | self.down_sample = nn.MaxPool2d(2) 89 | 90 | def forward(self, x): 91 | skip_out = self.double_conv(x) 92 | down_out = self.down_sample(skip_out) 93 | return (down_out, skip_out) 94 | 95 | class UpBlock(nn.Module): 96 | def __init__(self, in_channels, out_channels): 97 | super(UpBlock, self).__init__() 98 | self.up_sample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True) 99 | self.double_conv = DoubleConv(in_channels, out_channels) 100 | 101 | def forward(self, down_input, skip_input): 102 | x = self.up_sample(down_input) 103 | x = torch.cat([x, skip_input], dim=1) 104 | return self.double_conv(x) 105 | 106 | class DeepResUNet(nn.Module): 107 | def __init__(self, args): 108 | super(DeepResUNet, self).__init__() 109 | 110 | self.down_conv1 = PreActivateResBlock(4, 64) 111 | self.down_conv2 = PreActivateResBlock(64, 128) 112 | self.down_conv3 = PreActivateResBlock(128, 256) 113 | self.down_conv4 = PreActivateResBlock(256, 512) 114 | 115 | self.double_conv = PreActivateDoubleConv(512, 1024) 116 | 117 | self.up_conv4 = PreActivateResUpBlock(512 + 1024, 512) 118 | self.up_conv3 = PreActivateResUpBlock(256 + 512, 256) 119 | self.up_conv2 = PreActivateResUpBlock(128 + 256, 128) 120 | self.up_conv1 = PreActivateResUpBlock(128 + 64, 64) 121 | 122 | self.conv_last = nn.Conv2d(64, 3, kernel_size=1) 123 | 124 | def forward(self, x): 125 | x, skip1_out = self.down_conv1(x) 126 | x, skip2_out = self.down_conv2(x) 127 | x, skip3_out = self.down_conv3(x) 128 | x, skip4_out = self.down_conv4(x) 129 | x = self.double_conv(x) 130 | x = self.up_conv4(x, skip4_out) 131 | x = self.up_conv3(x, skip3_out) 132 | x = self.up_conv2(x, skip2_out) 133 | x = self.up_conv1(x, skip1_out) 134 | x 
= self.conv_last(x)
135 |         return x
136 | 
137 | class HybridResUNet(nn.Module):
138 |     """
139 |     Hybrid solution of resnet blocks and double conv blocks
140 |     """
141 |     def __init__(self, args):
142 |         super(HybridResUNet, self).__init__()
143 | 
144 |         self.down_conv1 = ResBlock(4, 64)
145 |         self.down_conv2 = ResBlock(64, 128)
146 |         self.down_conv3 = ResBlock(128, 256)
147 |         self.down_conv4 = ResBlock(256, 512)
148 | 
149 |         self.double_conv = DoubleConv(512, 1024)
150 | 
151 |         self.up_conv4 = UpBlock(512 + 1024, 512)
152 |         self.up_conv3 = UpBlock(256 + 512, 256)
153 |         self.up_conv2 = UpBlock(128 + 256, 128)
154 |         self.up_conv1 = UpBlock(128 + 64, 64)
155 | 
156 |         self.conv_last = nn.Conv2d(64, 3, kernel_size=1)
157 | 
158 |     def forward(self, x):
159 |         x, skip1_out = self.down_conv1(x)
160 |         x, skip2_out = self.down_conv2(x)
161 |         x, skip3_out = self.down_conv3(x)
162 |         x, skip4_out = self.down_conv4(x)
163 |         x = self.double_conv(x)
164 |         x = self.up_conv4(x, skip4_out)
165 |         x = self.up_conv3(x, skip3_out)
166 |         x = self.up_conv2(x, skip2_out)
167 |         x = self.up_conv1(x, skip1_out)
168 |         x = self.conv_last(x)
169 |         return x
170 | 
171 | class ONet(nn.Module):
172 |     def __init__(self, args):
173 |         alpha = 470
174 |         beta = 40
175 |         super(ONet, self).__init__()
176 |         self.alpha = alpha
177 |         self.beta = beta
178 |         self.down_conv1 = ResBlock(4, 64)
179 |         self.down_conv2 = ResBlock(64, 128)
180 |         self.down_conv3 = ResBlock(128, 256)
181 |         self.down_conv4 = ResBlock(256, 512)
182 | 
183 |         self.double_conv = DoubleConv(512, 1024)
184 | 
185 |         self.up_conv4 = UpBlock(512 + 1024, 512)
186 |         self.up_conv3 = UpBlock(256 + 512, 256)
187 |         self.up_conv2 = UpBlock(128 + 256, 128)
188 |         self.up_conv1 = UpBlock(128 + 64, 64)
189 | 
190 |         self.conv_last = nn.Conv2d(64, 3, kernel_size=1)
191 |         self.input_output_conv = nn.Conv2d(4, 3, kernel_size=1)  # was Conv2d(2, 3): the cat in forward() yields 3 + 1 channels, assuming a single-channel bounding map
192 | 
193 | 
194 |     def forward(self, inputs):
195 |         input_tensor, bounding = inputs
196 |         x, skip1_out = self.down_conv1(input_tensor + (bounding * self.alpha))
197 |         x, skip2_out = self.down_conv2(x)
198 |         x, skip3_out = self.down_conv3(x)
199 |         x, skip4_out = self.down_conv4(x)
200 |         x = self.double_conv(x)
201 |         x = self.up_conv4(x, skip4_out)
202 |         x = self.up_conv3(x, skip3_out)
203 |         x = self.up_conv2(x, skip2_out)
204 |         x = self.up_conv1(x, skip1_out)
205 |         x = self.conv_last(x)
206 |         input_output = torch.cat([x, bounding * self.beta], dim=1)
207 |         x = self.input_output_conv(input_output)
208 |         return x
--------------------------------------------------------------------------------
/model2D/deepresunet_brats-master/utils.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | 
3 | def str2bool(v):
4 |     if v.lower() in ['true', '1']:
5 |         return True
6 |     elif v.lower() in ['false', '0']:
7 |         return False
8 |     else:
9 |         raise argparse.ArgumentTypeError('Boolean value expected.')
10 | 
11 | 
12 | def count_params(model):
13 |     return sum(p.numel() for p in model.parameters() if p.requires_grad)
14 | 
--------------------------------------------------------------------------------
/model2D/hybridresunet-master/.gitattributes:
--------------------------------------------------------------------------------
1 | # Auto detect text files and perform LF normalization
2 | * text=auto
3 | 
--------------------------------------------------------------------------------
/model2D/hybridresunet-master/README.md:
--------------------------------------------------------------------------------
1 | # hybridresunet
2 | 
3 | 
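Quick shape check for the three decoders defined in mymodel.py above. This is an illustrative sketch, not part of the repository: the 160x160 crop size comes from the dataset code, the spatial size only needs to be divisible by 16, and ONet's bounding input is assumed to be a single-channel map (matching the Conv2d(4, 3) fusion above).

# Sketch: forward-pass smoke test for the models in mymodel.py (assumed setup).
import argparse
import torch
from mymodel import DeepResUNet, HybridResUNet, ONet

args = argparse.Namespace()          # the constructors accept args but never read it
x = torch.randn(2, 4, 160, 160)      # batch of 4 stacked MRI modalities

for net in (DeepResUNet(args), HybridResUNet(args)):
    assert net(x).shape == (2, 3, 160, 160)   # WT / TC / ET logit maps

bounding = torch.zeros(2, 1, 160, 160)        # hypothetical 1-channel ROI mask
onet = ONet(args)
assert onet((x, bounding)).shape == (2, 3, 160, 160)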
--------------------------------------------------------------------------------
/model2D/hybridresunet-master/dataset.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import cv2 #https://www.jianshu.com/p/f2e88197e81d
3 | import random
4 | 
5 | from skimage.io import imread
6 | from skimage import color
7 | 
8 | import torch
9 | import torch.utils.data
10 | from torchvision import datasets, models, transforms
11 | 
12 | 
13 | class Dataset(torch.utils.data.Dataset):
14 | 
15 |     def __init__(self, args, img_paths, mask_paths, aug=False):
16 |         self.args = args
17 |         self.img_paths = img_paths
18 |         self.mask_paths = mask_paths
19 |         self.aug = aug
20 | 
21 |     def __len__(self):
22 |         return len(self.img_paths)
23 | 
24 |     def __getitem__(self, idx):
25 |         img_path = self.img_paths[idx]
26 |         mask_path = self.mask_paths[idx]
27 |         # code for reading numpy (.npy) data
28 |         npimage = np.load(img_path)
29 |         npmask = np.load(mask_path)
30 |         npimage = npimage.transpose((2, 0, 1))
31 | 
32 |         WT_Label = npmask.copy()
33 |         WT_Label[npmask == 1] = 1.
34 |         WT_Label[npmask == 2] = 1.
35 |         WT_Label[npmask == 4] = 1.
36 |         TC_Label = npmask.copy()
37 |         TC_Label[npmask == 1] = 1.
38 |         TC_Label[npmask == 2] = 0.
39 |         TC_Label[npmask == 4] = 1.
40 |         ET_Label = npmask.copy()
41 |         ET_Label[npmask == 1] = 0.
42 |         ET_Label[npmask == 2] = 0.
43 |         ET_Label[npmask == 4] = 1.
44 |         nplabel = np.empty((160, 160, 3))
45 |         nplabel[:, :, 0] = WT_Label
46 |         nplabel[:, :, 1] = TC_Label
47 |         nplabel[:, :, 2] = ET_Label
48 |         nplabel = nplabel.transpose((2, 0, 1))
49 | 
50 |         nplabel = nplabel.astype("float32")
51 |         npimage = npimage.astype("float32")
52 | 
53 |         return npimage,nplabel
54 | 
55 | 
56 |         # code for reading image files (e.g. jpg, png)
57 |         '''
58 |         image = imread(img_path)
59 |         mask = imread(mask_path)
60 | 
61 |         image = image.astype('float32') / 255
62 |         mask = mask.astype('float32') / 255
63 | 
64 |         if self.aug:
65 |             if random.uniform(0, 1) > 0.5:
66 |                 image = image[:, ::-1, :].copy()
67 |                 mask = mask[:, ::-1].copy()
68 |             if random.uniform(0, 1) > 0.5:
69 |                 image = image[::-1, :, :].copy()
70 |                 mask = mask[::-1, :].copy()
71 | 
72 |         image = color.gray2rgb(image)
73 |         #image = image[:,:,np.newaxis]
74 |         image = image.transpose((2, 0, 1))
75 |         mask = mask[:,:,np.newaxis]
76 |         mask = mask.transpose((2, 0, 1))
77 |         return image, mask
78 |         '''
79 | 
80 | 
--------------------------------------------------------------------------------
/model2D/hybridresunet-master/losses.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | import torch.nn.functional as F
4 | import numpy as np
5 | 
6 | try:
7 |     from LovaszSoftmax.pytorch.lovasz_losses import lovasz_hinge
8 | except ImportError:
9 |     pass
10 | 
11 | 
12 | class BCEDiceLoss(nn.Module):
13 |     def __init__(self):
14 |         super(BCEDiceLoss, self).__init__()
15 | 
16 |     def forward(self, input, target):
17 |         bce = F.binary_cross_entropy_with_logits(input, target)
18 |         smooth = 1e-5
19 |         input = torch.sigmoid(input)
20 |         num = target.size(0)
21 |         input = input.view(num, -1)
22 |         target = target.view(num, -1)
23 |         intersection = (input * target)
24 |         dice = (2. 
* intersection.sum(1) + smooth) / (input.sum(1) + target.sum(1) + smooth) 25 | dice = 1 - dice.sum() / num 26 | return 0.5 * bce + dice 27 | 28 | 29 | class LovaszHingeLoss(nn.Module): 30 | def __init__(self): 31 | super(LovaszHingeLoss, self).__init__() 32 | 33 | def forward(self, input, target): 34 | input = input.squeeze(1) 35 | target = target.squeeze(1) 36 | loss = lovasz_hinge(input, target, per_image=True) 37 | 38 | return loss 39 | -------------------------------------------------------------------------------- /model2D/hybridresunet-master/metrics.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | import torch 4 | import torch.nn.functional as F 5 | 6 | 7 | def mean_iou(y_true_in, y_pred_in, print_table=False): 8 | if True: #not np.sum(y_true_in.flatten()) == 0: 9 | labels = y_true_in 10 | y_pred = y_pred_in 11 | 12 | true_objects = 2 13 | pred_objects = 2 14 | 15 | intersection = np.histogram2d(labels.flatten(), y_pred.flatten(), bins=(true_objects, pred_objects))[0] 16 | 17 | # Compute areas (needed for finding the union between all objects) 18 | area_true = np.histogram(labels, bins = true_objects)[0] 19 | area_pred = np.histogram(y_pred, bins = pred_objects)[0] 20 | area_true = np.expand_dims(area_true, -1) 21 | area_pred = np.expand_dims(area_pred, 0) 22 | 23 | # Compute union 24 | union = area_true + area_pred - intersection 25 | 26 | # Exclude background from the analysis 27 | intersection = intersection[1:,1:] 28 | union = union[1:,1:] 29 | union[union == 0] = 1e-9 30 | 31 | # Compute the intersection over union 32 | iou = intersection / union 33 | 34 | # Precision helper function 35 | def precision_at(threshold, iou): 36 | matches = iou > threshold 37 | true_positives = np.sum(matches, axis=1) == 1 # Correct objects 38 | false_positives = np.sum(matches, axis=0) == 0 # Missed objects 39 | false_negatives = np.sum(matches, axis=1) == 0 # Extra objects 40 | tp, fp, fn = np.sum(true_positives), np.sum(false_positives), np.sum(false_negatives) 41 | return tp, fp, fn 42 | 43 | # Loop over IoU thresholds 44 | prec = [] 45 | if print_table: 46 | print("Thresh\tTP\tFP\tFN\tPrec.") 47 | for t in np.arange(0.5, 1.0, 0.05): 48 | tp, fp, fn = precision_at(t, iou) 49 | if (tp + fp + fn) > 0: 50 | p = tp / (tp + fp + fn) 51 | else: 52 | p = 0 53 | if print_table: 54 | print("{:1.3f}\t{}\t{}\t{}\t{:1.3f}".format(t, tp, fp, fn, p)) 55 | prec.append(p) 56 | 57 | if print_table: 58 | print("AP\t-\t-\t-\t{:1.3f}".format(np.mean(prec))) 59 | return np.mean(prec) 60 | 61 | else: 62 | if np.sum(y_pred_in.flatten()) == 0: 63 | return 1 64 | else: 65 | return 0 66 | 67 | 68 | def batch_iou(output, target): 69 | output = torch.sigmoid(output).data.cpu().numpy() > 0.5 70 | target = (target.data.cpu().numpy() > 0.5).astype('int') 71 | output = output[:,0,:,:] 72 | target = target[:,0,:,:] 73 | 74 | ious = [] 75 | for i in range(output.shape[0]): 76 | ious.append(mean_iou(output[i], target[i])) 77 | 78 | return np.mean(ious) 79 | 80 | 81 | def mean_iou(output, target): 82 | smooth = 1e-5 83 | 84 | output = torch.sigmoid(output).data.cpu().numpy() 85 | target = target.data.cpu().numpy() 86 | ious = [] 87 | for t in np.arange(0.5, 1.0, 0.05): 88 | output_ = output > t 89 | target_ = target > t 90 | intersection = (output_ & target_).sum() 91 | union = (output_ | target_).sum() 92 | iou = (intersection + smooth) / (union + smooth) 93 | ious.append(iou) 94 | 95 | return np.mean(ious) 96 | 97 | 98 | def iou_score(output, target): 99 | 
smooth = 1e-5 100 | 101 | if torch.is_tensor(output): 102 | output = torch.sigmoid(output).data.cpu().numpy() 103 | if torch.is_tensor(target): 104 | target = target.data.cpu().numpy() 105 | output_ = output > 0.5 106 | target_ = target > 0.5 107 | intersection = (output_ & target_).sum() 108 | union = (output_ | target_).sum() 109 | 110 | return (intersection + smooth) / (union + smooth) 111 | 112 | 113 | def dice_coef(output, target): 114 | smooth = 1e-5 115 | 116 | if torch.is_tensor(output): 117 | output = torch.sigmoid(output).data.cpu().numpy() 118 | if torch.is_tensor(target): 119 | target = target.data.cpu().numpy() 120 | #output = torch.sigmoid(output).view(-1).data.cpu().numpy() 121 | #target = target.view(-1).data.cpu().numpy() 122 | 123 | intersection = (output * target).sum() 124 | 125 | return (2. * intersection + smooth) / \ 126 | (output.sum() + target.sum() + smooth) 127 | 128 | 129 | def accuracy(output, target): 130 | output = torch.sigmoid(output).view(-1).data.cpu().numpy() 131 | output = (np.round(output)).astype('int') 132 | target = target.view(-1).data.cpu().numpy() 133 | target = (np.round(target)).astype('int') 134 | (output == target).sum() 135 | 136 | return (output == target).sum() / len(output) 137 | 138 | def ppv(output, target): 139 | smooth = 1e-5 140 | if torch.is_tensor(output): 141 | output = torch.sigmoid(output).data.cpu().numpy() 142 | if torch.is_tensor(target): 143 | target = target.data.cpu().numpy() 144 | intersection = (output * target).sum() 145 | return (intersection + smooth) / \ 146 | (output.sum() + smooth) 147 | 148 | def sensitivity(output, target): 149 | smooth = 1e-5 150 | 151 | if torch.is_tensor(output): 152 | output = torch.sigmoid(output).data.cpu().numpy() 153 | if torch.is_tensor(target): 154 | target = target.data.cpu().numpy() 155 | 156 | intersection = (output * target).sum() 157 | 158 | return (intersection + smooth) / \ 159 | (target.sum() + smooth) -------------------------------------------------------------------------------- /model2D/hybridresunet-master/mymodel.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | 4 | 5 | class PreActivateDoubleConv(nn.Module): 6 | def __init__(self, in_channels, out_channels): 7 | super(PreActivateDoubleConv, self).__init__() 8 | self.double_conv = nn.Sequential( 9 | nn.BatchNorm2d(in_channels), 10 | nn.ReLU(inplace=True), 11 | nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1), 12 | nn.BatchNorm2d(out_channels), 13 | nn.ReLU(inplace=True), 14 | nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1) 15 | ) 16 | 17 | def forward(self, x): 18 | return self.double_conv(x) 19 | 20 | class PreActivateResUpBlock(nn.Module): 21 | def __init__(self, in_channels, out_channels): 22 | super(PreActivateResUpBlock, self).__init__() 23 | self.ch_avg = nn.Sequential( 24 | nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, bias=False), 25 | nn.BatchNorm2d(out_channels)) 26 | self.up_sample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True) 27 | self.ch_avg = nn.Sequential( 28 | nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, bias=False), 29 | nn.BatchNorm2d(out_channels)) 30 | self.double_conv = PreActivateDoubleConv(in_channels, out_channels) 31 | 32 | def forward(self, down_input, skip_input): 33 | x = self.up_sample(down_input) 34 | x = torch.cat([x, skip_input], dim=1) 35 | return self.double_conv(x) + self.ch_avg(x) 36 | 37 | class 
PreActivateResBlock(nn.Module): 38 | def __init__(self, in_channels, out_channels): 39 | super(PreActivateResBlock, self).__init__() 40 | self.ch_avg = nn.Sequential( 41 | nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, bias=False), 42 | nn.BatchNorm2d(out_channels)) 43 | 44 | self.double_conv = PreActivateDoubleConv(in_channels, out_channels) 45 | self.down_sample = nn.MaxPool2d(2) 46 | 47 | def forward(self, x): 48 | identity = self.ch_avg(x) 49 | out = self.double_conv(x) 50 | out = out + identity 51 | return self.down_sample(out), out 52 | 53 | class DoubleConv(nn.Module): 54 | def __init__(self, in_channels, out_channels): 55 | super(DoubleConv, self).__init__() 56 | self.double_conv = nn.Sequential( 57 | nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1), 58 | nn.BatchNorm2d(out_channels), 59 | nn.ReLU(inplace=True), 60 | nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1), 61 | nn.BatchNorm2d(out_channels), 62 | nn.ReLU(inplace=True), 63 | ) 64 | 65 | def forward(self, x): 66 | return self.double_conv(x) 67 | 68 | class ResBlock(nn.Module): 69 | def __init__(self, in_channels, out_channels): 70 | super(ResBlock, self).__init__() 71 | self.downsample = nn.Sequential( 72 | nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, bias=False), 73 | nn.BatchNorm2d(out_channels)) 74 | self.double_conv = DoubleConv(in_channels, out_channels) 75 | self.down_sample = nn.MaxPool2d(2) 76 | self.relu = nn.ReLU() 77 | 78 | def forward(self, x): 79 | identity = self.downsample(x) 80 | out = self.double_conv(x) 81 | out = self.relu(out + identity) 82 | return self.down_sample(out), out 83 | 84 | class DownBlock(nn.Module): 85 | def __init__(self, in_channels, out_channels): 86 | super(DownBlock, self).__init__() 87 | self.double_conv = DoubleConv(in_channels, out_channels) 88 | self.down_sample = nn.MaxPool2d(2) 89 | 90 | def forward(self, x): 91 | skip_out = self.double_conv(x) 92 | down_out = self.down_sample(skip_out) 93 | return (down_out, skip_out) 94 | 95 | class UpBlock(nn.Module): 96 | def __init__(self, in_channels, out_channels): 97 | super(UpBlock, self).__init__() 98 | self.up_sample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True) 99 | self.double_conv = DoubleConv(in_channels, out_channels) 100 | 101 | def forward(self, down_input, skip_input): 102 | x = self.up_sample(down_input) 103 | x = torch.cat([x, skip_input], dim=1) 104 | return self.double_conv(x) 105 | 106 | class DeepResUNet(nn.Module): 107 | def __init__(self, args): 108 | super(DeepResUNet, self).__init__() 109 | 110 | self.down_conv1 = PreActivateResBlock(4, 64) 111 | self.down_conv2 = PreActivateResBlock(64, 128) 112 | self.down_conv3 = PreActivateResBlock(128, 256) 113 | self.down_conv4 = PreActivateResBlock(256, 512) 114 | 115 | self.double_conv = PreActivateDoubleConv(512, 1024) 116 | 117 | self.up_conv4 = PreActivateResUpBlock(512 + 1024, 512) 118 | self.up_conv3 = PreActivateResUpBlock(256 + 512, 256) 119 | self.up_conv2 = PreActivateResUpBlock(128 + 256, 128) 120 | self.up_conv1 = PreActivateResUpBlock(128 + 64, 64) 121 | 122 | self.conv_last = nn.Conv2d(64, 3, kernel_size=1) 123 | 124 | def forward(self, x): 125 | x, skip1_out = self.down_conv1(x) 126 | x, skip2_out = self.down_conv2(x) 127 | x, skip3_out = self.down_conv3(x) 128 | x, skip4_out = self.down_conv4(x) 129 | x = self.double_conv(x) 130 | x = self.up_conv4(x, skip4_out) 131 | x = self.up_conv3(x, skip3_out) 132 | x = self.up_conv2(x, skip2_out) 133 | x = self.up_conv1(x, skip1_out) 134 | x 
= self.conv_last(x)
135 |         return x
136 | 
137 | class HybridResUNet(nn.Module):
138 |     """
139 |     Hybrid solution of resnet blocks and double conv blocks
140 |     """
141 |     def __init__(self, args):
142 |         super(HybridResUNet, self).__init__()
143 | 
144 |         self.down_conv1 = ResBlock(4, 64)
145 |         self.down_conv2 = ResBlock(64, 128)
146 |         self.down_conv3 = ResBlock(128, 256)
147 |         self.down_conv4 = ResBlock(256, 512)
148 | 
149 |         self.double_conv = DoubleConv(512, 1024)
150 | 
151 |         self.up_conv4 = UpBlock(512 + 1024, 512)
152 |         self.up_conv3 = UpBlock(256 + 512, 256)
153 |         self.up_conv2 = UpBlock(128 + 256, 128)
154 |         self.up_conv1 = UpBlock(128 + 64, 64)
155 | 
156 |         self.conv_last = nn.Conv2d(64, 3, kernel_size=1)
157 | 
158 |     def forward(self, x):
159 |         x, skip1_out = self.down_conv1(x)
160 |         x, skip2_out = self.down_conv2(x)
161 |         x, skip3_out = self.down_conv3(x)
162 |         x, skip4_out = self.down_conv4(x)
163 |         x = self.double_conv(x)
164 |         x = self.up_conv4(x, skip4_out)
165 |         x = self.up_conv3(x, skip3_out)
166 |         x = self.up_conv2(x, skip2_out)
167 |         x = self.up_conv1(x, skip1_out)
168 |         x = self.conv_last(x)
169 |         return x
170 | 
171 | class ONet(nn.Module):
172 |     def __init__(self, args):
173 |         alpha = 470
174 |         beta = 40
175 |         super(ONet, self).__init__()
176 |         self.alpha = alpha
177 |         self.beta = beta
178 |         self.down_conv1 = ResBlock(4, 64)
179 |         self.down_conv2 = ResBlock(64, 128)
180 |         self.down_conv3 = ResBlock(128, 256)
181 |         self.down_conv4 = ResBlock(256, 512)
182 | 
183 |         self.double_conv = DoubleConv(512, 1024)
184 | 
185 |         self.up_conv4 = UpBlock(512 + 1024, 512)
186 |         self.up_conv3 = UpBlock(256 + 512, 256)
187 |         self.up_conv2 = UpBlock(128 + 256, 128)
188 |         self.up_conv1 = UpBlock(128 + 64, 64)
189 | 
190 |         self.conv_last = nn.Conv2d(64, 3, kernel_size=1)
191 |         self.input_output_conv = nn.Conv2d(4, 3, kernel_size=1)  # was Conv2d(2, 3): the cat in forward() yields 3 + 1 channels, assuming a single-channel bounding map
192 | 
193 | 
194 |     def forward(self, inputs):
195 |         input_tensor, bounding = inputs
196 |         x, skip1_out = self.down_conv1(input_tensor + (bounding * self.alpha))
197 |         x, skip2_out = self.down_conv2(x)
198 |         x, skip3_out = self.down_conv3(x)
199 |         x, skip4_out = self.down_conv4(x)
200 |         x = self.double_conv(x)
201 |         x = self.up_conv4(x, skip4_out)
202 |         x = self.up_conv3(x, skip3_out)
203 |         x = self.up_conv2(x, skip2_out)
204 |         x = self.up_conv1(x, skip1_out)
205 |         x = self.conv_last(x)
206 |         input_output = torch.cat([x, bounding * self.beta], dim=1)
207 |         x = self.input_output_conv(input_output)
208 |         return x
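train.py below selects its criterion out of losses.py. A minimal, self-contained check of BCEDiceLoss on dummy logits (shapes follow the 2D pipeline; illustrative only, not repository code):

# Sketch: exercising BCEDiceLoss from losses.py on dummy data.
import torch
from losses import BCEDiceLoss

criterion = BCEDiceLoss()
logits = torch.randn(2, 3, 160, 160)                  # raw model outputs (pre-sigmoid)
target = (torch.rand(2, 3, 160, 160) > 0.5).float()   # binary WT/TC/ET maps
loss = criterion(logits, target)                      # 0.5 * BCE-with-logits + (1 - mean soft Dice)
print(loss.item())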
--------------------------------------------------------------------------------
/model2D/hybridresunet-master/train.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | 
3 | import time
4 | import os
5 | import math
6 | import argparse
7 | from glob import glob
8 | from collections import OrderedDict
9 | import random
10 | import warnings
11 | from datetime import datetime
12 | 
13 | import numpy as np
14 | from tqdm import tqdm
15 | 
16 | from sklearn.model_selection import train_test_split
17 | import joblib  # was `from sklearn.externals import joblib`, which was removed in scikit-learn >= 0.23
18 | from skimage.io import imread
19 | 
20 | import torch
21 | import torch.nn as nn
22 | import torch.nn.functional as F
23 | from torch.autograd import Variable
24 | import torch.optim as optim
25 | from torch.optim import lr_scheduler
26 | from torch.utils.data import DataLoader
27 | import torch.backends.cudnn as cudnn
28 | import torchvision
29 | from torchvision import datasets, models, transforms
30 | 
31 | from dataset import Dataset
32 | 
33 | from metrics import dice_coef, batch_iou, mean_iou, iou_score
34 | import losses
35 | from utils import str2bool, count_params
36 | import pandas as pd
37 | import mymodel
38 | 
39 | arch_names = list(mymodel.__dict__.keys())
40 | loss_names = list(losses.__dict__.keys())
41 | loss_names.append('BCEWithLogitsLoss')
42 | 
43 | 
44 | def parse_args():
45 |     parser = argparse.ArgumentParser()
46 | 
47 |     parser.add_argument('--name', default=None,
48 |                         help='model name: (default: arch+timestamp)')
49 |     parser.add_argument('--arch', '-a', metavar='ARCH', default='DeepResUNet',
50 |                         choices=arch_names,
51 |                         help='model architecture: ' +
52 |                             ' | '.join(arch_names) +
53 |                             ' (default: DeepResUNet)')
54 |     parser.add_argument('--deepsupervision', default=False, type=str2bool)
55 |     parser.add_argument('--dataset', default="jiu0Monkey",
56 |                         help='dataset name')
57 |     parser.add_argument('--input-channels', default=4, type=int,
58 |                         help='input channels')
59 |     parser.add_argument('--image-ext', default='png',
60 |                         help='image file extension')
61 |     parser.add_argument('--mask-ext', default='png',
62 |                         help='mask file extension')
63 |     parser.add_argument('--aug', default=False, type=str2bool)
64 |     parser.add_argument('--loss', default='BCEDiceLoss',
65 |                         choices=loss_names,
66 |                         help='loss: ' +
67 |                             ' | '.join(loss_names) +
68 |                             ' (default: BCEDiceLoss)')
69 |     parser.add_argument('--epochs', default=10000, type=int, metavar='N',
70 |                         help='number of total epochs to run')
71 |     parser.add_argument('--early-stop', default=20, type=int,
72 |                         metavar='N', help='early stopping (default: 20)')
73 |     parser.add_argument('-b', '--batch-size', default=18, type=int,
74 |                         metavar='N', help='mini-batch size (default: 18)')
75 |     parser.add_argument('--optimizer', default='Adam',
76 |                         choices=['Adam', 'SGD'],
77 |                         help='optimizer: ' +
78 |                             ' | '.join(['Adam', 'SGD']) +
79 |                             ' (default: Adam)')
80 |     parser.add_argument('--lr', '--learning-rate', default=3e-4, type=float,
81 |                         metavar='LR', help='initial learning rate')
82 |     parser.add_argument('--momentum', default=0.9, type=float,
83 |                         help='momentum')
84 |     parser.add_argument('--weight-decay', default=1e-4, type=float,
85 |                         help='weight decay')
86 |     parser.add_argument('--nesterov', default=False, type=str2bool,
87 |                         help='nesterov')
88 | 
89 |     args = parser.parse_args()
90 | 
91 |     return args
92 | 
93 | class AverageMeter(object):
94 |     """Computes and stores the average and current value"""
95 |     def __init__(self):
96 |         self.reset()
97 | 
98 |     def reset(self):
99 |         self.val = 0
100 |         self.avg = 0
101 |         self.sum = 0
102 |         self.count = 0
103 | 
104 |     def update(self, val, n=1):
105 |         self.val = val
106 |         self.sum += val * n
107 |         self.count += n
108 |         self.avg = self.sum / self.count
109 | 
110 | 
111 | def train(args, train_loader, model, criterion, optimizer, epoch, scheduler=None):
112 |     losses = AverageMeter()
113 |     ious = AverageMeter()
114 | 
115 |     model.train()
116 | 
117 |     for i, (input, target) in tqdm(enumerate(train_loader), total=len(train_loader)):
118 |         input = input.cuda()
119 |         target = target.cuda()
120 | 
121 |         # compute output
122 |         if args.deepsupervision:
123 |             outputs = model(input)
124 |             loss = 0
125 |             for output in outputs:
126 |                 loss += criterion(output, target)
127 |             loss /= len(outputs)
128 |             iou = iou_score(outputs[-1], target)
129 |         else:
130 |             output = model(input)
131 |             loss = criterion(output, target)
132 |             iou = iou_score(output, target)
133 | 
134 |         losses.update(loss.item(), input.size(0))
135 |         ious.update(iou, input.size(0))
136 | 
137 |         # compute gradient and do optimizing step
138 | 
optimizer.zero_grad() 139 | loss.backward() 140 | optimizer.step() 141 | 142 | log = OrderedDict([ 143 | ('loss', losses.avg), 144 | ('iou', ious.avg), 145 | ]) 146 | 147 | return log 148 | 149 | 150 | def validate(args, val_loader, model, criterion): 151 | losses = AverageMeter() 152 | ious = AverageMeter() 153 | 154 | # switch to evaluate mode 155 | model.eval() 156 | 157 | with torch.no_grad(): 158 | for i, (input, target) in tqdm(enumerate(val_loader), total=len(val_loader)): 159 | input = input.cuda() 160 | target = target.cuda() 161 | 162 | # compute output 163 | if args.deepsupervision: 164 | outputs = model(input) 165 | loss = 0 166 | for output in outputs: 167 | loss += criterion(output, target) 168 | loss /= len(outputs) 169 | iou = iou_score(outputs[-1], target) 170 | else: 171 | output = model(input) 172 | loss = criterion(output, target) 173 | iou = iou_score(output, target) 174 | 175 | losses.update(loss.item(), input.size(0)) 176 | ious.update(iou, input.size(0)) 177 | 178 | log = OrderedDict([ 179 | ('loss', losses.avg), 180 | ('iou', ious.avg), 181 | ]) 182 | 183 | return log 184 | 185 | 186 | def main(): 187 | args = parse_args() 188 | #args.dataset = "datasets" 189 | 190 | 191 | if args.name is None: 192 | if args.deepsupervision: 193 | args.name = '%s_%s_wDS' %(args.dataset, args.arch) 194 | else: 195 | args.name = '%s_%s_woDS' %(args.dataset, args.arch) 196 | if not os.path.exists('models/%s' %args.name): 197 | os.makedirs('models/%s' %args.name) 198 | 199 | print('Config -----') 200 | for arg in vars(args): 201 | print('%s: %s' %(arg, getattr(args, arg))) 202 | print('------------') 203 | 204 | with open('models/%s/args.txt' %args.name, 'w') as f: 205 | for arg in vars(args): 206 | print('%s: %s' %(arg, getattr(args, arg)), file=f) 207 | 208 | joblib.dump(args, 'models/%s/args.pkl' %args.name) 209 | 210 | # define loss function (criterion) 211 | if args.loss == 'BCEWithLogitsLoss': 212 | criterion = nn.BCEWithLogitsLoss().cuda() 213 | else: 214 | criterion = losses.__dict__[args.loss]().cuda() 215 | 216 | cudnn.benchmark = True 217 | 218 | # Data loading code 219 | img_paths = glob(r'D:\Project\CollegeDesign\dataset\Brats2018FoulModel2D\trainImage\*') 220 | mask_paths = glob(r'D:\Project\CollegeDesign\dataset\Brats2018FoulModel2D\trainMask\*') 221 | 222 | train_img_paths, val_img_paths, train_mask_paths, val_mask_paths = \ 223 | train_test_split(img_paths, mask_paths, test_size=0.2, random_state=41) 224 | print("train_num:%s"%str(len(train_img_paths))) 225 | print("val_num:%s"%str(len(val_img_paths))) 226 | 227 | 228 | # create model 229 | print("=> creating model %s" %args.arch) 230 | model = mymodel.__dict__[args.arch](args) 231 | 232 | model = model.cuda() 233 | 234 | print(count_params(model)) 235 | 236 | if args.optimizer == 'Adam': 237 | optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=args.lr) 238 | elif args.optimizer == 'SGD': 239 | optimizer = optim.SGD(filter(lambda p: p.requires_grad, model.parameters()), lr=args.lr, 240 | momentum=args.momentum, weight_decay=args.weight_decay, nesterov=args.nesterov) 241 | 242 | train_dataset = Dataset(args, train_img_paths, train_mask_paths, args.aug) 243 | val_dataset = Dataset(args, val_img_paths, val_mask_paths) 244 | 245 | train_loader = torch.utils.data.DataLoader( 246 | train_dataset, 247 | batch_size=args.batch_size, 248 | shuffle=True, 249 | pin_memory=True, 250 | drop_last=True) 251 | val_loader = torch.utils.data.DataLoader( 252 | val_dataset, 253 | batch_size=args.batch_size, 
254 |         shuffle=False,
255 |         pin_memory=True,
256 |         drop_last=False)
257 | 
258 |     log = pd.DataFrame(index=[], columns=[
259 |         'epoch', 'lr', 'loss', 'iou', 'val_loss', 'val_iou'
260 |     ])
261 | 
262 |     best_iou = 0
263 |     trigger = 0
264 |     for epoch in range(args.epochs):
265 |         print('Epoch [%d/%d]' %(epoch, args.epochs))
266 | 
267 |         # train for one epoch
268 |         train_log = train(args, train_loader, model, criterion, optimizer, epoch)
269 |         # evaluate on validation set
270 |         val_log = validate(args, val_loader, model, criterion)
271 | 
272 |         print('loss %.4f - iou %.4f - val_loss %.4f - val_iou %.4f'
273 |             %(train_log['loss'], train_log['iou'], val_log['loss'], val_log['iou']))
274 | 
275 |         tmp = pd.Series([
276 |             epoch,
277 |             args.lr,
278 |             train_log['loss'],
279 |             train_log['iou'],
280 |             val_log['loss'],
281 |             val_log['iou'],
282 |         ], index=['epoch', 'lr', 'loss', 'iou', 'val_loss', 'val_iou'])
283 | 
284 |         log = pd.concat([log, tmp.to_frame().T], ignore_index=True)  # DataFrame.append was removed in pandas 2.0
285 |         log.to_csv('models/%s/log.csv' %args.name, index=False)
286 | 
287 |         trigger += 1
288 | 
289 |         if val_log['iou'] > best_iou:
290 |             torch.save(model.state_dict(), 'models/%s/model.pth' %args.name)
291 |             best_iou = val_log['iou']
292 |             print("=> saved best model")
293 |             trigger = 0
294 | 
295 |         # early stopping
296 |         if args.early_stop is not None:
297 |             if trigger >= args.early_stop:
298 |                 print("=> early stopping")
299 |                 break
300 | 
301 |         torch.cuda.empty_cache()
302 | 
303 | 
304 | 
305 | if __name__ == '__main__':
306 |     main()
307 | 
--------------------------------------------------------------------------------
/model2D/hybridresunet-master/utils.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | 
3 | def str2bool(v):
4 |     if v.lower() in ['true', '1']:
5 |         return True
6 |     elif v.lower() in ['false', '0']:
7 |         return False
8 |     else:
9 |         raise argparse.ArgumentTypeError('Boolean value expected.')
10 | 
11 | 
12 | def count_params(model):
13 |     return sum(p.numel() for p in model.parameters() if p.requires_grad)
14 | 
--------------------------------------------------------------------------------
/model2D/unet3P-master/.gitattributes:
--------------------------------------------------------------------------------
1 | # Auto detect text files and perform LF normalization
2 | * text=auto
3 | 
--------------------------------------------------------------------------------
/model2D/unet3P-master/README.md:
--------------------------------------------------------------------------------
1 | # unet3P
2 | 
3 | 
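The dataset loader that follows (and its twins in the other model folders) expands the BraTS mask values 0/1/2/4 into three binary maps: whole tumor (1, 2, 4), tumor core (1, 4) and enhancing tumor (4). The sketch below restates that copy-and-assign sequence with np.isin on a toy 2x2 mask; it is an equivalent reformulation, not repository code:

# Sketch: the BraTS label encoding used by Dataset.__getitem__ below.
# Raw mask values: 0 background, 1 necrotic/non-enhancing core, 2 edema, 4 enhancing tumor.
import numpy as np

npmask = np.array([[0, 1],
                   [2, 4]])

WT = np.isin(npmask, [1, 2, 4]).astype(np.float32)  # whole tumor
TC = np.isin(npmask, [1, 4]).astype(np.float32)     # tumor core (edema excluded)
ET = (npmask == 4).astype(np.float32)               # enhancing tumor

print(WT)  # [[0. 1.] [1. 1.]]
print(TC)  # [[0. 1.] [0. 1.]]
print(ET)  # [[0. 0.] [0. 1.]]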
--------------------------------------------------------------------------------
/model2D/unet3P-master/dataset.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import cv2 #https://www.jianshu.com/p/f2e88197e81d
3 | import random
4 | 
5 | from skimage.io import imread
6 | from skimage import color
7 | 
8 | import torch
9 | import torch.utils.data
10 | from torchvision import datasets, models, transforms
11 | 
12 | 
13 | class Dataset(torch.utils.data.Dataset):
14 | 
15 |     def __init__(self, args, img_paths, mask_paths, aug=False):
16 |         self.args = args
17 |         self.img_paths = img_paths
18 |         self.mask_paths = mask_paths
19 |         self.aug = aug
20 | 
21 |     def __len__(self):
22 |         return len(self.img_paths)
23 | 
24 |     def __getitem__(self, idx):
25 |         img_path = self.img_paths[idx]
26 |         mask_path = self.mask_paths[idx]
27 |         # code for reading numpy (.npy) data
28 |         npimage = np.load(img_path)
29 |         npmask = np.load(mask_path)
30 |         npimage = npimage.transpose((2, 0, 1))
31 | 
32 |         WT_Label = npmask.copy()
33 |         WT_Label[npmask == 1] = 1.
34 |         WT_Label[npmask == 2] = 1.
35 |         WT_Label[npmask == 4] = 1.
36 |         TC_Label = npmask.copy()
37 |         TC_Label[npmask == 1] = 1.
38 |         TC_Label[npmask == 2] = 0.
39 |         TC_Label[npmask == 4] = 1.
40 |         ET_Label = npmask.copy()
41 |         ET_Label[npmask == 1] = 0.
42 |         ET_Label[npmask == 2] = 0.
43 |         ET_Label[npmask == 4] = 1.
44 |         nplabel = np.empty((160, 160, 3))
45 |         nplabel[:, :, 0] = WT_Label
46 |         nplabel[:, :, 1] = TC_Label
47 |         nplabel[:, :, 2] = ET_Label
48 |         nplabel = nplabel.transpose((2, 0, 1))
49 | 
50 |         nplabel = nplabel.astype("float32")
51 |         npimage = npimage.astype("float32")
52 | 
53 |         return npimage,nplabel
54 | 
55 | 
56 |         # code for reading image files (e.g. jpg, png)
57 |         '''
58 |         image = imread(img_path)
59 |         mask = imread(mask_path)
60 | 
61 |         image = image.astype('float32') / 255
62 |         mask = mask.astype('float32') / 255
63 | 
64 |         if self.aug:
65 |             if random.uniform(0, 1) > 0.5:
66 |                 image = image[:, ::-1, :].copy()
67 |                 mask = mask[:, ::-1].copy()
68 |             if random.uniform(0, 1) > 0.5:
69 |                 image = image[::-1, :, :].copy()
70 |                 mask = mask[::-1, :].copy()
71 | 
72 |         image = color.gray2rgb(image)
73 |         #image = image[:,:,np.newaxis]
74 |         image = image.transpose((2, 0, 1))
75 |         mask = mask[:,:,np.newaxis]
76 |         mask = mask.transpose((2, 0, 1))
77 |         return image, mask
78 |         '''
79 | 
80 | 
--------------------------------------------------------------------------------
/model2D/unet3P-master/init_weights.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | from torch.nn import init
4 | 
5 | def weights_init_normal(m):
6 |     classname = m.__class__.__name__
7 |     #print(classname)
8 |     if classname.find('Conv') != -1:
9 |         init.normal_(m.weight.data, 0.0, 0.02)
10 |     elif classname.find('Linear') != -1:
11 |         init.normal_(m.weight.data, 0.0, 0.02)
12 |     elif classname.find('BatchNorm') != -1:
13 |         init.normal_(m.weight.data, 1.0, 0.02)
14 |         init.constant_(m.bias.data, 0.0)
15 | 
16 | 
17 | def weights_init_xavier(m):
18 |     classname = m.__class__.__name__
19 |     #print(classname)
20 |     if classname.find('Conv') != -1:
21 |         init.xavier_normal_(m.weight.data, gain=1)
22 |     elif classname.find('Linear') != -1:
23 |         init.xavier_normal_(m.weight.data, gain=1)
24 |     elif classname.find('BatchNorm') != -1:
25 |         init.normal_(m.weight.data, 1.0, 0.02)
26 |         init.constant_(m.bias.data, 0.0)
27 | 
28 | 
29 | def weights_init_kaiming(m):
30 |     classname = m.__class__.__name__
31 |     #print(classname)
32 |     if classname.find('Conv') != -1:
33 |         init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
34 |     elif classname.find('Linear') != -1:
35 |         init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
36 |     elif classname.find('BatchNorm') != -1:
37 |         init.normal_(m.weight.data, 1.0, 0.02)
38 |         init.constant_(m.bias.data, 0.0)
39 | 
40 | 
41 | def weights_init_orthogonal(m):
42 |     classname = m.__class__.__name__
43 |     #print(classname)
44 |     if classname.find('Conv') != -1:
45 |         init.orthogonal_(m.weight.data, gain=1)
46 |     elif classname.find('Linear') != -1:
47 |         init.orthogonal_(m.weight.data, gain=1)
48 |     elif classname.find('BatchNorm') != -1:
49 |         init.normal_(m.weight.data, 1.0, 0.02)
50 |         init.constant_(m.bias.data, 0.0)
51 | 
52 | 
53 | def init_weights(net, init_type='normal'):
54 |     #print('initialization method [%s]' % init_type)
55 |     if init_type == 'normal':
56 |         net.apply(weights_init_normal)
57 |     elif init_type == 'xavier':
58 |         net.apply(weights_init_xavier)
59 |     elif init_type == 'kaiming':
60 |         net.apply(weights_init_kaiming)
61 |     elif 
init_type == 'orthogonal': 62 | net.apply(weights_init_orthogonal) 63 | else: 64 | raise NotImplementedError('initialization method [%s] is not implemented' % init_type) 65 | -------------------------------------------------------------------------------- /model2D/unet3P-master/layers.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import torch.nn.functional as F 4 | from init_weights import init_weights 5 | 6 | 7 | class unetConv2(nn.Module): 8 | def __init__(self, in_size, out_size, is_batchnorm, n=2, ks=3, stride=1, padding=1): 9 | super(unetConv2, self).__init__() 10 | self.n = n 11 | self.ks = ks 12 | self.stride = stride 13 | self.padding = padding 14 | s = stride 15 | p = padding 16 | if is_batchnorm: 17 | for i in range(1, n + 1): 18 | conv = nn.Sequential(nn.Conv2d(in_size, out_size, ks, s, p), 19 | nn.BatchNorm2d(out_size), 20 | nn.ReLU(inplace=True), ) 21 | setattr(self, 'conv%d' % i, conv) 22 | in_size = out_size 23 | 24 | else: 25 | for i in range(1, n + 1): 26 | conv = nn.Sequential(nn.Conv2d(in_size, out_size, ks, s, p), 27 | nn.ReLU(inplace=True), ) 28 | setattr(self, 'conv%d' % i, conv) 29 | in_size = out_size 30 | 31 | # initialise the blocks 32 | for m in self.children(): 33 | init_weights(m, init_type='kaiming') 34 | 35 | def forward(self, inputs): 36 | x = inputs 37 | for i in range(1, self.n + 1): 38 | conv = getattr(self, 'conv%d' % i) 39 | x = conv(x) 40 | 41 | return x 42 | -------------------------------------------------------------------------------- /model2D/unet3P-master/losses.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import torch.nn.functional as F 4 | import numpy as np 5 | 6 | try: 7 | from LovaszSoftmax.pytorch.lovasz_losses import lovasz_hinge 8 | except ImportError: 9 | pass 10 | 11 | 12 | class BCEDiceLoss(nn.Module): 13 | def __init__(self): 14 | super(BCEDiceLoss, self).__init__() 15 | 16 | def forward(self, input, target): 17 | bce = F.binary_cross_entropy_with_logits(input, target) 18 | smooth = 1e-5 19 | input = torch.sigmoid(input) 20 | num = target.size(0) 21 | input = input.view(num, -1) 22 | target = target.view(num, -1) 23 | intersection = (input * target) 24 | dice = (2. 
* intersection.sum(1) + smooth) / (input.sum(1) + target.sum(1) + smooth) 25 | dice = 1 - dice.sum() / num 26 | return 0.5 * bce + dice 27 | 28 | 29 | class LovaszHingeLoss(nn.Module): 30 | def __init__(self): 31 | super(LovaszHingeLoss, self).__init__() 32 | 33 | def forward(self, input, target): 34 | input = input.squeeze(1) 35 | target = target.squeeze(1) 36 | loss = lovasz_hinge(input, target, per_image=True) 37 | 38 | return loss 39 | -------------------------------------------------------------------------------- /model2D/unet3P-master/metrics.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | import torch 4 | import torch.nn.functional as F 5 | 6 | 7 | def mean_iou(y_true_in, y_pred_in, print_table=False): 8 | if True: #not np.sum(y_true_in.flatten()) == 0: 9 | labels = y_true_in 10 | y_pred = y_pred_in 11 | 12 | true_objects = 2 13 | pred_objects = 2 14 | 15 | intersection = np.histogram2d(labels.flatten(), y_pred.flatten(), bins=(true_objects, pred_objects))[0] 16 | 17 | # Compute areas (needed for finding the union between all objects) 18 | area_true = np.histogram(labels, bins = true_objects)[0] 19 | area_pred = np.histogram(y_pred, bins = pred_objects)[0] 20 | area_true = np.expand_dims(area_true, -1) 21 | area_pred = np.expand_dims(area_pred, 0) 22 | 23 | # Compute union 24 | union = area_true + area_pred - intersection 25 | 26 | # Exclude background from the analysis 27 | intersection = intersection[1:,1:] 28 | union = union[1:,1:] 29 | union[union == 0] = 1e-9 30 | 31 | # Compute the intersection over union 32 | iou = intersection / union 33 | 34 | # Precision helper function 35 | def precision_at(threshold, iou): 36 | matches = iou > threshold 37 | true_positives = np.sum(matches, axis=1) == 1 # Correct objects 38 | false_positives = np.sum(matches, axis=0) == 0 # Missed objects 39 | false_negatives = np.sum(matches, axis=1) == 0 # Extra objects 40 | tp, fp, fn = np.sum(true_positives), np.sum(false_positives), np.sum(false_negatives) 41 | return tp, fp, fn 42 | 43 | # Loop over IoU thresholds 44 | prec = [] 45 | if print_table: 46 | print("Thresh\tTP\tFP\tFN\tPrec.") 47 | for t in np.arange(0.5, 1.0, 0.05): 48 | tp, fp, fn = precision_at(t, iou) 49 | if (tp + fp + fn) > 0: 50 | p = tp / (tp + fp + fn) 51 | else: 52 | p = 0 53 | if print_table: 54 | print("{:1.3f}\t{}\t{}\t{}\t{:1.3f}".format(t, tp, fp, fn, p)) 55 | prec.append(p) 56 | 57 | if print_table: 58 | print("AP\t-\t-\t-\t{:1.3f}".format(np.mean(prec))) 59 | return np.mean(prec) 60 | 61 | else: 62 | if np.sum(y_pred_in.flatten()) == 0: 63 | return 1 64 | else: 65 | return 0 66 | 67 | 68 | def batch_iou(output, target): 69 | output = torch.sigmoid(output).data.cpu().numpy() > 0.5 70 | target = (target.data.cpu().numpy() > 0.5).astype('int') 71 | output = output[:,0,:,:] 72 | target = target[:,0,:,:] 73 | 74 | ious = [] 75 | for i in range(output.shape[0]): 76 | ious.append(mean_iou(output[i], target[i])) 77 | 78 | return np.mean(ious) 79 | 80 | 81 | def mean_iou(output, target): 82 | smooth = 1e-5 83 | 84 | output = torch.sigmoid(output).data.cpu().numpy() 85 | target = target.data.cpu().numpy() 86 | ious = [] 87 | for t in np.arange(0.5, 1.0, 0.05): 88 | output_ = output > t 89 | target_ = target > t 90 | intersection = (output_ & target_).sum() 91 | union = (output_ | target_).sum() 92 | iou = (intersection + smooth) / (union + smooth) 93 | ious.append(iou) 94 | 95 | return np.mean(ious) 96 | 97 | 98 | def iou_score(output, target): 99 | smooth = 
1e-5
100 | 
101 |     if torch.is_tensor(output):
102 |         output = torch.sigmoid(output).data.cpu().numpy()
103 |     if torch.is_tensor(target):
104 |         target = target.data.cpu().numpy()
105 |     output_ = output > 0.5
106 |     target_ = target > 0.5
107 |     intersection = (output_ & target_).sum()
108 |     union = (output_ | target_).sum()
109 | 
110 |     return (intersection + smooth) / (union + smooth)
111 | 
112 | 
113 | def dice_coef(output, target):
114 |     smooth = 1e-5
115 | 
116 |     if torch.is_tensor(output):
117 |         output = torch.sigmoid(output).data.cpu().numpy()
118 |     if torch.is_tensor(target):
119 |         target = target.data.cpu().numpy()
120 |     #output = torch.sigmoid(output).view(-1).data.cpu().numpy()
121 |     #target = target.view(-1).data.cpu().numpy()
122 | 
123 |     intersection = (output * target).sum()
124 | 
125 |     return (2. * intersection + smooth) / \
126 |         (output.sum() + target.sum() + smooth)
127 | 
128 | 
129 | def accuracy(output, target):
130 |     output = torch.sigmoid(output).view(-1).data.cpu().numpy()
131 |     output = (np.round(output)).astype('int')
132 |     target = target.view(-1).data.cpu().numpy()
133 |     target = (np.round(target)).astype('int')
134 |     (output == target).sum()
135 | 
136 |     return (output == target).sum() / len(output)
137 | 
138 | def ppv(output, target):
139 |     smooth = 1e-5
140 |     if torch.is_tensor(output):
141 |         output = torch.sigmoid(output).data.cpu().numpy()
142 |     if torch.is_tensor(target):
143 |         target = target.data.cpu().numpy()
144 |     intersection = (output * target).sum()
145 |     return (intersection + smooth) / \
146 |         (output.sum() + smooth)
147 | 
148 | def sensitivity(output, target):
149 |     smooth = 1e-5
150 | 
151 |     if torch.is_tensor(output):
152 |         output = torch.sigmoid(output).data.cpu().numpy()
153 |     if torch.is_tensor(target):
154 |         target = target.data.cpu().numpy()
155 | 
156 |     intersection = (output * target).sum()
157 | 
158 |     return (intersection + smooth) / \
159 |         (target.sum() + smooth)
--------------------------------------------------------------------------------
/model2D/unet3P-master/utils.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | 
3 | def str2bool(v):
4 |     if v.lower() in ['true', '1']:
5 |         return True
6 |     elif v.lower() in ['false', '0']:
7 |         return False
8 |     else:
9 |         raise argparse.ArgumentTypeError('Boolean value expected.')
10 | 
11 | 
12 | def count_params(model):
13 |     return sum(p.numel() for p in model.parameters() if p.requires_grad)
14 | 
--------------------------------------------------------------------------------
/model2D/vnet_code-master/.gitattributes:
--------------------------------------------------------------------------------
1 | # Auto detect text files and perform LF normalization
2 | * text=auto
3 | 
--------------------------------------------------------------------------------
/model2D/vnet_code-master/README.md:
--------------------------------------------------------------------------------
1 | # vnet_code
2 | 
3 | 
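The metrics module repeated above thresholds sigmoid outputs at 0.5 for IoU and uses soft products for Dice, PPV and sensitivity. A small sanity check on dummy tensors (illustrative only; a near-perfect prediction should score close to 1 everywhere):

# Sketch: the overlap metrics from metrics.py on a dummy prediction.
import torch
from metrics import iou_score, dice_coef, ppv, sensitivity

logits = torch.full((1, 3, 8, 8), 5.0)   # strongly positive logits -> sigmoid ~ 1
target = torch.ones(1, 3, 8, 8)          # matching ground truth

print(iou_score(logits, target))     # ~1.0: binarized intersection equals union
print(dice_coef(logits, target))     # ~1.0: soft Dice on sigmoid probabilities
print(ppv(logits, target))           # ~1.0: precision-like ratio
print(sensitivity(logits, target))   # ~1.0: recall-like ratio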
--------------------------------------------------------------------------------
/model2D/vnet_code-master/dataset.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import cv2 #https://www.jianshu.com/p/f2e88197e81d
3 | import random
4 | 
5 | from skimage.io import imread
6 | from skimage import color
7 | 
8 | import torch
9 | import torch.utils.data
10 | from torchvision import datasets, models, transforms
11 | 
12 | 
13 | class Dataset(torch.utils.data.Dataset):
14 | 
15 |     def __init__(self, args, img_paths, mask_paths, aug=False):
16 |         self.args = args
17 |         self.img_paths = img_paths
18 |         self.mask_paths = mask_paths
19 |         self.aug = aug
20 | 
21 |     def __len__(self):
22 |         return len(self.img_paths)
23 | 
24 |     def __getitem__(self, idx):
25 |         img_path = self.img_paths[idx]
26 |         mask_path = self.mask_paths[idx]
27 |         npimage = np.load(img_path)
28 |         npmask = np.load(mask_path)
29 |         npimage = npimage.transpose((3, 0, 1, 2))
30 |         npmask = npmask.transpose((3, 0, 1, 2))
31 |         npmask = npmask.astype("float32")
32 |         npimage = npimage.astype("float32")
33 | 
34 |         return npimage,npmask
35 | 
36 | 
37 |         # code for reading image files (e.g. jpg, png)
38 |         '''
39 |         image = imread(img_path)
40 |         mask = imread(mask_path)
41 | 
42 |         image = image.astype('float32') / 255
43 |         mask = mask.astype('float32') / 255
44 | 
45 |         if self.aug:
46 |             if random.uniform(0, 1) > 0.5:
47 |                 image = image[:, ::-1, :].copy()
48 |                 mask = mask[:, ::-1].copy()
49 |             if random.uniform(0, 1) > 0.5:
50 |                 image = image[::-1, :, :].copy()
51 |                 mask = mask[::-1, :].copy()
52 | 
53 |         image = color.gray2rgb(image)
54 |         #image = image[:,:,np.newaxis]
55 |         image = image.transpose((2, 0, 1))
56 |         mask = mask[:,:,np.newaxis]
57 |         mask = mask.transpose((2, 0, 1))
58 |         return image, mask
59 |         '''
60 | 
61 | 
--------------------------------------------------------------------------------
/model2D/vnet_code-master/losses.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | import torch.nn.functional as F
4 | import numpy as np
5 | 
6 | try:
7 |     from LovaszSoftmax.pytorch.lovasz_losses import lovasz_hinge
8 | except ImportError:
9 |     pass
10 | 
11 | 
12 | class BCEDiceLoss(nn.Module):
13 |     def __init__(self):
14 |         super(BCEDiceLoss, self).__init__()
15 | 
16 |     def forward(self, input, target):
17 |         bce = F.binary_cross_entropy_with_logits(input, target)
18 |         smooth = 1e-5
19 |         input = torch.sigmoid(input)
20 |         num = target.size(0)
21 |         input = input.view(num, -1)
22 |         target = target.view(num, -1)
23 |         intersection = (input * target)
24 |         dice = (2. 
* intersection.sum(1) + smooth) / (input.sum(1) + target.sum(1) + smooth) 25 | dice = 1 - dice.sum() / num 26 | return 0.5 * bce + dice 27 | 28 | 29 | class LovaszHingeLoss(nn.Module): 30 | def __init__(self): 31 | super(LovaszHingeLoss, self).__init__() 32 | 33 | def forward(self, input, target): 34 | input = input.squeeze(1) 35 | target = target.squeeze(1) 36 | loss = lovasz_hinge(input, target, per_image=True) 37 | 38 | return loss 39 | -------------------------------------------------------------------------------- /model2D/vnet_code-master/metrics.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | import torch 4 | import torch.nn.functional as F 5 | 6 | 7 | def mean_iou(y_true_in, y_pred_in, print_table=False): 8 | if True: #not np.sum(y_true_in.flatten()) == 0: 9 | labels = y_true_in 10 | y_pred = y_pred_in 11 | 12 | true_objects = 2 13 | pred_objects = 2 14 | 15 | intersection = np.histogram2d(labels.flatten(), y_pred.flatten(), bins=(true_objects, pred_objects))[0] 16 | 17 | # Compute areas (needed for finding the union between all objects) 18 | area_true = np.histogram(labels, bins = true_objects)[0] 19 | area_pred = np.histogram(y_pred, bins = pred_objects)[0] 20 | area_true = np.expand_dims(area_true, -1) 21 | area_pred = np.expand_dims(area_pred, 0) 22 | 23 | # Compute union 24 | union = area_true + area_pred - intersection 25 | 26 | # Exclude background from the analysis 27 | intersection = intersection[1:,1:] 28 | union = union[1:,1:] 29 | union[union == 0] = 1e-9 30 | 31 | # Compute the intersection over union 32 | iou = intersection / union 33 | 34 | # Precision helper function 35 | def precision_at(threshold, iou): 36 | matches = iou > threshold 37 | true_positives = np.sum(matches, axis=1) == 1 # Correct objects 38 | false_positives = np.sum(matches, axis=0) == 0 # Missed objects 39 | false_negatives = np.sum(matches, axis=1) == 0 # Extra objects 40 | tp, fp, fn = np.sum(true_positives), np.sum(false_positives), np.sum(false_negatives) 41 | return tp, fp, fn 42 | 43 | # Loop over IoU thresholds 44 | prec = [] 45 | if print_table: 46 | print("Thresh\tTP\tFP\tFN\tPrec.") 47 | for t in np.arange(0.5, 1.0, 0.05): 48 | tp, fp, fn = precision_at(t, iou) 49 | if (tp + fp + fn) > 0: 50 | p = tp / (tp + fp + fn) 51 | else: 52 | p = 0 53 | if print_table: 54 | print("{:1.3f}\t{}\t{}\t{}\t{:1.3f}".format(t, tp, fp, fn, p)) 55 | prec.append(p) 56 | 57 | if print_table: 58 | print("AP\t-\t-\t-\t{:1.3f}".format(np.mean(prec))) 59 | return np.mean(prec) 60 | 61 | else: 62 | if np.sum(y_pred_in.flatten()) == 0: 63 | return 1 64 | else: 65 | return 0 66 | 67 | 68 | def batch_iou(output, target): 69 | output = torch.sigmoid(output).data.cpu().numpy() > 0.5 70 | target = (target.data.cpu().numpy() > 0.5).astype('int') 71 | output = output[:,0,:,:] 72 | target = target[:,0,:,:] 73 | 74 | ious = [] 75 | for i in range(output.shape[0]): 76 | ious.append(mean_iou(output[i], target[i])) 77 | 78 | return np.mean(ious) 79 | 80 | 81 | def mean_iou(output, target): 82 | smooth = 1e-5 83 | 84 | output = torch.sigmoid(output).data.cpu().numpy() 85 | target = target.data.cpu().numpy() 86 | ious = [] 87 | for t in np.arange(0.5, 1.0, 0.05): 88 | output_ = output > t 89 | target_ = target > t 90 | intersection = (output_ & target_).sum() 91 | union = (output_ | target_).sum() 92 | iou = (intersection + smooth) / (union + smooth) 93 | ious.append(iou) 94 | 95 | return np.mean(ious) 96 | 97 | 98 | def iou_score(output, target): 99 | smooth = 
1e-5
100 | 
101 |     if torch.is_tensor(output):
102 |         output = torch.sigmoid(output).data.cpu().numpy()
103 |     if torch.is_tensor(target):
104 |         target = target.data.cpu().numpy()
105 |     output_ = output > 0.5
106 |     target_ = target > 0.5
107 |     intersection = (output_ & target_).sum()
108 |     union = (output_ | target_).sum()
109 | 
110 |     return (intersection + smooth) / (union + smooth)
111 | 
112 | 
113 | def dice_coef(output, target):
114 |     smooth = 1e-5
115 | 
116 |     if torch.is_tensor(output):
117 |         output = torch.sigmoid(output).data.cpu().numpy()
118 |     if torch.is_tensor(target):
119 |         target = target.data.cpu().numpy()
120 |     #output = torch.sigmoid(output).view(-1).data.cpu().numpy()
121 |     #target = target.view(-1).data.cpu().numpy()
122 | 
123 |     intersection = (output * target).sum()
124 | 
125 |     return (2. * intersection + smooth) / \
126 |         (output.sum() + target.sum() + smooth)
127 | 
128 | 
129 | def accuracy(output, target):
130 |     output = torch.sigmoid(output).view(-1).data.cpu().numpy()
131 |     output = (np.round(output)).astype('int')
132 |     target = target.view(-1).data.cpu().numpy()
133 |     target = (np.round(target)).astype('int')
134 |     (output == target).sum()
135 | 
136 |     return (output == target).sum() / len(output)
137 | 
138 | def ppv(output, target):
139 |     smooth = 1e-5
140 |     if torch.is_tensor(output):
141 |         output = torch.sigmoid(output).data.cpu().numpy()
142 |     if torch.is_tensor(target):
143 |         target = target.data.cpu().numpy()
144 |     intersection = (output * target).sum()
145 |     return (intersection + smooth) / \
146 |         (output.sum() + smooth)
147 | 
148 | def sensitivity(output, target):
149 |     smooth = 1e-5
150 | 
151 |     if torch.is_tensor(output):
152 |         output = torch.sigmoid(output).data.cpu().numpy()
153 |     if torch.is_tensor(target):
154 |         target = target.data.cpu().numpy()
155 | 
156 |     intersection = (output * target).sum()
157 | 
158 |     return (intersection + smooth) / \
159 |         (target.sum() + smooth)
--------------------------------------------------------------------------------
/model2D/vnet_code-master/utils.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | 
3 | def str2bool(v):
4 |     if v.lower() in ['true', '1']:
5 |         return True
6 |     elif v.lower() in ['false', '0']:
7 |         return False
8 |     else:
9 |         raise argparse.ArgumentTypeError('Boolean value expected.')
10 | 
11 | 
12 | def count_params(model):
13 |     return sum(p.numel() for p in model.parameters() if p.requires_grad)
14 | 
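vnet.py below consumes 5D tensors (batch, 4 modalities, depth, H, W). Its InputTransition widens the input to 16 feature maps and forms the residual by tiling the 4 input channels four times. A standalone sketch of that shape arithmetic (sizes are illustrative, not fixed by the code):

# Sketch: the 16-channel residual trick used by InputTransition in vnet.py.
import torch
import torch.nn as nn

x = torch.randn(1, 4, 16, 64, 64)             # (batch, modalities, depth, H, W); sizes illustrative
conv = nn.Conv3d(4, 16, kernel_size=5, padding=2)
out = conv(x)                                  # -> (1, 16, 16, 64, 64)
x16 = torch.cat((x, x, x, x), 1)               # tile the 4 channels four times -> 16 channels
assert (out + x16).shape == (1, 16, 16, 64, 64)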
/model2D/vnet_code-master/vnet.py:
--------------------------------------------------------------------------------
import torch
import torch.nn as nn
import torch.nn.functional as F


def passthrough(x, **kwargs):
    return x


def ELUCons(elu, nchan):
    if elu:
        return nn.ELU(inplace=True)
    else:
        return nn.PReLU(nchan)


# normalization between sub-volumes is necessary
# for good performance
class ContBatchNorm3d(nn.modules.batchnorm._BatchNorm):
    def _check_input_dim(self, input):
        if input.dim() != 5:
            raise ValueError('expected 5D input (got {}D input)'
                             .format(input.dim()))
        super(ContBatchNorm3d, self)._check_input_dim(input)

    def forward(self, input):
        # self._check_input_dim(input)
        # training=True is passed unconditionally, so batch statistics are
        # used even in eval mode.
        return F.batch_norm(
            input, self.running_mean, self.running_var, self.weight, self.bias,
            True, self.momentum, self.eps)


class LUConv(nn.Module):
    def __init__(self, nchan, elu):
        super(LUConv, self).__init__()
        self.relu1 = ELUCons(elu, nchan)
        self.conv1 = nn.Conv3d(nchan, nchan, kernel_size=5, padding=2)
        self.bn1 = ContBatchNorm3d(nchan)

    def forward(self, x):
        out = self.relu1(self.bn1(self.conv1(x)))
        return out


def _make_nConv(nchan, depth, elu):
    layers = []
    for _ in range(depth):
        layers.append(LUConv(nchan, elu))
    return nn.Sequential(*layers)


class InputTransition(nn.Module):
    def __init__(self, outChans, elu):
        super(InputTransition, self).__init__()
        self.conv1 = nn.Conv3d(4, 16, kernel_size=5, padding=2)
        self.bn1 = ContBatchNorm3d(16)
        self.relu1 = ELUCons(elu, 16)

    def forward(self, x):
        # do we want a PRELU here as well?
        out = self.conv1(x)
        out = self.bn1(out)

        # repeat the 4-channel input four times so the residual addition
        # matches the 16 feature channels
        x16 = torch.cat((x, x, x, x), 1)
        myadd = torch.add(out, x16)
        out = self.relu1(myadd)
        return out


class DownTransition(nn.Module):
    def __init__(self, inChans, nConvs, elu, dropout=False):
        super(DownTransition, self).__init__()
        outChans = 2 * inChans
        self.down_conv = nn.Conv3d(inChans, outChans, kernel_size=2, stride=2)
        self.bn1 = ContBatchNorm3d(outChans)
        self.do1 = passthrough
        self.relu1 = ELUCons(elu, outChans)
        self.relu2 = ELUCons(elu, outChans)
        if dropout:
            self.do1 = nn.Dropout3d()
        self.ops = _make_nConv(outChans, nConvs, elu)

    def forward(self, x):
        down = self.relu1(self.bn1(self.down_conv(x)))
        out = self.do1(down)
        out = self.ops(out)
        out = self.relu2(torch.add(out, down))
        return out


class UpTransition(nn.Module):
    def __init__(self, inChans, outChans, nConvs, elu, dropout=False):
        super(UpTransition, self).__init__()
        self.up_conv = nn.ConvTranspose3d(inChans, outChans // 2, kernel_size=2, stride=2)
        self.bn1 = ContBatchNorm3d(outChans // 2)
        self.do1 = passthrough
        self.do2 = nn.Dropout3d()
        self.relu1 = ELUCons(elu, outChans // 2)
        self.relu2 = ELUCons(elu, outChans)
        if dropout:
            self.do1 = nn.Dropout3d()
        self.ops = _make_nConv(outChans, nConvs, elu)

    def forward(self, x, skipx):
        out = self.do1(x)
        skipxdo = self.do2(skipx)
        out = self.relu1(self.bn1(self.up_conv(out)))
        xcat = torch.cat((out, skipxdo), 1)
        out = self.ops(xcat)
        out = self.relu2(torch.add(out, xcat))
        return out


class OutputTransition(nn.Module):
    def __init__(self, inChans, elu, nll):
        super(OutputTransition, self).__init__()
        self.conv1 = nn.Conv3d(inChans, 3, kernel_size=5, padding=2)
        self.bn1 = ContBatchNorm3d(3)
        self.conv2 = nn.Conv3d(3, 3, kernel_size=1)
        self.relu1 = ELUCons(elu, 3)
        if nll:
            self.softmax = F.log_softmax
        else:
            self.softmax = F.softmax

    def forward(self, x):
        # project the 32 input channels down to 3 output channels
        out = self.relu1(self.bn1(self.conv1(x)))
        out = self.conv2(out)

        # make channels the last axis
        # out = out.permute(0, 2, 3, 4, 1).contiguous()
        # flatten
        # out = out.view(out.numel() // 2, 2)
        # out = self.softmax(out)
        # raw 3-channel logits are returned; self.softmax is left unapplied here
        return out

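# Channel bookkeeping for the network assembled below (read off the layers above):
#   in_tr: 4 input modalities -> 16; down path: 16 -> 32 -> 64 -> 128 -> 256
#   up path: 256 -> 256 -> 128 -> 64 -> 32; out_tr: 32 -> 3 logit channels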

class VNet(nn.Module):
    # the number of convolutions in each layer corresponds
    # to what is in the actual prototxt, not the intent
    def __init__(self, argx):
        super(VNet, self).__init__()
        elu = True
        nll = False
        self.in_tr = InputTransition(16, elu)
        self.down_tr32 = DownTransition(16, 1, elu)
        self.down_tr64 = DownTransition(32, 2, elu)
        self.down_tr128 = DownTransition(64, 3, elu, dropout=True)
        self.down_tr256 = DownTransition(128, 2, elu, dropout=True)

        self.up_tr256 = UpTransition(256, 256, 2, elu, dropout=True)
        self.up_tr128 = UpTransition(256, 128, 2, elu, dropout=True)
        self.up_tr64 = UpTransition(128, 64, 1, elu)
        self.up_tr32 = UpTransition(64, 32, 1, elu)
        self.out_tr = OutputTransition(32, elu, nll)

    def forward(self, x):
        out16 = self.in_tr(x)
        out32 = self.down_tr32(out16)
        out64 = self.down_tr64(out32)
        out128 = self.down_tr128(out64)
        out256 = self.down_tr256(out128)
        out = self.up_tr256(out256, out128)
        out = self.up_tr128(out, out64)
        out = self.up_tr64(out, out32)
        out = self.up_tr32(out, out16)
        out = self.out_tr(out)
        return out


# net = VNet(1)
# net.apply(init)

# print('Network created successfully')

# Check the output tensor dimensions:
# net = net.cuda()
# data = torch.randn((2, 4, 32, 160, 160)).cuda()
# res = net(data)
# for item in res:
#     print(item.size())
--------------------------------------------------------------------------------
/model3D/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Merofine/BrainTumorSegmentation/fa97d9268df32e37769a808a70fa257928475d8a/model3D/.DS_Store
--------------------------------------------------------------------------------
/model3D/Unet3D-master/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Merofine/BrainTumorSegmentation/fa97d9268df32e37769a808a70fa257928475d8a/model3D/Unet3D-master/.DS_Store
--------------------------------------------------------------------------------
/model3D/Unet3D-master/.gitattributes:
--------------------------------------------------------------------------------
# Auto detect text files and perform LF normalization
* text=auto
--------------------------------------------------------------------------------
/model3D/Unet3D-master/README.md:
--------------------------------------------------------------------------------
# Unet3D
--------------------------------------------------------------------------------
/model3D/Unet3D-master/__pycache__/dataset.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Merofine/BrainTumorSegmentation/fa97d9268df32e37769a808a70fa257928475d8a/model3D/Unet3D-master/__pycache__/dataset.cpython-36.pyc
--------------------------------------------------------------------------------
/model3D/Unet3D-master/__pycache__/losses.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Merofine/BrainTumorSegmentation/fa97d9268df32e37769a808a70fa257928475d8a/model3D/Unet3D-master/__pycache__/losses.cpython-36.pyc
--------------------------------------------------------------------------------
/model3D/Unet3D-master/__pycache__/metrics.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Merofine/BrainTumorSegmentation/fa97d9268df32e37769a808a70fa257928475d8a/model3D/Unet3D-master/__pycache__/metrics.cpython-36.pyc
--------------------------------------------------------------------------------
/model3D/Unet3D-master/__pycache__/unet3d.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Merofine/BrainTumorSegmentation/fa97d9268df32e37769a808a70fa257928475d8a/model3D/Unet3D-master/__pycache__/unet3d.cpython-36.pyc
--------------------------------------------------------------------------------
/model3D/Unet3D-master/__pycache__/utils.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Merofine/BrainTumorSegmentation/fa97d9268df32e37769a808a70fa257928475d8a/model3D/Unet3D-master/__pycache__/utils.cpython-36.pyc
--------------------------------------------------------------------------------
/model3D/Unet3D-master/dataset.py:
--------------------------------------------------------------------------------
import numpy as np
import cv2  # https://www.jianshu.com/p/f2e88197e81d
import random

from skimage.io import imread
from skimage import color

import torch
import torch.utils.data
from torchvision import datasets, models, transforms


class Dataset(torch.utils.data.Dataset):

    def __init__(self, args, img_paths, mask_paths, aug=False):
        self.args = args
        self.img_paths = img_paths
        self.mask_paths = mask_paths
        self.aug = aug

    def __len__(self):
        return len(self.img_paths)

    def __getitem__(self, idx):
        img_path = self.img_paths[idx]
        mask_path = self.mask_paths[idx]
        npimage = np.load(img_path)
        npmask = np.load(mask_path)
        # move the modality/label axis to the front: (..., C) -> (C, ...)
        npimage = npimage.transpose((3, 0, 1, 2))
        npmask = npmask.transpose((3, 0, 1, 2))
        npmask = npmask.astype("float32")
        npimage = npimage.astype("float32")

        return npimage, npmask


# Code for reading ordinary image files (e.g. jpg, png) instead of .npy volumes:
'''
image = imread(img_path)
mask = imread(mask_path)

image = image.astype('float32') / 255
mask = mask.astype('float32') / 255

if self.aug:
    if random.uniform(0, 1) > 0.5:
        image = image[:, ::-1, :].copy()
        mask = mask[:, ::-1].copy()
    if random.uniform(0, 1) > 0.5:
        image = image[::-1, :, :].copy()
        mask = mask[::-1, :].copy()

image = color.gray2rgb(image)
#image = image[:,:,np.newaxis]
image = image.transpose((2, 0, 1))
mask = mask[:,:,np.newaxis]
mask = mask.transpose((2, 0, 1))
return image, mask
'''
--------------------------------------------------------------------------------
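A minimal sketch of wiring this Dataset into a DataLoader (the paths are hypothetical placeholders for preprocessed .npy volumes, and the args object is unused beyond being stored, so None works here):

    import torch.utils.data
    from dataset import Dataset

    img_paths = ['img_0001.npy']    # hypothetical preprocessed volumes
    mask_paths = ['mask_0001.npy']
    train_set = Dataset(None, img_paths, mask_paths, aug=False)
    loader = torch.utils.data.DataLoader(train_set, batch_size=1, shuffle=True)
    for image, mask in loader:
        print(image.shape, mask.shape)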
/model3D/Unet3D-master/losses.py:
--------------------------------------------------------------------------------
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np

try:
    from LovaszSoftmax.pytorch.lovasz_losses import lovasz_hinge
except ImportError:
    # if the external LovaszSoftmax repo is missing, lovasz_hinge stays
    # undefined and LovaszHingeLoss raises NameError when used
    pass


class BCEDiceLoss(nn.Module):
    def __init__(self):
        super(BCEDiceLoss, self).__init__()

    def forward(self, input, target):
        bce = F.binary_cross_entropy_with_logits(input, target)
        smooth = 1e-5
        input = torch.sigmoid(input)
        num = target.size(0)
        input = input.view(num, -1)
        target = target.view(num, -1)
        intersection = (input * target)
        dice = (2. * intersection.sum(1) + smooth) / (input.sum(1) + target.sum(1) + smooth)
        dice = 1 - dice.sum() / num
        return 0.5 * bce + dice


class LovaszHingeLoss(nn.Module):
    def __init__(self):
        super(LovaszHingeLoss, self).__init__()

    def forward(self, input, target):
        input = input.squeeze(1)
        target = target.squeeze(1)
        loss = lovasz_hinge(input, target, per_image=True)

        return loss
--------------------------------------------------------------------------------
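A minimal sketch of computing BCEDiceLoss (dummy shapes; the loss expects raw logits, since it applies binary_cross_entropy_with_logits and its own sigmoid internally):

    import torch
    from losses import BCEDiceLoss

    criterion = BCEDiceLoss()
    logits = torch.randn(2, 3, 160, 160, requires_grad=True)
    target = torch.randint(0, 2, (2, 3, 160, 160)).float()
    loss = criterion(logits, target)
    loss.backward()
    print(loss.item())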
/model3D/Unet3D-master/metrics.py:
--------------------------------------------------------------------------------
import numpy as np

import torch
import torch.nn.functional as F


def mean_iou(y_true_in, y_pred_in, print_table=False):
    if True:  # originally: not np.sum(y_true_in.flatten()) == 0
        labels = y_true_in
        y_pred = y_pred_in

        true_objects = 2
        pred_objects = 2

        intersection = np.histogram2d(labels.flatten(), y_pred.flatten(), bins=(true_objects, pred_objects))[0]

        # Compute areas (needed for finding the union between all objects)
        area_true = np.histogram(labels, bins=true_objects)[0]
        area_pred = np.histogram(y_pred, bins=pred_objects)[0]
        area_true = np.expand_dims(area_true, -1)
        area_pred = np.expand_dims(area_pred, 0)

        # Compute union
        union = area_true + area_pred - intersection

        # Exclude background from the analysis
        intersection = intersection[1:, 1:]
        union = union[1:, 1:]
        union[union == 0] = 1e-9

        # Compute the intersection over union
        iou = intersection / union

        # Precision helper function
        def precision_at(threshold, iou):
            matches = iou > threshold
            true_positives = np.sum(matches, axis=1) == 1   # Correct objects
            false_positives = np.sum(matches, axis=0) == 0  # Missed objects
            false_negatives = np.sum(matches, axis=1) == 0  # Extra objects
            tp, fp, fn = np.sum(true_positives), np.sum(false_positives), np.sum(false_negatives)
            return tp, fp, fn

        # Loop over IoU thresholds
        prec = []
        if print_table:
            print("Thresh\tTP\tFP\tFN\tPrec.")
        for t in np.arange(0.5, 1.0, 0.05):
            tp, fp, fn = precision_at(t, iou)
            if (tp + fp + fn) > 0:
                p = tp / (tp + fp + fn)
            else:
                p = 0
            if print_table:
                print("{:1.3f}\t{}\t{}\t{}\t{:1.3f}".format(t, tp, fp, fn, p))
            prec.append(p)

        if print_table:
            print("AP\t-\t-\t-\t{:1.3f}".format(np.mean(prec)))
        return np.mean(prec)

    else:
        if np.sum(y_pred_in.flatten()) == 0:
            return 1
        else:
            return 0


def batch_iou(output, target):
    output = torch.sigmoid(output).data.cpu().numpy() > 0.5
    target = (target.data.cpu().numpy() > 0.5).astype('int')
    output = output[:, 0, :, :]
    target = target[:, 0, :, :]

    ious = []
    for i in range(output.shape[0]):
        ious.append(mean_iou(output[i], target[i]))

    return np.mean(ious)


# NOTE: this redefinition shadows the object-matching mean_iou above,
# so batch_iou actually calls this thresholded version.
def mean_iou(output, target):
    smooth = 1e-5

    if torch.is_tensor(output):
        output = torch.sigmoid(output).data.cpu().numpy()
    if torch.is_tensor(target):
        target = target.data.cpu().numpy()
    ious = []
    for t in np.arange(0.5, 1.0, 0.05):
        output_ = output > t
        target_ = target > t
        intersection = (output_ & target_).sum()
        union = (output_ | target_).sum()
        iou = (intersection + smooth) / (union + smooth)
        ious.append(iou)

    return np.mean(ious)


def iou_score(output, target):
    smooth = 1e-5

    if torch.is_tensor(output):
        output = torch.sigmoid(output).data.cpu().numpy()
    if torch.is_tensor(target):
        target = target.data.cpu().numpy()
    output_ = output > 0.5
    target_ = target > 0.5
    intersection = (output_ & target_).sum()
    union = (output_ | target_).sum()

    return (intersection + smooth) / (union + smooth)


def dice_coef(output, target):
    smooth = 1e-5

    if torch.is_tensor(output):
        output = torch.sigmoid(output).data.cpu().numpy()
    if torch.is_tensor(target):
        target = target.data.cpu().numpy()
    # output = torch.sigmoid(output).view(-1).data.cpu().numpy()
    # target = target.view(-1).data.cpu().numpy()

    intersection = (output * target).sum()

    return (2. * intersection + smooth) / \
        (output.sum() + target.sum() + smooth)


def accuracy(output, target):
    output = torch.sigmoid(output).view(-1).data.cpu().numpy()
    output = (np.round(output)).astype('int')
    target = target.view(-1).data.cpu().numpy()
    target = (np.round(target)).astype('int')

    return (output == target).sum() / len(output)


def ppv(output, target):
    smooth = 1e-5
    if torch.is_tensor(output):
        output = torch.sigmoid(output).data.cpu().numpy()
    if torch.is_tensor(target):
        target = target.data.cpu().numpy()
    intersection = (output * target).sum()
    return (intersection + smooth) / \
        (output.sum() + smooth)


def sensitivity(output, target):
    smooth = 1e-5

    if torch.is_tensor(output):
        output = torch.sigmoid(output).data.cpu().numpy()
    if torch.is_tensor(target):
        target = target.data.cpu().numpy()

    intersection = (output * target).sum()

    return (intersection + smooth) / \
        (target.sum() + smooth)
--------------------------------------------------------------------------------
/model3D/Unet3D-master/unet3d.py:
--------------------------------------------------------------------------------
from torch import nn
from torch import cat


class pub(nn.Module):

    def __init__(self, in_channels, out_channels, batch_norm=True):
        super(pub, self).__init__()
        inter_channels = out_channels if in_channels > out_channels else out_channels // 2

        layers = [
            nn.Conv3d(in_channels, inter_channels, 3, stride=1, padding=1),
            nn.ReLU(True),
            nn.Conv3d(inter_channels, out_channels, 3, stride=1, padding=1),
            nn.ReLU(True)
        ]
        if batch_norm:
            layers.insert(1, nn.BatchNorm3d(inter_channels))
            layers.insert(len(layers) - 1, nn.BatchNorm3d(out_channels))
        self.pub = nn.Sequential(*layers)

    def forward(self, x):
        return self.pub(x)


class unet3dEncoder(nn.Module):

    def __init__(self, in_channels, out_channels, batch_norm=True):
        super(unet3dEncoder, self).__init__()
        self.pub = pub(in_channels, out_channels, batch_norm)
        self.pool = nn.MaxPool3d(2, stride=2)

    def forward(self, x):
        x = self.pub(x)
        return x, self.pool(x)


class unet3dUp(nn.Module):
    def __init__(self, in_channels, out_channels, batch_norm=True, sample=True):
        super(unet3dUp, self).__init__()
        # the skip connection contributes in_channels//2 extra channels
        self.pub = pub(in_channels // 2 + in_channels, out_channels, batch_norm)
        if sample:
            self.sample = nn.Upsample(scale_factor=2, mode='nearest')
        else:
            self.sample = nn.ConvTranspose3d(in_channels, in_channels, 2, stride=2)

    def forward(self, x, x1):
        x = self.sample(x)
        # c1 = (x1.size(2) - x.size(2)) // 2
        # c2 = (x1.size(3) - x.size(3)) // 2
        # x1 = x1[:, :, c1:-c1, c2:-c2, c2:-c2]
        x = cat((x, x1), dim=1)
        x = self.pub(x)
        return x


class unet3d(nn.Module):
    def __init__(self, args):
        super(unet3d, self).__init__()
        init_channels = 4
        class_nums = 3
        batch_norm = True
        sample = True

        self.en1 = unet3dEncoder(init_channels, 64, batch_norm)
        self.en2 = unet3dEncoder(64, 128, batch_norm)
        self.en3 = unet3dEncoder(128, 256, batch_norm)
        self.en4 = unet3dEncoder(256, 512, batch_norm)

        self.up3 = unet3dUp(512, 256, batch_norm, sample)
        self.up2 = unet3dUp(256, 128, batch_norm, sample)
        self.up1 = unet3dUp(128, 64, batch_norm, sample)
        self.con_last = nn.Conv3d(64, class_nums, 1)
        # self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        x1, x = self.en1(x)
        x2, x = self.en2(x)
        x3, x = self.en3(x)
        x4, _ = self.en4(x)

        x = self.up3(x4, x3)
        x = self.up2(x, x2)
        x = self.up1(x, x1)
        out = self.con_last(x)
        return out

    def _initialize_weights(self):
        for m in self.modules():
            if isinstance(m, nn.Conv3d):
                # kaiming_uniform is deprecated; use the in-place variant
                nn.init.kaiming_uniform_(m.weight.data)
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm3d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
--------------------------------------------------------------------------------
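A minimal shape check for unet3d, assuming you run from this directory (the args argument is unused internally, so None is passed; spatial sizes must be divisible by 8 because of the three pooling stages):

    import torch
    from unet3d import unet3d

    net = unet3d(None)
    x = torch.randn(1, 4, 32, 160, 160)   # (batch, modalities, D, H, W), dummy data
    out = net(x)
    print(out.shape)  # expected: torch.Size([1, 3, 32, 160, 160])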
/model3D/Unet3D-master/utils.py:
--------------------------------------------------------------------------------
import argparse


def str2bool(v):
    if v.lower() in ['true', '1']:
        return True
    elif v.lower() in ['false', '0']:
        return False
    else:
        raise argparse.ArgumentTypeError('Boolean value expected.')


def count_params(model):
    return sum(p.numel() for p in model.parameters() if p.requires_grad)
--------------------------------------------------------------------------------
/model3D/vnet_code/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Merofine/BrainTumorSegmentation/fa97d9268df32e37769a808a70fa257928475d8a/model3D/vnet_code/.DS_Store
--------------------------------------------------------------------------------
/model3D/vnet_code/README.md:
--------------------------------------------------------------------------------
# vnet_code
--------------------------------------------------------------------------------
/model3D/vnet_code/dataset.py:
--------------------------------------------------------------------------------
import numpy as np
import cv2  # https://www.jianshu.com/p/f2e88197e81d
import random

from skimage.io import imread
from skimage import color

import torch
import torch.utils.data
from torchvision import datasets, models, transforms


class Dataset(torch.utils.data.Dataset):

    def __init__(self, args, img_paths, mask_paths, aug=False):
        self.args = args
        self.img_paths = img_paths
        self.mask_paths = mask_paths
        self.aug = aug

    def __len__(self):
        return len(self.img_paths)

    def __getitem__(self, idx):
        img_path = self.img_paths[idx]
        mask_path = self.mask_paths[idx]
        npimage = np.load(img_path)
        npmask = np.load(mask_path)
        # move the modality/label axis to the front: (..., C) -> (C, ...)
        npimage = npimage.transpose((3, 0, 1, 2))
        npmask = npmask.transpose((3, 0, 1, 2))
        npmask = npmask.astype("float32")
        npimage = npimage.astype("float32")

        return npimage, npmask


# Code for reading ordinary image files (e.g. jpg, png) instead of .npy volumes:
'''
image = imread(img_path)
mask = imread(mask_path)

image = image.astype('float32') / 255
mask = mask.astype('float32') / 255

if self.aug:
    if random.uniform(0, 1) > 0.5:
        image = image[:, ::-1, :].copy()
        mask = mask[:, ::-1].copy()
    if random.uniform(0, 1) > 0.5:
        image = image[::-1, :, :].copy()
        mask = mask[::-1, :].copy()

image = color.gray2rgb(image)
#image = image[:,:,np.newaxis]
image = image.transpose((2, 0, 1))
mask = mask[:,:,np.newaxis]
mask = mask.transpose((2, 0, 1))
return image, mask
'''
--------------------------------------------------------------------------------
/model3D/vnet_code/losses.py:
--------------------------------------------------------------------------------
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np

try:
    from LovaszSoftmax.pytorch.lovasz_losses import lovasz_hinge
except ImportError:
    # if the external LovaszSoftmax repo is missing, lovasz_hinge stays
    # undefined and LovaszHingeLoss raises NameError when used
    pass


class BCEDiceLoss(nn.Module):
    def __init__(self):
        super(BCEDiceLoss, self).__init__()

    def forward(self, input, target):
        bce = F.binary_cross_entropy_with_logits(input, target)
        smooth = 1e-5
        input = torch.sigmoid(input)
        num = target.size(0)
        input = input.view(num, -1)
        target = target.view(num, -1)
        intersection = (input * target)
        dice = (2. * intersection.sum(1) + smooth) / (input.sum(1) + target.sum(1) + smooth)
        dice = 1 - dice.sum() / num
        return 0.5 * bce + dice


class LovaszHingeLoss(nn.Module):
    def __init__(self):
        super(LovaszHingeLoss, self).__init__()

    def forward(self, input, target):
        input = input.squeeze(1)
        target = target.squeeze(1)
        loss = lovasz_hinge(input, target, per_image=True)

        return loss
--------------------------------------------------------------------------------
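LovaszHingeLoss depends on the external LovaszSoftmax repository: if the import at the top of losses.py fails, lovasz_hinge stays undefined and the loss raises NameError at call time. A defensive selection might look like this sketch:

    from losses import BCEDiceLoss, LovaszHingeLoss

    try:
        from LovaszSoftmax.pytorch.lovasz_losses import lovasz_hinge
        criterion = LovaszHingeLoss()
    except ImportError:
        criterion = BCEDiceLoss()   # fall back when the Lovasz code is unavailable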
/model3D/vnet_code/metrics.py:
--------------------------------------------------------------------------------
import numpy as np

import torch
import torch.nn.functional as F


def mean_iou(y_true_in, y_pred_in, print_table=False):
    if True:  # originally: not np.sum(y_true_in.flatten()) == 0
        labels = y_true_in
        y_pred = y_pred_in

        true_objects = 2
        pred_objects = 2

        intersection = np.histogram2d(labels.flatten(), y_pred.flatten(), bins=(true_objects, pred_objects))[0]

        # Compute areas (needed for finding the union between all objects)
        area_true = np.histogram(labels, bins=true_objects)[0]
        area_pred = np.histogram(y_pred, bins=pred_objects)[0]
        area_true = np.expand_dims(area_true, -1)
        area_pred = np.expand_dims(area_pred, 0)

        # Compute union
        union = area_true + area_pred - intersection

        # Exclude background from the analysis
        intersection = intersection[1:, 1:]
        union = union[1:, 1:]
        union[union == 0] = 1e-9

        # Compute the intersection over union
        iou = intersection / union

        # Precision helper function
        def precision_at(threshold, iou):
            matches = iou > threshold
            true_positives = np.sum(matches, axis=1) == 1   # Correct objects
            false_positives = np.sum(matches, axis=0) == 0  # Missed objects
            false_negatives = np.sum(matches, axis=1) == 0  # Extra objects
            tp, fp, fn = np.sum(true_positives), np.sum(false_positives), np.sum(false_negatives)
            return tp, fp, fn

        # Loop over IoU thresholds
        prec = []
        if print_table:
            print("Thresh\tTP\tFP\tFN\tPrec.")
        for t in np.arange(0.5, 1.0, 0.05):
            tp, fp, fn = precision_at(t, iou)
            if (tp + fp + fn) > 0:
                p = tp / (tp + fp + fn)
            else:
                p = 0
            if print_table:
                print("{:1.3f}\t{}\t{}\t{}\t{:1.3f}".format(t, tp, fp, fn, p))
            prec.append(p)

        if print_table:
            print("AP\t-\t-\t-\t{:1.3f}".format(np.mean(prec)))
        return np.mean(prec)

    else:
        if np.sum(y_pred_in.flatten()) == 0:
            return 1
        else:
            return 0


def batch_iou(output, target):
    output = torch.sigmoid(output).data.cpu().numpy() > 0.5
    target = (target.data.cpu().numpy() > 0.5).astype('int')
    output = output[:, 0, :, :]
    target = target[:, 0, :, :]

    ious = []
    for i in range(output.shape[0]):
        ious.append(mean_iou(output[i], target[i]))

    return np.mean(ious)


# NOTE: this redefinition shadows the object-matching mean_iou above,
# so batch_iou actually calls this thresholded version.
def mean_iou(output, target):
    smooth = 1e-5

    if torch.is_tensor(output):
        output = torch.sigmoid(output).data.cpu().numpy()
    if torch.is_tensor(target):
        target = target.data.cpu().numpy()
    ious = []
    for t in np.arange(0.5, 1.0, 0.05):
        output_ = output > t
        target_ = target > t
        intersection = (output_ & target_).sum()
        union = (output_ | target_).sum()
        iou = (intersection + smooth) / (union + smooth)
        ious.append(iou)

    return np.mean(ious)


def iou_score(output, target):
    smooth = 1e-5

    if torch.is_tensor(output):
        output = torch.sigmoid(output).data.cpu().numpy()
    if torch.is_tensor(target):
        target = target.data.cpu().numpy()
    output_ = output > 0.5
    target_ = target > 0.5
    intersection = (output_ & target_).sum()
    union = (output_ | target_).sum()

    return (intersection + smooth) / (union + smooth)


def dice_coef(output, target):
    smooth = 1e-5

    if torch.is_tensor(output):
        output = torch.sigmoid(output).data.cpu().numpy()
    if torch.is_tensor(target):
        target = target.data.cpu().numpy()
    # output = torch.sigmoid(output).view(-1).data.cpu().numpy()
    # target = target.view(-1).data.cpu().numpy()

    intersection = (output * target).sum()

    return (2. * intersection + smooth) / \
        (output.sum() + target.sum() + smooth)


def accuracy(output, target):
    output = torch.sigmoid(output).view(-1).data.cpu().numpy()
    output = (np.round(output)).astype('int')
    target = target.view(-1).data.cpu().numpy()
    target = (np.round(target)).astype('int')

    return (output == target).sum() / len(output)


def ppv(output, target):
    smooth = 1e-5
    if torch.is_tensor(output):
        output = torch.sigmoid(output).data.cpu().numpy()
    if torch.is_tensor(target):
        target = target.data.cpu().numpy()
    intersection = (output * target).sum()
    return (intersection + smooth) / \
        (output.sum() + smooth)


def sensitivity(output, target):
    smooth = 1e-5

    if torch.is_tensor(output):
        output = torch.sigmoid(output).data.cpu().numpy()
    if torch.is_tensor(target):
        target = target.data.cpu().numpy()

    intersection = (output * target).sum()

    return (intersection + smooth) / \
        (target.sum() + smooth)
--------------------------------------------------------------------------------
/model3D/vnet_code/utils.py:
--------------------------------------------------------------------------------
import argparse


def str2bool(v):
    if v.lower() in ['true', '1']:
        return True
    elif v.lower() in ['false', '0']:
        return False
    else:
        raise argparse.ArgumentTypeError('Boolean value expected.')


def count_params(model):
    return sum(p.numel() for p in model.parameters() if p.requires_grad)
--------------------------------------------------------------------------------
/model3D/vnet_code/vnet.py:
--------------------------------------------------------------------------------
import torch
import torch.nn as nn
import torch.nn.functional as F


def passthrough(x, **kwargs):
    return x


def ELUCons(elu, nchan):
    if elu:
        return nn.ELU(inplace=True)
    else:
        return nn.PReLU(nchan)


# normalization between sub-volumes is necessary
# for good performance
class ContBatchNorm3d(nn.modules.batchnorm._BatchNorm):
    def _check_input_dim(self, input):
        if input.dim() != 5:
            raise ValueError('expected 5D input (got {}D input)'
                             .format(input.dim()))
        super(ContBatchNorm3d, self)._check_input_dim(input)

    def forward(self, input):
        # self._check_input_dim(input)
        # training=True is passed unconditionally, so batch statistics are
        # used even in eval mode.
        return F.batch_norm(
            input, self.running_mean, self.running_var, self.weight, self.bias,
            True, self.momentum, self.eps)


class LUConv(nn.Module):
    def __init__(self, nchan, elu):
        super(LUConv, self).__init__()
        self.relu1 = ELUCons(elu, nchan)
        self.conv1 = nn.Conv3d(nchan, nchan, kernel_size=5, padding=2)
        self.bn1 = ContBatchNorm3d(nchan)

    def forward(self, x):
        out = self.relu1(self.bn1(self.conv1(x)))
        return out


def _make_nConv(nchan, depth, elu):
    layers = []
    for _ in range(depth):
        layers.append(LUConv(nchan, elu))
    return nn.Sequential(*layers)


class InputTransition(nn.Module):
    def __init__(self, outChans, elu):
        super(InputTransition, self).__init__()
        self.conv1 = nn.Conv3d(4, 16, kernel_size=5, padding=2)
        self.bn1 = ContBatchNorm3d(16)
        self.relu1 = ELUCons(elu, 16)

    def forward(self, x):
        # do we want a PRELU here as well?
        out = self.conv1(x)
        out = self.bn1(out)

        # repeat the 4-channel input four times so the residual addition
        # matches the 16 feature channels
        x16 = torch.cat((x, x, x, x), 1)
        myadd = torch.add(out, x16)
        out = self.relu1(myadd)
        return out


class DownTransition(nn.Module):
    def __init__(self, inChans, nConvs, elu, dropout=False):
        super(DownTransition, self).__init__()
        outChans = 2 * inChans
        self.down_conv = nn.Conv3d(inChans, outChans, kernel_size=2, stride=2)
        self.bn1 = ContBatchNorm3d(outChans)
        self.do1 = passthrough
        self.relu1 = ELUCons(elu, outChans)
        self.relu2 = ELUCons(elu, outChans)
        if dropout:
            self.do1 = nn.Dropout3d()
        self.ops = _make_nConv(outChans, nConvs, elu)

    def forward(self, x):
        down = self.relu1(self.bn1(self.down_conv(x)))
        out = self.do1(down)
        out = self.ops(out)
        out = self.relu2(torch.add(out, down))
        return out


class UpTransition(nn.Module):
    def __init__(self, inChans, outChans, nConvs, elu, dropout=False):
        super(UpTransition, self).__init__()
        self.up_conv = nn.ConvTranspose3d(inChans, outChans // 2, kernel_size=2, stride=2)
        self.bn1 = ContBatchNorm3d(outChans // 2)
        self.do1 = passthrough
        self.do2 = nn.Dropout3d()
        self.relu1 = ELUCons(elu, outChans // 2)
        self.relu2 = ELUCons(elu, outChans)
        if dropout:
            self.do1 = nn.Dropout3d()
        self.ops = _make_nConv(outChans, nConvs, elu)

    def forward(self, x, skipx):
        out = self.do1(x)
        skipxdo = self.do2(skipx)
        out = self.relu1(self.bn1(self.up_conv(out)))
        xcat = torch.cat((out, skipxdo), 1)
        out = self.ops(xcat)
        out = self.relu2(torch.add(out, xcat))
        return out


class OutputTransition(nn.Module):
    def __init__(self, inChans, elu, nll):
        super(OutputTransition, self).__init__()
        self.conv1 = nn.Conv3d(inChans, 3, kernel_size=5, padding=2)
        self.bn1 = ContBatchNorm3d(3)
        self.conv2 = nn.Conv3d(3, 3, kernel_size=1)
        self.relu1 = ELUCons(elu, 3)
        if nll:
            self.softmax = F.log_softmax
        else:
            self.softmax = F.softmax

    def forward(self, x):
        # project the 32 input channels down to 3 output channels
        out = self.relu1(self.bn1(self.conv1(x)))
        out = self.conv2(out)

        # make channels the last axis
        # out = out.permute(0, 2, 3, 4, 1).contiguous()
        # flatten
        # out = out.view(out.numel() // 2, 2)
        # out = self.softmax(out)
        # raw 3-channel logits are returned; self.softmax is left unapplied here
        return out

class VNet(nn.Module):
    # the number of convolutions in each layer corresponds
    # to what is in the actual prototxt, not the intent
    def __init__(self, argx):
        super(VNet, self).__init__()
        elu = True
        nll = False
        self.in_tr = InputTransition(16, elu)
        self.down_tr32 = DownTransition(16, 1, elu)
        self.down_tr64 = DownTransition(32, 2, elu)
        self.down_tr128 = DownTransition(64, 3, elu, dropout=True)
        self.down_tr256 = DownTransition(128, 2, elu, dropout=True)

        self.up_tr256 = UpTransition(256, 256, 2, elu, dropout=True)
        self.up_tr128 = UpTransition(256, 128, 2, elu, dropout=True)
        self.up_tr64 = UpTransition(128, 64, 1, elu)
        self.up_tr32 = UpTransition(64, 32, 1, elu)
        self.out_tr = OutputTransition(32, elu, nll)

    def forward(self, x):
        out16 = self.in_tr(x)
        out32 = self.down_tr32(out16)
        out64 = self.down_tr64(out32)
        out128 = self.down_tr128(out64)
        out256 = self.down_tr256(out128)
        out = self.up_tr256(out256, out128)
        out = self.up_tr128(out, out64)
        out = self.up_tr64(out, out32)
        out = self.up_tr32(out, out16)
        out = self.out_tr(out)
        return out


# net = VNet(1)
# net.apply(init)

# print('Network created successfully')

# Check the output tensor dimensions:
# net = net.cuda()
# data = torch.randn((2, 4, 32, 160, 160)).cuda()
# res = net(data)
# for item in res:
#     print(item.size())
--------------------------------------------------------------------------------
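Putting the model3D/vnet_code pieces together, a single training step might look like this sketch (batch shape, optimizer, and learning rate are illustrative, not taken from the repo's train.py):

    import torch
    from vnet import VNet
    from losses import BCEDiceLoss
    from metrics import dice_coef

    net = VNet(None)                 # the constructor argument is unused
    criterion = BCEDiceLoss()
    optimizer = torch.optim.Adam(net.parameters(), lr=1e-4)

    images = torch.randn(1, 4, 32, 160, 160)                       # dummy batch
    masks = torch.randint(0, 2, (1, 3, 32, 160, 160)).float()      # dummy labels

    optimizer.zero_grad()
    outputs = net(images)            # raw logits, shape (1, 3, 32, 160, 160)
    loss = criterion(outputs, masks)
    loss.backward()
    optimizer.step()
    print('loss:', loss.item(), 'dice:', dice_coef(outputs, masks))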