├── .idea
│   ├── Unet2d-master.iml
│   ├── encodings.xml
│   ├── misc.xml
│   ├── modules.xml
│   └── workspace.xml
├── README.md
├── Unet_test.py
├── Unet_trian.py
├── log
│   ├── events.out.tfevents.1560841719.chengzhenfeng-Precision-Tower-5810
│   └── events.out.tfevents.1560920626.chengzhenfeng-Precision-Tower-5810
├── outresult
│   └── camparationresult
│ ├── 1.2.840.113564.345050636177.37412.636620521709704390.13_16.png
│ ├── 1.2.840.113619.186.216157103242140.20130902084714854.289_6.png
│ ├── 1.2.840.113619.186.216157103242140.20131010083615159.345_11.png
│ ├── 1.2.840.113619.186.216157103242140.20131126143112165.290_10.png
│ ├── 1.2.840.113619.186.216157103242140.20131126143112165.290_7.png
│ ├── 1.2.840.113619.186.216157103242140.20140122101812020.261_11.png
│ ├── 1.2.840.113619.186.216157103242140.20140208081810822.667_6.png
│ ├── 1.2.840.113619.186.216157103242140.20140220101515752.961_5.png
│ ├── 1.2.840.113619.186.216157103242140.20140327093259350.944_8.png
│ ├── 1.2.840.113619.186.216157103242140.20140401133515414.952_11.png
│ ├── 1.2.840.113619.186.216157103242140.20140424165218691.193_16.png
│ ├── 1.2.840.113619.186.216157103242140.20140424165218691.193_7.png
│ ├── 1.2.840.113619.186.216157103242140.20140506141708301.275_11.png
│ ├── 1.2.840.113619.186.216157103242140.20140606105445308.486_8.png
│ ├── 1.2.840.113619.186.216157103242140.20140828142851255.952_7.png
│ ├── 1.2.840.113619.186.216157103242140.20140902134959373.432_9.png
│ ├── 1.2.840.113619.186.216157103242140.20141203134751086.952_13.png
│ ├── 1.2.840.113619.186.216157103242140.20141218150033349.234_6.png
│ ├── 1.2.840.113619.186.216157103242140.20150211142137745.744_10.png
│ ├── 1.2.840.113619.186.216157103242140.20150211142137745.744_9.png
│ ├── 1.2.840.113619.186.216157103242140.20150317153007340.864_12.png
│ ├── 1.2.840.113619.186.216157103242140.20150326133223547.947_4.png
│ ├── 1.2.840.113619.186.216157103242140.20150731143035314.515_8.png
│ ├── 1.2.840.113619.186.216157103242140.20150731143035314.515_9.png
│ ├── 1.2.840.113619.186.216157103242140.20150818111052132.533_11.png
│ ├── 1.2.840.113619.186.216157103242140.20150818111052132.533_13.png
│ ├── 1.2.840.113619.186.216157103242140.20150818111052132.533_9.png
│ ├── 1.2.840.113619.186.216157103242140.20150831080531559.921_4.png
│ ├── 1.2.840.113619.186.216157103242140.20150923155707779.763_10.png
│ ├── 1.2.840.113619.186.216157103242140.20150924155224057.616_17.png
│ ├── 1.2.840.113619.186.216157103242140.20151029113204160.843_10.png
│ ├── 1.2.840.113619.186.216157103242140.20151107234824829.658_9.png
│ ├── 1.2.840.113619.186.216157103242140.20151110120630299.700_15.png
│ ├── 1.2.840.113619.186.216157103242140.20151116125121675.436_11.png
│ ├── 1.2.840.113619.186.216157103242140.20151116125121675.436_9.png
│ ├── 1.2.840.113619.186.216157103242140.20151216110250722.789_18.png
│ ├── 1.2.840.113619.186.216157103242140.20151230084603402.899_17.png
│ ├── 1.2.840.113619.186.216157103242140.20160112080358004.281_16.png
│ ├── 1.2.840.113619.186.216157103242140.20160225124838541.615_14.png
│ ├── 1.2.840.113619.186.216157103242140.20160426094527128.852_7.png
│ ├── 1.2.840.113619.186.216157103242140.20160427083233734.118_10.png
│ ├── 1.2.840.113619.186.216157103242140.20160427083233734.118_13.png
│ ├── 1.2.840.113619.186.216157103242140.20160504110517539.506_12.png
│ ├── 1.2.840.113619.186.216157103242140.20160506142524241.176_13.png
│ ├── 1.2.840.113619.186.216157103242140.20160512080040587.827_13.png
│ └── 1.2.840.113619.186.216157103242140.20160607134200436.331_10.png
├── tool
│   ├── Make_CSV_File.py
│   ├── Unet2d_test.py
│   └── Unet2d_trian.py
└── unet
    ├── __init__.py
    ├── __pycache__
    │   ├── __init__.cpython-36.pyc
    │   ├── function.cpython-36.pyc
    │   ├── layer.cpython-36.pyc
    │   ├── model_GlandCeil.cpython-36.pyc
    │   └── model_Infarct.cpython-36.pyc
    ├── function.py
    ├── layer.py
    └── model_Infarct.py
/.idea/Unet2d-master.iml:
--------------------------------------------------------------------------------
(XML content was stripped from this export)
--------------------------------------------------------------------------------
/.idea/encodings.xml:
--------------------------------------------------------------------------------
(XML content was stripped from this export)
--------------------------------------------------------------------------------
/.idea/misc.xml:
--------------------------------------------------------------------------------
(XML content was stripped from this export)
--------------------------------------------------------------------------------
/.idea/modules.xml:
--------------------------------------------------------------------------------
(XML content was stripped from this export)
--------------------------------------------------------------------------------
/.idea/workspace.xml:
--------------------------------------------------------------------------------
(XML content was stripped from this export)
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Multi-modality-Infarct
2 | A brain infarct segmentation network that fuses different features from multi-modality MRI images (a new network modified from U-Net).
3 |
4 | Download the trained model: https://pan.baidu.com/s/17p5lNKSNfWvFtn1KujjVbQ (password: hto4)
5 |
6 | Download the training data: please email me describing your question and the research you are doing.
7 |
8 | ## Result
9 |
10 | (result images are not preserved in this export)
15 |
16 | ## Contact
17 | * https://github.com/lucs-C
18 | * email: 2997906313@qq.com
19 | * WeChat Phone number: 13940370538
20 |
--------------------------------------------------------------------------------
/Unet_test.py:
--------------------------------------------------------------------------------
1 | '''
2 | 2D Unet is used to segment medical images.
3 |
4 | author : ChengZhenfeng
5 | '''
6 | from __future__ import division
7 | from unet.model_Infarct import unet2dModule
8 | import numpy as np
9 | import pandas as pd
10 | from unet.function import readmat, get_filename
11 | import matplotlib.pyplot as plt
12 | import cv2
13 | import os
14 | import tensorflow as tf
15 |
16 | displaystep = 10
17 | image_width = 256
18 | image_height = 256
19 |
20 | def prediction():
21 |
22 | # Get the test image paths from the CSV files (headerless path lists)
23 | csvmaskdata = pd.read_csv('./Dataset/train/TrainMask.csv', header=None)
24 | csvimagedata = pd.read_csv('./Dataset/train/TrainImage.csv', header=None)
25 | maskdata = csvmaskdata.iloc[:, :].values
26 | imagedata = csvimagedata.iloc[:, :].values
27 |
28 | # 1. Evaluate the segmentation accuracy of the model
29 | unet2d = unet2dModule(256, 256, channels = 2)
30 |
31 | init = tf.global_variables_initializer()
32 | saver = tf.train.Saver()
33 | sess = tf.InteractiveSession()
34 | sess.run(init)
35 | saver.restore(sess, "./model/Infarct100000.ckpt")
36 |
37 | for i in range(len(imagedata)):
38 | # 1. Segmentation accuracy evaluation
39 | test_images = readmat(imagedata[i][0], 'multimodalitydata')
40 | GT_image = readmat(maskdata[i][0], 'multimodalitymask')
41 | imagename = get_filename(imagedata[i][0])
42 | test_images = np.reshape(test_images, (1, test_images.shape[0], test_images.shape[1], 2))
43 |
44 | pred = sess.run(unet2d.Y_pred, feed_dict={unet2d.X: test_images,
45 | unet2d.phase: 1,
46 | unet2d.drop_conv: 1})
47 | predictvalue = np.reshape(pred, (test_images.shape[1], test_images.shape[2]))
48 | predictvalue = predictvalue.astype(np.float32) * 255.
49 | predictvalue = np.clip(predictvalue, 0, 255).astype('uint8')
50 |
51 | cv2.imwrite("./outresult/predmask/" + imagename + "_mask.bmp", predictvalue)
52 | print("The " + imagename + " has been saved to ./outresult/predmask")
53 |
54 |
55 | # 2. Display the segmentation results
56 | figure_ID = 0
57 | if i % displaystep == 0:
58 | plt.figure(figure_ID)
59 | test_images = np.reshape(test_images, (test_images.shape[1], test_images.shape[2], 2))
60 |
61 | # Show the ADC channel
62 | plt.subplot(2,3,1)
63 | ADC = test_images[:,:,0]
64 | plt.imshow(ADC, cmap= 'gray')
65 | plt.xticks([])
66 | plt.yticks([])
67 | plt.title('ADC')
68 |
69 | # Show the DWI channel
70 | plt.subplot(2,3,2)
71 | DWI = test_images[:,:,1]
72 | plt.imshow(DWI, cmap= 'gray')
73 | plt.xticks([])
74 | plt.yticks([])
75 | plt.title('DWI')
76 |
77 | # Show the GT region on the DWI image
78 | plt.subplot(2,3,3)
79 | plt.imshow(GT_image, cmap='binary')
80 | plt.xticks([])
81 | plt.yticks([])
82 | plt.title('GT')
83 |
84 | # Show the prediction
85 | plt.subplot(2,3,4)
86 | plt.imshow(predictvalue,cmap='binary')
87 | plt.xticks([])
88 | plt.yticks([])
89 | plt.title('predict')
90 |
91 | # Compare GT with the prediction
92 | campresult = np.zeros((image_width, image_width))
93 | # binarize the prediction: pixels above 100 become 1
94 | predictvalue[predictvalue[:] > 100] = 1
95 |
96 | # Intersection of GT and the prediction
97 | intersection = GT_image *predictvalue
98 |
99 | # GT - intersection_img
100 | Under_segmentation = GT_image - intersection
101 |
102 | # pred - intersection_img
103 | over_segmentation = predictvalue - intersection
104 |
105 | campresult = np.zeros((image_width, image_height, 3))
106 |
107 | # campresult = 1*intersection + 2*Under_segmentation + 3*over_segmentation
108 |
109 | if np.sum(np.sum(intersection)) !=0:
110 | campresult[intersection[:]==1, 0] = 255
111 | campresult[intersection[:] == 1, 1] = 0
112 | campresult[intersection[:] == 1, 2] = 0
113 | if np.sum(np.sum(Under_segmentation)) !=0:
114 | campresult[Under_segmentation[:]==1, 0] = 0
115 | campresult[Under_segmentation[:] == 1, 1] = 255
116 | campresult[Under_segmentation[:] == 1, 2] = 0
117 | if np.sum(np.sum(over_segmentation)) !=0:
118 | campresult[over_segmentation[:]==1, 0] = 0
119 | campresult[over_segmentation[:] == 1, 1] = 0
120 | campresult[over_segmentation[:] == 1, 2] = 255
121 |
122 | plt.subplot(2, 3, 5)
123 | plt.imshow(campresult,)
124 | plt.xticks([])
125 | plt.yticks([])
126 | plt.title('compare result')
127 |
128 | plt.savefig('./outresult/camparationresult/' + imagename + '.png')
129 |
130 | # Display all results interactively
131 | # plt.show()
132 |
133 |
134 |
135 | if __name__ == "__main__":
136 | prediction()
137 |
--------------------------------------------------------------------------------
/Unet_trian.py:
--------------------------------------------------------------------------------
1 | '''
2 | 2D Unet is used to segment medical images.
3 |
4 | author : ChengZhenfeng
5 | '''
6 | from __future__ import division
7 | from unet.model_Infarct import unet2dModule
8 | import numpy as np
9 | import pandas as pd
10 | import cv2
11 |
12 | def train():
13 | '''
14 | Preprocessing for dataset
15 | '''
16 | # Read data set (Train data from CSV file)
17 | csvmaskdata = pd.read_csv('./Dataset/train/TrainMask.csv', header=None)  # the CSVs are headerless path lists
18 | csvimagedata = pd.read_csv('./Dataset/train/TrainImage.csv', header=None)
19 | maskdata = csvmaskdata.iloc[:, :].values
20 | imagedata = csvimagedata.iloc[:, :].values
21 | # shuffle imagedata and maskdata together
22 | perm = np.arange(len(csvimagedata))
23 | np.random.shuffle(perm)
24 | imagedata = imagedata[perm]
25 | maskdata = maskdata[perm]
26 |
27 | unet2d = unet2dModule(256, 256, channels = 2, costname = "dice coefficient")
28 | unet2d.train(imagedata, maskdata, "./model/Infarct7000.ckpt",
29 | "./log", 0.001, 0.8, 100000, 16)
30 |
31 |
32 | if __name__ == "__main__":
33 | train()
34 |
--------------------------------------------------------------------------------
/log/events.out.tfevents.1560841719.chengzhenfeng-Precision-Tower-5810:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lucs-C/Multi-modality-Infarct/d1103f6e615486c53072d7088e406d7bd3ce1e6d/log/events.out.tfevents.1560841719.chengzhenfeng-Precision-Tower-5810
--------------------------------------------------------------------------------
/log/events.out.tfevents.1560920626.chengzhenfeng-Precision-Tower-5810:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lucs-C/Multi-modality-Infarct/d1103f6e615486c53072d7088e406d7bd3ce1e6d/log/events.out.tfevents.1560920626.chengzhenfeng-Precision-Tower-5810
--------------------------------------------------------------------------------
/outresult/camparationresult/1.2.840.113564.345050636177.37412.636620521709704390.13_16.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lucs-C/Multi-modality-Infarct/d1103f6e615486c53072d7088e406d7bd3ce1e6d/outresult/camparationresult/1.2.840.113564.345050636177.37412.636620521709704390.13_16.png
--------------------------------------------------------------------------------
/outresult/camparationresult/1.2.840.113619.186.216157103242140.20130902084714854.289_6.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lucs-C/Multi-modality-Infarct/d1103f6e615486c53072d7088e406d7bd3ce1e6d/outresult/camparationresult/1.2.840.113619.186.216157103242140.20130902084714854.289_6.png
--------------------------------------------------------------------------------
/outresult/camparationresult/1.2.840.113619.186.216157103242140.20131010083615159.345_11.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lucs-C/Multi-modality-Infarct/d1103f6e615486c53072d7088e406d7bd3ce1e6d/outresult/camparationresult/1.2.840.113619.186.216157103242140.20131010083615159.345_11.png
--------------------------------------------------------------------------------
/outresult/camparationresult/1.2.840.113619.186.216157103242140.20131126143112165.290_10.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lucs-C/Multi-modality-Infarct/d1103f6e615486c53072d7088e406d7bd3ce1e6d/outresult/camparationresult/1.2.840.113619.186.216157103242140.20131126143112165.290_10.png
--------------------------------------------------------------------------------
/outresult/camparationresult/1.2.840.113619.186.216157103242140.20131126143112165.290_7.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lucs-C/Multi-modality-Infarct/d1103f6e615486c53072d7088e406d7bd3ce1e6d/outresult/camparationresult/1.2.840.113619.186.216157103242140.20131126143112165.290_7.png
--------------------------------------------------------------------------------
/outresult/camparationresult/1.2.840.113619.186.216157103242140.20140122101812020.261_11.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lucs-C/Multi-modality-Infarct/d1103f6e615486c53072d7088e406d7bd3ce1e6d/outresult/camparationresult/1.2.840.113619.186.216157103242140.20140122101812020.261_11.png
--------------------------------------------------------------------------------
/outresult/camparationresult/1.2.840.113619.186.216157103242140.20140208081810822.667_6.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lucs-C/Multi-modality-Infarct/d1103f6e615486c53072d7088e406d7bd3ce1e6d/outresult/camparationresult/1.2.840.113619.186.216157103242140.20140208081810822.667_6.png
--------------------------------------------------------------------------------
/outresult/camparationresult/1.2.840.113619.186.216157103242140.20140220101515752.961_5.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lucs-C/Multi-modality-Infarct/d1103f6e615486c53072d7088e406d7bd3ce1e6d/outresult/camparationresult/1.2.840.113619.186.216157103242140.20140220101515752.961_5.png
--------------------------------------------------------------------------------
/outresult/camparationresult/1.2.840.113619.186.216157103242140.20140327093259350.944_8.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lucs-C/Multi-modality-Infarct/d1103f6e615486c53072d7088e406d7bd3ce1e6d/outresult/camparationresult/1.2.840.113619.186.216157103242140.20140327093259350.944_8.png
--------------------------------------------------------------------------------
/outresult/camparationresult/1.2.840.113619.186.216157103242140.20140401133515414.952_11.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lucs-C/Multi-modality-Infarct/d1103f6e615486c53072d7088e406d7bd3ce1e6d/outresult/camparationresult/1.2.840.113619.186.216157103242140.20140401133515414.952_11.png
--------------------------------------------------------------------------------
/outresult/camparationresult/1.2.840.113619.186.216157103242140.20140424165218691.193_16.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lucs-C/Multi-modality-Infarct/d1103f6e615486c53072d7088e406d7bd3ce1e6d/outresult/camparationresult/1.2.840.113619.186.216157103242140.20140424165218691.193_16.png
--------------------------------------------------------------------------------
/outresult/camparationresult/1.2.840.113619.186.216157103242140.20140424165218691.193_7.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lucs-C/Multi-modality-Infarct/d1103f6e615486c53072d7088e406d7bd3ce1e6d/outresult/camparationresult/1.2.840.113619.186.216157103242140.20140424165218691.193_7.png
--------------------------------------------------------------------------------
/outresult/camparationresult/1.2.840.113619.186.216157103242140.20140506141708301.275_11.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lucs-C/Multi-modality-Infarct/d1103f6e615486c53072d7088e406d7bd3ce1e6d/outresult/camparationresult/1.2.840.113619.186.216157103242140.20140506141708301.275_11.png
--------------------------------------------------------------------------------
/outresult/camparationresult/1.2.840.113619.186.216157103242140.20140606105445308.486_8.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lucs-C/Multi-modality-Infarct/d1103f6e615486c53072d7088e406d7bd3ce1e6d/outresult/camparationresult/1.2.840.113619.186.216157103242140.20140606105445308.486_8.png
--------------------------------------------------------------------------------
/outresult/camparationresult/1.2.840.113619.186.216157103242140.20140828142851255.952_7.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lucs-C/Multi-modality-Infarct/d1103f6e615486c53072d7088e406d7bd3ce1e6d/outresult/camparationresult/1.2.840.113619.186.216157103242140.20140828142851255.952_7.png
--------------------------------------------------------------------------------
/outresult/camparationresult/1.2.840.113619.186.216157103242140.20140902134959373.432_9.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lucs-C/Multi-modality-Infarct/d1103f6e615486c53072d7088e406d7bd3ce1e6d/outresult/camparationresult/1.2.840.113619.186.216157103242140.20140902134959373.432_9.png
--------------------------------------------------------------------------------
/outresult/camparationresult/1.2.840.113619.186.216157103242140.20141203134751086.952_13.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lucs-C/Multi-modality-Infarct/d1103f6e615486c53072d7088e406d7bd3ce1e6d/outresult/camparationresult/1.2.840.113619.186.216157103242140.20141203134751086.952_13.png
--------------------------------------------------------------------------------
/outresult/camparationresult/1.2.840.113619.186.216157103242140.20141218150033349.234_6.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lucs-C/Multi-modality-Infarct/d1103f6e615486c53072d7088e406d7bd3ce1e6d/outresult/camparationresult/1.2.840.113619.186.216157103242140.20141218150033349.234_6.png
--------------------------------------------------------------------------------
/outresult/camparationresult/1.2.840.113619.186.216157103242140.20150211142137745.744_10.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lucs-C/Multi-modality-Infarct/d1103f6e615486c53072d7088e406d7bd3ce1e6d/outresult/camparationresult/1.2.840.113619.186.216157103242140.20150211142137745.744_10.png
--------------------------------------------------------------------------------
/outresult/camparationresult/1.2.840.113619.186.216157103242140.20150211142137745.744_9.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lucs-C/Multi-modality-Infarct/d1103f6e615486c53072d7088e406d7bd3ce1e6d/outresult/camparationresult/1.2.840.113619.186.216157103242140.20150211142137745.744_9.png
--------------------------------------------------------------------------------
/outresult/camparationresult/1.2.840.113619.186.216157103242140.20150317153007340.864_12.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lucs-C/Multi-modality-Infarct/d1103f6e615486c53072d7088e406d7bd3ce1e6d/outresult/camparationresult/1.2.840.113619.186.216157103242140.20150317153007340.864_12.png
--------------------------------------------------------------------------------
/outresult/camparationresult/1.2.840.113619.186.216157103242140.20150326133223547.947_4.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lucs-C/Multi-modality-Infarct/d1103f6e615486c53072d7088e406d7bd3ce1e6d/outresult/camparationresult/1.2.840.113619.186.216157103242140.20150326133223547.947_4.png
--------------------------------------------------------------------------------
/outresult/camparationresult/1.2.840.113619.186.216157103242140.20150731143035314.515_8.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lucs-C/Multi-modality-Infarct/d1103f6e615486c53072d7088e406d7bd3ce1e6d/outresult/camparationresult/1.2.840.113619.186.216157103242140.20150731143035314.515_8.png
--------------------------------------------------------------------------------
/outresult/camparationresult/1.2.840.113619.186.216157103242140.20150731143035314.515_9.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lucs-C/Multi-modality-Infarct/d1103f6e615486c53072d7088e406d7bd3ce1e6d/outresult/camparationresult/1.2.840.113619.186.216157103242140.20150731143035314.515_9.png
--------------------------------------------------------------------------------
/outresult/camparationresult/1.2.840.113619.186.216157103242140.20150818111052132.533_11.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lucs-C/Multi-modality-Infarct/d1103f6e615486c53072d7088e406d7bd3ce1e6d/outresult/camparationresult/1.2.840.113619.186.216157103242140.20150818111052132.533_11.png
--------------------------------------------------------------------------------
/outresult/camparationresult/1.2.840.113619.186.216157103242140.20150818111052132.533_13.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lucs-C/Multi-modality-Infarct/d1103f6e615486c53072d7088e406d7bd3ce1e6d/outresult/camparationresult/1.2.840.113619.186.216157103242140.20150818111052132.533_13.png
--------------------------------------------------------------------------------
/outresult/camparationresult/1.2.840.113619.186.216157103242140.20150818111052132.533_9.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lucs-C/Multi-modality-Infarct/d1103f6e615486c53072d7088e406d7bd3ce1e6d/outresult/camparationresult/1.2.840.113619.186.216157103242140.20150818111052132.533_9.png
--------------------------------------------------------------------------------
/outresult/camparationresult/1.2.840.113619.186.216157103242140.20150831080531559.921_4.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lucs-C/Multi-modality-Infarct/d1103f6e615486c53072d7088e406d7bd3ce1e6d/outresult/camparationresult/1.2.840.113619.186.216157103242140.20150831080531559.921_4.png
--------------------------------------------------------------------------------
/outresult/camparationresult/1.2.840.113619.186.216157103242140.20150923155707779.763_10.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lucs-C/Multi-modality-Infarct/d1103f6e615486c53072d7088e406d7bd3ce1e6d/outresult/camparationresult/1.2.840.113619.186.216157103242140.20150923155707779.763_10.png
--------------------------------------------------------------------------------
/outresult/camparationresult/1.2.840.113619.186.216157103242140.20150924155224057.616_17.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lucs-C/Multi-modality-Infarct/d1103f6e615486c53072d7088e406d7bd3ce1e6d/outresult/camparationresult/1.2.840.113619.186.216157103242140.20150924155224057.616_17.png
--------------------------------------------------------------------------------
/outresult/camparationresult/1.2.840.113619.186.216157103242140.20151029113204160.843_10.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lucs-C/Multi-modality-Infarct/d1103f6e615486c53072d7088e406d7bd3ce1e6d/outresult/camparationresult/1.2.840.113619.186.216157103242140.20151029113204160.843_10.png
--------------------------------------------------------------------------------
/outresult/camparationresult/1.2.840.113619.186.216157103242140.20151107234824829.658_9.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lucs-C/Multi-modality-Infarct/d1103f6e615486c53072d7088e406d7bd3ce1e6d/outresult/camparationresult/1.2.840.113619.186.216157103242140.20151107234824829.658_9.png
--------------------------------------------------------------------------------
/outresult/camparationresult/1.2.840.113619.186.216157103242140.20151110120630299.700_15.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lucs-C/Multi-modality-Infarct/d1103f6e615486c53072d7088e406d7bd3ce1e6d/outresult/camparationresult/1.2.840.113619.186.216157103242140.20151110120630299.700_15.png
--------------------------------------------------------------------------------
/outresult/camparationresult/1.2.840.113619.186.216157103242140.20151116125121675.436_11.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lucs-C/Multi-modality-Infarct/d1103f6e615486c53072d7088e406d7bd3ce1e6d/outresult/camparationresult/1.2.840.113619.186.216157103242140.20151116125121675.436_11.png
--------------------------------------------------------------------------------
/outresult/camparationresult/1.2.840.113619.186.216157103242140.20151116125121675.436_9.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lucs-C/Multi-modality-Infarct/d1103f6e615486c53072d7088e406d7bd3ce1e6d/outresult/camparationresult/1.2.840.113619.186.216157103242140.20151116125121675.436_9.png
--------------------------------------------------------------------------------
/outresult/camparationresult/1.2.840.113619.186.216157103242140.20151216110250722.789_18.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lucs-C/Multi-modality-Infarct/d1103f6e615486c53072d7088e406d7bd3ce1e6d/outresult/camparationresult/1.2.840.113619.186.216157103242140.20151216110250722.789_18.png
--------------------------------------------------------------------------------
/outresult/camparationresult/1.2.840.113619.186.216157103242140.20151230084603402.899_17.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lucs-C/Multi-modality-Infarct/d1103f6e615486c53072d7088e406d7bd3ce1e6d/outresult/camparationresult/1.2.840.113619.186.216157103242140.20151230084603402.899_17.png
--------------------------------------------------------------------------------
/outresult/camparationresult/1.2.840.113619.186.216157103242140.20160112080358004.281_16.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lucs-C/Multi-modality-Infarct/d1103f6e615486c53072d7088e406d7bd3ce1e6d/outresult/camparationresult/1.2.840.113619.186.216157103242140.20160112080358004.281_16.png
--------------------------------------------------------------------------------
/outresult/camparationresult/1.2.840.113619.186.216157103242140.20160225124838541.615_14.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lucs-C/Multi-modality-Infarct/d1103f6e615486c53072d7088e406d7bd3ce1e6d/outresult/camparationresult/1.2.840.113619.186.216157103242140.20160225124838541.615_14.png
--------------------------------------------------------------------------------
/outresult/camparationresult/1.2.840.113619.186.216157103242140.20160426094527128.852_7.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lucs-C/Multi-modality-Infarct/d1103f6e615486c53072d7088e406d7bd3ce1e6d/outresult/camparationresult/1.2.840.113619.186.216157103242140.20160426094527128.852_7.png
--------------------------------------------------------------------------------
/outresult/camparationresult/1.2.840.113619.186.216157103242140.20160427083233734.118_10.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lucs-C/Multi-modality-Infarct/d1103f6e615486c53072d7088e406d7bd3ce1e6d/outresult/camparationresult/1.2.840.113619.186.216157103242140.20160427083233734.118_10.png
--------------------------------------------------------------------------------
/outresult/camparationresult/1.2.840.113619.186.216157103242140.20160427083233734.118_13.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lucs-C/Multi-modality-Infarct/d1103f6e615486c53072d7088e406d7bd3ce1e6d/outresult/camparationresult/1.2.840.113619.186.216157103242140.20160427083233734.118_13.png
--------------------------------------------------------------------------------
/outresult/camparationresult/1.2.840.113619.186.216157103242140.20160504110517539.506_12.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lucs-C/Multi-modality-Infarct/d1103f6e615486c53072d7088e406d7bd3ce1e6d/outresult/camparationresult/1.2.840.113619.186.216157103242140.20160504110517539.506_12.png
--------------------------------------------------------------------------------
/outresult/camparationresult/1.2.840.113619.186.216157103242140.20160506142524241.176_13.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lucs-C/Multi-modality-Infarct/d1103f6e615486c53072d7088e406d7bd3ce1e6d/outresult/camparationresult/1.2.840.113619.186.216157103242140.20160506142524241.176_13.png
--------------------------------------------------------------------------------
/outresult/camparationresult/1.2.840.113619.186.216157103242140.20160512080040587.827_13.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lucs-C/Multi-modality-Infarct/d1103f6e615486c53072d7088e406d7bd3ce1e6d/outresult/camparationresult/1.2.840.113619.186.216157103242140.20160512080040587.827_13.png
--------------------------------------------------------------------------------
/outresult/camparationresult/1.2.840.113619.186.216157103242140.20160607134200436.331_10.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lucs-C/Multi-modality-Infarct/d1103f6e615486c53072d7088e406d7bd3ce1e6d/outresult/camparationresult/1.2.840.113619.186.216157103242140.20160607134200436.331_10.png
--------------------------------------------------------------------------------
/tool/Make_CSV_File.py:
--------------------------------------------------------------------------------
1 |
2 | # -*- coding: utf-8 -*-
3 | """
4 | Created on Fri Mar 22 16:12:28 2019
5 |
6 | @author: ChengZhenfeng
7 | """
8 | import os
9 | # import csv
10 |
11 | # Generate the .csv file that stores the training image paths
12 | f = open("TrainImage.csv", "w")  # the file is created automatically if it does not exist
13 | # csvwriter = csv.writer(f, dialect=("excel"))
14 | path = "/home/chengzhenfeng/PycharmProjects/program/venv/czf1/Unet2d-master/Dataset/train/Image"
15 | k = os.listdir(path)  # list the files under the given folder
16 |
17 | for files1_name in k:
18 | save_name= path + "/" + files1_name
19 | al=save_name
20 | f.write(al+"\n")
21 |
22 | f.close()
23 |
24 |
25 | # Generate the .csv file that stores the training mask paths
26 | f = open("TrainMask.csv", "w")  # the file is created automatically if it does not exist
27 | path = "/home/chengzhenfeng/PycharmProjects/program/venv/czf1/Unet2d-master/Dataset/train/Mask"
28 | k = os.listdir(path)  # list the files under the given folder
29 | for files1_name in k:
30 | save_name= path + "/" + files1_name
31 | al=save_name
32 | f.write(al+"\n")
33 |
34 | f.close()
--------------------------------------------------------------------------------
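The generated "CSV" files are plain, headerless lists of absolute paths, one per line. A minimal sketch of reading one back with pandas, as the training and testing scripts do; passing `header=None` keeps the first path from being consumed as a column name:

import pandas as pd

# TrainImage.csv holds one absolute image path per line and no header row.
csvimagedata = pd.read_csv('TrainImage.csv', header=None)
imagedata = csvimagedata.iloc[:, :].values   # shape (num_samples, 1), dtype object
print(imagedata[0][0])                       # path of the first training image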
/tool/Unet2d_test.py:
--------------------------------------------------------------------------------
1 | '''
2 | 2D Unet is used to segment medical images.
3 |
4 | author : ChengZhenfeng
5 | '''
6 | from __future__ import division
7 | from unet2d.model_Infarct import unet2dModule
8 | import numpy as np
9 | import pandas as pd
10 | import cv2
11 |
12 | def train():
13 | '''
14 | Preprocessing for dataset
15 | '''
16 | # Read data set (Train data from CSV file)
17 | csvmaskdata = pd.read_csv('./TrainMask.csv', header=None)  # the CSVs are headerless path lists
18 | csvimagedata = pd.read_csv('./TrainImage.csv', header=None)
19 | maskdata = csvmaskdata.iloc[:, :].values
20 | imagedata = csvimagedata.iloc[:, :].values
21 | # shuffle imagedata and maskdata together
22 | perm = np.arange(len(csvimagedata))
23 | np.random.shuffle(perm)
24 | imagedata = imagedata[perm]
25 | maskdata = maskdata[perm]
26 |
27 | unet2d = unet2dModule(512, 512, channels=3, costname="dice coefficient")
28 | unet2d.train(imagedata, maskdata, "./model/unet2dglandceil.pd",
29 | "./log", 0.0005, 0.8, 100000, 2)
30 |
31 |
32 | def predict():
33 | true_img = cv2.imread(r"/home/chengzhenfeng/PycharmProjects/program/venv/czf1/Unet2d-master/Dataset/GlandCeildata/test/Image/testA_55.bmp", cv2.IMREAD_COLOR)
34 | test_images = true_img.astype(np.float)
35 | # convert from [0:255] => [0.0:1.0]
36 | test_images = np.multiply(test_images, 1.0 / 255.0)
37 | unet2d = unet2dModule(512, 512, 3)
38 | predictvalue = unet2d.prediction("./model/unet2dglandceil.pd",
39 | test_images)
40 | cv2.imwrite("mask1.bmp", predictvalue)
41 |
42 |
43 | def main(argv):
44 | if argv == 1:
45 | train()
46 | if argv == 2:
47 | predict()
48 |
49 |
50 | if __name__ == "__main__":
51 | main(2)
52 |
--------------------------------------------------------------------------------
/tool/Unet2d_trian.py:
--------------------------------------------------------------------------------
1 | '''
2 | 2D Unet is used to segment medical images.
3 |
4 | author : ChengZhenfeng
5 | '''
6 | from __future__ import division
7 | from unet2d.model_Infarct import unet2dModule
8 | import numpy as np
9 | import pandas as pd
10 | import cv2
11 |
12 | def train():
13 | '''
14 | Preprocessing for dataset
15 | '''
16 | # Read data set (Train data from CSV file)
17 | csvmaskdata = pd.read_csv('./TrainMask.csv', header=None)  # the CSVs are headerless path lists
18 | csvimagedata = pd.read_csv('./TrainImage.csv', header=None)
19 | maskdata = csvmaskdata.iloc[:, :].values
20 | imagedata = csvimagedata.iloc[:, :].values
21 | # shuffle imagedata and maskdata together
22 | perm = np.arange(len(csvimagedata))
23 | np.random.shuffle(perm)
24 | imagedata = imagedata[perm]
25 | maskdata = maskdata[perm]
26 |
27 | unet2d = unet2dModule(512, 512, channels=3, costname="dice coefficient")
28 | unet2d.train(imagedata, maskdata, "./model/unet2dglandceil.pd",
29 | "./log", 0.0005, 0.8, 100000, 2)
30 |
31 |
32 | def predict():
33 | true_img = cv2.imread(r"F:\BaiduNetdiskDownload\GlandCeildata\test\Image\testA_55.bmp", cv2.IMREAD_COLOR)
34 | test_images = true_img.astype(np.float)
35 | # convert from [0:255] => [0.0:1.0]
36 | test_images = np.multiply(test_images, 1.0 / 255.0)
37 | unet2d = unet2dModule(512, 512, 3)
38 | predictvalue = unet2d.prediction("./model/unet2dglandceil.pd",
39 | test_images)
40 | cv2.imwrite("mask1.bmp", predictvalue)
41 |
42 |
43 | def main(argv):
44 | if argv == 1:
45 | train()
46 | if argv == 2:
47 | predict()
48 |
49 |
50 | if __name__ == "__main__":
51 | main(1)
52 |
--------------------------------------------------------------------------------
/unet/__init__.py:
--------------------------------------------------------------------------------
1 | __author__ = 'junqiang chen'
2 | __version__ = '0.1.0'
3 | __time__ = '2018.5.19'
4 |
--------------------------------------------------------------------------------
/unet/__pycache__/__init__.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lucs-C/Multi-modality-Infarct/d1103f6e615486c53072d7088e406d7bd3ce1e6d/unet/__pycache__/__init__.cpython-36.pyc
--------------------------------------------------------------------------------
/unet/__pycache__/function.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lucs-C/Multi-modality-Infarct/d1103f6e615486c53072d7088e406d7bd3ce1e6d/unet/__pycache__/function.cpython-36.pyc
--------------------------------------------------------------------------------
/unet/__pycache__/layer.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lucs-C/Multi-modality-Infarct/d1103f6e615486c53072d7088e406d7bd3ce1e6d/unet/__pycache__/layer.cpython-36.pyc
--------------------------------------------------------------------------------
/unet/__pycache__/model_GlandCeil.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lucs-C/Multi-modality-Infarct/d1103f6e615486c53072d7088e406d7bd3ce1e6d/unet/__pycache__/model_GlandCeil.cpython-36.pyc
--------------------------------------------------------------------------------
/unet/__pycache__/model_Infarct.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lucs-C/Multi-modality-Infarct/d1103f6e615486c53072d7088e406d7bd3ce1e6d/unet/__pycache__/model_Infarct.cpython-36.pyc
--------------------------------------------------------------------------------
/unet/function.py:
--------------------------------------------------------------------------------
1 | import scipy.io as sio
2 | import numpy as np
3 | import os
4 |
5 | # The following shows how to read a .mat file in Python and extract a stored variable
6 | def readmat(str_path, str_variable):
7 | load_data = sio.loadmat(str_path)
8 | data_out = load_data[str_variable]
9 |
10 | return data_out
11 |
12 | def get_filename(file_path):
13 | filepath, tempfilename = os.path.split(file_path)
14 | filename, extension = os.path.splitext(tempfilename)
15 |
16 | return filename
17 |
--------------------------------------------------------------------------------
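A minimal usage sketch for these two helpers, assuming a .mat slice saved with the variable names used elsewhere in this repo ('multimodalitydata' for the ADC+DWI stack, 'multimodalitymask' for the label); the paths below are hypothetical:

from unet.function import readmat, get_filename

image_path = './Dataset/train/Image/example_slice.mat'   # hypothetical example path
mask_path = './Dataset/train/Mask/example_slice.mat'     # hypothetical example path

image = readmat(image_path, 'multimodalitydata')   # expected shape (256, 256, 2): ADC and DWI channels
mask = readmat(mask_path, 'multimodalitymask')     # expected 0/1 mask of the infarct region
name = get_filename(image_path)                    # 'example_slice' (file name without extension)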
/unet/layer.py:
--------------------------------------------------------------------------------
1 | '''
2 | convolution layer, pool layer, initialization ...
3 | '''
4 | import tensorflow as tf
5 |
6 |
7 | # Weight initialization (Xavier's init)
8 | def weight_xavier_init(shape, n_inputs, n_outputs, uniform=True, variable_name=None):
9 | if uniform:
10 | init_range = tf.sqrt(6.0 / (n_inputs + n_outputs))
11 | initial = tf.random_uniform(shape, -init_range, init_range)
12 | return tf.Variable(initial, name=variable_name)
13 | else:
14 | stddev = tf.sqrt(3.0 / (n_inputs + n_outputs))
15 | initial = tf.truncated_normal(shape, stddev=stddev)
16 | return tf.Variable(initial, name=variable_name)
17 |
18 |
19 | # Bias initialization
20 | def bias_variable(shape, variable_name=None):
21 | initial = tf.constant(0.1, shape=shape)
22 | return tf.Variable(initial, name=variable_name)
23 |
24 |
25 | # 2D convolution
26 | def conv2d(x, W, strides=1):
27 | conv_2d = tf.nn.conv2d(x, W, strides=[1, strides, strides, 1], padding='SAME')
28 | return conv_2d
29 |
30 |
31 | # 2D deconvolution
32 | def deconv2d(x, W, stride=2):
33 | x_shape = tf.shape(x)
34 | output_shape = tf.stack([x_shape[0], x_shape[1] * stride, x_shape[2] * stride, x_shape[3] // stride])
35 | return tf.nn.conv2d_transpose(x, W, output_shape, strides=[1, stride, stride, 1], padding='SAME')
36 |
37 |
38 | # Max Pooling
39 | def max_pool_2x2(x, k = 2):
40 | pool2d = tf.nn.max_pool(x, ksize=[1, k, k, 1], strides=[1, k, k, 1], padding='SAME')
41 | return pool2d
42 |
43 |
44 | # Unet crop and concat
45 | def crop_and_concat(x1, x2):
46 | x1_shape = tf.shape(x1)
47 | x2_shape = tf.shape(x2)
48 | # offsets for the top left corner of the crop
49 | offsets = [0, (x1_shape[1] - x2_shape[1]) // 2, (x1_shape[2] - x2_shape[2]) // 2, 0]
50 | size = [-1, x2_shape[1], x2_shape[2], -1]
51 | x1_crop = tf.slice(x1, offsets, size)
52 | return tf.concat([x1_crop, x2], 3)
53 |
54 |
55 |
--------------------------------------------------------------------------------
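A small sketch of how these helpers compose into one U-Net encoder step, mirroring the first block of model_Infarct.py but without batch normalization and dropout:

import tensorflow as tf
from unet.layer import weight_xavier_init, bias_variable, conv2d, max_pool_2x2

# One encoder block: 3x3 conv -> ReLU -> 3x3 conv -> ReLU -> 2x2 max pool,
# for a batch of 256x256 two-channel (ADC + DWI) inputs.
X = tf.placeholder(tf.float32, shape=[None, 256, 256, 2])

W1 = weight_xavier_init(shape=[3, 3, 2, 32], n_inputs=3 * 3 * 2, n_outputs=32)
B1 = bias_variable([32])
conv1 = tf.nn.relu(conv2d(X, W1) + B1)        # (?, 256, 256, 32)

W2 = weight_xavier_init(shape=[3, 3, 32, 32], n_inputs=3 * 3 * 32, n_outputs=32)
B2 = bias_variable([32])
conv2 = tf.nn.relu(conv2d(conv1, W2) + B2)    # (?, 256, 256, 32)

pool1 = max_pool_2x2(conv2)                   # (?, 128, 128, 32)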
/unet/model_Infarct.py:
--------------------------------------------------------------------------------
1 | '''
2 | 2D Unet is used to segment medical images.
3 |
4 | author : ChengZhenfeng
5 | '''
6 | from unet.layer import (conv2d, deconv2d, max_pool_2x2, crop_and_concat, weight_xavier_init, bias_variable)
7 | import tensorflow as tf
8 | import numpy as np
9 | from unet.function import readmat
10 | import cv2
11 |
12 |
13 | def _create_conv_net(X, image_width, image_height, image_channel, phase, drop_conv, n_class=1):
14 | inputX = tf.reshape(X, [-1, image_width, image_height, image_channel])  # shape = (?, image_width, image_height, image_channel)
15 | # UNet model
16 | # layer1->convolution
17 | W1_1 = weight_xavier_init(shape=[3, 3, image_channel, 32], n_inputs=3 * 3 * image_channel, n_outputs=32)
18 | B1_1 = bias_variable([32])
19 | conv1_1 = conv2d(inputX, W1_1) + B1_1
20 | # conv1_1 = tf.contrib.layers.batch_norm(conv1_1, epsilon=1e-5, scope='bn1')
21 | conv1_1 = tf.contrib.layers.batch_norm(conv1_1, center=True, scale=True, is_training=phase, scope='bn1')
22 | conv1_1 = tf.nn.dropout(tf.nn.relu(conv1_1), drop_conv)
23 |
24 | W1_2 = weight_xavier_init(shape=[3, 3, 32, 32], n_inputs=3 * 3 * 32, n_outputs=32)
25 | B1_2 = bias_variable([32])
26 | conv1_2 = conv2d(conv1_1, W1_2) + B1_2
27 | # conv1_2 = tf.contrib.layers.batch_norm(conv1_2, epsilon=1e-5, scope='bn2')
28 | conv1_2 = tf.contrib.layers.batch_norm(conv1_2, center=True, scale=True, is_training=phase, scope='bn2')
29 | conv1_2 = tf.nn.dropout(tf.nn.relu(conv1_2), drop_conv)
30 |
31 | pool1 = max_pool_2x2(conv1_2)
32 | # layer2->convolution
33 | W2_1 = weight_xavier_init(shape=[3, 3, 32, 64], n_inputs=3 * 3 * 32, n_outputs=64)
34 | B2_1 = bias_variable([64])
35 | conv2_1 = conv2d(pool1, W2_1) + B2_1
36 | # conv2_1 = tf.contrib.layers.batch_norm(conv2_1, epsilon=1e-5, scope='bn3')
37 | conv2_1 = tf.contrib.layers.batch_norm(conv2_1, center=True, scale=True, is_training=phase, scope='bn3')
38 | conv2_1 = tf.nn.dropout(tf.nn.relu(conv2_1), drop_conv)
39 |
40 | W2_2 = weight_xavier_init(shape=[3, 3, 64, 64], n_inputs=3 * 3 * 64, n_outputs=64)
41 | B2_2 = bias_variable([64])
42 | conv2_2 = conv2d(conv2_1, W2_2) + B2_2
43 | # conv2_2 = tf.contrib.layers.batch_norm(conv2_2, epsilon=1e-5, scope='bn4')
44 | conv2_2 = tf.contrib.layers.batch_norm(conv2_2, center=True, scale=True, is_training=phase, scope='bn4')
45 | conv2_2 = tf.nn.dropout(tf.nn.relu(conv2_2), drop_conv)
46 |
47 | pool2 = max_pool_2x2(conv2_2)
48 |
49 | # layer3->convolution
50 | W3_1 = weight_xavier_init(shape=[3, 3, 64, 128], n_inputs=3 * 3 * 64, n_outputs=128)
51 | B3_1 = bias_variable([128])
52 | conv3_1 = conv2d(pool2, W3_1) + B3_1
53 | # conv3_1 = tf.contrib.layers.batch_norm(conv3_1, epsilon=1e-5, scope='bn5')
54 | conv3_1 = tf.contrib.layers.batch_norm(conv3_1, center=True, scale=True, is_training=phase, scope='bn5')
55 | conv3_1 = tf.nn.dropout(tf.nn.relu(conv3_1), drop_conv)
56 |
57 | W3_2 = weight_xavier_init(shape=[3, 3, 128, 128], n_inputs=3 * 3 * 128, n_outputs=128)
58 | B3_2 = bias_variable([128])
59 | conv3_2 = conv2d(conv3_1, W3_2) + B3_2
60 | # conv3_2 = tf.contrib.layers.batch_norm(conv3_2, epsilon=1e-5, scope='bn6')
61 | conv3_2 = tf.contrib.layers.batch_norm(conv3_2, center=True, scale=True, is_training=phase, scope='bn6')
62 | conv3_2 = tf.nn.dropout(tf.nn.relu(conv3_2), drop_conv)
63 |
64 | pool3 = max_pool_2x2(conv3_2)
65 |
66 | # layer4->convolution
67 | W4_1 = weight_xavier_init(shape=[3, 3, 128, 256], n_inputs=3 * 3 * 128, n_outputs=256)
68 | B4_1 = bias_variable([256])
69 | conv4_1 = conv2d(pool3, W4_1) + B4_1
70 | # conv4_1 = tf.contrib.layers.batch_norm(conv4_1, epsilon=1e-5, scope='bn7')
71 | conv4_1 = tf.contrib.layers.batch_norm(conv4_1, center=True, scale=True, is_training=phase, scope='bn7')
72 | conv4_1 = tf.nn.dropout(tf.nn.relu(conv4_1), drop_conv)
73 |
74 | W4_2 = weight_xavier_init(shape=[3, 3, 256, 256], n_inputs=3 * 3 * 256, n_outputs=256)
75 | B4_2 = bias_variable([256])
76 | conv4_2 = conv2d(conv4_1, W4_2) + B4_2
77 | # conv4_2 = tf.contrib.layers.batch_norm(conv4_2, epsilon=1e-5, scope='bn8')
78 | conv4_2 = tf.contrib.layers.batch_norm(conv4_2, center=True, scale=True, is_training=phase, scope='bn8')
79 | conv4_2 = tf.nn.dropout(tf.nn.relu(conv4_2), drop_conv)
80 |
81 | pool4 = max_pool_2x2(conv4_2)
82 |
83 | # layer5->convolution
84 | W5_1 = weight_xavier_init(shape=[3, 3, 256, 512], n_inputs=3 * 3 * 256, n_outputs=512)
85 | B5_1 = bias_variable([512])
86 | conv5_1 = conv2d(pool4, W5_1) + B5_1
87 | # conv5_1 = tf.contrib.layers.batch_norm(conv5_1, epsilon=1e-5, scope='bn9')
88 | conv5_1 = tf.contrib.layers.batch_norm(conv5_1, center=True, scale=True, is_training=phase, scope='bn9')
89 | conv5_1 = tf.nn.dropout(tf.nn.relu(conv5_1), drop_conv)
90 |
91 | W5_2 = weight_xavier_init(shape=[3, 3, 512, 512], n_inputs=3 * 3 * 512, n_outputs=512)
92 | B5_2 = bias_variable([512])
93 | conv5_2 = conv2d(conv5_1, W5_2) + B5_2
94 | # conv5_2 = tf.contrib.layers.batch_norm(conv5_2, epsilon=1e-5, scope='bn10')
95 | conv5_2 = tf.contrib.layers.batch_norm(conv5_2, center=True, scale=True, is_training=phase, scope='bn10')
96 | conv5_2 = tf.nn.dropout(tf.nn.relu(conv5_2), drop_conv)
97 |
98 | # layer6->deconvolution
99 | W6 = weight_xavier_init(shape=[3, 3, 256, 512], n_inputs=3 * 3 * 512, n_outputs=256)
100 | B6 = bias_variable([256])
101 | dconv1 = tf.nn.relu(deconv2d(conv5_2, W6) + B6)
102 | dconv_concat1 = crop_and_concat(conv4_2, dconv1)
103 |
104 | # layer7->convolution
105 | W7_1 = weight_xavier_init(shape=[3, 3, 512, 256], n_inputs=3 * 3 * 512, n_outputs=256)
106 | B7_1 = bias_variable([256])
107 | conv7_1 = conv2d(dconv_concat1, W7_1) + B7_1
108 | # conv7_1 = tf.contrib.layers.batch_norm(conv7_1, epsilon=1e-5, scope='bn11')
109 | conv7_1 = tf.contrib.layers.batch_norm(conv7_1, center=True, scale=True, is_training=phase, scope='bn11')
110 | conv7_1 = tf.nn.dropout(tf.nn.relu(conv7_1), drop_conv)
111 |
112 | W7_2 = weight_xavier_init(shape=[3, 3, 256, 256], n_inputs=3 * 3 * 256, n_outputs=256)
113 | B7_2 = bias_variable([256])
114 | conv7_2 = conv2d(conv7_1, W7_2) + B7_2
115 | # conv7_2 = tf.contrib.layers.batch_norm(conv7_2, epsilon=1e-5, scope='bn12')
116 | conv7_2 = tf.contrib.layers.batch_norm(conv7_2, center=True, scale=True, is_training=phase, scope='bn12')
117 | conv7_2 = tf.nn.dropout(tf.nn.relu(conv7_2), drop_conv)
118 |
119 | # layer8->deconvolution
120 | W8 = weight_xavier_init(shape=[3, 3, 128, 256], n_inputs=3 * 3 * 256, n_outputs=128)
121 | B8 = bias_variable([128])
122 | dconv2 = tf.nn.relu(deconv2d(conv7_2, W8) + B8)
123 | dconv_concat2 = crop_and_concat(conv3_2, dconv2)
124 |
125 | # layer9->convolution
126 | W9_1 = weight_xavier_init(shape=[3, 3, 256, 128], n_inputs=3 * 3 * 256, n_outputs=128)
127 | B9_1 = bias_variable([128])
128 | conv9_1 = conv2d(dconv_concat2, W9_1) + B9_1
129 | # conv9_1 = tf.contrib.layers.batch_norm(conv9_1, epsilon=1e-5, scope='bn13')
130 | conv9_1 = tf.contrib.layers.batch_norm(conv9_1, center=True, scale=True, is_training=phase, scope='bn13')
131 | conv9_1 = tf.nn.dropout(tf.nn.relu(conv9_1), drop_conv)
132 |
133 | W9_2 = weight_xavier_init(shape=[3, 3, 128, 128], n_inputs=3 * 3 * 128, n_outputs=128)
134 | B9_2 = bias_variable([128])
135 | conv9_2 = conv2d(conv9_1, W9_2) + B9_2
136 | # conv9_2 = tf.contrib.layers.batch_norm(conv9_2, epsilon=1e-5, scope='bn14')
137 | conv9_2 = tf.contrib.layers.batch_norm(conv9_2, center=True, scale=True, is_training=phase, scope='bn14')
138 | conv9_2 = tf.nn.dropout(tf.nn.relu(conv9_2), drop_conv)
139 |
140 | # layer10->deconvolution
141 | W10 = weight_xavier_init(shape=[3, 3, 64, 128], n_inputs=3 * 3 * 128, n_outputs=64)
142 | B10 = bias_variable([64])
143 | dconv3 = tf.nn.relu(deconv2d(conv9_2, W10) + B10)
144 | dconv_concat3 = crop_and_concat(conv2_2, dconv3)
145 |
146 | # layer11->convolution
147 | W11_1 = weight_xavier_init(shape=[3, 3, 128, 64], n_inputs=3 * 3 * 128, n_outputs=64)
148 | B11_1 = bias_variable([64])
149 | conv11_1 = conv2d(dconv_concat3, W11_1) + B11_1
150 | # conv11_1 = tf.contrib.layers.batch_norm(conv11_1, epsilon=1e-5, scope='bn15')
151 | conv11_1 = tf.contrib.layers.batch_norm(conv11_1, center=True, scale=True, is_training=phase, scope='bn15')
152 | conv11_1 = tf.nn.dropout(tf.nn.relu(conv11_1), drop_conv)
153 |
154 | W11_2 = weight_xavier_init(shape=[3, 3, 64, 64], n_inputs=3 * 3 * 64, n_outputs=64)
155 | B11_2 = bias_variable([64])
156 | conv11_2 = conv2d(conv11_1, W11_2) + B11_2
157 | # conv11_2 = tf.contrib.layers.batch_norm(conv11_2, epsilon=1e-5, scope='bn16')
158 | conv11_2 = tf.contrib.layers.batch_norm(conv11_2, center=True, scale=True, is_training=phase, scope='bn16')
159 | conv11_2 = tf.nn.dropout(tf.nn.relu(conv11_2), drop_conv)
160 |
161 | # layer 12->deconvolution
162 | W12 = weight_xavier_init(shape=[3, 3, 32, 64], n_inputs=3 * 3 * 64, n_outputs=32)
163 | B12 = bias_variable([32])
164 | dconv4 = tf.nn.relu(deconv2d(conv11_2, W12) + B12)
165 | dconv_concat4 = crop_and_concat(conv1_2, dconv4)
166 |
167 | # layer 13->convolution
168 | W13_1 = weight_xavier_init(shape=[3, 3, 64, 32], n_inputs=3 * 3 * 64, n_outputs=32)
169 | B13_1 = bias_variable([32])
170 | conv13_1 = conv2d(dconv_concat4, W13_1) + B13_1
171 | # conv13_1 = tf.contrib.layers.batch_norm(conv13_1, epsilon=1e-5, scope='bn17')
172 | conv13_1 = tf.contrib.layers.batch_norm(conv13_1, center=True, scale=True, is_training=phase, scope='bn17')
173 | conv13_1 = tf.nn.dropout(tf.nn.relu(conv13_1), drop_conv)
174 |
175 | W13_2 = weight_xavier_init(shape=[3, 3, 32, 32], n_inputs=3 * 3 * 32, n_outputs=32)
176 | B13_2 = bias_variable([32])
177 | conv13_2 = conv2d(conv13_1, W13_2) + B13_2
178 | # conv13_2 = tf.contrib.layers.batch_norm(conv13_2, epsilon=1e-5, scope='bn18')
179 | conv13_2 = tf.contrib.layers.batch_norm(conv13_2, center=True, scale=True, is_training=phase, scope='bn18')
180 | conv13_2 = tf.nn.dropout(tf.nn.relu(conv13_2), drop_conv)
181 | # layer14->output
182 | W14 = weight_xavier_init(shape=[1, 1, 32, n_class], n_inputs=1 * 1 * 32, n_outputs=n_class)
183 | B14 = bias_variable([n_class])
184 | output_map = tf.nn.sigmoid(conv2d(conv13_2, W14) + B14, name='output')
185 |
186 | return output_map
187 |
188 |
189 | def _next_batch(train_images, train_labels, batch_size, index_in_epoch):
190 | start = index_in_epoch
191 | index_in_epoch += batch_size
192 |
193 | num_examples = train_images.shape[0]
194 | # when all training data have been used, reshuffle it randomly
195 | if index_in_epoch > num_examples:
196 | # shuffle the data
197 | perm = np.arange(num_examples)
198 | np.random.shuffle(perm)
199 | train_images = train_images[perm]
200 | train_labels = train_labels[perm]
201 | # start next epoch
202 | start = 0
203 | index_in_epoch = batch_size
204 | assert batch_size <= num_examples
205 | end = index_in_epoch
206 | return train_images[start:end], train_labels[start:end], index_in_epoch
207 |
208 |
209 | class unet2dModule(object):
210 | """
211 | A unet2d implementation
212 |
213 | :param image_height: height of the input image
214 | :param image_width: width of the input image
215 | :param channels: number of channels in the input image
216 | :param n_class: number of output labels
217 | :param costname: name of the cost function. Default is "dice coefficient"
218 | """
219 |
220 | def __init__(self, image_height, image_width, channels=1, costname="dice coefficient"):
221 | self.image_width = image_width
222 | self.image_height = image_height
223 | self.channels = channels
224 |
225 | self.X = tf.placeholder("float", shape=[None, image_height, image_width, channels], name="Input")
226 | self.Y_gt = tf.placeholder("float", shape=[None, image_height, image_width, 1], name="Output_GT")
227 | self.lr = tf.placeholder('float', name="Learning_rate")
228 | self.phase = tf.placeholder(tf.bool, name="Phase")
229 | self.drop_conv = tf.placeholder('float', name="DropOut")
230 |
231 | self.Y_pred = _create_conv_net(self.X, image_width, image_height, channels, self.phase, self.drop_conv)
232 |
233 | self.cost = self.__get_cost(costname)
234 | self.accuracy = -self.__get_cost(costname)
235 |
236 | def __get_cost(self, cost_name):
237 | H, W, C = self.Y_gt.get_shape().as_list()[1:]
238 | if cost_name == "dice coefficient":
239 | smooth = 1e-5
240 | pred_flat = tf.reshape(self.Y_pred, [-1, H * W * C])
241 | true_flat = tf.reshape(self.Y_gt, [-1, H * W * C])
242 | intersection = 2 * tf.reduce_sum(pred_flat * true_flat, axis=1) + smooth
243 | denominator = tf.reduce_sum(pred_flat, axis=1) + tf.reduce_sum(true_flat, axis=1) + smooth
244 | loss = -tf.reduce_mean(intersection / denominator)
245 | if cost_name == "pixelwise cross entropy":
246 | assert (C == 1)
247 | flat_logit = tf.reshape(self.Y_pred, [-1])
248 | flat_label = tf.reshape(self.Y_gt, [-1])
249 | loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=flat_logit, labels=flat_label))
250 | return loss
251 |
252 | def train(self, train_images, train_labels, model_path, logs_path, learning_rate,
253 | dropout_conv=0.8, train_epochs=1000, batch_size=2):
254 | train_op = tf.train.AdamOptimizer(self.lr).minimize(self.cost)
255 |
256 | init = tf.global_variables_initializer()
257 | # saver = tf.train.Saver(tf.all_variables())
258 | saver = tf.train.Saver(tf.global_variables())
259 |
260 |
261 | tf.summary.scalar("loss", self.cost)
262 | tf.summary.scalar("accuracy", self.accuracy)
263 | merged_summary_op = tf.summary.merge_all()
264 | sess = tf.InteractiveSession(config=tf.ConfigProto(allow_soft_placement=True))
265 | summary_writer = tf.summary.FileWriter(logs_path, graph=tf.get_default_graph())
266 | sess.run(init)
267 | # saver.restore(sess, model_path)
268 |
269 | DISPLAY_STEP = 1
270 | index_in_epoch = 0
271 |
272 | for i in range(train_epochs):
273 | # get new batch
274 | batch_xs_path, batch_ys_path, index_in_epoch = _next_batch(train_images, train_labels, batch_size,
275 | index_in_epoch)
276 | batch_xs = np.empty((len(batch_xs_path), self.image_height, self.image_width, self.channels))
277 | batch_ys = np.empty((len(batch_ys_path), self.image_height, self.image_width, 1))
278 |
279 | for num in range(len(batch_xs_path)):
280 | # image = cv2.imread(batch_xs_path[num][0], cv2.IMREAD_COLOR)
281 | # cv2.imwrite('image_src.bmp', image)
282 | # label = cv2.imread(batch_ys_path[num][0], cv2.IMREAD_GRAYSCALE)
283 | # cv2.imwrite('mask.bmp', label)
284 | image = readmat(batch_xs_path[num][0],'multimodalitydata')
285 | label = readmat(batch_ys_path[num][0], 'multimodalitymask')
286 | batch_xs[num, :, :, :] = np.reshape(image, (self.image_height, self.image_width, self.channels))
287 | batch_ys[num, :, :, :] = np.reshape(label, (self.image_height, self.image_width, 1))
288 |
289 | batch_xs = batch_xs.astype(np.float)
290 | batch_ys = batch_ys.astype(np.float)
291 |
292 | # Normalize from [0:255] => [0.0:1.0]
293 | # batch_xs = np.multiply(batch_xs, 1.0 / 255.0)
294 | # batch_ys = np.multiply(batch_ys, 1.0 / 255.0)
295 |
296 | # check progress on every 1st,2nd,...,10th,20th,...,100th... step
297 | if i % DISPLAY_STEP == 0 or (i + 1) == train_epochs:
298 | train_loss, train_accuracy = sess.run([self.cost, self.accuracy], feed_dict={self.X: batch_xs,
299 | self.Y_gt: batch_ys,
300 | self.lr: learning_rate,
301 | self.phase: 1,
302 | self.drop_conv: dropout_conv})
303 | # pred = sess.run(self.Y_pred, feed_dict={self.X: batch_xs,
304 | # self.Y_gt: batch_ys,
305 | # self.phase: 1,
306 | # self.drop_conv: 1})
307 | # result = np.reshape(pred[0], (512, 512))
308 | # result = result.astype(np.float32) * 255.
309 | # result = np.clip(result, 0, 255).astype('uint8')
310 | # cv2.imwrite("result.bmp", result)  # save the prediction at every step
311 | print('epoch %d: training loss, training accuracy => %.5f, %.5f' % (i, train_loss, train_accuracy))
312 | if i % (DISPLAY_STEP * 10) == 0 and i:
313 | DISPLAY_STEP *= 10
314 |
315 | # train on batch
316 | _, summary = sess.run([train_op, merged_summary_op], feed_dict={self.X: batch_xs,
317 | self.Y_gt: batch_ys,
318 | self.lr: learning_rate,
319 | self.phase: 1,
320 | self.drop_conv: dropout_conv})
321 | summary_writer.add_summary(summary, i)
322 | summary_writer.close()
323 |
324 | save_path = saver.save(sess, model_path)
325 | print("Model saved in file:", save_path)
326 |
327 | def prediction(self, model_path, test_images):
328 | init = tf.global_variables_initializer()
329 | saver = tf.train.Saver()
330 | sess = tf.InteractiveSession()
331 | sess.run(init)
332 | saver.restore(sess, model_path)
333 |
334 | test_images = np.reshape(test_images, (1, test_images.shape[0], test_images.shape[1], self.channels))
335 | # test_label = cv2.imread("D:\Data\GlandCeil\Test\Mask\\train_37_anno.bmp", 0)
336 | # test_label = np.multiply(test_label, 1.0 / 255.0)
337 | # test_label = np.reshape(test_label, (1, test_label.shape[0], test_label.shape[1], 1))
338 | pred = sess.run(self.Y_pred, feed_dict={self.X: test_images,
339 | self.phase: 1,
340 | self.drop_conv: 1})
341 | result = np.reshape(pred, (test_images.shape[1], test_images.shape[2]))
342 | result = result.astype(np.float32) * 255.
343 | result = np.clip(result, 0, 255).astype('uint8')
344 | return result
345 |
--------------------------------------------------------------------------------
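For reference, a NumPy sketch of the soft Dice loss that __get_cost computes when costname is "dice coefficient" (the same formula, evaluated outside the TensorFlow graph):

import numpy as np

def soft_dice_loss(pred, gt, smooth=1e-5):
    # pred: sigmoid probabilities in [0, 1]; gt: binary ground-truth mask.
    # Both are flattened per sample, exactly as in unet2dModule.__get_cost.
    pred_flat = pred.reshape(pred.shape[0], -1)
    true_flat = gt.reshape(gt.shape[0], -1)
    intersection = 2.0 * np.sum(pred_flat * true_flat, axis=1) + smooth
    denominator = np.sum(pred_flat, axis=1) + np.sum(true_flat, axis=1) + smooth
    return -np.mean(intersection / denominator)   # perfect overlap -> -1, no overlap -> ~0

# Tiny example: the prediction covers the single ground-truth pixel plus one false positive.
pred = np.array([[[1.0, 0.0], [1.0, 0.0]]])
gt   = np.array([[[1.0, 0.0], [0.0, 0.0]]])
print(soft_dice_loss(pred, gt))   # about -0.667, i.e. Dice = 2*1 / (2 + 1)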