├── .idea
├── gan.iml
├── misc.xml
├── modules.xml
└── workspace.xml
├── Criterion.py
├── README.md
├── datasets.py
├── eyedata
├── train
│ ├── img
│ │ ├── 21_training.tif
│ │ ├── 22_training.tif
│ │ ├── 23_training.tif
│ │ ├── 24_training.tif
│ │ ├── 25_training.tif
│ │ ├── 26_training.tif
│ │ ├── 27_training.tif
│ │ ├── 28_training.tif
│ │ ├── 29_training.tif
│ │ ├── 30_training.tif
│ │ ├── 31_training.tif
│ │ ├── 32_training.tif
│ │ ├── 33_training.tif
│ │ ├── 34_training.tif
│ │ ├── 35_training.tif
│ │ ├── 36_training.tif
│ │ ├── 37_training.tif
│ │ ├── 38_training.tif
│ │ ├── 39_training.tif
│ │ └── 40_training.tif
│ └── label
│ │ ├── 21_training.gif
│ │ ├── 22_training.gif
│ │ ├── 23_training.gif
│ │ ├── 24_training.gif
│ │ ├── 25_training.gif
│ │ ├── 26_training.gif
│ │ ├── 27_training.gif
│ │ ├── 28_training.gif
│ │ ├── 29_training.gif
│ │ ├── 30_training.gif
│ │ ├── 31_training.gif
│ │ ├── 32_training.gif
│ │ ├── 33_training.gif
│ │ ├── 34_training.gif
│ │ ├── 35_training.gif
│ │ ├── 36_training.gif
│ │ ├── 37_training.gif
│ │ ├── 38_training.gif
│ │ ├── 39_training.gif
│ │ └── 40_training.gif
└── val
│ ├── img
│ ├── 01_test.tif
│ ├── 02_test.tif
│ ├── 03_test.tif
│ ├── 04_test.tif
│ ├── 05_test.tif
│ ├── 06_test.tif
│ ├── 07_test.tif
│ ├── 08_test.tif
│ ├── 09_test.tif
│ ├── 10_test.tif
│ ├── 11_test.tif
│ ├── 12_test.tif
│ ├── 13_test.tif
│ ├── 14_test.tif
│ ├── 15_test.tif
│ ├── 16_test.tif
│ ├── 17_test.tif
│ ├── 18_test.tif
│ ├── 19_test.tif
│ └── 20_test.tif
│ └── label
│ ├── 01_test.gif
│ ├── 02_test.gif
│ ├── 03_test.gif
│ ├── 04_test.gif
│ ├── 05_test.gif
│ ├── 06_test.gif
│ ├── 07_test.gif
│ ├── 08_test.gif
│ ├── 09_test.gif
│ ├── 10_test.gif
│ ├── 11_test.gif
│ ├── 12_test.gif
│ ├── 13_test.gif
│ ├── 14_test.gif
│ ├── 15_test.gif
│ ├── 16_test.gif
│ ├── 17_test.gif
│ ├── 18_test.gif
│ ├── 19_test.gif
│ └── 20_test.gif
├── gan.py
├── gycutils
├── __init__.py
├── __pycache__
│ ├── __init__.cpython-36.pyc
│ ├── gycaug.cpython-36.pyc
│ ├── trainschedule.cpython-36.pyc
│ └── utils.cpython-36.pyc
├── gycaug.py
├── trainschedule.py
└── utils.py
├── loss.py
├── readmeDisplay
└── 1.PNG
├── train.py
└── transform.py
/.idea/gan.iml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
--------------------------------------------------------------------------------
/.idea/misc.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
--------------------------------------------------------------------------------
/.idea/modules.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
--------------------------------------------------------------------------------
/.idea/workspace.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
24 |
25 |
26 |
27 |
28 |
29 |
30 |
31 |
32 |
33 |
34 |
35 |
36 |
37 |
38 |
39 |
40 |
41 |
42 |
43 |
44 |
45 |
46 |
47 |
48 |
49 |
50 |
51 |
52 |
53 |
54 |
55 |
56 |
57 |
58 |
63 |
64 |
65 |
66 |
67 |
68 |
69 |
70 |
71 |
72 |
73 |
74 |
75 |
76 |
77 |
78 |
79 |
80 |
81 |
82 |
83 |
84 | true
85 | DEFINITION_ORDER
86 |
87 |
88 |
89 |
90 |
91 |
92 |
93 |
94 |
95 |
96 |
97 |
98 |
99 |
100 |
101 |
102 |
103 |
104 |
105 |
106 |
107 |
108 |
109 |
110 |
111 |
112 |
113 |
114 |
115 |
116 |
117 |
118 |
119 |
120 |
121 |
122 |
123 |
124 |
125 |
126 |
127 |
128 |
129 |
130 |
131 |
132 |
133 |
134 |
135 |
136 |
137 |
138 |
139 |
140 |
141 |
142 |
143 |
144 |
145 |
146 |
147 |
148 |
149 |
150 |
151 |
152 |
153 |
154 |
155 |
156 |
157 |
158 |
159 |
160 |
161 |
162 |
163 |
164 |
165 |
166 |
167 |
168 |
169 |
170 |
171 |
172 |
173 |
174 |
175 |
176 |
177 |
178 |
179 |
180 |
181 |
182 |
183 |
184 |
185 |
186 |
187 |
188 |
189 |
190 |
191 |
192 |
193 |
194 |
195 |
196 |
197 |
198 |
199 |
200 |
201 |
202 |
203 |
204 |
205 |
206 |
207 |
208 |
209 |
210 |
211 |
212 |
213 |
214 |
215 |
216 |
217 |
218 |
219 |
220 |
221 |
222 |
223 |
224 |
225 |
226 |
227 |
228 |
229 |
230 |
231 |
232 |
233 |
234 |
235 |
236 |
237 |
238 |
239 |
240 |
241 |
242 |
243 |
244 |
245 |
246 |
247 |
248 |
249 |
250 |
251 |
252 |
253 |
254 | 1525436788164
255 |
256 |
257 | 1525436788164
258 |
259 |
260 |
261 |
262 |
263 |
264 |
265 |
266 |
267 |
268 |
269 |
270 |
271 |
272 |
273 |
274 |
275 |
276 |
277 |
278 |
279 |
280 |
281 |
282 |
283 |
284 |
285 |
286 |
287 |
288 |
289 |
290 |
291 |
292 |
293 |
294 |
295 |
296 |
297 |
298 |
299 |
300 |
301 |
302 |
303 |
304 |
305 |
306 |
307 |
308 |
309 |
310 |
311 |
312 |
313 |
314 |
315 |
316 |
317 |
318 |
319 |
320 |
321 |
322 |
323 |
324 |
325 |
326 |
327 |
328 |
329 |
330 |
331 |
332 |
333 |
334 |
335 |
336 |
337 |
338 |
339 |
340 |
341 |
342 |
343 |
344 |
345 |
346 |
347 |
348 |
349 |
350 |
351 |
352 |
353 |
354 |
355 |
356 |
357 |
358 |
359 |
360 |
361 |
362 |
363 |
364 |
365 |
366 |
367 |
368 |
369 |
370 |
371 |
372 |
373 |
374 |
375 |
376 |
377 |
378 |
379 |
380 |
381 |
382 |
383 |
384 |
385 |
386 |
387 |
388 |
389 |
390 |
391 |
392 |
393 |
394 |
395 |
396 |
397 |
398 |
399 |
400 |
401 |
402 |
403 |
404 |
405 |
406 |
407 |
408 |
409 |
410 |
411 |
412 |
413 |
414 |
415 |
416 |
417 |
418 |
419 |
420 |
421 |
422 |
423 |
424 |
425 |
426 |
427 |
428 |
429 |
430 |
431 |
432 |
433 |
434 |
435 |
436 |
437 |
438 |
439 |
440 |
441 |
442 |
443 |
444 |
445 |
446 |
447 |
448 |
449 |
450 |
451 |
452 |
453 |
454 |
455 |
456 |
457 |
458 |
459 |
460 |
461 |
462 |
463 |
464 |
465 |
466 |
467 |
468 |
469 |
470 |
471 |
472 |
473 |
474 |
475 |
476 |
477 |
478 |
479 |
480 |
481 |
482 |
483 |
484 |
485 |
486 |
487 |
488 |
489 |
490 |
491 |
492 |
493 |
494 |
495 |
496 |
497 |
498 |
499 |
500 |
501 |
502 |
503 |
504 |
505 |
506 |
507 |
508 |
509 |
510 |
511 |
512 |
513 |
514 |
515 |
516 |
517 |
518 |
519 |
520 |
521 |
522 |
523 |
524 |
525 |
526 |
527 |
528 |
529 |
530 |
531 |
532 |
533 |
534 |
535 |
536 |
537 |
538 |
539 |
540 |
541 |
542 |
543 |
544 |
545 |
546 |
547 |
548 |
549 |
550 |
551 |
552 |
553 |
554 |
555 |
556 |
557 |
558 |
559 |
560 |
561 |
562 |
563 |
564 |
565 |
566 |
567 |
568 |
569 |
570 |
571 |
572 |
573 |
574 |
575 |
576 |
577 |
578 |
579 |
580 |
581 |
582 |
583 |
584 |
585 |
586 |
587 |
588 |
589 |
590 |
591 |
592 |
593 |
594 |
595 |
596 |
597 |
598 |
599 |
600 |
601 |
602 |
603 |
604 |
605 |
606 |
607 |
608 |
609 |
610 |
611 |
612 |
613 |
614 |
615 |
616 |
617 |
618 |
619 |
620 |
621 |
622 |
623 |
624 |
625 |
626 |
627 |
628 |
629 |
630 |
631 |
632 |
633 |
634 |
635 |
636 |
637 |
638 |
639 |
640 |
641 |
642 |
643 |
644 |
645 |
646 |
647 |
648 |
649 |
650 |
651 |
652 |
653 |
654 |
655 |
656 |
657 |
658 |
659 |
660 |
661 |
662 |
663 |
664 |
665 |
666 |
667 |
668 |
669 |
670 |
671 |
672 |
673 |
674 |
675 |
676 |
677 |
678 |
679 |
680 |
681 |
682 |
683 |
684 |
685 |
686 |
687 |
688 |
689 |
690 |
691 |
692 |
693 |
694 |
695 |
696 |
697 |
698 |
699 |
700 |
701 |
702 |
703 |
704 |
705 |
706 |
707 |
708 |
709 |
710 |
711 |
712 |
713 |
714 |
715 |
716 |
717 |
718 |
719 |
720 |
721 |
722 |
723 |
724 |
725 |
726 |
727 |
728 |
729 |
730 |
731 |
732 |
733 |
734 |
735 |
736 |
737 |
738 |
739 |
740 |
741 |
742 |
743 |
744 |
745 |
--------------------------------------------------------------------------------
/Criterion.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from sklearn.metrics import precision_recall_curve
3 | import os
class Criterion:
    """Evaluation helper that computes and persists precision/recall curves."""

    def precision_recall(self, dst, epoch, ytrue, ypred):
        """Compute the precision-recall curve for one epoch and save it to disk.

        Args:
            dst: destination directory for the .npy files (created if missing).
            epoch: epoch identifier, embedded in the output file names.
            ytrue: ground-truth binary labels, flattened.
            ypred: predicted scores/probabilities, flattened.

        Saves three arrays into ``dst``: P<epoch>.npy (precision),
        R<epoch>.npy (recall) and T<epoch>.npy (thresholds).
        """
        precision, recall, thresholds = precision_recall_curve(ytrue, ypred)
        # np.save would raise FileNotFoundError if dst does not exist yet.
        os.makedirs(dst, exist_ok=True)
        np.save(os.path.join(dst, "P%s" % epoch), precision)
        np.save(os.path.join(dst, "R%s" % epoch), recall)
        np.save(os.path.join(dst, "T%s" % epoch), thresholds)
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Vessel-wgan-pytorch
2 |
3 | > Author: Yuchao Gu
4 |
5 | > E-mail: 2015014178@buct.edu.cn
6 |
7 | > Date: 2018-05-27
8 |
9 | >Description: The code is a PyTorch implementation of [《Retinal Vessel Segmentation in Fundoscopic Images with Generative Adversarial Networks》](https://arxiv.org/abs/1706.09318)
10 |
11 |
12 | ---
13 |
14 | ## Overview
15 |
16 | ### Data
17 |
18 | [DRIVE: Digital Retinal Images for Vessel Extraction](http://www.isi.uu.nl/Research/Databases/DRIVE/) you can download the train and test data from this server. You can also find data in the eyedata folder.
19 |
20 | ### Pre-processing
21 |
22 | The dataset contains 20 training images. The first step of my pre-processing is randomly cropping to 512*512. The second step is to randomly change the brightness, contrast and hue of the training image. I implement this method in my code, so it is convenient to use. Furthermore, a GAN-based method of generating retina images can be used as an extra data source.
23 |
24 | ### Model
25 |
26 | 
27 |
28 | ### Training
29 |
30 | python train.py
31 |
32 | ---
33 |
34 | ## How to use
35 |
36 | ### Dependencies
37 |
38 | This code depends on the following libraries:
39 |
40 | * Python 3.6
41 | * Pytorch
42 | * PIL
43 |
44 | ### structure
45 | ```
46 | vessel gan
47 | │
48 | ├── eyedata # drive data
49 | │
50 | ├── gycutils # my utils for data augmentation
51 | │
52 | ├── Criterion.py # generate and store precision,recall curve
53 | │
54 | ├── datasets.py # dataset for dataloader
55 | │
56 | ├── gan.py # generative adversarial network for vessel segmentation
57 | │
58 | ├── train.py # train code
59 | │
60 | ├── transform.py
61 | │
62 | └── README.md # introduction to this project
63 | ```
64 |
65 |
66 |
67 |
--------------------------------------------------------------------------------
/datasets.py:
--------------------------------------------------------------------------------
1 | import os
2 | import os.path as osp
3 | import numpy as np
4 | from PIL import Image
5 | from gycutils.gycaug import ColorAug,Add_Gaussion_noise,Random_horizontal_flip,Random_vertical_flip,Compose_imglabel,Random_crop
6 | import collections
7 | import torch
8 | import torchvision
9 | from transform import ReLabel, ToLabel, Scale
10 | from torch.utils import data
11 | from transform import HorizontalFlip, VerticalFlip
12 | from torchvision.transforms import Compose
13 | from torchvision.transforms import Compose, CenterCrop, Normalize, ToTensor
14 |
def default_loader(path):
    """Open the image file at *path* with PIL and return the (lazy) Image object."""
    image = Image.open(path)
    return image
17 |
class VOCDataSet(data.Dataset):
    """DRIVE retina dataset (VOC-style layout: eyedata/<split>/{img,label}).

    Pairs each ``*.tif`` fundus image with its ``*.gif`` vessel mask and
    applies optional joint and per-modality transforms.
    """

    def __init__(self, root, split="train", img_transform=None,
                 label_transform=None, image_label_transform=None):
        """Index all image/label pairs under root/eyedata/<split>.

        Args:
            root: project root containing the ``eyedata`` folder.
            split: sub-folder to read ("train" or "val").
            img_transform: transform applied to the image only.
            label_transform: transform applied to the label only.
            image_label_transform: joint transform applied to (image, label)
                before the per-modality transforms (e.g. synchronized crops/flips).
        """
        self.root = root
        self.split = split
        # split -> list of {"img": path, "label": path} records
        self.files = collections.defaultdict(list)
        self.img_transform = img_transform
        self.label_transform = label_transform
        self.image_label_transform = image_label_transform
        # Kept for backward compatibility; not used by __getitem__ itself.
        self.h_flip = HorizontalFlip()
        self.v_flip = VerticalFlip()

        data_dir = osp.join(root, "eyedata", split)
        imgsets_dir = osp.join(data_dir, "img")
        for name in os.listdir(imgsets_dir):
            name = os.path.splitext(name)[0]
            img_file = osp.join(data_dir, "img/%s.tif" % name)
            label_file = osp.join(data_dir, "label/%s.gif" % name)
            self.files[split].append({
                "img": img_file,
                "label": label_file
            })

    def __len__(self):
        """Number of image/label pairs in the current split."""
        return len(self.files[self.split])

    def __getitem__(self, index):
        """Load, transform and return the (image, label) pair at *index*."""
        datafiles = self.files[self.split][index]

        img = Image.open(datafiles["img"]).convert('RGB')
        # Label is categorical, so it is loaded as a palette ("P") image;
        # any resizing must use NEAREST to avoid interpolating class ids.
        label = Image.open(datafiles["label"]).convert("P")

        # Joint transform first so image and label stay spatially aligned.
        if self.image_label_transform is not None:
            img, label = self.image_label_transform(img, label)

        # BUGFIX: previously `imgs`/`labels` were unbound (NameError) when the
        # corresponding transform was None; fall back to the raw PIL objects.
        if self.img_transform is not None:
            imgs = self.img_transform(img)
        else:
            imgs = img

        if self.label_transform is not None:
            labels = self.label_transform(label)
        else:
            labels = label

        return imgs, labels
83 |
if __name__ == '__main__':
    # Image-only pipeline: photometric augmentation + normalization
    # (ImageNet mean/std, matching common pretrained backbones).
    input_transform = Compose([
        ColorAug(),
        Add_Gaussion_noise(prob=0.5),
        ToTensor(),
        Normalize([.485, .456, .406], [.229, .224, .225]),
    ])
    # Label-only pipeline: convert to a label tensor and map 255 -> 1
    # so the vessel mask is binary {0, 1}.
    target_transform = Compose([
        ToLabel(),
        ReLabel(255, 1),
    ])
    # Joint spatial pipeline applied identically to image and label.
    img_label_transform = Compose_imglabel([
        Random_crop(512, 512),
        Random_horizontal_flip(0.5),
        Random_vertical_flip(0.5),
    ])

    dst = VOCDataSet("./", img_transform=input_transform,
                     label_transform=target_transform,
                     image_label_transform=img_label_transform)
    trainloader = data.DataLoader(dst, batch_size=1)

    # BUGFIX: loop variable was named `data`, shadowing the imported
    # `torch.utils.data` module inside the loop body; renamed to `batch`.
    for i, batch in enumerate(trainloader):
        imgs, labels = batch
--------------------------------------------------------------------------------
/eyedata/train/img/21_training.tif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/guyuchao/Vessel-wgan-pytorch/e895901849d63997d597ee9e7ad82e32a142763a/eyedata/train/img/21_training.tif
--------------------------------------------------------------------------------
/eyedata/train/img/22_training.tif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/guyuchao/Vessel-wgan-pytorch/e895901849d63997d597ee9e7ad82e32a142763a/eyedata/train/img/22_training.tif
--------------------------------------------------------------------------------
/eyedata/train/img/23_training.tif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/guyuchao/Vessel-wgan-pytorch/e895901849d63997d597ee9e7ad82e32a142763a/eyedata/train/img/23_training.tif
--------------------------------------------------------------------------------
/eyedata/train/img/24_training.tif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/guyuchao/Vessel-wgan-pytorch/e895901849d63997d597ee9e7ad82e32a142763a/eyedata/train/img/24_training.tif
--------------------------------------------------------------------------------
/eyedata/train/img/25_training.tif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/guyuchao/Vessel-wgan-pytorch/e895901849d63997d597ee9e7ad82e32a142763a/eyedata/train/img/25_training.tif
--------------------------------------------------------------------------------
/eyedata/train/img/26_training.tif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/guyuchao/Vessel-wgan-pytorch/e895901849d63997d597ee9e7ad82e32a142763a/eyedata/train/img/26_training.tif
--------------------------------------------------------------------------------
/eyedata/train/img/27_training.tif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/guyuchao/Vessel-wgan-pytorch/e895901849d63997d597ee9e7ad82e32a142763a/eyedata/train/img/27_training.tif
--------------------------------------------------------------------------------
/eyedata/train/img/28_training.tif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/guyuchao/Vessel-wgan-pytorch/e895901849d63997d597ee9e7ad82e32a142763a/eyedata/train/img/28_training.tif
--------------------------------------------------------------------------------
/eyedata/train/img/29_training.tif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/guyuchao/Vessel-wgan-pytorch/e895901849d63997d597ee9e7ad82e32a142763a/eyedata/train/img/29_training.tif
--------------------------------------------------------------------------------
/eyedata/train/img/30_training.tif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/guyuchao/Vessel-wgan-pytorch/e895901849d63997d597ee9e7ad82e32a142763a/eyedata/train/img/30_training.tif
--------------------------------------------------------------------------------
/eyedata/train/img/31_training.tif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/guyuchao/Vessel-wgan-pytorch/e895901849d63997d597ee9e7ad82e32a142763a/eyedata/train/img/31_training.tif
--------------------------------------------------------------------------------
/eyedata/train/img/32_training.tif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/guyuchao/Vessel-wgan-pytorch/e895901849d63997d597ee9e7ad82e32a142763a/eyedata/train/img/32_training.tif
--------------------------------------------------------------------------------
/eyedata/train/img/33_training.tif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/guyuchao/Vessel-wgan-pytorch/e895901849d63997d597ee9e7ad82e32a142763a/eyedata/train/img/33_training.tif
--------------------------------------------------------------------------------
/eyedata/train/img/34_training.tif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/guyuchao/Vessel-wgan-pytorch/e895901849d63997d597ee9e7ad82e32a142763a/eyedata/train/img/34_training.tif
--------------------------------------------------------------------------------
/eyedata/train/img/35_training.tif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/guyuchao/Vessel-wgan-pytorch/e895901849d63997d597ee9e7ad82e32a142763a/eyedata/train/img/35_training.tif
--------------------------------------------------------------------------------
/eyedata/train/img/36_training.tif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/guyuchao/Vessel-wgan-pytorch/e895901849d63997d597ee9e7ad82e32a142763a/eyedata/train/img/36_training.tif
--------------------------------------------------------------------------------
/eyedata/train/img/37_training.tif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/guyuchao/Vessel-wgan-pytorch/e895901849d63997d597ee9e7ad82e32a142763a/eyedata/train/img/37_training.tif
--------------------------------------------------------------------------------
/eyedata/train/img/38_training.tif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/guyuchao/Vessel-wgan-pytorch/e895901849d63997d597ee9e7ad82e32a142763a/eyedata/train/img/38_training.tif
--------------------------------------------------------------------------------
/eyedata/train/img/39_training.tif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/guyuchao/Vessel-wgan-pytorch/e895901849d63997d597ee9e7ad82e32a142763a/eyedata/train/img/39_training.tif
--------------------------------------------------------------------------------
/eyedata/train/img/40_training.tif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/guyuchao/Vessel-wgan-pytorch/e895901849d63997d597ee9e7ad82e32a142763a/eyedata/train/img/40_training.tif
--------------------------------------------------------------------------------
/eyedata/train/label/21_training.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/guyuchao/Vessel-wgan-pytorch/e895901849d63997d597ee9e7ad82e32a142763a/eyedata/train/label/21_training.gif
--------------------------------------------------------------------------------
/eyedata/train/label/22_training.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/guyuchao/Vessel-wgan-pytorch/e895901849d63997d597ee9e7ad82e32a142763a/eyedata/train/label/22_training.gif
--------------------------------------------------------------------------------
/eyedata/train/label/23_training.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/guyuchao/Vessel-wgan-pytorch/e895901849d63997d597ee9e7ad82e32a142763a/eyedata/train/label/23_training.gif
--------------------------------------------------------------------------------
/eyedata/train/label/24_training.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/guyuchao/Vessel-wgan-pytorch/e895901849d63997d597ee9e7ad82e32a142763a/eyedata/train/label/24_training.gif
--------------------------------------------------------------------------------
/eyedata/train/label/25_training.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/guyuchao/Vessel-wgan-pytorch/e895901849d63997d597ee9e7ad82e32a142763a/eyedata/train/label/25_training.gif
--------------------------------------------------------------------------------
/eyedata/train/label/26_training.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/guyuchao/Vessel-wgan-pytorch/e895901849d63997d597ee9e7ad82e32a142763a/eyedata/train/label/26_training.gif
--------------------------------------------------------------------------------
/eyedata/train/label/27_training.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/guyuchao/Vessel-wgan-pytorch/e895901849d63997d597ee9e7ad82e32a142763a/eyedata/train/label/27_training.gif
--------------------------------------------------------------------------------
/eyedata/train/label/28_training.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/guyuchao/Vessel-wgan-pytorch/e895901849d63997d597ee9e7ad82e32a142763a/eyedata/train/label/28_training.gif
--------------------------------------------------------------------------------
/eyedata/train/label/29_training.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/guyuchao/Vessel-wgan-pytorch/e895901849d63997d597ee9e7ad82e32a142763a/eyedata/train/label/29_training.gif
--------------------------------------------------------------------------------
/eyedata/train/label/30_training.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/guyuchao/Vessel-wgan-pytorch/e895901849d63997d597ee9e7ad82e32a142763a/eyedata/train/label/30_training.gif
--------------------------------------------------------------------------------
/eyedata/train/label/31_training.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/guyuchao/Vessel-wgan-pytorch/e895901849d63997d597ee9e7ad82e32a142763a/eyedata/train/label/31_training.gif
--------------------------------------------------------------------------------
/eyedata/train/label/32_training.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/guyuchao/Vessel-wgan-pytorch/e895901849d63997d597ee9e7ad82e32a142763a/eyedata/train/label/32_training.gif
--------------------------------------------------------------------------------
/eyedata/train/label/33_training.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/guyuchao/Vessel-wgan-pytorch/e895901849d63997d597ee9e7ad82e32a142763a/eyedata/train/label/33_training.gif
--------------------------------------------------------------------------------
/eyedata/train/label/34_training.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/guyuchao/Vessel-wgan-pytorch/e895901849d63997d597ee9e7ad82e32a142763a/eyedata/train/label/34_training.gif
--------------------------------------------------------------------------------
/eyedata/train/label/35_training.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/guyuchao/Vessel-wgan-pytorch/e895901849d63997d597ee9e7ad82e32a142763a/eyedata/train/label/35_training.gif
--------------------------------------------------------------------------------
/eyedata/train/label/36_training.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/guyuchao/Vessel-wgan-pytorch/e895901849d63997d597ee9e7ad82e32a142763a/eyedata/train/label/36_training.gif
--------------------------------------------------------------------------------
/eyedata/train/label/37_training.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/guyuchao/Vessel-wgan-pytorch/e895901849d63997d597ee9e7ad82e32a142763a/eyedata/train/label/37_training.gif
--------------------------------------------------------------------------------
/eyedata/train/label/38_training.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/guyuchao/Vessel-wgan-pytorch/e895901849d63997d597ee9e7ad82e32a142763a/eyedata/train/label/38_training.gif
--------------------------------------------------------------------------------
/eyedata/train/label/39_training.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/guyuchao/Vessel-wgan-pytorch/e895901849d63997d597ee9e7ad82e32a142763a/eyedata/train/label/39_training.gif
--------------------------------------------------------------------------------
/eyedata/train/label/40_training.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/guyuchao/Vessel-wgan-pytorch/e895901849d63997d597ee9e7ad82e32a142763a/eyedata/train/label/40_training.gif
--------------------------------------------------------------------------------
/eyedata/val/img/01_test.tif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/guyuchao/Vessel-wgan-pytorch/e895901849d63997d597ee9e7ad82e32a142763a/eyedata/val/img/01_test.tif
--------------------------------------------------------------------------------
/eyedata/val/img/02_test.tif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/guyuchao/Vessel-wgan-pytorch/e895901849d63997d597ee9e7ad82e32a142763a/eyedata/val/img/02_test.tif
--------------------------------------------------------------------------------
/eyedata/val/img/03_test.tif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/guyuchao/Vessel-wgan-pytorch/e895901849d63997d597ee9e7ad82e32a142763a/eyedata/val/img/03_test.tif
--------------------------------------------------------------------------------
/eyedata/val/img/04_test.tif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/guyuchao/Vessel-wgan-pytorch/e895901849d63997d597ee9e7ad82e32a142763a/eyedata/val/img/04_test.tif
--------------------------------------------------------------------------------
/eyedata/val/img/05_test.tif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/guyuchao/Vessel-wgan-pytorch/e895901849d63997d597ee9e7ad82e32a142763a/eyedata/val/img/05_test.tif
--------------------------------------------------------------------------------
/eyedata/val/img/06_test.tif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/guyuchao/Vessel-wgan-pytorch/e895901849d63997d597ee9e7ad82e32a142763a/eyedata/val/img/06_test.tif
--------------------------------------------------------------------------------
/eyedata/val/img/07_test.tif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/guyuchao/Vessel-wgan-pytorch/e895901849d63997d597ee9e7ad82e32a142763a/eyedata/val/img/07_test.tif
--------------------------------------------------------------------------------
/eyedata/val/img/08_test.tif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/guyuchao/Vessel-wgan-pytorch/e895901849d63997d597ee9e7ad82e32a142763a/eyedata/val/img/08_test.tif
--------------------------------------------------------------------------------
/eyedata/val/img/09_test.tif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/guyuchao/Vessel-wgan-pytorch/e895901849d63997d597ee9e7ad82e32a142763a/eyedata/val/img/09_test.tif
--------------------------------------------------------------------------------
/eyedata/val/img/10_test.tif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/guyuchao/Vessel-wgan-pytorch/e895901849d63997d597ee9e7ad82e32a142763a/eyedata/val/img/10_test.tif
--------------------------------------------------------------------------------
/eyedata/val/img/11_test.tif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/guyuchao/Vessel-wgan-pytorch/e895901849d63997d597ee9e7ad82e32a142763a/eyedata/val/img/11_test.tif
--------------------------------------------------------------------------------
/eyedata/val/img/12_test.tif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/guyuchao/Vessel-wgan-pytorch/e895901849d63997d597ee9e7ad82e32a142763a/eyedata/val/img/12_test.tif
--------------------------------------------------------------------------------
/eyedata/val/img/13_test.tif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/guyuchao/Vessel-wgan-pytorch/e895901849d63997d597ee9e7ad82e32a142763a/eyedata/val/img/13_test.tif
--------------------------------------------------------------------------------
/eyedata/val/img/14_test.tif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/guyuchao/Vessel-wgan-pytorch/e895901849d63997d597ee9e7ad82e32a142763a/eyedata/val/img/14_test.tif
--------------------------------------------------------------------------------
/eyedata/val/img/15_test.tif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/guyuchao/Vessel-wgan-pytorch/e895901849d63997d597ee9e7ad82e32a142763a/eyedata/val/img/15_test.tif
--------------------------------------------------------------------------------
/eyedata/val/img/16_test.tif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/guyuchao/Vessel-wgan-pytorch/e895901849d63997d597ee9e7ad82e32a142763a/eyedata/val/img/16_test.tif
--------------------------------------------------------------------------------
/eyedata/val/img/17_test.tif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/guyuchao/Vessel-wgan-pytorch/e895901849d63997d597ee9e7ad82e32a142763a/eyedata/val/img/17_test.tif
--------------------------------------------------------------------------------
/eyedata/val/img/18_test.tif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/guyuchao/Vessel-wgan-pytorch/e895901849d63997d597ee9e7ad82e32a142763a/eyedata/val/img/18_test.tif
--------------------------------------------------------------------------------
/eyedata/val/img/19_test.tif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/guyuchao/Vessel-wgan-pytorch/e895901849d63997d597ee9e7ad82e32a142763a/eyedata/val/img/19_test.tif
--------------------------------------------------------------------------------
/eyedata/val/img/20_test.tif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/guyuchao/Vessel-wgan-pytorch/e895901849d63997d597ee9e7ad82e32a142763a/eyedata/val/img/20_test.tif
--------------------------------------------------------------------------------
/eyedata/val/label/01_test.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/guyuchao/Vessel-wgan-pytorch/e895901849d63997d597ee9e7ad82e32a142763a/eyedata/val/label/01_test.gif
--------------------------------------------------------------------------------
/eyedata/val/label/02_test.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/guyuchao/Vessel-wgan-pytorch/e895901849d63997d597ee9e7ad82e32a142763a/eyedata/val/label/02_test.gif
--------------------------------------------------------------------------------
/eyedata/val/label/03_test.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/guyuchao/Vessel-wgan-pytorch/e895901849d63997d597ee9e7ad82e32a142763a/eyedata/val/label/03_test.gif
--------------------------------------------------------------------------------
/eyedata/val/label/04_test.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/guyuchao/Vessel-wgan-pytorch/e895901849d63997d597ee9e7ad82e32a142763a/eyedata/val/label/04_test.gif
--------------------------------------------------------------------------------
/eyedata/val/label/05_test.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/guyuchao/Vessel-wgan-pytorch/e895901849d63997d597ee9e7ad82e32a142763a/eyedata/val/label/05_test.gif
--------------------------------------------------------------------------------
/eyedata/val/label/06_test.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/guyuchao/Vessel-wgan-pytorch/e895901849d63997d597ee9e7ad82e32a142763a/eyedata/val/label/06_test.gif
--------------------------------------------------------------------------------
/eyedata/val/label/07_test.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/guyuchao/Vessel-wgan-pytorch/e895901849d63997d597ee9e7ad82e32a142763a/eyedata/val/label/07_test.gif
--------------------------------------------------------------------------------
/eyedata/val/label/08_test.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/guyuchao/Vessel-wgan-pytorch/e895901849d63997d597ee9e7ad82e32a142763a/eyedata/val/label/08_test.gif
--------------------------------------------------------------------------------
/eyedata/val/label/09_test.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/guyuchao/Vessel-wgan-pytorch/e895901849d63997d597ee9e7ad82e32a142763a/eyedata/val/label/09_test.gif
--------------------------------------------------------------------------------
/eyedata/val/label/10_test.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/guyuchao/Vessel-wgan-pytorch/e895901849d63997d597ee9e7ad82e32a142763a/eyedata/val/label/10_test.gif
--------------------------------------------------------------------------------
/eyedata/val/label/11_test.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/guyuchao/Vessel-wgan-pytorch/e895901849d63997d597ee9e7ad82e32a142763a/eyedata/val/label/11_test.gif
--------------------------------------------------------------------------------
/eyedata/val/label/12_test.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/guyuchao/Vessel-wgan-pytorch/e895901849d63997d597ee9e7ad82e32a142763a/eyedata/val/label/12_test.gif
--------------------------------------------------------------------------------
/eyedata/val/label/13_test.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/guyuchao/Vessel-wgan-pytorch/e895901849d63997d597ee9e7ad82e32a142763a/eyedata/val/label/13_test.gif
--------------------------------------------------------------------------------
/eyedata/val/label/14_test.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/guyuchao/Vessel-wgan-pytorch/e895901849d63997d597ee9e7ad82e32a142763a/eyedata/val/label/14_test.gif
--------------------------------------------------------------------------------
/eyedata/val/label/15_test.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/guyuchao/Vessel-wgan-pytorch/e895901849d63997d597ee9e7ad82e32a142763a/eyedata/val/label/15_test.gif
--------------------------------------------------------------------------------
/eyedata/val/label/16_test.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/guyuchao/Vessel-wgan-pytorch/e895901849d63997d597ee9e7ad82e32a142763a/eyedata/val/label/16_test.gif
--------------------------------------------------------------------------------
/eyedata/val/label/17_test.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/guyuchao/Vessel-wgan-pytorch/e895901849d63997d597ee9e7ad82e32a142763a/eyedata/val/label/17_test.gif
--------------------------------------------------------------------------------
/eyedata/val/label/18_test.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/guyuchao/Vessel-wgan-pytorch/e895901849d63997d597ee9e7ad82e32a142763a/eyedata/val/label/18_test.gif
--------------------------------------------------------------------------------
/eyedata/val/label/19_test.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/guyuchao/Vessel-wgan-pytorch/e895901849d63997d597ee9e7ad82e32a142763a/eyedata/val/label/19_test.gif
--------------------------------------------------------------------------------
/eyedata/val/label/20_test.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/guyuchao/Vessel-wgan-pytorch/e895901849d63997d597ee9e7ad82e32a142763a/eyedata/val/label/20_test.gif
--------------------------------------------------------------------------------
/gan.py:
--------------------------------------------------------------------------------
1 | from torch import nn
2 | from torchvision import models
3 | import torch.nn.functional as F
4 | import torch
5 |
6 |
class block(nn.Module):
    """Conv3x3 -> BatchNorm -> ReLU unit shared by generator and discriminator.

    Keeps the spatial size unchanged (stride 1, padding 1) while mapping
    in_filters input channels to n_filters output channels.
    """

    def __init__(self, in_filters, n_filters):
        super(block, self).__init__()
        layers = [
            nn.Conv2d(in_filters, n_filters, 3, stride=1, padding=1),
            nn.BatchNorm2d(n_filters),
            nn.ReLU(),
        ]
        # Attribute name kept as "deconv1" so existing state_dict
        # checkpoints keep loading unchanged.
        self.deconv1 = nn.Sequential(*layers)

    def forward(self, x):
        """Apply conv -> batchnorm -> relu to a (B, C, H, W) tensor."""
        return self.deconv1(x)
17 |
class generator(nn.Module):
    """U-Net style segmentation generator.

    Maps a (B, 3, H, W) image to a (B, 1, H, W) sigmoid probability map.
    H and W must be divisible by 16 (four 2x2 max-pool stages).
    """

    # initializers
    def __init__(self, n_filters=32):
        super(generator, self).__init__()
        # Encoder: each stage doubles the channel count, pooling halves H/W.
        self.down1 = nn.Sequential(
            block(3, n_filters),
            block(n_filters, n_filters),
            nn.MaxPool2d((2, 2)))
        self.down2 = nn.Sequential(
            block(n_filters, 2 * n_filters),
            block(2 * n_filters, 2 * n_filters),
            nn.MaxPool2d((2, 2)))
        self.down3 = nn.Sequential(
            block(2 * n_filters, 4 * n_filters),
            block(4 * n_filters, 4 * n_filters),
            nn.MaxPool2d((2, 2)))
        self.down4 = nn.Sequential(
            block(4 * n_filters, 8 * n_filters),
            block(8 * n_filters, 8 * n_filters),
            nn.MaxPool2d((2, 2)))
        self.down5 = nn.Sequential(
            block(8 * n_filters, 16 * n_filters),
            block(16 * n_filters, 16 * n_filters))

        # Decoder: each stage consumes the upsampled concatenation of the
        # previous decoder output and the matching encoder skip connection.
        self.up1 = nn.Sequential(
            block(16 * n_filters + 8 * n_filters, 8 * n_filters),
            block(8 * n_filters, 8 * n_filters))
        self.up2 = nn.Sequential(
            block(8 * n_filters + 4 * n_filters, 4 * n_filters),
            block(4 * n_filters, 4 * n_filters))
        self.up3 = nn.Sequential(
            block(4 * n_filters + 2 * n_filters, 2 * n_filters),
            block(2 * n_filters, 2 * n_filters))
        self.up4 = nn.Sequential(
            block(2 * n_filters + n_filters, n_filters),
            block(n_filters, n_filters))

        # 1x1 conv producing the single-channel vessel map (pre-sigmoid).
        self.out = nn.Sequential(
            nn.Conv2d(n_filters, 1, kernel_size=1)
        )

    # forward method
    def forward(self, x):
        """Return a (B, 1, H, W) probability map for input x of shape (B, 3, H, W)."""
        x1 = self.down1(x)
        x2 = self.down2(x1)
        x3 = self.down3(x2)
        x4 = self.down4(x3)
        x5 = self.down5(x4)
        # Fix: F.upsample and F.sigmoid are deprecated; F.interpolate and
        # torch.sigmoid are the supported, behaviorally identical
        # replacements (default mode is nearest-neighbour in both APIs).
        x = self.up1(F.interpolate(torch.cat((x4, x5), dim=1), scale_factor=2))
        x = self.up2(F.interpolate(torch.cat((x, x3), dim=1), scale_factor=2))
        x = self.up3(F.interpolate(torch.cat((x, x2), dim=1), scale_factor=2))
        x = self.up4(F.interpolate(torch.cat((x, x1), dim=1), scale_factor=2))
        x = torch.sigmoid(self.out(x))
        return x  # b,1,w,h
77 |
class discriminator(nn.Module):
    """Convolutional critic over concatenated (image, label) pairs.

    Input is a 4-channel tensor (3 image channels + 1 mask channel);
    output is one score per sample, shape (B, 1), squashed by sigmoid.

    NOTE(review): a WGAN critic normally omits the final sigmoid; this one
    keeps it, matching the original training code — confirm before changing.
    """

    def __init__(self, n_filters):
        super(discriminator, self).__init__()
        # Same encoder layout as the generator, but starting from 4 channels.
        self.down1 = nn.Sequential(
            block(4, n_filters),
            block(n_filters, n_filters),
            nn.MaxPool2d((2, 2)))
        self.down2 = nn.Sequential(
            block(n_filters, 2 * n_filters),
            block(2 * n_filters, 2 * n_filters),
            nn.MaxPool2d((2, 2)))
        self.down3 = nn.Sequential(
            block(2 * n_filters, 4 * n_filters),
            block(4 * n_filters, 4 * n_filters),
            nn.MaxPool2d((2, 2)))
        self.down4 = nn.Sequential(
            block(4 * n_filters, 8 * n_filters),
            block(8 * n_filters, 8 * n_filters),
            nn.MaxPool2d((2, 2)))
        self.down5 = nn.Sequential(
            block(8 * n_filters, 16 * n_filters),
            block(16 * n_filters, 16 * n_filters))
        self.out = nn.Linear(16 * n_filters, 1)

    def forward(self, x):
        """Return a (B, 1) score tensor for input x of shape (B, 4, H, W)."""
        x = self.down1(x)
        x = self.down2(x)
        x = self.down3(x)
        x = self.down4(x)
        x = self.down5(x)
        # Global average pool over the full spatial extent -> (B, 16*n_filters).
        x = F.avg_pool2d(x, kernel_size=x.size()[2:]).view(x.size()[0], -1)
        x = self.out(x)
        # Fix: F.sigmoid is deprecated; torch.sigmoid is identical.
        x = torch.sigmoid(x)
        return x  # b,1
119 |
120 |
if __name__ == '__main__':
    # Smoke test: run the discriminator once on a dummy (image, label) pair.
    # Bug fix: Variable lives in torch.autograd, not torch.nn.functional —
    # the original import raised ImportError before anything ran.
    from torch.autograd import Variable
    D = discriminator(32).cuda()
    t = Variable(torch.ones((2, 4, 512, 512)).cuda())
    print(D(t).size())
--------------------------------------------------------------------------------
/gycutils/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/guyuchao/Vessel-wgan-pytorch/e895901849d63997d597ee9e7ad82e32a142763a/gycutils/__init__.py
--------------------------------------------------------------------------------
/gycutils/__pycache__/__init__.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/guyuchao/Vessel-wgan-pytorch/e895901849d63997d597ee9e7ad82e32a142763a/gycutils/__pycache__/__init__.cpython-36.pyc
--------------------------------------------------------------------------------
/gycutils/__pycache__/gycaug.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/guyuchao/Vessel-wgan-pytorch/e895901849d63997d597ee9e7ad82e32a142763a/gycutils/__pycache__/gycaug.cpython-36.pyc
--------------------------------------------------------------------------------
/gycutils/__pycache__/trainschedule.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/guyuchao/Vessel-wgan-pytorch/e895901849d63997d597ee9e7ad82e32a142763a/gycutils/__pycache__/trainschedule.cpython-36.pyc
--------------------------------------------------------------------------------
/gycutils/__pycache__/utils.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/guyuchao/Vessel-wgan-pytorch/e895901849d63997d597ee9e7ad82e32a142763a/gycutils/__pycache__/utils.cpython-36.pyc
--------------------------------------------------------------------------------
/gycutils/gycaug.py:
--------------------------------------------------------------------------------
1 | from PIL import Image,ImageEnhance
2 | import numpy as np
3 | import random
4 | from skimage import color,exposure
5 |
class Compose_imglabel(object):
    """Chain transforms that operate on (image, label) pairs.

    Each transform is called as t(img, label) and must return the new pair;
    transforms are applied in the order given.
    """

    def __init__(self, transforms):
        self.transforms = transforms

    def __call__(self, img, label):
        for transform in self.transforms:
            img, label = transform(img, label)
        return img, label
14 |
15 |
class Retina_enhance(object):
    """Contrast-enhance a retina photograph via its green channel."""

    def __init__(self):
        pass

    def __call__(self, img):
        """Enhance *img* (a PIL RGB image).

        The green channel is histogram-equalized and gamma-adjusted
        (gamma=0.1), then stacked three times into an (H, W, 3) array.
        """
        pixels = np.array(img)
        enhanced = exposure.equalize_hist(pixels[:, :, 1])
        enhanced = exposure.adjust_gamma(enhanced, 0.1)
        return np.dstack((enhanced, enhanced, enhanced))
29 |
30 | class Random_vertical_flip(object):
31 | def _vertical_flip(self,img,label):
32 | return img.transpose(Image.FLIP_TOP_BOTTOM),label.transpose(Image.FLIP_TOP_BOTTOM)
33 | def __init__(self,prob):
34 | '''
35 |
36 | :param prob: should be (0,1)
37 | '''
38 | assert prob>=0 and prob<=1,"prob should be [0,1]"
39 | self.prob=prob
40 | def __call__(self, img,label):
41 | '''
42 | flip img and label simultaneously
43 | :param img:should be PIL image
44 | :param label:should be PIL image
45 | :return:
46 | '''
47 | assert isinstance(img, Image.Image),"should be PIL image"
48 | assert isinstance(label, Image.Image),"should be PIL image"
49 | if random.random()=0 and prob<=1,"prob should be [0,1]"
64 | self.prob=prob
65 |
66 | def __call__(self, img,label):
67 | '''
68 | flip img and label simultaneously
69 | :param img:should be PIL image
70 | :param label:should be PIL image
71 | :return:
72 | '''
73 | assert isinstance(img, Image.Image),"should be PIL image"
74 | assert isinstance(label, Image.Image),"should be PIL image"
75 | if random.random()255]=255
109 | newimg[newimg<0]=0
110 | return Image.fromarray(newimg.astype(np.uint8))
111 |
112 | def __init__(self,prob):
113 | self.prob=prob
114 |
115 | def __call__(self,img):
116 | return self._gaussian_noise(np.array(img),0,random.randint(0,15))
117 |
class Random_rotation(object):
    """Rotate an (image, label) pair by the same random angle (0-360 deg)."""

    def _randomRotation(self, image, label, mode=Image.NEAREST):
        """Rotate *image* and *label* by one shared random angle.

        :param image: PIL image
        :param label: PIL image (segmentation mask)
        :param mode: resampling filter (NEAREST keeps mask labels crisp)
        :return: (rotated image, rotated label)
        """
        random_angle = np.random.randint(0, 360)
        # Bug fix: the original rotated only the image and returned a single
        # value, breaking the (img, label) pair contract of Compose_imglabel
        # and leaving the mask misaligned with the image.
        return image.rotate(random_angle, mode), label.rotate(random_angle, mode)

    def __init__(self, prob):
        self.prob = prob

    def __call__(self, img, label):
        # Bug fix: honour the configured probability, consistent with the
        # other Random_* transforms (the original ignored self.prob).
        if random.random() < self.prob:
            return self._randomRotation(img, label)
        return img, label
134 |
class Random_crop(object):
    """Crop the same random (width x height) window from an image/label pair."""

    def _randomCrop(self, img, label):
        width, height = img.size
        # Bug fix: the random offsets must leave room for the *configured*
        # crop size, not a hard-coded 512 — the original crashed (or cropped
        # out of bounds) for any other crop size or smaller images.
        x = random.randint(0, width - self.width)
        y = random.randint(0, height - self.height)
        region = (x, y, x + self.width, y + self.height)
        return img.crop(region), label.crop(region)

    def __init__(self, height, width):
        self.height = height
        self.width = width

    def __call__(self, img, label):
        """Crop both images with the same region; sizes must match."""
        assert img.size == label.size, "img should have the same shape as label"
        width, height = img.size
        assert height >= self.height and width >= self.width, "Cropimg should larger than origin"
        return self._randomCrop(img, label)
151 |
--------------------------------------------------------------------------------
/gycutils/trainschedule.py:
--------------------------------------------------------------------------------
class Scheduler:
    """Fixed training schedule: a constant learning rate plus an epoch budget.

    Values are set once at construction and read back through the getters.
    """

    def __init__(self, lr, total_epoches):
        self._lr = lr
        self._total_epoches = total_epoches

    def get_learning_rate(self):
        """Return the constant learning rate."""
        return self._lr

    def get_total_epoches(self):
        """Return the total number of epochs to train."""
        return self._total_epoches
--------------------------------------------------------------------------------
/gycutils/utils.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from torch.autograd import Variable,grad
def make_trainable(model, val):
    """Enable or disable gradient tracking for every parameter of *model*.

    Used to freeze one network of the GAN while the other is updated.
    """
    for param in model.parameters():
        param.requires_grad = val
6 |
7 |
def calc_gradient_penalty(netD, real_data, fake_data, LAMBDA=10):
    """WGAN-GP gradient penalty: LAMBDA * mean((||grad D(x_hat)||_2 - 1)^2).

    x_hat is a per-sample random interpolation between real and fake batches.
    Requires CUDA (tensors are moved to the GPU unconditionally).

    :param netD: critic network
    :param real_data: (B, C, H, W) tensor of real pairs
    :param fake_data: (B, C, H, W) tensor of generated pairs
    :param LAMBDA: penalty weight
    :return: scalar penalty term to add to the critic loss
    """
    batch_size = real_data.size()[0]
    # One mixing coefficient per sample, broadcast across C, H and W.
    mix = torch.rand(batch_size, 1)
    mix = mix.unsqueeze(-1).unsqueeze(-1).expand(real_data.size())
    mix = mix.cuda()

    x_hat = mix * real_data + ((1 - mix) * fake_data)
    x_hat = x_hat.cuda()
    x_hat = Variable(x_hat, requires_grad=True)

    critic_out = netD(x_hat)

    grads = grad(outputs=critic_out, inputs=x_hat,
                 grad_outputs=torch.ones(critic_out.size()).cuda(),
                 create_graph=True, retain_graph=True, only_inputs=True)[0]

    # NOTE(review): the 2-norm is taken over dim=1 only (per channel slice),
    # not over the whole per-sample gradient as in the WGAN-GP paper —
    # preserved as-is from the original; confirm before "fixing".
    return ((grads.norm(2, dim=1) - 1) ** 2).mean() * LAMBDA
26 |
--------------------------------------------------------------------------------
/loss.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn.functional as F
3 | import torch.nn as nn
4 | # Recommend
class CrossEntropyLoss2d(nn.Module):
    """2D cross-entropy: log-softmax over the channel dim + NLL loss.

    Expects logits of shape (N, C, H, W) and integer targets (N, H, W).
    """

    def __init__(self, weight=None, size_average=True):
        super(CrossEntropyLoss2d, self).__init__()
        # Fix: nn.NLLLoss2d is deprecated (and removed in recent PyTorch);
        # nn.NLLLoss handles (N, C, H, W) input identically. The boolean
        # size_average flag maps onto the modern `reduction` argument.
        self.nll_loss = nn.NLLLoss(weight, reduction='mean' if size_average else 'sum')

    def forward(self, inputs, targets):
        """Return the loss for logits *inputs* against class-index *targets*."""
        return self.nll_loss(F.log_softmax(inputs, dim=1), targets)
13 |
class BCE_Loss(nn.Module):
    """Thin wrapper around nn.BCELoss.

    Inputs must already be probabilities in [0, 1] (the generator ends
    with a sigmoid); targets are tensors of the same shape.
    """

    def __init__(self):
        super(BCE_Loss, self).__init__()
        self.bce = nn.BCELoss()

    def forward(self, inputs, targets):
        """Return the mean binary cross-entropy of *inputs* vs *targets*."""
        return self.bce(inputs, targets)
--------------------------------------------------------------------------------
/readmeDisplay/1.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/guyuchao/Vessel-wgan-pytorch/e895901849d63997d597ee9e7ad82e32a142763a/readmeDisplay/1.PNG
--------------------------------------------------------------------------------
/train.py:
--------------------------------------------------------------------------------
1 | from __future__ import division
2 | import torch
3 | from torch.autograd import Variable
4 | from torch.utils import data
5 |
6 | from gycutils.trainschedule import Scheduler
7 | from gycutils.utils import make_trainable,calc_gradient_penalty
8 | from gan import discriminator,generator
9 | from datasets import VOCDataSet
10 | from torch.optim import Adam
11 | from loss import BCE_Loss
12 | from transform import ReLabel, ToLabel
13 | from torchvision.transforms import Compose, Normalize, ToTensor
14 | import tqdm
15 | from Criterion import Criterion
16 | from PIL import Image
17 | import numpy as np
18 | import os
19 | from gycutils.gycaug import ColorAug,Random_horizontal_flip,Random_vertical_flip,Compose_imglabel,Random_crop
# NOTE(review): this script's indentation was lost in extraction; the block
# structure below is reconstructed from the statement order — verify against
# the original repository before relying on loop/branch boundaries.

# Training-image pipeline: colour augmentation, tensor conversion, and
# channel-wise normalization (stats presumably computed on DRIVE — confirm).
input_transform = Compose([
    ColorAug(),
    ToTensor(),
    Normalize([.585, .256, .136], [.229, .124, .095]),
])
# Validation pipeline: same normalization, no augmentation.
val_transform = Compose([
    ToTensor(),
    Normalize([.585, .256, .136], [.229, .124, .095]),
])
# Masks: 255 (vessel) is remapped to class 1.
target_transform = Compose([
    ToLabel(),
    ReLabel(255, 1),
])
# Paired geometric augmentations applied to image and mask together.
img_label_transform = Compose_imglabel([
    Random_crop(512, 512),
    Random_horizontal_flip(0.5),
    Random_vertical_flip(0.5),
])

trainloader = data.DataLoader(VOCDataSet("./", img_transform=input_transform,
                                         label_transform=target_transform,
                                         image_label_transform=img_label_transform),
                              batch_size=2, shuffle=True, pin_memory=True)
valloader = data.DataLoader(VOCDataSet("./", split='val', img_transform=val_transform,
                                       label_transform=target_transform,
                                       image_label_transform=img_label_transform),
                            batch_size=1, shuffle=False, pin_memory=True)

schedule = Scheduler(lr=1e-4, total_epoches=4000)
D = torch.nn.DataParallel(discriminator(n_filters=32)).cuda()
G = torch.nn.DataParallel(generator(n_filters=32)).cuda()
# Weight of the adversarial term in the generator update.
gan_loss_percent = 0.03

# Gradient-direction constants for the WGAN-style backward calls.
one = torch.FloatTensor([1])
mone = one * -1
moneg = one * -1 * gan_loss_percent
one = one.cuda()
mone = mone.cuda()
moneg = moneg.cuda()

loss_func = BCE_Loss()
# NOTE(review): eps is written 10e-8 (= 1e-7); possibly 1e-8 was intended.
optimizer_D = Adam(D.parameters(), lr=1e-4, betas=(0.5, 0.9), eps=10e-8)
optimizer_G = Adam(G.parameters(), lr=1e-4, betas=(0.5, 0.9), eps=10e-8)

for epoch in range(schedule.get_total_epoches()):

    D.train()
    G.train()
    # --- train D (critic): G frozen ---
    make_trainable(D, True)
    make_trainable(G, False)
    for idx, (real_imgs, real_labels) in tqdm.tqdm(enumerate(trainloader)):
        real_imgs = Variable(real_imgs).cuda()
        real_labels = Variable(real_labels.unsqueeze(1)).cuda()
        D.zero_grad()
        optimizer_D.zero_grad()

        # Real (image, mask) pair: push the critic score up.
        real_pair = torch.cat((real_imgs, real_labels), dim=1)
        d_real = D(real_pair)
        d_real = d_real.mean()
        d_real.backward(mone)

        # Fake pair from the frozen generator: push the score down.
        fake_pair = torch.cat((real_imgs, G(real_imgs)), dim=1)
        d_fake = D(fake_pair)
        d_fake = d_fake.mean()
        d_fake.backward(one)

        # WGAN-GP penalty on interpolates between real and fake pairs.
        gradient_penalty = calc_gradient_penalty(D, real_pair.data, fake_pair.data)
        gradient_penalty.backward()

        Wasserstein_D = d_real - d_fake
        optimizer_D.step()

    # --- train G: D frozen ---
    make_trainable(D, False)
    make_trainable(G, True)
    for idx, (real_imgs, real_labels) in tqdm.tqdm(enumerate(trainloader)):
        G.zero_grad()
        optimizer_G.zero_grad()
        real_imgs = Variable(real_imgs).cuda()
        real_labels = Variable(real_labels).cuda()
        pred_labels = G(real_imgs)
        # Segmentation (BCE) loss; graph retained for the adversarial term.
        Seg_Loss = loss_func(pred_labels, real_labels.unsqueeze(1))
        Seg_Loss.backward(retain_graph=True)
        fake_pair = torch.cat((real_imgs, pred_labels), dim=1)
        gd_fake = D(fake_pair)
        gd_fake = gd_fake.mean()
        # Adversarial term, scaled by gan_loss_percent via `moneg`.
        gd_fake.backward(moneg)
        optimizer_G.step()
    # Reports the last batch's values of the epoch.
    print("epoch[%d/%d] W:%f segloss%f" % (epoch, schedule.get_total_epoches(), Wasserstein_D, Seg_Loss))

    G.eval()
    D.eval()
    if epoch % 500 == 0:
        # Bug fix: os.mkdir fails when ./pth does not exist yet (or when the
        # epoch directory is left over from a previous run); makedirs with
        # exist_ok=True handles both.
        os.makedirs('./pth/epoch%d' % epoch, exist_ok=True)
        for i_val, (real_imgs, real_labels) in enumerate(valloader):
            real_imgs = Variable(real_imgs.cuda(), volatile=True)
            real_labels = Variable(real_labels.cuda(), volatile=True)
            outputs = G(real_imgs)

            outputs = outputs[0].data.squeeze(0).cpu().numpy()
            # Kept for the (currently disabled) precision/recall evaluation.
            pred = outputs.flatten()
            label = real_labels[0].cpu().data.numpy().flatten()
            # Criterion().precision_recall('./pth/epoch%d' % epoch, i_val, label, pred)
            Image.fromarray((outputs * 255).astype(np.uint8)).save("./pth/epoch%d/%d.jpg" % (epoch, i_val))

        torch.save(G.state_dict(), "./pth/G.pth")
        torch.save(D.state_dict(), "./pth/D.pth")
--------------------------------------------------------------------------------
/transform.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import torch
3 | from PIL import Image
4 | import collections
5 |
6 |
class Scale(object):
    """Resize a PIL image.

    If *size* is an int, the shorter side is scaled to *size* while the
    aspect ratio is preserved; if it is a (w, h) pair, the image is resized
    to exactly that size.
    """

    def __init__(self, size, interpolation=Image.BILINEAR):
        # Fix: collections.Iterable was deprecated in Python 3.3 and removed
        # in 3.10 — the ABC lives in collections.abc.
        from collections.abc import Iterable
        assert isinstance(size, int) or (isinstance(size, Iterable) and len(size) == 2)
        self.size = size
        self.interpolation = interpolation

    def __call__(self, img):
        """Return the resized image (or *img* unchanged if already at size)."""
        if isinstance(self.size, int):
            w, h = img.size
            # Shorter side already matches the target: nothing to do.
            if (w <= h and w == self.size) or (h <= w and h == self.size):
                return img
            if w < h:
                ow = self.size
                oh = int(self.size * h / w)
                return img.resize((ow, oh), self.interpolation)
            else:
                oh = self.size
                ow = int(self.size * w / h)
                return img.resize((ow, oh), self.interpolation)
        else:
            return img.resize(self.size, self.interpolation)
28 |
29 |
class ToParallel(object):
    """Generator of image variants: the original first, then each transform's output."""

    def __init__(self, transforms):
        self.transforms = transforms

    def __call__(self, img):
        yield img
        for transform in self.transforms:
            yield transform(img)
38 |
39 | '''
40 | class ToLabel(object):
41 | def __call__(self, inputs):
42 | tensors = []
43 | for i in inputs:
44 | tensors.append(torch.from_numpy(np.array(i)).long())
45 | return tensors
46 | '''
47 |
class ToLabel(object):
    """Convert a PIL image (or any array-like) into a float torch tensor."""

    def __call__(self, inputs):
        array = np.array(inputs)
        return torch.from_numpy(array).float()
51 |
52 |
class ReLabel(object):
    """Replace every occurrence of one label value with another, in place."""

    def __init__(self, olabel, nlabel):
        self.olabel = olabel
        self.nlabel = nlabel

    def __call__(self, inputs):
        # assert isinstance(input, torch.LongTensor), 'tensor needs to be LongTensor'
        # Iterating the outer dimension lets the same code handle both a
        # single 2-D tensor and a sequence of tensors.
        for row in inputs:
            row[row == self.olabel] = self.nlabel
        return inputs
63 |
64 | '''
65 | class ToSP(object):
66 | def __init__(self, size):
67 | self.scale2 = Scale(size//2, Image.NEAREST)
68 | self.scale4 = Scale(size//4, Image.NEAREST)
69 | self.scale8 = Scale(size//8, Image.NEAREST)
70 | self.scale16 = Scale(size//16, Image.NEAREST)
71 | self.scale32 = Scale(size//32, Image.NEAREST)
72 |
73 | def __call__(self, input):
74 | input2 = self.scale2(input)
75 | input4 = self.scale4(input)
76 | input8 = self.scale8(input)
77 | input16 = self.scale16(input)
78 | input32 = self.scale32(input)
79 | inputs = [input, input2, input4, input8, input16, input32]
80 | # inputs = [input]
81 |
82 | return inputs
83 | '''
84 |
class HorizontalFlip(object):
    """Mirror the given PIL.Image left-to-right (applied unconditionally)."""

    def __call__(self, img):
        mirrored = img.transpose(Image.FLIP_LEFT_RIGHT)
        return mirrored
90 |
91 |
class VerticalFlip(object):
    """Mirror the given PIL.Image top-to-bottom (applied unconditionally)."""

    def __call__(self, img):
        flipped = img.transpose(Image.FLIP_TOP_BOTTOM)
        return flipped
95 |
def uint82bin(n, count=8):
    """Return the lowest *count* bits of integer n as a '0'/'1' string, MSB first."""
    bits = [str((n >> shift) & 1) for shift in reversed(range(count))]
    return ''.join(bits)
99 |
def labelcolormap(N):
    """Build the PASCAL-VOC-style colour map: N rows of uint8 (r, g, b).

    Each channel is assembled by XOR-ing one bit of the label id per
    iteration into successively lower bit positions of the channel byte.
    """
    cmap = np.zeros((N, 3), dtype=np.uint8)
    for i in range(N):
        r = g = b = 0
        label_id = i
        for j in range(7):
            bits = uint82bin(label_id)
            r = r ^ (np.uint8(bits[-1]) << (7 - j))
            g = g ^ (np.uint8(bits[-2]) << (7 - j))
            b = b ^ (np.uint8(bits[-3]) << (7 - j))
            label_id = label_id >> 3
        cmap[i, 0] = r
        cmap[i, 1] = g
        cmap[i, 2] = b
    return cmap
117 |
def colormap(n):
    """Bit-interleaved colour map: n rows of uint8 (r, g, b).

    Bit k of channel c is taken from bit (3k + c) of the row index and
    placed at weight 2^(7-k), spreading indices across distinct colours.
    """
    cmap = np.zeros((n, 3), dtype=np.uint8)
    for i in range(n):
        channels = [0, 0, 0]
        for j in range(8):
            weight = 1 << (7 - j)
            for c in range(3):
                bit_pos = 3 * j + c
                channels[c] += weight * ((i >> bit_pos) & 1)
        cmap[i, :] = channels
    return cmap
132 |
133 |
class Colorize(object):
    """Map a (1, H, W) integer label image to a (3, H, W) byte colour image."""

    def __init__(self, n=22):
        # The palette is always built with 22 entries and then truncated to
        # the first n, mirroring the original behaviour.
        palette = labelcolormap(22)
        self.cmap = torch.from_numpy(palette[:n])

    def __call__(self, gray_image):
        size = gray_image.size()
        color_image = torch.ByteTensor(3, size[1], size[2]).fill_(0)

        # Paint every pixel whose label matches `label` with that palette row.
        for label, color in enumerate(self.cmap):
            mask = (gray_image[0] == label).cpu()
            for channel in range(3):
                color_image[channel][mask] = color[channel]

        return color_image
150 |
--------------------------------------------------------------------------------