├── .gitignore
├── README.md
├── config
│   ├── adobe.toml
│   └── sim.toml
├── data
│   └── util.py
├── dataset
│   ├── SIMD_composition_test_filelist.txt
│   └── generate_testset.py
├── figures
│   ├── .DS_Store
│   ├── example1.png
│   ├── example2.png
│   └── framework.jpg
├── networks
│   ├── __init__.py
│   ├── lap_pyramid_loss.py
│   ├── layers_WS.py
│   ├── model.py
│   ├── ppm.py
│   ├── resnet.py
│   ├── resnet_GN_WS.py
│   ├── resnet_bn.py
│   ├── spatial_gradient.py
│   ├── spatial_gradient_2d.py
│   ├── transforms.py
│   └── util.py
├── scripts
│   └── main.py
└── utils
    ├── __init__.py
    ├── colormap.py
    ├── config.py
    ├── radam.py
    └── util.py
/.gitignore:
--------------------------------------------------------------------------------
1 | .DS_Store
2 | */.DS_Store
3 | */*/.DS_Store
4 | */*/*/.DS_Store
5 | logs
6 | checkpoints/*
7 | pretrained/*
8 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Semantic Image Matting
2 | ---
3 | This is the official repository of Semantic Image Matting (CVPR2021).
4 |
5 |
6 | ### Overview
7 | ![framework](figures/framework.jpg)
8 |
9 | Natural image matting separates the foreground from the background in terms of fractional occupancy, which can be caused by highly transparent objects, complex foreground structures (e.g., nets or trees), and/or objects containing very fine details (e.g., hair). Although the conventional matting formulation can be applied to all of the above cases, no previous work has attempted to reason about the underlying causes of matting that arise from different foreground semantics.
10 |
11 | We show how to obtain better alpha mattes by incorporating semantic classification of matting regions into our framework. Specifically, we consider and learn 20 classes of matting patterns, and propose to extend the conventional trimap to a semantic trimap. The proposed semantic trimap can be obtained automatically through patch structure analysis within trimap regions. Meanwhile, we learn a multi-class discriminator to regularize the alpha prediction at the semantic level, and content-sensitive weights to balance the different regularization losses.
12 |
13 | ### Dataset
14 | Download our semantic image matting dataset (SIMD) [here](https://drive.google.com/file/d/1Cl_Nacgid9ZLVZ7j-cMHnim4SocTMY92/view?usp=sharing). SIMD is composed of self-collected images and a subset of Adobe images. To obtain the complete dataset, please contact Brian Price (bprice@adobe.com) for the Adobe Image Matting dataset first, then follow the instructions in SIMD.zip.
15 |
16 | ### Requirements
17 | The code has been tested in the following environment:
18 |
19 | * Python 3.7
20 | * Pytorch 1.9.0
21 | * CUDA 10.2 & CuDNN 7.6.5
22 |
23 | ### Performance
24 | Some pretrained models are listed below with their performance.
25 |
26 | <table><thead>
27 | <tr>
28 |     <th>Methods</th>
29 |     <th>SAD</th>
30 |     <th>MSE</th>
31 |     <th>Grad</th>
32 |     <th>Conn</th>
33 |     <th>Link</th>
34 | </tr>
35 | </thead>
36 | <tbody>
37 | <tr>
38 |     <td>SIMD</td>
39 |     <td>27.9</td>
40 |     <td>4.7</td>
41 |     <td>11.6</td>
42 |     <td>20.8</td>
43 |     <td>model</td>
44 | </tr>
45 | <tr>
46 |     <td>Composition-1K</td>
47 |     <td>27.7</td>
48 |     <td>5.6</td>
49 |     <td>10.7</td>
50 |     <td>24.4</td>
51 |     <td>model</td>
52 | </tr>
53 | </tbody>
54 | </table>
55 |
56 | ### Run
57 |
58 | Download the model and put it under `checkpoints/SIM` or `checkpoints/Adobe` in the root directory. Download the classifier [here](https://drive.google.com/file/d/12JCGqDylBXJpgDhj4hg_JZYdbHlX8TKe/view?usp=sharing) and put it under `checkpoints`. Then run inference and evaluation (with `CONFIG` being `sim` or `adobe`, matching the files in `config/`):
59 | ```
60 | python scripts/main.py -c config/CONFIG.toml
61 | ```
62 |
63 | ### Results
64 |
65 | ![example1](figures/example1.png)
66 | ![example2](figures/example2.png)
67 |
68 | ### Reference
69 | If you find our work useful in your research, please consider citing:
70 |
71 | ```
72 | @inproceedings{sun2021sim,
73 |   author    = {Yanan Sun and Chi-Keung Tang and Yu-Wing Tai},
74 | title = {Semantic Image Matting},
75 | booktitle = {Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition},
76 | year = {2021},
77 | }
78 | ```
79 |
80 | ### Acknowledgment
81 | This repo borrows code from several repositories, such as [GCA](https://github.com/Yaoyi-Li/GCA-Matting) and [FBA](https://github.com/MarcoForte/FBA_Matting).
82 |
--------------------------------------------------------------------------------
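For reference, each line of `dataset/SIMD_composition_test_filelist.txt` (included later in this dump) names one test composite in the form `class:foreground:background`. Below is a minimal sketch of turning such a filelist into composited test images with the `composite` helper from `data/util.py`; the `fg/`, `alpha/`, and `bg/` directory layout is an assumption made for illustration, and `dataset/generate_testset.py` remains the repository's actual generator.

```python
# Sketch only: composite test images from the SIMD filelist.
# The fg/alpha/bg folder layout below is hypothetical; see
# dataset/generate_testset.py for the repository's real script.
import os
import cv2

from data.util import composite  # alpha-blends fg over a resized bg


def generate_composites(filelist, fg_dir, alpha_dir, bg_dir, out_dir):
    os.makedirs(out_dir, exist_ok=True)
    with open(filelist) as f:
        for idx, line in enumerate(f):
            cls, fg_name, bg_name = line.strip().split(':')
            fg = cv2.imread(os.path.join(fg_dir, cls, fg_name))
            alpha = cv2.imread(os.path.join(alpha_dir, cls, fg_name),
                               cv2.IMREAD_GRAYSCALE)
            bg = cv2.imread(os.path.join(bg_dir, bg_name))
            comp, _ = composite(bg, fg, alpha)  # alpha expected in [0, 255]
            cv2.imwrite(os.path.join(out_dir, '%s_%04d.png' % (cls, idx)), comp)


if __name__ == '__main__':
    generate_composites('dataset/SIMD_composition_test_filelist.txt',
                        'datasets/SIMD/fg', 'datasets/SIMD/alpha',
                        'datasets/SIMD/bg', 'datasets/SIMD/merged')
```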
/config/adobe.toml:
--------------------------------------------------------------------------------
1 | # Refer to utils/config.py for definition and options.
2 |
3 | version = "Adobe"
4 | task = "Adobe"
5 | dist = false
6 | debug = false
7 |
8 |
9 | [classifier]
10 | n_channel = 4
11 | num_classes = 20
12 | load_size = 320
13 | resume_checkpoint = "checkpoints/classifier_resnet50_best.pth.tar"
14 |
15 |
16 | [model]
17 | resume_checkpoint = "checkpoints/Adobe/ckpt_best.pth"
18 |
19 |
20 | [model.arch]
21 | encoder = "resnet50_BN"
22 | n_channel = 11
23 | atrous_rates = [12,24,36]
24 | aspp_channel = 256
25 |
26 |
27 | [data]
28 | workers = 8
29 | test_dir = "datasets/Composition-1K"
30 |
31 | [log]
32 | tensorboard_path = "./logs/tensorboard"
33 | logging_path = "./logs/stdout"
34 | checkpoint_path = "./checkpoints"
35 |
36 |
37 | [test]
38 | checkpoint = "best"
39 | fast_eval = true
40 |
--------------------------------------------------------------------------------
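Both TOML files share one schema, and `utils/config.py` is the repository's actual parser. Purely as an illustration of how the sections above nest into options, here is a minimal stand-alone loader; it uses the stdlib `tomllib` (Python 3.11+) as a stand-in, whereas the repo itself targets Python 3.7 and its own config machinery.

```python
# Illustration only: how the TOML sections above map to nested options.
# utils/config.py is the repository's real loader.
import tomllib  # stdlib since Python 3.11
from types import SimpleNamespace


def load_config(path):
    with open(path, 'rb') as f:
        raw = tomllib.load(f)

    def wrap(node):
        # Recursively wrap dicts so values read as attributes.
        if isinstance(node, dict):
            return SimpleNamespace(**{k: wrap(v) for k, v in node.items()})
        return node

    return wrap(raw)


cfg = load_config('config/adobe.toml')
print(cfg.model.arch.encoder)       # resnet50_BN
print(cfg.model.arch.atrous_rates)  # [12, 24, 36]
print(cfg.data.test_dir)            # datasets/Composition-1K
```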
/config/sim.toml:
--------------------------------------------------------------------------------
1 | # Refer to utils/config.py for definition and options.
2 |
3 | version = "SIM"
4 | task = "SIM"
5 | dist = false
6 | debug = false
7 |
8 |
9 | [classifier]
10 | n_channel = 4
11 | num_classes = 20
12 | load_size = 320
13 | resume_checkpoint = "checkpoints/classifier_resnet50_best.pth.tar"
14 |
15 |
16 | [model]
17 | resume_checkpoint = "checkpoints/SIM/ckpt_best.pth"
18 |
19 |
20 | [model.arch]
21 | encoder = "resnet50_BN"
22 | n_channel = 11
23 | atrous_rates = [12,24,36]
24 | aspp_channel = 256
25 |
26 |
27 | [data]
28 | workers = 8
29 | test_dir = "datasets/SIMD"
30 |
31 |
32 | [log]
33 | tensorboard_path = "./logs/tensorboard"
34 | logging_path = "./logs/stdout"
35 | checkpoint_path = "./checkpoints"
36 |
37 |
38 | [test]
39 | checkpoint = "best"
40 | fast_eval = true
41 |
--------------------------------------------------------------------------------
/data/util.py:
--------------------------------------------------------------------------------
1 | import os
2 | import cv2
3 | import numpy as np
4 | import torch
5 |
6 |
7 | def dt(a):
8 | return cv2.distanceTransform((a * 255).astype(np.uint8), cv2.DIST_L2, 0)
9 |
10 |
11 | def get_fname(x):
12 | return os.path.splitext(os.path.basename(x))[0]
13 |
14 |
15 | def gen_trimap(alpha, ksize=3, iterations=5):
16 | kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (ksize, ksize))
17 | dilated = cv2.dilate(alpha, kernel, iterations=iterations)
18 | eroded = cv2.erode(alpha, kernel, iterations=iterations)
19 | trimap = np.zeros(alpha.shape) + 128
20 | trimap[eroded >= 255] = 255
21 | trimap[dilated <= 0] = 0
22 | return trimap
23 |
24 |
25 | def compute_gradient(img):
26 | x = cv2.Sobel(img, cv2.CV_16S, 1, 0)
27 | y = cv2.Sobel(img, cv2.CV_16S, 0, 1)
28 | absX = cv2.convertScaleAbs(x)
29 | absY = cv2.convertScaleAbs(y)
30 | grad = cv2.addWeighted(absX, 0.5, absY, 0.5, 0)
31 | grad = cv2.cvtColor(grad, cv2.COLOR_BGR2GRAY)
32 | return grad
33 |
34 |
35 | def transform(image, scale=255.):
36 | image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
37 | mean = np.array([[[0.485, 0.456, 0.406]]])
38 | std = np.array([[[0.229, 0.224, 0.225]]])
39 | image_scale = image / scale
40 | image_trans = (image_scale - mean) / std
41 | image_scale = torch.from_numpy(image_scale.transpose(2,0,1)).float()
42 | image_trans = torch.from_numpy(image_trans.transpose(2,0,1)).float()
43 | return image_scale, image_trans
44 |
45 |
46 | def trimap_to_2chn(trimap):
47 | h, w = trimap.shape[:2]
48 | trimap_2chn = np.zeros((h, w, 2), dtype=np.float32)
49 | trimap_2chn[:,:,0] = (trimap == 0)
50 | trimap_2chn[:,:,1] = (trimap == 255)
51 | return trimap_2chn
52 |
53 |
54 | def trimap_to_clks(trimap, L=320):  # trimap: 2-channel map from trimap_to_2chn
55 | h, w = trimap.shape[:2]
56 | clicks = np.zeros((h, w, 6), dtype=np.float32)
57 | for k in range(2):
58 | if (np.count_nonzero(trimap[:, :, k]) > 0):
59 | dt_mask = -dt(1 - trimap[:, :, k])**2
60 | clicks[:, :, 3*k] = np.exp(dt_mask / (2 * ((0.02 * L)**2)))
61 | clicks[:, :, 3*k+1] = np.exp(dt_mask / (2 * ((0.08 * L)**2)))
62 | clicks[:, :, 3*k+2] = np.exp(dt_mask / (2 * ((0.16 * L)**2)))
63 | return clicks
64 |
65 |
66 | def composite(bg, fg, alpha):
67 | # bg: [h, w, 3], fg: [h, w, 3], alpha: [h, w]
68 |     h, w, c = fg.shape
69 | bh, bw, bc = bg.shape
70 | wratio = float(w) / bw
71 | hratio = float(h) / bh
72 | ratio = wratio if wratio > hratio else hratio
73 | if ratio > 1:
74 | new_bw = int(bw * ratio + 1.0)
75 | new_bh = int(bh * ratio + 1.0)
76 |         bg = cv2.resize(bg, (new_bw, new_bh), interpolation=cv2.INTER_LINEAR)
77 | bg = bg[0:h, 0:w, :]
78 | alpha_f = alpha[:,:,None] / 255.
79 | comp = (fg*alpha_f + bg*(1.-alpha_f)).astype(np.uint8)
80 | return comp, bg
81 |
--------------------------------------------------------------------------------
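The helpers above chain into one input pipeline: `gen_trimap` derives a trimap from an alpha matte by dilating and eroding it, `trimap_to_2chn` one-hot encodes the definite background/foreground regions, and `trimap_to_clks` expands those two channels into six Gaussian-weighted distance maps at three scales (sigma = 0.02L, 0.08L, 0.16L). A small usage sketch on a synthetic alpha matte (shapes chosen for illustration only, not taken from the repository):

```python
# Usage sketch for data/util.py on a synthetic 320x320 alpha matte.
import numpy as np

from data.util import gen_trimap, trimap_to_2chn, trimap_to_clks

alpha = np.zeros((320, 320), dtype=np.uint8)
alpha[80:240, 80:240] = 255                        # opaque square foreground

trimap = gen_trimap(alpha, ksize=3, iterations=5)  # values in {0, 128, 255}
two_chn = trimap_to_2chn(trimap)                   # (320, 320, 2): BG, FG masks
clicks = trimap_to_clks(two_chn, L=320)            # (320, 320, 6) distance maps

print(np.unique(trimap))            # [  0. 128. 255.]
print(two_chn.shape, clicks.shape)  # (320, 320, 2) (320, 320, 6)
```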
/dataset/SIMD_composition_test_filelist.txt:
--------------------------------------------------------------------------------
1 | defocus:red-dahlia-flower-60597.jpg:2008_000902.jpg
2 | defocus:red-dahlia-flower-60597.jpg:2009_001054.jpg
3 | defocus:red-dahlia-flower-60597.jpg:2009_001371.jpg
4 | defocus:red-dahlia-flower-60597.jpg:2009_002862.jpg
5 | defocus:red-dahlia-flower-60597.jpg:2009_005148.jpg
6 | defocus:red-dahlia-flower-60597.jpg:2010_002251.jpg
7 | defocus:red-dahlia-flower-60597.jpg:2010_003613.jpg
8 | defocus:red-dahlia-flower-60597.jpg:2011_003203.jpg
9 | defocus:red-dahlia-flower-60597.jpg:2011_005491.jpg
10 | defocus:red-dahlia-flower-60597.jpg:2011_007171.jpg
11 | defocus:red-flower-bloom-blossom-64272.jpg:2008_004188.jpg
12 | defocus:red-flower-bloom-blossom-64272.jpg:2008_006205.jpg
13 | defocus:red-flower-bloom-blossom-64272.jpg:2009_001350.jpg
14 | defocus:red-flower-bloom-blossom-64272.jpg:2009_001884.jpg
15 | defocus:red-flower-bloom-blossom-64272.jpg:2010_000805.jpg
16 | defocus:red-flower-bloom-blossom-64272.jpg:2010_004945.jpg
17 | defocus:red-flower-bloom-blossom-64272.jpg:2011_000457.jpg
18 | defocus:red-flower-bloom-blossom-64272.jpg:2011_006800.jpg
19 | defocus:red-flower-bloom-blossom-64272.jpg:2011_007213.jpg
20 | defocus:red-flower-bloom-blossom-64272.jpg:2012_001651.jpg
21 | defocus:sea-sunny-person-beach.jpg:2007_005978.jpg
22 | defocus:sea-sunny-person-beach.jpg:2008_001619.jpg
23 | defocus:sea-sunny-person-beach.jpg:2008_001885.jpg
24 | defocus:sea-sunny-person-beach.jpg:2008_006488.jpg
25 | defocus:sea-sunny-person-beach.jpg:2009_001257.jpg
26 | defocus:sea-sunny-person-beach.jpg:2009_003089.jpg
27 | defocus:sea-sunny-person-beach.jpg:2009_003751.jpg
28 | defocus:sea-sunny-person-beach.jpg:2010_003011.jpg
29 | defocus:sea-sunny-person-beach.jpg:2011_005534.jpg
30 | defocus:sea-sunny-person-beach.jpg:2012_000384.jpg
31 | defocus:western-gull-4985671_1920.jpg:2008_001825.jpg
32 | defocus:western-gull-4985671_1920.jpg:2008_002932.jpg
33 | defocus:western-gull-4985671_1920.jpg:2008_004372.jpg
34 | defocus:western-gull-4985671_1920.jpg:2008_007902.jpg
35 | defocus:western-gull-4985671_1920.jpg:2010_005389.jpg
36 | defocus:western-gull-4985671_1920.jpg:2011_000129.jpg
37 | defocus:western-gull-4985671_1920.jpg:2011_002246.jpg
38 | defocus:western-gull-4985671_1920.jpg:2011_003236.jpg
39 | defocus:western-gull-4985671_1920.jpg:2011_005869.jpg
40 | defocus:western-gull-4985671_1920.jpg:2011_006407.jpg
41 | fire:b548f33ab9c559926d5f68088a00443a.jpg:2008_002601.jpg
42 | fire:b548f33ab9c559926d5f68088a00443a.jpg:2008_005875.jpg
43 | fire:b548f33ab9c559926d5f68088a00443a.jpg:2008_008641.jpg
44 | fire:b548f33ab9c559926d5f68088a00443a.jpg:2009_000641.jpg
45 | fire:b548f33ab9c559926d5f68088a00443a.jpg:2009_002185.jpg
46 | fire:b548f33ab9c559926d5f68088a00443a.jpg:2010_000493.jpg
47 | fire:b548f33ab9c559926d5f68088a00443a.jpg:2010_002152.jpg
48 | fire:b548f33ab9c559926d5f68088a00443a.jpg:2010_005513.jpg
49 | fire:b548f33ab9c559926d5f68088a00443a.jpg:2011_001608.jpg
50 | fire:b548f33ab9c559926d5f68088a00443a.jpg:2012_001262.jpg
51 | fire:fire-2204302_1920.jpg:2008_002098.jpg
52 | fire:fire-2204302_1920.jpg:2009_003194.jpg
53 | fire:fire-2204302_1920.jpg:2010_002603.jpg
54 | fire:fire-2204302_1920.jpg:2010_002909.jpg
55 | fire:fire-2204302_1920.jpg:2010_004448.jpg
56 | fire:fire-2204302_1920.jpg:2011_005030.jpg
57 | fire:fire-2204302_1920.jpg:2011_006587.jpg
58 | fire:fire-2204302_1920.jpg:2012_002169.jpg
59 | fire:fire-2204302_1920.jpg:2012_002766.jpg
60 | fire:fire-2204302_1920.jpg:2012_003663.jpg
61 | fire:fire-2568405_1920.jpg:2008_001026.jpg
62 | fire:fire-2568405_1920.jpg:2008_001304.jpg
63 | fire:fire-2568405_1920.jpg:2008_006811.jpg
64 | fire:fire-2568405_1920.jpg:2010_002267.jpg
65 | fire:fire-2568405_1920.jpg:2010_002783.jpg
66 | fire:fire-2568405_1920.jpg:2010_004279.jpg
67 | fire:fire-2568405_1920.jpg:2011_002713.jpg
68 | fire:fire-2568405_1920.jpg:2011_004371.jpg
69 | fire:fire-2568405_1920.jpg:2012_003141.jpg
70 | fire:fire-2568405_1920.jpg:2012_003306.jpg
71 | fire:flame-1789454_1920.jpg:2008_001419.jpg
72 | fire:flame-1789454_1920.jpg:2008_004422.jpg
73 | fire:flame-1789454_1920.jpg:2008_004512.jpg
74 | fire:flame-1789454_1920.jpg:2008_008528.jpg
75 | fire:flame-1789454_1920.jpg:2008_008748.jpg
76 | fire:flame-1789454_1920.jpg:2009_005035.jpg
77 | fire:flame-1789454_1920.jpg:2010_001105.jpg
78 | fire:flame-1789454_1920.jpg:2010_002924.jpg
79 | fire:flame-1789454_1920.jpg:2012_002479.jpg
80 | fire:flame-1789454_1920.jpg:2012_003021.jpg
81 | fire:flame-1789455_1920.jpg:2008_000887.jpg
82 | fire:flame-1789455_1920.jpg:2008_003943.jpg
83 | fire:flame-1789455_1920.jpg:2008_008366.jpg
84 | fire:flame-1789455_1920.jpg:2010_006021.jpg
85 | fire:flame-1789455_1920.jpg:2011_002380.jpg
86 | fire:flame-1789455_1920.jpg:2011_004089.jpg
87 | fire:flame-1789455_1920.jpg:2011_005300.jpg
88 | fire:flame-1789455_1920.jpg:2011_006840.jpg
89 | fire:flame-1789455_1920.jpg:2012_000441.jpg
90 | fire:flame-1789455_1920.jpg:2012_001933.jpg
91 | fur:cat-1288531_1920.jpg:2007_009402.jpg
92 | fur:cat-1288531_1920.jpg:2008_003596.jpg
93 | fur:cat-1288531_1920.jpg:2008_005525.jpg
94 | fur:cat-1288531_1920.jpg:2008_005623.jpg
95 | fur:cat-1288531_1920.jpg:2009_000663.jpg
96 | fur:cat-1288531_1920.jpg:2009_004805.jpg
97 | fur:cat-1288531_1920.jpg:2010_002542.jpg
98 | fur:cat-1288531_1920.jpg:2010_002939.jpg
99 | fur:cat-1288531_1920.jpg:2010_003473.jpg
100 | fur:cat-1288531_1920.jpg:2011_005789.jpg
101 | fur:dog-1196645_1920.jpg:2009_004694.jpg
102 | fur:dog-1196645_1920.jpg:2010_004554.jpg
103 | fur:dog-1196645_1920.jpg:2010_005616.jpg
104 | fur:dog-1196645_1920.jpg:2010_005670.jpg
105 | fur:dog-1196645_1920.jpg:2010_006398.jpg
106 | fur:dog-1196645_1920.jpg:2011_003476.jpg
107 | fur:dog-1196645_1920.jpg:2011_006645.jpg
108 | fur:dog-1196645_1920.jpg:2011_007153.jpg
109 | fur:dog-1196645_1920.jpg:2012_000136.jpg
110 | fur:dog-1196645_1920.jpg:2012_002020.jpg
111 | fur:dog-1203760_1920.jpg:2007_009901.jpg
112 | fur:dog-1203760_1920.jpg:2008_006438.jpg
113 | fur:dog-1203760_1920.jpg:2009_000316.jpg
114 | fur:dog-1203760_1920.jpg:2009_002128.jpg
115 | fur:dog-1203760_1920.jpg:2010_003402.jpg
116 | fur:dog-1203760_1920.jpg:2011_001054.jpg
117 | fur:dog-1203760_1920.jpg:2011_004120.jpg
118 | fur:dog-1203760_1920.jpg:2011_005780.jpg
119 | fur:dog-1203760_1920.jpg:2012_001767.jpg
120 | fur:dog-1203760_1920.jpg:2012_003019.jpg
121 | fur:golden-retriever-1349045_1920.jpg:2008_007989.jpg
122 | fur:golden-retriever-1349045_1920.jpg:2009_000040.jpg
123 | fur:golden-retriever-1349045_1920.jpg:2009_002890.jpg
124 | fur:golden-retriever-1349045_1920.jpg:2009_003718.jpg
125 | fur:golden-retriever-1349045_1920.jpg:2009_004128.jpg
126 | fur:golden-retriever-1349045_1920.jpg:2009_004154.jpg
127 | fur:golden-retriever-1349045_1920.jpg:2009_004518.jpg
128 | fur:golden-retriever-1349045_1920.jpg:2010_006658.jpg
129 | fur:golden-retriever-1349045_1920.jpg:2011_003665.jpg
130 | fur:golden-retriever-1349045_1920.jpg:2011_003951.jpg
131 | fur:horse-473093_1280.jpg:2007_005281.jpg
132 | fur:horse-473093_1280.jpg:2007_005859.jpg
133 | fur:horse-473093_1280.jpg:2008_000272.jpg
134 | fur:horse-473093_1280.jpg:2008_005979.jpg
135 | fur:horse-473093_1280.jpg:2008_006169.jpg
136 | fur:horse-473093_1280.jpg:2008_007252.jpg
137 | fur:horse-473093_1280.jpg:2008_007643.jpg
138 | fur:horse-473093_1280.jpg:2009_002144.jpg
139 | fur:horse-473093_1280.jpg:2010_005066.jpg
140 | fur:horse-473093_1280.jpg:2010_006773.jpg
141 | fur:lion-1118467_1920.jpg:2008_000215.jpg
142 | fur:lion-1118467_1920.jpg:2008_002984.jpg
143 | fur:lion-1118467_1920.jpg:2008_003697.jpg
144 | fur:lion-1118467_1920.jpg:2009_001856.jpg
145 | fur:lion-1118467_1920.jpg:2010_001273.jpg
146 | fur:lion-1118467_1920.jpg:2010_001908.jpg
147 | fur:lion-1118467_1920.jpg:2010_002378.jpg
148 | fur:lion-1118467_1920.jpg:2010_004950.jpg
149 | fur:lion-1118467_1920.jpg:2011_004423.jpg
150 | fur:lion-1118467_1920.jpg:2012_002742.jpg
151 | fur:malthezer-1047519_1920.jpg:2008_002378.jpg
152 | fur:malthezer-1047519_1920.jpg:2008_002965.jpg
153 | fur:malthezer-1047519_1920.jpg:2008_003997.jpg
154 | fur:malthezer-1047519_1920.jpg:2008_007030.jpg
155 | fur:malthezer-1047519_1920.jpg:2009_005160.jpg
156 | fur:malthezer-1047519_1920.jpg:2010_005836.jpg
157 | fur:malthezer-1047519_1920.jpg:2011_001941.jpg
158 | fur:malthezer-1047519_1920.jpg:2011_003160.jpg
159 | fur:malthezer-1047519_1920.jpg:2011_004333.jpg
160 | fur:malthezer-1047519_1920.jpg:2011_006534.jpg
161 | fur:puppy-1226295_1920.jpg:2008_000144.jpg
162 | fur:puppy-1226295_1920.jpg:2008_002533.jpg
163 | fur:puppy-1226295_1920.jpg:2008_005181.jpg
164 | fur:puppy-1226295_1920.jpg:2009_000898.jpg
165 | fur:puppy-1226295_1920.jpg:2009_004514.jpg
166 | fur:puppy-1226295_1920.jpg:2009_004944.jpg
167 | fur:puppy-1226295_1920.jpg:2010_001030.jpg
168 | fur:puppy-1226295_1920.jpg:2010_001970.jpg
169 | fur:puppy-1226295_1920.jpg:2010_005305.jpg
170 | fur:puppy-1226295_1920.jpg:2011_007018.jpg
171 | fur:retriever-348572_1920 (1).jpg:2008_000795.jpg
172 | fur:retriever-348572_1920 (1).jpg:2008_003114.jpg
173 | fur:retriever-348572_1920 (1).jpg:2008_003501.jpg
174 | fur:retriever-348572_1920 (1).jpg:2008_007032.jpg
175 | fur:retriever-348572_1920 (1).jpg:2009_000052.jpg
176 | fur:retriever-348572_1920 (1).jpg:2010_003381.jpg
177 | fur:retriever-348572_1920 (1).jpg:2010_006815.jpg
178 | fur:retriever-348572_1920 (1).jpg:2011_000592.jpg
179 | fur:retriever-348572_1920 (1).jpg:2012_001002.jpg
180 | fur:retriever-348572_1920 (1).jpg:2012_004021.jpg
181 | fur:rhea-bird-408648_1920.jpg:2007_006660.jpg
182 | fur:rhea-bird-408648_1920.jpg:2007_007531.jpg
183 | fur:rhea-bird-408648_1920.jpg:2009_004858.jpg
184 | fur:rhea-bird-408648_1920.jpg:2010_000802.jpg
185 | fur:rhea-bird-408648_1920.jpg:2010_001184.jpg
186 | fur:rhea-bird-408648_1920.jpg:2010_002435.jpg
187 | fur:rhea-bird-408648_1920.jpg:2010_005136.jpg
188 | fur:rhea-bird-408648_1920.jpg:2010_005169.jpg
189 | fur:rhea-bird-408648_1920.jpg:2010_006009.jpg
190 | fur:rhea-bird-408648_1920.jpg:2012_001437.jpg
191 | fur:sheep-63230_1920.jpg:2007_006035.jpg
192 | fur:sheep-63230_1920.jpg:2008_001792.jpg
193 | fur:sheep-63230_1920.jpg:2008_003552.jpg
194 | fur:sheep-63230_1920.jpg:2008_006776.jpg
195 | fur:sheep-63230_1920.jpg:2009_002597.jpg
196 | fur:sheep-63230_1920.jpg:2009_002727.jpg
197 | fur:sheep-63230_1920.jpg:2009_004171.jpg
198 | fur:sheep-63230_1920.jpg:2010_000151.jpg
199 | fur:sheep-63230_1920.jpg:2011_005290.jpg
200 | fur:sheep-63230_1920.jpg:2012_001670.jpg
201 | fur:still-life-1037378_1920.jpg:2007_009779.jpg
202 | fur:still-life-1037378_1920.jpg:2008_003737.jpg
203 | fur:still-life-1037378_1920.jpg:2009_002912.jpg
204 | fur:still-life-1037378_1920.jpg:2010_000805.jpg
205 | fur:still-life-1037378_1920.jpg:2010_001954.jpg
206 | fur:still-life-1037378_1920.jpg:2010_002368.jpg
207 | fur:still-life-1037378_1920.jpg:2010_003920.jpg
208 | fur:still-life-1037378_1920.jpg:2010_004971.jpg
209 | fur:still-life-1037378_1920.jpg:2010_005981.jpg
210 | fur:still-life-1037378_1920.jpg:2012_003985.jpg
211 | fur:teddy-562960_1920.jpg:2007_008013.jpg
212 | fur:teddy-562960_1920.jpg:2008_001808.jpg
213 | fur:teddy-562960_1920.jpg:2008_006361.jpg
214 | fur:teddy-562960_1920.jpg:2009_000532.jpg
215 | fur:teddy-562960_1920.jpg:2010_004105.jpg
216 | fur:teddy-562960_1920.jpg:2011_001605.jpg
217 | fur:teddy-562960_1920.jpg:2011_003043.jpg
218 | fur:teddy-562960_1920.jpg:2011_004043.jpg
219 | fur:teddy-562960_1920.jpg:2011_005252.jpg
220 | fur:teddy-562960_1920.jpg:2011_007053.jpg
221 | fur:teddy-bear-1180034_1920.jpg:2008_003323.jpg
222 | fur:teddy-bear-1180034_1920.jpg:2008_005253.jpg
223 | fur:teddy-bear-1180034_1920.jpg:2008_005943.jpg
224 | fur:teddy-bear-1180034_1920.jpg:2008_008223.jpg
225 | fur:teddy-bear-1180034_1920.jpg:2010_000001.jpg
226 | fur:teddy-bear-1180034_1920.jpg:2010_000492.jpg
227 | fur:teddy-bear-1180034_1920.jpg:2011_001868.jpg
228 | fur:teddy-bear-1180034_1920.jpg:2011_003583.jpg
229 | fur:teddy-bear-1180034_1920.jpg:2011_006673.jpg
230 | fur:teddy-bear-1180034_1920.jpg:2012_001375.jpg
231 | glass_ice:16452523375_08591714cf_o.jpg:2008_000138.jpg
232 | glass_ice:16452523375_08591714cf_o.jpg:2008_004998.jpg
233 | glass_ice:16452523375_08591714cf_o.jpg:2008_006910.jpg
234 | glass_ice:16452523375_08591714cf_o.jpg:2009_001964.jpg
235 | glass_ice:16452523375_08591714cf_o.jpg:2010_002805.jpg
236 | glass_ice:16452523375_08591714cf_o.jpg:2010_004409.jpg
237 | glass_ice:16452523375_08591714cf_o.jpg:2011_005436.jpg
238 | glass_ice:16452523375_08591714cf_o.jpg:2011_005494.jpg
239 | glass_ice:16452523375_08591714cf_o.jpg:2011_005667.jpg
240 | glass_ice:16452523375_08591714cf_o.jpg:2012_000724.jpg
241 | glass_ice:brandy-402572_1920.jpg:2007_000323.jpg
242 | glass_ice:brandy-402572_1920.jpg:2007_007084.jpg
243 | glass_ice:brandy-402572_1920.jpg:2008_007690.jpg
244 | glass_ice:brandy-402572_1920.jpg:2009_001709.jpg
245 | glass_ice:brandy-402572_1920.jpg:2009_002431.jpg
246 | glass_ice:brandy-402572_1920.jpg:2009_003122.jpg
247 | glass_ice:brandy-402572_1920.jpg:2010_001118.jpg
248 | glass_ice:brandy-402572_1920.jpg:2011_001979.jpg
249 | glass_ice:brandy-402572_1920.jpg:2012_002196.jpg
250 | glass_ice:brandy-402572_1920.jpg:2012_003909.jpg
251 | glass_ice:clear-liquid-in-drinking-glass-1556381.jpg:2007_002260.jpg
252 | glass_ice:clear-liquid-in-drinking-glass-1556381.jpg:2008_004258.jpg
253 | glass_ice:clear-liquid-in-drinking-glass-1556381.jpg:2009_004358.jpg
254 | glass_ice:clear-liquid-in-drinking-glass-1556381.jpg:2010_004008.jpg
255 | glass_ice:clear-liquid-in-drinking-glass-1556381.jpg:2010_005754.jpg
256 | glass_ice:clear-liquid-in-drinking-glass-1556381.jpg:2011_001862.jpg
257 | glass_ice:clear-liquid-in-drinking-glass-1556381.jpg:2011_003287.jpg
258 | glass_ice:clear-liquid-in-drinking-glass-1556381.jpg:2011_003445.jpg
259 | glass_ice:clear-liquid-in-drinking-glass-1556381.jpg:2012_002541.jpg
260 | glass_ice:clear-liquid-in-drinking-glass-1556381.jpg:2012_003265.jpg
261 | glass_ice:crystal-82296_1920.jpg:2008_001772.jpg
262 | glass_ice:crystal-82296_1920.jpg:2008_005042.jpg
263 | glass_ice:crystal-82296_1920.jpg:2008_005250.jpg
264 | glass_ice:crystal-82296_1920.jpg:2008_008746.jpg
265 | glass_ice:crystal-82296_1920.jpg:2009_000642.jpg
266 | glass_ice:crystal-82296_1920.jpg:2010_001355.jpg
267 | glass_ice:crystal-82296_1920.jpg:2011_004022.jpg
268 | glass_ice:crystal-82296_1920.jpg:2011_005934.jpg
269 | glass_ice:crystal-82296_1920.jpg:2012_001476.jpg
270 | glass_ice:crystal-82296_1920.jpg:2012_003584.jpg
271 | glass_ice:light-bulb-376930_1920.jpg:2008_001791.jpg
272 | glass_ice:light-bulb-376930_1920.jpg:2008_003913.jpg
273 | glass_ice:light-bulb-376930_1920.jpg:2009_000093.jpg
274 | glass_ice:light-bulb-376930_1920.jpg:2009_000290.jpg
275 | glass_ice:light-bulb-376930_1920.jpg:2009_002235.jpg
276 | glass_ice:light-bulb-376930_1920.jpg:2009_002976.jpg
277 | glass_ice:light-bulb-376930_1920.jpg:2010_002420.jpg
278 | glass_ice:light-bulb-376930_1920.jpg:2010_006867.jpg
279 | glass_ice:light-bulb-376930_1920.jpg:2011_002786.jpg
280 | glass_ice:light-bulb-376930_1920.jpg:2012_003470.jpg
281 | hair_easy:girl-beautiful-young-face-53000.jpg:2008_003303.jpg
282 | hair_easy:girl-beautiful-young-face-53000.jpg:2010_001320.jpg
283 | hair_easy:girl-beautiful-young-face-53000.jpg:2010_003060.jpg
284 | hair_easy:girl-beautiful-young-face-53000.jpg:2010_005472.jpg
285 | hair_easy:girl-beautiful-young-face-53000.jpg:2011_000080.jpg
286 | hair_easy:girl-beautiful-young-face-53000.jpg:2011_000542.jpg
287 | hair_easy:girl-beautiful-young-face-53000.jpg:2011_002098.jpg
288 | hair_easy:girl-beautiful-young-face-53000.jpg:2011_002185.jpg
289 | hair_easy:girl-beautiful-young-face-53000.jpg:2012_002133.jpg
290 | hair_easy:girl-beautiful-young-face-53000.jpg:2012_002937.jpg
291 | hair_easy:model-600238_1920.jpg:2008_000185.jpg
292 | hair_easy:model-600238_1920.jpg:2008_001047.jpg
293 | hair_easy:model-600238_1920.jpg:2008_004259.jpg
294 | hair_easy:model-600238_1920.jpg:2008_004610.jpg
295 | hair_easy:model-600238_1920.jpg:2009_002433.jpg
296 | hair_easy:model-600238_1920.jpg:2009_002734.jpg
297 | hair_easy:model-600238_1920.jpg:2009_005142.jpg
298 | hair_easy:model-600238_1920.jpg:2011_001977.jpg
299 | hair_easy:model-600238_1920.jpg:2011_004920.jpg
300 | hair_easy:model-600238_1920.jpg:2012_003507.jpg
301 | hair_easy:pexels-photo-58463.jpg:2008_006953.jpg
302 | hair_easy:pexels-photo-58463.jpg:2008_008148.jpg
303 | hair_easy:pexels-photo-58463.jpg:2008_008272.jpg
304 | hair_easy:pexels-photo-58463.jpg:2009_000312.jpg
305 | hair_easy:pexels-photo-58463.jpg:2009_000748.jpg
306 | hair_easy:pexels-photo-58463.jpg:2010_001087.jpg
307 | hair_easy:pexels-photo-58463.jpg:2010_004588.jpg
308 | hair_easy:pexels-photo-58463.jpg:2011_005953.jpg
309 | hair_easy:pexels-photo-58463.jpg:2012_001910.jpg
310 | hair_easy:pexels-photo-58463.jpg:2012_003677.jpg
311 | hair_easy:pink-hair-selfie-bun.jpg:2008_001631.jpg
312 | hair_easy:pink-hair-selfie-bun.jpg:2008_003593.jpg
313 | hair_easy:pink-hair-selfie-bun.jpg:2008_005190.jpg
314 | hair_easy:pink-hair-selfie-bun.jpg:2008_007887.jpg
315 | hair_easy:pink-hair-selfie-bun.jpg:2009_000014.jpg
316 | hair_easy:pink-hair-selfie-bun.jpg:2010_001364.jpg
317 | hair_easy:pink-hair-selfie-bun.jpg:2010_006796.jpg
318 | hair_easy:pink-hair-selfie-bun.jpg:2011_003871.jpg
319 | hair_easy:pink-hair-selfie-bun.jpg:2012_001090.jpg
320 | hair_easy:pink-hair-selfie-bun.jpg:2012_003394.jpg
321 | hair_easy:sea-sunny-person-beach.jpg:2008_004942.jpg
322 | hair_easy:sea-sunny-person-beach.jpg:2008_005582.jpg
323 | hair_easy:sea-sunny-person-beach.jpg:2008_006269.jpg
324 | hair_easy:sea-sunny-person-beach.jpg:2009_002169.jpg
325 | hair_easy:sea-sunny-person-beach.jpg:2010_000169.jpg
326 | hair_easy:sea-sunny-person-beach.jpg:2010_001518.jpg
327 | hair_easy:sea-sunny-person-beach.jpg:2010_006768.jpg
328 | hair_easy:sea-sunny-person-beach.jpg:2011_002970.jpg
329 | hair_easy:sea-sunny-person-beach.jpg:2011_005860.jpg
330 | hair_easy:sea-sunny-person-beach.jpg:2011_006954.jpg
331 | hair_hard:boy-1518482_1920.jpg:2008_003424.jpg
332 | hair_hard:boy-1518482_1920.jpg:2008_004567.jpg
333 | hair_hard:boy-1518482_1920.jpg:2008_006316.jpg
334 | hair_hard:boy-1518482_1920.jpg:2008_006530.jpg
335 | hair_hard:boy-1518482_1920.jpg:2009_000471.jpg
336 | hair_hard:boy-1518482_1920.jpg:2009_001270.jpg
337 | hair_hard:boy-1518482_1920.jpg:2009_003702.jpg
338 | hair_hard:boy-1518482_1920.jpg:2010_004441.jpg
339 | hair_hard:boy-1518482_1920.jpg:2011_000400.jpg
340 | hair_hard:boy-1518482_1920.jpg:2011_001568.jpg
341 | hair_hard:girl-1219339_1920.jpg:2007_000039.jpg
342 | hair_hard:girl-1219339_1920.jpg:2007_003587.jpg
343 | hair_hard:girl-1219339_1920.jpg:2008_007733.jpg
344 | hair_hard:girl-1219339_1920.jpg:2009_001663.jpg
345 | hair_hard:girl-1219339_1920.jpg:2009_003696.jpg
346 | hair_hard:girl-1219339_1920.jpg:2011_000666.jpg
347 | hair_hard:girl-1219339_1920.jpg:2011_004877.jpg
348 | hair_hard:girl-1219339_1920.jpg:2011_004911.jpg
349 | hair_hard:girl-1219339_1920.jpg:2012_001709.jpg
350 | hair_hard:girl-1219339_1920.jpg:2012_003608.jpg
351 | hair_hard:girl-1467820_1280.jpg:2007_008948.jpg
352 | hair_hard:girl-1467820_1280.jpg:2008_005649.jpg
353 | hair_hard:girl-1467820_1280.jpg:2008_005935.jpg
354 | hair_hard:girl-1467820_1280.jpg:2010_001174.jpg
355 | hair_hard:girl-1467820_1280.jpg:2011_000612.jpg
356 | hair_hard:girl-1467820_1280.jpg:2011_001794.jpg
357 | hair_hard:girl-1467820_1280.jpg:2011_007150.jpg
358 | hair_hard:girl-1467820_1280.jpg:2012_001009.jpg
359 | hair_hard:girl-1467820_1280.jpg:2012_001954.jpg
360 | hair_hard:girl-1467820_1280.jpg:2012_002547.jpg
361 | hair_hard:long-1245787_1920.jpg:2008_003593.jpg
362 | hair_hard:long-1245787_1920.jpg:2008_006509.jpg
363 | hair_hard:long-1245787_1920.jpg:2008_007733.jpg
364 | hair_hard:long-1245787_1920.jpg:2009_000790.jpg
365 | hair_hard:long-1245787_1920.jpg:2009_004581.jpg
366 | hair_hard:long-1245787_1920.jpg:2009_004922.jpg
367 | hair_hard:long-1245787_1920.jpg:2010_001070.jpg
368 | hair_hard:long-1245787_1920.jpg:2010_006609.jpg
369 | hair_hard:long-1245787_1920.jpg:2010_006750.jpg
370 | hair_hard:long-1245787_1920.jpg:2011_001011.jpg
371 | hair_hard:woman-952506_1920 (1).jpg:2007_002565.jpg
372 | hair_hard:woman-952506_1920 (1).jpg:2007_008815.jpg
373 | hair_hard:woman-952506_1920 (1).jpg:2008_003448.jpg
374 | hair_hard:woman-952506_1920 (1).jpg:2009_001684.jpg
375 | hair_hard:woman-952506_1920 (1).jpg:2009_003513.jpg
376 | hair_hard:woman-952506_1920 (1).jpg:2010_000553.jpg
377 | hair_hard:woman-952506_1920 (1).jpg:2010_002714.jpg
378 | hair_hard:woman-952506_1920 (1).jpg:2010_004848.jpg
379 | hair_hard:woman-952506_1920 (1).jpg:2011_005148.jpg
380 | hair_hard:woman-952506_1920 (1).jpg:2012_001064.jpg
381 | hair_hard:woman-morning-bathrobe-bathroom.jpg:2008_001699.jpg
382 | hair_hard:woman-morning-bathrobe-bathroom.jpg:2008_007471.jpg
383 | hair_hard:woman-morning-bathrobe-bathroom.jpg:2008_007904.jpg
384 | hair_hard:woman-morning-bathrobe-bathroom.jpg:2008_008190.jpg
385 | hair_hard:woman-morning-bathrobe-bathroom.jpg:2010_002582.jpg
386 | hair_hard:woman-morning-bathrobe-bathroom.jpg:2010_006032.jpg
387 | hair_hard:woman-morning-bathrobe-bathroom.jpg:2011_000787.jpg
388 | hair_hard:woman-morning-bathrobe-bathroom.jpg:2011_001440.jpg
389 | hair_hard:woman-morning-bathrobe-bathroom.jpg:2011_002975.jpg
390 | hair_hard:woman-morning-bathrobe-bathroom.jpg:2012_003865.jpg
391 | insect:afeb913765053b516089cfcd645556b5--flying-insects-beautiful-bugs.jpg:2008_001712.jpg
392 | insect:afeb913765053b516089cfcd645556b5--flying-insects-beautiful-bugs.jpg:2008_003451.jpg
393 | insect:afeb913765053b516089cfcd645556b5--flying-insects-beautiful-bugs.jpg:2008_003860.jpg
394 | insect:afeb913765053b516089cfcd645556b5--flying-insects-beautiful-bugs.jpg:2008_006182.jpg
395 | insect:afeb913765053b516089cfcd645556b5--flying-insects-beautiful-bugs.jpg:2009_000690.jpg
396 | insect:afeb913765053b516089cfcd645556b5--flying-insects-beautiful-bugs.jpg:2009_003156.jpg
397 | insect:afeb913765053b516089cfcd645556b5--flying-insects-beautiful-bugs.jpg:2010_002332.jpg
398 | insect:afeb913765053b516089cfcd645556b5--flying-insects-beautiful-bugs.jpg:2010_006076.jpg
399 | insect:afeb913765053b516089cfcd645556b5--flying-insects-beautiful-bugs.jpg:2012_001568.jpg
400 | insect:afeb913765053b516089cfcd645556b5--flying-insects-beautiful-bugs.jpg:2012_004246.jpg
401 | insect:dc803fd5de157c41b4c0320a9aeb6887.jpg:2007_001225.jpg
402 | insect:dc803fd5de157c41b4c0320a9aeb6887.jpg:2008_000566.jpg
403 | insect:dc803fd5de157c41b4c0320a9aeb6887.jpg:2008_002826.jpg
404 | insect:dc803fd5de157c41b4c0320a9aeb6887.jpg:2008_006339.jpg
405 | insect:dc803fd5de157c41b4c0320a9aeb6887.jpg:2009_002240.jpg
406 | insect:dc803fd5de157c41b4c0320a9aeb6887.jpg:2011_001288.jpg
407 | insect:dc803fd5de157c41b4c0320a9aeb6887.jpg:2011_003730.jpg
408 | insect:dc803fd5de157c41b4c0320a9aeb6887.jpg:2012_001007.jpg
409 | insect:dc803fd5de157c41b4c0320a9aeb6887.jpg:2012_001465.jpg
410 | insect:dc803fd5de157c41b4c0320a9aeb6887.jpg:2012_001518.jpg
411 | insect:nature-insect-macro-dragonfly-34487.jpg:2007_009221.jpg
412 | insect:nature-insect-macro-dragonfly-34487.jpg:2008_001895.jpg
413 | insect:nature-insect-macro-dragonfly-34487.jpg:2008_003768.jpg
414 | insect:nature-insect-macro-dragonfly-34487.jpg:2008_007761.jpg
415 | insect:nature-insect-macro-dragonfly-34487.jpg:2008_008278.jpg
416 | insect:nature-insect-macro-dragonfly-34487.jpg:2010_004225.jpg
417 | insect:nature-insect-macro-dragonfly-34487.jpg:2011_002343.jpg
418 | insect:nature-insect-macro-dragonfly-34487.jpg:2011_005492.jpg
419 | insect:nature-insect-macro-dragonfly-34487.jpg:2011_006245.jpg
420 | insect:nature-insect-macro-dragonfly-34487.jpg:2012_001070.jpg
421 | insect:normalized.jpg:2007_003815.jpg
422 | insect:normalized.jpg:2009_002382.jpg
423 | insect:normalized.jpg:2010_002251.jpg
424 | insect:normalized.jpg:2010_002921.jpg
425 | insect:normalized.jpg:2011_001571.jpg
426 | insect:normalized.jpg:2011_003419.jpg
427 | insect:normalized.jpg:2011_005646.jpg
428 | insect:normalized.jpg:2012_000997.jpg
429 | insect:normalized.jpg:2012_003110.jpg
430 | insect:normalized.jpg:2012_003276.jpg
431 | insect:yellow-and-black-dragonfly-on-brown-stick-3860844.jpg:2007_009594.jpg
432 | insect:yellow-and-black-dragonfly-on-brown-stick-3860844.jpg:2008_006482.jpg
433 | insect:yellow-and-black-dragonfly-on-brown-stick-3860844.jpg:2009_001424.jpg
434 | insect:yellow-and-black-dragonfly-on-brown-stick-3860844.jpg:2009_001847.jpg
435 | insect:yellow-and-black-dragonfly-on-brown-stick-3860844.jpg:2009_004336.jpg
436 | insect:yellow-and-black-dragonfly-on-brown-stick-3860844.jpg:2011_002589.jpg
437 | insect:yellow-and-black-dragonfly-on-brown-stick-3860844.jpg:2011_003530.jpg
438 | insect:yellow-and-black-dragonfly-on-brown-stick-3860844.jpg:2011_004277.jpg
439 | insect:yellow-and-black-dragonfly-on-brown-stick-3860844.jpg:2011_006740.jpg
440 | insect:yellow-and-black-dragonfly-on-brown-stick-3860844.jpg:2012_003593.jpg
441 | motion:creative+dance+headshots+minneapolis-3.jpg:2008_001140.jpg
442 | motion:creative+dance+headshots+minneapolis-3.jpg:2009_002958.jpg
443 | motion:creative+dance+headshots+minneapolis-3.jpg:2010_000394.jpg
444 | motion:creative+dance+headshots+minneapolis-3.jpg:2010_001000.jpg
445 | motion:creative+dance+headshots+minneapolis-3.jpg:2010_002808.jpg
446 | motion:creative+dance+headshots+minneapolis-3.jpg:2010_004637.jpg
447 | motion:creative+dance+headshots+minneapolis-3.jpg:2010_005718.jpg
448 | motion:creative+dance+headshots+minneapolis-3.jpg:2011_004038.jpg
449 | motion:creative+dance+headshots+minneapolis-3.jpg:2011_006670.jpg
450 | motion:creative+dance+headshots+minneapolis-3.jpg:2011_007132.jpg
451 | motion:motion-blur-birds.jpg:2008_001625.jpg
452 | motion:motion-blur-birds.jpg:2008_005248.jpg
453 | motion:motion-blur-birds.jpg:2008_007701.jpg
454 | motion:motion-blur-birds.jpg:2008_008075.jpg
455 | motion:motion-blur-birds.jpg:2009_000805.jpg
456 | motion:motion-blur-birds.jpg:2009_004263.jpg
457 | motion:motion-blur-birds.jpg:2010_004029.jpg
458 | motion:motion-blur-birds.jpg:2011_004088.jpg
459 | motion:motion-blur-birds.jpg:2011_004285.jpg
460 | motion:motion-blur-birds.jpg:2011_007083.jpg
461 | motion:motion-blur-dance.jpg:2008_002558.jpg
462 | motion:motion-blur-dance.jpg:2008_002603.jpg
463 | motion:motion-blur-dance.jpg:2008_007509.jpg
464 | motion:motion-blur-dance.jpg:2008_007755.jpg
465 | motion:motion-blur-dance.jpg:2009_002175.jpg
466 | motion:motion-blur-dance.jpg:2009_003976.jpg
467 | motion:motion-blur-dance.jpg:2010_001344.jpg
468 | motion:motion-blur-dance.jpg:2011_003726.jpg
469 | motion:motion-blur-dance.jpg:2011_005510.jpg
470 | motion:motion-blur-dance.jpg:2012_000838.jpg
471 | net:HTB1ELiPXPfguuRjSspaq6yXVXXa6.jpg:2008_000727.jpg
472 | net:HTB1ELiPXPfguuRjSspaq6yXVXXa6.jpg:2009_004290.jpg
473 | net:HTB1ELiPXPfguuRjSspaq6yXVXXa6.jpg:2010_000548.jpg
474 | net:HTB1ELiPXPfguuRjSspaq6yXVXXa6.jpg:2010_002507.jpg
475 | net:HTB1ELiPXPfguuRjSspaq6yXVXXa6.jpg:2010_002697.jpg
476 | net:HTB1ELiPXPfguuRjSspaq6yXVXXa6.jpg:2010_003249.jpg
477 | net:HTB1ELiPXPfguuRjSspaq6yXVXXa6.jpg:2010_005369.jpg
478 | net:HTB1ELiPXPfguuRjSspaq6yXVXXa6.jpg:2011_000408.jpg
479 | net:HTB1ELiPXPfguuRjSspaq6yXVXXa6.jpg:2011_002039.jpg
480 | net:HTB1ELiPXPfguuRjSspaq6yXVXXa6.jpg:2012_000184.jpg
481 | net:sieve-641426_1920.jpg:2008_000944.jpg
482 | net:sieve-641426_1920.jpg:2008_002973.jpg
483 | net:sieve-641426_1920.jpg:2009_002149.jpg
484 | net:sieve-641426_1920.jpg:2009_002343.jpg
485 | net:sieve-641426_1920.jpg:2009_002914.jpg
486 | net:sieve-641426_1920.jpg:2009_003282.jpg
487 | net:sieve-641426_1920.jpg:2010_000759.jpg
488 | net:sieve-641426_1920.jpg:2012_001185.jpg
489 | net:sieve-641426_1920.jpg:2012_002213.jpg
490 | net:sieve-641426_1920.jpg:2012_003440.jpg
491 | net:steve-johnson-nCmq50z4o2E-unsplash.jpg:2008_000765.jpg
492 | net:steve-johnson-nCmq50z4o2E-unsplash.jpg:2008_001470.jpg
493 | net:steve-johnson-nCmq50z4o2E-unsplash.jpg:2010_000246.jpg
494 | net:steve-johnson-nCmq50z4o2E-unsplash.jpg:2010_005297.jpg
495 | net:steve-johnson-nCmq50z4o2E-unsplash.jpg:2010_006558.jpg
496 | net:steve-johnson-nCmq50z4o2E-unsplash.jpg:2011_000496.jpg
497 | net:steve-johnson-nCmq50z4o2E-unsplash.jpg:2011_002335.jpg
498 | net:steve-johnson-nCmq50z4o2E-unsplash.jpg:2011_005271.jpg
499 | net:steve-johnson-nCmq50z4o2E-unsplash.jpg:2012_002766.jpg
500 | net:steve-johnson-nCmq50z4o2E-unsplash.jpg:2012_003678.jpg
501 | plant_flower:close-up-photo-of-cactus-2463394.jpg:2007_000559.jpg
502 | plant_flower:close-up-photo-of-cactus-2463394.jpg:2008_005566.jpg
503 | plant_flower:close-up-photo-of-cactus-2463394.jpg:2008_005780.jpg
504 | plant_flower:close-up-photo-of-cactus-2463394.jpg:2010_001974.jpg
505 | plant_flower:close-up-photo-of-cactus-2463394.jpg:2010_005217.jpg
506 | plant_flower:close-up-photo-of-cactus-2463394.jpg:2011_001563.jpg
507 | plant_flower:close-up-photo-of-cactus-2463394.jpg:2011_002663.jpg
508 | plant_flower:close-up-photo-of-cactus-2463394.jpg:2011_007157.jpg
509 | plant_flower:close-up-photo-of-cactus-2463394.jpg:2012_001407.jpg
510 | plant_flower:close-up-photo-of-cactus-2463394.jpg:2012_004101.jpg
511 | plant_flower:dandelion-1335575_1920.jpg:2008_003743.jpg
512 | plant_flower:dandelion-1335575_1920.jpg:2008_006642.jpg
513 | plant_flower:dandelion-1335575_1920.jpg:2009_001636.jpg
514 | plant_flower:dandelion-1335575_1920.jpg:2010_002845.jpg
515 | plant_flower:dandelion-1335575_1920.jpg:2010_004286.jpg
516 | plant_flower:dandelion-1335575_1920.jpg:2010_004357.jpg
517 | plant_flower:dandelion-1335575_1920.jpg:2011_007201.jpg
518 | plant_flower:dandelion-1335575_1920.jpg:2012_000691.jpg
519 | plant_flower:dandelion-1335575_1920.jpg:2012_001981.jpg
520 | plant_flower:dandelion-1335575_1920.jpg:2012_003103.jpg
521 | plant_flower:dandelion-1392492_1920.jpg:2007_006212.jpg
522 | plant_flower:dandelion-1392492_1920.jpg:2008_002118.jpg
523 | plant_flower:dandelion-1392492_1920.jpg:2009_003257.jpg
524 | plant_flower:dandelion-1392492_1920.jpg:2009_003707.jpg
525 | plant_flower:dandelion-1392492_1920.jpg:2009_003713.jpg
526 | plant_flower:dandelion-1392492_1920.jpg:2010_006902.jpg
527 | plant_flower:dandelion-1392492_1920.jpg:2011_004221.jpg
528 | plant_flower:dandelion-1392492_1920.jpg:2011_006656.jpg
529 | plant_flower:dandelion-1392492_1920.jpg:2011_006699.jpg
530 | plant_flower:dandelion-1392492_1920.jpg:2012_001281.jpg
531 | plant_flower:dandelion-1394577_1920.jpg:2008_001023.jpg
532 | plant_flower:dandelion-1394577_1920.jpg:2008_001640.jpg
533 | plant_flower:dandelion-1394577_1920.jpg:2008_002579.jpg
534 | plant_flower:dandelion-1394577_1920.jpg:2008_002616.jpg
535 | plant_flower:dandelion-1394577_1920.jpg:2008_003547.jpg
536 | plant_flower:dandelion-1394577_1920.jpg:2008_003978.jpg
537 | plant_flower:dandelion-1394577_1920.jpg:2009_000568.jpg
538 | plant_flower:dandelion-1394577_1920.jpg:2009_004815.jpg
539 | plant_flower:dandelion-1394577_1920.jpg:2011_000897.jpg
540 | plant_flower:dandelion-1394577_1920.jpg:2011_001422.jpg
541 | plant_flower:james-wainscoat-eeOlPoObi6w-unsplash.jpg:2007_004500.jpg
542 | plant_flower:james-wainscoat-eeOlPoObi6w-unsplash.jpg:2008_005310.jpg
543 | plant_flower:james-wainscoat-eeOlPoObi6w-unsplash.jpg:2008_005507.jpg
544 | plant_flower:james-wainscoat-eeOlPoObi6w-unsplash.jpg:2008_006671.jpg
545 | plant_flower:james-wainscoat-eeOlPoObi6w-unsplash.jpg:2008_007473.jpg
546 | plant_flower:james-wainscoat-eeOlPoObi6w-unsplash.jpg:2009_002697.jpg
547 | plant_flower:james-wainscoat-eeOlPoObi6w-unsplash.jpg:2011_007015.jpg
548 | plant_flower:james-wainscoat-eeOlPoObi6w-unsplash.jpg:2012_001521.jpg
549 | plant_flower:james-wainscoat-eeOlPoObi6w-unsplash.jpg:2012_003408.jpg
550 | plant_flower:james-wainscoat-eeOlPoObi6w-unsplash.jpg:2012_004172.jpg
551 | plant_flower:julius-drost-wpuYwd0qDms-unsplash.jpg:2007_005600.jpg
552 | plant_flower:julius-drost-wpuYwd0qDms-unsplash.jpg:2008_003701.jpg
553 | plant_flower:julius-drost-wpuYwd0qDms-unsplash.jpg:2008_005956.jpg
554 | plant_flower:julius-drost-wpuYwd0qDms-unsplash.jpg:2010_003054.jpg
555 | plant_flower:julius-drost-wpuYwd0qDms-unsplash.jpg:2010_004179.jpg
556 | plant_flower:julius-drost-wpuYwd0qDms-unsplash.jpg:2010_004849.jpg
557 | plant_flower:julius-drost-wpuYwd0qDms-unsplash.jpg:2011_001128.jpg
558 | plant_flower:julius-drost-wpuYwd0qDms-unsplash.jpg:2011_005864.jpg
559 | plant_flower:julius-drost-wpuYwd0qDms-unsplash.jpg:2011_006890.jpg
560 | plant_flower:julius-drost-wpuYwd0qDms-unsplash.jpg:2012_004302.jpg
561 | plant_flower:pasture-67658_1920.jpg:2008_004854.jpg
562 | plant_flower:pasture-67658_1920.jpg:2009_000512.jpg
563 | plant_flower:pasture-67658_1920.jpg:2009_002616.jpg
564 | plant_flower:pasture-67658_1920.jpg:2010_001860.jpg
565 | plant_flower:pasture-67658_1920.jpg:2011_000875.jpg
566 | plant_flower:pasture-67658_1920.jpg:2011_001463.jpg
567 | plant_flower:pasture-67658_1920.jpg:2011_001841.jpg
568 | plant_flower:pasture-67658_1920.jpg:2011_007210.jpg
569 | plant_flower:pasture-67658_1920.jpg:2012_002393.jpg
570 | plant_flower:pasture-67658_1920.jpg:2012_004178.jpg
571 | plant_leaf:celine-aumard-ot-QByD7ra4-unsplash.jpg:2008_003924.jpg
572 | plant_leaf:celine-aumard-ot-QByD7ra4-unsplash.jpg:2008_006923.jpg
573 | plant_leaf:celine-aumard-ot-QByD7ra4-unsplash.jpg:2008_007485.jpg
574 | plant_leaf:celine-aumard-ot-QByD7ra4-unsplash.jpg:2010_002208.jpg
575 | plant_leaf:celine-aumard-ot-QByD7ra4-unsplash.jpg:2010_004286.jpg
576 | plant_leaf:celine-aumard-ot-QByD7ra4-unsplash.jpg:2011_003822.jpg
577 | plant_leaf:celine-aumard-ot-QByD7ra4-unsplash.jpg:2011_004568.jpg
578 | plant_leaf:celine-aumard-ot-QByD7ra4-unsplash.jpg:2012_000025.jpg
579 | plant_leaf:celine-aumard-ot-QByD7ra4-unsplash.jpg:2012_001117.jpg
580 | plant_leaf:celine-aumard-ot-QByD7ra4-unsplash.jpg:2012_002533.jpg
581 | plant_leaf:linh-le-AeqJWvRVI94-unsplash.jpg:2007_005019.jpg
582 | plant_leaf:linh-le-AeqJWvRVI94-unsplash.jpg:2008_002281.jpg
583 | plant_leaf:linh-le-AeqJWvRVI94-unsplash.jpg:2008_002705.jpg
584 | plant_leaf:linh-le-AeqJWvRVI94-unsplash.jpg:2008_008706.jpg
585 | plant_leaf:linh-le-AeqJWvRVI94-unsplash.jpg:2009_004453.jpg
586 | plant_leaf:linh-le-AeqJWvRVI94-unsplash.jpg:2010_003469.jpg
587 | plant_leaf:linh-le-AeqJWvRVI94-unsplash.jpg:2010_006837.jpg
588 | plant_leaf:linh-le-AeqJWvRVI94-unsplash.jpg:2011_000583.jpg
589 | plant_leaf:linh-le-AeqJWvRVI94-unsplash.jpg:2011_005121.jpg
590 | plant_leaf:linh-le-AeqJWvRVI94-unsplash.jpg:2011_006753.jpg
591 | plant_tree:branches-bright-clouds-countryside-218673.jpg:2008_000143.jpg
592 | plant_tree:branches-bright-clouds-countryside-218673.jpg:2008_002631.jpg
593 | plant_tree:branches-bright-clouds-countryside-218673.jpg:2008_004976.jpg
594 | plant_tree:branches-bright-clouds-countryside-218673.jpg:2008_005010.jpg
595 | plant_tree:branches-bright-clouds-countryside-218673.jpg:2009_004183.jpg
596 | plant_tree:branches-bright-clouds-countryside-218673.jpg:2009_004568.jpg
597 | plant_tree:branches-bright-clouds-countryside-218673.jpg:2010_003401.jpg
598 | plant_tree:branches-bright-clouds-countryside-218673.jpg:2010_004558.jpg
599 | plant_tree:branches-bright-clouds-countryside-218673.jpg:2010_006881.jpg
600 | plant_tree:branches-bright-clouds-countryside-218673.jpg:2012_001819.jpg
601 | plant_tree:daylight-forest-nature-park-589802.jpg:2007_008051.jpg
602 | plant_tree:daylight-forest-nature-park-589802.jpg:2008_005788.jpg
603 | plant_tree:daylight-forest-nature-park-589802.jpg:2008_006071.jpg
604 | plant_tree:daylight-forest-nature-park-589802.jpg:2008_008166.jpg
605 | plant_tree:daylight-forest-nature-park-589802.jpg:2009_000923.jpg
606 | plant_tree:daylight-forest-nature-park-589802.jpg:2009_003310.jpg
607 | plant_tree:daylight-forest-nature-park-589802.jpg:2010_001856.jpg
608 | plant_tree:daylight-forest-nature-park-589802.jpg:2011_000165.jpg
609 | plant_tree:daylight-forest-nature-park-589802.jpg:2011_002793.jpg
610 | plant_tree:daylight-forest-nature-park-589802.jpg:2011_003427.jpg
611 | plastic_bag:3.jpg:2007_003957.jpg
612 | plastic_bag:3.jpg:2007_005600.jpg
613 | plastic_bag:3.jpg:2008_002307.jpg
614 | plastic_bag:3.jpg:2008_006295.jpg
615 | plastic_bag:3.jpg:2009_005119.jpg
616 | plastic_bag:3.jpg:2010_001574.jpg
617 | plastic_bag:3.jpg:2010_005301.jpg
618 | plastic_bag:3.jpg:2011_004194.jpg
619 | plastic_bag:3.jpg:2011_006511.jpg
620 | plastic_bag:3.jpg:2012_003325.jpg
621 | plastic_bag:63_obracadabra-3.jpg:2008_000283.jpg
622 | plastic_bag:63_obracadabra-3.jpg:2009_003090.jpg
623 | plastic_bag:63_obracadabra-3.jpg:2009_003626.jpg
624 | plastic_bag:63_obracadabra-3.jpg:2010_000973.jpg
625 | plastic_bag:63_obracadabra-3.jpg:2010_003912.jpg
626 | plastic_bag:63_obracadabra-3.jpg:2010_005519.jpg
627 | plastic_bag:63_obracadabra-3.jpg:2011_003163.jpg
628 | plastic_bag:63_obracadabra-3.jpg:2011_006136.jpg
629 | plastic_bag:63_obracadabra-3.jpg:2012_001349.jpg
630 | plastic_bag:63_obracadabra-3.jpg:2012_001360.jpg
631 | sharp:beautiful-bloom-blooming-blur-573020.jpg:2007_000837.jpg
632 | sharp:beautiful-bloom-blooming-blur-573020.jpg:2008_006834.jpg
633 | sharp:beautiful-bloom-blooming-blur-573020.jpg:2008_007717.jpg
634 | sharp:beautiful-bloom-blooming-blur-573020.jpg:2008_007819.jpg
635 | sharp:beautiful-bloom-blooming-blur-573020.jpg:2009_003433.jpg
636 | sharp:beautiful-bloom-blooming-blur-573020.jpg:2009_003500.jpg
637 | sharp:beautiful-bloom-blooming-blur-573020.jpg:2011_000808.jpg
638 | sharp:beautiful-bloom-blooming-blur-573020.jpg:2011_006453.jpg
639 | sharp:beautiful-bloom-blooming-blur-573020.jpg:2012_000813.jpg
640 | sharp:beautiful-bloom-blooming-blur-573020.jpg:2012_002625.jpg
641 | sharp:blur-branches-close-up-environment-213727.jpg:2008_004876.jpg
642 | sharp:blur-branches-close-up-environment-213727.jpg:2008_005190.jpg
643 | sharp:blur-branches-close-up-environment-213727.jpg:2009_000970.jpg
644 | sharp:blur-branches-close-up-environment-213727.jpg:2009_002393.jpg
645 | sharp:blur-branches-close-up-environment-213727.jpg:2010_005382.jpg
646 | sharp:blur-branches-close-up-environment-213727.jpg:2011_000965.jpg
647 | sharp:blur-branches-close-up-environment-213727.jpg:2011_001632.jpg
648 | sharp:blur-branches-close-up-environment-213727.jpg:2011_002927.jpg
649 | sharp:blur-branches-close-up-environment-213727.jpg:2012_001974.jpg
650 | sharp:blur-branches-close-up-environment-213727.jpg:2012_003830.jpg
651 | sharp:man-wearing-yellow-jacket-1317712.jpg:2008_004670.jpg
652 | sharp:man-wearing-yellow-jacket-1317712.jpg:2008_008480.jpg
653 | sharp:man-wearing-yellow-jacket-1317712.jpg:2009_000935.jpg
654 | sharp:man-wearing-yellow-jacket-1317712.jpg:2009_001621.jpg
655 | sharp:man-wearing-yellow-jacket-1317712.jpg:2009_004798.jpg
656 | sharp:man-wearing-yellow-jacket-1317712.jpg:2010_003024.jpg
657 | sharp:man-wearing-yellow-jacket-1317712.jpg:2010_005586.jpg
658 | sharp:man-wearing-yellow-jacket-1317712.jpg:2010_006182.jpg
659 | sharp:man-wearing-yellow-jacket-1317712.jpg:2012_000840.jpg
660 | sharp:man-wearing-yellow-jacket-1317712.jpg:2012_001264.jpg
661 | sharp:selective-focus-photo-of-smiling-man-looking-at-his-phone-3206080.jpg:2007_006281.jpg
662 | sharp:selective-focus-photo-of-smiling-man-looking-at-his-phone-3206080.jpg:2008_007031.jpg
663 | sharp:selective-focus-photo-of-smiling-man-looking-at-his-phone-3206080.jpg:2009_001201.jpg
664 | sharp:selective-focus-photo-of-smiling-man-looking-at-his-phone-3206080.jpg:2009_003407.jpg
665 | sharp:selective-focus-photo-of-smiling-man-looking-at-his-phone-3206080.jpg:2009_003832.jpg
666 | sharp:selective-focus-photo-of-smiling-man-looking-at-his-phone-3206080.jpg:2009_004346.jpg
667 | sharp:selective-focus-photo-of-smiling-man-looking-at-his-phone-3206080.jpg:2010_003201.jpg
668 | sharp:selective-focus-photo-of-smiling-man-looking-at-his-phone-3206080.jpg:2012_001224.jpg
669 | sharp:selective-focus-photo-of-smiling-man-looking-at-his-phone-3206080.jpg:2012_001469.jpg
670 | sharp:selective-focus-photo-of-smiling-man-looking-at-his-phone-3206080.jpg:2012_002141.jpg
671 | smoke_cloud:59127bd249542.jpg:2008_003884.jpg
672 | smoke_cloud:59127bd249542.jpg:2008_005204.jpg
673 | smoke_cloud:59127bd249542.jpg:2008_006586.jpg
674 | smoke_cloud:59127bd249542.jpg:2008_007500.jpg
675 | smoke_cloud:59127bd249542.jpg:2008_008127.jpg
676 | smoke_cloud:59127bd249542.jpg:2009_000085.jpg
677 | smoke_cloud:59127bd249542.jpg:2010_005448.jpg
678 | smoke_cloud:59127bd249542.jpg:2011_003381.jpg
679 | smoke_cloud:59127bd249542.jpg:2012_001382.jpg
680 | smoke_cloud:59127bd249542.jpg:2012_002808.jpg
681 | smoke_cloud:Smoke_3D_p.jpg:2007_001073.jpg
682 | smoke_cloud:Smoke_3D_p.jpg:2008_004502.jpg
683 | smoke_cloud:Smoke_3D_p.jpg:2009_003685.jpg
684 | smoke_cloud:Smoke_3D_p.jpg:2009_004718.jpg
685 | smoke_cloud:Smoke_3D_p.jpg:2009_005300.jpg
686 | smoke_cloud:Smoke_3D_p.jpg:2010_003801.jpg
687 | smoke_cloud:Smoke_3D_p.jpg:2010_005023.jpg
688 | smoke_cloud:Smoke_3D_p.jpg:2010_006140.jpg
689 | smoke_cloud:Smoke_3D_p.jpg:2011_002330.jpg
690 | smoke_cloud:Smoke_3D_p.jpg:2011_005725.jpg
691 | smoke_cloud:main-image-smoking2-copy.jpg:2007_002426.jpg
692 | smoke_cloud:main-image-smoking2-copy.jpg:2007_009425.jpg
693 | smoke_cloud:main-image-smoking2-copy.jpg:2008_003333.jpg
694 | smoke_cloud:main-image-smoking2-copy.jpg:2008_006169.jpg
695 | smoke_cloud:main-image-smoking2-copy.jpg:2008_006617.jpg
696 | smoke_cloud:main-image-smoking2-copy.jpg:2008_007544.jpg
697 | smoke_cloud:main-image-smoking2-copy.jpg:2010_003938.jpg
698 | smoke_cloud:main-image-smoking2-copy.jpg:2010_005997.jpg
699 | smoke_cloud:main-image-smoking2-copy.jpg:2011_000774.jpg
700 | smoke_cloud:main-image-smoking2-copy.jpg:2011_002018.jpg
701 | smoke_cloud:vape-smoke-on-black.jpg:2008_000656.jpg
702 | smoke_cloud:vape-smoke-on-black.jpg:2008_000981.jpg
703 | smoke_cloud:vape-smoke-on-black.jpg:2008_001257.jpg
704 | smoke_cloud:vape-smoke-on-black.jpg:2008_001533.jpg
705 | smoke_cloud:vape-smoke-on-black.jpg:2008_002662.jpg
706 | smoke_cloud:vape-smoke-on-black.jpg:2008_007897.jpg
707 | smoke_cloud:vape-smoke-on-black.jpg:2009_001370.jpg
708 | smoke_cloud:vape-smoke-on-black.jpg:2010_001052.jpg
709 | smoke_cloud:vape-smoke-on-black.jpg:2010_006765.jpg
710 | smoke_cloud:vape-smoke-on-black.jpg:2012_003940.jpg
711 | spider_web:418943ddce09fb6388a001b06b1a2d68.jpg:2008_004985.jpg
712 | spider_web:418943ddce09fb6388a001b06b1a2d68.jpg:2008_007019.jpg
713 | spider_web:418943ddce09fb6388a001b06b1a2d68.jpg:2008_007491.jpg
714 | spider_web:418943ddce09fb6388a001b06b1a2d68.jpg:2009_004708.jpg
715 | spider_web:418943ddce09fb6388a001b06b1a2d68.jpg:2011_000467.jpg
716 | spider_web:418943ddce09fb6388a001b06b1a2d68.jpg:2011_001043.jpg
717 | spider_web:418943ddce09fb6388a001b06b1a2d68.jpg:2011_001799.jpg
718 | spider_web:418943ddce09fb6388a001b06b1a2d68.jpg:2011_002992.jpg
719 | spider_web:418943ddce09fb6388a001b06b1a2d68.jpg:2012_001666.jpg
720 | spider_web:418943ddce09fb6388a001b06b1a2d68.jpg:2012_004077.jpg
721 | spider_web:cobweb-4439844_1280.jpg:2008_006111.jpg
722 | spider_web:cobweb-4439844_1280.jpg:2008_006837.jpg
723 | spider_web:cobweb-4439844_1280.jpg:2008_007696.jpg
724 | spider_web:cobweb-4439844_1280.jpg:2008_008732.jpg
725 | spider_web:cobweb-4439844_1280.jpg:2010_000645.jpg
726 | spider_web:cobweb-4439844_1280.jpg:2010_004184.jpg
727 | spider_web:cobweb-4439844_1280.jpg:2010_006605.jpg
728 | spider_web:cobweb-4439844_1280.jpg:2011_002419.jpg
729 | spider_web:cobweb-4439844_1280.jpg:2012_002356.jpg
730 | spider_web:cobweb-4439844_1280.jpg:2012_002571.jpg
731 | spider_web:jason-gardner-vxKamYLvV5k-unsplash.jpg:2007_004831.jpg
732 | spider_web:jason-gardner-vxKamYLvV5k-unsplash.jpg:2008_001437.jpg
733 | spider_web:jason-gardner-vxKamYLvV5k-unsplash.jpg:2008_004263.jpg
734 | spider_web:jason-gardner-vxKamYLvV5k-unsplash.jpg:2008_006810.jpg
735 | spider_web:jason-gardner-vxKamYLvV5k-unsplash.jpg:2009_000833.jpg
736 | spider_web:jason-gardner-vxKamYLvV5k-unsplash.jpg:2009_005001.jpg
737 | spider_web:jason-gardner-vxKamYLvV5k-unsplash.jpg:2010_000955.jpg
738 | spider_web:jason-gardner-vxKamYLvV5k-unsplash.jpg:2011_003358.jpg
739 | spider_web:jason-gardner-vxKamYLvV5k-unsplash.jpg:2011_006539.jpg
740 | spider_web:jason-gardner-vxKamYLvV5k-unsplash.jpg:2012_004083.jpg
741 | texture_holed:81+TCX-lrrL.jpg:2008_002167.jpg
742 | texture_holed:81+TCX-lrrL.jpg:2008_006394.jpg
743 | texture_holed:81+TCX-lrrL.jpg:2008_007594.jpg
744 | texture_holed:81+TCX-lrrL.jpg:2010_000910.jpg
745 | texture_holed:81+TCX-lrrL.jpg:2010_001305.jpg
746 | texture_holed:81+TCX-lrrL.jpg:2010_002780.jpg
747 | texture_holed:81+TCX-lrrL.jpg:2011_003715.jpg
748 | texture_holed:81+TCX-lrrL.jpg:2011_006654.jpg
749 | texture_holed:81+TCX-lrrL.jpg:2011_006798.jpg
750 | texture_holed:81+TCX-lrrL.jpg:2012_000701.jpg
751 | texture_holed:antique-honiton-lace-1182740_1920.jpg:2008_004729.jpg
752 | texture_holed:antique-honiton-lace-1182740_1920.jpg:2008_005801.jpg
753 | texture_holed:antique-honiton-lace-1182740_1920.jpg:2008_006546.jpg
754 | texture_holed:antique-honiton-lace-1182740_1920.jpg:2010_003156.jpg
755 | texture_holed:antique-honiton-lace-1182740_1920.jpg:2010_004783.jpg
756 | texture_holed:antique-honiton-lace-1182740_1920.jpg:2011_004298.jpg
757 | texture_holed:antique-honiton-lace-1182740_1920.jpg:2011_005433.jpg
758 | texture_holed:antique-honiton-lace-1182740_1920.jpg:2012_000592.jpg
759 | texture_holed:antique-honiton-lace-1182740_1920.jpg:2012_000873.jpg
760 | texture_holed:antique-honiton-lace-1182740_1920.jpg:2012_001601.jpg
761 | texture_holed:mksmh98zhnaju_cblksilver_v099.jpg:2007_009323.jpg
762 | texture_holed:mksmh98zhnaju_cblksilver_v099.jpg:2008_003423.jpg
763 | texture_holed:mksmh98zhnaju_cblksilver_v099.jpg:2008_004763.jpg
764 | texture_holed:mksmh98zhnaju_cblksilver_v099.jpg:2009_000082.jpg
765 | texture_holed:mksmh98zhnaju_cblksilver_v099.jpg:2009_001188.jpg
766 | texture_holed:mksmh98zhnaju_cblksilver_v099.jpg:2009_003347.jpg
767 | texture_holed:mksmh98zhnaju_cblksilver_v099.jpg:2010_001608.jpg
768 | texture_holed:mksmh98zhnaju_cblksilver_v099.jpg:2011_002389.jpg
769 | texture_holed:mksmh98zhnaju_cblksilver_v099.jpg:2011_004961.jpg
770 | texture_holed:mksmh98zhnaju_cblksilver_v099.jpg:2011_006290.jpg
771 | texture_smooth:wedding-dresses-1486260_1280.jpg:2008_004802.jpg
772 | texture_smooth:wedding-dresses-1486260_1280.jpg:2008_005167.jpg
773 | texture_smooth:wedding-dresses-1486260_1280.jpg:2009_000690.jpg
774 | texture_smooth:wedding-dresses-1486260_1280.jpg:2010_001456.jpg
775 | texture_smooth:wedding-dresses-1486260_1280.jpg:2010_002614.jpg
776 | texture_smooth:wedding-dresses-1486260_1280.jpg:2010_004312.jpg
777 | texture_smooth:wedding-dresses-1486260_1280.jpg:2011_000397.jpg
778 | texture_smooth:wedding-dresses-1486260_1280.jpg:2011_000799.jpg
779 | texture_smooth:wedding-dresses-1486260_1280.jpg:2011_005286.jpg
780 | texture_smooth:wedding-dresses-1486260_1280.jpg:2012_002276.jpg
781 | texture_smooth:woman-in-white-dress-covering-her-face-with-hands-with-3951887.jpg:2007_004795.jpg
782 | texture_smooth:woman-in-white-dress-covering-her-face-with-hands-with-3951887.jpg:2008_000563.jpg
783 | texture_smooth:woman-in-white-dress-covering-her-face-with-hands-with-3951887.jpg:2008_005269.jpg
784 | texture_smooth:woman-in-white-dress-covering-her-face-with-hands-with-3951887.jpg:2009_001755.jpg
785 | texture_smooth:woman-in-white-dress-covering-her-face-with-hands-with-3951887.jpg:2009_003267.jpg
786 | texture_smooth:woman-in-white-dress-covering-her-face-with-hands-with-3951887.jpg:2010_003395.jpg
787 | texture_smooth:woman-in-white-dress-covering-her-face-with-hands-with-3951887.jpg:2010_004037.jpg
788 | texture_smooth:woman-in-white-dress-covering-her-face-with-hands-with-3951887.jpg:2011_004963.jpg
789 | texture_smooth:woman-in-white-dress-covering-her-face-with-hands-with-3951887.jpg:2011_005053.jpg
790 | texture_smooth:woman-in-white-dress-covering-her-face-with-hands-with-3951887.jpg:2012_000650.jpg
791 | texture_smooth:woman-in-white-wedding-dress-3830442.jpg:2007_001321.jpg
792 | texture_smooth:woman-in-white-wedding-dress-3830442.jpg:2007_004281.jpg
793 | texture_smooth:woman-in-white-wedding-dress-3830442.jpg:2008_004464.jpg
794 | texture_smooth:woman-in-white-wedding-dress-3830442.jpg:2009_001055.jpg
795 | texture_smooth:woman-in-white-wedding-dress-3830442.jpg:2010_000272.jpg
796 | texture_smooth:woman-in-white-wedding-dress-3830442.jpg:2010_001614.jpg
797 | texture_smooth:woman-in-white-wedding-dress-3830442.jpg:2011_002379.jpg
798 | texture_smooth:woman-in-white-wedding-dress-3830442.jpg:2011_002422.jpg
799 | texture_smooth:woman-in-white-wedding-dress-3830442.jpg:2011_007076.jpg
800 | texture_smooth:woman-in-white-wedding-dress-3830442.jpg:2012_003555.jpg
801 | water_drop:cobweb-morgentau-dew-dewdrop-52501.jpg:2007_005828.jpg
802 | water_drop:cobweb-morgentau-dew-dewdrop-52501.jpg:2008_004898.jpg
803 | water_drop:cobweb-morgentau-dew-dewdrop-52501.jpg:2008_007147.jpg
804 | water_drop:cobweb-morgentau-dew-dewdrop-52501.jpg:2009_000882.jpg
805 | water_drop:cobweb-morgentau-dew-dewdrop-52501.jpg:2009_004513.jpg
806 | water_drop:cobweb-morgentau-dew-dewdrop-52501.jpg:2010_001085.jpg
807 | water_drop:cobweb-morgentau-dew-dewdrop-52501.jpg:2011_004068.jpg
808 | water_drop:cobweb-morgentau-dew-dewdrop-52501.jpg:2011_005813.jpg
809 | water_drop:cobweb-morgentau-dew-dewdrop-52501.jpg:2012_001443.jpg
810 | water_drop:cobweb-morgentau-dew-dewdrop-52501.jpg:2012_003112.jpg
811 | water_drop:dew-drops-2776772_1920.jpg:2007_002462.jpg
812 | water_drop:dew-drops-2776772_1920.jpg:2008_007941.jpg
813 | water_drop:dew-drops-2776772_1920.jpg:2008_008072.jpg
814 | water_drop:dew-drops-2776772_1920.jpg:2009_001542.jpg
815 | water_drop:dew-drops-2776772_1920.jpg:2009_001964.jpg
816 | water_drop:dew-drops-2776772_1920.jpg:2009_005215.jpg
817 | water_drop:dew-drops-2776772_1920.jpg:2010_001212.jpg
818 | water_drop:dew-drops-2776772_1920.jpg:2010_006026.jpg
819 | water_drop:dew-drops-2776772_1920.jpg:2011_000321.jpg
820 | water_drop:dew-drops-2776772_1920.jpg:2011_003220.jpg
821 | water_drop:pexels-photo-68084.jpg:2007_003131.jpg
822 | water_drop:pexels-photo-68084.jpg:2008_002547.jpg
823 | water_drop:pexels-photo-68084.jpg:2008_003073.jpg
824 | water_drop:pexels-photo-68084.jpg:2008_004703.jpg
825 | water_drop:pexels-photo-68084.jpg:2009_000066.jpg
826 | water_drop:pexels-photo-68084.jpg:2009_002214.jpg
827 | water_drop:pexels-photo-68084.jpg:2009_004953.jpg
828 | water_drop:pexels-photo-68084.jpg:2010_000079.jpg
829 | water_drop:pexels-photo-68084.jpg:2010_000748.jpg
830 | water_drop:pexels-photo-68084.jpg:2012_002657.jpg
831 | water_drop:spider-web-1037165_1280.jpg:2008_001919.jpg
832 | water_drop:spider-web-1037165_1280.jpg:2008_003870.jpg
833 | water_drop:spider-web-1037165_1280.jpg:2009_004112.jpg
834 | water_drop:spider-web-1037165_1280.jpg:2010_002455.jpg
835 | water_drop:spider-web-1037165_1280.jpg:2011_001526.jpg
836 | water_drop:spider-web-1037165_1280.jpg:2011_002793.jpg
837 | water_drop:spider-web-1037165_1280.jpg:2011_004647.jpg
838 | water_drop:spider-web-1037165_1280.jpg:2012_001743.jpg
839 | water_drop:spider-web-1037165_1280.jpg:2012_002924.jpg
840 | water_drop:spider-web-1037165_1280.jpg:2012_003653.jpg
841 | water_spray:alcohol-alcoholic-beverage-cold-339696.jpg:2008_000950.jpg
842 | water_spray:alcohol-alcoholic-beverage-cold-339696.jpg:2008_002885.jpg
843 | water_spray:alcohol-alcoholic-beverage-cold-339696.jpg:2008_004290.jpg
844 | water_spray:alcohol-alcoholic-beverage-cold-339696.jpg:2010_000241.jpg
845 | water_spray:alcohol-alcoholic-beverage-cold-339696.jpg:2010_001659.jpg
846 | water_spray:alcohol-alcoholic-beverage-cold-339696.jpg:2010_006555.jpg
847 | water_spray:alcohol-alcoholic-beverage-cold-339696.jpg:2011_003988.jpg
848 | water_spray:alcohol-alcoholic-beverage-cold-339696.jpg:2011_004751.jpg
849 | water_spray:alcohol-alcoholic-beverage-cold-339696.jpg:2012_000746.jpg
850 | water_spray:alcohol-alcoholic-beverage-cold-339696.jpg:2012_003403.jpg
851 | water_spray:alcoholic-beverage-beverage-cocktail-drink-588575.jpg:2007_000822.jpg
852 | water_spray:alcoholic-beverage-beverage-cocktail-drink-588575.jpg:2008_002679.jpg
853 | water_spray:alcoholic-beverage-beverage-cocktail-drink-588575.jpg:2008_005134.jpg
854 | water_spray:alcoholic-beverage-beverage-cocktail-drink-588575.jpg:2008_005406.jpg
855 | water_spray:alcoholic-beverage-beverage-cocktail-drink-588575.jpg:2009_000328.jpg
856 | water_spray:alcoholic-beverage-beverage-cocktail-drink-588575.jpg:2010_006265.jpg
857 | water_spray:alcoholic-beverage-beverage-cocktail-drink-588575.jpg:2010_006580.jpg
858 | water_spray:alcoholic-beverage-beverage-cocktail-drink-588575.jpg:2011_000250.jpg
859 | water_spray:alcoholic-beverage-beverage-cocktail-drink-588575.jpg:2011_005332.jpg
860 | water_spray:alcoholic-beverage-beverage-cocktail-drink-588575.jpg:2012_003494.jpg
861 | water_spray:black-background-bubble-clean-clear-372980.jpg:2007_000636.jpg
862 | water_spray:black-background-bubble-clean-clear-372980.jpg:2008_000196.jpg
863 | water_spray:black-background-bubble-clean-clear-372980.jpg:2010_001110.jpg
864 | water_spray:black-background-bubble-clean-clear-372980.jpg:2011_002683.jpg
865 | water_spray:black-background-bubble-clean-clear-372980.jpg:2011_003527.jpg
866 | water_spray:black-background-bubble-clean-clear-372980.jpg:2011_005568.jpg
867 | water_spray:black-background-bubble-clean-clear-372980.jpg:2011_005639.jpg
868 | water_spray:black-background-bubble-clean-clear-372980.jpg:2012_002443.jpg
869 | water_spray:black-background-bubble-clean-clear-372980.jpg:2012_003212.jpg
870 | water_spray:black-background-bubble-clean-clear-372980.jpg:2012_003588.jpg
871 | water_spray:drip-water-spray-liquid.jpg:2007_009610.jpg
872 | water_spray:drip-water-spray-liquid.jpg:2008_005068.jpg
873 | water_spray:drip-water-spray-liquid.jpg:2008_005137.jpg
874 | water_spray:drip-water-spray-liquid.jpg:2008_008058.jpg
875 | water_spray:drip-water-spray-liquid.jpg:2008_008541.jpg
876 | water_spray:drip-water-spray-liquid.jpg:2009_004839.jpg
877 | water_spray:drip-water-spray-liquid.jpg:2010_003081.jpg
878 | water_spray:drip-water-spray-liquid.jpg:2010_005190.jpg
879 | water_spray:drip-water-spray-liquid.jpg:2012_001586.jpg
880 | water_spray:drip-water-spray-liquid.jpg:2012_003623.jpg
881 | water_spray:photography-of-clear-wine-glass-227906.jpg:2008_008155.jpg
882 | water_spray:photography-of-clear-wine-glass-227906.jpg:2009_000418.jpg
883 | water_spray:photography-of-clear-wine-glass-227906.jpg:2009_001147.jpg
884 | water_spray:photography-of-clear-wine-glass-227906.jpg:2009_002750.jpg
885 | water_spray:photography-of-clear-wine-glass-227906.jpg:2009_004748.jpg
886 | water_spray:photography-of-clear-wine-glass-227906.jpg:2011_002212.jpg
887 | water_spray:photography-of-clear-wine-glass-227906.jpg:2011_002805.jpg
888 | water_spray:photography-of-clear-wine-glass-227906.jpg:2011_006709.jpg
889 | water_spray:photography-of-clear-wine-glass-227906.jpg:2012_000623.jpg
890 | water_spray:photography-of-clear-wine-glass-227906.jpg:2012_000863.jpg
891 |
--------------------------------------------------------------------------------
/dataset/generate_testset.py:
--------------------------------------------------------------------------------
1 | import os
2 | import sys
3 | import cv2
4 | import numpy as np
5 |
6 | from scipy.ndimage import morphology
7 |
8 |
9 | names = ['defocus', 'fire', 'fur', 'glass_ice', 'hair_easy', 'hair_hard',
10 | 'insect', 'motion', 'net', 'plant_flower', 'plant_leaf', 'plant_tree',
11 | 'plastic_bag', 'sharp', 'smoke_cloud', 'spider_web', 'texture_holed',
12 | 'texture_smooth', 'water_drop', 'water_spray']
13 | name2class = {name:idx for idx, name in enumerate(names)}
14 |
15 |
16 | def gen_trimap(alpha, ksize=3, iterations=5):
17 | kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (ksize, ksize))
18 | dilated = cv2.dilate(alpha, kernel, iterations=iterations)
19 | eroded = cv2.erode(alpha, kernel, iterations=iterations)
20 |     trimap = np.zeros(alpha.shape) + 128  # default: unknown band
21 |     trimap[eroded >= 255] = 255           # pixels still solid after erosion -> foreground
22 |     trimap[dilated <= 0] = 0              # pixels still empty after dilation -> background
23 | return trimap
24 |
25 |
26 | def composite(bg, fg, alpha):
27 | # bg: [h, w, 3], fg: [h, w, 3], alpha: [h, w]
28 |     h, w, c = fg.shape
29 | bh, bw, bc = bg.shape
30 | wratio = float(w) / bw
31 | hratio = float(h) / bh
32 | ratio = wratio if wratio > hratio else hratio
33 | if ratio > 1:
34 | new_bw = int(bw * ratio + 1.0)
35 | new_bh = int(bh * ratio + 1.0)
36 |         bg = cv2.resize(bg, (new_bw, new_bh), interpolation=cv2.INTER_LANCZOS4)
37 | bg = bg[0:h, 0:w, :]
38 | alpha_f = alpha[:,:,None] / 255.
39 | comp = (fg*alpha_f + bg*(1.-alpha_f)).astype(np.uint8)
40 | return comp, bg
41 |
42 |
43 | def read_and_resize(fg_path, alpha_path, max_size=1920, min_size=800):
44 | fg = cv2.imread(fg_path)
45 | alpha = cv2.imread(alpha_path, 0)
46 | if max_size > 0 and min_size > 0:
47 | h, w = alpha.shape[:2]
48 | r = max_size / max(h,w)
49 | if r < 1:
50 | th, tw = h*r, w*r
51 | else:
52 | th, tw = h, w
53 | r = min_size / min(th, tw)
54 | if r > 1:
55 | th, tw = int(th*r), int(tw*r)
56 | else:
57 | th, tw = int(th), int(tw)
58 |         if th != h or tw != w:
59 |             alpha = cv2.resize(alpha, (tw, th), interpolation=cv2.INTER_LANCZOS4)
60 |             fg = cv2.resize(fg, (tw, th), interpolation=cv2.INTER_LANCZOS4)
61 | return fg, alpha
62 |
63 |
64 | def read_and_composite(bg_path, fg_path, alpha_path, max_size=1920, min_size=800):
65 | fg, alpha = read_and_resize(fg_path, alpha_path, max_size, min_size)
66 | bg = cv2.imread(bg_path)
67 | comp, bg = composite(bg, fg, alpha)
68 | return alpha, fg, bg, comp
69 |
70 |
71 | def load_test_samples(test_fg_dir, test_bg_dir, filelist, sv_test_fg_dir):
72 | with open(filelist) as f:
73 | lines = f.read().splitlines()
74 | for line in lines:
75 | name, fg_name, bg_name = line.split(':')
76 | print(name, fg_name)
77 | alpha_file = os.path.join(test_fg_dir, name, "alpha", fg_name)
78 | fg_file = os.path.join(test_fg_dir, name, "fg", fg_name)
79 | bg_file = os.path.join(test_bg_dir, bg_name)
80 | filename = name + "_" + fg_name + "_" + bg_name
81 | alpha, fg, bg, comp = read_and_composite(bg_file, fg_file, alpha_file)
82 |
83 | trimap = gen_trimap(alpha)
84 | image_dir = os.path.join(sv_test_fg_dir, name, "merged")
85 | trimap_dir = os.path.join(sv_test_fg_dir, name, "trimap")
86 | alpha_dir = os.path.join(sv_test_fg_dir, name, "alpha")
87 | os.makedirs(image_dir, exist_ok=True)
88 | os.makedirs(trimap_dir, exist_ok=True)
89 | os.makedirs(alpha_dir, exist_ok=True)
90 |
91 | cv2.imwrite(os.path.join(image_dir, fg_name[:-4]+"_"+bg_name[:-4]+".png"), comp)
92 | cv2.imwrite(os.path.join(trimap_dir, fg_name[:-4]+"_"+bg_name[:-4]+".png"), trimap)
93 | cv2.imwrite(os.path.join(alpha_dir, fg_name[:-4]+"_"+bg_name[:-4]+".png"), alpha)
94 |
95 |
96 | if __name__ == "__main__":
97 | test_fg_dir = "PATH/TO/SIMD/ROOT/DIR"
98 | test_bg_dir = "PATH/TO/VOC/IMAGE/DIR"
99 | filelist_test = "SIMD_composition_test_filelist.txt"
100 | sv_test_fg_dir = "PATH/TO/SAVE/DIR"
101 | load_test_samples(test_fg_dir, test_bg_dir, filelist_test, sv_test_fg_dir)
102 |
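103 | # Directory layout assumed above (inferred from the os.path.join calls in
104 | # load_test_samples; the placeholder names are illustrative):
105 | #   <SIMD root>/<class name>/fg/<fg_name>      foreground images
106 | #   <SIMD root>/<class name>/alpha/<fg_name>   ground-truth alpha mattes
107 | #   <VOC image dir>/<bg_name>                  background images
108 | # Composites, trimaps and alphas are saved to <save dir>/<class name>/{merged,trimap,alpha}.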
--------------------------------------------------------------------------------
/figures/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nowsyn/SIM/dc5ed5a594fda16d3bd0a8d114b8740d876c12c8/figures/.DS_Store
--------------------------------------------------------------------------------
/figures/example1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nowsyn/SIM/dc5ed5a594fda16d3bd0a8d114b8740d876c12c8/figures/example1.png
--------------------------------------------------------------------------------
/figures/example2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nowsyn/SIM/dc5ed5a594fda16d3bd0a8d114b8740d876c12c8/figures/example2.png
--------------------------------------------------------------------------------
/figures/framework.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nowsyn/SIM/dc5ed5a594fda16d3bd0a8d114b8740d876c12c8/figures/framework.jpg
--------------------------------------------------------------------------------
/networks/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nowsyn/SIM/dc5ed5a594fda16d3bd0a8d114b8740d876c12c8/networks/__init__.py
--------------------------------------------------------------------------------
/networks/lap_pyramid_loss.py:
--------------------------------------------------------------------------------
1 | import torch
2 |
3 | def gauss_kernel(size=5, device=torch.device('cpu'), channels=3):
4 |     kernel = torch.tensor([[1., 4., 6., 4., 1.],
5 | [4., 16., 24., 16., 4.],
6 | [6., 24., 36., 24., 6.],
7 | [4., 16., 24., 16., 4.],
8 | [1., 4., 6., 4., 1.]])
9 | kernel /= 256.
10 | kernel = kernel.repeat(channels, 1, 1, 1)
11 | kernel = kernel.to(device)
12 | return kernel
13 |
14 | def downsample(x):
15 | return x[:, :, ::2, ::2]
16 |
17 | def upsample(x):
18 | cc = torch.cat([x, torch.zeros(x.shape[0], x.shape[1], x.shape[2], x.shape[3], device=x.device)], dim=3)
19 | cc = cc.view(x.shape[0], x.shape[1], x.shape[2]*2, x.shape[3])
20 | cc = cc.permute(0,1,3,2)
21 | cc = torch.cat([cc, torch.zeros(x.shape[0], x.shape[1], x.shape[2], x.shape[3]*2, device=x.device)], dim=3)
22 | cc = cc.view(x.shape[0], x.shape[1], x.shape[2]*2, x.shape[3]*2)
23 | x_up = cc.permute(0,1,3,2)
24 | return conv_gauss(x_up, 4*gauss_kernel(channels=x.shape[1], device=x.device))
25 |
26 | def conv_gauss(img, kernel):
27 | img = torch.nn.functional.pad(img, (2, 2, 2, 2), mode='reflect')
28 | out = torch.nn.functional.conv2d(img, kernel, groups=img.shape[1])
29 | return out
30 |
31 | def laplacian_pyramid(img, kernel, max_levels=3):
32 | current = img
33 | pyr = []
34 | for level in range(max_levels):
35 | filtered = conv_gauss(current, kernel)
36 | down = downsample(filtered)
37 | up = upsample(down)
38 | diff = current-up
39 | pyr.append(diff)
40 | current = down
41 | return pyr
42 |
43 | class LapLoss(torch.nn.Module):
44 | def __init__(self, max_levels=3, channels=3, device=torch.device('cpu')):
45 | super(LapLoss, self).__init__()
46 | self.max_levels = max_levels
47 | self.gauss_kernel = gauss_kernel(channels=channels, device=device)
48 |
49 | def forward(self, input, target):
50 | pyr_input = laplacian_pyramid(img=input, kernel=self.gauss_kernel, max_levels=self.max_levels)
51 | pyr_target = laplacian_pyramid(img=target, kernel=self.gauss_kernel, max_levels=self.max_levels)
52 | return sum(torch.nn.functional.l1_loss(a, b) for a, b in zip(pyr_input, pyr_target))
53 |
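54 | 
55 | if __name__ == "__main__":
56 |     # Minimal sanity-check sketch (illustrative only; the shapes and the
57 |     # single-channel setting below are arbitrary, not from any config).
58 |     pred = torch.rand(2, 1, 64, 64)
59 |     target = torch.rand(2, 1, 64, 64)
60 |     loss_fn = LapLoss(max_levels=3, channels=1)
61 |     print(loss_fn(pred, target))  # sum of per-level L1 losses, a scalar tensor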
--------------------------------------------------------------------------------
/networks/layers_WS.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | from torch.nn import functional as F
4 |
5 |
6 | class Conv2d(nn.Conv2d):
7 |
8 | def __init__(self, in_channels, out_channels, kernel_size, stride=1,
9 | padding=0, dilation=1, groups=1, bias=True):
10 | super(Conv2d, self).__init__(in_channels, out_channels, kernel_size, stride,
11 | padding, dilation, groups, bias)
12 |
13 | def forward(self, x):
14 |         # weight standardization: shift/scale each output filter to zero mean, unit std
15 |         weight = self.weight
16 | weight_mean = weight.mean(dim=1, keepdim=True).mean(dim=2, keepdim=True).mean(dim=3, keepdim=True)
17 | weight = weight - weight_mean
18 | std = torch.sqrt(torch.var(weight.view(weight.size(0), -1), dim=1) + 1e-12).view(-1, 1, 1, 1) + 1e-5
19 | weight = weight / std.expand_as(weight)
20 | return F.conv2d(x, weight, self.bias, self.stride, self.padding, self.dilation, self.groups)
21 |
22 |
23 | def BatchNorm2d(num_features):  # note: returns GroupNorm, kept under the old name
24 | return nn.GroupNorm(num_channels=num_features, num_groups=32)
25 |
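26 | 
27 | if __name__ == "__main__":
28 |     # Illustrative sketch: the standardized weight used at forward time has
29 |     # (approximately) zero mean and unit std per output filter; the channel
30 |     # counts below are arbitrary.
31 |     conv = Conv2d(3, 8, kernel_size=3, padding=1)
32 |     out = conv(torch.rand(1, 3, 16, 16))
33 |     print(out.shape)  # torch.Size([1, 8, 16, 16])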
--------------------------------------------------------------------------------
/networks/model.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | import torch.nn.functional as F
4 | import networks.resnet_GN_WS as resnet_GN_WS
5 | import networks.layers_WS as L
6 | import networks.resnet_bn as resnet_bn
7 |
8 | from networks.lap_pyramid_loss import LapLoss
9 | from networks.spatial_gradient_2d import SpatialGradient
10 | from networks.util import weight_init, norm, ResnetDilated, ResnetDilatedBN
11 | from networks.ppm import PPM, ASPP
12 |
13 |
14 | def build_model(args):
15 | builder = ModelBuilder()
16 | net_encoder = builder.build_encoder(args)
17 | net_decoder = builder.build_decoder(args)
18 | model = MattingModule(args, net_encoder, net_decoder)
19 | return model
20 |
21 |
22 | class MattingModule(nn.Module):
23 | def __init__(self, args, net_enc, net_dec):
24 | super(MattingModule, self).__init__()
25 | self.inc = args.arch.n_channel
26 | self.encoder = net_enc
27 | self.decoder = net_dec
28 | self.args = args
29 |
30 | def forward(self, image, two_chan_trimap, image_n, trimap_transformed, smap, inputs=None, is_training=True):
31 |
32 | if self.inc == 5:
33 | resnet_input = torch.cat((image_n, two_chan_trimap), 1)
34 | elif self.inc == 11:
35 | resnet_input = torch.cat((image_n, trimap_transformed, two_chan_trimap), 1)
36 | else:
37 | raise NotImplementedError
38 |
39 | conv_out, indices = self.encoder(resnet_input, return_feature_maps=True, smap=smap)
40 | out = self.decoder(conv_out, image, indices, two_chan_trimap, smap=smap, inputs=inputs, is_training=is_training)
41 | return out
42 |
43 |
44 | class ModelBuilder():
45 | def build_encoder(self, args):
46 | if args.arch.encoder == 'resnet50_GN_WS':
47 | orig_resnet = resnet_GN_WS.__dict__['l_resnet50'](pretrained=True)
48 | net_encoder = ResnetDilated(args.arch, orig_resnet, dilate_scale=8)
49 | elif args.arch.encoder == 'resnet50_BN':
50 | orig_resnet = resnet_bn.__dict__['l_resnet50'](pretrained=True)
51 | net_encoder = ResnetDilatedBN(args.arch, orig_resnet, dilate_scale=8)
52 | else:
53 | raise Exception('Architecture undefined!')
54 |
55 | num_channels = args.arch.n_channel
56 |
57 |         if num_channels > 3:
58 | print(f'modifying input layer to accept {num_channels} channels')
59 | net_encoder_sd = net_encoder.state_dict()
60 | conv1_weights = net_encoder_sd['conv1.weight']
61 |
62 | c_out, c_in, h, w = conv1_weights.size()
63 | conv1_mod = torch.zeros(c_out, num_channels, h, w)
64 | conv1_mod[:, :3, :, :] = conv1_weights
65 |
66 | conv1 = net_encoder.conv1
67 | conv1.in_channels = num_channels
68 | conv1.weight = torch.nn.Parameter(conv1_mod)
69 |
70 | net_encoder.conv1 = conv1
71 |
72 | net_encoder_sd['conv1.weight'] = conv1_mod
73 |
74 | net_encoder.load_state_dict(net_encoder_sd)
75 | return net_encoder
76 |
77 | def build_decoder(self, args):
78 | net_decoder = Decoder(args)
79 | return net_decoder
80 |
81 |
82 | class Decoder(nn.Module):
83 | def __init__(self, args):
84 | super(Decoder, self).__init__()
85 |
86 | self.args = args.arch
87 | self.batch_norm = True
88 | middle_chn = 2048
89 |
90 | self.global_module = ASPP(middle_chn, self.args.atrous_rates, self.args.aspp_channel)
91 | en_chn = middle_chn + self.args.aspp_channel
92 |
93 | self.conv_up1 = nn.Sequential(
94 | L.Conv2d(en_chn, 256, kernel_size=3, padding=1, bias=True),
95 | norm(256, self.batch_norm),
96 | nn.LeakyReLU(),
97 | L.Conv2d(256, 256, kernel_size=3, padding=1),
98 | norm(256, self.batch_norm),
99 | nn.LeakyReLU()
100 | )
101 |
102 | self.conv_up2 = nn.Sequential(
103 | L.Conv2d(256 + 256, 256, kernel_size=3, padding=1, bias=True),
104 | norm(256, self.batch_norm),
105 | nn.LeakyReLU()
106 | )
107 |
108 | self.conv_up3 = nn.Sequential(
109 | L.Conv2d(256 + 128, 64, kernel_size=3, padding=1, bias=True),
110 | norm(64, self.batch_norm),
111 | nn.LeakyReLU()
112 | )
113 |
114 | self.conv_up4_alpha = nn.Sequential(
115 | nn.Conv2d(64 + 3 + 3 + 2, 32, kernel_size=3, padding=1, bias=True),
116 | nn.LeakyReLU(),
117 | nn.Conv2d(32, 16, kernel_size=3, padding=1, bias=True),
118 | nn.LeakyReLU(),
119 | nn.Conv2d(16, 1, kernel_size=1, padding=0, bias=False)
120 | )
121 |
122 | self.conv_up4_fb = nn.Sequential(
123 | nn.Conv2d(64 + 3 + 3 + 2, 32, kernel_size=3, padding=1, bias=True),
124 | nn.LeakyReLU(),
125 | nn.Conv2d(32, 16, kernel_size=3, padding=1, bias=True),
126 | nn.LeakyReLU(),
127 | nn.Conv2d(16, 6, kernel_size=1, padding=0, bias=False)
128 | )
129 |
130 | self.conv_up4_attn = nn.Sequential(
131 | nn.Conv2d(64, 32, kernel_size=3, padding=1, bias=True),
132 | nn.LeakyReLU(),
133 | nn.Conv2d(32, 16, kernel_size=3, padding=1, bias=True),
134 | nn.LeakyReLU(),
135 | nn.Conv2d(16, 3, kernel_size=1, padding=0, bias=False)
136 | )
137 |
138 | def forward(self, conv_out, img, indices, two_chan_trimap, smap=None, inputs=None, is_training=True):
139 | conv5 = conv_out[-1]
140 |
141 | global_ctx = self.global_module(conv5)
142 | x = torch.cat([conv5, global_ctx], 1)
143 |
144 | x = self.conv_up1(x)
145 | x = torch.nn.functional.interpolate(x, scale_factor=2, mode='bilinear', align_corners=False)
146 | x = torch.cat((x, conv_out[-4]), 1)
147 | x = self.conv_up2(x)
148 | x = torch.nn.functional.interpolate(x, scale_factor=2, mode='bilinear', align_corners=False)
149 | x = torch.cat((x, conv_out[-5]), 1)
150 | x = self.conv_up3(x)
151 | x = torch.nn.functional.interpolate(x, scale_factor=2, mode='bilinear', align_corners=False)
152 |
153 | y = torch.cat((x, conv_out[-6][:, :3], img, two_chan_trimap), 1)
154 |
155 | a_out = self.conv_up4_alpha(y)
156 |
157 | alpha = torch.clamp(a_out, 0, 1)
158 |
159 | output = {"alpha": alpha}
160 |
161 | if is_training:
162 | fb_out = self.conv_up4_fb(y)
163 |             fg = torch.sigmoid(fb_out[:, 0:3])  # renamed from F/B to avoid shadowing torch.nn.functional
164 |             bg = torch.sigmoid(fb_out[:, 3:6])
165 |             output.update({"fg": fg, "bg": bg})
166 |
167 | attn_out = self.conv_up4_attn(x)
168 | attn = torch.sigmoid(attn_out)
169 | r1 = attn[:, 0:1]
170 | r2 = attn[:, 1:2]
171 | r3 = attn[:, 2:3]
172 | output.update({"r1": r1, "r2": r2, "r3": r3})
173 |
174 | return output
175 |
176 |
177 |
178 | class SIMLoss(object):
179 | def __init__(self, args):
180 | self.args = args.loss
181 |
182 | self.use_comploss = args.loss.use_comploss
183 | self.use_laploss = args.loss.use_laploss
184 | self.use_fbloss = args.loss.use_fbloss
185 | self.use_fbcloss = args.loss.use_fbcloss
186 | self.use_fblaploss = args.loss.use_fblaploss
187 |
188 | self.kernel_diagonal = args.loss.kernel_diagonal
189 | self.kernel_laplacian = args.loss.kernel_laplacian
190 | self.kernel_second_order = args.loss.kernel_second_order
191 |
192 | self.l1_loss = nn.L1Loss(reduction='sum')
193 | self.l2_loss = nn.MSELoss(reduction='sum')
194 | self.bce_loss = nn.BCEWithLogitsLoss(reduction='mean')
195 | self.lap_loss = LapLoss(5, device=torch.device('cuda'))
196 |
197 | self.gradient = SpatialGradient(diagonal=self.kernel_diagonal,
198 | laplacian=self.kernel_laplacian,
199 | second_order=self.kernel_second_order)
200 |
201 | self.loss_keys = ['loss_alpha']
202 | if self.use_comploss:
203 | self.loss_keys += ['loss_comp']
204 | if self.use_laploss:
205 | self.loss_keys += ['loss_lap']
206 | if self.use_fbloss:
207 | self.loss_keys += ['loss_fb', 'loss_comp_fb']
208 | if self.use_fbcloss:
209 | self.loss_keys += ['loss_fbc']
210 | if self.use_fblaploss:
211 | self.loss_keys += ['loss_fblap']
212 | 
213 |         self.use_attention = args.loss.use_attention  # read in calc_loss; was never assigned before
214 |         self.use_discriminator = args.loss.use_discriminator
215 |         if self.use_attention: self.loss_keys += ['loss_reg']
216 |         if self.use_discriminator: self.loss_keys += ['loss_D']
217 |
218 | def gen_alpha_loss(self, pred, alpha, mask):
219 | diff = (pred - alpha) * mask
220 | loss = torch.sqrt(diff ** 2 + 1e-12)
221 | loss = loss.sum(dim=(1,2,3)) / (mask.sum(dim=(1,2,3)) + 1.)
222 | loss = loss.sum() / pred.shape[0]
223 | return loss
224 |
225 | def gen_fb_loss(self, pf, gf, pb, gb, fmask, bmask):
226 | df = (pf - gf) * fmask
227 | db = (pb - gb) * bmask
228 | loss = torch.sqrt(df**2 + 1e-12) + torch.sqrt(db**2 + 1e-12)
229 | loss = loss.sum(dim=(1,2,3)) / (fmask.sum(dim=(1,2,3)) + bmask.sum(dim=(1,2,3)) + 1.)
230 | loss = loss.sum() / pf.shape[0]
231 | return loss
232 |
233 | def gen_comp_loss(self, img, fg, bg, alpha, mask):
234 | comp = alpha * fg + (1. - alpha) * bg
235 | diff = (comp - img) * mask
236 | loss = torch.sqrt(diff ** 2 + 1e-12)
237 | loss = loss.sum(dim=(1,2,3)) / (mask.sum(dim=(1,2,3)) + 1.) / 3.
238 | loss = loss.sum() / alpha.shape[0]
239 | return loss
240 |
241 | def gen_attention_loss(self, grad_a, grad_f, grad_b, grad_i, attn_a, attn_f, attn_b, mask):
242 | grad_a_m = grad_a.abs().sum(dim=2)
243 | grad_f_m = grad_f.abs().sum(dim=2)
244 | grad_b_m = grad_b.abs().sum(dim=2)
245 | grad_i_m = grad_i.abs().sum(dim=2)
246 | grad_fba = grad_a_m*attn_a + grad_f_m*attn_f + grad_b_m*attn_b
247 | diff = torch.sqrt((grad_fba - grad_i_m)**2 + 1e-12)
248 | loss_reg = (diff * mask).sum() / (mask.sum() + 1.) / 3.
249 | return loss_reg
250 |
251 | def gen_discriminator_loss(self, d_out):
252 | bce_loss = torch.nn.BCELoss()
253 | mse_loss = torch.nn.MSELoss()
254 |
255 | fake_ret = d_out['fake_ret']
256 | real_ret = d_out['real_ret']
257 | fake_feats = d_out['fake_feats']
258 | real_feats = d_out['real_feats']
259 |
260 | loss_D = bce_loss(torch.sigmoid(fake_ret), torch.sigmoid(real_ret))
261 |
262 | loss_perp = []
263 | for i in range(len(fake_feats)):
264 | loss_perp.append(mse_loss(fake_feats[i], real_feats[i]))
265 |         loss_D += torch.stack(loss_perp).mean()  # stack keeps the graph; torch.tensor() would break it
266 | return loss_D
267 |
268 | def calc_loss(self, out, gt):
269 | trimap = gt['trimap']
270 |
271 | g_a = gt['alpha']
272 | g_i = gt['image']
273 | p_a = out['alpha']
274 | g_f = gt['fg']
275 | g_b = gt['bg']
276 |
277 | umask = (trimap == 128).float()
278 | fmask = (trimap >= 0).float()
279 | bmask = (trimap <= 128).float()
280 |
281 | loss_dict = {}
282 | loss_alpha = self.gen_alpha_loss(p_a, g_a, umask)
283 | loss_dict['loss_alpha'] = loss_alpha
284 |
285 | if self.use_comploss:
286 | p_f = out['fg']
287 | p_b = out['bg']
288 | loss_comp = self.gen_comp_loss(g_i, g_f, g_b, p_a, umask)
289 | loss_dict['loss_comp'] = loss_comp * self.args.weight_comp
290 |
291 | if self.use_laploss:
292 | loss_lap = self.lap_loss(p_a, g_a)
293 | loss_dict['loss_lap'] = loss_lap * self.args.weight_lap
294 |
295 | if self.use_fbloss:
296 | p_f = out['fg']
297 | p_b = out['bg']
298 | loss_fb = self.gen_fb_loss(p_f, g_f, p_b, g_b, fmask, bmask)
299 | loss_comp_fb = self.gen_comp_loss(g_i, p_f, p_b, g_a, umask)
300 | loss_dict['loss_fb'] = loss_fb * self.args.weight_fb
301 | loss_dict['loss_comp_fb'] = loss_comp_fb * self.args.weight_fb
302 |
303 | if self.use_fbcloss:
304 | p_f = out['fg']
305 | p_b = out['bg']
306 | loss_fc = self.gen_comp_loss(g_i, p_f, g_b, g_a, umask)
307 | loss_bc = self.gen_comp_loss(g_i, g_f, p_b, g_a, umask)
308 | loss_fbc = self.gen_comp_loss(g_i, p_f, p_b, p_a, umask)
309 | loss_fbc = (loss_fc + loss_bc + loss_fbc) / 3.
310 | loss_dict['loss_fbc'] = loss_fbc * self.args.weight_fb
311 |
312 | if self.use_fblaploss:
313 | loss_flap = self.lap_loss(p_f, g_f)
314 | loss_blap = self.lap_loss(p_b, g_b)
315 | loss_fblap = (loss_flap + loss_blap) / 2.
316 | loss_dict['loss_fblap'] = loss_fblap * self.args.weight_fb
317 |
318 | if self.use_discriminator:
319 | loss_D = self.gen_discriminator_loss(out)
320 | loss_dict['loss_D'] = loss_D * self.args.weight_D
321 |
322 | if self.use_attention:
323 | grad_f = self.gradient(p_f)
324 | grad_b = self.gradient(p_b)
325 | grad_a = self.gradient(p_a)
326 | grad_i = self.gradient(g_i)
327 |             r1, r2, r3 = out['r1'], out['r2'], out['r3']
328 |             grad_f_l1, grad_b_l1, grad_a_l1 = grad_f.abs().sum(dim=2), grad_b.abs().sum(dim=2), grad_a.abs().sum(dim=2)  # L1 magnitudes (used below but previously undefined)
329 |             loss_reg = self.gen_attention_loss(grad_a, grad_f, grad_b, grad_i, r3, r1, r2, umask)
330 |             loss_reg += (grad_f_l1 * grad_b_l1).mean() + (grad_a_l1 * grad_b_l1).mean()
331 |             loss_dict['loss_reg'] = loss_reg * self.args.weight_reg
332 | loss = 0.
333 | for key in self.loss_keys:
334 | loss += loss_dict[key]
335 | loss_dict['loss'] = loss
336 | return loss_dict
337 |
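338 | 
339 | if __name__ == "__main__":
340 |     # Illustrative sketch of the trimap-to-mask convention in calc_loss
341 |     # (0 = background, 128 = unknown, 255 = foreground); the tiny tensor
342 |     # below is made up for demonstration.
343 |     trimap = torch.tensor([[[[0., 128., 255.]]]])
344 |     print((trimap == 128).float())  # umask: only the unknown band drives the alpha loss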
--------------------------------------------------------------------------------
/networks/ppm.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | import torch.nn.functional as F
4 | import networks.layers_WS as L
5 | from networks.util import norm
6 |
7 |
8 | class PPM(nn.Module):
9 | def __init__(self, in_channels, pool_scales, out_channels=256, batch_norm=True):
10 | super().__init__()
11 | # ppm module
12 | self.ppm = []
13 | for scale in pool_scales:
14 | self.ppm.append(nn.Sequential(
15 | nn.AdaptiveAvgPool2d(scale),
16 | L.Conv2d(in_channels, out_channels, kernel_size=1, bias=True),
17 | norm(out_channels, batch_norm),
18 | nn.LeakyReLU()
19 | ))
20 | self.ppm = nn.ModuleList(self.ppm)
21 |
22 | def forward(self, inp):
23 | input_size = inp.size()
24 | ppm_out = []
25 | for pool_scale in self.ppm:
26 | ppm_out.append(nn.functional.interpolate(
27 | pool_scale(inp), (input_size[2], input_size[3]), mode='bilinear', align_corners=False))
28 | ppm_out = torch.cat(ppm_out, 1)
29 | return ppm_out
30 |
31 |
32 | # ASPP module
33 | class ASPPConv(nn.Sequential):
34 | def __init__(self, in_channels, out_channels, dilation, batch_norm=True):
35 | modules = [
36 | L.Conv2d(in_channels, out_channels, 3, padding=dilation, dilation=dilation, bias=False),
37 | norm(out_channels, batch_norm),
38 | nn.ReLU()
39 | ]
40 | super(ASPPConv, self).__init__(*modules)
41 |
42 |
43 | class ASPPPooling(nn.Sequential):
44 | def __init__(self, in_channels, out_channels, batch_norm=True):
45 | super(ASPPPooling, self).__init__(
46 | nn.AdaptiveAvgPool2d(1),
47 | L.Conv2d(in_channels, out_channels, 1, bias=False),
48 | norm(out_channels, batch_norm),
49 | nn.ReLU())
50 |
51 | def forward(self, x):
52 | size = x.shape[-2:]
53 | for mod in self:
54 | x = mod(x)
55 | return F.interpolate(x, size=size, mode='bilinear', align_corners=False)
56 |
57 |
58 | class ASPP(nn.Module):
59 | def __init__(self, in_channels, atrous_rates, out_channels=256, batch_norm=True):
60 | super(ASPP, self).__init__()
61 | modules = []
62 | modules.append(nn.Sequential(
63 | L.Conv2d(in_channels, out_channels, 1, bias=False),
64 | norm(out_channels, batch_norm),
65 | nn.ReLU()))
66 |
67 | rates = tuple(atrous_rates)
68 | for rate in rates:
69 | modules.append(ASPPConv(in_channels, out_channels, rate, batch_norm))
70 |
71 | modules.append(ASPPPooling(in_channels, out_channels, batch_norm))
72 |
73 | self.convs = nn.ModuleList(modules)
74 |
75 | self.project = nn.Sequential(
76 | L.Conv2d(len(modules) * out_channels, out_channels, 1, bias=False),
77 | norm(out_channels, batch_norm),
78 | nn.ReLU(),
79 | nn.Dropout(0.5))
80 |
81 | def forward(self, x):
82 | res = []
83 | for conv in self.convs:
84 | res.append(conv(x))
85 | res = torch.cat(res, dim=1)
86 | return self.project(res)
87 |
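88 | 
89 | if __name__ == "__main__":
90 |     # Shape-only sketch of the ASPP head (run as `python -m networks.ppm`);
91 |     # the channel counts and atrous rates here are arbitrary examples, not
92 |     # the released configuration.
93 |     aspp = ASPP(in_channels=64, atrous_rates=[2, 4, 8], out_channels=32)
94 |     print(aspp(torch.rand(1, 64, 32, 32)).shape)  # torch.Size([1, 32, 32, 32])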
--------------------------------------------------------------------------------
/networks/resnet.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | import torch.nn.functional as F
4 |
5 | from collections import OrderedDict
6 |
7 | try:
8 | from torch.hub import load_state_dict_from_url
9 | except ImportError:
10 | from torch.utils.model_zoo import load_url as load_state_dict_from_url
11 |
12 |
13 | __all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
14 | 'resnet152', 'resnext50_32x4d', 'resnext101_32x8d',
15 | 'wide_resnet50_2', 'wide_resnet101_2']
16 |
17 |
18 | model_urls = {
19 | 'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
20 | 'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
21 | 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
22 | 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
23 | 'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
24 | 'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth',
25 | 'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth',
26 | 'wide_resnet50_2': 'https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth',
27 | 'wide_resnet101_2': 'https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth',
28 | }
29 |
30 |
31 | def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
32 | """3x3 convolution with padding"""
33 | return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
34 | padding=dilation, groups=groups, bias=False, dilation=dilation)
35 |
36 |
37 | def conv1x1(in_planes, out_planes, stride=1):
38 | """1x1 convolution"""
39 | return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
40 |
41 |
42 | class BasicBlock(nn.Module):
43 | expansion = 1
44 |
45 | def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
46 | base_width=64, dilation=1, norm_layer=None):
47 | super(BasicBlock, self).__init__()
48 | if norm_layer is None:
49 | norm_layer = nn.BatchNorm2d
50 | if groups != 1 or base_width != 64:
51 | raise ValueError('BasicBlock only supports groups=1 and base_width=64')
52 | if dilation > 1:
53 | raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
54 | # Both self.conv1 and self.downsample layers downsample the input when stride != 1
55 | self.conv1 = conv3x3(inplanes, planes, stride)
56 | self.bn1 = norm_layer(planes)
57 | self.relu = nn.ReLU(inplace=True)
58 | self.conv2 = conv3x3(planes, planes)
59 | self.bn2 = norm_layer(planes)
60 | self.downsample = downsample
61 | self.stride = stride
62 |
63 | def forward(self, x):
64 | identity = x
65 |
66 | out = self.conv1(x)
67 | out = self.bn1(out)
68 | out = self.relu(out)
69 |
70 | out = self.conv2(out)
71 | out = self.bn2(out)
72 |
73 | if self.downsample is not None:
74 | identity = self.downsample(x)
75 |
76 | out += identity
77 | out = self.relu(out)
78 |
79 | return out
80 |
81 |
82 | class Bottleneck(nn.Module):
83 | # Bottleneck in torchvision places the stride for downsampling at 3x3 convolution(self.conv2)
84 | # while original implementation places the stride at the first 1x1 convolution(self.conv1)
85 | # according to "Deep residual learning for image recognition"https://arxiv.org/abs/1512.03385.
86 | # This variant is also known as ResNet V1.5 and improves accuracy according to
87 | # https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch.
88 |
89 | expansion = 4
90 |
91 | def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
92 | base_width=64, dilation=1, norm_layer=None):
93 | super(Bottleneck, self).__init__()
94 | if norm_layer is None:
95 | norm_layer = nn.BatchNorm2d
96 | width = int(planes * (base_width / 64.)) * groups
97 | # Both self.conv2 and self.downsample layers downsample the input when stride != 1
98 | self.conv1 = conv1x1(inplanes, width)
99 | self.bn1 = norm_layer(width)
100 | self.conv2 = conv3x3(width, width, stride, groups, dilation)
101 | self.bn2 = norm_layer(width)
102 | self.conv3 = conv1x1(width, planes * self.expansion)
103 | self.bn3 = norm_layer(planes * self.expansion)
104 | self.relu = nn.ReLU(inplace=True)
105 | self.downsample = downsample
106 | self.stride = stride
107 |
108 | def forward(self, x):
109 | identity = x
110 |
111 | out = self.conv1(x)
112 | out = self.bn1(out)
113 | out = self.relu(out)
114 |
115 | out = self.conv2(out)
116 | out = self.bn2(out)
117 | out = self.relu(out)
118 |
119 | out = self.conv3(out)
120 | out = self.bn3(out)
121 |
122 | if self.downsample is not None:
123 | identity = self.downsample(x)
124 |
125 | out += identity
126 | out = self.relu(out)
127 |
128 | return out
129 |
130 |
131 | class ResNet(nn.Module):
132 |
133 | def __init__(self, inc, block, layers, num_classes=1000, zero_init_residual=False,
134 | groups=1, width_per_group=64, replace_stride_with_dilation=None,
135 | norm_layer=None, use_feature=False, use_multiscale=False):
136 | super(ResNet, self).__init__()
137 | if norm_layer is None:
138 | norm_layer = nn.BatchNorm2d
139 |
140 | self._norm_layer = norm_layer
141 | self.use_feature = use_feature
142 | self.use_multiscale = use_multiscale
143 |
144 | self.inplanes = 64
145 | self.dilation = 1
146 | if replace_stride_with_dilation is None:
147 | # each element in the tuple indicates if we should replace
148 | # the 2x2 stride with a dilated convolution instead
149 | replace_stride_with_dilation = [False, False, False]
150 | if len(replace_stride_with_dilation) != 3:
151 | raise ValueError("replace_stride_with_dilation should be None "
152 | "or a 3-element tuple, got {}".format(replace_stride_with_dilation))
153 | self.groups = groups
154 | self.base_width = width_per_group
155 | self.conv1 = nn.Conv2d(inc, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False)
156 | self.bn1 = norm_layer(self.inplanes)
157 | self.relu = nn.ReLU(inplace=True)
158 | self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
159 | self.layer1 = self._make_layer(block, 64, layers[0])
160 | self.layer2 = self._make_layer(block, 128, layers[1], stride=2,
161 | dilate=replace_stride_with_dilation[0])
162 | self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
163 | dilate=replace_stride_with_dilation[1])
164 | self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
165 | dilate=replace_stride_with_dilation[2])
166 | self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
167 | self.fc = nn.Linear(512 * block.expansion, num_classes)
168 |
169 | if self.use_multiscale:
170 | fuse_in = (64+128+256+512) * block.expansion
171 | fuse_out = 512 * block.expansion
172 | self.fuse_conv = nn.Sequential(
173 | nn.Conv2d(fuse_in, fuse_out, kernel_size=1, bias=True),
174 | nn.BatchNorm2d(fuse_out),
175 | nn.ReLU()
176 | )
177 |
178 | for m in self.modules():
179 | if isinstance(m, nn.Conv2d):
180 | nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
181 | elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
182 | nn.init.constant_(m.weight, 1)
183 | nn.init.constant_(m.bias, 0)
184 |
185 | # Zero-initialize the last BN in each residual branch,
186 | # so that the residual branch starts with zeros, and each residual block behaves like an identity.
187 | # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
188 | if zero_init_residual:
189 | for m in self.modules():
190 | if isinstance(m, Bottleneck):
191 | nn.init.constant_(m.bn3.weight, 0)
192 | elif isinstance(m, BasicBlock):
193 | nn.init.constant_(m.bn2.weight, 0)
194 |
195 | def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
196 | norm_layer = self._norm_layer
197 | downsample = None
198 | previous_dilation = self.dilation
199 | if dilate:
200 | self.dilation *= stride
201 | stride = 1
202 | if stride != 1 or self.inplanes != planes * block.expansion:
203 | downsample = nn.Sequential(
204 | conv1x1(self.inplanes, planes * block.expansion, stride),
205 | norm_layer(planes * block.expansion),
206 | )
207 |
208 | layers = []
209 | layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
210 | self.base_width, previous_dilation, norm_layer))
211 | self.inplanes = planes * block.expansion
212 | for _ in range(1, blocks):
213 | layers.append(block(self.inplanes, planes, groups=self.groups,
214 | base_width=self.base_width, dilation=self.dilation,
215 | norm_layer=norm_layer))
216 |
217 | return nn.Sequential(*layers)
218 |
219 | def _forward_impl(self, x, side=None):
220 | # See note [TorchScript super()]
221 | x = self.conv1(x)
222 | x = self.bn1(x)
223 | x = self.relu(x)
224 | x = self.maxpool(x)
225 |
226 | conv_out = []
227 | x = self.layer1(x)
228 | conv_out.append(x)
229 | x = self.layer2(x)
230 | conv_out.append(x)
231 | x = self.layer3(x)
232 | conv_out.append(x)
233 | x = self.layer4(x)
234 | conv_out.append(x)
235 |
236 | if self.use_multiscale:
237 | x1, x2, x3, x4 = conv_out
238 | x1 = F.interpolate(x1, scale_factor=0.125, mode='bilinear', align_corners=False)
239 | x2 = F.interpolate(x2, scale_factor=0.25, mode='bilinear', align_corners=False)
240 | x3 = F.interpolate(x3, scale_factor=0.5, mode='bilinear', align_corners=False)
241 | x = torch.cat([x1,x2,x3,x4], dim=1)
242 | x = self.fuse_conv(x)
243 |
244 | cam = torch.einsum('dc,nchw->ndhw', self.fc.weight, x)
245 |
246 | x = self.avgpool(x)
247 | conv_out.append(x)
248 |
249 | if self.use_feature:
250 | return x
251 |
252 | x = torch.flatten(x, 1)
253 | x = self.fc(x)
254 |
255 | return x, cam, conv_out
256 |
257 | def forward(self, x):
258 | return self._forward_impl(x)
259 |
260 |
261 | def _resnet(arch, inc, block, layers, pretrained, progress, **kwargs):
262 | model = ResNet(inc, block, layers, **kwargs)
263 | if pretrained:
264 | state_dict = load_state_dict_from_url(model_urls[arch],
265 | progress=progress)
266 | state_dict_filter = OrderedDict()
267 | for k,v in state_dict.items():
268 | if 'fc' not in k:
269 | state_dict_filter[k] = v
270 | model.load_state_dict(state_dict_filter, strict=False)
271 | return model
272 |
273 |
274 | def resnet18(inc, pretrained=False, progress=True, **kwargs):
275 | r"""ResNet-18 model from
276 |     `"Deep Residual Learning for Image Recognition" <https://arxiv.org/abs/1512.03385>`_
277 |
278 | Args:
279 | pretrained (bool): If True, returns a model pre-trained on ImageNet
280 | progress (bool): If True, displays a progress bar of the download to stderr
281 | """
282 | return _resnet('resnet18', inc, BasicBlock, [2, 2, 2, 2], pretrained, progress,
283 | **kwargs)
284 |
285 |
286 | def resnet34(inc, pretrained=False, progress=True, **kwargs):
287 | r"""ResNet-34 model from
288 |     `"Deep Residual Learning for Image Recognition" <https://arxiv.org/abs/1512.03385>`_
289 |
290 | Args:
291 | pretrained (bool): If True, returns a model pre-trained on ImageNet
292 | progress (bool): If True, displays a progress bar of the download to stderr
293 | """
294 | return _resnet('resnet34', inc, BasicBlock, [3, 4, 6, 3], pretrained, progress,
295 | **kwargs)
296 |
297 |
298 | def resnet50(inc, pretrained=False, progress=True, **kwargs):
299 | r"""ResNet-50 model from
300 |     `"Deep Residual Learning for Image Recognition" <https://arxiv.org/abs/1512.03385>`_
301 |
302 | Args:
303 | pretrained (bool): If True, returns a model pre-trained on ImageNet
304 | progress (bool): If True, displays a progress bar of the download to stderr
305 | """
306 | return _resnet('resnet50', inc, Bottleneck, [3, 4, 6, 3], pretrained, progress,
307 | **kwargs)
308 |
309 |
310 | def resnet101(inc, pretrained=False, progress=True, **kwargs):
311 | r"""ResNet-101 model from
312 |     `"Deep Residual Learning for Image Recognition" <https://arxiv.org/abs/1512.03385>`_
313 |
314 | Args:
315 | pretrained (bool): If True, returns a model pre-trained on ImageNet
316 | progress (bool): If True, displays a progress bar of the download to stderr
317 | """
318 | return _resnet('resnet101', inc, Bottleneck, [3, 4, 23, 3], pretrained, progress,
319 | **kwargs)
320 |
321 |
322 | def resnet152(inc, pretrained=False, progress=True, **kwargs):
323 | r"""ResNet-152 model from
324 |     `"Deep Residual Learning for Image Recognition" <https://arxiv.org/abs/1512.03385>`_
325 |
326 | Args:
327 | pretrained (bool): If True, returns a model pre-trained on ImageNet
328 | progress (bool): If True, displays a progress bar of the download to stderr
329 | """
330 | return _resnet('resnet152', inc, Bottleneck, [3, 8, 36, 3], pretrained, progress,
331 | **kwargs)
332 |
333 |
334 | def resnext50_32x4d(inc, pretrained=False, progress=True, **kwargs):
335 | r"""ResNeXt-50 32x4d model from
336 |     `"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/abs/1611.05431>`_
337 |
338 | Args:
339 | pretrained (bool): If True, returns a model pre-trained on ImageNet
340 | progress (bool): If True, displays a progress bar of the download to stderr
341 | """
342 | kwargs['groups'] = 32
343 | kwargs['width_per_group'] = 4
344 | return _resnet('resnext50_32x4d', inc, Bottleneck, [3, 4, 6, 3],
345 | pretrained, progress, **kwargs)
346 |
347 |
348 | def resnext101_32x8d(inc, pretrained=False, progress=True, **kwargs):
349 | r"""ResNeXt-101 32x8d model from
350 |     `"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/abs/1611.05431>`_
351 |
352 | Args:
353 | pretrained (bool): If True, returns a model pre-trained on ImageNet
354 | progress (bool): If True, displays a progress bar of the download to stderr
355 | """
356 | kwargs['groups'] = 32
357 | kwargs['width_per_group'] = 8
358 | return _resnet('resnext101_32x8d', inc, Bottleneck, [3, 4, 23, 3],
359 | pretrained, progress, **kwargs)
360 |
361 |
362 | def wide_resnet50_2(inc, pretrained=False, progress=True, **kwargs):
363 | r"""Wide ResNet-50-2 model from
364 |     `"Wide Residual Networks" <https://arxiv.org/abs/1605.07146>`_
365 |
366 | The model is the same as ResNet except for the bottleneck number of channels
367 | which is twice larger in every block. The number of channels in outer 1x1
368 | convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
369 | channels, and in Wide ResNet-50-2 has 2048-1024-2048.
370 |
371 | Args:
372 | pretrained (bool): If True, returns a model pre-trained on ImageNet
373 | progress (bool): If True, displays a progress bar of the download to stderr
374 | """
375 | kwargs['width_per_group'] = 64 * 2
376 | return _resnet('wide_resnet50_2', inc, Bottleneck, [3, 4, 6, 3],
377 | pretrained, progress, **kwargs)
378 |
379 |
380 | def wide_resnet101_2(inc, pretrained=False, progress=True, **kwargs):
381 | r"""Wide ResNet-101-2 model from
382 |     `"Wide Residual Networks" <https://arxiv.org/abs/1605.07146>`_
383 |
384 | The model is the same as ResNet except for the bottleneck number of channels
385 | which is twice larger in every block. The number of channels in outer 1x1
386 | convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
387 | channels, and in Wide ResNet-50-2 has 2048-1024-2048.
388 |
389 | Args:
390 | pretrained (bool): If True, returns a model pre-trained on ImageNet
391 | progress (bool): If True, displays a progress bar of the download to stderr
392 | """
393 | kwargs['width_per_group'] = 64 * 2
394 | return _resnet('wide_resnet101_2', inc, Bottleneck, [3, 4, 23, 3],
395 | pretrained, progress, **kwargs)
396 |
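397 | 
398 | if __name__ == "__main__":
399 |     # Illustrative shape check with random weights (pretrained=False, so
400 |     # nothing is downloaded); the input size and class count are arbitrary.
401 |     model = resnet18(inc=3, pretrained=False, num_classes=20)
402 |     logits, cam, feats = model(torch.rand(1, 3, 64, 64))
403 |     print(logits.shape, cam.shape, len(feats))  # (1, 20), (1, 20, 2, 2), 5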
--------------------------------------------------------------------------------
/networks/resnet_GN_WS.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | import networks.layers_WS as L
4 |
5 | __all__ = ['ResNet', 'l_resnet50']
6 |
7 |
8 | def conv3x3(in_planes, out_planes, stride=1):
9 | """3x3 convolution with padding"""
10 | return L.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
11 | padding=1, bias=False)
12 |
13 |
14 | def conv1x1(in_planes, out_planes, stride=1):
15 | """1x1 convolution"""
16 | return L.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
17 |
18 |
19 | class BasicBlock(nn.Module):
20 | expansion = 1
21 |
22 | def __init__(self, inplanes, planes, stride=1, downsample=None):
23 | super(BasicBlock, self).__init__()
24 | self.conv1 = conv3x3(inplanes, planes, stride)
25 | self.bn1 = L.BatchNorm2d(planes)
26 | self.relu = nn.ReLU(inplace=True)
27 | self.conv2 = conv3x3(planes, planes)
28 | self.bn2 = L.BatchNorm2d(planes)
29 | self.downsample = downsample
30 | self.stride = stride
31 |
32 | def forward(self, x):
33 | identity = x
34 |
35 | out = self.conv1(x)
36 | out = self.bn1(out)
37 | out = self.relu(out)
38 |
39 | out = self.conv2(out)
40 | out = self.bn2(out)
41 |
42 | if self.downsample is not None:
43 | identity = self.downsample(x)
44 |
45 | out += identity
46 | out = self.relu(out)
47 |
48 | return out
49 |
50 |
51 | class Bottleneck(nn.Module):
52 | expansion = 4
53 |
54 | def __init__(self, inplanes, planes, stride=1, downsample=None):
55 | super(Bottleneck, self).__init__()
56 | self.conv1 = conv1x1(inplanes, planes)
57 | self.bn1 = L.BatchNorm2d(planes)
58 | self.conv2 = conv3x3(planes, planes, stride)
59 | self.bn2 = L.BatchNorm2d(planes)
60 | self.conv3 = conv1x1(planes, planes * self.expansion)
61 | self.bn3 = L.BatchNorm2d(planes * self.expansion)
62 | self.relu = nn.ReLU(inplace=True)
63 | self.downsample = downsample
64 | self.stride = stride
65 |
66 | def forward(self, x):
67 | identity = x
68 |
69 | out = self.conv1(x)
70 | out = self.bn1(out)
71 | out = self.relu(out)
72 |
73 | out = self.conv2(out)
74 | out = self.bn2(out)
75 | out = self.relu(out)
76 |
77 | out = self.conv3(out)
78 | out = self.bn3(out)
79 |
80 | if self.downsample is not None:
81 | identity = self.downsample(x)
82 |
83 | out += identity
84 | out = self.relu(out)
85 |
86 | return out
87 |
88 |
89 | class ResNet(nn.Module):
90 |
91 | def __init__(self, block, layers, num_classes=1000):
92 | super(ResNet, self).__init__()
93 | self.inplanes = 64
94 | self.conv1 = L.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
95 | self.bn1 = L.BatchNorm2d(64)
96 | self.relu = nn.ReLU(inplace=True)
97 | self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1, return_indices=True)
98 | self.layer1 = self._make_layer(block, 64, layers[0])
99 | self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
100 | self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
101 | self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
102 | self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
103 | self.fc = nn.Linear(512 * block.expansion, num_classes)
104 |
105 | def _make_layer(self, block, planes, blocks, stride=1):
106 | downsample = None
107 | if stride != 1 or self.inplanes != planes * block.expansion:
108 | downsample = nn.Sequential(
109 | conv1x1(self.inplanes, planes * block.expansion, stride),
110 | L.BatchNorm2d(planes * block.expansion),
111 | )
112 |
113 | layers = []
114 | layers.append(block(self.inplanes, planes, stride, downsample))
115 | self.inplanes = planes * block.expansion
116 | for _ in range(1, blocks):
117 | layers.append(block(self.inplanes, planes))
118 |
119 | return nn.Sequential(*layers)
120 |
121 | def forward(self, x):
122 | x = self.conv1(x)
123 | x = self.bn1(x)
124 | x = self.relu(x)
125 |         x, _ = self.maxpool(x)  # return_indices=True, so maxpool yields (values, indices)
126 |
127 | x = self.layer1(x)
128 | x = self.layer2(x)
129 | x = self.layer3(x)
130 | x = self.layer4(x)
131 |
132 | x = self.avgpool(x)
133 | x = x.view(x.size(0), -1)
134 | x = self.fc(x)
135 |
136 | return x
137 |
138 |
139 | def remove_prefix(ckpt):
140 | from collections import OrderedDict
141 | new_ckpt = OrderedDict()
142 | for k,v in ckpt.items():
143 | nk = '.'.join(k.split('.')[1:])
144 | new_ckpt[nk] = v
145 | return new_ckpt
146 |
147 |
148 | def l_resnet50(pretrained=False, **kwargs):
149 | """Constructs a ResNet-50 model.
150 | Args:
151 | pretrained (bool): If True, returns a model pre-trained on ImageNet
152 | """
153 | model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
154 |     if pretrained:
155 | state_dict = remove_prefix(torch.load('pretrained/R-50-GN-WS.pth.tar'))
156 | model.load_state_dict(state_dict)
157 | return model
158 |
159 |
160 | if __name__ == "__main__":
161 | model = l_resnet50(True)
162 | print(model)
163 |
--------------------------------------------------------------------------------
/networks/resnet_bn.py:
--------------------------------------------------------------------------------
1 | import math
2 | import torch
3 | import torch.nn as nn
4 |
5 | from torch.nn import BatchNorm2d
6 | # from modules.nn import BatchNorm2d
7 | from collections import OrderedDict
8 |
9 | try:
10 | from torch.hub import load_state_dict_from_url
11 | except ImportError:
12 | from torch.utils.model_zoo import load_url as load_state_dict_from_url
13 |
14 | __all__ = ['ResNet']
15 |
16 |
17 | model_urls = {
18 | 'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
19 | 'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
20 | 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
21 | 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
22 | 'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
23 | 'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth',
24 | 'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth',
25 | 'wide_resnet50_2': 'https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth',
26 | 'wide_resnet101_2': 'https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth',
27 | }
28 |
29 |
30 | def conv3x3(in_planes, out_planes, stride=1):
31 | "3x3 convolution with padding"
32 | return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
33 | padding=1, bias=False)
34 |
35 | def conv7x7(in_planes, out_planes, stride=1):
36 |     "7x7 convolution with padding"
37 | return nn.Conv2d(in_planes, out_planes, kernel_size=7, stride=stride,
38 | padding=3, bias=False)
39 |
40 |
41 | class BasicBlock(nn.Module):
42 | expansion = 1
43 |
44 | def __init__(self, inplanes, planes, stride=1, downsample=None):
45 | super(BasicBlock, self).__init__()
46 | self.conv1 = conv3x3(inplanes, planes, stride)
47 | self.bn1 = BatchNorm2d(planes)
48 | self.relu = nn.ReLU(inplace=True)
49 | self.conv2 = conv3x3(planes, planes)
50 | self.bn2 = BatchNorm2d(planes)
51 | self.downsample = downsample
52 | self.stride = stride
53 |
54 | def forward(self, x):
55 | residual = x
56 |
57 | out = self.conv1(x)
58 | out = self.bn1(out)
59 | out = self.relu(out)
60 |
61 | out = self.conv2(out)
62 | out = self.bn2(out)
63 |
64 | if self.downsample is not None:
65 | residual = self.downsample(x)
66 |
67 | out += residual
68 | out = self.relu(out)
69 |
70 | return out
71 |
72 |
73 | class Bottleneck(nn.Module):
74 | expansion = 4
75 |
76 | def __init__(self, inplanes, planes, stride=1, downsample=None):
77 | super(Bottleneck, self).__init__()
78 | self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
79 | self.bn1 = BatchNorm2d(planes)
80 | self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
81 | padding=1, bias=False)
82 | self.bn2 = BatchNorm2d(planes, momentum=0.01)
83 | self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
84 | self.bn3 = BatchNorm2d(planes * 4)
85 | self.relu = nn.ReLU(inplace=True)
86 | self.downsample = downsample
87 | self.stride = stride
88 |
89 | def forward(self, x):
90 | residual = x
91 |
92 | out = self.conv1(x)
93 | out = self.bn1(out)
94 | out = self.relu(out)
95 |
96 | out = self.conv2(out)
97 | out = self.bn2(out)
98 | out = self.relu(out)
99 |
100 | out = self.conv3(out)
101 | out = self.bn3(out)
102 |
103 | if self.downsample is not None:
104 | residual = self.downsample(x)
105 |
106 | out += residual
107 | out = self.relu(out)
108 |
109 | return out
110 |
111 |
112 | class ResNet(nn.Module):
113 | def __init__(self, block, layers, num_classes=1000, inplanes=128):
114 | self.inplanes = inplanes
115 | super(ResNet, self).__init__()
116 |
117 | self.conv1 = conv3x3(3, 64, stride=2)
118 | self.bn1 = BatchNorm2d(64)
119 | self.relu1 = nn.ReLU(inplace=True)
120 | self.conv2 = conv3x3(64, 64)
121 | self.bn2 = BatchNorm2d(64)
122 | self.relu2 = nn.ReLU(inplace=True)
123 | self.conv3 = conv3x3(64, 128)
124 | self.bn3 = BatchNorm2d(128)
125 | self.relu3 = nn.ReLU(inplace=True)
126 | self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1, return_indices=True)
127 |
128 | self.layer1 = self._make_layer(block, 64, layers[0])
129 | self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
130 | self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
131 | self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
132 | self.avgpool = nn.AvgPool2d(7, stride=1)
133 | self.fc = nn.Linear(512 * block.expansion, num_classes)
134 |
135 | for m in self.modules():
136 | if isinstance(m, nn.Conv2d):
137 | n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
138 | m.weight.data.normal_(0, math.sqrt(2. / n))
139 | elif isinstance(m, BatchNorm2d):
140 | m.weight.data.fill_(1)
141 | m.bias.data.zero_()
142 |
143 | def _make_layer(self, block, planes, blocks, stride=1):
144 | downsample = None
145 | if stride != 1 or self.inplanes != planes * block.expansion:
146 | downsample = nn.Sequential(
147 | nn.Conv2d(self.inplanes, planes * block.expansion,
148 | kernel_size=1, stride=stride, bias=False),
149 | BatchNorm2d(planes * block.expansion),
150 | )
151 |
152 | layers = []
153 | layers.append(block(self.inplanes, planes, stride, downsample))
154 | self.inplanes = planes * block.expansion
155 | for i in range(1, blocks):
156 | layers.append(block(self.inplanes, planes))
157 |
158 | return nn.Sequential(*layers)
159 |
160 | def forward(self, x):
161 | x = self.relu1(self.bn1(self.conv1(x)))
162 | x = self.relu2(self.bn2(self.conv2(x)))
163 | x = self.relu3(self.bn3(self.conv3(x)))
164 | x, indices = self.maxpool(x)
165 |
166 | x = self.layer1(x)
167 | x = self.layer2(x)
168 | x = self.layer3(x)
169 | x = self.layer4(x)
170 |
171 | x = self.avgpool(x)
172 | x = x.view(x.size(0), -1)
173 | x = self.fc(x)
174 | return x
175 |
176 |
177 | def l_resnet50(pretrained=False):
178 | """Constructs a ResNet-50 model.
179 | Args:
180 | pretrained (bool): If True, returns a model pre-trained on ImageNet
181 | """
182 | model = ResNet(Bottleneck, [3, 4, 6, 3], inplanes=128)
183 | if pretrained:
184 | state_dict = torch.load('pretrained/resnet50_v1c.pth')
185 | model.load_state_dict(state_dict, strict=True)
186 | return model
187 |
188 |
189 | if __name__ == "__main__":
190 | model = l_resnet50(pretrained=True)
191 |
--------------------------------------------------------------------------------
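A quick smoke test of the deep-stem ResNet-50 above (three 3x3 stem convolutions instead of a single 7x7, with max-pool indices kept for later unpooling). This is a minimal sketch that skips the pretrained weights, assuming the repo root is on `PYTHONPATH` so this file imports as `networks.resnet_bn`:

```
# Sketch: instantiate the deep-stem ResNet-50 without pretrained weights
# and verify the classification output shape.
import torch
from networks.resnet_bn import ResNet, Bottleneck

model = ResNet(Bottleneck, [3, 4, 6, 3], inplanes=128).eval()
with torch.no_grad():
    x = torch.randn(1, 3, 224, 224)  # stem + layers downsample by 32; AvgPool2d(7) needs a 7x7 map
    logits = model(x)
print(logits.shape)  # torch.Size([1, 1000])
```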
/networks/spatial_gradient.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | import torch.nn.functional as F
4 |
5 |
6 | def _get_sobel_kernel_3x3() -> torch.Tensor:
7 | """Utility function that returns a sobel kernel of 3x3"""
8 | return torch.tensor([
9 | [-1., 0., 1.],
10 | [-2., 0., 2.],
11 | [-1., 0., 1.],
12 | ])
13 |
14 |
15 | def _get_sobel_diag_x_kernel_3x3() -> torch.Tensor:
16 | return torch.tensor([
17 | [ 0., 1., 2.],
18 | [-1., 0., 1.],
19 | [-2.,-1., 0.],
20 | ])
21 |
22 |
23 | def _get_sobel_diag_y_kernel_3x3() -> torch.Tensor:
24 | return torch.tensor([
25 | [-2.,-1., 0.],
26 | [-1., 0., 1.],
27 | [ 0., 1., 2.],
28 | ])
29 |
30 |
31 | def _get_laplacian_kernel_3x3() -> torch.Tensor:
32 | """Utility function that returns a sobel kernel of 3x3"""
33 | return torch.tensor([
34 | [ 0., -1., 0.],
35 | [-1., 4., -1.],
36 | [ 0., -1., 0.],
37 | ])
38 |
39 |
40 | def _get_second_order_kernel_3x3() -> torch.Tensor:
41 | """Utility function that returns a sobel kernel of 3x3"""
42 | return torch.tensor([
43 | [-1., 2., -1.],
44 | [-2., 4., -2.],
45 | [-1., 2., -1.],
46 | ])
47 |
48 |
49 | class SpatialGradient(nn.Module):
50 | r"""Computes the first order image derivative in both x and y using a Sobel
51 | operator.
52 |
53 | Return:
54 | torch.Tensor: the sobel edges of the input feature map.
55 |
56 | Shape:
57 | - Input: :math:`(B, C, H, W)`
58 | - Output: :math:`(B, C, 2, H, W)`
59 |
60 | Examples:
61 | >>> input = torch.rand(1, 3, 4, 4)
62 | >>> output = kornia.filters.SpatialGradient()(input) # 1x3x2x4x4
63 | """
64 |
65 | def __init__(self, diagonal=False, laplacian=False, second_order=False) -> None:
66 | super(SpatialGradient, self).__init__()
67 | self.kernel: torch.Tensor = self.get_sobel_kernel(diagonal=diagonal, laplacian=laplacian, second_order=second_order)
68 |
69 | @staticmethod
70 | def get_sobel_kernel(diagonal=False, laplacian=False, second_order=False) -> torch.Tensor:
71 | kernel_x: torch.Tensor = _get_sobel_kernel_3x3()
72 | kernel_y: torch.Tensor = kernel_x.transpose(0, 1)
73 | kernels = [kernel_x, kernel_y]
74 | # if diagonal:
75 | # kernel_dx: torch.Tensor = _get_sobel_diag_x_kernel_3x3()
76 | # kernel_dy: torch.Tensor = _get_sobel_diag_y_kernel_3x3()
77 | # kernels += [kernel_dx, kernel_dy]
78 | # if laplacian:
79 | # kernel_lap: torch.Tensor = _get_laplacian_kernel_3x3()
80 | # kernels += [kernel_lap]
81 | # if second_order:
82 | # kernel_2x: torch.Tensor = _get_second_order_kernel_3x3()
83 | # kernel_2y: torch.Tensor = kernel_2x.transpose(0, 1)
84 | # kernels += [kernel_2x, kernel_2y]
85 | return torch.stack(kernels)
86 |
87 | def forward(self, input: torch.Tensor) -> torch.Tensor: # type: ignore
88 | if not torch.is_tensor(input):
89 | raise TypeError("Input type is not a torch.Tensor. Got {}"
90 | .format(type(input)))
91 | if not len(input.shape) == 4:
92 | raise ValueError("Invalid input shape, we expect BxCxHxW. Got: {}"
93 | .format(input.shape))
94 | # prepare kernel
95 | b, c, h, w = input.shape
96 | tmp_kernel: torch.Tensor = self.kernel.to(input.device).to(input.dtype)
97 | kernel: torch.Tensor = tmp_kernel.repeat(c, 1, 1, 1, 1)
98 |
99 | # convolve input tensor with sobel kernel
100 | kernel_flip: torch.Tensor = kernel.flip(-3)
101 | padding = kernel_flip.size(2) // 2
102 | return F.conv3d(input[:, :, None], kernel_flip, padding=padding, groups=c)
103 |
104 |
105 |
106 | class Sobel(nn.Module):
107 | r"""Computes the Sobel operator and returns the magnitude per channel.
108 |
109 | Return:
110 |         torch.Tensor: the Sobel edge gradient magnitude map.
111 |
112 | Shape:
113 | - Input: :math:`(B, C, H, W)`
114 | - Output: :math:`(B, C, H, W)`
115 |
116 | Examples:
117 | >>> input = torch.rand(1, 3, 4, 4)
118 | >>> output = kornia.filters.Sobel()(input) # 1x3x4x4
119 | """
120 |
121 | def __init__(self) -> None:
122 | super(Sobel, self).__init__()
123 |
124 | def forward(self, input: torch.Tensor) -> torch.Tensor: # type: ignore
125 | if not torch.is_tensor(input):
126 | raise TypeError("Input type is not a torch.Tensor. Got {}"
127 | .format(type(input)))
128 | if not len(input.shape) == 4:
129 | raise ValueError("Invalid input shape, we expect BxCxHxW. Got: {}"
130 | .format(input.shape))
131 |         # compute the x/y gradients
132 | edges: torch.Tensor = spatial_gradient(input)
133 |
134 | # unpack the edges
135 | gx: torch.Tensor = edges[:, :, 0]
136 | gy: torch.Tensor = edges[:, :, 1]
137 |
138 |         # compute gradient magnitude
139 | magnitude: torch.Tensor = torch.sqrt(gx * gx + gy * gy)
140 | return magnitude
141 |
142 |
143 |
144 | # functional api
145 |
146 |
147 | def spatial_gradient(input: torch.Tensor) -> torch.Tensor:
148 | r"""Computes the first order image derivative in both x and y using a Sobel
149 | operator.
150 |
151 | See :class:`~kornia.filters.SpatialGradient` for details.
152 | """
153 | return SpatialGradient()(input)
154 |
155 |
156 |
157 | def sobel(input: torch.Tensor) -> torch.Tensor:
158 | r"""Computes the Sobel operator and returns the magnitude per channel.
159 |
160 | See :class:`~kornia.filters.Sobel` for details.
161 | """
162 | return Sobel()(input)
163 |
--------------------------------------------------------------------------------
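A minimal usage sketch of the module above, assuming the repo root is on `PYTHONPATH` so it imports as `networks.spatial_gradient`. The Sobel pair is applied per channel, so a BxCxHxW input yields a BxCx2xHxW stack of d/dx and d/dy maps:

```
# Sketch: per-channel Sobel gradients and an edge-magnitude map.
import torch
from networks.spatial_gradient import SpatialGradient, sobel

img = torch.rand(1, 3, 64, 64)           # (B, C, H, W)
grads = SpatialGradient()(img)           # (1, 3, 2, 64, 64): d/dx and d/dy per channel
gx, gy = grads[:, :, 0], grads[:, :, 1]
edges = sobel(img)                       # (1, 3, 64, 64) gradient magnitude
print(grads.shape, edges.shape)
```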
/networks/spatial_gradient_2d.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | import torch.nn.functional as F
4 |
5 | from kornia.filters.kernels import get_spatial_gradient_kernel2d, get_spatial_gradient_kernel3d
6 | from kornia.filters.kernels import normalize_kernel2d
7 |
8 |
9 | def _get_sobel_diag_x_kernel_3x3() -> torch.Tensor:
10 | return torch.tensor([
11 | [ 0., 1., 2.],
12 | [-1., 0., 1.],
13 | [-2.,-1., 0.],
14 | ])
15 |
16 |
17 | def _get_sobel_diag_y_kernel_3x3() -> torch.Tensor:
18 | return torch.tensor([
19 | [-2.,-1., 0.],
20 | [-1., 0., 1.],
21 | [ 0., 1., 2.],
22 | ])
23 |
24 |
25 | def _get_laplacian_kernel_3x3() -> torch.Tensor:
26 | """Utility function that returns a sobel kernel of 3x3"""
27 | return torch.tensor([
28 | [ 0., -1., 0.],
29 | [-1., 4., -1.],
30 | [ 0., -1., 0.],
31 | ])
32 |
33 |
34 | def _get_second_order_kernel_3x3() -> torch.Tensor:
35 | """Utility function that returns a sobel kernel of 3x3"""
36 | return torch.tensor([
37 | [-1., 2., -1.],
38 | [-2., 4., -2.],
39 | [-1., 2., -1.],
40 | ])
41 |
42 |
43 | class SpatialGradient(nn.Module):
44 | r"""Computes the first order image derivative in both x and y using a Sobel
45 | operator.
46 | Return:
47 | torch.Tensor: the sobel edges of the input feature map.
48 | Shape:
49 | - Input: :math:`(B, C, H, W)`
50 | - Output: :math:`(B, C, 2, H, W)`
51 | Examples:
52 | >>> input = torch.rand(1, 3, 4, 4)
53 | >>> output = kornia.filters.SpatialGradient()(input) # 1x3x2x4x4
54 | """
55 |
56 | def __init__(self,
57 | mode: str = 'sobel',
58 | order: int = 1,
59 | normalized: bool = True,
60 | diagonal=False,
61 | laplacian=False,
62 | second_order=False) -> None:
63 | super(SpatialGradient, self).__init__()
64 | self.normalized: bool = normalized
65 | self.order: int = order
66 | self.mode: str = mode
67 | self.kernel = get_spatial_gradient_kernel2d(mode, order)
68 | kernels = []
69 | if diagonal:
70 | kernel_dx: torch.Tensor = _get_sobel_diag_x_kernel_3x3()
71 | kernel_dy: torch.Tensor = _get_sobel_diag_y_kernel_3x3()
72 | kernels += [kernel_dx, kernel_dy]
73 | if laplacian:
74 | kernel_lap: torch.Tensor = _get_laplacian_kernel_3x3()
75 | kernels += [kernel_lap]
76 | if second_order:
77 | kernel_2x: torch.Tensor = _get_second_order_kernel_3x3()
78 | kernel_2y: torch.Tensor = kernel_2x.transpose(0, 1)
79 | kernels += [kernel_2x, kernel_2y]
80 | if len(kernels)>0:
81 | kernels = torch.stack(kernels)
82 | self.kernel = torch.cat([self.kernel, kernels], dim=0)
83 | if self.normalized:
84 | self.kernel = normalize_kernel2d(self.kernel)
85 | return
86 |
87 | def __repr__(self) -> str:
88 | return self.__class__.__name__ + '('\
89 | 'order=' + str(self.order) + ', ' + \
90 | 'normalized=' + str(self.normalized) + ', ' + \
91 | 'mode=' + self.mode + ')'
92 |
93 | def forward(self, input: torch.Tensor) -> torch.Tensor: # type: ignore
94 | if not torch.is_tensor(input):
95 | raise TypeError("Input type is not a torch.Tensor. Got {}"
96 | .format(type(input)))
97 | if not len(input.shape) == 4:
98 | raise ValueError("Invalid input shape, we expect BxCxHxW. Got: {}"
99 | .format(input.shape))
100 | # prepare kernel
101 | b, c, h, w = input.shape
102 | tmp_kernel: torch.Tensor = self.kernel.to(input.device).to(input.dtype).detach()
103 | kernel: torch.Tensor = tmp_kernel.unsqueeze(1).unsqueeze(1)
104 |
105 | # convolve input tensor with sobel kernel
106 | kernel_flip: torch.Tensor = kernel.flip(-3)
107 |         # Pad with "replicate" for spatial dims, but with zeros for channel
108 | spatial_pad = [self.kernel.size(1) // 2,
109 | self.kernel.size(1) // 2,
110 | self.kernel.size(2) // 2,
111 | self.kernel.size(2) // 2]
112 | # out_channels: int = 3 if self.order == 2 else 2
113 | out_channels: int = self.kernel.size(0)
114 | padded_inp: torch.Tensor = F.pad(input.reshape(b * c, 1, h, w), spatial_pad, 'replicate')[:, :, None]
115 | return F.conv3d(padded_inp, kernel_flip, padding=0).view(b, c, out_channels, h, w)
116 |
117 |
118 | class SpatialGradient3d(nn.Module):
119 | r"""Computes the first and second order volume derivative in x, y and d using a diff
120 | operator.
121 | Return:
122 | torch.Tensor: the spatial gradients of the input feature map.
123 | Shape:
124 | - Input: :math:`(B, C, D, H, W)`. D, H, W are spatial dimensions, gradient is calculated w.r.t to them.
125 | - Output: :math:`(B, C, 3, D, H, W)` or :math:`(B, C, 6, D, H, W)`
126 | Examples:
127 |         >>> input = torch.rand(1, 3, 4, 4, 4)
128 |         >>> output = kornia.filters.SpatialGradient3d()(input)  # 1x3x3x4x4x4
129 | """
130 |
131 | def __init__(self,
132 | mode: str = 'diff',
133 | order: int = 1) -> None:
134 | super(SpatialGradient3d, self).__init__()
135 | self.order: int = order
136 | self.mode: str = mode
137 | self.kernel = get_spatial_gradient_kernel3d(mode, order)
138 | return
139 |
140 | def __repr__(self) -> str:
141 | return self.__class__.__name__ + '('\
142 | 'order=' + str(self.order) + ', ' + \
143 | 'mode=' + self.mode + ')'
144 |
145 | def forward(self, input: torch.Tensor) -> torch.Tensor: # type: ignore
146 | if not torch.is_tensor(input):
147 | raise TypeError("Input type is not a torch.Tensor. Got {}"
148 | .format(type(input)))
149 | if not len(input.shape) == 5:
150 | raise ValueError("Invalid input shape, we expect BxCxDxHxW. Got: {}"
151 | .format(input.shape))
152 | # prepare kernel
153 | b, c, d, h, w = input.shape
154 | tmp_kernel: torch.Tensor = self.kernel.to(input.device).to(input.dtype).detach()
155 | kernel: torch.Tensor = tmp_kernel.repeat(c, 1, 1, 1, 1)
156 |
157 | # convolve input tensor with grad kernel
158 | kernel_flip: torch.Tensor = kernel.flip(-3)
159 |         # Pad with "replicate" for spatial dims, but with zeros for channel
160 | spatial_pad = [self.kernel.size(2) // 2,
161 | self.kernel.size(2) // 2,
162 | self.kernel.size(3) // 2,
163 | self.kernel.size(3) // 2,
164 | self.kernel.size(4) // 2,
165 | self.kernel.size(4) // 2]
166 | out_ch: int = 6 if self.order == 2 else 3
167 | return F.conv3d(F.pad(input, spatial_pad, 'replicate'), kernel, padding=0, groups=c).view(b, c, out_ch, d, h, w)
168 |
169 |
170 | class Sobel(nn.Module):
171 | r"""Computes the Sobel operator and returns the magnitude per channel.
172 | Return:
173 |         torch.Tensor: the Sobel edge gradient magnitude map.
174 | Args:
175 | normalized (bool): if True, L1 norm of the kernel is set to 1.
176 | eps (float): regularization number to avoid NaN during backprop. Default: 1e-6.
177 | Shape:
178 | - Input: :math:`(B, C, H, W)`
179 | - Output: :math:`(B, C, H, W)`
180 | Examples:
181 | >>> input = torch.rand(1, 3, 4, 4)
182 | >>> output = kornia.filters.Sobel()(input) # 1x3x4x4
183 | """
184 |
185 | def __init__(self,
186 | normalized: bool = True, eps: float = 1e-6) -> None:
187 | super(Sobel, self).__init__()
188 | self.normalized: bool = normalized
189 | self.eps: float = eps
190 |
191 | def __repr__(self) -> str:
192 | return self.__class__.__name__ + '('\
193 | 'normalized=' + str(self.normalized) + ')'
194 |
195 | def forward(self, input: torch.Tensor) -> torch.Tensor: # type: ignore
196 | if not torch.is_tensor(input):
197 | raise TypeError("Input type is not a torch.Tensor. Got {}"
198 | .format(type(input)))
199 | if not len(input.shape) == 4:
200 | raise ValueError("Invalid input shape, we expect BxCxHxW. Got: {}"
201 | .format(input.shape))
202 |         # compute the x/y gradients
203 | edges: torch.Tensor = spatial_gradient(input,
204 | normalized=self.normalized)
205 |
206 | # unpack the edges
207 | gx: torch.Tensor = edges[:, :, 0]
208 | gy: torch.Tensor = edges[:, :, 1]
209 |
210 |         # compute gradient magnitude
211 | magnitude: torch.Tensor = torch.sqrt(gx * gx + gy * gy + self.eps)
212 | return magnitude
213 |
214 |
215 | # functional api
216 | # TODO: In terms of functional API, there should not be any initialization of an nn.Module.
217 | # This logic is reversed.
218 |
219 | def spatial_gradient(input: torch.Tensor,
220 | mode: str = 'sobel',
221 | order: int = 1,
222 | normalized: bool = True) -> torch.Tensor:
223 | r"""Computes the first order image derivative in both x and y using a Sobel
224 | operator.
225 | See :class:`~kornia.filters.SpatialGradient` for details.
226 | """
227 | return SpatialGradient(mode, order, normalized)(input)
228 |
229 |
230 | def spatial_gradient3d(input: torch.Tensor,
231 | mode: str = 'diff',
232 | order: int = 1) -> torch.Tensor:
233 | r"""Computes the first or second order image derivative in both x and y and y using a diff
234 | operator.
235 | See :class:`~kornia.filters.SpatialGradient3d` for details.
236 | """
237 | return SpatialGradient3d(mode, order)(input)
238 |
239 |
240 | def sobel(input: torch.Tensor, normalized: bool = True, eps: float = 1e-6) -> torch.Tensor:
241 | r"""Computes the Sobel operator and returns the magnitude per channel.
242 | See :class:`~kornia.filters.Sobel` for details.
243 | """
244 | return Sobel(normalized, eps)(input)
245 |
--------------------------------------------------------------------------------
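Unlike the simpler variant above, this version stacks the optional diagonal, Laplacian, and second-order kernels onto kornia's Sobel pair, so the size of the derivative dimension grows with the flags. A sketch, assuming kornia is installed and the file imports as `networks.spatial_gradient_2d`:

```
# Sketch: the derivative dimension grows with the optional kernel flags.
import torch
from networks.spatial_gradient_2d import SpatialGradient

img = torch.rand(1, 1, 32, 32)
base = SpatialGradient()(img)                       # Sobel pair only
full = SpatialGradient(diagonal=True, laplacian=True, second_order=True)(img)
print(base.shape)   # torch.Size([1, 1, 2, 32, 32])
print(full.shape)   # torch.Size([1, 1, 7, 32, 32]): 2 Sobel + 2 diag + 1 Laplacian + 2 second-order
```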
/networks/transforms.py:
--------------------------------------------------------------------------------
1 |
2 | import numpy as np
3 | import torch
4 | import cv2
5 |
6 |
7 | def dt(a):
8 | return cv2.distanceTransform((a * 255).astype(np.uint8), cv2.DIST_L2, 0)
9 |
10 |
11 | def trimap_transform(trimap):
12 | h, w = trimap.shape[0], trimap.shape[1]
13 |
14 | clicks = np.zeros((h, w, 6))
15 | for k in range(2):
16 | if(np.count_nonzero(trimap[:, :, k]) > 0):
17 | dt_mask = -dt(1 - trimap[:, :, k])**2
18 | L = 320
19 | clicks[:, :, 3*k] = np.exp(dt_mask / (2 * ((0.02 * L)**2)))
20 | clicks[:, :, 3*k+1] = np.exp(dt_mask / (2 * ((0.08 * L)**2)))
21 | clicks[:, :, 3*k+2] = np.exp(dt_mask / (2 * ((0.16 * L)**2)))
22 |
23 | return clicks
24 |
25 |
26 | # For RGB !
27 | group_norm_std = [0.229, 0.224, 0.225]
28 | group_norm_mean = [0.485, 0.456, 0.406]
29 |
30 |
31 | def groupnorm_normalise_image(img, format='nhwc'):
32 | '''
33 | Accept rgb in range 0,1
34 | '''
35 | if(format == 'nhwc'):
36 | for i in range(3):
37 | img[..., i] = (img[..., i] - group_norm_mean[i]) / group_norm_std[i]
38 | else:
39 | for i in range(3):
40 | img[..., i, :, :] = (img[..., i, :, :] - group_norm_mean[i]) / group_norm_std[i]
41 |
42 | return img
43 |
44 |
45 | def groupnorm_denormalise_image(img, format='nhwc'):
46 | '''
47 | Accept rgb, normalised, return in range 0,1
48 | '''
49 | if(format == 'nhwc'):
50 | for i in range(3):
51 | img[:, :, :, i] = img[:, :, :, i] * group_norm_std[i] + group_norm_mean[i]
52 | else:
53 |         img1 = torch.zeros_like(img)  # zeros_like follows img's device; forcing .cuda() breaks CPU inputs
54 | for i in range(3):
55 | img1[:, i, :, :] = img[:, i, :, :] * group_norm_std[i] + group_norm_mean[i]
56 | return img1
57 | return img
58 |
--------------------------------------------------------------------------------
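`trimap_transform` turns a two-channel trimap (definite-foreground and definite-background masks) into six soft "click" maps: the squared distance to each region is pushed through Gaussians at three scales (sigma = 0.02L, 0.08L, 0.16L with L = 320). A minimal sketch with a synthetic trimap:

```
# Sketch: build the six distance-based channels from a synthetic 2-channel trimap.
import numpy as np
from networks.transforms import trimap_transform

h, w = 64, 64
trimap_2chn = np.zeros((h, w, 2), dtype=np.float32)
trimap_2chn[:16, :, 0] = 1.0    # definite foreground
trimap_2chn[-16:, :, 1] = 1.0   # definite background
clicks = trimap_transform(trimap_2chn)
print(clicks.shape, clicks.max())   # (64, 64, 6) 1.0 (exp(0) inside each region)
```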
/networks/util.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | import functools
4 | import networks.resnet_GN_WS as resnet_GN_WS
5 | import networks.layers_WS as L
6 | import networks.resnet_bn as resnet_bn
7 | from torch.nn import BatchNorm2d
8 | # from modules.nn import BatchNorm2d
9 |
10 |
11 | def weight_init(m):
12 | if isinstance(m, nn.Conv2d):
13 | torch.nn.init.normal_(m.weight.data, 0.2, 1.0)
14 | elif isinstance(m, BatchNorm2d):
15 | m.weight.data.fill_(1)
16 | m.bias.data.zero_()
17 |
18 |
19 | def norm(dim, bn=False):
20 | if (bn is False):
21 | return nn.GroupNorm(32, dim)
22 | else:
23 | return BatchNorm2d(dim)
24 |
25 |
26 | class _ConvLayer(nn.Sequential):
27 | def __init__(self, inc, ouc, kernel_size=1, stride=1, padding=0, bn=True, bias=False, lrelu=False):
28 | super(_ConvLayer, self).__init__()
29 | if bn:
30 |             self.add_module('conv', nn.Conv2d(inc, ouc, kernel_size=kernel_size, stride=stride, padding=padding, bias=bias))
31 | self.add_module('norm', BatchNorm2d(ouc))
32 | if lrelu:
33 | self.add_module('relu', nn.LeakyReLU())
34 | else:
35 | self.add_module('relu', nn.ReLU(inplace=True))
36 | else:
37 |             self.add_module('conv', nn.Conv2d(inc, ouc, kernel_size=kernel_size, stride=stride, padding=padding, bias=True))
38 | if lrelu:
39 | self.add_module('relu', nn.LeakyReLU())
40 | else:
41 | self.add_module('relu', nn.ReLU(inplace=True))
42 | def forward(self, x):
43 | out = super(_ConvLayer, self).forward(x)
44 | return out
45 |
46 |
47 | class SELayer(nn.Module):
48 | def __init__(self, channel, reduction=16):
49 | super(SELayer, self).__init__()
50 | self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
51 | self.fc = nn.Sequential(
52 | nn.Linear(channel, channel // reduction, bias=False),
53 | nn.ReLU(inplace=True),
54 | nn.Linear(channel // reduction, channel, bias=False),
55 | nn.Sigmoid()
56 | )
57 |
58 | def forward(self, x):
59 | b, c, _, _ = x.size()
60 | y = self.avg_pool(x).view(b, c)
61 | y = self.fc(y).view(b, c, 1, 1)
62 | return x * y.expand_as(x)
63 |
64 |
65 | class ResnetDilatedBN(nn.Module):
66 | def __init__(self, args, orig_resnet, dilate_scale=8):
67 | super(ResnetDilatedBN, self).__init__()
68 | from functools import partial
69 |
70 | if dilate_scale == 8:
71 | orig_resnet.layer3.apply(
72 | partial(self._nostride_dilate, dilate=2))
73 | orig_resnet.layer4.apply(
74 | partial(self._nostride_dilate, dilate=4))
75 | elif dilate_scale == 16:
76 | orig_resnet.layer4.apply(
77 | partial(self._nostride_dilate, dilate=2))
78 |
79 | # take pretrained resnet, except AvgPool and FC
80 | self.conv1 = orig_resnet.conv1
81 | self.conv1_side = nn.Conv2d(20, 64, 7, 2, 3, bias=False)
82 | self.bn1 = orig_resnet.bn1
83 | self.relu1 = orig_resnet.relu1
84 |
85 | self.conv2 = orig_resnet.conv2
86 | self.bn2 = orig_resnet.bn2
87 | self.relu2 = orig_resnet.relu2
88 | self.conv3 = orig_resnet.conv3
89 | self.bn3 = orig_resnet.bn3
90 | self.relu3 = orig_resnet.relu3
91 |
92 | self.maxpool = orig_resnet.maxpool
93 | self.layer1 = orig_resnet.layer1
94 | self.layer2 = orig_resnet.layer2
95 | self.layer3 = orig_resnet.layer3
96 | self.layer4 = orig_resnet.layer4
97 |
98 | def _nostride_dilate(self, m, dilate):
99 | classname = m.__class__.__name__
100 | if classname.find('Conv') != -1:
101 | # the convolution with stride
102 | if m.stride == (2, 2):
103 | m.stride = (1, 1)
104 | if m.kernel_size == (3, 3):
105 | m.dilation = (dilate // 2, dilate // 2)
106 | m.padding = (dilate // 2, dilate // 2)
107 |             # other convolutions
108 | else:
109 | if m.kernel_size == (3, 3):
110 | m.dilation = (dilate, dilate)
111 | m.padding = (dilate, dilate)
112 |
113 | def forward(self, x, return_feature_maps=False, smap=None):
114 | conv_out = [x]
115 | x = self.relu1(self.bn1(self.conv1(x)+self.conv1_side(smap)))
116 | x = self.relu2(self.bn2(self.conv2(x)))
117 | x = self.relu3(self.bn3(self.conv3(x)))
118 |
119 | conv_out.append(x)
120 | x, indices = self.maxpool(x)
121 | x = self.layer1(x)
122 | conv_out.append(x)
123 | x = self.layer2(x)
124 | conv_out.append(x)
125 | x = self.layer3(x)
126 | conv_out.append(x)
127 | x = self.layer4(x)
128 | conv_out.append(x)
129 |
130 | if return_feature_maps:
131 | return conv_out, indices
132 | return [x]
133 |
134 |
135 | class ResnetDilated(nn.Module):
136 | def __init__(self, args, orig_resnet, dilate_scale=8):
137 | super(ResnetDilated, self).__init__()
138 | from functools import partial
139 |
140 | if dilate_scale == 8:
141 | orig_resnet.layer3.apply(
142 | partial(self._nostride_dilate, dilate=2))
143 | orig_resnet.layer4.apply(
144 | partial(self._nostride_dilate, dilate=4))
145 | elif dilate_scale == 16:
146 | orig_resnet.layer4.apply(
147 | partial(self._nostride_dilate, dilate=2))
148 |
149 | # take pretrained resnet, except AvgPool and FC
150 | self.conv1 = orig_resnet.conv1
151 | self.conv1_side = nn.Conv2d(20, 64, 7, 2, 3, bias=False)
152 | self.bn1 = orig_resnet.bn1
153 | self.relu = orig_resnet.relu
154 | self.maxpool = orig_resnet.maxpool
155 | self.layer1 = orig_resnet.layer1
156 | self.layer2 = orig_resnet.layer2
157 | self.layer3 = orig_resnet.layer3
158 | self.layer4 = orig_resnet.layer4
159 |
160 | def _nostride_dilate(self, m, dilate):
161 | classname = m.__class__.__name__
162 | if classname.find('Conv') != -1:
163 | # the convolution with stride
164 | if m.stride == (2, 2):
165 | m.stride = (1, 1)
166 | if m.kernel_size == (3, 3):
167 | m.dilation = (dilate // 2, dilate // 2)
168 | m.padding = (dilate // 2, dilate // 2)
169 |             # other convolutions
170 | else:
171 | if m.kernel_size == (3, 3):
172 | m.dilation = (dilate, dilate)
173 | m.padding = (dilate, dilate)
174 |
175 | def forward(self, x, return_feature_maps=False, smap=None):
176 | conv_out = [x]
177 | x = self.relu(self.bn1(self.conv1(x)+self.conv1_side(smap)))
178 | conv_out.append(x)
179 | x, indices = self.maxpool(x)
180 | x = self.layer1(x)
181 | conv_out.append(x)
182 | x = self.layer2(x)
183 | conv_out.append(x)
184 | x = self.layer3(x)
185 | conv_out.append(x)
186 | x = self.layer4(x)
187 | conv_out.append(x)
188 |
189 | if return_feature_maps:
190 | return conv_out, indices
191 | return [x]
192 |
193 |
194 | class Resnet(nn.Module):
195 | def __init__(self, orig_resnet):
196 | super(Resnet, self).__init__()
197 |
198 | # take pretrained resnet, except AvgPool and FC
199 | self.conv1 = orig_resnet.conv1
200 | self.bn1 = orig_resnet.bn1
201 | self.relu1 = orig_resnet.relu1
202 | self.conv2 = orig_resnet.conv2
203 | self.bn2 = orig_resnet.bn2
204 | self.relu2 = orig_resnet.relu2
205 | self.conv3 = orig_resnet.conv3
206 | self.bn3 = orig_resnet.bn3
207 | self.relu3 = orig_resnet.relu3
208 | self.maxpool = orig_resnet.maxpool
209 | self.layer1 = orig_resnet.layer1
210 | self.layer2 = orig_resnet.layer2
211 | self.layer3 = orig_resnet.layer3
212 | self.layer4 = orig_resnet.layer4
213 |
214 | def forward(self, x, return_feature_maps=False):
215 | conv_out = []
216 |
217 | x = self.relu1(self.bn1(self.conv1(x)))
218 | x = self.relu2(self.bn2(self.conv2(x)))
219 | x = self.relu3(self.bn3(self.conv3(x)))
220 | conv_out.append(x)
221 | x, indices = self.maxpool(x)
222 |
223 | x = self.layer1(x)
224 | conv_out.append(x)
225 | x = self.layer2(x)
226 | conv_out.append(x)
227 | x = self.layer3(x)
228 | conv_out.append(x)
229 | x = self.layer4(x)
230 | conv_out.append(x)
231 |
232 | if return_feature_maps:
233 | return conv_out
234 | return [x]
235 |
--------------------------------------------------------------------------------
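The dilated encoders above keep layers 3 and 4 at output stride 8 (via dilation) and fuse a 20-channel semantic trimap into the stem through `conv1_side`. A shape-checking sketch, assuming the deep-stem backbone from `networks/resnet_bn.py`; the `args` argument is unused by the constructor body shown, so None is passed:

```
# Sketch: wrap the BN ResNet-50 into the dilated encoder and inspect
# the feature pyramid it returns (strides 1, 2, 4, 8, 8, 8).
import torch
from networks.resnet_bn import l_resnet50
from networks.util import ResnetDilatedBN

encoder = ResnetDilatedBN(None, l_resnet50(pretrained=False), dilate_scale=8).eval()
img = torch.randn(1, 3, 320, 320)
smap = torch.randn(1, 20, 320, 320)  # semantic trimap, one channel per matting class
with torch.no_grad():
    feats, indices = encoder(img, return_feature_maps=True, smap=smap)
print([tuple(f.shape) for f in feats])
# [(1, 3, 320, 320), (1, 128, 160, 160), (1, 256, 80, 80),
#  (1, 512, 40, 40), (1, 1024, 40, 40), (1, 2048, 40, 40)]
```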
/scripts/main.py:
--------------------------------------------------------------------------------
1 | import os
2 | import cv2
3 | import time
4 | import glob
5 | import random
6 | import logging
7 | import argparse
8 | import numpy as np
9 |
10 | import torch
11 | import torch.nn as nn
12 | import torch.nn.parallel
13 | import torch.nn.functional as F
14 | import networks.resnet as resnet_models
15 |
16 | from collections import OrderedDict
17 | from pprint import pprint
18 | from networks.model import build_model
19 | from utils.config import load_config
20 | from utils.util import *
21 | from data.util import *
22 |
23 |
24 | #########################################################################################
25 | # args
26 | #########################################################################################
27 | parser = argparse.ArgumentParser(description='SIM')
28 | parser.add_argument('-c', '--config', type=str, metavar='FILE', help='path to config file')
29 | parser.add_argument('-p', '--phase', type=str, metavar='PHASE', help='train or test')
30 |
31 |
32 | def build_classifier(args, logger):
33 | logger.info("=> creating classifier '{}'".format(args.arch))
34 | model = resnet_models.__dict__[args.arch](args.n_channel, num_classes=args.num_classes, pretrained=False)
35 | if os.path.isfile(args.resume_checkpoint):
36 | logger.info("=> loading checkpoint '{}'".format(args.resume_checkpoint))
37 | checkpoint = torch.load(args.resume_checkpoint, map_location=torch.device('cpu'))
38 | state_dict = remove_prefix(checkpoint['state_dict'])
39 | model.load_state_dict(state_dict, strict=True)
40 | logger.info("=> loaded checkpoint '{}'".format(args.resume_checkpoint))
41 | else:
42 | logger.info("=> no checkpoint found at '{}'".format(args.resume_checkpoint))
43 | exit()
44 | return model
45 |
46 |
47 | def build_sim_model(args, logger):
48 | model = build_model(args)
49 |
50 | if args.pretrain_checkpoint and os.path.isfile(args.pretrain_checkpoint):
51 | logger.info("Pretrain: loading '{}'".format(args.pretrain_checkpoint))
52 | ckpt = torch.load(args.pretrain_checkpoint, map_location=torch.device('cpu'))
53 | if 'state_dict' in ckpt:
54 | ckpt = ckpt['state_dict']
55 | n_keys = len(ckpt.keys())
56 | ckpt = remove_mismatch_weight(model.state_dict(), ckpt)
57 | n_keys_rest = len(ckpt.keys())
58 | logger.info("Remove %d mismatched keys" % (n_keys - n_keys_rest))
59 |
60 | model.load_state_dict(ckpt, strict=False)
61 | logger.info("Pretrain: loaded '{}'".format(args.pretrain_checkpoint))
62 | else:
63 | logger.info("Pretrain: no checkpoint found at '{}'".format(args.pretrain_checkpoint))
64 |
65 | if args.resume_checkpoint and os.path.isfile(args.resume_checkpoint):
66 | logger.info("Resume: loading '{}'".format(args.resume_checkpoint))
67 | ckpt = torch.load(args.resume_checkpoint, map_location=torch.device('cpu'))
68 | if 'state_dict' in ckpt:
69 | ckpt = ckpt['state_dict']
70 | model.load_state_dict(ckpt, strict=True)
71 | logger.info("Resume: loaded '{}'".format(args.resume_checkpoint))
72 | else:
73 | logger.info("Resume: no checkpoint found at '{}'".format(args.resume_checkpoint))
74 |
75 | return model
76 |
77 |
78 | def extract_semantic_trimap(model, image, trimap, thresh=0.3, return_cam=False):
79 | model.eval()
80 | with torch.no_grad():
81 | N, C, H, W = image.shape
82 | output, cam, feats = model(torch.cat([image, trimap/255.], dim=1))
83 | cam = F.interpolate(cam, (H, W), mode='bilinear')
84 | if return_cam: return cam, output
85 |
86 | cam_norm = (cam - cam.min()) / (cam.max() - cam.min())
87 | semantic_trimap = cam_norm * (trimap==128).float()
88 | torch.cuda.empty_cache()
89 | return semantic_trimap
90 |
91 |
92 | def extract_semantic_trimap_whole(args, model, image, trimap, thresh=0.1):
93 |     step = args.load_size  # immediately overridden by the multi-scale loop below
94 | N, C, H, W = image.shape
95 | cam = torch.zeros((N, args.num_classes, H, W)).to(image.device)
96 | weight = torch.zeros((N, args.num_classes, H, W)).to(image.device)
97 | for step in [320, 800]:
98 | xs = list(range(0, W-step, step//2)) + [W-step]
99 | ys = list(range(0, H-step, step//2)) + [H-step]
100 | for i in ys:
101 | for j in xs:
102 | patcht = trimap[:,:,i:i+step,j:j+step]
103 | if (patcht == 128).sum() == 0: continue
104 | patchi = image[:,:,i:i+step, j:j+step]
105 | patchc, out = extract_semantic_trimap(model, patchi, patcht, return_cam=True)
106 | cam[:,:,i:i+step,j:j+step] += patchc
107 | weight[:,:,i:i+step,j:j+step] += 1
108 | cam = cam / torch.clamp_min(weight,1)
109 | cam_norm = (cam - cam.min()) / (cam.max() - cam.min())
110 | smap = cam_norm * (trimap == 128).float()
111 | return smap
112 |
113 |
114 | def run_discriminator(args, discriminator, pred, gt):
115 | g_fg_trans = gt['fg_trans'] # (N, 3, H, W)
116 | g_alpha = gt['alpha'] # (N, 1, H, W)
117 | p_alpha = pred['alpha'] # (N, 1, H, W)
118 |
119 | with torch.no_grad():
120 | if args.n_channel == 4:
121 | # keep consistent with training
122 | real_inputs = torch.cat([g_fg_trans, g_alpha], dim=1)
123 | fake_inputs = torch.cat([g_fg_trans, p_alpha], dim=1)
124 | else:
125 | real_inputs = g_alpha.repeat((1,3,1,1))
126 | fake_inputs = p_alpha.repeat((1,3,1,1))
127 |
128 | real_ret,_,real_feats = discriminator(real_inputs)
129 | fake_ret,_,fake_feats = discriminator(fake_inputs)
130 |
131 | out = {
132 | "real_ret": real_ret,
133 | "fake_ret": fake_ret,
134 | "real_feats": real_feats,
135 | "fake_feats": fake_feats,
136 | }
137 | return out
138 |
139 |
140 | def load_sim_samples(cfg):
141 | names = ['defocus', 'fire', 'fur', 'glass_ice', 'hair_easy', 'hair_hard',
142 | 'insect', 'motion', 'net', 'plant_flower', 'plant_leaf', 'plant_tree',
143 | 'plastic_bag', 'sharp', 'smoke_cloud', 'spider_web', 'texture_holed',
144 | 'texture_smooth', 'water_drop', 'water_spray']
145 | name2class = {name:idx for idx, name in enumerate(names)}
146 |
147 | data_dir = cfg.data.test_dir
148 | merged = sorted(glob.glob("%s/*/merged/*.png" % data_dir))
149 | trimap = sorted(glob.glob("%s/*/trimap/*.png" % data_dir))
150 | alpha = sorted(glob.glob("%s/*/alpha/*.png" % data_dir))
151 |
152 | print('Found %d samples' % len(merged))
153 |
154 | filenames = []
155 | target = []
156 | names = []
157 | for fp in merged:
158 | splits = fp.split('/')
159 | filenames.append(splits[-3] + "_" + splits[-1])
160 | names.append(splits[-3])
161 | target.append(name2class[splits[-3]])
162 | print('Found %d samples' % len(alpha))
163 | return zip(alpha, trimap, merged, names, filenames, target)
164 |
165 |
166 | def load_adobe_samples(cfg):
167 | data_dir = cfg.data.test_dir
168 | merged = sorted(glob.glob("%s/merged/*.png" % data_dir))
169 | trimap = sorted(glob.glob("%s/trimap/*.png" % data_dir))
170 | alpha = sorted(glob.glob("%s/alpha/*.png" % data_dir))
171 |
172 | filenames = []
173 | target = []
174 | names = []
175 | for fp in merged:
176 | splits = fp.split('/')
177 | filenames.append(splits[-1])
178 | names.append(None)
179 | target.append(None)
180 | print('Found %d samples' % len(alpha))
181 | return zip(alpha, trimap, merged, names, filenames, target)
182 |
183 |
184 | def preprocess(alpha_path, trimap_path, image_path, stride=8):
185 | alpha = cv2.imread(alpha_path, 0)
186 | trimap = cv2.imread(trimap_path, 0)
187 | image = cv2.imread(image_path)
188 |
189 | h, w = image.shape[:2]
190 | pad_h = (h // stride + 1) * stride - h
191 | pad_w = (w // stride + 1) * stride - w
192 |
193 | trimap = cv2.copyMakeBorder(trimap, 0, pad_h, 0, pad_w, cv2.BORDER_REFLECT)
194 | image = cv2.copyMakeBorder(image, 0, pad_h, 0, pad_w, cv2.BORDER_REFLECT)
195 | alpha = cv2.copyMakeBorder(alpha, 0, pad_h, 0, pad_w, cv2.BORDER_REFLECT)
196 |
197 | image_scale, image_trans = transform(image, scale=255.)
198 | trimap_tensor = torch.from_numpy(trimap).unsqueeze(0)
199 | alpha_tensor = torch.from_numpy(alpha/255.).unsqueeze(0)
200 |
201 | trimap_2chn = trimap_to_2chn(trimap)
202 | trimap_clks = trimap_to_clks(trimap_2chn, 320)
203 | trimap_2chn = torch.from_numpy(trimap_2chn.transpose((2,0,1)))
204 | trimap_clks = torch.from_numpy(trimap_clks.transpose((2,0,1)))
205 |
206 | inputs = {
207 | "alpha": alpha_tensor.unsqueeze(0),
208 | "trimap": trimap_tensor.unsqueeze(0),
209 | "image_scale": image_scale.unsqueeze(0),
210 | "image_trans": image_trans.unsqueeze(0),
211 | "trimap_2chn": trimap_2chn.unsqueeze(0),
212 | "trimap_clks": trimap_clks.unsqueeze(0),
213 | "origin_image": image,
214 | "origin_alpha": alpha,
215 | "origin_h": h,
216 | "origin_w": w
217 | }
218 | return inputs
219 |
220 |
221 | def save_prediction(pred, save_path):
222 | p_a = pred[0,0].data.cpu().numpy() * 255
223 | cv2.imwrite(save_path, p_a)
224 |
225 |
226 | def run(cfg, model, classifier, logger):
227 | batch_time = AverageMeter()
228 | total_sad = AverageMeter()
229 |
230 | sad_list = {}
231 |
232 | model.eval()
233 |
234 | end = time.time()
235 |
236 | if cfg.task == 'SIM':
237 | samples = load_sim_samples(cfg)
238 | elif cfg.task == 'Adobe':
239 | samples = load_adobe_samples(cfg)
240 | else:
241 | raise NotImplementedError
242 |
243 | samples = list(samples)
244 |
245 | with torch.no_grad():
246 | for idx, (alpha_path, trimap_path, image_path, name, filename, target) in enumerate(samples):
247 | inputs = preprocess(alpha_path, trimap_path, image_path)
248 |
249 | trimap = inputs['trimap'].float().to(device)
250 | alpha = inputs['alpha'].float().to(device)
251 | image_scale = inputs['image_scale'].float().to(device)
252 | image_trans = inputs['image_trans'].float().to(device)
253 | trimap_2chn = inputs['trimap_2chn'].float().to(device)
254 | trimap_clks = inputs['trimap_clks'].float().to(device)
255 |
256 | oh = inputs['origin_h']
257 | ow = inputs['origin_w']
258 |
259 | semantic_trimap = extract_semantic_trimap_whole(cfg.classifier, classifier, image_trans, trimap)
260 | out = model(image_scale, trimap_2chn, image_trans, trimap_clks, semantic_trimap, is_training=False)
261 | out['alpha'] = torch.clamp(out['alpha'], 0, 1)
262 |
263 | pred = out['alpha']
264 | pred[trimap==0] = 0
265 | pred[trimap==255] = 1
266 |
267 | pred = pred[:,:,0:oh,0:ow]
268 | trimap = trimap[:,:,0:oh,0:ow]
269 | alpha = alpha[:,:,0:oh,0:ow]
270 |
271 | save_prediction(pred, os.path.join(cfg.log.visualize_path, filename))
272 |
273 | sad = ((pred - alpha) * (trimap==128).float()).abs().sum() / 1000.
274 | total_sad.update(sad)
275 | if name is not None:
276 | if name not in sad_list:
277 | sad_list[name] = []
278 | sad_list[name].append(sad.item())
279 | msg = 'Test: [{0}/{1}] SAD {sad:.4f}'.format(idx, len(samples), sad=sad)
280 | logger.info(msg)
281 |
282 | # measure elapsed time
283 | batch_time.update(time.time() - end)
284 | end = time.time()
285 |
286 | del trimap, alpha, pred
287 | del out, inputs
288 | del image_scale, image_trans, trimap_2chn, trimap_clks
289 |
290 | msg = 'Test: Time {batch_time.val:.3f} ({batch_time.avg:.3f})'.format(batch_time=batch_time)
291 | logger.info(msg)
292 |
293 | for key in sorted(sad_list.keys()):
294 | logger.info("{} {:.4f}".format(key, np.array(sad_list[key]).mean()))
295 |
296 | logger.info("MeanSAD: {sad.avg:.3f}".format(sad=total_sad))
297 |
298 | return total_sad.avg
299 |
300 |
301 | def main():
302 | global args, device
303 |
304 | args = parser.parse_args()
305 | cfg = load_config(args.config)
306 |
307 | cfg.version = args.config.split('/')[-1].split('.')[0]
308 | cfg.phase = args.phase
309 |
310 |
311 | if cfg.is_default:
312 | raise ValueError("No .toml config loaded.")
313 |
314 | USE_CUDA = torch.cuda.is_available()
315 |
316 | cfg.log.logging_path = os.path.join(cfg.log.logging_path, cfg.version)
317 | cfg.log.visualize_path = os.path.join(cfg.log.visualize_path, cfg.version)
318 |
319 | os.makedirs(cfg.log.logging_path, exist_ok=True)
320 | os.makedirs(cfg.log.visualize_path, exist_ok=True)
321 |
322 | logger = get_logger(cfg.log.logging_path)
323 |
324 | pprint(cfg, stream=open(os.path.join(cfg.log.logging_path, "cfg.json"), 'w'))
325 |
326 | classifier = build_classifier(cfg.classifier, logger)
327 | model = build_sim_model(cfg.model, logger)
328 |
329 | device = torch.device("cuda:0" if USE_CUDA else "cpu")
330 |
331 | model.cuda()
332 | classifier.cuda()
333 | run(cfg, model, classifier, logger)
334 |
335 |
336 | if __name__ == '__main__':
337 | main()
338 |
--------------------------------------------------------------------------------
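`preprocess` reflect-pads each image so both sides become multiples of the network stride, and `run` crops the prediction back with `pred[:, :, 0:oh, 0:ow]`. A standalone sketch of that round trip (note the formula always adds a full stride of padding, even when a side is already aligned):

```
# Sketch of the stride-alignment round trip used by preprocess()/run() above.
import numpy as np
import cv2

stride = 8
img = np.zeros((501, 333, 3), np.uint8)
h, w = img.shape[:2]
pad_h = (h // stride + 1) * stride - h    # always pads at least one row
pad_w = (w // stride + 1) * stride - w
padded = cv2.copyMakeBorder(img, 0, pad_h, 0, pad_w, cv2.BORDER_REFLECT)
print(padded.shape)                       # (504, 336, 3)
restored = padded[:h, :w]                 # mirrors pred[:, :, 0:oh, 0:ow]
```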
/utils/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nowsyn/SIM/dc5ed5a594fda16d3bd0a8d114b8740d876c12c8/utils/__init__.py
--------------------------------------------------------------------------------
/utils/colormap.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2017-present, Facebook, Inc.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | ##############################################################################
15 |
16 | """An awesome colormap for really neat visualizations."""
17 |
18 | from __future__ import absolute_import
19 | from __future__ import division
20 | from __future__ import print_function
21 | from __future__ import unicode_literals
22 |
23 | import numpy as np
24 |
25 |
26 | def colormap(rgb=False):
27 | color_list = np.array(
28 | [
29 | 0.000, 0.447, 0.741,
30 | 0.850, 0.325, 0.098,
31 | 0.929, 0.694, 0.125,
32 | 0.494, 0.184, 0.556,
33 | 0.466, 0.674, 0.188,
34 | 0.301, 0.745, 0.933,
35 | 0.635, 0.078, 0.184,
36 | 0.300, 0.300, 0.300,
37 | 0.600, 0.600, 0.600,
38 | 1.000, 0.000, 0.000,
39 | 1.000, 0.500, 0.000,
40 | 0.749, 0.749, 0.000,
41 | 0.000, 1.000, 0.000,
42 | 0.000, 0.000, 1.000,
43 | 0.667, 0.000, 1.000,
44 | 0.333, 0.333, 0.000,
45 | 0.333, 0.667, 0.000,
46 | 0.333, 1.000, 0.000,
47 | 0.667, 0.333, 0.000,
48 | 0.667, 0.667, 0.000,
49 | 0.667, 1.000, 0.000,
50 | 1.000, 0.333, 0.000,
51 | 1.000, 0.667, 0.000,
52 | 1.000, 1.000, 0.000,
53 | 0.000, 0.333, 0.500,
54 | 0.000, 0.667, 0.500,
55 | 0.000, 1.000, 0.500,
56 | 0.333, 0.000, 0.500,
57 | 0.333, 0.333, 0.500,
58 | 0.333, 0.667, 0.500,
59 | 0.333, 1.000, 0.500,
60 | 0.667, 0.000, 0.500,
61 | 0.667, 0.333, 0.500,
62 | 0.667, 0.667, 0.500,
63 | 0.667, 1.000, 0.500,
64 | 1.000, 0.000, 0.500,
65 | 1.000, 0.333, 0.500,
66 | 1.000, 0.667, 0.500,
67 | 1.000, 1.000, 0.500,
68 | 0.000, 0.333, 1.000,
69 | 0.000, 0.667, 1.000,
70 | 0.000, 1.000, 1.000,
71 | 0.333, 0.000, 1.000,
72 | 0.333, 0.333, 1.000,
73 | 0.333, 0.667, 1.000,
74 | 0.333, 1.000, 1.000,
75 | 0.667, 0.000, 1.000,
76 | 0.667, 0.333, 1.000,
77 | 0.667, 0.667, 1.000,
78 | 0.667, 1.000, 1.000,
79 | 1.000, 0.000, 1.000,
80 | 1.000, 0.333, 1.000,
81 | 1.000, 0.667, 1.000,
82 | 0.167, 0.000, 0.000,
83 | 0.333, 0.000, 0.000,
84 | 0.500, 0.000, 0.000,
85 | 0.667, 0.000, 0.000,
86 | 0.833, 0.000, 0.000,
87 | 1.000, 0.000, 0.000,
88 | 0.000, 0.167, 0.000,
89 | 0.000, 0.333, 0.000,
90 | 0.000, 0.500, 0.000,
91 | 0.000, 0.667, 0.000,
92 | 0.000, 0.833, 0.000,
93 | 0.000, 1.000, 0.000,
94 | 0.000, 0.000, 0.167,
95 | 0.000, 0.000, 0.333,
96 | 0.000, 0.000, 0.500,
97 | 0.000, 0.000, 0.667,
98 | 0.000, 0.000, 0.833,
99 | 0.000, 0.000, 1.000,
100 | 0.000, 0.000, 0.000,
101 | 0.143, 0.143, 0.143,
102 | 0.286, 0.286, 0.286,
103 | 0.429, 0.429, 0.429,
104 | 0.571, 0.571, 0.571,
105 | 0.714, 0.714, 0.714,
106 | 0.857, 0.857, 0.857,
107 | 1.000, 1.000, 1.000
108 | ]
109 | ).astype(np.float32)
110 | color_list = color_list.reshape((-1, 3)) * 255
111 | if not rgb:
112 | color_list = color_list[:, ::-1]
113 | return color_list
114 |
--------------------------------------------------------------------------------
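A quick sketch of what the helper returns: 79 distinct colors in the 0-255 range, BGR by default (OpenCV order) or RGB with `rgb=True`:

```
# Sketch: inspect the colormap output.
from utils.colormap import colormap

colors = colormap(rgb=True)     # shape (79, 3), values in [0, 255]
print(colors.shape, colors[0])  # (79, 3) [  0.    113.985 188.955]
```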
/utils/config.py:
--------------------------------------------------------------------------------
1 | from easydict import EasyDict
2 |
3 | CONFIG = EasyDict({})
4 | CONFIG.is_default = True
5 | CONFIG.task = "SIM"
6 | CONFIG.version = "SIM"
7 | CONFIG.debug = False
8 | CONFIG.phase = "train"
9 | CONFIG.dataset = "SIMD"
10 | # distributed training
11 | CONFIG.dist = False
12 | # global variables which will be assigned in the runtime
13 | CONFIG.local_rank = 0
14 | CONFIG.gpu = 0
15 | CONFIG.world_size = 1
16 | CONFIG.devices = (0,)
17 |
18 |
19 | # ===============================================================================
20 | # Model config
21 | # ===============================================================================
22 | CONFIG.classifier = EasyDict({})
23 | CONFIG.classifier.arch = "resnet50"
24 | CONFIG.classifier.n_channel = 4
25 | CONFIG.classifier.num_classes = 20
26 | CONFIG.classifier.resume_checkpoint = None
27 | CONFIG.classifier.load_size = 320
28 |
29 | CONFIG.discriminator = EasyDict({})
30 | CONFIG.discriminator.arch = "resnet50"
31 | CONFIG.discriminator.n_channel = 4
32 | CONFIG.discriminator.num_classes = 20
33 | CONFIG.discriminator.resume_checkpoint = None
34 | CONFIG.discriminator.load_size = 320
35 |
36 | CONFIG.model = EasyDict({})
37 | CONFIG.model.num_classes = 20
38 | CONFIG.model.pretrain_checkpoint = None
39 | CONFIG.model.resume_checkpoint = None
40 | CONFIG.model.trimap_channel = 3
41 |
42 | CONFIG.model.arch = EasyDict({})
43 | CONFIG.model.arch.n_channel = 11
44 | CONFIG.model.arch.encoder = "resnet_BN"
45 | # parameters for ppm
46 | CONFIG.model.arch.pool_scales = (1,2,3,6)
47 | CONFIG.model.arch.ppm_channel = 256
48 | # parameters for aspp
49 | CONFIG.model.arch.atrous_rates = (12, 24, 36)
50 | CONFIG.model.arch.aspp_channel = 256
51 |
52 |
53 | # ===============================================================================
54 | # Loss config
55 | # ===============================================================================
56 | CONFIG.loss = EasyDict({})
57 | CONFIG.loss.use_laploss = False
58 | CONFIG.loss.use_comploss = False
59 | CONFIG.loss.use_fbloss = False
60 | CONFIG.loss.use_fbcloss = False
61 | CONFIG.loss.use_fblaploss = False
62 | CONFIG.loss.use_attention = False
63 | CONFIG.loss.use_discriminator = False
64 | CONFIG.loss.kernel_diagonal = False
65 | CONFIG.loss.kernel_laplacian = False
66 | CONFIG.loss.kernel_second_order = False
67 | CONFIG.loss.weight_comp = 1.0
68 | CONFIG.loss.weight_fb = 1.0
69 | CONFIG.loss.weight_reg = 1.0
70 | CONFIG.loss.weight_D = 1.0
71 |
72 |
73 | # ===============================================================================
74 | # Dataloader config
75 | # ===============================================================================
76 | CONFIG.data = EasyDict({})
77 | CONFIG.data.workers = 0
78 | CONFIG.data.online = True
79 | CONFIG.data.num_classes = 20
80 | CONFIG.data.load_size = 320
81 | CONFIG.data.max_size = 1920
82 | CONFIG.data.min_size = 800
83 | CONFIG.data.augmentation = True
84 |
85 | CONFIG.data.train_alpha_dir = None
86 | CONFIG.data.train_fg_dir = None
87 | CONFIG.data.train_bg_dir = None
88 | CONFIG.data.test_img_dir = None
89 | CONFIG.data.test_alpha_dir = None
90 | CONFIG.data.test_trimap_dir = None
91 | CONFIG.data.test_dir = None
92 |
93 | CONFIG.data.aug = EasyDict({})
94 | CONFIG.data.aug.crop_sizes = (320, 480, 640)
95 | CONFIG.data.aug.ksize_range = (3, 5)
96 | CONFIG.data.aug.iteration_range = (5, 15)
97 | CONFIG.data.aug.flip = True
98 | CONFIG.data.aug.adjust_gamma = False
99 | CONFIG.data.aug.gamma_range = (0.2, 2)
100 | CONFIG.data.aug.adjust_color = False
101 | CONFIG.data.aug.color_delta = 0.2
102 | CONFIG.data.aug.rescale = False
103 | CONFIG.data.aug.rescale_min = 0.25
104 | CONFIG.data.aug.rescale_max = 0.5
105 | CONFIG.data.aug.rotate = False
106 | CONFIG.data.aug.rotate_degree = 60
107 | CONFIG.data.aug.rejpeg = False
108 | CONFIG.data.aug.composite_fg = False
109 | CONFIG.data.aug.gaussian_noise = False
110 |
111 |
112 | # ===============================================================================
113 | # Training config
114 | # ===============================================================================
115 | CONFIG.train = EasyDict({})
116 | CONFIG.train.batch_size = 8
117 | CONFIG.train.epochs = 30
118 | CONFIG.train.start_epoch = 0
119 | CONFIG.train.decay_step = 10
120 | CONFIG.train.warmup_step = 0
121 | CONFIG.train.lr = 1e-5
122 | CONFIG.train.min_lr = 1e-8
123 | CONFIG.train.reset_lr = False
124 | CONFIG.train.adaptive_lr = False
125 | CONFIG.train.optim = "Adam"
126 | CONFIG.train.eps = 1e-5
127 | CONFIG.train.beta1 = 0.9
128 | CONFIG.train.beta2 = 0.999
129 | CONFIG.train.momentum = 0.9
130 | CONFIG.train.weight_decay = 1e-4
131 | CONFIG.train.clip_grad = True
132 | CONFIG.train.print_freq = 10
133 | CONFIG.train.save_freq = 10
134 | CONFIG.train.test_freq = 10
135 |
136 |
137 | # ===============================================================================
138 | # Testing config
139 | # ===============================================================================
140 | CONFIG.test = EasyDict({})
141 | CONFIG.test.max_size = 1920
142 | CONFIG.test.min_size = 800
143 | CONFIG.test.batch_size = 1
144 | CONFIG.test.checkpoint = "best_model"
145 | CONFIG.test.fast_eval = True
146 |
147 |
148 | # ===============================================================================
149 | # Logging config
150 | # ===============================================================================
151 | CONFIG.log = EasyDict({})
152 | CONFIG.log.tensorboard_path = "./logs/tensorboard"
153 | CONFIG.log.tensorboard_step = 10
154 | CONFIG.log.tensorboard_image_step = 10
155 | CONFIG.log.logging_path = "./logs/stdout"
156 | CONFIG.log.logging_step = 10
157 | CONFIG.log.logging_level = "DEBUG"
158 | CONFIG.log.checkpoint_path = "./checkpoints"
159 | CONFIG.log.checkpoint_step = 10
160 | CONFIG.log.visualize_path = "./logs/visualizations"
161 |
162 |
163 | # ===============================================================================
164 | # util functions
165 | # ===============================================================================
166 | def parse_config(custom_config, default_config=CONFIG, prefix="CONFIG"):
167 | """
168 | This function will recursively overwrite the default config by a custom config
169 | :param default_config:
170 | :param custom_config: parsed from config/config.toml
171 | :param prefix: prefix for config key
172 | :return: None
173 | """
174 | if "is_default" in default_config:
175 | default_config.is_default = False
176 |
177 | for key in custom_config.keys():
178 | full_key = ".".join([prefix, key])
179 | if key not in default_config:
180 | raise NotImplementedError("Unknown config key: {}".format(full_key))
181 | elif isinstance(custom_config[key], dict):
182 | if isinstance(default_config[key], dict):
183 | parse_config(default_config=default_config[key],
184 | custom_config=custom_config[key],
185 | prefix=full_key)
186 | else:
187 | raise ValueError("{}: Expected {}, got dict instead.".format(full_key, type(custom_config[key])))
188 | else:
189 | if isinstance(default_config[key], dict):
190 | raise ValueError("{}: Expected dict, got {} instead.".format(full_key, type(custom_config[key])))
191 | else:
192 | default_config[key] = custom_config[key]
193 |
194 |
195 | def load_config(config_path):
196 | import toml
197 | with open(config_path) as fp:
198 | custom_config = EasyDict(toml.load(fp))
199 | parse_config(custom_config=custom_config)
200 | return CONFIG
201 |
202 |
203 | if __name__ == "__main__":
204 | from pprint import pprint
205 |
206 | pprint(CONFIG)
207 | load_config("../config/example.toml")
208 | pprint(CONFIG)
209 |
--------------------------------------------------------------------------------
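`parse_config` treats a custom config as a sparse overlay: every key must already exist in the defaults above, nested dicts recurse, and `is_default` flips to False. A sketch equivalent to loading a small .toml file (the paths below are hypothetical):

```
# Sketch: merge a sparse custom config over CONFIG; unknown keys raise
# NotImplementedError, type mismatches raise ValueError.
from easydict import EasyDict
from utils.config import CONFIG, parse_config

custom = EasyDict({
    "task": "Adobe",
    "model": {"resume_checkpoint": "checkpoints/Adobe/model.pth"},  # hypothetical path
    "data": {"test_dir": "/path/to/Composition-1K"},                # hypothetical path
})
parse_config(custom)
print(CONFIG.task, CONFIG.model.resume_checkpoint)
print(CONFIG.is_default)   # False once a custom config has been merged
```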
/utils/radam.py:
--------------------------------------------------------------------------------
1 | import math
2 | import torch
3 | from torch.optim.optimizer import Optimizer, required
4 |
5 |
6 | class RAdam(Optimizer):
7 |
8 | def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, degenerated_to_sgd=True):
9 | if not 0.0 <= lr:
10 | raise ValueError("Invalid learning rate: {}".format(lr))
11 | if not 0.0 <= eps:
12 | raise ValueError("Invalid epsilon value: {}".format(eps))
13 | if not 0.0 <= betas[0] < 1.0:
14 | raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
15 | if not 0.0 <= betas[1] < 1.0:
16 | raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
17 |
18 | self.degenerated_to_sgd = degenerated_to_sgd
19 | if isinstance(params, (list, tuple)) and len(params) > 0 and isinstance(params[0], dict):
20 | for param in params:
21 | if 'betas' in param and (param['betas'][0] != betas[0] or param['betas'][1] != betas[1]):
22 | param['buffer'] = [[None, None, None] for _ in range(10)]
23 | defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, buffer=[[None, None, None] for _ in range(10)])
24 | super(RAdam, self).__init__(params, defaults)
25 |
26 | def __setstate__(self, state):
27 | super(RAdam, self).__setstate__(state)
28 |
29 | def step(self, closure=None):
30 |
31 | loss = None
32 | if closure is not None:
33 | loss = closure()
34 |
35 | for group in self.param_groups:
36 |
37 | for p in group['params']:
38 | if p.grad is None:
39 | continue
40 | grad = p.grad.data.float()
41 | if grad.is_sparse:
42 | raise RuntimeError('RAdam does not support sparse gradients')
43 |
44 | p_data_fp32 = p.data.float()
45 |
46 | state = self.state[p]
47 |
48 | if len(state) == 0:
49 | state['step'] = 0
50 | state['exp_avg'] = torch.zeros_like(p_data_fp32)
51 | state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)
52 | else:
53 | state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)
54 | state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)
55 |
56 | exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
57 | beta1, beta2 = group['betas']
58 |
59 |                 exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
60 |                 exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
61 |
62 | state['step'] += 1
63 | buffered = group['buffer'][int(state['step'] % 10)]
64 | if state['step'] == buffered[0]:
65 | N_sma, step_size = buffered[1], buffered[2]
66 | else:
67 | buffered[0] = state['step']
68 | beta2_t = beta2 ** state['step']
69 | N_sma_max = 2 / (1 - beta2) - 1
70 | N_sma = N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t)
71 | buffered[1] = N_sma
72 |
73 | # more conservative since it's an approximated value
74 | if N_sma >= 5:
75 | step_size = math.sqrt((1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) * (N_sma - 2) / N_sma * N_sma_max / (N_sma_max - 2)) / (1 - beta1 ** state['step'])
76 | elif self.degenerated_to_sgd:
77 | step_size = 1.0 / (1 - beta1 ** state['step'])
78 | else:
79 | step_size = -1
80 | buffered[2] = step_size
81 |
82 | # more conservative since it's an approximated value
83 | if N_sma >= 5:
84 | if group['weight_decay'] != 0:
85 |                         p_data_fp32.add_(p_data_fp32, alpha=-group['weight_decay'] * group['lr'])
86 | denom = exp_avg_sq.sqrt().add_(group['eps'])
87 |                     p_data_fp32.addcdiv_(exp_avg, denom, value=-step_size * group['lr'])
88 | p.data.copy_(p_data_fp32)
89 | elif step_size > 0:
90 | if group['weight_decay'] != 0:
91 |                         p_data_fp32.add_(p_data_fp32, alpha=-group['weight_decay'] * group['lr'])
92 |                     p_data_fp32.add_(exp_avg, alpha=-step_size * group['lr'])
93 | p.data.copy_(p_data_fp32)
94 |
95 | return loss
96 |
97 | class PlainRAdam(Optimizer):
98 |
99 | def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, degenerated_to_sgd=True):
100 | if not 0.0 <= lr:
101 | raise ValueError("Invalid learning rate: {}".format(lr))
102 | if not 0.0 <= eps:
103 | raise ValueError("Invalid epsilon value: {}".format(eps))
104 | if not 0.0 <= betas[0] < 1.0:
105 | raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
106 | if not 0.0 <= betas[1] < 1.0:
107 | raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
108 |
109 | self.degenerated_to_sgd = degenerated_to_sgd
110 | defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
111 |
112 | super(PlainRAdam, self).__init__(params, defaults)
113 |
114 | def __setstate__(self, state):
115 | super(PlainRAdam, self).__setstate__(state)
116 |
117 | def step(self, closure=None):
118 |
119 | loss = None
120 | if closure is not None:
121 | loss = closure()
122 |
123 | for group in self.param_groups:
124 |
125 | for p in group['params']:
126 | if p.grad is None:
127 | continue
128 | grad = p.grad.data.float()
129 | if grad.is_sparse:
130 | raise RuntimeError('RAdam does not support sparse gradients')
131 |
132 | p_data_fp32 = p.data.float()
133 |
134 | state = self.state[p]
135 |
136 | if len(state) == 0:
137 | state['step'] = 0
138 | state['exp_avg'] = torch.zeros_like(p_data_fp32)
139 | state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)
140 | else:
141 | state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)
142 | state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)
143 |
144 | exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
145 | beta1, beta2 = group['betas']
146 |
147 |                 exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
148 |                 exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
149 |
150 | state['step'] += 1
151 | beta2_t = beta2 ** state['step']
152 | N_sma_max = 2 / (1 - beta2) - 1
153 | N_sma = N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t)
154 |
155 |
156 | # more conservative since it's an approximated value
157 | if N_sma >= 5:
158 | if group['weight_decay'] != 0:
159 |                         p_data_fp32.add_(p_data_fp32, alpha=-group['weight_decay'] * group['lr'])
160 | step_size = group['lr'] * math.sqrt((1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) * (N_sma - 2) / N_sma * N_sma_max / (N_sma_max - 2)) / (1 - beta1 ** state['step'])
161 | denom = exp_avg_sq.sqrt().add_(group['eps'])
162 |                     p_data_fp32.addcdiv_(exp_avg, denom, value=-step_size)
163 | p.data.copy_(p_data_fp32)
164 | elif self.degenerated_to_sgd:
165 | if group['weight_decay'] != 0:
166 |                         p_data_fp32.add_(p_data_fp32, alpha=-group['weight_decay'] * group['lr'])
167 | step_size = group['lr'] / (1 - beta1 ** state['step'])
168 |                     p_data_fp32.add_(exp_avg, alpha=-step_size)
169 | p.data.copy_(p_data_fp32)
170 |
171 | return loss
172 |
173 |
174 | class AdamW(Optimizer):
175 |
176 | def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, warmup = 0):
177 | if not 0.0 <= lr:
178 | raise ValueError("Invalid learning rate: {}".format(lr))
179 | if not 0.0 <= eps:
180 | raise ValueError("Invalid epsilon value: {}".format(eps))
181 | if not 0.0 <= betas[0] < 1.0:
182 | raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
183 | if not 0.0 <= betas[1] < 1.0:
184 | raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
185 |
186 | defaults = dict(lr=lr, betas=betas, eps=eps,
187 | weight_decay=weight_decay, warmup = warmup)
188 | super(AdamW, self).__init__(params, defaults)
189 |
190 | def __setstate__(self, state):
191 | super(AdamW, self).__setstate__(state)
192 |
193 | def step(self, closure=None):
194 | loss = None
195 | if closure is not None:
196 | loss = closure()
197 |
198 | for group in self.param_groups:
199 |
200 | for p in group['params']:
201 | if p.grad is None:
202 | continue
203 | grad = p.grad.data.float()
204 | if grad.is_sparse:
205 | raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
206 |
207 | p_data_fp32 = p.data.float()
208 |
209 | state = self.state[p]
210 |
211 | if len(state) == 0:
212 | state['step'] = 0
213 | state['exp_avg'] = torch.zeros_like(p_data_fp32)
214 | state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)
215 | else:
216 | state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)
217 | state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)
218 |
219 | exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
220 | beta1, beta2 = group['betas']
221 |
222 | state['step'] += 1
223 |
224 |                 exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
225 |                 exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
226 |
227 | denom = exp_avg_sq.sqrt().add_(group['eps'])
228 | bias_correction1 = 1 - beta1 ** state['step']
229 | bias_correction2 = 1 - beta2 ** state['step']
230 |
231 | if group['warmup'] > state['step']:
232 | scheduled_lr = 1e-8 + state['step'] * group['lr'] / group['warmup']
233 | else:
234 | scheduled_lr = group['lr']
235 |
236 | step_size = scheduled_lr * math.sqrt(bias_correction2) / bias_correction1
237 |
238 | if group['weight_decay'] != 0:
239 |                     p_data_fp32.add_(p_data_fp32, alpha=-group['weight_decay'] * scheduled_lr)
240 |
241 |                 p_data_fp32.addcdiv_(exp_avg, denom, value=-step_size)
242 |
243 | p.data.copy_(p_data_fp32)
244 |
245 | return loss
246 |
--------------------------------------------------------------------------------
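Both optimizers above follow the standard `torch.optim.Optimizer` interface, so they can be dropped in wherever `torch.optim.Adam` is used. A minimal usage sketch; the toy model and hyperparameter values are hypothetical:

```python
import torch
import torch.nn as nn
from utils.radam import RAdam, AdamW

model = nn.Linear(16, 4)  # hypothetical toy model
optimizer = RAdam(model.parameters(), lr=1e-3, betas=(0.9, 0.999), weight_decay=1e-4)
# AdamW in this file additionally supports a linear learning-rate warmup over the
# first `warmup` steps:
# optimizer = AdamW(model.parameters(), lr=1e-3, warmup=500)

x, y = torch.randn(8, 16), torch.randn(8, 4)
loss = nn.functional.mse_loss(model(x), y)
loss.backward()
optimizer.step()
optimizer.zero_grad()
```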
/utils/util.py:
--------------------------------------------------------------------------------
1 | import os
2 | import cv2
3 | import torch
4 | import shutil
5 | import logging
6 | import datetime
7 | import numpy as np
8 | import torch.nn as nn
9 | import torch.distributed as dist
10 |
11 | from collections import OrderedDict
12 | from utils.colormap import colormap
13 | from utils.radam import RAdam
14 |
15 |
16 | def reduce_tensor_dict(tensor_dict, world_size, mode='mean'):
17 |     """
18 |     Reduce every tensor in a dict across GPUs: mean or sum over all ranks.
19 |     """
20 | for key, tensor in tensor_dict.items():
21 | if tensor is not None:
22 | tensor_dict[key] = reduce_tensor(tensor, world_size, mode)
23 | return tensor_dict
24 |
25 |
26 | def reduce_tensor(tensor, world_size, mode='mean'):
27 |     """
28 |     Reduce a tensor across GPUs: all-reduce by sum, then divide by world_size when mode is 'mean'.
29 |     """
30 | rt = tensor.clone()
31 | dist.all_reduce(rt, op=dist.ReduceOp.SUM)
32 | if mode == 'mean':
33 | rt /= world_size
34 | elif mode == 'sum':
35 | pass
36 | else:
37 | raise NotImplementedError("reduce mode can only be 'mean' or 'sum'")
38 | return rt
39 |
40 |
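Since `dist.all_reduce` with `ReduceOp.SUM` leaves every rank holding the element-wise sum, the `'mean'` mode is simply that sum divided by the number of processes. A toy single-process illustration of the arithmetic (the per-rank values are made up; real use requires an initialized process group):

```python
import torch

# Pretend three ranks each computed these local losses:
per_rank = [torch.tensor(0.9), torch.tensor(1.2), torch.tensor(1.5)]
world_size = len(per_rank)

summed = torch.stack(per_rank).sum()  # what all_reduce(SUM) leaves on every rank
mean = summed / world_size            # mode='mean' in reduce_tensor
print(summed.item(), mean.item())     # 3.6 1.2
```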
41 | def get_logger(save_dir):
42 |     assert save_dir != ""
43 | exp_string = datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
44 | log_path = os.path.join(save_dir, exp_string+'.log')
45 |
46 | logger = logging.getLogger("LOGGING")
47 |     logger.setLevel(logging.DEBUG)
48 | formatter = logging.Formatter("%(asctime)s-%(filename)s:%(lineno)d-%(levelname)s-%(message)s")
49 |
50 | # log file stream
51 | handler = logging.FileHandler(log_path)
52 | handler.setLevel(logging.DEBUG)
53 | handler.setFormatter(formatter)
54 |
55 | # log console stream
56 | console = logging.StreamHandler()
57 | console.setLevel(logging.INFO)
58 | console.setFormatter(formatter)
59 |
60 | logger.addHandler(handler)
61 | logger.addHandler(console)
62 |
63 | return logger
64 |
65 |
66 | def get_optimizers(conf, model):
67 | nets = ['encoder', 'decoder']
68 | # if conf.model.arch.use_attention:
69 | # nets += ['attention']
70 |
71 | optimizers = []
72 | for net in nets:
73 | parameters = getattr(model.module, net).parameters()
74 | if conf.train.optim == "Adam":
75 | optimizer = torch.optim.Adam(parameters, conf.train.lr,
76 | betas=(conf.train.beta1, conf.train.beta2), eps=conf.train.eps,
77 | weight_decay=conf.train.weight_decay)
78 | elif conf.train.optim == "RAdam":
79 | optimizer = RAdam(parameters, conf.train.lr,
80 | betas=(conf.train.beta1, conf.train.beta2), eps=1e-8,
81 | weight_decay=conf.train.weight_decay)
82 | else:
83 | raise NotImplementedError
84 | optimizers.append((net, optimizer))
85 | return optimizers
86 |
87 |
88 | def adjust_learning_rate(conf, optimizers, epoch):
89 | for submodule, optimizer in optimizers:
90 | for param_group in optimizer.param_groups:
91 | if epoch < conf.train.warmup_step:
92 | lr = (epoch+1) / conf.train.warmup_step * conf.train.lr
93 | else:
94 | lr = max(conf.train.lr * (0.1 ** (epoch // conf.train.decay_step)), conf.train.min_lr)
95 | param_group['lr'] = lr
96 |
97 |
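The schedule above is a linear warmup followed by step decay with a floor at `min_lr`. A small sketch that evaluates the same formula for a few epochs; the config values (`warmup_step=5`, `decay_step=20`, etc.) are hypothetical defaults, not the repository's settings:

```python
def lr_at(epoch, lr=1e-3, warmup_step=5, decay_step=20, min_lr=1e-6):
    # Same formula as adjust_learning_rate, with hypothetical defaults.
    if epoch < warmup_step:
        return (epoch + 1) / warmup_step * lr
    return max(lr * (0.1 ** (epoch // decay_step)), min_lr)

for e in [0, 4, 20, 40, 100]:
    print(e, lr_at(e))
# 0 -> 2e-4 (warmup), 4 -> 1e-3, 20 -> 1e-4, 40 -> 1e-5, 100 -> 1e-6 (floor)
```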
98 | def weight_init(m):
99 | if isinstance(m, nn.Conv2d):
100 | torch.nn.init.xavier_normal_(m.weight.data)
101 | elif isinstance(m, nn.BatchNorm2d):
102 | m.weight.data.fill_(1)
103 | m.bias.data.zero_()
104 |
105 |
106 | def copy_weight(dst_dict, src_dict, key):
107 | ws = src_dict[key]
108 | wd = dst_dict[key]
109 | if len(ws.shape) == 4:
110 |         cout1, cin1, kh, kw = ws.shape
111 |         cout2, cin2, kh, kw = wd.shape  # assumes matching kernel size
112 |         weight = torch.zeros((cout2, cin2, kh, kw)).float().to(ws.device)
113 |         weight[:cout1, :cin1] = ws  # zero-pad any extra output/input channels
114 | src_dict[key] = weight
115 | else:
116 | cout1, = ws.shape
117 | cout2, = wd.shape
118 | weight = torch.zeros((cout2,)).float().to(ws.device)
119 | weight[:cout1] = ws
120 | src_dict[key] = weight
121 | return src_dict
122 |
123 |
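`copy_weight` zero-pads a source weight to a larger destination shape, which is useful when loading a pretrained first conv layer into a model whose input has extra channels (e.g., an RGB conv reused for RGB-plus-trimap input). A toy illustration with made-up shapes, assuming the repository root is on `PYTHONPATH`:

```python
import torch
from utils.util import copy_weight

src = {'conv1.weight': torch.randn(64, 3, 7, 7)}  # pretrained 3-channel weights
dst = {'conv1.weight': torch.zeros(64, 4, 7, 7)}  # model expects a 4th channel

patched = copy_weight(dst, src, 'conv1.weight')
print(patched['conv1.weight'].shape)              # torch.Size([64, 4, 7, 7])
# The extra input channel is initialized to zero, leaving RGB behaviour unchanged.
```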
124 | def remove_mismatch_weight(dst_dict, src_dict):
125 | new_dict = OrderedDict()
126 | mismatched = []
127 |     for k, v in src_dict.items():
128 | if k in dst_dict and v.shape != dst_dict[k].shape:
129 | mismatched.append((k, v.shape, dst_dict[k].shape))
130 | continue
131 | new_dict[k] = v
132 | print(mismatched)
133 | return new_dict
134 |
135 |
136 | def zero_weight(dst_dict, src_dict, key):
137 | src_dict[key] = torch.zeros_like(dst_dict[key])
138 | return src_dict
139 |
140 |
141 | def remove_prefix(state_dict):
142 | new_state_dict = OrderedDict()
143 | for k, v in state_dict.items():
144 | new_k = '.'.join(k.split('.')[1:])
145 | new_state_dict[new_k] = v
146 | return new_state_dict
147 |
148 |
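`remove_prefix` drops the leading component of every key, which is how `module.`-prefixed checkpoints saved from `DataParallel`/`DistributedDataParallel` are made loadable by a bare model. A small illustration with a made-up state dict (dummy values stand in for tensors):

```python
from collections import OrderedDict
from utils.util import remove_prefix

ddp_state = OrderedDict([('module.encoder.conv1.weight', 0),
                         ('module.decoder.fc.bias', 1)])
print(list(remove_prefix(ddp_state).keys()))
# ['encoder.conv1.weight', 'decoder.fc.bias']
```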
149 | def save_checkpoint(save_dir, model, epoch, best_sad, logger, mname="latest", best=False):
150 | if not os.path.exists(save_dir):
151 | os.makedirs(save_dir)
152 | model_out_path = "{}/ckpt_{}.pth".format(save_dir, mname)
153 | torch.save({
154 | 'epoch': epoch + 1,
155 | 'state_dict': model.module.state_dict(),
156 | 'best_sad': best_sad
157 | }, model_out_path)
158 | if best:
159 | shutil.copyfile(model_out_path, os.path.join(save_dir, 'ckpt_best.pth'))
160 | logger.info("Checkpoint saved to {}".format(model_out_path))
161 |
162 |
163 | def log_time(batch_time, data_time):
164 | msg = '\n'
165 | msg += ('\tTime Batch {batch_time.val:.2f}({batch_time.avg:.2f}) '
166 | 'Data {data_time.val:.2f}({data_time.avg:.2f})').format(
167 | batch_time=batch_time, data_time=data_time)
168 | return msg
169 |
170 |
171 | def log_loss(loss_keys, losses):
172 | msg = ''
173 | for loss_key in loss_keys:
174 | msg += '\n\t{0:15s} {loss.val:.4f} ({loss.avg:.4f})'.format(loss_key, loss=losses[loss_key])
175 | return msg
176 |
177 |
178 | def log_dict(info_dict):
179 | msg = ''
180 | for key, val in info_dict.items():
181 | msg += '\n\t{0:s} = {1:.4f}'.format(key, val.item())
182 | return msg
183 |
184 |
185 | class AverageMeter(object):
186 | """Computes and stores the average and current value"""
187 | def __init__(self):
188 | self.reset()
189 |
190 | def reset(self):
191 | self.val = 0
192 | self.avg = 0
193 | self.sum = 0
194 | self.count = 0
195 |
196 | def update(self, val, n=1):
197 | self.val = val
198 | self.sum += val * n
199 | self.count += n
200 | self.avg = self.sum / self.count
201 |
--------------------------------------------------------------------------------
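A typical use of `AverageMeter` in a training loop: `update` receives the per-batch value together with the batch size, so `avg` stays a correct sample-weighted mean even when the last batch is smaller. The loss values and batch sizes below are hypothetical:

```python
from utils.util import AverageMeter

losses = AverageMeter()
for batch_loss, batch_size in [(0.8, 32), (0.6, 32), (0.5, 16)]:
    losses.update(batch_loss, n=batch_size)
print(losses.val, losses.avg)  # 0.5, (0.8*32 + 0.6*32 + 0.5*16) / 80 = 0.66
```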