├── cams.png
├── voc12
│   ├── cls_labels.npy
│   ├── make_cls_labels.py
│   ├── data.py
│   └── test.txt
├── tool
│   ├── visualization.py
│   ├── pyutils.py
│   ├── torchutils.py
│   └── imutils.py
├── network
│   ├── resnet38_cls_ser.py
│   ├── resnet38_cls.py
│   ├── vgg16_cls_ser.py
│   ├── vgg16_cls.py
│   ├── resnet38_aff.py
│   ├── vgg16d.py
│   ├── vgg16_aff.py
│   ├── vgg16_20M.prototxt
│   └── resnet38d.py
├── LICENSE
├── evaluation.py
├── infer_cls_ser.py
├── README.md
├── infer_aff.py
├── train_aff.py
└── train_cls_ser.py

--------------------------------------------------------------------------------
/cams.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YudeWang/SSENet-pytorch/HEAD/cams.png
--------------------------------------------------------------------------------
/voc12/cls_labels.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YudeWang/SSENet-pytorch/HEAD/voc12/cls_labels.npy
-------------------------------------------------------------------------------- /voc12/make_cls_labels.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import voc12.data 3 | import numpy as np 4 | 5 | if __name__ == '__main__': 6 | 7 | parser = argparse.ArgumentParser() 8 | parser.add_argument("--train_list", default='train_aug.txt', type=str) 9 | parser.add_argument("--val_list", default='val.txt', type=str) 10 | parser.add_argument("--out", default="cls_labels.npy", type=str) 11 | parser.add_argument("--voc12_root", required=True, type=str) 12 | args = parser.parse_args() 13 | 14 | img_name_list = voc12.data.load_img_name_list(args.train_list) 15 | img_name_list.extend(voc12.data.load_img_name_list(args.val_list)) 16 | label_list = voc12.data.load_image_label_list_from_xml(img_name_list, args.voc12_root) 17 | 18 | d = dict() 19 | for img_name, label in zip(img_name_list, label_list): 20 | d[img_name] = label 21 | 22 | np.save(args.out, d) -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2019 Hibercraft 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /network/resnet38_cls_ser.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import torch.nn.functional as F 4 | 5 | import network.resnet38d 6 | 7 | 8 | class Net(network.resnet38d.Net): 9 | def __init__(self): 10 | super().__init__() 11 | 12 | self.dropout7 = torch.nn.Dropout2d(0.5) 13 | 14 | self.fc8 = nn.Conv2d(4096, 20, 1, bias=False) 15 | torch.nn.init.xavier_uniform_(self.fc8.weight) 16 | 17 | self.not_training = [self.conv1a, self.b2, self.b2_1, self.b2_2] 18 | self.from_scratch_layers = [self.fc8] 19 | 20 | 21 | def forward(self, x): 22 | N, C, H, W = x.size() 23 | x = super().forward(x) 24 | x = self.dropout7(x) 25 | x = self.fc8(x) 26 | x = F.interpolate(x,(H,W),mode='bilinear') 27 | 28 | return x 29 | 30 | 31 | def get_parameter_groups(self): 32 | groups = ([], [], [], []) 33 | 34 | for m in self.modules(): 35 | 36 | if isinstance(m, nn.Conv2d): 37 | 38 | if m.weight.requires_grad: 39 | if m in self.from_scratch_layers: 40 | groups[2].append(m.weight) 41 | else: 42 | groups[0].append(m.weight) 43 | 44 | if m.bias is not None and m.bias.requires_grad: 45 | 46 | if m in self.from_scratch_layers: 47 | groups[3].append(m.bias) 48 | else: 49 | groups[1].append(m.bias) 50 | 51 | return groups 52 | -------------------------------------------------------------------------------- /network/resnet38_cls.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import torch.nn.functional as F 4 | 5 | import network.resnet38d 6 | 7 | 8 | class Net(network.resnet38d.Net): 9 | def __init__(self): 10 | super().__init__() 11 | 12 | self.dropout7 = torch.nn.Dropout2d(0.5) 13 | 14 | self.fc8 = nn.Conv2d(4096, 20, 1, bias=False) 15 | torch.nn.init.xavier_uniform_(self.fc8.weight) 16 | 17 | self.not_training = [self.conv1a, self.b2, self.b2_1, self.b2_2] 18 | self.from_scratch_layers = [self.fc8] 19 | 20 | 21 | def forward(self, x): 22 | x = super().forward(x) 23 | x = self.dropout7(x) 24 | 25 | x = F.avg_pool2d( 26 | x, kernel_size=(x.size(2), x.size(3)), padding=0) 27 | 28 | x = self.fc8(x) 29 | x = x.view(x.size(0), -1) 30 | 31 | return x 32 | 33 | def forward_cam(self, x): 34 | x = super().forward(x) 35 | 36 | x = F.conv2d(x, self.fc8.weight) 37 | x = F.relu(x) 38 | 39 | return x 40 | 41 | def get_parameter_groups(self): 42 | groups = ([], [], [], []) 43 | 44 | for m in self.modules(): 45 | 46 | if isinstance(m, nn.Conv2d): 47 | 48 | if m.weight.requires_grad: 49 | if m in self.from_scratch_layers: 50 | groups[2].append(m.weight) 51 | else: 52 | groups[0].append(m.weight) 53 | 54 | if m.bias is not None and m.bias.requires_grad: 55 | 56 | if m in self.from_scratch_layers: 57 | groups[3].append(m.bias) 58 | else: 59 | groups[1].append(m.bias) 60 | 61 | return groups -------------------------------------------------------------------------------- /network/vgg16_cls_ser.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import torch.nn.functional as F 4 | 5 | import network.vgg16d 6 | 7 | class Net(network.vgg16d.Net): 8 | 9 | def __init__(self): 10 | super(Net, self).__init__() 11 | 12 | self.drop7 = nn.Dropout2d(p=0.5) 13 | self.fc8 = nn.Conv2d(1024, 20, 1, bias=False) 14 | torch.nn.init.xavier_uniform_(self.fc8.weight) 15 | 16 | self.not_training = [self.conv1_1, self.conv1_2, 
17 |                              self.conv2_1, self.conv2_2]
18 |         self.from_scratch_layers = [self.fc8]
19 | 
20 |     def forward(self, x):
21 |         N, C, H, W = x.size()
22 |         x = super().forward(x)
23 |         x = self.drop7(x)
24 |         x = self.fc8(x)
25 |         x = F.interpolate(x,(H,W),mode='bilinear')
26 | 
27 |         return x
28 | 
29 |     def fix_bn(self):
30 |         self.bn8.eval()
31 |         self.bn8.weight.requires_grad = False
32 |         self.bn8.bias.requires_grad = False
33 | 
34 |     def get_parameter_groups(self):
35 |         groups = ([], [], [], [])
36 | 
37 |         for m in self.modules():
38 | 
39 |             if (isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)):
40 | 
41 |                 if m.weight is not None and m.weight.requires_grad:
42 |                     if m in self.from_scratch_layers:
43 |                         groups[2].append(m.weight)
44 |                     else:
45 |                         groups[0].append(m.weight)
46 | 
47 |                 if m.bias is not None and m.bias.requires_grad:
48 | 
49 |                     if m in self.from_scratch_layers:
50 |                         groups[3].append(m.bias)
51 |                     else:
52 |                         groups[1].append(m.bias)
53 | 
54 |         return groups
55 | 
--------------------------------------------------------------------------------
/network/vgg16_cls.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | import torch.nn.functional as F
4 | 
5 | import network.vgg16d
6 | 
7 | class Net(network.vgg16d.Net):
8 | 
9 |     def __init__(self):
10 |         super(Net, self).__init__()
11 | 
12 |         self.drop7 = nn.Dropout2d(p=0.5)
13 |         self.fc8 = nn.Conv2d(1024, 20, 1, bias=False)
14 |         torch.nn.init.xavier_uniform_(self.fc8.weight)
15 | 
16 |         self.not_training = [self.conv1_1, self.conv1_2,
17 |                              self.conv2_1, self.conv2_2]
18 |         self.from_scratch_layers = [self.fc8]
19 | 
20 |     def forward(self, x):
21 |         x = super().forward(x)
22 |         x = self.drop7(x)
23 | 
24 |         x = self.fc8(x)
25 | 
26 |         x = F.avg_pool2d(x, kernel_size=(x.size(2), x.size(3)), padding=0)
27 | 
28 |         x = x.view(-1, 20)
29 | 
30 |         return x
31 | 
32 |     def forward_cam(self, x):
33 |         x = super().forward(x)
34 |         x = self.fc8(x)
35 |         x = F.relu(x)
36 |         x = torch.sqrt(x)
37 |         return x
38 | 
39 |     def fix_bn(self):
40 |         self.bn8.eval()
41 |         self.bn8.weight.requires_grad = False
42 |         self.bn8.bias.requires_grad = False
43 | 
44 |     def get_parameter_groups(self):
45 |         groups = ([], [], [], [])
46 | 
47 |         for m in self.modules():
48 | 
49 |             if (isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)):
50 | 
51 |                 if m.weight is not None and m.weight.requires_grad:
52 |                     if m in self.from_scratch_layers:
53 |                         groups[2].append(m.weight)
54 |                     else:
55 |                         groups[0].append(m.weight)
56 | 
57 |                 if m.bias is not None and m.bias.requires_grad:
58 | 
59 |                     if m in self.from_scratch_layers:
60 |                         groups[3].append(m.bias)
61 |                     else:
62 |                         groups[1].append(m.bias)
63 | 
64 |         return groups
--------------------------------------------------------------------------------
/tool/visualization.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import torch
3 | import torch.nn.functional as F
4 | import cv2
5 | 
6 | import pydensecrf.densecrf as dcrf
7 | from pydensecrf.utils import unary_from_softmax
8 | 
9 | def color_pro(pro, img=None, mode='hwc'):
10 |     H, W = pro.shape
11 |     pro_255 = (pro*255).astype(np.uint8)
12 |     pro_255 = np.expand_dims(pro_255,axis=2)
13 |     color = cv2.applyColorMap(pro_255,cv2.COLORMAP_JET)
14 |     color = cv2.cvtColor(color, cv2.COLOR_BGR2RGB)
15 |     if img is not None:
16 |         rate = 0.5
17 |         if mode == 'hwc':
18 |             assert img.shape[0] == H and img.shape[1] == W
19 |             color = cv2.addWeighted(img,rate,color,1-rate,0)
20 |         elif mode == 'chw':
21 |             assert img.shape[1] == H and img.shape[2] == W
22 |             img = np.transpose(img,(1,2,0))
23 |             color = cv2.addWeighted(img,rate,color,1-rate,0)
24 |             color = np.transpose(color,(2,0,1))
25 |     else:
26 |         if mode == 'chw':
27 |             color = np.transpose(color,(2,0,1))
28 |     return color
29 | 
30 | def generate_vis(p, gt, img, func_label2color, threshold=0.1, norm=True):
31 |     # All the input should be numpy.array
32 |     # img should be 0-255 uint8
33 |     C, H, W = p.shape
34 | 
35 |     if norm:
36 |         prob = max_norm(p, 'numpy')
37 |     else:
38 |         prob = p
39 |     prob = prob * gt
40 |     prob[prob<=0] = 1e-7
41 |     if threshold is not None:
42 |         prob[0,:,:] = np.power(1-np.max(prob[1:,:,:],axis=0,keepdims=True), 4)
43 | 
44 |     CLS = ColorCLS(prob, func_label2color)
45 |     CAM = ColorCAM(prob, img)
46 | 
47 |     prob_crf = dense_crf(prob, img, n_classes=C, n_iters=1)
48 | 
49 |     CLS_crf = ColorCLS(prob_crf, func_label2color)
50 |     CAM_crf = ColorCAM(prob_crf, img)
51 | 
52 |     return CLS, CAM, CLS_crf, CAM_crf
53 | 
54 | def max_norm(p, version='torch', e=1e-5):
55 |     if version == 'torch':
56 |         if p.dim() == 3:
57 |             C, H, W = p.size()
58 |             max_v = torch.max(p.view(C,-1),dim=-1)[0].view(C,1,1)+e
59 |             p = F.relu(p-e, inplace=True)
60 |             p = p/max_v
61 |         elif p.dim() == 4:
62 |             N, C, H, W = p.size()
63 |             max_v = torch.max(p.view(N,C,-1),dim=-1)[0].view(N,C,1,1)+e
64 |             p = F.relu(p-e, inplace=True)
65 |             p = p/max_v
66 |     elif version == 'numpy' or version == 'np':
67 |         if p.ndim == 3:
68 |             C, H, W = p.shape
69 |             max_v = np.max(p,(1,2),keepdims=True)+e
70 |             p -= e
71 |             p[p<0] = 0
72 |             p = p/max_v
73 |         elif p.ndim == 4:
74 |             N, C, H, W = p.shape
75 |             max_v = np.max(p,(2,3),keepdims=True)+e
76 |             p -= e
77 |             p[p<0] = 0
78 |             p = p/max_v
79 |     return p
80 | 
81 | def ColorCAM(prob, img):
82 |     assert prob.ndim == 3
83 |     C, H, W = prob.shape
84 |     colorlist = []
85 |     for i in range(C):
86 |         colorlist.append(color_pro(prob[i,:,:],img=img,mode='chw'))
87 |     CAM = np.array(colorlist)/255.0
88 |     return CAM
89 | 
90 | def ColorCLS(prob, func_label2color):
91 |     assert prob.ndim == 3
92 |     prob_idx = np.argmax(prob, axis=0)
93 |     CLS = func_label2color(prob_idx).transpose((2,0,1))
94 |     return CLS
95 | 
96 | def VOClabel2colormap(label):
97 |     m = label.astype(np.uint8)
98 |     r,c = m.shape
99 |     cmap = np.zeros((r,c,3), dtype=np.uint8)
100 |     cmap[:,:,0] = (m&1)<<7 | (m&8)<<3
101 |     cmap[:,:,1] = (m&2)<<6 | (m&16)<<2
102 |     cmap[:,:,2] = (m&4)<<5
103 |     cmap[m==255] = [255,255,255]
104 |     return cmap
105 | 
106 | def dense_crf(probs, img=None, n_classes=21, n_iters=1, scale_factor=1):
107 |     c,h,w = probs.shape
108 | 
109 |     if img is not None:
110 |         assert(img.shape[1:3] == (h, w))
111 |         img = np.transpose(img,(1,2,0)).copy(order='C')
112 | 
113 |     d = dcrf.DenseCRF2D(w, h, n_classes) # Define DenseCRF model.
114 | 115 | unary = unary_from_softmax(probs) 116 | unary = np.ascontiguousarray(unary) 117 | d.setUnaryEnergy(unary) 118 | d.addPairwiseGaussian(sxy=3/scale_factor, compat=3) 119 | d.addPairwiseBilateral(sxy=80/scale_factor, srgb=13, rgbim=np.copy(img), compat=10) 120 | Q = d.inference(n_iters) 121 | 122 | preds = np.array(Q, dtype=np.float32).reshape((n_classes, h, w)) 123 | return preds 124 | -------------------------------------------------------------------------------- /network/resnet38_aff.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import torch.sparse as sparse 4 | import torch.nn.functional as F 5 | 6 | import network.resnet38d 7 | from tool import pyutils 8 | 9 | class Net(network.resnet38d.Net): 10 | def __init__(self): 11 | super(Net, self).__init__() 12 | 13 | self.f8_3 = torch.nn.Conv2d(512, 64, 1, bias=False) 14 | self.f8_4 = torch.nn.Conv2d(1024, 128, 1, bias=False) 15 | self.f8_5 = torch.nn.Conv2d(4096, 256, 1, bias=False) 16 | 17 | self.f9 = torch.nn.Conv2d(448, 448, 1, bias=False) 18 | 19 | torch.nn.init.kaiming_normal_(self.f8_3.weight) 20 | torch.nn.init.kaiming_normal_(self.f8_4.weight) 21 | torch.nn.init.kaiming_normal_(self.f8_5.weight) 22 | torch.nn.init.xavier_uniform_(self.f9.weight, gain=4) 23 | 24 | self.not_training = [self.conv1a, self.b2, self.b2_1, self.b2_2] 25 | 26 | self.from_scratch_layers = [self.f8_3, self.f8_4, self.f8_5, self.f9] 27 | 28 | self.predefined_featuresize = int(448//8) 29 | self.ind_from, self.ind_to = pyutils.get_indices_of_pairs(radius=5, size=(self.predefined_featuresize, self.predefined_featuresize)) 30 | self.ind_from = torch.from_numpy(self.ind_from); self.ind_to = torch.from_numpy(self.ind_to) 31 | 32 | return 33 | 34 | def forward(self, x, to_dense=False): 35 | 36 | d = super().forward_as_dict(x) 37 | 38 | f8_3 = F.elu(self.f8_3(d['conv4'])) 39 | f8_4 = F.elu(self.f8_4(d['conv5'])) 40 | f8_5 = F.elu(self.f8_5(d['conv6'])) 41 | x = F.elu(self.f9(torch.cat([f8_3, f8_4, f8_5], dim=1))) 42 | 43 | if x.size(2) == self.predefined_featuresize and x.size(3) == self.predefined_featuresize: 44 | ind_from = self.ind_from 45 | ind_to = self.ind_to 46 | else: 47 | ind_from, ind_to = pyutils.get_indices_of_pairs(5, (x.size(2), x.size(3))) 48 | ind_from = torch.from_numpy(ind_from); ind_to = torch.from_numpy(ind_to) 49 | 50 | x = x.view(x.size(0), x.size(1), -1) 51 | 52 | ff = torch.index_select(x, dim=2, index=ind_from.cuda(non_blocking=True)) 53 | ft = torch.index_select(x, dim=2, index=ind_to.cuda(non_blocking=True)) 54 | 55 | ff = torch.unsqueeze(ff, dim=2) 56 | ft = ft.view(ft.size(0), ft.size(1), -1, ff.size(3)) 57 | 58 | aff = torch.exp(-torch.mean(torch.abs(ft-ff), dim=1)) 59 | 60 | if to_dense: 61 | aff = aff.view(-1).cpu() 62 | 63 | ind_from_exp = torch.unsqueeze(ind_from, dim=0).expand(ft.size(2), -1).contiguous().view(-1) 64 | indices = torch.stack([ind_from_exp, ind_to]) 65 | indices_tp = torch.stack([ind_to, ind_from_exp]) 66 | 67 | area = x.size(2) 68 | indices_id = torch.stack([torch.arange(0, area).long(), torch.arange(0, area).long()]) 69 | 70 | aff_mat = sparse.FloatTensor(torch.cat([indices, indices_id, indices_tp], dim=1), 71 | torch.cat([aff, torch.ones([area]), aff])).to_dense().cuda() 72 | 73 | return aff_mat 74 | 75 | else: 76 | return aff 77 | 78 | 79 | def get_parameter_groups(self): 80 | groups = ([], [], [], []) 81 | 82 | for m in self.modules(): 83 | 84 | if (isinstance(m, nn.Conv2d) or isinstance(m, 
nn.modules.normalization.GroupNorm)): 85 | 86 | if m.weight.requires_grad: 87 | if m in self.from_scratch_layers: 88 | groups[2].append(m.weight) 89 | else: 90 | groups[0].append(m.weight) 91 | 92 | if m.bias is not None and m.bias.requires_grad: 93 | 94 | if m in self.from_scratch_layers: 95 | groups[3].append(m.bias) 96 | else: 97 | groups[1].append(m.bias) 98 | 99 | return groups 100 | 101 | 102 | 103 | -------------------------------------------------------------------------------- /network/vgg16d.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from torch import nn 3 | import numpy as np 4 | import torch.nn.functional as F 5 | 6 | class Normalize(): 7 | def __init__(self, mean = (122.675, 116.669, 104.008)): 8 | 9 | self.mean = mean 10 | 11 | def __call__(self, img): 12 | imgarr = np.asarray(img) 13 | proc_img = np.empty_like(imgarr, np.float32) 14 | 15 | proc_img[..., 0] = (imgarr[..., 2] - self.mean[2]) 16 | proc_img[..., 1] = (imgarr[..., 1] - self.mean[1]) 17 | proc_img[..., 2] = (imgarr[..., 0] - self.mean[0]) 18 | 19 | return proc_img 20 | 21 | class Net(nn.Module): 22 | def __init__(self, fc6_dilation = 1): 23 | super(Net, self).__init__() 24 | 25 | self.conv1_1 = nn.Conv2d(3,64,3,padding = 1) 26 | self.conv1_2 = nn.Conv2d(64,64,3,padding = 1) 27 | self.pool1 = nn.MaxPool2d(kernel_size = 3, stride = 2, padding=1) 28 | self.conv2_1 = nn.Conv2d(64,128,3,padding = 1) 29 | self.conv2_2 = nn.Conv2d(128,128,3,padding = 1) 30 | self.pool2 = nn.MaxPool2d(kernel_size = 3, stride = 2, padding=1) 31 | self.conv3_1 = nn.Conv2d(128,256,3,padding = 1) 32 | self.conv3_2 = nn.Conv2d(256,256,3,padding = 1) 33 | self.conv3_3 = nn.Conv2d(256,256,3,padding = 1) 34 | self.pool3 = nn.MaxPool2d(kernel_size = 3, stride = 2, padding=1) 35 | self.conv4_1 = nn.Conv2d(256,512,3,padding = 1) 36 | self.conv4_2 = nn.Conv2d(512,512,3,padding = 1) 37 | self.conv4_3 = nn.Conv2d(512,512,3,padding = 1) 38 | self.pool4 = nn.MaxPool2d(kernel_size = 3, stride = 1, padding=1) 39 | self.conv5_1 = nn.Conv2d(512,512,3,padding = 2, dilation = 2) 40 | self.conv5_2 = nn.Conv2d(512,512,3,padding = 2, dilation = 2) 41 | self.conv5_3 = nn.Conv2d(512,512,3,padding = 2, dilation = 2) 42 | self.pool5 = nn.MaxPool2d(kernel_size = 3, stride = 1, padding=1) 43 | self.pool5a = nn.AvgPool2d(kernel_size = 3, stride = 1, padding=1) 44 | 45 | self.fc6 = nn.Conv2d(512,1024, 3, padding = fc6_dilation, dilation = fc6_dilation) 46 | 47 | self.drop6 = nn.Dropout2d(p=0.5) 48 | self.fc7 = nn.Conv2d(1024,1024,1) 49 | 50 | self.normalize = Normalize() 51 | 52 | return 53 | 54 | def forward(self, x): 55 | return self.forward_as_dict(x)['conv5fc'] 56 | 57 | def forward_as_dict(self, x): 58 | 59 | x = F.relu(self.conv1_1(x)) 60 | x = F.relu(self.conv1_2(x)) 61 | x = self.pool1(x) 62 | 63 | x = F.relu(self.conv2_1(x)) 64 | x = F.relu(self.conv2_2(x)) 65 | x = self.pool2(x) 66 | 67 | x = F.relu(self.conv3_1(x)) 68 | x = F.relu(self.conv3_2(x)) 69 | x = F.relu(self.conv3_3(x)) 70 | x = self.pool3(x) 71 | 72 | x = F.relu(self.conv4_1(x)) 73 | x = F.relu(self.conv4_2(x)) 74 | x = F.relu(self.conv4_3(x)) 75 | conv4 = x 76 | 77 | x = self.pool4(x) 78 | 79 | x = F.relu(self.conv5_1(x)) 80 | x = F.relu(self.conv5_2(x)) 81 | x = F.relu(self.conv5_3(x)) 82 | conv5 = x 83 | 84 | x = F.relu(self.fc6(x)) 85 | x = self.drop6(x) 86 | x = F.relu(self.fc7(x)) 87 | 88 | conv5fc = x 89 | 90 | return dict({'conv4': conv4, 'conv5': conv5, 'conv5fc': conv5fc}) 91 | 92 | def train(self, mode=True): 93 | 94 | 
super().train(mode) 95 | 96 | for layer in self.not_training: 97 | 98 | if isinstance(layer, torch.nn.Conv2d): 99 | 100 | layer.weight.requires_grad = False 101 | layer.bias.requires_grad = False 102 | 103 | def convert_caffe_to_torch(caffemodel_path, prototxt_path='network/vgg16_20M.prototxt'): 104 | import caffe 105 | 106 | caffe_model = caffe.Net(prototxt_path, caffemodel_path, caffe.TEST) 107 | 108 | dict = {} 109 | for caffe_name in list(caffe_model.params.keys()): 110 | dict[caffe_name + '.weight'] = torch.from_numpy(caffe_model.params[caffe_name][0].data) 111 | dict[caffe_name + '.bias'] = torch.from_numpy(caffe_model.params[caffe_name][1].data) 112 | 113 | return dict 114 | 115 | 116 | 117 | 118 | -------------------------------------------------------------------------------- /network/vgg16_aff.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import torch.sparse as sparse 4 | import torch.nn.functional as F 5 | from tool import pyutils 6 | 7 | import network.vgg16d 8 | 9 | class Net(network.vgg16d.Net): 10 | def __init__(self): 11 | super(Net, self).__init__(fc6_dilation=4) 12 | 13 | self.f8_3 = nn.Conv2d(512, 64, 1, bias=False) 14 | self.f8_4 = nn.Conv2d(512, 128, 1, bias=False) 15 | self.f8_5 = nn.Conv2d(1024, 256, 1, bias=False) 16 | self.gn8_3 = nn.modules.normalization.GroupNorm(8, 64) 17 | self.gn8_4 = nn.modules.normalization.GroupNorm(16, 128) 18 | self.gn8_5 = nn.modules.normalization.GroupNorm(32, 256) 19 | 20 | self.f9 = torch.nn.Conv2d(448, 448, 1, bias=False) 21 | 22 | torch.nn.init.kaiming_normal_(self.f8_3.weight) 23 | torch.nn.init.kaiming_normal_(self.f8_4.weight) 24 | torch.nn.init.kaiming_normal_(self.f8_5.weight) 25 | torch.nn.init.xavier_uniform_(self.f9.weight, gain=4) 26 | 27 | self.not_training = [self.conv1_1, self.conv1_2, self.conv2_1, self.conv2_2] 28 | self.from_scratch_layers = [self.f8_3, self.f8_4, self.f8_5, self.f9] 29 | 30 | self.predefined_featuresize = int(448//8) 31 | self.ind_from, self.ind_to = pyutils.get_indices_of_pairs(5, (self.predefined_featuresize, self.predefined_featuresize)) 32 | self.ind_from = torch.from_numpy(self.ind_from); self.ind_to = torch.from_numpy(self.ind_to) 33 | 34 | return 35 | 36 | 37 | def forward(self, x, to_dense=False): 38 | 39 | d = super().forward_as_dict(x) 40 | 41 | f8_3 = F.elu(self.gn8_3(self.f8_3(d['conv4']))) 42 | f8_4 = F.elu(self.gn8_4(self.f8_4(d['conv5']))) 43 | f8_5 = F.elu(self.gn8_5(self.f8_5(d['conv5fc']))) 44 | 45 | x = torch.cat([f8_3, f8_4, f8_5], dim=1) 46 | x = F.elu(self.f9(x)) 47 | 48 | if x.size(2) == self.predefined_featuresize and x.size(3) == self.predefined_featuresize: 49 | ind_from = self.ind_from 50 | ind_to = self.ind_to 51 | else: 52 | ind_from, ind_to = pyutils.get_indices_of_pairs(5, (x.size(2), x.size(3))) 53 | ind_from = torch.from_numpy(ind_from); ind_to = torch.from_numpy(ind_to) 54 | 55 | x = x.view(x.size(0), x.size(1), -1) 56 | 57 | ff = torch.index_select(x, dim=2, index=ind_from.cuda(non_blocking=True)) 58 | ft = torch.index_select(x, dim=2, index=ind_to.cuda(non_blocking=True)) 59 | 60 | ff = torch.unsqueeze(ff, dim=2) 61 | ft = ft.view(ft.size(0), ft.size(1), -1, ff.size(3)) 62 | 63 | aff = torch.exp(-torch.mean(torch.abs(ft-ff), dim=1)) 64 | 65 | if to_dense: 66 | aff = aff.view(-1).cpu() 67 | 68 | ind_from_exp = torch.unsqueeze(ind_from, dim=0).expand(ft.size(2), -1).contiguous().view(-1) 69 | indices = torch.stack([ind_from_exp, ind_to]) 70 | indices_tp = torch.stack([ind_to, 
ind_from_exp]) 71 | 72 | area = x.size(2) 73 | indices_id = torch.stack([torch.arange(0, area).long(), torch.arange(0, area).long()]) 74 | 75 | aff_mat = sparse.FloatTensor(torch.cat([indices, indices_id, indices_tp], dim=1), 76 | torch.cat([aff, torch.ones([area]), aff])).to_dense().cuda() 77 | return aff_mat 78 | 79 | else: 80 | return aff 81 | 82 | def get_parameter_groups(self): 83 | groups = ([], [], [], []) 84 | 85 | for m in self.modules(): 86 | 87 | if (isinstance(m, nn.Conv2d) or isinstance(m, nn.modules.normalization.GroupNorm)): 88 | 89 | if m.weight.requires_grad: 90 | if m in self.from_scratch_layers: 91 | groups[2].append(m.weight) 92 | else: 93 | groups[0].append(m.weight) 94 | 95 | if m.bias is not None and m.bias.requires_grad: 96 | 97 | if m in self.from_scratch_layers: 98 | groups[3].append(m.bias) 99 | else: 100 | groups[1].append(m.bias) 101 | 102 | return groups 103 | 104 | 105 | 106 | -------------------------------------------------------------------------------- /evaluation.py: -------------------------------------------------------------------------------- 1 | import os 2 | import pandas as pd 3 | import numpy as np 4 | from PIL import Image 5 | import multiprocessing 6 | import argparse 7 | 8 | categories = ['background','aeroplane','bicycle','bird','boat','bottle','bus','car','cat','chair','cow', 9 | 'diningtable','dog','horse','motorbike','person','pottedplant','sheep','sofa','train','tvmonitor'] 10 | def do_python_eval(predict_folder, gt_folder, name_list, num_cls=21): 11 | TP = [] 12 | P = [] 13 | T = [] 14 | for i in range(num_cls): 15 | TP.append(multiprocessing.Value('i', 0, lock=True)) 16 | P.append(multiprocessing.Value('i', 0, lock=True)) 17 | T.append(multiprocessing.Value('i', 0, lock=True)) 18 | 19 | def compare(start,step,TP,P,T): 20 | for idx in range(start,len(name_list),step): 21 | #print('%d/%d'%(idx,len(name_list))) 22 | name = name_list[idx] 23 | predict_file = os.path.join(predict_folder,'%s.png'%name) 24 | predict = np.array(Image.open(predict_file)) #cv2.imread(predict_file) 25 | 26 | gt_file = os.path.join(gt_folder,'%s.png'%name) 27 | gt = np.array(Image.open(gt_file)) 28 | cal = gt<255 29 | mask = (predict==gt) * cal 30 | 31 | for i in range(num_cls): 32 | P[i].acquire() 33 | P[i].value += np.sum((predict==i)*cal) 34 | P[i].release() 35 | T[i].acquire() 36 | T[i].value += np.sum((gt==i)*cal) 37 | T[i].release() 38 | TP[i].acquire() 39 | TP[i].value += np.sum((gt==i)*mask) 40 | TP[i].release() 41 | p_list = [] 42 | for i in range(8): 43 | p = multiprocessing.Process(target=compare, args=(i,8,TP,P,T)) 44 | p.start() 45 | p_list.append(p) 46 | for p in p_list: 47 | p.join() 48 | IoU = [] 49 | T_TP = [] 50 | P_TP = [] 51 | FP_ALL = [] 52 | FN_ALL = [] 53 | for i in range(num_cls): 54 | IoU.append(TP[i].value/(T[i].value+P[i].value-TP[i].value+1e-10)) 55 | T_TP.append(T[i].value/(TP[i].value+1e-10)) 56 | P_TP.append(P[i].value/(TP[i].value+1e-10)) 57 | FP_ALL.append((P[i].value-TP[i].value)/(T[i].value + P[i].value - TP[i].value + 1e-10)) 58 | FN_ALL.append((T[i].value-TP[i].value)/(T[i].value + P[i].value - TP[i].value + 1e-10)) 59 | loglist = {} 60 | for i in range(num_cls): 61 | if i%2 != 1: 62 | print('%11s:%7.3f%%'%(categories[i],IoU[i]*100),end='\t') 63 | else: 64 | print('%11s:%7.3f%%'%(categories[i],IoU[i]*100)) 65 | loglist[categories[i]] = IoU[i] * 100 66 | 67 | miou = np.mean(np.array(IoU)) 68 | t_tp = np.mean(np.array(T_TP)[1:]) 69 | p_tp = np.mean(np.array(P_TP)[1:]) 70 | fp_all = np.mean(np.array(FP_ALL)[1:]) 71 | fn_all = 
np.mean(np.array(FN_ALL)[1:]) 72 | miou_foreground = np.mean(np.array(IoU)[1:]) 73 | print('\n======================================================') 74 | print('%11s:%7.3f%%'%('mIoU',miou*100)) 75 | print('%11s:%7.3f'%('T/TP',t_tp)) 76 | print('%11s:%7.3f'%('P/TP',p_tp)) 77 | print('%11s:%7.3f'%('FP/ALL',fp_all)) 78 | print('%11s:%7.3f'%('FN/ALL',fn_all)) 79 | print('%11s:%7.3f'%('miou_foreground',miou_foreground)) 80 | loglist['mIoU'] = miou * 100 81 | loglist['t_tp'] = t_tp 82 | loglist['p_tp'] = p_tp 83 | loglist['fp_all'] = fp_all 84 | loglist['fn_all'] = fn_all 85 | loglist['miou_foreground'] = miou_foreground 86 | return loglist 87 | 88 | def writedict(file, dictionary): 89 | s = '' 90 | for key in dictionary.keys(): 91 | sub = '%s:%s '%(key, dictionary[key]) 92 | s += sub 93 | s += '\n' 94 | file.write(s) 95 | 96 | def writelog(filepath, metric, comment): 97 | filepath = filepath 98 | logfile = open(filepath,'a') 99 | import time 100 | logfile.write(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())) 101 | logfile.write('\t%s\n'%comment) 102 | writedict(logfile, metric) 103 | logfile.write('=====================================\n') 104 | logfile.close() 105 | 106 | 107 | if __name__ == '__main__': 108 | 109 | parser = argparse.ArgumentParser() 110 | parser.add_argument("--list", default='./VOC2012/ImageSets/Segmentation/val.txt', type=str) 111 | parser.add_argument("--predict_dir", default='./out_rw', type=str) 112 | parser.add_argument("--gt_dir", default='./VOC2012/SegmentationClass', type=str) 113 | parser.add_argument('--logfile', default='./evallog.txt',type=str) 114 | parser.add_argument('--comment', default='', type=str) 115 | args = parser.parse_args() 116 | 117 | df = pd.read_csv(args.list, names=['filename']) 118 | name_list = df['filename'].values 119 | loglist = do_python_eval(args.predict_dir, args.gt_dir, name_list, 21) 120 | writelog(args.logfile, loglist, args.comment) 121 | -------------------------------------------------------------------------------- /infer_cls_ser.py: -------------------------------------------------------------------------------- 1 | 2 | import numpy as np 3 | import torch 4 | from torch.backends import cudnn 5 | cudnn.enabled = True 6 | import voc12.data 7 | import scipy.misc 8 | import importlib 9 | from torch.utils.data import DataLoader 10 | import torchvision 11 | from tool import imutils, pyutils 12 | import argparse 13 | from PIL import Image 14 | import torch.nn.functional as F 15 | import os.path 16 | 17 | if __name__ == '__main__': 18 | 19 | parser = argparse.ArgumentParser() 20 | parser.add_argument("--weights", required=True, type=str) 21 | parser.add_argument("--network", default="network.resnet38_cls_ser", type=str) 22 | parser.add_argument("--infer_list", default="voc12/val.txt", type=str) 23 | parser.add_argument("--num_workers", default=8, type=int) 24 | parser.add_argument("--voc12_root", default='VOC2012', type=str) 25 | parser.add_argument("--out_cam", default=None, type=str) 26 | parser.add_argument("--out_crf", default=None, type=str) 27 | parser.add_argument("--out_cam_pred", default=None, type=str) 28 | 29 | args = parser.parse_args() 30 | crf_alpha = [4,24] 31 | model = getattr(importlib.import_module(args.network), 'Net')() 32 | model.load_state_dict(torch.load(args.weights)) 33 | 34 | model.eval() 35 | model.cuda() 36 | 37 | infer_dataset = voc12.data.VOC12ClsDatasetMSF(args.infer_list, voc12_root=args.voc12_root, 38 | scales=[1, 0.5, 1.5, 2.0], 39 | inter_transform=torchvision.transforms.Compose( 40 | 
[np.asarray,
41 |                                                     model.normalize,
42 |                                                     imutils.HWC_to_CHW]))
43 | 
44 |     infer_data_loader = DataLoader(infer_dataset, shuffle=False, num_workers=args.num_workers, pin_memory=True)
45 | 
46 |     n_gpus = torch.cuda.device_count()
47 |     model_replicas = torch.nn.parallel.replicate(model, list(range(n_gpus)))
48 | 
49 |     for iter, (img_name, img_list, label) in enumerate(infer_data_loader):
50 |         img_name = img_name[0]; label = label[0]
51 | 
52 |         img_path = voc12.data.get_img_path(img_name, args.voc12_root)
53 |         orig_img = np.asarray(Image.open(img_path))
54 |         orig_img_size = orig_img.shape[:2]
55 | 
56 |         def _work(i, img):
57 |             with torch.no_grad():
58 |                 with torch.cuda.device(i%n_gpus):
59 |                     cam = model_replicas[i%n_gpus](img.cuda())
60 |                     cam = F.relu(cam, inplace=True)
61 |                     cam = F.interpolate(cam, orig_img_size, mode='bilinear', align_corners=False)[0]
62 |                     cam = cam.cpu().numpy() * label.clone().view(20, 1, 1).numpy()
63 |                     if i % 2 == 1:
64 |                         cam = np.flip(cam, axis=-1)
65 |                     return cam
66 | 
67 |         thread_pool = pyutils.BatchThreader(_work, list(enumerate(img_list)),
68 |                                             batch_size=12, prefetch_size=0, processes=args.num_workers)
69 | 
70 |         cam_list = thread_pool.pop_results()
71 | 
72 |         sum_cam = np.sum(cam_list, axis=0)
73 |         norm_cam = sum_cam / (np.max(sum_cam, (1, 2), keepdims=True) + 1e-5)
74 | 
75 |         cam_dict = {}
76 |         for i in range(20):
77 |             if label[i] > 1e-5:
78 |                 cam_dict[i] = norm_cam[i]
79 | 
80 |         if args.out_cam is not None:
81 |             if not os.path.exists(args.out_cam):
82 |                 os.makedirs(args.out_cam)
83 |             np.save(os.path.join(args.out_cam, img_name + '.npy'), cam_dict)
84 | 
85 |         if args.out_cam_pred is not None:
86 |             if not os.path.exists(args.out_cam_pred):
87 |                 os.makedirs(args.out_cam_pred)
88 |             bg_score = [np.ones_like(norm_cam[0])*0.2]
89 |             pred = np.argmax(np.concatenate((bg_score, norm_cam)), 0)
90 |             scipy.misc.imsave(os.path.join(args.out_cam_pred, img_name + '.png'), pred.astype(np.uint8))
91 | 
92 |         def _crf_with_alpha(cam_dict, alpha):
93 |             v = np.array(list(cam_dict.values()))
94 |             bg_score = np.power(1 - np.max(v, axis=0, keepdims=True), alpha)
95 |             bgcam_score = np.concatenate((bg_score, v), axis=0)
96 |             crf_score = imutils.crf_inference(orig_img, bgcam_score, labels=bgcam_score.shape[0])
97 | 
98 |             n_crf_al = dict()
99 | 
100 |             n_crf_al[0] = crf_score[0]
101 |             for i, key in enumerate(cam_dict.keys()):
102 |                 n_crf_al[key+1] = crf_score[i+1]
103 | 
104 |             return n_crf_al
105 | 
106 |         if args.out_crf is not None:
107 |             for t in crf_alpha:
108 |                 crf = _crf_with_alpha(cam_dict, t)
109 |                 folder = args.out_crf + ('_%.1f'%t)
110 |                 if not os.path.exists(folder):
111 |                     os.makedirs(folder)
112 |                 np.save(os.path.join(folder, img_name + '.npy'), crf)
113 |         print(iter)
114 | 
115 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # SSENet-pytorch
2 | 
3 | **Update: The code is deprecated. Please see our latest work SEAM (https://github.com/YudeWang/SEAM)**
4 | 
5 | 
6 | ## Introduction
7 | ![CAMs visualization](https://github.com/YudeWang/SSENet-pytorch/blob/master/cams.png)
8 | As is well known, conventional CAM tends to be incomplete or over-activated due to weak supervision. Fortunately, we find that semantic segmentation has a characteristic of spatial transformation equivariance, which can provide a few self-supervision signals to help weakly supervised learning. This work mainly explores the advantages of scale-equivariant constraints for CAM generation, formulated as a self-supervised scale-equivariant network (SSENet). Extensive experiments on the PASCAL VOC 2012 dataset demonstrate that our method achieves outstanding performance compared with other state-of-the-art methods.
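To make the idea concrete, here is a rough sketch of a scale-equivariant consistency term (the function name, the L1 form of the penalty, and the 0.3 downsampling rate are illustrative assumptions based on the description and the result table below, not the exact SSENet loss):

```python
import torch
import torch.nn.functional as F

def scale_consistency_loss(model, img, rate=0.3):
    # CAM of the full-resolution image (forward_cam as defined in network/resnet38_cls.py).
    cam_full = model.forward_cam(img)
    # CAM of a downsampled branch of the same image.
    img_small = F.interpolate(img, scale_factor=rate, mode='bilinear', align_corners=False)
    cam_small = model.forward_cam(img_small)
    # Equivariance: downsampling the full-resolution CAM should match the CAM
    # computed from the downsampled image.
    cam_full_ds = F.interpolate(cam_full, size=cam_small.shape[2:],
                                mode='bilinear', align_corners=False)
    return torch.mean(torch.abs(cam_small - cam_full_ds))
```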
9 | 
10 | Thanks to [jiwoon-ahn](https://github.com/jiwoon-ahn), the code of this repository borrows heavily from his [AffinityNet](https://github.com/jiwoon-ahn/psa) project, and we follow the same pipeline to verify the effectiveness of our SSENet.
11 | 
12 | ## Dependency
13 | 
14 | - This repo is tested on Ubuntu 16.04, with python 3.6, pytorch 0.4, torchvision 0.2.1, CUDA 9.0, 4x GPUs (NVIDIA TITAN XP 12GB)
15 | - Please install [tensorboardX](https://github.com/lanpa/tensorboardX) for training visualization.
16 | - The dataset we use is PASCAL VOC 2012; please download the VOC [development kit](http://host.robots.ox.ac.uk/pascal/VOC/voc2012/). It is suggested to make a soft link toward the downloaded dataset.
17 | ```
18 | ln -s $your_dataset_path/VOCdevkit/VOC2012 $your_voc12_root
19 | ```
20 | - (Optional) The image-level labels have already been given in `voc12/cls_labels.npy`. If you want to regenerate them (which is unnecessary), please download the annotations of the VOC 2012 SegmentationClassAug training set (containing 10582 images), which can be downloaded [here](https://www.dropbox.com/s/oeu149j8qtbs1x0/SegmentationClassAug.zip?dl=0), and place them all as `$your_voc12_root/SegmentationClassAug/xxxxxx.png`. Then run the code
21 | ```
22 | cd voc12
23 | python make_cls_labels.py --voc12_root $your_voc12_root
24 | ```
25 | - (Optional) If you want to train the network by yourself, here are the ImageNet-pretrained models for VGG16 ([vgg16_20M.caffemodel](http://liangchiehchen.com/projects/Init%20Models.html)) and ResNet38 ([ilsvrc-cls_rna-a1_cls1000_ep-0001.params](https://github.com/itijyou/ademxapp)). Note that our SSENet is **only tested on ResNet38**.
26 | 
27 | 
28 | ## Usage
29 | ### CAM generation step
30 | 
31 | 1. SSENet training
32 | ```
33 | python train_cls_ser.py --voc12_root $your_voc12_root --weights $your_weights_file --session_name $your_session_name
34 | ```
35 | 
36 | 2. SSENet inference. Note that the CRF results will be saved in `$your_crf_dir`+`_4.0` and `$your_crf_dir`+`_24.0`, where the parameters can be modified in `infer_cls_ser.py`. These two folders will be used again in the following AffinityNet training step.
37 | 
38 | ```
39 | python infer_cls_ser.py --weights $your_SSENet_checkpoint --infer_list [voc12/val.txt | voc12/train.txt | voc12/train_aug.txt] --out_cam $your_cam_dir --out_crf $your_crf_dir --out_cam_pred $your_pred_dir
40 | ```
41 | 
42 | 3. CAM step evaluation. We provide the Python mIoU evaluation script `evaluation.py`, or you can use the official development kit.
43 | ```
44 | python evaluation.py --list $your_voc12_root/ImageSets/Segmentation/[val.txt | train.txt] --predict_dir $your_pred_dir --gt_dir $your_voc12_root/SegmentationClass
45 | ```
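Each file written by `--out_cam` is a per-image dict mapping an image-level class index to a normalized HxW CAM array (see `infer_cls_ser.py` above). A minimal sketch for loading one (the directory and image name are illustrative):

```python
import numpy as np

# Works with the NumPy version this repo targets; newer NumPy additionally
# needs np.load(..., allow_pickle=True) for pickled object arrays.
cam_dict = np.load('your_cam_dir/2007_000032.npy').item()
for cls_idx, cam in cam_dict.items():
    # cls_idx is 0-19 (VOC classes without background); cam values lie in [0, 1].
    print(cls_idx, cam.shape, cam.max())
```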
46 | ### Random walk step
47 | The random walk step is kept the same as in the AffinityNet project.
48 | 1. Train AffinityNet.
49 | ```
50 | python train_aff.py --weights $your_weights_file --voc12_root $your_voc12_root --la_crf_dir $your_crf_dir_4.0 --ha_crf_dir $your_crf_dir_24.0 --session_name $your_session_name
51 | ```
52 | 2. Random walk propagation
53 | ```
54 | python infer_aff.py --weights $your_weights_file --infer_list [voc12/val.txt | voc12/train.txt] --cam_dir $your_cam_dir --voc12_root $your_voc12_root --out_rw $your_rw_dir
55 | ```
56 | 3. Random walk step evaluation
57 | ```
58 | python evaluation.py --list $your_voc12_root/ImageSets/Segmentation/[val.txt | train.txt] --predict_dir $your_rw_dir --gt_dir $your_voc12_root/SegmentationClass
59 | ```
60 | 
61 | ## Results
62 | 
63 | The generated pseudo labels are evaluated on the PASCAL VOC 2012 train set.
64 | 
65 | | Model | CAM step (mIoU) | CAM+rw step (mIoU) | Note |
66 | |:----:|:---------------:|:------------------:|:----------:|
67 | | ResNet38 | 48.0 | 58.1 | AffinityNet CVPR submission [1] |
68 | | ResNet38 | 47.3 | 58.8 | reimplemented baseline |
69 | | SSENet-ResNet38 | 49.8 | 62.1 | branch downsampling rate = 0.3 ([weights](https://drive.google.com/open?id=12CZil1LV8iq3Clj-xZCQlUhUWG1wOEt3)) |
70 | 
71 | ## Citation
72 | We encourage you to cite our latest work [**SEAM**](https://github.com/YudeWang/SEAM), which was accepted at CVPR 2020.
73 | ```
74 | @InProceedings{Wang_2020_CVPR_SEAM,
75 |     author = {Yude Wang and Jie Zhang and Meina Kan and Shiguang Shan and Xilin Chen},
76 |     title = {Self-supervised Equivariant Attention Mechanism for Weakly Supervised Semantic Segmentation},
77 |     booktitle = {Proc. IEEE Conference on Computer Vision and Pattern Recognition (CVPR)},
78 |     year = {2020}
79 | }
80 | ```
81 | 
82 | ## Reference
83 | [1] J. Ahn and S. Kwak. Learning pixel-level semantic affinity with image-level supervision for weakly supervised semantic segmentation. In Proc. IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2018.
84 | 
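For intuition, the random-walk propagation performed by `infer_aff.py` below reduces to a few matrix operations. A minimal self-contained sketch, with a random affinity matrix standing in for the network output and `beta`/`logt` mirroring the script's flags:

```python
import torch

area, beta, logt = 16, 8, 8
aff = torch.rand(area, area).pow(beta)             # sharpened affinities: a_ij ** beta
trans = aff / torch.sum(aff, dim=0, keepdim=True)  # column-normalized transition matrix
for _ in range(logt):
    trans = trans @ trans                          # repeated squaring: overall T ** (2 ** logt)
cam_vec = torch.rand(21, area)                     # flattened 21-class CAM scores
cam_rw = cam_vec @ trans                           # CAM mass diffused along learned affinities
```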
default="voc12/val.txt", type=str) 56 | parser.add_argument("--num_workers", default=8, type=int) 57 | parser.add_argument("--cam_dir", required=True, type=str) 58 | parser.add_argument("--voc12_root", default='VOC2012', type=str) 59 | parser.add_argument("--out_rw", default='out_rw', type=str) 60 | parser.add_argument("--beta", default=8, type=int) 61 | parser.add_argument("--logt", default=8, type=int) 62 | parser.add_argument("--crf", default=False, type=bool) 63 | 64 | args = parser.parse_args() 65 | if not os.path.exists(args.out_rw): 66 | os.makedirs(args.out_rw) 67 | 68 | model = getattr(importlib.import_module(args.network), 'Net')() 69 | 70 | model.load_state_dict(torch.load(args.weights), strict=False) 71 | 72 | model.eval() 73 | model.cuda() 74 | 75 | infer_dataset = voc12.data.VOC12ImageDataset(args.infer_list, voc12_root=args.voc12_root, 76 | transform=torchvision.transforms.Compose( 77 | [np.asarray, 78 | model.normalize, 79 | imutils.HWC_to_CHW])) 80 | infer_data_loader = DataLoader(infer_dataset, shuffle=False, num_workers=args.num_workers, pin_memory=True) 81 | 82 | for iter, (name, img) in enumerate(infer_data_loader): 83 | 84 | name = name[0] 85 | print(iter) 86 | 87 | orig_shape = img.shape 88 | padded_size = (int(np.ceil(img.shape[2]/8)*8), int(np.ceil(img.shape[3]/8)*8)) 89 | 90 | p2d = (0, padded_size[1] - img.shape[3], 0, padded_size[0] - img.shape[2]) 91 | img = F.pad(img, p2d) 92 | 93 | dheight = int(np.ceil(img.shape[2]/8)) 94 | dwidth = int(np.ceil(img.shape[3]/8)) 95 | 96 | cam = np.load(os.path.join(args.cam_dir, name + '.npy')).item() 97 | 98 | cam_full_arr = np.zeros((21, orig_shape[2], orig_shape[3]), np.float32) 99 | for k, v in cam.items(): 100 | cam_full_arr[k+1] = v 101 | cam_full_arr[0] = 0.2 102 | cam_full_arr = np.pad(cam_full_arr, ((0, 0), (0, p2d[3]), (0, p2d[1])), mode='constant') 103 | 104 | with torch.no_grad(): 105 | aff_mat = torch.pow(model.forward(img.cuda(), True), args.beta) 106 | 107 | trans_mat = aff_mat / torch.sum(aff_mat, dim=0, keepdim=True) 108 | for _ in range(args.logt): 109 | trans_mat = torch.matmul(trans_mat, trans_mat) 110 | 111 | cam_full_arr = torch.from_numpy(cam_full_arr) 112 | cam_full_arr = F.avg_pool2d(cam_full_arr, 8, 8) 113 | 114 | cam_vec = cam_full_arr.view(21, -1) 115 | 116 | cam_rw = torch.matmul(cam_vec.cuda(), trans_mat) 117 | cam_rw = cam_rw.view(1, 21, dheight, dwidth) 118 | 119 | cam_rw = torch.nn.Upsample((img.shape[2], img.shape[3]), mode='bilinear')(cam_rw) 120 | 121 | if args.crf: 122 | img_8 = img[0].numpy().transpose((1,2,0)) 123 | img_8 = np.ascontiguousarray(img_8) 124 | mean = (0.485, 0.456, 0.406) 125 | std = (0.229, 0.224, 0.225) 126 | img_8[:,:,0] = (img_8[:,:,0]*std[0] + mean[0])*255 127 | img_8[:,:,1] = (img_8[:,:,1]*std[1] + mean[1])*255 128 | img_8[:,:,2] = (img_8[:,:,2]*std[2] + mean[2])*255 129 | img_8[img_8 > 255] = 255 130 | img_8[img_8 < 0] = 0 131 | img_8 = img_8.astype(np.uint8) 132 | cam_rw = cam_rw[0].cpu().numpy() 133 | cam_rw = imutils.crf_inference(img_8, cam_rw, t=1) 134 | cam_rw = torch.from_numpy(cam_rw).view(1, 21, img.shape[2], img.shape[3]).cuda() 135 | 136 | 137 | _, cam_rw_pred = torch.max(cam_rw, 1) 138 | 139 | res = np.uint8(cam_rw_pred.cpu().data[0])[:orig_shape[2], :orig_shape[3]] 140 | 141 | scipy.misc.imsave(os.path.join(args.out_rw, name + '.png'), res) 142 | -------------------------------------------------------------------------------- /tool/pyutils.py: -------------------------------------------------------------------------------- 1 | 2 | import numpy as np 3 | 
import time 4 | import sys 5 | 6 | class Logger(object): 7 | def __init__(self, outfile): 8 | self.terminal = sys.stdout 9 | self.log = open(outfile, "w") 10 | sys.stdout = self 11 | 12 | def write(self, message): 13 | self.terminal.write(message) 14 | self.log.write(message) 15 | 16 | def flush(self): 17 | self.terminal.flush() 18 | 19 | 20 | class AverageMeter: 21 | def __init__(self, *keys): 22 | self.__data = dict() 23 | for k in keys: 24 | self.__data[k] = [0.0, 0] 25 | 26 | def add(self, dict): 27 | for k, v in dict.items(): 28 | self.__data[k][0] += v 29 | self.__data[k][1] += 1 30 | 31 | def get(self, *keys): 32 | if len(keys) == 1: 33 | return self.__data[keys[0]][0] / self.__data[keys[0]][1] 34 | else: 35 | v_list = [self.__data[k][0] / self.__data[k][1] for k in keys] 36 | return tuple(v_list) 37 | 38 | def pop(self, key=None): 39 | if key is None: 40 | for k in self.__data.keys(): 41 | self.__data[k] = [0.0, 0] 42 | else: 43 | v = self.get(key) 44 | self.__data[key] = [0.0, 0] 45 | return v 46 | 47 | 48 | class Timer: 49 | def __init__(self, starting_msg = None): 50 | self.start = time.time() 51 | self.stage_start = self.start 52 | 53 | if starting_msg is not None: 54 | print(starting_msg, time.ctime(time.time())) 55 | 56 | 57 | def update_progress(self, progress): 58 | self.elapsed = time.time() - self.start 59 | self.est_total = self.elapsed / progress 60 | self.est_remaining = self.est_total - self.elapsed 61 | self.est_finish = int(self.start + self.est_total) 62 | 63 | 64 | def str_est_finish(self): 65 | return str(time.ctime(self.est_finish)) 66 | 67 | def get_stage_elapsed(self): 68 | return time.time() - self.stage_start 69 | 70 | def reset_stage(self): 71 | self.stage_start = time.time() 72 | 73 | 74 | from multiprocessing.pool import ThreadPool 75 | 76 | class BatchThreader: 77 | 78 | def __init__(self, func, args_list, batch_size, prefetch_size=4, processes=12): 79 | self.batch_size = batch_size 80 | self.prefetch_size = prefetch_size 81 | 82 | self.pool = ThreadPool(processes=processes) 83 | self.async_result = [] 84 | 85 | self.func = func 86 | self.left_args_list = args_list 87 | self.n_tasks = len(args_list) 88 | 89 | # initial work 90 | self.__start_works(self.__get_n_pending_works()) 91 | 92 | 93 | def __start_works(self, times): 94 | for _ in range(times): 95 | args = self.left_args_list.pop(0) 96 | self.async_result.append( 97 | self.pool.apply_async(self.func, args)) 98 | 99 | 100 | def __get_n_pending_works(self): 101 | return min((self.prefetch_size + 1) * self.batch_size - len(self.async_result) 102 | , len(self.left_args_list)) 103 | 104 | 105 | 106 | def pop_results(self): 107 | 108 | n_inwork = len(self.async_result) 109 | 110 | n_fetch = min(n_inwork, self.batch_size) 111 | rtn = [self.async_result.pop(0).get() 112 | for _ in range(n_fetch)] 113 | 114 | to_fill = self.__get_n_pending_works() 115 | if to_fill == 0: 116 | self.pool.close() 117 | else: 118 | self.__start_works(to_fill) 119 | 120 | return rtn 121 | 122 | 123 | 124 | 125 | def get_indices_of_pairs(radius, size): 126 | 127 | search_dist = [] 128 | 129 | for x in range(1, radius): 130 | search_dist.append((0, x)) 131 | 132 | for y in range(1, radius): 133 | for x in range(-radius + 1, radius): 134 | if x * x + y * y < radius * radius: 135 | search_dist.append((y, x)) 136 | 137 | radius_floor = radius - 1 138 | 139 | full_indices = np.reshape(np.arange(0, size[0]*size[1], dtype=np.int64), 140 | (size[0], size[1])) 141 | 142 | cropped_height = size[0] - radius_floor 143 | cropped_width = 
size[1] - 2 * radius_floor 144 | 145 | indices_from = np.reshape(full_indices[:-radius_floor, radius_floor:-radius_floor], 146 | [-1]) 147 | 148 | indices_to_list = [] 149 | 150 | for dy, dx in search_dist: 151 | indices_to = full_indices[dy:dy + cropped_height, 152 | radius_floor + dx:radius_floor + dx + cropped_width] 153 | indices_to = np.reshape(indices_to, [-1]) 154 | 155 | indices_to_list.append(indices_to) 156 | 157 | concat_indices_to = np.concatenate(indices_to_list, axis=0) 158 | 159 | return indices_from, concat_indices_to 160 | 161 | def get_indices_of_pairs_circle(radius, size): 162 | 163 | search_dist = [] 164 | 165 | for y in range(-radius + 1, radius): 166 | for x in range(-radius + 1, radius): 167 | if x * x + y * y < radius * radius and x*x+y*y!=0: 168 | search_dist.append((y, x)) 169 | 170 | radius_floor = radius - 1 171 | 172 | full_indices = np.reshape(np.arange(0, size[0]*size[1], dtype=np.int64), 173 | (size[0], size[1])) 174 | 175 | cropped_height = size[0] - 2 * radius_floor 176 | cropped_width = size[1] - 2 * radius_floor 177 | 178 | indices_from = np.reshape(full_indices[radius_floor:-radius_floor, radius_floor:-radius_floor], 179 | [-1]) 180 | 181 | indices_to_list = [] 182 | 183 | for dy, dx in search_dist: 184 | indices_to = full_indices[radius_floor + dy : radius_floor + dy + cropped_height, 185 | radius_floor + dx : radius_floor + dx + cropped_width] 186 | indices_to = np.reshape(indices_to, [-1]) 187 | 188 | indices_to_list.append(indices_to) 189 | 190 | concat_indices_to = np.concatenate(indices_to_list, axis=0) 191 | 192 | return indices_from, concat_indices_to 193 | -------------------------------------------------------------------------------- /network/vgg16_20M.prototxt: -------------------------------------------------------------------------------- 1 | name: "VGG_ILSVRC_16_layers" 2 | input: "data" 3 | input_dim: 10 4 | input_dim: 3 5 | input_dim: 224 6 | input_dim: 224 7 | layer { 8 | name: "conv1_1" 9 | type: "Convolution" 10 | bottom: "data" 11 | top: "conv1_1a" 12 | convolution_param { 13 | num_output: 64 14 | pad: 1 15 | kernel_size: 3 16 | } 17 | } 18 | layer { 19 | name: "relu1_1" 20 | type: "ReLU" 21 | bottom: "conv1_1a" 22 | top: "conv1_1" 23 | } 24 | layer { 25 | name: "conv1_2" 26 | type: "Convolution" 27 | bottom: "conv1_1" 28 | top: "conv1_2" 29 | convolution_param { 30 | num_output: 64 31 | pad: 1 32 | kernel_size: 3 33 | } 34 | } 35 | layer { 36 | name: "relu1_2" 37 | type: "ReLU" 38 | bottom: "conv1_2" 39 | top: "conv1_2" 40 | } 41 | layer { 42 | name: "pool1" 43 | type: "Pooling" 44 | bottom: "conv1_2" 45 | top: "pool1" 46 | pooling_param { 47 | pool: MAX 48 | kernel_size: 2 49 | stride: 2 50 | } 51 | } 52 | layer { 53 | name: "conv2_1" 54 | type: "Convolution" 55 | bottom: "pool1" 56 | top: "conv2_1" 57 | convolution_param { 58 | num_output: 128 59 | pad: 1 60 | kernel_size: 3 61 | } 62 | } 63 | layer { 64 | name: "relu2_1" 65 | type: "ReLU" 66 | bottom: "conv2_1" 67 | top: "conv2_1" 68 | } 69 | layer { 70 | name: "conv2_2" 71 | type: "Convolution" 72 | bottom: "conv2_1" 73 | top: "conv2_2" 74 | convolution_param { 75 | num_output: 128 76 | pad: 1 77 | kernel_size: 3 78 | } 79 | } 80 | layer { 81 | name: "relu2_2" 82 | type: "ReLU" 83 | bottom: "conv2_2" 84 | top: "conv2_2" 85 | } 86 | layer { 87 | name: "pool2" 88 | type: "Pooling" 89 | bottom: "conv2_2" 90 | top: "pool2" 91 | pooling_param { 92 | pool: MAX 93 | kernel_size: 2 94 | stride: 2 95 | } 96 | } 97 | layer { 98 | name: "conv3_1" 99 | type: "Convolution" 100 | bottom: 
"pool2" 101 | top: "conv3_1" 102 | convolution_param { 103 | num_output: 256 104 | pad: 1 105 | kernel_size: 3 106 | } 107 | } 108 | layer { 109 | name: "relu3_1" 110 | type: "ReLU" 111 | bottom: "conv3_1" 112 | top: "conv3_1" 113 | } 114 | layer { 115 | name: "conv3_2" 116 | type: "Convolution" 117 | bottom: "conv3_1" 118 | top: "conv3_2" 119 | convolution_param { 120 | num_output: 256 121 | pad: 1 122 | kernel_size: 3 123 | } 124 | } 125 | layer { 126 | name: "relu3_2" 127 | type: "ReLU" 128 | bottom: "conv3_2" 129 | top: "conv3_2" 130 | } 131 | layer { 132 | name: "conv3_3" 133 | type: "Convolution" 134 | bottom: "conv3_2" 135 | top: "conv3_3" 136 | convolution_param { 137 | num_output: 256 138 | pad: 1 139 | kernel_size: 3 140 | } 141 | } 142 | layer { 143 | name: "relu3_3" 144 | type: "ReLU" 145 | bottom: "conv3_3" 146 | top: "conv3_3" 147 | } 148 | layer { 149 | name: "pool3" 150 | type: "Pooling" 151 | bottom: "conv3_3" 152 | top: "pool3" 153 | pooling_param { 154 | pool: MAX 155 | kernel_size: 2 156 | stride: 2 157 | } 158 | } 159 | layer { 160 | name: "conv4_1" 161 | type: "Convolution" 162 | bottom: "pool3" 163 | top: "conv4_1" 164 | convolution_param { 165 | num_output: 512 166 | pad: 1 167 | kernel_size: 3 168 | } 169 | } 170 | layer { 171 | name: "relu4_1" 172 | type: "ReLU" 173 | bottom: "conv4_1" 174 | top: "conv4_1" 175 | } 176 | layer { 177 | name: "conv4_2" 178 | type: "Convolution" 179 | bottom: "conv4_1" 180 | top: "conv4_2" 181 | convolution_param { 182 | num_output: 512 183 | pad: 1 184 | kernel_size: 3 185 | } 186 | } 187 | layer { 188 | name: "relu4_2" 189 | type: "ReLU" 190 | bottom: "conv4_2" 191 | top: "conv4_2" 192 | } 193 | layer { 194 | name: "conv4_3" 195 | type: "Convolution" 196 | bottom: "conv4_2" 197 | top: "conv4_3" 198 | convolution_param { 199 | num_output: 512 200 | pad: 1 201 | kernel_size: 3 202 | } 203 | } 204 | layer { 205 | name: "relu4_3" 206 | type: "ReLU" 207 | bottom: "conv4_3" 208 | top: "conv4_3" 209 | } 210 | layer { 211 | name: "pool4" 212 | type: "Pooling" 213 | bottom: "conv4_3" 214 | top: "pool4" 215 | pooling_param { 216 | pool: MAX 217 | kernel_size: 2 218 | stride: 2 219 | } 220 | } 221 | layer { 222 | name: "conv5_1" 223 | type: "Convolution" 224 | bottom: "pool4" 225 | top: "conv5_1" 226 | convolution_param { 227 | num_output: 512 228 | pad: 1 229 | kernel_size: 3 230 | } 231 | } 232 | layer { 233 | name: "relu5_1" 234 | type: "ReLU" 235 | bottom: "conv5_1" 236 | top: "conv5_1" 237 | } 238 | layer { 239 | name: "conv5_2" 240 | type: "Convolution" 241 | bottom: "conv5_1" 242 | top: "conv5_2" 243 | convolution_param { 244 | num_output: 512 245 | pad: 1 246 | kernel_size: 3 247 | } 248 | } 249 | layer { 250 | name: "relu5_2" 251 | type: "ReLU" 252 | bottom: "conv5_2" 253 | top: "conv5_2" 254 | } 255 | layer { 256 | name: "conv5_3" 257 | type: "Convolution" 258 | bottom: "conv5_2" 259 | top: "conv5_3" 260 | convolution_param { 261 | num_output: 512 262 | pad: 1 263 | kernel_size: 3 264 | } 265 | } 266 | layer { 267 | name: "relu5_3" 268 | type: "ReLU" 269 | bottom: "conv5_3" 270 | top: "conv5_3" 271 | } 272 | 273 | layer { 274 | bottom: "conv5_3" 275 | top: "pool5" 276 | name: "pool5" 277 | type: "Pooling" 278 | pooling_param { 279 | pool: MAX 280 | kernel_size: 3 281 | stride: 1 282 | pad: 1 283 | } 284 | } 285 | layer { 286 | bottom: "pool5" 287 | top: "pool5a" 288 | name: "pool5a" 289 | type: "Pooling" 290 | pooling_param { 291 | pool: AVE 292 | kernel_size: 3 293 | stride: 1 294 | pad: 1 295 | } 296 | } 297 | layer { 298 | name: 
"fc6" 299 | type: "Convolution" 300 | bottom: "pool5a" 301 | top: "fc6" 302 | convolution_param { 303 | num_output: 1024 304 | pad: 1 305 | kernel_size: 3 306 | } 307 | } 308 | layer { 309 | name: "relu6" 310 | type: "ReLU" 311 | bottom: "fc6" 312 | top: "fc6" 313 | } 314 | layer { 315 | name: "fc7" 316 | type: "Convolution" 317 | bottom: "fc6" 318 | top: "fc7" 319 | convolution_param { 320 | num_output: 1024 321 | kernel_size: 1 322 | } 323 | } 324 | layer { 325 | name: "relu7" 326 | type: "ReLU" 327 | bottom: "fc7" 328 | top: "fc7" 329 | } -------------------------------------------------------------------------------- /tool/torchutils.py: -------------------------------------------------------------------------------- 1 | 2 | import torch 3 | from torch.utils.data import Dataset 4 | from PIL import Image 5 | import os.path 6 | import random 7 | import numpy as np 8 | from tool import imutils 9 | 10 | class PolyOptimizer(torch.optim.SGD): 11 | 12 | def __init__(self, params, lr, weight_decay, max_step, momentum=0.9): 13 | super().__init__(params, lr, weight_decay) 14 | 15 | self.global_step = 0 16 | self.max_step = max_step 17 | self.momentum = momentum 18 | 19 | self.__initial_lr = [group['lr'] for group in self.param_groups] 20 | 21 | 22 | def step(self, closure=None): 23 | 24 | if self.global_step < self.max_step: 25 | lr_mult = (1 - self.global_step / self.max_step) ** self.momentum 26 | 27 | for i in range(len(self.param_groups)): 28 | self.param_groups[i]['lr'] = self.__initial_lr[i] * lr_mult 29 | 30 | super().step(closure) 31 | 32 | self.global_step += 1 33 | 34 | 35 | class BatchNorm2dFixed(torch.nn.Module): 36 | 37 | def __init__(self, num_features, eps=1e-5): 38 | super(BatchNorm2dFixed, self).__init__() 39 | self.num_features = num_features 40 | self.eps = eps 41 | self.weight = torch.nn.Parameter(torch.Tensor(num_features)) 42 | self.bias = torch.nn.Parameter(torch.Tensor(num_features)) 43 | self.register_buffer('running_mean', torch.zeros(num_features)) 44 | self.register_buffer('running_var', torch.ones(num_features)) 45 | 46 | 47 | def forward(self, input): 48 | 49 | return F.batch_norm( 50 | input, self.running_mean, self.running_var, self.weight, self.bias, 51 | False, eps=self.eps) 52 | 53 | def __call__(self, x): 54 | return self.forward(x) 55 | 56 | 57 | class SegmentationDataset(Dataset): 58 | def __init__(self, img_name_list_path, img_dir, label_dir, rescale=None, flip=False, cropsize=None, 59 | img_transform=None, mask_transform=None): 60 | self.img_name_list_path = img_name_list_path 61 | self.img_dir = img_dir 62 | self.label_dir = label_dir 63 | 64 | self.img_transform = img_transform 65 | self.mask_transform = mask_transform 66 | 67 | self.img_name_list = open(self.img_name_list_path).read().splitlines() 68 | 69 | self.rescale = rescale 70 | self.flip = flip 71 | self.cropsize = cropsize 72 | 73 | def __len__(self): 74 | return len(self.img_name_list) 75 | 76 | def __getitem__(self, idx): 77 | 78 | name = self.img_name_list[idx] 79 | 80 | img = Image.open(os.path.join(self.img_dir, name + '.jpg')).convert("RGB") 81 | mask = Image.open(os.path.join(self.label_dir, name + '.png')) 82 | 83 | if self.rescale is not None: 84 | s = self.rescale[0] + random.random() * (self.rescale[1] - self.rescale[0]) 85 | adj_size = (round(img.size[0]*s/8)*8, round(img.size[1]*s/8)*8) 86 | img = img.resize(adj_size, resample=Image.CUBIC) 87 | mask = img.resize(adj_size, resample=Image.NEAREST) 88 | 89 | if self.img_transform is not None: 90 | img = self.img_transform(img) 91 | if 
self.mask_transform is not None: 92 | mask = self.mask_transform(mask) 93 | 94 | if self.cropsize is not None: 95 | img, mask = imutils.random_crop([img, mask], self.cropsize, (0, 255)) 96 | 97 | mask = imutils.RescaleNearest(0.125)(mask) 98 | 99 | if self.flip is True and bool(random.getrandbits(1)): 100 | img = np.flip(img, 1).copy() 101 | mask = np.flip(mask, 1).copy() 102 | 103 | img = np.transpose(img, (2, 0, 1)) 104 | 105 | return name, img, mask 106 | 107 | 108 | class ExtractAffinityLabelInRadius(): 109 | 110 | def __init__(self, cropsize, radius=5): 111 | self.radius = radius 112 | 113 | self.search_dist = [] 114 | 115 | for x in range(1, radius): 116 | self.search_dist.append((0, x)) 117 | 118 | for y in range(1, radius): 119 | for x in range(-radius+1, radius): 120 | if x*x + y*y < radius*radius: 121 | self.search_dist.append((y, x)) 122 | 123 | self.radius_floor = radius-1 124 | 125 | self.crop_height = cropsize - self.radius_floor 126 | self.crop_width = cropsize - 2 * self.radius_floor 127 | return 128 | 129 | def __call__(self, label): 130 | 131 | labels_from = label[:-self.radius_floor, self.radius_floor:-self.radius_floor] 132 | labels_from = np.reshape(labels_from, [-1]) 133 | 134 | labels_to_list = [] 135 | valid_pair_list = [] 136 | 137 | for dy, dx in self.search_dist: 138 | labels_to = label[dy:dy+self.crop_height, self.radius_floor+dx:self.radius_floor+dx+self.crop_width] 139 | labels_to = np.reshape(labels_to, [-1]) 140 | 141 | valid_pair = np.logical_and(np.less(labels_to, 255), np.less(labels_from, 255)) 142 | 143 | labels_to_list.append(labels_to) 144 | valid_pair_list.append(valid_pair) 145 | 146 | bc_labels_from = np.expand_dims(labels_from, 0) 147 | concat_labels_to = np.stack(labels_to_list) 148 | concat_valid_pair = np.stack(valid_pair_list) 149 | 150 | pos_affinity_label = np.equal(bc_labels_from, concat_labels_to) 151 | 152 | bg_pos_affinity_label = np.logical_and(pos_affinity_label, np.equal(bc_labels_from, 0)).astype(np.float32) 153 | 154 | fg_pos_affinity_label = np.logical_and(np.logical_and(pos_affinity_label, np.not_equal(bc_labels_from, 0)), concat_valid_pair).astype(np.float32) 155 | 156 | neg_affinity_label = np.logical_and(np.logical_not(pos_affinity_label), concat_valid_pair).astype(np.float32) 157 | 158 | return bg_pos_affinity_label, fg_pos_affinity_label, neg_affinity_label 159 | 160 | class AffinityFromMaskDataset(SegmentationDataset): 161 | def __init__(self, img_name_list_path, img_dir, label_dir, rescale=None, flip=False, cropsize=None, 162 | img_transform=None, mask_transform=None, radius=5): 163 | super().__init__(img_name_list_path, img_dir, label_dir, rescale, flip, cropsize, img_transform, mask_transform) 164 | 165 | self.radius = radius 166 | 167 | self.extract_aff_lab_func = ExtractAffinityLabelInRadius(cropsize=cropsize//8, radius=radius) 168 | 169 | def __getitem__(self, idx): 170 | name, img, mask = super().__getitem__(idx) 171 | 172 | aff_label = self.extract_aff_lab_func(mask) 173 | 174 | return name, img, aff_label 175 | -------------------------------------------------------------------------------- /train_aff.py: -------------------------------------------------------------------------------- 1 | 2 | import numpy as np 3 | import torch 4 | import random 5 | from torch.backends import cudnn 6 | torch.manual_seed(1) # cpu 7 | torch.cuda.manual_seed(1) #gpu 8 | np.random.seed(1) #numpy 9 | random.seed(1) 10 | cudnn.enabled = False 11 | from torch.utils.data import DataLoader 12 | from torchvision import transforms 13 | import 
voc12.data 14 | from tool import pyutils, imutils, torchutils 15 | import argparse 16 | import importlib 17 | 18 | 19 | 20 | if __name__ == '__main__': 21 | 22 | parser = argparse.ArgumentParser() 23 | parser.add_argument("--batch_size", default=8, type=int) 24 | parser.add_argument("--max_epoches", default=8, type=int) 25 | parser.add_argument("--network", default="network.resnet38_aff", type=str) 26 | parser.add_argument("--lr", default=0.01, type=float) 27 | parser.add_argument("--num_workers", default=8, type=int) 28 | parser.add_argument("--wt_dec", default=5e-4, type=float) 29 | parser.add_argument("--train_list", default="voc12/train_aug.txt", type=str) 30 | parser.add_argument("--val_list", default="voc12/val.txt", type=str) 31 | parser.add_argument("--session_name", default="resnet38_aff", type=str) 32 | parser.add_argument("--crop_size", default=448, type=int) 33 | parser.add_argument("--weights", required=True, type=str) 34 | parser.add_argument("--voc12_root", default='VOC2012', type=str) 35 | parser.add_argument("--la_crf_dir", required=True, type=str) 36 | parser.add_argument("--ha_crf_dir", required=True, type=str) 37 | args = parser.parse_args() 38 | 39 | pyutils.Logger(args.session_name + '.log') 40 | 41 | print(vars(args)) 42 | 43 | model = getattr(importlib.import_module(args.network), 'Net')() 44 | 45 | print(model) 46 | 47 | 48 | train_dataset = voc12.data.VOC12AffDataset(args.train_list, label_la_dir=args.la_crf_dir, label_ha_dir=args.ha_crf_dir, 49 | voc12_root=args.voc12_root, cropsize=args.crop_size, radius=5, 50 | joint_transform_list=[ 51 | None, 52 | None, 53 | imutils.RandomCrop(args.crop_size), 54 | imutils.RandomHorizontalFlip() 55 | ], 56 | img_transform_list=[ 57 | transforms.ColorJitter(brightness=0.3, contrast=0.3, saturation=0.3, hue=0.1), 58 | np.asarray, 59 | model.normalize, 60 | imutils.HWC_to_CHW 61 | ], 62 | label_transform_list=[ 63 | None, 64 | None, 65 | None, 66 | imutils.AvgPool2d(8) 67 | ]) 68 | def worker_init_fn(worker_id): 69 | np.random.seed(1 + worker_id) 70 | train_data_loader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers, 71 | pin_memory=True, drop_last=True, worker_init_fn=worker_init_fn) 72 | max_step = len(train_dataset) // args.batch_size * args.max_epoches 73 | 74 | param_groups = model.get_parameter_groups() 75 | optimizer = torchutils.PolyOptimizer([ 76 | {'params': param_groups[0], 'lr': args.lr, 'weight_decay': args.wt_dec}, 77 | {'params': param_groups[1], 'lr': 2*args.lr, 'weight_decay': 0}, 78 | {'params': param_groups[2], 'lr': 10*args.lr, 'weight_decay': args.wt_dec}, 79 | {'params': param_groups[3], 'lr': 20*args.lr, 'weight_decay': 0} 80 | ], lr=args.lr, weight_decay=args.wt_dec, max_step=max_step) 81 | 82 | if args.weights[-7:] == '.params': 83 | import network.resnet38d 84 | assert args.network == "network.resnet38_aff" 85 | weights_dict = network.resnet38d.convert_mxnet_to_torch(args.weights) 86 | elif args.weights[-11:] == '.caffemodel': 87 | import network.vgg16d 88 | assert args.network == "network.vgg16_aff" 89 | weights_dict = network.vgg16d.convert_caffe_to_torch(args.weights) 90 | else: 91 | weights_dict = torch.load(args.weights) 92 | 93 | model.load_state_dict(weights_dict, strict=False) 94 | model = torch.nn.DataParallel(model).cuda() 95 | model.train() 96 | 97 | avg_meter = pyutils.AverageMeter('loss', 'bg_loss', 'fg_loss', 'neg_loss', 'bg_cnt', 'fg_cnt', 'neg_cnt') 98 | 99 | timer = pyutils.Timer("Session started: ") 100 | 101 | for ep in 
range(args.max_epoches): 102 | 103 | for iter, pack in enumerate(train_data_loader): 104 | 105 | aff = model.forward(pack[0]) 106 | 107 | bg_label = pack[1][0].cuda(non_blocking=True) 108 | fg_label = pack[1][1].cuda(non_blocking=True) 109 | neg_label = pack[1][2].cuda(non_blocking=True) 110 | 111 | bg_count = torch.sum(bg_label) + 1e-5 112 | fg_count = torch.sum(fg_label) + 1e-5 113 | neg_count = torch.sum(neg_label) + 1e-5 114 | 115 | bg_loss = torch.sum(- bg_label * torch.log(aff + 1e-5)) / bg_count 116 | fg_loss = torch.sum(- fg_label * torch.log(aff + 1e-5)) / fg_count 117 | neg_loss = torch.sum(- neg_label * torch.log(1. + 1e-5 - aff)) / neg_count 118 | 119 | loss = bg_loss/4 + fg_loss/4 + neg_loss/2 120 | 121 | optimizer.zero_grad() 122 | loss.backward() 123 | optimizer.step() 124 | 125 | avg_meter.add({ 126 | 'loss': loss.item(), 127 | 'bg_loss': bg_loss.item(), 'fg_loss': fg_loss.item(), 'neg_loss': neg_loss.item(), 128 | 'bg_cnt': bg_count.item(), 'fg_cnt': fg_count.item(), 'neg_cnt': neg_count.item() 129 | }) 130 | 131 | if (optimizer.global_step - 1) % 50 == 0: 132 | 133 | timer.update_progress(optimizer.global_step / max_step) 134 | 135 | print('Iter:%5d/%5d' % (optimizer.global_step-1, max_step), 136 | 'loss:%.4f %.4f %.4f %.4f' % avg_meter.get('loss', 'bg_loss', 'fg_loss', 'neg_loss'), 137 | 'cnt:%.0f %.0f %.0f' % avg_meter.get('bg_cnt', 'fg_cnt', 'neg_cnt'), 138 | 'imps:%.1f' % ((iter+1) * args.batch_size / timer.get_stage_elapsed()), 139 | 'Fin:%s' % (timer.str_est_finish()), 140 | 'lr: %.4f' % (optimizer.param_groups[0]['lr']), flush=True) 141 | 142 | avg_meter.pop() 143 | 144 | 145 | else: 146 | print('') 147 | timer.reset_stage() 148 | 149 | torch.save(model.module.state_dict(), args.session_name + '.pth') 150 | -------------------------------------------------------------------------------- /tool/imutils.py: -------------------------------------------------------------------------------- 1 | 2 | import PIL.Image 3 | import random 4 | import numpy as np 5 | 6 | class RandomResizeLong(): 7 | 8 | def __init__(self, min_long, max_long): 9 | self.min_long = min_long 10 | self.max_long = max_long 11 | 12 | def __call__(self, img): 13 | 14 | target_long = random.randint(self.min_long, self.max_long) 15 | w, h = img.size 16 | 17 | if w < h: 18 | target_shape = (int(round(w * target_long / h)), target_long) 19 | else: 20 | target_shape = (target_long, int(round(h * target_long / w))) 21 | 22 | img = img.resize(target_shape, resample=PIL.Image.CUBIC) 23 | 24 | return img 25 | 26 | 27 | class RandomCrop(): 28 | 29 | def __init__(self, cropsize): 30 | self.cropsize = cropsize 31 | 32 | def __call__(self, imgarr): 33 | 34 | h, w, c = imgarr.shape 35 | 36 | ch = min(self.cropsize, h) 37 | cw = min(self.cropsize, w) 38 | 39 | w_space = w - self.cropsize 40 | h_space = h - self.cropsize 41 | 42 | if w_space > 0: 43 | cont_left = 0 44 | img_left = random.randrange(w_space+1) 45 | else: 46 | cont_left = random.randrange(-w_space+1) 47 | img_left = 0 48 | 49 | if h_space > 0: 50 | cont_top = 0 51 | img_top = random.randrange(h_space+1) 52 | else: 53 | cont_top = random.randrange(-h_space+1) 54 | img_top = 0 55 | 56 | container = np.zeros((self.cropsize, self.cropsize, imgarr.shape[-1]), np.float32) 57 | container[cont_top:cont_top+ch, cont_left:cont_left+cw] = \ 58 | imgarr[img_top:img_top+ch, img_left:img_left+cw] 59 | 60 | return container 61 | 62 | def get_random_crop_box(imgsize, cropsize): 63 | h, w = imgsize 64 | 65 | ch = min(cropsize, h) 66 | cw = min(cropsize, w) 67 | 68 | 
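# The box computed below packs two aligned windows into one 8-tuple: elements 0-3
# are the paste region inside the (cropsize, cropsize) container, elements 4-7 the
# matching source region inside the image, so a single slice assignment works
# whether the image is larger or smaller than the crop. An illustrative sketch
# (the shapes are assumptions, not from this repo):
#
#   box = get_random_crop_box((300, 500), cropsize=448)
#   img = np.zeros((300, 500, 3), np.float32)
#   cont = np.zeros((448, 448, 3), np.float32)
#   cont[box[0]:box[1], box[2]:box[3]] = img[box[4]:box[5], box[6]:box[7]]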
w_space = w - cropsize 69 | h_space = h - cropsize 70 | 71 | if w_space > 0: 72 | cont_left = 0 73 | img_left = random.randrange(w_space + 1) 74 | else: 75 | cont_left = random.randrange(-w_space + 1) 76 | img_left = 0 77 | 78 | if h_space > 0: 79 | cont_top = 0 80 | img_top = random.randrange(h_space + 1) 81 | else: 82 | cont_top = random.randrange(-h_space + 1) 83 | img_top = 0 84 | 85 | return cont_top, cont_top+ch, cont_left, cont_left+cw, img_top, img_top+ch, img_left, img_left+cw 86 | 87 | def crop_with_box(img, box): 88 | if len(img.shape) == 3: 89 | img_cont = np.zeros((max(box[1]-box[0], box[4]-box[5]), max(box[3]-box[2], box[7]-box[6]), img.shape[-1]), dtype=img.dtype) 90 | else: 91 | img_cont = np.zeros((max(box[1] - box[0], box[4] - box[5]), max(box[3] - box[2], box[7] - box[6])), dtype=img.dtype) 92 | img_cont[box[0]:box[1], box[2]:box[3]] = img[box[4]:box[5], box[6]:box[7]] 93 | return img_cont 94 | 95 | 96 | def random_crop(images, cropsize, fills): 97 | if isinstance(images[0], PIL.Image.Image): 98 | imgsize = images[0].size[::-1] 99 | else: 100 | imgsize = images[0].shape[:2] 101 | box = get_random_crop_box(imgsize, cropsize) 102 | 103 | new_images = [] 104 | for img, f in zip(images, fills): 105 | 106 | if isinstance(img, PIL.Image.Image): 107 | img = img.crop((box[6], box[4], box[7], box[5])) 108 | cont = PIL.Image.new(img.mode, (cropsize, cropsize)) 109 | cont.paste(img, (box[2], box[0])) 110 | new_images.append(cont) 111 | 112 | else: 113 | if len(img.shape) == 3: 114 | cont = np.ones((cropsize, cropsize, img.shape[2]), img.dtype)*f 115 | else: 116 | cont = np.ones((cropsize, cropsize), img.dtype)*f 117 | cont[box[0]:box[1], box[2]:box[3]] = img[box[4]:box[5], box[6]:box[7]] 118 | new_images.append(cont) 119 | 120 | return new_images 121 | 122 | 123 | class AvgPool2d(): 124 | 125 | def __init__(self, ksize): 126 | self.ksize = ksize 127 | 128 | def __call__(self, img): 129 | import skimage.measure 130 | 131 | return skimage.measure.block_reduce(img, (self.ksize, self.ksize, 1), np.mean) 132 | 133 | 134 | class RandomHorizontalFlip(): 135 | def __init__(self): 136 | return 137 | 138 | def __call__(self, img): 139 | if bool(random.getrandbits(1)): 140 | img = np.fliplr(img).copy() 141 | return img 142 | 143 | 144 | class CenterCrop(): 145 | 146 | def __init__(self, cropsize, default_value=0): 147 | self.cropsize = cropsize 148 | self.default_value = default_value 149 | 150 | def __call__(self, npimg): 151 | 152 | h, w = npimg.shape[:2] 153 | 154 | ch = min(self.cropsize, h) 155 | cw = min(self.cropsize, w) 156 | 157 | sh = h - self.cropsize 158 | sw = w - self.cropsize 159 | 160 | if sw > 0: 161 | cont_left = 0 162 | img_left = int(round(sw / 2)) 163 | else: 164 | cont_left = int(round(-sw / 2)) 165 | img_left = 0 166 | 167 | if sh > 0: 168 | cont_top = 0 169 | img_top = int(round(sh / 2)) 170 | else: 171 | cont_top = int(round(-sh / 2)) 172 | img_top = 0 173 | 174 | if len(npimg.shape) == 2: 175 | container = np.ones((self.cropsize, self.cropsize), npimg.dtype)*self.default_value 176 | else: 177 | container = np.ones((self.cropsize, self.cropsize, npimg.shape[2]), npimg.dtype)*self.default_value 178 | 179 | container[cont_top:cont_top+ch, cont_left:cont_left+cw] = \ 180 | npimg[img_top:img_top+ch, img_left:img_left+cw] 181 | 182 | return container 183 | 184 | 185 | def HWC_to_CHW(img): 186 | return np.transpose(img, (2, 0, 1)) 187 | 188 | 189 | class RescaleNearest(): 190 | def __init__(self, scale): 191 | self.scale = scale 192 | 193 | def __call__(self, npimg): 194 | 
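# Nearest-neighbor interpolation is deliberate for label maps: masks carry class
# ids plus the 255 ignore index, and bilinear resampling would blend them into
# invalid in-between values. SegmentationDataset in tool/torchutils.py uses
# RescaleNearest(0.125) to bring a mask down to the backbone's stride-8
# resolution, e.g. (illustrative shapes):
#
#   mask8 = RescaleNearest(0.125)(mask)  # (448, 448) uint8 -> (56, 56)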
import cv2 195 | return cv2.resize(npimg, None, fx=self.scale, fy=self.scale, interpolation=cv2.INTER_NEAREST) 196 | 197 | 198 | 199 | 200 | def crf_inference(img, probs, t=10, scale_factor=1, labels=21): 201 | import pydensecrf.densecrf as dcrf 202 | from pydensecrf.utils import unary_from_softmax 203 | 204 | h, w = img.shape[:2] 205 | n_labels = labels 206 | 207 | d = dcrf.DenseCRF2D(w, h, n_labels) 208 | 209 | unary = unary_from_softmax(probs) 210 | unary = np.ascontiguousarray(unary) 211 | 212 | d.setUnaryEnergy(unary) 213 | d.addPairwiseGaussian(sxy=3/scale_factor, compat=3) 214 | d.addPairwiseBilateral(sxy=80/scale_factor, srgb=13, rgbim=np.copy(img), compat=10) 215 | Q = d.inference(t) 216 | 217 | return np.array(Q).reshape((n_labels, h, w)) -------------------------------------------------------------------------------- /network/resnet38d.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from torch import nn 3 | import numpy as np 4 | 5 | import torch.nn.functional as F 6 | bn_mom = 0.0003 7 | class ResBlock(nn.Module): 8 | def __init__(self, in_channels, mid_channels, out_channels, stride=1, first_dilation=None, dilation=1): 9 | super(ResBlock, self).__init__() 10 | 11 | self.same_shape = (in_channels == out_channels and stride == 1) 12 | 13 | if first_dilation == None: first_dilation = dilation 14 | 15 | self.bn_branch2a = nn.BatchNorm2d(in_channels) 16 | 17 | self.conv_branch2a = nn.Conv2d(in_channels, mid_channels, 3, stride, 18 | padding=first_dilation, dilation=first_dilation, bias=False) 19 | 20 | self.bn_branch2b1 = nn.BatchNorm2d(mid_channels) 21 | 22 | self.conv_branch2b1 = nn.Conv2d(mid_channels, out_channels, 3, padding=dilation, dilation=dilation, bias=False) 23 | 24 | if not self.same_shape: 25 | self.conv_branch1 = nn.Conv2d(in_channels, out_channels, 1, stride, bias=False) 26 | 27 | def forward(self, x, get_x_bn_relu=False): 28 | 29 | branch2 = self.bn_branch2a(x) 30 | branch2 = F.relu(branch2) 31 | 32 | x_bn_relu = branch2 33 | 34 | if not self.same_shape: 35 | branch1 = self.conv_branch1(branch2) 36 | else: 37 | branch1 = x 38 | 39 | branch2 = self.conv_branch2a(branch2) 40 | branch2 = self.bn_branch2b1(branch2) 41 | branch2 = F.relu(branch2) 42 | branch2 = self.conv_branch2b1(branch2) 43 | 44 | x = branch1 + branch2 45 | 46 | if get_x_bn_relu: 47 | return x, x_bn_relu 48 | 49 | return x 50 | 51 | def __call__(self, x, get_x_bn_relu=False): 52 | return self.forward(x, get_x_bn_relu=get_x_bn_relu) 53 | 54 | class ResBlock_bot(nn.Module): 55 | def __init__(self, in_channels, out_channels, stride=1, dilation=1, dropout=0.): 56 | super(ResBlock_bot, self).__init__() 57 | 58 | self.same_shape = (in_channels == out_channels and stride == 1) 59 | 60 | self.bn_branch2a = nn.BatchNorm2d(in_channels) 61 | self.conv_branch2a = nn.Conv2d(in_channels, out_channels//4, 1, stride, bias=False) 62 | 63 | self.bn_branch2b1 = nn.BatchNorm2d(out_channels//4) 64 | self.dropout_2b1 = torch.nn.Dropout2d(dropout) 65 | self.conv_branch2b1 = nn.Conv2d(out_channels//4, out_channels//2, 3, padding=dilation, dilation=dilation, bias=False) 66 | 67 | self.bn_branch2b2 = nn.BatchNorm2d(out_channels//2) 68 | self.dropout_2b2 = torch.nn.Dropout2d(dropout) 69 | self.conv_branch2b2 = nn.Conv2d(out_channels//2, out_channels, 1, bias=False) 70 | 71 | if not self.same_shape: 72 | self.conv_branch1 = nn.Conv2d(in_channels, out_channels, 1, stride, bias=False) 73 | 74 | def forward(self, x, get_x_bn_relu=False): 75 | 76 | branch2 = 
self.bn_branch2a(x) 77 | branch2 = F.relu(branch2) 78 | x_bn_relu = branch2 79 | 80 | branch1 = self.conv_branch1(branch2) 81 | 82 | branch2 = self.conv_branch2a(branch2) 83 | 84 | branch2 = self.bn_branch2b1(branch2) 85 | branch2 = F.relu(branch2) 86 | branch2 = self.dropout_2b1(branch2) 87 | branch2 = self.conv_branch2b1(branch2) 88 | 89 | branch2 = self.bn_branch2b2(branch2) 90 | branch2 = F.relu(branch2) 91 | branch2 = self.dropout_2b2(branch2) 92 | branch2 = self.conv_branch2b2(branch2) 93 | 94 | x = branch1 + branch2 95 | 96 | if get_x_bn_relu: 97 | return x, x_bn_relu 98 | 99 | return x 100 | 101 | def __call__(self, x, get_x_bn_relu=False): 102 | return self.forward(x, get_x_bn_relu=get_x_bn_relu) 103 | 104 | class Normalize(): 105 | def __init__(self, mean = (0.485, 0.456, 0.406), std = (0.229, 0.224, 0.225)): 106 | 107 | self.mean = mean 108 | self.std = std 109 | 110 | def __call__(self, img): 111 | imgarr = np.asarray(img) 112 | proc_img = np.empty_like(imgarr, np.float32) 113 | 114 | proc_img[..., 0] = (imgarr[..., 0] / 255. - self.mean[0]) / self.std[0] 115 | proc_img[..., 1] = (imgarr[..., 1] / 255. - self.mean[1]) / self.std[1] 116 | proc_img[..., 2] = (imgarr[..., 2] / 255. - self.mean[2]) / self.std[2] 117 | 118 | return proc_img 119 | 120 | class Net(nn.Module): 121 | def __init__(self): 122 | super(Net, self).__init__() 123 | 124 | self.conv1a = nn.Conv2d(3, 64, 3, padding=1, bias=False) 125 | 126 | self.b2 = ResBlock(64, 128, 128, stride=2) 127 | self.b2_1 = ResBlock(128, 128, 128) 128 | self.b2_2 = ResBlock(128, 128, 128) 129 | 130 | self.b3 = ResBlock(128, 256, 256, stride=2) 131 | self.b3_1 = ResBlock(256, 256, 256) 132 | self.b3_2 = ResBlock(256, 256, 256) 133 | 134 | self.b4 = ResBlock(256, 512, 512, stride=2) 135 | self.b4_1 = ResBlock(512, 512, 512) 136 | self.b4_2 = ResBlock(512, 512, 512) 137 | self.b4_3 = ResBlock(512, 512, 512) 138 | self.b4_4 = ResBlock(512, 512, 512) 139 | self.b4_5 = ResBlock(512, 512, 512) 140 | 141 | self.b5 = ResBlock(512, 512, 1024, stride=1, first_dilation=1, dilation=2) 142 | self.b5_1 = ResBlock(1024, 512, 1024, dilation=2) 143 | self.b5_2 = ResBlock(1024, 512, 1024, dilation=2) 144 | 145 | self.b6 = ResBlock_bot(1024, 2048, stride=1, dilation=4, dropout=0.3) 146 | 147 | self.b7 = ResBlock_bot(2048, 4096, dilation=4, dropout=0.5) 148 | 149 | self.bn7 = nn.BatchNorm2d(4096) 150 | 151 | self.not_training = [self.conv1a] 152 | 153 | self.normalize = Normalize() 154 | 155 | return 156 | 157 | def forward(self, x): 158 | return self.forward_as_dict(x)['conv6'] 159 | 160 | def forward_as_dict(self, x): 161 | 162 | x = self.conv1a(x) 163 | 164 | x = self.b2(x) 165 | x = self.b2_1(x) 166 | x = self.b2_2(x) 167 | 168 | x = self.b3(x) 169 | x = self.b3_1(x) 170 | x = self.b3_2(x) 171 | 172 | x = self.b4(x) 173 | x = self.b4_1(x) 174 | x = self.b4_2(x) 175 | x = self.b4_3(x) 176 | x = self.b4_4(x) 177 | x = self.b4_5(x) 178 | 179 | x, conv4 = self.b5(x, get_x_bn_relu=True) 180 | x = self.b5_1(x) 181 | x = self.b5_2(x) 182 | 183 | x, conv5 = self.b6(x, get_x_bn_relu=True) 184 | 185 | x = self.b7(x) 186 | conv6 = F.relu(self.bn7(x)) 187 | 188 | return dict({'conv4': conv4, 'conv5': conv5, 'conv6': conv6}) 189 | 190 | 191 | def train(self, mode=True): 192 | 193 | super().train(mode) 194 | 195 | for layer in self.not_training: 196 | 197 | if isinstance(layer, torch.nn.Conv2d): 198 | layer.weight.requires_grad = False 199 | 200 | elif isinstance(layer, torch.nn.Module): 201 | for c in layer.children(): 202 | c.weight.requires_grad = False 203 | if 
c.bias is not None: 204 | c.bias.requires_grad = False 205 | 206 | for layer in self.modules(): 207 | 208 | if isinstance(layer, torch.nn.BatchNorm2d): 209 | layer.eval() 210 | layer.bias.requires_grad = False 211 | layer.weight.requires_grad = False 212 | 213 | return 214 | 215 | def convert_mxnet_to_torch(filename): 216 | import mxnet 217 | 218 | save_dict = mxnet.nd.load(filename) 219 | 220 | renamed_dict = dict() 221 | 222 | bn_param_mx_pt = {'beta': 'bias', 'gamma': 'weight', 'mean': 'running_mean', 'var': 'running_var'} 223 | 224 | for k, v in save_dict.items(): 225 | 226 | v = torch.from_numpy(v.asnumpy()) 227 | toks = k.split('_') 228 | 229 | if 'conv1a' in toks[0]: 230 | renamed_dict['conv1a.weight'] = v 231 | 232 | elif 'linear1000' in toks[0]: 233 | pass 234 | 235 | elif 'branch' in toks[1]: 236 | 237 | pt_name = [] 238 | 239 | if toks[0][-1] != 'a': 240 | pt_name.append('b' + toks[0][-3] + '_' + toks[0][-1]) 241 | else: 242 | pt_name.append('b' + toks[0][-2]) 243 | 244 | if 'res' in toks[0]: 245 | layer_type = 'conv' 246 | last_name = 'weight' 247 | 248 | else: # 'bn' in toks[0]: 249 | layer_type = 'bn' 250 | last_name = bn_param_mx_pt[toks[-1]] 251 | 252 | pt_name.append(layer_type + '_' + toks[1]) 253 | 254 | pt_name.append(last_name) 255 | 256 | torch_name = '.'.join(pt_name) 257 | renamed_dict[torch_name] = v 258 | 259 | else: 260 | last_name = bn_param_mx_pt[toks[-1]] 261 | renamed_dict['bn7.' + last_name] = v 262 | 263 | return renamed_dict 264 | 265 | -------------------------------------------------------------------------------- /train_cls_ser.py: -------------------------------------------------------------------------------- 1 | 2 | import torch 3 | import numpy as np 4 | import random 5 | torch.manual_seed(1) # cpu 6 | torch.cuda.manual_seed(1) #gpu 7 | np.random.seed(1) #numpy 8 | random.seed(1) #random and transforms 9 | torch.backends.cudnn.deterministic=False # cudnn 10 | import cv2 11 | from torch.utils.data import DataLoader 12 | from torchvision import transforms 13 | import voc12.data 14 | from tool import pyutils, imutils, torchutils, visualization 15 | import argparse 16 | import importlib 17 | import torch.nn.functional as F 18 | from tensorboardX import SummaryWriter 19 | 20 | def validate(model, data_loader): 21 | print('\nvalidating ... 
', flush=True, end='') 22 | 23 | val_loss_meter = pyutils.AverageMeter('loss') 24 | 25 | model.eval() 26 | 27 | with torch.no_grad(): 28 | for pack in data_loader: 29 | img = pack[1] 30 | label = pack[2].cuda(non_blocking=True) 31 | label = label.unsqueeze(2).unsqueeze(3) 32 | 33 | x = model(img) 34 | x = F.adaptive_avg_pool2d(x, (1,1)) 35 | loss = F.multilabel_soft_margin_loss(x, label) 36 | 37 | val_loss_meter.add({'loss': loss.item()}) 38 | 39 | model.train() 40 | 41 | print('loss:', val_loss_meter.pop('loss')) 42 | 43 | return 44 | 45 | 46 | if __name__ == '__main__': 47 | 48 | parser = argparse.ArgumentParser() 49 | parser.add_argument("--batch_size", default=16, type=int) 50 | parser.add_argument("--max_epoches", default=15, type=int) 51 | parser.add_argument("--network", default="network.resnet38_cls_ser", type=str) 52 | parser.add_argument("--lr", default=0.01, type=float) 53 | parser.add_argument("--num_workers", default=8, type=int) 54 | parser.add_argument("--wt_dec", default=5e-4, type=float) 55 | parser.add_argument("--weights", required=True, type=str) 56 | parser.add_argument("--train_list", default="voc12/train_aug.txt", type=str) 57 | parser.add_argument("--val_list", default="voc12/val.txt", type=str) 58 | parser.add_argument("--session_name", default="resnet38_cls_ser", type=str) 59 | parser.add_argument("--crop_size", default=448, type=int) 60 | parser.add_argument("--voc12_root", default='VOC2012', type=str) 61 | parser.add_argument("--tblog_dir", default='./log', type=str) 62 | args = parser.parse_args() 63 | 64 | model = getattr(importlib.import_module(args.network), 'Net')() 65 | 66 | tblogger = SummaryWriter(args.tblog_dir) 67 | pyutils.Logger(args.session_name + '.log') 68 | 69 | print(vars(args)) 70 | 71 | train_dataset = voc12.data.VOC12ClsDataset(args.train_list, voc12_root=args.voc12_root, 72 | transform=transforms.Compose([ 73 | imutils.RandomResizeLong(448, 768), 74 | transforms.RandomHorizontalFlip(), 75 | transforms.ColorJitter(brightness=0.3, contrast=0.3, saturation=0.3, hue=0.1), 76 | np.asarray, 77 | model.normalize, 78 | imutils.RandomCrop(args.crop_size), 79 | imutils.HWC_to_CHW, 80 | torch.from_numpy 81 | ])) 82 | def worker_init_fn(worker_id): 83 | np.random.seed(1 + worker_id) 84 | train_data_loader = DataLoader(train_dataset, batch_size=args.batch_size, 85 | shuffle=True, num_workers=args.num_workers, pin_memory=True, drop_last=True, 86 | worker_init_fn=worker_init_fn) 87 | 88 | max_step = (len(train_dataset) // args.batch_size) * args.max_epoches 89 | 90 | val_dataset = voc12.data.VOC12ClsDataset(args.val_list, voc12_root=args.voc12_root, 91 | transform=transforms.Compose([ 92 | np.asarray, 93 | model.normalize, 94 | imutils.CenterCrop(500), 95 | imutils.HWC_to_CHW, 96 | torch.from_numpy 97 | ])) 98 | val_data_loader = DataLoader(val_dataset, batch_size=args.batch_size, 99 | shuffle=False, num_workers=args.num_workers, pin_memory=True, drop_last=True) 100 | 101 | param_groups = model.get_parameter_groups() 102 | optimizer = torchutils.PolyOptimizer([ 103 | {'params': param_groups[0], 'lr': args.lr, 'weight_decay': args.wt_dec}, 104 | {'params': param_groups[1], 'lr': 2*args.lr, 'weight_decay': 0}, 105 | {'params': param_groups[2], 'lr': 10*args.lr, 'weight_decay': args.wt_dec}, 106 | {'params': param_groups[3], 'lr': 20*args.lr, 'weight_decay': 0} 107 | ], lr=args.lr, weight_decay=args.wt_dec, max_step=max_step) 108 | 109 | if args.weights[-7:] == '.params': 110 | assert args.network == "network.resnet38_cls_ser" 111 | import network.resnet38d 
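# The branches below mirror train_aff.py: the ImageNet-pretrained ResNet-38 ships as
# an MXNet '.params' checkpoint and VGG-16 as a Caffe '.caffemodel', each converted
# to a PyTorch state_dict on the fly; anything else is assumed to be a torch
# checkpoint. The conversion can also be done once offline and cached -- a minimal
# sketch, with a hypothetical filename:
#
#   import torch
#   import network.resnet38d
#   w = network.resnet38d.convert_mxnet_to_torch('resnet38_imagenet.params')
#   torch.save(w, 'resnet38_imagenet.pth')  # later runs: --weights resnet38_imagenet.pth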
112 | weights_dict = network.resnet38d.convert_mxnet_to_torch(args.weights) 113 | elif args.weights[-11:] == '.caffemodel': 114 | assert args.network == "network.vgg16_cls_ser" 115 | import network.vgg16d 116 | weights_dict = network.vgg16d.convert_caffe_to_torch(args.weights) 117 | else: 118 | weights_dict = torch.load(args.weights) 119 | 120 | model.load_state_dict(weights_dict, strict=False) 121 | model = torch.nn.DataParallel(model).cuda() 122 | model.train() 123 | 124 | avg_meter = pyutils.AverageMeter('loss','loss_cls','loss_cls_s','loss_r') 125 | 126 | timer = pyutils.Timer("Session started: ") 127 | 128 | for ep in range(args.max_epoches): 129 | 130 | for iter, pack in enumerate(train_data_loader): 131 | 132 | img = pack[1] 133 | label = pack[2].cuda(non_blocking=True) 134 | label = label.unsqueeze(2).unsqueeze(3) 135 | 136 | cam = model(img) 137 | N,C,H,W = cam.size() 138 | predicts = F.adaptive_avg_pool2d(cam, (1,1)) 139 | loss_cls = F.multilabel_soft_margin_loss(predicts, label) 140 | branch_rate = 0.3 141 | img_s = F.interpolate(img, scale_factor=branch_rate,mode='bilinear') 142 | cam_s = model(img_s) 143 | Ns,Cs,Hs,Ws = cam_s.size() 144 | predicts_s = F.adaptive_avg_pool2d(cam_s, (1,1)) 145 | loss_cls_s = F.multilabel_soft_margin_loss(predicts_s, label) 146 | 147 | 148 | cam_sn = F.relu(cam_s) 149 | cam_sn_max = torch.max(cam_sn.view(N,C,-1), dim=-1)[0].view(N,C,1,1)+1e-5 150 | cam_sn = F.relu(cam_sn-1e-5, inplace=True)/cam_sn_max 151 | cam_sn = cam_sn * label 152 | 153 | cam_r = F.interpolate(cam, scale_factor=branch_rate, mode='bilinear') 154 | cam_rn = F.relu(cam_r) 155 | cam_rn_max = torch.max(cam_rn.view(N,C,-1), dim=-1)[0].view(N,C,1,1)+1e-5 156 | cam_rn = F.relu(cam_rn-1e-5, inplace=True)/cam_rn_max 157 | cam_rn = cam_rn * label 158 | 159 | 160 | loss_r = torch.mean(torch.pow(cam_sn - cam_rn, 2)) 161 | loss = (loss_cls/2 + loss_cls_s/2) + loss_r 162 | 163 | avg_meter.add({'loss': loss.item(), 'loss_cls': loss_cls.item(), 'loss_cls_s': loss_cls_s.item(), 'loss_r':loss_r.item()}) 164 | 165 | optimizer.zero_grad() 166 | loss.backward() 167 | optimizer.step() 168 | 169 | if (optimizer.global_step-1)%10 == 0: 170 | timer.update_progress(optimizer.global_step / max_step) 171 | 172 | print('Iter:%5d/%5d' % (optimizer.global_step - 1, max_step), 173 | 'Loss:%.4f %.4f %.4f %.4f' % (avg_meter.get('loss','loss_cls','loss_cls_s','loss_r')), 174 | 'imps:%.1f' % ((iter+1) * args.batch_size / timer.get_stage_elapsed()), 175 | 'Fin:%s' % (timer.str_est_finish()), 176 | 'lr: %.4f' % (optimizer.param_groups[0]['lr']), flush=True) 177 | avg_meter.pop() 178 | 179 | 180 | img_8 = img[0].numpy().transpose((1,2,0)) 181 | img_8 = np.ascontiguousarray(img_8) 182 | mean = (0.485, 0.456, 0.406) 183 | std = (0.229, 0.224, 0.225) 184 | img_8[:,:,0] = (img_8[:,:,0]*std[0] + mean[0])*255 185 | img_8[:,:,1] = (img_8[:,:,1]*std[1] + mean[1])*255 186 | img_8[:,:,2] = (img_8[:,:,2]*std[2] + mean[2])*255 187 | img_8[img_8 > 255] = 255 188 | img_8[img_8 < 0] = 0 189 | img_8 = img_8.astype(np.uint8) 190 | 191 | input_img = img_8.transpose((2,0,1)) 192 | h = H//4; w = W//4 193 | p = F.interpolate(cam,(h,w),mode='bilinear')[0].detach().cpu().numpy() 194 | p_s = F.interpolate(cam_s,(h,w),mode='bilinear')[0].detach().cpu().numpy() 195 | bg_score = np.zeros((1,h,w),np.float32) 196 | p = np.concatenate((bg_score,p), axis=0) 197 | p_s = np.concatenate((bg_score,p_s), axis=0) 198 | bg_label = np.ones((1,1,1),np.float32) 199 | l = label[0].detach().cpu().numpy() 200 | l = np.concatenate((bg_label,l),axis=0) 201 | 
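# A zero background channel is prepended below so the CAM tensors line up with the
# 21-way VOC layout (background + the 20 classes) that generate_vis and
# VOClabel2colormap expect. Shapes, for orientation:
#
#   p, p_s : (20, h, w) -> (21, h, w) after concatenating bg_score
#   l      : (20, 1, 1) -> (21, 1, 1) after concatenating bg_label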
image = cv2.resize(img_8, (w,h), interpolation=cv2.INTER_CUBIC).transpose((2,0,1)) 202 | CLS, CAM, CLS_crf, CAM_crf = visualization.generate_vis(p, l, image, func_label2color=visualization.VOClabel2colormap) 203 | CLS_s, CAM_s, CLS_crf_s, CAM_crf_s = visualization.generate_vis(p_s, l, image, func_label2color=visualization.VOClabel2colormap) 204 | loss_dict = {'loss':loss.item(), 205 | 'loss_cls':loss_cls.item(), 206 | 'loss_cls_s':loss_cls_s.item(), 207 | 'loss_r':loss_r.item()} 208 | itr = optimizer.global_step - 1 209 | tblogger.add_scalars('loss', loss_dict, itr) 210 | tblogger.add_scalar('lr', optimizer.param_groups[0]['lr'], itr) 211 | tblogger.add_image('Image', input_img, itr) 212 | tblogger.add_image('CLS', CLS, itr) 213 | tblogger.add_image('CLS_s', CLS_s, itr) 214 | tblogger.add_images('CAM', CAM, itr) 215 | tblogger.add_images('CAM_s', CAM_s, itr) 216 | else: 217 | validate(model, val_data_loader) 218 | timer.reset_stage() 219 | 220 | torch.save(model.module.state_dict(), args.session_name + '.pth') 221 | -------------------------------------------------------------------------------- /voc12/data.py: -------------------------------------------------------------------------------- 1 | 2 | import numpy as np 3 | import torch 4 | from torch.utils.data import Dataset 5 | import PIL.Image 6 | import os.path 7 | import scipy.misc 8 | 9 | IMG_FOLDER_NAME = "JPEGImages" 10 | ANNOT_FOLDER_NAME = "Annotations" 11 | 12 | CAT_LIST = ['aeroplane', 'bicycle', 'bird', 'boat', 13 | 'bottle', 'bus', 'car', 'cat', 'chair', 14 | 'cow', 'diningtable', 'dog', 'horse', 15 | 'motorbike', 'person', 'pottedplant', 16 | 'sheep', 'sofa', 'train', 17 | 'tvmonitor'] 18 | 19 | CAT_NAME_TO_NUM = dict(zip(CAT_LIST,range(len(CAT_LIST)))) 20 | 21 | def load_image_label_from_xml(img_name, voc12_root): 22 | from xml.dom import minidom 23 | 24 | el_list = minidom.parse(os.path.join(voc12_root, ANNOT_FOLDER_NAME,img_name + '.xml')).getElementsByTagName('name') 25 | 26 | multi_cls_lab = np.zeros((20), np.float32) 27 | 28 | for el in el_list: 29 | cat_name = el.firstChild.data 30 | if cat_name in CAT_LIST: 31 | cat_num = CAT_NAME_TO_NUM[cat_name] 32 | multi_cls_lab[cat_num] = 1.0 33 | 34 | return multi_cls_lab 35 | 36 | def load_image_label_list_from_xml(img_name_list, voc12_root): 37 | 38 | return [load_image_label_from_xml(img_name, voc12_root) for img_name in img_name_list] 39 | 40 | def load_image_label_list_from_npy(img_name_list): 41 | 42 | cls_labels_dict = np.load('voc12/cls_labels.npy', allow_pickle=True).item() # the .npy stores a pickled dict; NumPy >= 1.16.3 requires allow_pickle=True 43 | 44 | return [cls_labels_dict[img_name] for img_name in img_name_list] 45 | 46 | def get_img_path(img_name, voc12_root): 47 | return os.path.join(voc12_root, IMG_FOLDER_NAME, img_name + '.jpg') 48 | 49 | def load_img_name_list(dataset_path): 50 | 51 | img_gt_name_list = open(dataset_path).read().splitlines() 52 | img_name_list = [img_gt_name.split(' ')[0][-15:-4] for img_gt_name in img_gt_name_list] # strip '/JPEGImages/' and '.jpg' to recover the 'YYYY_NNNNNN' image id 53 | 54 | return img_name_list 55 | 56 | class VOC12ImageDataset(Dataset): 57 | 58 | def __init__(self, img_name_list_path, voc12_root, transform=None): 59 | self.img_name_list = load_img_name_list(img_name_list_path) 60 | self.voc12_root = voc12_root 61 | self.transform = transform 62 | 63 | def __len__(self): 64 | return len(self.img_name_list) 65 | 66 | def __getitem__(self, idx): 67 | name = self.img_name_list[idx] 68 | 69 | img = PIL.Image.open(get_img_path(name, self.voc12_root)).convert("RGB") 70 | 71 | if self.transform: 72 | img = self.transform(img) 73 | 74 | return name, img 75 | 76 | 77 | class
VOC12ClsDataset(VOC12ImageDataset): 78 | 79 | def __init__(self, img_name_list_path, voc12_root, transform=None): 80 | super().__init__(img_name_list_path, voc12_root, transform) 81 | self.label_list = load_image_label_list_from_npy(self.img_name_list) 82 | 83 | def __getitem__(self, idx): 84 | name, img = super().__getitem__(idx) 85 | 86 | label = torch.from_numpy(self.label_list[idx]) 87 | 88 | return name, img, label 89 | 90 | 91 | class VOC12ClsDatasetMSF(VOC12ClsDataset): 92 | 93 | def __init__(self, img_name_list_path, voc12_root, scales, inter_transform=None, unit=1): 94 | super().__init__(img_name_list_path, voc12_root, transform=None) 95 | self.scales = scales 96 | self.unit = unit 97 | self.inter_transform = inter_transform 98 | 99 | def __getitem__(self, idx): 100 | name, img, label = super().__getitem__(idx) 101 | 102 | rounded_size = (int(round(img.size[0]/self.unit)*self.unit), int(round(img.size[1]/self.unit)*self.unit)) 103 | 104 | ms_img_list = [] 105 | for s in self.scales: 106 | target_size = (round(rounded_size[0]*s), 107 | round(rounded_size[1]*s)) 108 | s_img = img.resize(target_size, resample=PIL.Image.CUBIC) 109 | ms_img_list.append(s_img) 110 | 111 | if self.inter_transform: 112 | for i in range(len(ms_img_list)): 113 | ms_img_list[i] = self.inter_transform(ms_img_list[i]) 114 | 115 | msf_img_list = [] 116 | for i in range(len(ms_img_list)): 117 | msf_img_list.append(ms_img_list[i]) 118 | msf_img_list.append(np.flip(ms_img_list[i], -1).copy()) 119 | 120 | return name, msf_img_list, label 121 | 122 | class ExtractTripletLabelInRadius(): 123 | 124 | def __init__(self, cropsize, radius=5): 125 | self.radius = radius 126 | 127 | self.search_dist = [] 128 | 129 | for y in range(-radius+1, radius): 130 | for x in range(-radius+1, radius): 131 | if x*x + y*y < radius*radius and x*x+y*y != 0: 132 | self.search_dist.append((y, x)) 133 | 134 | self.radius_floor = radius-1 135 | 136 | self.crop_height = cropsize - 2 * self.radius_floor 137 | self.crop_width = cropsize - 2 * self.radius_floor 138 | return 139 | 140 | def __call__(self, label): 141 | 142 | labels_from = label[self.radius_floor:-self.radius_floor, self.radius_floor:-self.radius_floor] 143 | labels_from = np.reshape(labels_from, [-1]) 144 | 145 | labels_to_list = [] 146 | valid_pair_list = [] 147 | 148 | for dy, dx in self.search_dist: 149 | labels_to = label[self.radius_floor+dy:self.radius_floor+dy+self.crop_height,\ 150 | self.radius_floor+dx:self.radius_floor+dx+self.crop_width] 151 | labels_to = np.reshape(labels_to, [-1]) 152 | 153 | valid_pair = np.logical_and(np.less(labels_to, 255), np.less(labels_from, 255)) 154 | 155 | labels_to_list.append(labels_to) 156 | valid_pair_list.append(valid_pair) 157 | 158 | bc_labels_from = np.expand_dims(labels_from, 0) 159 | concat_labels_to = np.stack(labels_to_list) 160 | concat_valid_pair = np.stack(valid_pair_list) 161 | 162 | pos_affinity_label = np.equal(bc_labels_from, concat_labels_to).astype(np.float32) 163 | 164 | neg_affinity_label = np.logical_and(np.logical_not(pos_affinity_label), concat_valid_pair).astype(np.float32) 165 | 166 | return torch.from_numpy(pos_affinity_label), torch.from_numpy(neg_affinity_label) 167 | 168 | class ExtractAffinityLabelInRadius(): 169 | 170 | def __init__(self, cropsize, radius=5): 171 | self.radius = radius 172 | 173 | self.search_dist = [] 174 | 175 | for x in range(1, radius): 176 | self.search_dist.append((0, x)) 177 | 178 | for y in range(1, radius): 179 | for x in range(-radius+1, radius): 180 | if x*x + y*y < 
radius*radius: 181 | self.search_dist.append((y, x)) 182 | 183 | self.radius_floor = radius-1 184 | 185 | self.crop_height = cropsize - self.radius_floor 186 | self.crop_width = cropsize - 2 * self.radius_floor 187 | return 188 | 189 | def __call__(self, label): 190 | 191 | labels_from = label[:-self.radius_floor, self.radius_floor:-self.radius_floor] 192 | labels_from = np.reshape(labels_from, [-1]) 193 | 194 | labels_to_list = [] 195 | valid_pair_list = [] 196 | 197 | for dy, dx in self.search_dist: 198 | labels_to = label[dy:dy+self.crop_height, self.radius_floor+dx:self.radius_floor+dx+self.crop_width] 199 | labels_to = np.reshape(labels_to, [-1]) 200 | 201 | valid_pair = np.logical_and(np.less(labels_to, 255), np.less(labels_from, 255)) 202 | 203 | labels_to_list.append(labels_to) 204 | valid_pair_list.append(valid_pair) 205 | 206 | bc_labels_from = np.expand_dims(labels_from, 0) 207 | concat_labels_to = np.stack(labels_to_list) 208 | concat_valid_pair = np.stack(valid_pair_list) 209 | 210 | pos_affinity_label = np.equal(bc_labels_from, concat_labels_to) 211 | 212 | bg_pos_affinity_label = np.logical_and(pos_affinity_label, np.equal(bc_labels_from, 0)).astype(np.float32) 213 | 214 | fg_pos_affinity_label = np.logical_and(np.logical_and(pos_affinity_label, np.not_equal(bc_labels_from, 0)), concat_valid_pair).astype(np.float32) 215 | 216 | neg_affinity_label = np.logical_and(np.logical_not(pos_affinity_label), concat_valid_pair).astype(np.float32) 217 | 218 | return torch.from_numpy(bg_pos_affinity_label), torch.from_numpy(fg_pos_affinity_label), torch.from_numpy(neg_affinity_label) 219 | 220 | class ExtractAffinityLabelInRadius_mask(): 221 | 222 | def __init__(self, cropsize, radius=5): 223 | self.radius = radius 224 | 225 | self.search_dist = [] 226 | 227 | for x in range(1, radius): 228 | self.search_dist.append((0, x)) 229 | 230 | for y in range(1, radius): 231 | for x in range(-radius+1, radius): 232 | if x*x + y*y < radius*radius: 233 | self.search_dist.append((y, x)) 234 | 235 | self.radius_floor = radius-1 236 | 237 | self.crop_height = cropsize - self.radius_floor 238 | self.crop_width = cropsize - 2 * self.radius_floor 239 | return 240 | 241 | def __call__(self, label, bg_pre=None, fg_pre=None, neg_pre=None): 242 | 243 | labels_from = label[:-self.radius_floor, self.radius_floor:-self.radius_floor] 244 | labels_from = np.reshape(labels_from, [-1]) 245 | 246 | labels_to_list = [] 247 | valid_pair_list = [] 248 | 249 | for dy, dx in self.search_dist: 250 | labels_to = label[dy:dy+self.crop_height, self.radius_floor+dx:self.radius_floor+dx+self.crop_width] 251 | labels_to = np.reshape(labels_to, [-1]) 252 | 253 | valid_pair = np.logical_and(np.less(labels_to, 255), np.less(labels_from, 255)) 254 | 255 | labels_to_list.append(labels_to) 256 | valid_pair_list.append(valid_pair) 257 | 258 | bc_labels_from = np.expand_dims(labels_from, 0) 259 | concat_labels_to = np.stack(labels_to_list) 260 | concat_valid_pair = np.stack(valid_pair_list) 261 | 262 | pos_affinity_label = np.equal(bc_labels_from, concat_labels_to) 263 | 264 | bg_pos_affinity_label = np.logical_and(pos_affinity_label, np.equal(bc_labels_from, 0)) 265 | if bg_pre is not None: 266 | bg_pos_affinity_label = np.logical_and(bg_pos_affinity_label, np.logical_not(bg_pre)) 267 | 268 | fg_pos_affinity_label = np.logical_and(np.logical_and(pos_affinity_label, np.not_equal(bc_labels_from, 0)), concat_valid_pair) 269 | if fg_pre is not None: 270 | fg_pos_affinity_label = np.logical_and(fg_pos_affinity_label, 
np.logical_not(fg_pre)) 271 | 272 | neg_affinity_label = np.logical_and(np.logical_not(pos_affinity_label), concat_valid_pair) 273 | if neg_pre is not None: 274 | neg_affinity_label = np.logical_and(neg_affinity_label, np.logical_not(neg_pre)) 275 | 276 | return torch.from_numpy(bg_pos_affinity_label.astype(np.float32)),\ 277 | torch.from_numpy(fg_pos_affinity_label.astype(np.float32)),\ 278 | torch.from_numpy(neg_affinity_label.astype(np.float32)) 279 | 280 | class VOC12AffDataset(VOC12ImageDataset): 281 | 282 | def __init__(self, img_name_list_path, label_la_dir, label_ha_dir, cropsize, voc12_root, radius=5, 283 | joint_transform_list=None, img_transform_list=None, label_transform_list=None): 284 | super().__init__(img_name_list_path, voc12_root, transform=None) 285 | 286 | self.label_la_dir = label_la_dir 287 | self.label_ha_dir = label_ha_dir 288 | self.voc12_root = voc12_root 289 | 290 | self.joint_transform_list = joint_transform_list 291 | self.img_transform_list = img_transform_list 292 | self.label_transform_list = label_transform_list 293 | 294 | self.extract_aff_lab_func = ExtractAffinityLabelInRadius(cropsize=cropsize//8, radius=radius) 295 | 296 | def __len__(self): 297 | return len(self.img_name_list) 298 | 299 | def __getitem__(self, idx): 300 | name, img = super().__getitem__(idx) 301 | 302 | label_la_path = os.path.join(self.label_la_dir, name + '.npy') 303 | 304 | label_ha_path = os.path.join(self.label_ha_dir, name + '.npy') 305 | 306 | label_la = np.load(label_la_path, allow_pickle=True).item() # these .npy files also store pickled dicts 307 | label_ha = np.load(label_ha_path, allow_pickle=True).item() 308 | 309 | label = np.array(list(label_la.values()) + list(label_ha.values())) 310 | label = np.transpose(label, (1, 2, 0)) 311 | 312 | for joint_transform, img_transform, label_transform \ 313 | in zip(self.joint_transform_list, self.img_transform_list, self.label_transform_list): 314 | 315 | if joint_transform: 316 | img_label = np.concatenate((img, label), axis=-1) 317 | img_label = joint_transform(img_label) 318 | img = img_label[..., :3] 319 | label = img_label[..., 3:] 320 | 321 | if img_transform: 322 | img = img_transform(img) 323 | if label_transform: 324 | label = label_transform(label) 325 | 326 | no_score_region = np.max(label, -1) < 1e-5 327 | label_la, label_ha = np.array_split(label, 2, axis=-1) 328 | label_la = np.argmax(label_la, axis=-1).astype(np.uint8) 329 | label_ha = np.argmax(label_ha, axis=-1).astype(np.uint8) 330 | label = label_la.copy() 331 | label[label_la == 0] = 255 332 | label[label_ha == 0] = 0 333 | label[no_score_region] = 255 # mostly the outside of the cropped region 334 | label = self.extract_aff_lab_func(label) 335 | 336 | return img, label 337 | 338 | class VOC12AffGtDataset(VOC12ImageDataset): 339 | 340 | def __init__(self, img_name_list_path, label_dir, cropsize, voc12_root, radius=5, 341 | joint_transform_list=None, img_transform_list=None, label_transform_list=None): 342 | super().__init__(img_name_list_path, voc12_root, transform=None) 343 | 344 | self.label_dir = label_dir 345 | self.voc12_root = voc12_root 346 | 347 | self.joint_transform_list = joint_transform_list 348 | self.img_transform_list = img_transform_list 349 | self.label_transform_list = label_transform_list 350 | 351 | self.extract_aff_lab_func = ExtractAffinityLabelInRadius(cropsize=cropsize//8, radius=radius) 352 | 353 | def __len__(self): 354 | return len(self.img_name_list) 355 | 356 | def __getitem__(self, idx): 357 | name, img = super().__getitem__(idx) 358 | 359 | label_path = os.path.join(self.label_dir, name + '.png') 360 | 361 | label =
scipy.misc.imread(label_path) 362 | 363 | for joint_transform, img_transform, label_transform \ 364 | in zip(self.joint_transform_list, self.img_transform_list, self.label_transform_list): 365 | 366 | if joint_transform: 367 | img_label = np.concatenate((img, label), axis=-1) 368 | img_label = joint_transform(img_label) 369 | img = img_label[..., :3] 370 | label = img_label[..., 3:] 371 | 372 | if img_transform: 373 | img = img_transform(img) 374 | if label_transform: 375 | label = label_transform(label) 376 | 377 | label = self.extract_aff_lab_func(label) 378 | 379 | return img, label 380 | -------------------------------------------------------------------------------- /voc12/test.txt: -------------------------------------------------------------------------------- 1 | /JPEGImages/2008_000006.jpg 2 | /JPEGImages/2008_000011.jpg 3 | /JPEGImages/2008_000012.jpg 4 | /JPEGImages/2008_000018.jpg 5 | /JPEGImages/2008_000024.jpg 6 | /JPEGImages/2008_000030.jpg 7 | /JPEGImages/2008_000031.jpg 8 | /JPEGImages/2008_000046.jpg 9 | /JPEGImages/2008_000047.jpg 10 | /JPEGImages/2008_000048.jpg 11 | /JPEGImages/2008_000057.jpg 12 | /JPEGImages/2008_000058.jpg 13 | /JPEGImages/2008_000068.jpg 14 | /JPEGImages/2008_000072.jpg 15 | /JPEGImages/2008_000079.jpg 16 | /JPEGImages/2008_000081.jpg 17 | /JPEGImages/2008_000083.jpg 18 | /JPEGImages/2008_000088.jpg 19 | /JPEGImages/2008_000094.jpg 20 | /JPEGImages/2008_000101.jpg 21 | /JPEGImages/2008_000104.jpg 22 | /JPEGImages/2008_000106.jpg 23 | /JPEGImages/2008_000108.jpg 24 | /JPEGImages/2008_000110.jpg 25 | /JPEGImages/2008_000111.jpg 26 | /JPEGImages/2008_000126.jpg 27 | /JPEGImages/2008_000127.jpg 28 | /JPEGImages/2008_000129.jpg 29 | /JPEGImages/2008_000130.jpg 30 | /JPEGImages/2008_000135.jpg 31 | /JPEGImages/2008_000150.jpg 32 | /JPEGImages/2008_000152.jpg 33 | /JPEGImages/2008_000156.jpg 34 | /JPEGImages/2008_000159.jpg 35 | /JPEGImages/2008_000160.jpg 36 | /JPEGImages/2008_000161.jpg 37 | /JPEGImages/2008_000166.jpg 38 | /JPEGImages/2008_000167.jpg 39 | /JPEGImages/2008_000168.jpg 40 | /JPEGImages/2008_000169.jpg 41 | /JPEGImages/2008_000171.jpg 42 | /JPEGImages/2008_000175.jpg 43 | /JPEGImages/2008_000178.jpg 44 | /JPEGImages/2008_000186.jpg 45 | /JPEGImages/2008_000198.jpg 46 | /JPEGImages/2008_000206.jpg 47 | /JPEGImages/2008_000208.jpg 48 | /JPEGImages/2008_000209.jpg 49 | /JPEGImages/2008_000211.jpg 50 | /JPEGImages/2008_000220.jpg 51 | /JPEGImages/2008_000224.jpg 52 | /JPEGImages/2008_000230.jpg 53 | /JPEGImages/2008_000240.jpg 54 | /JPEGImages/2008_000248.jpg 55 | /JPEGImages/2008_000249.jpg 56 | /JPEGImages/2008_000250.jpg 57 | /JPEGImages/2008_000256.jpg 58 | /JPEGImages/2008_000279.jpg 59 | /JPEGImages/2008_000282.jpg 60 | /JPEGImages/2008_000285.jpg 61 | /JPEGImages/2008_000286.jpg 62 | /JPEGImages/2008_000296.jpg 63 | /JPEGImages/2008_000300.jpg 64 | /JPEGImages/2008_000322.jpg 65 | /JPEGImages/2008_000324.jpg 66 | /JPEGImages/2008_000337.jpg 67 | /JPEGImages/2008_000366.jpg 68 | /JPEGImages/2008_000369.jpg 69 | /JPEGImages/2008_000377.jpg 70 | /JPEGImages/2008_000384.jpg 71 | /JPEGImages/2008_000390.jpg 72 | /JPEGImages/2008_000404.jpg 73 | /JPEGImages/2008_000411.jpg 74 | /JPEGImages/2008_000434.jpg 75 | /JPEGImages/2008_000440.jpg 76 | /JPEGImages/2008_000460.jpg 77 | /JPEGImages/2008_000467.jpg 78 | /JPEGImages/2008_000478.jpg 79 | /JPEGImages/2008_000485.jpg 80 | /JPEGImages/2008_000487.jpg 81 | /JPEGImages/2008_000490.jpg 82 | /JPEGImages/2008_000503.jpg 83 | /JPEGImages/2008_000504.jpg 84 | /JPEGImages/2008_000507.jpg 85 | 
/JPEGImages/2008_000513.jpg 86 | /JPEGImages/2008_000523.jpg 87 | /JPEGImages/2008_000529.jpg 88 | /JPEGImages/2008_000556.jpg 89 | /JPEGImages/2008_000565.jpg 90 | /JPEGImages/2008_000580.jpg 91 | /JPEGImages/2008_000590.jpg 92 | /JPEGImages/2008_000596.jpg 93 | /JPEGImages/2008_000597.jpg 94 | /JPEGImages/2008_000600.jpg 95 | /JPEGImages/2008_000603.jpg 96 | /JPEGImages/2008_000604.jpg 97 | /JPEGImages/2008_000612.jpg 98 | /JPEGImages/2008_000617.jpg 99 | /JPEGImages/2008_000621.jpg 100 | /JPEGImages/2008_000627.jpg 101 | /JPEGImages/2008_000633.jpg 102 | /JPEGImages/2008_000643.jpg 103 | /JPEGImages/2008_000644.jpg 104 | /JPEGImages/2008_000649.jpg 105 | /JPEGImages/2008_000651.jpg 106 | /JPEGImages/2008_000664.jpg 107 | /JPEGImages/2008_000665.jpg 108 | /JPEGImages/2008_000680.jpg 109 | /JPEGImages/2008_000681.jpg 110 | /JPEGImages/2008_000684.jpg 111 | /JPEGImages/2008_000685.jpg 112 | /JPEGImages/2008_000688.jpg 113 | /JPEGImages/2008_000693.jpg 114 | /JPEGImages/2008_000698.jpg 115 | /JPEGImages/2008_000707.jpg 116 | /JPEGImages/2008_000709.jpg 117 | /JPEGImages/2008_000712.jpg 118 | /JPEGImages/2008_000747.jpg 119 | /JPEGImages/2008_000751.jpg 120 | /JPEGImages/2008_000754.jpg 121 | /JPEGImages/2008_000762.jpg 122 | /JPEGImages/2008_000767.jpg 123 | /JPEGImages/2008_000768.jpg 124 | /JPEGImages/2008_000773.jpg 125 | /JPEGImages/2008_000774.jpg 126 | /JPEGImages/2008_000779.jpg 127 | /JPEGImages/2008_000797.jpg 128 | /JPEGImages/2008_000813.jpg 129 | /JPEGImages/2008_000816.jpg 130 | /JPEGImages/2008_000846.jpg 131 | /JPEGImages/2008_000866.jpg 132 | /JPEGImages/2008_000871.jpg 133 | /JPEGImages/2008_000872.jpg 134 | /JPEGImages/2008_000891.jpg 135 | /JPEGImages/2008_000892.jpg 136 | /JPEGImages/2008_000894.jpg 137 | /JPEGImages/2008_000896.jpg 138 | /JPEGImages/2008_000898.jpg 139 | /JPEGImages/2008_000909.jpg 140 | /JPEGImages/2008_000913.jpg 141 | /JPEGImages/2008_000920.jpg 142 | /JPEGImages/2008_000933.jpg 143 | /JPEGImages/2008_000935.jpg 144 | /JPEGImages/2008_000937.jpg 145 | /JPEGImages/2008_000938.jpg 146 | /JPEGImages/2008_000954.jpg 147 | /JPEGImages/2008_000958.jpg 148 | /JPEGImages/2008_000963.jpg 149 | /JPEGImages/2008_000967.jpg 150 | /JPEGImages/2008_000974.jpg 151 | /JPEGImages/2008_000986.jpg 152 | /JPEGImages/2008_000994.jpg 153 | /JPEGImages/2008_000995.jpg 154 | /JPEGImages/2008_001008.jpg 155 | /JPEGImages/2008_001010.jpg 156 | /JPEGImages/2008_001014.jpg 157 | /JPEGImages/2008_001016.jpg 158 | /JPEGImages/2008_001025.jpg 159 | /JPEGImages/2008_001029.jpg 160 | /JPEGImages/2008_001037.jpg 161 | /JPEGImages/2008_001059.jpg 162 | /JPEGImages/2008_001061.jpg 163 | /JPEGImages/2008_001072.jpg 164 | /JPEGImages/2008_001124.jpg 165 | /JPEGImages/2008_001126.jpg 166 | /JPEGImages/2008_001131.jpg 167 | /JPEGImages/2008_001138.jpg 168 | /JPEGImages/2008_001144.jpg 169 | /JPEGImages/2008_001151.jpg 170 | /JPEGImages/2008_001156.jpg 171 | /JPEGImages/2008_001179.jpg 172 | /JPEGImages/2008_001181.jpg 173 | /JPEGImages/2008_001184.jpg 174 | /JPEGImages/2008_001186.jpg 175 | /JPEGImages/2008_001197.jpg 176 | /JPEGImages/2008_001207.jpg 177 | /JPEGImages/2008_001212.jpg 178 | /JPEGImages/2008_001233.jpg 179 | /JPEGImages/2008_001234.jpg 180 | /JPEGImages/2008_001258.jpg 181 | /JPEGImages/2008_001268.jpg 182 | /JPEGImages/2008_001279.jpg 183 | /JPEGImages/2008_001281.jpg 184 | /JPEGImages/2008_001288.jpg 185 | /JPEGImages/2008_001291.jpg 186 | /JPEGImages/2008_001298.jpg 187 | /JPEGImages/2008_001309.jpg 188 | /JPEGImages/2008_001315.jpg 189 | /JPEGImages/2008_001316.jpg 190 
/JPEGImages/2008_001319.jpg
/JPEGImages/2008_001327.jpg
/JPEGImages/2008_001328.jpg
/JPEGImages/2008_001332.jpg
/JPEGImages/2008_001341.jpg
/JPEGImages/2008_001347.jpg
/JPEGImages/2008_001355.jpg
/JPEGImages/2008_001378.jpg
/JPEGImages/2008_001386.jpg
/JPEGImages/2008_001400.jpg
/JPEGImages/2008_001409.jpg
/JPEGImages/2008_001411.jpg
/JPEGImages/2008_001416.jpg
/JPEGImages/2008_001418.jpg
/JPEGImages/2008_001435.jpg
/JPEGImages/2008_001459.jpg
/JPEGImages/2008_001469.jpg
/JPEGImages/2008_001474.jpg
/JPEGImages/2008_001477.jpg
/JPEGImages/2008_001483.jpg
/JPEGImages/2008_001484.jpg
/JPEGImages/2008_001485.jpg
/JPEGImages/2008_001496.jpg
/JPEGImages/2008_001507.jpg
/JPEGImages/2008_001511.jpg
/JPEGImages/2008_001519.jpg
/JPEGImages/2008_001557.jpg
/JPEGImages/2008_001567.jpg
/JPEGImages/2008_001570.jpg
/JPEGImages/2008_001571.jpg
/JPEGImages/2008_001572.jpg
/JPEGImages/2008_001579.jpg
/JPEGImages/2008_001587.jpg
/JPEGImages/2008_001608.jpg
/JPEGImages/2008_001611.jpg
/JPEGImages/2008_001614.jpg
/JPEGImages/2008_001621.jpg
/JPEGImages/2008_001639.jpg
/JPEGImages/2008_001658.jpg
/JPEGImages/2008_001678.jpg
/JPEGImages/2008_001700.jpg
/JPEGImages/2008_001713.jpg
/JPEGImages/2008_001720.jpg
/JPEGImages/2008_001755.jpg
/JPEGImages/2008_001779.jpg
/JPEGImages/2008_001785.jpg
/JPEGImages/2008_001793.jpg
/JPEGImages/2008_001794.jpg
/JPEGImages/2008_001803.jpg
/JPEGImages/2008_001818.jpg
/JPEGImages/2008_001848.jpg
/JPEGImages/2008_001855.jpg
/JPEGImages/2008_001857.jpg
/JPEGImages/2008_001861.jpg
/JPEGImages/2008_001875.jpg
/JPEGImages/2008_001878.jpg
/JPEGImages/2008_001886.jpg
/JPEGImages/2008_001897.jpg
/JPEGImages/2008_001916.jpg
/JPEGImages/2008_001925.jpg
/JPEGImages/2008_001949.jpg
/JPEGImages/2008_001953.jpg
/JPEGImages/2008_001972.jpg
/JPEGImages/2008_001999.jpg
/JPEGImages/2008_002027.jpg
/JPEGImages/2008_002040.jpg
/JPEGImages/2008_002057.jpg
/JPEGImages/2008_002070.jpg
/JPEGImages/2008_002075.jpg
/JPEGImages/2008_002095.jpg
/JPEGImages/2008_002104.jpg
/JPEGImages/2008_002105.jpg
/JPEGImages/2008_002106.jpg
/JPEGImages/2008_002136.jpg
/JPEGImages/2008_002137.jpg
/JPEGImages/2008_002147.jpg
/JPEGImages/2008_002149.jpg
/JPEGImages/2008_002163.jpg
/JPEGImages/2008_002173.jpg
/JPEGImages/2008_002174.jpg
/JPEGImages/2008_002184.jpg
/JPEGImages/2008_002186.jpg
/JPEGImages/2008_002188.jpg
/JPEGImages/2008_002190.jpg
/JPEGImages/2008_002203.jpg
/JPEGImages/2008_002211.jpg
/JPEGImages/2008_002217.jpg
/JPEGImages/2008_002228.jpg
/JPEGImages/2008_002233.jpg
/JPEGImages/2008_002246.jpg
/JPEGImages/2008_002257.jpg
/JPEGImages/2008_002261.jpg
/JPEGImages/2008_002285.jpg
/JPEGImages/2008_002287.jpg
/JPEGImages/2008_002295.jpg
/JPEGImages/2008_002303.jpg
/JPEGImages/2008_002306.jpg
/JPEGImages/2008_002309.jpg
/JPEGImages/2008_002310.jpg
/JPEGImages/2008_002318.jpg
/JPEGImages/2008_002320.jpg
/JPEGImages/2008_002332.jpg
/JPEGImages/2008_002337.jpg
/JPEGImages/2008_002345.jpg
/JPEGImages/2008_002348.jpg
/JPEGImages/2008_002352.jpg
/JPEGImages/2008_002360.jpg
/JPEGImages/2008_002381.jpg
/JPEGImages/2008_002387.jpg
/JPEGImages/2008_002388.jpg
/JPEGImages/2008_002393.jpg
/JPEGImages/2008_002406.jpg
/JPEGImages/2008_002440.jpg
/JPEGImages/2008_002455.jpg
/JPEGImages/2008_002460.jpg
/JPEGImages/2008_002462.jpg
/JPEGImages/2008_002480.jpg
/JPEGImages/2008_002518.jpg
/JPEGImages/2008_002525.jpg
/JPEGImages/2008_002535.jpg
/JPEGImages/2008_002544.jpg
/JPEGImages/2008_002553.jpg
/JPEGImages/2008_002569.jpg
/JPEGImages/2008_002572.jpg
/JPEGImages/2008_002587.jpg
/JPEGImages/2008_002635.jpg
/JPEGImages/2008_002655.jpg
/JPEGImages/2008_002695.jpg
/JPEGImages/2008_002702.jpg
/JPEGImages/2008_002706.jpg
/JPEGImages/2008_002707.jpg
/JPEGImages/2008_002722.jpg
/JPEGImages/2008_002745.jpg
/JPEGImages/2008_002757.jpg
/JPEGImages/2008_002779.jpg
/JPEGImages/2008_002805.jpg
/JPEGImages/2008_002871.jpg
/JPEGImages/2008_002895.jpg
/JPEGImages/2008_002905.jpg
/JPEGImages/2008_002923.jpg
/JPEGImages/2008_002927.jpg
/JPEGImages/2008_002939.jpg
/JPEGImages/2008_002941.jpg
/JPEGImages/2008_002962.jpg
/JPEGImages/2008_002975.jpg
/JPEGImages/2008_003000.jpg
/JPEGImages/2008_003031.jpg
/JPEGImages/2008_003038.jpg
/JPEGImages/2008_003042.jpg
/JPEGImages/2008_003069.jpg
/JPEGImages/2008_003070.jpg
/JPEGImages/2008_003115.jpg
/JPEGImages/2008_003116.jpg
/JPEGImages/2008_003130.jpg
/JPEGImages/2008_003137.jpg
/JPEGImages/2008_003138.jpg
/JPEGImages/2008_003139.jpg
/JPEGImages/2008_003165.jpg
/JPEGImages/2008_003171.jpg
/JPEGImages/2008_003176.jpg
/JPEGImages/2008_003192.jpg
/JPEGImages/2008_003194.jpg
/JPEGImages/2008_003195.jpg
/JPEGImages/2008_003198.jpg
/JPEGImages/2008_003227.jpg
/JPEGImages/2008_003247.jpg
/JPEGImages/2008_003262.jpg
/JPEGImages/2008_003298.jpg
/JPEGImages/2008_003299.jpg
/JPEGImages/2008_003307.jpg
/JPEGImages/2008_003337.jpg
/JPEGImages/2008_003353.jpg
/JPEGImages/2008_003355.jpg
/JPEGImages/2008_003363.jpg
/JPEGImages/2008_003383.jpg
/JPEGImages/2008_003389.jpg
/JPEGImages/2008_003392.jpg
/JPEGImages/2008_003399.jpg
/JPEGImages/2008_003436.jpg
/JPEGImages/2008_003457.jpg
/JPEGImages/2008_003465.jpg
/JPEGImages/2008_003481.jpg
/JPEGImages/2008_003539.jpg
/JPEGImages/2008_003548.jpg
/JPEGImages/2008_003550.jpg
/JPEGImages/2008_003567.jpg
/JPEGImages/2008_003568.jpg
/JPEGImages/2008_003606.jpg
/JPEGImages/2008_003615.jpg
/JPEGImages/2008_003654.jpg
/JPEGImages/2008_003670.jpg
/JPEGImages/2008_003700.jpg
/JPEGImages/2008_003705.jpg
/JPEGImages/2008_003727.jpg
/JPEGImages/2008_003731.jpg
/JPEGImages/2008_003734.jpg
/JPEGImages/2008_003760.jpg
/JPEGImages/2008_003804.jpg
/JPEGImages/2008_003807.jpg
/JPEGImages/2008_003810.jpg
/JPEGImages/2008_003822.jpg
/JPEGImages/2008_003833.jpg
/JPEGImages/2008_003877.jpg
/JPEGImages/2008_003879.jpg
/JPEGImages/2008_003895.jpg
/JPEGImages/2008_003901.jpg
/JPEGImages/2008_003903.jpg
/JPEGImages/2008_003911.jpg
/JPEGImages/2008_003919.jpg
/JPEGImages/2008_003927.jpg
/JPEGImages/2008_003937.jpg
/JPEGImages/2008_003946.jpg
/JPEGImages/2008_003950.jpg
/JPEGImages/2008_003955.jpg
/JPEGImages/2008_003981.jpg
/JPEGImages/2008_003991.jpg
/JPEGImages/2008_004009.jpg
/JPEGImages/2008_004039.jpg
/JPEGImages/2008_004052.jpg
/JPEGImages/2008_004063.jpg
/JPEGImages/2008_004070.jpg
/JPEGImages/2008_004078.jpg
/JPEGImages/2008_004104.jpg
/JPEGImages/2008_004139.jpg
/JPEGImages/2008_004177.jpg
/JPEGImages/2008_004181.jpg
/JPEGImages/2008_004200.jpg
/JPEGImages/2008_004219.jpg
/JPEGImages/2008_004236.jpg
/JPEGImages/2008_004250.jpg
/JPEGImages/2008_004266.jpg
/JPEGImages/2008_004299.jpg
/JPEGImages/2008_004320.jpg
/JPEGImages/2008_004334.jpg
/JPEGImages/2008_004343.jpg
/JPEGImages/2008_004349.jpg
/JPEGImages/2008_004366.jpg
/JPEGImages/2008_004386.jpg
/JPEGImages/2008_004401.jpg
/JPEGImages/2008_004423.jpg
/JPEGImages/2008_004448.jpg
/JPEGImages/2008_004481.jpg
/JPEGImages/2008_004516.jpg
/JPEGImages/2008_004536.jpg
/JPEGImages/2008_004582.jpg
/JPEGImages/2008_004609.jpg
/JPEGImages/2008_004638.jpg
/JPEGImages/2008_004642.jpg
/JPEGImages/2008_004644.jpg
/JPEGImages/2008_004669.jpg
/JPEGImages/2008_004673.jpg
/JPEGImages/2008_004691.jpg
/JPEGImages/2008_004693.jpg
/JPEGImages/2008_004709.jpg
/JPEGImages/2008_004715.jpg
/JPEGImages/2008_004757.jpg
/JPEGImages/2008_004775.jpg
/JPEGImages/2008_004782.jpg
/JPEGImages/2008_004785.jpg
/JPEGImages/2008_004798.jpg
/JPEGImages/2008_004848.jpg
/JPEGImages/2008_004861.jpg
/JPEGImages/2008_004870.jpg
/JPEGImages/2008_004877.jpg
/JPEGImages/2008_004884.jpg
/JPEGImages/2008_004891.jpg
/JPEGImages/2008_004901.jpg
/JPEGImages/2008_004919.jpg
/JPEGImages/2008_005058.jpg
/JPEGImages/2008_005069.jpg
/JPEGImages/2008_005086.jpg
/JPEGImages/2008_005087.jpg
/JPEGImages/2008_005112.jpg
/JPEGImages/2008_005113.jpg
/JPEGImages/2008_005118.jpg
/JPEGImages/2008_005128.jpg
/JPEGImages/2008_005129.jpg
/JPEGImages/2008_005153.jpg
/JPEGImages/2008_005161.jpg
/JPEGImages/2008_005162.jpg
/JPEGImages/2008_005165.jpg
/JPEGImages/2008_005187.jpg
/JPEGImages/2008_005227.jpg
/JPEGImages/2008_005308.jpg
/JPEGImages/2008_005318.jpg
/JPEGImages/2008_005320.jpg
/JPEGImages/2008_005351.jpg
/JPEGImages/2008_005372.jpg
/JPEGImages/2008_005383.jpg
/JPEGImages/2008_005391.jpg
/JPEGImages/2008_005407.jpg
/JPEGImages/2008_005420.jpg
/JPEGImages/2008_005440.jpg
/JPEGImages/2008_005487.jpg
/JPEGImages/2008_005493.jpg
/JPEGImages/2008_005520.jpg
/JPEGImages/2008_005551.jpg
/JPEGImages/2008_005556.jpg
/JPEGImages/2008_005576.jpg
/JPEGImages/2008_005578.jpg
/JPEGImages/2008_005594.jpg
/JPEGImages/2008_005619.jpg
/JPEGImages/2008_005629.jpg
/JPEGImages/2008_005644.jpg
/JPEGImages/2008_005645.jpg
/JPEGImages/2008_005651.jpg
/JPEGImages/2008_005661.jpg
/JPEGImages/2008_005662.jpg
/JPEGImages/2008_005667.jpg
/JPEGImages/2008_005694.jpg
/JPEGImages/2008_005697.jpg
/JPEGImages/2008_005709.jpg
/JPEGImages/2008_005710.jpg
/JPEGImages/2008_005733.jpg
/JPEGImages/2008_005749.jpg
/JPEGImages/2008_005753.jpg
/JPEGImages/2008_005771.jpg
/JPEGImages/2008_005781.jpg
/JPEGImages/2008_005793.jpg
/JPEGImages/2008_005802.jpg
/JPEGImages/2008_005833.jpg
/JPEGImages/2008_005844.jpg
/JPEGImages/2008_005908.jpg
/JPEGImages/2008_005931.jpg
/JPEGImages/2008_005952.jpg
/JPEGImages/2008_006016.jpg
/JPEGImages/2008_006030.jpg
/JPEGImages/2008_006033.jpg
/JPEGImages/2008_006054.jpg
/JPEGImages/2008_006073.jpg
/JPEGImages/2008_006091.jpg
/JPEGImages/2008_006142.jpg
/JPEGImages/2008_006150.jpg
/JPEGImages/2008_006206.jpg
/JPEGImages/2008_006217.jpg
/JPEGImages/2008_006264.jpg
/JPEGImages/2008_006283.jpg
/JPEGImages/2008_006308.jpg
/JPEGImages/2008_006313.jpg
/JPEGImages/2008_006333.jpg
/JPEGImages/2008_006343.jpg
/JPEGImages/2008_006381.jpg
/JPEGImages/2008_006391.jpg
/JPEGImages/2008_006423.jpg
/JPEGImages/2008_006428.jpg
/JPEGImages/2008_006440.jpg
/JPEGImages/2008_006444.jpg
/JPEGImages/2008_006473.jpg
/JPEGImages/2008_006505.jpg
/JPEGImages/2008_006531.jpg
/JPEGImages/2008_006560.jpg
/JPEGImages/2008_006571.jpg
/JPEGImages/2008_006582.jpg
/JPEGImages/2008_006594.jpg
/JPEGImages/2008_006601.jpg
/JPEGImages/2008_006633.jpg
/JPEGImages/2008_006653.jpg
/JPEGImages/2008_006678.jpg
/JPEGImages/2008_006755.jpg
/JPEGImages/2008_006772.jpg
/JPEGImages/2008_006788.jpg
/JPEGImages/2008_006799.jpg
/JPEGImages/2008_006809.jpg
/JPEGImages/2008_006838.jpg
/JPEGImages/2008_006845.jpg
/JPEGImages/2008_006852.jpg
/JPEGImages/2008_006894.jpg
/JPEGImages/2008_006905.jpg
/JPEGImages/2008_006947.jpg
/JPEGImages/2008_006983.jpg
/JPEGImages/2008_007049.jpg
/JPEGImages/2008_007065.jpg
/JPEGImages/2008_007068.jpg
/JPEGImages/2008_007111.jpg
/JPEGImages/2008_007148.jpg
/JPEGImages/2008_007159.jpg
/JPEGImages/2008_007193.jpg
/JPEGImages/2008_007228.jpg
/JPEGImages/2008_007235.jpg
/JPEGImages/2008_007249.jpg
/JPEGImages/2008_007255.jpg
/JPEGImages/2008_007268.jpg
/JPEGImages/2008_007275.jpg
/JPEGImages/2008_007292.jpg
/JPEGImages/2008_007299.jpg
/JPEGImages/2008_007306.jpg
/JPEGImages/2008_007316.jpg
/JPEGImages/2008_007400.jpg
/JPEGImages/2008_007401.jpg
/JPEGImages/2008_007419.jpg
/JPEGImages/2008_007437.jpg
/JPEGImages/2008_007483.jpg
/JPEGImages/2008_007487.jpg
/JPEGImages/2008_007520.jpg
/JPEGImages/2008_007551.jpg
/JPEGImages/2008_007603.jpg
/JPEGImages/2008_007616.jpg
/JPEGImages/2008_007654.jpg
/JPEGImages/2008_007663.jpg
/JPEGImages/2008_007708.jpg
/JPEGImages/2008_007795.jpg
/JPEGImages/2008_007801.jpg
/JPEGImages/2008_007859.jpg
/JPEGImages/2008_007903.jpg
/JPEGImages/2008_007920.jpg
/JPEGImages/2008_007926.jpg
/JPEGImages/2008_008014.jpg
/JPEGImages/2008_008017.jpg
/JPEGImages/2008_008060.jpg
/JPEGImages/2008_008077.jpg
/JPEGImages/2008_008107.jpg
/JPEGImages/2008_008108.jpg
/JPEGImages/2008_008119.jpg
/JPEGImages/2008_008126.jpg
/JPEGImages/2008_008133.jpg
/JPEGImages/2008_008144.jpg
/JPEGImages/2008_008216.jpg
/JPEGImages/2008_008244.jpg
/JPEGImages/2008_008248.jpg
/JPEGImages/2008_008250.jpg
/JPEGImages/2008_008260.jpg
/JPEGImages/2008_008277.jpg
/JPEGImages/2008_008280.jpg
/JPEGImages/2008_008290.jpg
/JPEGImages/2008_008304.jpg
/JPEGImages/2008_008340.jpg
/JPEGImages/2008_008371.jpg
/JPEGImages/2008_008390.jpg
/JPEGImages/2008_008397.jpg
/JPEGImages/2008_008409.jpg
/JPEGImages/2008_008412.jpg
/JPEGImages/2008_008419.jpg
/JPEGImages/2008_008454.jpg
/JPEGImages/2008_008491.jpg
/JPEGImages/2008_008498.jpg
/JPEGImages/2008_008565.jpg
/JPEGImages/2008_008599.jpg
/JPEGImages/2008_008603.jpg
/JPEGImages/2008_008631.jpg
/JPEGImages/2008_008634.jpg
/JPEGImages/2008_008640.jpg
/JPEGImages/2008_008646.jpg
/JPEGImages/2008_008660.jpg
/JPEGImages/2008_008663.jpg
/JPEGImages/2008_008664.jpg
/JPEGImages/2008_008709.jpg
/JPEGImages/2008_008720.jpg
/JPEGImages/2008_008747.jpg
/JPEGImages/2008_008768.jpg
/JPEGImages/2009_000004.jpg
/JPEGImages/2009_000019.jpg
/JPEGImages/2009_000024.jpg
/JPEGImages/2009_000025.jpg
/JPEGImages/2009_000053.jpg
/JPEGImages/2009_000076.jpg
/JPEGImages/2009_000107.jpg
/JPEGImages/2009_000110.jpg
/JPEGImages/2009_000115.jpg
/JPEGImages/2009_000117.jpg
/JPEGImages/2009_000175.jpg
/JPEGImages/2009_000220.jpg
/JPEGImages/2009_000259.jpg
/JPEGImages/2009_000275.jpg
/JPEGImages/2009_000314.jpg
/JPEGImages/2009_000368.jpg
/JPEGImages/2009_000373.jpg
/JPEGImages/2009_000384.jpg
/JPEGImages/2009_000388.jpg
/JPEGImages/2009_000423.jpg
/JPEGImages/2009_000433.jpg
/JPEGImages/2009_000434.jpg
/JPEGImages/2009_000458.jpg
/JPEGImages/2009_000475.jpg
/JPEGImages/2009_000481.jpg
/JPEGImages/2009_000495.jpg
/JPEGImages/2009_000514.jpg
/JPEGImages/2009_000555.jpg
/JPEGImages/2009_000556.jpg
/JPEGImages/2009_000561.jpg
/JPEGImages/2009_000571.jpg
/JPEGImages/2009_000581.jpg
/JPEGImages/2009_000605.jpg
/JPEGImages/2009_000609.jpg
/JPEGImages/2009_000644.jpg
/JPEGImages/2009_000654.jpg
/JPEGImages/2009_000671.jpg
/JPEGImages/2009_000733.jpg
/JPEGImages/2009_000740.jpg
/JPEGImages/2009_000766.jpg
/JPEGImages/2009_000775.jpg
/JPEGImages/2009_000776.jpg
/JPEGImages/2009_000795.jpg
/JPEGImages/2009_000850.jpg
/JPEGImages/2009_000881.jpg
/JPEGImages/2009_000900.jpg
/JPEGImages/2009_000914.jpg
/JPEGImages/2009_000941.jpg
/JPEGImages/2009_000977.jpg
/JPEGImages/2009_000984.jpg
/JPEGImages/2009_000986.jpg
/JPEGImages/2009_001005.jpg
/JPEGImages/2009_001015.jpg
/JPEGImages/2009_001058.jpg
/JPEGImages/2009_001072.jpg
/JPEGImages/2009_001087.jpg
/JPEGImages/2009_001092.jpg
/JPEGImages/2009_001109.jpg
/JPEGImages/2009_001114.jpg
/JPEGImages/2009_001115.jpg
/JPEGImages/2009_001141.jpg
/JPEGImages/2009_001174.jpg
/JPEGImages/2009_001175.jpg
/JPEGImages/2009_001182.jpg
/JPEGImages/2009_001222.jpg
/JPEGImages/2009_001228.jpg
/JPEGImages/2009_001246.jpg
/JPEGImages/2009_001262.jpg
/JPEGImages/2009_001274.jpg
/JPEGImages/2009_001284.jpg
/JPEGImages/2009_001297.jpg
/JPEGImages/2009_001331.jpg
/JPEGImages/2009_001336.jpg
/JPEGImages/2009_001337.jpg
/JPEGImages/2009_001379.jpg
/JPEGImages/2009_001392.jpg
/JPEGImages/2009_001451.jpg
/JPEGImages/2009_001485.jpg
/JPEGImages/2009_001488.jpg
/JPEGImages/2009_001497.jpg
/JPEGImages/2009_001504.jpg
/JPEGImages/2009_001506.jpg
/JPEGImages/2009_001573.jpg
/JPEGImages/2009_001576.jpg
/JPEGImages/2009_001603.jpg
/JPEGImages/2009_001613.jpg
/JPEGImages/2009_001652.jpg
/JPEGImages/2009_001661.jpg
/JPEGImages/2009_001668.jpg
/JPEGImages/2009_001680.jpg
/JPEGImages/2009_001688.jpg
/JPEGImages/2009_001697.jpg
/JPEGImages/2009_001729.jpg
/JPEGImages/2009_001771.jpg
/JPEGImages/2009_001785.jpg
/JPEGImages/2009_001793.jpg
/JPEGImages/2009_001814.jpg
/JPEGImages/2009_001866.jpg
/JPEGImages/2009_001872.jpg
/JPEGImages/2009_001880.jpg
/JPEGImages/2009_001883.jpg
/JPEGImages/2009_001891.jpg
/JPEGImages/2009_001913.jpg
/JPEGImages/2009_001938.jpg
/JPEGImages/2009_001946.jpg
/JPEGImages/2009_001953.jpg
/JPEGImages/2009_001969.jpg
/JPEGImages/2009_001978.jpg
/JPEGImages/2009_001995.jpg
/JPEGImages/2009_002007.jpg
/JPEGImages/2009_002036.jpg
/JPEGImages/2009_002041.jpg
/JPEGImages/2009_002049.jpg
/JPEGImages/2009_002051.jpg
/JPEGImages/2009_002062.jpg
/JPEGImages/2009_002063.jpg
/JPEGImages/2009_002067.jpg
/JPEGImages/2009_002085.jpg
/JPEGImages/2009_002092.jpg
/JPEGImages/2009_002114.jpg
/JPEGImages/2009_002115.jpg
/JPEGImages/2009_002142.jpg
/JPEGImages/2009_002148.jpg
/JPEGImages/2009_002157.jpg
/JPEGImages/2009_002181.jpg
/JPEGImages/2009_002220.jpg
/JPEGImages/2009_002284.jpg
/JPEGImages/2009_002287.jpg
/JPEGImages/2009_002300.jpg
/JPEGImages/2009_002310.jpg
/JPEGImages/2009_002315.jpg
/JPEGImages/2009_002334.jpg
/JPEGImages/2009_002337.jpg
/JPEGImages/2009_002354.jpg
/JPEGImages/2009_002357.jpg
/JPEGImages/2009_002411.jpg
/JPEGImages/2009_002426.jpg
/JPEGImages/2009_002458.jpg
/JPEGImages/2009_002459.jpg
/JPEGImages/2009_002461.jpg
/JPEGImages/2009_002466.jpg
/JPEGImages/2009_002481.jpg
/JPEGImages/2009_002483.jpg
/JPEGImages/2009_002503.jpg
/JPEGImages/2009_002581.jpg
/JPEGImages/2009_002583.jpg
/JPEGImages/2009_002589.jpg
/JPEGImages/2009_002600.jpg
/JPEGImages/2009_002601.jpg
/JPEGImages/2009_002602.jpg
/JPEGImages/2009_002641.jpg
/JPEGImages/2009_002646.jpg
/JPEGImages/2009_002656.jpg
/JPEGImages/2009_002666.jpg
/JPEGImages/2009_002720.jpg
/JPEGImages/2009_002767.jpg
/JPEGImages/2009_002768.jpg
/JPEGImages/2009_002794.jpg
/JPEGImages/2009_002821.jpg
/JPEGImages/2009_002825.jpg
/JPEGImages/2009_002839.jpg
/JPEGImages/2009_002840.jpg
/JPEGImages/2009_002859.jpg
/JPEGImages/2009_002860.jpg
/JPEGImages/2009_002881.jpg
/JPEGImages/2009_002889.jpg
/JPEGImages/2009_002892.jpg
/JPEGImages/2009_002895.jpg
/JPEGImages/2009_002896.jpg
/JPEGImages/2009_002900.jpg
/JPEGImages/2009_002924.jpg
/JPEGImages/2009_002966.jpg
/JPEGImages/2009_002973.jpg
/JPEGImages/2009_002981.jpg
/JPEGImages/2009_003004.jpg
/JPEGImages/2009_003021.jpg
/JPEGImages/2009_003028.jpg
/JPEGImages/2009_003037.jpg
/JPEGImages/2009_003038.jpg
/JPEGImages/2009_003055.jpg
/JPEGImages/2009_003085.jpg
/JPEGImages/2009_003100.jpg
/JPEGImages/2009_003106.jpg
/JPEGImages/2009_003117.jpg
/JPEGImages/2009_003139.jpg
/JPEGImages/2009_003170.jpg
/JPEGImages/2009_003179.jpg
/JPEGImages/2009_003184.jpg
/JPEGImages/2009_003186.jpg
/JPEGImages/2009_003190.jpg
/JPEGImages/2009_003221.jpg
/JPEGImages/2009_003236.jpg
/JPEGImages/2009_003242.jpg
/JPEGImages/2009_003244.jpg
/JPEGImages/2009_003260.jpg
/JPEGImages/2009_003264.jpg
/JPEGImages/2009_003274.jpg
/JPEGImages/2009_003283.jpg
/JPEGImages/2009_003296.jpg
/JPEGImages/2009_003332.jpg
/JPEGImages/2009_003341.jpg
/JPEGImages/2009_003354.jpg
/JPEGImages/2009_003370.jpg
/JPEGImages/2009_003371.jpg
/JPEGImages/2009_003374.jpg
/JPEGImages/2009_003391.jpg
/JPEGImages/2009_003393.jpg
/JPEGImages/2009_003404.jpg
/JPEGImages/2009_003405.jpg
/JPEGImages/2009_003414.jpg
/JPEGImages/2009_003428.jpg
/JPEGImages/2009_003470.jpg
/JPEGImages/2009_003474.jpg
/JPEGImages/2009_003532.jpg
/JPEGImages/2009_003536.jpg
/JPEGImages/2009_003578.jpg
/JPEGImages/2009_003580.jpg
/JPEGImages/2009_003620.jpg
/JPEGImages/2009_003621.jpg
/JPEGImages/2009_003680.jpg
/JPEGImages/2009_003699.jpg
/JPEGImages/2009_003727.jpg
/JPEGImages/2009_003737.jpg
/JPEGImages/2009_003780.jpg
/JPEGImages/2009_003811.jpg
/JPEGImages/2009_003824.jpg
/JPEGImages/2009_003831.jpg
/JPEGImages/2009_003844.jpg
/JPEGImages/2009_003850.jpg
/JPEGImages/2009_003851.jpg
/JPEGImages/2009_003864.jpg
/JPEGImages/2009_003868.jpg
/JPEGImages/2009_003869.jpg
/JPEGImages/2009_003893.jpg
/JPEGImages/2009_003909.jpg
/JPEGImages/2009_003924.jpg
/JPEGImages/2009_003925.jpg
/JPEGImages/2009_003960.jpg
/JPEGImages/2009_003979.jpg
/JPEGImages/2009_003990.jpg
/JPEGImages/2009_003997.jpg
/JPEGImages/2009_004006.jpg
/JPEGImages/2009_004010.jpg
/JPEGImages/2009_004066.jpg
/JPEGImages/2009_004077.jpg
/JPEGImages/2009_004081.jpg
/JPEGImages/2009_004097.jpg
/JPEGImages/2009_004098.jpg
/JPEGImages/2009_004136.jpg
/JPEGImages/2009_004216.jpg
/JPEGImages/2009_004220.jpg
/JPEGImages/2009_004266.jpg
/JPEGImages/2009_004269.jpg
/JPEGImages/2009_004286.jpg
/JPEGImages/2009_004296.jpg
/JPEGImages/2009_004321.jpg
/JPEGImages/2009_004342.jpg
/JPEGImages/2009_004343.jpg
/JPEGImages/2009_004344.jpg
/JPEGImages/2009_004385.jpg
/JPEGImages/2009_004408.jpg
/JPEGImages/2009_004420.jpg
/JPEGImages/2009_004441.jpg
/JPEGImages/2009_004447.jpg
/JPEGImages/2009_004461.jpg
/JPEGImages/2009_004467.jpg
/JPEGImages/2009_004485.jpg
/JPEGImages/2009_004488.jpg
/JPEGImages/2009_004516.jpg
/JPEGImages/2009_004521.jpg
/JPEGImages/2009_004544.jpg
/JPEGImages/2009_004596.jpg
/JPEGImages/2009_004613.jpg
/JPEGImages/2009_004615.jpg
/JPEGImages/2009_004618.jpg
/JPEGImages/2009_004621.jpg
/JPEGImages/2009_004646.jpg
/JPEGImages/2009_004659.jpg
/JPEGImages/2009_004663.jpg
/JPEGImages/2009_004666.jpg
/JPEGImages/2009_004691.jpg
/JPEGImages/2009_004715.jpg
/JPEGImages/2009_004726.jpg
/JPEGImages/2009_004753.jpg
/JPEGImages/2009_004776.jpg
/JPEGImages/2009_004811.jpg
/JPEGImages/2009_004814.jpg
/JPEGImages/2009_004818.jpg
/JPEGImages/2009_004835.jpg
/JPEGImages/2009_004863.jpg
/JPEGImages/2009_004894.jpg
/JPEGImages/2009_004909.jpg
/JPEGImages/2009_004928.jpg
/JPEGImages/2009_004937.jpg
/JPEGImages/2009_004954.jpg
/JPEGImages/2009_004966.jpg
/JPEGImages/2009_004970.jpg
/JPEGImages/2009_004976.jpg
/JPEGImages/2009_005004.jpg
/JPEGImages/2009_005011.jpg
/JPEGImages/2009_005053.jpg
/JPEGImages/2009_005072.jpg
/JPEGImages/2009_005115.jpg
/JPEGImages/2009_005146.jpg
/JPEGImages/2009_005151.jpg
/JPEGImages/2009_005164.jpg
/JPEGImages/2009_005179.jpg
/JPEGImages/2009_005224.jpg
/JPEGImages/2009_005243.jpg
/JPEGImages/2009_005249.jpg
/JPEGImages/2009_005252.jpg
/JPEGImages/2009_005254.jpg
/JPEGImages/2009_005258.jpg
/JPEGImages/2009_005264.jpg
/JPEGImages/2009_005266.jpg
/JPEGImages/2009_005276.jpg
/JPEGImages/2009_005290.jpg
/JPEGImages/2009_005295.jpg
/JPEGImages/2010_000004.jpg
/JPEGImages/2010_000005.jpg
/JPEGImages/2010_000006.jpg
/JPEGImages/2010_000032.jpg
/JPEGImages/2010_000062.jpg
/JPEGImages/2010_000093.jpg
/JPEGImages/2010_000094.jpg
/JPEGImages/2010_000161.jpg
/JPEGImages/2010_000176.jpg
/JPEGImages/2010_000223.jpg
/JPEGImages/2010_000226.jpg
/JPEGImages/2010_000236.jpg
/JPEGImages/2010_000239.jpg
/JPEGImages/2010_000287.jpg
/JPEGImages/2010_000300.jpg
/JPEGImages/2010_000301.jpg
/JPEGImages/2010_000328.jpg
/JPEGImages/2010_000378.jpg
/JPEGImages/2010_000405.jpg
/JPEGImages/2010_000407.jpg
/JPEGImages/2010_000472.jpg
/JPEGImages/2010_000479.jpg
/JPEGImages/2010_000491.jpg
/JPEGImages/2010_000533.jpg
/JPEGImages/2010_000535.jpg
/JPEGImages/2010_000542.jpg
/JPEGImages/2010_000554.jpg
/JPEGImages/2010_000580.jpg
/JPEGImages/2010_000594.jpg
/JPEGImages/2010_000596.jpg
/JPEGImages/2010_000599.jpg
/JPEGImages/2010_000606.jpg
/JPEGImages/2010_000615.jpg
/JPEGImages/2010_000654.jpg
/JPEGImages/2010_000659.jpg
/JPEGImages/2010_000693.jpg
/JPEGImages/2010_000698.jpg
/JPEGImages/2010_000730.jpg
/JPEGImages/2010_000734.jpg
/JPEGImages/2010_000741.jpg
/JPEGImages/2010_000755.jpg
/JPEGImages/2010_000768.jpg
/JPEGImages/2010_000794.jpg
/JPEGImages/2010_000813.jpg
/JPEGImages/2010_000817.jpg
/JPEGImages/2010_000834.jpg
/JPEGImages/2010_000839.jpg
/JPEGImages/2010_000848.jpg
/JPEGImages/2010_000881.jpg
/JPEGImages/2010_000888.jpg
/JPEGImages/2010_000900.jpg
/JPEGImages/2010_000903.jpg
/JPEGImages/2010_000924.jpg
/JPEGImages/2010_000946.jpg
/JPEGImages/2010_000953.jpg
/JPEGImages/2010_000957.jpg
/JPEGImages/2010_000967.jpg
/JPEGImages/2010_000992.jpg
/JPEGImages/2010_000998.jpg
/JPEGImages/2010_001053.jpg
/JPEGImages/2010_001067.jpg
/JPEGImages/2010_001114.jpg
/JPEGImages/2010_001132.jpg
/JPEGImages/2010_001138.jpg
/JPEGImages/2010_001169.jpg
/JPEGImages/2010_001171.jpg
/JPEGImages/2010_001228.jpg
/JPEGImages/2010_001260.jpg
/JPEGImages/2010_001268.jpg
/JPEGImages/2010_001280.jpg
/JPEGImages/2010_001298.jpg
/JPEGImages/2010_001302.jpg
/JPEGImages/2010_001308.jpg
/JPEGImages/2010_001324.jpg
/JPEGImages/2010_001332.jpg
/JPEGImages/2010_001335.jpg
/JPEGImages/2010_001345.jpg
/JPEGImages/2010_001346.jpg
/JPEGImages/2010_001349.jpg
/JPEGImages/2010_001373.jpg
/JPEGImages/2010_001381.jpg
/JPEGImages/2010_001392.jpg
/JPEGImages/2010_001396.jpg
/JPEGImages/2010_001420.jpg
/JPEGImages/2010_001500.jpg
/JPEGImages/2010_001506.jpg
/JPEGImages/2010_001521.jpg
/JPEGImages/2010_001532.jpg
/JPEGImages/2010_001558.jpg
/JPEGImages/2010_001598.jpg
/JPEGImages/2010_001611.jpg
/JPEGImages/2010_001631.jpg
/JPEGImages/2010_001639.jpg
/JPEGImages/2010_001651.jpg
/JPEGImages/2010_001663.jpg
/JPEGImages/2010_001664.jpg
/JPEGImages/2010_001728.jpg
/JPEGImages/2010_001778.jpg
/JPEGImages/2010_001861.jpg
/JPEGImages/2010_001874.jpg
/JPEGImages/2010_001900.jpg
/JPEGImages/2010_001905.jpg
/JPEGImages/2010_001969.jpg
/JPEGImages/2010_002008.jpg
/JPEGImages/2010_002014.jpg
/JPEGImages/2010_002049.jpg
/JPEGImages/2010_002052.jpg
/JPEGImages/2010_002091.jpg
/JPEGImages/2010_002115.jpg
/JPEGImages/2010_002119.jpg
/JPEGImages/2010_002134.jpg
/JPEGImages/2010_002156.jpg
/JPEGImages/2010_002160.jpg
/JPEGImages/2010_002186.jpg
/JPEGImages/2010_002210.jpg
/JPEGImages/2010_002241.jpg
/JPEGImages/2010_002252.jpg
/JPEGImages/2010_002258.jpg
/JPEGImages/2010_002262.jpg
/JPEGImages/2010_002273.jpg
/JPEGImages/2010_002290.jpg
/JPEGImages/2010_002292.jpg
/JPEGImages/2010_002347.jpg
/JPEGImages/2010_002358.jpg
/JPEGImages/2010_002360.jpg
/JPEGImages/2010_002367.jpg
/JPEGImages/2010_002416.jpg
/JPEGImages/2010_002451.jpg
/JPEGImages/2010_002481.jpg
/JPEGImages/2010_002490.jpg
/JPEGImages/2010_002495.jpg
/JPEGImages/2010_002588.jpg
/JPEGImages/2010_002607.jpg
/JPEGImages/2010_002609.jpg
/JPEGImages/2010_002610.jpg
/JPEGImages/2010_002641.jpg
/JPEGImages/2010_002685.jpg
/JPEGImages/2010_002699.jpg
/JPEGImages/2010_002719.jpg
/JPEGImages/2010_002735.jpg
/JPEGImages/2010_002751.jpg
/JPEGImages/2010_002804.jpg
/JPEGImages/2010_002835.jpg
/JPEGImages/2010_002852.jpg
/JPEGImages/2010_002885.jpg
/JPEGImages/2010_002889.jpg
/JPEGImages/2010_002904.jpg
/JPEGImages/2010_002908.jpg
/JPEGImages/2010_002916.jpg
/JPEGImages/2010_002974.jpg
/JPEGImages/2010_002977.jpg
/JPEGImages/2010_003005.jpg
/JPEGImages/2010_003021.jpg
/JPEGImages/2010_003030.jpg
/JPEGImages/2010_003038.jpg
/JPEGImages/2010_003046.jpg
/JPEGImages/2010_003052.jpg
/JPEGImages/2010_003089.jpg
/JPEGImages/2010_003110.jpg
/JPEGImages/2010_003118.jpg
/JPEGImages/2010_003171.jpg
/JPEGImages/2010_003217.jpg
/JPEGImages/2010_003221.jpg
/JPEGImages/2010_003228.jpg
/JPEGImages/2010_003243.jpg
/JPEGImages/2010_003271.jpg
/JPEGImages/2010_003295.jpg
/JPEGImages/2010_003306.jpg
/JPEGImages/2010_003324.jpg
/JPEGImages/2010_003363.jpg
/JPEGImages/2010_003382.jpg
/JPEGImages/2010_003388.jpg
/JPEGImages/2010_003389.jpg
/JPEGImages/2010_003392.jpg
/JPEGImages/2010_003430.jpg
/JPEGImages/2010_003442.jpg
/JPEGImages/2010_003459.jpg
/JPEGImages/2010_003485.jpg
/JPEGImages/2010_003486.jpg
/JPEGImages/2010_003500.jpg
/JPEGImages/2010_003523.jpg
/JPEGImages/2010_003542.jpg
/JPEGImages/2010_003552.jpg
/JPEGImages/2010_003570.jpg
/JPEGImages/2010_003572.jpg
/JPEGImages/2010_003586.jpg
/JPEGImages/2010_003615.jpg
/JPEGImages/2010_003623.jpg
/JPEGImages/2010_003657.jpg
/JPEGImages/2010_003666.jpg
/JPEGImages/2010_003705.jpg
/JPEGImages/2010_003710.jpg
/JPEGImages/2010_003720.jpg
/JPEGImages/2010_003733.jpg
/JPEGImages/2010_003750.jpg
/JPEGImages/2010_003767.jpg
/JPEGImages/2010_003802.jpg
/JPEGImages/2010_003809.jpg
/JPEGImages/2010_003830.jpg
/JPEGImages/2010_003832.jpg
/JPEGImages/2010_003836.jpg
/JPEGImages/2010_003838.jpg
/JPEGImages/2010_003850.jpg
/JPEGImages/2010_003867.jpg
/JPEGImages/2010_003882.jpg
/JPEGImages/2010_003909.jpg
/JPEGImages/2010_003922.jpg
/JPEGImages/2010_003923.jpg
/JPEGImages/2010_003978.jpg
/JPEGImages/2010_003989.jpg
/JPEGImages/2010_003990.jpg
/JPEGImages/2010_004000.jpg
/JPEGImages/2010_004003.jpg
/JPEGImages/2010_004068.jpg
/JPEGImages/2010_004076.jpg
/JPEGImages/2010_004117.jpg
/JPEGImages/2010_004136.jpg
/JPEGImages/2010_004142.jpg
/JPEGImages/2010_004195.jpg
/JPEGImages/2010_004200.jpg
/JPEGImages/2010_004202.jpg
/JPEGImages/2010_004232.jpg
/JPEGImages/2010_004261.jpg
/JPEGImages/2010_004266.jpg
/JPEGImages/2010_004273.jpg
/JPEGImages/2010_004305.jpg
/JPEGImages/2010_004403.jpg
/JPEGImages/2010_004433.jpg
/JPEGImages/2010_004434.jpg
/JPEGImages/2010_004435.jpg
/JPEGImages/2010_004438.jpg
/JPEGImages/2010_004442.jpg
/JPEGImages/2010_004473.jpg
/JPEGImages/2010_004482.jpg
/JPEGImages/2010_004487.jpg
/JPEGImages/2010_004489.jpg
/JPEGImages/2010_004512.jpg
/JPEGImages/2010_004525.jpg
/JPEGImages/2010_004527.jpg
/JPEGImages/2010_004532.jpg
/JPEGImages/2010_004566.jpg
/JPEGImages/2010_004568.jpg
/JPEGImages/2010_004579.jpg
/JPEGImages/2010_004611.jpg
/JPEGImages/2010_004641.jpg
/JPEGImages/2010_004688.jpg
/JPEGImages/2010_004699.jpg
/JPEGImages/2010_004702.jpg
/JPEGImages/2010_004716.jpg
/JPEGImages/2010_004754.jpg
/JPEGImages/2010_004767.jpg
/JPEGImages/2010_004776.jpg
/JPEGImages/2010_004811.jpg
/JPEGImages/2010_004837.jpg
/JPEGImages/2010_004839.jpg
/JPEGImages/2010_004845.jpg
/JPEGImages/2010_004860.jpg
/JPEGImages/2010_004867.jpg
/JPEGImages/2010_004881.jpg
/JPEGImages/2010_004939.jpg
/JPEGImages/2010_005001.jpg
/JPEGImages/2010_005047.jpg
/JPEGImages/2010_005051.jpg
/JPEGImages/2010_005091.jpg
/JPEGImages/2010_005095.jpg
/JPEGImages/2010_005125.jpg
/JPEGImages/2010_005140.jpg
/JPEGImages/2010_005177.jpg
/JPEGImages/2010_005178.jpg
/JPEGImages/2010_005194.jpg
/JPEGImages/2010_005197.jpg
/JPEGImages/2010_005200.jpg
/JPEGImages/2010_005205.jpg
/JPEGImages/2010_005212.jpg
/JPEGImages/2010_005248.jpg
/JPEGImages/2010_005294.jpg
/JPEGImages/2010_005298.jpg
/JPEGImages/2010_005313.jpg
/JPEGImages/2010_005324.jpg
/JPEGImages/2010_005328.jpg
/JPEGImages/2010_005329.jpg
/JPEGImages/2010_005380.jpg
/JPEGImages/2010_005404.jpg
/JPEGImages/2010_005407.jpg
/JPEGImages/2010_005411.jpg
/JPEGImages/2010_005423.jpg
/JPEGImages/2010_005499.jpg
/JPEGImages/2010_005509.jpg
/JPEGImages/2010_005510.jpg
/JPEGImages/2010_005544.jpg
/JPEGImages/2010_005549.jpg
/JPEGImages/2010_005590.jpg
/JPEGImages/2010_005639.jpg
/JPEGImages/2010_005699.jpg
/JPEGImages/2010_005704.jpg
/JPEGImages/2010_005707.jpg
/JPEGImages/2010_005711.jpg
/JPEGImages/2010_005726.jpg
/JPEGImages/2010_005741.jpg
/JPEGImages/2010_005765.jpg
/JPEGImages/2010_005790.jpg
/JPEGImages/2010_005792.jpg
/JPEGImages/2010_005797.jpg
/JPEGImages/2010_005812.jpg
/JPEGImages/2010_005850.jpg
/JPEGImages/2010_005861.jpg
/JPEGImages/2010_005869.jpg
/JPEGImages/2010_005908.jpg
/JPEGImages/2010_005915.jpg
/JPEGImages/2010_005946.jpg
/JPEGImages/2010_005965.jpg
/JPEGImages/2010_006044.jpg
/JPEGImages/2010_006047.jpg
/JPEGImages/2010_006052.jpg
/JPEGImages/2010_006081.jpg
/JPEGImages/2011_000001.jpg
/JPEGImages/2011_000013.jpg
/JPEGImages/2011_000014.jpg
/JPEGImages/2011_000020.jpg
/JPEGImages/2011_000032.jpg
/JPEGImages/2011_000042.jpg
/JPEGImages/2011_000063.jpg
/JPEGImages/2011_000115.jpg
/JPEGImages/2011_000120.jpg
/JPEGImages/2011_000240.jpg
/JPEGImages/2011_000244.jpg
/JPEGImages/2011_000254.jpg
/JPEGImages/2011_000261.jpg
/JPEGImages/2011_000262.jpg
/JPEGImages/2011_000271.jpg
/JPEGImages/2011_000274.jpg
/JPEGImages/2011_000306.jpg
/JPEGImages/2011_000311.jpg
/JPEGImages/2011_000316.jpg
/JPEGImages/2011_000328.jpg
/JPEGImages/2011_000351.jpg
/JPEGImages/2011_000352.jpg
/JPEGImages/2011_000406.jpg
/JPEGImages/2011_000414.jpg
/JPEGImages/2011_000448.jpg
/JPEGImages/2011_000451.jpg
/JPEGImages/2011_000470.jpg
/JPEGImages/2011_000473.jpg
/JPEGImages/2011_000515.jpg
/JPEGImages/2011_000537.jpg
/JPEGImages/2011_000576.jpg
/JPEGImages/2011_000603.jpg
/JPEGImages/2011_000616.jpg
/JPEGImages/2011_000636.jpg
/JPEGImages/2011_000639.jpg
/JPEGImages/2011_000654.jpg
/JPEGImages/2011_000660.jpg
/JPEGImages/2011_000664.jpg
/JPEGImages/2011_000667.jpg
/JPEGImages/2011_000670.jpg
/JPEGImages/2011_000676.jpg
/JPEGImages/2011_000721.jpg
/JPEGImages/2011_000723.jpg
/JPEGImages/2011_000762.jpg
/JPEGImages/2011_000766.jpg
/JPEGImages/2011_000786.jpg
/JPEGImages/2011_000802.jpg
/JPEGImages/2011_000810.jpg
/JPEGImages/2011_000821.jpg
/JPEGImages/2011_000841.jpg
/JPEGImages/2011_000844.jpg
/JPEGImages/2011_000846.jpg
/JPEGImages/2011_000869.jpg
/JPEGImages/2011_000890.jpg
/JPEGImages/2011_000915.jpg
/JPEGImages/2011_000924.jpg
/JPEGImages/2011_000937.jpg
/JPEGImages/2011_000939.jpg
/JPEGImages/2011_000952.jpg
/JPEGImages/2011_000968.jpg
/JPEGImages/2011_000974.jpg
/JPEGImages/2011_001037.jpg
/JPEGImages/2011_001072.jpg
/JPEGImages/2011_001085.jpg
/JPEGImages/2011_001089.jpg
/JPEGImages/2011_001090.jpg
/JPEGImages/2011_001099.jpg
/JPEGImages/2011_001104.jpg
/JPEGImages/2011_001112.jpg
/JPEGImages/2011_001120.jpg
/JPEGImages/2011_001132.jpg
/JPEGImages/2011_001151.jpg
/JPEGImages/2011_001194.jpg
/JPEGImages/2011_001258.jpg
/JPEGImages/2011_001274.jpg
/JPEGImages/2011_001314.jpg
/JPEGImages/2011_001317.jpg
/JPEGImages/2011_001321.jpg
/JPEGImages/2011_001379.jpg
/JPEGImages/2011_001425.jpg
/JPEGImages/2011_001431.jpg
/JPEGImages/2011_001443.jpg
/JPEGImages/2011_001446.jpg
/JPEGImages/2011_001452.jpg
/JPEGImages/2011_001454.jpg
/JPEGImages/2011_001477.jpg
/JPEGImages/2011_001509.jpg
/JPEGImages/2011_001512.jpg
/JPEGImages/2011_001515.jpg
/JPEGImages/2011_001528.jpg
/JPEGImages/2011_001554.jpg
/JPEGImages/2011_001561.jpg
/JPEGImages/2011_001580.jpg
/JPEGImages/2011_001587.jpg
/JPEGImages/2011_001623.jpg
/JPEGImages/2011_001648.jpg
/JPEGImages/2011_001651.jpg
/JPEGImages/2011_001654.jpg
/JPEGImages/2011_001684.jpg
/JPEGImages/2011_001696.jpg
/JPEGImages/2011_001697.jpg
/JPEGImages/2011_001760.jpg
/JPEGImages/2011_001761.jpg
/JPEGImages/2011_001798.jpg
/JPEGImages/2011_001807.jpg
/JPEGImages/2011_001851.jpg
/JPEGImages/2011_001852.jpg
/JPEGImages/2011_001853.jpg
/JPEGImages/2011_001888.jpg
/JPEGImages/2011_001940.jpg
/JPEGImages/2011_002014.jpg
/JPEGImages/2011_002028.jpg
/JPEGImages/2011_002056.jpg
/JPEGImages/2011_002061.jpg
/JPEGImages/2011_002068.jpg
/JPEGImages/2011_002076.jpg
/JPEGImages/2011_002090.jpg
/JPEGImages/2011_002095.jpg
/JPEGImages/2011_002104.jpg
/JPEGImages/2011_002136.jpg
/JPEGImages/2011_002138.jpg
/JPEGImages/2011_002151.jpg
/JPEGImages/2011_002153.jpg
/JPEGImages/2011_002155.jpg
/JPEGImages/2011_002197.jpg
/JPEGImages/2011_002198.jpg
/JPEGImages/2011_002243.jpg
/JPEGImages/2011_002250.jpg
/JPEGImages/2011_002257.jpg
/JPEGImages/2011_002262.jpg
/JPEGImages/2011_002264.jpg
/JPEGImages/2011_002296.jpg
/JPEGImages/2011_002314.jpg
/JPEGImages/2011_002331.jpg
/JPEGImages/2011_002333.jpg
/JPEGImages/2011_002411.jpg
/JPEGImages/2011_002417.jpg
/JPEGImages/2011_002425.jpg
/JPEGImages/2011_002437.jpg
/JPEGImages/2011_002444.jpg
/JPEGImages/2011_002445.jpg
/JPEGImages/2011_002449.jpg
/JPEGImages/2011_002468.jpg
/JPEGImages/2011_002469.jpg
/JPEGImages/2011_002473.jpg
/JPEGImages/2011_002508.jpg
/JPEGImages/2011_002523.jpg
/JPEGImages/2011_002534.jpg
/JPEGImages/2011_002557.jpg
/JPEGImages/2011_002564.jpg
/JPEGImages/2011_002572.jpg
/JPEGImages/2011_002597.jpg
/JPEGImages/2011_002622.jpg
/JPEGImages/2011_002632.jpg
/JPEGImages/2011_002635.jpg
/JPEGImages/2011_002643.jpg
/JPEGImages/2011_002653.jpg
/JPEGImages/2011_002667.jpg
/JPEGImages/2011_002681.jpg
/JPEGImages/2011_002707.jpg
/JPEGImages/2011_002736.jpg
/JPEGImages/2011_002759.jpg
/JPEGImages/2011_002783.jpg
/JPEGImages/2011_002792.jpg
/JPEGImages/2011_002799.jpg
/JPEGImages/2011_002824.jpg
/JPEGImages/2011_002835.jpg
/JPEGImages/2011_002866.jpg
/JPEGImages/2011_002876.jpg
/JPEGImages/2011_002888.jpg
/JPEGImages/2011_002894.jpg
/JPEGImages/2011_002903.jpg
/JPEGImages/2011_002905.jpg
/JPEGImages/2011_002986.jpg
/JPEGImages/2011_003045.jpg
/JPEGImages/2011_003064.jpg
/JPEGImages/2011_003070.jpg
/JPEGImages/2011_003083.jpg
/JPEGImages/2011_003093.jpg
/JPEGImages/2011_003096.jpg
/JPEGImages/2011_003102.jpg
/JPEGImages/2011_003156.jpg
/JPEGImages/2011_003170.jpg
/JPEGImages/2011_003178.jpg
/JPEGImages/2011_003231.jpg
--------------------------------------------------------------------------------