├── model ├── __init__.py └── SEAN.py ├── utils ├── __init__.py └── tools.py ├── dataset ├── __init__.py ├── info16.json ├── info.json ├── gta5_dataset.py ├── synthia_dataset.py ├── cityscapes_dataset.py ├── cityscapes16_dataset.py └── cityscapes_labellist_val.txt ├── Figure └── Framework.jpg ├── LICENSE ├── evaluation.py ├── README.md ├── SEAN_GTA5.py └── SEAN_Synthia.py /model/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /utils/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /dataset/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /Figure/Framework.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YonghaoXu/SEANet/HEAD/Figure/Framework.jpg -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2019 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /dataset/info16.json: -------------------------------------------------------------------------------- 1 | { 2 | "classes":16, 3 | "label2train":[ 4 | [0, 255], 5 | [1, 255], 6 | [2, 255], 7 | [3, 255], 8 | [4, 255], 9 | [5, 255], 10 | [6, 255], 11 | [7, 0], 12 | [8, 1], 13 | [9, 255], 14 | [10, 255], 15 | [11, 2], 16 | [12, 3], 17 | [13, 4], 18 | [14, 255], 19 | [15, 255], 20 | [16, 255], 21 | [17, 5], 22 | [18, 255], 23 | [19, 6], 24 | [20, 7], 25 | [21, 8], 26 | [22, 9], 27 | [23, 10], 28 | [24, 11], 29 | [25, 12], 30 | [26, 13], 31 | [27, 14], 32 | [28, 15], 33 | [29, 255], 34 | [30, 255], 35 | [31, 16], 36 | [32, 17], 37 | [33, 18], 38 | [-1, 255]], 39 | "label":[ 40 | "road", 41 | "sidewalk", 42 | "building", 43 | "wall", 44 | "fence", 45 | "pole", 46 | "light", 47 | "sign", 48 | "vegetation", 49 | "sky", 50 | "person", 51 | "rider", 52 | "car", 53 | "bus", 54 | "motocycle", 55 | "bicycle"], 56 | "palette":[ 57 | [128,64,128], 58 | [244,35,232], 59 | [70,70,70], 60 | [102,102,156], 61 | [190,153,153], 62 | [153,153,153], 63 | [250,170,30], 64 | [220,220,0], 65 | [107,142,35], 66 | [152,251,152], 67 | [70,130,180], 68 | [220,20,60], 69 | [255,0,0], 70 | [0,0,142], 71 | [0,0,70], 72 | [0,60,100], 73 | [0,80,100], 74 | [0,0,230], 75 | [119,11,32], 76 | [0,0,0]], 77 | "mean":[ 78 | 73.158359210711552, 79 | 82.908917542625858, 80 | 72.392398761941593], 81 | "std":[ 82 | 47.675755341814678, 83 | 48.494214368814916, 84 | 47.736546325441594] 85 | } 86 | -------------------------------------------------------------------------------- /dataset/info.json: -------------------------------------------------------------------------------- 1 | { 2 | "classes":19, 3 | "label2train":[ 4 | [0, 255], 5 | [1, 255], 6 | [2, 255], 7 | [3, 255], 8 | [4, 255], 9 | [5, 255], 10 | [6, 255], 11 | [7, 0], 12 | [8, 1], 13 | [9, 255], 14 | [10, 255], 15 | [11, 2], 16 | [12, 3], 17 | [13, 4], 18 | [14, 255], 19 | [15, 255], 20 | [16, 255], 21 | [17, 5], 22 | [18, 255], 23 | [19, 6], 24 | [20, 7], 25 | [21, 8], 26 | [22, 9], 27 | [23, 10], 28 | [24, 11], 29 | [25, 12], 30 | [26, 13], 31 | [27, 14], 32 | [28, 15], 33 | [29, 255], 34 | [30, 255], 35 | [31, 16], 36 | [32, 17], 37 | [33, 18], 38 | [-1, 255]], 39 | "label":[ 40 | "road", 41 | "sidewalk", 42 | "building", 43 | "wall", 44 | "fence", 45 | "pole", 46 | "light", 47 | "sign", 48 | "vegetation", 49 | "terrain", 50 | "sky", 51 | "person", 52 | "rider", 53 | "car", 54 | "truck", 55 | "bus", 56 | "train", 57 | "motocycle", 58 | "bicycle"], 59 | "palette":[ 60 | [128,64,128], 61 | [244,35,232], 62 | [70,70,70], 63 | [102,102,156], 64 | [190,153,153], 65 | [153,153,153], 66 | [250,170,30], 67 | [220,220,0], 68 | [107,142,35], 69 | [152,251,152], 70 | [70,130,180], 71 | [220,20,60], 72 | [255,0,0], 73 | [0,0,142], 74 | [0,0,70], 75 | [0,60,100], 76 | [0,80,100], 77 | [0,0,230], 78 | [119,11,32], 79 | [0,0,0]], 80 | "mean":[ 81 | 73.158359210711552, 82 | 82.908917542625858, 83 | 72.392398761941593], 84 | "std":[ 85 | 47.675755341814678, 86 | 48.494214368814916, 87 | 47.736546325441594] 88 | } 89 | -------------------------------------------------------------------------------- /dataset/gta5_dataset.py: -------------------------------------------------------------------------------- 1 | import os 2 | import os.path as osp 3 | import numpy as np 4 | import random 5 | import collections 6 | import torch 7 | import torchvision 8 | from torch.utils import data 9 | from PIL import Image 10 
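# GTA5DataSet (below) reads one source-domain sample per index: it loads an
# image/label pair from <root>/images and <root>/labels, resizes both to
# crop_size (bicubic for the image, nearest-neighbour for the label so class
# IDs stay intact), remaps the raw label IDs to the 19 train IDs through
# id_to_trainid (unmapped IDs become 255 and are ignored by the loss),
# converts RGB to BGR, subtracts the per-channel mean, and returns a CHW
# float32 array together with the remapped label map, image size, and name.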
| 11 | 12 | class GTA5DataSet(data.Dataset): 13 | def __init__(self, root, list_path, crop_size=(321, 321), mean=(128, 128, 128), scale=True, mirror=True, ignore_label=255): 14 | self.root = root 15 | self.list_path = list_path 16 | self.crop_size = crop_size 17 | self.scale = scale 18 | self.ignore_label = ignore_label 19 | self.mean = mean 20 | self.is_mirror = mirror 21 | self.img_ids = [i_id.strip() for i_id in open(list_path)] 22 | self.files = [] 23 | 24 | self.id_to_trainid = {7: 0, 8: 1, 11: 2, 12: 3, 13: 4, 17: 5, 25 | 19: 6, 20: 7, 21: 8, 22: 9, 23: 10, 24: 11, 25: 12, 26 | 26: 13, 27: 14, 28: 15, 31: 16, 32: 17, 33: 18} 27 | 28 | for name in self.img_ids: 29 | img_file = osp.join(self.root, "images/%s" % name) 30 | label_file = osp.join(self.root, "labels/%s" % name) 31 | self.files.append({ 32 | "img": img_file, 33 | "label": label_file, 34 | "name": name 35 | }) 36 | 37 | def __len__(self): 38 | return len(self.files) 39 | 40 | 41 | def __getitem__(self, index): 42 | datafiles = self.files[index] 43 | 44 | image = Image.open(datafiles["img"]).convert('RGB') 45 | label = Image.open(datafiles["label"]) 46 | name = datafiles["name"] 47 | 48 | image = image.resize(self.crop_size, Image.BICUBIC) 49 | label = label.resize(self.crop_size, Image.NEAREST) 50 | 51 | image = np.asarray(image, np.float32) 52 | label = np.asarray(label, np.float32) 53 | 54 | label_copy = 255 * np.ones(label.shape, dtype=np.float32) 55 | for k, v in self.id_to_trainid.items(): 56 | label_copy[label == k] = v 57 | 58 | size = image.shape 59 | image = image[:, :, ::-1] 60 | image -= self.mean 61 | image = image.transpose((2, 0, 1)) 62 | 63 | return image.copy(), label_copy.copy(), np.array(size), name 64 | 65 | 66 | -------------------------------------------------------------------------------- /dataset/synthia_dataset.py: -------------------------------------------------------------------------------- 1 | import os 2 | import os.path as osp 3 | import numpy as np 4 | import random 5 | import collections 6 | import torch 7 | import torchvision 8 | from torch.utils import data 9 | from PIL import Image 10 | 11 | 12 | class synthiaDataSet(data.Dataset): 13 | def __init__(self, root, list_path, crop_size=(321, 321), mean=(128, 128, 128), scale=True, mirror=True, ignore_label=255): 14 | self.root = root 15 | self.list_path = list_path 16 | self.crop_size = crop_size 17 | self.scale = scale 18 | self.ignore_label = ignore_label 19 | self.mean = mean 20 | self.is_mirror = mirror 21 | self.img_ids = [i_id.strip() for i_id in open(list_path)] 22 | self.files = [] 23 | 24 | self.id_to_trainid = {1: 9, 2: 2, 3: 0, 4: 1, 5: 4, 6: 8, 25 | 7: 5, 8: 12, 9: 7, 10: 10, 11: 15, 12: 14, 15: 6, 26 | 17: 11, 19: 13, 21: 3} 27 | 28 | for name in self.img_ids: 29 | img_file = osp.join(self.root, "images/%s" % name) 30 | label_file = osp.join(self.root, "labels/%s" % name) 31 | self.files.append({ 32 | "img": img_file, 33 | "label": label_file, 34 | "name": name 35 | }) 36 | 37 | def __len__(self): 38 | return len(self.files) 39 | 40 | 41 | def __getitem__(self, index): 42 | datafiles = self.files[index] 43 | 44 | image = Image.open(datafiles["img"]).convert('RGB') 45 | label = Image.open(datafiles["label"]) 46 | name = datafiles["name"] 47 | 48 | image = image.resize(self.crop_size, Image.BICUBIC) 49 | label = label.resize(self.crop_size, Image.NEAREST) 50 | 51 | image = np.asarray(image, np.float32) 52 | label = np.asarray(label, np.float32) 53 | 54 | label_copy = 255 * np.ones(label.shape, dtype=np.float32) 55 | for k, v in 
self.id_to_trainid.items(): 56 | label_copy[label == k] = v 57 | 58 | size = image.shape 59 | image = image[:, :, ::-1] 60 | image -= self.mean 61 | image = image.transpose((2, 0, 1)) 62 | 63 | return image.copy(), label_copy.copy(), np.array(size), name 64 | 65 | 66 | -------------------------------------------------------------------------------- /model/SEAN.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch 3 | from torch import nn 4 | from torchvision import models 5 | 6 | class Classifier_Module(nn.Module): 7 | 8 | def __init__(self, dims_in, dilation_series, padding_series, num_classes): 9 | super(Classifier_Module, self).__init__() 10 | self.conv2d_list = nn.ModuleList() 11 | for dilation, padding in zip(dilation_series, padding_series): 12 | self.conv2d_list.append(nn.Conv2d(dims_in, num_classes, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias = True)) 13 | 14 | for m in self.conv2d_list: 15 | m.weight.data.normal_(0, 0.01) 16 | 17 | def forward(self, x): 18 | out = self.conv2d_list[0](x) 19 | for i in range(len(self.conv2d_list)-1): 20 | out += self.conv2d_list[i+1](x) 21 | return out 22 | 23 | 24 | class SEANet(nn.Module): 25 | def __init__(self, num_classes, vgg16_caffe_path=None, pretrained=False): 26 | super(SEANet, self).__init__() 27 | vgg = models.vgg16() 28 | if pretrained: 29 | vgg.load_state_dict(torch.load(vgg16_caffe_path)) 30 | 31 | features, classifier = list(vgg.features.children()), list(vgg.classifier.children()) 32 | 33 | features = nn.Sequential(*(features[i] for i in list(range(23))+list(range(24,30)))) 34 | 35 | for i in [23,25,27]: 36 | features[i].dilation = (2,2) 37 | features[i].padding = (2,2) 38 | 39 | fc6 = nn.Conv2d(512, 1024, kernel_size=3, padding=4, dilation=4) 40 | fc7 = nn.Conv2d(1024, 1024, kernel_size=3, padding=4, dilation=4) 41 | 42 | self.features = nn.Sequential(*([features[i] for i in range(len(features))] + [ fc6, nn.ReLU(inplace=True), fc7, nn.ReLU(inplace=True)])) 43 | 44 | 45 | #attention moduel 46 | self.avgpool = nn.AvgPool2d(kernel_size=3, stride=2, padding=1, ceil_mode=True) 47 | self.aconv = nn.Conv2d(1024, 1, kernel_size=1, stride=1, bias=False) 48 | self.sigmoid = nn.Sigmoid() 49 | 50 | self.classifier = Classifier_Module(1024, [6,12,18,24],[6,12,18,24],num_classes) 51 | 52 | 53 | def forward(self, x): 54 | x = self.features(x) 55 | 56 | mask = self.avgpool(x) 57 | interpolation = nn.UpsamplingBilinear2d(size=x.shape[2:4]) 58 | mask = interpolation(mask) 59 | mask = self.aconv(mask) 60 | mask = self.sigmoid(mask) 61 | 62 | x = self.classifier((1+mask)*x) 63 | return mask,x 64 | 65 | def optim_parameters(self, args): 66 | return self.parameters() 67 | -------------------------------------------------------------------------------- /dataset/cityscapes_dataset.py: -------------------------------------------------------------------------------- 1 | import os 2 | import os.path as osp 3 | import numpy as np 4 | import random 5 | 6 | import collections 7 | import torch 8 | import torchvision 9 | from torch.utils import data 10 | from PIL import Image 11 | 12 | class cityscapesDataSet(data.Dataset): 13 | def __init__(self, root, list_path, max_iters=None, crop_size=(321, 321), mean=(128, 128, 128), scale=True, mirror=True, ignore_label=255, set='val'): 14 | self.root = root 15 | self.list_path = list_path 16 | self.crop_size = crop_size 17 | self.scale = scale 18 | self.ignore_label = ignore_label 19 | self.mean = mean 20 | self.is_mirror = 
mirror 21 | 22 | self.id_to_trainid = {7: 0, 8: 1, 11: 2, 12: 3, 13: 4, 17: 5, 23 | 19: 6, 20: 7, 21: 8, 22: 9, 23: 10, 24: 11, 25: 12, 24 | 26: 13, 27: 14, 28: 15, 31: 16, 32: 17, 33: 18} 25 | self.img_ids = [i_id.strip() for i_id in open(list_path)] 26 | if not max_iters==None: 27 | n_repeat = int(max_iters / len(self.img_ids)) 28 | self.img_ids = self.img_ids * n_repeat + self.img_ids[:max_iters-n_repeat*len(self.img_ids)] 29 | self.files = [] 30 | self.set = set 31 | for name in self.img_ids: 32 | img_file = osp.join(self.root, "leftImg8bit/%s/%s" % (self.set, name)) 33 | label_file = osp.join(self.root, "gtFine/%s/%s" % (self.set, name[:-15]+'gtFine_labelIds.png')) 34 | self.files.append({ 35 | "img": img_file, 36 | "label": label_file, 37 | "name": name.split('/')[-1] 38 | }) 39 | 40 | def __len__(self): 41 | return len(self.files) 42 | 43 | def __getitem__(self, index): 44 | datafiles = self.files[index] 45 | 46 | image = Image.open(datafiles["img"]).convert('RGB') 47 | label = Image.open(datafiles["label"]) 48 | name = datafiles["name"] 49 | 50 | # resize 51 | image = image.resize(self.crop_size, Image.BICUBIC) 52 | if self.set == 'train': 53 | label = label.resize(self.crop_size, Image.NEAREST) 54 | 55 | image = np.asarray(image, np.float32) 56 | label = np.asarray(label, np.float32) 57 | 58 | label_copy = 255 * np.ones(label.shape, dtype=np.float32) 59 | for k, v in self.id_to_trainid.items(): 60 | label_copy[label == k] = v 61 | 62 | size = image.shape 63 | image = image[:, :, ::-1] 64 | image -= self.mean 65 | image = image.transpose((2, 0, 1)) 66 | 67 | return image.copy(), label_copy.copy(),np.array(size), name 68 | 69 | -------------------------------------------------------------------------------- /dataset/cityscapes16_dataset.py: -------------------------------------------------------------------------------- 1 | import os 2 | import os.path as osp 3 | import numpy as np 4 | import random 5 | 6 | import collections 7 | import torch 8 | import torchvision 9 | from torch.utils import data 10 | from PIL import Image 11 | 12 | class cityscapes16DataSet(data.Dataset): 13 | def __init__(self, root, list_path, max_iters=None, crop_size=(321, 321), mean=(128, 128, 128), scale=True, mirror=True, ignore_label=255, set='val'): 14 | self.root = root 15 | self.list_path = list_path 16 | self.crop_size = crop_size 17 | self.scale = scale 18 | self.ignore_label = ignore_label 19 | self.mean = mean 20 | self.is_mirror = mirror 21 | 22 | self.id_to_trainid = {7: 0, 8: 1, 11: 2, 12: 3, 13: 4, 17: 5, 23 | 19: 6, 20: 7, 21: 8, 23: 9, 24: 10, 25: 11, 24 | 26: 12, 28: 13, 32: 14, 33: 15} 25 | self.img_ids = [i_id.strip() for i_id in open(list_path)] 26 | if not max_iters==None: 27 | n_repeat = int(max_iters / len(self.img_ids)) 28 | self.img_ids = self.img_ids * n_repeat + self.img_ids[:max_iters-n_repeat*len(self.img_ids)] 29 | self.files = [] 30 | self.set = set 31 | for name in self.img_ids: 32 | img_file = osp.join(self.root, "leftImg8bit/%s/%s" % (self.set, name)) 33 | label_file = osp.join(self.root, "gtFine/%s/%s" % (self.set, name[:-15]+'gtFine_labelIds.png')) 34 | self.files.append({ 35 | "img": img_file, 36 | "label": label_file, 37 | "name": name.split('/')[-1] 38 | }) 39 | 40 | def __len__(self): 41 | return len(self.files) 42 | 43 | def __getitem__(self, index): 44 | datafiles = self.files[index] 45 | 46 | image = Image.open(datafiles["img"]).convert('RGB') 47 | label = Image.open(datafiles["label"]) 48 | name = datafiles["name"] 49 | 50 | image = image.resize(self.crop_size, 
Image.BICUBIC) 51 | if self.set == 'train': 52 | label = label.resize(self.crop_size, Image.NEAREST) 53 | 54 | image = np.asarray(image, np.float32) 55 | label = np.asarray(label, np.float32) 56 | 57 | label_copy = 255 * np.ones(label.shape, dtype=np.float32) 58 | for k, v in self.id_to_trainid.items(): 59 | label_copy[label == k] = v 60 | 61 | size = image.shape 62 | image = image[:, :, ::-1] 63 | image -= self.mean 64 | image = image.transpose((2, 0, 1)) 65 | 66 | return image.copy(), label_copy.copy(),np.array(size), name 67 | -------------------------------------------------------------------------------- /evaluation.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import scipy 3 | from scipy import ndimage 4 | import numpy as np 5 | import sys 6 | 7 | import torch 8 | from torch.autograd import Variable 9 | import torchvision.models as models 10 | import torch.nn.functional as F 11 | from torch.utils import data, model_zoo 12 | from model.SEAN import SEANet 13 | from dataset.cityscapes_dataset import cityscapesDataSet 14 | from collections import OrderedDict 15 | import os 16 | from PIL import Image 17 | from utils.tools import * 18 | 19 | import matplotlib.pyplot as plt 20 | import torch.nn as nn 21 | IMG_MEAN = np.array((104.00698793,116.66876762,122.67891434), dtype=np.float32) 22 | 23 | 24 | def get_arguments(): 25 | """Parse all the arguments provided from the CLI. 26 | 27 | Returns: 28 | A list of parsed arguments. 29 | """ 30 | parser = argparse.ArgumentParser(description="SEAN") 31 | parser.add_argument("--data_dir", type=str, default='/data/yonghao.xu/SegmentationData/cityscapes/', 32 | help="target dataset path.") 33 | parser.add_argument("--data_list", type=str, default='./dataset/cityscapes_labellist_val.txt', 34 | help="target dataset list file.") 35 | parser.add_argument("--ignore-label", type=int, default=255, 36 | help="the index of the label to ignore in the training.") 37 | parser.add_argument("--num-classes", type=int, default=19, 38 | help="number of classes.") 39 | parser.add_argument("--restore-from", type=str, default='/data/yonghao.xu/PreTrainedModel/GTA2Cityscapes.pth', 40 | help="restored model.") 41 | parser.add_argument("--snapshot_dir", type=str, default='./Snap/Maps', 42 | help="Path to save result.") 43 | return parser.parse_args() 44 | 45 | 46 | def main(): 47 | """Create the model and start the evaluation process.""" 48 | args = get_arguments() 49 | 50 | if not os.path.exists(args.snapshot_dir): 51 | os.makedirs(args.snapshot_dir) 52 | f = open(args.snapshot_dir+'Evaluation.txt', 'w') 53 | 54 | model = SEANet(num_classes=args.num_classes) 55 | 56 | saved_state_dict = torch.load(args.restore_from) 57 | model.load_state_dict(saved_state_dict) 58 | 59 | model.eval() 60 | model.cuda() 61 | testloader = data.DataLoader(cityscapesDataSet(args.data_dir, args.data_list, crop_size=(1024, 512), mean=IMG_MEAN, scale=False, mirror=False, set='val'), 62 | batch_size=1, shuffle=False, pin_memory=True) 63 | 64 | input_size_target = (2048,1024) 65 | interp = nn.Upsample(size=(1024,2048), mode='bilinear') 66 | 67 | test_mIoU(f,model, testloader, 0,input_size_target,print_per_batches=10) 68 | 69 | for index, batch in enumerate(testloader): 70 | if index % 100 == 0: 71 | print('%d processd' % index) 72 | image, _,_, name = batch 73 | _,output = model(image.cuda()) 74 | output = interp(output).cpu().data[0].numpy() 75 | 76 | 77 | output = output.transpose(1,2,0) 78 | output = np.asarray(np.argmax(output, axis=2), 
dtype=np.uint8) 79 | 80 | output_col = colorize_mask(output) 81 | output = Image.fromarray(output) 82 | 83 | name = name[0].split('/')[-1] 84 | output.save('%s/%s' % (args.snapshot_dir, name)) 85 | output_col.save('%s/%s_color.png' % (args.snapshot_dir, name.split('.')[0])) 86 | 87 | f.close() 88 | 89 | if __name__ == '__main__': 90 | main() 91 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Self-Ensembling Attention Networks: Addressing Domain Shift for Semantic Segmentation 2 | 3 | Pytorch implementation of our method for domain adaptation in semantic segmentation task. 4 | 5 | 6 | ![](Figure/Framework.jpg) 7 | 8 | ## Paper 9 | [Self-Ensembling Attention Networks: Addressing Domain Shift for Semantic Segmentation](https://m.aaai.org/ojs/index.php/AAAI/article/view/4500) 10 | 11 | Please cite our paper if you find it useful for your research. 12 | 13 | ``` 14 | @inproceedings{SEAN, 15 | title={Self-Ensembling Attention Networks: Addressing Domain Shift for Semantic Segmentation}, 16 | author={Xu, Yonghao and Du, Bo and Zhang, Lefei and Zhang, Qian and Wang, Guoli and Zhang, Liangpei}, 17 | booktitle={Proceedings of the AAAI Conference on Artificial Intelligence}, 18 | volume={33}, 19 | pages={5581--5588}, 20 | year={2019} 21 | } 22 | ``` 23 | 24 | ## Installation 25 | * Install `Pytorch 0.4.0` from https://github.com/pytorch/pytorch with `Python 3.6`. 26 | 27 | * Clone this repo. 28 | ``` 29 | git clone https://github.com/YonghaoXu/SEANet 30 | ``` 31 | 32 | ## Dataset 33 | * Download the [GTA-5 Dataset](https://download.visinf.tu-darmstadt.de/data/from_games/). 34 | 35 | * Download the [SYNTHIA-RAND-CITYSCAPES Dataset](http://synthia-dataset.net/download/808/). 36 | - Note: The ground-truth data in the original SYNTHIA-RAND-CITYSCAPES dataset should be adjusted to be consistent with those in the cityscapes dataset. Here we attach the transformed [ground-truth data](https://drive.google.com/open?id=1GvdXSG4nq8Px0xYs3ate0reNNKtci2dS) for the SYNTHIA-RAND-CITYSCAPES dataset. 37 | 38 | * Download the [Cityscapes Dataset](https://www.cityscapes-dataset.com/). 39 | 40 | ## Pretrained Model 41 | * Download the pretrained [VGG-16 Model](https://drive.google.com/file/d/1PGuOb-ZIOc10aMGOxj5xFSubi8mkVXaq/view?usp=sharing). 42 | 43 | ## Training 44 | * Training for GTA-5 to Cityscapes. 45 | - Change the default path of `--data_dir_source` in `SEAN_GTA5.py` with your GTA-5 dataset folder. 46 | - Change the default path of `--data_dir_target` in `SEAN_GTA5.py` with your Cityscapes dataset folder. 47 | - Change the default path of `--restore_from` in `SEAN_GTA5.py` with your pretrained VGG model path. 48 | - Refer to `dataset/gta5_dataset.py` and `dataset/cityscapes_dataset.py` for further guidance about how the images and ground-truth files are organized. 49 | 50 | ``` 51 | python SEAN_GTA5.py 52 | ``` 53 | 54 | * Training for Synthia to Cityscapes. 55 | - Change the default path of `--data_dir_source` in `SEAN_Synthia.py` with your Synthia dataset folder. 56 | - Change the default path of `--data_dir_target` in `SEAN_Synthia.py` with your Cityscapes dataset folder. 57 | - Change the default path of `--restore_from` in `SEAN_Synthia.py` with your pretrained VGG model path. 58 | - Refer to `dataset/synthia_dataset.py` and `dataset/cityscapes16_dataset.py` for further guidance about how the images and ground-truth files are organized. 
59 | 60 | ``` 61 | python SEAN_Synthia.py 62 | ``` 63 | 64 | ## Evaluation 65 | * Test for GTA-5 to Cityscapes. 66 | - Change the default path of `--data_dir` in `evaluation.py` with your Cityscapes dataset folder. 67 | - Change the default path of `--restore_from` in `evaluation.py` with your trained model path. You can also download our [GTA-5 to Cityscapes model](https://drive.google.com/open?id=1g-NSAaHxkvru4G0lBNolmcioH8elCoqo) for a look. 68 | 69 | ``` 70 | python evaluation.py 71 | ``` 72 | 73 | * Test for Synthia to Cityscapes. 74 | - For evaluation on Synthia to Cityscapes case, please replace the `test_mIoU` function in `evaluation.py` with the `test_mIoU16` function. Since there are only 16 categories in common in this case, the code for writing the segmentation maps parts needs to be further modified. If you want to share your implementation for this issue, please pull a request. 75 | 76 | 77 | ## Empirical Observations 78 | * Following the previous research setting in this task, we check the mIoU value on the target domain after every 500 iterations. A lower frequency for the checking would accelerate the network training, but may also miss the best performance. 79 | * A large `--attention_threshold` would be detrimental to the performance of the framework. Empirically, 0 to 0.3 is a suitable range for this parameter. 80 | * Best performance is usually obtained within 6 epochs. For the GTA-5 to Cityscapes case, the mIoU can reach about 34% to 35%. For the Synthia to Cityscapes case, the mIoU can reach about 36% to 37%. 81 | 82 | ## Multi-GPU Training 83 | * This repo is tested with a batch size of 1 using a single GPU. For a larger batch size with multi-GPU training, the code may need to be modified. If you want to share your implementation for this issue, please pull a request. 
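## Self-Ensembling Loss (Reference Sketch)
* The consistency term that drives the adaptation in `SEAN_GTA5.py` and `SEAN_Synthia.py` can be summarized as a masked MSE between the softmax maps of the student and the teacher, where the mask keeps only pixels whose teacher attention exceeds `--attention_threshold`. The snippet below is a simplified, self-contained sketch of that computation (the function name and signature are illustrative, not part of this repo); it assumes the teacher confidence map is already in `[0, 1]`, as produced by the sigmoid in `model/SEAN.py`. It also shows why a large `--attention_threshold` is detrimental: fewer pixels survive the mask.

```
import torch
import torch.nn.functional as F

def self_ensembling_loss(student_logits, teacher_logits, teacher_confidence,
                         attention_threshold=0.1, st_weight=0.3):
    # student_logits / teacher_logits: (N, C, H, W) segmentation outputs.
    # teacher_confidence: (N, 1, H, W) attention map in [0, 1] from the teacher.
    n, c, h, w = student_logits.shape
    student_prob = F.softmax(student_logits, dim=1).permute(0, 2, 3, 1).reshape(-1, c)
    teacher_prob = F.softmax(teacher_logits, dim=1).permute(0, 2, 3, 1).reshape(-1, c)

    # keep only the pixels the teacher is confident about
    mask = teacher_confidence.reshape(-1) > attention_threshold
    if mask.sum() == 0:  # no confident pixel: drop the consistency term
        return student_logits.new_zeros(())
    return st_weight * F.mse_loss(student_prob[mask], teacher_prob[mask])
```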
84 | -------------------------------------------------------------------------------- /utils/tools.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn.functional as F 3 | import torch.nn as nn 4 | from torch.autograd import Variable 5 | import numpy as np 6 | from PIL import Image 7 | import json 8 | 9 | class WeightEMA(object): 10 | def __init__(self, params, src_params, alpha): 11 | self.params = list(params) 12 | self.src_params = list(src_params) 13 | self.alpha = alpha 14 | 15 | for p, src_p in zip(self.params, self.src_params): 16 | p.data[:] = src_p.data[:] 17 | 18 | def step(self): 19 | one_minus_alpha = 1.0 - self.alpha 20 | for p, src_p in zip(self.params, self.src_params): 21 | p.data.mul_(self.alpha) 22 | p.data.add_(src_p.data * one_minus_alpha) 23 | 24 | def colorize_mask(mask): 25 | # mask: numpy array of the mask 26 | palette = [128, 64, 128, 244, 35, 232, 70, 70, 70, 102, 102, 156, 190, 153, 153, 153, 153, 153, 250, 170, 30, 27 | 220, 220, 0, 107, 142, 35, 152, 251, 152, 70, 130, 180, 220, 20, 60, 255, 0, 0, 0, 0, 142, 0, 0, 70, 28 | 0, 60, 100, 0, 80, 100, 0, 0, 230, 119, 11, 32] 29 | 30 | zero_pad = 256 * 3 - len(palette) 31 | for i in range(zero_pad): 32 | palette.append(0) 33 | 34 | new_mask = Image.fromarray(mask.astype(np.uint8)).convert('P') 35 | new_mask.putpalette(palette) 36 | 37 | return new_mask 38 | 39 | 40 | def loss_calc(pred, label): 41 | """ 42 | This function returns cross entropy loss for semantic segmentation 43 | """ 44 | # out shape batch_size x channels x h x w -> batch_size x channels x h x w 45 | # label shape h x w x 1 x batch_size -> batch_size x 1 x h x w 46 | label = label.long().cuda() 47 | criterion = CrossEntropy2d().cuda() 48 | return criterion(pred, label) 49 | 50 | def _fast_hist(label_true, label_pred, n_class): 51 | mask = (label_true >= 0) & (label_true < n_class) 52 | hist = np.bincount( 53 | n_class * label_true[mask].astype(int) + 54 | label_pred[mask].astype(int), minlength=n_class ** 2).reshape(n_class, n_class) 55 | return hist 56 | 57 | def label_accuracy_score(label_trues, label_preds, n_class=19): 58 | """Returns accuracy score evaluation result. 
59 | 60 | - overall accuracy 61 | - mean accuracy 62 | - mean IU 63 | - fwavacc 64 | """ 65 | hist = np.zeros((n_class, n_class)) 66 | for lt, lp in zip(label_trues, label_preds): 67 | hist += _fast_hist(lt.flatten(), lp.flatten(), n_class) 68 | acc = np.diag(hist).sum() / hist.sum() 69 | acc_cls = np.diag(hist) / hist.sum(axis=1) 70 | acc_cls = np.nanmean(acc_cls) 71 | iu = np.diag(hist) / (hist.sum(axis=1) + hist.sum(axis=0) - np.diag(hist)) 72 | mean_iu = np.nanmean(iu) 73 | freq = hist.sum(axis=1) / hist.sum() 74 | fwavacc = (freq[freq > 0] * iu[freq > 0]).sum() 75 | return acc, acc_cls, mean_iu, fwavacc 76 | 77 | def fast_hist(a, b, n): 78 | k = (a >= 0) & (a < n) 79 | return np.bincount(n * a[k].astype(int) + b[k], minlength=n ** 2).reshape(n, n) 80 | 81 | 82 | def per_class_iu(hist): 83 | return np.diag(hist) / (hist.sum(1) + hist.sum(0) - np.diag(hist)) 84 | 85 | 86 | def label_mapping(input, mapping): 87 | output = np.copy(input) 88 | for ind in range(len(mapping)): 89 | output[input == mapping[ind][0]] = mapping[ind][1] 90 | return np.array(output, dtype=np.int64) 91 | 92 | 93 | def test_mIoU(f,model, data_loader, epoch,input_size, print_per_batches=10): 94 | 95 | model.eval() 96 | num_classes = 19 97 | num_batches = len(data_loader) 98 | hist = np.zeros((num_classes, num_classes)) 99 | with open('./dataset/info.json','r') as fp: 100 | info = json.load(fp) 101 | name_classes = np.array(info['label'], dtype=np.str) 102 | interp = nn.Upsample(size=(input_size[1], input_size[0]), mode='bilinear') 103 | 104 | for ind, data in enumerate(data_loader): 105 | image, label = data[0].cuda(),data[1].squeeze() 106 | 107 | _,outputs = model(image) 108 | outputs = interp(outputs) 109 | _, predicted = torch.max(outputs, 1) 110 | 111 | pred = predicted.cpu().squeeze().numpy() 112 | label = label.numpy() 113 | 114 | if len(label.flatten()) != len(pred.flatten()): 115 | print('Skipping: len(gt) = {:d}, len(pred) = {:d}, {:d}'.format(len(label.flatten()), len(pred.flatten()), ind)) 116 | continue 117 | hist += fast_hist(label.flatten(), pred.flatten(), num_classes) 118 | if ind > 0 and ind % print_per_batches == 0: 119 | print('{:d} / {:d}: {:0.2f}'.format(ind, num_batches, 100*np.mean(per_class_iu(hist)))) 120 | f.write('{:d} / {:d}: {:0.2f}\n'.format(ind, num_batches, 100*np.mean(per_class_iu(hist)))) 121 | 122 | mIoUs = per_class_iu(hist) 123 | for ind_class in range(num_classes): 124 | f.write('\n===>' + name_classes[ind_class] + ':\t' + str(round(mIoUs[ind_class] * 100, 2))) 125 | print('===>' + name_classes[ind_class] + ':\t' + str(round(mIoUs[ind_class] * 100, 2))) 126 | f.write('\n epoch %d ===> mIoU: ' %(epoch) + str(round(np.nanmean(mIoUs) * 100, 2))+'\n') 127 | f.flush() 128 | print('epoch %d ===> mIoU: ' %(epoch) + str(round(np.nanmean(mIoUs) * 100, 2))) 129 | return np.nanmean(mIoUs) 130 | 131 | def test_mIoU16(f,model, data_loader, epoch,input_size, print_per_batches=10): 132 | 133 | model.eval() 134 | num_classes = 16 135 | num_batches = len(data_loader) 136 | hist = np.zeros((num_classes, num_classes)) 137 | with open('./dataset/info16.json','r') as fp: 138 | info = json.load(fp) 139 | name_classes = np.array(info['label'], dtype=np.str) 140 | interp = nn.Upsample(size=(input_size[1], input_size[0]), mode='bilinear') 141 | 142 | for ind, data in enumerate(data_loader): 143 | image, label = data[0].cuda(),data[1].squeeze() 144 | 145 | _,outputs = model(image) 146 | outputs = interp(outputs) 147 | _, predicted = torch.max(outputs, 1) 148 | 149 | pred = 
predicted.cpu().squeeze().numpy() 150 | label = label.numpy() 151 | 152 | if len(label.flatten()) != len(pred.flatten()): 153 | print('Skipping: len(gt) = {:d}, len(pred) = {:d}, {:d}'.format(len(label.flatten()), len(pred.flatten()), ind)) 154 | continue 155 | hist += fast_hist(label.flatten(), pred.flatten(), num_classes) 156 | if ind > 0 and ind % print_per_batches == 0: 157 | print('{:d} / {:d}: {:0.2f}'.format(ind, num_batches, 100*np.mean(per_class_iu(hist)))) 158 | f.write('{:d} / {:d}: {:0.2f}\n'.format(ind, num_batches, 100*np.mean(per_class_iu(hist)))) 159 | 160 | mIoUs = per_class_iu(hist) 161 | for ind_class in range(num_classes): 162 | f.write('\n===>' + name_classes[ind_class] + ':\t' + str(round(mIoUs[ind_class] * 100, 2))) 163 | print('===>' + name_classes[ind_class] + ':\t' + str(round(mIoUs[ind_class] * 100, 2))) 164 | f.write('\n epoch %d ===> mIoU: ' %(epoch) + str(round(np.nanmean(mIoUs) * 100, 2))+'\n') 165 | f.flush() 166 | print('epoch %d ===> mIoU: ' %(epoch) + str(round(np.nanmean(mIoUs) * 100, 2))) 167 | return np.nanmean(mIoUs) 168 | 169 | 170 | 171 | 172 | class CrossEntropy2d(nn.Module): 173 | 174 | def __init__(self, size_average=True, ignore_label=255): 175 | super(CrossEntropy2d, self).__init__() 176 | self.size_average = size_average 177 | self.ignore_label = ignore_label 178 | 179 | def forward(self, predict, target, weight=None): 180 | """ 181 | Args: 182 | predict:(n, c, h, w) 183 | target:(n, h, w) 184 | weight (Tensor, optional): a manual rescaling weight given to each class. 185 | If given, has to be a Tensor of size "nclasses" 186 | """ 187 | assert not target.requires_grad 188 | assert predict.dim() == 4 189 | assert target.dim() == 3 190 | assert predict.size(0) == target.size(0), "{0} vs {1} ".format(predict.size(0), target.size(0)) 191 | assert predict.size(2) == target.size(1), "{0} vs {1} ".format(predict.size(2), target.size(1)) 192 | assert predict.size(3) == target.size(2), "{0} vs {1} ".format(predict.size(3), target.size(3)) 193 | n, c, h, w = predict.size() 194 | target_mask = (target >= 0) * (target != self.ignore_label) 195 | target = target[target_mask] 196 | if not target.data.dim(): 197 | return torch.zeros(1) 198 | predict = predict.transpose(1, 2).transpose(2, 3).contiguous() 199 | predict = predict[target_mask.view(n, h, w, 1).repeat(1, 1, 1, c)].view(-1, c) 200 | loss = F.cross_entropy(predict, target, weight=weight, size_average=self.size_average) 201 | return loss 202 | -------------------------------------------------------------------------------- /SEAN_GTA5.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import torch 3 | import torch.nn as nn 4 | from torch.utils import data, model_zoo 5 | import numpy as np 6 | from torch.autograd import Variable 7 | import torch.optim as optim 8 | import torch.nn.functional as F 9 | import sys 10 | import os 11 | import os.path as osp 12 | import time 13 | from utils.tools import * 14 | from dataset.gta5_dataset import GTA5DataSet 15 | from dataset.cityscapes_dataset import cityscapesDataSet 16 | from model.SEAN import SEANet 17 | 18 | IMG_MEAN = np.array((104.00698793, 116.66876762, 122.67891434), dtype=np.float32) 19 | 20 | 21 | def get_arguments(): 22 | 23 | parser = argparse.ArgumentParser(description="SEAN") 24 | 25 | #dataset 26 | parser.add_argument("--data_dir_source", type=str, default='/data/yonghao.xu/SegmentationData/GTA5/', 27 | help="source dataset path.") 28 | parser.add_argument("--data_list_source", type=str, 
default='./dataset/GTA5_imagelist_train.txt', 29 | help="source dataset list file.") 30 | parser.add_argument("--data_dir_target", type=str, default='/data/yonghao.xu/SegmentationData/cityscapes/', 31 | help="target dataset path.") 32 | parser.add_argument("--data_list_target", type=str, default='./dataset/cityscapes_labellist_val.txt', 33 | help="target dataset list file.") 34 | parser.add_argument("--ignore_label", type=int, default=255, 35 | help="the index of the label ignored in the training.") 36 | parser.add_argument("--input_size", type=str, default='1024,512', 37 | help="width and height of input images.") 38 | parser.add_argument("--input_size_target", type=str, default='2048,1024', 39 | help="width and height of target images.") 40 | parser.add_argument("--num_classes", type=int, default=19, 41 | help="number of classes.") 42 | 43 | #network 44 | parser.add_argument("--batch_size", type=int, default=1, 45 | help="number of images in each batch.") 46 | parser.add_argument("--num_workers", type=int, default=1, 47 | help="number of workers for multithread dataloading.") 48 | parser.add_argument("--learning_rate", type=float, default=1e-5, 49 | help="base learning rate.") 50 | parser.add_argument("--momentum", type=float, default=0.9, 51 | help="momentum.") 52 | parser.add_argument("--num_epoch", type=int, default=10, 53 | help="number of training epochs.") 54 | parser.add_argument("--restore_from", type=str, default='/data/yonghao.xu/PreTrainedModel/fcn8s_from_caffe.pth', 55 | help="pretrained VGG model.") 56 | parser.add_argument("--weight_decay", type=float, default=0.00005, 57 | help="regularisation parameter for L2-loss.") 58 | parser.add_argument("--noise", type=float, default=0.1, 59 | help="noise.") 60 | 61 | #hyperparameters 62 | parser.add_argument("--teacher_alpha", type=float, default=0.99, 63 | help="teacher alpha in EMA.") 64 | parser.add_argument("--attention_threshold", type=float, default=0.1, 65 | help="attention threshold.") 66 | parser.add_argument("--st_weight", type=float, default=0.3, 67 | help="self-ensembling weight.") 68 | 69 | #result 70 | parser.add_argument("--snapshot_dir", type=str, default='./Snap/', 71 | help="where to save snapshots of the model.") 72 | 73 | return parser.parse_args() 74 | 75 | def main(): 76 | 77 | """Create the model and start the training.""" 78 | args = get_arguments() 79 | if os.path.exists(args.snapshot_dir)==False: 80 | os.mkdir(args.snapshot_dir) 81 | f = open(args.snapshot_dir+'GTA2Cityscapes_log.txt', 'w') 82 | 83 | w, h = map(int, args.input_size.split(',')) 84 | input_size = (w, h) 85 | w, h = map(int, args.input_size_target.split(',')) 86 | input_size_target = (w, h) 87 | 88 | # Create network 89 | student_net = SEANet(num_classes=args.num_classes) 90 | teacher_net = SEANet(num_classes=args.num_classes) 91 | 92 | saved_state_dict = torch.load(args.restore_from) 93 | 94 | new_params = student_net.state_dict().copy() 95 | for i,j in zip(saved_state_dict,new_params): 96 | if (i[0] !='f')&(i[0] != 's')&(i[0] != 'u'): 97 | new_params[j] = saved_state_dict[i] 98 | 99 | student_net.load_state_dict(new_params) 100 | teacher_net.load_state_dict(new_params) 101 | 102 | 103 | for name, param in teacher_net.named_parameters(): 104 | param.requires_grad=False 105 | 106 | teacher_net = teacher_net.cuda() 107 | student_net = student_net.cuda() 108 | 109 | 110 | src_loader = data.DataLoader( 111 | GTA5DataSet(args.data_dir_source, args.data_list_source, 112 | crop_size=input_size, 113 | scale=False, mirror=False, mean=IMG_MEAN), 114 | 
batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers, pin_memory=True) 115 | 116 | tgt_loader = data.DataLoader( 117 | cityscapesDataSet(args.data_dir_target, args.data_list_target, max_iters=24966, 118 | crop_size=input_size, 119 | scale=False, mirror=False, mean=IMG_MEAN, 120 | set='val'), 121 | batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers, 122 | pin_memory=True) 123 | 124 | val_loader = data.DataLoader( 125 | cityscapesDataSet(args.data_dir_target, args.data_list_target, max_iters=None, 126 | crop_size=input_size, 127 | scale=False, mirror=False, mean=IMG_MEAN, 128 | set='val'), 129 | batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers, 130 | pin_memory=True) 131 | 132 | 133 | num_batches = min(len(src_loader),len(tgt_loader)) 134 | 135 | optimizer = optim.Adam(student_net.parameters(), 136 | lr=args.learning_rate, weight_decay=args.weight_decay) 137 | optimizer.zero_grad() 138 | 139 | 140 | student_params = list(student_net.parameters()) 141 | teacher_params = list(teacher_net.parameters()) 142 | 143 | teacher_optimizer = WeightEMA( 144 | teacher_params, 145 | student_params, 146 | alpha=args.teacher_alpha, 147 | ) 148 | 149 | 150 | 151 | interp = nn.Upsample(size=(input_size[1], input_size[0]), mode='bilinear') 152 | n_class = args.num_classes 153 | num_steps = args.num_epoch*num_batches 154 | loss_hist = np.zeros((num_steps,5)) 155 | index_i = -1 156 | OA_hist = 0.2 157 | aug_loss = torch.nn.MSELoss() 158 | 159 | for epoch in range(args.num_epoch): 160 | if epoch==6: 161 | return 162 | for batch_index, (src_data, tgt_data) in enumerate(zip(src_loader, tgt_loader)): 163 | index_i += 1 164 | 165 | tem_time = time.time() 166 | student_net.train() 167 | optimizer.zero_grad() 168 | 169 | # train with source 170 | images, src_label, _, im_name = src_data 171 | images = images.cuda() 172 | src_label = src_label.cuda() 173 | _,src_output = student_net(images) 174 | src_output = interp(src_output) 175 | # Segmentation Loss 176 | cls_loss_value = loss_calc(src_output, src_label) 177 | _, predict_labels = torch.max(src_output, 1) 178 | lbl_pred = predict_labels.detach().cpu().numpy() 179 | lbl_true = src_label.detach().cpu().numpy() 180 | metrics_batch = [] 181 | for lt, lp in zip(lbl_true, lbl_pred): 182 | _,_,mean_iu,_ = label_accuracy_score(lt, lp, n_class=args.num_classes) 183 | metrics_batch.append(mean_iu) 184 | miu = np.mean(metrics_batch, axis=0) 185 | 186 | 187 | # train with target 188 | images, label_target,_, im_name = tgt_data 189 | images = images.cuda() 190 | label_target = label_target.cuda() 191 | tgt_t_input = images + torch.randn(images.size()).cuda() * args.noise 192 | tgt_s_input = images + torch.randn(images.size()).cuda() * args.noise 193 | 194 | _,tgt_s_output = student_net(tgt_s_input) 195 | t_confidence,tgt_t_output = teacher_net(tgt_t_input) 196 | 197 | t_confidence = t_confidence.squeeze() 198 | 199 | # self-ensembling Loss 200 | tgt_t_predicts = F.softmax(tgt_t_output, dim=1).transpose(1, 2).transpose(2, 3) 201 | tgt_s_predicts = F.softmax(tgt_s_output, dim=1).transpose(1, 2).transpose(2, 3) 202 | 203 | 204 | mask = t_confidence > args.attention_threshold 205 | mask = mask.view(-1) 206 | num_pixel = mask.shape[0] 207 | 208 | mask_rate = torch.sum(mask).float() / num_pixel 209 | 210 | tgt_s_predicts = tgt_s_predicts.contiguous().view(-1,n_class) 211 | tgt_s_predicts = tgt_s_predicts[mask] 212 | tgt_t_predicts = tgt_t_predicts.contiguous().view(-1,n_class) 213 | tgt_t_predicts = tgt_t_predicts[mask] 214 | 
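# At this point tgt_s_predicts / tgt_t_predicts hold the student and teacher
# softmax maps flattened to (num_pixel, n_class) and restricted to the pixels
# whose teacher attention exceeds args.attention_threshold; the MSE between
# them, scaled by args.st_weight, is the self-ensembling consistency loss.
# If no pixel passes the threshold (mask_rate == 0), the term is zeroed below.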
aug_loss_value = aug_loss(tgt_s_predicts, tgt_t_predicts) 215 | aug_loss_value = args.st_weight * aug_loss_value 216 | 217 | # TOTAL LOSS 218 | if mask_rate==0.: 219 | aug_loss_value = torch.tensor(0.).cuda() 220 | 221 | total_loss = cls_loss_value + aug_loss_value 222 | 223 | total_loss.backward() 224 | loss_hist[index_i,0] = total_loss.item() 225 | loss_hist[index_i,1] = cls_loss_value.item() 226 | loss_hist[index_i,2] = aug_loss_value.item() 227 | loss_hist[index_i,3] = miu 228 | 229 | optimizer.step() 230 | teacher_optimizer.step() 231 | batch_time = time.time()-tem_time 232 | 233 | if (batch_index+1) % 10 == 0: 234 | print('epoch %d/%d: %d/%d time: %.2f miu = %.1f cls_loss = %.3f st_loss = %.3f \n'%(epoch+1, args.num_epoch,batch_index+1,num_batches,batch_time,np.mean(loss_hist[index_i-9:index_i+1,3])*100,np.mean(loss_hist[index_i-9:index_i+1,1]),np.mean(loss_hist[index_i-9:index_i+1,2]))) 235 | f.write('epoch %d/%d: %d/%d time: %.2f miu = %.1f cls_loss = %.3f st_loss = %.3f \n'%(epoch+1, args.num_epoch,batch_index+1,num_batches,batch_time,np.mean(loss_hist[index_i-9:index_i+1,3])*100,np.mean(loss_hist[index_i-9:index_i+1,1]),np.mean(loss_hist[index_i-9:index_i+1,2]))) 236 | f.flush() 237 | 238 | if (batch_index+1) % 500 == 0: 239 | OA_new = test_mIoU(f,teacher_net, val_loader, epoch+1,input_size_target,print_per_batches=10) 240 | 241 | # Saving the models 242 | if OA_new > OA_hist: 243 | f.write('Save Model\n') 244 | print('Save Model') 245 | model_name = 'GTA2Cityscapes_epoch'+repr(epoch+1)+'batch'+repr(batch_index+1)+'tgt_miu_'+repr(int(OA_new*1000))+'.pth' 246 | torch.save(teacher_net.state_dict(), os.path.join( 247 | args.snapshot_dir, model_name)) 248 | OA_hist = OA_new 249 | 250 | f.close() 251 | torch.save(teacher_net.state_dict(), os.path.join( 252 | args.snapshot_dir, 'GTA_TeacherNet.pth')) 253 | np.savez(args.snapshot_dir+'GTA_loss.npz',loss_hist=loss_hist) 254 | 255 | 256 | 257 | if __name__ == '__main__': 258 | main() 259 | -------------------------------------------------------------------------------- /SEAN_Synthia.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import torch 3 | import torch.nn as nn 4 | from torch.utils import data, model_zoo 5 | import numpy as np 6 | from torch.autograd import Variable 7 | import torch.optim as optim 8 | import torch.nn.functional as F 9 | import sys 10 | import os 11 | import os.path as osp 12 | import time 13 | from utils.tools import * 14 | from dataset.synthia_dataset import synthiaDataSet 15 | from dataset.cityscapes16_dataset import cityscapes16DataSet 16 | from model.SEAN import SEANet 17 | 18 | IMG_MEAN = np.array((104.00698793, 116.66876762, 122.67891434), dtype=np.float32) 19 | 20 | 21 | 22 | def get_arguments(): 23 | 24 | parser = argparse.ArgumentParser(description="SEAN") 25 | 26 | #dataset 27 | parser.add_argument("--data_dir_source", type=str, default='/data/yonghao.xu/SegmentationData/synthia/', 28 | help="source dataset path.") 29 | parser.add_argument("--data_list_source", type=str, default='./dataset/synthia_imagelist_train_.txt', 30 | help="source dataset list file.") 31 | parser.add_argument("--data_dir_target", type=str, default='/data/yonghao.xu/SegmentationData/cityscapes/', 32 | help="target dataset path.") 33 | parser.add_argument("--data_list_target", type=str, default='./dataset/cityscapes_labellist_val.txt', 34 | help="target dataset list file.") 35 | parser.add_argument("--ignore_label", type=int, default=255, 36 | help="the index of the label 
ignored in the training.") 37 | parser.add_argument("--input_size", type=str, default='1024,512', 38 | help="width and height of input images.") 39 | parser.add_argument("--input_size_target", type=str, default='2048,1024', 40 | help="width and height of target images.") 41 | parser.add_argument("--num_classes", type=int, default=16, 42 | help="number of classes.") 43 | 44 | #network 45 | parser.add_argument("--batch_size", type=int, default=1, 46 | help="number of images in each batch.") 47 | parser.add_argument("--num_workers", type=int, default=1, 48 | help="number of workers for multithread dataloading.") 49 | parser.add_argument("--learning_rate", type=float, default=1e-5, 50 | help="base learning rate.") 51 | parser.add_argument("--momentum", type=float, default=0.9, 52 | help="momentum.") 53 | parser.add_argument("--num_epoch", type=int, default=10, 54 | help="Number of training epochs.") 55 | parser.add_argument("--restore_from", type=str, default='/data/yonghao.xu/PreTrainedModel/fcn8s_from_caffe.pth', 56 | help="pretrained VGG model.") 57 | parser.add_argument("--weight_decay", type=float, default=0.00005, 58 | help="regularisation parameter for L2-loss.") 59 | parser.add_argument("--noise", type=float, default=0.1, 60 | help="noise.") 61 | 62 | #hyperparameters 63 | parser.add_argument("--teacher_alpha", type=float, default=0.99, 64 | help="teacher alpha in EMA.") 65 | parser.add_argument("--attention_threshold", type=float, default=0.3, 66 | help="attention threshold.") 67 | parser.add_argument("--st_weight", type=float, default=0.3, 68 | help="self-ensembling weight.") 69 | 70 | #result 71 | parser.add_argument("--snapshot_dir", type=str, default='./Snap/', 72 | help="where to save snapshots of the model.") 73 | 74 | 75 | return parser.parse_args() 76 | 77 | 78 | def main(): 79 | 80 | """Create the model and start the training.""" 81 | args = get_arguments() 82 | if os.path.exists(args.snapshot_dir)==False: 83 | os.mkdir(args.snapshot_dir) 84 | f = open(args.snapshot_dir+'Synthia2Cityscapes_log.txt', 'w') 85 | 86 | w, h = map(int, args.input_size.split(',')) 87 | input_size = (w, h) 88 | w, h = map(int, args.input_size_target.split(',')) 89 | input_size_target = (w, h) 90 | 91 | # Create network 92 | student_net = SEANet(num_classes=args.num_classes) 93 | teacher_net = SEANet(num_classes=args.num_classes) 94 | 95 | saved_state_dict = torch.load(args.restore_from) 96 | 97 | new_params = student_net.state_dict().copy() 98 | for i,j in zip(saved_state_dict,new_params): 99 | if (i[0] !='f')&(i[0] != 's')&(i[0] != 'u'): 100 | new_params[j] = saved_state_dict[i] 101 | 102 | student_net.load_state_dict(new_params) 103 | teacher_net.load_state_dict(new_params) 104 | 105 | 106 | for name, param in teacher_net.named_parameters(): 107 | param.requires_grad=False 108 | 109 | teacher_net = teacher_net.cuda() 110 | student_net = student_net.cuda() 111 | 112 | 113 | 114 | src_loader = data.DataLoader( 115 | synthiaDataSet(args.data_dir_source, args.data_list_source, 116 | crop_size=input_size, 117 | scale=False, mirror=False, mean=IMG_MEAN), 118 | batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers, pin_memory=True) 119 | 120 | tgt_loader = data.DataLoader( 121 | cityscapes16DataSet(args.data_dir_target, args.data_list_target, max_iters=9400, 122 | crop_size=input_size, 123 | scale=False, mirror=False, mean=IMG_MEAN, 124 | set='val'), 125 | batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers, 126 | pin_memory=True) 127 | 128 | val_loader = 
data.DataLoader( 129 | cityscapes16DataSet(args.data_dir_target, args.data_list_target, max_iters=None, 130 | crop_size=input_size, 131 | scale=False, mirror=False, mean=IMG_MEAN, 132 | set='val'), 133 | batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers, 134 | pin_memory=True) 135 | 136 | 137 | num_batches = min(len(src_loader),len(tgt_loader)) 138 | 139 | optimizer = optim.Adam(student_net.parameters(), 140 | lr=args.learning_rate, weight_decay=args.weight_decay) 141 | optimizer.zero_grad() 142 | 143 | 144 | student_params = list(student_net.parameters()) 145 | teacher_params = list(teacher_net.parameters()) 146 | 147 | teacher_optimizer = WeightEMA( 148 | teacher_params, 149 | student_params, 150 | alpha=args.teacher_alpha, 151 | ) 152 | 153 | 154 | 155 | interp = nn.Upsample(size=(input_size[1], input_size[0]), mode='bilinear') 156 | n_class = args.num_classes 157 | num_steps = args.num_epoch*num_batches 158 | loss_hist = np.zeros((num_steps,5)) 159 | index_i = -1 160 | OA_hist = 0.2 161 | aug_loss = torch.nn.MSELoss() 162 | 163 | for epoch in range(args.num_epoch): 164 | if epoch==6: 165 | return 166 | for batch_index, (src_data, tgt_data) in enumerate(zip(src_loader, tgt_loader)): 167 | index_i += 1 168 | 169 | tem_time = time.time() 170 | student_net.train() 171 | optimizer.zero_grad() 172 | 173 | # train with source 174 | images, src_label, _, im_name = src_data 175 | images = images.cuda() 176 | src_label = src_label.cuda() 177 | _,src_output = student_net(images) 178 | src_output = interp(src_output) 179 | # Segmentation Loss 180 | cls_loss_value = loss_calc(src_output, src_label) 181 | _, predict_labels = torch.max(src_output, 1) 182 | lbl_pred = predict_labels.detach().cpu().numpy() 183 | lbl_true = src_label.detach().cpu().numpy() 184 | metrics_batch = [] 185 | for lt, lp in zip(lbl_true, lbl_pred): 186 | _,_,mean_iu,_ = label_accuracy_score(lt, lp, n_class=args.num_classes) 187 | metrics_batch.append(mean_iu) 188 | miu = np.mean(metrics_batch, axis=0) 189 | 190 | 191 | # train with target 192 | images, label_target,_, im_name = tgt_data 193 | images = images.cuda() 194 | label_target = label_target.cuda() 195 | tgt_t_input = images + torch.randn(images.size()).cuda() * args.noise 196 | tgt_s_input = images + torch.randn(images.size()).cuda() * args.noise 197 | 198 | _,tgt_s_output = student_net(tgt_s_input) 199 | t_confidence,tgt_t_output = teacher_net(tgt_t_input) 200 | 201 | t_confidence = t_confidence.squeeze() 202 | 203 | # self-ensembling Loss 204 | tgt_t_predicts = F.softmax(tgt_t_output, dim=1).transpose(1, 2).transpose(2, 3) 205 | tgt_s_predicts = F.softmax(tgt_s_output, dim=1).transpose(1, 2).transpose(2, 3) 206 | 207 | 208 | mask = t_confidence > args.attention_threshold 209 | mask = mask.view(-1) 210 | num_pixel = mask.shape[0] 211 | 212 | mask_rate = torch.sum(mask).float() / num_pixel 213 | 214 | tgt_s_predicts = tgt_s_predicts.contiguous().view(-1,n_class) 215 | tgt_s_predicts = tgt_s_predicts[mask] 216 | tgt_t_predicts = tgt_t_predicts.contiguous().view(-1,n_class) 217 | tgt_t_predicts = tgt_t_predicts[mask] 218 | aug_loss_value = aug_loss(tgt_s_predicts, tgt_t_predicts) 219 | aug_loss_value = args.st_weight * aug_loss_value 220 | 221 | # TOTAL LOSS 222 | if mask_rate==0.: 223 | aug_loss_value = torch.tensor(0.).cuda() 224 | 225 | total_loss = cls_loss_value + aug_loss_value 226 | 227 | total_loss.backward() 228 | loss_hist[index_i,0] = total_loss.item() 229 | loss_hist[index_i,1] = cls_loss_value.item() 230 | loss_hist[index_i,2] = 
aug_loss_value.item() 231 | loss_hist[index_i,3] = miu 232 | 233 | optimizer.step() 234 | teacher_optimizer.step() 235 | batch_time = time.time()-tem_time 236 | 237 | if (batch_index+1) % 10 == 0: 238 | print('epoch %d/%d: %d/%d time: %.2f miu = %.1f cls_loss = %.3f st_loss = %.3f \n'%(epoch+1, args.num_epoch,batch_index+1,num_batches,batch_time,np.mean(loss_hist[index_i-9:index_i+1,3])*100,np.mean(loss_hist[index_i-9:index_i+1,1]),np.mean(loss_hist[index_i-9:index_i+1,2]))) 239 | f.write('epoch %d/%d: %d/%d time: %.2f miu = %.1f cls_loss = %.3f st_loss = %.3f \n'%(epoch+1, args.num_epoch,batch_index+1,num_batches,batch_time,np.mean(loss_hist[index_i-9:index_i+1,3])*100,np.mean(loss_hist[index_i-9:index_i+1,1]),np.mean(loss_hist[index_i-9:index_i+1,2]))) 240 | f.flush() 241 | 242 | if (batch_index+1) % 500 == 0: 243 | OA_new = test_mIoU16(f,teacher_net, val_loader, epoch+1,input_size_target,print_per_batches=10) 244 | 245 | # Saving the models 246 | if OA_new > OA_hist: 247 | f.write('Save Model\n') 248 | print('Save Model') 249 | model_name = 'Synthia2Cityscapes_epoch'+repr(epoch+1)+'batch'+repr(batch_index+1)+'tgt_miu_'+repr(int(OA_new*1000))+'.pth' 250 | torch.save(teacher_net.state_dict(), os.path.join( 251 | args.snapshot_dir, model_name)) 252 | OA_hist = OA_new 253 | 254 | 255 | f.close() 256 | torch.save(teacher_net.state_dict(), os.path.join( 257 | args.snapshot_dir, 'Synthia_TeacherNet.pth')) 258 | np.savez(args.snapshot_dir+'Synthia_loss.npz',loss_hist=loss_hist) 259 | 260 | 261 | 262 | if __name__ == '__main__': 263 | main() 264 | -------------------------------------------------------------------------------- /dataset/cityscapes_labellist_val.txt: -------------------------------------------------------------------------------- 1 | frankfurt/frankfurt_000001_007973_leftImg8bit.png 2 | frankfurt/frankfurt_000001_025921_leftImg8bit.png 3 | frankfurt/frankfurt_000001_062016_leftImg8bit.png 4 | frankfurt/frankfurt_000001_049078_leftImg8bit.png 5 | frankfurt/frankfurt_000000_009561_leftImg8bit.png 6 | frankfurt/frankfurt_000001_013710_leftImg8bit.png 7 | frankfurt/frankfurt_000001_041664_leftImg8bit.png 8 | frankfurt/frankfurt_000000_013240_leftImg8bit.png 9 | frankfurt/frankfurt_000001_044787_leftImg8bit.png 10 | frankfurt/frankfurt_000001_015328_leftImg8bit.png 11 | frankfurt/frankfurt_000001_073243_leftImg8bit.png 12 | frankfurt/frankfurt_000001_034816_leftImg8bit.png 13 | frankfurt/frankfurt_000001_041074_leftImg8bit.png 14 | frankfurt/frankfurt_000001_005898_leftImg8bit.png 15 | frankfurt/frankfurt_000000_022254_leftImg8bit.png 16 | frankfurt/frankfurt_000001_044658_leftImg8bit.png 17 | frankfurt/frankfurt_000001_009504_leftImg8bit.png 18 | frankfurt/frankfurt_000001_024927_leftImg8bit.png 19 | frankfurt/frankfurt_000001_017842_leftImg8bit.png 20 | frankfurt/frankfurt_000001_068208_leftImg8bit.png 21 | frankfurt/frankfurt_000001_013016_leftImg8bit.png 22 | frankfurt/frankfurt_000001_010156_leftImg8bit.png 23 | frankfurt/frankfurt_000000_002963_leftImg8bit.png 24 | frankfurt/frankfurt_000001_020693_leftImg8bit.png 25 | frankfurt/frankfurt_000001_078803_leftImg8bit.png 26 | frankfurt/frankfurt_000001_025713_leftImg8bit.png 27 | frankfurt/frankfurt_000001_007285_leftImg8bit.png 28 | frankfurt/frankfurt_000001_070099_leftImg8bit.png 29 | frankfurt/frankfurt_000000_009291_leftImg8bit.png 30 | frankfurt/frankfurt_000000_019607_leftImg8bit.png 31 | frankfurt/frankfurt_000001_068063_leftImg8bit.png 32 | frankfurt/frankfurt_000000_003920_leftImg8bit.png 33 | 
frankfurt/frankfurt_000001_077233_leftImg8bit.png 34 | frankfurt/frankfurt_000001_029086_leftImg8bit.png 35 | frankfurt/frankfurt_000001_060545_leftImg8bit.png 36 | frankfurt/frankfurt_000001_001464_leftImg8bit.png 37 | frankfurt/frankfurt_000001_028590_leftImg8bit.png 38 | frankfurt/frankfurt_000001_016462_leftImg8bit.png 39 | frankfurt/frankfurt_000001_060422_leftImg8bit.png 40 | frankfurt/frankfurt_000001_009058_leftImg8bit.png 41 | frankfurt/frankfurt_000001_080830_leftImg8bit.png 42 | frankfurt/frankfurt_000001_012870_leftImg8bit.png 43 | frankfurt/frankfurt_000001_077434_leftImg8bit.png 44 | frankfurt/frankfurt_000001_033655_leftImg8bit.png 45 | frankfurt/frankfurt_000001_051516_leftImg8bit.png 46 | frankfurt/frankfurt_000001_044413_leftImg8bit.png 47 | frankfurt/frankfurt_000001_055172_leftImg8bit.png 48 | frankfurt/frankfurt_000001_040575_leftImg8bit.png 49 | frankfurt/frankfurt_000000_020215_leftImg8bit.png 50 | frankfurt/frankfurt_000000_017228_leftImg8bit.png 51 | frankfurt/frankfurt_000001_041354_leftImg8bit.png 52 | frankfurt/frankfurt_000000_008206_leftImg8bit.png 53 | frankfurt/frankfurt_000001_043564_leftImg8bit.png 54 | frankfurt/frankfurt_000001_032711_leftImg8bit.png 55 | frankfurt/frankfurt_000001_064130_leftImg8bit.png 56 | frankfurt/frankfurt_000001_053102_leftImg8bit.png 57 | frankfurt/frankfurt_000001_082087_leftImg8bit.png 58 | frankfurt/frankfurt_000001_057478_leftImg8bit.png 59 | frankfurt/frankfurt_000001_007407_leftImg8bit.png 60 | frankfurt/frankfurt_000001_008200_leftImg8bit.png 61 | frankfurt/frankfurt_000001_038844_leftImg8bit.png 62 | frankfurt/frankfurt_000001_016029_leftImg8bit.png 63 | frankfurt/frankfurt_000001_058176_leftImg8bit.png 64 | frankfurt/frankfurt_000001_057181_leftImg8bit.png 65 | frankfurt/frankfurt_000001_039895_leftImg8bit.png 66 | frankfurt/frankfurt_000000_000294_leftImg8bit.png 67 | frankfurt/frankfurt_000001_055062_leftImg8bit.png 68 | frankfurt/frankfurt_000001_083029_leftImg8bit.png 69 | frankfurt/frankfurt_000001_010444_leftImg8bit.png 70 | frankfurt/frankfurt_000001_041517_leftImg8bit.png 71 | frankfurt/frankfurt_000001_069633_leftImg8bit.png 72 | frankfurt/frankfurt_000001_020287_leftImg8bit.png 73 | frankfurt/frankfurt_000001_012038_leftImg8bit.png 74 | frankfurt/frankfurt_000001_046504_leftImg8bit.png 75 | frankfurt/frankfurt_000001_032556_leftImg8bit.png 76 | frankfurt/frankfurt_000000_001751_leftImg8bit.png 77 | frankfurt/frankfurt_000001_000538_leftImg8bit.png 78 | frankfurt/frankfurt_000001_083852_leftImg8bit.png 79 | frankfurt/frankfurt_000001_077092_leftImg8bit.png 80 | frankfurt/frankfurt_000001_017101_leftImg8bit.png 81 | frankfurt/frankfurt_000001_044525_leftImg8bit.png 82 | frankfurt/frankfurt_000001_005703_leftImg8bit.png 83 | frankfurt/frankfurt_000001_080391_leftImg8bit.png 84 | frankfurt/frankfurt_000001_038418_leftImg8bit.png 85 | frankfurt/frankfurt_000001_066832_leftImg8bit.png 86 | frankfurt/frankfurt_000000_003357_leftImg8bit.png 87 | frankfurt/frankfurt_000000_020880_leftImg8bit.png 88 | frankfurt/frankfurt_000001_062396_leftImg8bit.png 89 | frankfurt/frankfurt_000001_046272_leftImg8bit.png 90 | frankfurt/frankfurt_000001_062509_leftImg8bit.png 91 | frankfurt/frankfurt_000001_054415_leftImg8bit.png 92 | frankfurt/frankfurt_000001_021406_leftImg8bit.png 93 | frankfurt/frankfurt_000001_030310_leftImg8bit.png 94 | frankfurt/frankfurt_000000_014480_leftImg8bit.png 95 | frankfurt/frankfurt_000001_005410_leftImg8bit.png 96 | frankfurt/frankfurt_000000_022797_leftImg8bit.png 97 | 
frankfurt/frankfurt_000001_035144_leftImg8bit.png 98 | frankfurt/frankfurt_000001_014565_leftImg8bit.png 99 | frankfurt/frankfurt_000001_065850_leftImg8bit.png 100 | frankfurt/frankfurt_000000_000576_leftImg8bit.png 101 | frankfurt/frankfurt_000001_065617_leftImg8bit.png 102 | frankfurt/frankfurt_000000_005543_leftImg8bit.png 103 | frankfurt/frankfurt_000001_055709_leftImg8bit.png 104 | frankfurt/frankfurt_000001_027325_leftImg8bit.png 105 | frankfurt/frankfurt_000001_011835_leftImg8bit.png 106 | frankfurt/frankfurt_000001_046779_leftImg8bit.png 107 | frankfurt/frankfurt_000001_064305_leftImg8bit.png 108 | frankfurt/frankfurt_000001_012738_leftImg8bit.png 109 | frankfurt/frankfurt_000001_048355_leftImg8bit.png 110 | frankfurt/frankfurt_000001_019969_leftImg8bit.png 111 | frankfurt/frankfurt_000001_080091_leftImg8bit.png 112 | frankfurt/frankfurt_000000_011007_leftImg8bit.png 113 | frankfurt/frankfurt_000000_015676_leftImg8bit.png 114 | frankfurt/frankfurt_000001_044227_leftImg8bit.png 115 | frankfurt/frankfurt_000001_055387_leftImg8bit.png 116 | frankfurt/frankfurt_000001_038245_leftImg8bit.png 117 | frankfurt/frankfurt_000001_059642_leftImg8bit.png 118 | frankfurt/frankfurt_000001_030669_leftImg8bit.png 119 | frankfurt/frankfurt_000001_068772_leftImg8bit.png 120 | frankfurt/frankfurt_000001_079206_leftImg8bit.png 121 | frankfurt/frankfurt_000001_055306_leftImg8bit.png 122 | frankfurt/frankfurt_000001_012699_leftImg8bit.png 123 | frankfurt/frankfurt_000001_042384_leftImg8bit.png 124 | frankfurt/frankfurt_000001_054077_leftImg8bit.png 125 | frankfurt/frankfurt_000001_010830_leftImg8bit.png 126 | frankfurt/frankfurt_000001_052120_leftImg8bit.png 127 | frankfurt/frankfurt_000001_032018_leftImg8bit.png 128 | frankfurt/frankfurt_000001_051737_leftImg8bit.png 129 | frankfurt/frankfurt_000001_028335_leftImg8bit.png 130 | frankfurt/frankfurt_000001_049770_leftImg8bit.png 131 | frankfurt/frankfurt_000001_054884_leftImg8bit.png 132 | frankfurt/frankfurt_000001_019698_leftImg8bit.png 133 | frankfurt/frankfurt_000000_011461_leftImg8bit.png 134 | frankfurt/frankfurt_000000_001016_leftImg8bit.png 135 | frankfurt/frankfurt_000001_062250_leftImg8bit.png 136 | frankfurt/frankfurt_000001_004736_leftImg8bit.png 137 | frankfurt/frankfurt_000001_068682_leftImg8bit.png 138 | frankfurt/frankfurt_000000_006589_leftImg8bit.png 139 | frankfurt/frankfurt_000000_011810_leftImg8bit.png 140 | frankfurt/frankfurt_000001_066574_leftImg8bit.png 141 | frankfurt/frankfurt_000001_048654_leftImg8bit.png 142 | frankfurt/frankfurt_000001_049209_leftImg8bit.png 143 | frankfurt/frankfurt_000001_042098_leftImg8bit.png 144 | frankfurt/frankfurt_000001_031416_leftImg8bit.png 145 | frankfurt/frankfurt_000000_009969_leftImg8bit.png 146 | frankfurt/frankfurt_000001_038645_leftImg8bit.png 147 | frankfurt/frankfurt_000001_020046_leftImg8bit.png 148 | frankfurt/frankfurt_000001_054219_leftImg8bit.png 149 | frankfurt/frankfurt_000001_002759_leftImg8bit.png 150 | frankfurt/frankfurt_000001_066438_leftImg8bit.png 151 | frankfurt/frankfurt_000000_020321_leftImg8bit.png 152 | frankfurt/frankfurt_000001_002646_leftImg8bit.png 153 | frankfurt/frankfurt_000001_046126_leftImg8bit.png 154 | frankfurt/frankfurt_000000_002196_leftImg8bit.png 155 | frankfurt/frankfurt_000001_057954_leftImg8bit.png 156 | frankfurt/frankfurt_000001_011715_leftImg8bit.png 157 | frankfurt/frankfurt_000000_021879_leftImg8bit.png 158 | frankfurt/frankfurt_000001_082466_leftImg8bit.png 159 | frankfurt/frankfurt_000000_003025_leftImg8bit.png 160 | 
frankfurt/frankfurt_000001_023369_leftImg8bit.png 161 | frankfurt/frankfurt_000001_061682_leftImg8bit.png 162 | frankfurt/frankfurt_000001_017459_leftImg8bit.png 163 | frankfurt/frankfurt_000001_059789_leftImg8bit.png 164 | frankfurt/frankfurt_000001_073464_leftImg8bit.png 165 | frankfurt/frankfurt_000001_063045_leftImg8bit.png 166 | frankfurt/frankfurt_000001_064651_leftImg8bit.png 167 | frankfurt/frankfurt_000000_013382_leftImg8bit.png 168 | frankfurt/frankfurt_000001_002512_leftImg8bit.png 169 | frankfurt/frankfurt_000001_032942_leftImg8bit.png 170 | frankfurt/frankfurt_000001_010600_leftImg8bit.png 171 | frankfurt/frankfurt_000001_030067_leftImg8bit.png 172 | frankfurt/frankfurt_000001_014741_leftImg8bit.png 173 | frankfurt/frankfurt_000000_021667_leftImg8bit.png 174 | frankfurt/frankfurt_000001_051807_leftImg8bit.png 175 | frankfurt/frankfurt_000001_019854_leftImg8bit.png 176 | frankfurt/frankfurt_000001_015768_leftImg8bit.png 177 | frankfurt/frankfurt_000001_007857_leftImg8bit.png 178 | frankfurt/frankfurt_000001_058914_leftImg8bit.png 179 | frankfurt/frankfurt_000000_012868_leftImg8bit.png 180 | frankfurt/frankfurt_000000_013942_leftImg8bit.png 181 | frankfurt/frankfurt_000001_014406_leftImg8bit.png 182 | frankfurt/frankfurt_000001_049298_leftImg8bit.png 183 | frankfurt/frankfurt_000001_023769_leftImg8bit.png 184 | frankfurt/frankfurt_000001_012519_leftImg8bit.png 185 | frankfurt/frankfurt_000001_064925_leftImg8bit.png 186 | frankfurt/frankfurt_000001_072295_leftImg8bit.png 187 | frankfurt/frankfurt_000001_058504_leftImg8bit.png 188 | frankfurt/frankfurt_000001_059119_leftImg8bit.png 189 | frankfurt/frankfurt_000001_015091_leftImg8bit.png 190 | frankfurt/frankfurt_000001_058057_leftImg8bit.png 191 | frankfurt/frankfurt_000001_003056_leftImg8bit.png 192 | frankfurt/frankfurt_000001_007622_leftImg8bit.png 193 | frankfurt/frankfurt_000001_016273_leftImg8bit.png 194 | frankfurt/frankfurt_000001_035864_leftImg8bit.png 195 | frankfurt/frankfurt_000001_067092_leftImg8bit.png 196 | frankfurt/frankfurt_000000_013067_leftImg8bit.png 197 | frankfurt/frankfurt_000001_067474_leftImg8bit.png 198 | frankfurt/frankfurt_000001_060135_leftImg8bit.png 199 | frankfurt/frankfurt_000000_018797_leftImg8bit.png 200 | frankfurt/frankfurt_000000_005898_leftImg8bit.png 201 | frankfurt/frankfurt_000001_055603_leftImg8bit.png 202 | frankfurt/frankfurt_000001_060906_leftImg8bit.png 203 | frankfurt/frankfurt_000001_062653_leftImg8bit.png 204 | frankfurt/frankfurt_000000_004617_leftImg8bit.png 205 | frankfurt/frankfurt_000001_055538_leftImg8bit.png 206 | frankfurt/frankfurt_000000_008451_leftImg8bit.png 207 | frankfurt/frankfurt_000001_052594_leftImg8bit.png 208 | frankfurt/frankfurt_000001_004327_leftImg8bit.png 209 | frankfurt/frankfurt_000001_075296_leftImg8bit.png 210 | frankfurt/frankfurt_000001_073088_leftImg8bit.png 211 | frankfurt/frankfurt_000001_005184_leftImg8bit.png 212 | frankfurt/frankfurt_000000_016286_leftImg8bit.png 213 | frankfurt/frankfurt_000001_008688_leftImg8bit.png 214 | frankfurt/frankfurt_000000_011074_leftImg8bit.png 215 | frankfurt/frankfurt_000001_056580_leftImg8bit.png 216 | frankfurt/frankfurt_000001_067735_leftImg8bit.png 217 | frankfurt/frankfurt_000001_034047_leftImg8bit.png 218 | frankfurt/frankfurt_000001_076502_leftImg8bit.png 219 | frankfurt/frankfurt_000001_071288_leftImg8bit.png 220 | frankfurt/frankfurt_000001_067295_leftImg8bit.png 221 | frankfurt/frankfurt_000001_071781_leftImg8bit.png 222 | frankfurt/frankfurt_000000_012121_leftImg8bit.png 223 | 
frankfurt/frankfurt_000001_004859_leftImg8bit.png 224 | frankfurt/frankfurt_000001_073911_leftImg8bit.png 225 | frankfurt/frankfurt_000001_047552_leftImg8bit.png 226 | frankfurt/frankfurt_000001_037705_leftImg8bit.png 227 | frankfurt/frankfurt_000001_025512_leftImg8bit.png 228 | frankfurt/frankfurt_000001_047178_leftImg8bit.png 229 | frankfurt/frankfurt_000001_014221_leftImg8bit.png 230 | frankfurt/frankfurt_000000_007365_leftImg8bit.png 231 | frankfurt/frankfurt_000001_049698_leftImg8bit.png 232 | frankfurt/frankfurt_000001_065160_leftImg8bit.png 233 | frankfurt/frankfurt_000001_061763_leftImg8bit.png 234 | frankfurt/frankfurt_000000_010351_leftImg8bit.png 235 | frankfurt/frankfurt_000001_072155_leftImg8bit.png 236 | frankfurt/frankfurt_000001_023235_leftImg8bit.png 237 | frankfurt/frankfurt_000000_015389_leftImg8bit.png 238 | frankfurt/frankfurt_000000_009688_leftImg8bit.png 239 | frankfurt/frankfurt_000000_016005_leftImg8bit.png 240 | frankfurt/frankfurt_000001_054640_leftImg8bit.png 241 | frankfurt/frankfurt_000001_029600_leftImg8bit.png 242 | frankfurt/frankfurt_000001_028232_leftImg8bit.png 243 | frankfurt/frankfurt_000001_050686_leftImg8bit.png 244 | frankfurt/frankfurt_000001_013496_leftImg8bit.png 245 | frankfurt/frankfurt_000001_066092_leftImg8bit.png 246 | frankfurt/frankfurt_000001_009854_leftImg8bit.png 247 | frankfurt/frankfurt_000001_067178_leftImg8bit.png 248 | frankfurt/frankfurt_000001_028854_leftImg8bit.png 249 | frankfurt/frankfurt_000001_083199_leftImg8bit.png 250 | frankfurt/frankfurt_000001_064798_leftImg8bit.png 251 | frankfurt/frankfurt_000001_018113_leftImg8bit.png 252 | frankfurt/frankfurt_000001_050149_leftImg8bit.png 253 | frankfurt/frankfurt_000001_048196_leftImg8bit.png 254 | frankfurt/frankfurt_000000_001236_leftImg8bit.png 255 | frankfurt/frankfurt_000000_017476_leftImg8bit.png 256 | frankfurt/frankfurt_000001_003588_leftImg8bit.png 257 | frankfurt/frankfurt_000001_021825_leftImg8bit.png 258 | frankfurt/frankfurt_000000_010763_leftImg8bit.png 259 | frankfurt/frankfurt_000001_062793_leftImg8bit.png 260 | frankfurt/frankfurt_000001_029236_leftImg8bit.png 261 | frankfurt/frankfurt_000001_075984_leftImg8bit.png 262 | frankfurt/frankfurt_000001_031266_leftImg8bit.png 263 | frankfurt/frankfurt_000001_043395_leftImg8bit.png 264 | frankfurt/frankfurt_000001_040732_leftImg8bit.png 265 | frankfurt/frankfurt_000001_011162_leftImg8bit.png 266 | frankfurt/frankfurt_000000_012009_leftImg8bit.png 267 | frankfurt/frankfurt_000001_042733_leftImg8bit.png 268 | lindau/lindau_000052_000019_leftImg8bit.png 269 | lindau/lindau_000009_000019_leftImg8bit.png 270 | lindau/lindau_000037_000019_leftImg8bit.png 271 | lindau/lindau_000047_000019_leftImg8bit.png 272 | lindau/lindau_000015_000019_leftImg8bit.png 273 | lindau/lindau_000030_000019_leftImg8bit.png 274 | lindau/lindau_000012_000019_leftImg8bit.png 275 | lindau/lindau_000032_000019_leftImg8bit.png 276 | lindau/lindau_000046_000019_leftImg8bit.png 277 | lindau/lindau_000000_000019_leftImg8bit.png 278 | lindau/lindau_000031_000019_leftImg8bit.png 279 | lindau/lindau_000011_000019_leftImg8bit.png 280 | lindau/lindau_000027_000019_leftImg8bit.png 281 | lindau/lindau_000054_000019_leftImg8bit.png 282 | lindau/lindau_000026_000019_leftImg8bit.png 283 | lindau/lindau_000017_000019_leftImg8bit.png 284 | lindau/lindau_000023_000019_leftImg8bit.png 285 | lindau/lindau_000005_000019_leftImg8bit.png 286 | lindau/lindau_000056_000019_leftImg8bit.png 287 | lindau/lindau_000025_000019_leftImg8bit.png 288 | 
lindau/lindau_000045_000019_leftImg8bit.png 289 | lindau/lindau_000014_000019_leftImg8bit.png 290 | lindau/lindau_000004_000019_leftImg8bit.png 291 | lindau/lindau_000021_000019_leftImg8bit.png 292 | lindau/lindau_000049_000019_leftImg8bit.png 293 | lindau/lindau_000033_000019_leftImg8bit.png 294 | lindau/lindau_000042_000019_leftImg8bit.png 295 | lindau/lindau_000013_000019_leftImg8bit.png 296 | lindau/lindau_000024_000019_leftImg8bit.png 297 | lindau/lindau_000002_000019_leftImg8bit.png 298 | lindau/lindau_000043_000019_leftImg8bit.png 299 | lindau/lindau_000016_000019_leftImg8bit.png 300 | lindau/lindau_000050_000019_leftImg8bit.png 301 | lindau/lindau_000018_000019_leftImg8bit.png 302 | lindau/lindau_000007_000019_leftImg8bit.png 303 | lindau/lindau_000048_000019_leftImg8bit.png 304 | lindau/lindau_000022_000019_leftImg8bit.png 305 | lindau/lindau_000053_000019_leftImg8bit.png 306 | lindau/lindau_000038_000019_leftImg8bit.png 307 | lindau/lindau_000001_000019_leftImg8bit.png 308 | lindau/lindau_000036_000019_leftImg8bit.png 309 | lindau/lindau_000035_000019_leftImg8bit.png 310 | lindau/lindau_000003_000019_leftImg8bit.png 311 | lindau/lindau_000034_000019_leftImg8bit.png 312 | lindau/lindau_000010_000019_leftImg8bit.png 313 | lindau/lindau_000055_000019_leftImg8bit.png 314 | lindau/lindau_000006_000019_leftImg8bit.png 315 | lindau/lindau_000019_000019_leftImg8bit.png 316 | lindau/lindau_000029_000019_leftImg8bit.png 317 | lindau/lindau_000039_000019_leftImg8bit.png 318 | lindau/lindau_000051_000019_leftImg8bit.png 319 | lindau/lindau_000020_000019_leftImg8bit.png 320 | lindau/lindau_000057_000019_leftImg8bit.png 321 | lindau/lindau_000041_000019_leftImg8bit.png 322 | lindau/lindau_000040_000019_leftImg8bit.png 323 | lindau/lindau_000044_000019_leftImg8bit.png 324 | lindau/lindau_000028_000019_leftImg8bit.png 325 | lindau/lindau_000058_000019_leftImg8bit.png 326 | lindau/lindau_000008_000019_leftImg8bit.png 327 | munster/munster_000000_000019_leftImg8bit.png 328 | munster/munster_000012_000019_leftImg8bit.png 329 | munster/munster_000032_000019_leftImg8bit.png 330 | munster/munster_000068_000019_leftImg8bit.png 331 | munster/munster_000101_000019_leftImg8bit.png 332 | munster/munster_000153_000019_leftImg8bit.png 333 | munster/munster_000115_000019_leftImg8bit.png 334 | munster/munster_000029_000019_leftImg8bit.png 335 | munster/munster_000019_000019_leftImg8bit.png 336 | munster/munster_000156_000019_leftImg8bit.png 337 | munster/munster_000129_000019_leftImg8bit.png 338 | munster/munster_000169_000019_leftImg8bit.png 339 | munster/munster_000150_000019_leftImg8bit.png 340 | munster/munster_000165_000019_leftImg8bit.png 341 | munster/munster_000050_000019_leftImg8bit.png 342 | munster/munster_000025_000019_leftImg8bit.png 343 | munster/munster_000116_000019_leftImg8bit.png 344 | munster/munster_000132_000019_leftImg8bit.png 345 | munster/munster_000066_000019_leftImg8bit.png 346 | munster/munster_000096_000019_leftImg8bit.png 347 | munster/munster_000030_000019_leftImg8bit.png 348 | munster/munster_000146_000019_leftImg8bit.png 349 | munster/munster_000098_000019_leftImg8bit.png 350 | munster/munster_000059_000019_leftImg8bit.png 351 | munster/munster_000093_000019_leftImg8bit.png 352 | munster/munster_000122_000019_leftImg8bit.png 353 | munster/munster_000024_000019_leftImg8bit.png 354 | munster/munster_000036_000019_leftImg8bit.png 355 | munster/munster_000086_000019_leftImg8bit.png 356 | munster/munster_000163_000019_leftImg8bit.png 357 | 
munster/munster_000001_000019_leftImg8bit.png 358 | munster/munster_000053_000019_leftImg8bit.png 359 | munster/munster_000071_000019_leftImg8bit.png 360 | munster/munster_000079_000019_leftImg8bit.png 361 | munster/munster_000159_000019_leftImg8bit.png 362 | munster/munster_000038_000019_leftImg8bit.png 363 | munster/munster_000138_000019_leftImg8bit.png 364 | munster/munster_000135_000019_leftImg8bit.png 365 | munster/munster_000065_000019_leftImg8bit.png 366 | munster/munster_000139_000019_leftImg8bit.png 367 | munster/munster_000108_000019_leftImg8bit.png 368 | munster/munster_000020_000019_leftImg8bit.png 369 | munster/munster_000074_000019_leftImg8bit.png 370 | munster/munster_000035_000019_leftImg8bit.png 371 | munster/munster_000067_000019_leftImg8bit.png 372 | munster/munster_000151_000019_leftImg8bit.png 373 | munster/munster_000083_000019_leftImg8bit.png 374 | munster/munster_000118_000019_leftImg8bit.png 375 | munster/munster_000046_000019_leftImg8bit.png 376 | munster/munster_000147_000019_leftImg8bit.png 377 | munster/munster_000047_000019_leftImg8bit.png 378 | munster/munster_000043_000019_leftImg8bit.png 379 | munster/munster_000168_000019_leftImg8bit.png 380 | munster/munster_000167_000019_leftImg8bit.png 381 | munster/munster_000021_000019_leftImg8bit.png 382 | munster/munster_000073_000019_leftImg8bit.png 383 | munster/munster_000089_000019_leftImg8bit.png 384 | munster/munster_000060_000019_leftImg8bit.png 385 | munster/munster_000155_000019_leftImg8bit.png 386 | munster/munster_000140_000019_leftImg8bit.png 387 | munster/munster_000145_000019_leftImg8bit.png 388 | munster/munster_000077_000019_leftImg8bit.png 389 | munster/munster_000018_000019_leftImg8bit.png 390 | munster/munster_000045_000019_leftImg8bit.png 391 | munster/munster_000166_000019_leftImg8bit.png 392 | munster/munster_000037_000019_leftImg8bit.png 393 | munster/munster_000112_000019_leftImg8bit.png 394 | munster/munster_000080_000019_leftImg8bit.png 395 | munster/munster_000144_000019_leftImg8bit.png 396 | munster/munster_000142_000019_leftImg8bit.png 397 | munster/munster_000070_000019_leftImg8bit.png 398 | munster/munster_000044_000019_leftImg8bit.png 399 | munster/munster_000137_000019_leftImg8bit.png 400 | munster/munster_000041_000019_leftImg8bit.png 401 | munster/munster_000113_000019_leftImg8bit.png 402 | munster/munster_000075_000019_leftImg8bit.png 403 | munster/munster_000157_000019_leftImg8bit.png 404 | munster/munster_000158_000019_leftImg8bit.png 405 | munster/munster_000109_000019_leftImg8bit.png 406 | munster/munster_000033_000019_leftImg8bit.png 407 | munster/munster_000088_000019_leftImg8bit.png 408 | munster/munster_000090_000019_leftImg8bit.png 409 | munster/munster_000114_000019_leftImg8bit.png 410 | munster/munster_000171_000019_leftImg8bit.png 411 | munster/munster_000013_000019_leftImg8bit.png 412 | munster/munster_000130_000019_leftImg8bit.png 413 | munster/munster_000016_000019_leftImg8bit.png 414 | munster/munster_000136_000019_leftImg8bit.png 415 | munster/munster_000007_000019_leftImg8bit.png 416 | munster/munster_000014_000019_leftImg8bit.png 417 | munster/munster_000052_000019_leftImg8bit.png 418 | munster/munster_000104_000019_leftImg8bit.png 419 | munster/munster_000173_000019_leftImg8bit.png 420 | munster/munster_000057_000019_leftImg8bit.png 421 | munster/munster_000072_000019_leftImg8bit.png 422 | munster/munster_000003_000019_leftImg8bit.png 423 | munster/munster_000161_000019_leftImg8bit.png 424 | munster/munster_000002_000019_leftImg8bit.png 425 | 
munster/munster_000028_000019_leftImg8bit.png 426 | munster/munster_000051_000019_leftImg8bit.png 427 | munster/munster_000105_000019_leftImg8bit.png 428 | munster/munster_000061_000019_leftImg8bit.png 429 | munster/munster_000058_000019_leftImg8bit.png 430 | munster/munster_000094_000019_leftImg8bit.png 431 | munster/munster_000027_000019_leftImg8bit.png 432 | munster/munster_000062_000019_leftImg8bit.png 433 | munster/munster_000127_000019_leftImg8bit.png 434 | munster/munster_000110_000019_leftImg8bit.png 435 | munster/munster_000170_000019_leftImg8bit.png 436 | munster/munster_000023_000019_leftImg8bit.png 437 | munster/munster_000084_000019_leftImg8bit.png 438 | munster/munster_000121_000019_leftImg8bit.png 439 | munster/munster_000087_000019_leftImg8bit.png 440 | munster/munster_000097_000019_leftImg8bit.png 441 | munster/munster_000119_000019_leftImg8bit.png 442 | munster/munster_000128_000019_leftImg8bit.png 443 | munster/munster_000078_000019_leftImg8bit.png 444 | munster/munster_000010_000019_leftImg8bit.png 445 | munster/munster_000015_000019_leftImg8bit.png 446 | munster/munster_000048_000019_leftImg8bit.png 447 | munster/munster_000085_000019_leftImg8bit.png 448 | munster/munster_000164_000019_leftImg8bit.png 449 | munster/munster_000111_000019_leftImg8bit.png 450 | munster/munster_000099_000019_leftImg8bit.png 451 | munster/munster_000117_000019_leftImg8bit.png 452 | munster/munster_000009_000019_leftImg8bit.png 453 | munster/munster_000049_000019_leftImg8bit.png 454 | munster/munster_000148_000019_leftImg8bit.png 455 | munster/munster_000022_000019_leftImg8bit.png 456 | munster/munster_000131_000019_leftImg8bit.png 457 | munster/munster_000006_000019_leftImg8bit.png 458 | munster/munster_000005_000019_leftImg8bit.png 459 | munster/munster_000102_000019_leftImg8bit.png 460 | munster/munster_000160_000019_leftImg8bit.png 461 | munster/munster_000107_000019_leftImg8bit.png 462 | munster/munster_000095_000019_leftImg8bit.png 463 | munster/munster_000106_000019_leftImg8bit.png 464 | munster/munster_000034_000019_leftImg8bit.png 465 | munster/munster_000143_000019_leftImg8bit.png 466 | munster/munster_000017_000019_leftImg8bit.png 467 | munster/munster_000040_000019_leftImg8bit.png 468 | munster/munster_000152_000019_leftImg8bit.png 469 | munster/munster_000154_000019_leftImg8bit.png 470 | munster/munster_000100_000019_leftImg8bit.png 471 | munster/munster_000004_000019_leftImg8bit.png 472 | munster/munster_000141_000019_leftImg8bit.png 473 | munster/munster_000011_000019_leftImg8bit.png 474 | munster/munster_000055_000019_leftImg8bit.png 475 | munster/munster_000134_000019_leftImg8bit.png 476 | munster/munster_000054_000019_leftImg8bit.png 477 | munster/munster_000064_000019_leftImg8bit.png 478 | munster/munster_000039_000019_leftImg8bit.png 479 | munster/munster_000103_000019_leftImg8bit.png 480 | munster/munster_000092_000019_leftImg8bit.png 481 | munster/munster_000172_000019_leftImg8bit.png 482 | munster/munster_000042_000019_leftImg8bit.png 483 | munster/munster_000124_000019_leftImg8bit.png 484 | munster/munster_000069_000019_leftImg8bit.png 485 | munster/munster_000026_000019_leftImg8bit.png 486 | munster/munster_000120_000019_leftImg8bit.png 487 | munster/munster_000031_000019_leftImg8bit.png 488 | munster/munster_000162_000019_leftImg8bit.png 489 | munster/munster_000056_000019_leftImg8bit.png 490 | munster/munster_000081_000019_leftImg8bit.png 491 | munster/munster_000123_000019_leftImg8bit.png 492 | munster/munster_000125_000019_leftImg8bit.png 493 | 
munster/munster_000082_000019_leftImg8bit.png 494 | munster/munster_000133_000019_leftImg8bit.png 495 | munster/munster_000126_000019_leftImg8bit.png 496 | munster/munster_000063_000019_leftImg8bit.png 497 | munster/munster_000008_000019_leftImg8bit.png 498 | munster/munster_000149_000019_leftImg8bit.png 499 | munster/munster_000076_000019_leftImg8bit.png 500 | munster/munster_000091_000019_leftImg8bit.png 501 | --------------------------------------------------------------------------------
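
Note (not part of the repository files above): the tail of SEAN_Synthia.py shown earlier writes two artifacts into args.snapshot_dir — the per-iteration loss/mIoU history as Synthia_loss.npz and the final teacher weights as Synthia_TeacherNet.pth, plus intermediate Synthia2Cityscapes_epoch*batch*tgt_miu_*.pth checkpoints whenever the validation mIoU improves. Below is a minimal, hypothetical sketch of how that saved history might be inspected afterwards; it assumes the loss_hist column layout implied by the logging calls (column 1 = cls_loss, column 2 = st_loss, column 3 = target mIoU) and an illustrative snapshot directory path.

# Hypothetical helper (not in the repository): plot the history saved by
# np.savez(args.snapshot_dir+'Synthia_loss.npz', loss_hist=loss_hist).
import numpy as np
import matplotlib.pyplot as plt

snapshot_dir = './snapshots/'  # assumed value of args.snapshot_dir

# Rows of loss_hist are per training iteration; column meanings follow the
# print/f.write lines in the training loop excerpted above.
hist = np.load(snapshot_dir + 'Synthia_loss.npz')['loss_hist']

fig, (ax_loss, ax_miu) = plt.subplots(1, 2, figsize=(10, 4))
ax_loss.plot(hist[:, 1], label='cls_loss')
ax_loss.plot(hist[:, 2], label='st_loss')
ax_loss.set_xlabel('iteration')
ax_loss.legend()

ax_miu.plot(hist[:, 3] * 100, label='target mIoU (%)')
ax_miu.set_xlabel('iteration')
ax_miu.legend()

fig.savefig(snapshot_dir + 'Synthia_training_curves.png', bbox_inches='tight')

The saved .pth files are plain state_dicts, so once the SEAN network from model/SEAN.py is instantiated with the same arguments as in training, they can be restored with net.load_state_dict(torch.load(path)) before evaluation.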