├── LICENSE
├── README.md
├── _config.yml
├── evaluate.py
├── examples
│   ├── performance.png
│   └── prediction_results.png
├── model
│   ├── __init__.py
│   ├── model.py
│   ├── model_util.py
│   ├── pspnet.py
│   └── seg_model.py
├── train_dise_gta2city.py
├── train_dise_synthia2city.py
└── util
    ├── __init__.py
    ├── info.json
    ├── loader
    │   ├── CityDemoLoader.py
    │   ├── CityLoader.py
    │   ├── CityTestLoader.py
    │   ├── GTA5Loader.py
    │   ├── SYNTHIALoader.py
    │   ├── __init__.py
    │   ├── augmentations.py
    │   ├── cityscapes_list
    │   │   ├── .DS_Store
    │   │   ├── info.json
    │   │   ├── label.txt
    │   │   ├── test.txt
    │   │   ├── train.txt
    │   │   ├── train_label.txt
    │   │   ├── val.txt
    │   │   └── val_label.txt
    │   ├── gta5_list
    │   │   ├── train.txt
    │   │   └── train_modified.txt
    │   └── synthia_list
    │       └── train.txt
    ├── loss.py
    ├── metrics.py
    └── utils.py

/README.md:
--------------------------------------------------------------------------------
1 | # DISE-Domain-Invariant-Structure-Extraction
2 | PyTorch implementation of the CVPR 2019 paper "All about Structure: Adapting Structural Information across Domains for Boosting Semantic Segmentation".
3 | 
4 | |[Introduction video](https://youtu.be/YnD_zQNbfK4) | [Paper (ArXiv)](https://arxiv.org/abs/1903.12212)| [Project Page](https://hui-po-wang.github.io/DISE-Domain-Invariant-Structure-Extraction/) |
5 | |---|---|---|
6 | ## Paper
7 | All about Structure: Adapting Structural Information across Domains for Boosting Semantic Segmentation
8 | Wei-Lun Chang*, Hui-Po Wang*, Wen-Hsiao Peng, Wei-Chen Chiu (\*equal contribution)
9 | IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019.
10 | ```
11 | @inproceedings{chang2019all,
12 |   title={All about Structure: Adapting Structural Information across Domains for Boosting Semantic Segmentation},
13 |   author={Chang, Wei-Lun and Wang, Hui-Po and Peng, Wen-Hsiao and Chiu, Wei-Chen},
14 |   booktitle={IEEE Conference on Computer Vision and Pattern Recognition (CVPR)},
15 |   year={2019}
16 | }
17 | ```
18 | ## Example Results
19 | ![prediction_results.png](examples/prediction_results.png)
20 | 
21 | ## Quantitative Results
22 | ![performance.png](examples/performance.png)
23 | 
24 | ## Prerequisites
25 | - PyTorch 0.3.1
26 | - An NVIDIA GPU with at least 16 GB of memory
27 | 
28 | ## Installation
29 | ```
30 | git clone https://github.com/a514514772/DISE-Domain-Invariant-Structure-Extraction.git
31 | ```
32 | ## Datasets
33 | 
34 | 1. Download the [GTA5 Dataset](https://download.visinf.tu-darmstadt.de/data/from_games/) as the source domain and unzip it to `/data`.
35 | 2. Download the [Cityscapes Dataset](https://www.cityscapes-dataset.com) as the target domain and unzip it to `/data`.
36 | 
37 | The structure of `/data` may look like this:
38 | ```
39 | ├── data
40 |     ├── Cityscapes
41 |     │   ├── gtFine
42 |     │   └── leftImg8bit
43 |     ├── GTA5
44 |         ├── images
45 |         └── labels
46 | ```
47 | ## Usage
48 | ### Pretrained Weights
49 | [Google drive](https://drive.google.com/drive/folders/1NSPhGnTqBp6oeBd6awNs7VVTGuL1p4wP?usp=sharing)
50 | 
51 | ### Example Training Script: GTA5 to Cityscapes
52 | ```
53 | python train_dise_gta2city.py --gta5_data_path /data/GTA5 --city_data_path /data/Cityscapes
54 | ```
55 | ### Example Testing Script
56 | To evaluate on the test set, we provide a script that generates 1024x2048 outputs compatible with [the testing server](https://www.cityscapes-dataset.com/benchmarks/#instance-level-results).
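By default, the predicted label maps are saved to `./result`; pass `--output_dir` to `evaluate.py` to change the output location.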
57 | ```
58 | python evaluate.py ./weights --city_data_path /data/Cityscapes
59 | ```
60 | ### More options
61 | ```
62 | python train_dise_gta2city.py -h
63 | usage: train_dise_gta2city.py [-h] [--dump_logs DUMP_LOGS] [--log_dir LOG_DIR] [--gen_img_dir GEN_IMG_DIR]
64 |                               [--gta5_data_path GTA5_DATA_PATH] [--city_data_path CITY_DATA_PATH]
65 |                               [--data_list_path_gta5 DATA_LIST_PATH_GTA5]
66 |                               [--data_list_path_city_img DATA_LIST_PATH_CITY_IMG]
67 |                               [--data_list_path_city_lbl DATA_LIST_PATH_CITY_LBL]
68 |                               [--data_list_path_val_img DATA_LIST_PATH_VAL_IMG]
69 |                               [--data_list_path_val_lbl DATA_LIST_PATH_VAL_LBL]
70 |                               [--cuda_device_id CUDA_DEVICE_ID [CUDA_DEVICE_ID ...]]
71 | 
72 | Domain Invariant Structure Extraction (DISE) for unsupervised domain adaptation for semantic segmentation
73 | ```
74 | 
75 | ## Acknowledgement
76 | Our implementation is heavily based on [AdaptSegNet](https://github.com/wasidennis/AdaptSegNet) by Tsai et al.
77 | 
--------------------------------------------------------------------------------
/_config.yml:
--------------------------------------------------------------------------------
1 | theme: jekyll-theme-cayman
--------------------------------------------------------------------------------
/evaluate.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import torch
3 | import argparse
4 | import numpy as np
5 | import torch.nn as nn
6 | import torch.optim as optim
7 | import torch.nn.functional as F
8 | import torchvision
9 | import torchvision.utils as vutils
10 | import torchvision.models as models
11 | import torch.utils.data as torch_data
12 | import torch.backends.cudnn as cudnn
13 | 
14 | import matplotlib.pyplot as plt
15 | import os
16 | 
17 | # from tensorboardX import SummaryWriter
18 | from PIL import Image
19 | from torch.autograd import Variable
20 | from tqdm import tqdm
21 | 
22 | from util.metrics import runningScore
23 | from model.model import SharedEncoder
24 | from util.utils import poly_lr_scheduler, adjust_learning_rate, save_models, load_models
25 | 
26 | from util.loader.CityTestLoader import CityTestLoader
27 | 
28 | num_classes = 19
29 | CITY_DATA_PATH = '/workspace/lustre/data/Cityscapes'
30 | DATA_LIST_PATH_TEST_IMG = './util/loader/cityscapes_list/test.txt'
31 | WEIGHT_DIR = './weight'
32 | OUTPUT_DIR = './result'
33 | DEFAULT_GPU = 0
34 | IMG_MEAN = np.array((104.00698793, 116.66876762, 122.67891434), dtype=np.float32)
35 | 
36 | parser = argparse.ArgumentParser(description='Domain Invariant Structure Extraction (DISE) \
37 |                                  for unsupervised domain adaptation for semantic segmentation')
38 | parser.add_argument('weight_dir', type=str, default=WEIGHT_DIR)
39 | parser.add_argument('--city_data_path', type=str, default=CITY_DATA_PATH, help='the path to cityscapes.')
40 | parser.add_argument('--data_list_path_test_img', type=str, default=DATA_LIST_PATH_TEST_IMG)
41 | parser.add_argument('--gpu', type=str, default=DEFAULT_GPU)
42 | parser.add_argument('--output_dir', type=str, default=OUTPUT_DIR)
43 | 
44 | args = parser.parse_args()
45 | 
46 | test_set = CityTestLoader(args.city_data_path, args.data_list_path_test_img, max_iters=None, crop_size=[512, 1024], mean=IMG_MEAN, set='test')
47 | test_loader = torch_data.DataLoader(test_set, batch_size=1, shuffle=False, num_workers=4, pin_memory=True)
48 | 
49 | upsample_1024 = nn.Upsample(size=[1024, 2048], mode='bilinear')
50 | 
51 | model_dict = {}
52 | 
53 | enc_shared = SharedEncoder().cuda(args.gpu)
54 | model_dict['enc_shared'] = 
enc_shared 55 | 56 | load_models(model_dict, args.weight_dir) 57 | 58 | enc_shared.eval() 59 | cty_running_metrics = runningScore(num_classes) 60 | for i_test, (images_test, name) in tqdm(enumerate(test_loader)): 61 | images_test = Variable(images_test.cuda(), volatile=True) 62 | 63 | _, _, pred, _ = enc_shared(images_test) 64 | pred = upsample_1024(pred) 65 | 66 | pred = pred.data.cpu().numpy()[0] 67 | pred = pred.transpose(1,2,0) 68 | pred = np.asarray(np.argmax(pred, axis=2), dtype=np.uint8) 69 | pred = np.asarray(test_set.convert_back_to_id(pred), dtype=np.uint8) 70 | pred = Image.fromarray(pred) 71 | 72 | name = name[0][0].split('/')[-1] 73 | if not os.path.exists(args.output_dir): 74 | os.makedirs(args.output_dir) 75 | pred.save(os.path.join(args.output_dir, name)) 76 | -------------------------------------------------------------------------------- /examples/performance.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hui-po-wang/DISE-Domain-Invariant-Structure-Extraction/97c3940d54c451f92e4d40a3d642caf9c6ceca58/examples/performance.png -------------------------------------------------------------------------------- /examples/prediction_results.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hui-po-wang/DISE-Domain-Invariant-Structure-Extraction/97c3940d54c451f92e4d40a3d642caf9c6ceca58/examples/prediction_results.png -------------------------------------------------------------------------------- /model/__init__.py: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /model/model.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import numpy as np 3 | import torch.nn as nn 4 | import torch.optim as optim 5 | import torch.nn.functional as F 6 | import torchvision 7 | import torchvision.models as models 8 | 9 | from torch.autograd import Variable 10 | from .model_util import * 11 | from .seg_model import DeeplabMulti 12 | 13 | pspnet_specs = { 14 | 'n_classes': 19, 15 | 'input_size': (713, 713), 16 | 'block_config': [3, 4, 23, 3], 17 | } 18 | ''' 19 | Sequential blocks 20 | ''' 21 | class SharedEncoder(nn.Module): 22 | def __init__(self): 23 | super(SharedEncoder, self).__init__() 24 | self.n_classes = pspnet_specs['n_classes'] 25 | 26 | Seg_Model = DeeplabMulti(num_classes=self.n_classes) 27 | 28 | self.layer0 = nn.Sequential(Seg_Model.conv1, Seg_Model.bn1, Seg_Model.relu, Seg_Model.maxpool) 29 | self.layer1 = Seg_Model.layer1 30 | self.layer2 = Seg_Model.layer2 31 | self.layer3 = Seg_Model.layer3 32 | self.layer4 = Seg_Model.layer4 33 | 34 | self.final1 = Seg_Model.layer5 35 | self.final2 = Seg_Model.layer6 36 | 37 | def forward(self, x): 38 | inp_shape = x.shape[2:] 39 | 40 | low = self.layer0(x) 41 | #[2, 64, 65, 129] 42 | x = self.layer1(low) 43 | x = self.layer2(x) 44 | 45 | x = self.layer3(x) 46 | x1= self.final1(x) 47 | 48 | rec= self.layer4(x) 49 | x2 = self.final2(rec) 50 | 51 | return low, x1, x2, rec 52 | 53 | def get_1x_lr_params_NOscale(self): 54 | b = [] 55 | 56 | b.append(self.layer0) 57 | b.append(self.layer1) 58 | b.append(self.layer2) 59 | b.append(self.layer3) 60 | b.append(self.layer4) 61 | 62 | for i in range(len(b)): 63 | for j in b[i].modules(): 64 | jj = 0 65 | for k in j.parameters(): 66 | jj += 1 67 | if k.requires_grad: 68 | yield k 69 | 70 | def 
get_10x_lr_params(self): 71 | b = [] 72 | b.append(self.final1.parameters()) 73 | b.append(self.final2.parameters()) 74 | 75 | for j in range(len(b)): 76 | for i in b[j]: 77 | yield i 78 | 79 | def optim_parameters(self, learning_rate): 80 | return [{'params': self.get_1x_lr_params_NOscale(), 'lr': 1* learning_rate}, 81 | {'params': self.get_10x_lr_params(), 'lr': 10* learning_rate}] 82 | 83 | class Classifier(nn.Module): 84 | def __init__(self, inp_shape): 85 | super(Classifier, self).__init__() 86 | n_classes = pspnet_specs['n_classes'] 87 | self.inp_shape = inp_shape 88 | 89 | # PSPNet_Model = PSPNet(pretrained=True) 90 | 91 | self.dropout = nn.Dropout2d(0.1) 92 | self.cls = nn.Conv2d(512, n_classes, kernel_size=1) 93 | 94 | def forward(self, x): 95 | x = self.dropout(x) 96 | x = self.cls(x) 97 | x = F.upsample(x, size=self.inp_shape, mode='bilinear') 98 | return x 99 | 100 | class PrivateEncoder(nn.Module): 101 | def __init__(self, input_channels, code_size): 102 | super(PrivateEncoder, self).__init__() 103 | self.input_channels = input_channels 104 | self.code_size = code_size 105 | 106 | self.cnn = nn.Sequential(nn.Conv2d(self.input_channels, 64, 7, stride=2, padding=3), # 128 * 256 107 | nn.BatchNorm2d(64), 108 | nn.ReLU(), 109 | nn.Conv2d(64, 128, 3, stride=2, padding=1), # 64 * 128 110 | nn.BatchNorm2d(128), 111 | nn.ReLU(), 112 | nn.Conv2d(128, 256, 3, stride=2, padding=1), # 32 * 64 113 | nn.BatchNorm2d(256), 114 | nn.ReLU(), 115 | nn.Conv2d(256, 256, 3, stride=2, padding=1), # 16 * 32 116 | nn.BatchNorm2d(256), 117 | nn.ReLU(), 118 | nn.Conv2d(256, 256, 3, stride=2, padding=1), # 8 * 16 119 | nn.BatchNorm2d(256), 120 | nn.ReLU()) 121 | self.model = [] 122 | self.model += [self.cnn] 123 | self.model += [nn.AdaptiveAvgPool2d((1, 1))] 124 | self.model += [nn.Conv2d(256, code_size, 1, 1, 0)] 125 | self.model = nn.Sequential(*self.model) 126 | 127 | #self.pooling = nn.AvgPool2d(4) 128 | 129 | #self.fc = nn.Sequential(nn.Conv2d(128, code_size, 1, 1, 0)) 130 | 131 | def forward(self, x): 132 | bs = x.size(0) 133 | #feats = self.model(x) 134 | #feats = self.pooling(feats) 135 | 136 | output = self.model(x).view(bs, -1) 137 | 138 | return output 139 | 140 | class PrivateDecoder(nn.Module): 141 | def __init__(self, shared_code_channel, private_code_size): 142 | super(PrivateDecoder, self).__init__() 143 | num_att = 256 144 | self.shared_code_channel = shared_code_channel 145 | self.private_code_size = private_code_size 146 | 147 | self.main = [] 148 | self.upsample = nn.Sequential( 149 | # input: 1/8 * 1/8 150 | nn.ConvTranspose2d(256, 256, 4, 2, 2, bias=False), 151 | nn.InstanceNorm2d(256), 152 | nn.ReLU(True), 153 | Conv2dBlock(256, 128, 3, 1, 1, norm='ln', activation='relu', pad_type='zero'), 154 | # 1/4 * 1/4 155 | nn.ConvTranspose2d(128, 128, 4, 2, 1, bias=False), 156 | nn.InstanceNorm2d(128), 157 | nn.ReLU(True), 158 | Conv2dBlock(128, 64 , 3, 1, 1, norm='ln', activation='relu', pad_type='zero'), 159 | # 1/2 * 1/2 160 | nn.ConvTranspose2d(64, 64, 4, 2, 1, bias=False), 161 | nn.InstanceNorm2d(64), 162 | nn.ReLU(True), 163 | Conv2dBlock(64 , 32 , 3, 1, 1, norm='ln', activation='relu', pad_type='zero'), 164 | # 1 * 1 165 | nn.Conv2d(32, 3, 3, 1, 1), 166 | nn.Tanh()) 167 | 168 | self.main += [Conv2dBlock(shared_code_channel+num_att+1, 256, 3, stride=1, padding=1, norm='ln', activation='relu', pad_type='reflect', bias=False)] 169 | self.main += [ResBlocks(3, 256, 'ln', 'relu', pad_type='zero')] 170 | self.main += [self.upsample] 171 | 172 | self.main = nn.Sequential(*self.main) 173 | 
self.mlp_att = nn.Sequential(nn.Linear(private_code_size, private_code_size), 174 | nn.ReLU(), 175 | nn.Linear(private_code_size, private_code_size), 176 | nn.ReLU(), 177 | nn.Linear(private_code_size, private_code_size), 178 | nn.ReLU(), 179 | nn.Linear(private_code_size, num_att)) 180 | 181 | def assign_adain_params(self, adain_params, model): 182 | # assign the adain_params to the AdaIN layers in model 183 | for m in model.modules(): 184 | if m.__class__.__name__ == "AdaptiveInstanceNorm2d": 185 | mean = adain_params[:, :m.num_features] 186 | std = torch.exp(adain_params[:, m.num_features:2*m.num_features]) 187 | m.bias = mean.contiguous().view(-1) 188 | m.weight = std.contiguous().view(-1) 189 | if adain_params.size(1) > 2*m.num_features: 190 | adain_params = adain_params[:, 2*m.num_features:] 191 | 192 | def get_num_adain_params(self, model): 193 | # return the number of AdaIN parameters needed by the model 194 | num_adain_params = 0 195 | for m in model.modules(): 196 | if m.__class__.__name__ == "AdaptiveInstanceNorm2d": 197 | num_adain_params += 2*m.num_features 198 | return num_adain_params 199 | 200 | def forward(self, shared_code, private_code, d): 201 | d = Variable(torch.FloatTensor(shared_code.shape[0], 1).fill_(d)).cuda() 202 | d = d.unsqueeze(1) 203 | d_img = d.view(d.size(0), d.size(1), 1, 1).expand(d.size(0), d.size(1), shared_code.size(2), shared_code.size(3)) 204 | att_params = self.mlp_att(private_code) 205 | att_img = att_params.view(att_params.size(0), att_params.size(1), 1, 1).expand(att_params.size(0), att_params.size(1), shared_code.size(2), shared_code.size(3)) 206 | code = torch.cat([shared_code, att_img, d_img], 1) 207 | 208 | output = self.main(code) 209 | return output 210 | 211 | class Discriminator(nn.Module): 212 | def __init__(self): 213 | super(Discriminator, self).__init__() 214 | # FCN classification layer 215 | self.feature = nn.Sequential( 216 | Conv2dBlock(3, 64, 6, stride=2, padding=2, norm='none', activation='lrelu', bias=False), 217 | Conv2dBlock(64, 128, 4, stride=2, padding=1, norm='in', activation='lrelu', bias=False), 218 | Conv2dBlock(128, 256, 4, stride=2, padding=1, norm='in', activation='lrelu', bias=False), 219 | Conv2dBlock(256, 512, 4, stride=2, padding=1, norm='in', activation='lrelu', bias=False), 220 | nn.Conv2d(512, 1, 1, padding=0), 221 | # nn.Sigmoid() 222 | ) 223 | self.global_pooling = nn.AdaptiveAvgPool2d((1, 1)) 224 | 225 | def forward(self, x): 226 | x = self.feature(x) 227 | # x = self.global_pooling(x).view(-1) 228 | return x 229 | 230 | class DomainClassifier(nn.Module): 231 | def __init__(self): 232 | super(DomainClassifier, self).__init__() 233 | n_classes = pspnet_specs['n_classes'] 234 | # FCN classification layer 235 | 236 | self.feature = nn.Sequential( 237 | Conv2dBlock(n_classes, 64, 4, stride=2, padding=1, norm='none', activation='lrelu', bias=False), 238 | Conv2dBlock(64 , 128, 4, stride=2, padding=1, norm='none', activation='lrelu', bias=False), 239 | Conv2dBlock(128, 256, 4, stride=2, padding=1, norm='none', activation='lrelu', bias=False), 240 | Conv2dBlock(256, 512, 4, stride=2, padding=1, norm='none', activation='lrelu', bias=False), 241 | nn.Conv2d(512, 1, 4, padding=2) 242 | ) 243 | def forward(self, x): 244 | x = self.feature(x) 245 | return x 246 | -------------------------------------------------------------------------------- /model/model_util.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import torch.nn.functional 
as F 4 | import torchvision.models as models 5 | import os 6 | import sys 7 | 8 | from torch.autograd import Variable 9 | 10 | try: 11 | from urllib import urlretrieve 12 | except ImportError: 13 | from urllib.request import urlretrieve 14 | 15 | pspnet_specs = { 16 | 'n_classes': 19, 17 | 'input_size': (713, 713), 18 | 'block_config': [3, 4, 23, 3], 19 | } 20 | 21 | class ResBlocks(nn.Module): 22 | def __init__(self, num_blocks, dim, norm='in', activation='relu', pad_type='zero'): 23 | super(ResBlocks, self).__init__() 24 | self.model = [] 25 | for i in range(num_blocks): 26 | self.model += [ResBlock(dim, norm=norm, activation=activation, pad_type=pad_type)] 27 | self.model = nn.Sequential(*self.model) 28 | 29 | def forward(self, x): 30 | return self.model(x) 31 | 32 | ''' 33 | Basic blocks 34 | ''' 35 | class ResBlock(nn.Module): 36 | def __init__(self, dim, norm='in', activation='relu', pad_type='zero'): 37 | super(ResBlock, self).__init__() 38 | 39 | model = [] 40 | model += [Conv2dBlock(dim ,dim, 3, 1, 1, norm=norm, activation=activation, pad_type=pad_type)] 41 | model += [Conv2dBlock(dim ,dim, 3, 1, 1, norm=norm, activation='none', pad_type=pad_type)] 42 | self.model = nn.Sequential(*model) 43 | 44 | def forward(self, x): 45 | residual = x 46 | out = self.model(x) 47 | out += residual 48 | return out 49 | 50 | class Conv2dBlock(nn.Module): 51 | def __init__(self, input_dim ,output_dim, kernel_size, stride=1, 52 | padding=0, dilation=1, norm='none', activation='relu', pad_type='zero', bias=True): 53 | super(Conv2dBlock, self).__init__() 54 | self.use_bias = bias 55 | # initialize padding 56 | if pad_type == 'reflect': 57 | self.pad = nn.ReflectionPad2d(padding) 58 | elif pad_type == 'zero': 59 | self.pad = nn.ZeroPad2d(padding) 60 | # else: 61 | # assert 0, "Unsupported padding type: {}".format(pad_type) 62 | 63 | # initialize normalization 64 | norm_dim = output_dim 65 | if norm == 'bn': 66 | self.norm = nn.BatchNorm2d(norm_dim) 67 | elif norm == 'in': 68 | self.norm = nn.InstanceNorm2d(norm_dim) 69 | elif norm == 'ln': 70 | self.norm = LayerNorm(norm_dim) 71 | elif norm == 'adain': 72 | self.norm = AdaptiveInstanceNorm2d(norm_dim) 73 | elif norm == 'none': 74 | self.norm = None 75 | else: 76 | assert 0, "Unsupported normalization: {}".format(norm) 77 | 78 | # initialize activation 79 | if activation == 'relu': 80 | self.activation = nn.ReLU(inplace=True) 81 | elif activation == 'lrelu': 82 | self.activation = nn.LeakyReLU(0.2, inplace=True) 83 | elif activation == 'prelu': 84 | self.activation = nn.PReLU() 85 | elif activation == 'selu': 86 | self.activation = nn.SELU(inplace=True) 87 | elif activation == 'tanh': 88 | self.activation = nn.Tanh() 89 | elif activation == 'none': 90 | self.activation = None 91 | else: 92 | assert 0, "Unsupported activation: {}".format(activation) 93 | 94 | # initialize convolution 95 | self.conv = nn.Conv2d(input_dim, output_dim, kernel_size, stride, dilation=dilation, bias=self.use_bias) 96 | 97 | def forward(self, x): 98 | x = self.conv(self.pad(x)) 99 | if self.norm: 100 | x = self.norm(x) 101 | if self.activation: 102 | x = self.activation(x) 103 | return x 104 | 105 | class LayerNorm(nn.Module): 106 | def __init__(self, num_features, eps=1e-5, affine=True): 107 | super(LayerNorm, self).__init__() 108 | self.num_features = num_features 109 | self.affine = affine 110 | self.eps = eps 111 | 112 | if self.affine: 113 | self.gamma = nn.Parameter(torch.Tensor(num_features).uniform_()) 114 | self.beta = nn.Parameter(torch.zeros(num_features)) 115 | 116 | 
def forward(self, x): 117 | shape = [-1] + [1] * (x.dim() - 1) 118 | mean = x.view(x.size(0), -1).mean(1).view(*shape) 119 | std = x.view(x.size(0), -1).std(1).view(*shape) 120 | x = (x - mean) / (std + self.eps) 121 | 122 | if self.affine: 123 | shape = [1, -1] + [1] * (x.dim() - 2) 124 | x = x * self.gamma.view(*shape) + self.beta.view(*shape) 125 | return x 126 | 127 | class AdaptiveInstanceNorm2d(nn.Module): 128 | def __init__(self, num_features, eps=1e-5, momentum=0.1): 129 | super(AdaptiveInstanceNorm2d, self).__init__() 130 | self.num_features = num_features 131 | self.eps = eps 132 | self.momentum = momentum 133 | # weight and bias are dynamically assigned 134 | self.weight = None 135 | self.bias = None 136 | # just dummy buffers, not used 137 | self.register_buffer('running_mean', torch.zeros(num_features)) 138 | self.register_buffer('running_var', torch.ones(num_features)) 139 | 140 | def forward(self, x): 141 | assert self.weight is not None and self.bias is not None, "Please assign weight and bias before calling AdaIN!" 142 | b, c = x.size(0), x.size(1) 143 | running_mean = self.running_mean.repeat(b) 144 | running_var = self.running_var.repeat(b) 145 | 146 | # Apply instance norm 147 | x_reshaped = x.contiguous().view(1, b * c, *x.size()[2:]) 148 | 149 | out = F.batch_norm( 150 | x_reshaped, running_mean, running_var, self.weight, self.bias, 151 | True, self.momentum, self.eps) 152 | 153 | return out.view(b, c, *x.size()[2:]) 154 | 155 | def __repr__(self): 156 | return self.__class__.__name__ + '(' + str(self.num_features) + ')' 157 | 158 | class ASPPModule(nn.Module): 159 | """Atrous Spatial Pyramid Pooling with image pool""" 160 | 161 | def __init__(self, in_channels, out_channels, pyramids): 162 | super(ASPPModule, self).__init__() 163 | self.stages = nn.Module() 164 | for i, (dilation, padding) in enumerate(zip(pyramids, pyramids)): 165 | self.stages.add_module( 166 | "c{}".format(i + 1), 167 | Conv2dBlock(in_channels, out_channels, 3, stride=1, padding=padding, dilation=dilation, norm='bn', activation='relu', pad_type='reflect', bias=False), 168 | ) 169 | 170 | def forward(self, x): 171 | h = [] 172 | for stage in self.stages.children(): 173 | h += [stage(x)] 174 | h = torch.cat(h, dim=1) 175 | return h 176 | 177 | class PyramidPooling(nn.Module): 178 | def __init__(self, fc_dim=2048, pool_scales=(1, 2, 3, 6)): 179 | super(PyramidPooling, self).__init__() 180 | self.ppm = [] 181 | for scale in pool_scales: 182 | self.ppm.append(nn.Sequential( 183 | nn.AdaptiveAvgPool2d(scale), 184 | nn.Conv2d(fc_dim, 512, kernel_size=1, bias=False), 185 | nn.BatchNorm2d(512), 186 | nn.ReLU(inplace=True) 187 | )) 188 | self.ppm = nn.ModuleList(self.ppm) 189 | 190 | 191 | def forward(self, conv_out, segSize=None): 192 | conv5 = conv_out 193 | input_size = conv5.size() 194 | ppm_out = [conv5] 195 | for pool_scale in self.ppm: 196 | ppm_out.append(nn.functional.upsample(pool_scale(conv5), (input_size[2], input_size[3]), mode='bilinear')) 197 | ppm_out = torch.cat(ppm_out, 1) 198 | 199 | return ppm_out 200 | 201 | class GaussianNoiseLayer(nn.Module): 202 | def __init__(self,): 203 | super(GaussianNoiseLayer, self).__init__() 204 | def forward(self, x): 205 | if self.training == False: 206 | return x 207 | noise = Variable(torch.randn(x.size()).cuda(x.get_device())) 208 | return x + noise 209 | 210 | def BatchNorm2d_no_grad(m): 211 | if type(m) == nn.BatchNorm2d: 212 | print (m) 213 | for i in m.parameters(): 214 | i.requires_grad = False 215 | 216 | def load_url(url, 
model_dir='/home/wilson/RL/image_segmentation/code/v11/pretrained', map_location=None): 217 | if not os.path.exists(model_dir): 218 | os.makedirs(model_dir) 219 | filename = url.split('/')[-1] 220 | cached_file = os.path.join(model_dir, filename) 221 | if not os.path.exists(cached_file): 222 | sys.stderr.write('Downloading: "{}" to {}\n'.format(url, cached_file)) 223 | urlretrieve(url, cached_file) 224 | return torch.load(cached_file, map_location=map_location) 225 | 226 | -------------------------------------------------------------------------------- /model/pspnet.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import numpy as np 3 | import torch.nn as nn 4 | import torchvision.models as models 5 | 6 | from math import ceil 7 | from torch.autograd import Variable 8 | 9 | from ptsemseg import caffe_pb2 10 | from ptsemseg.models.utils import * 11 | 12 | pspnet_specs = { 13 | 'pascalvoc': 14 | { 15 | 'n_classes': 21, 16 | 'input_size': (473, 473), 17 | 'block_config': [3, 4, 23, 3], 18 | }, 19 | 20 | 'cityscapes': 21 | { 22 | 'n_classes': 19, 23 | 'input_size': (713, 713), 24 | 'block_config': [3, 4, 23, 3], 25 | }, 26 | 27 | 'ade20k': 28 | { 29 | 'n_classes': 150, 30 | 'input_size': (473, 473), 31 | 'block_config': [3, 4, 6, 3], 32 | }, 33 | } 34 | 35 | class pspnet(nn.Module): 36 | 37 | """ 38 | Pyramid Scene Parsing Network 39 | URL: https://arxiv.org/abs/1612.01105 40 | 41 | References: 42 | 1) Original Author's code: https://github.com/hszhao/PSPNet 43 | 2) Chainer implementation by @mitmul: https://github.com/mitmul/chainer-pspnet 44 | 45 | Visualization: 46 | http://dgschwend.github.io/netscope/#/gist/6bfb59e6a3cfcb4e2bb8d47f827c2928 47 | 48 | """ 49 | 50 | def __init__(self, 51 | n_classes=21, 52 | block_config=[3, 4, 23, 3], 53 | input_size=(473,473), 54 | version=None): 55 | 56 | super(pspnet, self).__init__() 57 | """ 58 | self.block_config = pspnet_specs[version]['block_config'] if version is not None else block_config 59 | self.n_classes = pspnet_specs[version]['n_classes'] if version is not None else n_classes 60 | self.input_size = pspnet_specs[version]['input_size'] if version is not None else input_size 61 | 62 | # Encoder 63 | self.convbnrelu1_1 = conv2DBatchNormRelu(in_channels=3, k_size=3, n_filters=64, 64 | padding=1, stride=2, bias=False) 65 | self.convbnrelu1_2 = conv2DBatchNormRelu(in_channels=64, k_size=3, n_filters=64, 66 | padding=1, stride=1, bias=False) 67 | self.convbnrelu1_3 = conv2DBatchNormRelu(in_channels=64, k_size=3, n_filters=128, 68 | padding=1, stride=1, bias=False) 69 | 70 | # Vanilla Residual Blocks 71 | self.res_block2 = residualBlockPSP(self.block_config[0], 128, 64, 256, 1, 1) 72 | self.res_block3 = residualBlockPSP(self.block_config[1], 256, 128, 512, 2, 1) 73 | 74 | # Dilated Residual Blocks 75 | self.res_block4 = residualBlockPSP(self.block_config[2], 512, 256, 1024, 1, 2) 76 | self.res_block5 = residualBlockPSP(self.block_config[3], 1024, 512, 2048, 1, 4) 77 | """ 78 | self.n_classes = pspnet_specs[version]['n_classes'] if version is not None else n_classes 79 | resnet = models.resnet101(pretrained=True) 80 | self.layer0 = nn.Sequential(resnet.conv1, resnet.bn1, resnet.relu, resnet.maxpool) 81 | self.layer1, self.layer2, self.layer3, self.layer4 = resnet.layer1, resnet.layer2, resnet.layer3, resnet.layer4 82 | 83 | for n, m in self.layer3.named_modules(): 84 | if 'conv2' in n: 85 | m.dilation, m.padding, m.stride = (2, 2), (2, 2), (1, 1) 86 | elif 'downsample.0' in n: 87 | m.stride = 
(1, 1) 88 | for n, m in self.layer4.named_modules(): 89 | if 'conv2' in n: 90 | m.dilation, m.padding, m.stride = (4, 4), (4, 4), (1, 1) 91 | elif 'downsample.0' in n: 92 | m.stride = (1, 1) 93 | # Pyramid Pooling Module 94 | self.pyramid_pooling = pyramidPooling(2048, [6, 3, 2, 1]) 95 | 96 | # Final conv layers 97 | self.cbr_final = conv2DBatchNormRelu(4096, 512, 3, 1, 1, False) 98 | self.dropout = nn.Dropout2d(p=0.1, inplace=True) 99 | self.classification = nn.Conv2d(512, self.n_classes, 1, 1, 0) 100 | 101 | def forward(self, x): 102 | inp_shape = x.shape[2:] 103 | 104 | """ 105 | # H, W -> H/2, W/2 106 | x = self.convbnrelu1_1(x) 107 | x = self.convbnrelu1_2(x) 108 | x = self.convbnrelu1_3(x) 109 | 110 | # H/2, W/2 -> H/4, W/4 111 | x = F.max_pool2d(x, 3, 2, 1) 112 | 113 | # H/4, W/4 -> H/8, W/8 114 | x = self.res_block2(x) 115 | x = self.res_block3(x) 116 | x = self.res_block4(x) 117 | x = self.res_block5(x) 118 | """ 119 | 120 | x = self.layer0(x) 121 | x = self.layer1(x) 122 | x = self.layer2(x) 123 | x = self.layer3(x) 124 | x = self.layer4(x) 125 | 126 | x = self.pyramid_pooling(x) 127 | 128 | x = self.cbr_final(x) 129 | x = self.dropout(x) 130 | 131 | x = self.classification(x) 132 | x = F.upsample(x, size=inp_shape, mode='bilinear') 133 | return x 134 | 135 | 136 | -------------------------------------------------------------------------------- /model/seg_model.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | import math 3 | import torch.utils.model_zoo as model_zoo 4 | import torch 5 | import numpy as np 6 | 7 | RESTORE_FROM = 'http://vllab.ucmerced.edu/ytsai/CVPR18/DeepLab_resnet_pretrained_init-f81d91e8.pth' 8 | 9 | affine_par = True 10 | 11 | def outS(i): 12 | i = int(i) 13 | i = (i + 1) / 2 14 | i = int(np.ceil((i + 1) / 2.0)) 15 | i = (i + 1) / 2 16 | return i 17 | 18 | 19 | def conv3x3(in_planes, out_planes, stride=1): 20 | "3x3 convolution with padding" 21 | return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, 22 | padding=1, bias=False) 23 | 24 | 25 | class BasicBlock(nn.Module): 26 | expansion = 1 27 | 28 | def __init__(self, inplanes, planes, stride=1, downsample=None): 29 | super(BasicBlock, self).__init__() 30 | self.conv1 = conv3x3(inplanes, planes, stride) 31 | self.bn1 = nn.BatchNorm2d(planes, affine=affine_par) 32 | self.relu = nn.ReLU(inplace=True) 33 | self.conv2 = conv3x3(planes, planes) 34 | self.bn2 = nn.BatchNorm2d(planes, affine=affine_par) 35 | self.downsample = downsample 36 | self.stride = stride 37 | 38 | def forward(self, x): 39 | residual = x 40 | 41 | out = self.conv1(x) 42 | out = self.bn1(out) 43 | out = self.relu(out) 44 | 45 | out = self.conv2(out) 46 | out = self.bn2(out) 47 | 48 | if self.downsample is not None: 49 | residual = self.downsample(x) 50 | 51 | out += residual 52 | out = self.relu(out) 53 | 54 | return out 55 | 56 | 57 | class Bottleneck(nn.Module): 58 | expansion = 4 59 | 60 | def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None): 61 | super(Bottleneck, self).__init__() 62 | self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, stride=stride, bias=False) # change 63 | self.bn1 = nn.BatchNorm2d(planes, affine=affine_par) 64 | for i in self.bn1.parameters(): 65 | i.requires_grad = False 66 | 67 | padding = dilation 68 | self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, # change 69 | padding=padding, bias=False, dilation=dilation) 70 | self.bn2 = nn.BatchNorm2d(planes, affine=affine_par) 71 | for i in 
self.bn2.parameters(): 72 | i.requires_grad = False 73 | self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False) 74 | self.bn3 = nn.BatchNorm2d(planes * 4, affine=affine_par) 75 | for i in self.bn3.parameters(): 76 | i.requires_grad = False 77 | self.relu = nn.ReLU(inplace=True) 78 | self.downsample = downsample 79 | self.stride = stride 80 | 81 | def forward(self, x): 82 | residual = x 83 | 84 | out = self.conv1(x) 85 | out = self.bn1(out) 86 | out = self.relu(out) 87 | 88 | out = self.conv2(out) 89 | out = self.bn2(out) 90 | out = self.relu(out) 91 | 92 | out = self.conv3(out) 93 | out = self.bn3(out) 94 | 95 | if self.downsample is not None: 96 | residual = self.downsample(x) 97 | 98 | out += residual 99 | out = self.relu(out) 100 | 101 | return out 102 | 103 | 104 | class Classifier_Module(nn.Module): 105 | def __init__(self, inplanes, dilation_series, padding_series, num_classes): 106 | super(Classifier_Module, self).__init__() 107 | self.conv2d_list = nn.ModuleList() 108 | for dilation, padding in zip(dilation_series, padding_series): 109 | self.conv2d_list.append( 110 | nn.Conv2d(inplanes, num_classes, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True)) 111 | 112 | for m in self.conv2d_list: 113 | m.weight.data.normal_(0, 0.01) 114 | 115 | def forward(self, x): 116 | out = self.conv2d_list[0](x) 117 | for i in range(len(self.conv2d_list) - 1): 118 | out += self.conv2d_list[i + 1](x) 119 | return out 120 | 121 | 122 | class ResNetMulti(nn.Module): 123 | def __init__(self, block, layers, num_classes): 124 | self.inplanes = 64 125 | super(ResNetMulti, self).__init__() 126 | self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, 127 | bias=False) 128 | self.bn1 = nn.BatchNorm2d(64, affine=affine_par) 129 | for i in self.bn1.parameters(): 130 | i.requires_grad = False 131 | self.relu = nn.ReLU(inplace=True) 132 | self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1, ceil_mode=True) # change 133 | self.layer1 = self._make_layer(block, 64, layers[0]) 134 | self.layer2 = self._make_layer(block, 128, layers[1], stride=2) 135 | self.layer3 = self._make_layer(block, 256, layers[2], stride=1, dilation=2) 136 | self.layer4 = self._make_layer(block, 512, layers[3], stride=1, dilation=4) 137 | self.layer5 = self._make_pred_layer(Classifier_Module, 1024, [6, 12, 18, 24], [6, 12, 18, 24], num_classes) 138 | self.layer6 = self._make_pred_layer(Classifier_Module, 2048, [6, 12, 18, 24], [6, 12, 18, 24], num_classes) 139 | 140 | for m in self.modules(): 141 | if isinstance(m, nn.Conv2d): 142 | n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels 143 | m.weight.data.normal_(0, 0.01) 144 | elif isinstance(m, nn.BatchNorm2d): 145 | m.weight.data.fill_(1) 146 | m.bias.data.zero_() 147 | # for i in m.parameters(): 148 | # i.requires_grad = False 149 | 150 | def _make_layer(self, block, planes, blocks, stride=1, dilation=1): 151 | downsample = None 152 | if stride != 1 or self.inplanes != planes * block.expansion or dilation == 2 or dilation == 4: 153 | downsample = nn.Sequential( 154 | nn.Conv2d(self.inplanes, planes * block.expansion, 155 | kernel_size=1, stride=stride, bias=False), 156 | nn.BatchNorm2d(planes * block.expansion, affine=affine_par)) 157 | for i in downsample._modules['1'].parameters(): 158 | i.requires_grad = False 159 | layers = [] 160 | layers.append(block(self.inplanes, planes, stride, dilation=dilation, downsample=downsample)) 161 | self.inplanes = planes * block.expansion 162 | for i in range(1, blocks): 163 | 
layers.append(block(self.inplanes, planes, dilation=dilation)) 164 | 165 | return nn.Sequential(*layers) 166 | 167 | def _make_pred_layer(self, block, inplanes, dilation_series, padding_series, num_classes): 168 | return block(inplanes, dilation_series, padding_series, num_classes) 169 | 170 | def forward(self, x): 171 | x = self.conv1(x) 172 | x = self.bn1(x) 173 | x = self.relu(x) 174 | x = self.maxpool(x) 175 | x = self.layer1(x) 176 | x = self.layer2(x) 177 | 178 | x = self.layer3(x) 179 | x1 = self.layer5(x) 180 | 181 | x2 = self.layer4(x) 182 | x2 = self.layer6(x2) 183 | 184 | return x1, x2 185 | 186 | def get_1x_lr_params_NOscale(self): 187 | """ 188 | This generator returns all the parameters of the net except for 189 | the last classification layer. Note that for each batchnorm layer, 190 | requires_grad is set to False in deeplab_resnet.py, therefore this function does not return 191 | any batchnorm parameter 192 | """ 193 | b = [] 194 | 195 | b.append(self.conv1) 196 | b.append(self.bn1) 197 | b.append(self.layer1) 198 | b.append(self.layer2) 199 | b.append(self.layer3) 200 | b.append(self.layer4) 201 | 202 | for i in range(len(b)): 203 | for j in b[i].modules(): 204 | jj = 0 205 | for k in j.parameters(): 206 | jj += 1 207 | if k.requires_grad: 208 | yield k 209 | 210 | def get_10x_lr_params(self): 211 | """ 212 | This generator returns all the parameters for the last layer of the net, 213 | which does the classification of pixel into classes 214 | """ 215 | b = [] 216 | b.append(self.layer5.parameters()) 217 | b.append(self.layer6.parameters()) 218 | 219 | for j in range(len(b)): 220 | for i in b[j]: 221 | yield i 222 | 223 | def optim_parameters(self, args): 224 | return [{'params': self.get_1x_lr_params_NOscale(), 'lr': args.learning_rate}, 225 | {'params': self.get_10x_lr_params(), 'lr': 10 * args.learning_rate}] 226 | 227 | 228 | def DeeplabMulti(pretrained=True, num_classes=21): 229 | model = ResNetMulti(Bottleneck, [3, 4, 23, 3], num_classes) 230 | if pretrained: 231 | saved_state_dict = model_zoo.load_url(RESTORE_FROM) 232 | new_params = model.state_dict().copy() 233 | for i in saved_state_dict: 234 | i_parts = i.split('.') 235 | if not num_classes == 19 or not i_parts[1] == 'layer5': 236 | new_params['.'.join(i_parts[1:])] = saved_state_dict[i] 237 | model.load_state_dict(new_params) 238 | 239 | return model 240 | 241 | -------------------------------------------------------------------------------- /train_dise_gta2city.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import torch 3 | import argparse 4 | import numpy as np 5 | import torch.nn as nn 6 | import torch.optim as optim 7 | import torch.nn.functional as F 8 | import torchvision 9 | import torchvision.utils as vutils 10 | import torchvision.models as models 11 | import torch.utils.data as torch_data 12 | import torch.backends.cudnn as cudnn 13 | 14 | import matplotlib.pyplot as plt 15 | import os 16 | 17 | # from tensorboardX import SummaryWriter 18 | from PIL import Image 19 | from torch.autograd import Variable 20 | from tqdm import tqdm 21 | 22 | from util.loader.CityLoader import CityLoader 23 | from util.loader.GTA5Loader import GTA5Loader 24 | from util.loader.augmentations import Compose, RandomHorizontallyFlip, RandomSized_and_Crop, RandomCrop 25 | from util.metrics import runningScore 26 | from util.loss import VGGLoss, VGGLoss_for_trans, cross_entropy2d 27 | from model.model import SharedEncoder, PrivateEncoder, PrivateDecoder, Discriminator, 
DomainClassifier 28 | from util.utils import poly_lr_scheduler, adjust_learning_rate, save_models, load_models 29 | 30 | # Data-related 31 | LOG_DIR = './log' 32 | GEN_IMG_DIR = './generated_imgs' 33 | 34 | GTA5_DATA_PATH = '/workspace/lustre/data/GTA5' 35 | CITY_DATA_PATH = '/workspace/lustre/data/Cityscapes' 36 | DATA_LIST_PATH_GTA5 = './util/loader/gta5_list/train_modified.txt' 37 | DATA_LIST_PATH_CITY_IMG = './util/loader/cityscapes_list/train.txt' 38 | DATA_LIST_PATH_CITY_LBL = './util/loader/cityscapes_list/train_label.txt' 39 | DATA_LIST_PATH_VAL_IMG = './util/loader/cityscapes_list/val.txt' 40 | DATA_LIST_PATH_VAL_LBL = './util/loader/cityscapes_list/val_label.txt' 41 | 42 | # Hyper-parameters 43 | CUDA_DIVICE_ID = '0' 44 | 45 | parser = argparse.ArgumentParser(description='Domain Invariant Structure Extraction (DISE) \ 46 | for unsupervised domain adaptation for semantic segmentation') 47 | parser.add_argument('--dump_logs', type=bool, default=False) 48 | parser.add_argument('--log_dir', type=str, default=LOG_DIR, help='the path to where you save plots and logs.') 49 | parser.add_argument('--gen_img_dir', type=str, default=GEN_IMG_DIR, help='the path to where you save translated images and segmentation maps.') 50 | parser.add_argument('--gta5_data_path', type=str, default=GTA5_DATA_PATH, help='the path to GTA5 dataset.') 51 | parser.add_argument('--city_data_path', type=str, default=CITY_DATA_PATH, help='the path to Cityscapes dataset.') 52 | parser.add_argument('--data_list_path_gta5', type=str, default=DATA_LIST_PATH_GTA5) 53 | parser.add_argument('--data_list_path_city_img', type=str, default=DATA_LIST_PATH_CITY_IMG) 54 | parser.add_argument('--data_list_path_city_lbl', type=str, default=DATA_LIST_PATH_CITY_LBL) 55 | parser.add_argument('--data_list_path_val_img', type=str, default=DATA_LIST_PATH_VAL_IMG) 56 | parser.add_argument('--data_list_path_val_lbl', type=str, default=DATA_LIST_PATH_VAL_LBL) 57 | 58 | parser.add_argument('--cuda_device_id', nargs='+', type=str, default=CUDA_DIVICE_ID) 59 | 60 | args = parser.parse_args() 61 | 62 | print ('cuda_device_id:', ','.join(args.cuda_device_id)) 63 | os.environ['CUDA_VISIBLE_DEVICES'] = ','.join(args.cuda_device_id) 64 | 65 | if not os.path.exists(args.log_dir): 66 | os.makedirs(args.log_dir) 67 | 68 | if not os.path.exists(args.gen_img_dir): 69 | os.makedirs(args.gen_img_dir) 70 | 71 | if args.dump_logs == True: 72 | old_output = sys.stdout 73 | sys.stdout = open(os.path.join(args.log_dir, 'output.txt'), 'w') 74 | 75 | IMG_MEAN = np.array((104.00698793, 116.66876762, 122.67891434), dtype=np.float32) 76 | 77 | num_classes = 19 78 | source_input_size = [720, 1280] 79 | target_input_size = [512, 1024] 80 | batch_size = 2 81 | 82 | max_epoch = 150 83 | num_steps = 250000 84 | num_calmIoU = 1000 85 | 86 | learning_rate_seg = 2.5e-4 87 | learning_rate_d = 1e-4 88 | learning_rate_rec = 1e-3 89 | learning_rate_dis = 1e-4 90 | power = 0.9 91 | weight_decay = 0.0005 92 | 93 | lambda_seg = 0.1 94 | lambda_adv_target1 = 0.0002 95 | lambda_adv_target2 = 0.001 96 | 97 | source_channels = 3 98 | target_channels = 3 99 | private_code_size = 8 100 | shared_code_channels = 2048 101 | 102 | # Setup Augmentations 103 | gta5_data_aug = Compose([RandomHorizontallyFlip(), 104 | RandomSized_and_Crop([256, 512]) 105 | ]) 106 | 107 | city_data_aug = Compose([RandomHorizontallyFlip(), 108 | RandomCrop([256, 512]) 109 | ]) 110 | # ==== DataLoader ==== 111 | gta5_set = GTA5Loader(args.gta5_data_path, args.data_list_path_gta5, max_iters=num_steps* 
batch_size, crop_size=source_input_size, transform=gta5_data_aug, mean=IMG_MEAN) 112 | source_loader= torch_data.DataLoader(gta5_set, batch_size=batch_size, shuffle=True, num_workers=4, pin_memory=True) 113 | 114 | city_set = CityLoader(args.city_data_path, args.data_list_path_city_img, args.data_list_path_city_lbl, max_iters=num_steps* batch_size, crop_size=target_input_size, transform=city_data_aug, mean=IMG_MEAN, set='train') 115 | target_loader= torch_data.DataLoader(city_set, batch_size=batch_size, shuffle=True, num_workers=4, pin_memory=True) 116 | 117 | val_set = CityLoader(args.city_data_path, args.data_list_path_val_img, args.data_list_path_val_lbl, max_iters=None, crop_size=[512, 1024], mean=IMG_MEAN, set='val') 118 | val_loader= torch_data.DataLoader(val_set, batch_size=1, shuffle=False, num_workers=4, pin_memory=True) 119 | 120 | sourceloader_iter = enumerate(source_loader) 121 | targetloader_iter = enumerate(target_loader) 122 | 123 | # Setup Metrics 124 | cty_running_metrics = runningScore(num_classes) 125 | 126 | model_dict = {} 127 | 128 | # Setup Model 129 | print ('building models ...') 130 | enc_shared = SharedEncoder().cuda() 131 | dclf1 = DomainClassifier().cuda() 132 | dclf2 = DomainClassifier().cuda() 133 | enc_s = PrivateEncoder(64, private_code_size).cuda() 134 | enc_t = PrivateEncoder(64, private_code_size).cuda() 135 | dec_s = PrivateDecoder(shared_code_channels, private_code_size).cuda() 136 | dec_t = dec_s 137 | dis_s2t = Discriminator().cuda() 138 | dis_t2s = Discriminator().cuda() 139 | 140 | model_dict['enc_shared'] = enc_shared 141 | model_dict['dclf1'] = dclf1 142 | model_dict['dclf2'] = dclf2 143 | model_dict['enc_s'] = enc_s 144 | model_dict['enc_t'] = enc_t 145 | model_dict['dec_s'] = dec_s 146 | model_dict['dec_t'] = dec_t 147 | model_dict['dis_s2t'] = dis_s2t 148 | model_dict['dis_t2s'] = dis_t2s 149 | 150 | enc_shared_opt = optim.SGD(enc_shared.optim_parameters(learning_rate_seg), lr=learning_rate_seg, momentum=0.9, weight_decay=weight_decay) 151 | dclf1_opt = optim.Adam(dclf1.parameters(), lr=learning_rate_d, betas=(0.9, 0.99)) 152 | dclf2_opt = optim.Adam(dclf2.parameters(), lr=learning_rate_d, betas=(0.9, 0.99)) 153 | enc_s_opt = optim.Adam(enc_s.parameters(), lr=learning_rate_rec, betas=(0.5, 0.999)) 154 | enc_t_opt = optim.Adam(enc_t.parameters(), lr=learning_rate_rec, betas=(0.5, 0.999)) 155 | dec_s_opt = optim.Adam(dec_s.parameters(), lr=learning_rate_rec, betas=(0.5, 0.999)) 156 | dec_t_opt = optim.Adam(dec_t.parameters(), lr=learning_rate_rec, betas=(0.5, 0.999)) 157 | dis_s2t_opt = optim.Adam(dis_s2t.parameters(), lr=learning_rate_dis, betas=(0.5, 0.999)) 158 | dis_t2s_opt = optim.Adam(dis_t2s.parameters(), lr=learning_rate_dis, betas=(0.5, 0.999)) 159 | 160 | seg_opt_list = [] 161 | dclf_opt_list = [] 162 | rec_opt_list = [] 163 | dis_opt_list = [] 164 | 165 | # Optimizer list for quickly adjusting learning rate 166 | seg_opt_list.append(enc_shared_opt) 167 | dclf_opt_list.append(dclf1_opt) 168 | dclf_opt_list.append(dclf2_opt) 169 | rec_opt_list.append(enc_s_opt) 170 | rec_opt_list.append(enc_t_opt) 171 | rec_opt_list.append(dec_s_opt) 172 | rec_opt_list.append(dec_t_opt) 173 | dis_opt_list.append(dis_s2t_opt) 174 | dis_opt_list.append(dis_t2s_opt) 175 | 176 | # load_models(model_dict, './weight_90000/') 177 | 178 | cudnn.enabled = True 179 | cudnn.benchmark = True 180 | 181 | mse_loss = nn.MSELoss(size_average=True).cuda() 182 | bce_loss = nn.BCEWithLogitsLoss().cuda() 183 | sg_loss = cross_entropy2d 184 | VGG_loss = VGGLoss() 185 | 
VGG_loss_for_trans = VGGLoss_for_trans() 186 | 187 | upsample_256 = nn.Upsample(size=[256, 512], mode='bilinear') 188 | upsample_360 = nn.Upsample(size=[360, 640], mode='bilinear') 189 | upsample_512 = nn.Upsample(size=[512, 1024], mode='bilinear') 190 | 191 | true_label = 1 192 | fake_label = 0 193 | 194 | i_iter_tmp = [] 195 | epoch_tmp = [] 196 | 197 | loss_rec_s_tmp = [] 198 | loss_rec_t_tmp = [] 199 | loss_rec_s2t_tmp = [] 200 | loss_rec_t2s_tmp = [] 201 | 202 | prob_dclf1_real1_tmp = [] 203 | prob_dclf1_fake1_tmp = [] 204 | prob_dclf1_fake2_tmp = [] 205 | prob_dclf2_real1_tmp = [] 206 | prob_dclf2_fake1_tmp = [] 207 | prob_dclf2_fake2_tmp = [] 208 | 209 | loss_sim_sg_tmp = [] 210 | 211 | prob_dis_s2t_real1_tmp = [] 212 | prob_dis_s2t_fake1_tmp = [] 213 | prob_dis_s2t_fake2_tmp = [] 214 | prob_dis_t2s_real1_tmp = [] 215 | prob_dis_t2s_fake1_tmp = [] 216 | prob_dis_t2s_fake2_tmp = [] 217 | 218 | City_tmp = [] 219 | 220 | dclf1.train() 221 | dclf2.train() 222 | enc_shared.train() 223 | enc_s.train() 224 | enc_t.train() 225 | dec_s.train() 226 | dec_t.train() 227 | dis_s2t.train() 228 | dis_t2s.train() 229 | 230 | best_iou = 0 231 | best_iter= 0 232 | for i_iter in range(num_steps): 233 | print (i_iter) 234 | sys.stdout.flush() 235 | 236 | enc_shared.train() 237 | adjust_learning_rate(seg_opt_list , base_lr=learning_rate_seg, i_iter=i_iter, max_iter=num_steps, power=power) 238 | adjust_learning_rate(dclf_opt_list, base_lr=learning_rate_d , i_iter=i_iter, max_iter=num_steps, power=power) 239 | adjust_learning_rate(rec_opt_list , base_lr=learning_rate_rec, i_iter=i_iter, max_iter=num_steps, power=power) 240 | adjust_learning_rate(dis_opt_list , base_lr=learning_rate_dis, i_iter=i_iter, max_iter=num_steps, power=power) 241 | 242 | # ==== sample data ==== 243 | idx_s, source_batch = next(sourceloader_iter) 244 | idx_t, target_batch = next(targetloader_iter) 245 | 246 | source_data, source_label = source_batch 247 | target_data, target_label = target_batch 248 | 249 | sdatav = Variable(source_data).cuda() 250 | slabelv = Variable(source_label).cuda() 251 | tdatav = Variable(target_data).cuda() 252 | tlabelv = Variable(target_label) 253 | 254 | # forwarding 255 | low_s, s_pred1, s_pred2, code_s_common = enc_shared(sdatav) 256 | low_t, t_pred1, t_pred2, code_t_common = enc_shared(tdatav) 257 | code_s_private = enc_s(low_s) 258 | code_t_private = enc_t(low_t) 259 | 260 | rec_s = dec_s(code_s_common, code_s_private, 0) 261 | rec_t = dec_t(code_t_common, code_t_private, 1) 262 | rec_t2s = dec_s(code_t_common, code_s_private, 0) 263 | rec_s2t = dec_t(code_s_common, code_t_private, 1) 264 | 265 | for p in dclf1.parameters(): 266 | p.requires_grad = True 267 | for p in dclf2.parameters(): 268 | p.requires_grad = True 269 | for p in dis_s2t.parameters(): 270 | p.requires_grad = True 271 | for p in dis_t2s.parameters(): 272 | p.requires_grad = True 273 | # train Domain classifier 274 | # ===== dclf1 ===== 275 | prob_dclf1_real1 = dclf1(F.softmax(upsample_256(s_pred1.detach()), dim=1)) 276 | prob_dclf1_fake1 = dclf1(F.softmax(upsample_256(t_pred1.detach()), dim=1)) 277 | loss_d_dclf1 = bce_loss(prob_dclf1_real1, Variable(torch.FloatTensor(prob_dclf1_real1.data.size()).fill_(true_label)).cuda()).cuda() \ 278 | + bce_loss(prob_dclf1_fake1, Variable(torch.FloatTensor(prob_dclf1_fake1.data.size()).fill_(fake_label)).cuda()).cuda() 279 | if i_iter%1 == 0: 280 | dclf1_opt.zero_grad() 281 | loss_d_dclf1.backward() 282 | dclf1_opt.step() 283 | 284 | # ===== dclf2 ===== 285 | prob_dclf2_real1 = 
dclf2(F.softmax(upsample_256(s_pred2.detach()), dim=1)) 286 | prob_dclf2_fake1 = dclf2(F.softmax(upsample_256(t_pred2.detach()), dim=1)) 287 | loss_d_dclf2 = bce_loss(prob_dclf2_real1, Variable(torch.FloatTensor(prob_dclf2_real1.data.size()).fill_(true_label)).cuda()).cuda() \ 288 | + bce_loss(prob_dclf2_fake1, Variable(torch.FloatTensor(prob_dclf2_fake1.data.size()).fill_(fake_label)).cuda()).cuda() 289 | if i_iter%1 == 0: 290 | dclf2_opt.zero_grad() 291 | loss_d_dclf2.backward() 292 | dclf2_opt.step() 293 | 294 | # train image discriminator -> LSGAN 295 | # ===== dis_s2t ===== 296 | if i_iter%5 == 0: 297 | prob_dis_s2t_real1 = dis_s2t(tdatav) 298 | prob_dis_s2t_fake1 = dis_s2t(rec_s2t.detach()) 299 | loss_d_s2t = 0.5* mse_loss(prob_dis_s2t_real1, Variable(torch.FloatTensor(prob_dis_s2t_real1.data.size()).fill_(true_label).cuda())).cuda() \ 300 | + 0.5* mse_loss(prob_dis_s2t_fake1, Variable(torch.FloatTensor(prob_dis_s2t_fake1.data.size()).fill_(fake_label).cuda())).cuda() 301 | dis_s2t_opt.zero_grad() 302 | loss_d_s2t.backward() 303 | dis_s2t_opt.step() 304 | 305 | # ===== dis_t2s ===== 306 | if i_iter%5 == 0: 307 | prob_dis_t2s_real1 = dis_t2s(sdatav) 308 | prob_dis_t2s_fake1 = dis_t2s(rec_t2s.detach()) 309 | loss_d_t2s = 0.5* mse_loss(prob_dis_t2s_real1, Variable(torch.FloatTensor(prob_dis_t2s_real1.data.size()).fill_(true_label).cuda())).cuda() \ 310 | + 0.5* mse_loss(prob_dis_t2s_fake1, Variable(torch.FloatTensor(prob_dis_t2s_fake1.data.size()).fill_(fake_label).cuda())).cuda() 311 | dis_t2s_opt.zero_grad() 312 | loss_d_t2s.backward() 313 | dis_t2s_opt.step() 314 | 315 | for p in dclf1.parameters(): 316 | p.requires_grad = False 317 | for p in dclf2.parameters(): 318 | p.requires_grad = False 319 | for p in dis_s2t.parameters(): 320 | p.requires_grad = False 321 | for p in dis_t2s.parameters(): 322 | p.requires_grad = False 323 | 324 | # ==== VGGLoss self-reconstruction loss ==== 325 | loss_rec_s = VGG_loss(rec_s, sdatav) 326 | loss_rec_t = VGG_loss(rec_t, tdatav) 327 | loss_rec_self = loss_rec_s + loss_rec_t 328 | 329 | loss_rec_s2t = VGG_loss_for_trans(rec_s2t, sdatav, tdatav, weights=[0, 0, 0, 1.0/4, 1.0]) 330 | loss_rec_t2s = VGG_loss_for_trans(rec_t2s, tdatav, sdatav, weights=[0, 0, 0, 1.0/4, 1.0]) 331 | loss_rec_tran = loss_rec_s2t + loss_rec_t2s 332 | 333 | # ==== domain agnostic loss ==== 334 | prob_dclf1_fake2 = dclf1(F.softmax(upsample_256(t_pred1), dim=1)) 335 | loss_feat1_similarity = bce_loss(prob_dclf1_fake2, Variable(torch.FloatTensor(prob_dclf1_fake2.data.size()).fill_(true_label)).cuda()) 336 | 337 | prob_dclf2_fake2 = dclf2(F.softmax(upsample_256(t_pred2), dim=1)) 338 | loss_feat2_similarity = bce_loss(prob_dclf2_fake2, Variable(torch.FloatTensor(prob_dclf2_fake2.data.size()).fill_(true_label)).cuda()) 339 | 340 | loss_feat_similarity = lambda_adv_target1* loss_feat1_similarity + lambda_adv_target2* loss_feat2_similarity 341 | 342 | # ==== image translation loss ==== 343 | # prob_dis_s2t_real2 = dis_s2t(tdatav) 344 | prob_dis_s2t_fake2 = dis_s2t(rec_s2t) 345 | loss_gen_s2t = mse_loss(prob_dis_s2t_fake2, Variable(torch.FloatTensor(prob_dis_s2t_fake2.data.size()).fill_(true_label)).cuda()) \ 346 | 347 | # prob_dis_t2s_real2 = dis_t2s(sdatav) 348 | prob_dis_t2s_fake2 = dis_t2s(rec_t2s) 349 | loss_gen_t2s = mse_loss(prob_dis_t2s_fake2, Variable(torch.FloatTensor(prob_dis_t2s_fake2.data.size()).fill_(true_label)).cuda()) \ 350 | 351 | loss_image_translation = loss_gen_s2t + loss_gen_t2s 352 | 353 | # ==== segmentation loss ==== 354 | s_pred1 = upsample_256(s_pred1) 355 | 
s_pred2 = upsample_256(s_pred2) 356 | loss_s_sg1 = sg_loss(s_pred1, slabelv) 357 | loss_s_sg2 = sg_loss(s_pred2, slabelv) 358 | 359 | loss_sim_sg = lambda_seg* loss_s_sg1 + loss_s_sg2 360 | 361 | # ==== tranalated segmentation==== 362 | # When to start using translated labels, it should be discussed 363 | if i_iter >= 0: 364 | # check if we have to detach the rec_s2t images 365 | _, s2t_pred1, s2t_pred2, _ = enc_shared(rec_s2t.detach()) 366 | s2t_pred1 = upsample_256(s2t_pred1) 367 | s2t_pred2 = upsample_256(s2t_pred2) 368 | loss_s2t_sg1 = sg_loss(s2t_pred1, slabelv) 369 | loss_s2t_sg2 = sg_loss(s2t_pred2, slabelv) 370 | loss_sim_sg += lambda_seg* loss_s2t_sg1 + loss_s2t_sg2 371 | 372 | # visualize segmentation map 373 | t_pred2 = upsample_256(t_pred2) 374 | 375 | pred_s = F.softmax(s_pred2, dim=1).data.max(1)[1].cpu().numpy() 376 | pred_t = F.softmax(t_pred2, dim=1).data.max(1)[1].cpu().numpy() 377 | 378 | map_s = gta5_set.decode_segmap(pred_s) 379 | map_t = city_set.decode_segmap(pred_t) 380 | 381 | gt_s = slabelv.data.cpu().numpy() 382 | gt_t = tlabelv.data.cpu().numpy() 383 | gt_s = gta5_set.decode_segmap(gt_s) 384 | gt_t = city_set.decode_segmap(gt_t) 385 | 386 | total_loss = \ 387 | 1.0 * loss_sim_sg \ 388 | + 1.0 * loss_feat_similarity \ 389 | + 0.5 * loss_rec_self \ 390 | + 0.01* loss_image_translation \ 391 | + 0.05 * loss_rec_tran 392 | 393 | enc_shared_opt.zero_grad() 394 | enc_s_opt.zero_grad() 395 | enc_t_opt.zero_grad() 396 | dec_s_opt.zero_grad() 397 | 398 | total_loss.backward() 399 | 400 | enc_shared_opt.step() 401 | enc_s_opt.step() 402 | enc_t_opt.step() 403 | dec_s_opt.step() 404 | 405 | if i_iter % 25 == 0: 406 | i_iter_tmp.append(i_iter) 407 | print ('Best Iter : '+str(best_iter)) 408 | print ('Best mIoU : '+str(best_iou)) 409 | 410 | plt.title('prob_s2t') 411 | prob_dis_s2t_real1_tmp.append(prob_dis_s2t_real1.data[0].mean()) 412 | prob_dis_s2t_fake1_tmp.append(prob_dis_s2t_fake1.data[0].mean()) 413 | prob_dis_s2t_fake2_tmp.append(prob_dis_s2t_fake2.data[0].mean()) 414 | plt.plot(i_iter_tmp, prob_dis_s2t_real1_tmp, label='prob_dis_s2t_real1') 415 | plt.plot(i_iter_tmp, prob_dis_s2t_fake1_tmp, label='prob_dis_s2t_fake1') 416 | plt.plot(i_iter_tmp, prob_dis_s2t_fake2_tmp, label='prob_dis_s2t_fake2') 417 | plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3, borderaxespad=0.) 418 | plt.grid() 419 | plt.savefig(os.path.join(args.log_dir, 'prob_s2t.png')) 420 | plt.close() 421 | 422 | plt.title('prob_t2s') 423 | prob_dis_t2s_real1_tmp.append(prob_dis_t2s_real1.data[0].mean()) 424 | prob_dis_t2s_fake1_tmp.append(prob_dis_t2s_fake1.data[0].mean()) 425 | prob_dis_t2s_fake2_tmp.append(prob_dis_t2s_fake2.data[0].mean()) 426 | plt.plot(i_iter_tmp, prob_dis_t2s_real1_tmp, label='prob_dis_t2s_real1') 427 | plt.plot(i_iter_tmp, prob_dis_t2s_fake1_tmp, label='prob_dis_t2s_fake1') 428 | plt.plot(i_iter_tmp, prob_dis_t2s_fake2_tmp, label='prob_dis_t2s_fake2') 429 | plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3, borderaxespad=0.) 430 | plt.grid() 431 | plt.savefig(os.path.join(args.log_dir, 'prob_t2s.png')) 432 | plt.close() 433 | 434 | plt.title('rec self loss') 435 | loss_rec_s_tmp.append(loss_rec_s.data[0]) 436 | loss_rec_t_tmp.append(loss_rec_t.data[0]) 437 | plt.plot(i_iter_tmp, loss_rec_s_tmp, label='loss_rec_s') 438 | plt.plot(i_iter_tmp, loss_rec_t_tmp, label='loss_rec_t') 439 | plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3, borderaxespad=0.) 
440 | plt.grid() 441 | plt.savefig(os.path.join(args.log_dir, 'rec_loss.png')) 442 | plt.close() 443 | 444 | plt.title('rec tra loss') 445 | loss_rec_s2t_tmp.append(loss_rec_s2t.data[0]) 446 | loss_rec_t2s_tmp.append(loss_rec_t2s.data[0]) 447 | plt.plot(i_iter_tmp, loss_rec_s2t_tmp, label='loss_rec_s2t') 448 | plt.plot(i_iter_tmp, loss_rec_t2s_tmp, label='loss_rec_t2s') 449 | plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3, borderaxespad=0.) 450 | plt.grid() 451 | plt.savefig(os.path.join(args.log_dir, 'rec_tra_loss.png')) 452 | plt.close() 453 | 454 | plt.title('prob_dclf1') 455 | prob_dclf1_real1_tmp.append(prob_dclf1_real1.data[0].mean()) 456 | prob_dclf1_fake1_tmp.append(prob_dclf1_fake1.data[0].mean()) 457 | prob_dclf1_fake2_tmp.append(prob_dclf1_fake2.data[0].mean()) 458 | plt.plot(i_iter_tmp, prob_dclf1_real1_tmp, label='prob_dclf1_real1') 459 | plt.plot(i_iter_tmp, prob_dclf1_fake1_tmp, label='prob_dclf1_fake1') 460 | plt.plot(i_iter_tmp, prob_dclf1_fake2_tmp, label='prob_dclf1_fake2') 461 | plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3, borderaxespad=0.) 462 | plt.grid() 463 | plt.savefig(os.path.join(args.log_dir, 'prob_dclf1.png')) 464 | plt.close() 465 | 466 | plt.title('prob_dclf2') 467 | prob_dclf2_real1_tmp.append(prob_dclf2_real1.data[0].mean()) 468 | prob_dclf2_fake1_tmp.append(prob_dclf2_fake1.data[0].mean()) 469 | prob_dclf2_fake2_tmp.append(prob_dclf2_fake2.data[0].mean()) 470 | plt.plot(i_iter_tmp, prob_dclf2_real1_tmp, label='prob_dclf2_real1') 471 | plt.plot(i_iter_tmp, prob_dclf2_fake1_tmp, label='prob_dclf2_fake1') 472 | plt.plot(i_iter_tmp, prob_dclf2_fake2_tmp, label='prob_dclf2_fake2') 473 | plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3, borderaxespad=0.) 474 | plt.grid() 475 | plt.savefig(os.path.join(args.log_dir, 'prob_dclf2.png')) 476 | plt.close() 477 | 478 | plt.title('segmentation_loss') 479 | loss_sim_sg_tmp.append(loss_sim_sg.data[0]) 480 | plt.plot(i_iter_tmp, loss_sim_sg_tmp, label='loss_sim_sg') 481 | plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3, borderaxespad=0.) 482 | plt.grid() 483 | plt.savefig(os.path.join(args.log_dir, 'segmentation_loss.png')) 484 | plt.close() 485 | 486 | plt.title('mIoU') 487 | plt.plot(epoch_tmp, City_tmp, label='City') 488 | plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3, borderaxespad=0.) 
489 | plt.grid() 490 | plt.savefig(os.path.join(args.log_dir, 'mIoU.png')) 491 | plt.close() 492 | 493 | if i_iter%500 == 0 : 494 | imgs_s = torch.cat(((sdatav[:,[2, 1, 0],:,:].cpu()+1)/2, (rec_s[:,[2, 1, 0],:,:].cpu()+1)/2, (rec_s2t[:,[2, 1, 0],:,:].cpu()+1)/2, Variable(torch.Tensor((map_s.transpose((0, 3, 1, 2))))), Variable(torch.Tensor((gt_s.transpose((0, 3, 1, 2)))))), 0) 495 | imgs_s = vutils.make_grid(imgs_s.data, nrow=batch_size, normalize=False, scale_each=True).cpu().numpy() 496 | imgs_s = np.clip(imgs_s*255,0,255).astype(np.uint8) 497 | imgs_s = imgs_s.transpose(1,2,0) 498 | imgs_s = Image.fromarray(imgs_s) 499 | filename = '%05d_source.jpg' % i_iter 500 | imgs_s.save(os.path.join(args.gen_img_dir, filename)) 501 | 502 | imgs_t = torch.cat(((tdatav[:,[2, 1, 0],:,:].cpu()+1)/2, (rec_t[:,[2, 1, 0],:,:].cpu()+1)/2, (rec_t2s[:,[2, 1, 0],:,:].cpu()+1)/2, Variable(torch.Tensor((map_t.transpose((0, 3, 1, 2))))), Variable(torch.Tensor((gt_t.transpose((0, 3, 1, 2)))))), 0) 503 | imgs_t = vutils.make_grid(imgs_t.data, nrow=batch_size, normalize=False, scale_each=True).cpu().numpy() 504 | imgs_t = np.clip(imgs_t*255,0,255).astype(np.uint8) 505 | imgs_t = imgs_t.transpose(1,2,0) 506 | imgs_t = Image.fromarray(imgs_t) 507 | filename = '%05d_target.jpg' % i_iter 508 | imgs_t.save(os.path.join(args.gen_img_dir, filename)) 509 | 510 | if i_iter % num_calmIoU == 0: 511 | enc_shared.eval() 512 | print ('evaluating models ...') 513 | for i_val, (images_val, labels_val) in tqdm(enumerate(val_loader)): 514 | images_val = Variable(images_val.cuda(), volatile=True) 515 | labels_val = Variable(labels_val, volatile=True) 516 | 517 | _, _, pred, _ = enc_shared(images_val) 518 | pred = upsample_512(pred) 519 | pred = pred.data.max(1)[1].cpu().numpy() 520 | gt = labels_val.data.cpu().numpy() 521 | cty_running_metrics.update(gt, pred) 522 | 523 | cty_score, cty_class_iou = cty_running_metrics.get_scores() 524 | 525 | for k, v in cty_score.items(): 526 | print(k, v) 527 | 528 | cty_running_metrics.reset() 529 | City_tmp.append(cty_score['Mean IoU : \t']) 530 | epoch_tmp.append(i_iter) 531 | if i_iter % 10000 == 0 and i_iter != 0: 532 | save_models(model_dict, './weight_' + str(i_iter)) 533 | 534 | if cty_score['Mean IoU : \t'] > best_iou: 535 | best_iter = i_iter 536 | best_iou = cty_score['Mean IoU : \t'] 537 | save_models(model_dict, './weight/') 538 | -------------------------------------------------------------------------------- /train_dise_synthia2city.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import torch 3 | import argparse 4 | import numpy as np 5 | import torch.nn as nn 6 | import torch.optim as optim 7 | import torch.nn.functional as F 8 | import torchvision 9 | import torchvision.utils as vutils 10 | import torchvision.models as models 11 | import torch.utils.data as torch_data 12 | import torch.backends.cudnn as cudnn 13 | 14 | import matplotlib.pyplot as plt 15 | import os 16 | 17 | # from tensorboardX import SummaryWriter 18 | from PIL import Image 19 | from torch.autograd import Variable 20 | from tqdm import tqdm 21 | 22 | from util.loader.CityLoader import CityLoader 23 | from util.loader.SYNTHIALoader import SYNTHIALoader 24 | from util.loader.augmentations import Compose, RandomHorizontallyFlip, RandomSized_and_Crop, RandomCrop 25 | from util.metrics import runningScore 26 | from util.loss import VGGLoss, VGGLoss_for_trans, cross_entropy2d 27 | from model.model import SharedEncoder, PrivateEncoder, PrivateDecoder, 
Discriminator, DomainClassifier 28 | from util.utils import poly_lr_scheduler, adjust_learning_rate, save_models, load_models 29 | 30 | # Data-related 31 | LOG_DIR = './log' 32 | GEN_IMG_DIR = './generated_imgs' 33 | 34 | SYNTHIA_DATA_PATH = '/workspace/lustre/data/RAND_CITYSCAPES' 35 | CITY_DATA_PATH = '/workspace/lustre/data/Cityscapes' 36 | DATA_LIST_PATH_SYNTHIA = './util/loader/synthia_list/train.txt' 37 | DATA_LIST_PATH_CITY_IMG = './util/loader/cityscapes_list/train.txt' 38 | DATA_LIST_PATH_CITY_LBL = './util/loader/cityscapes_list/train_label.txt' 39 | DATA_LIST_PATH_VAL_IMG = './util/loader/cityscapes_list/val.txt' 40 | DATA_LIST_PATH_VAL_LBL = './util/loader/cityscapes_list/val_label.txt' 41 | 42 | # Hyper-parameters 43 | CUDA_DIVICE_ID = '0, 1' 44 | 45 | parser = argparse.ArgumentParser(description='Domain Invariant Structure Extraction (DISE) \ 46 | for unsupervised domain adaptation for semantic segmentation') 47 | parser.add_argument('--dump_logs', type=bool, default=False) 48 | parser.add_argument('--log_dir', type=str, default=LOG_DIR, help='the path to where you save plots and logs.') 49 | parser.add_argument('--gen_img_dir', type=str, default=GEN_IMG_DIR, help='the path to where you save translated images and segmentation maps.') 50 | parser.add_argument('--synthia_data_path', type=str, default=SYNTHIA_DATA_PATH, help='the path to SYNTHIA dataset.') 51 | parser.add_argument('--city_data_path', type=str, default=CITY_DATA_PATH, help='the path to Cityscapes dataset.') 52 | parser.add_argument('--data_list_path_synthia', type=str, default=DATA_LIST_PATH_SYNTHIA) 53 | parser.add_argument('--data_list_path_city_img', type=str, default=DATA_LIST_PATH_CITY_IMG) 54 | parser.add_argument('--data_list_path_city_lbl', type=str, default=DATA_LIST_PATH_CITY_LBL) 55 | parser.add_argument('--data_list_path_val_img', type=str, default=DATA_LIST_PATH_VAL_IMG) 56 | parser.add_argument('--data_list_path_val_lbl', type=str, default=DATA_LIST_PATH_VAL_LBL) 57 | 58 | parser.add_argument('--cuda_device_id', nargs='+', type=str, default=CUDA_DIVICE_ID) 59 | 60 | args = parser.parse_args() 61 | 62 | print ('cuda_device_id:', ','.join(args.cuda_device_id)) 63 | os.environ['CUDA_VISIBLE_DEVICES'] = ','.join(args.cuda_device_id) 64 | 65 | if not os.path.exists(args.log_dir): 66 | os.makedirs(args.log_dir) 67 | 68 | if not os.path.exists(args.gen_img_dir): 69 | os.makedirs(args.gen_img_dir) 70 | 71 | if args.dump_logs == True: 72 | old_output = sys.stdout 73 | sys.stdout = open(os.path.join(args.log_dir, 'output.txt'), 'w') 74 | 75 | IMG_MEAN = np.array((104.00698793, 116.66876762, 122.67891434), dtype=np.float32) 76 | 77 | num_classes = 19 78 | source_input_size = [720, 1280] 79 | target_input_size = [512, 1024] 80 | batch_size = 2 81 | 82 | max_epoch = 150 83 | num_steps = 250000 84 | num_calmIoU = 1000 85 | 86 | learning_rate_seg = 2.5e-4 87 | learning_rate_d = 1e-4 88 | learning_rate_rec = 1e-3 89 | learning_rate_dis = 1e-4 90 | power = 0.9 91 | weight_decay = 0.0005 92 | 93 | lambda_seg = 0.1 94 | lambda_adv_target1 = 0.0002 95 | lambda_adv_target2 = 0.001 96 | 97 | source_channels = 3 98 | target_channels = 3 99 | private_code_size = 8 100 | shared_code_channels = 2048 101 | 102 | # Setup Augmentations 103 | synthia_data_aug = Compose([RandomHorizontallyFlip(), 104 | RandomSized_and_Crop([512, 1024]) 105 | ]) 106 | 107 | city_data_aug = Compose([RandomHorizontallyFlip(), 108 | RandomCrop([512, 1024]) 109 | ]) 110 | # ==== DataLoader ==== 111 | synthia_set = SYNTHIALoader(args.synthia_data_path, 
args.data_list_path_synthia, max_iters=num_steps* batch_size, crop_size=source_input_size, transform=synthia_data_aug, mean=IMG_MEAN) 112 | source_loader= torch_data.DataLoader(synthia_set, batch_size=batch_size, shuffle=True, num_workers=4, pin_memory=True) 113 | 114 | city_set = CityLoader(args.city_data_path, args.data_list_path_city_img, args.data_list_path_city_lbl, max_iters=num_steps* batch_size, crop_size=target_input_size, transform=city_data_aug, mean=IMG_MEAN, set='train') 115 | target_loader= torch_data.DataLoader(city_set, batch_size=batch_size, shuffle=True, num_workers=4, pin_memory=True) 116 | 117 | val_set = CityLoader(args.city_data_path, args.data_list_path_val_img, args.data_list_path_val_lbl, max_iters=None, crop_size=[512, 1024], mean=IMG_MEAN, set='val') 118 | val_loader= torch_data.DataLoader(val_set, batch_size=1, shuffle=False, num_workers=4, pin_memory=True) 119 | 120 | sourceloader_iter = enumerate(source_loader) 121 | targetloader_iter = enumerate(target_loader) 122 | 123 | # Setup Metrics 124 | cty_running_metrics = runningScore(num_classes) 125 | 126 | model_dict = {} 127 | 128 | # Setup Model 129 | print ('building models ...') 130 | enc_shared = SharedEncoder().cuda() 131 | dclf1 = DomainClassifier().cuda() 132 | dclf2 = DomainClassifier().cuda() 133 | enc_s = PrivateEncoder(64, private_code_size).cuda() 134 | enc_t = PrivateEncoder(64, private_code_size).cuda() 135 | dec_s = PrivateDecoder(shared_code_channels, private_code_size).cuda() 136 | dec_t = dec_s 137 | dis_s2t = Discriminator().cuda() 138 | dis_t2s = Discriminator().cuda() 139 | 140 | model_dict['enc_shared'] = enc_shared 141 | model_dict['dclf1'] = dclf1 142 | model_dict['dclf2'] = dclf2 143 | model_dict['enc_s'] = enc_s 144 | model_dict['enc_t'] = enc_t 145 | model_dict['dec_s'] = dec_s 146 | model_dict['dec_t'] = dec_t 147 | model_dict['dis_s2t'] = dis_s2t 148 | model_dict['dis_t2s'] = dis_t2s 149 | 150 | enc_shared_opt = optim.SGD(enc_shared.optim_parameters(learning_rate_seg), lr=learning_rate_seg, momentum=0.9, weight_decay=weight_decay) 151 | dclf1_opt = optim.Adam(dclf1.parameters(), lr=learning_rate_d, betas=(0.9, 0.99)) 152 | dclf2_opt = optim.Adam(dclf2.parameters(), lr=learning_rate_d, betas=(0.9, 0.99)) 153 | enc_s_opt = optim.Adam(enc_s.parameters(), lr=learning_rate_rec, betas=(0.5, 0.999)) 154 | enc_t_opt = optim.Adam(enc_t.parameters(), lr=learning_rate_rec, betas=(0.5, 0.999)) 155 | dec_s_opt = optim.Adam(dec_s.parameters(), lr=learning_rate_rec, betas=(0.5, 0.999)) 156 | dec_t_opt = optim.Adam(dec_t.parameters(), lr=learning_rate_rec, betas=(0.5, 0.999)) 157 | dis_s2t_opt = optim.Adam(dis_s2t.parameters(), lr=learning_rate_dis, betas=(0.5, 0.999)) 158 | dis_t2s_opt = optim.Adam(dis_t2s.parameters(), lr=learning_rate_dis, betas=(0.5, 0.999)) 159 | 160 | seg_opt_list = [] 161 | dclf_opt_list = [] 162 | rec_opt_list = [] 163 | dis_opt_list = [] 164 | 165 | # Optimizer list for quickly adjusting learning rate 166 | seg_opt_list.append(enc_shared_opt) 167 | dclf_opt_list.append(dclf1_opt) 168 | dclf_opt_list.append(dclf2_opt) 169 | rec_opt_list.append(enc_s_opt) 170 | rec_opt_list.append(enc_t_opt) 171 | rec_opt_list.append(dec_s_opt) 172 | rec_opt_list.append(dec_t_opt) 173 | dis_opt_list.append(dis_s2t_opt) 174 | dis_opt_list.append(dis_t2s_opt) 175 | 176 | cudnn.enabled = True 177 | cudnn.benchmark = True 178 | 179 | mse_loss = nn.MSELoss(size_average=True).cuda() 180 | bce_loss = nn.BCEWithLogitsLoss().cuda() 181 | sg_loss = cross_entropy2d 182 | VGG_loss = VGGLoss() 183 | 
VGG_loss_for_trans = VGGLoss_for_trans() 184 | 185 | upsample_256 = nn.Upsample(size=[256, 512], mode='bilinear') 186 | upsample_360 = nn.Upsample(size=[360, 640], mode='bilinear') 187 | upsample_512 = nn.Upsample(size=[512, 1024], mode='bilinear') 188 | 189 | true_label = 1 190 | fake_label = 0 191 | 192 | i_iter_tmp = [] 193 | epoch_tmp = [] 194 | 195 | loss_rec_s_tmp = [] 196 | loss_rec_t_tmp = [] 197 | loss_rec_s2t_tmp = [] 198 | loss_rec_t2s_tmp = [] 199 | 200 | prob_dclf1_real1_tmp = [] 201 | prob_dclf1_fake1_tmp = [] 202 | prob_dclf1_fake2_tmp = [] 203 | prob_dclf2_real1_tmp = [] 204 | prob_dclf2_fake1_tmp = [] 205 | prob_dclf2_fake2_tmp = [] 206 | 207 | loss_sim_sg_tmp = [] 208 | 209 | prob_dis_s2t_real1_tmp = [] 210 | prob_dis_s2t_fake1_tmp = [] 211 | prob_dis_s2t_fake2_tmp = [] 212 | prob_dis_t2s_real1_tmp = [] 213 | prob_dis_t2s_fake1_tmp = [] 214 | prob_dis_t2s_fake2_tmp = [] 215 | 216 | City_tmp = [] 217 | 218 | dclf1.train() 219 | dclf2.train() 220 | enc_shared.train() 221 | enc_s.train() 222 | enc_t.train() 223 | dec_s.train() 224 | dec_t.train() 225 | dis_s2t.train() 226 | dis_t2s.train() 227 | 228 | best_iou = 0 229 | best_iter= 0 230 | for i_iter in range(num_steps): 231 | print (i_iter) 232 | sys.stdout.flush() 233 | 234 | enc_shared.train() 235 | adjust_learning_rate(seg_opt_list , base_lr=learning_rate_seg, i_iter=i_iter, max_iter=num_steps, power=power) 236 | adjust_learning_rate(dclf_opt_list, base_lr=learning_rate_d , i_iter=i_iter, max_iter=num_steps, power=power) 237 | adjust_learning_rate(rec_opt_list , base_lr=learning_rate_rec, i_iter=i_iter, max_iter=num_steps, power=power) 238 | adjust_learning_rate(dis_opt_list , base_lr=learning_rate_dis, i_iter=i_iter, max_iter=num_steps, power=power) 239 | 240 | # ==== sample data ==== 241 | idx_s, source_batch = next(sourceloader_iter) 242 | idx_t, target_batch = next(targetloader_iter) 243 | 244 | source_data, source_label = source_batch 245 | target_data, target_label = target_batch 246 | 247 | sdatav = Variable(source_data).cuda() 248 | slabelv = Variable(source_label).cuda() 249 | tdatav = Variable(target_data).cuda() 250 | tlabelv = Variable(target_label) 251 | 252 | # forwarding 253 | low_s, s_pred1, s_pred2, code_s_common = enc_shared(sdatav) 254 | low_t, t_pred1, t_pred2, code_t_common = enc_shared(tdatav) 255 | code_s_private = enc_s(low_s) 256 | code_t_private = enc_t(low_t) 257 | 258 | rec_s = dec_s(code_s_common, code_s_private, 0) 259 | rec_t = dec_t(code_t_common, code_t_private, 1) 260 | rec_t2s = dec_s(code_t_common, code_s_private, 0) 261 | rec_s2t = dec_t(code_s_common, code_t_private, 1) 262 | 263 | for p in dclf1.parameters(): 264 | p.requires_grad = True 265 | for p in dclf2.parameters(): 266 | p.requires_grad = True 267 | for p in dis_s2t.parameters(): 268 | p.requires_grad = True 269 | for p in dis_t2s.parameters(): 270 | p.requires_grad = True 271 | # train Domain classifier 272 | # ===== dclf1 ===== 273 | prob_dclf1_real1 = dclf1(F.softmax(upsample_256(s_pred1.detach()), dim=1)) 274 | prob_dclf1_fake1 = dclf1(F.softmax(upsample_256(t_pred1.detach()), dim=1)) 275 | loss_d_dclf1 = bce_loss(prob_dclf1_real1, Variable(torch.FloatTensor(prob_dclf1_real1.data.size()).fill_(true_label)).cuda()).cuda() \ 276 | + bce_loss(prob_dclf1_fake1, Variable(torch.FloatTensor(prob_dclf1_fake1.data.size()).fill_(fake_label)).cuda()).cuda() 277 | if i_iter%1 == 0: 278 | dclf1_opt.zero_grad() 279 | loss_d_dclf1.backward() 280 | dclf1_opt.step() 281 | 282 | # ===== dclf2 ===== 283 | prob_dclf2_real1 = 
dclf2(F.softmax(upsample_256(s_pred2.detach()), dim=1)) 284 | prob_dclf2_fake1 = dclf2(F.softmax(upsample_256(t_pred2.detach()), dim=1)) 285 | loss_d_dclf2 = bce_loss(prob_dclf2_real1, Variable(torch.FloatTensor(prob_dclf2_real1.data.size()).fill_(true_label)).cuda()).cuda() \ 286 | + bce_loss(prob_dclf2_fake1, Variable(torch.FloatTensor(prob_dclf2_fake1.data.size()).fill_(fake_label)).cuda()).cuda() 287 | if i_iter%1 == 0: 288 | dclf2_opt.zero_grad() 289 | loss_d_dclf2.backward() 290 | dclf2_opt.step() 291 | 292 | # train image discriminator -> LSGAN 293 | # ===== dis_s2t ===== 294 | if i_iter%5 == 0: 295 | prob_dis_s2t_real1 = dis_s2t(tdatav) 296 | prob_dis_s2t_fake1 = dis_s2t(rec_s2t.detach()) 297 | loss_d_s2t = 0.5* mse_loss(prob_dis_s2t_real1, Variable(torch.FloatTensor(prob_dis_s2t_real1.data.size()).fill_(true_label).cuda())).cuda() \ 298 | + 0.5* mse_loss(prob_dis_s2t_fake1, Variable(torch.FloatTensor(prob_dis_s2t_fake1.data.size()).fill_(fake_label).cuda())).cuda() 299 | dis_s2t_opt.zero_grad() 300 | loss_d_s2t.backward() 301 | dis_s2t_opt.step() 302 | 303 | # ===== dis_t2s ===== 304 | if i_iter%5 == 0: 305 | prob_dis_t2s_real1 = dis_t2s(sdatav) 306 | prob_dis_t2s_fake1 = dis_t2s(rec_t2s.detach()) 307 | loss_d_t2s = 0.5* mse_loss(prob_dis_t2s_real1, Variable(torch.FloatTensor(prob_dis_t2s_real1.data.size()).fill_(true_label).cuda())).cuda() \ 308 | + 0.5* mse_loss(prob_dis_t2s_fake1, Variable(torch.FloatTensor(prob_dis_t2s_fake1.data.size()).fill_(fake_label).cuda())).cuda() 309 | dis_t2s_opt.zero_grad() 310 | loss_d_t2s.backward() 311 | dis_t2s_opt.step() 312 | 313 | for p in dclf1.parameters(): 314 | p.requires_grad = False 315 | for p in dclf2.parameters(): 316 | p.requires_grad = False 317 | for p in dis_s2t.parameters(): 318 | p.requires_grad = False 319 | for p in dis_t2s.parameters(): 320 | p.requires_grad = False 321 | 322 | # ==== VGGLoss self-reconstruction loss ==== 323 | loss_rec_s = VGG_loss(rec_s, sdatav) 324 | loss_rec_t = VGG_loss(rec_t, tdatav) 325 | loss_rec_self = loss_rec_s + loss_rec_t 326 | 327 | loss_rec_s2t = VGG_loss_for_trans(rec_s2t, sdatav, tdatav, weights=[0, 0, 0, 1.0/4, 1.0]) 328 | loss_rec_t2s = VGG_loss_for_trans(rec_t2s, tdatav, sdatav, weights=[0, 0, 0, 1.0/4, 1.0]) 329 | loss_rec_tran = loss_rec_s2t + loss_rec_t2s 330 | 331 | # ==== domain agnostic loss ==== 332 | prob_dclf1_fake2 = dclf1(F.softmax(upsample_256(t_pred1), dim=1)) 333 | loss_feat1_similarity = bce_loss(prob_dclf1_fake2, Variable(torch.FloatTensor(prob_dclf1_fake2.data.size()).fill_(true_label)).cuda()) 334 | 335 | prob_dclf2_fake2 = dclf2(F.softmax(upsample_256(t_pred2), dim=1)) 336 | loss_feat2_similarity = bce_loss(prob_dclf2_fake2, Variable(torch.FloatTensor(prob_dclf2_fake2.data.size()).fill_(true_label)).cuda()) 337 | 338 | loss_feat_similarity = lambda_adv_target1* loss_feat1_similarity + lambda_adv_target2* loss_feat2_similarity 339 | 340 | # ==== image translation loss ==== 341 | # prob_dis_s2t_real2 = dis_s2t(tdatav) 342 | prob_dis_s2t_fake2 = dis_s2t(rec_s2t) 343 | loss_gen_s2t = mse_loss(prob_dis_s2t_fake2, Variable(torch.FloatTensor(prob_dis_s2t_fake2.data.size()).fill_(true_label)).cuda()) \ 344 | 345 | # prob_dis_t2s_real2 = dis_t2s(sdatav) 346 | prob_dis_t2s_fake2 = dis_t2s(rec_t2s) 347 | loss_gen_t2s = mse_loss(prob_dis_t2s_fake2, Variable(torch.FloatTensor(prob_dis_t2s_fake2.data.size()).fill_(true_label)).cuda()) \ 348 | 349 | loss_image_translation = loss_gen_s2t + loss_gen_t2s 350 | 351 | # ==== segmentation loss ==== 352 | s_pred1 = upsample_256(s_pred1) 353 | 
s_pred2 = upsample_256(s_pred2) 354 | loss_s_sg1 = sg_loss(s_pred1, slabelv) 355 | loss_s_sg2 = sg_loss(s_pred2, slabelv) 356 | 357 | loss_sim_sg = lambda_seg* loss_s_sg1 + loss_s_sg2 358 | 359 | # ==== translated segmentation ==== 360 | # When to start using the translated images for supervision is still an open question 361 | if i_iter >= 0: 362 | # check whether the rec_s2t images need to be detached here 363 | _, s2t_pred1, s2t_pred2, _ = enc_shared(rec_s2t.detach()) 364 | s2t_pred1 = upsample_256(s2t_pred1) 365 | s2t_pred2 = upsample_256(s2t_pred2) 366 | loss_s2t_sg1 = sg_loss(s2t_pred1, slabelv) 367 | loss_s2t_sg2 = sg_loss(s2t_pred2, slabelv) 368 | loss_sim_sg += lambda_seg* loss_s2t_sg1 + loss_s2t_sg2 369 | 370 | # visualize segmentation map 371 | t_pred2 = upsample_256(t_pred2) 372 | 373 | pred_s = F.softmax(s_pred2, dim=1).data.max(1)[1].cpu().numpy() 374 | pred_t = F.softmax(t_pred2, dim=1).data.max(1)[1].cpu().numpy() 375 | 376 | map_s = synthia_set.decode_segmap(pred_s) 377 | map_t = city_set.decode_segmap(pred_t) 378 | 379 | gt_s = slabelv.data.cpu().numpy() 380 | gt_t = tlabelv.data.cpu().numpy() 381 | gt_s = synthia_set.decode_segmap(gt_s) 382 | gt_t = city_set.decode_segmap(gt_t) 383 | 384 | total_loss = \ 385 | 1.0 * loss_sim_sg \ 386 | + 2.0 * loss_feat_similarity \ 387 | + 0.5 * loss_rec_self \ 388 | + 0.01* loss_image_translation \ 389 | + 0.05 * loss_rec_tran 390 | 391 | enc_shared_opt.zero_grad() 392 | enc_s_opt.zero_grad() 393 | enc_t_opt.zero_grad() 394 | dec_s_opt.zero_grad() 395 | 396 | total_loss.backward() 397 | 398 | enc_shared_opt.step() 399 | enc_s_opt.step() 400 | enc_t_opt.step() 401 | dec_s_opt.step() 402 | 403 | if i_iter % 25 == 0: 404 | i_iter_tmp.append(i_iter) 405 | print ('Best Iter : '+str(best_iter)) 406 | print ('Best mIoU : '+str(best_iou)) 407 | 408 | plt.title('prob_s2t') 409 | prob_dis_s2t_real1_tmp.append(prob_dis_s2t_real1.data[0].mean()) 410 | prob_dis_s2t_fake1_tmp.append(prob_dis_s2t_fake1.data[0].mean()) 411 | prob_dis_s2t_fake2_tmp.append(prob_dis_s2t_fake2.data[0].mean()) 412 | plt.plot(i_iter_tmp, prob_dis_s2t_real1_tmp, label='prob_dis_s2t_real1') 413 | plt.plot(i_iter_tmp, prob_dis_s2t_fake1_tmp, label='prob_dis_s2t_fake1') 414 | plt.plot(i_iter_tmp, prob_dis_s2t_fake2_tmp, label='prob_dis_s2t_fake2') 415 | plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3, borderaxespad=0.) 416 | plt.grid() 417 | plt.savefig(os.path.join(args.log_dir, 'prob_s2t.png')) 418 | plt.close() 419 | 420 | plt.title('prob_t2s') 421 | prob_dis_t2s_real1_tmp.append(prob_dis_t2s_real1.data[0].mean()) 422 | prob_dis_t2s_fake1_tmp.append(prob_dis_t2s_fake1.data[0].mean()) 423 | prob_dis_t2s_fake2_tmp.append(prob_dis_t2s_fake2.data[0].mean()) 424 | plt.plot(i_iter_tmp, prob_dis_t2s_real1_tmp, label='prob_dis_t2s_real1') 425 | plt.plot(i_iter_tmp, prob_dis_t2s_fake1_tmp, label='prob_dis_t2s_fake1') 426 | plt.plot(i_iter_tmp, prob_dis_t2s_fake2_tmp, label='prob_dis_t2s_fake2') 427 | plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3, borderaxespad=0.) 428 | plt.grid() 429 | plt.savefig(os.path.join(args.log_dir, 'prob_t2s.png')) 430 | plt.close() 431 | 432 | plt.title('rec self loss') 433 | loss_rec_s_tmp.append(loss_rec_s.data[0]) 434 | loss_rec_t_tmp.append(loss_rec_t.data[0]) 435 | plt.plot(i_iter_tmp, loss_rec_s_tmp, label='loss_rec_s') 436 | plt.plot(i_iter_tmp, loss_rec_t_tmp, label='loss_rec_t') 437 | plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3, borderaxespad=0.)
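# Every 25 iterations the scalar histories in this logging block (discriminator
# probabilities, self- and cross-reconstruction losses, segmentation loss, and the
# running validation mIoU) are appended to and replotted, one PNG per curve under
# args.log_dir.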
438 | plt.grid() 439 | plt.savefig(os.path.join(args.log_dir, 'rec_loss.png')) 440 | plt.close() 441 | 442 | plt.title('rec tra loss') 443 | loss_rec_s2t_tmp.append(loss_rec_s2t.data[0]) 444 | loss_rec_t2s_tmp.append(loss_rec_t2s.data[0]) 445 | plt.plot(i_iter_tmp, loss_rec_s2t_tmp, label='loss_rec_s2t') 446 | plt.plot(i_iter_tmp, loss_rec_t2s_tmp, label='loss_rec_t2s') 447 | plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3, borderaxespad=0.) 448 | plt.grid() 449 | plt.savefig(os.path.join(args.log_dir, 'rec_tra_loss.png')) 450 | plt.close() 451 | 452 | plt.title('prob_dclf1') 453 | prob_dclf1_real1_tmp.append(prob_dclf1_real1.data[0].mean()) 454 | prob_dclf1_fake1_tmp.append(prob_dclf1_fake1.data[0].mean()) 455 | prob_dclf1_fake2_tmp.append(prob_dclf1_fake2.data[0].mean()) 456 | plt.plot(i_iter_tmp, prob_dclf1_real1_tmp, label='prob_dclf1_real1') 457 | plt.plot(i_iter_tmp, prob_dclf1_fake1_tmp, label='prob_dclf1_fake1') 458 | plt.plot(i_iter_tmp, prob_dclf1_fake2_tmp, label='prob_dclf1_fake2') 459 | plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3, borderaxespad=0.) 460 | plt.grid() 461 | plt.savefig(os.path.join(args.log_dir, 'prob_dclf1.png')) 462 | plt.close() 463 | 464 | plt.title('prob_dclf2') 465 | prob_dclf2_real1_tmp.append(prob_dclf2_real1.data[0].mean()) 466 | prob_dclf2_fake1_tmp.append(prob_dclf2_fake1.data[0].mean()) 467 | prob_dclf2_fake2_tmp.append(prob_dclf2_fake2.data[0].mean()) 468 | plt.plot(i_iter_tmp, prob_dclf2_real1_tmp, label='prob_dclf2_real1') 469 | plt.plot(i_iter_tmp, prob_dclf2_fake1_tmp, label='prob_dclf2_fake1') 470 | plt.plot(i_iter_tmp, prob_dclf2_fake2_tmp, label='prob_dclf2_fake2') 471 | plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3, borderaxespad=0.) 472 | plt.grid() 473 | plt.savefig(os.path.join(args.log_dir, 'prob_dclf2.png')) 474 | plt.close() 475 | 476 | plt.title('segmentation_loss') 477 | loss_sim_sg_tmp.append(loss_sim_sg.data[0]) 478 | plt.plot(i_iter_tmp, loss_sim_sg_tmp, label='loss_sim_sg') 479 | plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3, borderaxespad=0.) 480 | plt.grid() 481 | plt.savefig(os.path.join(args.log_dir, 'segmentation_loss.png')) 482 | plt.close() 483 | 484 | plt.title('mIoU') 485 | plt.plot(epoch_tmp, City_tmp, label='City') 486 | plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3, borderaxespad=0.) 
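# Further below, every 500 iterations a qualitative grid is saved to args.gen_img_dir
# (rows: input, self-reconstruction, cross-domain translation, predicted segmentation,
# ground truth), and every num_calmIoU iterations enc_shared is evaluated on the
# Cityscapes validation split; the best-mIoU weights go to ./weight/ and periodic
# snapshots to ./weight_<iteration>.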
487 | plt.grid() 488 | plt.savefig(os.path.join(args.log_dir, 'mIoU.png')) 489 | plt.close() 490 | 491 | if i_iter%500 == 0 : 492 | imgs_s = torch.cat(((sdatav[:,[2, 1, 0],:,:].cpu()+1)/2, (rec_s[:,[2, 1, 0],:,:].cpu()+1)/2, (rec_s2t[:,[2, 1, 0],:,:].cpu()+1)/2, Variable(torch.Tensor((map_s.transpose((0, 3, 1, 2))))), Variable(torch.Tensor((gt_s.transpose((0, 3, 1, 2)))))), 0) 493 | imgs_s = vutils.make_grid(imgs_s.data, nrow=batch_size, normalize=False, scale_each=True).cpu().numpy() 494 | imgs_s = np.clip(imgs_s*255,0,255).astype(np.uint8) 495 | imgs_s = imgs_s.transpose(1,2,0) 496 | imgs_s = Image.fromarray(imgs_s) 497 | filename = '%05d_source.jpg' % i_iter 498 | imgs_s.save(os.path.join(args.gen_img_dir, filename)) 499 | 500 | imgs_t = torch.cat(((tdatav[:,[2, 1, 0],:,:].cpu()+1)/2, (rec_t[:,[2, 1, 0],:,:].cpu()+1)/2, (rec_t2s[:,[2, 1, 0],:,:].cpu()+1)/2, Variable(torch.Tensor((map_t.transpose((0, 3, 1, 2))))), Variable(torch.Tensor((gt_t.transpose((0, 3, 1, 2)))))), 0) 501 | imgs_t = vutils.make_grid(imgs_t.data, nrow=batch_size, normalize=False, scale_each=True).cpu().numpy() 502 | imgs_t = np.clip(imgs_t*255,0,255).astype(np.uint8) 503 | imgs_t = imgs_t.transpose(1,2,0) 504 | imgs_t = Image.fromarray(imgs_t) 505 | filename = '%05d_target.jpg' % i_iter 506 | imgs_t.save(os.path.join(args.gen_img_dir, filename)) 507 | 508 | if i_iter % num_calmIoU == 0: 509 | enc_shared.eval() 510 | print ('evaluating models ...') 511 | for i_val, (images_val, labels_val) in tqdm(enumerate(val_loader)): 512 | images_val = Variable(images_val.cuda(), volatile=True) 513 | labels_val = Variable(labels_val, volatile=True) 514 | 515 | _, _, pred, _ = enc_shared(images_val) 516 | pred = upsample_512(pred) 517 | pred = pred.data.max(1)[1].cpu().numpy() 518 | gt = labels_val.data.cpu().numpy() 519 | cty_running_metrics.update(gt, pred) 520 | 521 | cty_score, cty_class_iou = cty_running_metrics.get_scores() 522 | 523 | for k, v in cty_score.items(): 524 | print(k, v) 525 | 526 | cty_running_metrics.reset() 527 | City_tmp.append(cty_score['Mean IoU : \t']) 528 | epoch_tmp.append(i_iter) 529 | if i_iter % 10000 == 0 and i_iter != 0: 530 | save_models(model_dict, './weight_' + str(i_iter)) 531 | 532 | if cty_score['Mean IoU : \t'] > best_iou: 533 | best_iter = i_iter 534 | best_iou = cty_score['Mean IoU : \t'] 535 | save_models(model_dict, './weight/') 536 | -------------------------------------------------------------------------------- /util/__init__.py: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /util/info.json: -------------------------------------------------------------------------------- 1 | { 2 | "classes":19, 3 | "label2train":[ 4 | [0, 255], 5 | [1, 255], 6 | [2, 255], 7 | [3, 255], 8 | [4, 255], 9 | [5, 255], 10 | [6, 255], 11 | [7, 0], 12 | [8, 1], 13 | [9, 255], 14 | [10, 255], 15 | [11, 2], 16 | [12, 3], 17 | [13, 4], 18 | [14, 255], 19 | [15, 255], 20 | [16, 255], 21 | [17, 5], 22 | [18, 255], 23 | [19, 6], 24 | [20, 7], 25 | [21, 8], 26 | [22, 9], 27 | [23, 10], 28 | [24, 11], 29 | [25, 12], 30 | [26, 13], 31 | [27, 14], 32 | [28, 15], 33 | [29, 255], 34 | [30, 255], 35 | [31, 16], 36 | [32, 17], 37 | [33, 18], 38 | [-1, 255]], 39 | "label":[ 40 | "road", 41 | "sidewalk", 42 | "building", 43 | "wall", 44 | "fence", 45 | "pole", 46 | "light", 47 | "sign", 48 | "vegetation", 49 | "terrain", 50 | "sky", 51 | "person", 52 | "rider", 53 | "car", 54 | "truck", 55 | "bus", 56 | 
"train", 57 | "motocycle", 58 | "bicycle"], 59 | "palette":[ 60 | [128,64,128], 61 | [244,35,232], 62 | [70,70,70], 63 | [102,102,156], 64 | [190,153,153], 65 | [153,153,153], 66 | [250,170,30], 67 | [220,220,0], 68 | [107,142,35], 69 | [152,251,152], 70 | [70,130,180], 71 | [220,20,60], 72 | [255,0,0], 73 | [0,0,142], 74 | [0,0,70], 75 | [0,60,100], 76 | [0,80,100], 77 | [0,0,230], 78 | [119,11,32], 79 | [0,0,0]], 80 | "mean":[ 81 | 73.158359210711552, 82 | 82.908917542625858, 83 | 72.392398761941593], 84 | "std":[ 85 | 47.675755341814678, 86 | 48.494214368814916, 87 | 47.736546325441594] 88 | } 89 | -------------------------------------------------------------------------------- /util/loader/CityDemoLoader.py: -------------------------------------------------------------------------------- 1 | import os 2 | import os.path as osp 3 | import numpy as np 4 | import random 5 | import matplotlib.pyplot as plt 6 | import collections 7 | import torch 8 | import torchvision 9 | from torch.utils import data 10 | from PIL import Image, ImageFile 11 | from .augmentations import * 12 | ImageFile.LOAD_TRUNCATED_IMAGES = True 13 | 14 | valid_colors = [[128, 64, 128], 15 | [244, 35, 232], 16 | [ 70, 70, 70], 17 | [102, 102, 156], 18 | [190, 153, 153], 19 | [153, 153, 153], 20 | [250, 170, 30], 21 | [220, 220, 0], 22 | [107, 142, 35], 23 | [152, 251, 152], 24 | [ 70, 130, 180], 25 | [220, 20, 60], 26 | [255, 0, 0], 27 | [ 0, 0, 142], 28 | [ 0, 0, 70], 29 | [ 0, 60, 100], 30 | [ 0, 80, 100], 31 | [ 0, 0, 230], 32 | [119, 11, 32]] 33 | label_colours = dict(zip(range(19), valid_colors)) 34 | 35 | class CityDemoLoader(data.Dataset): 36 | def __init__(self, root, img_list_path, lbl_list_path, max_iters=None, crop_size=None, mean=(128, 128, 128), transform=None, set='val'): 37 | self.n_classes = 19 38 | self.root = root 39 | self.crop_size = crop_size 40 | self.mean = mean 41 | self.transform = transform 42 | # self.mean_bgr = np.array([104.00698793, 116.66876762, 122.67891434]) 43 | self.img_ids = [i_id.strip() for i_id in open(img_list_path)] 44 | self.lbl_ids = [i_id.strip() for i_id in open(lbl_list_path)] 45 | 46 | if not max_iters==None: 47 | self.img_ids = self.img_ids * int(np.ceil(float(max_iters) / len(self.img_ids))) 48 | self.lbl_ids = self.lbl_ids * int(np.ceil(float(max_iters) / len(self.lbl_ids))) 49 | 50 | self.files = [] 51 | self.id_to_trainid = {7: 0, 8 : 1, 11: 2, 12: 3, 13: 4 , 17: 5, 52 | 19: 6, 20: 7, 21: 8, 22: 9, 23: 10, 24: 11, 25: 12, 53 | 26: 13,27:14, 28:15, 31:16, 32: 17, 33: 18} 54 | self.set = set 55 | # for split in ["train", "trainval", "val"]: 56 | for img_name, lbl_name in zip(self.img_ids, self.lbl_ids): 57 | img_file = osp.join(self.root, "leftImg8bit/demoVideo/%s" % (img_name)) 58 | lbl_file = osp.join(self.root, "leftImg8bit/demoVideo/%s" % (img_name)) 59 | #lbl_file = osp.join(self.root, "gtFine/%s/%s" % (self.set, lbl_name)) 60 | self.files.append({ 61 | "img": img_file, 62 | "label": lbl_file, 63 | "name": img_name 64 | }) 65 | 66 | def __len__(self): 67 | return len(self.files) 68 | 69 | def __getitem__(self, index): 70 | datafiles = self.files[index] 71 | 72 | image = Image.open(datafiles["img"]).convert('RGB') 73 | label = Image.open(datafiles["label"]) 74 | name = datafiles["name"] 75 | 76 | # resize 77 | if self.crop_size != None: 78 | image = image.resize((self.crop_size[1], self.crop_size[0]), Image.BICUBIC) 79 | label = label.resize((self.crop_size[1], self.crop_size[0]), Image.NEAREST) 80 | # transform 81 | if self.transform != None: 82 | image, label = 
self.transform(image, label) 83 | 84 | image = np.asarray(image, np.float32) 85 | label = np.asarray(label, np.long) 86 | 87 | # re-assign labels to match the format of Cityscapes 88 | label_copy = 255 * np.ones(label.shape, dtype=np.long) 89 | for k, v in self.id_to_trainid.items(): 90 | label_copy[label == k] = v 91 | 92 | size = image.shape 93 | image = image[:, :, ::-1] # change to BGR 94 | image -= self.mean 95 | image = image.transpose((2, 0, 1)) / 128.0 96 | 97 | return image.copy(), label_copy.copy() 98 | 99 | def decode_segmap(self, img): 100 | map = np.zeros((img.shape[0], img.shape[1], img.shape[2], 3)) 101 | for idx in range(img.shape[0]): 102 | temp = img[idx, :, :] 103 | r = temp.copy() 104 | g = temp.copy() 105 | b = temp.copy() 106 | for l in range(0, self.n_classes): 107 | r[temp == l] = label_colours[l][0] 108 | g[temp == l] = label_colours[l][1] 109 | b[temp == l] = label_colours[l][2] 110 | 111 | rgb = np.zeros((temp.shape[0], temp.shape[1], 3)) 112 | rgb[:, :, 0] = r / 255.0 113 | rgb[:, :, 1] = g / 255.0 114 | rgb[:, :, 2] = b / 255.0 115 | map[idx, :, :, :] = rgb 116 | return map 117 | 118 | if __name__ == '__main__': 119 | dst = GTA5DataSet("./data", is_transform=True) 120 | trainloader = data.DataLoader(dst, batch_size=4) 121 | for i, data in enumerate(trainloader): 122 | imgs, labels = data 123 | if i == 0: 124 | img = torchvision.utils.make_grid(imgs).numpy() 125 | img = np.transpose(img, (1, 2, 0)) 126 | img = img[:, :, ::-1] 127 | plt.imshow(img) 128 | plt.show() 129 | -------------------------------------------------------------------------------- /util/loader/CityLoader.py: -------------------------------------------------------------------------------- 1 | import os 2 | import os.path as osp 3 | import numpy as np 4 | import random 5 | import matplotlib.pyplot as plt 6 | import collections 7 | import torch 8 | import torchvision 9 | from torch.utils import data 10 | from PIL import Image, ImageFile 11 | from .augmentations import * 12 | ImageFile.LOAD_TRUNCATED_IMAGES = True 13 | 14 | valid_colors = [[128, 64, 128], 15 | [244, 35, 232], 16 | [ 70, 70, 70], 17 | [102, 102, 156], 18 | [190, 153, 153], 19 | [153, 153, 153], 20 | [250, 170, 30], 21 | [220, 220, 0], 22 | [107, 142, 35], 23 | [152, 251, 152], 24 | [ 70, 130, 180], 25 | [220, 20, 60], 26 | [255, 0, 0], 27 | [ 0, 0, 142], 28 | [ 0, 0, 70], 29 | [ 0, 60, 100], 30 | [ 0, 80, 100], 31 | [ 0, 0, 230], 32 | [119, 11, 32]] 33 | label_colours = dict(zip(range(19), valid_colors)) 34 | 35 | class CityLoader(data.Dataset): 36 | def __init__(self, root, img_list_path, lbl_list_path, max_iters=None, crop_size=None, mean=(128, 128, 128), transform=None, set='val'): 37 | self.n_classes = 19 38 | self.root = root 39 | self.crop_size = crop_size 40 | self.mean = mean 41 | self.transform = transform 42 | # self.mean_bgr = np.array([104.00698793, 116.66876762, 122.67891434]) 43 | self.img_ids = [i_id.strip() for i_id in open(img_list_path)] 44 | self.lbl_ids = [i_id.strip() for i_id in open(lbl_list_path)] 45 | 46 | if not max_iters==None: 47 | self.img_ids = self.img_ids * int(np.ceil(float(max_iters) / len(self.img_ids))) 48 | self.lbl_ids = self.lbl_ids * int(np.ceil(float(max_iters) / len(self.lbl_ids))) 49 | 50 | self.files = [] 51 | self.id_to_trainid = {7: 0, 8 : 1, 11: 2, 12: 3, 13: 4 , 17: 5, 52 | 19: 6, 20: 7, 21: 8, 22: 9, 23: 10, 24: 11, 25: 12, 53 | 26: 13,27:14, 28:15, 31:16, 32: 17, 33: 18} 54 | self.set = set 55 | # for split in ["train", "trainval", "val"]: 56 | for img_name, lbl_name in 
zip(self.img_ids, self.lbl_ids): 57 | img_file = osp.join(self.root, "leftImg8bit/%s/%s" % (self.set, img_name)) 58 | lbl_file = osp.join(self.root, "gtFine/%s/%s" % (self.set, lbl_name)) 59 | self.files.append({ 60 | "img": img_file, 61 | "label": lbl_file, 62 | "name": img_name 63 | }) 64 | 65 | def __len__(self): 66 | return len(self.files) 67 | 68 | def __getitem__(self, index): 69 | datafiles = self.files[index] 70 | 71 | image = Image.open(datafiles["img"]).convert('RGB') 72 | label = Image.open(datafiles["label"]) 73 | name = datafiles["name"] 74 | 75 | # resize 76 | if self.crop_size != None: 77 | image = image.resize((self.crop_size[1], self.crop_size[0]), Image.BICUBIC) 78 | label = label.resize((self.crop_size[1], self.crop_size[0]), Image.NEAREST) 79 | # transform 80 | if self.transform != None: 81 | image, label = self.transform(image, label) 82 | 83 | image = np.asarray(image, np.float32) 84 | label = np.asarray(label, np.long) 85 | 86 | # re-assign labels to match the format of Cityscapes 87 | label_copy = 255 * np.ones(label.shape, dtype=np.long) 88 | for k, v in self.id_to_trainid.items(): 89 | label_copy[label == k] = v 90 | 91 | size = image.shape 92 | image = image[:, :, ::-1] # change to BGR 93 | image -= self.mean 94 | image = image.transpose((2, 0, 1)) / 128.0 95 | 96 | return image.copy(), label_copy.copy() 97 | 98 | def decode_segmap(self, img): 99 | map = np.zeros((img.shape[0], img.shape[1], img.shape[2], 3)) 100 | for idx in range(img.shape[0]): 101 | temp = img[idx, :, :] 102 | r = temp.copy() 103 | g = temp.copy() 104 | b = temp.copy() 105 | for l in range(0, self.n_classes): 106 | r[temp == l] = label_colours[l][0] 107 | g[temp == l] = label_colours[l][1] 108 | b[temp == l] = label_colours[l][2] 109 | 110 | rgb = np.zeros((temp.shape[0], temp.shape[1], 3)) 111 | rgb[:, :, 0] = r / 255.0 112 | rgb[:, :, 1] = g / 255.0 113 | rgb[:, :, 2] = b / 255.0 114 | map[idx, :, :, :] = rgb 115 | return map 116 | 117 | if __name__ == '__main__': 118 | dst = GTA5DataSet("./data", is_transform=True) 119 | trainloader = data.DataLoader(dst, batch_size=4) 120 | for i, data in enumerate(trainloader): 121 | imgs, labels = data 122 | if i == 0: 123 | img = torchvision.utils.make_grid(imgs).numpy() 124 | img = np.transpose(img, (1, 2, 0)) 125 | img = img[:, :, ::-1] 126 | plt.imshow(img) 127 | plt.show() 128 | -------------------------------------------------------------------------------- /util/loader/CityTestLoader.py: -------------------------------------------------------------------------------- 1 | import os 2 | import os.path as osp 3 | import numpy as np 4 | import random 5 | import matplotlib.pyplot as plt 6 | import collections 7 | import torch 8 | import torchvision 9 | from torch.utils import data 10 | from PIL import Image, ImageFile 11 | from .augmentations import * 12 | ImageFile.LOAD_TRUNCATED_IMAGES = True 13 | 14 | valid_colors = [[128, 64, 128], 15 | [244, 35, 232], 16 | [ 70, 70, 70], 17 | [102, 102, 156], 18 | [190, 153, 153], 19 | [153, 153, 153], 20 | [250, 170, 30], 21 | [220, 220, 0], 22 | [107, 142, 35], 23 | [152, 251, 152], 24 | [ 70, 130, 180], 25 | [220, 20, 60], 26 | [255, 0, 0], 27 | [ 0, 0, 142], 28 | [ 0, 0, 70], 29 | [ 0, 60, 100], 30 | [ 0, 80, 100], 31 | [ 0, 0, 230], 32 | [119, 11, 32]] 33 | label_colours = dict(zip(range(19), valid_colors)) 34 | 35 | class CityTestLoader(data.Dataset): 36 | def __init__(self, root, img_list_path, max_iters=None, crop_size=None, mean=(128, 128, 128), transform=None, set='val'): 37 | self.n_classes = 19 38 | 
self.root = root 39 | self.crop_size = crop_size 40 | self.mean = mean 41 | self.transform = transform 42 | # self.mean_bgr = np.array([104.00698793, 116.66876762, 122.67891434]) 43 | self.img_ids = [i_id.strip() for i_id in open(img_list_path)] 44 | 45 | if not max_iters==None: 46 | self.img_ids = self.img_ids * int(np.ceil(float(max_iters) / len(self.img_ids))) 47 | 48 | self.files = [] 49 | self.id_to_trainid = {7: 0, 8 : 1, 11: 2, 12: 3, 13: 4 , 17: 5, 50 | 19: 6, 20: 7, 21: 8, 22: 9, 23: 10, 24: 11, 25: 12, 51 | 26: 13,27:14, 28:15, 31:16, 32: 17, 33: 18} 52 | self.trainid_to_id = {0: 7, 1 : 8, 2: 11, 3: 12, 4: 13 , 5: 17, 53 | 6: 19, 7: 20, 8: 21, 9: 22, 10: 23, 11: 24, 12: 25, 54 | 13: 26, 14: 27, 15:28, 16:31, 17: 32, 18: 33} 55 | self.set = set 56 | # for split in ["train", "trainval", "val"]: 57 | for img_name in zip(self.img_ids): 58 | img_file = osp.join(self.root, "leftImg8bit/%s/%s" % (self.set, img_name[0])) 59 | self.files.append({ 60 | "img": img_file, 61 | "name": img_name 62 | }) 63 | 64 | def __len__(self): 65 | return len(self.files) 66 | 67 | def __getitem__(self, index): 68 | datafiles = self.files[index] 69 | 70 | image = Image.open(datafiles["img"]).convert('RGB') 71 | name = datafiles["name"] 72 | 73 | # resize 74 | if self.crop_size != None: 75 | image = image.resize((self.crop_size[1], self.crop_size[0]), Image.BICUBIC) 76 | # transform 77 | if self.transform != None: 78 | image = self.transform(image) 79 | 80 | image = np.asarray(image, np.float32) 81 | 82 | size = image.shape 83 | image = image[:, :, ::-1] # change to BGR 84 | image -= self.mean 85 | image = image.transpose((2, 0, 1)) / 128.0 86 | 87 | return image.copy(), name 88 | 89 | def decode_segmap(self, img): 90 | map = np.zeros((img.shape[0], img.shape[1], img.shape[2], 3)) 91 | for idx in range(img.shape[0]): 92 | temp = img[idx, :, :] 93 | r = temp.copy() 94 | g = temp.copy() 95 | b = temp.copy() 96 | for l in range(0, self.n_classes): 97 | r[temp == l] = label_colours[l][0] 98 | g[temp == l] = label_colours[l][1] 99 | b[temp == l] = label_colours[l][2] 100 | 101 | rgb = np.zeros((temp.shape[0], temp.shape[1], 3)) 102 | rgb[:, :, 0] = r / 255.0 103 | rgb[:, :, 1] = g / 255.0 104 | rgb[:, :, 2] = b / 255.0 105 | map[idx, :, :, :] = rgb 106 | return map 107 | def convert_back_to_id(self, pred): 108 | coverted_arr = np.zeros(pred.shape) 109 | for train_id in self.trainid_to_id.keys(): 110 | coverted_arr[pred==train_id] = self.trainid_to_id[train_id] 111 | return coverted_arr 112 | 113 | 114 | if __name__ == '__main__': 115 | dst = GTA5DataSet("./data", is_transform=True) 116 | trainloader = data.DataLoader(dst, batch_size=4) 117 | for i, data in enumerate(trainloader): 118 | imgs, labels = data 119 | if i == 0: 120 | img = torchvision.utils.make_grid(imgs).numpy() 121 | img = np.transpose(img, (1, 2, 0)) 122 | img = img[:, :, ::-1] 123 | plt.imshow(img) 124 | plt.show() 125 | -------------------------------------------------------------------------------- /util/loader/GTA5Loader.py: -------------------------------------------------------------------------------- 1 | import os 2 | import os.path as osp 3 | import numpy as np 4 | import random 5 | import matplotlib.pyplot as plt 6 | import collections 7 | import torch 8 | import torchvision 9 | from torch.utils import data 10 | from PIL import Image, ImageFile 11 | from .augmentations import * 12 | ImageFile.LOAD_TRUNCATED_IMAGES = True 13 | 14 | valid_colors = [[128, 64, 128], 15 | [244, 35, 232], 16 | [ 70, 70, 70], 17 | [102, 102, 156], 18 | [190, 
153, 153], 19 | [153, 153, 153], 20 | [250, 170, 30], 21 | [220, 220, 0], 22 | [107, 142, 35], 23 | [152, 251, 152], 24 | [ 70, 130, 180], 25 | [220, 20, 60], 26 | [255, 0, 0], 27 | [ 0, 0, 142], 28 | [ 0, 0, 70], 29 | [ 0, 60, 100], 30 | [ 0, 80, 100], 31 | [ 0, 0, 230], 32 | [119, 11, 32]] 33 | label_colours = dict(zip(range(19), valid_colors)) 34 | 35 | class GTA5Loader(data.Dataset): 36 | def __init__(self, root, list_path, max_iters=None, crop_size=None, mean=(128, 128, 128), transform=None): 37 | self.n_classes = 19 38 | self.root = root 39 | self.list_path = list_path 40 | self.crop_size = crop_size 41 | self.mean = mean 42 | self.transform = transform 43 | # self.mean_bgr = np.array([104.00698793, 116.66876762, 122.67891434]) 44 | self.img_ids = [i_id.strip() for i_id in open(list_path)] 45 | if not max_iters==None: 46 | self.img_ids = self.img_ids * int(np.ceil(float(max_iters) / len(self.img_ids))) 47 | self.files = [] 48 | 49 | self.id_to_trainid = {7: 0 , 8 : 1, 11: 2 , 12: 3 , 13: 4 , 17: 5, 50 | 19: 6 , 20: 7, 21: 8 , 22: 9 , 23: 10, 24: 11, 25: 12, 51 | 26: 13, 27: 14, 28: 15, 31: 16, 32: 17, 33: 18} 52 | 53 | # for split in ["train", "trainval", "val"]: 54 | for name in self.img_ids: 55 | img_file = osp.join(self.root, "images/%s" % name) 56 | label_file = osp.join(self.root, "labels/%s" % name) 57 | self.files.append({ 58 | "img": img_file, 59 | "label": label_file, 60 | "name": name 61 | }) 62 | 63 | def __len__(self): 64 | return len(self.files) 65 | 66 | 67 | def __getitem__(self, index): 68 | datafiles = self.files[index] 69 | 70 | image = Image.open(datafiles["img"]).convert('RGB') 71 | label = Image.open(datafiles["label"]) 72 | name = datafiles["name"] 73 | # resize 74 | if self.crop_size != None: 75 | image_PIL = image.resize((self.crop_size[1], self.crop_size[0]), Image.BICUBIC) 76 | label_PIL = label.resize((self.crop_size[1], self.crop_size[0]), Image.NEAREST) 77 | i_iter = 0 78 | while(1): 79 | i_iter = i_iter + 1 80 | if i_iter > 5: 81 | print (datafiles["img"]) 82 | break 83 | # transform 84 | if self.transform != None: 85 | image, label = self.transform(image_PIL, label_PIL) 86 | 87 | image = np.asarray(image, np.float32) 88 | label = np.asarray(label, np.long) 89 | 90 | # re-assign labels to match the format of Cityscapes 91 | label_copy = 255 * np.ones(label.shape, dtype=np.long) 92 | for k, v in self.id_to_trainid.items(): 93 | label_copy[label == k] = v 94 | 95 | label_cat, label_time = np.unique(label_copy, return_counts=True) 96 | label_p = 1.0* label_time/np.sum(label_time) 97 | pass_c, pass_t = np.unique(label_p>0.02, return_counts=True) 98 | if pass_c[-1] == True: 99 | if pass_t[-1] >= 3: 100 | break 101 | elif pass_t[-1] == 2: 102 | if not (label_cat[-1] == 255 and label_p[-1]>0.02): 103 | break 104 | size = image.shape 105 | image = image[:, :, ::-1] # change to BGR 106 | image -= self.mean 107 | image = image.transpose((2, 0, 1)) / 128.0 108 | 109 | return image.copy(), label_copy.copy() 110 | 111 | def decode_segmap(self, img): 112 | map = np.zeros((img.shape[0], img.shape[1], img.shape[2], 3)) 113 | for idx in range(img.shape[0]): 114 | temp = img[idx, :, :] 115 | r = temp.copy() 116 | g = temp.copy() 117 | b = temp.copy() 118 | for l in range(0, self.n_classes): 119 | r[temp == l] = label_colours[l][0] 120 | g[temp == l] = label_colours[l][1] 121 | b[temp == l] = label_colours[l][2] 122 | 123 | rgb = np.zeros((temp.shape[0], temp.shape[1], 3)) 124 | rgb[:, :, 0] = r / 255.0 125 | rgb[:, :, 1] = g / 255.0 126 | rgb[:, :, 2] = b / 255.0 127 | 
map[idx, :, :, :] = rgb 128 | return map 129 | 130 | if __name__ == '__main__': 131 | dst = GTA5DataSet("./data", is_transform=True) 132 | trainloader = data.DataLoader(dst, batch_size=4) 133 | for i, data in enumerate(trainloader): 134 | imgs, labels = data 135 | if i == 0: 136 | img = torchvision.utils.make_grid(imgs).numpy() 137 | img = np.transpose(img, (1, 2, 0)) 138 | img = img[:, :, ::-1] 139 | plt.imshow(img) 140 | plt.show() 141 | -------------------------------------------------------------------------------- /util/loader/SYNTHIALoader.py: -------------------------------------------------------------------------------- 1 | import os 2 | import os.path as osp 3 | import numpy as np 4 | import random 5 | import matplotlib.pyplot as plt 6 | import collections 7 | import torch 8 | import torchvision 9 | from torch.utils import data 10 | from PIL import Image, ImageFile 11 | from .augmentations import * 12 | import imageio 13 | ImageFile.LOAD_TRUNCATED_IMAGES = True 14 | 15 | valid_colors = [[128, 64, 128], # Road, 0 16 | [244, 35, 232], # Sidewalk, 1 17 | [ 70, 70, 70], # Building, 2 18 | [102, 102, 156], # Wall, 3 19 | [190, 153, 153], # Fence, 4 20 | [153, 153, 153], # pole, 5 21 | [250, 170, 30], # traffic light, 6 22 | [220, 220, 0], # traffic sign, 7 23 | [107, 142, 35], # vegetation, 8 24 | [152, 251, 152], # terrain, 9 25 | [ 70, 130, 180], # sky, 10 26 | [220, 20, 60], # person, 11 27 | [255, 0, 0], # rider, 12 28 | [ 0, 0, 142], # car, 13 29 | [ 0, 0, 70], # truck, 14 30 | [ 0, 60, 100], # bus, 15 31 | [ 0, 80, 100], # train, 16 32 | [ 0, 0, 230], # motor-bike, 17 33 | [119, 11, 32]] # bike, 18 34 | 35 | # in the 16-class setting: removing index 9, 14, 16 36 | 37 | label_colours = dict(zip(range(19), valid_colors)) 38 | 39 | class SYNTHIALoader(data.Dataset): 40 | def __init__(self, root, list_path, max_iters=None, crop_size=None, mean=(128, 128, 128), transform=None): 41 | self.n_classes = 16 42 | self.root = root 43 | self.list_path = list_path 44 | self.crop_size = crop_size 45 | self.mean = mean 46 | self.transform = transform 47 | # self.mean_bgr = np.array([104.00698793, 116.66876762, 122.67891434]) 48 | self.img_ids = [i_id.strip() for i_id in open(list_path)] 49 | if not max_iters==None: 50 | self.img_ids = self.img_ids * int(np.ceil(float(max_iters) / len(self.img_ids))) 51 | self.files = [] 52 | 53 | self.id_to_trainid = {3: 0 , 4 : 1, 2 : 2 , 21: 3 , 5 : 4 , 7 : 5, 54 | 15: 6 , 9 : 7, 6 : 8 , 16: 9 , 1 : 10, 10: 11, 17: 12, 55 | 8 : 13, 18: 14, 19: 15, 20: 16, 12: 17, 11: 18} 56 | # for split in ["train", "trainval", "val"]: 57 | for name in self.img_ids: 58 | img_file = osp.join(self.root, "RGB/%s" % name) 59 | label_file = osp.join(self.root, "GT/LABELS/%s" % name) 60 | self.files.append({ 61 | "img": img_file, 62 | "label": label_file, 63 | "name": name 64 | }) 65 | 66 | def __len__(self): 67 | return len(self.files) 68 | 69 | 70 | def __getitem__(self, index): 71 | datafiles = self.files[index] 72 | 73 | image = Image.open(datafiles["img"]).convert('RGB') 74 | #label = Image.open(datafiles["label"]).convert('RGB') 75 | label = np.asarray(imageio.imread(datafiles["label"], format='PNG-FI'))[:,:,0] # uint16 76 | label = Image.fromarray(label) 77 | name = datafiles["name"] 78 | # resize 79 | if self.crop_size != None: 80 | image_PIL = image.resize((self.crop_size[1], self.crop_size[0]), Image.BICUBIC) 81 | label_PIL = label.resize((self.crop_size[1], self.crop_size[0]), Image.NEAREST) 82 | i_iter = 0 83 | while(1): 84 | i_iter = i_iter + 1 85 | if i_iter > 5: 86 
| print (datafiles["img"]) 87 | break 88 | # transform 89 | if self.transform != None: 90 | image, label = self.transform(image_PIL, label_PIL) 91 | 92 | image = np.asarray(image, np.float32) 93 | label = np.asarray(label, np.long) 94 | 95 | # re-assign labels to match the format of Cityscapes 96 | label_copy = 255 * np.ones(label.shape, dtype=np.long) 97 | for k, v in self.id_to_trainid.items(): 98 | label_copy[label == k] = v 99 | 100 | label_cat, label_time = np.unique(label_copy, return_counts=True) 101 | label_p = 1.0* label_time/np.sum(label_time) 102 | pass_c, pass_t = np.unique(label_p>0.02, return_counts=True) 103 | if pass_c[-1] == True: 104 | if pass_t[-1] >= 3: 105 | break 106 | elif pass_t[-1] == 2: 107 | if not (label_cat[-1] == 255 and label_p[-1]>0.02): 108 | break 109 | size = image.shape 110 | image = image[:, :, ::-1] # change to BGR 111 | image -= self.mean 112 | image = image.transpose((2, 0, 1)) / 128.0 113 | 114 | return image.copy(), label_copy.copy() 115 | 116 | def decode_segmap(self, img): 117 | map = np.zeros((img.shape[0], img.shape[1], img.shape[2], 3)) 118 | for idx in range(img.shape[0]): 119 | temp = img[idx, :, :] 120 | r = temp.copy() 121 | g = temp.copy() 122 | b = temp.copy() 123 | for l in range(0, self.n_classes): 124 | r[temp == l] = label_colours[l][0] 125 | g[temp == l] = label_colours[l][1] 126 | b[temp == l] = label_colours[l][2] 127 | 128 | rgb = np.zeros((temp.shape[0], temp.shape[1], 3)) 129 | rgb[:, :, 0] = r / 255.0 130 | rgb[:, :, 1] = g / 255.0 131 | rgb[:, :, 2] = b / 255.0 132 | map[idx, :, :, :] = rgb 133 | return map 134 | 135 | if __name__ == '__main__': 136 | dst = GTA5DataSet("./data", is_transform=True) 137 | trainloader = data.DataLoader(dst, batch_size=4) 138 | for i, data in enumerate(trainloader): 139 | imgs, labels = data 140 | if i == 0: 141 | img = torchvision.utils.make_grid(imgs).numpy() 142 | img = np.transpose(img, (1, 2, 0)) 143 | img = img[:, :, ::-1] 144 | plt.imshow(img) 145 | plt.show() 146 | -------------------------------------------------------------------------------- /util/loader/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hui-po-wang/DISE-Domain-Invariant-Structure-Extraction/97c3940d54c451f92e4d40a3d642caf9c6ceca58/util/loader/__init__.py -------------------------------------------------------------------------------- /util/loader/augmentations.py: -------------------------------------------------------------------------------- 1 | # Adapted from https://github.com/ZijunDeng/pytorch-semantic-segmentation/blob/master/utils/joint_transforms.py 2 | 3 | import math 4 | import numbers 5 | import random 6 | import numpy as np 7 | 8 | from PIL import Image, ImageOps 9 | 10 | class Compose(object): 11 | def __init__(self, augmentations): 12 | self.augmentations = augmentations 13 | 14 | def __call__(self, img, mask): 15 | # img, mask = Image.fromarray(img, mode='RGB'), Image.fromarray(mask, mode='L') 16 | assert img.size == mask.size 17 | for a in self.augmentations: 18 | img, mask = a(img, mask) 19 | return np.array(img), np.array(mask, dtype=np.uint8) 20 | 21 | 22 | class RandomCrop(object): 23 | def __init__(self, size, padding=0): 24 | if isinstance(size, numbers.Number): 25 | self.size = (int(size), int(size)) 26 | else: 27 | self.size = size 28 | self.padding = padding 29 | 30 | def __call__(self, img, mask): 31 | if self.padding > 0: 32 | img = ImageOps.expand(img, border=self.padding, fill=0) 33 | mask = ImageOps.expand(mask, 
border=self.padding, fill=0) 34 | 35 | assert img.size == mask.size 36 | w, h = img.size 37 | th, tw = self.size 38 | if w == tw and h == th: 39 | return img, mask 40 | if w < tw or h < th: 41 | return img.resize((tw, th), Image.BILINEAR), mask.resize((tw, th), Image.NEAREST) 42 | 43 | x1 = random.randint(0, w - tw) 44 | y1 = random.randint(0, h - th) 45 | return img.crop((x1, y1, x1 + tw, y1 + th)), mask.crop((x1, y1, x1 + tw, y1 + th)) 46 | 47 | 48 | class CenterCrop(object): 49 | def __init__(self, size): 50 | if isinstance(size, numbers.Number): 51 | self.size = (int(size), int(size)) 52 | else: 53 | self.size = size 54 | 55 | def __call__(self, img, mask): 56 | assert img.size == mask.size 57 | w, h = img.size 58 | th, tw = self.size 59 | x1 = int(round((w - tw) / 2.)) 60 | y1 = int(round((h - th) / 2.)) 61 | return img.crop((x1, y1, x1 + tw, y1 + th)), mask.crop((x1, y1, x1 + tw, y1 + th)) 62 | 63 | 64 | class RandomHorizontallyFlip(object): 65 | def __call__(self, img, mask): 66 | if random.random() < 0.5: 67 | return img.transpose(Image.FLIP_LEFT_RIGHT), mask.transpose(Image.FLIP_LEFT_RIGHT) 68 | return img, mask 69 | 70 | 71 | class FreeScale(object): 72 | def __init__(self, size): 73 | self.size = tuple(reversed(size)) # size: (h, w) 74 | 75 | def __call__(self, img, mask): 76 | assert img.size == mask.size 77 | return img.resize(self.size, Image.BILINEAR), mask.resize(self.size, Image.NEAREST) 78 | 79 | 80 | class Scale(object): 81 | def __init__(self, size): 82 | self.size = size 83 | 84 | def __call__(self, img, mask): 85 | assert img.size == mask.size 86 | w, h = img.size 87 | if (w >= h and w == self.size) or (h >= w and h == self.size): 88 | return img, mask 89 | if w > h: 90 | ow = self.size 91 | oh = int(self.size * h / w) 92 | return img.resize((ow, oh), Image.BILINEAR), mask.resize((ow, oh), Image.NEAREST) 93 | else: 94 | oh = self.size 95 | ow = int(self.size * w / h) 96 | return img.resize((ow, oh), Image.BILINEAR), mask.resize((ow, oh), Image.NEAREST) 97 | 98 | 99 | class RandomSizedCrop(object): 100 | def __init__(self, size): 101 | self.size = size 102 | 103 | def __call__(self, img, mask): 104 | assert img.size == mask.size 105 | for attempt in range(10): 106 | area = img.size[0] * img.size[1] 107 | target_area = random.uniform(0.45, 1.0) * area 108 | aspect_ratio = random.uniform(0.5, 2) 109 | 110 | w = int(round(math.sqrt(target_area * aspect_ratio))) 111 | h = int(round(math.sqrt(target_area / aspect_ratio))) 112 | 113 | if random.random() < 0.5: 114 | w, h = h, w 115 | 116 | if w <= img.size[0] and h <= img.size[1]: 117 | x1 = random.randint(0, img.size[0] - w) 118 | y1 = random.randint(0, img.size[1] - h) 119 | 120 | img = img.crop((x1, y1, x1 + w, y1 + h)) 121 | mask = mask.crop((x1, y1, x1 + w, y1 + h)) 122 | assert (img.size == (w, h)) 123 | 124 | return img.resize((self.size, self.size), Image.BILINEAR), mask.resize((self.size, self.size), 125 | Image.NEAREST) 126 | 127 | # Fallback 128 | scale = Scale(self.size) 129 | crop = CenterCrop(self.size) 130 | return crop(*scale(img, mask)) 131 | 132 | 133 | class RandomRotate(object): 134 | def __init__(self, degree): 135 | self.degree = degree 136 | 137 | def __call__(self, img, mask): 138 | rotate_degree = random.random() * 2 * self.degree - self.degree 139 | return img.rotate(rotate_degree, Image.BILINEAR), mask.rotate(rotate_degree, Image.NEAREST) 140 | 141 | 142 | class RandomSized_and_Crop(object): 143 | def __init__(self, size): 144 | self.size = size 145 | # self.scale = Scale(self.size) 146 | self.crop 
= RandomCrop(self.size) 147 | 148 | def __call__(self, img, mask): 149 | assert img.size == mask.size 150 | 151 | w = int(random.uniform(0.7, 1) * img.size[0]) 152 | h = int(random.uniform(0.7, 1) * img.size[1]) 153 | 154 | img, mask = img.resize((w, h), Image.BILINEAR), mask.resize((w, h), Image.NEAREST) 155 | 156 | return self.crop(img, mask) 157 | -------------------------------------------------------------------------------- /util/loader/cityscapes_list/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hui-po-wang/DISE-Domain-Invariant-Structure-Extraction/97c3940d54c451f92e4d40a3d642caf9c6ceca58/util/loader/cityscapes_list/.DS_Store -------------------------------------------------------------------------------- /util/loader/cityscapes_list/info.json: -------------------------------------------------------------------------------- 1 | { 2 | "classes":19, 3 | "label2train":[ 4 | [0, 255], 5 | [1, 255], 6 | [2, 255], 7 | [3, 255], 8 | [4, 255], 9 | [5, 255], 10 | [6, 255], 11 | [7, 0], 12 | [8, 1], 13 | [9, 255], 14 | [10, 255], 15 | [11, 2], 16 | [12, 3], 17 | [13, 4], 18 | [14, 255], 19 | [15, 255], 20 | [16, 255], 21 | [17, 5], 22 | [18, 255], 23 | [19, 6], 24 | [20, 7], 25 | [21, 8], 26 | [22, 9], 27 | [23, 10], 28 | [24, 11], 29 | [25, 12], 30 | [26, 13], 31 | [27, 14], 32 | [28, 15], 33 | [29, 255], 34 | [30, 255], 35 | [31, 16], 36 | [32, 17], 37 | [33, 18], 38 | [-1, 255]], 39 | "label":[ 40 | "road", 41 | "sidewalk", 42 | "building", 43 | "wall", 44 | "fence", 45 | "pole", 46 | "light", 47 | "sign", 48 | "vegetation", 49 | "terrain", 50 | "sky", 51 | "person", 52 | "rider", 53 | "car", 54 | "truck", 55 | "bus", 56 | "train", 57 | "motocycle", 58 | "bicycle"], 59 | "palette":[ 60 | [128,64,128], 61 | [244,35,232], 62 | [70,70,70], 63 | [102,102,156], 64 | [190,153,153], 65 | [153,153,153], 66 | [250,170,30], 67 | [220,220,0], 68 | [107,142,35], 69 | [152,251,152], 70 | [70,130,180], 71 | [220,20,60], 72 | [255,0,0], 73 | [0,0,142], 74 | [0,0,70], 75 | [0,60,100], 76 | [0,80,100], 77 | [0,0,230], 78 | [119,11,32], 79 | [0,0,0]], 80 | "mean":[ 81 | 73.158359210711552, 82 | 82.908917542625858, 83 | 72.392398761941593], 84 | "std":[ 85 | 47.675755341814678, 86 | 48.494214368814916, 87 | 47.736546325441594] 88 | } 89 | -------------------------------------------------------------------------------- /util/loader/cityscapes_list/label.txt: -------------------------------------------------------------------------------- 1 | frankfurt/frankfurt_000001_007973_gtFine_labelIds.png 2 | frankfurt/frankfurt_000001_025921_gtFine_labelIds.png 3 | frankfurt/frankfurt_000001_062016_gtFine_labelIds.png 4 | frankfurt/frankfurt_000001_049078_gtFine_labelIds.png 5 | frankfurt/frankfurt_000000_009561_gtFine_labelIds.png 6 | frankfurt/frankfurt_000001_013710_gtFine_labelIds.png 7 | frankfurt/frankfurt_000001_041664_gtFine_labelIds.png 8 | frankfurt/frankfurt_000000_013240_gtFine_labelIds.png 9 | frankfurt/frankfurt_000001_044787_gtFine_labelIds.png 10 | frankfurt/frankfurt_000001_015328_gtFine_labelIds.png 11 | frankfurt/frankfurt_000001_073243_gtFine_labelIds.png 12 | frankfurt/frankfurt_000001_034816_gtFine_labelIds.png 13 | frankfurt/frankfurt_000001_041074_gtFine_labelIds.png 14 | frankfurt/frankfurt_000001_005898_gtFine_labelIds.png 15 | frankfurt/frankfurt_000000_022254_gtFine_labelIds.png 16 | frankfurt/frankfurt_000001_044658_gtFine_labelIds.png 17 | frankfurt/frankfurt_000001_009504_gtFine_labelIds.png 18 | 
frankfurt/frankfurt_000001_024927_gtFine_labelIds.png 19 | frankfurt/frankfurt_000001_017842_gtFine_labelIds.png 20 | frankfurt/frankfurt_000001_068208_gtFine_labelIds.png 21 | frankfurt/frankfurt_000001_013016_gtFine_labelIds.png 22 | frankfurt/frankfurt_000001_010156_gtFine_labelIds.png 23 | frankfurt/frankfurt_000000_002963_gtFine_labelIds.png 24 | frankfurt/frankfurt_000001_020693_gtFine_labelIds.png 25 | frankfurt/frankfurt_000001_078803_gtFine_labelIds.png 26 | frankfurt/frankfurt_000001_025713_gtFine_labelIds.png 27 | frankfurt/frankfurt_000001_007285_gtFine_labelIds.png 28 | frankfurt/frankfurt_000001_070099_gtFine_labelIds.png 29 | frankfurt/frankfurt_000000_009291_gtFine_labelIds.png 30 | frankfurt/frankfurt_000000_019607_gtFine_labelIds.png 31 | frankfurt/frankfurt_000001_068063_gtFine_labelIds.png 32 | frankfurt/frankfurt_000000_003920_gtFine_labelIds.png 33 | frankfurt/frankfurt_000001_077233_gtFine_labelIds.png 34 | frankfurt/frankfurt_000001_029086_gtFine_labelIds.png 35 | frankfurt/frankfurt_000001_060545_gtFine_labelIds.png 36 | frankfurt/frankfurt_000001_001464_gtFine_labelIds.png 37 | frankfurt/frankfurt_000001_028590_gtFine_labelIds.png 38 | frankfurt/frankfurt_000001_016462_gtFine_labelIds.png 39 | frankfurt/frankfurt_000001_060422_gtFine_labelIds.png 40 | frankfurt/frankfurt_000001_009058_gtFine_labelIds.png 41 | frankfurt/frankfurt_000001_080830_gtFine_labelIds.png 42 | frankfurt/frankfurt_000001_012870_gtFine_labelIds.png 43 | frankfurt/frankfurt_000001_077434_gtFine_labelIds.png 44 | frankfurt/frankfurt_000001_033655_gtFine_labelIds.png 45 | frankfurt/frankfurt_000001_051516_gtFine_labelIds.png 46 | frankfurt/frankfurt_000001_044413_gtFine_labelIds.png 47 | frankfurt/frankfurt_000001_055172_gtFine_labelIds.png 48 | frankfurt/frankfurt_000001_040575_gtFine_labelIds.png 49 | frankfurt/frankfurt_000000_020215_gtFine_labelIds.png 50 | frankfurt/frankfurt_000000_017228_gtFine_labelIds.png 51 | frankfurt/frankfurt_000001_041354_gtFine_labelIds.png 52 | frankfurt/frankfurt_000000_008206_gtFine_labelIds.png 53 | frankfurt/frankfurt_000001_043564_gtFine_labelIds.png 54 | frankfurt/frankfurt_000001_032711_gtFine_labelIds.png 55 | frankfurt/frankfurt_000001_064130_gtFine_labelIds.png 56 | frankfurt/frankfurt_000001_053102_gtFine_labelIds.png 57 | frankfurt/frankfurt_000001_082087_gtFine_labelIds.png 58 | frankfurt/frankfurt_000001_057478_gtFine_labelIds.png 59 | frankfurt/frankfurt_000001_007407_gtFine_labelIds.png 60 | frankfurt/frankfurt_000001_008200_gtFine_labelIds.png 61 | frankfurt/frankfurt_000001_038844_gtFine_labelIds.png 62 | frankfurt/frankfurt_000001_016029_gtFine_labelIds.png 63 | frankfurt/frankfurt_000001_058176_gtFine_labelIds.png 64 | frankfurt/frankfurt_000001_057181_gtFine_labelIds.png 65 | frankfurt/frankfurt_000001_039895_gtFine_labelIds.png 66 | frankfurt/frankfurt_000000_000294_gtFine_labelIds.png 67 | frankfurt/frankfurt_000001_055062_gtFine_labelIds.png 68 | frankfurt/frankfurt_000001_083029_gtFine_labelIds.png 69 | frankfurt/frankfurt_000001_010444_gtFine_labelIds.png 70 | frankfurt/frankfurt_000001_041517_gtFine_labelIds.png 71 | frankfurt/frankfurt_000001_069633_gtFine_labelIds.png 72 | frankfurt/frankfurt_000001_020287_gtFine_labelIds.png 73 | frankfurt/frankfurt_000001_012038_gtFine_labelIds.png 74 | frankfurt/frankfurt_000001_046504_gtFine_labelIds.png 75 | frankfurt/frankfurt_000001_032556_gtFine_labelIds.png 76 | frankfurt/frankfurt_000000_001751_gtFine_labelIds.png 77 | frankfurt/frankfurt_000001_000538_gtFine_labelIds.png 78 | 
frankfurt/frankfurt_000001_083852_gtFine_labelIds.png 79 | frankfurt/frankfurt_000001_077092_gtFine_labelIds.png 80 | frankfurt/frankfurt_000001_017101_gtFine_labelIds.png 81 | frankfurt/frankfurt_000001_044525_gtFine_labelIds.png 82 | frankfurt/frankfurt_000001_005703_gtFine_labelIds.png 83 | frankfurt/frankfurt_000001_080391_gtFine_labelIds.png 84 | frankfurt/frankfurt_000001_038418_gtFine_labelIds.png 85 | frankfurt/frankfurt_000001_066832_gtFine_labelIds.png 86 | frankfurt/frankfurt_000000_003357_gtFine_labelIds.png 87 | frankfurt/frankfurt_000000_020880_gtFine_labelIds.png 88 | frankfurt/frankfurt_000001_062396_gtFine_labelIds.png 89 | frankfurt/frankfurt_000001_046272_gtFine_labelIds.png 90 | frankfurt/frankfurt_000001_062509_gtFine_labelIds.png 91 | frankfurt/frankfurt_000001_054415_gtFine_labelIds.png 92 | frankfurt/frankfurt_000001_021406_gtFine_labelIds.png 93 | frankfurt/frankfurt_000001_030310_gtFine_labelIds.png 94 | frankfurt/frankfurt_000000_014480_gtFine_labelIds.png 95 | frankfurt/frankfurt_000001_005410_gtFine_labelIds.png 96 | frankfurt/frankfurt_000000_022797_gtFine_labelIds.png 97 | frankfurt/frankfurt_000001_035144_gtFine_labelIds.png 98 | frankfurt/frankfurt_000001_014565_gtFine_labelIds.png 99 | frankfurt/frankfurt_000001_065850_gtFine_labelIds.png 100 | frankfurt/frankfurt_000000_000576_gtFine_labelIds.png 101 | frankfurt/frankfurt_000001_065617_gtFine_labelIds.png 102 | frankfurt/frankfurt_000000_005543_gtFine_labelIds.png 103 | frankfurt/frankfurt_000001_055709_gtFine_labelIds.png 104 | frankfurt/frankfurt_000001_027325_gtFine_labelIds.png 105 | frankfurt/frankfurt_000001_011835_gtFine_labelIds.png 106 | frankfurt/frankfurt_000001_046779_gtFine_labelIds.png 107 | frankfurt/frankfurt_000001_064305_gtFine_labelIds.png 108 | frankfurt/frankfurt_000001_012738_gtFine_labelIds.png 109 | frankfurt/frankfurt_000001_048355_gtFine_labelIds.png 110 | frankfurt/frankfurt_000001_019969_gtFine_labelIds.png 111 | frankfurt/frankfurt_000001_080091_gtFine_labelIds.png 112 | frankfurt/frankfurt_000000_011007_gtFine_labelIds.png 113 | frankfurt/frankfurt_000000_015676_gtFine_labelIds.png 114 | frankfurt/frankfurt_000001_044227_gtFine_labelIds.png 115 | frankfurt/frankfurt_000001_055387_gtFine_labelIds.png 116 | frankfurt/frankfurt_000001_038245_gtFine_labelIds.png 117 | frankfurt/frankfurt_000001_059642_gtFine_labelIds.png 118 | frankfurt/frankfurt_000001_030669_gtFine_labelIds.png 119 | frankfurt/frankfurt_000001_068772_gtFine_labelIds.png 120 | frankfurt/frankfurt_000001_079206_gtFine_labelIds.png 121 | frankfurt/frankfurt_000001_055306_gtFine_labelIds.png 122 | frankfurt/frankfurt_000001_012699_gtFine_labelIds.png 123 | frankfurt/frankfurt_000001_042384_gtFine_labelIds.png 124 | frankfurt/frankfurt_000001_054077_gtFine_labelIds.png 125 | frankfurt/frankfurt_000001_010830_gtFine_labelIds.png 126 | frankfurt/frankfurt_000001_052120_gtFine_labelIds.png 127 | frankfurt/frankfurt_000001_032018_gtFine_labelIds.png 128 | frankfurt/frankfurt_000001_051737_gtFine_labelIds.png 129 | frankfurt/frankfurt_000001_028335_gtFine_labelIds.png 130 | frankfurt/frankfurt_000001_049770_gtFine_labelIds.png 131 | frankfurt/frankfurt_000001_054884_gtFine_labelIds.png 132 | frankfurt/frankfurt_000001_019698_gtFine_labelIds.png 133 | frankfurt/frankfurt_000000_011461_gtFine_labelIds.png 134 | frankfurt/frankfurt_000000_001016_gtFine_labelIds.png 135 | frankfurt/frankfurt_000001_062250_gtFine_labelIds.png 136 | frankfurt/frankfurt_000001_004736_gtFine_labelIds.png 137 | 
frankfurt/frankfurt_000001_068682_gtFine_labelIds.png 138 | frankfurt/frankfurt_000000_006589_gtFine_labelIds.png 139 | frankfurt/frankfurt_000000_011810_gtFine_labelIds.png 140 | frankfurt/frankfurt_000001_066574_gtFine_labelIds.png 141 | frankfurt/frankfurt_000001_048654_gtFine_labelIds.png 142 | frankfurt/frankfurt_000001_049209_gtFine_labelIds.png 143 | frankfurt/frankfurt_000001_042098_gtFine_labelIds.png 144 | frankfurt/frankfurt_000001_031416_gtFine_labelIds.png 145 | frankfurt/frankfurt_000000_009969_gtFine_labelIds.png 146 | frankfurt/frankfurt_000001_038645_gtFine_labelIds.png 147 | frankfurt/frankfurt_000001_020046_gtFine_labelIds.png 148 | frankfurt/frankfurt_000001_054219_gtFine_labelIds.png 149 | frankfurt/frankfurt_000001_002759_gtFine_labelIds.png 150 | frankfurt/frankfurt_000001_066438_gtFine_labelIds.png 151 | frankfurt/frankfurt_000000_020321_gtFine_labelIds.png 152 | frankfurt/frankfurt_000001_002646_gtFine_labelIds.png 153 | frankfurt/frankfurt_000001_046126_gtFine_labelIds.png 154 | frankfurt/frankfurt_000000_002196_gtFine_labelIds.png 155 | frankfurt/frankfurt_000001_057954_gtFine_labelIds.png 156 | frankfurt/frankfurt_000001_011715_gtFine_labelIds.png 157 | frankfurt/frankfurt_000000_021879_gtFine_labelIds.png 158 | frankfurt/frankfurt_000001_082466_gtFine_labelIds.png 159 | frankfurt/frankfurt_000000_003025_gtFine_labelIds.png 160 | frankfurt/frankfurt_000001_023369_gtFine_labelIds.png 161 | frankfurt/frankfurt_000001_061682_gtFine_labelIds.png 162 | frankfurt/frankfurt_000001_017459_gtFine_labelIds.png 163 | frankfurt/frankfurt_000001_059789_gtFine_labelIds.png 164 | frankfurt/frankfurt_000001_073464_gtFine_labelIds.png 165 | frankfurt/frankfurt_000001_063045_gtFine_labelIds.png 166 | frankfurt/frankfurt_000001_064651_gtFine_labelIds.png 167 | frankfurt/frankfurt_000000_013382_gtFine_labelIds.png 168 | frankfurt/frankfurt_000001_002512_gtFine_labelIds.png 169 | frankfurt/frankfurt_000001_032942_gtFine_labelIds.png 170 | frankfurt/frankfurt_000001_010600_gtFine_labelIds.png 171 | frankfurt/frankfurt_000001_030067_gtFine_labelIds.png 172 | frankfurt/frankfurt_000001_014741_gtFine_labelIds.png 173 | frankfurt/frankfurt_000000_021667_gtFine_labelIds.png 174 | frankfurt/frankfurt_000001_051807_gtFine_labelIds.png 175 | frankfurt/frankfurt_000001_019854_gtFine_labelIds.png 176 | frankfurt/frankfurt_000001_015768_gtFine_labelIds.png 177 | frankfurt/frankfurt_000001_007857_gtFine_labelIds.png 178 | frankfurt/frankfurt_000001_058914_gtFine_labelIds.png 179 | frankfurt/frankfurt_000000_012868_gtFine_labelIds.png 180 | frankfurt/frankfurt_000000_013942_gtFine_labelIds.png 181 | frankfurt/frankfurt_000001_014406_gtFine_labelIds.png 182 | frankfurt/frankfurt_000001_049298_gtFine_labelIds.png 183 | frankfurt/frankfurt_000001_023769_gtFine_labelIds.png 184 | frankfurt/frankfurt_000001_012519_gtFine_labelIds.png 185 | frankfurt/frankfurt_000001_064925_gtFine_labelIds.png 186 | frankfurt/frankfurt_000001_072295_gtFine_labelIds.png 187 | frankfurt/frankfurt_000001_058504_gtFine_labelIds.png 188 | frankfurt/frankfurt_000001_059119_gtFine_labelIds.png 189 | frankfurt/frankfurt_000001_015091_gtFine_labelIds.png 190 | frankfurt/frankfurt_000001_058057_gtFine_labelIds.png 191 | frankfurt/frankfurt_000001_003056_gtFine_labelIds.png 192 | frankfurt/frankfurt_000001_007622_gtFine_labelIds.png 193 | frankfurt/frankfurt_000001_016273_gtFine_labelIds.png 194 | frankfurt/frankfurt_000001_035864_gtFine_labelIds.png 195 | frankfurt/frankfurt_000001_067092_gtFine_labelIds.png 196 | 
frankfurt/frankfurt_000000_013067_gtFine_labelIds.png 197 | frankfurt/frankfurt_000001_067474_gtFine_labelIds.png 198 | frankfurt/frankfurt_000001_060135_gtFine_labelIds.png 199 | frankfurt/frankfurt_000000_018797_gtFine_labelIds.png 200 | frankfurt/frankfurt_000000_005898_gtFine_labelIds.png 201 | frankfurt/frankfurt_000001_055603_gtFine_labelIds.png 202 | frankfurt/frankfurt_000001_060906_gtFine_labelIds.png 203 | frankfurt/frankfurt_000001_062653_gtFine_labelIds.png 204 | frankfurt/frankfurt_000000_004617_gtFine_labelIds.png 205 | frankfurt/frankfurt_000001_055538_gtFine_labelIds.png 206 | frankfurt/frankfurt_000000_008451_gtFine_labelIds.png 207 | frankfurt/frankfurt_000001_052594_gtFine_labelIds.png 208 | frankfurt/frankfurt_000001_004327_gtFine_labelIds.png 209 | frankfurt/frankfurt_000001_075296_gtFine_labelIds.png 210 | frankfurt/frankfurt_000001_073088_gtFine_labelIds.png 211 | frankfurt/frankfurt_000001_005184_gtFine_labelIds.png 212 | frankfurt/frankfurt_000000_016286_gtFine_labelIds.png 213 | frankfurt/frankfurt_000001_008688_gtFine_labelIds.png 214 | frankfurt/frankfurt_000000_011074_gtFine_labelIds.png 215 | frankfurt/frankfurt_000001_056580_gtFine_labelIds.png 216 | frankfurt/frankfurt_000001_067735_gtFine_labelIds.png 217 | frankfurt/frankfurt_000001_034047_gtFine_labelIds.png 218 | frankfurt/frankfurt_000001_076502_gtFine_labelIds.png 219 | frankfurt/frankfurt_000001_071288_gtFine_labelIds.png 220 | frankfurt/frankfurt_000001_067295_gtFine_labelIds.png 221 | frankfurt/frankfurt_000001_071781_gtFine_labelIds.png 222 | frankfurt/frankfurt_000000_012121_gtFine_labelIds.png 223 | frankfurt/frankfurt_000001_004859_gtFine_labelIds.png 224 | frankfurt/frankfurt_000001_073911_gtFine_labelIds.png 225 | frankfurt/frankfurt_000001_047552_gtFine_labelIds.png 226 | frankfurt/frankfurt_000001_037705_gtFine_labelIds.png 227 | frankfurt/frankfurt_000001_025512_gtFine_labelIds.png 228 | frankfurt/frankfurt_000001_047178_gtFine_labelIds.png 229 | frankfurt/frankfurt_000001_014221_gtFine_labelIds.png 230 | frankfurt/frankfurt_000000_007365_gtFine_labelIds.png 231 | frankfurt/frankfurt_000001_049698_gtFine_labelIds.png 232 | frankfurt/frankfurt_000001_065160_gtFine_labelIds.png 233 | frankfurt/frankfurt_000001_061763_gtFine_labelIds.png 234 | frankfurt/frankfurt_000000_010351_gtFine_labelIds.png 235 | frankfurt/frankfurt_000001_072155_gtFine_labelIds.png 236 | frankfurt/frankfurt_000001_023235_gtFine_labelIds.png 237 | frankfurt/frankfurt_000000_015389_gtFine_labelIds.png 238 | frankfurt/frankfurt_000000_009688_gtFine_labelIds.png 239 | frankfurt/frankfurt_000000_016005_gtFine_labelIds.png 240 | frankfurt/frankfurt_000001_054640_gtFine_labelIds.png 241 | frankfurt/frankfurt_000001_029600_gtFine_labelIds.png 242 | frankfurt/frankfurt_000001_028232_gtFine_labelIds.png 243 | frankfurt/frankfurt_000001_050686_gtFine_labelIds.png 244 | frankfurt/frankfurt_000001_013496_gtFine_labelIds.png 245 | frankfurt/frankfurt_000001_066092_gtFine_labelIds.png 246 | frankfurt/frankfurt_000001_009854_gtFine_labelIds.png 247 | frankfurt/frankfurt_000001_067178_gtFine_labelIds.png 248 | frankfurt/frankfurt_000001_028854_gtFine_labelIds.png 249 | frankfurt/frankfurt_000001_083199_gtFine_labelIds.png 250 | frankfurt/frankfurt_000001_064798_gtFine_labelIds.png 251 | frankfurt/frankfurt_000001_018113_gtFine_labelIds.png 252 | frankfurt/frankfurt_000001_050149_gtFine_labelIds.png 253 | frankfurt/frankfurt_000001_048196_gtFine_labelIds.png 254 | frankfurt/frankfurt_000000_001236_gtFine_labelIds.png 255 | 
frankfurt/frankfurt_000000_017476_gtFine_labelIds.png 256 | frankfurt/frankfurt_000001_003588_gtFine_labelIds.png 257 | frankfurt/frankfurt_000001_021825_gtFine_labelIds.png 258 | frankfurt/frankfurt_000000_010763_gtFine_labelIds.png 259 | frankfurt/frankfurt_000001_062793_gtFine_labelIds.png 260 | frankfurt/frankfurt_000001_029236_gtFine_labelIds.png 261 | frankfurt/frankfurt_000001_075984_gtFine_labelIds.png 262 | frankfurt/frankfurt_000001_031266_gtFine_labelIds.png 263 | frankfurt/frankfurt_000001_043395_gtFine_labelIds.png 264 | frankfurt/frankfurt_000001_040732_gtFine_labelIds.png 265 | frankfurt/frankfurt_000001_011162_gtFine_labelIds.png 266 | frankfurt/frankfurt_000000_012009_gtFine_labelIds.png 267 | frankfurt/frankfurt_000001_042733_gtFine_labelIds.png 268 | lindau/lindau_000052_000019_gtFine_labelIds.png 269 | lindau/lindau_000009_000019_gtFine_labelIds.png 270 | lindau/lindau_000037_000019_gtFine_labelIds.png 271 | lindau/lindau_000047_000019_gtFine_labelIds.png 272 | lindau/lindau_000015_000019_gtFine_labelIds.png 273 | lindau/lindau_000030_000019_gtFine_labelIds.png 274 | lindau/lindau_000012_000019_gtFine_labelIds.png 275 | lindau/lindau_000032_000019_gtFine_labelIds.png 276 | lindau/lindau_000046_000019_gtFine_labelIds.png 277 | lindau/lindau_000000_000019_gtFine_labelIds.png 278 | lindau/lindau_000031_000019_gtFine_labelIds.png 279 | lindau/lindau_000011_000019_gtFine_labelIds.png 280 | lindau/lindau_000027_000019_gtFine_labelIds.png 281 | lindau/lindau_000054_000019_gtFine_labelIds.png 282 | lindau/lindau_000026_000019_gtFine_labelIds.png 283 | lindau/lindau_000017_000019_gtFine_labelIds.png 284 | lindau/lindau_000023_000019_gtFine_labelIds.png 285 | lindau/lindau_000005_000019_gtFine_labelIds.png 286 | lindau/lindau_000056_000019_gtFine_labelIds.png 287 | lindau/lindau_000025_000019_gtFine_labelIds.png 288 | lindau/lindau_000045_000019_gtFine_labelIds.png 289 | lindau/lindau_000014_000019_gtFine_labelIds.png 290 | lindau/lindau_000004_000019_gtFine_labelIds.png 291 | lindau/lindau_000021_000019_gtFine_labelIds.png 292 | lindau/lindau_000049_000019_gtFine_labelIds.png 293 | lindau/lindau_000033_000019_gtFine_labelIds.png 294 | lindau/lindau_000042_000019_gtFine_labelIds.png 295 | lindau/lindau_000013_000019_gtFine_labelIds.png 296 | lindau/lindau_000024_000019_gtFine_labelIds.png 297 | lindau/lindau_000002_000019_gtFine_labelIds.png 298 | lindau/lindau_000043_000019_gtFine_labelIds.png 299 | lindau/lindau_000016_000019_gtFine_labelIds.png 300 | lindau/lindau_000050_000019_gtFine_labelIds.png 301 | lindau/lindau_000018_000019_gtFine_labelIds.png 302 | lindau/lindau_000007_000019_gtFine_labelIds.png 303 | lindau/lindau_000048_000019_gtFine_labelIds.png 304 | lindau/lindau_000022_000019_gtFine_labelIds.png 305 | lindau/lindau_000053_000019_gtFine_labelIds.png 306 | lindau/lindau_000038_000019_gtFine_labelIds.png 307 | lindau/lindau_000001_000019_gtFine_labelIds.png 308 | lindau/lindau_000036_000019_gtFine_labelIds.png 309 | lindau/lindau_000035_000019_gtFine_labelIds.png 310 | lindau/lindau_000003_000019_gtFine_labelIds.png 311 | lindau/lindau_000034_000019_gtFine_labelIds.png 312 | lindau/lindau_000010_000019_gtFine_labelIds.png 313 | lindau/lindau_000055_000019_gtFine_labelIds.png 314 | lindau/lindau_000006_000019_gtFine_labelIds.png 315 | lindau/lindau_000019_000019_gtFine_labelIds.png 316 | lindau/lindau_000029_000019_gtFine_labelIds.png 317 | lindau/lindau_000039_000019_gtFine_labelIds.png 318 | lindau/lindau_000051_000019_gtFine_labelIds.png 319 | 
lindau/lindau_000020_000019_gtFine_labelIds.png 320 | lindau/lindau_000057_000019_gtFine_labelIds.png 321 | lindau/lindau_000041_000019_gtFine_labelIds.png 322 | lindau/lindau_000040_000019_gtFine_labelIds.png 323 | lindau/lindau_000044_000019_gtFine_labelIds.png 324 | lindau/lindau_000028_000019_gtFine_labelIds.png 325 | lindau/lindau_000058_000019_gtFine_labelIds.png 326 | lindau/lindau_000008_000019_gtFine_labelIds.png 327 | munster/munster_000000_000019_gtFine_labelIds.png 328 | munster/munster_000012_000019_gtFine_labelIds.png 329 | munster/munster_000032_000019_gtFine_labelIds.png 330 | munster/munster_000068_000019_gtFine_labelIds.png 331 | munster/munster_000101_000019_gtFine_labelIds.png 332 | munster/munster_000153_000019_gtFine_labelIds.png 333 | munster/munster_000115_000019_gtFine_labelIds.png 334 | munster/munster_000029_000019_gtFine_labelIds.png 335 | munster/munster_000019_000019_gtFine_labelIds.png 336 | munster/munster_000156_000019_gtFine_labelIds.png 337 | munster/munster_000129_000019_gtFine_labelIds.png 338 | munster/munster_000169_000019_gtFine_labelIds.png 339 | munster/munster_000150_000019_gtFine_labelIds.png 340 | munster/munster_000165_000019_gtFine_labelIds.png 341 | munster/munster_000050_000019_gtFine_labelIds.png 342 | munster/munster_000025_000019_gtFine_labelIds.png 343 | munster/munster_000116_000019_gtFine_labelIds.png 344 | munster/munster_000132_000019_gtFine_labelIds.png 345 | munster/munster_000066_000019_gtFine_labelIds.png 346 | munster/munster_000096_000019_gtFine_labelIds.png 347 | munster/munster_000030_000019_gtFine_labelIds.png 348 | munster/munster_000146_000019_gtFine_labelIds.png 349 | munster/munster_000098_000019_gtFine_labelIds.png 350 | munster/munster_000059_000019_gtFine_labelIds.png 351 | munster/munster_000093_000019_gtFine_labelIds.png 352 | munster/munster_000122_000019_gtFine_labelIds.png 353 | munster/munster_000024_000019_gtFine_labelIds.png 354 | munster/munster_000036_000019_gtFine_labelIds.png 355 | munster/munster_000086_000019_gtFine_labelIds.png 356 | munster/munster_000163_000019_gtFine_labelIds.png 357 | munster/munster_000001_000019_gtFine_labelIds.png 358 | munster/munster_000053_000019_gtFine_labelIds.png 359 | munster/munster_000071_000019_gtFine_labelIds.png 360 | munster/munster_000079_000019_gtFine_labelIds.png 361 | munster/munster_000159_000019_gtFine_labelIds.png 362 | munster/munster_000038_000019_gtFine_labelIds.png 363 | munster/munster_000138_000019_gtFine_labelIds.png 364 | munster/munster_000135_000019_gtFine_labelIds.png 365 | munster/munster_000065_000019_gtFine_labelIds.png 366 | munster/munster_000139_000019_gtFine_labelIds.png 367 | munster/munster_000108_000019_gtFine_labelIds.png 368 | munster/munster_000020_000019_gtFine_labelIds.png 369 | munster/munster_000074_000019_gtFine_labelIds.png 370 | munster/munster_000035_000019_gtFine_labelIds.png 371 | munster/munster_000067_000019_gtFine_labelIds.png 372 | munster/munster_000151_000019_gtFine_labelIds.png 373 | munster/munster_000083_000019_gtFine_labelIds.png 374 | munster/munster_000118_000019_gtFine_labelIds.png 375 | munster/munster_000046_000019_gtFine_labelIds.png 376 | munster/munster_000147_000019_gtFine_labelIds.png 377 | munster/munster_000047_000019_gtFine_labelIds.png 378 | munster/munster_000043_000019_gtFine_labelIds.png 379 | munster/munster_000168_000019_gtFine_labelIds.png 380 | munster/munster_000167_000019_gtFine_labelIds.png 381 | munster/munster_000021_000019_gtFine_labelIds.png 382 | 
munster/munster_000073_000019_gtFine_labelIds.png 383 | munster/munster_000089_000019_gtFine_labelIds.png 384 | munster/munster_000060_000019_gtFine_labelIds.png 385 | munster/munster_000155_000019_gtFine_labelIds.png 386 | munster/munster_000140_000019_gtFine_labelIds.png 387 | munster/munster_000145_000019_gtFine_labelIds.png 388 | munster/munster_000077_000019_gtFine_labelIds.png 389 | munster/munster_000018_000019_gtFine_labelIds.png 390 | munster/munster_000045_000019_gtFine_labelIds.png 391 | munster/munster_000166_000019_gtFine_labelIds.png 392 | munster/munster_000037_000019_gtFine_labelIds.png 393 | munster/munster_000112_000019_gtFine_labelIds.png 394 | munster/munster_000080_000019_gtFine_labelIds.png 395 | munster/munster_000144_000019_gtFine_labelIds.png 396 | munster/munster_000142_000019_gtFine_labelIds.png 397 | munster/munster_000070_000019_gtFine_labelIds.png 398 | munster/munster_000044_000019_gtFine_labelIds.png 399 | munster/munster_000137_000019_gtFine_labelIds.png 400 | munster/munster_000041_000019_gtFine_labelIds.png 401 | munster/munster_000113_000019_gtFine_labelIds.png 402 | munster/munster_000075_000019_gtFine_labelIds.png 403 | munster/munster_000157_000019_gtFine_labelIds.png 404 | munster/munster_000158_000019_gtFine_labelIds.png 405 | munster/munster_000109_000019_gtFine_labelIds.png 406 | munster/munster_000033_000019_gtFine_labelIds.png 407 | munster/munster_000088_000019_gtFine_labelIds.png 408 | munster/munster_000090_000019_gtFine_labelIds.png 409 | munster/munster_000114_000019_gtFine_labelIds.png 410 | munster/munster_000171_000019_gtFine_labelIds.png 411 | munster/munster_000013_000019_gtFine_labelIds.png 412 | munster/munster_000130_000019_gtFine_labelIds.png 413 | munster/munster_000016_000019_gtFine_labelIds.png 414 | munster/munster_000136_000019_gtFine_labelIds.png 415 | munster/munster_000007_000019_gtFine_labelIds.png 416 | munster/munster_000014_000019_gtFine_labelIds.png 417 | munster/munster_000052_000019_gtFine_labelIds.png 418 | munster/munster_000104_000019_gtFine_labelIds.png 419 | munster/munster_000173_000019_gtFine_labelIds.png 420 | munster/munster_000057_000019_gtFine_labelIds.png 421 | munster/munster_000072_000019_gtFine_labelIds.png 422 | munster/munster_000003_000019_gtFine_labelIds.png 423 | munster/munster_000161_000019_gtFine_labelIds.png 424 | munster/munster_000002_000019_gtFine_labelIds.png 425 | munster/munster_000028_000019_gtFine_labelIds.png 426 | munster/munster_000051_000019_gtFine_labelIds.png 427 | munster/munster_000105_000019_gtFine_labelIds.png 428 | munster/munster_000061_000019_gtFine_labelIds.png 429 | munster/munster_000058_000019_gtFine_labelIds.png 430 | munster/munster_000094_000019_gtFine_labelIds.png 431 | munster/munster_000027_000019_gtFine_labelIds.png 432 | munster/munster_000062_000019_gtFine_labelIds.png 433 | munster/munster_000127_000019_gtFine_labelIds.png 434 | munster/munster_000110_000019_gtFine_labelIds.png 435 | munster/munster_000170_000019_gtFine_labelIds.png 436 | munster/munster_000023_000019_gtFine_labelIds.png 437 | munster/munster_000084_000019_gtFine_labelIds.png 438 | munster/munster_000121_000019_gtFine_labelIds.png 439 | munster/munster_000087_000019_gtFine_labelIds.png 440 | munster/munster_000097_000019_gtFine_labelIds.png 441 | munster/munster_000119_000019_gtFine_labelIds.png 442 | munster/munster_000128_000019_gtFine_labelIds.png 443 | munster/munster_000078_000019_gtFine_labelIds.png 444 | munster/munster_000010_000019_gtFine_labelIds.png 445 | 
munster/munster_000015_000019_gtFine_labelIds.png 446 | munster/munster_000048_000019_gtFine_labelIds.png 447 | munster/munster_000085_000019_gtFine_labelIds.png 448 | munster/munster_000164_000019_gtFine_labelIds.png 449 | munster/munster_000111_000019_gtFine_labelIds.png 450 | munster/munster_000099_000019_gtFine_labelIds.png 451 | munster/munster_000117_000019_gtFine_labelIds.png 452 | munster/munster_000009_000019_gtFine_labelIds.png 453 | munster/munster_000049_000019_gtFine_labelIds.png 454 | munster/munster_000148_000019_gtFine_labelIds.png 455 | munster/munster_000022_000019_gtFine_labelIds.png 456 | munster/munster_000131_000019_gtFine_labelIds.png 457 | munster/munster_000006_000019_gtFine_labelIds.png 458 | munster/munster_000005_000019_gtFine_labelIds.png 459 | munster/munster_000102_000019_gtFine_labelIds.png 460 | munster/munster_000160_000019_gtFine_labelIds.png 461 | munster/munster_000107_000019_gtFine_labelIds.png 462 | munster/munster_000095_000019_gtFine_labelIds.png 463 | munster/munster_000106_000019_gtFine_labelIds.png 464 | munster/munster_000034_000019_gtFine_labelIds.png 465 | munster/munster_000143_000019_gtFine_labelIds.png 466 | munster/munster_000017_000019_gtFine_labelIds.png 467 | munster/munster_000040_000019_gtFine_labelIds.png 468 | munster/munster_000152_000019_gtFine_labelIds.png 469 | munster/munster_000154_000019_gtFine_labelIds.png 470 | munster/munster_000100_000019_gtFine_labelIds.png 471 | munster/munster_000004_000019_gtFine_labelIds.png 472 | munster/munster_000141_000019_gtFine_labelIds.png 473 | munster/munster_000011_000019_gtFine_labelIds.png 474 | munster/munster_000055_000019_gtFine_labelIds.png 475 | munster/munster_000134_000019_gtFine_labelIds.png 476 | munster/munster_000054_000019_gtFine_labelIds.png 477 | munster/munster_000064_000019_gtFine_labelIds.png 478 | munster/munster_000039_000019_gtFine_labelIds.png 479 | munster/munster_000103_000019_gtFine_labelIds.png 480 | munster/munster_000092_000019_gtFine_labelIds.png 481 | munster/munster_000172_000019_gtFine_labelIds.png 482 | munster/munster_000042_000019_gtFine_labelIds.png 483 | munster/munster_000124_000019_gtFine_labelIds.png 484 | munster/munster_000069_000019_gtFine_labelIds.png 485 | munster/munster_000026_000019_gtFine_labelIds.png 486 | munster/munster_000120_000019_gtFine_labelIds.png 487 | munster/munster_000031_000019_gtFine_labelIds.png 488 | munster/munster_000162_000019_gtFine_labelIds.png 489 | munster/munster_000056_000019_gtFine_labelIds.png 490 | munster/munster_000081_000019_gtFine_labelIds.png 491 | munster/munster_000123_000019_gtFine_labelIds.png 492 | munster/munster_000125_000019_gtFine_labelIds.png 493 | munster/munster_000082_000019_gtFine_labelIds.png 494 | munster/munster_000133_000019_gtFine_labelIds.png 495 | munster/munster_000126_000019_gtFine_labelIds.png 496 | munster/munster_000063_000019_gtFine_labelIds.png 497 | munster/munster_000008_000019_gtFine_labelIds.png 498 | munster/munster_000149_000019_gtFine_labelIds.png 499 | munster/munster_000076_000019_gtFine_labelIds.png 500 | munster/munster_000091_000019_gtFine_labelIds.png 501 | -------------------------------------------------------------------------------- /util/loader/cityscapes_list/val.txt: -------------------------------------------------------------------------------- 1 | frankfurt/frankfurt_000001_007973_leftImg8bit.png 2 | frankfurt/frankfurt_000001_025921_leftImg8bit.png 3 | frankfurt/frankfurt_000001_062016_leftImg8bit.png 4 | frankfurt/frankfurt_000001_049078_leftImg8bit.png 5 
| frankfurt/frankfurt_000000_009561_leftImg8bit.png 6 | frankfurt/frankfurt_000001_013710_leftImg8bit.png 7 | frankfurt/frankfurt_000001_041664_leftImg8bit.png 8 | frankfurt/frankfurt_000000_013240_leftImg8bit.png 9 | frankfurt/frankfurt_000001_044787_leftImg8bit.png 10 | frankfurt/frankfurt_000001_015328_leftImg8bit.png 11 | frankfurt/frankfurt_000001_073243_leftImg8bit.png 12 | frankfurt/frankfurt_000001_034816_leftImg8bit.png 13 | frankfurt/frankfurt_000001_041074_leftImg8bit.png 14 | frankfurt/frankfurt_000001_005898_leftImg8bit.png 15 | frankfurt/frankfurt_000000_022254_leftImg8bit.png 16 | frankfurt/frankfurt_000001_044658_leftImg8bit.png 17 | frankfurt/frankfurt_000001_009504_leftImg8bit.png 18 | frankfurt/frankfurt_000001_024927_leftImg8bit.png 19 | frankfurt/frankfurt_000001_017842_leftImg8bit.png 20 | frankfurt/frankfurt_000001_068208_leftImg8bit.png 21 | frankfurt/frankfurt_000001_013016_leftImg8bit.png 22 | frankfurt/frankfurt_000001_010156_leftImg8bit.png 23 | frankfurt/frankfurt_000000_002963_leftImg8bit.png 24 | frankfurt/frankfurt_000001_020693_leftImg8bit.png 25 | frankfurt/frankfurt_000001_078803_leftImg8bit.png 26 | frankfurt/frankfurt_000001_025713_leftImg8bit.png 27 | frankfurt/frankfurt_000001_007285_leftImg8bit.png 28 | frankfurt/frankfurt_000001_070099_leftImg8bit.png 29 | frankfurt/frankfurt_000000_009291_leftImg8bit.png 30 | frankfurt/frankfurt_000000_019607_leftImg8bit.png 31 | frankfurt/frankfurt_000001_068063_leftImg8bit.png 32 | frankfurt/frankfurt_000000_003920_leftImg8bit.png 33 | frankfurt/frankfurt_000001_077233_leftImg8bit.png 34 | frankfurt/frankfurt_000001_029086_leftImg8bit.png 35 | frankfurt/frankfurt_000001_060545_leftImg8bit.png 36 | frankfurt/frankfurt_000001_001464_leftImg8bit.png 37 | frankfurt/frankfurt_000001_028590_leftImg8bit.png 38 | frankfurt/frankfurt_000001_016462_leftImg8bit.png 39 | frankfurt/frankfurt_000001_060422_leftImg8bit.png 40 | frankfurt/frankfurt_000001_009058_leftImg8bit.png 41 | frankfurt/frankfurt_000001_080830_leftImg8bit.png 42 | frankfurt/frankfurt_000001_012870_leftImg8bit.png 43 | frankfurt/frankfurt_000001_077434_leftImg8bit.png 44 | frankfurt/frankfurt_000001_033655_leftImg8bit.png 45 | frankfurt/frankfurt_000001_051516_leftImg8bit.png 46 | frankfurt/frankfurt_000001_044413_leftImg8bit.png 47 | frankfurt/frankfurt_000001_055172_leftImg8bit.png 48 | frankfurt/frankfurt_000001_040575_leftImg8bit.png 49 | frankfurt/frankfurt_000000_020215_leftImg8bit.png 50 | frankfurt/frankfurt_000000_017228_leftImg8bit.png 51 | frankfurt/frankfurt_000001_041354_leftImg8bit.png 52 | frankfurt/frankfurt_000000_008206_leftImg8bit.png 53 | frankfurt/frankfurt_000001_043564_leftImg8bit.png 54 | frankfurt/frankfurt_000001_032711_leftImg8bit.png 55 | frankfurt/frankfurt_000001_064130_leftImg8bit.png 56 | frankfurt/frankfurt_000001_053102_leftImg8bit.png 57 | frankfurt/frankfurt_000001_082087_leftImg8bit.png 58 | frankfurt/frankfurt_000001_057478_leftImg8bit.png 59 | frankfurt/frankfurt_000001_007407_leftImg8bit.png 60 | frankfurt/frankfurt_000001_008200_leftImg8bit.png 61 | frankfurt/frankfurt_000001_038844_leftImg8bit.png 62 | frankfurt/frankfurt_000001_016029_leftImg8bit.png 63 | frankfurt/frankfurt_000001_058176_leftImg8bit.png 64 | frankfurt/frankfurt_000001_057181_leftImg8bit.png 65 | frankfurt/frankfurt_000001_039895_leftImg8bit.png 66 | frankfurt/frankfurt_000000_000294_leftImg8bit.png 67 | frankfurt/frankfurt_000001_055062_leftImg8bit.png 68 | frankfurt/frankfurt_000001_083029_leftImg8bit.png 69 | 
frankfurt/frankfurt_000001_010444_leftImg8bit.png 70 | frankfurt/frankfurt_000001_041517_leftImg8bit.png 71 | frankfurt/frankfurt_000001_069633_leftImg8bit.png 72 | frankfurt/frankfurt_000001_020287_leftImg8bit.png 73 | frankfurt/frankfurt_000001_012038_leftImg8bit.png 74 | frankfurt/frankfurt_000001_046504_leftImg8bit.png 75 | frankfurt/frankfurt_000001_032556_leftImg8bit.png 76 | frankfurt/frankfurt_000000_001751_leftImg8bit.png 77 | frankfurt/frankfurt_000001_000538_leftImg8bit.png 78 | frankfurt/frankfurt_000001_083852_leftImg8bit.png 79 | frankfurt/frankfurt_000001_077092_leftImg8bit.png 80 | frankfurt/frankfurt_000001_017101_leftImg8bit.png 81 | frankfurt/frankfurt_000001_044525_leftImg8bit.png 82 | frankfurt/frankfurt_000001_005703_leftImg8bit.png 83 | frankfurt/frankfurt_000001_080391_leftImg8bit.png 84 | frankfurt/frankfurt_000001_038418_leftImg8bit.png 85 | frankfurt/frankfurt_000001_066832_leftImg8bit.png 86 | frankfurt/frankfurt_000000_003357_leftImg8bit.png 87 | frankfurt/frankfurt_000000_020880_leftImg8bit.png 88 | frankfurt/frankfurt_000001_062396_leftImg8bit.png 89 | frankfurt/frankfurt_000001_046272_leftImg8bit.png 90 | frankfurt/frankfurt_000001_062509_leftImg8bit.png 91 | frankfurt/frankfurt_000001_054415_leftImg8bit.png 92 | frankfurt/frankfurt_000001_021406_leftImg8bit.png 93 | frankfurt/frankfurt_000001_030310_leftImg8bit.png 94 | frankfurt/frankfurt_000000_014480_leftImg8bit.png 95 | frankfurt/frankfurt_000001_005410_leftImg8bit.png 96 | frankfurt/frankfurt_000000_022797_leftImg8bit.png 97 | frankfurt/frankfurt_000001_035144_leftImg8bit.png 98 | frankfurt/frankfurt_000001_014565_leftImg8bit.png 99 | frankfurt/frankfurt_000001_065850_leftImg8bit.png 100 | frankfurt/frankfurt_000000_000576_leftImg8bit.png 101 | frankfurt/frankfurt_000001_065617_leftImg8bit.png 102 | frankfurt/frankfurt_000000_005543_leftImg8bit.png 103 | frankfurt/frankfurt_000001_055709_leftImg8bit.png 104 | frankfurt/frankfurt_000001_027325_leftImg8bit.png 105 | frankfurt/frankfurt_000001_011835_leftImg8bit.png 106 | frankfurt/frankfurt_000001_046779_leftImg8bit.png 107 | frankfurt/frankfurt_000001_064305_leftImg8bit.png 108 | frankfurt/frankfurt_000001_012738_leftImg8bit.png 109 | frankfurt/frankfurt_000001_048355_leftImg8bit.png 110 | frankfurt/frankfurt_000001_019969_leftImg8bit.png 111 | frankfurt/frankfurt_000001_080091_leftImg8bit.png 112 | frankfurt/frankfurt_000000_011007_leftImg8bit.png 113 | frankfurt/frankfurt_000000_015676_leftImg8bit.png 114 | frankfurt/frankfurt_000001_044227_leftImg8bit.png 115 | frankfurt/frankfurt_000001_055387_leftImg8bit.png 116 | frankfurt/frankfurt_000001_038245_leftImg8bit.png 117 | frankfurt/frankfurt_000001_059642_leftImg8bit.png 118 | frankfurt/frankfurt_000001_030669_leftImg8bit.png 119 | frankfurt/frankfurt_000001_068772_leftImg8bit.png 120 | frankfurt/frankfurt_000001_079206_leftImg8bit.png 121 | frankfurt/frankfurt_000001_055306_leftImg8bit.png 122 | frankfurt/frankfurt_000001_012699_leftImg8bit.png 123 | frankfurt/frankfurt_000001_042384_leftImg8bit.png 124 | frankfurt/frankfurt_000001_054077_leftImg8bit.png 125 | frankfurt/frankfurt_000001_010830_leftImg8bit.png 126 | frankfurt/frankfurt_000001_052120_leftImg8bit.png 127 | frankfurt/frankfurt_000001_032018_leftImg8bit.png 128 | frankfurt/frankfurt_000001_051737_leftImg8bit.png 129 | frankfurt/frankfurt_000001_028335_leftImg8bit.png 130 | frankfurt/frankfurt_000001_049770_leftImg8bit.png 131 | frankfurt/frankfurt_000001_054884_leftImg8bit.png 132 | frankfurt/frankfurt_000001_019698_leftImg8bit.png 133 | 
frankfurt/frankfurt_000000_011461_leftImg8bit.png 134 | frankfurt/frankfurt_000000_001016_leftImg8bit.png 135 | frankfurt/frankfurt_000001_062250_leftImg8bit.png 136 | frankfurt/frankfurt_000001_004736_leftImg8bit.png 137 | frankfurt/frankfurt_000001_068682_leftImg8bit.png 138 | frankfurt/frankfurt_000000_006589_leftImg8bit.png 139 | frankfurt/frankfurt_000000_011810_leftImg8bit.png 140 | frankfurt/frankfurt_000001_066574_leftImg8bit.png 141 | frankfurt/frankfurt_000001_048654_leftImg8bit.png 142 | frankfurt/frankfurt_000001_049209_leftImg8bit.png 143 | frankfurt/frankfurt_000001_042098_leftImg8bit.png 144 | frankfurt/frankfurt_000001_031416_leftImg8bit.png 145 | frankfurt/frankfurt_000000_009969_leftImg8bit.png 146 | frankfurt/frankfurt_000001_038645_leftImg8bit.png 147 | frankfurt/frankfurt_000001_020046_leftImg8bit.png 148 | frankfurt/frankfurt_000001_054219_leftImg8bit.png 149 | frankfurt/frankfurt_000001_002759_leftImg8bit.png 150 | frankfurt/frankfurt_000001_066438_leftImg8bit.png 151 | frankfurt/frankfurt_000000_020321_leftImg8bit.png 152 | frankfurt/frankfurt_000001_002646_leftImg8bit.png 153 | frankfurt/frankfurt_000001_046126_leftImg8bit.png 154 | frankfurt/frankfurt_000000_002196_leftImg8bit.png 155 | frankfurt/frankfurt_000001_057954_leftImg8bit.png 156 | frankfurt/frankfurt_000001_011715_leftImg8bit.png 157 | frankfurt/frankfurt_000000_021879_leftImg8bit.png 158 | frankfurt/frankfurt_000001_082466_leftImg8bit.png 159 | frankfurt/frankfurt_000000_003025_leftImg8bit.png 160 | frankfurt/frankfurt_000001_023369_leftImg8bit.png 161 | frankfurt/frankfurt_000001_061682_leftImg8bit.png 162 | frankfurt/frankfurt_000001_017459_leftImg8bit.png 163 | frankfurt/frankfurt_000001_059789_leftImg8bit.png 164 | frankfurt/frankfurt_000001_073464_leftImg8bit.png 165 | frankfurt/frankfurt_000001_063045_leftImg8bit.png 166 | frankfurt/frankfurt_000001_064651_leftImg8bit.png 167 | frankfurt/frankfurt_000000_013382_leftImg8bit.png 168 | frankfurt/frankfurt_000001_002512_leftImg8bit.png 169 | frankfurt/frankfurt_000001_032942_leftImg8bit.png 170 | frankfurt/frankfurt_000001_010600_leftImg8bit.png 171 | frankfurt/frankfurt_000001_030067_leftImg8bit.png 172 | frankfurt/frankfurt_000001_014741_leftImg8bit.png 173 | frankfurt/frankfurt_000000_021667_leftImg8bit.png 174 | frankfurt/frankfurt_000001_051807_leftImg8bit.png 175 | frankfurt/frankfurt_000001_019854_leftImg8bit.png 176 | frankfurt/frankfurt_000001_015768_leftImg8bit.png 177 | frankfurt/frankfurt_000001_007857_leftImg8bit.png 178 | frankfurt/frankfurt_000001_058914_leftImg8bit.png 179 | frankfurt/frankfurt_000000_012868_leftImg8bit.png 180 | frankfurt/frankfurt_000000_013942_leftImg8bit.png 181 | frankfurt/frankfurt_000001_014406_leftImg8bit.png 182 | frankfurt/frankfurt_000001_049298_leftImg8bit.png 183 | frankfurt/frankfurt_000001_023769_leftImg8bit.png 184 | frankfurt/frankfurt_000001_012519_leftImg8bit.png 185 | frankfurt/frankfurt_000001_064925_leftImg8bit.png 186 | frankfurt/frankfurt_000001_072295_leftImg8bit.png 187 | frankfurt/frankfurt_000001_058504_leftImg8bit.png 188 | frankfurt/frankfurt_000001_059119_leftImg8bit.png 189 | frankfurt/frankfurt_000001_015091_leftImg8bit.png 190 | frankfurt/frankfurt_000001_058057_leftImg8bit.png 191 | frankfurt/frankfurt_000001_003056_leftImg8bit.png 192 | frankfurt/frankfurt_000001_007622_leftImg8bit.png 193 | frankfurt/frankfurt_000001_016273_leftImg8bit.png 194 | frankfurt/frankfurt_000001_035864_leftImg8bit.png 195 | frankfurt/frankfurt_000001_067092_leftImg8bit.png 196 | 
frankfurt/frankfurt_000000_013067_leftImg8bit.png 197 | frankfurt/frankfurt_000001_067474_leftImg8bit.png 198 | frankfurt/frankfurt_000001_060135_leftImg8bit.png 199 | frankfurt/frankfurt_000000_018797_leftImg8bit.png 200 | frankfurt/frankfurt_000000_005898_leftImg8bit.png 201 | frankfurt/frankfurt_000001_055603_leftImg8bit.png 202 | frankfurt/frankfurt_000001_060906_leftImg8bit.png 203 | frankfurt/frankfurt_000001_062653_leftImg8bit.png 204 | frankfurt/frankfurt_000000_004617_leftImg8bit.png 205 | frankfurt/frankfurt_000001_055538_leftImg8bit.png 206 | frankfurt/frankfurt_000000_008451_leftImg8bit.png 207 | frankfurt/frankfurt_000001_052594_leftImg8bit.png 208 | frankfurt/frankfurt_000001_004327_leftImg8bit.png 209 | frankfurt/frankfurt_000001_075296_leftImg8bit.png 210 | frankfurt/frankfurt_000001_073088_leftImg8bit.png 211 | frankfurt/frankfurt_000001_005184_leftImg8bit.png 212 | frankfurt/frankfurt_000000_016286_leftImg8bit.png 213 | frankfurt/frankfurt_000001_008688_leftImg8bit.png 214 | frankfurt/frankfurt_000000_011074_leftImg8bit.png 215 | frankfurt/frankfurt_000001_056580_leftImg8bit.png 216 | frankfurt/frankfurt_000001_067735_leftImg8bit.png 217 | frankfurt/frankfurt_000001_034047_leftImg8bit.png 218 | frankfurt/frankfurt_000001_076502_leftImg8bit.png 219 | frankfurt/frankfurt_000001_071288_leftImg8bit.png 220 | frankfurt/frankfurt_000001_067295_leftImg8bit.png 221 | frankfurt/frankfurt_000001_071781_leftImg8bit.png 222 | frankfurt/frankfurt_000000_012121_leftImg8bit.png 223 | frankfurt/frankfurt_000001_004859_leftImg8bit.png 224 | frankfurt/frankfurt_000001_073911_leftImg8bit.png 225 | frankfurt/frankfurt_000001_047552_leftImg8bit.png 226 | frankfurt/frankfurt_000001_037705_leftImg8bit.png 227 | frankfurt/frankfurt_000001_025512_leftImg8bit.png 228 | frankfurt/frankfurt_000001_047178_leftImg8bit.png 229 | frankfurt/frankfurt_000001_014221_leftImg8bit.png 230 | frankfurt/frankfurt_000000_007365_leftImg8bit.png 231 | frankfurt/frankfurt_000001_049698_leftImg8bit.png 232 | frankfurt/frankfurt_000001_065160_leftImg8bit.png 233 | frankfurt/frankfurt_000001_061763_leftImg8bit.png 234 | frankfurt/frankfurt_000000_010351_leftImg8bit.png 235 | frankfurt/frankfurt_000001_072155_leftImg8bit.png 236 | frankfurt/frankfurt_000001_023235_leftImg8bit.png 237 | frankfurt/frankfurt_000000_015389_leftImg8bit.png 238 | frankfurt/frankfurt_000000_009688_leftImg8bit.png 239 | frankfurt/frankfurt_000000_016005_leftImg8bit.png 240 | frankfurt/frankfurt_000001_054640_leftImg8bit.png 241 | frankfurt/frankfurt_000001_029600_leftImg8bit.png 242 | frankfurt/frankfurt_000001_028232_leftImg8bit.png 243 | frankfurt/frankfurt_000001_050686_leftImg8bit.png 244 | frankfurt/frankfurt_000001_013496_leftImg8bit.png 245 | frankfurt/frankfurt_000001_066092_leftImg8bit.png 246 | frankfurt/frankfurt_000001_009854_leftImg8bit.png 247 | frankfurt/frankfurt_000001_067178_leftImg8bit.png 248 | frankfurt/frankfurt_000001_028854_leftImg8bit.png 249 | frankfurt/frankfurt_000001_083199_leftImg8bit.png 250 | frankfurt/frankfurt_000001_064798_leftImg8bit.png 251 | frankfurt/frankfurt_000001_018113_leftImg8bit.png 252 | frankfurt/frankfurt_000001_050149_leftImg8bit.png 253 | frankfurt/frankfurt_000001_048196_leftImg8bit.png 254 | frankfurt/frankfurt_000000_001236_leftImg8bit.png 255 | frankfurt/frankfurt_000000_017476_leftImg8bit.png 256 | frankfurt/frankfurt_000001_003588_leftImg8bit.png 257 | frankfurt/frankfurt_000001_021825_leftImg8bit.png 258 | frankfurt/frankfurt_000000_010763_leftImg8bit.png 259 | 
frankfurt/frankfurt_000001_062793_leftImg8bit.png 260 | frankfurt/frankfurt_000001_029236_leftImg8bit.png 261 | frankfurt/frankfurt_000001_075984_leftImg8bit.png 262 | frankfurt/frankfurt_000001_031266_leftImg8bit.png 263 | frankfurt/frankfurt_000001_043395_leftImg8bit.png 264 | frankfurt/frankfurt_000001_040732_leftImg8bit.png 265 | frankfurt/frankfurt_000001_011162_leftImg8bit.png 266 | frankfurt/frankfurt_000000_012009_leftImg8bit.png 267 | frankfurt/frankfurt_000001_042733_leftImg8bit.png 268 | lindau/lindau_000052_000019_leftImg8bit.png 269 | lindau/lindau_000009_000019_leftImg8bit.png 270 | lindau/lindau_000037_000019_leftImg8bit.png 271 | lindau/lindau_000047_000019_leftImg8bit.png 272 | lindau/lindau_000015_000019_leftImg8bit.png 273 | lindau/lindau_000030_000019_leftImg8bit.png 274 | lindau/lindau_000012_000019_leftImg8bit.png 275 | lindau/lindau_000032_000019_leftImg8bit.png 276 | lindau/lindau_000046_000019_leftImg8bit.png 277 | lindau/lindau_000000_000019_leftImg8bit.png 278 | lindau/lindau_000031_000019_leftImg8bit.png 279 | lindau/lindau_000011_000019_leftImg8bit.png 280 | lindau/lindau_000027_000019_leftImg8bit.png 281 | lindau/lindau_000054_000019_leftImg8bit.png 282 | lindau/lindau_000026_000019_leftImg8bit.png 283 | lindau/lindau_000017_000019_leftImg8bit.png 284 | lindau/lindau_000023_000019_leftImg8bit.png 285 | lindau/lindau_000005_000019_leftImg8bit.png 286 | lindau/lindau_000056_000019_leftImg8bit.png 287 | lindau/lindau_000025_000019_leftImg8bit.png 288 | lindau/lindau_000045_000019_leftImg8bit.png 289 | lindau/lindau_000014_000019_leftImg8bit.png 290 | lindau/lindau_000004_000019_leftImg8bit.png 291 | lindau/lindau_000021_000019_leftImg8bit.png 292 | lindau/lindau_000049_000019_leftImg8bit.png 293 | lindau/lindau_000033_000019_leftImg8bit.png 294 | lindau/lindau_000042_000019_leftImg8bit.png 295 | lindau/lindau_000013_000019_leftImg8bit.png 296 | lindau/lindau_000024_000019_leftImg8bit.png 297 | lindau/lindau_000002_000019_leftImg8bit.png 298 | lindau/lindau_000043_000019_leftImg8bit.png 299 | lindau/lindau_000016_000019_leftImg8bit.png 300 | lindau/lindau_000050_000019_leftImg8bit.png 301 | lindau/lindau_000018_000019_leftImg8bit.png 302 | lindau/lindau_000007_000019_leftImg8bit.png 303 | lindau/lindau_000048_000019_leftImg8bit.png 304 | lindau/lindau_000022_000019_leftImg8bit.png 305 | lindau/lindau_000053_000019_leftImg8bit.png 306 | lindau/lindau_000038_000019_leftImg8bit.png 307 | lindau/lindau_000001_000019_leftImg8bit.png 308 | lindau/lindau_000036_000019_leftImg8bit.png 309 | lindau/lindau_000035_000019_leftImg8bit.png 310 | lindau/lindau_000003_000019_leftImg8bit.png 311 | lindau/lindau_000034_000019_leftImg8bit.png 312 | lindau/lindau_000010_000019_leftImg8bit.png 313 | lindau/lindau_000055_000019_leftImg8bit.png 314 | lindau/lindau_000006_000019_leftImg8bit.png 315 | lindau/lindau_000019_000019_leftImg8bit.png 316 | lindau/lindau_000029_000019_leftImg8bit.png 317 | lindau/lindau_000039_000019_leftImg8bit.png 318 | lindau/lindau_000051_000019_leftImg8bit.png 319 | lindau/lindau_000020_000019_leftImg8bit.png 320 | lindau/lindau_000057_000019_leftImg8bit.png 321 | lindau/lindau_000041_000019_leftImg8bit.png 322 | lindau/lindau_000040_000019_leftImg8bit.png 323 | lindau/lindau_000044_000019_leftImg8bit.png 324 | lindau/lindau_000028_000019_leftImg8bit.png 325 | lindau/lindau_000058_000019_leftImg8bit.png 326 | lindau/lindau_000008_000019_leftImg8bit.png 327 | munster/munster_000000_000019_leftImg8bit.png 328 | munster/munster_000012_000019_leftImg8bit.png 
329 | munster/munster_000032_000019_leftImg8bit.png 330 | munster/munster_000068_000019_leftImg8bit.png 331 | munster/munster_000101_000019_leftImg8bit.png 332 | munster/munster_000153_000019_leftImg8bit.png 333 | munster/munster_000115_000019_leftImg8bit.png 334 | munster/munster_000029_000019_leftImg8bit.png 335 | munster/munster_000019_000019_leftImg8bit.png 336 | munster/munster_000156_000019_leftImg8bit.png 337 | munster/munster_000129_000019_leftImg8bit.png 338 | munster/munster_000169_000019_leftImg8bit.png 339 | munster/munster_000150_000019_leftImg8bit.png 340 | munster/munster_000165_000019_leftImg8bit.png 341 | munster/munster_000050_000019_leftImg8bit.png 342 | munster/munster_000025_000019_leftImg8bit.png 343 | munster/munster_000116_000019_leftImg8bit.png 344 | munster/munster_000132_000019_leftImg8bit.png 345 | munster/munster_000066_000019_leftImg8bit.png 346 | munster/munster_000096_000019_leftImg8bit.png 347 | munster/munster_000030_000019_leftImg8bit.png 348 | munster/munster_000146_000019_leftImg8bit.png 349 | munster/munster_000098_000019_leftImg8bit.png 350 | munster/munster_000059_000019_leftImg8bit.png 351 | munster/munster_000093_000019_leftImg8bit.png 352 | munster/munster_000122_000019_leftImg8bit.png 353 | munster/munster_000024_000019_leftImg8bit.png 354 | munster/munster_000036_000019_leftImg8bit.png 355 | munster/munster_000086_000019_leftImg8bit.png 356 | munster/munster_000163_000019_leftImg8bit.png 357 | munster/munster_000001_000019_leftImg8bit.png 358 | munster/munster_000053_000019_leftImg8bit.png 359 | munster/munster_000071_000019_leftImg8bit.png 360 | munster/munster_000079_000019_leftImg8bit.png 361 | munster/munster_000159_000019_leftImg8bit.png 362 | munster/munster_000038_000019_leftImg8bit.png 363 | munster/munster_000138_000019_leftImg8bit.png 364 | munster/munster_000135_000019_leftImg8bit.png 365 | munster/munster_000065_000019_leftImg8bit.png 366 | munster/munster_000139_000019_leftImg8bit.png 367 | munster/munster_000108_000019_leftImg8bit.png 368 | munster/munster_000020_000019_leftImg8bit.png 369 | munster/munster_000074_000019_leftImg8bit.png 370 | munster/munster_000035_000019_leftImg8bit.png 371 | munster/munster_000067_000019_leftImg8bit.png 372 | munster/munster_000151_000019_leftImg8bit.png 373 | munster/munster_000083_000019_leftImg8bit.png 374 | munster/munster_000118_000019_leftImg8bit.png 375 | munster/munster_000046_000019_leftImg8bit.png 376 | munster/munster_000147_000019_leftImg8bit.png 377 | munster/munster_000047_000019_leftImg8bit.png 378 | munster/munster_000043_000019_leftImg8bit.png 379 | munster/munster_000168_000019_leftImg8bit.png 380 | munster/munster_000167_000019_leftImg8bit.png 381 | munster/munster_000021_000019_leftImg8bit.png 382 | munster/munster_000073_000019_leftImg8bit.png 383 | munster/munster_000089_000019_leftImg8bit.png 384 | munster/munster_000060_000019_leftImg8bit.png 385 | munster/munster_000155_000019_leftImg8bit.png 386 | munster/munster_000140_000019_leftImg8bit.png 387 | munster/munster_000145_000019_leftImg8bit.png 388 | munster/munster_000077_000019_leftImg8bit.png 389 | munster/munster_000018_000019_leftImg8bit.png 390 | munster/munster_000045_000019_leftImg8bit.png 391 | munster/munster_000166_000019_leftImg8bit.png 392 | munster/munster_000037_000019_leftImg8bit.png 393 | munster/munster_000112_000019_leftImg8bit.png 394 | munster/munster_000080_000019_leftImg8bit.png 395 | munster/munster_000144_000019_leftImg8bit.png 396 | munster/munster_000142_000019_leftImg8bit.png 397 | 
munster/munster_000070_000019_leftImg8bit.png 398 | munster/munster_000044_000019_leftImg8bit.png 399 | munster/munster_000137_000019_leftImg8bit.png 400 | munster/munster_000041_000019_leftImg8bit.png 401 | munster/munster_000113_000019_leftImg8bit.png 402 | munster/munster_000075_000019_leftImg8bit.png 403 | munster/munster_000157_000019_leftImg8bit.png 404 | munster/munster_000158_000019_leftImg8bit.png 405 | munster/munster_000109_000019_leftImg8bit.png 406 | munster/munster_000033_000019_leftImg8bit.png 407 | munster/munster_000088_000019_leftImg8bit.png 408 | munster/munster_000090_000019_leftImg8bit.png 409 | munster/munster_000114_000019_leftImg8bit.png 410 | munster/munster_000171_000019_leftImg8bit.png 411 | munster/munster_000013_000019_leftImg8bit.png 412 | munster/munster_000130_000019_leftImg8bit.png 413 | munster/munster_000016_000019_leftImg8bit.png 414 | munster/munster_000136_000019_leftImg8bit.png 415 | munster/munster_000007_000019_leftImg8bit.png 416 | munster/munster_000014_000019_leftImg8bit.png 417 | munster/munster_000052_000019_leftImg8bit.png 418 | munster/munster_000104_000019_leftImg8bit.png 419 | munster/munster_000173_000019_leftImg8bit.png 420 | munster/munster_000057_000019_leftImg8bit.png 421 | munster/munster_000072_000019_leftImg8bit.png 422 | munster/munster_000003_000019_leftImg8bit.png 423 | munster/munster_000161_000019_leftImg8bit.png 424 | munster/munster_000002_000019_leftImg8bit.png 425 | munster/munster_000028_000019_leftImg8bit.png 426 | munster/munster_000051_000019_leftImg8bit.png 427 | munster/munster_000105_000019_leftImg8bit.png 428 | munster/munster_000061_000019_leftImg8bit.png 429 | munster/munster_000058_000019_leftImg8bit.png 430 | munster/munster_000094_000019_leftImg8bit.png 431 | munster/munster_000027_000019_leftImg8bit.png 432 | munster/munster_000062_000019_leftImg8bit.png 433 | munster/munster_000127_000019_leftImg8bit.png 434 | munster/munster_000110_000019_leftImg8bit.png 435 | munster/munster_000170_000019_leftImg8bit.png 436 | munster/munster_000023_000019_leftImg8bit.png 437 | munster/munster_000084_000019_leftImg8bit.png 438 | munster/munster_000121_000019_leftImg8bit.png 439 | munster/munster_000087_000019_leftImg8bit.png 440 | munster/munster_000097_000019_leftImg8bit.png 441 | munster/munster_000119_000019_leftImg8bit.png 442 | munster/munster_000128_000019_leftImg8bit.png 443 | munster/munster_000078_000019_leftImg8bit.png 444 | munster/munster_000010_000019_leftImg8bit.png 445 | munster/munster_000015_000019_leftImg8bit.png 446 | munster/munster_000048_000019_leftImg8bit.png 447 | munster/munster_000085_000019_leftImg8bit.png 448 | munster/munster_000164_000019_leftImg8bit.png 449 | munster/munster_000111_000019_leftImg8bit.png 450 | munster/munster_000099_000019_leftImg8bit.png 451 | munster/munster_000117_000019_leftImg8bit.png 452 | munster/munster_000009_000019_leftImg8bit.png 453 | munster/munster_000049_000019_leftImg8bit.png 454 | munster/munster_000148_000019_leftImg8bit.png 455 | munster/munster_000022_000019_leftImg8bit.png 456 | munster/munster_000131_000019_leftImg8bit.png 457 | munster/munster_000006_000019_leftImg8bit.png 458 | munster/munster_000005_000019_leftImg8bit.png 459 | munster/munster_000102_000019_leftImg8bit.png 460 | munster/munster_000160_000019_leftImg8bit.png 461 | munster/munster_000107_000019_leftImg8bit.png 462 | munster/munster_000095_000019_leftImg8bit.png 463 | munster/munster_000106_000019_leftImg8bit.png 464 | munster/munster_000034_000019_leftImg8bit.png 465 | 
munster/munster_000143_000019_leftImg8bit.png 466 | munster/munster_000017_000019_leftImg8bit.png 467 | munster/munster_000040_000019_leftImg8bit.png 468 | munster/munster_000152_000019_leftImg8bit.png 469 | munster/munster_000154_000019_leftImg8bit.png 470 | munster/munster_000100_000019_leftImg8bit.png 471 | munster/munster_000004_000019_leftImg8bit.png 472 | munster/munster_000141_000019_leftImg8bit.png 473 | munster/munster_000011_000019_leftImg8bit.png 474 | munster/munster_000055_000019_leftImg8bit.png 475 | munster/munster_000134_000019_leftImg8bit.png 476 | munster/munster_000054_000019_leftImg8bit.png 477 | munster/munster_000064_000019_leftImg8bit.png 478 | munster/munster_000039_000019_leftImg8bit.png 479 | munster/munster_000103_000019_leftImg8bit.png 480 | munster/munster_000092_000019_leftImg8bit.png 481 | munster/munster_000172_000019_leftImg8bit.png 482 | munster/munster_000042_000019_leftImg8bit.png 483 | munster/munster_000124_000019_leftImg8bit.png 484 | munster/munster_000069_000019_leftImg8bit.png 485 | munster/munster_000026_000019_leftImg8bit.png 486 | munster/munster_000120_000019_leftImg8bit.png 487 | munster/munster_000031_000019_leftImg8bit.png 488 | munster/munster_000162_000019_leftImg8bit.png 489 | munster/munster_000056_000019_leftImg8bit.png 490 | munster/munster_000081_000019_leftImg8bit.png 491 | munster/munster_000123_000019_leftImg8bit.png 492 | munster/munster_000125_000019_leftImg8bit.png 493 | munster/munster_000082_000019_leftImg8bit.png 494 | munster/munster_000133_000019_leftImg8bit.png 495 | munster/munster_000126_000019_leftImg8bit.png 496 | munster/munster_000063_000019_leftImg8bit.png 497 | munster/munster_000008_000019_leftImg8bit.png 498 | munster/munster_000149_000019_leftImg8bit.png 499 | munster/munster_000076_000019_leftImg8bit.png 500 | munster/munster_000091_000019_leftImg8bit.png 501 | -------------------------------------------------------------------------------- /util/loader/cityscapes_list/val_label.txt: -------------------------------------------------------------------------------- 1 | frankfurt/frankfurt_000001_007973_gtFine_labelIds.png 2 | frankfurt/frankfurt_000001_025921_gtFine_labelIds.png 3 | frankfurt/frankfurt_000001_062016_gtFine_labelIds.png 4 | frankfurt/frankfurt_000001_049078_gtFine_labelIds.png 5 | frankfurt/frankfurt_000000_009561_gtFine_labelIds.png 6 | frankfurt/frankfurt_000001_013710_gtFine_labelIds.png 7 | frankfurt/frankfurt_000001_041664_gtFine_labelIds.png 8 | frankfurt/frankfurt_000000_013240_gtFine_labelIds.png 9 | frankfurt/frankfurt_000001_044787_gtFine_labelIds.png 10 | frankfurt/frankfurt_000001_015328_gtFine_labelIds.png 11 | frankfurt/frankfurt_000001_073243_gtFine_labelIds.png 12 | frankfurt/frankfurt_000001_034816_gtFine_labelIds.png 13 | frankfurt/frankfurt_000001_041074_gtFine_labelIds.png 14 | frankfurt/frankfurt_000001_005898_gtFine_labelIds.png 15 | frankfurt/frankfurt_000000_022254_gtFine_labelIds.png 16 | frankfurt/frankfurt_000001_044658_gtFine_labelIds.png 17 | frankfurt/frankfurt_000001_009504_gtFine_labelIds.png 18 | frankfurt/frankfurt_000001_024927_gtFine_labelIds.png 19 | frankfurt/frankfurt_000001_017842_gtFine_labelIds.png 20 | frankfurt/frankfurt_000001_068208_gtFine_labelIds.png 21 | frankfurt/frankfurt_000001_013016_gtFine_labelIds.png 22 | frankfurt/frankfurt_000001_010156_gtFine_labelIds.png 23 | frankfurt/frankfurt_000000_002963_gtFine_labelIds.png 24 | frankfurt/frankfurt_000001_020693_gtFine_labelIds.png 25 | frankfurt/frankfurt_000001_078803_gtFine_labelIds.png 26 | 
frankfurt/frankfurt_000001_025713_gtFine_labelIds.png 27 | frankfurt/frankfurt_000001_007285_gtFine_labelIds.png 28 | frankfurt/frankfurt_000001_070099_gtFine_labelIds.png 29 | frankfurt/frankfurt_000000_009291_gtFine_labelIds.png 30 | frankfurt/frankfurt_000000_019607_gtFine_labelIds.png 31 | frankfurt/frankfurt_000001_068063_gtFine_labelIds.png 32 | frankfurt/frankfurt_000000_003920_gtFine_labelIds.png 33 | frankfurt/frankfurt_000001_077233_gtFine_labelIds.png 34 | frankfurt/frankfurt_000001_029086_gtFine_labelIds.png 35 | frankfurt/frankfurt_000001_060545_gtFine_labelIds.png 36 | frankfurt/frankfurt_000001_001464_gtFine_labelIds.png 37 | frankfurt/frankfurt_000001_028590_gtFine_labelIds.png 38 | frankfurt/frankfurt_000001_016462_gtFine_labelIds.png 39 | frankfurt/frankfurt_000001_060422_gtFine_labelIds.png 40 | frankfurt/frankfurt_000001_009058_gtFine_labelIds.png 41 | frankfurt/frankfurt_000001_080830_gtFine_labelIds.png 42 | frankfurt/frankfurt_000001_012870_gtFine_labelIds.png 43 | frankfurt/frankfurt_000001_077434_gtFine_labelIds.png 44 | frankfurt/frankfurt_000001_033655_gtFine_labelIds.png 45 | frankfurt/frankfurt_000001_051516_gtFine_labelIds.png 46 | frankfurt/frankfurt_000001_044413_gtFine_labelIds.png 47 | frankfurt/frankfurt_000001_055172_gtFine_labelIds.png 48 | frankfurt/frankfurt_000001_040575_gtFine_labelIds.png 49 | frankfurt/frankfurt_000000_020215_gtFine_labelIds.png 50 | frankfurt/frankfurt_000000_017228_gtFine_labelIds.png 51 | frankfurt/frankfurt_000001_041354_gtFine_labelIds.png 52 | frankfurt/frankfurt_000000_008206_gtFine_labelIds.png 53 | frankfurt/frankfurt_000001_043564_gtFine_labelIds.png 54 | frankfurt/frankfurt_000001_032711_gtFine_labelIds.png 55 | frankfurt/frankfurt_000001_064130_gtFine_labelIds.png 56 | frankfurt/frankfurt_000001_053102_gtFine_labelIds.png 57 | frankfurt/frankfurt_000001_082087_gtFine_labelIds.png 58 | frankfurt/frankfurt_000001_057478_gtFine_labelIds.png 59 | frankfurt/frankfurt_000001_007407_gtFine_labelIds.png 60 | frankfurt/frankfurt_000001_008200_gtFine_labelIds.png 61 | frankfurt/frankfurt_000001_038844_gtFine_labelIds.png 62 | frankfurt/frankfurt_000001_016029_gtFine_labelIds.png 63 | frankfurt/frankfurt_000001_058176_gtFine_labelIds.png 64 | frankfurt/frankfurt_000001_057181_gtFine_labelIds.png 65 | frankfurt/frankfurt_000001_039895_gtFine_labelIds.png 66 | frankfurt/frankfurt_000000_000294_gtFine_labelIds.png 67 | frankfurt/frankfurt_000001_055062_gtFine_labelIds.png 68 | frankfurt/frankfurt_000001_083029_gtFine_labelIds.png 69 | frankfurt/frankfurt_000001_010444_gtFine_labelIds.png 70 | frankfurt/frankfurt_000001_041517_gtFine_labelIds.png 71 | frankfurt/frankfurt_000001_069633_gtFine_labelIds.png 72 | frankfurt/frankfurt_000001_020287_gtFine_labelIds.png 73 | frankfurt/frankfurt_000001_012038_gtFine_labelIds.png 74 | frankfurt/frankfurt_000001_046504_gtFine_labelIds.png 75 | frankfurt/frankfurt_000001_032556_gtFine_labelIds.png 76 | frankfurt/frankfurt_000000_001751_gtFine_labelIds.png 77 | frankfurt/frankfurt_000001_000538_gtFine_labelIds.png 78 | frankfurt/frankfurt_000001_083852_gtFine_labelIds.png 79 | frankfurt/frankfurt_000001_077092_gtFine_labelIds.png 80 | frankfurt/frankfurt_000001_017101_gtFine_labelIds.png 81 | frankfurt/frankfurt_000001_044525_gtFine_labelIds.png 82 | frankfurt/frankfurt_000001_005703_gtFine_labelIds.png 83 | frankfurt/frankfurt_000001_080391_gtFine_labelIds.png 84 | frankfurt/frankfurt_000001_038418_gtFine_labelIds.png 85 | frankfurt/frankfurt_000001_066832_gtFine_labelIds.png 86 | 
frankfurt/frankfurt_000000_003357_gtFine_labelIds.png 87 | frankfurt/frankfurt_000000_020880_gtFine_labelIds.png 88 | frankfurt/frankfurt_000001_062396_gtFine_labelIds.png 89 | frankfurt/frankfurt_000001_046272_gtFine_labelIds.png 90 | frankfurt/frankfurt_000001_062509_gtFine_labelIds.png 91 | frankfurt/frankfurt_000001_054415_gtFine_labelIds.png 92 | frankfurt/frankfurt_000001_021406_gtFine_labelIds.png 93 | frankfurt/frankfurt_000001_030310_gtFine_labelIds.png 94 | frankfurt/frankfurt_000000_014480_gtFine_labelIds.png 95 | frankfurt/frankfurt_000001_005410_gtFine_labelIds.png 96 | frankfurt/frankfurt_000000_022797_gtFine_labelIds.png 97 | frankfurt/frankfurt_000001_035144_gtFine_labelIds.png 98 | frankfurt/frankfurt_000001_014565_gtFine_labelIds.png 99 | frankfurt/frankfurt_000001_065850_gtFine_labelIds.png 100 | frankfurt/frankfurt_000000_000576_gtFine_labelIds.png 101 | frankfurt/frankfurt_000001_065617_gtFine_labelIds.png 102 | frankfurt/frankfurt_000000_005543_gtFine_labelIds.png 103 | frankfurt/frankfurt_000001_055709_gtFine_labelIds.png 104 | frankfurt/frankfurt_000001_027325_gtFine_labelIds.png 105 | frankfurt/frankfurt_000001_011835_gtFine_labelIds.png 106 | frankfurt/frankfurt_000001_046779_gtFine_labelIds.png 107 | frankfurt/frankfurt_000001_064305_gtFine_labelIds.png 108 | frankfurt/frankfurt_000001_012738_gtFine_labelIds.png 109 | frankfurt/frankfurt_000001_048355_gtFine_labelIds.png 110 | frankfurt/frankfurt_000001_019969_gtFine_labelIds.png 111 | frankfurt/frankfurt_000001_080091_gtFine_labelIds.png 112 | frankfurt/frankfurt_000000_011007_gtFine_labelIds.png 113 | frankfurt/frankfurt_000000_015676_gtFine_labelIds.png 114 | frankfurt/frankfurt_000001_044227_gtFine_labelIds.png 115 | frankfurt/frankfurt_000001_055387_gtFine_labelIds.png 116 | frankfurt/frankfurt_000001_038245_gtFine_labelIds.png 117 | frankfurt/frankfurt_000001_059642_gtFine_labelIds.png 118 | frankfurt/frankfurt_000001_030669_gtFine_labelIds.png 119 | frankfurt/frankfurt_000001_068772_gtFine_labelIds.png 120 | frankfurt/frankfurt_000001_079206_gtFine_labelIds.png 121 | frankfurt/frankfurt_000001_055306_gtFine_labelIds.png 122 | frankfurt/frankfurt_000001_012699_gtFine_labelIds.png 123 | frankfurt/frankfurt_000001_042384_gtFine_labelIds.png 124 | frankfurt/frankfurt_000001_054077_gtFine_labelIds.png 125 | frankfurt/frankfurt_000001_010830_gtFine_labelIds.png 126 | frankfurt/frankfurt_000001_052120_gtFine_labelIds.png 127 | frankfurt/frankfurt_000001_032018_gtFine_labelIds.png 128 | frankfurt/frankfurt_000001_051737_gtFine_labelIds.png 129 | frankfurt/frankfurt_000001_028335_gtFine_labelIds.png 130 | frankfurt/frankfurt_000001_049770_gtFine_labelIds.png 131 | frankfurt/frankfurt_000001_054884_gtFine_labelIds.png 132 | frankfurt/frankfurt_000001_019698_gtFine_labelIds.png 133 | frankfurt/frankfurt_000000_011461_gtFine_labelIds.png 134 | frankfurt/frankfurt_000000_001016_gtFine_labelIds.png 135 | frankfurt/frankfurt_000001_062250_gtFine_labelIds.png 136 | frankfurt/frankfurt_000001_004736_gtFine_labelIds.png 137 | frankfurt/frankfurt_000001_068682_gtFine_labelIds.png 138 | frankfurt/frankfurt_000000_006589_gtFine_labelIds.png 139 | frankfurt/frankfurt_000000_011810_gtFine_labelIds.png 140 | frankfurt/frankfurt_000001_066574_gtFine_labelIds.png 141 | frankfurt/frankfurt_000001_048654_gtFine_labelIds.png 142 | frankfurt/frankfurt_000001_049209_gtFine_labelIds.png 143 | frankfurt/frankfurt_000001_042098_gtFine_labelIds.png 144 | frankfurt/frankfurt_000001_031416_gtFine_labelIds.png 145 | 
frankfurt/frankfurt_000000_009969_gtFine_labelIds.png 146 | frankfurt/frankfurt_000001_038645_gtFine_labelIds.png 147 | frankfurt/frankfurt_000001_020046_gtFine_labelIds.png 148 | frankfurt/frankfurt_000001_054219_gtFine_labelIds.png 149 | frankfurt/frankfurt_000001_002759_gtFine_labelIds.png 150 | frankfurt/frankfurt_000001_066438_gtFine_labelIds.png 151 | frankfurt/frankfurt_000000_020321_gtFine_labelIds.png 152 | frankfurt/frankfurt_000001_002646_gtFine_labelIds.png 153 | frankfurt/frankfurt_000001_046126_gtFine_labelIds.png 154 | frankfurt/frankfurt_000000_002196_gtFine_labelIds.png 155 | frankfurt/frankfurt_000001_057954_gtFine_labelIds.png 156 | frankfurt/frankfurt_000001_011715_gtFine_labelIds.png 157 | frankfurt/frankfurt_000000_021879_gtFine_labelIds.png 158 | frankfurt/frankfurt_000001_082466_gtFine_labelIds.png 159 | frankfurt/frankfurt_000000_003025_gtFine_labelIds.png 160 | frankfurt/frankfurt_000001_023369_gtFine_labelIds.png 161 | frankfurt/frankfurt_000001_061682_gtFine_labelIds.png 162 | frankfurt/frankfurt_000001_017459_gtFine_labelIds.png 163 | frankfurt/frankfurt_000001_059789_gtFine_labelIds.png 164 | frankfurt/frankfurt_000001_073464_gtFine_labelIds.png 165 | frankfurt/frankfurt_000001_063045_gtFine_labelIds.png 166 | frankfurt/frankfurt_000001_064651_gtFine_labelIds.png 167 | frankfurt/frankfurt_000000_013382_gtFine_labelIds.png 168 | frankfurt/frankfurt_000001_002512_gtFine_labelIds.png 169 | frankfurt/frankfurt_000001_032942_gtFine_labelIds.png 170 | frankfurt/frankfurt_000001_010600_gtFine_labelIds.png 171 | frankfurt/frankfurt_000001_030067_gtFine_labelIds.png 172 | frankfurt/frankfurt_000001_014741_gtFine_labelIds.png 173 | frankfurt/frankfurt_000000_021667_gtFine_labelIds.png 174 | frankfurt/frankfurt_000001_051807_gtFine_labelIds.png 175 | frankfurt/frankfurt_000001_019854_gtFine_labelIds.png 176 | frankfurt/frankfurt_000001_015768_gtFine_labelIds.png 177 | frankfurt/frankfurt_000001_007857_gtFine_labelIds.png 178 | frankfurt/frankfurt_000001_058914_gtFine_labelIds.png 179 | frankfurt/frankfurt_000000_012868_gtFine_labelIds.png 180 | frankfurt/frankfurt_000000_013942_gtFine_labelIds.png 181 | frankfurt/frankfurt_000001_014406_gtFine_labelIds.png 182 | frankfurt/frankfurt_000001_049298_gtFine_labelIds.png 183 | frankfurt/frankfurt_000001_023769_gtFine_labelIds.png 184 | frankfurt/frankfurt_000001_012519_gtFine_labelIds.png 185 | frankfurt/frankfurt_000001_064925_gtFine_labelIds.png 186 | frankfurt/frankfurt_000001_072295_gtFine_labelIds.png 187 | frankfurt/frankfurt_000001_058504_gtFine_labelIds.png 188 | frankfurt/frankfurt_000001_059119_gtFine_labelIds.png 189 | frankfurt/frankfurt_000001_015091_gtFine_labelIds.png 190 | frankfurt/frankfurt_000001_058057_gtFine_labelIds.png 191 | frankfurt/frankfurt_000001_003056_gtFine_labelIds.png 192 | frankfurt/frankfurt_000001_007622_gtFine_labelIds.png 193 | frankfurt/frankfurt_000001_016273_gtFine_labelIds.png 194 | frankfurt/frankfurt_000001_035864_gtFine_labelIds.png 195 | frankfurt/frankfurt_000001_067092_gtFine_labelIds.png 196 | frankfurt/frankfurt_000000_013067_gtFine_labelIds.png 197 | frankfurt/frankfurt_000001_067474_gtFine_labelIds.png 198 | frankfurt/frankfurt_000001_060135_gtFine_labelIds.png 199 | frankfurt/frankfurt_000000_018797_gtFine_labelIds.png 200 | frankfurt/frankfurt_000000_005898_gtFine_labelIds.png 201 | frankfurt/frankfurt_000001_055603_gtFine_labelIds.png 202 | frankfurt/frankfurt_000001_060906_gtFine_labelIds.png 203 | frankfurt/frankfurt_000001_062653_gtFine_labelIds.png 204 | 
frankfurt/frankfurt_000000_004617_gtFine_labelIds.png 205 | frankfurt/frankfurt_000001_055538_gtFine_labelIds.png 206 | frankfurt/frankfurt_000000_008451_gtFine_labelIds.png 207 | frankfurt/frankfurt_000001_052594_gtFine_labelIds.png 208 | frankfurt/frankfurt_000001_004327_gtFine_labelIds.png 209 | frankfurt/frankfurt_000001_075296_gtFine_labelIds.png 210 | frankfurt/frankfurt_000001_073088_gtFine_labelIds.png 211 | frankfurt/frankfurt_000001_005184_gtFine_labelIds.png 212 | frankfurt/frankfurt_000000_016286_gtFine_labelIds.png 213 | frankfurt/frankfurt_000001_008688_gtFine_labelIds.png 214 | frankfurt/frankfurt_000000_011074_gtFine_labelIds.png 215 | frankfurt/frankfurt_000001_056580_gtFine_labelIds.png 216 | frankfurt/frankfurt_000001_067735_gtFine_labelIds.png 217 | frankfurt/frankfurt_000001_034047_gtFine_labelIds.png 218 | frankfurt/frankfurt_000001_076502_gtFine_labelIds.png 219 | frankfurt/frankfurt_000001_071288_gtFine_labelIds.png 220 | frankfurt/frankfurt_000001_067295_gtFine_labelIds.png 221 | frankfurt/frankfurt_000001_071781_gtFine_labelIds.png 222 | frankfurt/frankfurt_000000_012121_gtFine_labelIds.png 223 | frankfurt/frankfurt_000001_004859_gtFine_labelIds.png 224 | frankfurt/frankfurt_000001_073911_gtFine_labelIds.png 225 | frankfurt/frankfurt_000001_047552_gtFine_labelIds.png 226 | frankfurt/frankfurt_000001_037705_gtFine_labelIds.png 227 | frankfurt/frankfurt_000001_025512_gtFine_labelIds.png 228 | frankfurt/frankfurt_000001_047178_gtFine_labelIds.png 229 | frankfurt/frankfurt_000001_014221_gtFine_labelIds.png 230 | frankfurt/frankfurt_000000_007365_gtFine_labelIds.png 231 | frankfurt/frankfurt_000001_049698_gtFine_labelIds.png 232 | frankfurt/frankfurt_000001_065160_gtFine_labelIds.png 233 | frankfurt/frankfurt_000001_061763_gtFine_labelIds.png 234 | frankfurt/frankfurt_000000_010351_gtFine_labelIds.png 235 | frankfurt/frankfurt_000001_072155_gtFine_labelIds.png 236 | frankfurt/frankfurt_000001_023235_gtFine_labelIds.png 237 | frankfurt/frankfurt_000000_015389_gtFine_labelIds.png 238 | frankfurt/frankfurt_000000_009688_gtFine_labelIds.png 239 | frankfurt/frankfurt_000000_016005_gtFine_labelIds.png 240 | frankfurt/frankfurt_000001_054640_gtFine_labelIds.png 241 | frankfurt/frankfurt_000001_029600_gtFine_labelIds.png 242 | frankfurt/frankfurt_000001_028232_gtFine_labelIds.png 243 | frankfurt/frankfurt_000001_050686_gtFine_labelIds.png 244 | frankfurt/frankfurt_000001_013496_gtFine_labelIds.png 245 | frankfurt/frankfurt_000001_066092_gtFine_labelIds.png 246 | frankfurt/frankfurt_000001_009854_gtFine_labelIds.png 247 | frankfurt/frankfurt_000001_067178_gtFine_labelIds.png 248 | frankfurt/frankfurt_000001_028854_gtFine_labelIds.png 249 | frankfurt/frankfurt_000001_083199_gtFine_labelIds.png 250 | frankfurt/frankfurt_000001_064798_gtFine_labelIds.png 251 | frankfurt/frankfurt_000001_018113_gtFine_labelIds.png 252 | frankfurt/frankfurt_000001_050149_gtFine_labelIds.png 253 | frankfurt/frankfurt_000001_048196_gtFine_labelIds.png 254 | frankfurt/frankfurt_000000_001236_gtFine_labelIds.png 255 | frankfurt/frankfurt_000000_017476_gtFine_labelIds.png 256 | frankfurt/frankfurt_000001_003588_gtFine_labelIds.png 257 | frankfurt/frankfurt_000001_021825_gtFine_labelIds.png 258 | frankfurt/frankfurt_000000_010763_gtFine_labelIds.png 259 | frankfurt/frankfurt_000001_062793_gtFine_labelIds.png 260 | frankfurt/frankfurt_000001_029236_gtFine_labelIds.png 261 | frankfurt/frankfurt_000001_075984_gtFine_labelIds.png 262 | frankfurt/frankfurt_000001_031266_gtFine_labelIds.png 263 | 
frankfurt/frankfurt_000001_043395_gtFine_labelIds.png 264 | frankfurt/frankfurt_000001_040732_gtFine_labelIds.png 265 | frankfurt/frankfurt_000001_011162_gtFine_labelIds.png 266 | frankfurt/frankfurt_000000_012009_gtFine_labelIds.png 267 | frankfurt/frankfurt_000001_042733_gtFine_labelIds.png 268 | lindau/lindau_000052_000019_gtFine_labelIds.png 269 | lindau/lindau_000009_000019_gtFine_labelIds.png 270 | lindau/lindau_000037_000019_gtFine_labelIds.png 271 | lindau/lindau_000047_000019_gtFine_labelIds.png 272 | lindau/lindau_000015_000019_gtFine_labelIds.png 273 | lindau/lindau_000030_000019_gtFine_labelIds.png 274 | lindau/lindau_000012_000019_gtFine_labelIds.png 275 | lindau/lindau_000032_000019_gtFine_labelIds.png 276 | lindau/lindau_000046_000019_gtFine_labelIds.png 277 | lindau/lindau_000000_000019_gtFine_labelIds.png 278 | lindau/lindau_000031_000019_gtFine_labelIds.png 279 | lindau/lindau_000011_000019_gtFine_labelIds.png 280 | lindau/lindau_000027_000019_gtFine_labelIds.png 281 | lindau/lindau_000054_000019_gtFine_labelIds.png 282 | lindau/lindau_000026_000019_gtFine_labelIds.png 283 | lindau/lindau_000017_000019_gtFine_labelIds.png 284 | lindau/lindau_000023_000019_gtFine_labelIds.png 285 | lindau/lindau_000005_000019_gtFine_labelIds.png 286 | lindau/lindau_000056_000019_gtFine_labelIds.png 287 | lindau/lindau_000025_000019_gtFine_labelIds.png 288 | lindau/lindau_000045_000019_gtFine_labelIds.png 289 | lindau/lindau_000014_000019_gtFine_labelIds.png 290 | lindau/lindau_000004_000019_gtFine_labelIds.png 291 | lindau/lindau_000021_000019_gtFine_labelIds.png 292 | lindau/lindau_000049_000019_gtFine_labelIds.png 293 | lindau/lindau_000033_000019_gtFine_labelIds.png 294 | lindau/lindau_000042_000019_gtFine_labelIds.png 295 | lindau/lindau_000013_000019_gtFine_labelIds.png 296 | lindau/lindau_000024_000019_gtFine_labelIds.png 297 | lindau/lindau_000002_000019_gtFine_labelIds.png 298 | lindau/lindau_000043_000019_gtFine_labelIds.png 299 | lindau/lindau_000016_000019_gtFine_labelIds.png 300 | lindau/lindau_000050_000019_gtFine_labelIds.png 301 | lindau/lindau_000018_000019_gtFine_labelIds.png 302 | lindau/lindau_000007_000019_gtFine_labelIds.png 303 | lindau/lindau_000048_000019_gtFine_labelIds.png 304 | lindau/lindau_000022_000019_gtFine_labelIds.png 305 | lindau/lindau_000053_000019_gtFine_labelIds.png 306 | lindau/lindau_000038_000019_gtFine_labelIds.png 307 | lindau/lindau_000001_000019_gtFine_labelIds.png 308 | lindau/lindau_000036_000019_gtFine_labelIds.png 309 | lindau/lindau_000035_000019_gtFine_labelIds.png 310 | lindau/lindau_000003_000019_gtFine_labelIds.png 311 | lindau/lindau_000034_000019_gtFine_labelIds.png 312 | lindau/lindau_000010_000019_gtFine_labelIds.png 313 | lindau/lindau_000055_000019_gtFine_labelIds.png 314 | lindau/lindau_000006_000019_gtFine_labelIds.png 315 | lindau/lindau_000019_000019_gtFine_labelIds.png 316 | lindau/lindau_000029_000019_gtFine_labelIds.png 317 | lindau/lindau_000039_000019_gtFine_labelIds.png 318 | lindau/lindau_000051_000019_gtFine_labelIds.png 319 | lindau/lindau_000020_000019_gtFine_labelIds.png 320 | lindau/lindau_000057_000019_gtFine_labelIds.png 321 | lindau/lindau_000041_000019_gtFine_labelIds.png 322 | lindau/lindau_000040_000019_gtFine_labelIds.png 323 | lindau/lindau_000044_000019_gtFine_labelIds.png 324 | lindau/lindau_000028_000019_gtFine_labelIds.png 325 | lindau/lindau_000058_000019_gtFine_labelIds.png 326 | lindau/lindau_000008_000019_gtFine_labelIds.png 327 | munster/munster_000000_000019_gtFine_labelIds.png 328 | 
munster/munster_000012_000019_gtFine_labelIds.png 329 | munster/munster_000032_000019_gtFine_labelIds.png 330 | munster/munster_000068_000019_gtFine_labelIds.png 331 | munster/munster_000101_000019_gtFine_labelIds.png 332 | munster/munster_000153_000019_gtFine_labelIds.png 333 | munster/munster_000115_000019_gtFine_labelIds.png 334 | munster/munster_000029_000019_gtFine_labelIds.png 335 | munster/munster_000019_000019_gtFine_labelIds.png 336 | munster/munster_000156_000019_gtFine_labelIds.png 337 | munster/munster_000129_000019_gtFine_labelIds.png 338 | munster/munster_000169_000019_gtFine_labelIds.png 339 | munster/munster_000150_000019_gtFine_labelIds.png 340 | munster/munster_000165_000019_gtFine_labelIds.png 341 | munster/munster_000050_000019_gtFine_labelIds.png 342 | munster/munster_000025_000019_gtFine_labelIds.png 343 | munster/munster_000116_000019_gtFine_labelIds.png 344 | munster/munster_000132_000019_gtFine_labelIds.png 345 | munster/munster_000066_000019_gtFine_labelIds.png 346 | munster/munster_000096_000019_gtFine_labelIds.png 347 | munster/munster_000030_000019_gtFine_labelIds.png 348 | munster/munster_000146_000019_gtFine_labelIds.png 349 | munster/munster_000098_000019_gtFine_labelIds.png 350 | munster/munster_000059_000019_gtFine_labelIds.png 351 | munster/munster_000093_000019_gtFine_labelIds.png 352 | munster/munster_000122_000019_gtFine_labelIds.png 353 | munster/munster_000024_000019_gtFine_labelIds.png 354 | munster/munster_000036_000019_gtFine_labelIds.png 355 | munster/munster_000086_000019_gtFine_labelIds.png 356 | munster/munster_000163_000019_gtFine_labelIds.png 357 | munster/munster_000001_000019_gtFine_labelIds.png 358 | munster/munster_000053_000019_gtFine_labelIds.png 359 | munster/munster_000071_000019_gtFine_labelIds.png 360 | munster/munster_000079_000019_gtFine_labelIds.png 361 | munster/munster_000159_000019_gtFine_labelIds.png 362 | munster/munster_000038_000019_gtFine_labelIds.png 363 | munster/munster_000138_000019_gtFine_labelIds.png 364 | munster/munster_000135_000019_gtFine_labelIds.png 365 | munster/munster_000065_000019_gtFine_labelIds.png 366 | munster/munster_000139_000019_gtFine_labelIds.png 367 | munster/munster_000108_000019_gtFine_labelIds.png 368 | munster/munster_000020_000019_gtFine_labelIds.png 369 | munster/munster_000074_000019_gtFine_labelIds.png 370 | munster/munster_000035_000019_gtFine_labelIds.png 371 | munster/munster_000067_000019_gtFine_labelIds.png 372 | munster/munster_000151_000019_gtFine_labelIds.png 373 | munster/munster_000083_000019_gtFine_labelIds.png 374 | munster/munster_000118_000019_gtFine_labelIds.png 375 | munster/munster_000046_000019_gtFine_labelIds.png 376 | munster/munster_000147_000019_gtFine_labelIds.png 377 | munster/munster_000047_000019_gtFine_labelIds.png 378 | munster/munster_000043_000019_gtFine_labelIds.png 379 | munster/munster_000168_000019_gtFine_labelIds.png 380 | munster/munster_000167_000019_gtFine_labelIds.png 381 | munster/munster_000021_000019_gtFine_labelIds.png 382 | munster/munster_000073_000019_gtFine_labelIds.png 383 | munster/munster_000089_000019_gtFine_labelIds.png 384 | munster/munster_000060_000019_gtFine_labelIds.png 385 | munster/munster_000155_000019_gtFine_labelIds.png 386 | munster/munster_000140_000019_gtFine_labelIds.png 387 | munster/munster_000145_000019_gtFine_labelIds.png 388 | munster/munster_000077_000019_gtFine_labelIds.png 389 | munster/munster_000018_000019_gtFine_labelIds.png 390 | munster/munster_000045_000019_gtFine_labelIds.png 391 | 
munster/munster_000166_000019_gtFine_labelIds.png 392 | munster/munster_000037_000019_gtFine_labelIds.png 393 | munster/munster_000112_000019_gtFine_labelIds.png 394 | munster/munster_000080_000019_gtFine_labelIds.png 395 | munster/munster_000144_000019_gtFine_labelIds.png 396 | munster/munster_000142_000019_gtFine_labelIds.png 397 | munster/munster_000070_000019_gtFine_labelIds.png 398 | munster/munster_000044_000019_gtFine_labelIds.png 399 | munster/munster_000137_000019_gtFine_labelIds.png 400 | munster/munster_000041_000019_gtFine_labelIds.png 401 | munster/munster_000113_000019_gtFine_labelIds.png 402 | munster/munster_000075_000019_gtFine_labelIds.png 403 | munster/munster_000157_000019_gtFine_labelIds.png 404 | munster/munster_000158_000019_gtFine_labelIds.png 405 | munster/munster_000109_000019_gtFine_labelIds.png 406 | munster/munster_000033_000019_gtFine_labelIds.png 407 | munster/munster_000088_000019_gtFine_labelIds.png 408 | munster/munster_000090_000019_gtFine_labelIds.png 409 | munster/munster_000114_000019_gtFine_labelIds.png 410 | munster/munster_000171_000019_gtFine_labelIds.png 411 | munster/munster_000013_000019_gtFine_labelIds.png 412 | munster/munster_000130_000019_gtFine_labelIds.png 413 | munster/munster_000016_000019_gtFine_labelIds.png 414 | munster/munster_000136_000019_gtFine_labelIds.png 415 | munster/munster_000007_000019_gtFine_labelIds.png 416 | munster/munster_000014_000019_gtFine_labelIds.png 417 | munster/munster_000052_000019_gtFine_labelIds.png 418 | munster/munster_000104_000019_gtFine_labelIds.png 419 | munster/munster_000173_000019_gtFine_labelIds.png 420 | munster/munster_000057_000019_gtFine_labelIds.png 421 | munster/munster_000072_000019_gtFine_labelIds.png 422 | munster/munster_000003_000019_gtFine_labelIds.png 423 | munster/munster_000161_000019_gtFine_labelIds.png 424 | munster/munster_000002_000019_gtFine_labelIds.png 425 | munster/munster_000028_000019_gtFine_labelIds.png 426 | munster/munster_000051_000019_gtFine_labelIds.png 427 | munster/munster_000105_000019_gtFine_labelIds.png 428 | munster/munster_000061_000019_gtFine_labelIds.png 429 | munster/munster_000058_000019_gtFine_labelIds.png 430 | munster/munster_000094_000019_gtFine_labelIds.png 431 | munster/munster_000027_000019_gtFine_labelIds.png 432 | munster/munster_000062_000019_gtFine_labelIds.png 433 | munster/munster_000127_000019_gtFine_labelIds.png 434 | munster/munster_000110_000019_gtFine_labelIds.png 435 | munster/munster_000170_000019_gtFine_labelIds.png 436 | munster/munster_000023_000019_gtFine_labelIds.png 437 | munster/munster_000084_000019_gtFine_labelIds.png 438 | munster/munster_000121_000019_gtFine_labelIds.png 439 | munster/munster_000087_000019_gtFine_labelIds.png 440 | munster/munster_000097_000019_gtFine_labelIds.png 441 | munster/munster_000119_000019_gtFine_labelIds.png 442 | munster/munster_000128_000019_gtFine_labelIds.png 443 | munster/munster_000078_000019_gtFine_labelIds.png 444 | munster/munster_000010_000019_gtFine_labelIds.png 445 | munster/munster_000015_000019_gtFine_labelIds.png 446 | munster/munster_000048_000019_gtFine_labelIds.png 447 | munster/munster_000085_000019_gtFine_labelIds.png 448 | munster/munster_000164_000019_gtFine_labelIds.png 449 | munster/munster_000111_000019_gtFine_labelIds.png 450 | munster/munster_000099_000019_gtFine_labelIds.png 451 | munster/munster_000117_000019_gtFine_labelIds.png 452 | munster/munster_000009_000019_gtFine_labelIds.png 453 | munster/munster_000049_000019_gtFine_labelIds.png 454 | 
munster/munster_000148_000019_gtFine_labelIds.png 455 | munster/munster_000022_000019_gtFine_labelIds.png 456 | munster/munster_000131_000019_gtFine_labelIds.png 457 | munster/munster_000006_000019_gtFine_labelIds.png 458 | munster/munster_000005_000019_gtFine_labelIds.png 459 | munster/munster_000102_000019_gtFine_labelIds.png 460 | munster/munster_000160_000019_gtFine_labelIds.png 461 | munster/munster_000107_000019_gtFine_labelIds.png 462 | munster/munster_000095_000019_gtFine_labelIds.png 463 | munster/munster_000106_000019_gtFine_labelIds.png 464 | munster/munster_000034_000019_gtFine_labelIds.png 465 | munster/munster_000143_000019_gtFine_labelIds.png 466 | munster/munster_000017_000019_gtFine_labelIds.png 467 | munster/munster_000040_000019_gtFine_labelIds.png 468 | munster/munster_000152_000019_gtFine_labelIds.png 469 | munster/munster_000154_000019_gtFine_labelIds.png 470 | munster/munster_000100_000019_gtFine_labelIds.png 471 | munster/munster_000004_000019_gtFine_labelIds.png 472 | munster/munster_000141_000019_gtFine_labelIds.png 473 | munster/munster_000011_000019_gtFine_labelIds.png 474 | munster/munster_000055_000019_gtFine_labelIds.png 475 | munster/munster_000134_000019_gtFine_labelIds.png 476 | munster/munster_000054_000019_gtFine_labelIds.png 477 | munster/munster_000064_000019_gtFine_labelIds.png 478 | munster/munster_000039_000019_gtFine_labelIds.png 479 | munster/munster_000103_000019_gtFine_labelIds.png 480 | munster/munster_000092_000019_gtFine_labelIds.png 481 | munster/munster_000172_000019_gtFine_labelIds.png 482 | munster/munster_000042_000019_gtFine_labelIds.png 483 | munster/munster_000124_000019_gtFine_labelIds.png 484 | munster/munster_000069_000019_gtFine_labelIds.png 485 | munster/munster_000026_000019_gtFine_labelIds.png 486 | munster/munster_000120_000019_gtFine_labelIds.png 487 | munster/munster_000031_000019_gtFine_labelIds.png 488 | munster/munster_000162_000019_gtFine_labelIds.png 489 | munster/munster_000056_000019_gtFine_labelIds.png 490 | munster/munster_000081_000019_gtFine_labelIds.png 491 | munster/munster_000123_000019_gtFine_labelIds.png 492 | munster/munster_000125_000019_gtFine_labelIds.png 493 | munster/munster_000082_000019_gtFine_labelIds.png 494 | munster/munster_000133_000019_gtFine_labelIds.png 495 | munster/munster_000126_000019_gtFine_labelIds.png 496 | munster/munster_000063_000019_gtFine_labelIds.png 497 | munster/munster_000008_000019_gtFine_labelIds.png 498 | munster/munster_000149_000019_gtFine_labelIds.png 499 | munster/munster_000076_000019_gtFine_labelIds.png 500 | munster/munster_000091_000019_gtFine_labelIds.png 501 | -------------------------------------------------------------------------------- /util/loss.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import numpy as np 3 | import torch.nn as nn 4 | import torch.nn.functional as F 5 | 6 | from torchvision import models 7 | 8 | class VGGLoss(nn.Module): 9 | def __init__(self, gpu_id=0): 10 | super(VGGLoss, self).__init__() 11 | self.vgg = Vgg19().cuda(gpu_id) 12 | self.criterion = nn.L1Loss() 13 | self.downsample = nn.AvgPool2d(2, stride=2, count_include_pad=False) 14 | 15 | def forward(self, x, y, weights = [1.0/32, 1.0/16, 1.0/8, 1.0/4, 1.0]): 16 | bs = x.size(0) 17 | while x.size()[3] > 1024: 18 | x, y = self.downsample(x), self.downsample(y) 19 | x_vgg, y_vgg = self.vgg(x), self.vgg(y) 20 | loss = 0 21 | for i in range(len(x_vgg)): 22 | loss += weights[i] * self.criterion(x_vgg[i], y_vgg[i].detach()) 23 | 
return loss 24 | 25 | class VGGLoss_for_trans(nn.Module): 26 | def __init__(self, gpu_id=0): 27 | super(VGGLoss_for_trans, self).__init__() 28 | self.vgg = Vgg19().cuda(gpu_id) 29 | self.criterion = nn.L1Loss() 30 | self.downsample = nn.AvgPool2d(2, stride=2, count_include_pad=False) 31 | 32 | def forward(self, trans_img, struct_img, texture_img, weights = [1.0/32, 1.0/16, 1.0/8, 1.0/4, 1.0]): 33 | while trans_img.size()[3] > 1024: 34 | trans_img, struct_img, texture_img = self.downsample(trans_img), self.downsample(struct_img), self.downsample(texture_img) 35 | trans_vgg, struct_vgg, texture_vgg = self.vgg(trans_img), self.vgg(struct_img), self.vgg(texture_img) 36 | loss = 0 37 | for i in range(len(trans_vgg)): 38 | if i < 3: 39 | x_feat_mean = trans_vgg[i].view(trans_vgg[i].size(0), trans_vgg[i].size(1), -1).mean(2) 40 | y_feat_mean = texture_vgg[i].view(texture_vgg[i].size(0), texture_vgg[i].size(1), -1).mean(2) 41 | loss += self.criterion(x_feat_mean, y_feat_mean.detach()) 42 | else: 43 | loss += weights[i] * self.criterion(trans_vgg[i], struct_vgg[i].detach()) 44 | return loss 45 | 46 | def cross_entropy2d(input, target, weight=None, size_average=True): 47 | n, c, h, w = input.size() 48 | log_p = F.log_softmax(input, dim=1) 49 | log_p = log_p.transpose(1, 2).transpose(2, 3).contiguous().view(-1, c) 50 | log_p = log_p[target.view(n * h * w, 1).repeat(1, c) >= 0] 51 | log_p = log_p.view(-1, c) 52 | 53 | mask = target >= 0 54 | target = target[mask] 55 | loss = F.nll_loss(log_p, target, ignore_index=255, 56 | weight=weight, size_average=False) 57 | if size_average: 58 | loss /= mask.data.sum() 59 | return loss 60 | 61 | def myL1Loss(source, target): 62 | return torch.mean(torch.abs(source - target)) 63 | 64 | class Vgg19(nn.Module): 65 | def __init__(self, requires_grad=False): 66 | super(Vgg19, self).__init__() 67 | vgg_pretrained_features = models.vgg19(pretrained=True).features 68 | self.slice1 = torch.nn.Sequential() 69 | self.slice2 = torch.nn.Sequential() 70 | self.slice3 = torch.nn.Sequential() 71 | self.slice4 = torch.nn.Sequential() 72 | self.slice5 = torch.nn.Sequential() 73 | for x in range(2): 74 | self.slice1.add_module(str(x), vgg_pretrained_features[x]) 75 | for x in range(2, 7): 76 | self.slice2.add_module(str(x), vgg_pretrained_features[x]) 77 | for x in range(7, 12): 78 | self.slice3.add_module(str(x), vgg_pretrained_features[x]) 79 | for x in range(12, 21): 80 | self.slice4.add_module(str(x), vgg_pretrained_features[x]) 81 | for x in range(21, 30): 82 | self.slice5.add_module(str(x), vgg_pretrained_features[x]) 83 | if not requires_grad: 84 | for param in self.parameters(): 85 | param.requires_grad = False 86 | 87 | def forward(self, X): 88 | h_relu1 = self.slice1(X) 89 | h_relu2 = self.slice2(h_relu1) 90 | h_relu3 = self.slice3(h_relu2) 91 | h_relu4 = self.slice4(h_relu3) 92 | h_relu5 = self.slice5(h_relu4) 93 | out = [h_relu1, h_relu2, h_relu3, h_relu4, h_relu5] 94 | return out 95 | -------------------------------------------------------------------------------- /util/metrics.py: -------------------------------------------------------------------------------- 1 | # Adapted from score written by wkentaro 2 | # https://github.com/wkentaro/pytorch-fcn/blob/master/torchfcn/utils.py 3 | 4 | import numpy as np 5 | 6 | label = ['road', 7 | 'sidewalk', 8 | 'building', 9 | 'wall', 10 | 'fence', 11 | 'pole', 12 | 'light', 13 | 'sign', 14 | 'vegetation', 15 | 'terrain', 16 | 'sky', 17 | 'person', 18 | 'rider', 19 | 'car', 20 | 'truck', 21 | 'bus', 22 | 'train', 23 | 
'motorcycle', 24 | 'bicycle'] 25 | 26 | class runningScore(object): 27 | 28 | def __init__(self, n_classes): 29 | self.n_classes = n_classes 30 | self.confusion_matrix = np.zeros((n_classes, n_classes)) 31 | 32 | def _fast_hist(self, label_true, label_pred, n_class): 33 | mask = (label_true >= 0) & (label_true < n_class) 34 | hist = np.bincount( 35 | n_class * label_true[mask].astype(int) + 36 | label_pred[mask], minlength=n_class**2).reshape(n_class, n_class) 37 | return hist 38 | 39 | def update(self, label_trues, label_preds): 40 | for lt, lp in zip(label_trues, label_preds): 41 | self.confusion_matrix += self._fast_hist(lt.flatten(), lp.flatten(), self.n_classes) 42 | 43 | def get_scores(self): 44 | """Returns accuracy score evaluation result. 45 | - overall accuracy 46 | - mean accuracy 47 | - mean IU 48 | - fwavacc 49 | """ 50 | hist = self.confusion_matrix 51 | acc = np.diag(hist).sum() / hist.sum() 52 | acc_cls = np.diag(hist) / hist.sum(axis=1) 53 | acc_cls = np.nanmean(acc_cls) 54 | iu = np.diag(hist) / (hist.sum(axis=1) + hist.sum(axis=0) - np.diag(hist)) 55 | for id in range(19): 56 | print ('===>' + label[id] + ':' + str(iu[id])) 57 | mean_iu = np.nanmean(iu) 58 | freq = hist.sum(axis=1) / hist.sum() 59 | fwavacc = (freq[freq > 0] * iu[freq > 0]).sum() 60 | cls_iu = dict(zip(range(self.n_classes), iu)) 61 | 62 | return {'Overall Acc: \t': acc, 63 | 'Mean Acc : \t': acc_cls, 64 | 'FreqW Acc : \t': fwavacc, 65 | 'Mean IoU : \t': mean_iu,}, cls_iu 66 | 67 | def reset(self): 68 | self.confusion_matrix = np.zeros((self.n_classes, self.n_classes)) 69 | -------------------------------------------------------------------------------- /util/utils.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Misc Utility functions 3 | ''' 4 | from collections import OrderedDict 5 | import os 6 | import numpy as np 7 | import torch 8 | 9 | def recursive_glob(rootdir='.', suffix=''): 10 | """Performs recursive glob with given suffix and rootdir 11 | :param rootdir is the root directory 12 | :param suffix is the suffix to be searched 13 | """ 14 | return [os.path.join(looproot, filename) 15 | for looproot, _, filenames in os.walk(rootdir) 16 | for filename in filenames if filename.endswith(suffix)] 17 | 18 | def poly_lr_scheduler(base_lr, iter, max_iter=30000, power=0.9): 19 | return base_lr * ((1 - float(iter) / max_iter) ** (power)) 20 | 21 | def adjust_learning_rate(opts, base_lr, i_iter, max_iter, power): 22 | lr = poly_lr_scheduler(base_lr, i_iter, max_iter, power) 23 | for opt in opts: 24 | opt.param_groups[0]['lr'] = lr 25 | if len(opt.param_groups) > 1: 26 | opt.param_groups[1]['lr'] = lr * 10 27 | 28 | def alpha_blend(input_image, segmentation_mask, alpha=0.5): 29 | """Alpha blending utility to overlay RGB masks on RGB images 30 | :param input_image is a np.ndarray with 3 channels 31 | :param segmentation_mask is a np.ndarray with 3 channels 32 | :param alpha is a float value 33 | 34 | """ 35 | blended = np.zeros(input_image.shape, dtype=np.float32) 36 | blended = input_image * alpha + segmentation_mask * (1 - alpha) 37 | return blended 38 | 39 | def convert_state_dict(state_dict): 40 | """Converts a state dict saved from a DataParallel module to a normal 41 | module state_dict in place 42 | :param state_dict is the loaded DataParallel model_state 43 | 44 | """ 45 | new_state_dict = OrderedDict() 46 | for k, v in state_dict.items(): 47 | name = k[7:] # remove `module.` 48 | new_state_dict[name] = v 49 | return new_state_dict 50 | 51 | def 
save_models(model_dict, prefix='./'): 52 | if not os.path.exists(prefix): 53 | os.makedirs(prefix) 54 | for key, value in model_dict.items(): 55 | torch.save(value.state_dict(), os.path.join(prefix, key+'.pth')) 56 | 57 | def load_models(model_dict, prefix='./'): 58 | for key, value in model_dict.items(): 59 | value.load_state_dict(torch.load(os.path.join(prefix, key+'.pth'))) 60 | 61 | --------------------------------------------------------------------------------
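Usage note (illustrative sketch, not part of the repository files above): the snippet shows one way the helpers from `util/metrics.py` and `util/utils.py` fit together, namely accumulating a confusion matrix with `runningScore`, decaying the learning rate with `adjust_learning_rate`, and checkpointing with `save_models`/`load_models`. The random arrays, the commented-out optimizer call, and the `enc_shared` name are placeholders, not values prescribed by the repository.
```
import numpy as np

from util.metrics import runningScore
from util.utils import adjust_learning_rate, save_models, load_models

num_classes = 19
metrics = runningScore(num_classes)

# Dummy predictions/labels standing in for network outputs and Cityscapes
# ground truth; shapes follow the 512x1024 crops used elsewhere in the code.
preds = np.random.randint(0, num_classes, size=(2, 512, 1024))
gts = np.random.randint(0, num_classes, size=(2, 512, 1024))

metrics.update(gts, preds)                 # accumulate the confusion matrix
scores, class_iou = metrics.get_scores()   # overall/mean acc, fwavacc, per-class and mean IoU
metrics.reset()

# Polynomial learning-rate decay over a list of optimizers; a second param
# group, if present, is set to 10x the base rate by adjust_learning_rate.
# adjust_learning_rate([opt_enc], base_lr=2.5e-4, i_iter=i_iter, max_iter=150000, power=0.9)

# Checkpointing: each entry of the dict is written to <prefix>/<key>.pth by
# save_models and restored into the same dict of modules by load_models.
# save_models({'enc_shared': enc_shared}, prefix='./weights')
# load_models({'enc_shared': enc_shared}, prefix='./weights')
```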