├── DAST_train.py ├── DA_train.py ├── README.md ├── dataset ├── __init__.py ├── cbst_dataset.py ├── cityscapes_dataset.py ├── cityscapes_list │ ├── info.json │ ├── label.txt │ ├── train.txt │ └── val.txt ├── gta5_dataset.py └── gta5_list │ └── train.txt ├── evaluate_bulk.py ├── iou_bulk.py ├── model ├── __init__.py ├── __pycache__ │ ├── __init__.cpython-37.pyc │ ├── deeplab_multi.cpython-37.pyc │ ├── deeplab_multi_val.cpython-37.pyc │ └── discriminator.cpython-37.pyc ├── deeplab.py ├── deeplab_multi.py ├── deeplab_vgg.py └── discriminator.py ├── util.py └── utils ├── __init__.py └── loss.py /DAST_train.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import time 3 | import util 4 | import os 5 | import os.path as osp 6 | import timeit 7 | import torch 8 | from torch.utils import data 9 | import torch.nn.functional as F 10 | import torch.backends.cudnn as cudnn 11 | import torch.nn as nn 12 | import torch.optim as optim 13 | import math 14 | from PIL import Image 15 | import numpy as np 16 | import shutil 17 | import random 18 | 19 | from model.deeplab_multi import DeeplabMulti 20 | from model.discriminator import FCDiscriminator 21 | from model.discriminator import OutspaceDiscriminator 22 | from dataset.cbst_dataset import SrcSTDataSet, TgtSTDataSet, TestDataSet 23 | 24 | IMG_MEAN = np.array((104.00698793, 116.66876762, 122.67891434), dtype=np.float32) # BGR 25 | 26 | BATCH_SIZE = 1 27 | IGNORE_LABEL = 255 28 | LEARNING_RATE = 2e-4 29 | LEARNING_RATE_D = 1e-4 30 | MOMENTUM = 0.9 31 | NUM_CLASSES = 19 32 | NUM_STEPS = 100000 33 | NUM_STEPS_STOP = 20000 # early stopping 34 | POWER = 0.9 35 | RESTORE_FROM = './round0.pth' 36 | RESTORE_FROM_D = './round0_D.pth' 37 | SAVE_PRED_EVERY = 1000 38 | SNAPSHOT_DIR = './snapshots/GTA2Cityscapes' 39 | WEIGHT_DECAY = 0.0005 40 | INIT_TGT_PORT = 0.5 41 | 42 | SAVE_PATH = 'debug' 43 | LOG_FILE = 'self_training_log' 44 | 45 | SOURCE = 'GTA5' 46 | INPUT_SIZE = '1280,720' 47 
DATA_SRC_DIRECTORY = './data/GTA5'
DATA_SRC_LIST_PATH = './dataset/gta5_list/train.txt'


TARGET = 'cityscapes'
INPUT_SIZE_TARGET = '1024, 512'
DATA_TGT_DIRECTORY = './data/Cityscapes'
DATA_TGT_TRAIN_LIST_PATH = './dataset/cityscapes_list/train.txt'
DATA_TGT_TEST_LIST_PATH = './dataset/cityscapes_list/val.txt'

def setup_seed(seed):
    """Seed every RNG used in training (torch CPU/GPU, numpy, python random)
    and force deterministic cuDNN kernels for reproducibility."""
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    np.random.seed(seed)
    random.seed(seed)
    torch.backends.cudnn.deterministic = True

def get_arguments():
    """Parse all the arguments provided from the CLI.

    Returns:
      A list of parsed arguments.
    """
    parser = argparse.ArgumentParser(description="DeepLab-VGG Network")
    parser.add_argument("--data-src-dir", type=str, default=DATA_SRC_DIRECTORY,
                        help="Path to the directory containing the source dataset.")
    parser.add_argument("--data-src-list", type=str, default=DATA_SRC_LIST_PATH,
                        help="Path to the file listing the images&labels in the source dataset.")
    parser.add_argument("--data-tgt-dir", type=str, default=DATA_TGT_DIRECTORY,
                        help="Path to the directory containing the target dataset.")
    parser.add_argument("--data-tgt-train-list", type=str, default=DATA_TGT_TRAIN_LIST_PATH,
                        help="Path to the file listing the images*GT labels in the target train dataset.")
    parser.add_argument("--data-tgt-test-list", type=str, default=DATA_TGT_TEST_LIST_PATH,
                        help="Path to the file listing the images*GT labels in the target test dataset.")
    parser.add_argument("--num-classes", type=int, default=NUM_CLASSES,
                        help="Number of classes to predict (including background).")
    parser.add_argument("--ignore-label", type=int, default=IGNORE_LABEL,
                        help="The index of the label to ignore during the training.")
    parser.add_argument("--restore-from", type=str, default=RESTORE_FROM,
                        help="Where restore model parameters from.")
    parser.add_argument("--restore-from-D", type=str, default=RESTORE_FROM_D,
                        help="Where restore model parameters from.")
    parser.add_argument("--batch-size", type=int, default=BATCH_SIZE,
                        help="Number of images sent to the network in one step.")
    parser.add_argument("--input-size", type=str, default=INPUT_SIZE,
                        help="Comma-separated string with height and width of images.")
    parser.add_argument("--input-size-target", type=str, default=INPUT_SIZE_TARGET,
                        help="Comma-separated string with height and width of images.")
    parser.add_argument("--learning-rate", type=float, default=LEARNING_RATE,
                        help="Base learning rate for training with polynomial decay.")
    parser.add_argument("--learning-rate-D", type=float, default=LEARNING_RATE_D,
                        help="Base learning rate for discriminator.")
    parser.add_argument("--power", type=float, default=POWER,
                        help="Decay parameter to compute the learning rate.")
    parser.add_argument("--momentum", type=float, default=MOMENTUM,
                        help="Momentum component of the optimiser.")
    parser.add_argument("--weight-decay", type=float, default=WEIGHT_DECAY,
                        help="Regularisation parameter for L2-loss.")
    parser.add_argument("--save", type=str, default=SAVE_PATH,
                        help="Path to save result for self-training.")
    parser.add_argument('--init-tgt-port', default=INIT_TGT_PORT, type=float, dest='init_tgt_port',
                        help='The initial portion of target to determine kc')
    parser.add_argument("--num-steps", type=int, default=NUM_STEPS,
                        help="Number of training steps.")
    parser.add_argument("--num-steps-stop", type=int, default=NUM_STEPS_STOP,
                        help="Number of training steps for early stopping.")
    parser.add_argument("--save-pred-every", type=int, default=SAVE_PRED_EVERY,
                        help="Save summaries and checkpoint every often.")
    parser.add_argument("--snapshot-dir", type=str, default=SNAPSHOT_DIR,
                        help="Where to save snapshots of the model.")
    parser.add_argument('--rm-prob',
                        help='If remove the probability maps generated in every round.',
                        default=False, action='store_true')
    parser.add_argument("--log-file", type=str, default=LOG_FILE,
                        help="The name of log file.")
    parser.add_argument('--debug', help='True means logging debug info.',
                        default=False, action='store_true')
    return parser.parse_args()

args = get_arguments()

# Cityscapes trainID palette (19 classes x RGB triplets) used to colorize
# predicted / pseudo-label maps for visualization; padded to 256 entries below.
palette = [128, 64, 128, 244, 35, 232, 70, 70, 70, 102, 102, 156, 190, 153, 153, 153, 153, 153, 250, 170, 30,
           220, 220, 0, 107, 142, 35, 152, 251, 152, 70, 130, 180, 220, 20, 60, 255, 0, 0, 0, 0, 142, 0, 0, 70,
           0, 60, 100, 0, 80, 100, 0, 0, 230, 119, 11, 32]
zero_pad = 256 * 3 - len(palette)
for i in range(zero_pad):
    palette.append(0)

def colorize_mask(mask):
    """Convert a trainID label map (HxW numpy array) to a palettized PIL image."""
    # mask: numpy array of the mask
    new_mask = Image.fromarray(mask.astype(np.uint8)).convert('P')
    new_mask.putpalette(palette)
    return new_mask

def main():
    """Run one self-training round: generate pseudo-labels, then retrain adversarially."""
    setup_seed(666)
    device = torch.device("cuda")
    save_path = args.save
    save_pseudo_label_path = osp.join(save_path, 'pseudo_label')  # in 'save_path'. Save labelIDs, not trainIDs.
146 | save_stats_path = osp.join(save_path, 'stats') # in 'save_path' 147 | save_lst_path = osp.join(save_path, 'list') 148 | if not os.path.exists(save_path): 149 | os.makedirs(save_path) 150 | if not os.path.exists(save_pseudo_label_path): 151 | os.makedirs(save_pseudo_label_path) 152 | if not os.path.exists(save_stats_path): 153 | os.makedirs(save_stats_path) 154 | if not os.path.exists(save_lst_path): 155 | os.makedirs(save_lst_path) 156 | 157 | cudnn.enabled = True 158 | cudnn.benchmark = True 159 | 160 | logger = util.set_logger(args.save, args.log_file, args.debug) 161 | logger.info('start with arguments %s', args) 162 | 163 | model = DeeplabMulti(num_classes=args.num_classes) 164 | saved_state_dict = torch.load(args.restore_from) 165 | model.load_state_dict(saved_state_dict) 166 | model.train() 167 | model.to(device) 168 | 169 | # init D 170 | num_class_list = [2048, 19] 171 | model_D = nn.ModuleList([FCDiscriminator(num_classes=num_class_list[i]).train().to(device) if i<1 else OutspaceDiscriminator(num_classes=num_class_list[i]).train().to(device) for i in range(2)]) 172 | saved_state_dict_D = torch.load(args.restore_from_D) 173 | model_D.load_state_dict(saved_state_dict_D) 174 | 175 | if not os.path.exists(args.snapshot_dir): 176 | os.makedirs(args.snapshot_dir) 177 | 178 | image_src_list, _, src_num = parse_split_list(args.data_src_list) 179 | image_tgt_list, image_name_tgt_list, tgt_num = parse_split_list(args.data_tgt_train_list) 180 | # portions 181 | tgt_portion = args.init_tgt_port 182 | 183 | # training crop size 184 | w, h = map(int, args.input_size.split(',')) 185 | input_size = (w, h) 186 | 187 | w, h = map(int, args.input_size_target.split(',')) 188 | input_size_target = (w, h) 189 | 190 | bce_loss1 = torch.nn.MSELoss() 191 | bce_loss2 = torch.nn.MSELoss(reduce=False, reduction='none') 192 | seg_loss = torch.nn.CrossEntropyLoss(ignore_index=255) 193 | round_idx = 3 194 | save_round_eval_path = osp.join(args.save,str(round_idx)) 195 | 
    save_pseudo_label_color_path = osp.join(save_round_eval_path, 'pseudo_label_color')
    if not os.path.exists(save_round_eval_path):
        os.makedirs(save_round_eval_path)
    if not os.path.exists(save_pseudo_label_color_path):
        os.makedirs(save_pseudo_label_color_path)
    ########## pseudo-label generation
    # evaluation & save confidence vectors
    test(model, model_D, device, save_round_eval_path, round_idx, 500, args, logger)
    conf_dict, pred_cls_num, save_prob_path, save_pred_path = val(model, model_D, device, save_round_eval_path, round_idx, tgt_num, args, logger)
    # class-balanced thresholds
    cls_thresh = kc_parameters(conf_dict, pred_cls_num, tgt_portion, round_idx, save_stats_path, args, logger)
    # pseudo-label maps generation
    label_selection(cls_thresh, tgt_num, image_name_tgt_list, round_idx, save_prob_path, save_pred_path, save_pseudo_label_path, save_pseudo_label_color_path, save_round_eval_path, args, logger)
    src_train_lst, tgt_train_lst, src_num_sel = savelst_SrcTgt(image_tgt_list, image_name_tgt_list, image_src_list, save_lst_path, save_pseudo_label_path, src_num, tgt_num, args)
    ########### model retraining
    # dataset
    srctrainset = SrcSTDataSet(args.data_src_dir, src_train_lst, max_iters=args.num_steps * args.batch_size,
                               crop_size=input_size, scale=False, mirror=False, mean=IMG_MEAN)
    tgttrainset = TgtSTDataSet(args.data_tgt_dir, tgt_train_lst, pseudo_root=save_pseudo_label_path, max_iters=args.num_steps * args.batch_size,
                               crop_size=input_size_target, scale=False, mirror=False, mean=IMG_MEAN, set='train')
    trainloader = torch.utils.data.DataLoader(srctrainset, batch_size=args.batch_size, shuffle=True, num_workers=8, pin_memory=True)
    trainloader_iter = enumerate(trainloader)
    targetloader = torch.utils.data.DataLoader(tgttrainset, batch_size=args.batch_size, shuffle=True, num_workers=8, pin_memory=True)
    targetloader_iter = enumerate(targetloader)

    optimizer = optim.SGD(model.parameters(),
                          lr=args.learning_rate, momentum=args.momentum, weight_decay=args.weight_decay)
    optimizer.zero_grad()
    optimizer_D = optim.Adam(model_D.parameters(), lr=args.learning_rate_D, betas=(0.9, 0.99))
    optimizer_D.zero_grad()
    logger.info('###### Start model retraining dataset in round {}! ######'.format(round_idx))

    start = timeit.default_timer()
    # start training
    interp = nn.Upsample(size=(input_size[1], input_size[0]), mode='bilinear', align_corners=True)
    interp_target = nn.Upsample(size=(input_size_target[1], input_size_target[0]), mode='bilinear', align_corners=True)

    # labels for adversarial training
    source_label = 0
    target_label = 1

    for i_iter in range(args.num_steps):

        lamb = 1  # weight on the target (pseudo-label) segmentation loss
        optimizer.zero_grad()
        adjust_learning_rate(optimizer, i_iter)

        optimizer_D.zero_grad()
        adjust_learning_rate_D(optimizer_D, i_iter)

        # train G
        # don't accumulate grads in D
        for param in model_D.parameters():
            param.requires_grad = False

        # train with source: supervised segmentation loss only
        _, batch = trainloader_iter.__next__()
        images, labels, _, _ = batch
        images = images.to(device)
        labels = labels.long().to(device)

        feat_source, pred_source = model(images, model_D, 'source')
        pred_source = interp(pred_source)

        loss_seg = seg_loss(pred_source, labels)
        loss_seg.backward()

        # train with target: pseudo-label segmentation loss + adversarial losses
        _, batch = targetloader_iter.__next__()
        images, labels, _, _ = batch
        images = images.to(device)
        labels = labels.long().to(device)

        feat_target, pred_target = model(images, model_D, 'target')
        pred_target = interp_target(pred_target)
        # atten_target = F.interpolate(atten_target, size=(16, 32), mode='bilinear', align_corners=True)

        loss_seg_tgt = seg_loss(pred_target, labels)*lamb

        # Fool both discriminators: make target features/outputs look 'source'.
        D_out1 = model_D[0](feat_target)
        loss_adv1 = bce_loss1(D_out1, torch.FloatTensor(D_out1.data.size()).fill_(source_label).to(device))
        D_out2 = model_D[1](F.softmax(pred_target, dim=1))
        loss_adv2 = bce_loss2(D_out2, torch.FloatTensor(D_out2.data.size()).fill_(source_label).to(device))
        loss_adv = loss_adv1*0.01 + loss_adv2.mean()*0.01
        loss = loss_seg_tgt + loss_adv
        loss.backward()

        optimizer.step()

        # train D
        # bring back requires_grad
        for param in model_D.parameters():
            param.requires_grad = True

        # train with source (detach so G gets no gradient from D's update)
        D_out_source1 = model_D[0](feat_source.detach())
        loss_D_source1 = bce_loss1(D_out_source1, torch.FloatTensor(D_out_source1.data.size()).fill_(source_label).to(device))
        D_out_source2 = model_D[1](F.softmax(pred_source.detach(),dim=1))
        loss_D_source2 = bce_loss1(D_out_source2, torch.FloatTensor(D_out_source2.data.size()).fill_(source_label).to(device))
        loss_D_source = loss_D_source1 + loss_D_source2
        loss_D_source.backward()

        # train with target
        D_out_target1 = model_D[0](feat_target.detach())
        loss_D_target1 = bce_loss1(D_out_target1, torch.FloatTensor(D_out_target1.data.size()).fill_(target_label).to(device))
        D_out_target2 = model_D[1](F.softmax(pred_target.detach(),dim=1))
        weight_target = bce_loss2(D_out_target2, torch.FloatTensor(D_out_target2.data.size()).fill_(target_label).to(device))
        loss_D_target2 = weight_target.mean()
        loss_D_target = loss_D_target1 + loss_D_target2
        loss_D_target.backward()

        optimizer_D.step()

        if i_iter % 10 == 0:
            print('iter={0:8d}/{1:8d}, seg={2:.3f} seg_tgt={3:.3f} adv={4:.3f} adv1={5:.3f} adv2={6:.3f} src1={7:.3f} src2={8:.3f} tgt1={9:.3f} tgt2={10:.3f} D1={11:.3f} D2={12:.3f}'.format(
                i_iter, args.num_steps, loss_seg.item(), loss_seg_tgt.item(), loss_adv.item(), loss_adv1.item(), loss_adv2.mean().item(), loss_D_source1.item(), loss_D_source2.item(),
                loss_D_target1.item(), loss_D_target2.item(), loss_D_source.item(), loss_D_target.item()))

        if i_iter >= args.num_steps_stop - 1:
            print('save model ...')
            torch.save(model.state_dict(), osp.join(args.snapshot_dir, 'GTA5_' + str(args.num_steps_stop) + '.pth'))
            torch.save(model_D.state_dict(), osp.join(args.snapshot_dir, 'GTA5_' + str(args.num_steps_stop) + '_D.pth'))
            break

        if i_iter % args.save_pred_every == 0 and i_iter != 0:
            print('taking snapshot ...')
            test(model, model_D, device, save_round_eval_path, round_idx, 500, args, logger)
            torch.save(model.state_dict(), osp.join(args.snapshot_dir, 'GTA5_' + str(i_iter) + '.pth'))
            torch.save(model_D.state_dict(), osp.join(args.snapshot_dir, 'GTA5_' + str(i_iter) + '_D.pth'))


    end = timeit.default_timer()
    logger.info('###### Finish model retraining dataset in round {}! Time cost: {:.2f} seconds. ######'.format(round_idx, end - start))
    # test self-trained model in target domain test set
    test(model, model_D, device, save_round_eval_path, round_idx, 500, args, logger)

def savelst_SrcTgt(image_tgt_list, image_name_tgt_list, image_src_list, save_lst_path, save_pseudo_label_path, src_num, tgt_num, args):
    """Write the source and target (image<TAB>pseudo-label) train lists to disk.

    Returns the two list-file paths and the number of selected source images.
    """
    src_train_lst = osp.join(save_lst_path,'src_train.txt')
    tgt_train_lst = osp.join(save_lst_path, 'tgt_train.txt')

    # generate src train list
    with open(src_train_lst, 'w') as f:
        for idx in range(src_num):
            f.write("%s\n" % (image_src_list[idx]))
    # generate tgt train list
    with open(tgt_train_lst, 'w') as f:
        for idx in range(tgt_num):
            image_tgt_path = osp.join(save_pseudo_label_path,image_name_tgt_list[idx])
            f.write("%s\t%s\n" % (image_tgt_list[idx], image_tgt_path))

    return src_train_lst, tgt_train_lst, src_num

def label_selection(cls_thresh, tgt_num, image_name_tgt_list, round_idx, save_prob_path, save_pred_path, save_pseudo_label_path, save_pseudo_label_color_path, save_round_eval_path, args, logger):
    """Generate pseudo-label maps by thresholding saved class probabilities.

    Pixels whose class-normalized confidence falls below 1 (i.e. below the
    class threshold from kc_parameters) are marked 255 (ignored).
    """
    logger.info('###### Start pseudo-label generation in round {} ! ######'.format(round_idx))
    start_pl = time.time()
    for idx in range(tgt_num):
        sample_name = image_name_tgt_list[idx].split('.')[0]
        probmap_path = osp.join(save_prob_path, '{}.npy'.format(sample_name))
        pred_path = osp.join(save_pred_path, '{}.png'.format(sample_name))
        pred_prob = np.load(probmap_path)
        pred_label_trainIDs = np.asarray(Image.open(pred_path))
        save_wpred_vis_path = osp.join(save_round_eval_path, 'weighted_pred_vis')
        if not os.path.exists(save_wpred_vis_path):
            os.makedirs(save_wpred_vis_path)
        # dividing by per-class thresholds re-ranks classes (class-balanced argmax)
        weighted_prob = pred_prob/cls_thresh
        weighted_pred_trainIDs = np.asarray(np.argmax(weighted_prob, axis=2), dtype=np.uint8)
        # save weighted predication
        wpred_label_col = weighted_pred_trainIDs.copy()
        wpred_label_col = colorize_mask(wpred_label_col)
        wpred_label_col.save('%s/%s_color.png' % (save_wpred_vis_path, sample_name))
        weighted_conf = np.amax(weighted_prob, axis=2)
        pred_label_trainIDs = weighted_pred_trainIDs.copy()
        pred_label_trainIDs[weighted_conf < 1] = 255 # '255' in cityscapes indicates 'unlabaled' for trainIDs

        # pseudo-labels with labelID
        pseudo_label_trainIDs = pred_label_trainIDs.copy()
        # save colored pseudo-label map
        pseudo_label_col = colorize_mask(pseudo_label_trainIDs)
        pseudo_label_col.save('%s/%s_color.png' % (save_pseudo_label_color_path, sample_name))
        # save pseudo-label map with label IDs
        pseudo_label_save = Image.fromarray(pseudo_label_trainIDs.astype(np.uint8))
        pseudo_label_save.save('%s/%s.png' % (save_pseudo_label_path, sample_name))

    # remove probability maps
    if args.rm_prob:
        shutil.rmtree(save_prob_path)

    logger.info('###### Finish pseudo-label generation in round {}! Time cost: {:.2f} seconds. ######'.format(round_idx,time.time() - start_pl))

def parse_split_list(list_name):
    """Read an image-list file; return (full paths, basenames, count)."""
    image_list = []
    image_name_list = []
    file_num = 0
    with open(list_name) as f:
        for item in f.readlines():
            fields = item.strip()
            image_name = fields.split('/')[-1]
            image_list.append(fields)
            image_name_list.append(image_name)
            file_num += 1
    return image_list, image_name_list, file_num

def kc_parameters(conf_dict, pred_cls_num, tgt_portion, round_idx, save_stats_path, args, logger):
    """Compute per-class confidence thresholds (kc) for class-balanced selection.

    For each class, the threshold is the confidence of the pixel at the
    tgt_portion quantile of that class's sorted confidences, capped at 0.9.
    """
    logger.info('###### Start kc generation in round {} ! ######'.format(round_idx))
    start_kc = time.time()
    # threshold for each class
    cls_thresh = np.ones(args.num_classes,dtype = np.float32)
    cls_sel_size = np.zeros(args.num_classes, dtype=np.float32)
    cls_size = np.zeros(args.num_classes, dtype=np.float32)
    for idx_cls in np.arange(0, args.num_classes):
        cls_size[idx_cls] = pred_cls_num[idx_cls]
        # NOTE(review): 'is not None' would be the idiomatic comparison here
        if conf_dict[idx_cls] != None:
            conf_dict[idx_cls].sort(reverse=True) # sort in descending order
            len_cls = len(conf_dict[idx_cls])
            cls_sel_size[idx_cls] = int(math.floor(len_cls * tgt_portion))
            len_cls_thresh = int(cls_sel_size[idx_cls])
            if len_cls_thresh != 0:
                if conf_dict[idx_cls][len_cls_thresh-1]<0.9:
                    cls_thresh[idx_cls] = conf_dict[idx_cls][len_cls_thresh-1]
                else:
                    cls_thresh[idx_cls] = 0.9
            conf_dict[idx_cls] = None  # free the (large) confidence list
    # save thresholds
    np.save(save_stats_path + '/cls_thresh_round' + str(round_idx) + '.npy', cls_thresh)
    np.save(save_stats_path + '/cls_sel_size_round' + str(round_idx) + '.npy', cls_sel_size)
    logger.info('###### Finish kc generation in round {}! Time cost: {:.2f} seconds. ######'.format(round_idx,time.time() - start_kc))
    return cls_thresh

def val(model, model_D, device, save_round_eval_path, round_idx, tgt_num, args, logger):
    """Create the model and start the evaluation process.

    Evaluates on the target-domain *train* split, saving per-image probability
    maps, trainID predictions and visualizations; returns the class-wise
    confidence vectors and per-class pixel counts used by kc_parameters.
    """
    ## scorer
    scorer = ScoreUpdater(args.num_classes, tgt_num, logger)
    scorer.reset()

    ## test data loader
    testloader = data.DataLoader(TestDataSet(args.data_tgt_dir, args.data_tgt_train_list, crop_size=(1024,512), mean=IMG_MEAN, scale=False, mirror=False, set='train'),
                                 batch_size=1, shuffle=False, pin_memory=True)
    model.eval()
    model_D.eval()
    model.to(device)
    model_D.to(device)

    ## upsampling layer
    interp = nn.Upsample(size=(512, 1024), mode='bilinear', align_corners=True)

    ## output of deeplab is logits, not probability
    softmax2d = nn.Softmax2d()

    ## output folder
    save_pred_vis_path = osp.join(save_round_eval_path, 'pred_vis')
    save_prob_path = osp.join(save_round_eval_path, 'prob')
    save_pred_path = osp.join(save_round_eval_path, 'pred')
    if not os.path.exists(save_pred_vis_path):
        os.makedirs(save_pred_vis_path)
    if not os.path.exists(save_prob_path):
        os.makedirs(save_prob_path)
    if not os.path.exists(save_pred_path):
        os.makedirs(save_pred_path)

    # saving output data
    conf_dict = {k: [] for k in range(args.num_classes)}
    pred_cls_num = np.zeros(args.num_classes)
    ## evaluation process
    logger.info('###### Start evaluating target domain train set in round {}! ######'.format(round_idx))
    start_eval = time.time()
    with torch.no_grad():
        for index, batch in enumerate(testloader):
            image, label, _, name = batch
            label = label.cpu().data[0].numpy()
            _, pred = model(image.to(device), model_D, 'target')
            output = softmax2d(interp(pred)).cpu().data[0].numpy()
            output = output.transpose(1,2,0)
            amax_output = np.asarray(np.argmax(output, axis=2), dtype=np.uint8)
            conf = np.amax(output,axis=2)
            # score
            pred_label = amax_output.copy()
            scorer.update(pred_label.flatten(), label.flatten(), index)

            # save visualized seg maps & predication prob map
            amax_output_col = colorize_mask(amax_output)
            name = name[0].split('/')[-1]
            image_name = name.split('.')[0]
            # prob
            np.save('%s/%s.npy' % (save_prob_path, image_name), output)
            # trainIDs/vis seg maps
            amax_output = Image.fromarray(amax_output)
            amax_output.save('%s/%s.png' % (save_pred_path, image_name))
            amax_output_col.save('%s/%s_color.png' % (save_pred_vis_path, image_name))

            # save class-wise confidence maps
            for idx_cls in range(args.num_classes):
                idx_temp = pred_label == idx_cls
                pred_cls_num[idx_cls] = pred_cls_num[idx_cls] + np.sum(idx_temp)
                if idx_temp.any():
                    conf_cls_temp = conf[idx_temp].astype(np.float32)
                    len_cls_temp = conf_cls_temp.size
                    # downsampling by ds_rate
                    conf_cls = conf_cls_temp[0:len_cls_temp:4]
                    conf_dict[idx_cls].extend(conf_cls)
    logger.info('###### Finish evaluating target domain train set in round {}! Time cost: {:.2f} seconds. ######'.format(round_idx, time.time()-start_eval))

    return conf_dict, pred_cls_num, save_prob_path, save_pred_path # return the dictionary containing all the class-wise confidence vectors

def test(model, model_D, device, save_round_eval_path, round_idx, test_num, args, logger):
    """Create the model and start the evaluation process.

    Evaluates mIoU on the target-domain *val* split at full 2048x1024
    resolution and saves colorized prediction maps.
    """
    ## scorer
    scorer = ScoreUpdater(args.num_classes, test_num, logger)
    scorer.reset()

    ## test data loader
    testloader = data.DataLoader(TestDataSet(args.data_tgt_dir, args.data_tgt_test_list, crop_size=(1024,512), mean=IMG_MEAN, scale=False, mirror=False, set='val'),
                                 batch_size=1, shuffle=False, pin_memory=True)
    model.eval()
    model_D.eval()
    model.to(device)
    model_D.to(device)

    ## upsampling layer
    interp = nn.Upsample(size=(1024, 2048), mode='bilinear', align_corners=True)

    save_test_vis_path = osp.join(save_round_eval_path, 'testSet_vis')
    if not os.path.exists(save_test_vis_path):
        os.makedirs(save_test_vis_path)

    ## evaluation process
    logger.info('###### Start evaluating in target domain test set in round {}! ######'.format(round_idx))
    start_eval = time.time()
    with torch.no_grad():
        for index, batch in enumerate(testloader):
            image, label, _, name = batch
            label = label.cpu().data[0].numpy()
            _, output = model(image.to(device), model_D, 'target')
            output = interp(output).cpu().data[0].numpy()
            output = output.transpose(1,2,0)
            amax_output = np.asarray(np.argmax(output, axis=2), dtype=np.uint8)
            pred_label = amax_output.copy()
            scorer.update(pred_label.flatten(), label.flatten(), index)
            # save visualized seg maps & predication prob map
            amax_output_col = colorize_mask(amax_output)
            name = name[0].split('/')[-1]
            image_name = name.split('.')[0]
            # vis seg maps
            amax_output_col.save('%s/%s_color.png' % (save_test_vis_path, image_name))

    logger.info('###### Finish evaluating in target domain test set in round {}! Time cost: {:.2f} seconds. ######'.format(round_idx, time.time()-start_eval))

class ScoreUpdater(object):
    # only IoU are computed. accu, cls_accu, etc are ignored.
    def __init__(self, c_num, x_num, logger=None, label=None, info=None):
        # c_num: number of classes; x_num: number of samples expected
        self._confs = np.zeros((c_num, c_num))  # running confusion matrix
        self._per_cls_iou = np.zeros(c_num)
        self._logger = logger
        self._label = label
        self._info = info
        self._num_class = c_num
        self._num_sample = x_num

    @property
    def info(self):
        return self._info

    def reset(self):
        """Clear the confusion matrix and restart the timing/progress counters."""
        self._start = time.time()
        self._computed = np.zeros(self._num_sample) # one-dimension
        self._confs[:] = 0

    def fast_hist(self,label, pred_label, n):
        """Build an n x n confusion matrix from flattened GT and prediction arrays.

        Labels outside [0, n) (e.g. the 255 ignore index) are dropped.
        """
        k = (label >= 0) & (label < n)
        return np.bincount(n * label[k].astype(int) + pred_label[k], minlength=n ** 2).reshape(n, n)

    def per_class_iu(self,hist):
        """Per-class IoU from a confusion matrix: diag / (row + col - diag)."""
        return np.diag(hist) / (hist.sum(1) + hist.sum(0) - np.diag(hist))

    def do_updates(self, conf, i, computed=True):
        if computed:
            self._computed[i] = 1
        self._per_cls_iou = self.per_class_iu(conf)

    def update(self, pred_label, label, i, computed=True):
        """Accumulate one sample's confusion counts and log the running scores."""
        conf = self.fast_hist(label, pred_label, self._num_class)
        self._confs += conf
        self.do_updates(self._confs, i, computed)
        self.scores(i)

    def scores(self, i=None, logger=None):
        """Log progress/speed and running mean IoU; return the per-class IoUs."""
        x_num = self._num_sample
        ious = np.nan_to_num( self._per_cls_iou )

        logger = self._logger if logger is None else logger
        if logger is not None:
            if i is not None:
                speed = 1. * self._computed.sum() / (time.time() - self._start)
                logger.info('Done {}/{} with speed: {:.2f}/s'.format(i + 1, x_num, speed))
            name = '' if self._label is None else '{}, '.format(self._label)
            logger.info('{}mean iou: {:.2f}%'. \
                        format(name, np.mean(ious) * 100))
            with util.np_print_options(formatter={'float': '{:5.2f}'.format}):
                logger.info('\n{}'.format(ious * 100))

        return ious


def lr_poly(base_lr, iter, max_iter, power):
    """Polynomial learning-rate decay: base_lr * (1 - iter/max_iter)^power."""
    return base_lr * ((1 - float(iter) / max_iter) ** (power))

def adjust_learning_rate(optimizer, i_iter):
    """Apply poly decay to the generator optimizer (10x lr for the second group)."""
    lr = lr_poly(args.learning_rate, i_iter, args.num_steps, args.power)
    optimizer.param_groups[0]['lr'] = lr
    if len(optimizer.param_groups) > 1:
        optimizer.param_groups[1]['lr'] = lr * 10

def adjust_learning_rate_D(optimizer, i_iter):
    """Apply poly decay to the discriminator optimizer (10x lr for the second group)."""
    lr = lr_poly(args.learning_rate_D, i_iter, args.num_steps, args.power)
    optimizer.param_groups[0]['lr'] = lr
    if len(optimizer.param_groups) > 1:
        optimizer.param_groups[1]['lr'] = lr * 10

if __name__ == '__main__':
    main()
--------------------------------------------------------------------------------
/DA_train.py:
--------------------------------------------------------------------------------
import argparse
import torch
import torch.nn as nn
from torch.utils import data
import numpy as np
import torch.optim as optim
import torch.backends.cudnn as cudnn
import torch.nn.functional as F
import os
import os.path as osp
import random

from model.deeplab_multi import DeeplabMulti
from model.discriminator import FCDiscriminator
from model.discriminator import OutspaceDiscriminator
from dataset.gta5_dataset import GTA5DataSet
from dataset.cityscapes_dataset import cityscapesDataSet

# BGR channel means subtracted from inputs before feeding the network
IMG_MEAN = np.array((104.00698793, 116.66876762, 122.67891434), dtype=np.float32)

MODEL = 'DeepLab'
BATCH_SIZE = 1
NUM_WORKERS = 8
IGNORE_LABEL = 255
LEARNING_RATE = 2.5e-4
MOMENTUM = 0.9
NUM_CLASSES = 19
NUM_STEPS = 250000
NUM_STEPS_STOP = 150000  # early stopping
POWER = 0.9
# ---------------------------------------------------------------------------
# Default hyper-parameters / paths for the GTA5 -> Cityscapes adversarial
# domain-adaptation (DA) stage. All of them can be overridden from the CLI.
# ---------------------------------------------------------------------------
RESTORE_FROM = './model_weight/DeepLab_resnet_pretrained_init-f81d91e8.pth'
SAVE_PRED_EVERY = 2000
SNAPSHOT_DIR = './snapshots/'
WEIGHT_DECAY = 0.0005

LEARNING_RATE_D = 1e-4

SOURCE = 'GTA5'
INPUT_SIZE = '1280,720'
DATA_DIRECTORY = './data/GTA5/'
DATA_LIST_PATH = './dataset/gta5_list/train.txt'

TARGET = 'cityscapes'
INPUT_SIZE_TARGET = '1024,512'
DATA_DIRECTORY_TARGET = './data/Cityscapes/'
DATA_LIST_PATH_TARGET = './dataset/cityscapes_list/train.txt'
SET = 'train'


def get_arguments():
    """Parse all the arguments provided from the CLI.

    Returns:
        argparse.Namespace: the parsed arguments (one attribute per flag).
    """
    parser = argparse.ArgumentParser(description="DeepLab-ResNet Network")
    parser.add_argument("--model", type=str, default=MODEL,
                        help="available options : DeepLab")
    parser.add_argument("--target", type=str, default=TARGET,
                        help="available options : cityscapes")
    parser.add_argument("--batch-size", type=int, default=BATCH_SIZE,
                        help="Number of images sent to the network in one step.")
    parser.add_argument("--num-workers", type=int, default=NUM_WORKERS,
                        help="number of workers for multithread dataloading.")
    parser.add_argument("--data-dir", type=str, default=DATA_DIRECTORY,
                        help="Path to the directory containing the source dataset.")
    parser.add_argument("--data-list", type=str, default=DATA_LIST_PATH,
                        help="Path to the file listing the images in the source dataset.")
    parser.add_argument("--ignore-label", type=int, default=IGNORE_LABEL,
                        help="The index of the label to ignore during the training.")
    parser.add_argument("--input-size", type=str, default=INPUT_SIZE,
                        help="Comma-separated string with height and width of source images.")
    parser.add_argument("--data-dir-target", type=str, default=DATA_DIRECTORY_TARGET,
                        help="Path to the directory containing the target dataset.")
    parser.add_argument("--data-list-target", type=str, default=DATA_LIST_PATH_TARGET,
                        help="Path to the file listing the images in the target dataset.")
    parser.add_argument("--input-size-target", type=str, default=INPUT_SIZE_TARGET,
                        help="Comma-separated string with height and width of target images.")
    parser.add_argument("--is-training", action="store_true",
                        help="Whether to updates the running means and variances during the training.")
    parser.add_argument("--learning-rate", type=float, default=LEARNING_RATE,
                        help="Base learning rate for training with polynomial decay.")
    parser.add_argument("--learning-rate-D", type=float, default=LEARNING_RATE_D,
                        help="Base learning rate for discriminator.")
    parser.add_argument("--momentum", type=float, default=MOMENTUM,
                        help="Momentum component of the optimiser.")
    parser.add_argument("--not-restore-last", action="store_true",
                        help="Whether to not restore last (FC) layers.")
    parser.add_argument("--num-classes", type=int, default=NUM_CLASSES,
                        help="Number of classes to predict (including background).")
    parser.add_argument("--num-steps", type=int, default=NUM_STEPS,
                        help="Number of training steps.")
    parser.add_argument("--num-steps-stop", type=int, default=NUM_STEPS_STOP,
                        help="Number of training steps for early stopping.")
    parser.add_argument("--power", type=float, default=POWER,
                        help="Decay parameter to compute the learning rate.")
    parser.add_argument("--random-mirror", action="store_true",
                        help="Whether to randomly mirror the inputs during the training.")
    parser.add_argument("--random-scale", action="store_true",
                        help="Whether to randomly scale the inputs during the training.")
    parser.add_argument("--restore-from", type=str, default=RESTORE_FROM,
                        help="Where restore model parameters from.")
    parser.add_argument("--save-pred-every", type=int, default=SAVE_PRED_EVERY,
                        help="Save summaries and checkpoint every often.")
    parser.add_argument("--snapshot-dir", type=str, default=SNAPSHOT_DIR,
                        help="Where to save snapshots of the model.")
    parser.add_argument("--weight-decay", type=float, default=WEIGHT_DECAY,
                        help="Regularisation parameter for L2-loss.")
    parser.add_argument("--cpu", action='store_true', help="choose to use cpu device.")
    parser.add_argument("--set", type=str, default=SET,
                        help="choose adaptation set.")
    return parser.parse_args()


# Parsed once at import time; every function below reads this module-level
# namespace (kept for compatibility with the original script structure).
args = get_arguments()


def lr_poly(base_lr, iter, max_iter, power):
    """Polynomial learning-rate decay: base_lr * (1 - iter/max_iter)^power."""
    return base_lr * ((1 - float(iter) / max_iter) ** (power))


def adjust_learning_rate(optimizer, i_iter):
    """Apply poly decay to the segmentation optimizer.

    Param group 1 (if present) runs at 10x the base rate — the DeepLab
    convention for the freshly initialised classifier layers.
    """
    lr = lr_poly(args.learning_rate, i_iter, args.num_steps, args.power)
    optimizer.param_groups[0]['lr'] = lr
    if len(optimizer.param_groups) > 1:
        optimizer.param_groups[1]['lr'] = lr * 10


def adjust_learning_rate_D(optimizer, i_iter):
    """Apply poly decay to the discriminator optimizer (same 10x rule)."""
    lr = lr_poly(args.learning_rate_D, i_iter, args.num_steps, args.power)
    optimizer.param_groups[0]['lr'] = lr
    if len(optimizer.param_groups) > 1:
        optimizer.param_groups[1]['lr'] = lr * 10


def setup_seed(seed):
    """Seed torch/numpy/random and force deterministic cuDNN kernels."""
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    np.random.seed(seed)
    random.seed(seed)
    torch.backends.cudnn.deterministic = True


def main():
    """Create the model and start the adversarial adaptation training."""
    setup_seed(666)
    device = torch.device("cuda" if not args.cpu else "cpu")

    w, h = map(int, args.input_size.split(','))
    input_size = (w, h)

    w, h = map(int, args.input_size_target.split(','))
    input_size_target = (w, h)

    cudnn.enabled = True

    # Create network and load the ImageNet-pretrained backbone. The saved
    # checkpoint was exported from a DataParallel model, so keys look like
    # "module.layerX...."; we strip the leading "module." and additionally
    # remap "layer4.2" weights onto "layer5.0" (the auxiliary classifier
    # branch of DeeplabMulti).
    model = DeeplabMulti(num_classes=args.num_classes)
    saved_state_dict = torch.load(args.restore_from)
    new_params = model.state_dict().copy()
    for i in saved_state_dict:
        i_parts = i.split('.')
        if not args.num_classes == 19 or not i_parts[1] == 'layer5':
            if i_parts[1] == 'layer4' and i_parts[2] == '2':
                i_parts[1] = 'layer5'
                i_parts[2] = '0'
                new_params['.'.join(i_parts[1:])] = saved_state_dict[i]
            else:
                new_params['.'.join(i_parts[1:])] = saved_state_dict[i]
    model.load_state_dict(new_params)
    model.train()
    model.to(device)

    cudnn.benchmark = True

    # Two discriminators: D[0] works on the 2048-channel backbone features,
    # D[1] on the 19-channel softmaxed output space.
    num_class_list = [2048, 19]
    model_D = nn.ModuleList([FCDiscriminator(num_classes=num_class_list[i]).train().to(device) if i < 1
                             else OutspaceDiscriminator(num_classes=num_class_list[i]).train().to(device)
                             for i in range(2)])

    if not os.path.exists(args.snapshot_dir):
        os.makedirs(args.snapshot_dir)

    trainloader = data.DataLoader(
        GTA5DataSet(args.data_dir, args.data_list, max_iters=args.num_steps * args.batch_size,
                    crop_size=input_size,
                    scale=args.random_scale, mirror=args.random_mirror, mean=IMG_MEAN),
        batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers, pin_memory=True)

    trainloader_iter = enumerate(trainloader)

    targetloader = data.DataLoader(cityscapesDataSet(args.data_dir_target, args.data_list_target,
                                                     max_iters=args.num_steps * args.batch_size,
                                                     crop_size=input_size_target,
                                                     scale=False, mirror=args.random_mirror, mean=IMG_MEAN,
                                                     set=args.set),
                                   batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers,
                                   pin_memory=True)

    targetloader_iter = enumerate(targetloader)

    optimizer = optim.SGD(model.optim_parameters(args),
                          lr=args.learning_rate, momentum=args.momentum, weight_decay=args.weight_decay)
    optimizer.zero_grad()

    optimizer_D = optim.Adam(model_D.parameters(), lr=args.learning_rate_D, betas=(0.9, 0.99))
    optimizer_D.zero_grad()

    # Least-squares GAN criterion (MSE on real/fake targets). The upstream
    # AdaptSegNet code names this "bce_loss" although it is an MSE.
    adv_criterion = torch.nn.MSELoss()
    # FIX: honour the --ignore-label flag instead of hard-coding 255
    # (default is unchanged: IGNORE_LABEL == 255).
    seg_loss = torch.nn.CrossEntropyLoss(ignore_index=args.ignore_label)

    interp = nn.Upsample(size=(input_size[1], input_size[0]), mode='bilinear', align_corners=True)
    interp_target = nn.Upsample(size=(input_size_target[1], input_size_target[0]), mode='bilinear',
                                align_corners=True)

    # Labels for adversarial training.
    source_label = 0
    target_label = 1

    for i_iter in range(args.num_steps):

        optimizer.zero_grad()
        adjust_learning_rate(optimizer, i_iter)

        optimizer_D.zero_grad()
        adjust_learning_rate_D(optimizer_D, i_iter)

        # ---- train G ---------------------------------------------------
        # Don't accumulate grads in D while updating the generator.
        for param in model_D.parameters():
            param.requires_grad = False

        # Train with source: plain supervised segmentation loss.
        _, batch = next(trainloader_iter)
        images, labels, _, _ = batch
        images = images.to(device)
        labels = labels.long().to(device)

        feat_source, pred_source = model(images, model_D, 'source')
        pred_source = interp(pred_source)

        loss_seg = seg_loss(pred_source, labels)
        loss_seg.backward()

        # Train with target: fool both discriminators into predicting
        # "source" on target activations.
        _, batch = next(targetloader_iter)
        images, _, _ = batch
        images = images.to(device)

        feat_target, pred_target = model(images, model_D, 'target')
        pred_target = interp_target(pred_target)

        loss_adv = 0
        D_out = model_D[0](feat_target)
        loss_adv += adv_criterion(D_out, torch.full_like(D_out, source_label))
        D_out = model_D[1](F.softmax(pred_target, dim=1))
        loss_adv += adv_criterion(D_out, torch.full_like(D_out, source_label))
        loss_adv = loss_adv * 0.01  # adversarial weight from the paper
        loss_adv.backward()

        optimizer.step()

        # ---- train D ---------------------------------------------------
        # Bring back requires_grad for the discriminator update.
        for param in model_D.parameters():
            param.requires_grad = True

        # Train with source (real = source_label); detach so no gradients
        # flow back into the generator.
        loss_D_source = 0
        D_out_source = model_D[0](feat_source.detach())
        loss_D_source += adv_criterion(D_out_source, torch.full_like(D_out_source, source_label))
        D_out_source = model_D[1](F.softmax(pred_source.detach(), dim=1))
        loss_D_source += adv_criterion(D_out_source, torch.full_like(D_out_source, source_label))
        loss_D_source.backward()

        # Train with target (fake = target_label).
        loss_D_target = 0
        D_out_target = model_D[0](feat_target.detach())
        loss_D_target += adv_criterion(D_out_target, torch.full_like(D_out_target, target_label))
        D_out_target = model_D[1](F.softmax(pred_target.detach(), dim=1))
        loss_D_target += adv_criterion(D_out_target, torch.full_like(D_out_target, target_label))
        loss_D_target.backward()

        optimizer_D.step()

        if i_iter % 10 == 0:
            print('iter = {0:8d}/{1:8d}, loss_seg = {2:.3f} loss_adv = {3:.3f} loss_D_s = {4:.3f}, loss_D_t = {5:.3f}'.format(
                i_iter, args.num_steps, loss_seg.item(), loss_adv.item(), loss_D_source.item(), loss_D_target.item()))

        if i_iter >= args.num_steps_stop - 1:
            print('save model ...')
            torch.save(model.state_dict(), osp.join(args.snapshot_dir, 'GTA5_' + str(args.num_steps_stop) + '.pth'))
            torch.save(model_D.state_dict(), osp.join(args.snapshot_dir, 'GTA5_' + str(args.num_steps_stop) + '_D.pth'))
            break

        if i_iter % args.save_pred_every == 0 and i_iter != 0:
            print('taking snapshot ...')
            torch.save(model.state_dict(), osp.join(args.snapshot_dir, 'GTA5_' + str(i_iter) + '.pth'))
            torch.save(model_D.state_dict(), osp.join(args.snapshot_dir, 'GTA5_' + str(i_iter) + '_D.pth'))


if __name__ == '__main__':
    main()
/README.md: -------------------------------------------------------------------------------- 1 | # DAST_segmentation 2 | The source code of DAST: Unsupervised Domain Adaptation in Semantic Segmentation Based on Discriminator Attention and Self-Training. 3 | 4 | This is a [pytorch](http://pytorch.org/) implementation. 5 | 6 | ### Prerequisites 7 | - Python 3.6 8 | - GPU Memory >= 11G 9 | - Pytorch 1.6.0 10 | 11 | ### Getting started 12 | 13 | - Download [The GTA5 Dataset]( https://download.visinf.tu-darmstadt.de/data/from_games/ ) 14 | 15 | - Download [The SYNTHIA Dataset]( http://synthia-dataset.net/download-2/ ) 16 | 17 | - Download [The Cityscapes Dataset]( https://www.cityscapes-dataset.com/ ) 18 | 19 | - Download [The ImageNet pretrained model]( https://drive.google.com/drive/folders/1w7GZQTIkuGkNo4a87J3sSmPR2avdZw2_?usp=sharing ) 20 | 21 | The data folder is structured as follows: 22 | ``` 23 | ├── data/ 24 | │   ├── Cityscapes/ 25 | |   |   ├── gtFine/ 26 | |   |   ├── leftImg8bit/ 27 | │   ├── GTA5/ 28 | |   |   ├── images/ 29 | |   |   ├── labels/ 30 | │   └── 31 | └── model_weight/ 32 | │   ├── DeepLab_resnet_pretrained.pth 33 | ├── vgg16-00b39a1b-updated.pth 34 | ... 35 | ``` 36 | ### Train 37 | 1. First train DA and choose the best weight evaluated by our established [validation data]( https://drive.google.com/file/d/1P6Kev8qkISm3BNShPNt9ugbSKuHGHKxj/view?usp=sharing ) 38 | ``` 39 | CUDA_VISIBLE_DEVICES=0 python DA_train.py --snapshot-dir ./snapshots/GTA2Cityscapes 40 | ``` 41 | 2. Then train DAST for several rounds using the above weight.
42 | ``` 43 | CUDA_VISIBLE_DEVICES=0 python DAST_train.py --snapshot-dir ./snapshots/GTA2Cityscapes 44 | ``` 45 | 46 | ### Evaluate 47 | ``` 48 | CUDA_VISIBLE_DEVICES=0 python -u evaluate_bulk.py 49 | CUDA_VISIBLE_DEVICES=0 python -u iou_bulk.py 50 | ``` 51 | Our pretrained model is available via [Google Drive]( https://drive.google.com/drive/folders/1w7GZQTIkuGkNo4a87J3sSmPR2avdZw2_?usp=sharing ) 52 | 53 | ### Citation 54 | This code is heavily borrowed from the baseline [AdaptSegNet]( https://github.com/wasidennis/AdaptSegNet ) and [BDL]( https://github.com/liyunsheng13/BDL ) 55 | -------------------------------------------------------------------------------- /dataset/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yufei1900/DAST_segmentation/70b41a52da7b6cb6adc6fac156eb1e2dc7139e95/dataset/__init__.py -------------------------------------------------------------------------------- /dataset/cbst_dataset.py: -------------------------------------------------------------------------------- 1 | import os 2 | import os.path as osp 3 | import numpy as np 4 | import random 5 | import matplotlib.pyplot as plt 6 | import collections 7 | import torch 8 | import torchvision 9 | from torch.utils import data 10 | from PIL import Image 11 | 12 | class SrcSTDataSet(data.Dataset): 13 | def __init__(self, root, list_path, max_iters=None, crop_size=(321, 321), mean=(128, 128, 128), scale=True, mirror=True, ignore_label=255): 14 | self.root = root 15 | self.list_path = list_path 16 | self.crop_size = crop_size 17 | self.scale = scale 18 | self.ignore_label = ignore_label 19 | self.mean = mean 20 | self.is_mirror = mirror 21 | # self.mean_bgr = np.array([104.00698793, 116.66876762, 122.67891434]) 22 | self.img_ids = [i_id.strip() for i_id in open(list_path)] 23 | if not max_iters==None: 24 | self.img_ids = self.img_ids * int(np.ceil(float(max_iters) / len(self.img_ids))) 25 | self.files = [] 26 | 27 | 
self.id_to_trainid = {7: 0, 8: 1, 11: 2, 12: 3, 13: 4, 17: 5, 28 | 19: 6, 20: 7, 21: 8, 22: 9, 23: 10, 24: 11, 25: 12, 29 | 26: 13, 27: 14, 28: 15, 31: 16, 32: 17, 33: 18} 30 | 31 | # for split in ["train", "trainval", "val"]: 32 | for name in self.img_ids: 33 | img_file = osp.join(self.root, "images/%s" % name) 34 | label_file = osp.join(self.root, "labels/%s" % name) 35 | self.files.append({ 36 | "img": img_file, 37 | "label": label_file, 38 | "name": name 39 | }) 40 | 41 | def __len__(self): 42 | return len(self.files) 43 | 44 | 45 | def __getitem__(self, index): 46 | datafiles = self.files[index] 47 | 48 | image = Image.open(datafiles["img"]).convert('RGB') 49 | label = Image.open(datafiles["label"]) 50 | name = datafiles["name"] 51 | 52 | # resize 53 | image = image.resize(self.crop_size, Image.BICUBIC) 54 | label = label.resize(self.crop_size, Image.NEAREST) 55 | 56 | image = np.asarray(image, np.float32) 57 | label = np.asarray(label, np.float32) 58 | 59 | # re-assign labels to match the format of Cityscapes 60 | label_copy = 255 * np.ones(label.shape, dtype=np.float32) 61 | for k, v in self.id_to_trainid.items(): 62 | label_copy[label == k] = v 63 | 64 | size = image.shape 65 | image = image[:, :, ::-1] # change to BGR 66 | image -= self.mean 67 | image = image.transpose((2, 0, 1)) 68 | 69 | return image.copy(), label_copy.copy(), np.array(size), name 70 | 71 | class TgtSTDataSet(data.Dataset): 72 | def __init__(self, root, list_path, pseudo_root=None, max_iters=None, crop_size=(321, 321), mean=(128, 128, 128), scale=True, mirror=True, ignore_label=255, set='train'): 73 | self.root = root 74 | self.pseudo_root = pseudo_root 75 | self.list_path = list_path 76 | self.crop_size = crop_size 77 | self.scale = scale 78 | self.ignore_label = ignore_label 79 | self.mean = mean 80 | self.is_mirror = mirror 81 | # self.mean_bgr = np.array([104.00698793, 116.66876762, 122.67891434]) 82 | self.img_ids = [] 83 | self.label_ids = [] 84 | with open(self.list_path) as f: 
85 | for item in f.readlines(): 86 | fields = item.strip().split('\t') 87 | self.img_ids.append(fields[0]) 88 | self.label_ids.append(fields[1]) 89 | if not max_iters==None: 90 | self.img_ids = self.img_ids * int(np.ceil(float(max_iters) / len(self.img_ids))) 91 | self.label_ids = self.label_ids * int(np.ceil(float(max_iters) / len(self.label_ids))) 92 | self.files = [] 93 | self.set = set 94 | # for split in ["train", "trainval", "val"]: 95 | for idx in range(len(self.img_ids)): 96 | img_name = self.img_ids[idx] 97 | label_name = self.label_ids[idx] 98 | img_file = osp.join(self.root, "leftImg8bit/%s/%s" % (self.set, img_name)) 99 | label_file = label_name 100 | self.files.append({ 101 | "img": img_file, 102 | "label": label_file, 103 | "name": img_name 104 | }) 105 | 106 | def __len__(self): 107 | return len(self.files) 108 | 109 | def __getitem__(self, index): 110 | datafiles = self.files[index] 111 | 112 | image = Image.open(datafiles["img"]).convert('RGB') 113 | label = Image.open(datafiles["label"]) 114 | name = datafiles["name"] 115 | 116 | # resize 117 | image = image.resize(self.crop_size, Image.BICUBIC) 118 | label = label.resize(self.crop_size, Image.NEAREST) 119 | 120 | image = np.asarray(image, np.float32) 121 | label = np.asarray(label, np.float32) 122 | 123 | size = image.shape 124 | image = image[:, :, ::-1] # change to BGR 125 | image -= self.mean 126 | image = image.transpose((2, 0, 1)) 127 | 128 | return image.copy(), label.copy(), np.array(size), name 129 | 130 | 131 | class TestDataSet(data.Dataset): 132 | def __init__(self, root, list_path, max_iters=None, crop_size=(321, 321), mean=(128, 128, 128), scale=True, mirror=True, ignore_label=255, set='val'): 133 | self.root = root 134 | self.list_path = list_path 135 | self.crop_size = crop_size 136 | self.scale = scale 137 | self.ignore_label = ignore_label 138 | self.mean = mean 139 | self.is_mirror = mirror 140 | # self.mean_bgr = np.array([104.00698793, 116.66876762, 122.67891434]) 141 | 
self.img_ids = [i_id.strip() for i_id in open(list_path)] 142 | if not max_iters==None: 143 | self.img_ids = self.img_ids * int(np.ceil(float(max_iters) / len(self.img_ids))) 144 | self.files = [] 145 | self.set = set 146 | self.id_to_trainid = {7: 0, 8: 1, 11: 2, 12: 3, 13: 4, 17: 5, 147 | 19: 6, 20: 7, 21: 8, 22: 9, 23: 10, 24: 11, 25: 12, 148 | 26: 13, 27: 14, 28: 15, 31: 16, 32: 17, 33: 18} 149 | # for split in ["train", "trainval", "val"]: 150 | for name in self.img_ids: 151 | img_file = osp.join(self.root, "leftImg8bit/%s/%s" % (self.set, name)) 152 | label_file = osp.join(self.root, "gtFine/%s/%s" % (self.set, name.replace('leftImg8bit','gtFine_labelIds'))) 153 | self.files.append({ 154 | "img": img_file, 155 | "label": label_file, 156 | "name": name 157 | }) 158 | 159 | def __len__(self): 160 | return len(self.files) 161 | 162 | def __getitem__(self, index): 163 | datafiles = self.files[index] 164 | 165 | image = Image.open(datafiles["img"]).convert('RGB') 166 | label = Image.open(datafiles["label"]) 167 | name = datafiles["name"] 168 | 169 | # resize 170 | image = image.resize(self.crop_size, Image.BICUBIC) 171 | if self.set == 'train': 172 | label = label.resize(self.crop_size, Image.NEAREST) 173 | 174 | image = np.asarray(image, np.float32) 175 | label = np.asarray(label, np.float32) 176 | 177 | # re-assign labels to match the format of Cityscapes 178 | label_copy = 255 * np.ones(label.shape, dtype=np.float32) 179 | for k, v in self.id_to_trainid.items(): 180 | label_copy[label == k] = v 181 | 182 | size = image.shape 183 | image = image[:, :, ::-1] # change to BGR 184 | image -= self.mean 185 | image = image.transpose((2, 0, 1)) 186 | 187 | return image.copy(), label_copy.copy(), np.array(size), name 188 | 189 | -------------------------------------------------------------------------------- /dataset/cityscapes_dataset.py: -------------------------------------------------------------------------------- 1 | import os 2 | import os.path as osp 3 | import 
numpy as np 4 | import random 5 | import collections 6 | import torch 7 | import torchvision 8 | from torch.utils import data 9 | from PIL import Image 10 | 11 | class cityscapesDataSet(data.Dataset): 12 | def __init__(self, root, list_path, max_iters=None, crop_size=(321, 321), mean=(128, 128, 128), scale=True, mirror=True, ignore_label=255, set='val'): 13 | self.root = root 14 | self.list_path = list_path 15 | self.crop_size = crop_size 16 | self.scale = scale 17 | self.ignore_label = ignore_label 18 | self.mean = mean 19 | self.is_mirror = mirror 20 | # self.mean_bgr = np.array([104.00698793, 116.66876762, 122.67891434]) 21 | self.img_ids = [i_id.strip() for i_id in open(list_path)] 22 | if not max_iters==None: 23 | self.img_ids = self.img_ids * int(np.ceil(float(max_iters) / len(self.img_ids))) 24 | self.files = [] 25 | self.set = set 26 | # for split in ["train", "trainval", "val"]: 27 | for name in self.img_ids: 28 | img_file = osp.join(self.root, "leftImg8bit/%s/%s" % (self.set, name)) 29 | self.files.append({ 30 | "img": img_file, 31 | "name": name 32 | }) 33 | 34 | def __len__(self): 35 | return len(self.files) 36 | 37 | def __getitem__(self, index): 38 | datafiles = self.files[index] 39 | 40 | image = Image.open(datafiles["img"]).convert('RGB') 41 | name = datafiles["name"] 42 | 43 | # resize 44 | image = image.resize(self.crop_size, Image.BICUBIC) 45 | 46 | image = np.asarray(image, np.float32) 47 | 48 | size = image.shape 49 | image = image[:, :, ::-1] # change to BGR 50 | image -= self.mean 51 | image = image.transpose((2, 0, 1)) 52 | 53 | return image.copy(), np.array(size), name 54 | 55 | 56 | if __name__ == '__main__': 57 | dst = GTA5DataSet("./data", is_transform=True) 58 | trainloader = data.DataLoader(dst, batch_size=4) 59 | -------------------------------------------------------------------------------- /dataset/cityscapes_list/info.json: -------------------------------------------------------------------------------- 1 | { 2 | "classes":19, 3 | 
"label2train":[ 4 | [0, 255], 5 | [1, 255], 6 | [2, 255], 7 | [3, 255], 8 | [4, 255], 9 | [5, 255], 10 | [6, 255], 11 | [7, 0], 12 | [8, 1], 13 | [9, 255], 14 | [10, 255], 15 | [11, 2], 16 | [12, 3], 17 | [13, 4], 18 | [14, 255], 19 | [15, 255], 20 | [16, 255], 21 | [17, 5], 22 | [18, 255], 23 | [19, 6], 24 | [20, 7], 25 | [21, 8], 26 | [22, 9], 27 | [23, 10], 28 | [24, 11], 29 | [25, 12], 30 | [26, 13], 31 | [27, 14], 32 | [28, 15], 33 | [29, 255], 34 | [30, 255], 35 | [31, 16], 36 | [32, 17], 37 | [33, 18], 38 | [-1, 255]], 39 | "label":[ 40 | "road", 41 | "sidewalk", 42 | "building", 43 | "wall", 44 | "fence", 45 | "pole", 46 | "light", 47 | "sign", 48 | "vegetation", 49 | "terrain", 50 | "sky", 51 | "person", 52 | "rider", 53 | "car", 54 | "truck", 55 | "bus", 56 | "train", 57 | "motocycle", 58 | "bicycle"], 59 | "palette":[ 60 | [128,64,128], 61 | [244,35,232], 62 | [70,70,70], 63 | [102,102,156], 64 | [190,153,153], 65 | [153,153,153], 66 | [250,170,30], 67 | [220,220,0], 68 | [107,142,35], 69 | [152,251,152], 70 | [70,130,180], 71 | [220,20,60], 72 | [255,0,0], 73 | [0,0,142], 74 | [0,0,70], 75 | [0,60,100], 76 | [0,80,100], 77 | [0,0,230], 78 | [119,11,32], 79 | [0,0,0]], 80 | "mean":[ 81 | 73.158359210711552, 82 | 82.908917542625858, 83 | 72.392398761941593], 84 | "std":[ 85 | 47.675755341814678, 86 | 48.494214368814916, 87 | 47.736546325441594] 88 | } 89 | -------------------------------------------------------------------------------- /dataset/cityscapes_list/label.txt: -------------------------------------------------------------------------------- 1 | frankfurt/frankfurt_000001_007973_gtFine_labelIds.png 2 | frankfurt/frankfurt_000001_025921_gtFine_labelIds.png 3 | frankfurt/frankfurt_000001_062016_gtFine_labelIds.png 4 | frankfurt/frankfurt_000001_049078_gtFine_labelIds.png 5 | frankfurt/frankfurt_000000_009561_gtFine_labelIds.png 6 | frankfurt/frankfurt_000001_013710_gtFine_labelIds.png 7 | frankfurt/frankfurt_000001_041664_gtFine_labelIds.png 8 | 
frankfurt/frankfurt_000000_013240_gtFine_labelIds.png 9 | frankfurt/frankfurt_000001_044787_gtFine_labelIds.png 10 | frankfurt/frankfurt_000001_015328_gtFine_labelIds.png 11 | frankfurt/frankfurt_000001_073243_gtFine_labelIds.png 12 | frankfurt/frankfurt_000001_034816_gtFine_labelIds.png 13 | frankfurt/frankfurt_000001_041074_gtFine_labelIds.png 14 | frankfurt/frankfurt_000001_005898_gtFine_labelIds.png 15 | frankfurt/frankfurt_000000_022254_gtFine_labelIds.png 16 | frankfurt/frankfurt_000001_044658_gtFine_labelIds.png 17 | frankfurt/frankfurt_000001_009504_gtFine_labelIds.png 18 | frankfurt/frankfurt_000001_024927_gtFine_labelIds.png 19 | frankfurt/frankfurt_000001_017842_gtFine_labelIds.png 20 | frankfurt/frankfurt_000001_068208_gtFine_labelIds.png 21 | frankfurt/frankfurt_000001_013016_gtFine_labelIds.png 22 | frankfurt/frankfurt_000001_010156_gtFine_labelIds.png 23 | frankfurt/frankfurt_000000_002963_gtFine_labelIds.png 24 | frankfurt/frankfurt_000001_020693_gtFine_labelIds.png 25 | frankfurt/frankfurt_000001_078803_gtFine_labelIds.png 26 | frankfurt/frankfurt_000001_025713_gtFine_labelIds.png 27 | frankfurt/frankfurt_000001_007285_gtFine_labelIds.png 28 | frankfurt/frankfurt_000001_070099_gtFine_labelIds.png 29 | frankfurt/frankfurt_000000_009291_gtFine_labelIds.png 30 | frankfurt/frankfurt_000000_019607_gtFine_labelIds.png 31 | frankfurt/frankfurt_000001_068063_gtFine_labelIds.png 32 | frankfurt/frankfurt_000000_003920_gtFine_labelIds.png 33 | frankfurt/frankfurt_000001_077233_gtFine_labelIds.png 34 | frankfurt/frankfurt_000001_029086_gtFine_labelIds.png 35 | frankfurt/frankfurt_000001_060545_gtFine_labelIds.png 36 | frankfurt/frankfurt_000001_001464_gtFine_labelIds.png 37 | frankfurt/frankfurt_000001_028590_gtFine_labelIds.png 38 | frankfurt/frankfurt_000001_016462_gtFine_labelIds.png 39 | frankfurt/frankfurt_000001_060422_gtFine_labelIds.png 40 | frankfurt/frankfurt_000001_009058_gtFine_labelIds.png 41 | frankfurt/frankfurt_000001_080830_gtFine_labelIds.png 
42 | frankfurt/frankfurt_000001_012870_gtFine_labelIds.png 43 | frankfurt/frankfurt_000001_077434_gtFine_labelIds.png 44 | frankfurt/frankfurt_000001_033655_gtFine_labelIds.png 45 | frankfurt/frankfurt_000001_051516_gtFine_labelIds.png 46 | frankfurt/frankfurt_000001_044413_gtFine_labelIds.png 47 | frankfurt/frankfurt_000001_055172_gtFine_labelIds.png 48 | frankfurt/frankfurt_000001_040575_gtFine_labelIds.png 49 | frankfurt/frankfurt_000000_020215_gtFine_labelIds.png 50 | frankfurt/frankfurt_000000_017228_gtFine_labelIds.png 51 | frankfurt/frankfurt_000001_041354_gtFine_labelIds.png 52 | frankfurt/frankfurt_000000_008206_gtFine_labelIds.png 53 | frankfurt/frankfurt_000001_043564_gtFine_labelIds.png 54 | frankfurt/frankfurt_000001_032711_gtFine_labelIds.png 55 | frankfurt/frankfurt_000001_064130_gtFine_labelIds.png 56 | frankfurt/frankfurt_000001_053102_gtFine_labelIds.png 57 | frankfurt/frankfurt_000001_082087_gtFine_labelIds.png 58 | frankfurt/frankfurt_000001_057478_gtFine_labelIds.png 59 | frankfurt/frankfurt_000001_007407_gtFine_labelIds.png 60 | frankfurt/frankfurt_000001_008200_gtFine_labelIds.png 61 | frankfurt/frankfurt_000001_038844_gtFine_labelIds.png 62 | frankfurt/frankfurt_000001_016029_gtFine_labelIds.png 63 | frankfurt/frankfurt_000001_058176_gtFine_labelIds.png 64 | frankfurt/frankfurt_000001_057181_gtFine_labelIds.png 65 | frankfurt/frankfurt_000001_039895_gtFine_labelIds.png 66 | frankfurt/frankfurt_000000_000294_gtFine_labelIds.png 67 | frankfurt/frankfurt_000001_055062_gtFine_labelIds.png 68 | frankfurt/frankfurt_000001_083029_gtFine_labelIds.png 69 | frankfurt/frankfurt_000001_010444_gtFine_labelIds.png 70 | frankfurt/frankfurt_000001_041517_gtFine_labelIds.png 71 | frankfurt/frankfurt_000001_069633_gtFine_labelIds.png 72 | frankfurt/frankfurt_000001_020287_gtFine_labelIds.png 73 | frankfurt/frankfurt_000001_012038_gtFine_labelIds.png 74 | frankfurt/frankfurt_000001_046504_gtFine_labelIds.png 75 | 
frankfurt/frankfurt_000001_032556_gtFine_labelIds.png 76 | frankfurt/frankfurt_000000_001751_gtFine_labelIds.png 77 | frankfurt/frankfurt_000001_000538_gtFine_labelIds.png 78 | frankfurt/frankfurt_000001_083852_gtFine_labelIds.png 79 | frankfurt/frankfurt_000001_077092_gtFine_labelIds.png 80 | frankfurt/frankfurt_000001_017101_gtFine_labelIds.png 81 | frankfurt/frankfurt_000001_044525_gtFine_labelIds.png 82 | frankfurt/frankfurt_000001_005703_gtFine_labelIds.png 83 | frankfurt/frankfurt_000001_080391_gtFine_labelIds.png 84 | frankfurt/frankfurt_000001_038418_gtFine_labelIds.png 85 | frankfurt/frankfurt_000001_066832_gtFine_labelIds.png 86 | frankfurt/frankfurt_000000_003357_gtFine_labelIds.png 87 | frankfurt/frankfurt_000000_020880_gtFine_labelIds.png 88 | frankfurt/frankfurt_000001_062396_gtFine_labelIds.png 89 | frankfurt/frankfurt_000001_046272_gtFine_labelIds.png 90 | frankfurt/frankfurt_000001_062509_gtFine_labelIds.png 91 | frankfurt/frankfurt_000001_054415_gtFine_labelIds.png 92 | frankfurt/frankfurt_000001_021406_gtFine_labelIds.png 93 | frankfurt/frankfurt_000001_030310_gtFine_labelIds.png 94 | frankfurt/frankfurt_000000_014480_gtFine_labelIds.png 95 | frankfurt/frankfurt_000001_005410_gtFine_labelIds.png 96 | frankfurt/frankfurt_000000_022797_gtFine_labelIds.png 97 | frankfurt/frankfurt_000001_035144_gtFine_labelIds.png 98 | frankfurt/frankfurt_000001_014565_gtFine_labelIds.png 99 | frankfurt/frankfurt_000001_065850_gtFine_labelIds.png 100 | frankfurt/frankfurt_000000_000576_gtFine_labelIds.png 101 | frankfurt/frankfurt_000001_065617_gtFine_labelIds.png 102 | frankfurt/frankfurt_000000_005543_gtFine_labelIds.png 103 | frankfurt/frankfurt_000001_055709_gtFine_labelIds.png 104 | frankfurt/frankfurt_000001_027325_gtFine_labelIds.png 105 | frankfurt/frankfurt_000001_011835_gtFine_labelIds.png 106 | frankfurt/frankfurt_000001_046779_gtFine_labelIds.png 107 | frankfurt/frankfurt_000001_064305_gtFine_labelIds.png 108 | 
frankfurt/frankfurt_000001_012738_gtFine_labelIds.png 109 | frankfurt/frankfurt_000001_048355_gtFine_labelIds.png 110 | frankfurt/frankfurt_000001_019969_gtFine_labelIds.png 111 | frankfurt/frankfurt_000001_080091_gtFine_labelIds.png 112 | frankfurt/frankfurt_000000_011007_gtFine_labelIds.png 113 | frankfurt/frankfurt_000000_015676_gtFine_labelIds.png 114 | frankfurt/frankfurt_000001_044227_gtFine_labelIds.png 115 | frankfurt/frankfurt_000001_055387_gtFine_labelIds.png 116 | frankfurt/frankfurt_000001_038245_gtFine_labelIds.png 117 | frankfurt/frankfurt_000001_059642_gtFine_labelIds.png 118 | frankfurt/frankfurt_000001_030669_gtFine_labelIds.png 119 | frankfurt/frankfurt_000001_068772_gtFine_labelIds.png 120 | frankfurt/frankfurt_000001_079206_gtFine_labelIds.png 121 | frankfurt/frankfurt_000001_055306_gtFine_labelIds.png 122 | frankfurt/frankfurt_000001_012699_gtFine_labelIds.png 123 | frankfurt/frankfurt_000001_042384_gtFine_labelIds.png 124 | frankfurt/frankfurt_000001_054077_gtFine_labelIds.png 125 | frankfurt/frankfurt_000001_010830_gtFine_labelIds.png 126 | frankfurt/frankfurt_000001_052120_gtFine_labelIds.png 127 | frankfurt/frankfurt_000001_032018_gtFine_labelIds.png 128 | frankfurt/frankfurt_000001_051737_gtFine_labelIds.png 129 | frankfurt/frankfurt_000001_028335_gtFine_labelIds.png 130 | frankfurt/frankfurt_000001_049770_gtFine_labelIds.png 131 | frankfurt/frankfurt_000001_054884_gtFine_labelIds.png 132 | frankfurt/frankfurt_000001_019698_gtFine_labelIds.png 133 | frankfurt/frankfurt_000000_011461_gtFine_labelIds.png 134 | frankfurt/frankfurt_000000_001016_gtFine_labelIds.png 135 | frankfurt/frankfurt_000001_062250_gtFine_labelIds.png 136 | frankfurt/frankfurt_000001_004736_gtFine_labelIds.png 137 | frankfurt/frankfurt_000001_068682_gtFine_labelIds.png 138 | frankfurt/frankfurt_000000_006589_gtFine_labelIds.png 139 | frankfurt/frankfurt_000000_011810_gtFine_labelIds.png 140 | frankfurt/frankfurt_000001_066574_gtFine_labelIds.png 141 | 
frankfurt/frankfurt_000001_048654_gtFine_labelIds.png 142 | frankfurt/frankfurt_000001_049209_gtFine_labelIds.png 143 | frankfurt/frankfurt_000001_042098_gtFine_labelIds.png 144 | frankfurt/frankfurt_000001_031416_gtFine_labelIds.png 145 | frankfurt/frankfurt_000000_009969_gtFine_labelIds.png 146 | frankfurt/frankfurt_000001_038645_gtFine_labelIds.png 147 | frankfurt/frankfurt_000001_020046_gtFine_labelIds.png 148 | frankfurt/frankfurt_000001_054219_gtFine_labelIds.png 149 | frankfurt/frankfurt_000001_002759_gtFine_labelIds.png 150 | frankfurt/frankfurt_000001_066438_gtFine_labelIds.png 151 | frankfurt/frankfurt_000000_020321_gtFine_labelIds.png 152 | frankfurt/frankfurt_000001_002646_gtFine_labelIds.png 153 | frankfurt/frankfurt_000001_046126_gtFine_labelIds.png 154 | frankfurt/frankfurt_000000_002196_gtFine_labelIds.png 155 | frankfurt/frankfurt_000001_057954_gtFine_labelIds.png 156 | frankfurt/frankfurt_000001_011715_gtFine_labelIds.png 157 | frankfurt/frankfurt_000000_021879_gtFine_labelIds.png 158 | frankfurt/frankfurt_000001_082466_gtFine_labelIds.png 159 | frankfurt/frankfurt_000000_003025_gtFine_labelIds.png 160 | frankfurt/frankfurt_000001_023369_gtFine_labelIds.png 161 | frankfurt/frankfurt_000001_061682_gtFine_labelIds.png 162 | frankfurt/frankfurt_000001_017459_gtFine_labelIds.png 163 | frankfurt/frankfurt_000001_059789_gtFine_labelIds.png 164 | frankfurt/frankfurt_000001_073464_gtFine_labelIds.png 165 | frankfurt/frankfurt_000001_063045_gtFine_labelIds.png 166 | frankfurt/frankfurt_000001_064651_gtFine_labelIds.png 167 | frankfurt/frankfurt_000000_013382_gtFine_labelIds.png 168 | frankfurt/frankfurt_000001_002512_gtFine_labelIds.png 169 | frankfurt/frankfurt_000001_032942_gtFine_labelIds.png 170 | frankfurt/frankfurt_000001_010600_gtFine_labelIds.png 171 | frankfurt/frankfurt_000001_030067_gtFine_labelIds.png 172 | frankfurt/frankfurt_000001_014741_gtFine_labelIds.png 173 | frankfurt/frankfurt_000000_021667_gtFine_labelIds.png 174 | 
frankfurt/frankfurt_000001_051807_gtFine_labelIds.png 175 | frankfurt/frankfurt_000001_019854_gtFine_labelIds.png 176 | frankfurt/frankfurt_000001_015768_gtFine_labelIds.png 177 | frankfurt/frankfurt_000001_007857_gtFine_labelIds.png 178 | frankfurt/frankfurt_000001_058914_gtFine_labelIds.png 179 | frankfurt/frankfurt_000000_012868_gtFine_labelIds.png 180 | frankfurt/frankfurt_000000_013942_gtFine_labelIds.png 181 | frankfurt/frankfurt_000001_014406_gtFine_labelIds.png 182 | frankfurt/frankfurt_000001_049298_gtFine_labelIds.png 183 | frankfurt/frankfurt_000001_023769_gtFine_labelIds.png 184 | frankfurt/frankfurt_000001_012519_gtFine_labelIds.png 185 | frankfurt/frankfurt_000001_064925_gtFine_labelIds.png 186 | frankfurt/frankfurt_000001_072295_gtFine_labelIds.png 187 | frankfurt/frankfurt_000001_058504_gtFine_labelIds.png 188 | frankfurt/frankfurt_000001_059119_gtFine_labelIds.png 189 | frankfurt/frankfurt_000001_015091_gtFine_labelIds.png 190 | frankfurt/frankfurt_000001_058057_gtFine_labelIds.png 191 | frankfurt/frankfurt_000001_003056_gtFine_labelIds.png 192 | frankfurt/frankfurt_000001_007622_gtFine_labelIds.png 193 | frankfurt/frankfurt_000001_016273_gtFine_labelIds.png 194 | frankfurt/frankfurt_000001_035864_gtFine_labelIds.png 195 | frankfurt/frankfurt_000001_067092_gtFine_labelIds.png 196 | frankfurt/frankfurt_000000_013067_gtFine_labelIds.png 197 | frankfurt/frankfurt_000001_067474_gtFine_labelIds.png 198 | frankfurt/frankfurt_000001_060135_gtFine_labelIds.png 199 | frankfurt/frankfurt_000000_018797_gtFine_labelIds.png 200 | frankfurt/frankfurt_000000_005898_gtFine_labelIds.png 201 | frankfurt/frankfurt_000001_055603_gtFine_labelIds.png 202 | frankfurt/frankfurt_000001_060906_gtFine_labelIds.png 203 | frankfurt/frankfurt_000001_062653_gtFine_labelIds.png 204 | frankfurt/frankfurt_000000_004617_gtFine_labelIds.png 205 | frankfurt/frankfurt_000001_055538_gtFine_labelIds.png 206 | frankfurt/frankfurt_000000_008451_gtFine_labelIds.png 207 | 
frankfurt/frankfurt_000001_052594_gtFine_labelIds.png 208 | frankfurt/frankfurt_000001_004327_gtFine_labelIds.png 209 | frankfurt/frankfurt_000001_075296_gtFine_labelIds.png 210 | frankfurt/frankfurt_000001_073088_gtFine_labelIds.png 211 | frankfurt/frankfurt_000001_005184_gtFine_labelIds.png 212 | frankfurt/frankfurt_000000_016286_gtFine_labelIds.png 213 | frankfurt/frankfurt_000001_008688_gtFine_labelIds.png 214 | frankfurt/frankfurt_000000_011074_gtFine_labelIds.png 215 | frankfurt/frankfurt_000001_056580_gtFine_labelIds.png 216 | frankfurt/frankfurt_000001_067735_gtFine_labelIds.png 217 | frankfurt/frankfurt_000001_034047_gtFine_labelIds.png 218 | frankfurt/frankfurt_000001_076502_gtFine_labelIds.png 219 | frankfurt/frankfurt_000001_071288_gtFine_labelIds.png 220 | frankfurt/frankfurt_000001_067295_gtFine_labelIds.png 221 | frankfurt/frankfurt_000001_071781_gtFine_labelIds.png 222 | frankfurt/frankfurt_000000_012121_gtFine_labelIds.png 223 | frankfurt/frankfurt_000001_004859_gtFine_labelIds.png 224 | frankfurt/frankfurt_000001_073911_gtFine_labelIds.png 225 | frankfurt/frankfurt_000001_047552_gtFine_labelIds.png 226 | frankfurt/frankfurt_000001_037705_gtFine_labelIds.png 227 | frankfurt/frankfurt_000001_025512_gtFine_labelIds.png 228 | frankfurt/frankfurt_000001_047178_gtFine_labelIds.png 229 | frankfurt/frankfurt_000001_014221_gtFine_labelIds.png 230 | frankfurt/frankfurt_000000_007365_gtFine_labelIds.png 231 | frankfurt/frankfurt_000001_049698_gtFine_labelIds.png 232 | frankfurt/frankfurt_000001_065160_gtFine_labelIds.png 233 | frankfurt/frankfurt_000001_061763_gtFine_labelIds.png 234 | frankfurt/frankfurt_000000_010351_gtFine_labelIds.png 235 | frankfurt/frankfurt_000001_072155_gtFine_labelIds.png 236 | frankfurt/frankfurt_000001_023235_gtFine_labelIds.png 237 | frankfurt/frankfurt_000000_015389_gtFine_labelIds.png 238 | frankfurt/frankfurt_000000_009688_gtFine_labelIds.png 239 | frankfurt/frankfurt_000000_016005_gtFine_labelIds.png 240 | 
frankfurt/frankfurt_000001_054640_gtFine_labelIds.png 241 | frankfurt/frankfurt_000001_029600_gtFine_labelIds.png 242 | frankfurt/frankfurt_000001_028232_gtFine_labelIds.png 243 | frankfurt/frankfurt_000001_050686_gtFine_labelIds.png 244 | frankfurt/frankfurt_000001_013496_gtFine_labelIds.png 245 | frankfurt/frankfurt_000001_066092_gtFine_labelIds.png 246 | frankfurt/frankfurt_000001_009854_gtFine_labelIds.png 247 | frankfurt/frankfurt_000001_067178_gtFine_labelIds.png 248 | frankfurt/frankfurt_000001_028854_gtFine_labelIds.png 249 | frankfurt/frankfurt_000001_083199_gtFine_labelIds.png 250 | frankfurt/frankfurt_000001_064798_gtFine_labelIds.png 251 | frankfurt/frankfurt_000001_018113_gtFine_labelIds.png 252 | frankfurt/frankfurt_000001_050149_gtFine_labelIds.png 253 | frankfurt/frankfurt_000001_048196_gtFine_labelIds.png 254 | frankfurt/frankfurt_000000_001236_gtFine_labelIds.png 255 | frankfurt/frankfurt_000000_017476_gtFine_labelIds.png 256 | frankfurt/frankfurt_000001_003588_gtFine_labelIds.png 257 | frankfurt/frankfurt_000001_021825_gtFine_labelIds.png 258 | frankfurt/frankfurt_000000_010763_gtFine_labelIds.png 259 | frankfurt/frankfurt_000001_062793_gtFine_labelIds.png 260 | frankfurt/frankfurt_000001_029236_gtFine_labelIds.png 261 | frankfurt/frankfurt_000001_075984_gtFine_labelIds.png 262 | frankfurt/frankfurt_000001_031266_gtFine_labelIds.png 263 | frankfurt/frankfurt_000001_043395_gtFine_labelIds.png 264 | frankfurt/frankfurt_000001_040732_gtFine_labelIds.png 265 | frankfurt/frankfurt_000001_011162_gtFine_labelIds.png 266 | frankfurt/frankfurt_000000_012009_gtFine_labelIds.png 267 | frankfurt/frankfurt_000001_042733_gtFine_labelIds.png 268 | lindau/lindau_000052_000019_gtFine_labelIds.png 269 | lindau/lindau_000009_000019_gtFine_labelIds.png 270 | lindau/lindau_000037_000019_gtFine_labelIds.png 271 | lindau/lindau_000047_000019_gtFine_labelIds.png 272 | lindau/lindau_000015_000019_gtFine_labelIds.png 273 | lindau/lindau_000030_000019_gtFine_labelIds.png 
274 | lindau/lindau_000012_000019_gtFine_labelIds.png 275 | lindau/lindau_000032_000019_gtFine_labelIds.png 276 | lindau/lindau_000046_000019_gtFine_labelIds.png 277 | lindau/lindau_000000_000019_gtFine_labelIds.png 278 | lindau/lindau_000031_000019_gtFine_labelIds.png 279 | lindau/lindau_000011_000019_gtFine_labelIds.png 280 | lindau/lindau_000027_000019_gtFine_labelIds.png 281 | lindau/lindau_000054_000019_gtFine_labelIds.png 282 | lindau/lindau_000026_000019_gtFine_labelIds.png 283 | lindau/lindau_000017_000019_gtFine_labelIds.png 284 | lindau/lindau_000023_000019_gtFine_labelIds.png 285 | lindau/lindau_000005_000019_gtFine_labelIds.png 286 | lindau/lindau_000056_000019_gtFine_labelIds.png 287 | lindau/lindau_000025_000019_gtFine_labelIds.png 288 | lindau/lindau_000045_000019_gtFine_labelIds.png 289 | lindau/lindau_000014_000019_gtFine_labelIds.png 290 | lindau/lindau_000004_000019_gtFine_labelIds.png 291 | lindau/lindau_000021_000019_gtFine_labelIds.png 292 | lindau/lindau_000049_000019_gtFine_labelIds.png 293 | lindau/lindau_000033_000019_gtFine_labelIds.png 294 | lindau/lindau_000042_000019_gtFine_labelIds.png 295 | lindau/lindau_000013_000019_gtFine_labelIds.png 296 | lindau/lindau_000024_000019_gtFine_labelIds.png 297 | lindau/lindau_000002_000019_gtFine_labelIds.png 298 | lindau/lindau_000043_000019_gtFine_labelIds.png 299 | lindau/lindau_000016_000019_gtFine_labelIds.png 300 | lindau/lindau_000050_000019_gtFine_labelIds.png 301 | lindau/lindau_000018_000019_gtFine_labelIds.png 302 | lindau/lindau_000007_000019_gtFine_labelIds.png 303 | lindau/lindau_000048_000019_gtFine_labelIds.png 304 | lindau/lindau_000022_000019_gtFine_labelIds.png 305 | lindau/lindau_000053_000019_gtFine_labelIds.png 306 | lindau/lindau_000038_000019_gtFine_labelIds.png 307 | lindau/lindau_000001_000019_gtFine_labelIds.png 308 | lindau/lindau_000036_000019_gtFine_labelIds.png 309 | lindau/lindau_000035_000019_gtFine_labelIds.png 310 | lindau/lindau_000003_000019_gtFine_labelIds.png 
311 | lindau/lindau_000034_000019_gtFine_labelIds.png 312 | lindau/lindau_000010_000019_gtFine_labelIds.png 313 | lindau/lindau_000055_000019_gtFine_labelIds.png 314 | lindau/lindau_000006_000019_gtFine_labelIds.png 315 | lindau/lindau_000019_000019_gtFine_labelIds.png 316 | lindau/lindau_000029_000019_gtFine_labelIds.png 317 | lindau/lindau_000039_000019_gtFine_labelIds.png 318 | lindau/lindau_000051_000019_gtFine_labelIds.png 319 | lindau/lindau_000020_000019_gtFine_labelIds.png 320 | lindau/lindau_000057_000019_gtFine_labelIds.png 321 | lindau/lindau_000041_000019_gtFine_labelIds.png 322 | lindau/lindau_000040_000019_gtFine_labelIds.png 323 | lindau/lindau_000044_000019_gtFine_labelIds.png 324 | lindau/lindau_000028_000019_gtFine_labelIds.png 325 | lindau/lindau_000058_000019_gtFine_labelIds.png 326 | lindau/lindau_000008_000019_gtFine_labelIds.png 327 | munster/munster_000000_000019_gtFine_labelIds.png 328 | munster/munster_000012_000019_gtFine_labelIds.png 329 | munster/munster_000032_000019_gtFine_labelIds.png 330 | munster/munster_000068_000019_gtFine_labelIds.png 331 | munster/munster_000101_000019_gtFine_labelIds.png 332 | munster/munster_000153_000019_gtFine_labelIds.png 333 | munster/munster_000115_000019_gtFine_labelIds.png 334 | munster/munster_000029_000019_gtFine_labelIds.png 335 | munster/munster_000019_000019_gtFine_labelIds.png 336 | munster/munster_000156_000019_gtFine_labelIds.png 337 | munster/munster_000129_000019_gtFine_labelIds.png 338 | munster/munster_000169_000019_gtFine_labelIds.png 339 | munster/munster_000150_000019_gtFine_labelIds.png 340 | munster/munster_000165_000019_gtFine_labelIds.png 341 | munster/munster_000050_000019_gtFine_labelIds.png 342 | munster/munster_000025_000019_gtFine_labelIds.png 343 | munster/munster_000116_000019_gtFine_labelIds.png 344 | munster/munster_000132_000019_gtFine_labelIds.png 345 | munster/munster_000066_000019_gtFine_labelIds.png 346 | munster/munster_000096_000019_gtFine_labelIds.png 347 | 
munster/munster_000030_000019_gtFine_labelIds.png 348 | munster/munster_000146_000019_gtFine_labelIds.png 349 | munster/munster_000098_000019_gtFine_labelIds.png 350 | munster/munster_000059_000019_gtFine_labelIds.png 351 | munster/munster_000093_000019_gtFine_labelIds.png 352 | munster/munster_000122_000019_gtFine_labelIds.png 353 | munster/munster_000024_000019_gtFine_labelIds.png 354 | munster/munster_000036_000019_gtFine_labelIds.png 355 | munster/munster_000086_000019_gtFine_labelIds.png 356 | munster/munster_000163_000019_gtFine_labelIds.png 357 | munster/munster_000001_000019_gtFine_labelIds.png 358 | munster/munster_000053_000019_gtFine_labelIds.png 359 | munster/munster_000071_000019_gtFine_labelIds.png 360 | munster/munster_000079_000019_gtFine_labelIds.png 361 | munster/munster_000159_000019_gtFine_labelIds.png 362 | munster/munster_000038_000019_gtFine_labelIds.png 363 | munster/munster_000138_000019_gtFine_labelIds.png 364 | munster/munster_000135_000019_gtFine_labelIds.png 365 | munster/munster_000065_000019_gtFine_labelIds.png 366 | munster/munster_000139_000019_gtFine_labelIds.png 367 | munster/munster_000108_000019_gtFine_labelIds.png 368 | munster/munster_000020_000019_gtFine_labelIds.png 369 | munster/munster_000074_000019_gtFine_labelIds.png 370 | munster/munster_000035_000019_gtFine_labelIds.png 371 | munster/munster_000067_000019_gtFine_labelIds.png 372 | munster/munster_000151_000019_gtFine_labelIds.png 373 | munster/munster_000083_000019_gtFine_labelIds.png 374 | munster/munster_000118_000019_gtFine_labelIds.png 375 | munster/munster_000046_000019_gtFine_labelIds.png 376 | munster/munster_000147_000019_gtFine_labelIds.png 377 | munster/munster_000047_000019_gtFine_labelIds.png 378 | munster/munster_000043_000019_gtFine_labelIds.png 379 | munster/munster_000168_000019_gtFine_labelIds.png 380 | munster/munster_000167_000019_gtFine_labelIds.png 381 | munster/munster_000021_000019_gtFine_labelIds.png 382 | 
munster/munster_000073_000019_gtFine_labelIds.png 383 | munster/munster_000089_000019_gtFine_labelIds.png 384 | munster/munster_000060_000019_gtFine_labelIds.png 385 | munster/munster_000155_000019_gtFine_labelIds.png 386 | munster/munster_000140_000019_gtFine_labelIds.png 387 | munster/munster_000145_000019_gtFine_labelIds.png 388 | munster/munster_000077_000019_gtFine_labelIds.png 389 | munster/munster_000018_000019_gtFine_labelIds.png 390 | munster/munster_000045_000019_gtFine_labelIds.png 391 | munster/munster_000166_000019_gtFine_labelIds.png 392 | munster/munster_000037_000019_gtFine_labelIds.png 393 | munster/munster_000112_000019_gtFine_labelIds.png 394 | munster/munster_000080_000019_gtFine_labelIds.png 395 | munster/munster_000144_000019_gtFine_labelIds.png 396 | munster/munster_000142_000019_gtFine_labelIds.png 397 | munster/munster_000070_000019_gtFine_labelIds.png 398 | munster/munster_000044_000019_gtFine_labelIds.png 399 | munster/munster_000137_000019_gtFine_labelIds.png 400 | munster/munster_000041_000019_gtFine_labelIds.png 401 | munster/munster_000113_000019_gtFine_labelIds.png 402 | munster/munster_000075_000019_gtFine_labelIds.png 403 | munster/munster_000157_000019_gtFine_labelIds.png 404 | munster/munster_000158_000019_gtFine_labelIds.png 405 | munster/munster_000109_000019_gtFine_labelIds.png 406 | munster/munster_000033_000019_gtFine_labelIds.png 407 | munster/munster_000088_000019_gtFine_labelIds.png 408 | munster/munster_000090_000019_gtFine_labelIds.png 409 | munster/munster_000114_000019_gtFine_labelIds.png 410 | munster/munster_000171_000019_gtFine_labelIds.png 411 | munster/munster_000013_000019_gtFine_labelIds.png 412 | munster/munster_000130_000019_gtFine_labelIds.png 413 | munster/munster_000016_000019_gtFine_labelIds.png 414 | munster/munster_000136_000019_gtFine_labelIds.png 415 | munster/munster_000007_000019_gtFine_labelIds.png 416 | munster/munster_000014_000019_gtFine_labelIds.png 417 | 
munster/munster_000052_000019_gtFine_labelIds.png 418 | munster/munster_000104_000019_gtFine_labelIds.png 419 | munster/munster_000173_000019_gtFine_labelIds.png 420 | munster/munster_000057_000019_gtFine_labelIds.png 421 | munster/munster_000072_000019_gtFine_labelIds.png 422 | munster/munster_000003_000019_gtFine_labelIds.png 423 | munster/munster_000161_000019_gtFine_labelIds.png 424 | munster/munster_000002_000019_gtFine_labelIds.png 425 | munster/munster_000028_000019_gtFine_labelIds.png 426 | munster/munster_000051_000019_gtFine_labelIds.png 427 | munster/munster_000105_000019_gtFine_labelIds.png 428 | munster/munster_000061_000019_gtFine_labelIds.png 429 | munster/munster_000058_000019_gtFine_labelIds.png 430 | munster/munster_000094_000019_gtFine_labelIds.png 431 | munster/munster_000027_000019_gtFine_labelIds.png 432 | munster/munster_000062_000019_gtFine_labelIds.png 433 | munster/munster_000127_000019_gtFine_labelIds.png 434 | munster/munster_000110_000019_gtFine_labelIds.png 435 | munster/munster_000170_000019_gtFine_labelIds.png 436 | munster/munster_000023_000019_gtFine_labelIds.png 437 | munster/munster_000084_000019_gtFine_labelIds.png 438 | munster/munster_000121_000019_gtFine_labelIds.png 439 | munster/munster_000087_000019_gtFine_labelIds.png 440 | munster/munster_000097_000019_gtFine_labelIds.png 441 | munster/munster_000119_000019_gtFine_labelIds.png 442 | munster/munster_000128_000019_gtFine_labelIds.png 443 | munster/munster_000078_000019_gtFine_labelIds.png 444 | munster/munster_000010_000019_gtFine_labelIds.png 445 | munster/munster_000015_000019_gtFine_labelIds.png 446 | munster/munster_000048_000019_gtFine_labelIds.png 447 | munster/munster_000085_000019_gtFine_labelIds.png 448 | munster/munster_000164_000019_gtFine_labelIds.png 449 | munster/munster_000111_000019_gtFine_labelIds.png 450 | munster/munster_000099_000019_gtFine_labelIds.png 451 | munster/munster_000117_000019_gtFine_labelIds.png 452 | 
munster/munster_000009_000019_gtFine_labelIds.png 453 | munster/munster_000049_000019_gtFine_labelIds.png 454 | munster/munster_000148_000019_gtFine_labelIds.png 455 | munster/munster_000022_000019_gtFine_labelIds.png 456 | munster/munster_000131_000019_gtFine_labelIds.png 457 | munster/munster_000006_000019_gtFine_labelIds.png 458 | munster/munster_000005_000019_gtFine_labelIds.png 459 | munster/munster_000102_000019_gtFine_labelIds.png 460 | munster/munster_000160_000019_gtFine_labelIds.png 461 | munster/munster_000107_000019_gtFine_labelIds.png 462 | munster/munster_000095_000019_gtFine_labelIds.png 463 | munster/munster_000106_000019_gtFine_labelIds.png 464 | munster/munster_000034_000019_gtFine_labelIds.png 465 | munster/munster_000143_000019_gtFine_labelIds.png 466 | munster/munster_000017_000019_gtFine_labelIds.png 467 | munster/munster_000040_000019_gtFine_labelIds.png 468 | munster/munster_000152_000019_gtFine_labelIds.png 469 | munster/munster_000154_000019_gtFine_labelIds.png 470 | munster/munster_000100_000019_gtFine_labelIds.png 471 | munster/munster_000004_000019_gtFine_labelIds.png 472 | munster/munster_000141_000019_gtFine_labelIds.png 473 | munster/munster_000011_000019_gtFine_labelIds.png 474 | munster/munster_000055_000019_gtFine_labelIds.png 475 | munster/munster_000134_000019_gtFine_labelIds.png 476 | munster/munster_000054_000019_gtFine_labelIds.png 477 | munster/munster_000064_000019_gtFine_labelIds.png 478 | munster/munster_000039_000019_gtFine_labelIds.png 479 | munster/munster_000103_000019_gtFine_labelIds.png 480 | munster/munster_000092_000019_gtFine_labelIds.png 481 | munster/munster_000172_000019_gtFine_labelIds.png 482 | munster/munster_000042_000019_gtFine_labelIds.png 483 | munster/munster_000124_000019_gtFine_labelIds.png 484 | munster/munster_000069_000019_gtFine_labelIds.png 485 | munster/munster_000026_000019_gtFine_labelIds.png 486 | munster/munster_000120_000019_gtFine_labelIds.png 487 | 
munster/munster_000031_000019_gtFine_labelIds.png 488 | munster/munster_000162_000019_gtFine_labelIds.png 489 | munster/munster_000056_000019_gtFine_labelIds.png 490 | munster/munster_000081_000019_gtFine_labelIds.png 491 | munster/munster_000123_000019_gtFine_labelIds.png 492 | munster/munster_000125_000019_gtFine_labelIds.png 493 | munster/munster_000082_000019_gtFine_labelIds.png 494 | munster/munster_000133_000019_gtFine_labelIds.png 495 | munster/munster_000126_000019_gtFine_labelIds.png 496 | munster/munster_000063_000019_gtFine_labelIds.png 497 | munster/munster_000008_000019_gtFine_labelIds.png 498 | munster/munster_000149_000019_gtFine_labelIds.png 499 | munster/munster_000076_000019_gtFine_labelIds.png 500 | munster/munster_000091_000019_gtFine_labelIds.png 501 | -------------------------------------------------------------------------------- /dataset/cityscapes_list/val.txt: -------------------------------------------------------------------------------- 1 | frankfurt/frankfurt_000001_007973_leftImg8bit.png 2 | frankfurt/frankfurt_000001_025921_leftImg8bit.png 3 | frankfurt/frankfurt_000001_062016_leftImg8bit.png 4 | frankfurt/frankfurt_000001_049078_leftImg8bit.png 5 | frankfurt/frankfurt_000000_009561_leftImg8bit.png 6 | frankfurt/frankfurt_000001_013710_leftImg8bit.png 7 | frankfurt/frankfurt_000001_041664_leftImg8bit.png 8 | frankfurt/frankfurt_000000_013240_leftImg8bit.png 9 | frankfurt/frankfurt_000001_044787_leftImg8bit.png 10 | frankfurt/frankfurt_000001_015328_leftImg8bit.png 11 | frankfurt/frankfurt_000001_073243_leftImg8bit.png 12 | frankfurt/frankfurt_000001_034816_leftImg8bit.png 13 | frankfurt/frankfurt_000001_041074_leftImg8bit.png 14 | frankfurt/frankfurt_000001_005898_leftImg8bit.png 15 | frankfurt/frankfurt_000000_022254_leftImg8bit.png 16 | frankfurt/frankfurt_000001_044658_leftImg8bit.png 17 | frankfurt/frankfurt_000001_009504_leftImg8bit.png 18 | frankfurt/frankfurt_000001_024927_leftImg8bit.png 19 | 
frankfurt/frankfurt_000001_017842_leftImg8bit.png 20 | frankfurt/frankfurt_000001_068208_leftImg8bit.png 21 | frankfurt/frankfurt_000001_013016_leftImg8bit.png 22 | frankfurt/frankfurt_000001_010156_leftImg8bit.png 23 | frankfurt/frankfurt_000000_002963_leftImg8bit.png 24 | frankfurt/frankfurt_000001_020693_leftImg8bit.png 25 | frankfurt/frankfurt_000001_078803_leftImg8bit.png 26 | frankfurt/frankfurt_000001_025713_leftImg8bit.png 27 | frankfurt/frankfurt_000001_007285_leftImg8bit.png 28 | frankfurt/frankfurt_000001_070099_leftImg8bit.png 29 | frankfurt/frankfurt_000000_009291_leftImg8bit.png 30 | frankfurt/frankfurt_000000_019607_leftImg8bit.png 31 | frankfurt/frankfurt_000001_068063_leftImg8bit.png 32 | frankfurt/frankfurt_000000_003920_leftImg8bit.png 33 | frankfurt/frankfurt_000001_077233_leftImg8bit.png 34 | frankfurt/frankfurt_000001_029086_leftImg8bit.png 35 | frankfurt/frankfurt_000001_060545_leftImg8bit.png 36 | frankfurt/frankfurt_000001_001464_leftImg8bit.png 37 | frankfurt/frankfurt_000001_028590_leftImg8bit.png 38 | frankfurt/frankfurt_000001_016462_leftImg8bit.png 39 | frankfurt/frankfurt_000001_060422_leftImg8bit.png 40 | frankfurt/frankfurt_000001_009058_leftImg8bit.png 41 | frankfurt/frankfurt_000001_080830_leftImg8bit.png 42 | frankfurt/frankfurt_000001_012870_leftImg8bit.png 43 | frankfurt/frankfurt_000001_077434_leftImg8bit.png 44 | frankfurt/frankfurt_000001_033655_leftImg8bit.png 45 | frankfurt/frankfurt_000001_051516_leftImg8bit.png 46 | frankfurt/frankfurt_000001_044413_leftImg8bit.png 47 | frankfurt/frankfurt_000001_055172_leftImg8bit.png 48 | frankfurt/frankfurt_000001_040575_leftImg8bit.png 49 | frankfurt/frankfurt_000000_020215_leftImg8bit.png 50 | frankfurt/frankfurt_000000_017228_leftImg8bit.png 51 | frankfurt/frankfurt_000001_041354_leftImg8bit.png 52 | frankfurt/frankfurt_000000_008206_leftImg8bit.png 53 | frankfurt/frankfurt_000001_043564_leftImg8bit.png 54 | frankfurt/frankfurt_000001_032711_leftImg8bit.png 55 | 
frankfurt/frankfurt_000001_064130_leftImg8bit.png 56 | frankfurt/frankfurt_000001_053102_leftImg8bit.png 57 | frankfurt/frankfurt_000001_082087_leftImg8bit.png 58 | frankfurt/frankfurt_000001_057478_leftImg8bit.png 59 | frankfurt/frankfurt_000001_007407_leftImg8bit.png 60 | frankfurt/frankfurt_000001_008200_leftImg8bit.png 61 | frankfurt/frankfurt_000001_038844_leftImg8bit.png 62 | frankfurt/frankfurt_000001_016029_leftImg8bit.png 63 | frankfurt/frankfurt_000001_058176_leftImg8bit.png 64 | frankfurt/frankfurt_000001_057181_leftImg8bit.png 65 | frankfurt/frankfurt_000001_039895_leftImg8bit.png 66 | frankfurt/frankfurt_000000_000294_leftImg8bit.png 67 | frankfurt/frankfurt_000001_055062_leftImg8bit.png 68 | frankfurt/frankfurt_000001_083029_leftImg8bit.png 69 | frankfurt/frankfurt_000001_010444_leftImg8bit.png 70 | frankfurt/frankfurt_000001_041517_leftImg8bit.png 71 | frankfurt/frankfurt_000001_069633_leftImg8bit.png 72 | frankfurt/frankfurt_000001_020287_leftImg8bit.png 73 | frankfurt/frankfurt_000001_012038_leftImg8bit.png 74 | frankfurt/frankfurt_000001_046504_leftImg8bit.png 75 | frankfurt/frankfurt_000001_032556_leftImg8bit.png 76 | frankfurt/frankfurt_000000_001751_leftImg8bit.png 77 | frankfurt/frankfurt_000001_000538_leftImg8bit.png 78 | frankfurt/frankfurt_000001_083852_leftImg8bit.png 79 | frankfurt/frankfurt_000001_077092_leftImg8bit.png 80 | frankfurt/frankfurt_000001_017101_leftImg8bit.png 81 | frankfurt/frankfurt_000001_044525_leftImg8bit.png 82 | frankfurt/frankfurt_000001_005703_leftImg8bit.png 83 | frankfurt/frankfurt_000001_080391_leftImg8bit.png 84 | frankfurt/frankfurt_000001_038418_leftImg8bit.png 85 | frankfurt/frankfurt_000001_066832_leftImg8bit.png 86 | frankfurt/frankfurt_000000_003357_leftImg8bit.png 87 | frankfurt/frankfurt_000000_020880_leftImg8bit.png 88 | frankfurt/frankfurt_000001_062396_leftImg8bit.png 89 | frankfurt/frankfurt_000001_046272_leftImg8bit.png 90 | frankfurt/frankfurt_000001_062509_leftImg8bit.png 91 | 
frankfurt/frankfurt_000001_054415_leftImg8bit.png 92 | frankfurt/frankfurt_000001_021406_leftImg8bit.png 93 | frankfurt/frankfurt_000001_030310_leftImg8bit.png 94 | frankfurt/frankfurt_000000_014480_leftImg8bit.png 95 | frankfurt/frankfurt_000001_005410_leftImg8bit.png 96 | frankfurt/frankfurt_000000_022797_leftImg8bit.png 97 | frankfurt/frankfurt_000001_035144_leftImg8bit.png 98 | frankfurt/frankfurt_000001_014565_leftImg8bit.png 99 | frankfurt/frankfurt_000001_065850_leftImg8bit.png 100 | frankfurt/frankfurt_000000_000576_leftImg8bit.png 101 | frankfurt/frankfurt_000001_065617_leftImg8bit.png 102 | frankfurt/frankfurt_000000_005543_leftImg8bit.png 103 | frankfurt/frankfurt_000001_055709_leftImg8bit.png 104 | frankfurt/frankfurt_000001_027325_leftImg8bit.png 105 | frankfurt/frankfurt_000001_011835_leftImg8bit.png 106 | frankfurt/frankfurt_000001_046779_leftImg8bit.png 107 | frankfurt/frankfurt_000001_064305_leftImg8bit.png 108 | frankfurt/frankfurt_000001_012738_leftImg8bit.png 109 | frankfurt/frankfurt_000001_048355_leftImg8bit.png 110 | frankfurt/frankfurt_000001_019969_leftImg8bit.png 111 | frankfurt/frankfurt_000001_080091_leftImg8bit.png 112 | frankfurt/frankfurt_000000_011007_leftImg8bit.png 113 | frankfurt/frankfurt_000000_015676_leftImg8bit.png 114 | frankfurt/frankfurt_000001_044227_leftImg8bit.png 115 | frankfurt/frankfurt_000001_055387_leftImg8bit.png 116 | frankfurt/frankfurt_000001_038245_leftImg8bit.png 117 | frankfurt/frankfurt_000001_059642_leftImg8bit.png 118 | frankfurt/frankfurt_000001_030669_leftImg8bit.png 119 | frankfurt/frankfurt_000001_068772_leftImg8bit.png 120 | frankfurt/frankfurt_000001_079206_leftImg8bit.png 121 | frankfurt/frankfurt_000001_055306_leftImg8bit.png 122 | frankfurt/frankfurt_000001_012699_leftImg8bit.png 123 | frankfurt/frankfurt_000001_042384_leftImg8bit.png 124 | frankfurt/frankfurt_000001_054077_leftImg8bit.png 125 | frankfurt/frankfurt_000001_010830_leftImg8bit.png 126 | 
frankfurt/frankfurt_000001_052120_leftImg8bit.png 127 | frankfurt/frankfurt_000001_032018_leftImg8bit.png 128 | frankfurt/frankfurt_000001_051737_leftImg8bit.png 129 | frankfurt/frankfurt_000001_028335_leftImg8bit.png 130 | frankfurt/frankfurt_000001_049770_leftImg8bit.png 131 | frankfurt/frankfurt_000001_054884_leftImg8bit.png 132 | frankfurt/frankfurt_000001_019698_leftImg8bit.png 133 | frankfurt/frankfurt_000000_011461_leftImg8bit.png 134 | frankfurt/frankfurt_000000_001016_leftImg8bit.png 135 | frankfurt/frankfurt_000001_062250_leftImg8bit.png 136 | frankfurt/frankfurt_000001_004736_leftImg8bit.png 137 | frankfurt/frankfurt_000001_068682_leftImg8bit.png 138 | frankfurt/frankfurt_000000_006589_leftImg8bit.png 139 | frankfurt/frankfurt_000000_011810_leftImg8bit.png 140 | frankfurt/frankfurt_000001_066574_leftImg8bit.png 141 | frankfurt/frankfurt_000001_048654_leftImg8bit.png 142 | frankfurt/frankfurt_000001_049209_leftImg8bit.png 143 | frankfurt/frankfurt_000001_042098_leftImg8bit.png 144 | frankfurt/frankfurt_000001_031416_leftImg8bit.png 145 | frankfurt/frankfurt_000000_009969_leftImg8bit.png 146 | frankfurt/frankfurt_000001_038645_leftImg8bit.png 147 | frankfurt/frankfurt_000001_020046_leftImg8bit.png 148 | frankfurt/frankfurt_000001_054219_leftImg8bit.png 149 | frankfurt/frankfurt_000001_002759_leftImg8bit.png 150 | frankfurt/frankfurt_000001_066438_leftImg8bit.png 151 | frankfurt/frankfurt_000000_020321_leftImg8bit.png 152 | frankfurt/frankfurt_000001_002646_leftImg8bit.png 153 | frankfurt/frankfurt_000001_046126_leftImg8bit.png 154 | frankfurt/frankfurt_000000_002196_leftImg8bit.png 155 | frankfurt/frankfurt_000001_057954_leftImg8bit.png 156 | frankfurt/frankfurt_000001_011715_leftImg8bit.png 157 | frankfurt/frankfurt_000000_021879_leftImg8bit.png 158 | frankfurt/frankfurt_000001_082466_leftImg8bit.png 159 | frankfurt/frankfurt_000000_003025_leftImg8bit.png 160 | frankfurt/frankfurt_000001_023369_leftImg8bit.png 161 | 
frankfurt/frankfurt_000001_061682_leftImg8bit.png 162 | frankfurt/frankfurt_000001_017459_leftImg8bit.png 163 | frankfurt/frankfurt_000001_059789_leftImg8bit.png 164 | frankfurt/frankfurt_000001_073464_leftImg8bit.png 165 | frankfurt/frankfurt_000001_063045_leftImg8bit.png 166 | frankfurt/frankfurt_000001_064651_leftImg8bit.png 167 | frankfurt/frankfurt_000000_013382_leftImg8bit.png 168 | frankfurt/frankfurt_000001_002512_leftImg8bit.png 169 | frankfurt/frankfurt_000001_032942_leftImg8bit.png 170 | frankfurt/frankfurt_000001_010600_leftImg8bit.png 171 | frankfurt/frankfurt_000001_030067_leftImg8bit.png 172 | frankfurt/frankfurt_000001_014741_leftImg8bit.png 173 | frankfurt/frankfurt_000000_021667_leftImg8bit.png 174 | frankfurt/frankfurt_000001_051807_leftImg8bit.png 175 | frankfurt/frankfurt_000001_019854_leftImg8bit.png 176 | frankfurt/frankfurt_000001_015768_leftImg8bit.png 177 | frankfurt/frankfurt_000001_007857_leftImg8bit.png 178 | frankfurt/frankfurt_000001_058914_leftImg8bit.png 179 | frankfurt/frankfurt_000000_012868_leftImg8bit.png 180 | frankfurt/frankfurt_000000_013942_leftImg8bit.png 181 | frankfurt/frankfurt_000001_014406_leftImg8bit.png 182 | frankfurt/frankfurt_000001_049298_leftImg8bit.png 183 | frankfurt/frankfurt_000001_023769_leftImg8bit.png 184 | frankfurt/frankfurt_000001_012519_leftImg8bit.png 185 | frankfurt/frankfurt_000001_064925_leftImg8bit.png 186 | frankfurt/frankfurt_000001_072295_leftImg8bit.png 187 | frankfurt/frankfurt_000001_058504_leftImg8bit.png 188 | frankfurt/frankfurt_000001_059119_leftImg8bit.png 189 | frankfurt/frankfurt_000001_015091_leftImg8bit.png 190 | frankfurt/frankfurt_000001_058057_leftImg8bit.png 191 | frankfurt/frankfurt_000001_003056_leftImg8bit.png 192 | frankfurt/frankfurt_000001_007622_leftImg8bit.png 193 | frankfurt/frankfurt_000001_016273_leftImg8bit.png 194 | frankfurt/frankfurt_000001_035864_leftImg8bit.png 195 | frankfurt/frankfurt_000001_067092_leftImg8bit.png 196 | 
frankfurt/frankfurt_000000_013067_leftImg8bit.png 197 | frankfurt/frankfurt_000001_067474_leftImg8bit.png 198 | frankfurt/frankfurt_000001_060135_leftImg8bit.png 199 | frankfurt/frankfurt_000000_018797_leftImg8bit.png 200 | frankfurt/frankfurt_000000_005898_leftImg8bit.png 201 | frankfurt/frankfurt_000001_055603_leftImg8bit.png 202 | frankfurt/frankfurt_000001_060906_leftImg8bit.png 203 | frankfurt/frankfurt_000001_062653_leftImg8bit.png 204 | frankfurt/frankfurt_000000_004617_leftImg8bit.png 205 | frankfurt/frankfurt_000001_055538_leftImg8bit.png 206 | frankfurt/frankfurt_000000_008451_leftImg8bit.png 207 | frankfurt/frankfurt_000001_052594_leftImg8bit.png 208 | frankfurt/frankfurt_000001_004327_leftImg8bit.png 209 | frankfurt/frankfurt_000001_075296_leftImg8bit.png 210 | frankfurt/frankfurt_000001_073088_leftImg8bit.png 211 | frankfurt/frankfurt_000001_005184_leftImg8bit.png 212 | frankfurt/frankfurt_000000_016286_leftImg8bit.png 213 | frankfurt/frankfurt_000001_008688_leftImg8bit.png 214 | frankfurt/frankfurt_000000_011074_leftImg8bit.png 215 | frankfurt/frankfurt_000001_056580_leftImg8bit.png 216 | frankfurt/frankfurt_000001_067735_leftImg8bit.png 217 | frankfurt/frankfurt_000001_034047_leftImg8bit.png 218 | frankfurt/frankfurt_000001_076502_leftImg8bit.png 219 | frankfurt/frankfurt_000001_071288_leftImg8bit.png 220 | frankfurt/frankfurt_000001_067295_leftImg8bit.png 221 | frankfurt/frankfurt_000001_071781_leftImg8bit.png 222 | frankfurt/frankfurt_000000_012121_leftImg8bit.png 223 | frankfurt/frankfurt_000001_004859_leftImg8bit.png 224 | frankfurt/frankfurt_000001_073911_leftImg8bit.png 225 | frankfurt/frankfurt_000001_047552_leftImg8bit.png 226 | frankfurt/frankfurt_000001_037705_leftImg8bit.png 227 | frankfurt/frankfurt_000001_025512_leftImg8bit.png 228 | frankfurt/frankfurt_000001_047178_leftImg8bit.png 229 | frankfurt/frankfurt_000001_014221_leftImg8bit.png 230 | frankfurt/frankfurt_000000_007365_leftImg8bit.png 231 | 
frankfurt/frankfurt_000001_049698_leftImg8bit.png 232 | frankfurt/frankfurt_000001_065160_leftImg8bit.png 233 | frankfurt/frankfurt_000001_061763_leftImg8bit.png 234 | frankfurt/frankfurt_000000_010351_leftImg8bit.png 235 | frankfurt/frankfurt_000001_072155_leftImg8bit.png 236 | frankfurt/frankfurt_000001_023235_leftImg8bit.png 237 | frankfurt/frankfurt_000000_015389_leftImg8bit.png 238 | frankfurt/frankfurt_000000_009688_leftImg8bit.png 239 | frankfurt/frankfurt_000000_016005_leftImg8bit.png 240 | frankfurt/frankfurt_000001_054640_leftImg8bit.png 241 | frankfurt/frankfurt_000001_029600_leftImg8bit.png 242 | frankfurt/frankfurt_000001_028232_leftImg8bit.png 243 | frankfurt/frankfurt_000001_050686_leftImg8bit.png 244 | frankfurt/frankfurt_000001_013496_leftImg8bit.png 245 | frankfurt/frankfurt_000001_066092_leftImg8bit.png 246 | frankfurt/frankfurt_000001_009854_leftImg8bit.png 247 | frankfurt/frankfurt_000001_067178_leftImg8bit.png 248 | frankfurt/frankfurt_000001_028854_leftImg8bit.png 249 | frankfurt/frankfurt_000001_083199_leftImg8bit.png 250 | frankfurt/frankfurt_000001_064798_leftImg8bit.png 251 | frankfurt/frankfurt_000001_018113_leftImg8bit.png 252 | frankfurt/frankfurt_000001_050149_leftImg8bit.png 253 | frankfurt/frankfurt_000001_048196_leftImg8bit.png 254 | frankfurt/frankfurt_000000_001236_leftImg8bit.png 255 | frankfurt/frankfurt_000000_017476_leftImg8bit.png 256 | frankfurt/frankfurt_000001_003588_leftImg8bit.png 257 | frankfurt/frankfurt_000001_021825_leftImg8bit.png 258 | frankfurt/frankfurt_000000_010763_leftImg8bit.png 259 | frankfurt/frankfurt_000001_062793_leftImg8bit.png 260 | frankfurt/frankfurt_000001_029236_leftImg8bit.png 261 | frankfurt/frankfurt_000001_075984_leftImg8bit.png 262 | frankfurt/frankfurt_000001_031266_leftImg8bit.png 263 | frankfurt/frankfurt_000001_043395_leftImg8bit.png 264 | frankfurt/frankfurt_000001_040732_leftImg8bit.png 265 | frankfurt/frankfurt_000001_011162_leftImg8bit.png 266 | 
frankfurt/frankfurt_000000_012009_leftImg8bit.png 267 | frankfurt/frankfurt_000001_042733_leftImg8bit.png 268 | lindau/lindau_000052_000019_leftImg8bit.png 269 | lindau/lindau_000009_000019_leftImg8bit.png 270 | lindau/lindau_000037_000019_leftImg8bit.png 271 | lindau/lindau_000047_000019_leftImg8bit.png 272 | lindau/lindau_000015_000019_leftImg8bit.png 273 | lindau/lindau_000030_000019_leftImg8bit.png 274 | lindau/lindau_000012_000019_leftImg8bit.png 275 | lindau/lindau_000032_000019_leftImg8bit.png 276 | lindau/lindau_000046_000019_leftImg8bit.png 277 | lindau/lindau_000000_000019_leftImg8bit.png 278 | lindau/lindau_000031_000019_leftImg8bit.png 279 | lindau/lindau_000011_000019_leftImg8bit.png 280 | lindau/lindau_000027_000019_leftImg8bit.png 281 | lindau/lindau_000054_000019_leftImg8bit.png 282 | lindau/lindau_000026_000019_leftImg8bit.png 283 | lindau/lindau_000017_000019_leftImg8bit.png 284 | lindau/lindau_000023_000019_leftImg8bit.png 285 | lindau/lindau_000005_000019_leftImg8bit.png 286 | lindau/lindau_000056_000019_leftImg8bit.png 287 | lindau/lindau_000025_000019_leftImg8bit.png 288 | lindau/lindau_000045_000019_leftImg8bit.png 289 | lindau/lindau_000014_000019_leftImg8bit.png 290 | lindau/lindau_000004_000019_leftImg8bit.png 291 | lindau/lindau_000021_000019_leftImg8bit.png 292 | lindau/lindau_000049_000019_leftImg8bit.png 293 | lindau/lindau_000033_000019_leftImg8bit.png 294 | lindau/lindau_000042_000019_leftImg8bit.png 295 | lindau/lindau_000013_000019_leftImg8bit.png 296 | lindau/lindau_000024_000019_leftImg8bit.png 297 | lindau/lindau_000002_000019_leftImg8bit.png 298 | lindau/lindau_000043_000019_leftImg8bit.png 299 | lindau/lindau_000016_000019_leftImg8bit.png 300 | lindau/lindau_000050_000019_leftImg8bit.png 301 | lindau/lindau_000018_000019_leftImg8bit.png 302 | lindau/lindau_000007_000019_leftImg8bit.png 303 | lindau/lindau_000048_000019_leftImg8bit.png 304 | lindau/lindau_000022_000019_leftImg8bit.png 305 | 
lindau/lindau_000053_000019_leftImg8bit.png 306 | lindau/lindau_000038_000019_leftImg8bit.png 307 | lindau/lindau_000001_000019_leftImg8bit.png 308 | lindau/lindau_000036_000019_leftImg8bit.png 309 | lindau/lindau_000035_000019_leftImg8bit.png 310 | lindau/lindau_000003_000019_leftImg8bit.png 311 | lindau/lindau_000034_000019_leftImg8bit.png 312 | lindau/lindau_000010_000019_leftImg8bit.png 313 | lindau/lindau_000055_000019_leftImg8bit.png 314 | lindau/lindau_000006_000019_leftImg8bit.png 315 | lindau/lindau_000019_000019_leftImg8bit.png 316 | lindau/lindau_000029_000019_leftImg8bit.png 317 | lindau/lindau_000039_000019_leftImg8bit.png 318 | lindau/lindau_000051_000019_leftImg8bit.png 319 | lindau/lindau_000020_000019_leftImg8bit.png 320 | lindau/lindau_000057_000019_leftImg8bit.png 321 | lindau/lindau_000041_000019_leftImg8bit.png 322 | lindau/lindau_000040_000019_leftImg8bit.png 323 | lindau/lindau_000044_000019_leftImg8bit.png 324 | lindau/lindau_000028_000019_leftImg8bit.png 325 | lindau/lindau_000058_000019_leftImg8bit.png 326 | lindau/lindau_000008_000019_leftImg8bit.png 327 | munster/munster_000000_000019_leftImg8bit.png 328 | munster/munster_000012_000019_leftImg8bit.png 329 | munster/munster_000032_000019_leftImg8bit.png 330 | munster/munster_000068_000019_leftImg8bit.png 331 | munster/munster_000101_000019_leftImg8bit.png 332 | munster/munster_000153_000019_leftImg8bit.png 333 | munster/munster_000115_000019_leftImg8bit.png 334 | munster/munster_000029_000019_leftImg8bit.png 335 | munster/munster_000019_000019_leftImg8bit.png 336 | munster/munster_000156_000019_leftImg8bit.png 337 | munster/munster_000129_000019_leftImg8bit.png 338 | munster/munster_000169_000019_leftImg8bit.png 339 | munster/munster_000150_000019_leftImg8bit.png 340 | munster/munster_000165_000019_leftImg8bit.png 341 | munster/munster_000050_000019_leftImg8bit.png 342 | munster/munster_000025_000019_leftImg8bit.png 343 | munster/munster_000116_000019_leftImg8bit.png 344 | 
munster/munster_000132_000019_leftImg8bit.png 345 | munster/munster_000066_000019_leftImg8bit.png 346 | munster/munster_000096_000019_leftImg8bit.png 347 | munster/munster_000030_000019_leftImg8bit.png 348 | munster/munster_000146_000019_leftImg8bit.png 349 | munster/munster_000098_000019_leftImg8bit.png 350 | munster/munster_000059_000019_leftImg8bit.png 351 | munster/munster_000093_000019_leftImg8bit.png 352 | munster/munster_000122_000019_leftImg8bit.png 353 | munster/munster_000024_000019_leftImg8bit.png 354 | munster/munster_000036_000019_leftImg8bit.png 355 | munster/munster_000086_000019_leftImg8bit.png 356 | munster/munster_000163_000019_leftImg8bit.png 357 | munster/munster_000001_000019_leftImg8bit.png 358 | munster/munster_000053_000019_leftImg8bit.png 359 | munster/munster_000071_000019_leftImg8bit.png 360 | munster/munster_000079_000019_leftImg8bit.png 361 | munster/munster_000159_000019_leftImg8bit.png 362 | munster/munster_000038_000019_leftImg8bit.png 363 | munster/munster_000138_000019_leftImg8bit.png 364 | munster/munster_000135_000019_leftImg8bit.png 365 | munster/munster_000065_000019_leftImg8bit.png 366 | munster/munster_000139_000019_leftImg8bit.png 367 | munster/munster_000108_000019_leftImg8bit.png 368 | munster/munster_000020_000019_leftImg8bit.png 369 | munster/munster_000074_000019_leftImg8bit.png 370 | munster/munster_000035_000019_leftImg8bit.png 371 | munster/munster_000067_000019_leftImg8bit.png 372 | munster/munster_000151_000019_leftImg8bit.png 373 | munster/munster_000083_000019_leftImg8bit.png 374 | munster/munster_000118_000019_leftImg8bit.png 375 | munster/munster_000046_000019_leftImg8bit.png 376 | munster/munster_000147_000019_leftImg8bit.png 377 | munster/munster_000047_000019_leftImg8bit.png 378 | munster/munster_000043_000019_leftImg8bit.png 379 | munster/munster_000168_000019_leftImg8bit.png 380 | munster/munster_000167_000019_leftImg8bit.png 381 | munster/munster_000021_000019_leftImg8bit.png 382 | 
munster/munster_000073_000019_leftImg8bit.png 383 | munster/munster_000089_000019_leftImg8bit.png 384 | munster/munster_000060_000019_leftImg8bit.png 385 | munster/munster_000155_000019_leftImg8bit.png 386 | munster/munster_000140_000019_leftImg8bit.png 387 | munster/munster_000145_000019_leftImg8bit.png 388 | munster/munster_000077_000019_leftImg8bit.png 389 | munster/munster_000018_000019_leftImg8bit.png 390 | munster/munster_000045_000019_leftImg8bit.png 391 | munster/munster_000166_000019_leftImg8bit.png 392 | munster/munster_000037_000019_leftImg8bit.png 393 | munster/munster_000112_000019_leftImg8bit.png 394 | munster/munster_000080_000019_leftImg8bit.png 395 | munster/munster_000144_000019_leftImg8bit.png 396 | munster/munster_000142_000019_leftImg8bit.png 397 | munster/munster_000070_000019_leftImg8bit.png 398 | munster/munster_000044_000019_leftImg8bit.png 399 | munster/munster_000137_000019_leftImg8bit.png 400 | munster/munster_000041_000019_leftImg8bit.png 401 | munster/munster_000113_000019_leftImg8bit.png 402 | munster/munster_000075_000019_leftImg8bit.png 403 | munster/munster_000157_000019_leftImg8bit.png 404 | munster/munster_000158_000019_leftImg8bit.png 405 | munster/munster_000109_000019_leftImg8bit.png 406 | munster/munster_000033_000019_leftImg8bit.png 407 | munster/munster_000088_000019_leftImg8bit.png 408 | munster/munster_000090_000019_leftImg8bit.png 409 | munster/munster_000114_000019_leftImg8bit.png 410 | munster/munster_000171_000019_leftImg8bit.png 411 | munster/munster_000013_000019_leftImg8bit.png 412 | munster/munster_000130_000019_leftImg8bit.png 413 | munster/munster_000016_000019_leftImg8bit.png 414 | munster/munster_000136_000019_leftImg8bit.png 415 | munster/munster_000007_000019_leftImg8bit.png 416 | munster/munster_000014_000019_leftImg8bit.png 417 | munster/munster_000052_000019_leftImg8bit.png 418 | munster/munster_000104_000019_leftImg8bit.png 419 | munster/munster_000173_000019_leftImg8bit.png 420 | 
munster/munster_000057_000019_leftImg8bit.png 421 | munster/munster_000072_000019_leftImg8bit.png 422 | munster/munster_000003_000019_leftImg8bit.png 423 | munster/munster_000161_000019_leftImg8bit.png 424 | munster/munster_000002_000019_leftImg8bit.png 425 | munster/munster_000028_000019_leftImg8bit.png 426 | munster/munster_000051_000019_leftImg8bit.png 427 | munster/munster_000105_000019_leftImg8bit.png 428 | munster/munster_000061_000019_leftImg8bit.png 429 | munster/munster_000058_000019_leftImg8bit.png 430 | munster/munster_000094_000019_leftImg8bit.png 431 | munster/munster_000027_000019_leftImg8bit.png 432 | munster/munster_000062_000019_leftImg8bit.png 433 | munster/munster_000127_000019_leftImg8bit.png 434 | munster/munster_000110_000019_leftImg8bit.png 435 | munster/munster_000170_000019_leftImg8bit.png 436 | munster/munster_000023_000019_leftImg8bit.png 437 | munster/munster_000084_000019_leftImg8bit.png 438 | munster/munster_000121_000019_leftImg8bit.png 439 | munster/munster_000087_000019_leftImg8bit.png 440 | munster/munster_000097_000019_leftImg8bit.png 441 | munster/munster_000119_000019_leftImg8bit.png 442 | munster/munster_000128_000019_leftImg8bit.png 443 | munster/munster_000078_000019_leftImg8bit.png 444 | munster/munster_000010_000019_leftImg8bit.png 445 | munster/munster_000015_000019_leftImg8bit.png 446 | munster/munster_000048_000019_leftImg8bit.png 447 | munster/munster_000085_000019_leftImg8bit.png 448 | munster/munster_000164_000019_leftImg8bit.png 449 | munster/munster_000111_000019_leftImg8bit.png 450 | munster/munster_000099_000019_leftImg8bit.png 451 | munster/munster_000117_000019_leftImg8bit.png 452 | munster/munster_000009_000019_leftImg8bit.png 453 | munster/munster_000049_000019_leftImg8bit.png 454 | munster/munster_000148_000019_leftImg8bit.png 455 | munster/munster_000022_000019_leftImg8bit.png 456 | munster/munster_000131_000019_leftImg8bit.png 457 | munster/munster_000006_000019_leftImg8bit.png 458 | 
munster/munster_000005_000019_leftImg8bit.png 459 | munster/munster_000102_000019_leftImg8bit.png 460 | munster/munster_000160_000019_leftImg8bit.png 461 | munster/munster_000107_000019_leftImg8bit.png 462 | munster/munster_000095_000019_leftImg8bit.png 463 | munster/munster_000106_000019_leftImg8bit.png 464 | munster/munster_000034_000019_leftImg8bit.png 465 | munster/munster_000143_000019_leftImg8bit.png 466 | munster/munster_000017_000019_leftImg8bit.png 467 | munster/munster_000040_000019_leftImg8bit.png 468 | munster/munster_000152_000019_leftImg8bit.png 469 | munster/munster_000154_000019_leftImg8bit.png 470 | munster/munster_000100_000019_leftImg8bit.png 471 | munster/munster_000004_000019_leftImg8bit.png 472 | munster/munster_000141_000019_leftImg8bit.png 473 | munster/munster_000011_000019_leftImg8bit.png 474 | munster/munster_000055_000019_leftImg8bit.png 475 | munster/munster_000134_000019_leftImg8bit.png 476 | munster/munster_000054_000019_leftImg8bit.png 477 | munster/munster_000064_000019_leftImg8bit.png 478 | munster/munster_000039_000019_leftImg8bit.png 479 | munster/munster_000103_000019_leftImg8bit.png 480 | munster/munster_000092_000019_leftImg8bit.png 481 | munster/munster_000172_000019_leftImg8bit.png 482 | munster/munster_000042_000019_leftImg8bit.png 483 | munster/munster_000124_000019_leftImg8bit.png 484 | munster/munster_000069_000019_leftImg8bit.png 485 | munster/munster_000026_000019_leftImg8bit.png 486 | munster/munster_000120_000019_leftImg8bit.png 487 | munster/munster_000031_000019_leftImg8bit.png 488 | munster/munster_000162_000019_leftImg8bit.png 489 | munster/munster_000056_000019_leftImg8bit.png 490 | munster/munster_000081_000019_leftImg8bit.png 491 | munster/munster_000123_000019_leftImg8bit.png 492 | munster/munster_000125_000019_leftImg8bit.png 493 | munster/munster_000082_000019_leftImg8bit.png 494 | munster/munster_000133_000019_leftImg8bit.png 495 | munster/munster_000126_000019_leftImg8bit.png 496 | 
munster/munster_000063_000019_leftImg8bit.png 497 | munster/munster_000008_000019_leftImg8bit.png 498 | munster/munster_000149_000019_leftImg8bit.png 499 | munster/munster_000076_000019_leftImg8bit.png 500 | munster/munster_000091_000019_leftImg8bit.png 501 | -------------------------------------------------------------------------------- /dataset/gta5_dataset.py: -------------------------------------------------------------------------------- 1 | import os 2 | import os.path as osp 3 | import numpy as np 4 | import random 5 | import collections 6 | import torch 7 | import torchvision 8 | from torch.utils import data 9 | from PIL import Image 10 | 11 | 12 | class GTA5DataSet(data.Dataset): 13 | def __init__(self, root, list_path, max_iters=None, crop_size=(321, 321), mean=(128, 128, 128), scale=True, mirror=True, ignore_label=255): 14 | self.root = root 15 | self.list_path = list_path 16 | self.crop_size = crop_size 17 | self.scale = scale 18 | self.ignore_label = ignore_label 19 | self.mean = mean 20 | self.is_mirror = mirror 21 | # self.mean_bgr = np.array([104.00698793, 116.66876762, 122.67891434]) 22 | self.img_ids = [i_id.strip() for i_id in open(list_path)] 23 | if not max_iters==None: 24 | self.img_ids = self.img_ids * int(np.ceil(float(max_iters) / len(self.img_ids))) 25 | self.files = [] 26 | 27 | self.id_to_trainid = {7: 0, 8: 1, 11: 2, 12: 3, 13: 4, 17: 5, 28 | 19: 6, 20: 7, 21: 8, 22: 9, 23: 10, 24: 11, 25: 12, 29 | 26: 13, 27: 14, 28: 15, 31: 16, 32: 17, 33: 18} 30 | 31 | # for split in ["train", "trainval", "val"]: 32 | for name in self.img_ids: 33 | img_file = osp.join(self.root, "images/%s" % name) 34 | label_file = osp.join(self.root, "labels/%s" % name) 35 | self.files.append({ 36 | "img": img_file, 37 | "label": label_file, 38 | "name": name 39 | }) 40 | 41 | def __len__(self): 42 | return len(self.files) 43 | 44 | 45 | def __getitem__(self, index): 46 | datafiles = self.files[index] 47 | 48 | image = Image.open(datafiles["img"]).convert('RGB') 
49 | label = Image.open(datafiles["label"]) 50 | name = datafiles["name"] 51 | 52 | # resize 53 | image = image.resize(self.crop_size, Image.BICUBIC) 54 | label = label.resize(self.crop_size, Image.NEAREST) 55 | 56 | image = np.asarray(image, np.float32) 57 | label = np.asarray(label, np.float32) 58 | 59 | # re-assign labels to match the format of Cityscapes 60 | label_copy = 255 * np.ones(label.shape, dtype=np.float32) 61 | for k, v in self.id_to_trainid.items(): 62 | label_copy[label == k] = v 63 | 64 | size = image.shape 65 | image = image[:, :, ::-1] # change to BGR 66 | image -= self.mean 67 | image = image.transpose((2, 0, 1)) 68 | 69 | return image.copy(), label_copy.copy(), np.array(size), name 70 | 71 | 72 | if __name__ == '__main__': 73 | dst = GTA5DataSet("./data", is_transform=True) 74 | trainloader = data.DataLoader(dst, batch_size=4) 75 | -------------------------------------------------------------------------------- /evaluate_bulk.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import numpy as np 3 | import torch 4 | from torch.autograd import Variable 5 | from model.deeplab_multi import DeeplabMulti 6 | from model.discriminator import FCDiscriminator 7 | from model.discriminator import OutspaceDiscriminator 8 | from dataset.cityscapes_dataset import cityscapesDataSet 9 | from torch.utils import data 10 | import os 11 | import cv2 12 | from PIL import Image 13 | import torch.nn as nn 14 | 15 | IMG_MEAN = np.array((104.00698793,116.66876762,122.67891434), dtype=np.float32) 16 | 17 | DATA_DIRECTORY = './data/Cityscapes/' 18 | DATA_LIST_PATH = './dataset/cityscapes_list/val.txt' 19 | SAVE_PATH = './result/cityscapes' 20 | RESTORE_FROM = 'http://vllab.ucmerced.edu/ytsai/CVPR18/GTA2Cityscapes_multi-ed35151c.pth' 21 | IGNORE_LABEL = 255 22 | NUM_CLASSES = 19 23 | NUM_STEPS = 500 # Number of images in the validation set. 
24 | SET = 'val' 25 | #128, 64, 128 26 | palette = [128, 64, 128, 244, 35, 232, 70, 70, 70, 102, 102, 156, 190, 153, 153, 153, 153, 153, 250, 170, 30, 27 | 220, 220, 0, 107, 142, 35, 152, 251, 152, 70, 130, 180, 220, 20, 60, 255, 0, 0, 0, 0, 142, 0, 0, 70, 28 | 0, 60, 100, 0, 80, 100, 0, 0, 230, 119, 11, 32] 29 | zero_pad = 256 * 3 - len(palette) 30 | for i in range(zero_pad): 31 | palette.append(0) 32 | 33 | 34 | def colorize_mask(mask): 35 | # mask: numpy array of the mask 36 | new_mask = Image.fromarray(mask.astype(np.uint8)).convert('P') 37 | new_mask.putpalette(palette) 38 | 39 | return new_mask 40 | 41 | 42 | def create_map(input_size, mode): 43 | if mode == 'h': 44 | T_base = torch.arange(0, float(input_size[1])) 45 | T_base = T_base.view(input_size[1], 1) 46 | T = T_base 47 | for i in range(input_size[0] - 1): 48 | T = torch.cat((T, T_base), 1) 49 | T = torch.div(T, float(input_size[1])) 50 | if mode == 'w': 51 | T_base = torch.arange(0, float(input_size[0])) 52 | T_base = T_base.view(1, input_size[0]) 53 | T = T_base 54 | for i in range(input_size[1] - 1): 55 | T = torch.cat((T, T_base), 0) 56 | T = torch.div(T, float(input_size[0])) 57 | T = T.view(1, 1, T.size(0), T.size(1)) 58 | return T 59 | 60 | 61 | def get_arguments(): 62 | """Parse all the arguments provided from the CLI. 63 | 64 | Returns: 65 | A list of parsed arguments. 
66 | """ 67 | parser = argparse.ArgumentParser(description="DeepLab-ResNet Network") 68 | parser.add_argument("--data-dir", type=str, default=DATA_DIRECTORY, 69 | help="Path to the directory containing the Cityscapes dataset.") 70 | parser.add_argument("--data-list", type=str, default=DATA_LIST_PATH, 71 | help="Path to the file listing the images in the dataset.") 72 | parser.add_argument("--ignore-label", type=int, default=IGNORE_LABEL, 73 | help="The index of the label to ignore during the training.") 74 | parser.add_argument("--num-classes", type=int, default=NUM_CLASSES, 75 | help="Number of classes to predict (including background).") 76 | parser.add_argument("--restore-from", type=str, default=RESTORE_FROM, 77 | help="Where restore model parameters from.") 78 | parser.add_argument("--gpu", type=int, default=0, 79 | help="choose gpu device.") 80 | parser.add_argument("--set", type=str, default=SET, 81 | help="choose evaluation set.") 82 | parser.add_argument("--save", type=str, default=SAVE_PATH, 83 | help="Path to save result.") 84 | return parser.parse_args() 85 | 86 | 87 | def main(): 88 | """Create the model and start the evaluation process.""" 89 | 90 | for i in range(1, 61): 91 | model_path = './snapshots/GTA2Cityscapes/GTA5_{0:d}.pth'.format(i*2000) 92 | model_D_path = './snapshots/GTA2Cityscapes/GTA5_{0:d}_D.pth'.format(i*2000) 93 | save_path = './result/GTA2Cityscapes_{0:d}'.format(i*2000) 94 | args = get_arguments() 95 | 96 | gpu0 = args.gpu 97 | 98 | if not os.path.exists(save_path): 99 | os.makedirs(save_path) 100 | 101 | model = DeeplabMulti(num_classes=args.num_classes) 102 | saved_state_dict = torch.load(model_path) 103 | model.load_state_dict(saved_state_dict) 104 | model.eval() 105 | model.cuda(gpu0) 106 | 107 | num_class_list = [2048, 19] 108 | model_D = nn.ModuleList([FCDiscriminator(num_classes=num_class_list[i]) if i<1 else OutspaceDiscriminator(num_classes=num_class_list[i]) for i in range(2)]) 109 | 
model_D.load_state_dict(torch.load(model_D_path)) 110 | model_D.eval() 111 | model_D.cuda(gpu0) 112 | 113 | testloader = data.DataLoader(cityscapesDataSet(args.data_dir, args.data_list, crop_size=(1024,512), mean=IMG_MEAN, scale=False, mirror=False, set=args.set), 114 | batch_size=1, shuffle=False, pin_memory=True) 115 | 116 | interp = nn.Upsample(size=(1024, 2048), mode='bilinear', align_corners=True) 117 | 118 | with torch.no_grad(): 119 | for index, batch in enumerate(testloader): 120 | if index % 100 == 0: 121 | print('%d processd' % index) 122 | image, _, name = batch 123 | feat, pred = model(Variable(image).cuda(gpu0), model_D, 'target') 124 | 125 | output = interp(pred).cpu().data[0].numpy() 126 | output = output.transpose(1,2,0) 127 | output = np.asarray(np.argmax(output, axis=2), dtype=np.uint8) 128 | 129 | output_col = colorize_mask(output) 130 | output = Image.fromarray(output) 131 | 132 | name = name[0].split('/')[-1] 133 | output.save('%s/%s' % (save_path, name)) 134 | 135 | output_col.save('%s/%s_color.png' % (save_path, name.split('.')[0])) 136 | 137 | print(save_path) 138 | if __name__ == '__main__': 139 | main() 140 | -------------------------------------------------------------------------------- /iou_bulk.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import argparse 3 | import json 4 | from PIL import Image 5 | from os.path import join 6 | import csv 7 | 8 | 9 | def fast_hist(a, b, n): 10 | k = (a >= 0) & (a < n) 11 | return np.bincount(n * a[k].astype(int) + b[k], minlength=n ** 2).reshape(n, n) 12 | 13 | 14 | def per_class_iu(hist): 15 | return np.diag(hist) / (hist.sum(1) + hist.sum(0) - np.diag(hist)) 16 | 17 | 18 | def label_mapping(input, mapping): 19 | output = np.copy(input) 20 | for ind in range(len(mapping)): 21 | output[input == mapping[ind][0]] = mapping[ind][1] 22 | return np.array(output, dtype=np.int64) 23 | 24 | 25 | def compute_mIoU(gt_dir, pred_dir, 
devkit_dir=''): 26 | """ 27 | Compute IoU given the predicted colorized images and 28 | """ 29 | with open(join(devkit_dir, 'info.json'), 'r') as fp: 30 | info = json.load(fp) 31 | num_classes = np.int(info['classes']) 32 | print('Num classes', num_classes) 33 | name_classes = np.array(info['label'], dtype=np.str) 34 | mapping = np.array(info['label2train'], dtype=np.int) 35 | hist = np.zeros((num_classes, num_classes)) 36 | 37 | image_path_list = join(devkit_dir, 'val.txt') 38 | label_path_list = join(devkit_dir, 'label.txt') 39 | gt_imgs = open(label_path_list, 'r').read().splitlines() 40 | gt_imgs = [join(gt_dir, x) for x in gt_imgs] 41 | pred_imgs = open(image_path_list, 'r').read().splitlines() 42 | pred_imgs = [join(pred_dir, x.split('/')[-1]) for x in pred_imgs] 43 | 44 | for ind in range(len(gt_imgs)): 45 | pred = np.array(Image.open(pred_imgs[ind])) 46 | label = np.array(Image.open(gt_imgs[ind])) 47 | label = label_mapping(label, mapping) 48 | if len(label.flatten()) != len(pred.flatten()): 49 | print('Skipping: len(gt) = {:d}, len(pred) = {:d}, {:s}, {:s}'.format(len(label.flatten()), len(pred.flatten()), gt_imgs[ind], pred_imgs[ind])) 50 | continue 51 | hist += fast_hist(label.flatten(), pred.flatten(), num_classes) 52 | if ind > 0 and ind % 10 == 0: 53 | print('{:d} / {:d}: {:0.2f}'.format(ind, len(gt_imgs), 100*np.mean(per_class_iu(hist)))) 54 | 55 | mIoUs = per_class_iu(hist) 56 | for ind_class in range(num_classes): 57 | print('===>' + name_classes[ind_class] + ':\t' + str(round(mIoUs[ind_class] * 100, 2))) 58 | print('===> mIoU: ' + str(round(np.nanmean(mIoUs) * 100, 2))) 59 | return str(round(np.nanmean(mIoUs) * 100, 2)) 60 | 61 | 62 | def main(gt_dir, pred_dir, devkit_dir): 63 | return compute_mIoU(gt_dir, pred_dir, devkit_dir) 64 | 65 | 66 | if __name__ == "__main__": 67 | with open("GTA2Cityscapes_mIoU.csv","a+",newline="") as datacsv: 68 | csvwriter = csv.writer(datacsv,dialect = ("excel")) 69 | for i in range(1, 61): 70 | gt_dir = 
'./data/Cityscapes/gtFine/val' 71 | pred_dir = './result/GTA2Cityscapes_{0:d}'.format(i*2000) 72 | devkit_dir = './dataset/cityscapes_list' 73 | mIoU = main(gt_dir, pred_dir, devkit_dir) 74 | csvwriter.writerow([mIoU]) 75 | -------------------------------------------------------------------------------- /model/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yufei1900/DAST_segmentation/70b41a52da7b6cb6adc6fac156eb1e2dc7139e95/model/__init__.py -------------------------------------------------------------------------------- /model/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yufei1900/DAST_segmentation/70b41a52da7b6cb6adc6fac156eb1e2dc7139e95/model/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /model/__pycache__/deeplab_multi.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yufei1900/DAST_segmentation/70b41a52da7b6cb6adc6fac156eb1e2dc7139e95/model/__pycache__/deeplab_multi.cpython-37.pyc -------------------------------------------------------------------------------- /model/__pycache__/deeplab_multi_val.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yufei1900/DAST_segmentation/70b41a52da7b6cb6adc6fac156eb1e2dc7139e95/model/__pycache__/deeplab_multi_val.cpython-37.pyc -------------------------------------------------------------------------------- /model/__pycache__/discriminator.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yufei1900/DAST_segmentation/70b41a52da7b6cb6adc6fac156eb1e2dc7139e95/model/__pycache__/discriminator.cpython-37.pyc 
-------------------------------------------------------------------------------- /model/deeplab.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | import math 3 | import torch.utils.model_zoo as model_zoo 4 | import torch 5 | import numpy as np 6 | affine_par = True 7 | 8 | 9 | def outS(i): 10 | i = int(i) 11 | i = (i+1)/2 12 | i = int(np.ceil((i+1)/2.0)) 13 | i = (i+1)/2 14 | return i 15 | 16 | def conv3x3(in_planes, out_planes, stride=1): 17 | "3x3 convolution with padding" 18 | return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, 19 | padding=1, bias=False) 20 | 21 | 22 | class BasicBlock(nn.Module): 23 | expansion = 1 24 | 25 | def __init__(self, inplanes, planes, stride=1, downsample=None): 26 | super(BasicBlock, self).__init__() 27 | self.conv1 = conv3x3(inplanes, planes, stride) 28 | self.bn1 = nn.BatchNorm2d(planes, affine = affine_par) 29 | self.relu = nn.ReLU(inplace=True) 30 | self.conv2 = conv3x3(planes, planes) 31 | self.bn2 = nn.BatchNorm2d(planes, affine = affine_par) 32 | self.downsample = downsample 33 | self.stride = stride 34 | 35 | def forward(self, x): 36 | residual = x 37 | 38 | out = self.conv1(x) 39 | out = self.bn1(out) 40 | out = self.relu(out) 41 | 42 | out = self.conv2(out) 43 | out = self.bn2(out) 44 | 45 | if self.downsample is not None: 46 | residual = self.downsample(x) 47 | 48 | out += residual 49 | out = self.relu(out) 50 | 51 | return out 52 | 53 | 54 | class Bottleneck(nn.Module): 55 | expansion = 4 56 | 57 | def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None): 58 | super(Bottleneck, self).__init__() 59 | self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, stride=stride, bias=False) # change 60 | self.bn1 = nn.BatchNorm2d(planes,affine = affine_par) 61 | for i in self.bn1.parameters(): 62 | i.requires_grad = False 63 | 64 | padding = dilation 65 | self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, # change 66 
| padding=padding, bias=False, dilation = dilation) 67 | self.bn2 = nn.BatchNorm2d(planes,affine = affine_par) 68 | for i in self.bn2.parameters(): 69 | i.requires_grad = False 70 | self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False) 71 | self.bn3 = nn.BatchNorm2d(planes * 4, affine = affine_par) 72 | for i in self.bn3.parameters(): 73 | i.requires_grad = False 74 | self.relu = nn.ReLU(inplace=True) 75 | self.downsample = downsample 76 | self.stride = stride 77 | 78 | 79 | def forward(self, x): 80 | residual = x 81 | 82 | out = self.conv1(x) 83 | out = self.bn1(out) 84 | out = self.relu(out) 85 | 86 | out = self.conv2(out) 87 | out = self.bn2(out) 88 | out = self.relu(out) 89 | 90 | out = self.conv3(out) 91 | out = self.bn3(out) 92 | 93 | if self.downsample is not None: 94 | residual = self.downsample(x) 95 | 96 | out += residual 97 | out = self.relu(out) 98 | 99 | return out 100 | 101 | class Classifier_Module(nn.Module): 102 | 103 | def __init__(self, dilation_series, padding_series, num_classes): 104 | super(Classifier_Module, self).__init__() 105 | self.conv2d_list = nn.ModuleList() 106 | for dilation, padding in zip(dilation_series, padding_series): 107 | self.conv2d_list.append(nn.Conv2d(2048, num_classes, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias = True)) 108 | 109 | for m in self.conv2d_list: 110 | m.weight.data.normal_(0, 0.01) 111 | 112 | def forward(self, x): 113 | out = self.conv2d_list[0](x) 114 | for i in range(len(self.conv2d_list)-1): 115 | out += self.conv2d_list[i+1](x) 116 | return out 117 | 118 | 119 | 120 | class ResNet(nn.Module): 121 | def __init__(self, block, layers, num_classes): 122 | self.inplanes = 64 123 | super(ResNet, self).__init__() 124 | self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, 125 | bias=False) 126 | self.bn1 = nn.BatchNorm2d(64, affine = affine_par) 127 | for i in self.bn1.parameters(): 128 | i.requires_grad = False 129 | self.relu = nn.ReLU(inplace=True) 130 | 
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1, ceil_mode=True) # change 131 | self.layer1 = self._make_layer(block, 64, layers[0]) 132 | self.layer2 = self._make_layer(block, 128, layers[1], stride=2) 133 | self.layer3 = self._make_layer(block, 256, layers[2], stride=1, dilation=2) 134 | self.layer4 = self._make_layer(block, 512, layers[3], stride=1, dilation=4) 135 | self.layer5 = self._make_pred_layer(Classifier_Module, [6,12,18,24],[6,12,18,24],num_classes) 136 | 137 | for m in self.modules(): 138 | if isinstance(m, nn.Conv2d): 139 | n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels 140 | m.weight.data.normal_(0, 0.01) 141 | elif isinstance(m, nn.BatchNorm2d): 142 | m.weight.data.fill_(1) 143 | m.bias.data.zero_() 144 | # for i in m.parameters(): 145 | # i.requires_grad = False 146 | 147 | def _make_layer(self, block, planes, blocks, stride=1, dilation=1): 148 | downsample = None 149 | if stride != 1 or self.inplanes != planes * block.expansion or dilation == 2 or dilation == 4: 150 | downsample = nn.Sequential( 151 | nn.Conv2d(self.inplanes, planes * block.expansion, 152 | kernel_size=1, stride=stride, bias=False), 153 | nn.BatchNorm2d(planes * block.expansion,affine = affine_par)) 154 | for i in downsample._modules['1'].parameters(): 155 | i.requires_grad = False 156 | layers = [] 157 | layers.append(block(self.inplanes, planes, stride,dilation=dilation, downsample=downsample)) 158 | self.inplanes = planes * block.expansion 159 | for i in range(1, blocks): 160 | layers.append(block(self.inplanes, planes, dilation=dilation)) 161 | 162 | return nn.Sequential(*layers) 163 | def _make_pred_layer(self,block, dilation_series, padding_series,num_classes): 164 | return block(dilation_series,padding_series,num_classes) 165 | 166 | def forward(self, x): 167 | x = self.conv1(x) 168 | x = self.bn1(x) 169 | x = self.relu(x) 170 | x = self.maxpool(x) 171 | x = self.layer1(x) 172 | x = self.layer2(x) 173 | x = self.layer3(x) 174 | x = self.layer4(x) 
175 | x = self.layer5(x) 176 | 177 | return x 178 | 179 | def get_1x_lr_params_NOscale(self): 180 | """ 181 | This generator returns all the parameters of the net except for 182 | the last classification layer. Note that for each batchnorm layer, 183 | requires_grad is set to False in deeplab_resnet.py, therefore this function does not return 184 | any batchnorm parameter 185 | """ 186 | b = [] 187 | 188 | b.append(self.conv1) 189 | b.append(self.bn1) 190 | b.append(self.layer1) 191 | b.append(self.layer2) 192 | b.append(self.layer3) 193 | b.append(self.layer4) 194 | 195 | 196 | for i in range(len(b)): 197 | for j in b[i].modules(): 198 | jj = 0 199 | for k in j.parameters(): 200 | jj+=1 201 | if k.requires_grad: 202 | yield k 203 | 204 | def get_10x_lr_params(self): 205 | """ 206 | This generator returns all the parameters for the last layer of the net, 207 | which does the classification of pixel into classes 208 | """ 209 | b = [] 210 | b.append(self.layer5.parameters()) 211 | 212 | for j in range(len(b)): 213 | for i in b[j]: 214 | yield i 215 | 216 | 217 | 218 | def optim_parameters(self, args): 219 | return [{'params': self.get_1x_lr_params_NOscale(), 'lr': args.learning_rate}, 220 | {'params': self.get_10x_lr_params(), 'lr': 10*args.learning_rate}] 221 | 222 | 223 | def Res_Deeplab(num_classes=21): 224 | model = ResNet(Bottleneck,[3, 4, 23, 3], num_classes) 225 | return model 226 | 227 | -------------------------------------------------------------------------------- /model/deeplab_multi.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | import math 3 | import torch.utils.model_zoo as model_zoo 4 | import torch 5 | import numpy as np 6 | 7 | affine_par = True 8 | 9 | 10 | def outS(i): 11 | i = int(i) 12 | i = (i + 1) / 2 13 | i = int(np.ceil((i + 1) / 2.0)) 14 | i = (i + 1) / 2 15 | return i 16 | 17 | 18 | def conv3x3(in_planes, out_planes, stride=1): 19 | "3x3 convolution with padding" 20 | 
class BasicBlock(nn.Module):
    """Two-convolution residual unit (ResNet-18/34 style).

    Computes relu(bn2(conv2(relu(bn1(conv1(x))))) + shortcut), where the
    shortcut is the identity unless a `downsample` module is supplied.
    """

    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        # The first 3x3 conv may downsample via `stride`; the second never does.
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes, affine=affine_par)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes, affine=affine_par)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Project the identity path only when a downsample module was given.
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out += shortcut
        return self.relu(out)
class Classifier_Module(nn.Module):
    """ASPP-style prediction head: parallel dilated 3x3 convolutions over the
    same input, with their outputs summed into one score map."""

    def __init__(self, inplanes, dilation_series, padding_series, num_classes):
        super(Classifier_Module, self).__init__()
        self.conv2d_list = nn.ModuleList()
        # One branch per (dilation, padding) pair; padding equals dilation in
        # the configurations used by this project, keeping spatial size fixed.
        for dilation, padding in zip(dilation_series, padding_series):
            branch = nn.Conv2d(inplanes, num_classes, kernel_size=3, stride=1,
                               padding=padding, dilation=dilation, bias=True)
            self.conv2d_list.append(branch)

        # Re-initialise every branch weight with a small gaussian.
        for branch in self.conv2d_list:
            branch.weight.data.normal_(0, 0.01)

    def forward(self, x):
        # Sum the branch responses left to right, starting from the first.
        out = self.conv2d_list[0](x)
        for branch in self.conv2d_list[1:]:
            out = out + branch(x)
        return out
def forward(self, x, D, domain):
    """Run the backbone with optional discriminator-guided attention.

    Args:
        x: input image batch (N, 3, H, W).
        D: sequence of discriminators; only D[0] is used here, applied to
           the layer4 features when domain == 'target'.
        domain: 'source' (features pass through unchanged) or 'target'
           (features are re-weighted by an attention map derived from D[0]).

    Returns:
        (x4, out): the layer4 feature map and the layer6 class-score map.

    NOTE(review): if `domain` is neither 'source' nor 'target', `x4_a4` is
    never bound and this raises NameError.
    """
    x = self.conv1(x)
    x = self.bn1(x)
    x = self.relu(x)
    x = self.maxpool(x)
    x1 = self.layer1(x)
    x2 = self.layer2(x1)
    x3 = self.layer3(x2)
    x4 = self.layer4(x3)
    if domain == 'source':
        x4_a4 = x4
    if domain == 'target':
        # Attention weights in [0, 1): |tanh(D0(x4))|, broadcast over x4.
        a4 = D[0](x4)
        a4 = self.tanh(a4)
        a4 = torch.abs(a4)
        a4_big = a4.expand(x4.size())
        # Residual re-weighting: (1 + a4) * x4.
        x4_a4 = a4_big*x4 + x4
    x5 = self.layer5(x4_a4)
    out = self.layer6(x5)

    return x4, out
generator returns all the parameters of the net except for 195 | the last classification layer. Note that for each batchnorm layer, 196 | requires_grad is set to False in deeplab_resnet.py, therefore this function does not return 197 | any batchnorm parameter 198 | """ 199 | b = [] 200 | 201 | b.append(self.conv1) 202 | b.append(self.bn1) 203 | b.append(self.layer1) 204 | b.append(self.layer2) 205 | b.append(self.layer3) 206 | b.append(self.layer4) 207 | b.append(self.layer5) 208 | 209 | for i in range(len(b)): 210 | for j in b[i].modules(): 211 | jj = 0 212 | for k in j.parameters(): 213 | jj += 1 214 | if k.requires_grad: 215 | yield k 216 | 217 | def get_10x_lr_params(self): 218 | """ 219 | This generator returns all the parameters for the last layer of the net, 220 | which does the classification of pixel into classes 221 | """ 222 | b = [] 223 | b.append(self.layer6.parameters()) 224 | 225 | for j in range(len(b)): 226 | for i in b[j]: 227 | yield i 228 | 229 | def optim_parameters(self, args): 230 | return [{'params': self.get_1x_lr_params_NOscale(), 'lr': args.learning_rate}, 231 | {'params': self.get_10x_lr_params(), 'lr': 10 * args.learning_rate}] 232 | 233 | 234 | def DeeplabMulti(num_classes=21): 235 | model = ResNetMulti(Bottleneck, [3, 4, 23, 2, 1], num_classes) 236 | return model 237 | 238 | -------------------------------------------------------------------------------- /model/deeplab_vgg.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch 3 | from torch import nn 4 | import torch.nn.functional as F 5 | from torchvision import models 6 | 7 | class Classifier_Module(nn.Module): 8 | 9 | def __init__(self, dims_in, dilation_series, padding_series, num_classes): 10 | super(Classifier_Module, self).__init__() 11 | self.conv2d_list = nn.ModuleList() 12 | for dilation, padding in zip(dilation_series, padding_series): 13 | self.conv2d_list.append(nn.Conv2d(dims_in, num_classes, 
class DeeplabVGG(nn.Module):
    """DeepLab segmentation network on a VGG-16 backbone, with a
    discriminator-guided attention hook between layer3 and layer4."""

    def __init__(self, num_classes, vgg16_caffe_path=None, pretrained=False):
        super(DeeplabVGG, self).__init__()
        vgg = models.vgg16()
        if pretrained:
            # Expects a plain state_dict at vgg16_caffe_path.
            vgg.load_state_dict(torch.load(vgg16_caffe_path))

        features, classifier = list(vgg.features.children()), list(vgg.classifier.children())

        #remove pool4/pool5
        # Drops the module at index 23 and everything from index 30 on.
        features = nn.Sequential(*(features[i] for i in list(range(23))+list(range(24,30))))

        # After the removal above, indices 23/25/27 are the last three conv
        # layers; dilate them to compensate for the removed pooling.
        # NOTE(review): presumably conv5_1..conv5_3 of VGG-16 — confirm
        # against torchvision's vgg16 layer ordering.
        for i in [23,25,27]:
            features[i].dilation = (2,2)
            features[i].padding = (2,2)

        # Dilated fc6/fc7 replacements, as in DeepLab-LargeFOV.
        fc6 = nn.Conv2d(512, 1024, kernel_size=3, padding=4, dilation=4)
        fc7 = nn.Conv2d(1024, 1024, kernel_size=3, padding=4, dilation=4)

        self.layer1 = nn.Sequential(*([features[i] for i in range(0,16)]))
        self.layer2 = nn.Sequential(*([features[i] for i in range(16,21)]))
        self.layer3 = nn.Sequential(*([features[i] for i in range(21,len(features))]))
        self.layer4 = nn.Sequential(*([fc6, nn.ReLU(inplace=True), fc7, nn.ReLU(inplace=True)]))
        self.tanh = nn.Tanh()

        self.classifier = Classifier_Module(1024, [6,12,18,24],[6,12,18,24],num_classes)

    def forward(self, x, D, domain):
        """Forward pass; `domain` is 'source' (no attention) or 'target'
        (layer3 features re-weighted by |tanh(D[0](x3))|).

        Returns (x3, x5): backbone features and class-score map.
        NOTE(review): any other `domain` value leaves x3_a3 unbound (NameError).
        """
        x1 = self.layer1(x)
        x2 = self.layer2(x1)
        x3 = self.layer3(x2)
        if domain=='source':
            x3_a3 = x3
        if domain=='target':
            a3 = D[0](x3)
            a3 = self.tanh(a3)
            a3 = torch.abs(a3)
            a3_big = a3.expand(x3.size())
            # Residual re-weighting: (1 + a3) * x3.
            x3_a3 = a3_big*x3 + x3
        x4 = self.layer4(x3_a3)
        x5 = self.classifier(x4)
        return x3, x5

    def optim_parameters(self, args):
        # Single parameter group; `args` is unused (kept for interface parity
        # with the ResNet model's optim_parameters).
        return self.parameters()
def forward(self, x, D, domain):
    """Run the VGG backbone with optional discriminator-guided attention.

    Args:
        x: input image batch (N, 3, H, W).
        D: sequence of discriminators; only D[0] is used, applied to the
           layer3 features when domain == 'target'.
        domain: 'source' (features pass through) or 'target' (features are
           re-weighted by an attention map derived from D[0]).

    Returns:
        (x3, x5): the layer3 feature map and the classifier score map.

    NOTE(review): if `domain` is neither 'source' nor 'target', `x3_a3` is
    never bound and this raises NameError.
    """
    x1 = self.layer1(x)
    x2 = self.layer2(x1)
    x3 = self.layer3(x2)
    if domain=='source':
        x3_a3 = x3
    if domain=='target':
        # Attention weights in [0, 1): |tanh(D0(x3))|, broadcast over x3.
        a3 = D[0](x3)
        a3 = self.tanh(a3)
        a3 = torch.abs(a3)
        a3_big = a3.expand(x3.size())
        # Residual re-weighting: (1 + a3) * x3.
        x3_a3 = a3_big*x3 + x3
    x4 = self.layer4(x3_a3)
    x5 = self.classifier(x4)
    return x3, x5
class OutspaceDiscriminator(nn.Module):
    """Output-space domain discriminator.

    Five 4x4 stride-2 convolutions with LeakyReLU(0.2) between them; the
    input is downsampled by 2^5 overall and a single-channel logit map is
    produced (no sigmoid — pair with a logits-based loss).
    """

    def __init__(self, num_classes, ndf = 64):
        super(OutspaceDiscriminator, self).__init__()

        # Channel width doubles at every stage: ndf -> 8*ndf.
        self.conv1 = nn.Conv2d(num_classes, ndf, kernel_size=4, stride=2, padding=1)
        self.conv2 = nn.Conv2d(ndf, ndf*2, kernel_size=4, stride=2, padding=1)
        self.conv3 = nn.Conv2d(ndf*2, ndf*4, kernel_size=4, stride=2, padding=1)
        self.conv4 = nn.Conv2d(ndf*4, ndf*8, kernel_size=4, stride=2, padding=1)
        self.classifier = nn.Conv2d(ndf*8, 1, kernel_size=4, stride=2, padding=1)

        self.leaky_relu = nn.LeakyReLU(negative_slope=0.2, inplace=True)

    def forward(self, x):
        # Four conv+activation stages, then the final logit conv.
        for stage in (self.conv1, self.conv2, self.conv3, self.conv4):
            x = self.leaky_relu(stage(x))
        return self.classifier(x)
def load_image_with_cache(path, cache=None):
    """Open an image, optionally memoising the raw file bytes in `cache`.

    Args:
        path: filesystem path of the image.
        cache: optional dict-like mapping path -> raw bytes; populated on a
            miss so subsequent loads skip the filesystem.

    Returns:
        A PIL.Image.Image for `path`.
    """
    if cache is not None:
        # Fixed: dict.has_key() was removed in Python 3 — use `in`.
        if path not in cache:
            with open(path, 'rb') as f:
                cache[path] = f.read()
        # Fixed: the cached payload is bytes, so wrap it in BytesIO;
        # StringIO only accepts str under Python 3.
        from io import BytesIO
        return Image.open(BytesIO(cache[path]))
    return Image.open(path)
class CrossEntropy2d(nn.Module):
    """Pixel-wise cross-entropy loss that skips `ignore_label` pixels.

    Args:
        size_average: if True (default) the loss is averaged over the valid
            pixels, otherwise it is summed.
        ignore_label: target value marking pixels excluded from the loss
            (default 255).
    """

    def __init__(self, size_average=True, ignore_label=255):
        super(CrossEntropy2d, self).__init__()
        self.size_average = size_average
        self.ignore_label = ignore_label

    def forward(self, predict, target, weight=None):
        """
        Args:
            predict:(n, c, h, w) raw (unnormalised) class scores
            target:(n, h, w) integer class indices
            weight (Tensor, optional): a manual rescaling weight given to each class.
                If given, has to be a Tensor of size "nclasses"
        Returns:
            Scalar loss tensor (a zero tensor when no pixel is valid).
        """
        assert not target.requires_grad
        assert predict.dim() == 4
        assert target.dim() == 3
        assert predict.size(0) == target.size(0), "{0} vs {1} ".format(predict.size(0), target.size(0))
        assert predict.size(2) == target.size(1), "{0} vs {1} ".format(predict.size(2), target.size(1))
        # Fixed: the failure message formatted target.size(3), which itself
        # raises on a 3-D target; compare/report target.size(2).
        assert predict.size(3) == target.size(2), "{0} vs {1} ".format(predict.size(3), target.size(2))
        n, c, h, w = predict.size()
        # Boolean mask of pixels that participate in the loss.
        target_mask = (target >= 0) & (target != self.ignore_label)
        target = target[target_mask]
        # Fixed: the old guard checked target.dim(), which is 1 even for an
        # empty selection, so it never fired; check the element count.
        if target.numel() == 0:
            return torch.zeros(1, device=predict.device)
        # Reorder scores to (n, h, w, c) and keep only valid pixels' rows.
        predict = predict.transpose(1, 2).transpose(2, 3).contiguous()
        predict = predict[target_mask.view(n, h, w, 1).repeat(1, 1, 1, c)].view(-1, c)
        # `size_average` maps onto the modern `reduction` argument; the
        # legacy size_average= kwarg is deprecated in current PyTorch.
        loss = F.cross_entropy(predict, target, weight=weight,
                               reduction='mean' if self.size_average else 'sum')
        return loss