├── data
│   ├── office
│   │   ├── readme
│   │   └── dslr_list.txt
│   └── office-home
│       └── readme
├── train_um.sh
├── lr_schedule.py
├── README.md
├── train_office.sh
├── loss.py
├── train_office_home.sh
├── alexnet.py
├── data_list.py
├── utils.py
├── net.py
├── pre_process.py
├── train_svhnmnist.py
├── train_uspsmnist.py
├── train_uspsmnist_pixel.py
├── train_image.py
└── network.py
/data/office/readme:
--------------------------------------------------------------------------------
1 | 
2 | 
--------------------------------------------------------------------------------
/data/office-home/readme:
--------------------------------------------------------------------------------
1 | 
2 | 
--------------------------------------------------------------------------------
/train_um.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | python train_uspsmnist.py --epochs 30 --task USPS2MNIST --cla_plus_weight 0.3 --cyc_loss_weight 0.05 --weight_in_loss_g 1,0.01,1,1
3 | 
4 | python train_uspsmnist_pixel.py --epochs 30 --task MNIST2USPS --cla_plus_weight 0.3 --cyc_loss_weight 0.05 --weight_in_loss_g 1,0.01,1,1
--------------------------------------------------------------------------------
/lr_schedule.py:
--------------------------------------------------------------------------------
1 | def inv_lr_scheduler(optimizer, iter_num, gamma, power, lr=0.001, weight_decay=0.0005):
2 |     """Inverse decay: lr = lr0 * (1 + gamma * iter_num) ** (-power), scaled per group by 'lr_mult'."""
3 |     lr = lr * (1 + gamma * iter_num) ** (-power)
4 |     for param_group in optimizer.param_groups:
5 |         param_group['lr'] = lr * param_group['lr_mult']
6 |         param_group['weight_decay'] = weight_decay * param_group['decay_mult']
7 | 
8 |     return optimizer
9 | 
10 | 
11 | schedule_dict = {"inv": inv_lr_scheduler}
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # 3CATN
2 | Cycle-consistent Conditional Adversarial Domain Adaptation Networks, ACM MM 2019
3 | 
4 | Source code for "Cycle-consistent Conditional Adversarial Transfer Networks", published at ACM Multimedia 2019. The paper can be found at https://arxiv.org/abs/1909.07618
5 | 
6 | You can directly run the .sh files to get the results.
7 | 
8 | If you find this repository helpful, please cite the following work:
9 | > @inproceedings{li2019cycle,
10 | >   title={Cycle-consistent Conditional Adversarial Transfer Networks},
11 | >   author={Li, Jingjing and Chen, Erpeng and Ding, Zhengming and Zhu, Lei and Lu, Ke and Huang, Zi},
12 | >   booktitle={ACM MM},
13 | >   year={2019},
14 | >   organization={ACM}
15 | > }
16 | 
17 | # Acknowledgement
18 | Some parts of this project are inspired by CDAN.
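`lr_schedule.py` implements the inverse-decay schedule `lr = lr0 * (1 + gamma * iter_num) ** (-power)`, applied per parameter group through its `lr_mult`/`decay_mult` entries. Below is a minimal sketch of driving it each iteration; the toy model and the `gamma`/`power` values are illustrative assumptions, not defaults taken from this repository:

```python
import torch.nn as nn
import torch.optim as optim
from lr_schedule import inv_lr_scheduler

model = nn.Linear(256, 31)  # stand-in for the real backbone
# inv_lr_scheduler reads 'lr_mult' and 'decay_mult' from every param group,
# matching the dicts returned by get_parameters() in net.py.
optimizer = optim.SGD([{"params": model.parameters(), "lr_mult": 1, "decay_mult": 2}],
                      lr=0.001, momentum=0.9, weight_decay=0.0005)

for iter_num in range(10000):
    optimizer = inv_lr_scheduler(optimizer, iter_num, gamma=0.001, power=0.75)
    # ... forward pass, loss.backward(), optimizer.step() ...
```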
19 | 
--------------------------------------------------------------------------------
/train_office.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | #w2a
3 | python train_image.py --gpu_id 0 --cyc_loss_weight 0.05 --cla_plus_weight 0.1 --weight_in_lossG 1,0.01,0.1,0.1 --s_dset_path data/office/webcam_list.txt --t_dset_path data/office/amazon_list.txt --source webcam --target amazon
4 | 
5 | #w2d
6 | python train_image.py --gpu_id 0 --cyc_loss_weight 0.05 --cla_plus_weight 0.1 --weight_in_lossG 1,0.01,0.1,0.1 --s_dset_path data/office/webcam_list.txt --t_dset_path data/office/dslr_list.txt --source webcam --target dslr
7 | 
8 | #a2w
9 | python train_image.py --gpu_id 0 --cyc_loss_weight 0.05 --cla_plus_weight 0.1 --weight_in_lossG 1,0.01,0.1,0.1 --s_dset_path data/office/amazon_list.txt --t_dset_path data/office/webcam_list.txt --source amazon --target webcam
10 | 
11 | #a2d
12 | python train_image.py --gpu_id 0 --cyc_loss_weight 0.05 --cla_plus_weight 0.1 --weight_in_lossG 1,0.01,0.1,0.1 --s_dset_path data/office/amazon_list.txt --t_dset_path data/office/dslr_list.txt --source amazon --target dslr
13 | 
14 | #d2w
15 | python train_image.py --gpu_id 0 --cyc_loss_weight 0.05 --cla_plus_weight 0.1 --weight_in_lossG 1,0.01,0.1,0.1 --s_dset_path data/office/dslr_list.txt --t_dset_path data/office/webcam_list.txt --source dslr --target webcam
16 | 
17 | #d2a
18 | python train_image.py --gpu_id 0 --cyc_loss_weight 0.05 --cla_plus_weight 0.1 --weight_in_lossG 1,0.01,0.1,0.1 --s_dset_path data/office/dslr_list.txt --t_dset_path data/office/amazon_list.txt --source dslr --target amazon
19 | 
--------------------------------------------------------------------------------
/loss.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import torch
3 | import torch.nn as nn
4 | from torch.autograd import Variable
5 | import math
6 | import torch.nn.functional as F
7 | import pdb
8 | 
9 | def Entropy(input_):
10 |     bs = input_.size(0)
11 |     epsilon = 1e-5
12 |     entropy = -input_ * torch.log(input_ + epsilon)
13 |     entropy = torch.sum(entropy, dim=1)
14 |     return entropy
15 | 
16 | def grl_hook(coeff):
17 |     def fun1(grad):
18 |         return -coeff*grad.clone()
19 |     return fun1
20 | 
21 | def CDAN(input_list, ad_net, entropy=None, coeff=None, random_layer=None):
22 |     softmax_output = input_list[1].detach()
23 |     feature = input_list[0]
24 |     if random_layer is None:
25 |         op_out = torch.bmm(softmax_output.unsqueeze(2), feature.unsqueeze(1))
26 |         ad_out = ad_net(op_out.view(-1, softmax_output.size(1) * feature.size(1)))
27 |     else:
28 |         random_out = random_layer.forward([feature, softmax_output])
29 |         ad_out = ad_net(random_out.view(-1, random_out.size(1)))
30 |     batch_size = softmax_output.size(0) // 2
31 |     dc_target = torch.from_numpy(np.array([[1]] * batch_size + [[0]] * batch_size)).float().cuda()
32 |     if entropy is not None:
33 |         entropy.register_hook(grl_hook(coeff))
34 |         entropy = 1.0 + torch.exp(-entropy)
35 |         source_mask = torch.ones_like(entropy)
36 |         source_mask[feature.size(0)//2:] = 0
37 |         source_weight = entropy*source_mask
38 |         target_mask = torch.ones_like(entropy)
39 |         target_mask[0:feature.size(0)//2] = 0
40 |         target_weight = entropy*target_mask
41 |         weight = source_weight / torch.sum(source_weight).detach().item() + \
42 |                  target_weight / torch.sum(target_weight).detach().item()
43 |         return torch.sum(weight.view(-1, 1) * nn.BCELoss(reduction='none')(ad_out, dc_target)) / torch.sum(weight).detach().item()
44 |     else:
45 |         return nn.BCELoss()(ad_out, dc_target)
46 | 
47 | def DANN(features, ad_net):
48 |     ad_out = ad_net(features)
49 |     batch_size = ad_out.size(0) // 2
50 |     dc_target = torch.from_numpy(np.array([[1]] * batch_size + [[0]] * batch_size)).float().cuda()
51 |     return nn.BCELoss()(ad_out, dc_target)
52 | 
--------------------------------------------------------------------------------
/train_office_home.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | 
3 | #a2c
4 | python train_image.py --gpu_id 0 --cyc_loss_weight 0.005 --cla_plus_weight 0.1 --weight_in_lossG 1,0.01,0.1,0.1 --s_dset_path data/office-home/Art.txt --t_dset_path data/office-home/Clipart.txt --dset office-home --source Art --target Clipart
5 | 
6 | #a2p
7 | python train_image.py --gpu_id 0 --cyc_loss_weight 0.005 --cla_plus_weight 0.1 --weight_in_lossG 1,0.01,0.1,0.1 --s_dset_path data/office-home/Art.txt --t_dset_path data/office-home/Product.txt --dset office-home --source Art --target Product
8 | 
9 | #a2r
10 | python train_image.py --gpu_id 0 --cyc_loss_weight 0.005 --cla_plus_weight 0.1 --weight_in_lossG 1,0.01,0.1,0.1 --s_dset_path data/office-home/Art.txt --t_dset_path data/office-home/Real_World.txt --dset office-home --source Art --target Real_World
11 | 
12 | #c2a
13 | python train_image.py --gpu_id 0 --cyc_loss_weight 0.005 --cla_plus_weight 0.1 --weight_in_lossG 1,0.01,0.1,0.1 --s_dset_path data/office-home/Clipart.txt --t_dset_path data/office-home/Art.txt --dset office-home --source Clipart --target Art
14 | 
15 | #c2p
16 | python train_image.py --gpu_id 0 --cyc_loss_weight 0.005 --cla_plus_weight 0.1 --weight_in_lossG 1,0.01,0.1,0.1 --s_dset_path data/office-home/Clipart.txt --t_dset_path data/office-home/Product.txt --dset office-home --source Clipart --target Product
17 | 
18 | #c2r
19 | python train_image.py --gpu_id 0 --cyc_loss_weight 0.005 --cla_plus_weight 0.1 --weight_in_lossG 1,0.01,0.1,0.1 --s_dset_path data/office-home/Clipart.txt --t_dset_path data/office-home/Real_World.txt --dset office-home --source Clipart --target Real_World
20 | 
21 | #p2a
22 | python train_image.py --gpu_id 0 --cyc_loss_weight 0.005 --cla_plus_weight 0.1 --weight_in_lossG 1,0.01,0.1,0.1 --s_dset_path data/office-home/Product.txt --t_dset_path data/office-home/Art.txt --dset office-home --source Product --target Art
23 | 
24 | #p2c
25 | python train_image.py --gpu_id 0 --cyc_loss_weight 0.005 --cla_plus_weight 0.1 --weight_in_lossG 1,0.01,0.1,0.1 --s_dset_path data/office-home/Product.txt --t_dset_path data/office-home/Clipart.txt --dset office-home --source Product --target Clipart
26 | 
27 | #p2r
28 | python train_image.py --gpu_id 0 --cyc_loss_weight 0.005 --cla_plus_weight 0.1 --weight_in_lossG 1,0.01,0.1,0.1 --s_dset_path data/office-home/Product.txt --t_dset_path data/office-home/Real_World.txt --dset office-home --source Product --target Real_World
29 | 
30 | #r2a
31 | python train_image.py --gpu_id 0 --cyc_loss_weight 0.005 --cla_plus_weight 0.1 --weight_in_lossG 1,0.01,0.1,0.1 --s_dset_path data/office-home/Real_World.txt --t_dset_path data/office-home/Art.txt --dset office-home --source Real_World --target Art
32 | 
33 | #r2c
34 | python train_image.py --gpu_id 0 --cyc_loss_weight 0.005 --cla_plus_weight 0.1 --weight_in_lossG 1,0.01,0.1,0.1 --s_dset_path data/office-home/Real_World.txt --t_dset_path data/office-home/Clipart.txt --dset office-home --source Real_World --target Clipart
35 | 
36 | #r2p
37 | python train_image.py --gpu_id 0 --cyc_loss_weight 0.005 --cla_plus_weight 0.1 --weight_in_lossG 1,0.01,0.1,0.1 --s_dset_path data/office-home/Real_World.txt --t_dset_path data/office-home/Product.txt --dset office-home --source Real_World --target Product
38 | 
39 | 
--------------------------------------------------------------------------------
/alexnet.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import os
3 | import torch.nn as nn
4 | import torch.utils.model_zoo as model_zoo
5 | 
6 | 
7 | __all__ = ['AlexNet', 'alexnet']
8 | 
9 | class LRN(nn.Module):
10 |     def __init__(self, local_size=1, alpha=1.0, beta=0.75, ACROSS_CHANNELS=True):
11 |         super(LRN, self).__init__()
12 |         self.ACROSS_CHANNELS = ACROSS_CHANNELS
13 |         if ACROSS_CHANNELS:
14 |             self.average = nn.AvgPool3d(kernel_size=(local_size, 1, 1),
15 |                                         stride=1,
16 |                                         padding=(int((local_size-1.0)/2), 0, 0))
17 |         else:
18 |             self.average = nn.AvgPool2d(kernel_size=local_size,
19 |                                         stride=1,
20 |                                         padding=int((local_size-1.0)/2))
21 |         self.alpha = alpha
22 |         self.beta = beta
23 | 
24 | 
25 |     def forward(self, x):
26 |         if self.ACROSS_CHANNELS:
27 |             div = x.pow(2).unsqueeze(1)
28 |             div = self.average(div).squeeze(1)
29 |             div = div.mul(self.alpha).add(1.0).pow(self.beta)
30 |         else:
31 |             div = x.pow(2)
32 |             div = self.average(div)
33 |             div = div.mul(self.alpha).add(1.0).pow(self.beta)
34 |         x = x.div(div)
35 |         return x
36 | 
37 | class AlexNet(nn.Module):
38 | 
39 |     def __init__(self, num_classes=1000):
40 |         super(AlexNet, self).__init__()
41 |         self.features = nn.Sequential(
42 |             nn.Conv2d(3, 96, kernel_size=11, stride=4, padding=0),
43 |             nn.ReLU(inplace=True),
44 |             LRN(local_size=5, alpha=0.0001, beta=0.75),
45 |             nn.MaxPool2d(kernel_size=3, stride=2),
46 |             nn.Conv2d(96, 256, kernel_size=5, padding=2, groups=2),
47 |             nn.ReLU(inplace=True),
48 |             LRN(local_size=5, alpha=0.0001, beta=0.75),
49 |             nn.MaxPool2d(kernel_size=3, stride=2),
50 |             nn.Conv2d(256, 384, kernel_size=3, padding=1),
51 |             nn.ReLU(inplace=True),
52 |             nn.Conv2d(384, 384, kernel_size=3, padding=1, groups=2),
53 |             nn.ReLU(inplace=True),
54 |             nn.Conv2d(384, 256, kernel_size=3, padding=1, groups=2),
55 |             nn.ReLU(inplace=True),
56 |             nn.MaxPool2d(kernel_size=3, stride=2),
57 |         )
58 |         self.classifier = nn.Sequential(
59 |             nn.Linear(256 * 6 * 6, 4096),
60 |             nn.ReLU(inplace=True),
61 |             nn.Dropout(),
62 |             nn.Linear(4096, 4096),
63 |             nn.ReLU(inplace=True),
64 |             nn.Dropout(),
65 |             nn.Linear(4096, num_classes),
66 |         )
67 | 
68 |     def forward(self, x):
69 |         x = self.features(x)
70 |         x = x.view(x.size(0), 256 * 6 * 6)
71 |         x = self.classifier(x)
72 |         return x
73 | 
74 | 
75 | def alexnet(pretrained=False, **kwargs):
76 |     r"""AlexNet model architecture from the
77 |     `"One weird trick..." <https://arxiv.org/abs/1404.5997>`_ paper.
78 |     Args:
79 |         pretrained (bool): If True, returns a model pre-trained on ImageNet
80 |     """
81 |     model = AlexNet(**kwargs)
82 |     if pretrained:
83 |         model_path = './alexnet.pth.tar'
84 |         pretrained_model = torch.load(model_path)
85 |         model.load_state_dict(pretrained_model['state_dict'])
86 |     return model
87 | 
--------------------------------------------------------------------------------
/data_list.py:
--------------------------------------------------------------------------------
1 | #from __future__ import print_function, division
2 | 
3 | import torch
4 | import numpy as np
5 | import random
6 | from PIL import Image
7 | from torch.utils.data import Dataset
8 | import os
9 | import os.path
10 | data_dir = "/home/cep/code/CDAN_Cycle_loss/data/"
11 | data_list_config = {
12 |     "mnist": [os.path.join(data_dir, "svhn2mnist/mnist_train.txt"), os.path.join(data_dir, "svhn2mnist/mnist_test.txt")],
13 |     "usps": [os.path.join(data_dir, "usps2mnist/usps_train.txt"), os.path.join(data_dir, "usps2mnist/usps_test.txt")],
14 |     "svhn": [os.path.join(data_dir, "svhn2mnist/svhn_balanced.txt"), ""],
15 |     "amazon": [os.path.join(data_dir, "office/amazon_list.txt")],
16 |     "dslr": [os.path.join(data_dir, "office/dslr_list.txt")],
17 |     "webcam": [os.path.join(data_dir, "office/webcam_list.txt")],
18 | }
19 | 
20 | def make_dataset(image_list, labels):
21 |     if labels:
22 |         len_ = len(image_list)
23 |         images = [(image_list[i].strip(), labels[i, :]) for i in range(len_)]
24 |     else:
25 |         if len(image_list[0].split()) > 2:
26 |             images = [(val.split()[0], np.array([int(la) for la in val.split()[1:]])) for val in image_list]
27 |         else:
28 |             images = [(val.split()[0], int(val.split()[1])) for val in image_list]
29 |     return images
30 | 
31 | 
32 | def rgb_loader(path):
33 |     with open(path, 'rb') as f:
34 |         with Image.open(f) as img:
35 |             return img.convert('RGB')
36 | 
37 | def l_loader(path):
38 |     with open(path, 'rb') as f:
39 |         with Image.open(f) as img:
40 |             return img.convert('L')
41 | 
42 | class ImageList(Dataset):
43 |     def __init__(self, image_list, labels=None, transform=None, target_transform=None, mode='RGB'):
44 |         imgs = make_dataset(image_list, labels)
45 |         if len(imgs) == 0:
46 |             raise RuntimeError("Found 0 images in the given image list; "
47 |                                "each line should be '<path> <label>'")
48 | 
49 |         self.imgs = imgs
50 |         self.transform = transform
51 |         self.target_transform = target_transform
52 |         if mode == 'RGB':
53 |             self.loader = rgb_loader
54 |         elif mode == 'L':
55 |             self.loader = l_loader
56 | 
57 |     def __getitem__(self, index):
58 |         path, target = self.imgs[index]
59 |         img = self.loader(path)
60 |         if self.transform is not None:
61 |             img = self.transform(img)
62 |         if self.target_transform is not None:
63 |             target = self.target_transform(target)
64 | 
65 |         return img, target
66 | 
67 |     def __len__(self):
68 |         return len(self.imgs)
69 | 
70 | class ImageValueList(Dataset):
71 |     def __init__(self, image_list, labels=None, transform=None, target_transform=None,
72 |                  loader=rgb_loader):
73 |         imgs = make_dataset(image_list, labels)
74 |         if len(imgs) == 0:
75 |             raise RuntimeError("Found 0 images in the given image list; "
76 |                                "each line should be '<path> <label>'")
77 | 
78 |         self.imgs = imgs
79 |         self.values = [1.0] * len(imgs)
80 |         self.transform = transform
81 |         self.target_transform = target_transform
82 |         self.loader = loader
83 | 
84 |     def set_values(self, values):
85 |         self.values = values
86 | 
87 |     def __getitem__(self, index):
88 |         path, target = self.imgs[index]
89 |         img = self.loader(path)
90 |         if self.transform is not None:
91 |             img = self.transform(img)
92 |         if self.target_transform is not None:
93 |             target = self.target_transform(target)
94 | 
95 |         return img, target
96 | 
97 |     def __len__(self):
98 |         return len(self.imgs)
99 | 
100 | 
--------------------------------------------------------------------------------
/utils.py:
--------------------------------------------------------------------------------
1 | import random
2 | import time
3 | import datetime
4 | import sys
5 | 
6 | from torch.autograd import Variable
7 | import torch
8 | from visdom import Visdom
9 | import numpy as np
10 | 
11 | 
12 | def tensor2image(tensor):
13 |     image = 127.5 * (tensor[0].cpu().float().numpy() + 1.0)
14 |     if image.shape[0] == 1:
15 |         image = np.tile(image, (3, 1, 1))
16 |     return image.astype(np.uint8)
17 | 
18 | 
19 | class Logger():
20 |     def __init__(self, n_epochs, batches_epoch):
21 |         self.viz = Visdom()
22 |         self.n_epochs = n_epochs
23 |         self.batches_epoch = batches_epoch
24 |         self.epoch = 1
25 |         self.batch = 1
26 |         self.prev_time = time.time()
27 |         self.mean_period = 0
28 |         self.losses = {}
29 |         self.loss_windows = {}
30 |         self.image_windows = {}
31 | 
32 |     def log(self, losses=None, images=None):
33 |         self.mean_period += (time.time() - self.prev_time)
34 |         self.prev_time = time.time()
35 | 
36 |         sys.stdout.write(
37 |             '\rEpoch %03d/%03d [%04d/%04d] -- ' % (self.epoch, self.n_epochs, self.batch, self.batches_epoch))
38 | 
39 |         for i, loss_name in enumerate(losses.keys()):
40 |             if loss_name not in self.losses:
41 |                 self.losses[loss_name] = losses[loss_name].data[0]
42 |             else:
43 |                 self.losses[loss_name] += losses[loss_name].data[0]
44 | 
45 |             if (i + 1) == len(losses.keys()):
46 |                 sys.stdout.write('%s: %.4f -- ' % (loss_name, self.losses[loss_name] / self.batch))
47 |             else:
48 |                 sys.stdout.write('%s: %.4f | ' % (loss_name, self.losses[loss_name] / self.batch))
49 | 
50 |         batches_done = self.batches_epoch * (self.epoch - 1) + self.batch
51 |         batches_left = self.batches_epoch * (self.n_epochs - self.epoch) + self.batches_epoch - self.batch
52 |         sys.stdout.write('ETA: %s' % (datetime.timedelta(seconds=batches_left * self.mean_period / batches_done)))
53 | 
54 |         # Draw images
55 |         for image_name, tensor in images.items():
56 |             if image_name not in self.image_windows:
57 |                 self.image_windows[image_name] = self.viz.image(tensor2image(tensor.data), opts={'title': image_name})
58 |             else:
59 |                 self.viz.image(tensor2image(tensor.data), win=self.image_windows[image_name],
60 |                                opts={'title': image_name})
61 | 
62 |         # End of epoch
63 |         if (self.batch % self.batches_epoch) == 0:
64 |             # Plot losses
65 |             for loss_name, loss in self.losses.items():
66 |                 if loss_name not in self.loss_windows:
67 |                     self.loss_windows[loss_name] = self.viz.line(X=np.array([self.epoch]),
68 |                                                                  Y=np.array([loss / self.batch]),
69 |                                                                  opts={'xlabel': 'epochs', 'ylabel': loss_name,
70 |                                                                        'title': loss_name})
71 |                 else:
72 |                     self.viz.line(X=np.array([self.epoch]), Y=np.array([loss / self.batch]),
73 |                                   win=self.loss_windows[loss_name], update='append')
74 |                 # Reset losses for next epoch
75 |                 self.losses[loss_name] = 0.0
76 | 
77 |             self.epoch += 1
78 |             self.batch = 1
79 |             sys.stdout.write('\n')
80 |         else:
81 |             self.batch += 1
82 | 
83 | 
84 | class ReplayBuffer():
85 |     def __init__(self, max_size=50):
86 |         assert (max_size > 0), 'Empty buffer or trying to create a black hole. Be careful.'
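        # push_and_pop below keeps a pool of up to max_size previously generated
        # samples; with probability 0.5 a query returns an older sample from the
        # pool instead of the newest one (the CycleGAN-style history buffer,
        # which helps stabilize the discriminators).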
87 |         self.max_size = max_size
88 |         self.data = []
89 | 
90 |     def push_and_pop(self, data):
91 |         to_return = []
92 |         for element in data.data:
93 |             element = torch.unsqueeze(element, 0)
94 |             if len(self.data) < self.max_size:
95 |                 self.data.append(element)
96 |                 to_return.append(element)
97 |             else:
98 |                 if random.uniform(0, 1) > 0.5:
99 |                     i = random.randint(0, self.max_size - 1)
100 |                     to_return.append(self.data[i].clone())
101 |                     self.data[i] = element
102 |                 else:
103 |                     to_return.append(element)
104 |         return Variable(torch.cat(to_return))
105 | 
106 | 
107 | class LambdaLR():
108 |     def __init__(self, n_epochs, offset, decay_start_epoch):
109 |         assert ((n_epochs - decay_start_epoch) > 0), "Decay must start before the training session ends!"
110 |         self.n_epochs = n_epochs
111 |         self.offset = offset
112 |         self.decay_start_epoch = decay_start_epoch
113 | 
114 |     def step(self, epoch):
115 |         return 1.0 - max(0, epoch + self.offset - self.decay_start_epoch) / (self.n_epochs - self.decay_start_epoch)
116 | 
117 | 
118 | def weights_init_normal(m):
119 |     classname = m.__class__.__name__
120 |     if classname.find('Conv') != -1:
121 |         torch.nn.init.normal(m.weight.data, 0.0, 0.02)
122 |     elif classname.find('BatchNorm2d') != -1:
123 |         torch.nn.init.normal(m.weight.data, 1.0, 0.02)
124 |         torch.nn.init.constant(m.bias.data, 0.0)
--------------------------------------------------------------------------------
/net.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torchvision
3 | import torch.nn as nn
4 | import numpy as np
5 | from torchvision import models
6 | 
7 | 
8 | class Discriminator(nn.Module):
9 |     def __init__(self):
10 |         super(Discriminator, self).__init__()
11 |         self.conv1 = nn.Sequential(
12 |             nn.Conv2d(1, 32, 5, padding=2),  # batch, 32, 16, 16
13 |             nn.LeakyReLU(0.2, True),
14 |         )
15 |         self.conv2 = nn.Sequential(
16 |             nn.Conv2d(32, 64, 5, padding=2),  # batch, 64, 16, 16
17 |             nn.LeakyReLU(0.2, True),
18 |             nn.AvgPool2d(2, stride=2)  # batch, 64, 8, 8
19 |         )
20 |         self.fc = nn.Sequential(
21 |             nn.Linear(64 * 8 * 8, 1024),
22 |             nn.LeakyReLU(0.2, True),
23 |             nn.Linear(1024, 1),
24 |             nn.Sigmoid()
25 |         )
26 | 
27 |     def forward(self, x):
28 |         '''
29 |         x: batch, width, height, channel=1
30 |         '''
31 |         x = x.view(x.size(0), 1, 16, 16)
32 |         x = self.conv1(x)
33 |         x = self.conv2(x)
34 |         x = x.view(x.size(0), -1)
35 |         x = self.fc(x)
36 |         return x
37 | 
38 | 
39 | class Generator(nn.Module):
40 |     def __init__(self, input_size, num_feature):
41 |         self.num_feature = num_feature
42 |         super(Generator, self).__init__()
43 |         self.fc = nn.Linear(input_size, num_feature)  # batch, 32*32
44 |         self.br = nn.Sequential(
45 |             nn.BatchNorm2d(1),
46 |             nn.ReLU(True)
47 |         )
48 |         self.downsample1 = nn.Sequential(
49 |             nn.Conv2d(1, 50, 3, stride=1, padding=1),  # batch, 50, 32, 32
50 |             nn.BatchNorm2d(50),
51 |             nn.ReLU(True)
52 |         )
53 |         self.downsample2 = nn.Sequential(
54 |             nn.Conv2d(50, 25, 3, stride=1, padding=1),  # batch, 25, 32, 32
55 |             nn.BatchNorm2d(25),
56 |             nn.ReLU(True)
57 |         )
58 |         self.downsample3 = nn.Sequential(
59 |             nn.Conv2d(25, 1, 2, stride=2),  # batch, 1, 16, 16
60 |             nn.Tanh()
61 |         )
62 | 
63 |     def forward(self, x):
64 |         x = self.fc(x)
65 |         x = x.view(x.size(0), 1, 32, 32)
66 |         x = self.downsample1(x)
67 |         x = self.downsample2(x)
68 |         x = self.downsample3(x)
69 |         x = x.view(x.size(0), 256)
70 |         return x
71 | 
72 | 
73 | def grl_hook(coeff):
74 |     def fun1(grad):
75 |         return -coeff*grad.clone()
76 |     return fun1
77 | 
78 | def calc_coeff(iter_num, high=1.0, low=0.0, alpha=10.0, max_iter=10000.0):
79 |     return np.float(2.0 * (high - low) / (1.0 + np.exp(-alpha*iter_num / max_iter)) - (high - low) + low)
80 | 
81 | def init_weights(m):
82 |     classname = m.__class__.__name__
83 |     if classname.find('Conv2d') != -1 or classname.find('ConvTranspose2d') != -1:
84 |         nn.init.kaiming_uniform_(m.weight)
85 |         nn.init.zeros_(m.bias)
86 |     elif classname.find('BatchNorm') != -1:
87 |         nn.init.normal_(m.weight, 1.0, 0.02)
88 |         nn.init.zeros_(m.bias)
89 |     elif classname.find('Linear') != -1:
90 |         nn.init.xavier_normal_(m.weight)
91 |         nn.init.zeros_(m.bias)
92 | 
93 | resnet_dict = {"ResNet18":models.resnet18, "ResNet34":models.resnet34, "ResNet50":models.resnet50, "ResNet101":models.resnet101, "ResNet152":models.resnet152}
94 | 
95 | class ResNetFc(nn.Module):
96 |     def __init__(self, resnet_name, use_bottleneck=True, bottleneck_dim=256, new_cls=False, class_num=1000):
97 |         super(ResNetFc, self).__init__()
98 |         model_resnet = resnet_dict[resnet_name](pretrained=True)
99 |         self.conv1 = model_resnet.conv1
100 |         self.bn1 = model_resnet.bn1
101 |         self.relu = model_resnet.relu
102 |         self.maxpool = model_resnet.maxpool
103 |         self.layer1 = model_resnet.layer1
104 |         self.layer2 = model_resnet.layer2
105 |         self.layer3 = model_resnet.layer3
106 |         self.layer4 = model_resnet.layer4
107 |         self.avgpool = model_resnet.avgpool
108 |         self.feature_layers = nn.Sequential(self.conv1, self.bn1, self.relu, self.maxpool, \
109 |                                             self.layer1, self.layer2, self.layer3, self.layer4, self.avgpool)
110 | 
111 |         self.use_bottleneck = use_bottleneck
112 |         self.new_cls = new_cls
113 |         if new_cls:
114 |             if self.use_bottleneck:
115 |                 self.bottleneck = nn.Linear(model_resnet.fc.in_features, bottleneck_dim)
116 |                 self.fc = nn.Linear(bottleneck_dim, class_num)
117 |                 self.bottleneck.apply(init_weights)
118 |                 self.fc.apply(init_weights)
119 |                 self.__in_features = bottleneck_dim
120 |             else:
121 |                 self.fc = nn.Linear(model_resnet.fc.in_features, class_num)
122 |                 self.fc.apply(init_weights)
123 |                 self.__in_features = model_resnet.fc.in_features
124 |         else:
125 |             self.fc = model_resnet.fc
126 |             self.__in_features = model_resnet.fc.in_features
127 | 
128 |     def forward(self, x):
129 |         x = self.feature_layers(x)
130 |         x = x.view(x.size(0), -1)
131 |         if self.use_bottleneck and self.new_cls:
132 |             x = self.bottleneck(x)
133 |         y = self.fc(x)
134 |         return x, y
135 | 
136 |     def output_num(self):
137 |         return self.__in_features
138 | 
139 |     def get_parameters(self):
140 |         if self.new_cls:
141 |             if self.use_bottleneck:
142 |                 parameter_list = [{"params":self.feature_layers.parameters(), "lr_mult":1, 'decay_mult':2}, \
143 |                                   {"params":self.bottleneck.parameters(), "lr_mult":10, 'decay_mult':2}, \
144 |                                   {"params":self.fc.parameters(), "lr_mult":10, 'decay_mult':2}]
145 |             else:
146 |                 parameter_list = [{"params":self.feature_layers.parameters(), "lr_mult":1, 'decay_mult':2}, \
147 |                                   {"params":self.fc.parameters(), "lr_mult":10, 'decay_mult':2}]
148 |         else:
149 |             parameter_list = [{"params":self.parameters(), "lr_mult":1, 'decay_mult':2}]
150 |         return parameter_list
151 | 
152 | class Net(nn.Module):
153 |     def __init__(self, input_size, class_num):
154 |         super(Net, self).__init__()
155 |         self.fc = nn.Sequential(
156 |             nn.Linear(input_size, 240),
157 |             nn.ReLU(),
158 |             nn.Linear(240, 180),
159 |             nn.ReLU(),
160 |             nn.Linear(180, 120),
161 |             nn.ReLU(),
162 |         )
163 |         self.fc1 = nn.Sequential(
164 |             nn.Linear(120, class_num)
165 |         )
166 |         self.fc2 = models.resnet50(pretrained=True).fc
167 |     def forward(self, x):
168 |         x = x.view(x.size(0), -1)
169 |         x = self.fc(x)
170 |         x = self.fc1(x)
171 |         return x
172 | 
173 | class AdversarialNetwork(nn.Module):
174 |     def __init__(self, in_feature, hidden_size):
175 |         super(AdversarialNetwork, self).__init__()
176 |         self.ad_layer1 = nn.Linear(in_feature, hidden_size)
177 |         self.ad_layer2 = nn.Linear(hidden_size, hidden_size)
178 |         self.ad_layer3 = nn.Linear(hidden_size, 1)
179 |         self.relu1 = nn.ReLU()
180 |         self.relu2 = nn.ReLU()
181 |         self.dropout1 = nn.Dropout(0.5)
182 |         self.dropout2 = nn.Dropout(0.5)
183 |         self.sigmoid = nn.Sigmoid()
184 |         self.apply(init_weights)
185 |         self.iter_num = 0
186 |         self.alpha = 10
187 |         self.low = 0.0
188 |         self.high = 1.0
189 |         self.max_iter = 10000.0
190 | 
191 |     def forward(self, x):
192 |         if self.training:
193 |             self.iter_num += 1
194 |         coeff = calc_coeff(self.iter_num, self.high, self.low, self.alpha, self.max_iter)
195 |         x = x * 1.0
196 |         x.register_hook(grl_hook(coeff))
197 |         x = self.ad_layer1(x)
198 |         x = self.relu1(x)
199 |         x = self.dropout1(x)
200 |         x = self.ad_layer2(x)
201 |         x = self.relu2(x)
202 |         x = self.dropout2(x)
203 |         y = self.ad_layer3(x)
204 |         y = self.sigmoid(y)
205 |         return y
206 | 
207 |     def output_num(self):
208 |         return 1
209 |     def get_parameters(self):
210 |         return [{"params":self.parameters(), "lr_mult":10, 'decay_mult':2}]
211 | 
--------------------------------------------------------------------------------
/pre_process.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from torchvision import transforms
3 | import os
4 | from PIL import Image, ImageOps
5 | import numbers
6 | import torch
7 | import random
8 | class ResizeImage():
9 |     def __init__(self, size):
10 |         if isinstance(size, int):
11 |             self.size = (int(size), int(size))
12 |         else:
13 |             self.size = size
14 |     def __call__(self, img):
15 |         th, tw = self.size
16 |         return img.resize((th, tw))
17 | 
18 | class RandomSizedCrop(object):
19 |     """Crop the given tensor image at a random location.
20 |     Despite the torchvision-style name, this implementation takes a
21 |     channels-first tensor/array and cuts out a fixed (size x size)
22 |     window at a random offset; no aspect-ratio jitter and no resizing
23 |     is performed.
24 |     Args:
25 |         size: side length of the square crop
26 |         interpolation: kept for interface compatibility; currently unused
27 |     """
28 | 
29 |     def __init__(self, size, interpolation=Image.BILINEAR):
30 |         self.size = size
31 |         self.interpolation = interpolation
32 | 
33 |     def __call__(self, img):
34 |         h_off = random.randint(0, img.shape[1]-self.size)
35 |         w_off = random.randint(0, img.shape[2]-self.size)
36 |         img = img[:, h_off:h_off+self.size, w_off:w_off+self.size]
37 |         return img
38 | 
39 | 
40 | class Normalize(object):
41 |     """Normalize a tensor image with mean and standard deviation.
42 |     Given mean: (R, G, B),
43 |     will normalize each channel of the torch.*Tensor, i.e.
44 |     channel = channel - mean
45 |     Args:
46 |         mean (sequence): Sequence of means for R, G, B channels respectively.
47 |     """
48 | 
49 |     def __init__(self, mean=None, meanfile=None):
50 |         if mean:
51 |             self.mean = mean
52 |         else:
53 |             arr = np.load(meanfile)
54 |             self.mean = torch.from_numpy(arr.astype('float32')/255.0)[[2,1,0],:,:]
55 | 
56 |     def __call__(self, tensor):
57 |         """
58 |         Args:
59 |             tensor (Tensor): Tensor image of size (C, H, W) to be normalized.
60 |         Returns:
61 |             Tensor: Normalized image.
62 |         """
63 |         # TODO: make efficient
64 |         for t, m in zip(tensor, self.mean):
65 |             t.sub_(m)
66 |         return tensor
67 | 
68 | 
69 | 
70 | class PlaceCrop(object):
71 |     """Crops the given PIL.Image at the particular index.
72 |     Args:
73 |         size (sequence or int): Desired output size of the crop. If size is an
74 |             int instead of sequence like (w, h), a square crop (size, size) is
75 |             made.
76 |     """
77 | 
78 |     def __init__(self, size, start_x, start_y):
79 |         if isinstance(size, int):
80 |             self.size = (int(size), int(size))
81 |         else:
82 |             self.size = size
83 |         self.start_x = start_x
84 |         self.start_y = start_y
85 | 
86 |     def __call__(self, img):
87 |         """
88 |         Args:
89 |             img (PIL.Image): Image to be cropped.
90 |         Returns:
91 |             PIL.Image: Cropped image.
92 |         """
93 |         th, tw = self.size
94 |         return img.crop((self.start_x, self.start_y, self.start_x + tw, self.start_y + th))
95 | 
96 | 
97 | class ForceFlip(object):
98 |     """Always flip the given PIL.Image horizontally (deterministic, unlike RandomHorizontalFlip)."""
99 | 
100 |     def __call__(self, img):
101 |         """
102 |         Args:
103 |             img (PIL.Image): Image to be flipped.
104 |         Returns:
105 |             PIL.Image: Horizontally flipped image.
106 |         """
107 |         return img.transpose(Image.FLIP_LEFT_RIGHT)
108 | 
109 | class CenterCrop(object):
110 |     """Crops the given channels-first tensor/array at the center.
111 |     Args:
112 |         size (sequence or int): Desired output size of the crop. If size is an
113 |             int instead of sequence like (h, w), a square crop (size, size) is
114 |             made.
115 |     """
116 | 
117 |     def __init__(self, size):
118 |         if isinstance(size, numbers.Number):
119 |             self.size = (int(size), int(size))
120 |         else:
121 |             self.size = size
122 | 
123 |     def __call__(self, img):
124 |         """
125 |         Args:
126 |             img (CHW tensor/array): Image to be cropped.
127 |         Returns:
128 |             Cropped image of the same type.
129 |         """
130 |         w, h = (img.shape[1], img.shape[2])
131 |         th, tw = self.size
132 |         w_off = int((w - tw) / 2.)
133 |         h_off = int((h - th) / 2.)
134 |         img = img[:, h_off:h_off+th, w_off:w_off+tw]
135 |         return img
136 | 
137 | 
138 | def image_train(resize_size=256, crop_size=224, alexnet=False):
139 |     if not alexnet:
140 |         normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
141 |                                          std=[0.229, 0.224, 0.225])
142 |     else:
143 |         normalize = Normalize(meanfile='./ilsvrc_2012_mean.npy')
144 |     return transforms.Compose([
145 |         ResizeImage(resize_size),
146 |         transforms.RandomResizedCrop(crop_size),
147 |         transforms.RandomHorizontalFlip(),
148 |         transforms.ToTensor(),
149 |         normalize
150 |     ])
151 | 
152 | def image_test(resize_size=256, crop_size=224, alexnet=False):
153 |     if not alexnet:
154 |         normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
155 |                                          std=[0.229, 0.224, 0.225])
156 |     else:
157 |         normalize = Normalize(meanfile='./ilsvrc_2012_mean.npy')
158 |     start_first = 0
159 |     start_center = (resize_size - crop_size - 1) / 2
160 |     start_last = resize_size - crop_size - 1
161 | 
162 |     return transforms.Compose([
163 |         ResizeImage(resize_size),
164 |         PlaceCrop(crop_size, start_center, start_center),
165 |         transforms.ToTensor(),
166 |         normalize
167 |     ])
168 | 
169 | def image_test_10crop(resize_size=256, crop_size=224, alexnet=False):
170 |     if not alexnet:
171 |         normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
172 |                                          std=[0.229, 0.224, 0.225])
173 |     else:
174 |         normalize = Normalize(meanfile='./ilsvrc_2012_mean.npy')
175 |     start_first = 0
176 |     start_center = (resize_size - crop_size - 1) / 2
177 |     start_last = resize_size - crop_size - 1
178 |     data_transforms = [
179 |         transforms.Compose([
180 |             ResizeImage(resize_size), ForceFlip(),
181 |             PlaceCrop(crop_size, start_first, start_first),
182 |             transforms.ToTensor(),
183 |             normalize
184 |         ]),
185 |         transforms.Compose([
186 |             ResizeImage(resize_size), ForceFlip(),
187 |             PlaceCrop(crop_size, start_last, start_last),
188 |             transforms.ToTensor(),
189 |             normalize
190 |         ]),
191 |         transforms.Compose([
192 |             ResizeImage(resize_size), ForceFlip(),
193 |             PlaceCrop(crop_size, start_last, start_first),
194 |             transforms.ToTensor(),
195 |             normalize
196 |         ]),
197 |         transforms.Compose([
198 |             ResizeImage(resize_size), ForceFlip(),
199 |             PlaceCrop(crop_size, start_first, start_last),
200 |             transforms.ToTensor(),
201 |             normalize
202 |         ]),
203 |         transforms.Compose([
204 |             ResizeImage(resize_size), ForceFlip(),
205 |             PlaceCrop(crop_size, start_center, start_center),
206 |             transforms.ToTensor(),
207 |             normalize
208 |         ]),
209 |         transforms.Compose([
210 |             ResizeImage(resize_size),
211 |             PlaceCrop(crop_size, start_first, start_first),
212 |             transforms.ToTensor(),
213 |             normalize
214 |         ]),
215 |         transforms.Compose([
216 |             ResizeImage(resize_size),
217 |             PlaceCrop(crop_size, start_last, start_last),
218 |             transforms.ToTensor(),
219 |             normalize
220 |         ]),
221 |         transforms.Compose([
222 |             ResizeImage(resize_size),
223 |             PlaceCrop(crop_size, start_last, start_first),
224 |             transforms.ToTensor(),
225 |             normalize
226 |         ]),
227 |         transforms.Compose([
228 |             ResizeImage(resize_size),
229 |             PlaceCrop(crop_size, start_first, start_last),
230 |             transforms.ToTensor(),
231 |             normalize
232 |         ]),
233 |         transforms.Compose([
234 |             ResizeImage(resize_size),
235 |             PlaceCrop(crop_size, start_center, start_center),
236 |             transforms.ToTensor(),
237 |             normalize
238 |         ])
239 |     ]
240 |     return data_transforms
241 | 
--------------------------------------------------------------------------------
/train_svhnmnist.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import torch
3 | import torch.nn as nn
4 | import torch.nn.functional as F
5 | import torch.optim as optim
6 | from torchvision import datasets, transforms
7 | from data_list import ImageList
8 | import os
9 | from torch.autograd import Variable
10 | import loss as loss_func
11 | import numpy as np
12 | import network
13 | import net
14 | import itertools
15 | from utils import ReplayBuffer
16 | import os.path as osp
17 | import datetime
18 | 
19 | def train(args, model, ad_net, random_layer, train_loader, train_loader1, optimizer, optimizer_ad, epoch, start_epoch, method,
20 |           D_s, D_t, G_s2t, G_t2s, criterion_Sem, criterion_GAN, criterion_cycle, criterion_identity, optimizer_G,
21 |           optimizer_D_t, optimizer_D_s,
22 |           classifier1, classifier1_optim, fake_S_buffer, fake_T_buffer
23 |           ):
24 |     model.train()
25 |     len_source = len(train_loader)
26 |     len_target = len(train_loader1)
27 |     if len_source > len_target:
28 |         num_iter = len_source
29 |     else:
30 |         num_iter = len_target
31 | 
32 |     for batch_idx in range(num_iter):
33 |         if batch_idx % len_source == 0:
34 |             iter_source = iter(train_loader)
35 |         if batch_idx % len_target == 0:
36 |             iter_target = iter(train_loader1)
37 |         data_source, label_source = iter_source.next()
38 |         data_source, label_source = data_source.cuda(), label_source.cuda()
39 |         data_target, label_target = iter_target.next()
40 |         data_target = data_target.cuda()
41 | 
42 |         optimizer.zero_grad()
43 |         optimizer_ad.zero_grad()
44 | 
45 |         features_source, outputs_source = model(data_source)
46 |         features_target, outputs_target = model(data_target)
47 |         features = torch.cat((features_source, features_target), dim=0)
48 |         outputs = torch.cat((outputs_source, outputs_target), dim=0)
49 |         #feature, output = model(torch.cat((data_source, data_target), 0))
50 | 
51 |         loss = nn.CrossEntropyLoss()(outputs.narrow(0, 0, data_source.size(0)), label_source)
52 |         softmax_output = nn.Softmax(dim=1)(outputs)
53 | 
54 |         output1 = classifier1(features)
55 |         softmax_output1 = nn.Softmax(dim=1)(output1)
56 |         softmax_output = (1 - args.cla_plus_weight) * softmax_output + args.cla_plus_weight * softmax_output1
57 | 
58 | 
59 |         if epoch > start_epoch:
60 |             if method == 'CDAN-E':
61 |                 entropy = loss_func.Entropy(softmax_output)
62 |                 loss += loss_func.CDAN([features, softmax_output], ad_net, entropy, network.calc_coeff(num_iter*(epoch-start_epoch)+batch_idx), random_layer)
63 |             elif method == 'CDAN':
64 |                 loss += loss_func.CDAN([features, softmax_output], ad_net, None, None, random_layer)
65 |             elif method == 'DANN':
66 |                 loss += loss_func.DANN(features, ad_net)
67 |             else:
68 |                 raise ValueError('Method cannot be recognized.')
69 | 
70 |         # Cycle
71 |         num_feature = features.size(0)
72 |         # =================train discriminator T
73 |         real_label = Variable(torch.ones(num_feature)).cuda()
74 |         fake_label = Variable(torch.zeros(num_feature)).cuda()
75 | 
76 |         # Train the generators
77 |         optimizer_G.zero_grad()
78 | 
79 |         # Identity loss
80 |         same_t = G_s2t(features_target)
81 |         loss_identity_t = criterion_identity(same_t, features_target)
82 | 
83 |         same_s = G_t2s(features_source)
84 |         loss_identity_s = criterion_identity(same_s, features_source)
85 | 
86 |         # GAN loss
87 |         fake_t = G_s2t(features_source)
88 |         pred_fake = D_t(fake_t)
89 |         loss_G_s2t = criterion_GAN(pred_fake, label_source.float())
90 | 
91 |         fake_s = G_t2s(features_target)
92 |         pred_fake = D_s(fake_s)
93 |         loss_G_t2s = criterion_GAN(pred_fake, label_source.float())
94 | 
95 |         # cycle loss
96 |         recovered_s = G_t2s(fake_t)
97 |         loss_cycle_sts = criterion_cycle(recovered_s, features_source)
98 | 
99 |         recovered_t = G_s2t(fake_s)
100 |         loss_cycle_tst = criterion_cycle(recovered_t, features_target)
101 | 
102 |         # sem loss
103 |         pred_recovered_s = model.classifier(recovered_s)
104 |         pred_fake_t = model.classifier(fake_t)
105 |         loss_sem_t2s = criterion_Sem(pred_recovered_s, pred_fake_t)
106 | 
107 |         pred_recovered_t = model.classifier(recovered_t)
108 |         pred_fake_s = model.classifier(fake_s)
109 |         loss_sem_s2t = criterion_Sem(pred_recovered_t, pred_fake_s)
110 | 
111 |         loss_cycle = loss_cycle_tst + loss_cycle_sts
112 |         weight_in_loss_g = args.weight_in_loss_g.split(',')
113 |         loss_G = float(weight_in_loss_g[0]) * (loss_identity_s + loss_identity_t) + \
114 |                  float(weight_in_loss_g[1]) * (loss_G_s2t + loss_G_t2s) + \
115 |                  float(weight_in_loss_g[2]) * loss_cycle + \
116 |                  float(weight_in_loss_g[3]) * (loss_sem_s2t + loss_sem_t2s)
117 | 
118 | 
119 |         # Train the softmax classifier
120 |         outputs_fake = classifier1(fake_t.detach())
121 |         # Optimize the classifier
122 |         classifier_loss1 = nn.CrossEntropyLoss()(outputs_fake, label_source)
123 |         classifier1_optim.zero_grad()
124 |         classifier_loss1.backward()
125 |         classifier1_optim.step()
126 | 
127 | 
128 |         total_loss = loss + args.cyc_loss_weight * loss_G
129 |         total_loss.backward()
130 |         optimizer.step()
131 |         optimizer_G.step()
132 | 
133 |         ###### Discriminator S ######
134 |         optimizer_D_s.zero_grad()
135 | 
136 |         # Real loss
137 |         pred_real = D_s(features_source.detach())
138 |         loss_D_real = criterion_GAN(pred_real, real_label)
139 | 
140 |         # Fake loss
141 |         fake_s = fake_S_buffer.push_and_pop(fake_s)
142 |         pred_fake = D_s(fake_s.detach())
143 |         loss_D_fake = criterion_GAN(pred_fake, fake_label)
144 | 
145 |         # Total loss
146 |         loss_D_s = loss_D_real + loss_D_fake
147 |         loss_D_s.backward()
148 | 
149 |         optimizer_D_s.step()
150 |         ###################################
151 | 
152 |         ###### Discriminator T ######
153 |         optimizer_D_t.zero_grad()
154 | 
155 |         # Real loss
156 |         pred_real = D_t(features_target.detach())
157 |         loss_D_real = criterion_GAN(pred_real, real_label)
158 | 
159 |         # Fake loss
160 |         fake_t = fake_T_buffer.push_and_pop(fake_t)
161 |         pred_fake = D_t(fake_t.detach())
162 |         loss_D_fake = criterion_GAN(pred_fake, fake_label)
163 | 
164 |         # Total loss
165 |         loss_D_t = loss_D_real + loss_D_fake
166 |         loss_D_t.backward()
167 |         optimizer_D_t.step()
168 | 
169 |         if epoch > start_epoch:
170 |             optimizer_ad.step()
171 |         if (batch_idx + epoch * num_iter) % args.log_interval == 0:
172 |             print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\tLoss+G: {:.6f}'.format(
173 |                 epoch, batch_idx * args.batch_size, num_iter * args.batch_size,
174 |                 100. * batch_idx / num_iter, loss.item(), total_loss.item()))
175 | 
176 | def test(args, epoch, config, model, test_loader):
177 |     model.eval()
178 |     test_loss = 0
179 |     correct = 0
180 |     for data, target in test_loader:
181 |         data, target = data.cuda(), target.cuda()
182 |         feature, output = model(data)
183 |         test_loss += nn.CrossEntropyLoss()(output, target).item()
184 |         pred = output.data.cpu().max(1, keepdim=True)[1]
185 |         correct += pred.eq(target.data.cpu().view_as(pred)).sum().item()
186 | 
187 |     test_loss /= len(test_loader.dataset)
188 | 
189 |     log_str = "epoch: {}, Accuracy: {}/{} ({:.4f}%)".format(
190 |         epoch, correct, len(test_loader.dataset),
191 |         100. * correct / len(test_loader.dataset))
192 |     config["out_file"].write(log_str + "\n")
193 |     config["out_file"].flush()
194 | 
195 | 
196 |     print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
197 |         test_loss, correct, len(test_loader.dataset),
198 |         100. * correct / len(test_loader.dataset)))
199 | 
200 | def main():
201 |     # Training settings
202 |     parser = argparse.ArgumentParser(description='CDAN SVHN MNIST')
203 |     parser.add_argument('--method', type=str, default='CDAN-E', choices=['CDAN', 'CDAN-E', 'DANN'])
204 |     parser.add_argument('--task', default='USPS2MNIST', help='task to perform')
205 |     parser.add_argument('--batch_size', type=int, default=256, help='input batch size for training (default: 256)')
206 |     parser.add_argument('--test_batch_size', type=int, default=1000, help='input batch size for testing (default: 1000)')
207 |     parser.add_argument('--epochs', type=int, default=10, metavar='N', help='number of epochs to train (default: 10)')
208 |     parser.add_argument('--lr', type=float, default=0.03, metavar='LR', help='learning rate (default: 0.03)')
209 |     parser.add_argument('--momentum', type=float, default=0.5, metavar='M', help='SGD momentum (default: 0.5)')
210 |     parser.add_argument('--gpu_id', type=str, default='0', help='cuda device id')
211 |     parser.add_argument('--seed', type=int, default=1, metavar='S', help='random seed (default: 1)')
212 |     parser.add_argument('--log_interval', type=int, default=50, help='how many batches to wait before logging training status')
213 |     parser.add_argument('--random', type=bool, default=False, help='whether to use random')
214 |     parser.add_argument('--output_dir', type=str, default="digits/s2m")
215 |     parser.add_argument('--cla_plus_weight', type=float, default=0.3)
216 |     parser.add_argument('--cyc_loss_weight', type=float, default=0.01)
217 |     parser.add_argument('--weight_in_loss_g', type=str, default='1,0.01,0.1,0.1')
218 |     args = parser.parse_args()
219 | 
220 |     torch.manual_seed(args.seed)
221 |     os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_id
222 | 
223 |     source_list = '../data/svhn2mnist/svhn_balanced.txt'
224 |     target_list = '../data/svhn2mnist/mnist_train.txt'
225 |     test_list = '../data/svhn2mnist/mnist_test.txt'
226 |     # train config
227 |     config = {}
228 | 
229 |     config['method'] = args.method
230 |     config["gpu"] = args.gpu_id
231 |     config['cyc_loss_weight'] = args.cyc_loss_weight
232 |     config['cla_plus_weight'] = args.cla_plus_weight
233 |     config['weight_in_loss_g'] = args.weight_in_loss_g
234 |     config["epochs"] = args.epochs
235 |     config["output_for_test"] = True
236 |     config["output_path"] = "snapshot/" + args.output_dir
237 |     if not osp.exists(config["output_path"]):
238 |         os.system('mkdir -p ' + config["output_path"])
239 |     config["out_file"] = open(osp.join(config["output_path"], "log_svhn_to_mnist_{}.txt".
240 |                               format(str(datetime.datetime.utcnow()))),
241 |                               "w")
242 | 
243 |     config["out_file"].write(str(config))
244 |     config["out_file"].flush()
245 | 
246 |     train_loader = torch.utils.data.DataLoader(
247 |         ImageList(open(source_list).readlines(), transform=transforms.Compose([
248 |             transforms.ToTensor(),
249 |             transforms.Normalize((0.5,), (0.5,))
250 |         ]), mode='RGB'),
251 |         batch_size=args.batch_size, shuffle=True, num_workers=1)
252 |     train_loader1 = torch.utils.data.DataLoader(
253 |         ImageList(open(target_list).readlines(), transform=transforms.Compose([
254 |             transforms.Resize((32,32)),
255 |             transforms.ToTensor(),
256 |             transforms.Normalize((0.5,), (0.5,))
257 |         ]), mode='RGB'),
258 |         batch_size=args.batch_size, shuffle=True, num_workers=1)
259 |     test_loader = torch.utils.data.DataLoader(
260 |         ImageList(open(test_list).readlines(), transform=transforms.Compose([
261 |             transforms.Resize((32,32)),
262 |             transforms.ToTensor(),
263 |             transforms.Normalize((0.5,), (0.5,))
264 |         ]), mode='RGB'),
265 |         batch_size=args.test_batch_size, shuffle=True, num_workers=1)
266 | 
267 |     model = network.DTN()
268 |     model = model.cuda()
269 |     class_num = 10
270 | 
271 |     # Add the generators, discriminators, and an extra classifier
272 |     z_dimension = 512
273 |     D_s = network.models["Discriminator_digits"]()
274 |     D_s = D_s.cuda()
275 |     G_s2t = network.models["Generator_digits"](z_dimension, 1024)
276 |     G_s2t = G_s2t.cuda()
277 | 
278 |     D_t = network.models["Discriminator_digits"]()
279 |     D_t = D_t.cuda()
280 |     G_t2s = network.models["Generator_digits"](z_dimension, 1024)
281 |     G_t2s = G_t2s.cuda()
282 | 
283 |     criterion_GAN = torch.nn.MSELoss()
284 |     criterion_cycle = torch.nn.L1Loss()
285 |     criterion_identity = torch.nn.L1Loss()
286 |     criterion_Sem = torch.nn.L1Loss()
287 | 
288 |     optimizer_G = torch.optim.Adam(itertools.chain(G_s2t.parameters(), G_t2s.parameters()), lr=0.0003)
289 |     optimizer_D_s = torch.optim.Adam(D_s.parameters(), lr=0.0003)
290 |     optimizer_D_t = torch.optim.Adam(D_t.parameters(), lr=0.0003)
291 | 
292 |     fake_S_buffer = ReplayBuffer()
293 |     fake_T_buffer = ReplayBuffer()
294 | 
295 |     ## Add the extra classifier
296 |     classifier1 = net.Net(512, class_num)
297 |     classifier1 = classifier1.cuda()
298 |     classifier1_optim = optim.Adam(classifier1.parameters(), lr=0.0003)
299 | 
300 | 
301 | 
302 |     if args.random:
303 |         random_layer = network.RandomLayer([model.output_num(), class_num], 500)
304 |         ad_net = network.AdversarialNetwork(500, 500)
305 |         random_layer.cuda()
306 |     else:
307 |         random_layer = None
308 |         ad_net = network.AdversarialNetwork(model.output_num() * class_num, 500)
309 |     ad_net = ad_net.cuda()
310 |     optimizer = optim.SGD(model.parameters(), lr=args.lr, weight_decay=0.0005, momentum=0.9)
311 |     optimizer_ad = optim.SGD(ad_net.parameters(), lr=args.lr, weight_decay=0.0005, momentum=0.9)
312 | 
313 |     for epoch in range(1, args.epochs + 1):
314 |         if epoch % 3 == 0:
315 |             for param_group in optimizer.param_groups:
316 |                 param_group["lr"] = param_group["lr"] * 0.3
317 |         train(args, model, ad_net, random_layer, train_loader, train_loader1, optimizer, optimizer_ad, epoch, 0, args.method,
318 |               D_s, D_t, G_s2t, G_t2s, criterion_Sem, criterion_GAN, criterion_cycle, criterion_identity, optimizer_G, optimizer_D_t, optimizer_D_s,
319 |               classifier1, classifier1_optim, fake_S_buffer, fake_T_buffer)
320 |         test(args, epoch, config, model, test_loader)
321 | 
322 | if __name__ == '__main__':
323 |     main()
324 | 
--------------------------------------------------------------------------------
/train_uspsmnist.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import torch
3 | import torch.nn as nn
4 | import torch.nn.functional as F
5 | import torch.optim as optim
6 | from torchvision import datasets, transforms
7 | from data_list import ImageList
8 | import os
9 | from torch.autograd import Variable
10 | import loss as loss_func
11 | import numpy as np
12 | import network
13 | 
14 | def train(args, model, ad_net, random_layer, train_loader, train_loader1, optimizer, optimizer_ad, epoch, start_epoch, method,
15 |           D_s, D_t, G_s2t, G_t2s, criterion_Sem, criterion_GAN, criterion_cycle, criterion_identity, optimizer_G,
16 |           optimizer_D_t, optimizer_D_s,
17 |           classifier1, classifier1_optim, fake_S_buffer, fake_T_buffer):
18 |     model.train()
19 |     len_source = len(train_loader)
20 |     len_target = len(train_loader1)
21 |     if len_source > len_target:
22 |         num_iter = len_source
23 |     else:
24 |         num_iter = len_target
25 | 
26 |     for batch_idx in range(num_iter):
27 |         if batch_idx % len_source == 0:
28 |             iter_source = iter(train_loader)
29 |         if batch_idx % len_target == 0:
30 |             iter_target = iter(train_loader1)
31 |         data_source, label_source = iter_source.next()
32 |         data_source, label_source = data_source.cuda(), label_source.cuda()
33 |         data_target, label_target = iter_target.next()
34 |         data_target = data_target.cuda()
35 |         optimizer.zero_grad()
36 |         optimizer_ad.zero_grad()
37 | 
38 |         features_source, outputs_source = model(data_source)
39 |         features_target, outputs_target = model(data_target)
40 |         features = torch.cat((features_source, features_target), dim=0)
41 |         outputs = torch.cat((outputs_source, outputs_target), dim=0)
42 | 
43 |         loss = nn.CrossEntropyLoss()(outputs.narrow(0, 0, data_source.size(0)), label_source)
44 |         softmax_output = nn.Softmax(dim=1)(outputs)
45 | 
46 |         output1 = classifier1(features)
47 |         softmax_output1 = nn.Softmax(dim=1)(output1)
48 |         softmax_output = (1 - args.cla_plus_weight) * softmax_output + args.cla_plus_weight * softmax_output1
49 | 
50 |         if epoch > start_epoch:
51 |             if method == 'CDAN-E':
52 |                 entropy = loss_func.Entropy(softmax_output)
53 |                 loss += loss_func.CDAN([features, softmax_output], ad_net, entropy, network.calc_coeff(num_iter*(epoch-start_epoch)+batch_idx), random_layer)
54 |             elif method == 'CDAN':
55 |                 loss += loss_func.CDAN([features, softmax_output], ad_net, None, None, random_layer)
56 |             elif method == 'DANN':
57 |                 loss += loss_func.DANN(features, ad_net)
58 |             else:
59 |                 raise ValueError('Method cannot be recognized.')
60 |         # Cycle
61 |         num_feature = features.size(0)
62 |         # =================train discriminator T
63 |         real_label = Variable(torch.ones(num_feature)).cuda()
64 |         fake_label = Variable(torch.zeros(num_feature)).cuda()
65 | 
66 |         # Train the generators
67 |         optimizer_G.zero_grad()
68 | 
69 |         # Identity loss
70 |         same_t = G_s2t(features_target)
71 |         loss_identity_t = criterion_identity(same_t, features_target)
72 | 
73 |         same_s = G_t2s(features_source)
74 |         loss_identity_s = criterion_identity(same_s, features_source)
75 | 
76 |         # GAN loss
77 |         fake_t = G_s2t(features_source)
78 |         pred_fake = D_t(fake_t)
79 |         loss_G_s2t = criterion_GAN(pred_fake, label_source.float())
80 | 
81 |         fake_s = G_t2s(features_target)
82 |         pred_fake = D_s(fake_s)
83 |         loss_G_t2s = criterion_GAN(pred_fake, label_source.float())
84 | 
85 |         # cycle loss
86 |         recovered_s = G_t2s(fake_t)
87 |         loss_cycle_sts = criterion_cycle(recovered_s, features_source)
88 | 
89 |         recovered_t = G_s2t(fake_s)
90 |         loss_cycle_tst = criterion_cycle(recovered_t, features_target)
91 | 
92 |         # sem loss
93 |         pred_recovered_s = model.classifier(recovered_s)
94 |         pred_fake_t = model.classifier(fake_t)
95 |         loss_sem_t2s = criterion_Sem(pred_recovered_s, pred_fake_t)
96 | 
97 |         pred_recovered_t = model.classifier(recovered_t)
98 |         pred_fake_s = model.classifier(fake_s)
99 |         loss_sem_s2t = criterion_Sem(pred_recovered_t, pred_fake_s)
100 | 
101 |         loss_cycle = loss_cycle_tst + loss_cycle_sts
102 |         weight_in_loss_g = args.weight_in_loss_g.split(',')
103 |         loss_G = float(weight_in_loss_g[0]) * (loss_identity_s + loss_identity_t) + \
104 |                  float(weight_in_loss_g[1]) * (loss_G_s2t + loss_G_t2s) + \
105 |                  float(weight_in_loss_g[2]) * loss_cycle + \
106 |                  float(weight_in_loss_g[3]) * (loss_sem_s2t + loss_sem_t2s)
107 | 
108 |         # Train the softmax classifier
109 |         outputs_fake = classifier1(fake_t.detach())
110 |         # Optimize the classifier
111 |         classifier_loss1 = nn.CrossEntropyLoss()(outputs_fake, label_source)
112 |         classifier1_optim.zero_grad()
113 |         classifier_loss1.backward()
114 |         classifier1_optim.step()
115 | 
116 |         total_loss = loss + args.cyc_loss_weight * loss_G
117 |         total_loss.backward()
118 |         optimizer.step()
119 |         optimizer_G.step()
120 | 
121 |         ###### Discriminator S ######
122 |         optimizer_D_s.zero_grad()
123 | 
124 |         # Real loss
125 |         pred_real = D_s(features_source.detach())
126 |         loss_D_real = criterion_GAN(pred_real, real_label)
127 | 
128 |         # Fake loss
129 |         fake_s = fake_S_buffer.push_and_pop(fake_s)
130 |         pred_fake = D_s(fake_s.detach())
131 |         loss_D_fake = criterion_GAN(pred_fake, fake_label)
132 | 
133 |         # Total loss
134 |         loss_D_s = loss_D_real + loss_D_fake
135 |         loss_D_s.backward()
136 | 
137 |         optimizer_D_s.step()
138 |         ###################################
139 | 
140 |         ###### Discriminator T ######
141 |         optimizer_D_t.zero_grad()
142 | 
143 |         # Real loss
144 |         pred_real = D_t(features_target.detach())
145 |         loss_D_real = criterion_GAN(pred_real, real_label)
146 | 
147 |         # Fake loss
148 |         fake_t = fake_T_buffer.push_and_pop(fake_t)
149 |         pred_fake = D_t(fake_t.detach())
150 |         loss_D_fake = criterion_GAN(pred_fake, fake_label)
151 | 
152 |         # Total loss
153 |         loss_D_t = loss_D_real + loss_D_fake
154 |         loss_D_t.backward()
155 |         optimizer_D_t.step()
156 | 
157 |         if epoch > start_epoch:
158 |             optimizer_ad.step()
159 |         if (batch_idx + epoch * num_iter) % args.log_interval == 0:
160 |             print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\tLoss+G: {:.6f}'.format(
161 |                 epoch, batch_idx * args.batch_size, num_iter * args.batch_size,
162 |                 100. * batch_idx / num_iter, loss.item(), total_loss.item()))
163 | 
164 | def test(args, epoch, config, model, test_loader):
165 |     model.eval()
166 |     test_loss = 0
167 |     correct = 0
168 |     for data, target in test_loader:
169 |         data, target = data.cuda(), target.cuda()
170 |         feature, output = model(data)
171 |         test_loss += nn.CrossEntropyLoss()(output, target).item()
172 |         pred = output.data.cpu().max(1, keepdim=True)[1]
173 |         correct += pred.eq(target.data.cpu().view_as(pred)).sum().item()
174 | 
175 |     test_loss /= len(test_loader.dataset)
176 |     log_str = "epoch: {}, Accuracy: {}/{} ({:.4f}%)".format(
177 |         epoch, correct, len(test_loader.dataset),
178 |         100. * correct / len(test_loader.dataset))
179 |     config["out_file"].write(log_str + "\n")
180 |     config["out_file"].flush()
181 |     print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
182 |         test_loss, correct, len(test_loader.dataset),
183 |         100. * correct / len(test_loader.dataset)))
184 | 
185 | def main():
186 |     # Training settings
187 |     parser = argparse.ArgumentParser(description='CDAN USPS MNIST')
188 |     parser.add_argument('--method', type=str, default='CDAN-E', choices=['CDAN', 'CDAN-E', 'DANN'])
189 |     parser.add_argument('--task', default='USPS2MNIST', help='task to perform')
190 |     parser.add_argument('--batch_size', type=int, default=256, help='input batch size for training (default: 256)')
191 |     parser.add_argument('--test_batch_size', type=int, default=1000, help='input batch size for testing (default: 1000)')
192 |     parser.add_argument('--epochs', type=int, default=10, metavar='N', help='number of epochs to train (default: 10)')
193 |     parser.add_argument('--lr', type=float, default=0.01, metavar='LR', help='learning rate (default: 0.01)')
194 |     parser.add_argument('--momentum', type=float, default=0.5, metavar='M', help='SGD momentum (default: 0.5)')
195 |     parser.add_argument('--gpu_id', type=str, default="0", help='cuda device id')
196 |     parser.add_argument('--seed', type=int, default=1, metavar='S', help='random seed (default: 1)')
197 |     parser.add_argument('--log_interval', type=int, default=10, help='how many batches to wait before logging training status')
198 |     parser.add_argument('--random', type=bool, default=False, help='whether to use random')
199 |     parser.add_argument('--output_dir', type=str, default="digits/u2m")
200 |     parser.add_argument('--cla_plus_weight', type=float, default=0.3)
201 |     parser.add_argument('--cyc_loss_weight', type=float, default=0.01)
202 |     parser.add_argument('--weight_in_loss_g', type=str, default='1,0.01,0.1,0.1')
203 |     args = parser.parse_args()
204 | 
205 |     torch.manual_seed(args.seed)
206 |     os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_id
207 | 
208 |     # train config
209 |     import os.path as osp
210 |     import datetime
211 |     config = {}
212 | 
213 |     config['method'] = args.method
214 |     config["gpu"] = args.gpu_id
215 |     config['cyc_loss_weight'] = args.cyc_loss_weight
216 |     config['cla_plus_weight'] = args.cla_plus_weight
217 |     config['weight_in_loss_g'] = args.weight_in_loss_g
218 |     config["epochs"] = args.epochs
219 |     config["output_for_test"] = True
220 |     config["output_path"] = "snapshot/" + args.output_dir
221 |     if not osp.exists(config["output_path"]):
222 |         os.system('mkdir -p ' + config["output_path"])
223 |     config["out_file"] = open(osp.join(config["output_path"], "log_{}_{}.txt".
224 | format(args.task,str(datetime.datetime.utcnow()))), 225 | "w") 226 | 227 | config["out_file"].write(str(config)) 228 | config["out_file"].flush() 229 | 230 | if args.task == 'USPS2MNIST': 231 | source_list = 'data/usps2mnist/usps_train.txt' 232 | target_list = 'data/usps2mnist/mnist_train.txt' 233 | test_list = 'data/usps2mnist/mnist_test.txt' 234 | start_epoch = 1 235 | decay_epoch = 6 236 | elif args.task == 'MNIST2USPS': 237 | source_list = 'data/usps2mnist/mnist_train.txt' 238 | target_list = 'data/usps2mnist/usps_train.txt' 239 | test_list = 'data/usps2mnist/usps_test.txt' 240 | start_epoch = 1 241 | decay_epoch = 5 242 | else: 243 | raise Exception('task cannot be recognized!') 244 | 245 | train_loader = torch.utils.data.DataLoader( 246 | ImageList(open(source_list).readlines(), transform=transforms.Compose([ 247 | transforms.Resize((28,28)), 248 | transforms.ToTensor(), 249 | transforms.Normalize((0.5,), (0.5,)) 250 | ]), mode='L'), 251 | batch_size=args.batch_size, shuffle=True, num_workers=1, drop_last=True) 252 | train_loader1 = torch.utils.data.DataLoader( 253 | ImageList(open(target_list).readlines(), transform=transforms.Compose([ 254 | transforms.Resize((28,28)), 255 | transforms.ToTensor(), 256 | transforms.Normalize((0.5,), (0.5,)) 257 | ]), mode='L'), 258 | batch_size=args.batch_size, shuffle=True, num_workers=1, drop_last=True) 259 | test_loader = torch.utils.data.DataLoader( 260 | ImageList(open(test_list).readlines(), transform=transforms.Compose([ 261 | transforms.Resize((28,28)), 262 | transforms.ToTensor(), 263 | transforms.Normalize((0.5,), (0.5,)) 264 | ]), mode='L'), 265 | batch_size=args.test_batch_size, shuffle=True, num_workers=1) 266 | 267 | model = network.LeNet() 268 | model = model.cuda() 269 | class_num = 10 270 | 271 | # Add the generators, the discriminators, and an extra classifier 272 | import itertools 273 | from utils import ReplayBuffer 274 | import net 275 | z_dimension = 500 276 | D_s = network.models["Discriminator_um"]() 277 | D_s = D_s.cuda() 278 | G_s2t = network.models["Generator_um"](z_dimension, 500) 279 | G_s2t = G_s2t.cuda() 280 | 281 | D_t = network.models["Discriminator_um"]() 282 | D_t = D_t.cuda() 283 | G_t2s = network.models["Generator_um"](z_dimension, 500) 284 | G_t2s = G_t2s.cuda() 285 | 286 | criterion_GAN = torch.nn.MSELoss() 287 | criterion_cycle = torch.nn.L1Loss() 288 | criterion_identity = torch.nn.L1Loss() 289 | criterion_Sem = torch.nn.L1Loss() 290 | 291 | optimizer_G = torch.optim.Adam(itertools.chain(G_s2t.parameters(), G_t2s.parameters()), lr=0.0003) 292 | optimizer_D_s = torch.optim.Adam(D_s.parameters(), lr=0.0003) 293 | optimizer_D_t = torch.optim.Adam(D_t.parameters(), lr=0.0003) 294 | 295 | fake_S_buffer = ReplayBuffer() 296 | fake_T_buffer = ReplayBuffer() 297 | 298 | ## Add the auxiliary classifier 299 | classifier1 = net.Net(500, class_num) 300 | classifier1 = classifier1.cuda() 301 | classifier1_optim = optim.Adam(classifier1.parameters(), lr=0.0003) 302 | 303 | 304 | if args.random: 305 | random_layer = network.RandomLayer([model.output_num(), class_num], 500) 306 | ad_net = network.AdversarialNetwork(500, 500) 307 | random_layer.cuda() 308 | else: 309 | random_layer = None 310 | ad_net = network.AdversarialNetwork(model.output_num() * class_num, 500) 311 | ad_net = ad_net.cuda() 312 | optimizer = optim.SGD(model.parameters(), lr=args.lr, weight_decay=0.0005, momentum=0.9) 313 | optimizer_ad = optim.SGD(ad_net.parameters(), lr=args.lr, weight_decay=0.0005, momentum=0.9) 314 | 315 | for epoch in range(1, args.epochs + 1): 316 | if epoch % decay_epoch == 0: 317 | for 
param_group in optimizer.param_groups: 318 | param_group["lr"] = param_group["lr"] * 0.5 319 | train(args, model, ad_net, random_layer, train_loader, train_loader1, optimizer, optimizer_ad, epoch, start_epoch, args.method, 320 | D_s, D_t, G_s2t, G_t2s, criterion_Sem, criterion_GAN, criterion_cycle, criterion_identity, optimizer_G, 321 | optimizer_D_t, optimizer_D_s, 322 | classifier1, classifier1_optim, fake_S_buffer, fake_T_buffer 323 | ) 324 | test(args,epoch,config, model, test_loader) 325 | 326 | if __name__ == '__main__': 327 | main() 328 | -------------------------------------------------------------------------------- /train_uspsmnist_pixel.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import torch 3 | import torch.nn as nn 4 | import torch.nn.functional as F 5 | import torch.optim as optim 6 | from torchvision import datasets, transforms 7 | from data_list import ImageList 8 | import os 9 | from torch.autograd import Variable 10 | import loss as loss_func 11 | import numpy as np 12 | import network 13 | 14 | def train(args, model, ad_net, random_layer, train_loader, train_loader1, optimizer, optimizer_ad, epoch, start_epoch, method, 15 | D_s, D_t, G_s2t, G_t2s, criterion_Sem, criterion_GAN, criterion_cycle, criterion_identity, optimizer_G, 16 | optimizer_D_t, optimizer_D_s, 17 | classifier1, classifier1_optim, fake_S_buffer, fake_T_buffer): 18 | model.train() 19 | len_source = len(train_loader) 20 | len_target = len(train_loader1) 21 | if len_source > len_target: 22 | num_iter = len_source 23 | else: 24 | num_iter = len_target 25 | 26 | for batch_idx in range(num_iter): 27 | if batch_idx % len_source == 0: 28 | iter_source = iter(train_loader) 29 | if batch_idx % len_target == 0: 30 | iter_target = iter(train_loader1) 31 | data_source, label_source = iter_source.next() 32 | data_source, label_source = data_source.cuda(), label_source.cuda() 33 | data_target, label_target = iter_target.next() 34 | data_target = data_target.cuda() 35 | optimizer.zero_grad() 36 | optimizer_ad.zero_grad() 37 | 38 | features_source, outputs_source = model(data_source) 39 | features_target, outputs_target = model(data_target) 40 | features = torch.cat((features_source, features_target), dim=0) 41 | outputs = torch.cat((outputs_source, outputs_target), dim=0) 42 | 43 | loss = nn.CrossEntropyLoss()(outputs.narrow(0, 0, data_source.size(0)), label_source) 44 | softmax_output = nn.Softmax(dim=1)(outputs) 45 | 46 | 47 | # _, outputs_source1 = classifier1(data_source) 48 | # _, outputs_target1 = classifier1(data_target) 49 | outputs_source1 = classifier1(data_source) 50 | outputs_target1 = classifier1(data_target) 51 | output1 = torch.cat((outputs_source1, outputs_target1), dim=0) 52 | softmax_output1 = nn.Softmax(dim=1)(output1) 53 | 54 | softmax_output = (1 - args.cla_plus_weight) * softmax_output + args.cla_plus_weight * softmax_output1 55 | 56 | if epoch > start_epoch: 57 | if method == 'CDAN-E': 58 | entropy = loss_func.Entropy(softmax_output) 59 | loss += loss_func.CDAN([features, softmax_output], ad_net, entropy, network.calc_coeff(num_iter*(epoch-start_epoch)+batch_idx), random_layer) 60 | elif method == 'CDAN': 61 | loss += loss_func.CDAN([features, softmax_output], ad_net, None, None, random_layer) 62 | elif method == 'DANN': 63 | loss += loss_func.DANN(features, ad_net) 64 | else: 65 | raise ValueError('Method cannot be recognized.') 66 | # Cycle 67 | num_feature = features.size(0) 68 | # =================train discriminator T 69 | 
real_label = Variable(torch.ones(data_source.size(0))).cuda()  # per-batch GAN targets; the discriminators score one domain batch at a time 70 | fake_label = Variable(torch.zeros(data_source.size(0))).cuda() 71 | 72 | # Train the generators 73 | optimizer_G.zero_grad() 74 | 75 | # Identity loss (each generator applied to its own output domain, as in train_image.py) 76 | same_t = G_s2t(data_target) 77 | loss_identity_t = criterion_identity(same_t, data_target) 78 | 79 | same_s = G_t2s(data_source) 80 | loss_identity_s = criterion_identity(same_s, data_source) 81 | 82 | # GAN loss 83 | fake_t = G_s2t(data_source) 84 | pred_fake = D_t(fake_t) 85 | loss_G_s2t = criterion_GAN(pred_fake, real_label)  # the generator tries to make D_t predict 'real' 86 | 87 | fake_s = G_t2s(data_target)  # translate the target batch into the source domain 88 | pred_fake = D_s(fake_s) 89 | loss_G_t2s = criterion_GAN(pred_fake, real_label) 90 | 91 | # cycle loss 92 | recovered_s = G_t2s(fake_t) 93 | loss_cycle_sts = criterion_cycle(recovered_s, data_source) 94 | 95 | recovered_t = G_s2t(fake_s) 96 | loss_cycle_tst = criterion_cycle(recovered_t, data_target) 97 | 98 | # sem loss 99 | _,pred_recovered_s = model(recovered_s) 100 | _,pred_fake_t = model(fake_t) 101 | loss_sem_t2s = criterion_Sem(pred_recovered_s, pred_fake_t) 102 | 103 | _,pred_recovered_t = model(recovered_t) 104 | _,pred_fake_s = model(fake_s) 105 | loss_sem_s2t = criterion_Sem(pred_recovered_t, pred_fake_s) 106 | 107 | loss_cycle = loss_cycle_tst + loss_cycle_sts 108 | weight_in_loss_g = args.weight_in_loss_g.split(',') 109 | loss_G = float(weight_in_loss_g[0]) * (loss_identity_s + loss_identity_t) + \ 110 | float(weight_in_loss_g[1]) * (loss_G_s2t + loss_G_t2s) + \ 111 | float(weight_in_loss_g[2]) * loss_cycle + \ 112 | float(weight_in_loss_g[3]) * (loss_sem_s2t + loss_sem_t2s) 113 | 114 | # Train the auxiliary softmax classifier on translated source samples 115 | # _,outputs_fake = classifier1(fake_t.detach()) 116 | outputs_fake = classifier1(fake_t.detach()) 117 | # Update the auxiliary classifier 118 | classifier_loss1 = nn.CrossEntropyLoss()(outputs_fake, label_source) 119 | classifier1_optim.zero_grad() 120 | classifier_loss1.backward() 121 | classifier1_optim.step() 122 | 123 | total_loss = loss + args.cyc_loss_weight * loss_G 124 | total_loss.backward() 125 | optimizer.step() 126 | optimizer_G.step() 127 | 128 | ###### Discriminator S ###### 129 | optimizer_D_s.zero_grad() 130 | 131 | # Real loss 132 | pred_real = D_s(data_source) 133 | loss_D_real = criterion_GAN(pred_real, real_label) 134 | 135 | # Fake loss 136 | fake_s = fake_S_buffer.push_and_pop(fake_s) 137 | pred_fake = D_s(fake_s.detach()) 138 | loss_D_fake = criterion_GAN(pred_fake, fake_label) 139 | 140 | # Total loss 141 | loss_D_s = loss_D_real + loss_D_fake 142 | loss_D_s.backward() 143 | 144 | optimizer_D_s.step() 145 | ################################### 146 | 147 | ###### Discriminator T ###### 148 | optimizer_D_t.zero_grad() 149 | 150 | # Real loss 151 | pred_real = D_t(data_target) 152 | loss_D_real = criterion_GAN(pred_real, real_label) 153 | 154 | # Fake loss 155 | fake_t = fake_T_buffer.push_and_pop(fake_t) 156 | pred_fake = D_t(fake_t.detach()) 157 | loss_D_fake = criterion_GAN(pred_fake, fake_label) 158 | 159 | # Total loss 160 | loss_D_t = loss_D_real + loss_D_fake 161 | loss_D_t.backward() 162 | optimizer_D_t.step() 163 | 
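# One iteration updates, in order: the generators (loss_G), the auxiliary
# classifier on translated source samples, the backbone (classification loss
# plus the adaptation loss plus cyc_loss_weight * loss_G), and the two
# discriminators; the adversarial network below only starts stepping once
# epoch > start_epoch.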
164 | if epoch > start_epoch: 165 | optimizer_ad.step() 166 | if (batch_idx + epoch * num_iter) % args.log_interval == 0: 167 | print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\tLoss+G: {:.6f}'.format( 168 | epoch, batch_idx * args.batch_size, num_iter * args.batch_size, 169 | 100. * batch_idx / num_iter, loss.item(), total_loss.item())) 170 | 171 | def test(args,epoch,config, model, test_loader): 172 | model.eval() 173 | test_loss = 0 174 | correct = 0 175 | for data, target in test_loader: 176 | data, target = data.cuda(), target.cuda() 177 | feature, output = model(data) 178 | test_loss += nn.CrossEntropyLoss()(output, target).item() 179 | pred = output.data.cpu().max(1, keepdim=True)[1] 180 | correct += pred.eq(target.data.cpu().view_as(pred)).sum().item() 181 | 182 | test_loss /= len(test_loader.dataset) 183 | log_str = "epoch: {}, Accuracy: {}/{} ({:.4f}%)".format( 184 | epoch, correct, len(test_loader.dataset), 185 | 100. * correct / len(test_loader.dataset)) 186 | config["out_file"].write(log_str + "\n") 187 | config["out_file"].flush() 188 | print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format( 189 | test_loss, correct, len(test_loader.dataset), 190 | 100. * correct / len(test_loader.dataset))) 191 | 192 | def main(): 193 | # Training settings 194 | parser = argparse.ArgumentParser(description='CDAN USPS MNIST') 195 | parser.add_argument('--method', type=str, default='CDAN-E', choices=['CDAN', 'CDAN-E', 'DANN']) 196 | parser.add_argument('--task', default='USPS2MNIST', help='task to perform') 197 | parser.add_argument('--batch_size', type=int, default=256, help='input batch size for training (default: 256)') 198 | parser.add_argument('--test_batch_size', type=int, default=1000, help='input batch size for testing (default: 1000)') 199 | parser.add_argument('--epochs', type=int, default=40, metavar='N', help='number of epochs to train (default: 40)') 200 | parser.add_argument('--lr', type=float, default=0.01, metavar='LR', help='learning rate (default: 0.01)') 201 | parser.add_argument('--momentum', type=float, default=0.5, metavar='M', help='SGD momentum (default: 0.5)') 202 | parser.add_argument('--gpu_id', type=str,default="0", help='cuda device id') 203 | parser.add_argument('--seed', type=int, default=1, metavar='S', help='random seed (default: 1)') 204 | parser.add_argument('--log_interval', type=int, default=10, help='how many batches to wait before logging training status') 205 | parser.add_argument('--random', action='store_true', default=False, help='whether to use the random multilinear projection') 206 | parser.add_argument('--output_dir',type=str,default="digits/u2m") 207 | parser.add_argument('--cla_plus_weight',type=float,default=0.3) 208 | parser.add_argument('--cyc_loss_weight',type=float,default=0.05) 209 | parser.add_argument('--weight_in_loss_g',type=str,default='1,1,1,1') 210 | args = parser.parse_args() 211 | 212 | torch.manual_seed(args.seed) 213 | os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_id 214 | 215 | # train config 216 | import os.path as osp 217 | import datetime 218 | config = {} 219 | 220 | config['method'] = args.method 221 | config["gpu"] = args.gpu_id 222 | config['cyc_loss_weight'] = args.cyc_loss_weight 223 | config['cla_plus_weight'] = args.cla_plus_weight 224 | config['weight_in_loss_g'] = args.weight_in_loss_g 225 | config["epochs"] = args.epochs 226 | config["output_for_test"] = True 227 | config["output_path"] = "snapshot/" + args.output_dir 228 | if not osp.exists(config["output_path"]): 229 | os.system('mkdir -p ' + config["output_path"]) 230 | config["out_file"] = open(osp.join(config["output_path"], "log_{}_{}.txt". 
231 | format(args.task,str(datetime.datetime.utcnow()))), 232 | "w") 233 | 234 | config["out_file"].write(str(config)) 235 | config["out_file"].flush() 236 | 237 | if args.task == 'USPS2MNIST': 238 | source_list = 'data/usps2mnist/usps_train.txt' 239 | target_list = 'data/usps2mnist/mnist_train.txt' 240 | test_list = 'data/usps2mnist/mnist_test.txt' 241 | start_epoch = 1 242 | decay_epoch = 6 243 | elif args.task == 'MNIST2USPS': 244 | source_list = 'data/usps2mnist/mnist_train.txt' 245 | target_list = 'data/usps2mnist/usps_train.txt' 246 | test_list = 'data/usps2mnist/usps_test.txt' 247 | start_epoch = 1 248 | decay_epoch = 5 249 | else: 250 | raise Exception('task cannot be recognized!') 251 | 252 | train_loader = torch.utils.data.DataLoader( 253 | ImageList(open(source_list).readlines(), transform=transforms.Compose([ 254 | transforms.Resize((28,28)), 255 | transforms.ToTensor(), 256 | transforms.Normalize((0.5,), (0.5,)) 257 | ]), mode='L'), 258 | batch_size=args.batch_size, shuffle=True, num_workers=1, drop_last=True) 259 | train_loader1 = torch.utils.data.DataLoader( 260 | ImageList(open(target_list).readlines(), transform=transforms.Compose([ 261 | transforms.Resize((28,28)), 262 | transforms.ToTensor(), 263 | transforms.Normalize((0.5,), (0.5,)) 264 | ]), mode='L'), 265 | batch_size=args.batch_size, shuffle=True, num_workers=1, drop_last=True) 266 | test_loader = torch.utils.data.DataLoader( 267 | ImageList(open(test_list).readlines(), transform=transforms.Compose([ 268 | transforms.Resize((28,28)), 269 | transforms.ToTensor(), 270 | transforms.Normalize((0.5,), (0.5,)) 271 | ]), mode='L'), 272 | batch_size=args.test_batch_size, shuffle=True, num_workers=1) 273 | 274 | model = network.LeNet() 275 | model = model.cuda() 276 | class_num = 10 277 | 278 | # Add the generators, the discriminators, and an extra classifier 279 | import itertools 280 | from utils import ReplayBuffer 281 | import net 282 | z_dimension = 784 283 | D_s = network.models["Discriminator_um_pixel"]() 284 | D_s = D_s.cuda() 285 | G_s2t = network.models["Generator_um_pixel"](z_dimension, 500) 286 | G_s2t = G_s2t.cuda() 287 | 288 | D_t = network.models["Discriminator_um_pixel"]() 289 | D_t = D_t.cuda() 290 | G_t2s = network.models["Generator_um_pixel"](z_dimension, 500) 291 | G_t2s = G_t2s.cuda() 292 | 293 | criterion_GAN = torch.nn.MSELoss() 294 | criterion_cycle = torch.nn.L1Loss() 295 | criterion_identity = torch.nn.L1Loss() 296 | criterion_Sem = torch.nn.L1Loss() 297 | 298 | optimizer_G = torch.optim.Adam(itertools.chain(G_s2t.parameters(), G_t2s.parameters()), lr=0.0003) 299 | optimizer_D_s = torch.optim.Adam(D_s.parameters(), lr=0.0003) 300 | optimizer_D_t = torch.optim.Adam(D_t.parameters(), lr=0.0003) 301 | 302 | fake_S_buffer = ReplayBuffer() 303 | fake_T_buffer = ReplayBuffer() 304 | 305 | ## Add the auxiliary classifier 306 | #classifier1 = network.LeNet() 307 | classifier1 = net.Net(784,10) 308 | classifier1 = classifier1.cuda() 309 | classifier1_optim = optim.Adam(classifier1.parameters(), lr=0.0003) 310 | 311 | 312 | if args.random: 313 | random_layer = network.RandomLayer([model.output_num(), class_num], 500) 314 | ad_net = network.AdversarialNetwork(500, 500) 315 | random_layer.cuda() 316 | else: 317 | random_layer = None 318 | ad_net = network.AdversarialNetwork(model.output_num() * class_num, 500) 319 | ad_net = ad_net.cuda() 320 | optimizer = optim.SGD(model.parameters(), lr=args.lr, weight_decay=0.0005, momentum=0.9) 321 | optimizer_ad = optim.SGD(ad_net.parameters(), lr=args.lr, weight_decay=0.0005, momentum=0.9) 322 | 323 | for epoch in range(1, args.epochs + 1):
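# Halve the backbone learning rate every decay_epoch epochs (6 for USPS2MNIST,
# 5 for MNIST2USPS) before running one epoch of train() and evaluating test().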
324 | if epoch % decay_epoch == 0: 325 | for param_group in optimizer.param_groups: 326 | param_group["lr"] = param_group["lr"] * 0.5 327 | train(args, model, ad_net, random_layer, train_loader, train_loader1, optimizer, optimizer_ad, epoch, start_epoch, args.method, 328 | D_s, D_t, G_s2t, G_t2s, criterion_Sem, criterion_GAN, criterion_cycle, criterion_identity, optimizer_G, 329 | optimizer_D_t, optimizer_D_s, 330 | classifier1, classifier1_optim, fake_S_buffer, fake_T_buffer 331 | ) 332 | test(args,epoch,config, model, test_loader) 333 | 334 | if __name__ == '__main__': 335 | main() 336 | -------------------------------------------------------------------------------- /train_image.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import os 3 | import os.path as osp 4 | 5 | import numpy as np 6 | import torch 7 | import torch.nn as nn 8 | import torch.optim as optim 9 | import network 10 | import loss 11 | import pre_process as prep 12 | from torch.utils.data import DataLoader 13 | import lr_schedule 14 | import data_list 15 | import datetime 16 | from data_list import ImageList 17 | from torch.autograd import Variable 18 | import random 19 | import pdb 20 | import math 21 | from utils import ReplayBuffer,weights_init_normal 22 | import itertools 23 | import net 24 | 25 | 26 | def image_classification_test(loader, model, test_10crop=True): 27 | start_test = True 28 | with torch.no_grad(): 29 | if test_10crop: 30 | iter_test = [iter(loader['test'][i]) for i in range(10)] 31 | for i in range(len(loader['test'][0])): 32 | data = [iter_test[j].next() for j in range(10)] 33 | inputs = [data[j][0] for j in range(10)] 34 | labels = data[0][1] 35 | for j in range(10): 36 | inputs[j] = inputs[j].cuda() 37 | labels = labels 38 | outputs = [] 39 | for j in range(10): 40 | _, predict_out = model(inputs[j]) 41 | outputs.append(nn.Softmax(dim=1)(predict_out)) 42 | outputs = sum(outputs) 43 | if start_test: 44 | all_output = outputs.float().cpu() 45 | all_label = labels.float() 46 | start_test = False 47 | else: 48 | all_output = torch.cat((all_output, outputs.float().cpu()), 0) 49 | all_label = torch.cat((all_label, labels.float()), 0) 50 | else: 51 | iter_test = iter(loader["test"]) 52 | for i in range(len(loader['test'])): 53 | data = iter_test.next() 54 | inputs = data[0] 55 | labels = data[1] 56 | inputs = inputs.cuda() 57 | labels = labels.cuda() 58 | _, outputs = model(inputs) 59 | if start_test: 60 | all_output = outputs.float().cpu() 61 | all_label = labels.float() 62 | start_test = False 63 | else: 64 | all_output = torch.cat((all_output, outputs.float().cpu()), 0) 65 | all_label = torch.cat((all_label, labels.float()), 0) 66 | _, predict = torch.max(all_output, 1) 67 | accuracy = torch.sum(torch.squeeze(predict).float() == all_label).item() / float(all_label.size()[0]) 68 | return accuracy 69 | 70 | 71 | def train(config): 72 | ## set pre-process 73 | prep_dict = {} 74 | prep_config = config["prep"] 75 | prep_dict["source"] = prep.image_train(**config["prep"]['params']) 76 | prep_dict["target"] = prep.image_train(**config["prep"]['params']) 77 | if prep_config["test_10crop"]: 78 | prep_dict["test"] = prep.image_test_10crop(**config["prep"]['params']) 79 | else: 80 | prep_dict["test"] = prep.image_test(**config["prep"]['params']) 81 | 82 | ## prepare data 83 | dsets = {} 84 | dset_loaders = {} 85 | data_config = config["data"] 86 | train_bs = data_config["source"]["batch_size"] 87 | test_bs = data_config["test"]["batch_size"] 88 | 
dsets["source"] = ImageList(open(data_config["source"]["list_path"]).readlines(), \ 89 | transform=prep_dict["source"]) 90 | dset_loaders["source"] = DataLoader(dsets["source"], batch_size=train_bs, \ 91 | shuffle=True, num_workers=0, drop_last=True) 92 | dsets["target"] = ImageList(open(data_config["target"]["list_path"]).readlines(), \ 93 | transform=prep_dict["target"]) 94 | dset_loaders["target"] = DataLoader(dsets["target"], batch_size=train_bs, \ 95 | shuffle=True, num_workers=0, drop_last=True) 96 | 97 | if prep_config["test_10crop"]: 98 | for i in range(10): 99 | dsets["test"] = [ImageList(open(data_config["test"]["list_path"]).readlines(), \ 100 | transform=prep_dict["test"][i]) for i in range(10)] 101 | dset_loaders["test"] = [DataLoader(dset, batch_size=test_bs, \ 102 | shuffle=False, num_workers=0) for dset in dsets['test']] 103 | else: 104 | dsets["test"] = ImageList(open(data_config["test"]["list_path"]).readlines(), \ 105 | transform=prep_dict["test"]) 106 | dset_loaders["test"] = DataLoader(dsets["test"], batch_size=test_bs, \ 107 | shuffle=False, num_workers=0) 108 | 109 | class_num = config["network"]["params"]["class_num"] 110 | 111 | ## set base network 112 | net_config = config["network"] 113 | base_network = net_config["name"](**net_config["params"]) 114 | base_network = base_network.cuda() 115 | 116 | ## 添加判别器D_s,D_t,生成器G_s2t,G_t2s 117 | 118 | z_dimension = 256 119 | D_s = network.models["Discriminator"]() 120 | D_s = D_s.cuda() 121 | G_s2t = network.models["Generator"](z_dimension, 1024) 122 | G_s2t = G_s2t.cuda() 123 | 124 | D_t = network.models["Discriminator"]() 125 | D_t = D_t.cuda() 126 | G_t2s = network.models["Generator"](z_dimension, 1024) 127 | G_t2s = G_t2s.cuda() 128 | 129 | criterion_GAN = torch.nn.MSELoss() 130 | criterion_cycle = torch.nn.L1Loss() 131 | criterion_identity = torch.nn.L1Loss() 132 | criterion_Sem = torch.nn.L1Loss() 133 | 134 | optimizer_G = torch.optim.Adam(itertools.chain(G_s2t.parameters(), G_t2s.parameters()), lr=0.0003) 135 | optimizer_D_s = torch.optim.Adam(D_s.parameters(), lr=0.0003) 136 | optimizer_D_t = torch.optim.Adam(D_t.parameters(), lr=0.0003) 137 | 138 | fake_S_buffer = ReplayBuffer() 139 | fake_T_buffer = ReplayBuffer() 140 | 141 | classifier_optimizer = torch.optim.Adam(base_network.parameters(), lr=0.0003) 142 | ## 添加分类器 143 | classifier1 = net.Net(256,class_num) 144 | classifier1 = classifier1.cuda() 145 | classifier1_optim = optim.Adam(classifier1.parameters(), lr=0.0003) 146 | 147 | ## add additional network for some methods 148 | if config["loss"]["random"]: 149 | random_layer = network.RandomLayer([base_network.output_num(), class_num], config["loss"]["random_dim"]) 150 | ad_net = network.AdversarialNetwork(config["loss"]["random_dim"], 1024) 151 | else: 152 | random_layer = None 153 | ad_net = network.AdversarialNetwork(base_network.output_num() * class_num, 1024) 154 | if config["loss"]["random"]: 155 | random_layer.cuda() 156 | ad_net = ad_net.cuda() 157 | parameter_list = base_network.get_parameters() + ad_net.get_parameters() 158 | 159 | ## set optimizer 160 | optimizer_config = config["optimizer"] 161 | optimizer = optimizer_config["type"](parameter_list, \ 162 | **(optimizer_config["optim_params"])) 163 | param_lr = [] 164 | for param_group in optimizer.param_groups: 165 | param_lr.append(param_group["lr"]) 166 | schedule_param = optimizer_config["lr_param"] 167 | lr_scheduler = lr_schedule.schedule_dict[optimizer_config["lr_type"]] 168 | 169 | gpus = config['gpu'].split(',') 170 | if len(gpus) > 1: 171 | 
ad_net = nn.DataParallel(ad_net, device_ids=[int(i) for i in gpus]) 172 | base_network = nn.DataParallel(base_network, device_ids=[int(i) for i in gpus]) 173 | 174 | ## train 175 | len_train_source = len(dset_loaders["source"]) 176 | len_train_target = len(dset_loaders["target"]) 177 | transfer_loss_value = classifier_loss_value = total_loss_value = 0.0 178 | best_acc = 0.0 179 | for i in range(config["num_iterations"]): 180 | if i % config["test_interval"] == config["test_interval"] - 1: 181 | base_network.train(False) 182 | temp_acc = image_classification_test(dset_loaders, \ 183 | base_network, test_10crop=prep_config["test_10crop"]) 184 | temp_model = nn.Sequential(base_network) 185 | if temp_acc > best_acc: 186 | best_acc = temp_acc 187 | best_model = temp_model 188 | 189 | now = datetime.datetime.now() 190 | d = str(now.month) + '-' + str(now.day) + ' ' + str(now.hour) + ':' + str(now.minute) + ":" + str( 191 | now.second) 192 | torch.save(best_model, osp.join(config["output_path"], 193 | "{}_to_{}_best_model_acc-{}_{}.pth.tar".format(args.source, args.target, 194 | best_acc, d))) 195 | log_str = "iter: {:05d}, precision: {:.5f}".format(i, temp_acc) 196 | config["out_file"].write(log_str + "\n") 197 | config["out_file"].flush() 198 | 199 | print(log_str) 200 | if i % config["snapshot_interval"] == 0: 201 | torch.save(nn.Sequential(base_network), osp.join(config["output_path"], \ 202 | "{}_to_{}_iter_{:05d}_model_{}.pth.tar".format(args.source, 203 | args.target, 204 | i, str( 205 | datetime.datetime.utcnow())))) 206 | 207 | loss_params = config["loss"] 208 | ## train one iter 209 | classifier1.train(True) 210 | base_network.train(True) 211 | ad_net.train(True) 212 | optimizer = lr_scheduler(optimizer, i, **schedule_param) 213 | optimizer.zero_grad() 214 | 215 | 216 | if i % len_train_source == 0: 217 | iter_source = iter(dset_loaders["source"]) 218 | if i % len_train_target == 0: 219 | iter_target = iter(dset_loaders["target"]) 220 | inputs_source, labels_source = iter_source.next() 221 | inputs_target, labels_target = iter_target.next() 222 | inputs_source, inputs_target, labels_source = inputs_source.cuda(), inputs_target.cuda(), labels_source.cuda() 223 | 224 | # Extract features with the backbone 225 | features_source, outputs_source = base_network(inputs_source) 226 | features_target, outputs_target = base_network(inputs_target) 227 | features = torch.cat((features_source, features_target), dim=0) 228 | outputs = torch.cat((outputs_source, outputs_target), dim=0) 229 | softmax_out = nn.Softmax(dim=1)(outputs) 230 | 231 | outputs_source1 = classifier1(features_source.detach()) 232 | outputs_target1 = classifier1(features_target.detach()) 233 | outputs1 = torch.cat((outputs_source1,outputs_target1),dim=0) 234 | softmax_out1 = nn.Softmax(dim=1)(outputs1) 235 | 236 | softmax_out = (1-args.cla_plus_weight)*softmax_out + args.cla_plus_weight*softmax_out1 237 | 238 | if config['method'] == 'CDAN+E': 239 | entropy = loss.Entropy(softmax_out) 240 | transfer_loss = loss.CDAN([features, softmax_out], ad_net, entropy, network.calc_coeff(i), random_layer) 241 | elif config['method'] == 'CDAN': 242 | transfer_loss = loss.CDAN([features, softmax_out], ad_net, None, None, random_layer) 243 | elif config['method'] == 'DANN': 244 | transfer_loss = loss.DANN(features, ad_net) 245 | else: 246 | raise ValueError('Method cannot be recognized.') 247 | classifier_loss = nn.CrossEntropyLoss()(outputs_source, labels_source) 248 | 249 | # Cycle 250 | num_feature = features_source.size(0) 251 | # =================train discriminator T 
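# What follows is the cycle-consistency step of 3CATN, applied to bottleneck
# features rather than raw images: identity terms keep each generator close to
# the identity map on its own domain, GAN terms push translated features to
# fool D_s / D_t, cycle terms enforce G_t2s(G_s2t(f)) ~ f (and vice versa), and
# the semantic terms keep base_network.fc predictions stable under translation.
# The four term weights come from --weight_in_lossG.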
252 | real_label = Variable(torch.ones(num_feature)).cuda() 253 | fake_label = Variable(torch.zeros(num_feature)).cuda() 254 | 255 | # Train the generators 256 | optimizer_G.zero_grad() 257 | 258 | # Identity loss 259 | same_t = G_s2t(features_target.detach()) 260 | loss_identity_t = criterion_identity(same_t, features_target) 261 | 262 | same_s = G_t2s(features_source.detach()) 263 | loss_identity_s = criterion_identity(same_s, features_source) 264 | 265 | # GAN loss 266 | fake_t = G_s2t(features_source.detach()) 267 | pred_fake = D_t(fake_t) 268 | loss_G_s2t = criterion_GAN(pred_fake, real_label)  # the generator tries to make D_t predict 'real' 269 | 270 | fake_s = G_t2s(features_target.detach()) 271 | pred_fake = D_s(fake_s) 272 | loss_G_t2s = criterion_GAN(pred_fake, real_label) 273 | 274 | # cycle loss 275 | recovered_s = G_t2s(fake_t) 276 | loss_cycle_sts = criterion_cycle(recovered_s, features_source) 277 | 278 | recovered_t = G_s2t(fake_s) 279 | loss_cycle_tst = criterion_cycle(recovered_t, features_target) 280 | 281 | # sem loss 282 | pred_recovered_s = base_network.fc(recovered_s) 283 | pred_fake_t = base_network.fc(fake_t) 284 | loss_sem_t2s = criterion_Sem(pred_recovered_s, pred_fake_t) 285 | 286 | pred_recovered_t = base_network.fc(recovered_t) 287 | pred_fake_s = base_network.fc(fake_s) 288 | loss_sem_s2t = criterion_Sem(pred_recovered_t, pred_fake_s) 289 | 290 | loss_cycle = loss_cycle_tst + loss_cycle_sts 291 | weights = args.weight_in_lossG.split(',') 292 | loss_G = float(weights[0]) * (loss_identity_s + loss_identity_t) + \ 293 | float(weights[1]) * (loss_G_s2t + loss_G_t2s) + \ 294 | float(weights[2]) * loss_cycle + \ 295 | float(weights[3]) * (loss_sem_s2t + loss_sem_t2s) 296 | 297 | 298 | 299 | # Train the auxiliary softmax classifier on translated source features 300 | outputs_fake = classifier1(fake_t.detach()) 301 | # Update the auxiliary classifier 302 | classifier_loss1 = nn.CrossEntropyLoss()(outputs_fake, labels_source) 303 | classifier1_optim.zero_grad() 304 | classifier_loss1.backward() 305 | classifier1_optim.step() 306 | 307 | total_loss = loss_params["trade_off"] * transfer_loss + classifier_loss + args.cyc_loss_weight*loss_G 308 | total_loss.backward() 309 | optimizer.step() 310 | optimizer_G.step() 311 | 312 | ###### Discriminator S ###### 313 | optimizer_D_s.zero_grad() 314 | 315 | # Real loss 316 | pred_real = D_s(features_source.detach()) 317 | loss_D_real = criterion_GAN(pred_real, real_label) 318 | 319 | # Fake loss 320 | fake_s = fake_S_buffer.push_and_pop(fake_s) 321 | pred_fake = D_s(fake_s.detach()) 322 | loss_D_fake = criterion_GAN(pred_fake, fake_label) 323 | 324 | # Total loss 325 | loss_D_s = loss_D_real + loss_D_fake 326 | loss_D_s.backward() 327 | 328 | optimizer_D_s.step() 329 | ################################### 330 | 331 | ###### Discriminator T ###### 332 | optimizer_D_t.zero_grad() 333 | 334 | # Real loss 335 | pred_real = D_t(features_target.detach()) 336 | loss_D_real = criterion_GAN(pred_real, real_label) 337 | 338 | # Fake loss 339 | fake_t = fake_T_buffer.push_and_pop(fake_t) 340 | pred_fake = D_t(fake_t.detach()) 341 | loss_D_fake = criterion_GAN(pred_fake, fake_label) 342 | 343 | # Total loss 344 | loss_D_t = loss_D_real + loss_D_fake 345 | loss_D_t.backward() 346 | optimizer_D_t.step() 347 | now = datetime.datetime.now() 348 | d = str(now.month)+'-'+str(now.day)+' '+str(now.hour)+':'+str(now.minute)+":"+str(now.second) 349 | torch.save(best_model, osp.join(config["output_path"], 350 | "{}_to_{}_best_model_acc-{}_{}.pth.tar".format(args.source, args.target, 351 | best_acc,d))) 352 | return best_acc 353 | 354 | 355 | if __name__ == "__main__": 356 | 
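# Note: only the CUDA RNGs are seeded below; torch.manual_seed is never called,
# so the CPU seed recorded in config['torch_seed'] still differs between runs.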
357 | torch.cuda.manual_seed(42) # seed the current GPU 358 | torch.cuda.manual_seed_all(42) 359 | 360 | parser = argparse.ArgumentParser(description='Conditional Domain Adversarial Network') 361 | parser.add_argument('--method', type=str, default='CDAN+E', choices=['CDAN', 'CDAN+E', 'DANN']) 362 | parser.add_argument('--gpu_id', type=str, nargs='?', default='0', help="device id to run") 363 | parser.add_argument('--net', type=str, default='ResNet50', 364 | choices=["ResNet18", "ResNet34", "ResNet50", "ResNet101", "ResNet152", "VGG11", "VGG13", 365 | "VGG16", "VGG19", "VGG11BN", "VGG13BN", "VGG16BN", "VGG19BN", "AlexNet"]) 366 | parser.add_argument('--dset', type=str, default='office', choices=['office', 'image-clef', 'visda', 'office-home'], 367 | help="The dataset or source dataset used") 368 | parser.add_argument('--s_dset_path', type=str, default='data/office/dslr_list.txt', 369 | help="The source dataset path list") 370 | parser.add_argument('--t_dset_path', type=str, default='data/office/amazon_list.txt', 371 | help="The target dataset path list") 372 | parser.add_argument('--source', type=str, default="webcam", help="The source dataset name") 373 | parser.add_argument('--target', type=str, default="amazon", help="The target dataset name") 374 | parser.add_argument('--test_interval', type=int, default=300, help="interval of two continuous test phase") 375 | parser.add_argument('--snapshot_interval', type=int, default=5000, help="interval of two continuous output model") 376 | parser.add_argument('--lr', type=float, default=0.001, help="learning rate") 377 | parser.add_argument('--random', action='store_true', default=False, help="whether to use the random multilinear projection") 378 | parser.add_argument('--cyc_loss_weight',type=float,default=0.005) 379 | parser.add_argument('--cla_plus_weight', type=float, default=0.1) 380 | parser.add_argument("--weight_in_lossG",type=str,default='1,0.01,0.1,0.1') 381 | args = parser.parse_args() 382 | os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_id 383 | # os.environ["CUDA_VISIBLE_DEVICES"] = '0,1,2,3' 384 | 385 | # train config 386 | task_name = args.source+"2"+args.target 387 | config = {} 388 | config['torch_seed'] = torch.initial_seed() 389 | config['torch_cuda_seed'] = torch.cuda.initial_seed() 390 | config['method'] = args.method 391 | config["gpu"] = args.gpu_id 392 | config['cyc_loss_weight'] = args.cyc_loss_weight 393 | config['cla_plus_weight'] = args.cla_plus_weight 394 | config['weight_in_lossG'] = args.weight_in_lossG 395 | config["num_iterations"] = 20000 396 | config["test_interval"] = args.test_interval 397 | config["snapshot_interval"] = args.snapshot_interval 398 | config["output_for_test"] = True 399 | config["output_path"] = "snapshot/" + task_name 400 | if not osp.exists(config["output_path"]): 401 | os.system('mkdir -p ' + config["output_path"]) 402 | config["out_file"] = open(osp.join(config["output_path"], "log_{}_to_{}_{}.txt".format(args.source, args.target, 403 | str( 404 | datetime.datetime.utcnow()))), 405 | "w") 406 | if not osp.exists(config["output_path"]): 407 | os.mkdir(config["output_path"]) 408 | 409 | config["prep"] = {"test_10crop": True, 'params': {"resize_size": 256, "crop_size": 224, 'alexnet': False}} 410 | config["loss"] = {"trade_off": 1.0} 411 | if "AlexNet" in args.net: 412 | config["prep"]['params']['alexnet'] = True 413 | config["prep"]['params']['crop_size'] = 227 414 | config["network"] = {"name": network.AlexNetFc, \ 415 | "params": {"use_bottleneck": True, "bottleneck_dim": 256, "new_cls": True}} 416 | elif "ResNet" in args.net: 
417 | config["network"] = {"name": network.ResNetFc, \ 418 | "params": {"resnet_name": args.net, "use_bottleneck": True, "bottleneck_dim": 256, 419 | "new_cls": True}} 420 | elif "VGG" in args.net: 421 | config["network"] = {"name": network.VGGFc, \ 422 | "params": {"vgg_name": args.net, "use_bottleneck": True, "bottleneck_dim": 256, 423 | "new_cls": True}} 424 | config["loss"]["random"] = args.random 425 | config["loss"]["random_dim"] = 1024 426 | 427 | config["optimizer"] = {"type": optim.SGD, "optim_params": {'lr': args.lr, "momentum": 0.9, \ 428 | "weight_decay": 0.0005, "nesterov": True}, 429 | "lr_type": "inv", \ 430 | "lr_param": {"lr": args.lr, "gamma": 0.001, "power": 0.75}} 431 | 432 | config["dataset"] = args.dset 433 | config["data"] = {"source": {"list_path": args.s_dset_path, "batch_size": 36}, \ 434 | "target": {"list_path": args.t_dset_path, "batch_size": 36}, \ 435 | "test": {"list_path": args.t_dset_path, "batch_size": 4}} 436 | 437 | if config["dataset"] == "office": 438 | if ("amazon" in args.s_dset_path and "webcam" in args.t_dset_path) or \ 439 | ("webcam" in args.s_dset_path and "dslr" in args.t_dset_path) or \ 440 | ("webcam" in args.s_dset_path and "amazon" in args.t_dset_path) or \ 441 | ("dslr" in args.s_dset_path and "amazon" in args.t_dset_path): 442 | config["optimizer"]["lr_param"]["lr"] = 0.001 # optimal parameters 443 | elif ("amazon" in args.s_dset_path and "dslr" in args.t_dset_path) or \ 444 | ("dslr" in args.s_dset_path and "webcam" in args.t_dset_path): 445 | config["optimizer"]["lr_param"]["lr"] = 0.0003 # optimal parameters 446 | config["network"]["params"]["class_num"] = 31 447 | elif config["dataset"] == "image-clef": 448 | config["optimizer"]["lr_param"]["lr"] = 0.001 # optimal parameters 449 | config["network"]["params"]["class_num"] = 12 450 | elif config["dataset"] == "visda": 451 | config["optimizer"]["lr_param"]["lr"] = 0.001 # optimal parameters 452 | config["network"]["params"]["class_num"] = 12 453 | config['loss']["trade_off"] = 1.0 454 | elif config["dataset"] == "office-home": 455 | config["optimizer"]["lr_param"]["lr"] = 0.001 # optimal parameters 456 | config["network"]["params"]["class_num"] = 65 457 | else: 458 | raise ValueError('Dataset cannot be recognized. 
Please define your own dataset here.') 459 | config["out_file"].write(str(config)) 460 | config["out_file"].flush() 461 | train(config) 462 | -------------------------------------------------------------------------------- /network.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch 3 | import torch.nn as nn 4 | import torchvision 5 | from torchvision import models 6 | from torch.autograd import Variable 7 | import math 8 | import pdb 9 | def calc_coeff(iter_num, high=1.0, low=0.0, alpha=10.0, max_iter=10000.0): 10 | return float(2.0 * (high - low) / (1.0 + np.exp(-alpha*iter_num / max_iter)) - (high - low) + low)  # builtin float: np.float was removed in NumPy 1.24 11 | 12 | 13 | def init_weights(m): 14 | classname = m.__class__.__name__ 15 | if classname.find('Conv2d') != -1 or classname.find('ConvTranspose2d') != -1: 16 | nn.init.kaiming_uniform_(m.weight) 17 | nn.init.zeros_(m.bias) 18 | elif classname.find('BatchNorm') != -1: 19 | nn.init.normal_(m.weight, 1.0, 0.02) 20 | nn.init.zeros_(m.bias) 21 | elif classname.find('Linear') != -1: 22 | nn.init.xavier_normal_(m.weight) 23 | nn.init.zeros_(m.bias) 24 | 25 | class RandomLayer(nn.Module): 26 | def __init__(self, input_dim_list=[], output_dim=1024): 27 | super(RandomLayer, self).__init__() 28 | self.input_num = len(input_dim_list) 29 | self.output_dim = output_dim 30 | self.random_matrix = [torch.randn(input_dim_list[i], output_dim) for i in range(self.input_num)] 31 | 32 | def forward(self, input_list): 33 | return_list = [torch.mm(input_list[i], self.random_matrix[i]) for i in range(self.input_num)] 34 | return_tensor = return_list[0] / math.pow(float(self.output_dim), 1.0/len(return_list)) 35 | for single in return_list[1:]: 36 | return_tensor = torch.mul(return_tensor, single) 37 | return return_tensor 38 | 39 | def cuda(self): 40 | super(RandomLayer, self).cuda() 41 | self.random_matrix = [val.cuda() for val in self.random_matrix] 42 | 43 | class LRN(nn.Module): 44 | def __init__(self, local_size=1, alpha=1.0, beta=0.75, ACROSS_CHANNELS=True): 45 | super(LRN, self).__init__() 46 | self.ACROSS_CHANNELS = ACROSS_CHANNELS 47 | if ACROSS_CHANNELS: 48 | self.average=nn.AvgPool3d(kernel_size=(local_size, 1, 1), 49 | stride=1, 50 | padding=(int((local_size-1.0)/2), 0, 0)) 51 | else: 52 | self.average=nn.AvgPool2d(kernel_size=local_size, 53 | stride=1, 54 | padding=int((local_size-1.0)/2)) 55 | self.alpha = alpha 56 | self.beta = beta 57 | 58 | 59 | def forward(self, x): 60 | if self.ACROSS_CHANNELS: 61 | div = x.pow(2).unsqueeze(1) 62 | div = self.average(div).squeeze(1) 63 | div = div.mul(self.alpha).add(1.0).pow(self.beta) 64 | else: 65 | div = x.pow(2) 66 | div = self.average(div) 67 | div = div.mul(self.alpha).add(1.0).pow(self.beta) 68 | x = x.div(div) 69 | return x 70 | 71 | class AlexNet(nn.Module): 72 | 73 | def __init__(self, num_classes=1000): 74 | super(AlexNet, self).__init__() 75 | self.features = nn.Sequential( 76 | nn.Conv2d(3, 96, kernel_size=11, stride=4, padding=0), 77 | nn.ReLU(inplace=True), 78 | LRN(local_size=5, alpha=0.0001, beta=0.75), 79 | nn.MaxPool2d(kernel_size=3, stride=2), 80 | nn.Conv2d(96, 256, kernel_size=5, padding=2, groups=2), 81 | nn.ReLU(inplace=True), 82 | LRN(local_size=5, alpha=0.0001, beta=0.75), 83 | nn.MaxPool2d(kernel_size=3, stride=2), 84 | nn.Conv2d(256, 384, kernel_size=3, padding=1), 85 | nn.ReLU(inplace=True), 86 | nn.Conv2d(384, 384, kernel_size=3, padding=1, groups=2), 87 | nn.ReLU(inplace=True), 88 | nn.Conv2d(384, 256, kernel_size=3, padding=1, groups=2), 89 | 
nn.ReLU(inplace=True), 90 | nn.MaxPool2d(kernel_size=3, stride=2), 91 | ) 92 | self.classifier = nn.Sequential( 93 | nn.Linear(256 * 6 * 6, 4096), 94 | nn.ReLU(inplace=True), 95 | nn.Dropout(), 96 | nn.Linear(4096, 4096), 97 | nn.ReLU(inplace=True), 98 | nn.Dropout(), 99 | nn.Linear(4096, num_classes), 100 | ) 101 | 102 | def forward(self, x): 103 | x = self.features(x) 104 | # print(x.size())  # debugging left-over, disabled 105 | x = x.view(x.size(0), 256 * 6 * 6) 106 | x = self.classifier(x) 107 | return x 108 | 109 | 110 | def alexnet(pretrained=False, **kwargs): 111 | r"""AlexNet model architecture from the 112 | `"One weird trick..." <https://arxiv.org/abs/1404.5997>`_ paper. 113 | Args: 114 | pretrained (bool): If True, returns a model pre-trained on ImageNet 115 | """ 116 | model = AlexNet(**kwargs) 117 | if pretrained: 118 | model_path = './alexnet.pth.tar' 119 | pretrained_model = torch.load(model_path) 120 | model.load_state_dict(pretrained_model['state_dict']) 121 | return model 122 | 123 | # convnet without the last layer 124 | class AlexNetFc(nn.Module): 125 | def __init__(self, use_bottleneck=True, bottleneck_dim=256, new_cls=False, class_num=1000): 126 | super(AlexNetFc, self).__init__() 127 | model_alexnet = alexnet(pretrained=True) 128 | self.features = model_alexnet.features 129 | self.classifier = nn.Sequential() 130 | for i in range(6): 131 | self.classifier.add_module("classifier"+str(i), model_alexnet.classifier[i]) 132 | self.feature_layers = nn.Sequential(self.features, self.classifier) 133 | 134 | self.use_bottleneck = use_bottleneck 135 | self.new_cls = new_cls 136 | if new_cls: 137 | if self.use_bottleneck: 138 | self.bottleneck = nn.Linear(4096, bottleneck_dim) 139 | self.fc = nn.Linear(bottleneck_dim, class_num) 140 | self.bottleneck.apply(init_weights) 141 | self.fc.apply(init_weights) 142 | self.__in_features = bottleneck_dim 143 | else: 144 | self.fc = nn.Linear(4096, class_num) 145 | self.fc.apply(init_weights) 146 | self.__in_features = 4096 147 | else: 148 | self.fc = model_alexnet.classifier[6] 149 | self.__in_features = 4096 150 | 151 | def forward(self, x): 152 | x = self.features(x) 153 | x = x.view(x.size(0), -1) 154 | x = self.classifier(x) 155 | if self.use_bottleneck and self.new_cls: 156 | x = self.bottleneck(x) 157 | y = self.fc(x) 158 | return x, y 159 | 160 | def output_num(self): 161 | return self.__in_features 162 | 163 | def get_parameters(self): 164 | if self.new_cls: 165 | if self.use_bottleneck: 166 | parameter_list = [{"params":self.features.parameters(), "lr_mult":1, 'decay_mult':2}, \ 167 | {"params":self.classifier.parameters(), "lr_mult":1, 'decay_mult':2}, \ 168 | {"params":self.bottleneck.parameters(), "lr_mult":10, 'decay_mult':2}, \ 169 | {"params":self.fc.parameters(), "lr_mult":10, 'decay_mult':2}] 170 | else: 171 | parameter_list = [{"params":self.feature_layers.parameters(), "lr_mult":1, 'decay_mult':2}, \ 172 | {"params":self.classifier.parameters(), "lr_mult":1, 'decay_mult':2}, \ 173 | {"params":self.fc.parameters(), "lr_mult":10, 'decay_mult':2}] 174 | else: 175 | parameter_list = [{"params":self.parameters(), "lr_mult":1, 'decay_mult':2}] 176 | return parameter_list 177 | 178 | 179 | resnet_dict = {"ResNet18":models.resnet18, "ResNet34":models.resnet34, "ResNet50":models.resnet50, "ResNet101":models.resnet101, "ResNet152":models.resnet152} 180 | 181 | def grl_hook(coeff): 182 | def fun1(grad): 183 | return -coeff*grad.clone() 184 | return fun1 185 | 186 | class ResNetFc(nn.Module): 187 | def __init__(self, resnet_name, use_bottleneck=True, bottleneck_dim=256, new_cls=False, 
class_num=1000): 188 | super(ResNetFc, self).__init__() 189 | model_resnet = resnet_dict[resnet_name](pretrained=True) 190 | self.conv1 = model_resnet.conv1 191 | self.bn1 = model_resnet.bn1 192 | self.relu = model_resnet.relu 193 | self.maxpool = model_resnet.maxpool 194 | self.layer1 = model_resnet.layer1 195 | self.layer2 = model_resnet.layer2 196 | self.layer3 = model_resnet.layer3 197 | self.layer4 = model_resnet.layer4 198 | self.avgpool = model_resnet.avgpool 199 | self.feature_layers = nn.Sequential(self.conv1, self.bn1, self.relu, self.maxpool, \ 200 | self.layer1, self.layer2, self.layer3, self.layer4, self.avgpool) 201 | 202 | self.use_bottleneck = use_bottleneck 203 | self.new_cls = new_cls 204 | if new_cls: 205 | if self.use_bottleneck: 206 | self.bottleneck = nn.Linear(model_resnet.fc.in_features, bottleneck_dim) 207 | self.fc = nn.Linear(bottleneck_dim, class_num) 208 | self.bottleneck.apply(init_weights) 209 | self.fc.apply(init_weights) 210 | self.__in_features = bottleneck_dim 211 | else: 212 | self.fc = nn.Linear(model_resnet.fc.in_features, class_num) 213 | self.fc.apply(init_weights) 214 | self.__in_features = model_resnet.fc.in_features 215 | else: 216 | self.fc = model_resnet.fc 217 | self.__in_features = model_resnet.fc.in_features 218 | def forward(self, x): 219 | x = self.feature_layers(x) 220 | x = x.view(x.size(0), -1) 221 | if self.use_bottleneck and self.new_cls: 222 | x = self.bottleneck(x) 223 | y = self.fc(x) 224 | return x, y 225 | 226 | def output_num(self): 227 | return self.__in_features 228 | 229 | def get_parameters(self): 230 | if self.new_cls: 231 | if self.use_bottleneck: 232 | parameter_list = [{"params":self.feature_layers.parameters(), "lr_mult":1, 'decay_mult':2}, \ 233 | {"params":self.bottleneck.parameters(), "lr_mult":10, 'decay_mult':2}, \ 234 | {"params":self.fc.parameters(), "lr_mult":10, 'decay_mult':2}] 235 | else: 236 | parameter_list = [{"params":self.feature_layers.parameters(), "lr_mult":1, 'decay_mult':2}, \ 237 | {"params":self.fc.parameters(), "lr_mult":10, 'decay_mult':2}] 238 | else: 239 | parameter_list = [{"params":self.parameters(), "lr_mult":1, 'decay_mult':2}] 240 | return parameter_list 241 | 242 | vgg_dict = {"VGG11":models.vgg11, "VGG13":models.vgg13, "VGG16":models.vgg16, "VGG19":models.vgg19, "VGG11BN":models.vgg11_bn, "VGG13BN":models.vgg13_bn, "VGG16BN":models.vgg16_bn, "VGG19BN":models.vgg19_bn} 243 | class VGGFc(nn.Module): 244 | def __init__(self, vgg_name, use_bottleneck=True, bottleneck_dim=256, new_cls=False, class_num=1000): 245 | super(VGGFc, self).__init__() 246 | model_vgg = vgg_dict[vgg_name](pretrained=True) 247 | self.features = model_vgg.features 248 | self.classifier = nn.Sequential() 249 | for i in range(6): 250 | self.classifier.add_module("classifier"+str(i), model_vgg.classifier[i]) 251 | self.feature_layers = nn.Sequential(self.features, self.classifier) 252 | 253 | self.use_bottleneck = use_bottleneck 254 | self.new_cls = new_cls 255 | if new_cls: 256 | if self.use_bottleneck: 257 | self.bottleneck = nn.Linear(4096, bottleneck_dim) 258 | self.fc = nn.Linear(bottleneck_dim, class_num) 259 | self.bottleneck.apply(init_weights) 260 | self.fc.apply(init_weights) 261 | self.__in_features = bottleneck_dim 262 | else: 263 | self.fc = nn.Linear(4096, class_num) 264 | self.fc.apply(init_weights) 265 | self.__in_features = 4096 266 | else: 267 | self.fc = model_vgg.classifier[6] 268 | self.__in_features = 4096 269 | 270 | def forward(self, x): 271 | x = self.features(x) 272 | x = x.view(x.size(0), -1) 273 
| x = self.classifier(x) 274 | if self.use_bottleneck and self.new_cls: 275 | x = self.bottleneck(x) 276 | y = self.fc(x) 277 | return x, y 278 | 279 | def output_num(self): 280 | return self.__in_features 281 | 282 | def get_parameters(self): 283 | if self.new_cls: 284 | if self.use_bottleneck: 285 | parameter_list = [{"params":self.features.parameters(), "lr_mult":1, 'decay_mult':2}, \ 286 | {"params":self.classifier.parameters(), "lr_mult":1, 'decay_mult':2}, \ 287 | {"params":self.bottleneck.parameters(), "lr_mult":10, 'decay_mult':2}, \ 288 | {"params":self.fc.parameters(), "lr_mult":10, 'decay_mult':2}] 289 | else: 290 | parameter_list = [{"params":self.feature_layers.parameters(), "lr_mult":1, 'decay_mult':2}, \ 291 | {"params":self.classifier.parameters(), "lr_mult":1, 'decay_mult':2}, \ 292 | {"params":self.fc.parameters(), "lr_mult":10, 'decay_mult':2}] 293 | else: 294 | parameter_list = [{"params":self.parameters(), "lr_mult":1, 'decay_mult':2}] 295 | return parameter_list 296 | 297 | # For SVHN dataset 298 | class DTN(nn.Module): 299 | def __init__(self): 300 | super(DTN, self).__init__() 301 | self.conv_params = nn.Sequential ( 302 | nn.Conv2d(3, 64, kernel_size=5, stride=2, padding=2), 303 | nn.BatchNorm2d(64), 304 | nn.Dropout2d(0.1), 305 | nn.ReLU(), 306 | nn.Conv2d(64, 128, kernel_size=5, stride=2, padding=2), 307 | nn.BatchNorm2d(128), 308 | nn.Dropout2d(0.3), 309 | nn.ReLU(), 310 | nn.Conv2d(128, 256, kernel_size=5, stride=2, padding=2), 311 | nn.BatchNorm2d(256), 312 | nn.Dropout2d(0.5), 313 | nn.ReLU() 314 | ) 315 | 316 | self.fc_params = nn.Sequential ( 317 | nn.Linear(256*4*4, 512), 318 | nn.BatchNorm1d(512), 319 | nn.ReLU(), 320 | nn.Dropout() 321 | ) 322 | 323 | self.classifier = nn.Linear(512, 10) 324 | self.__in_features = 512 325 | 326 | def forward(self, x): 327 | x = self.conv_params(x) 328 | x = x.view(x.size(0), -1) 329 | x = self.fc_params(x) 330 | y = self.classifier(x) 331 | return x, y 332 | 333 | def output_num(self): 334 | return self.__in_features 335 | 336 | def load(self, init_path): 337 | net_init_dict = torch.load(init_path) 338 | self.load_state_dict(net_init_dict) 339 | 340 | def save(self, out_path): 341 | torch.save(self.state_dict(), out_path) 342 | 343 | class LeNet(nn.Module): 344 | def __init__(self): 345 | super(LeNet, self).__init__() 346 | self.conv_params = nn.Sequential( 347 | nn.Conv2d(1, 20, kernel_size=5), 348 | nn.MaxPool2d(2), 349 | nn.ReLU(), 350 | nn.Conv2d(20, 50, kernel_size=5), 351 | nn.Dropout2d(p=0.5), 352 | nn.MaxPool2d(2), 353 | nn.ReLU(), 354 | ) 355 | 356 | self.fc_params = nn.Sequential(nn.Linear(50*4*4, 500), nn.ReLU(), nn.Dropout(p=0.5)) 357 | self.classifier = nn.Linear(500, 10) 358 | self.__in_features = 500 359 | 360 | 361 | def forward(self, x): 362 | x = self.conv_params(x) 363 | x = x.view(x.size(0), -1) 364 | x = self.fc_params(x) 365 | y = self.classifier(x) 366 | return x, y 367 | 368 | def output_num(self): 369 | return self.__in_features 370 | 371 | class AdversarialNetwork(nn.Module): 372 | def __init__(self, in_feature, hidden_size): 373 | super(AdversarialNetwork, self).__init__() 374 | self.ad_layer1 = nn.Linear(in_feature, hidden_size) 375 | self.ad_layer2 = nn.Linear(hidden_size, hidden_size) 376 | self.ad_layer3 = nn.Linear(hidden_size, 1) 377 | self.relu1 = nn.ReLU() 378 | self.relu2 = nn.ReLU() 379 | self.dropout1 = nn.Dropout(0.5) 380 | self.dropout2 = nn.Dropout(0.5) 381 | self.sigmoid = nn.Sigmoid() 382 | self.apply(init_weights) 383 | self.iter_num = 0 384 | self.alpha = 10 385 | self.low = 
0.0 386 | self.high = 1.0 387 | self.max_iter = 10000.0 388 | 389 | def forward(self, x): 390 | if self.training: 391 | self.iter_num += 1 392 | coeff = calc_coeff(self.iter_num, self.high, self.low, self.alpha, self.max_iter) 393 | x = x * 1.0 394 | x.register_hook(grl_hook(coeff)) 395 | x = self.ad_layer1(x) 396 | x = self.relu1(x) 397 | x = self.dropout1(x) 398 | x = self.ad_layer2(x) 399 | x = self.relu2(x) 400 | x = self.dropout2(x) 401 | y = self.ad_layer3(x) 402 | y = self.sigmoid(y) 403 | return y 404 | 405 | def output_num(self): 406 | return 1 407 | def get_parameters(self): 408 | return [{"params":self.parameters(), "lr_mult":10, 'decay_mult':2}] 409 | 410 | 411 | class AddaNet(nn.Module): 412 | "Defines an ADDA network." 413 | 414 | def __init__(self, num_cls=10, model='LeNet', src_weights_init=None, 415 | weights_init=None, discrim_feat=False): 416 | super(AddaNet, self).__init__() 417 | self.name = 'AddaNet' 418 | self.base_model = model 419 | self.num_cls = num_cls; self.discrim_feat = discrim_feat  # forward() branches on this flag, which was otherwise never initialized 420 | self.cls_criterion = nn.CrossEntropyLoss() 421 | self.gan_criterion = nn.CrossEntropyLoss() 422 | 423 | self.setup_net() 424 | if weights_init is not None: 425 | self.load(weights_init) 426 | elif src_weights_init is not None: 427 | self.load_src_net(src_weights_init) 428 | else: 429 | raise Exception('AddaNet must be initialized with weights.') 430 | 431 | def forward(self, x_s, x_t): 432 | """Pass source and target images through their 433 | respective networks.""" 434 | score_s, x_s = self.src_net(x_s, with_ft=True) 435 | score_t, x_t = self.tgt_net(x_t, with_ft=True) 436 | 437 | if self.discrim_feat: 438 | d_s = self.discriminator(x_s) 439 | d_t = self.discriminator(x_t) 440 | else: 441 | d_s = self.discriminator(score_s) 442 | d_t = self.discriminator(score_t) 443 | return score_s, score_t, d_s, d_t 444 | 445 | def setup_net(self): 446 | """Setup source, target and discriminator networks.""" 447 | self.src_net = models[self.base_model]() 448 | self.tgt_net = models[self.base_model]() 449 | 450 | input_dim = self.num_cls 451 | self.discriminator = nn.Sequential( 452 | nn.Linear(input_dim, 500), 453 | nn.ReLU(), 454 | nn.Linear(500, 500), 455 | nn.ReLU(), 456 | nn.Linear(500, 2), 457 | ) 458 | 459 | # self.image_size = self.src_net.image_size 460 | # self.num_channels = self.src_net.num_channels 461 | 462 | def load(self, init_path): 463 | "Loads full src and tgt models." 
        net_init_dict = torch.load(init_path)
        self.load_state_dict(net_init_dict)

    def load_src_net(self, init_path):
        """Initialize both the source and target networks with source weights."""
        self.src_net.load(init_path)
        self.tgt_net.load(init_path)

    def save(self, out_path):
        torch.save(self.state_dict(), out_path)

    def save_tgt_net(self, out_path):
        torch.save(self.tgt_net.state_dict(), out_path)


class Discriminator(nn.Module):
    def __init__(self):
        super(Discriminator, self).__init__()
        self.conv1 = nn.Sequential(
            nn.Conv2d(1, 32, 5, padding=2),  # batch, 32, 16, 16
            nn.LeakyReLU(0.2, True)
        )
        self.conv2 = nn.Sequential(
            nn.Conv2d(32, 64, 5, padding=2),  # batch, 64, 16, 16
            nn.LeakyReLU(0.2, True),
            nn.AvgPool2d(2, stride=2)  # batch, 64, 8, 8
        )
        self.fc = nn.Sequential(
            nn.Linear(64 * 8 * 8, 1024),
            nn.LeakyReLU(0.2, True),
            nn.Linear(1024, 1),
            nn.Sigmoid()
        )

    def forward(self, x):
        """x: a batch of 256-dim vectors, reshaped to (batch, 1, 16, 16)."""
        x = x.view(x.size(0), 1, 16, 16)
        x = self.conv1(x)
        x = self.conv2(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return x


class Generator(nn.Module):
    def __init__(self, input_size, num_feature):
        super(Generator, self).__init__()  # call Module.__init__ before assigning attributes
        self.num_feature = num_feature
        self.fc = nn.Linear(input_size, 1024)  # batch, 1024
        self.br = nn.Sequential(
            nn.BatchNorm2d(1),
            nn.ReLU(True)
        )
        self.downsample1 = nn.Sequential(
            nn.Conv2d(1, 50, 3, stride=1, padding=1),  # batch, 50, 32, 32
            nn.BatchNorm2d(50),
            nn.ReLU(True)
        )
        self.downsample2 = nn.Sequential(
            nn.Conv2d(50, 25, 3, stride=1, padding=1),  # batch, 25, 32, 32
            nn.BatchNorm2d(25),
            nn.ReLU(True)
        )
        self.downsample3 = nn.Sequential(
            nn.Conv2d(25, 1, 2, stride=2),  # batch, 1, 16, 16
            nn.Tanh()
        )

    def forward(self, x):
        x = self.fc(x)
        x = x.view(x.size(0), 1, 32, 32)
        x = self.br(x)
        x = self.downsample1(x)
        x = self.downsample2(x)
        x = self.downsample3(x)
        x = x.view(x.size(0), -1)  # batch, 256
        return x


class Discriminator_digits(nn.Module):
    def __init__(self):
        super(Discriminator_digits, self).__init__()
        self.conv1 = nn.Sequential(
            nn.Conv2d(2, 32, 5, padding=2),  # batch, 32, 16, 16
            nn.LeakyReLU(0.2, True)
        )
        self.conv2 = nn.Sequential(
            nn.Conv2d(32, 64, 5, padding=2),  # batch, 64, 16, 16
            nn.LeakyReLU(0.2, True),
            nn.AvgPool2d(2, stride=2)  # batch, 64, 8, 8
        )
        self.fc = nn.Sequential(
            nn.Linear(64 * 8 * 8, 1024),
            nn.LeakyReLU(0.2, True),
            nn.Linear(1024, 1),
            nn.Sigmoid()
        )

    def forward(self, x):
        """x: a batch of 512-dim vectors, reshaped to (batch, 2, 16, 16)."""
        x = x.view(x.size(0), 2, 16, 16)
        x = self.conv1(x)
        x = self.conv2(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return x


class Generator_digits(nn.Module):
    def __init__(self, input_size, num_feature):
        super(Generator_digits, self).__init__()
        self.fc = nn.Linear(input_size, num_feature)  # num_feature must be 1024 (= 1 * 32 * 32)
        self.br = nn.Sequential(
            nn.BatchNorm2d(1),
            nn.ReLU(True)
        )
        self.downsample1 = nn.Sequential(
            nn.Conv2d(1, 50, 3, stride=1, padding=1),  # batch, 50, 32, 32
            nn.BatchNorm2d(50),
            nn.ReLU(True)
        )
        self.downsample2 = nn.Sequential(
            nn.Conv2d(50, 25, 3, stride=1, padding=1),  # batch, 25, 32, 32
            nn.BatchNorm2d(25),
            nn.ReLU(True)
        )
        self.downsample3 = nn.Sequential(
            nn.Conv2d(25, 2, 2, stride=2),  # batch, 2, 16, 16
            nn.Tanh()
        )

    def forward(self, x):
        x = self.fc(x)
        x = x.view(x.size(0), 1, 32, 32)
        x = self.br(x)
        x = self.downsample1(x)
        x = self.downsample2(x)
        x = self.downsample3(x)
        x = x.view(x.size(0), 512)  # batch, 512 (= 2 * 16 * 16)
        return x


class Discriminator_um(nn.Module):
    def __init__(self):
        super(Discriminator_um, self).__init__()
        self.fc1 = nn.Linear(500, 512)
        self.conv1 = nn.Sequential(
            nn.Conv2d(2, 32, 5, padding=2),  # batch, 32, 16, 16
            nn.LeakyReLU(0.2, True)
        )
        self.conv2 = nn.Sequential(
            nn.Conv2d(32, 64, 5, padding=2),  # batch, 64, 16, 16
            nn.LeakyReLU(0.2, True),
            nn.AvgPool2d(2, stride=2)  # batch, 64, 8, 8
        )
        self.fc = nn.Sequential(
            nn.Linear(64 * 8 * 8, 1024),
            nn.LeakyReLU(0.2, True),
            nn.Linear(1024, 1),
            nn.Sigmoid()
        )

    def forward(self, x):
        """x: a batch of 500-dim feature vectors, projected to 512 dims and reshaped to (batch, 2, 16, 16)."""
        x = self.fc1(x)
        x = x.view(x.size(0), 2, 16, 16)
        x = self.conv1(x)
        x = self.conv2(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return x


class Generator_um(nn.Module):
    def __init__(self, input_size, num_feature):
        super(Generator_um, self).__init__()
        # num_feature is unused here; the projection size is fixed at 1024 (= 1 * 32 * 32).
        self.fc = nn.Linear(input_size, 1024)  # batch, 1024
        self.br = nn.Sequential(
            nn.BatchNorm2d(1),
            nn.ReLU(True)
        )
        self.downsample1 = nn.Sequential(
            nn.Conv2d(1, 50, 3, stride=1, padding=1),  # batch, 50, 32, 32
            nn.BatchNorm2d(50),
            nn.ReLU(True)
        )
        self.downsample2 = nn.Sequential(
            nn.Conv2d(50, 25, 3, stride=1, padding=1),  # batch, 25, 32, 32
            nn.BatchNorm2d(25),
            nn.ReLU(True)
        )
        self.downsample3 = nn.Sequential(
            nn.Conv2d(25, 2, 2, stride=2),  # batch, 2, 16, 16
            nn.Tanh()
        )
        self.fc1 = nn.Linear(512, 500)

    def forward(self, x):
        x = self.fc(x)
        x = x.view(x.size(0), 1, 32, 32)
        x = self.br(x)
        x = self.downsample1(x)
        x = self.downsample2(x)
        x = self.downsample3(x)
        x = x.view(x.size(0), 512)
        x = self.fc1(x)
        return x


class Discriminator_um_pixel(nn.Module):
    def __init__(self):
        super(Discriminator_um_pixel, self).__init__()
        self.fc1 = nn.Linear(784, 512)
        self.conv1 = nn.Sequential(
            nn.Conv2d(2, 32, 5, padding=2),  # batch, 32, 16, 16
            nn.LeakyReLU(0.2, True)
        )
        self.conv2 = nn.Sequential(
            nn.Conv2d(32, 64, 5, padding=2),  # batch, 64, 16, 16
            nn.LeakyReLU(0.2, True),
            nn.AvgPool2d(2, stride=2)  # batch, 64, 8, 8
        )
        self.fc = nn.Sequential(
            nn.Linear(64 * 8 * 8, 1024),
            nn.LeakyReLU(0.2, True),
            nn.Linear(1024, 1),
            nn.Sigmoid()
        )

    def forward(self, x):
        """x: a batch of 28x28 images, flattened to 784 dims, projected to 512 and reshaped to (batch, 2, 16, 16)."""
        x = x.view(x.size(0), -1)
        x = self.fc1(x)
        x = x.view(x.size(0), 2, 16, 16)
        x = self.conv1(x)
        x = self.conv2(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return x
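For orientation, the feature-level USPS<->MNIST pair defined below (Generator_um / Discriminator_um) can be sanity-checked with a quick shape test. The following snippet is an illustrative sketch only, not part of the repository; it assumes the classes are importable from network.py, and the 500-dim feature size matches the fc1 layers of both classes:

import torch
from network import Generator_um, Discriminator_um

# Hypothetical smoke test: push a batch of 500-dim features through the pair.
g = Generator_um(input_size=500, num_feature=1024)  # num_feature is ignored by this class
d = Discriminator_um()
feats = torch.randn(8, 500)   # a batch of 8 source features
fake = g(feats)               # (8, 500): "translated" features
score = d(fake)               # (8, 1): real/fake probability from the Sigmoid head
assert fake.shape == (8, 500) and score.shape == (8, 1)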
class Generator_um_pixel(nn.Module):
    def __init__(self, input_size, num_feature):
        super(Generator_um_pixel, self).__init__()
        # num_feature is unused here; the projection size is fixed at 3136 (= 1 * 56 * 56).
        self.fc = nn.Linear(input_size, 3136)  # batch, 3136
        self.br = nn.Sequential(
            nn.BatchNorm2d(1),
            nn.ReLU(True)
        )
        self.downsample1 = nn.Sequential(
            nn.Conv2d(1, 50, 3, stride=1, padding=1),  # batch, 50, 56, 56
            nn.BatchNorm2d(50),
            nn.ReLU(True)
        )
        self.downsample2 = nn.Sequential(
            nn.Conv2d(50, 25, 3, stride=1, padding=1),  # batch, 25, 56, 56
            nn.BatchNorm2d(25),
            nn.ReLU(True)
        )
        self.downsample3 = nn.Sequential(
            nn.Conv2d(25, 2, 2, stride=2),  # batch, 2, 28, 28
            nn.Tanh()
        )
        self.fc1 = nn.Linear(1568, 784)

    def forward(self, x):
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        x = x.view(x.size(0), 1, 56, 56)
        x = self.br(x)
        x = self.downsample1(x)
        x = self.downsample2(x)
        x = self.downsample3(x)
        x = x.view(x.size(0), 1568)  # batch, 1568 (= 2 * 28 * 28)
        x = self.fc1(x)
        x = x.view(x.size(0), 1, 28, 28)
        return x


models = {
    "Discriminator": Discriminator,
    "Generator": Generator,
    "Discriminator_digits": Discriminator_digits,
    "Generator_digits": Generator_digits,
    "Discriminator_um": Discriminator_um,
    "Generator_um": Generator_um,
    "Discriminator_um_pixel": Discriminator_um_pixel,
    "Generator_um_pixel": Generator_um_pixel,
    "RandomLayer": RandomLayer,
    "LRN": LRN,
    "AlexNet": AlexNet,
    "AlexNetFc": AlexNetFc,
    "ResNetFc": ResNetFc,
    "VGGFc": VGGFc,
    "DTN": DTN,
    "LeNet": LeNet,
    "AdversarialNetwork": AdversarialNetwork,
    "AddaNet": AddaNet
}
-------------------------------------------------------------------------------- /data/office/dslr_list.txt: --------------------------------------------------------------------------------
1 | /home/cep/data/office/domain_adaptation_images/dslr/images/calculator/frame_0001.jpg 5 2 | /home/cep/data/office/domain_adaptation_images/dslr/images/calculator/frame_0002.jpg 5 3 | /home/cep/data/office/domain_adaptation_images/dslr/images/calculator/frame_0003.jpg 5 4 | /home/cep/data/office/domain_adaptation_images/dslr/images/calculator/frame_0004.jpg 5 5 | /home/cep/data/office/domain_adaptation_images/dslr/images/calculator/frame_0005.jpg 5 6 | /home/cep/data/office/domain_adaptation_images/dslr/images/calculator/frame_0006.jpg 5 7 | /home/cep/data/office/domain_adaptation_images/dslr/images/calculator/frame_0007.jpg 5 8 | /home/cep/data/office/domain_adaptation_images/dslr/images/calculator/frame_0008.jpg 5 9 | /home/cep/data/office/domain_adaptation_images/dslr/images/calculator/frame_0009.jpg 5 10 | /home/cep/data/office/domain_adaptation_images/dslr/images/calculator/frame_0010.jpg 5 11 | /home/cep/data/office/domain_adaptation_images/dslr/images/calculator/frame_0011.jpg 5 12 | /home/cep/data/office/domain_adaptation_images/dslr/images/calculator/frame_0012.jpg 5 13 | /home/cep/data/office/domain_adaptation_images/dslr/images/ring_binder/frame_0001.jpg 24 14 | /home/cep/data/office/domain_adaptation_images/dslr/images/ring_binder/frame_0002.jpg 24 15 | /home/cep/data/office/domain_adaptation_images/dslr/images/ring_binder/frame_0003.jpg 24 16 | /home/cep/data/office/domain_adaptation_images/dslr/images/ring_binder/frame_0004.jpg 24 17 | /home/cep/data/office/domain_adaptation_images/dslr/images/ring_binder/frame_0005.jpg 24 18 | /home/cep/data/office/domain_adaptation_images/dslr/images/ring_binder/frame_0006.jpg 24 19 |
/home/cep/data/office/domain_adaptation_images/dslr/images/ring_binder/frame_0007.jpg 24 20 | /home/cep/data/office/domain_adaptation_images/dslr/images/ring_binder/frame_0008.jpg 24 21 | /home/cep/data/office/domain_adaptation_images/dslr/images/ring_binder/frame_0009.jpg 24 22 | /home/cep/data/office/domain_adaptation_images/dslr/images/ring_binder/frame_0010.jpg 24 23 | /home/cep/data/office/domain_adaptation_images/dslr/images/printer/frame_0001.jpg 21 24 | /home/cep/data/office/domain_adaptation_images/dslr/images/printer/frame_0002.jpg 21 25 | /home/cep/data/office/domain_adaptation_images/dslr/images/printer/frame_0003.jpg 21 26 | /home/cep/data/office/domain_adaptation_images/dslr/images/printer/frame_0004.jpg 21 27 | /home/cep/data/office/domain_adaptation_images/dslr/images/printer/frame_0005.jpg 21 28 | /home/cep/data/office/domain_adaptation_images/dslr/images/printer/frame_0006.jpg 21 29 | /home/cep/data/office/domain_adaptation_images/dslr/images/printer/frame_0007.jpg 21 30 | /home/cep/data/office/domain_adaptation_images/dslr/images/printer/frame_0008.jpg 21 31 | /home/cep/data/office/domain_adaptation_images/dslr/images/printer/frame_0009.jpg 21 32 | /home/cep/data/office/domain_adaptation_images/dslr/images/printer/frame_0010.jpg 21 33 | /home/cep/data/office/domain_adaptation_images/dslr/images/printer/frame_0011.jpg 21 34 | /home/cep/data/office/domain_adaptation_images/dslr/images/printer/frame_0012.jpg 21 35 | /home/cep/data/office/domain_adaptation_images/dslr/images/printer/frame_0013.jpg 21 36 | /home/cep/data/office/domain_adaptation_images/dslr/images/printer/frame_0014.jpg 21 37 | /home/cep/data/office/domain_adaptation_images/dslr/images/printer/frame_0015.jpg 21 38 | /home/cep/data/office/domain_adaptation_images/dslr/images/keyboard/frame_0001.jpg 11 39 | /home/cep/data/office/domain_adaptation_images/dslr/images/keyboard/frame_0002.jpg 11 40 | /home/cep/data/office/domain_adaptation_images/dslr/images/keyboard/frame_0003.jpg 11 41 | /home/cep/data/office/domain_adaptation_images/dslr/images/keyboard/frame_0004.jpg 11 42 | /home/cep/data/office/domain_adaptation_images/dslr/images/keyboard/frame_0005.jpg 11 43 | /home/cep/data/office/domain_adaptation_images/dslr/images/keyboard/frame_0006.jpg 11 44 | /home/cep/data/office/domain_adaptation_images/dslr/images/keyboard/frame_0007.jpg 11 45 | /home/cep/data/office/domain_adaptation_images/dslr/images/keyboard/frame_0008.jpg 11 46 | /home/cep/data/office/domain_adaptation_images/dslr/images/keyboard/frame_0009.jpg 11 47 | /home/cep/data/office/domain_adaptation_images/dslr/images/keyboard/frame_0010.jpg 11 48 | /home/cep/data/office/domain_adaptation_images/dslr/images/scissors/frame_0001.jpg 26 49 | /home/cep/data/office/domain_adaptation_images/dslr/images/scissors/frame_0002.jpg 26 50 | /home/cep/data/office/domain_adaptation_images/dslr/images/scissors/frame_0003.jpg 26 51 | /home/cep/data/office/domain_adaptation_images/dslr/images/scissors/frame_0004.jpg 26 52 | /home/cep/data/office/domain_adaptation_images/dslr/images/scissors/frame_0005.jpg 26 53 | /home/cep/data/office/domain_adaptation_images/dslr/images/scissors/frame_0006.jpg 26 54 | /home/cep/data/office/domain_adaptation_images/dslr/images/scissors/frame_0007.jpg 26 55 | /home/cep/data/office/domain_adaptation_images/dslr/images/scissors/frame_0008.jpg 26 56 | /home/cep/data/office/domain_adaptation_images/dslr/images/scissors/frame_0009.jpg 26 57 | /home/cep/data/office/domain_adaptation_images/dslr/images/scissors/frame_0010.jpg 26 58 | 
/home/cep/data/office/domain_adaptation_images/dslr/images/scissors/frame_0011.jpg 26 59 | /home/cep/data/office/domain_adaptation_images/dslr/images/scissors/frame_0012.jpg 26 60 | /home/cep/data/office/domain_adaptation_images/dslr/images/scissors/frame_0013.jpg 26 61 | /home/cep/data/office/domain_adaptation_images/dslr/images/scissors/frame_0014.jpg 26 62 | /home/cep/data/office/domain_adaptation_images/dslr/images/scissors/frame_0015.jpg 26 63 | /home/cep/data/office/domain_adaptation_images/dslr/images/scissors/frame_0016.jpg 26 64 | /home/cep/data/office/domain_adaptation_images/dslr/images/scissors/frame_0017.jpg 26 65 | /home/cep/data/office/domain_adaptation_images/dslr/images/scissors/frame_0018.jpg 26 66 | /home/cep/data/office/domain_adaptation_images/dslr/images/laptop_computer/frame_0001.jpg 12 67 | /home/cep/data/office/domain_adaptation_images/dslr/images/laptop_computer/frame_0002.jpg 12 68 | /home/cep/data/office/domain_adaptation_images/dslr/images/laptop_computer/frame_0003.jpg 12 69 | /home/cep/data/office/domain_adaptation_images/dslr/images/laptop_computer/frame_0004.jpg 12 70 | /home/cep/data/office/domain_adaptation_images/dslr/images/laptop_computer/frame_0005.jpg 12 71 | /home/cep/data/office/domain_adaptation_images/dslr/images/laptop_computer/frame_0006.jpg 12 72 | /home/cep/data/office/domain_adaptation_images/dslr/images/laptop_computer/frame_0007.jpg 12 73 | /home/cep/data/office/domain_adaptation_images/dslr/images/laptop_computer/frame_0008.jpg 12 74 | /home/cep/data/office/domain_adaptation_images/dslr/images/laptop_computer/frame_0009.jpg 12 75 | /home/cep/data/office/domain_adaptation_images/dslr/images/laptop_computer/frame_0010.jpg 12 76 | /home/cep/data/office/domain_adaptation_images/dslr/images/laptop_computer/frame_0011.jpg 12 77 | /home/cep/data/office/domain_adaptation_images/dslr/images/laptop_computer/frame_0012.jpg 12 78 | /home/cep/data/office/domain_adaptation_images/dslr/images/laptop_computer/frame_0013.jpg 12 79 | /home/cep/data/office/domain_adaptation_images/dslr/images/laptop_computer/frame_0014.jpg 12 80 | /home/cep/data/office/domain_adaptation_images/dslr/images/laptop_computer/frame_0015.jpg 12 81 | /home/cep/data/office/domain_adaptation_images/dslr/images/laptop_computer/frame_0016.jpg 12 82 | /home/cep/data/office/domain_adaptation_images/dslr/images/laptop_computer/frame_0017.jpg 12 83 | /home/cep/data/office/domain_adaptation_images/dslr/images/laptop_computer/frame_0018.jpg 12 84 | /home/cep/data/office/domain_adaptation_images/dslr/images/laptop_computer/frame_0019.jpg 12 85 | /home/cep/data/office/domain_adaptation_images/dslr/images/laptop_computer/frame_0020.jpg 12 86 | /home/cep/data/office/domain_adaptation_images/dslr/images/laptop_computer/frame_0021.jpg 12 87 | /home/cep/data/office/domain_adaptation_images/dslr/images/laptop_computer/frame_0022.jpg 12 88 | /home/cep/data/office/domain_adaptation_images/dslr/images/laptop_computer/frame_0023.jpg 12 89 | /home/cep/data/office/domain_adaptation_images/dslr/images/laptop_computer/frame_0024.jpg 12 90 | /home/cep/data/office/domain_adaptation_images/dslr/images/mouse/frame_0001.jpg 16 91 | /home/cep/data/office/domain_adaptation_images/dslr/images/mouse/frame_0002.jpg 16 92 | /home/cep/data/office/domain_adaptation_images/dslr/images/mouse/frame_0003.jpg 16 93 | /home/cep/data/office/domain_adaptation_images/dslr/images/mouse/frame_0004.jpg 16 94 | /home/cep/data/office/domain_adaptation_images/dslr/images/mouse/frame_0005.jpg 16 95 | 
/home/cep/data/office/domain_adaptation_images/dslr/images/mouse/frame_0006.jpg 16 96 | /home/cep/data/office/domain_adaptation_images/dslr/images/mouse/frame_0007.jpg 16 97 | /home/cep/data/office/domain_adaptation_images/dslr/images/mouse/frame_0008.jpg 16 98 | /home/cep/data/office/domain_adaptation_images/dslr/images/mouse/frame_0009.jpg 16 99 | /home/cep/data/office/domain_adaptation_images/dslr/images/mouse/frame_0010.jpg 16 100 | /home/cep/data/office/domain_adaptation_images/dslr/images/mouse/frame_0011.jpg 16 101 | /home/cep/data/office/domain_adaptation_images/dslr/images/mouse/frame_0012.jpg 16 102 | /home/cep/data/office/domain_adaptation_images/dslr/images/monitor/frame_0001.jpg 15 103 | /home/cep/data/office/domain_adaptation_images/dslr/images/monitor/frame_0002.jpg 15 104 | /home/cep/data/office/domain_adaptation_images/dslr/images/monitor/frame_0003.jpg 15 105 | /home/cep/data/office/domain_adaptation_images/dslr/images/monitor/frame_0004.jpg 15 106 | /home/cep/data/office/domain_adaptation_images/dslr/images/monitor/frame_0005.jpg 15 107 | /home/cep/data/office/domain_adaptation_images/dslr/images/monitor/frame_0006.jpg 15 108 | /home/cep/data/office/domain_adaptation_images/dslr/images/monitor/frame_0007.jpg 15 109 | /home/cep/data/office/domain_adaptation_images/dslr/images/monitor/frame_0008.jpg 15 110 | /home/cep/data/office/domain_adaptation_images/dslr/images/monitor/frame_0009.jpg 15 111 | /home/cep/data/office/domain_adaptation_images/dslr/images/monitor/frame_0010.jpg 15 112 | /home/cep/data/office/domain_adaptation_images/dslr/images/monitor/frame_0011.jpg 15 113 | /home/cep/data/office/domain_adaptation_images/dslr/images/monitor/frame_0012.jpg 15 114 | /home/cep/data/office/domain_adaptation_images/dslr/images/monitor/frame_0013.jpg 15 115 | /home/cep/data/office/domain_adaptation_images/dslr/images/monitor/frame_0014.jpg 15 116 | /home/cep/data/office/domain_adaptation_images/dslr/images/monitor/frame_0015.jpg 15 117 | /home/cep/data/office/domain_adaptation_images/dslr/images/monitor/frame_0016.jpg 15 118 | /home/cep/data/office/domain_adaptation_images/dslr/images/monitor/frame_0017.jpg 15 119 | /home/cep/data/office/domain_adaptation_images/dslr/images/monitor/frame_0018.jpg 15 120 | /home/cep/data/office/domain_adaptation_images/dslr/images/monitor/frame_0019.jpg 15 121 | /home/cep/data/office/domain_adaptation_images/dslr/images/monitor/frame_0020.jpg 15 122 | /home/cep/data/office/domain_adaptation_images/dslr/images/monitor/frame_0021.jpg 15 123 | /home/cep/data/office/domain_adaptation_images/dslr/images/monitor/frame_0022.jpg 15 124 | /home/cep/data/office/domain_adaptation_images/dslr/images/mug/frame_0001.jpg 17 125 | /home/cep/data/office/domain_adaptation_images/dslr/images/mug/frame_0002.jpg 17 126 | /home/cep/data/office/domain_adaptation_images/dslr/images/mug/frame_0003.jpg 17 127 | /home/cep/data/office/domain_adaptation_images/dslr/images/mug/frame_0004.jpg 17 128 | /home/cep/data/office/domain_adaptation_images/dslr/images/mug/frame_0005.jpg 17 129 | /home/cep/data/office/domain_adaptation_images/dslr/images/mug/frame_0006.jpg 17 130 | /home/cep/data/office/domain_adaptation_images/dslr/images/mug/frame_0007.jpg 17 131 | /home/cep/data/office/domain_adaptation_images/dslr/images/mug/frame_0008.jpg 17 132 | /home/cep/data/office/domain_adaptation_images/dslr/images/tape_dispenser/frame_0001.jpg 29 133 | /home/cep/data/office/domain_adaptation_images/dslr/images/tape_dispenser/frame_0002.jpg 29 134 | 
/home/cep/data/office/domain_adaptation_images/dslr/images/tape_dispenser/frame_0003.jpg 29 135 | /home/cep/data/office/domain_adaptation_images/dslr/images/tape_dispenser/frame_0004.jpg 29 136 | /home/cep/data/office/domain_adaptation_images/dslr/images/tape_dispenser/frame_0005.jpg 29 137 | /home/cep/data/office/domain_adaptation_images/dslr/images/tape_dispenser/frame_0006.jpg 29 138 | /home/cep/data/office/domain_adaptation_images/dslr/images/tape_dispenser/frame_0007.jpg 29 139 | /home/cep/data/office/domain_adaptation_images/dslr/images/tape_dispenser/frame_0008.jpg 29 140 | /home/cep/data/office/domain_adaptation_images/dslr/images/tape_dispenser/frame_0009.jpg 29 141 | /home/cep/data/office/domain_adaptation_images/dslr/images/tape_dispenser/frame_0010.jpg 29 142 | /home/cep/data/office/domain_adaptation_images/dslr/images/tape_dispenser/frame_0011.jpg 29 143 | /home/cep/data/office/domain_adaptation_images/dslr/images/tape_dispenser/frame_0012.jpg 29 144 | /home/cep/data/office/domain_adaptation_images/dslr/images/tape_dispenser/frame_0013.jpg 29 145 | /home/cep/data/office/domain_adaptation_images/dslr/images/tape_dispenser/frame_0014.jpg 29 146 | /home/cep/data/office/domain_adaptation_images/dslr/images/tape_dispenser/frame_0015.jpg 29 147 | /home/cep/data/office/domain_adaptation_images/dslr/images/tape_dispenser/frame_0016.jpg 29 148 | /home/cep/data/office/domain_adaptation_images/dslr/images/tape_dispenser/frame_0017.jpg 29 149 | /home/cep/data/office/domain_adaptation_images/dslr/images/tape_dispenser/frame_0018.jpg 29 150 | /home/cep/data/office/domain_adaptation_images/dslr/images/tape_dispenser/frame_0019.jpg 29 151 | /home/cep/data/office/domain_adaptation_images/dslr/images/tape_dispenser/frame_0020.jpg 29 152 | /home/cep/data/office/domain_adaptation_images/dslr/images/tape_dispenser/frame_0021.jpg 29 153 | /home/cep/data/office/domain_adaptation_images/dslr/images/tape_dispenser/frame_0022.jpg 29 154 | /home/cep/data/office/domain_adaptation_images/dslr/images/pen/frame_0001.jpg 19 155 | /home/cep/data/office/domain_adaptation_images/dslr/images/pen/frame_0002.jpg 19 156 | /home/cep/data/office/domain_adaptation_images/dslr/images/pen/frame_0003.jpg 19 157 | /home/cep/data/office/domain_adaptation_images/dslr/images/pen/frame_0004.jpg 19 158 | /home/cep/data/office/domain_adaptation_images/dslr/images/pen/frame_0005.jpg 19 159 | /home/cep/data/office/domain_adaptation_images/dslr/images/pen/frame_0006.jpg 19 160 | /home/cep/data/office/domain_adaptation_images/dslr/images/pen/frame_0007.jpg 19 161 | /home/cep/data/office/domain_adaptation_images/dslr/images/pen/frame_0008.jpg 19 162 | /home/cep/data/office/domain_adaptation_images/dslr/images/pen/frame_0009.jpg 19 163 | /home/cep/data/office/domain_adaptation_images/dslr/images/pen/frame_0010.jpg 19 164 | /home/cep/data/office/domain_adaptation_images/dslr/images/bike/frame_0001.jpg 1 165 | /home/cep/data/office/domain_adaptation_images/dslr/images/bike/frame_0002.jpg 1 166 | /home/cep/data/office/domain_adaptation_images/dslr/images/bike/frame_0003.jpg 1 167 | /home/cep/data/office/domain_adaptation_images/dslr/images/bike/frame_0004.jpg 1 168 | /home/cep/data/office/domain_adaptation_images/dslr/images/bike/frame_0005.jpg 1 169 | /home/cep/data/office/domain_adaptation_images/dslr/images/bike/frame_0006.jpg 1 170 | /home/cep/data/office/domain_adaptation_images/dslr/images/bike/frame_0007.jpg 1 171 | /home/cep/data/office/domain_adaptation_images/dslr/images/bike/frame_0008.jpg 1 172 | 
/home/cep/data/office/domain_adaptation_images/dslr/images/bike/frame_0009.jpg 1 173 | /home/cep/data/office/domain_adaptation_images/dslr/images/bike/frame_0010.jpg 1 174 | /home/cep/data/office/domain_adaptation_images/dslr/images/bike/frame_0011.jpg 1 175 | /home/cep/data/office/domain_adaptation_images/dslr/images/bike/frame_0012.jpg 1 176 | /home/cep/data/office/domain_adaptation_images/dslr/images/bike/frame_0013.jpg 1 177 | /home/cep/data/office/domain_adaptation_images/dslr/images/bike/frame_0014.jpg 1 178 | /home/cep/data/office/domain_adaptation_images/dslr/images/bike/frame_0015.jpg 1 179 | /home/cep/data/office/domain_adaptation_images/dslr/images/bike/frame_0016.jpg 1 180 | /home/cep/data/office/domain_adaptation_images/dslr/images/bike/frame_0017.jpg 1 181 | /home/cep/data/office/domain_adaptation_images/dslr/images/bike/frame_0018.jpg 1 182 | /home/cep/data/office/domain_adaptation_images/dslr/images/bike/frame_0019.jpg 1 183 | /home/cep/data/office/domain_adaptation_images/dslr/images/bike/frame_0020.jpg 1 184 | /home/cep/data/office/domain_adaptation_images/dslr/images/bike/frame_0021.jpg 1 185 | /home/cep/data/office/domain_adaptation_images/dslr/images/punchers/frame_0001.jpg 23 186 | /home/cep/data/office/domain_adaptation_images/dslr/images/punchers/frame_0002.jpg 23 187 | /home/cep/data/office/domain_adaptation_images/dslr/images/punchers/frame_0003.jpg 23 188 | /home/cep/data/office/domain_adaptation_images/dslr/images/punchers/frame_0004.jpg 23 189 | /home/cep/data/office/domain_adaptation_images/dslr/images/punchers/frame_0005.jpg 23 190 | /home/cep/data/office/domain_adaptation_images/dslr/images/punchers/frame_0006.jpg 23 191 | /home/cep/data/office/domain_adaptation_images/dslr/images/punchers/frame_0007.jpg 23 192 | /home/cep/data/office/domain_adaptation_images/dslr/images/punchers/frame_0008.jpg 23 193 | /home/cep/data/office/domain_adaptation_images/dslr/images/punchers/frame_0009.jpg 23 194 | /home/cep/data/office/domain_adaptation_images/dslr/images/punchers/frame_0010.jpg 23 195 | /home/cep/data/office/domain_adaptation_images/dslr/images/punchers/frame_0011.jpg 23 196 | /home/cep/data/office/domain_adaptation_images/dslr/images/punchers/frame_0012.jpg 23 197 | /home/cep/data/office/domain_adaptation_images/dslr/images/punchers/frame_0013.jpg 23 198 | /home/cep/data/office/domain_adaptation_images/dslr/images/punchers/frame_0014.jpg 23 199 | /home/cep/data/office/domain_adaptation_images/dslr/images/punchers/frame_0015.jpg 23 200 | /home/cep/data/office/domain_adaptation_images/dslr/images/punchers/frame_0016.jpg 23 201 | /home/cep/data/office/domain_adaptation_images/dslr/images/punchers/frame_0017.jpg 23 202 | /home/cep/data/office/domain_adaptation_images/dslr/images/punchers/frame_0018.jpg 23 203 | /home/cep/data/office/domain_adaptation_images/dslr/images/back_pack/frame_0001.jpg 0 204 | /home/cep/data/office/domain_adaptation_images/dslr/images/back_pack/frame_0002.jpg 0 205 | /home/cep/data/office/domain_adaptation_images/dslr/images/back_pack/frame_0003.jpg 0 206 | /home/cep/data/office/domain_adaptation_images/dslr/images/back_pack/frame_0004.jpg 0 207 | /home/cep/data/office/domain_adaptation_images/dslr/images/back_pack/frame_0005.jpg 0 208 | /home/cep/data/office/domain_adaptation_images/dslr/images/back_pack/frame_0006.jpg 0 209 | /home/cep/data/office/domain_adaptation_images/dslr/images/back_pack/frame_0007.jpg 0 210 | /home/cep/data/office/domain_adaptation_images/dslr/images/back_pack/frame_0008.jpg 0 211 | 
/home/cep/data/office/domain_adaptation_images/dslr/images/back_pack/frame_0009.jpg 0 212 | /home/cep/data/office/domain_adaptation_images/dslr/images/back_pack/frame_0010.jpg 0 213 | /home/cep/data/office/domain_adaptation_images/dslr/images/back_pack/frame_0011.jpg 0 214 | /home/cep/data/office/domain_adaptation_images/dslr/images/back_pack/frame_0012.jpg 0 215 | /home/cep/data/office/domain_adaptation_images/dslr/images/desktop_computer/frame_0001.jpg 8 216 | /home/cep/data/office/domain_adaptation_images/dslr/images/desktop_computer/frame_0002.jpg 8 217 | /home/cep/data/office/domain_adaptation_images/dslr/images/desktop_computer/frame_0003.jpg 8 218 | /home/cep/data/office/domain_adaptation_images/dslr/images/desktop_computer/frame_0004.jpg 8 219 | /home/cep/data/office/domain_adaptation_images/dslr/images/desktop_computer/frame_0005.jpg 8 220 | /home/cep/data/office/domain_adaptation_images/dslr/images/desktop_computer/frame_0006.jpg 8 221 | /home/cep/data/office/domain_adaptation_images/dslr/images/desktop_computer/frame_0007.jpg 8 222 | /home/cep/data/office/domain_adaptation_images/dslr/images/desktop_computer/frame_0008.jpg 8 223 | /home/cep/data/office/domain_adaptation_images/dslr/images/desktop_computer/frame_0009.jpg 8 224 | /home/cep/data/office/domain_adaptation_images/dslr/images/desktop_computer/frame_0010.jpg 8 225 | /home/cep/data/office/domain_adaptation_images/dslr/images/desktop_computer/frame_0011.jpg 8 226 | /home/cep/data/office/domain_adaptation_images/dslr/images/desktop_computer/frame_0012.jpg 8 227 | /home/cep/data/office/domain_adaptation_images/dslr/images/desktop_computer/frame_0013.jpg 8 228 | /home/cep/data/office/domain_adaptation_images/dslr/images/desktop_computer/frame_0014.jpg 8 229 | /home/cep/data/office/domain_adaptation_images/dslr/images/desktop_computer/frame_0015.jpg 8 230 | /home/cep/data/office/domain_adaptation_images/dslr/images/speaker/frame_0001.jpg 27 231 | /home/cep/data/office/domain_adaptation_images/dslr/images/speaker/frame_0002.jpg 27 232 | /home/cep/data/office/domain_adaptation_images/dslr/images/speaker/frame_0003.jpg 27 233 | /home/cep/data/office/domain_adaptation_images/dslr/images/speaker/frame_0004.jpg 27 234 | /home/cep/data/office/domain_adaptation_images/dslr/images/speaker/frame_0005.jpg 27 235 | /home/cep/data/office/domain_adaptation_images/dslr/images/speaker/frame_0006.jpg 27 236 | /home/cep/data/office/domain_adaptation_images/dslr/images/speaker/frame_0007.jpg 27 237 | /home/cep/data/office/domain_adaptation_images/dslr/images/speaker/frame_0008.jpg 27 238 | /home/cep/data/office/domain_adaptation_images/dslr/images/speaker/frame_0009.jpg 27 239 | /home/cep/data/office/domain_adaptation_images/dslr/images/speaker/frame_0010.jpg 27 240 | /home/cep/data/office/domain_adaptation_images/dslr/images/speaker/frame_0011.jpg 27 241 | /home/cep/data/office/domain_adaptation_images/dslr/images/speaker/frame_0012.jpg 27 242 | /home/cep/data/office/domain_adaptation_images/dslr/images/speaker/frame_0013.jpg 27 243 | /home/cep/data/office/domain_adaptation_images/dslr/images/speaker/frame_0014.jpg 27 244 | /home/cep/data/office/domain_adaptation_images/dslr/images/speaker/frame_0015.jpg 27 245 | /home/cep/data/office/domain_adaptation_images/dslr/images/speaker/frame_0016.jpg 27 246 | /home/cep/data/office/domain_adaptation_images/dslr/images/speaker/frame_0017.jpg 27 247 | /home/cep/data/office/domain_adaptation_images/dslr/images/speaker/frame_0018.jpg 27 248 | 
/home/cep/data/office/domain_adaptation_images/dslr/images/speaker/frame_0019.jpg 27 249 | /home/cep/data/office/domain_adaptation_images/dslr/images/speaker/frame_0020.jpg 27 250 | /home/cep/data/office/domain_adaptation_images/dslr/images/speaker/frame_0021.jpg 27 251 | /home/cep/data/office/domain_adaptation_images/dslr/images/speaker/frame_0022.jpg 27 252 | /home/cep/data/office/domain_adaptation_images/dslr/images/speaker/frame_0023.jpg 27 253 | /home/cep/data/office/domain_adaptation_images/dslr/images/speaker/frame_0024.jpg 27 254 | /home/cep/data/office/domain_adaptation_images/dslr/images/speaker/frame_0025.jpg 27 255 | /home/cep/data/office/domain_adaptation_images/dslr/images/speaker/frame_0026.jpg 27 256 | /home/cep/data/office/domain_adaptation_images/dslr/images/mobile_phone/frame_0001.jpg 14 257 | /home/cep/data/office/domain_adaptation_images/dslr/images/mobile_phone/frame_0002.jpg 14 258 | /home/cep/data/office/domain_adaptation_images/dslr/images/mobile_phone/frame_0003.jpg 14 259 | /home/cep/data/office/domain_adaptation_images/dslr/images/mobile_phone/frame_0004.jpg 14 260 | /home/cep/data/office/domain_adaptation_images/dslr/images/mobile_phone/frame_0005.jpg 14 261 | /home/cep/data/office/domain_adaptation_images/dslr/images/mobile_phone/frame_0006.jpg 14 262 | /home/cep/data/office/domain_adaptation_images/dslr/images/mobile_phone/frame_0007.jpg 14 263 | /home/cep/data/office/domain_adaptation_images/dslr/images/mobile_phone/frame_0008.jpg 14 264 | /home/cep/data/office/domain_adaptation_images/dslr/images/mobile_phone/frame_0009.jpg 14 265 | /home/cep/data/office/domain_adaptation_images/dslr/images/mobile_phone/frame_0010.jpg 14 266 | /home/cep/data/office/domain_adaptation_images/dslr/images/mobile_phone/frame_0011.jpg 14 267 | /home/cep/data/office/domain_adaptation_images/dslr/images/mobile_phone/frame_0012.jpg 14 268 | /home/cep/data/office/domain_adaptation_images/dslr/images/mobile_phone/frame_0013.jpg 14 269 | /home/cep/data/office/domain_adaptation_images/dslr/images/mobile_phone/frame_0014.jpg 14 270 | /home/cep/data/office/domain_adaptation_images/dslr/images/mobile_phone/frame_0015.jpg 14 271 | /home/cep/data/office/domain_adaptation_images/dslr/images/mobile_phone/frame_0016.jpg 14 272 | /home/cep/data/office/domain_adaptation_images/dslr/images/mobile_phone/frame_0017.jpg 14 273 | /home/cep/data/office/domain_adaptation_images/dslr/images/mobile_phone/frame_0018.jpg 14 274 | /home/cep/data/office/domain_adaptation_images/dslr/images/mobile_phone/frame_0019.jpg 14 275 | /home/cep/data/office/domain_adaptation_images/dslr/images/mobile_phone/frame_0020.jpg 14 276 | /home/cep/data/office/domain_adaptation_images/dslr/images/mobile_phone/frame_0021.jpg 14 277 | /home/cep/data/office/domain_adaptation_images/dslr/images/mobile_phone/frame_0022.jpg 14 278 | /home/cep/data/office/domain_adaptation_images/dslr/images/mobile_phone/frame_0023.jpg 14 279 | /home/cep/data/office/domain_adaptation_images/dslr/images/mobile_phone/frame_0024.jpg 14 280 | /home/cep/data/office/domain_adaptation_images/dslr/images/mobile_phone/frame_0025.jpg 14 281 | /home/cep/data/office/domain_adaptation_images/dslr/images/mobile_phone/frame_0026.jpg 14 282 | /home/cep/data/office/domain_adaptation_images/dslr/images/mobile_phone/frame_0027.jpg 14 283 | /home/cep/data/office/domain_adaptation_images/dslr/images/mobile_phone/frame_0028.jpg 14 284 | /home/cep/data/office/domain_adaptation_images/dslr/images/mobile_phone/frame_0029.jpg 14 285 | 
/home/cep/data/office/domain_adaptation_images/dslr/images/mobile_phone/frame_0030.jpg 14 286 | /home/cep/data/office/domain_adaptation_images/dslr/images/mobile_phone/frame_0031.jpg 14 287 | /home/cep/data/office/domain_adaptation_images/dslr/images/paper_notebook/frame_0001.jpg 18 288 | /home/cep/data/office/domain_adaptation_images/dslr/images/paper_notebook/frame_0002.jpg 18 289 | /home/cep/data/office/domain_adaptation_images/dslr/images/paper_notebook/frame_0003.jpg 18 290 | /home/cep/data/office/domain_adaptation_images/dslr/images/paper_notebook/frame_0004.jpg 18 291 | /home/cep/data/office/domain_adaptation_images/dslr/images/paper_notebook/frame_0005.jpg 18 292 | /home/cep/data/office/domain_adaptation_images/dslr/images/paper_notebook/frame_0006.jpg 18 293 | /home/cep/data/office/domain_adaptation_images/dslr/images/paper_notebook/frame_0007.jpg 18 294 | /home/cep/data/office/domain_adaptation_images/dslr/images/paper_notebook/frame_0008.jpg 18 295 | /home/cep/data/office/domain_adaptation_images/dslr/images/paper_notebook/frame_0009.jpg 18 296 | /home/cep/data/office/domain_adaptation_images/dslr/images/paper_notebook/frame_0010.jpg 18 297 | /home/cep/data/office/domain_adaptation_images/dslr/images/ruler/frame_0001.jpg 25 298 | /home/cep/data/office/domain_adaptation_images/dslr/images/ruler/frame_0002.jpg 25 299 | /home/cep/data/office/domain_adaptation_images/dslr/images/ruler/frame_0003.jpg 25 300 | /home/cep/data/office/domain_adaptation_images/dslr/images/ruler/frame_0004.jpg 25 301 | /home/cep/data/office/domain_adaptation_images/dslr/images/ruler/frame_0005.jpg 25 302 | /home/cep/data/office/domain_adaptation_images/dslr/images/ruler/frame_0006.jpg 25 303 | /home/cep/data/office/domain_adaptation_images/dslr/images/ruler/frame_0007.jpg 25 304 | /home/cep/data/office/domain_adaptation_images/dslr/images/letter_tray/frame_0001.jpg 13 305 | /home/cep/data/office/domain_adaptation_images/dslr/images/letter_tray/frame_0002.jpg 13 306 | /home/cep/data/office/domain_adaptation_images/dslr/images/letter_tray/frame_0003.jpg 13 307 | /home/cep/data/office/domain_adaptation_images/dslr/images/letter_tray/frame_0004.jpg 13 308 | /home/cep/data/office/domain_adaptation_images/dslr/images/letter_tray/frame_0005.jpg 13 309 | /home/cep/data/office/domain_adaptation_images/dslr/images/letter_tray/frame_0006.jpg 13 310 | /home/cep/data/office/domain_adaptation_images/dslr/images/letter_tray/frame_0007.jpg 13 311 | /home/cep/data/office/domain_adaptation_images/dslr/images/letter_tray/frame_0008.jpg 13 312 | /home/cep/data/office/domain_adaptation_images/dslr/images/letter_tray/frame_0009.jpg 13 313 | /home/cep/data/office/domain_adaptation_images/dslr/images/letter_tray/frame_0010.jpg 13 314 | /home/cep/data/office/domain_adaptation_images/dslr/images/letter_tray/frame_0011.jpg 13 315 | /home/cep/data/office/domain_adaptation_images/dslr/images/letter_tray/frame_0012.jpg 13 316 | /home/cep/data/office/domain_adaptation_images/dslr/images/letter_tray/frame_0013.jpg 13 317 | /home/cep/data/office/domain_adaptation_images/dslr/images/letter_tray/frame_0014.jpg 13 318 | /home/cep/data/office/domain_adaptation_images/dslr/images/letter_tray/frame_0015.jpg 13 319 | /home/cep/data/office/domain_adaptation_images/dslr/images/letter_tray/frame_0016.jpg 13 320 | /home/cep/data/office/domain_adaptation_images/dslr/images/file_cabinet/frame_0001.jpg 9 321 | /home/cep/data/office/domain_adaptation_images/dslr/images/file_cabinet/frame_0002.jpg 9 322 | 
/home/cep/data/office/domain_adaptation_images/dslr/images/file_cabinet/frame_0003.jpg 9 323 | /home/cep/data/office/domain_adaptation_images/dslr/images/file_cabinet/frame_0004.jpg 9 324 | /home/cep/data/office/domain_adaptation_images/dslr/images/file_cabinet/frame_0005.jpg 9 325 | /home/cep/data/office/domain_adaptation_images/dslr/images/file_cabinet/frame_0006.jpg 9 326 | /home/cep/data/office/domain_adaptation_images/dslr/images/file_cabinet/frame_0007.jpg 9 327 | /home/cep/data/office/domain_adaptation_images/dslr/images/file_cabinet/frame_0008.jpg 9 328 | /home/cep/data/office/domain_adaptation_images/dslr/images/file_cabinet/frame_0009.jpg 9 329 | /home/cep/data/office/domain_adaptation_images/dslr/images/file_cabinet/frame_0010.jpg 9 330 | /home/cep/data/office/domain_adaptation_images/dslr/images/file_cabinet/frame_0011.jpg 9 331 | /home/cep/data/office/domain_adaptation_images/dslr/images/file_cabinet/frame_0012.jpg 9 332 | /home/cep/data/office/domain_adaptation_images/dslr/images/file_cabinet/frame_0013.jpg 9 333 | /home/cep/data/office/domain_adaptation_images/dslr/images/file_cabinet/frame_0014.jpg 9 334 | /home/cep/data/office/domain_adaptation_images/dslr/images/file_cabinet/frame_0015.jpg 9 335 | /home/cep/data/office/domain_adaptation_images/dslr/images/phone/frame_0001.jpg 20 336 | /home/cep/data/office/domain_adaptation_images/dslr/images/phone/frame_0002.jpg 20 337 | /home/cep/data/office/domain_adaptation_images/dslr/images/phone/frame_0003.jpg 20 338 | /home/cep/data/office/domain_adaptation_images/dslr/images/phone/frame_0004.jpg 20 339 | /home/cep/data/office/domain_adaptation_images/dslr/images/phone/frame_0005.jpg 20 340 | /home/cep/data/office/domain_adaptation_images/dslr/images/phone/frame_0006.jpg 20 341 | /home/cep/data/office/domain_adaptation_images/dslr/images/phone/frame_0007.jpg 20 342 | /home/cep/data/office/domain_adaptation_images/dslr/images/phone/frame_0008.jpg 20 343 | /home/cep/data/office/domain_adaptation_images/dslr/images/phone/frame_0009.jpg 20 344 | /home/cep/data/office/domain_adaptation_images/dslr/images/phone/frame_0010.jpg 20 345 | /home/cep/data/office/domain_adaptation_images/dslr/images/phone/frame_0011.jpg 20 346 | /home/cep/data/office/domain_adaptation_images/dslr/images/phone/frame_0012.jpg 20 347 | /home/cep/data/office/domain_adaptation_images/dslr/images/phone/frame_0013.jpg 20 348 | /home/cep/data/office/domain_adaptation_images/dslr/images/bookcase/frame_0001.jpg 3 349 | /home/cep/data/office/domain_adaptation_images/dslr/images/bookcase/frame_0002.jpg 3 350 | /home/cep/data/office/domain_adaptation_images/dslr/images/bookcase/frame_0003.jpg 3 351 | /home/cep/data/office/domain_adaptation_images/dslr/images/bookcase/frame_0004.jpg 3 352 | /home/cep/data/office/domain_adaptation_images/dslr/images/bookcase/frame_0005.jpg 3 353 | /home/cep/data/office/domain_adaptation_images/dslr/images/bookcase/frame_0006.jpg 3 354 | /home/cep/data/office/domain_adaptation_images/dslr/images/bookcase/frame_0007.jpg 3 355 | /home/cep/data/office/domain_adaptation_images/dslr/images/bookcase/frame_0008.jpg 3 356 | /home/cep/data/office/domain_adaptation_images/dslr/images/bookcase/frame_0009.jpg 3 357 | /home/cep/data/office/domain_adaptation_images/dslr/images/bookcase/frame_0010.jpg 3 358 | /home/cep/data/office/domain_adaptation_images/dslr/images/bookcase/frame_0011.jpg 3 359 | /home/cep/data/office/domain_adaptation_images/dslr/images/bookcase/frame_0012.jpg 3 360 | 
/home/cep/data/office/domain_adaptation_images/dslr/images/projector/frame_0001.jpg 22 361 | /home/cep/data/office/domain_adaptation_images/dslr/images/projector/frame_0002.jpg 22 362 | /home/cep/data/office/domain_adaptation_images/dslr/images/projector/frame_0003.jpg 22 363 | /home/cep/data/office/domain_adaptation_images/dslr/images/projector/frame_0004.jpg 22 364 | /home/cep/data/office/domain_adaptation_images/dslr/images/projector/frame_0005.jpg 22 365 | /home/cep/data/office/domain_adaptation_images/dslr/images/projector/frame_0006.jpg 22 366 | /home/cep/data/office/domain_adaptation_images/dslr/images/projector/frame_0007.jpg 22 367 | /home/cep/data/office/domain_adaptation_images/dslr/images/projector/frame_0008.jpg 22 368 | /home/cep/data/office/domain_adaptation_images/dslr/images/projector/frame_0009.jpg 22 369 | /home/cep/data/office/domain_adaptation_images/dslr/images/projector/frame_0010.jpg 22 370 | /home/cep/data/office/domain_adaptation_images/dslr/images/projector/frame_0011.jpg 22 371 | /home/cep/data/office/domain_adaptation_images/dslr/images/projector/frame_0012.jpg 22 372 | /home/cep/data/office/domain_adaptation_images/dslr/images/projector/frame_0013.jpg 22 373 | /home/cep/data/office/domain_adaptation_images/dslr/images/projector/frame_0014.jpg 22 374 | /home/cep/data/office/domain_adaptation_images/dslr/images/projector/frame_0015.jpg 22 375 | /home/cep/data/office/domain_adaptation_images/dslr/images/projector/frame_0016.jpg 22 376 | /home/cep/data/office/domain_adaptation_images/dslr/images/projector/frame_0017.jpg 22 377 | /home/cep/data/office/domain_adaptation_images/dslr/images/projector/frame_0018.jpg 22 378 | /home/cep/data/office/domain_adaptation_images/dslr/images/projector/frame_0019.jpg 22 379 | /home/cep/data/office/domain_adaptation_images/dslr/images/projector/frame_0020.jpg 22 380 | /home/cep/data/office/domain_adaptation_images/dslr/images/projector/frame_0021.jpg 22 381 | /home/cep/data/office/domain_adaptation_images/dslr/images/projector/frame_0022.jpg 22 382 | /home/cep/data/office/domain_adaptation_images/dslr/images/projector/frame_0023.jpg 22 383 | /home/cep/data/office/domain_adaptation_images/dslr/images/stapler/frame_0001.jpg 28 384 | /home/cep/data/office/domain_adaptation_images/dslr/images/stapler/frame_0002.jpg 28 385 | /home/cep/data/office/domain_adaptation_images/dslr/images/stapler/frame_0003.jpg 28 386 | /home/cep/data/office/domain_adaptation_images/dslr/images/stapler/frame_0004.jpg 28 387 | /home/cep/data/office/domain_adaptation_images/dslr/images/stapler/frame_0005.jpg 28 388 | /home/cep/data/office/domain_adaptation_images/dslr/images/stapler/frame_0006.jpg 28 389 | /home/cep/data/office/domain_adaptation_images/dslr/images/stapler/frame_0007.jpg 28 390 | /home/cep/data/office/domain_adaptation_images/dslr/images/stapler/frame_0008.jpg 28 391 | /home/cep/data/office/domain_adaptation_images/dslr/images/stapler/frame_0009.jpg 28 392 | /home/cep/data/office/domain_adaptation_images/dslr/images/stapler/frame_0010.jpg 28 393 | /home/cep/data/office/domain_adaptation_images/dslr/images/stapler/frame_0011.jpg 28 394 | /home/cep/data/office/domain_adaptation_images/dslr/images/stapler/frame_0012.jpg 28 395 | /home/cep/data/office/domain_adaptation_images/dslr/images/stapler/frame_0013.jpg 28 396 | /home/cep/data/office/domain_adaptation_images/dslr/images/stapler/frame_0014.jpg 28 397 | /home/cep/data/office/domain_adaptation_images/dslr/images/stapler/frame_0015.jpg 28 398 | 
/home/cep/data/office/domain_adaptation_images/dslr/images/stapler/frame_0016.jpg 28 399 | /home/cep/data/office/domain_adaptation_images/dslr/images/stapler/frame_0017.jpg 28 400 | /home/cep/data/office/domain_adaptation_images/dslr/images/stapler/frame_0018.jpg 28 401 | /home/cep/data/office/domain_adaptation_images/dslr/images/stapler/frame_0019.jpg 28 402 | /home/cep/data/office/domain_adaptation_images/dslr/images/stapler/frame_0020.jpg 28 403 | /home/cep/data/office/domain_adaptation_images/dslr/images/stapler/frame_0021.jpg 28 404 | /home/cep/data/office/domain_adaptation_images/dslr/images/trash_can/frame_0001.jpg 30 405 | /home/cep/data/office/domain_adaptation_images/dslr/images/trash_can/frame_0002.jpg 30 406 | /home/cep/data/office/domain_adaptation_images/dslr/images/trash_can/frame_0003.jpg 30 407 | /home/cep/data/office/domain_adaptation_images/dslr/images/trash_can/frame_0004.jpg 30 408 | /home/cep/data/office/domain_adaptation_images/dslr/images/trash_can/frame_0005.jpg 30 409 | /home/cep/data/office/domain_adaptation_images/dslr/images/trash_can/frame_0006.jpg 30 410 | /home/cep/data/office/domain_adaptation_images/dslr/images/trash_can/frame_0007.jpg 30 411 | /home/cep/data/office/domain_adaptation_images/dslr/images/trash_can/frame_0008.jpg 30 412 | /home/cep/data/office/domain_adaptation_images/dslr/images/trash_can/frame_0009.jpg 30 413 | /home/cep/data/office/domain_adaptation_images/dslr/images/trash_can/frame_0010.jpg 30 414 | /home/cep/data/office/domain_adaptation_images/dslr/images/trash_can/frame_0011.jpg 30 415 | /home/cep/data/office/domain_adaptation_images/dslr/images/trash_can/frame_0012.jpg 30 416 | /home/cep/data/office/domain_adaptation_images/dslr/images/trash_can/frame_0013.jpg 30 417 | /home/cep/data/office/domain_adaptation_images/dslr/images/trash_can/frame_0014.jpg 30 418 | /home/cep/data/office/domain_adaptation_images/dslr/images/trash_can/frame_0015.jpg 30 419 | /home/cep/data/office/domain_adaptation_images/dslr/images/bike_helmet/frame_0001.jpg 2 420 | /home/cep/data/office/domain_adaptation_images/dslr/images/bike_helmet/frame_0002.jpg 2 421 | /home/cep/data/office/domain_adaptation_images/dslr/images/bike_helmet/frame_0003.jpg 2 422 | /home/cep/data/office/domain_adaptation_images/dslr/images/bike_helmet/frame_0004.jpg 2 423 | /home/cep/data/office/domain_adaptation_images/dslr/images/bike_helmet/frame_0005.jpg 2 424 | /home/cep/data/office/domain_adaptation_images/dslr/images/bike_helmet/frame_0006.jpg 2 425 | /home/cep/data/office/domain_adaptation_images/dslr/images/bike_helmet/frame_0007.jpg 2 426 | /home/cep/data/office/domain_adaptation_images/dslr/images/bike_helmet/frame_0008.jpg 2 427 | /home/cep/data/office/domain_adaptation_images/dslr/images/bike_helmet/frame_0009.jpg 2 428 | /home/cep/data/office/domain_adaptation_images/dslr/images/bike_helmet/frame_0010.jpg 2 429 | /home/cep/data/office/domain_adaptation_images/dslr/images/bike_helmet/frame_0011.jpg 2 430 | /home/cep/data/office/domain_adaptation_images/dslr/images/bike_helmet/frame_0012.jpg 2 431 | /home/cep/data/office/domain_adaptation_images/dslr/images/bike_helmet/frame_0013.jpg 2 432 | /home/cep/data/office/domain_adaptation_images/dslr/images/bike_helmet/frame_0014.jpg 2 433 | /home/cep/data/office/domain_adaptation_images/dslr/images/bike_helmet/frame_0015.jpg 2 434 | /home/cep/data/office/domain_adaptation_images/dslr/images/bike_helmet/frame_0016.jpg 2 435 | /home/cep/data/office/domain_adaptation_images/dslr/images/bike_helmet/frame_0017.jpg 2 436 | 
/home/cep/data/office/domain_adaptation_images/dslr/images/bike_helmet/frame_0018.jpg 2 437 | /home/cep/data/office/domain_adaptation_images/dslr/images/bike_helmet/frame_0019.jpg 2 438 | /home/cep/data/office/domain_adaptation_images/dslr/images/bike_helmet/frame_0020.jpg 2 439 | /home/cep/data/office/domain_adaptation_images/dslr/images/bike_helmet/frame_0021.jpg 2 440 | /home/cep/data/office/domain_adaptation_images/dslr/images/bike_helmet/frame_0022.jpg 2 441 | /home/cep/data/office/domain_adaptation_images/dslr/images/bike_helmet/frame_0023.jpg 2 442 | /home/cep/data/office/domain_adaptation_images/dslr/images/bike_helmet/frame_0024.jpg 2 443 | /home/cep/data/office/domain_adaptation_images/dslr/images/headphones/frame_0001.jpg 10 444 | /home/cep/data/office/domain_adaptation_images/dslr/images/headphones/frame_0002.jpg 10 445 | /home/cep/data/office/domain_adaptation_images/dslr/images/headphones/frame_0003.jpg 10 446 | /home/cep/data/office/domain_adaptation_images/dslr/images/headphones/frame_0004.jpg 10 447 | /home/cep/data/office/domain_adaptation_images/dslr/images/headphones/frame_0005.jpg 10 448 | /home/cep/data/office/domain_adaptation_images/dslr/images/headphones/frame_0006.jpg 10 449 | /home/cep/data/office/domain_adaptation_images/dslr/images/headphones/frame_0007.jpg 10 450 | /home/cep/data/office/domain_adaptation_images/dslr/images/headphones/frame_0008.jpg 10 451 | /home/cep/data/office/domain_adaptation_images/dslr/images/headphones/frame_0009.jpg 10 452 | /home/cep/data/office/domain_adaptation_images/dslr/images/headphones/frame_0010.jpg 10 453 | /home/cep/data/office/domain_adaptation_images/dslr/images/headphones/frame_0011.jpg 10 454 | /home/cep/data/office/domain_adaptation_images/dslr/images/headphones/frame_0012.jpg 10 455 | /home/cep/data/office/domain_adaptation_images/dslr/images/headphones/frame_0013.jpg 10 456 | /home/cep/data/office/domain_adaptation_images/dslr/images/desk_lamp/frame_0001.jpg 7 457 | /home/cep/data/office/domain_adaptation_images/dslr/images/desk_lamp/frame_0002.jpg 7 458 | /home/cep/data/office/domain_adaptation_images/dslr/images/desk_lamp/frame_0003.jpg 7 459 | /home/cep/data/office/domain_adaptation_images/dslr/images/desk_lamp/frame_0004.jpg 7 460 | /home/cep/data/office/domain_adaptation_images/dslr/images/desk_lamp/frame_0005.jpg 7 461 | /home/cep/data/office/domain_adaptation_images/dslr/images/desk_lamp/frame_0006.jpg 7 462 | /home/cep/data/office/domain_adaptation_images/dslr/images/desk_lamp/frame_0007.jpg 7 463 | /home/cep/data/office/domain_adaptation_images/dslr/images/desk_lamp/frame_0008.jpg 7 464 | /home/cep/data/office/domain_adaptation_images/dslr/images/desk_lamp/frame_0009.jpg 7 465 | /home/cep/data/office/domain_adaptation_images/dslr/images/desk_lamp/frame_0010.jpg 7 466 | /home/cep/data/office/domain_adaptation_images/dslr/images/desk_lamp/frame_0011.jpg 7 467 | /home/cep/data/office/domain_adaptation_images/dslr/images/desk_lamp/frame_0012.jpg 7 468 | /home/cep/data/office/domain_adaptation_images/dslr/images/desk_lamp/frame_0013.jpg 7 469 | /home/cep/data/office/domain_adaptation_images/dslr/images/desk_lamp/frame_0014.jpg 7 470 | /home/cep/data/office/domain_adaptation_images/dslr/images/desk_chair/frame_0001.jpg 6 471 | /home/cep/data/office/domain_adaptation_images/dslr/images/desk_chair/frame_0002.jpg 6 472 | /home/cep/data/office/domain_adaptation_images/dslr/images/desk_chair/frame_0003.jpg 6 473 | /home/cep/data/office/domain_adaptation_images/dslr/images/desk_chair/frame_0004.jpg 6 474 | 
/home/cep/data/office/domain_adaptation_images/dslr/images/desk_chair/frame_0005.jpg 6 475 | /home/cep/data/office/domain_adaptation_images/dslr/images/desk_chair/frame_0006.jpg 6 476 | /home/cep/data/office/domain_adaptation_images/dslr/images/desk_chair/frame_0007.jpg 6 477 | /home/cep/data/office/domain_adaptation_images/dslr/images/desk_chair/frame_0008.jpg 6 478 | /home/cep/data/office/domain_adaptation_images/dslr/images/desk_chair/frame_0009.jpg 6 479 | /home/cep/data/office/domain_adaptation_images/dslr/images/desk_chair/frame_0010.jpg 6 480 | /home/cep/data/office/domain_adaptation_images/dslr/images/desk_chair/frame_0011.jpg 6 481 | /home/cep/data/office/domain_adaptation_images/dslr/images/desk_chair/frame_0012.jpg 6 482 | /home/cep/data/office/domain_adaptation_images/dslr/images/desk_chair/frame_0013.jpg 6 483 | /home/cep/data/office/domain_adaptation_images/dslr/images/bottle/frame_0001.jpg 4 484 | /home/cep/data/office/domain_adaptation_images/dslr/images/bottle/frame_0002.jpg 4 485 | /home/cep/data/office/domain_adaptation_images/dslr/images/bottle/frame_0003.jpg 4 486 | /home/cep/data/office/domain_adaptation_images/dslr/images/bottle/frame_0004.jpg 4 487 | /home/cep/data/office/domain_adaptation_images/dslr/images/bottle/frame_0005.jpg 4 488 | /home/cep/data/office/domain_adaptation_images/dslr/images/bottle/frame_0006.jpg 4 489 | /home/cep/data/office/domain_adaptation_images/dslr/images/bottle/frame_0007.jpg 4 490 | /home/cep/data/office/domain_adaptation_images/dslr/images/bottle/frame_0008.jpg 4 491 | /home/cep/data/office/domain_adaptation_images/dslr/images/bottle/frame_0009.jpg 4 492 | /home/cep/data/office/domain_adaptation_images/dslr/images/bottle/frame_0010.jpg 4 493 | /home/cep/data/office/domain_adaptation_images/dslr/images/bottle/frame_0011.jpg 4 494 | /home/cep/data/office/domain_adaptation_images/dslr/images/bottle/frame_0012.jpg 4 495 | /home/cep/data/office/domain_adaptation_images/dslr/images/bottle/frame_0013.jpg 4 496 | /home/cep/data/office/domain_adaptation_images/dslr/images/bottle/frame_0014.jpg 4 497 | /home/cep/data/office/domain_adaptation_images/dslr/images/bottle/frame_0015.jpg 4 498 | /home/cep/data/office/domain_adaptation_images/dslr/images/bottle/frame_0016.jpg 4 499 | --------------------------------------------------------------------------------
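Each line of the list file above pairs an absolute image path with an integer class label (0-30 for the 31 Office categories), separated by a single space. As an illustrative sketch only (the repository's actual loader lives in data_list.py), such a file can be parsed like this:

def read_image_list(list_path):
    """Parse 'path label' lines into (path, int_label) tuples."""
    samples = []
    with open(list_path) as f:
        for line in f:
            line = line.strip()
            if not line:
                continue
            # Split on the last space: the paths in these lists contain no spaces.
            path, label = line.rsplit(' ', 1)
            samples.append((path, int(label)))
    return samples

# e.g. samples = read_image_list('data/office/dslr_list.txt')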