├── MSDA_OH ├── data_list.py ├── image_source.py ├── image_source_sticker.py ├── image_target.py ├── logger.py ├── loss.py ├── network.py ├── run.sh └── utils.py ├── README.md ├── SSDA_OH ├── data_list.py ├── image_source.py ├── image_source_sticker.py ├── image_target.py ├── logger.py ├── loss.py ├── network.py ├── run.sh └── utils.py └── data ├── include_tuples.npy ├── office-home ├── Art_list.txt ├── Clipart_list.txt ├── Product_list.txt ├── RealWorld_list.txt └── classes.txt └── textured ├── Task_1_classes.txt ├── Task_1_mixup.txt ├── Task_2_classes.txt ├── Task_2_mixup.txt └── paste ├── A ├── 1.png ├── 10t.png ├── 11t.png ├── 12t.png ├── 13t.png ├── 14.png ├── 15.png ├── 16.png ├── 17.png ├── 18.png ├── 19.png ├── 20.png ├── 2t.png ├── 3t.png ├── 4t.png ├── 5t.png ├── 6t.png ├── 7t.png ├── 8t.png └── 9t.png ├── H ├── 1.png ├── 10t.png ├── 11t.png ├── 12t.png ├── 13t.png ├── 14.png ├── 15.png ├── 16.png ├── 17.png ├── 18.png ├── 19.png ├── 20.png ├── 2t.png ├── 3t.png ├── 4t.png ├── 5t.png ├── 6t.png ├── 7t.png ├── 8t.png └── 9t.png ├── I ├── 1.png ├── 10t.png ├── 11t.png ├── 12t.png ├── 13t.png ├── 14.png ├── 15.png ├── 16.png ├── 17.png ├── 18.png ├── 19.png ├── 20.png ├── 2t.png ├── 3t.png ├── 4t.png ├── 5t.png ├── 6t.png ├── 7t.png ├── 8t.png └── 9t.png ├── L ├── 1.png ├── 10t.png ├── 11t.png ├── 12t.png ├── 13t.png ├── 14.png ├── 15.png ├── 16.png ├── 17.png ├── 18.png ├── 19.png ├── 20.png ├── 2t.png ├── 3t.png ├── 4t.png ├── 5t.png ├── 6t.png ├── 7t.png ├── 8t.png └── 9t.png ├── N ├── 1.png ├── 10t.png ├── 11t.png ├── 12t.png ├── 13t.png ├── 14.png ├── 15.png ├── 16.png ├── 17.png ├── 18.png ├── 19.png ├── 20.png ├── 2t.png ├── 3t.png ├── 4t.png ├── 5t.png ├── 6t.png ├── 7t.png ├── 8t.png └── 9t.png ├── O ├── 1.png ├── 10t.png ├── 11t.png ├── 12t.png ├── 13t.png ├── 14.png ├── 15.png ├── 16.png ├── 17.png ├── 18.png ├── 19.png ├── 20.png ├── 2t.png ├── 3t.png ├── 4t.png ├── 5t.png ├── 6t.png ├── 7t.png ├── 8t.png └── 9t.png ├── T ├── 1.png ├── 10t.png 
├── 11t.png ├── 12t.png ├── 13t.png ├── 14.png ├── 15.png ├── 16.png ├── 17.png ├── 18.png ├── 19.png ├── 20.png ├── 2t.png ├── 3t.png ├── 4t.png ├── 5t.png ├── 6t.png ├── 7t.png ├── 8t.png └── 9t.png ├── W ├── 1.png ├── 10t.png ├── 11t.png ├── 12t.png ├── 13t.png ├── 14.png ├── 15.png ├── 16.png ├── 17.png ├── 18.png ├── 19.png ├── 20.png ├── 2t.png ├── 3t.png ├── 4t.png ├── 5t.png ├── 6t.png ├── 7t.png ├── 8t.png └── 9t.png ├── X ├── 1.png ├── 10t.png ├── 11t.png ├── 12t.png ├── 13t.png ├── 14.png ├── 15.png ├── 16.png ├── 17.png ├── 18.png ├── 19.png ├── 20.png ├── 2t.png ├── 3t.png ├── 4t.png ├── 5t.png ├── 6t.png ├── 7t.png ├── 8t.png └── 9t.png └── Z ├── 1.png ├── 10t.png ├── 11t.png ├── 12t.png ├── 13t.png ├── 14.png ├── 15.png ├── 16.png ├── 17.png ├── 18.png ├── 19.png ├── 20.png ├── 2t.png ├── 3t.png ├── 4t.png ├── 5t.png ├── 6t.png ├── 7t.png ├── 8t.png └── 9t.png /MSDA_OH/data_list.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import numpy as np 3 | import random 4 | from PIL import Image 5 | from torch.utils.data import Dataset 6 | import os 7 | import os.path 8 | import cv2 9 | import torchvision 10 | 11 | from torchvision import transforms 12 | from torch.utils.data import DataLoader 13 | import torchvision.transforms.functional as F 14 | 15 | 16 | SEED = 2020 17 | torch.manual_seed(SEED) 18 | torch.cuda.manual_seed(SEED) 19 | torch.cuda.manual_seed_all(SEED) 20 | np.random.seed(SEED) 21 | random.seed(SEED) 22 | os.environ['PYTHONHASHSEED'] = str(SEED) 23 | 24 | 25 | def shuffle_2(imgs, labels=None, parts=[6, 6]): 26 | bs = len(imgs) 27 | 28 | if len(imgs) < parts[0]*parts[1]: 29 | imgs = imgs.repeat((parts[0]*parts[1]+bs-1)//bs, 1, 1, 1) 30 | # labels = labels.repeat((parts[0]*parts[1]+bs-1)//bs) 31 | 32 | imgs = imgs[:parts[0]*parts[1]] 33 | # labels = labels[:parts[0]*parts[1]] 34 | # print(imgs.shape,labels.shape) 35 | random.shuffle(parts) 36 | 37 | if labels != None: 38 | 
assert(labels.size(1) > 1) 39 | 40 | ver, hor = parts[0], parts[1] # [2,1] 41 | num = parts[0]*parts[1] 42 | 43 | b = imgs.size(0) 44 | # print(b) 45 | # b=len(imgs) 46 | assert(b%num==0) 47 | 48 | bs_ver = int(imgs.size(2)/ver) 49 | bs_hor = int(imgs.size(3)/hor) 50 | 51 | 52 | if ver!=1: 53 | img1 = [imgs[ :, :, i*bs_ver:(i+1)*bs_ver, :] for i in range(ver-1)] 54 | img1.append(imgs[ :, :, (ver-1)*bs_ver:, :]) 55 | if ver==2: 56 | img1.reverse() 57 | else: 58 | random.shuffle(img1) 59 | img1 = torch.cat(img1, 2) 60 | else: 61 | img1 = imgs 62 | 63 | if hor!=1: 64 | img2 = [img1[:, :, :, i*bs_hor:(i+1)*bs_hor] for i in range(hor-1)] 65 | img2.append(img1[:, :, :, (hor-1)*bs_hor:]) 66 | if hor==2: 67 | img2.reverse() 68 | else: 69 | random.shuffle(img2) 70 | img2 = torch.cat(img2, 3) 71 | else: 72 | img2 = img1 73 | 74 | img3 = torch.stack([img2[i*(b//num):(i+1)*(b//num)] for i in range(num)]) 75 | 76 | if labels != None: 77 | labels1 = torch.stack([labels[i*(b//num):(i+1)*(b//num)] for i in range(num)]) 78 | 79 | mask = torch.zeros_like(img3) 80 | k=0 81 | for i in range(ver): 82 | si = i*bs_ver 83 | ei = (i+1)*bs_ver if i < (ver-1) else imgs.size(2) 84 | for j in range(hor): 85 | sj = j*bs_hor 86 | ej = (j+1)*bs_hor if j < (hor-1) else imgs.size(3) 87 | 88 | mask[k,:,:,si:ei,sj:ej] = 1 89 | k+=1 90 | 91 | img3 = img3*mask 92 | img3 = torch.sum(img3, 0) 93 | if labels != None: 94 | labels2 = torch.sum(labels, 0) 95 | else: 96 | labels2 = None 97 | 98 | 99 | 100 | # print(img3.shape) 101 | 102 | return img3 103 | 104 | def make_dataset(image_list, labels): 105 | images = [] 106 | if labels: 107 | len_ = len(image_list) 108 | images = [(image_list[i].strip(), labels[i, :]) for i in range(len_)] 109 | else: 110 | if len(image_list[0].split()) > 2: 111 | images = [(val.split()[0], np.array([int(la) for la in val.split()[1:]])) for val in image_list] 112 | else: 113 | images = [(val.split()[0], int(val.split()[1])) for val in image_list] 114 | return images 115 | 116 | 
def rgb_loader(path):
    """Open `path` and return it as a 3-channel PIL RGB image."""
    with open(path, 'rb') as f:
        with Image.open(f) as img:
            return img.convert('RGB')


def l_loader(path):
    """Open `path` and return it as a single-channel (grayscale) PIL image."""
    with open(path, 'rb') as f:
        with Image.open(f) as img:
            return img.convert('L')


def rgba_loader(path):
    """Open `path` and return it as a PIL RGBA image (alpha channel kept)."""
    with open(path, 'rb') as f:
        with Image.open(f) as img:
            return img.convert('RGBA')


class ImageList(Dataset):
    """Dataset over the (path, label) tuples produced by make_dataset().

    `specified_len` lets the reported length differ from the true number of
    images (used to keep paired loaders of different sizes in step).
    """

    def __init__(self, image_list, labels=None, transform=None,
                 target_transform=None, mode='RGB', specified_len=None):
        imgs = make_dataset(image_list, labels)  # list of (path, label) tuples
        if len(imgs) == 0:
            # BUG(fixed): the original message referenced undefined names
            # (`root`, `IMG_EXTENSIONS`) and would itself raise NameError.
            raise RuntimeError("Found 0 images in the supplied image list")

        self.imgs = imgs
        self.transform = transform
        self.target_transform = target_transform
        # Pick the pixel-format loader; unknown modes leave self.loader unset
        # (matching the original behavior).
        if mode == 'RGB':
            self.loader = rgb_loader
        elif mode == 'L':
            self.loader = l_loader
        elif mode == "RGBA":
            self.loader = rgba_loader

        if not specified_len:
            self.len = len(self.imgs)
        else:
            self.len = specified_len

    def __getitem__(self, index):
        path, target = self.imgs[index]
        img = self.loader(path)
        if self.transform is not None:
            img = self.transform(img)
        if self.target_transform is not None:
            target = self.target_transform(target)

        return img, target

    def __len__(self):
        return self.len


class ShuffledImageList(Dataset):
    """Like ImageList, but __getitem__ tile-shuffles the image (see shuffle_2)
    and labels it with the fixed "background" class 10.

    NOTE: this definition is cut by the dump; it continues on the next
    flattened line.
    """

    def __init__(self, image_list, labels=None, transform=None,
                 target_transform=None, mode='RGB', specified_len=None):
        imgs = make_dataset(image_list, labels)  # list of (path, label) tuples
        if len(imgs) == 0:
            # BUG(fixed): same undefined-name NameError as in ImageList above.
            raise RuntimeError("Found 0 images in the supplied image list")

        self.imgs = imgs
self.transform = transform 177 | self.target_transform = target_transform 178 | if mode == 'RGB': 179 | self.loader = rgb_loader 180 | elif mode == 'L': 181 | self.loader = l_loader 182 | elif mode == "RGBA": 183 | self.loader = rgba_loader 184 | 185 | if not specified_len: 186 | self.len = len(self.imgs) 187 | else: 188 | self.len = specified_len 189 | 190 | def __getitem__(self, index): 191 | path, target = self.imgs[index] 192 | img = self.loader(path) 193 | # img = img.resize((224, 224)) 194 | if self.transform is not None: 195 | img = self.transform(img) 196 | 197 | img = torch.squeeze(shuffle_2(torch.unsqueeze(img,0)),0) 198 | target = torch.tensor(10) 199 | 200 | return img, target 201 | 202 | def __len__(self): 203 | return self.len 204 | 205 | class ImageList_idx(Dataset): 206 | def __init__(self, image_list, labels=None, transform=None, target_transform=None, mode='RGB'): 207 | imgs = make_dataset(image_list, labels) 208 | if len(imgs) == 0: 209 | raise(RuntimeError("Found 0 images in subfolders of: " + root + "\n" 210 | "Supported image extensions are: " + ",".join(IMG_EXTENSIONS))) 211 | 212 | self.imgs = imgs 213 | self.transform = transform 214 | self.target_transform = target_transform 215 | if mode == 'RGB': 216 | self.loader = rgb_loader 217 | elif mode == 'L': 218 | self.loader = l_loader 219 | 220 | def __getitem__(self, index): 221 | path, target = self.imgs[index] 222 | img = self.loader(path) 223 | if self.transform is not None: 224 | img = self.transform(img) 225 | if self.target_transform is not None: 226 | target = self.target_transform(target) 227 | 228 | return img, target, index 229 | 230 | def __len__(self): 231 | return len(self.imgs) 232 | 233 | class StickerList(Dataset): 234 | def __init__(self, image_list, labels=None, transform=None, target_transform=None, mode='RGB', specified_len = None): 235 | imgs = make_dataset(image_list, labels) # list of tuples 236 | if len(imgs) == 0: 237 | raise(RuntimeError("Found 0 images in subfolders 
of: " + root + "\n" 238 | "Supported image extensions are: " + ",".join(IMG_EXTENSIONS))) 239 | 240 | self.imgs = imgs 241 | self.transform = transform 242 | self.target_transform = target_transform 243 | if mode == 'RGB': 244 | self.loader = rgb_loader 245 | elif mode == 'L': 246 | self.loader = l_loader 247 | elif mode == "RGBA": 248 | self.loader = rgba_loader 249 | 250 | if not specified_len: 251 | self.len = len(self.imgs) 252 | else: 253 | self.len = specified_len 254 | 255 | def __getitem__(self, index): 256 | path, target = self.imgs[random.choice(np.arange(0, len(self.imgs)))] 257 | img = self.loader(path) 258 | 259 | """ 260 | If img is PIL Image, mode “1”, “L”, “I”, “F” and modes with transparency (alpha channel) are not supported. 261 | 262 | Hence, removing the alpha channel, jittering and then putting it back. 263 | """ 264 | 265 | choice = np.random.uniform() 266 | jitter_transform = self.get_hsv_transform(choice) 267 | alpha = img.split()[-1] 268 | jitter_img = jitter_transform(img) 269 | img = Image.new("RGBA", img.size, (255,255,255,0)) 270 | img.paste(jitter_img, mask=alpha) 271 | 272 | # if(random.uniform(0,1) >= 0.5): 273 | # img = transforms.RandomAffine(degrees=30)(img) 274 | # else: 275 | # img = transforms.RandomAffine(degrees=0, shear=(-45,45))(img) 276 | 277 | if self.transform is not None: 278 | img = self.transform(img) 279 | if self.target_transform is not None: 280 | target = self.target_transform(target) 281 | 282 | return img, target 283 | 284 | def get_hsv_transform(self, prob): 285 | if prob <= 0.33: 286 | return transforms.ColorJitter(hue=(-0.5, 0.5)) 287 | elif prob <= 0.66: 288 | return transforms.ColorJitter(brightness=(0.66, 0.88), hue=(-0.5, 0.5)) 289 | else: 290 | return transforms.ColorJitter(saturation=(0.55, 1.0), hue=(-0.5, 0.5)) 291 | 292 | def __len__(self): 293 | return self.len 294 | 295 | class BackGround(Dataset): 296 | def __init__(self, image_list, labels=None, transform=None, target_transform=None, mode='RGB', 
specified_len = None): 297 | self.image_list = image_list 298 | self.paths = os.listdir(image_list) 299 | self.transform=transform 300 | 301 | if mode == 'RGB': 302 | self.loader = rgb_loader 303 | elif mode == 'L': 304 | self.loader = l_loader 305 | elif mode == "RGBA": 306 | self.loader = rgba_loader 307 | 308 | self.len = len(self.paths) 309 | 310 | def __getitem__(self, index): 311 | path = self.paths[index] 312 | img = self.loader(os.path.join(self.image_list, path)) 313 | img = self.transform(img) 314 | label = torch.tensor(10) 315 | 316 | return img, label 317 | 318 | def __len__(self): 319 | return self.len 320 | 321 | 322 | def get_x_y_mixup(img1:Image, img2:Image): 323 | 324 | x_locations = np.arange(start=0, stop=abs(img1.size[0] - img2.size[0]), step=1) 325 | y_locations = np.arange(start=0, stop=abs(img1.size[1] - img2.size[1]), step=1) 326 | x,y = random.choice(x_locations), random.choice(y_locations) 327 | 328 | return x, y 329 | 330 | class Denormalize(object): 331 | def __init__(self, mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), inplace=False): 332 | self.mean = mean 333 | self.demean = [-m/s for m, s in zip(mean, std)] 334 | self.std = std 335 | self.destd = [1/s for s in std] 336 | self.inplace = inplace 337 | 338 | def __call__(self, tensor): 339 | tensor = F.normalize(tensor, self.demean, self.destd, self.inplace) 340 | return torch.clamp(tensor, 0.0, 1.0) 341 | 342 | 343 | def mixup_batch(img_batch, sticker_batch): 344 | """ 345 | Images are denormalized 346 | Both are converted to PIL 347 | Pasted batch is made 348 | Normalization is done 349 | """ 350 | 351 | denormalize = Denormalize() 352 | toPIL = transforms.ToPILImage() 353 | toTens = transforms.ToTensor() 354 | normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], 355 | std=[0.229, 0.224, 0.225]) 356 | 357 | img_batch = denormalize(img_batch) 358 | 359 | start = True 360 | 361 | for i in range(img_batch.shape[0]): 362 | 363 | img = toPIL(img_batch[i]) 364 | img = 
cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR) 365 | 366 | 367 | sticker = toPIL(sticker_batch[i]).convert('RGBA') 368 | #j+=1 369 | #j=0 370 | #sticker = toPIL(sticker_batch[j]).convert('RGBA') 371 | white_img = Image.new("RGBA", (224, 224), (0, 0, 0, 0)) 372 | percent_sticker = random.choice([0.1, 0.15, 0.2, 0.25, 0.3 ,0.35, 0.4]) 373 | sticker = sticker.resize((round(224 * percent_sticker), round(224 * percent_sticker))) 374 | x,y = get_x_y_mixup(white_img, sticker) 375 | white_img.paste(sticker, (x,y), sticker) 376 | white_img = cv2.cvtColor(np.array(white_img), cv2.COLOR_RGB2BGR) 377 | alpha = np.random.uniform(0.3, 0.7) 378 | 379 | result = np.around(alpha *img + (1-alpha)*white_img).astype(np.uint8) 380 | result[white_img==0] = img[white_img == 0] 381 | mixed_up = Image.fromarray(cv2.cvtColor(result, cv2.COLOR_BGR2RGB)) 382 | 383 | mixed_up = normalize(toTens(mixed_up)).unsqueeze(0) 384 | 385 | if start: 386 | pasted_batch = mixed_up 387 | start = False 388 | else: 389 | pasted_batch = torch.cat((pasted_batch, mixed_up), 0) 390 | 391 | return pasted_batch 392 | 393 | -------------------------------------------------------------------------------- /MSDA_OH/image_source.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | #from object.utils import AverageMeter 3 | import os, sys 4 | import os.path as osp 5 | import torchvision 6 | import numpy as np 7 | import torch 8 | import torch.nn as nn 9 | import torch.optim as optim 10 | from torchvision import transforms 11 | import network, loss 12 | from torch.utils.data import DataLoader 13 | from data_list import ImageList, StickerList, mixup_batch 14 | import random, pdb, math, copy 15 | from tqdm import tqdm 16 | from logger import get_logger 17 | from loss import CrossEntropyLabelSmooth 18 | from scipy.spatial.distance import cdist 19 | from sklearn.metrics import confusion_matrix 20 | from sklearn.cluster import KMeans 21 | 22 | from utils import * 23 | 24 
| def op_copy(optimizer): 25 | for param_group in optimizer.param_groups: 26 | param_group['lr0'] = param_group['lr'] 27 | return optimizer 28 | 29 | def lr_scheduler(optimizer, iter_num, max_iter, gamma=10, power=0.75): 30 | decay = (1 + gamma * iter_num / max_iter) ** (-power) 31 | for param_group in optimizer.param_groups: 32 | param_group['lr'] = param_group['lr0'] * decay 33 | param_group['weight_decay'] = 1e-3 34 | param_group['momentum'] = 0.9 35 | param_group['nesterov'] = True 36 | return optimizer 37 | 38 | 39 | def image_test(resize_size=256, crop_size=224, alexnet=False): 40 | if not alexnet: 41 | normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], 42 | std=[0.229, 0.224, 0.225]) 43 | else: 44 | normalize = transforms.Normalize(meanfile='./ilsvrc_2012_mean.npy') 45 | return transforms.Compose([ 46 | transforms.Resize((resize_size, resize_size)), 47 | transforms.CenterCrop(crop_size), 48 | transforms.ToTensor(), 49 | normalize 50 | ]) 51 | 52 | def data_load(args): 53 | dset_loaders = {} 54 | train_bs = args.batch_size 55 | 56 | with open(args.s0_dset_path) as f_src0: 57 | txt_src0 = f_src0.readlines() 58 | with open(args.s1_dset_path) as f_src1: 59 | txt_src1 = f_src1.readlines() 60 | with open(args.s2_dset_path) as f_src2: 61 | txt_src2 = f_src2.readlines() 62 | txt_src = txt_src0 + txt_src1 + txt_src2 63 | 64 | with open(args.test_dset_path) as f_test: 65 | txt_test = f_test.readlines() 66 | with open(args.task_1_path) as f_t1: 67 | task_1_list = f_t1.readlines() 68 | 69 | dsize = len(txt_src) 70 | tr_size = int(0.9*dsize) 71 | tr_txt, val_txt = torch.utils.data.random_split(txt_src, [tr_size, dsize - tr_size]) 72 | 73 | train_dataset = ImageList(tr_txt, transform=image_train()) 74 | dset_loaders["train_images"] = DataLoader(train_dataset, batch_size=train_bs, shuffle=True, num_workers=args.worker,drop_last=False, pin_memory = True) 75 | 76 | test_dataset = ImageList(txt_test, transform=image_test()) 77 | dset_loaders["test_images"] = 
DataLoader(test_dataset, batch_size=train_bs, shuffle=False, num_workers=args.worker,drop_last=False, pin_memory = True) 78 | 79 | val_dataset = ImageList(val_txt, transform=image_test()) 80 | dset_loaders["val_images"] = DataLoader(val_dataset, batch_size=train_bs, shuffle=False, num_workers=args.worker,drop_last=False, pin_memory = True) 81 | 82 | train_sticker_dataset = StickerList(task_1_list, transform=sticker_train(), mode="RGBA", specified_len = len(train_dataset)) 83 | dset_loaders["train_mixup_stickers"] = DataLoader(train_sticker_dataset, batch_size=train_bs//2, shuffle=True, num_workers=0, drop_last=True, pin_memory = True) 84 | 85 | return dset_loaders 86 | 87 | 88 | def cal_acc(args, loader, netBB, netMF, netMB, netMC, flag=False): 89 | start_test = True 90 | with torch.no_grad(): 91 | iter_test = iter(loader) 92 | data_bar = tqdm(range(len(loader))) 93 | for i in data_bar: 94 | data = iter_test.next() 95 | inputs = data[0] 96 | labels = data[1] 97 | inputs = inputs.cuda() 98 | outputs = netMC(netMB(netMF(netBB(inputs)))) 99 | 100 | data_bar.set_description("{} : Step:{} ".format(args.model, i)) 101 | 102 | if start_test: 103 | all_output = outputs.float().cpu() 104 | all_label = labels.float() 105 | start_test = False 106 | else: 107 | all_output = torch.cat((all_output, outputs.float().cpu()), 0) 108 | all_label = torch.cat((all_label, labels.float()), 0) 109 | _, predict = torch.max(all_output, 1) 110 | accuracy = torch.sum(torch.squeeze(predict).float() == all_label).item() / float(all_label.size()[0]) 111 | mean_ent = torch.mean(loss.Entropy(nn.Softmax(dim=1)(all_output))).cpu().data.item() 112 | 113 | if flag: 114 | matrix = confusion_matrix(all_label, torch.squeeze(predict).float()) 115 | acc = matrix.diagonal()/matrix.sum(axis=1) * 100 116 | aacc = acc.mean() 117 | aa = [str(np.round(i, 2)) for i in acc] 118 | acc = ' '.join(aa) 119 | return aacc, acc 120 | else: 121 | return accuracy*100, mean_ent 122 | 123 | def train_source(args): 124 | 
dset_loaders = data_load(args) 125 | ## set base network 126 | if args.net[0:3] == 'res': 127 | netBB = network.ResBase(res_name=args.net).cuda() 128 | netMF = network.ResBase_Layer4(res_name=args.net).cuda() 129 | 130 | netMB = network.feat_bootleneck(type=args.classifier, feature_dim=netMF.in_features, bottleneck_dim=args.bottleneck).cuda() 131 | netMC = network.feat_classifier(type=args.layer, class_num = args.class_num, bottleneck_dim=args.bottleneck).cuda() 132 | 133 | param_group = [] 134 | learning_rate = args.lr 135 | for k, v in netBB.named_parameters(): 136 | param_group += [{'params': v, 'lr': learning_rate*0.1}] 137 | for k, v in netMF.named_parameters(): 138 | param_group += [{'params': v, 'lr': learning_rate*0.1}] 139 | for k, v in netMB.named_parameters(): 140 | param_group += [{'params': v, 'lr': learning_rate}] 141 | for k, v in netMC.named_parameters(): 142 | param_group += [{'params': v, 'lr': learning_rate}] 143 | 144 | optimizer = optim.SGD(param_group) 145 | optimizer = op_copy(optimizer) 146 | 147 | 148 | acc_init = 0 149 | max_iter = args.max_epoch * len(dset_loaders["train_images"]) 150 | interval_iter = max_iter // 10 151 | save_interval = len(dset_loaders["train_images"]) 152 | iter_num = 0 153 | 154 | netBB.train() 155 | netMF.train() 156 | netMB.train() 157 | netMC.train() 158 | 159 | accumulated_train_acc = AverageMeter() 160 | accumulated_train_loss = AverageMeter() 161 | 162 | accumulated_train_acc_mixed_up = AverageMeter() 163 | accumulated_train_loss_mixed_up = AverageMeter() 164 | 165 | train_data_bar = tqdm(range(max_iter)) 166 | iter_num = 0 167 | for step_i in train_data_bar: 168 | 169 | try: 170 | clean_images, clean_labels = iter_clean.next() 171 | except: 172 | iter_clean = iter(dset_loaders["train_images"]) 173 | clean_images, clean_labels = iter_clean.next() 174 | 175 | try: 176 | mixup_stickers, mixup_labels = iter_sticker.next() 177 | except: 178 | iter_sticker = iter(dset_loaders["train_mixup_stickers"]) 179 | 
mixup_stickers, mixup_labels = iter_sticker.next() 180 | 181 | if clean_images.size(0) == 1: 182 | continue 183 | 184 | 185 | iter_num += 1 186 | lr_scheduler(optimizer, iter_num=iter_num, max_iter=max_iter) 187 | len_clean_imgs = len(clean_images) 188 | clean_images1, clean_labels1 = clean_images.cuda()[:len_clean_imgs//2], clean_labels.cuda()[:len_clean_imgs//2] 189 | mixed_up_images, mixed_up_labels = mixup_batch(clean_images[len_clean_imgs//2:], mixup_stickers).cuda(), clean_labels.cuda()[len_clean_imgs//2:] 190 | 191 | images = torch.cat([clean_images1, mixed_up_images]) 192 | labels = torch.cat([clean_labels1, mixed_up_labels]) 193 | 194 | outputs = netMC(netMB(netMF(netBB(images)))) 195 | 196 | classifier_loss = CrossEntropyLabelSmooth(num_classes=args.class_num, epsilon=args.smooth)(outputs, labels) 197 | 198 | optimizer.zero_grad() 199 | classifier_loss.backward() 200 | optimizer.step() 201 | 202 | accumulated_train_loss.update(classifier_loss.item()) 203 | 204 | train_acc = torch.sum(torch.squeeze(torch.max(outputs, 1)[1]).float() == labels).item() / float(labels.size()[0]) * 100 205 | accumulated_train_acc.update(train_acc) 206 | 207 | train_data_bar.set_description("Train: Steps:{} , Loss:{:.4f} , Acc:{:.4f}".format(iter_num, accumulated_train_loss.avg, accumulated_train_acc.avg)) 208 | if ((iter_num) % 500 == 0) or (iter_num == max_iter): 209 | netBB.eval() 210 | netMF.eval() 211 | netMB.eval() 212 | netMC.eval() 213 | 214 | 215 | args.model = "Val" 216 | val_log = cal_acc(args, dset_loaders['val_images'], netBB, netMF, netMB, netMC, False) 217 | args.model = "Test" 218 | test_log= cal_acc(args, dset_loaders['test_images'], netBB, netMF, netMB, netMC, False) 219 | 220 | # Training Logs 221 | args.logger.info("Train : Step:[{}/{}] , Loss:{} , Acc:{} ".format(iter_num , max_iter, 222 | accumulated_train_loss.avg, accumulated_train_acc.avg)) 223 | # Validation Logs 224 | args.logger.info("Val: Acc : {}".format(val_log[0])) 225 | # Test Logs 226 | 
args.logger.info("Test: Acc : {}".format(test_log[0])) 227 | 228 | if val_log[0] >= acc_init: 229 | acc_init = val_log[0] 230 | args.logger.info("Update Best Mean Acc: {}".format(acc_init)) 231 | best_netBB = netBB.state_dict() 232 | best_netMF = netMF.state_dict() 233 | best_netMB = netMB.state_dict() 234 | best_netMC = netMC.state_dict() 235 | 236 | torch.save({'netBB':best_netBB, 237 | 'netMF':best_netMF, 238 | 'netMB':best_netMB, 239 | 'netMC':best_netMC}, osp.join(args.output_dir_src, "model.pt")) 240 | 241 | 242 | accumulated_train_acc = AverageMeter() 243 | accumulated_train_loss = AverageMeter() 244 | netBB.train() 245 | netMF.train() 246 | netMB.train() 247 | netMC.train() 248 | 249 | 250 | 251 | def test_target(args): 252 | dset_loaders = data_load(args) 253 | ## set base network 254 | if args.net[0:3] == 'res': 255 | netF = network.ResBase(res_name=args.net).cuda() 256 | elif args.net[0:3] == 'vgg': 257 | netF = network.VGGBase(vgg_name=args.net).cuda() 258 | 259 | netB = network.feat_bootleneck(type=args.classifier, feature_dim=netF.in_features, bottleneck_dim=args.bottleneck).cuda() 260 | netC = network.feat_classifier(type=args.layer, class_num = args.class_num, bottleneck_dim=args.bottleneck).cuda() 261 | 262 | args.modelpath = args.output_dir_src + '/source_F.pt' 263 | netF.load_state_dict(torch.load(args.modelpath)) 264 | args.modelpath = args.output_dir_src + '/source_B.pt' 265 | netB.load_state_dict(torch.load(args.modelpath)) 266 | args.modelpath = args.output_dir_src + '/source_C.pt' 267 | netC.load_state_dict(torch.load(args.modelpath)) 268 | netF.eval() 269 | netB.eval() 270 | netC.eval() 271 | 272 | if args.da == 'oda': 273 | acc_os1, acc_os2, acc_unknown = cal_acc_oda(dset_loaders['test'], netF, netB, netC) 274 | log_str = '\nTraining: {}, Task: {}, Accuracy = {:.2f}% / {:.2f}% / {:.2f}%'.format(args.trte, args.name, acc_os2, acc_os1, acc_unknown) 275 | else: 276 | if args.dset=='VISDA-C': 277 | acc, acc_list = cal_acc(dset_loaders['test'], 
netF, netB, netC, True) 278 | log_str = '\nTraining: {}, Task: {}, Accuracy = {:.2f}%'.format(args.trte, args.name, acc) + '\n' + acc_list 279 | else: 280 | acc, _ = cal_acc(dset_loaders['test'], netF, netB, netC, False) 281 | log_str = '\nTraining: {}, Task: {}, Accuracy = {:.2f}%'.format(args.trte, args.name, acc) 282 | 283 | args.out_file.write(log_str) 284 | args.out_file.flush() 285 | print(log_str) 286 | 287 | def print_args(args): 288 | s = "==========================================\n" 289 | for arg, content in args.__dict__.items(): 290 | s += "{}:{}\n".format(arg, content) 291 | return s 292 | 293 | if __name__ == "__main__": 294 | parser = argparse.ArgumentParser(description='SHOT') 295 | parser.add_argument('--gpu_id', type=str, nargs='?', default='0', help="device id to run") 296 | parser.add_argument('--s', type=int, default=[0,2,3], nargs = 3, help="source") 297 | parser.add_argument('--t', type=int, default=1, help="target") 298 | parser.add_argument('--max_epoch', type=int, default=100, help="max iterations") 299 | parser.add_argument('--batch_size', type=int, default=64, help="batch_size") 300 | parser.add_argument('--worker', type=int, default=4, help="number of workers") 301 | parser.add_argument('--dset', type=str, default='office-home', choices=['VISDA-C', 'office', 'office-home', 'office-caltech']) 302 | parser.add_argument('--lr', type=float, default=1e-2, help="learning rate") 303 | parser.add_argument('--net', type=str, default='resnet50', help="vgg16, resnet50, resnet101") 304 | parser.add_argument('--seed', type=int, default=2020, help="random seed") 305 | parser.add_argument('--bottleneck', type=int, default=256) 306 | parser.add_argument('--epsilon', type=float, default=1e-5) 307 | parser.add_argument('--layer', type=str, default="wn", choices=["linear", "wn"]) 308 | parser.add_argument('--classifier', type=str, default="bn", choices=["ori", "bn"]) 309 | parser.add_argument('--smooth', type=float, default=0.1) 310 | 
parser.add_argument('--output', type=str, default='san') 311 | parser.add_argument('--da', type=str, default='uda', choices=['uda', 'pda', 'oda']) 312 | parser.add_argument('--trte', type=str, default='val', choices=['full', 'val']) 313 | parser.add_argument('--task_1_path', type=str, default='../data/textured/Task_1_mixup.txt') 314 | parser.add_argument('--task_1_class_num', type=int, default=10) 315 | 316 | args = parser.parse_args() 317 | 318 | if args.dset == 'office-home': 319 | names = ['Art', 'Clipart', 'Product', 'RealWorld'] 320 | args.class_num = 65 321 | if args.dset == 'office': 322 | names = ['amazon', 'dslr', 'webcam'] 323 | args.class_num = 31 324 | if args.dset == 'VISDA-C': 325 | names = ['train', 'validation'] 326 | args.class_num = 12 327 | if args.dset == 'office-caltech': 328 | names = ['amazon', 'caltech', 'dslr', 'webcam'] 329 | args.class_num = 10 330 | args.sticker_class = 11 331 | os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_id 332 | SEED = args.seed 333 | torch.manual_seed(SEED) 334 | torch.cuda.manual_seed(SEED) 335 | np.random.seed(SEED) 336 | random.seed(SEED) 337 | 338 | folder = '../data/' 339 | args.s0_dset_path = folder + args.dset + '/' + names[args.s[0]] + '_list.txt' 340 | args.s1_dset_path = folder + args.dset + '/' + names[args.s[1]] + '_list.txt' 341 | args.s2_dset_path = folder + args.dset + '/' + names[args.s[2]] + '_list.txt' 342 | args.test_dset_path = folder + args.dset + '/' + names[args.t] + '_list.txt' 343 | 344 | args.task_1_classes = "../data/textured/Task_1_classes.txt" 345 | args.task_3_classes = "../data/" + args.dset + "/classes.txt" 346 | 347 | if args.dset == 'office-home': 348 | if args.da == 'pda': 349 | args.class_num = 65 350 | args.src_classes = [i for i in range(65)] 351 | args.tar_classes = [i for i in range(25)] 352 | if args.da == 'oda': 353 | args.class_num = 25 354 | args.src_classes = [i for i in range(25)] 355 | args.tar_classes = [i for i in range(65)] 356 | 357 | args.output_dir_src = 
osp.join('Checkpoint_main', args.dset, names[args.t].upper()) 358 | if not osp.exists(args.output_dir_src): 359 | os.system('mkdir -p ' + args.output_dir_src) 360 | if not osp.exists(args.output_dir_src): 361 | os.mkdir(args.output_dir_src) 362 | 363 | log_file = args.output_dir_src + '/log.txt' 364 | args.logger = get_logger('__train__', log_file) 365 | 366 | train_source(args) 367 | -------------------------------------------------------------------------------- /MSDA_OH/image_source_sticker.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | #from object.utils import AverageMeter 3 | import os, sys 4 | import os.path as osp 5 | import torchvision 6 | import numpy as np 7 | import torch 8 | import torch.nn as nn 9 | import torch.optim as optim 10 | from torchvision import transforms 11 | import network 12 | from torch.utils.data import DataLoader 13 | from data_list import ImageList, StickerList, ShuffledImageList, mixup_batch 14 | import random, pdb, math, copy 15 | from tqdm import tqdm 16 | from logger import get_logger 17 | from loss import CrossEntropyLabelSmooth 18 | from scipy.spatial.distance import cdist 19 | from sklearn.metrics import confusion_matrix 20 | from sklearn.cluster import KMeans 21 | 22 | from utils import * 23 | 24 | def op_copy(optimizer): 25 | for param_group in optimizer.param_groups: 26 | param_group['lr0'] = param_group['lr'] 27 | return optimizer 28 | 29 | def lr_scheduler(optimizer, iter_num, max_iter, gamma=10, power=0.75): 30 | decay = (1 + gamma * iter_num / max_iter) ** (-power) 31 | for param_group in optimizer.param_groups: 32 | param_group['lr'] = param_group['lr0'] * decay 33 | param_group['weight_decay'] = 1e-3 34 | param_group['momentum'] = 0.9 35 | param_group['nesterov'] = True 36 | return optimizer 37 | 38 | 39 | def image_test(resize_size=256, crop_size=224, alexnet=False): 40 | if not alexnet: 41 | normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], 42 
| std=[0.229, 0.224, 0.225]) 43 | else: 44 | normalize = transforms.Normalize(meanfile='./ilsvrc_2012_mean.npy') 45 | return transforms.Compose([ 46 | transforms.Resize((resize_size, resize_size)), 47 | transforms.CenterCrop(crop_size), 48 | transforms.ToTensor(), 49 | normalize 50 | ]) 51 | 52 | def data_load(args): 53 | dset_loaders = {} 54 | train_bs = args.batch_size 55 | 56 | with open(args.s0_dset_path) as f_src0: 57 | txt_src0 = f_src0.readlines() 58 | with open(args.s1_dset_path) as f_src1: 59 | txt_src1 = f_src1.readlines() 60 | with open(args.s2_dset_path) as f_src2: 61 | txt_src2 = f_src2.readlines() 62 | txt_src = txt_src0 + txt_src1 + txt_src2 63 | 64 | with open(args.test_dset_path) as f_test: 65 | txt_test = f_test.readlines() 66 | with open(args.task_1_path) as f_t1: 67 | task_1_list = f_t1.readlines() 68 | 69 | 70 | dsize = len(txt_src) 71 | tr_size = int(0.9*dsize) 72 | tr_txt, val_txt = torch.utils.data.random_split(txt_src, [tr_size, dsize - tr_size]) 73 | 74 | train_dataset = ImageList(tr_txt, transform=image_train()) 75 | dset_loaders["train_clean_images"] = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, num_workers=args.worker,drop_last=False, pin_memory = False) 76 | 77 | test_dataset = ImageList(txt_test, transform=image_test()) 78 | dset_loaders["test_images"] = DataLoader(test_dataset, batch_size=args.batch_size, shuffle=False, num_workers=args.worker,drop_last=False, pin_memory = False) 79 | 80 | val_dataset = ImageList(val_txt, transform=image_test()) 81 | dset_loaders["val_images"] = DataLoader(val_dataset, batch_size=args.batch_size, shuffle=False, num_workers=args.worker,drop_last=False, pin_memory = False) 82 | 83 | bg_dataset = ShuffledImageList(tr_txt, transform=image_train()) 84 | dset_loaders["train_bg_images"] = DataLoader(bg_dataset, batch_size=args.batch_size_bg, shuffle=True, num_workers=args.worker,drop_last=True, pin_memory = False) 85 | 86 | train_sticker_dataset = StickerList(task_1_list, 
def _next_batch(data_iter, loader):
    """Return (batch, iterator), restarting ``loader`` when ``data_iter`` is exhausted."""
    try:
        return next(data_iter), data_iter
    except StopIteration:
        data_iter = iter(loader)
        return next(data_iter), data_iter


def cal_acc_oda(args, clean_loader, sticker_loader, bg_loader, netBB, netF, netB, netC, weights):
    """Evaluate the sticker branch on clean+sticker and background batches.

    Mixes stickers onto clean and background images, runs the frozen
    backbone + sticker head, and accumulates the smoothed cross-entropy
    loss and predictions over ``len(clean_loader)`` steps.

    Returns:
        (avg loss, per-class accuracies excluding the last/OOD class,
         mean of those class accuracies, OOD-class accuracy,
         mean over all classes).

    BUG FIX: the original called ``iterator.next()`` — an API that no longer
    exists on Python-3 / modern DataLoader iterators — inside a bare
    ``except:`` that also swallowed KeyboardInterrupt and CUDA errors.
    Restarting is now done explicitly on StopIteration only.
    """
    start_test = True
    accumulated_loss = AverageMeter()

    with torch.no_grad():
        iter_clean_val = iter(clean_loader)
        iter_sticker_val = iter(sticker_loader)
        iter_bg_val = iter(bg_loader)
        data_bar = tqdm(range(len(clean_loader)))
        for i in data_bar:
            (mixup_stickers, mixup_labels), iter_sticker_val = _next_batch(iter_sticker_val, sticker_loader)
            (clean_images, clean_labels), iter_clean_val = _next_batch(iter_clean_val, clean_loader)
            (bg_images, bg_labels), iter_bg_val = _next_batch(iter_bg_val, bg_loader)

            clean_images = clean_images.cuda()

            # Stickers are pasted on the clean images; the sticker class is the label.
            mixed_up, mixed_up_labels = mixup_batch(clean_images, mixup_stickers[:args.batch_size]).cuda(), mixup_labels.cuda()[:len(clean_images)]
            # First half of the background batch stays clean, second half gets stickers.
            bg_images1, bg_labels1 = bg_images.cuda()[:args.batch_size_bg//2], bg_labels[:args.batch_size_bg//2].cuda()
            bg_mix_up, bg_mix_labels = mixup_batch(bg_images[args.batch_size_bg//2:], mixup_stickers[args.batch_size:]).cuda(), bg_labels[args.batch_size_bg//2:].cuda()

            inputs = torch.cat((mixed_up, bg_images1, bg_mix_up), 0)
            labels = torch.cat((mixed_up_labels, bg_labels1, bg_mix_labels))
            outputs = netC(netB(netF(netBB(inputs))))

            loss = CrossEntropyLabelSmooth(num_classes=args.sticker_num, weights=weights,
                                           epsilon=args.smooth, reduction=True)(outputs, labels)

            accumulated_loss.update(loss.item())
            data_bar.set_description("{} : Step:{}, Class_loss:{:.4f}".format(args.model, i, loss))
            if start_test:
                all_output = outputs.float().cpu()
                all_label = labels.float()
                start_test = False
            else:
                all_output = torch.cat((all_output, outputs.float().cpu()), 0)
                all_label = torch.cat((all_label, labels.float()), 0)

    all_output = nn.Softmax(dim=1)(all_output)
    _, predict = torch.max(all_output, 1)

    all_label = all_label.cpu()
    # Keep only rows for labels that actually occurred in the sweep.
    matrix = confusion_matrix(all_label, torch.squeeze(predict).float())
    matrix = matrix[np.unique(all_label).astype(int), :]

    acc = matrix.diagonal() / matrix.sum(axis=1) * 100
    # Last class is the OOD / "no sticker" class.
    ood_acc = acc[-1:].item()

    return accumulated_loss.avg, acc[:-1], np.mean(acc[:-1]), ood_acc, np.mean(acc)
modelpath = args.stored_model_dir_src + '/model.pt' 177 | netBB.load_state_dict(torch.load(modelpath)['netBB']) 178 | netSF.load_state_dict(torch.load(modelpath)['netMF']) 179 | netSB.load_state_dict(torch.load(modelpath)['netMB']) 180 | 181 | netBB.eval() 182 | for k, v in netBB.named_parameters(): 183 | v.requires_grad = False 184 | 185 | acc_init = 0 186 | max_iter = args.max_epoch * len(dset_loaders["train_clean_images"]) 187 | interval_iter = max_iter // 10 188 | iter_num = 0 189 | start_test = True 190 | 191 | netSF.train() 192 | netSB.train() 193 | netSC.train() 194 | 195 | accumulated_train_acc = AverageMeter() 196 | accumulated_train_loss = AverageMeter() 197 | 198 | weights = torch.ones((1,11)) 199 | weights[0, -1] = 0.01 200 | weights = weights.cuda() 201 | 202 | train_data_bar = tqdm(range(max_iter)) 203 | iter_num = 0 204 | for step_i in train_data_bar: 205 | if iter_num > max_iter: 206 | break 207 | try: 208 | clean_images, clean_labels = iter_clean.next() 209 | except: 210 | iter_clean = iter(dset_loaders["train_clean_images"]) 211 | clean_images, clean_labels = iter_clean.next() 212 | try: 213 | bg_images, bg_labels = iter_bg.next() 214 | except: 215 | iter_bg = iter(dset_loaders["train_bg_images"]) 216 | bg_images, bg_labels = iter_bg.next() 217 | try: 218 | mixup_stickers, mixup_labels = iter_sticker.next() 219 | except: 220 | iter_sticker = iter(dset_loaders["train_mixup_stickers"]) 221 | mixup_stickers, mixup_labels = iter_sticker.next() 222 | 223 | if clean_images.size(0) == 1: 224 | continue 225 | 226 | 227 | iter_num += 1 228 | lr_scheduler(optimizer, iter_num=iter_num, max_iter=max_iter) 229 | clean_images = clean_images.cuda() 230 | mixed_up_images, mixed_up_labels = mixup_batch(clean_images, mixup_stickers[:args.batch_size]).cuda(), mixup_labels.cuda()[:len(clean_images)] 231 | 232 | bg_images1, bg_labels1 = bg_images.cuda()[:args.batch_size_bg//2], bg_labels.cuda()[:args.batch_size_bg//2] 233 | bg_mix, bg_mix_labels = 
mixup_batch(bg_images[args.batch_size_bg//2:],mixup_stickers[args.batch_size:]).cuda(), bg_labels[args.batch_size_bg//2:].cuda() 234 | 235 | images = torch.cat([mixed_up_images, bg_images1, bg_mix]) 236 | labels = torch.cat([mixed_up_labels, bg_labels1, bg_mix_labels]) 237 | 238 | outputs = netSC(netSB(netSF(netBB(images)))) 239 | if start_test: 240 | all_output = outputs.float().cpu() 241 | all_label = labels.float() 242 | start_test = False 243 | else: 244 | all_output = torch.cat((all_output, outputs.float().cpu()), 0) 245 | all_label = torch.cat((all_label, labels.float()), 0) 246 | classifier_loss = CrossEntropyLabelSmooth(num_classes=args.sticker_num, weights = weights, epsilon=args.smooth, reduction=True)(outputs, labels) 247 | 248 | optimizer.zero_grad() 249 | classifier_loss.backward() 250 | optimizer.step() 251 | 252 | accumulated_train_loss.update(classifier_loss.item()) 253 | 254 | train_data_bar.set_description("Train: Steps:{} , Loss:{:.4f}".format(iter_num, accumulated_train_loss.avg)) 255 | if ((iter_num) % 500 == 0) or (iter_num == max_iter): 256 | start_test = True 257 | all_output = nn.Softmax(dim=1)(all_output) 258 | _, predict = torch.max(all_output, 1) 259 | 260 | all_label = all_label.cpu() 261 | matrix = confusion_matrix(all_label, torch.squeeze(predict).float()) 262 | matrix = matrix[np.unique(all_label).astype(int),:] 263 | 264 | acc = matrix.diagonal()/matrix.sum(axis=1) * 100 265 | train_ood_acc = acc[-1:].item() 266 | train_class_acc = acc[:-1] 267 | train_mean_acc = np.mean(acc) 268 | train_class_mean_acc = np.mean(acc[:-1]) 269 | netSF.eval() 270 | netSB.eval() 271 | netSC.eval() 272 | 273 | if args.dset=='VISDA-C': 274 | acc_s_te, acc_list = cal_acc(args, dset_loaders['val_images'], dset_loaders['train_mixup_stickers'], netBB, netSF, netSB, netSC) 275 | 276 | else: 277 | args.model = "Val" 278 | val_loss_acc = cal_acc_oda(args, dset_loaders['val_images'], 279 | dset_loaders['train_mixup_stickers'], dset_loaders['train_bg_images'], 
def print_args(args):
    """Render every attribute of ``args`` as ``name:value`` lines under a banner."""
    rendered = ["=========================================="]
    rendered.extend("{}:{}".format(key, value) for key, value in args.__dict__.items())
    return "\n".join(rendered) + "\n"
    # --- Task selection: three source domains and one target domain (indices into `names`). ---
    parser.add_argument('--s', type=int, default=[0,1,3], nargs = 3, help="source")
    parser.add_argument('--t', type=int, default=2, help="target")
    parser.add_argument('--max_epoch', type=int, default=100, help="max iterations")
    parser.add_argument('--batch_size', type=int, default=64, help="batch_size of clean images")
    parser.add_argument('--batch_size_bg', type=int, default=32, help="batch_size of background images")
    parser.add_argument('--worker', type=int, default=4, help="number of workers")
    parser.add_argument('--dset', type=str, default='office-home', choices=['VISDA-C', 'office', 'office-home', 'office-caltech'])
    parser.add_argument('--lr', type=float, default=1e-2, help="learning rate")
    parser.add_argument('--net', type=str, default='resnet50', help="vgg16, resnet50, resnet101")
    parser.add_argument('--seed', type=int, default=2020, help="random seed")
    # --- Architecture / loss hyper-parameters. ---
    parser.add_argument('--bottleneck', type=int, default=256)
    parser.add_argument('--epsilon', type=float, default=1e-5)
    parser.add_argument('--layer', type=str, default="wn", choices=["linear", "wn"])
    parser.add_argument('--classifier', type=str, default="bn", choices=["ori", "bn"])
    parser.add_argument('--smooth', type=float, default=0.1)
    parser.add_argument('--output', type=str, default='san')
    parser.add_argument('--da', type=str, default='uda', choices=['uda', 'pda', 'oda'])
    parser.add_argument('--trte', type=str, default='val', choices=['full', 'val'])
    # Sticker (subsidiary task) image list and class count.
    parser.add_argument('--task_1_path', type=str, default='../data/textured/Task_1_mixup.txt')
    parser.add_argument('--task_1_class_num', type=int, default=10)

    args = parser.parse_args()

    # Resolve per-dataset domain names and the main-task class count.
    if args.dset == 'office-home':
        names = ['Art', 'Clipart', 'Product', 'RealWorld']
        args.class_num = 65
    if args.dset == 'office':
        names = ['amazon', 'dslr', 'webcam']
        args.class_num = 31
    if args.dset == 'VISDA-C':
        names = ['train', 'validation']
        args.class_num = 12
    if args.dset == 'office-caltech':
        names = ['amazon', 'caltech', 'dslr', 'webcam']
        args.class_num = 10
    # 10 sticker classes + 1 background / "no sticker" class.
    args.sticker_num = 11
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_id
    # Seed all RNGs for reproducibility (CUDA seeding requires a visible GPU).
    SEED = args.seed
    torch.manual_seed(SEED)
    torch.cuda.manual_seed(SEED)
    np.random.seed(SEED)
    random.seed(SEED)
    # NOTE(review): get_logger('__train__', ...) is called again below with a
    # different file; each call adds handlers to the same singleton logger, so
    # records are emitted to both sinks (and stdout twice) — confirm intended.
    args.logger = get_logger('__train__', 'logs_{}_St.txt'.format(names[args.t]))
    # torch.backends.cudnn.deterministic = True

    # Build the source/target image-list paths from the chosen domain indices.
    folder = '../data/'
    args.s0_dset_path = folder + args.dset + '/' + names[args.s[0]] + '_list.txt'
    args.s1_dset_path = folder + args.dset + '/' + names[args.s[1]] + '_list.txt'
    args.s2_dset_path = folder + args.dset + '/' + names[args.s[2]] + '_list.txt'
    args.test_dset_path = folder + args.dset + '/' + names[args.t] + '_list.txt'

    args.task_1_classes = "../data/textured/Task_1_classes.txt"
    args.task_3_classes = "../data/" + args.dset + "/classes.txt"

    # Partial / open-set DA class splits for Office-Home.
    if args.dset == 'office-home':
        if args.da == 'pda':
            args.class_num = 65
            args.src_classes = [i for i in range(65)]
            args.tar_classes = [i for i in range(25)]
        if args.da == 'oda':
            args.class_num = 25
            args.src_classes = [i for i in range(25)]
            args.tar_classes = [i for i in range(65)]

    # Output directory for the sticker-branch checkpoint.
    # NOTE(review): 'mkdir -p' shells out and is POSIX-only; os.makedirs would be portable.
    args.output_dir_src = osp.join('Checkpoint_Sticker', args.dset, names[args.t].upper())
    if not osp.exists(args.output_dir_src):
        os.system('mkdir -p ' + args.output_dir_src)
    if not osp.exists(args.output_dir_src):
        os.mkdir(args.output_dir_src)
    # Location of the previously-trained main (source) model to warm-start from.
    args.stored_model_dir_src = osp.join('Checkpoint_main', args.dset, names[args.t].upper())

    log_file = args.output_dir_src + '/log.txt'
    args.logger = get_logger('__train__', log_file)

    train_source(args)
def get_logger(name, log_file=None):
    """Create (or reconfigure) the logger ``name``.

    Logs INFO and above to stdout and, when ``log_file`` is given, to that
    file as well, using a bare (message-only) format.

    BUG FIX: ``logging.getLogger(name)`` returns a process-wide singleton,
    and this project calls ``get_logger('__train__', ...)`` more than once;
    the original kept appending handlers on every call, so each record was
    emitted multiple times. Stale handlers are now removed first.
    """
    logger = logging.getLogger(name)
    logger.setLevel(logging.INFO)

    # Drop handlers left over from a previous get_logger() call on this name.
    for handler in list(logger.handlers):
        logger.removeHandler(handler)

    formatter = logging.Formatter('')

    stdhandler = logging.StreamHandler(sys.stdout)
    stdhandler.setFormatter(formatter)
    logger.addHandler(stdhandler)

    if log_file is not None:
        file_handler = logging.FileHandler(log_file)
        file_handler.setFormatter(formatter)

        logger.addHandler(file_handler)

    return logger
def DANN(features, ad_net):
    """Domain-adversarial (DANN) loss.

    Runs the discriminator ``ad_net`` on ``features`` and scores it with BCE
    against hard domain labels: the first half of the batch is source (1),
    the second half is target (0).
    """
    domain_pred = ad_net(features)
    half = domain_pred.size(0) // 2
    hard_labels = np.array([[1]] * half + [[0]] * half)
    dc_target = torch.from_numpy(hard_labels).float().cuda()
    criterion = nn.BCELoss()
    return criterion(domain_pred, dc_target)
63 | """ 64 | 65 | def __init__(self, num_classes, weights=None, epsilon=0.1, use_gpu=True, reduction=True): 66 | super(CrossEntropyLabelSmooth, self).__init__() 67 | self.num_classes = num_classes 68 | self.epsilon = epsilon 69 | self.use_gpu = use_gpu 70 | self.reduction = reduction 71 | self.logsoftmax = nn.LogSoftmax(dim=1) 72 | self.weights = weights 73 | 74 | def forward(self, inputs, targets): 75 | """ 76 | Args: 77 | inputs: prediction matrix (before softmax) with shape (batch_size, num_classes) 78 | targets: ground truth labels with shape (num_classes) 79 | """ 80 | log_probs = self.logsoftmax(inputs) 81 | targets = torch.zeros(log_probs.size()).scatter_(1, targets.unsqueeze(1).cpu(), 1) 82 | if self.use_gpu: targets = targets.cuda() 83 | targets = (1 - self.epsilon) * targets + self.epsilon / self.num_classes 84 | loss = (- targets * log_probs) 85 | if self.weights is not None: 86 | loss = loss * self.weights 87 | if self.reduction: 88 | loss = loss.sum(dim=1) 89 | return loss.mean() 90 | else: 91 | return loss 92 | #return loss 93 | 94 | class CrossEntropyWeightedLabelSmooth(nn.Module): 95 | """Cross entropy loss with label smoothing regularizer. 96 | Reference: 97 | Szegedy et al. Rethinking the Inception Architecture for Computer Vision. CVPR 2016. 98 | Equation: y = (1 - epsilon) * y + epsilon / K. 99 | Args: 100 | num_classes (int): number of classes. 101 | epsilon (float): weight. 
102 | """ 103 | 104 | def __init__(self, num_classes, weights=None, epsilon=0.1, use_gpu=True, reduction=True): 105 | super(CrossEntropyLabelSmooth, self).__init__() 106 | self.num_classes = num_classes 107 | self.epsilon = epsilon 108 | self.use_gpu = use_gpu 109 | self.reduction = reduction 110 | self.logsoftmax = nn.LogSoftmax(dim=1) 111 | self.weights = weights 112 | if len(self.weights) == self.num_classes: 113 | raise AssertionError("Len of Weights should be equal to num_classes") 114 | 115 | def forward(self, inputs, targets): 116 | """ 117 | Args: 118 | inputs: prediction matrix (before softmax) with shape (batch_size, num_classes) 119 | targets: ground truth labels with shape (num_classes) 120 | """ 121 | log_probs = self.logsoftmax(inputs) 122 | targets = torch.zeros(log_probs.size()).scatter_(1, targets.unsqueeze(1).cpu(), 1) 123 | if self.use_gpu: targets = targets.cuda() 124 | targets = (1 - self.epsilon) * targets + self.epsilon / self.num_classes 125 | if self.weights: 126 | for i in range(targets.size(0)): 127 | try: 128 | for j in range(targets.size(1)): 129 | if targets[i][j] == 1: 130 | targets[i] *= self.weights[j] 131 | except: 132 | pass 133 | loss = (- targets * log_probs).sum(dim=1) 134 | if self.reduction: 135 | return loss.mean() 136 | else: 137 | return loss 138 | return losss -------------------------------------------------------------------------------- /MSDA_OH/network.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch 3 | import torch.nn as nn 4 | import torchvision 5 | from torchvision import models 6 | from torch.autograd import Variable 7 | import math 8 | import torch.nn.utils.weight_norm as weightNorm 9 | from collections import OrderedDict 10 | 11 | def calc_coeff(iter_num, high=1.0, low=0.0, alpha=10.0, max_iter=10000.0): 12 | return np.float(2.0 * (high - low) / (1.0 + np.exp(-alpha*iter_num / max_iter)) - (high - low) + low) 13 | 14 | def 
class VGGBase(nn.Module):
    """ImageNet-pretrained VGG feature extractor.

    Keeps the convolutional features plus the first six classifier layers
    (everything up to, but excluding, the final fc) and records that final
    fc layer's input width in ``in_features``.
    """

    def __init__(self, vgg_name):
        super(VGGBase, self).__init__()
        backbone = vgg_dict[vgg_name](pretrained=True)
        self.features = backbone.features
        # Re-register classifier layers 0..5 under stable names.
        self.classifier = nn.Sequential()
        for idx in range(6):
            self.classifier.add_module("classifier" + str(idx), backbone.classifier[idx])
        self.in_features = backbone.classifier[6].in_features

    def forward(self, x):
        feats = self.features(x)
        flat = feats.view(feats.size(0), -1)
        return self.classifier(flat)
class feat_bootleneck(nn.Module):
    """Bottleneck head: linear projection to ``bottleneck_dim``, optionally
    followed by batch-norm when ``type == "bn"``.

    NOTE(review): ``self.relu`` and ``self.dropout`` are registered but never
    used in ``forward``; kept for state-dict compatibility with the original.
    """

    def __init__(self, feature_dim, bottleneck_dim=256, type="ori"):
        super(feat_bootleneck, self).__init__()
        self.bn = nn.BatchNorm1d(bottleneck_dim, affine=True)
        self.relu = nn.ReLU(inplace=True)
        self.dropout = nn.Dropout(p=0.5)
        self.bottleneck = nn.Linear(feature_dim, bottleneck_dim)
        self.bottleneck.apply(init_weights)
        self.type = type

    def forward(self, x):
        projected = self.bottleneck(x)
        return self.bn(projected) if self.type == "bn" else projected
class Res50(nn.Module):
    """ImageNet-pretrained ResNet-50 that returns both the pooled feature
    vector and the original 1000-way logits."""

    def __init__(self):
        super(Res50, self).__init__()
        backbone = models.resnet50(pretrained=True)
        self.conv1 = backbone.conv1
        self.bn1 = backbone.bn1
        self.relu = backbone.relu
        self.maxpool = backbone.maxpool
        self.layer1 = backbone.layer1
        self.layer2 = backbone.layer2
        self.layer3 = backbone.layer3
        self.layer4 = backbone.layer4
        self.avgpool = backbone.avgpool
        self.in_features = backbone.fc.in_features
        self.fc = backbone.fc

    def forward(self, x):
        # Stem, the four residual stages, then global average pooling.
        stages = [self.conv1, self.bn1, self.relu, self.maxpool,
                  self.layer1, self.layer2, self.layer3, self.layer4,
                  self.avgpool]
        for stage in stages:
            x = stage(x)
        feat = x.view(x.size(0), -1)
        return feat, self.fc(feat)
class AverageMeter(object):
    """Tracks the most recent value and a running (count-weighted) average."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Zero out all statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record ``val`` observed ``n`` times and refresh the average."""
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count
def torch_load(path, key="weights", device=torch.device('cpu')):
    '''
    Load one entry from a checkpoint written by torch_save.

    Possible keys should be known beforehand.

    load_state_dict should be done in client code.

    Args:
        path: checkpoint file path.
        key: entry to extract from the checkpoint dict.
        device: map_location forwarded to torch.load.

    Raises:
        FileNotFoundError: if path does not exist.
        KeyError: if key is missing from the checkpoint.
    '''
    # Specific exception types (both subclasses of Exception, so any existing
    # broad handlers keep working) instead of the original bare Exception.
    if not os.path.exists(path):
        raise FileNotFoundError("Checkpoint doesn't exist at {}".format(path))
    checkpoint = torch.load(path, map_location=device)
    if key not in checkpoint:
        raise KeyError("Key {} doesn't exist".format(key))
    return checkpoint[key]
def image_test(resize_size=256, crop_size=224, alexnet=False):
    """Deterministic eval-time transform: square resize, center crop,
    ToTensor, ImageNet normalization.

    Args:
        resize_size: side length of the initial square resize.
        crop_size: side length of the central crop.
        alexnet: Caffe-style mean-file normalization was never ported — the
            original branch referenced an undefined name ``Normalize`` and
            crashed with NameError at call time, so fail with a clear
            message instead.
    """
    if alexnet:
        raise NotImplementedError(
            "alexnet mean-file normalization ('./ilsvrc_2012_mean.npy') is not supported")
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    return transforms.Compose([
        transforms.Resize((resize_size, resize_size)),
        transforms.CenterCrop(crop_size),
        transforms.ToTensor(),
        normalize
    ])
Put the dataset in data folder 11 | 12 | ## Office-Home experiments 13 | Code for Single Source Domain Adaptation (SSDA) is in the 'SSDA_OH' folder. 14 | 15 | ``` 16 | sh SSDA_OH/run.sh 17 | ``` 18 | 19 | Code for Multi Source Domain Adaptation (SSDA) is in the 'MSDA_OH' folder. 20 | 21 | ``` 22 | sh MSDA_OH/run.sh 23 | ``` 24 | 25 | ## Pre-trained checkpoints (coming soon) 26 | 27 | ## Citation 28 | If you find our work useful in your research, please cite the following paper: 29 | ``` 30 | @InProceedings{kundu2022concurrent, 31 | title={Concurrent Subsidiary Supervision for Unsupervised Source-Free Domain Adaptation}, 32 | author={Kundu, Jogendra Nath and Bhambri, Suvaansh and Kulkarni, Akshay and Sarkar, Hiran and Jampani, Varun and Babu, R. Venkatesh}, 33 | booktitle={European Conference on Computer Vision}, 34 | year={2022}, 35 | } 36 | ``` 37 | -------------------------------------------------------------------------------- /SSDA_OH/data_list.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import numpy as np 3 | import random 4 | from PIL import Image 5 | from torch.utils.data import Dataset 6 | import os 7 | import os.path 8 | import cv2 9 | import torchvision 10 | 11 | from torchvision import transforms 12 | from torch.utils.data import DataLoader 13 | import torchvision.transforms.functional as F 14 | 15 | 16 | SEED = 2020 17 | torch.manual_seed(SEED) 18 | torch.cuda.manual_seed(SEED) 19 | torch.cuda.manual_seed_all(SEED) 20 | np.random.seed(SEED) 21 | random.seed(SEED) 22 | os.environ['PYTHONHASHSEED'] = str(SEED) 23 | 24 | def shuffle_2(imgs, labels=None, parts=[6, 6]): 25 | bs = len(imgs) 26 | 27 | if len(imgs) < parts[0]*parts[1]: 28 | imgs = imgs.repeat((parts[0]*parts[1]+bs-1)//bs, 1, 1, 1) 29 | 30 | imgs = imgs[:parts[0]*parts[1]] 31 | random.shuffle(parts) 32 | 33 | if labels != None: 34 | assert(labels.size(1) > 1) 35 | 36 | ver, hor = parts[0], parts[1] # [2,1] 37 | num = parts[0]*parts[1] 
def shuffle_2(imgs, labels=None, parts=(6, 6)):
    """
    Cut a batch of images into a parts[0] x parts[1] grid and reassemble one
    image whose tiles come from (randomly shuffled) different batch members.

    Args:
        imgs: tensor of shape (B, C, H, W).
        labels: optional one-hot label tensor; currently only validated, the
            summed labels are computed but not returned (kept as original).
        parts: (vertical, horizontal) grid size.

    Returns:
        Tensor of shape (B // (parts[0]*parts[1]), C, H, W).
    """
    # Bugfix: the original used a mutable default and ran random.shuffle on
    # it, permuting the caller's list (and the shared default) in place.
    parts = list(parts)
    bs = len(imgs)

    if len(imgs) < parts[0]*parts[1]:
        imgs = imgs.repeat((parts[0]*parts[1]+bs-1)//bs, 1, 1, 1)

    imgs = imgs[:parts[0]*parts[1]]
    random.shuffle(parts)

    # Bugfix: `labels != None` on a tensor is an elementwise comparison and
    # raises "truth value of a tensor is ambiguous"; identity check intended.
    if labels is not None:
        assert(labels.size(1) > 1)

    ver, hor = parts[0], parts[1]
    num = parts[0]*parts[1]

    b = imgs.size(0)

    assert(b % num == 0)

    bs_ver = int(imgs.size(2)/ver)
    bs_hor = int(imgs.size(3)/hor)

    # Shuffle horizontal strips (rows of the grid).
    if ver != 1:
        img1 = [imgs[:, :, i*bs_ver:(i+1)*bs_ver, :] for i in range(ver-1)]
        img1.append(imgs[:, :, (ver-1)*bs_ver:, :])
        if ver == 2:
            img1.reverse()
        else:
            random.shuffle(img1)
        img1 = torch.cat(img1, 2)
    else:
        img1 = imgs

    # Shuffle vertical strips (columns of the grid).
    if hor != 1:
        img2 = [img1[:, :, :, i*bs_hor:(i+1)*bs_hor] for i in range(hor-1)]
        img2.append(img1[:, :, :, (hor-1)*bs_hor:])
        if hor == 2:
            img2.reverse()
        else:
            random.shuffle(img2)
        img2 = torch.cat(img2, 3)
    else:
        img2 = img1

    # Split the batch into `num` groups; group k contributes grid cell k.
    img3 = torch.stack([img2[i*(b//num):(i+1)*(b//num)] for i in range(num)])

    if labels is not None:
        labels1 = torch.stack([labels[i*(b//num):(i+1)*(b//num)] for i in range(num)])

    mask = torch.zeros_like(img3)
    k = 0
    for i in range(ver):
        si = i*bs_ver
        ei = (i+1)*bs_ver if i < (ver-1) else imgs.size(2)
        for j in range(hor):
            sj = j*bs_hor
            ej = (j+1)*bs_hor if j < (hor-1) else imgs.size(3)

            mask[k, :, :, si:ei, sj:ej] = 1
            k += 1

    img3 = img3*mask
    img3 = torch.sum(img3, 0)
    if labels is not None:
        # NOTE(review): sums `labels`, not `labels1`, and labels2 is never
        # returned — preserved as in the original; verify intent upstream.
        labels2 = torch.sum(labels, 0)
    else:
        labels2 = None

    return img3


def make_dataset(image_list, labels):
    """
    Build a list of (path, target) tuples from image-list lines.

    With explicit `labels`, pair line i with labels[i, :]; otherwise parse
    each line as "path label" (int) or "path l1 l2 ..." (multi-label array).
    """
    images = []
    if labels:
        len_ = len(image_list)
        images = [(image_list[i].strip(), labels[i, :]) for i in range(len_)]
    else:
        if len(image_list[0].split()) > 2:
            images = [(val.split()[0], np.array([int(la) for la in val.split()[1:]])) for val in image_list]
        else:
            images = [(val.split()[0], int(val.split()[1])) for val in image_list]
    return images


def rgb_loader(path):
    """Open an image file and return it converted to RGB."""
    with open(path, 'rb') as f:
        with Image.open(f) as img:
            return img.convert('RGB')


def l_loader(path):
    """Open an image file and return it converted to grayscale ('L')."""
    with open(path, 'rb') as f:
        with Image.open(f) as img:
            return img.convert('L')
def rgba_loader(path):
    """Open an image file and return it converted to RGBA (keeps alpha)."""
    with open(path, 'rb') as f:
        with Image.open(f) as img:
            return img.convert('RGBA')


class ImageList(Dataset):
    """Dataset over an image-list file: returns (transformed image, target)."""

    def __init__(self, image_list, labels=None, transform=None, target_transform=None, mode='RGB', specified_len=None):
        imgs = make_dataset(image_list, labels)  # list of (path, target) tuples
        if len(imgs) == 0:
            # Bugfix: the original message referenced undefined names
            # (`root`, `IMG_EXTENSIONS`) and raised NameError instead.
            raise(RuntimeError("Found 0 images in the supplied image list"))

        self.imgs = imgs
        self.transform = transform
        self.target_transform = target_transform
        if mode == 'RGB':
            self.loader = rgb_loader
        elif mode == 'L':
            self.loader = l_loader
        elif mode == "RGBA":
            self.loader = rgba_loader

        # specified_len lets a short dataset masquerade as a longer one.
        if not specified_len:
            self.len = len(self.imgs)
        else:
            self.len = specified_len

    def __getitem__(self, index):
        path, target = self.imgs[index]
        img = self.loader(path)
        if self.transform is not None:
            img = self.transform(img)
        if self.target_transform is not None:
            target = self.target_transform(target)

        return img, target

    def __len__(self):
        return self.len


class ShuffledImageList(Dataset):
    """Like ImageList, but tiles-shuffles each image and fixes the label to 10 (OOD)."""

    def __init__(self, image_list, labels=None, transform=None, target_transform=None, mode='RGB', specified_len=None):
        imgs = make_dataset(image_list, labels)  # list of (path, target) tuples
        if len(imgs) == 0:
            raise(RuntimeError("Found 0 images in the supplied image list"))

        self.imgs = imgs
        self.transform = transform
        self.target_transform = target_transform
        if mode == 'RGB':
            self.loader = rgb_loader
        elif mode == 'L':
            self.loader = l_loader
        elif mode == "RGBA":
            self.loader = rgba_loader

        if not specified_len:
            self.len = len(self.imgs)
        else:
            self.len = specified_len

    def __getitem__(self, index):
        path, target = self.imgs[index]
        img = self.loader(path)
        if self.transform is not None:
            img = self.transform(img)

        # Grid-shuffle the single image; label 10 marks the OOD/background class.
        img = torch.squeeze(shuffle_2(torch.unsqueeze(img, 0)), 0)
        target = torch.tensor(10)

        return img, target

    def __len__(self):
        return self.len


class ImageList_idx(Dataset):
    """ImageList variant that also returns the sample index (for pseudo-labeling)."""

    def __init__(self, image_list, labels=None, transform=None, target_transform=None, mode='RGB'):
        imgs = make_dataset(image_list, labels)
        if len(imgs) == 0:
            raise(RuntimeError("Found 0 images in the supplied image list"))

        self.imgs = imgs
        self.transform = transform
        self.target_transform = target_transform
        if mode == 'RGB':
            self.loader = rgb_loader
        elif mode == 'L':
            self.loader = l_loader
        elif mode == "RGBA":
            # Consistency fix: siblings support RGBA; this class silently
            # ignored the mode and left self.loader unset.
            self.loader = rgba_loader

    def __getitem__(self, index):
        path, target = self.imgs[index]
        img = self.loader(path)
        if self.transform is not None:
            img = self.transform(img)
        if self.target_transform is not None:
            target = self.target_transform(target)

        return img, target, index

    def __len__(self):
        return len(self.imgs)


class StickerList(Dataset):
    """Dataset of sticker images; samples a random sticker and hue-jitters it."""

    def __init__(self, image_list, labels=None, transform=None, target_transform=None, mode='RGB', specified_len=None):
        imgs = make_dataset(image_list, labels)  # list of (path, target) tuples
        if len(imgs) == 0:
            raise(RuntimeError("Found 0 images in the supplied image list"))

        self.imgs = imgs
        self.transform = transform
        self.target_transform = target_transform
        if mode == 'RGB':
            self.loader = rgb_loader
        elif mode == 'L':
            self.loader = l_loader
        elif mode == "RGBA":
            self.loader = rgba_loader

        if not specified_len:
            self.len = len(self.imgs)
        else:
            self.len = specified_len

    def __getitem__(self, index):
        # Deliberately ignores `index`: each access draws a random sticker.
        path, target = self.imgs[random.choice(np.arange(0, len(self.imgs)))]
        img = self.loader(path)

        """
        If img is PIL Image, mode "1", "L", "I", "F" and modes with transparency
        (alpha channel) are not supported by ColorJitter.

        Hence, removing the alpha channel, jittering and then putting it back.
        """

        choice = np.random.uniform()
        jitter_transform = self.get_hsv_transform(choice)
        alpha = img.split()[-1]
        jitter_img = jitter_transform(img)
        img = Image.new("RGBA", img.size, (255, 255, 255, 0))
        img.paste(jitter_img, mask=alpha)

        # if(random.uniform(0,1) >= 0.5):
        #     img = transforms.RandomAffine(degrees=30)(img)
        # else:
        #     img = transforms.RandomAffine(degrees=0, shear=(-45,45))(img)

        if self.transform is not None:
            img = self.transform(img)
        if self.target_transform is not None:
            target = self.target_transform(target)

        return img, target

    def get_hsv_transform(self, prob):
        """Pick one of three hue/brightness/saturation jitters by probability bucket."""
        if prob <= 0.33:
            return transforms.ColorJitter(hue=(-0.5, 0.5))
        elif prob <= 0.66:
            return transforms.ColorJitter(brightness=(0.66, 0.88), hue=(-0.5, 0.5))
        else:
            return transforms.ColorJitter(saturation=(0.55, 1.0), hue=(-0.5, 0.5))

    def __len__(self):
        return self.len
class BackGround(Dataset):
    """Dataset over a directory of background images; every label is 10 (OOD)."""

    def __init__(self, image_list, labels=None, transform=None, target_transform=None, mode='RGB', specified_len=None):
        # `image_list` is a directory path here, unlike the other datasets.
        self.image_list = image_list
        self.paths = os.listdir(image_list)
        self.transform = transform

        if mode == 'RGB':
            self.loader = rgb_loader
        elif mode == 'L':
            self.loader = l_loader
        elif mode == "RGBA":
            self.loader = rgba_loader

        self.len = len(self.paths)

    def __getitem__(self, index):
        path = self.paths[index]
        img = self.loader(os.path.join(self.image_list, path))
        img = self.transform(img)
        label = torch.tensor(10)  # fixed OOD/background class

        return img, label

    def __len__(self):
        return self.len


def get_x_y_mixup(img1: "Image", img2: "Image"):
    """
    Pick a random top-left (x, y) offset for pasting the smaller image onto
    the larger one, uniform over the valid range.
    """
    # Bugfix: when both images have the same width/height the original
    # arange(stop=0) was empty and random.choice raised; max(..., 1) keeps
    # the original range when sizes differ and yields offset 0 when equal.
    x_locations = np.arange(start=0, stop=max(abs(img1.size[0] - img2.size[0]), 1), step=1)
    y_locations = np.arange(start=0, stop=max(abs(img1.size[1] - img2.size[1]), 1), step=1)
    x, y = random.choice(x_locations), random.choice(y_locations)

    return x, y


class Denormalize(object):
    """Invert a torchvision Normalize (and clamp the result to [0, 1])."""

    def __init__(self, mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), inplace=False):
        self.mean = mean
        self.demean = [-m/s for m, s in zip(mean, std)]
        self.std = std
        self.destd = [1/s for s in std]
        self.inplace = inplace

    def __call__(self, tensor):
        tensor = F.normalize(tensor, self.demean, self.destd, self.inplace)
        return torch.clamp(tensor, 0.0, 1.0)


def mixup_batch(img_batch, sticker_batch):
    """
    Alpha-blend a random-sized sticker onto each image of a normalized batch.

    Images are denormalized, both are converted to PIL, the pasted batch is
    made, then normalization is re-applied. Returns a tensor batch of the
    same shape as `img_batch`.
    """

    denormalize = Denormalize()
    toPIL = transforms.ToPILImage()
    toTens = transforms.ToTensor()
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    img_batch = denormalize(img_batch)

    start = True

    for i in range(img_batch.shape[0]):

        img = toPIL(img_batch[i])
        img = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)

        sticker = toPIL(sticker_batch[i]).convert('RGBA')

        # Paste the sticker on a transparent 224x224 canvas at a random spot.
        white_img = Image.new("RGBA", (224, 224), (0, 0, 0, 0))
        percent_sticker = random.choice([0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4])
        sticker = sticker.resize((round(224 * percent_sticker), round(224 * percent_sticker)))
        x, y = get_x_y_mixup(white_img, sticker)
        white_img.paste(sticker, (x, y), sticker)
        white_img = cv2.cvtColor(np.array(white_img), cv2.COLOR_RGB2BGR)
        alpha = np.random.uniform(0.3, 0.7)

        # Blend only where the sticker canvas is non-zero; elsewhere keep img.
        result = np.around(alpha * img + (1 - alpha) * white_img).astype(np.uint8)
        result[white_img == 0] = img[white_img == 0]
        mixed_up = Image.fromarray(cv2.cvtColor(result, cv2.COLOR_BGR2RGB))

        mixed_up = normalize(toTens(mixed_up)).unsqueeze(0)

        if start:
            pasted_batch = mixed_up
            start = False
        else:
            pasted_batch = torch.cat((pasted_batch, mixed_up), 0)

    return pasted_batch
def op_copy(optimizer):
    """Remember each param group's initial learning rate under the key 'lr0'."""
    for group in optimizer.param_groups:
        group['lr0'] = group['lr']
    return optimizer


def lr_scheduler(optimizer, iter_num, max_iter, gamma=10, power=0.75):
    """
    Polynomial decay: lr = lr0 * (1 + gamma * iter_num / max_iter) ** (-power).

    Also (re)asserts the fixed SGD hyper-parameters on every param group.
    """
    decay = (1 + gamma * iter_num / max_iter) ** (-power)
    for group in optimizer.param_groups:
        group['lr'] = group['lr0'] * decay
        group['weight_decay'] = 1e-3
        group['momentum'] = 0.9
        group['nesterov'] = True
    return optimizer


def image_test(resize_size=256, crop_size=224, alexnet=False):
    """Deterministic evaluation transform: resize -> center crop -> normalize."""
    if alexnet:
        # NOTE(review): transforms.Normalize has no 'meanfile' kwarg; this
        # branch would raise if taken — kept exactly as in the original.
        normalize = transforms.Normalize(meanfile='./ilsvrc_2012_mean.npy')
    else:
        normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                         std=[0.229, 0.224, 0.225])
    steps = [
        transforms.Resize((resize_size, resize_size)),
        transforms.CenterCrop(crop_size),
        transforms.ToTensor(),
        normalize,
    ]
    return transforms.Compose(steps)


def data_load(args):
    """
    Build the dataloaders used for sticker-branch source training:
    clean train/val/test images, tile-shuffled backgrounds, and stickers.
    """
    loaders = {}
    train_bs = args.batch_size  # kept for parity with the original (unused)

    with open(args.s_dset_path) as f_src:
        txt_src = f_src.readlines()
    with open(args.test_dset_path) as f_test:
        txt_test = f_test.readlines()
    with open(args.task_1_path) as f_t1:
        task_1_list = f_t1.readlines()

    # 90/10 split of the source list into train/val.
    dsize = len(txt_src)
    tr_size = int(0.9 * dsize)
    tr_txt, val_txt = torch.utils.data.random_split(txt_src, [tr_size, dsize - tr_size])

    train_dataset = ImageList(tr_txt, transform=image_train())
    loaders["train_clean_images"] = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True,
                                               num_workers=args.worker, drop_last=False, pin_memory=False)

    test_dataset = ImageList(txt_test, transform=image_test())
    loaders["test_images"] = DataLoader(test_dataset, batch_size=args.batch_size, shuffle=False,
                                        num_workers=args.worker, drop_last=False, pin_memory=False)

    val_dataset = ImageList(val_txt, transform=image_test())
    loaders["val_images"] = DataLoader(val_dataset, batch_size=args.batch_size, shuffle=False,
                                       num_workers=args.worker, drop_last=False, pin_memory=False)

    bg_dataset = ShuffledImageList(tr_txt, transform=image_train())
    loaders["train_bg_images"] = DataLoader(bg_dataset, batch_size=args.batch_size_bg, shuffle=True,
                                            num_workers=args.worker, drop_last=True, pin_memory=False)

    sticker_dataset = StickerList(task_1_list, transform=sticker_train(), mode="RGBA",
                                  specified_len=len(train_dataset))
    loaders["train_mixup_stickers"] = DataLoader(sticker_dataset,
                                                 batch_size=args.batch_size + (args.batch_size_bg // 2),
                                                 shuffle=True, num_workers=0, drop_last=True, pin_memory=False)

    return loaders
DataLoader(bg_dataset, batch_size=args.batch_size_bg, shuffle=True, num_workers=args.worker,drop_last=True, pin_memory = False) 82 | 83 | train_sticker_dataset = StickerList(task_1_list, transform=sticker_train(), mode="RGBA", specified_len = len(train_dataset)) 84 | dset_loaders["train_mixup_stickers"] = DataLoader(train_sticker_dataset, batch_size=args.batch_size+(args.batch_size_bg//2), shuffle=True, num_workers=0, drop_last=True, pin_memory = False) 85 | 86 | 87 | return dset_loaders 88 | 89 | def cal_acc_oda(args, clean_loader, sticker_loader, bg_loader, netBB, netF, netB, netC, weights): 90 | start_test = True 91 | accumulated_loss = AverageMeter() 92 | 93 | with torch.no_grad(): 94 | iter_clean_val = iter(clean_loader) 95 | iter_sticker_val = iter(sticker_loader) 96 | iter_bg_val = iter(bg_loader) 97 | data_bar = tqdm(range(len(clean_loader))) 98 | for i in data_bar: 99 | 100 | try: 101 | mixup_stickers, mixup_labels = iter_sticker_val.next() 102 | except: 103 | iter_sticker_val = iter(sticker_loader) 104 | mixup_stickers, mixup_labels = iter_sticker_val.next() 105 | try: 106 | clean_images, clean_labels = iter_clean_val.next() 107 | except: 108 | iter_clean_val = iter(clean_loader) 109 | clean_images, clean_labels = iter_clean_val.next() 110 | try: 111 | bg_images, bg_labels = iter_bg_val.next() 112 | except: 113 | iter_bg_val = iter(bg_loader) 114 | bg_images, bg_labels = iter_bg_val.next() 115 | 116 | clean_images = clean_images.cuda() 117 | 118 | mixed_up, mixed_up_labels = mixup_batch(clean_images, mixup_stickers).cuda(), mixup_labels.cuda()[:len(clean_images)] 119 | bg_images1, bg_labels1 = bg_images.cuda()[:args.batch_size_bg//2], bg_labels[:args.batch_size_bg//2].cuda() 120 | bg_mix_up ,bg_mix_labels = mixup_batch(bg_images[args.batch_size_bg//2:], mixup_stickers[args.batch_size:]).cuda(), bg_labels[args.batch_size_bg//2:].cuda() 121 | inputs = torch.cat((mixed_up, bg_images1, bg_mix_up),0) 122 | labels = torch.cat((mixed_up_labels, bg_labels1, 
bg_mix_labels)) 123 | outputs = netC(netB(netF(netBB(inputs)))) 124 | 125 | data_bar.set_description("{} : Step:{}".format(args.model, i)) 126 | if start_test: 127 | all_output = outputs.float().cpu() 128 | all_label = labels.float() 129 | start_test = False 130 | else: 131 | all_output = torch.cat((all_output, outputs.float().cpu()), 0) 132 | all_label = torch.cat((all_label, labels.float()), 0) 133 | 134 | all_output = nn.Softmax(dim=1)(all_output) 135 | _, predict = torch.max(all_output, 1) 136 | 137 | all_label = all_label.cpu() 138 | matrix = confusion_matrix(all_label, torch.squeeze(predict).float()) 139 | matrix = matrix[np.unique(all_label).astype(int),:] 140 | 141 | acc = matrix.diagonal()/matrix.sum(axis=1) * 100 142 | ood_acc = acc[-1:].item() 143 | 144 | return acc[:-1], np.mean(acc[:-1]), ood_acc, np.mean(acc) 145 | 146 | def train_source(args): 147 | dset_loaders = data_load(args) 148 | ## set base network 149 | if args.net[0:3] == 'res': 150 | netBB = network.ResBase(res_name=args.net).cuda() 151 | netSF = network.ResBase_Layer4(res_name=args.net).cuda() 152 | 153 | netSB = network.feat_bootleneck(type=args.classifier, feature_dim=netSF.in_features, bottleneck_dim=args.bottleneck).cuda() 154 | netSC = network.feat_classifier(type=args.layer, class_num = args.sticker_num, bottleneck_dim=args.bottleneck).cuda() 155 | 156 | param_group = [] 157 | learning_rate = args.lr 158 | for k, v in netBB.named_parameters(): 159 | param_group += [{'params': v, 'lr': learning_rate*0.1}] 160 | for k, v in netSF.named_parameters(): 161 | param_group += [{'params': v, 'lr': learning_rate*0.1}] 162 | for k, v in netSB.named_parameters(): 163 | param_group += [{'params': v, 'lr': learning_rate}] 164 | for k, v in netSC.named_parameters(): 165 | param_group += [{'params': v, 'lr': learning_rate}] 166 | 167 | optimizer = optim.SGD(param_group) 168 | optimizer = op_copy(optimizer) 169 | 170 | modelpath = args.stored_model_dir_src + '/model.pt' 171 | 
netBB.load_state_dict(torch.load(modelpath)['netBB']) 172 | netSF.load_state_dict(torch.load(modelpath)['netMF']) 173 | netSB.load_state_dict(torch.load(modelpath)['netMB']) 174 | 175 | netBB.eval() 176 | for k, v in netBB.named_parameters(): 177 | v.requires_grad = False 178 | 179 | acc_init = 0 180 | max_iter = args.max_epoch * len(dset_loaders["train_clean_images"]) 181 | interval_iter = max_iter // 10 182 | start_test = True 183 | 184 | netSF.train() 185 | netSB.train() 186 | netSC.train() 187 | 188 | accumulated_train_acc = AverageMeter() 189 | accumulated_train_loss = AverageMeter() 190 | 191 | weights = torch.ones((1,11)) 192 | weights[0, -1] = 0.01 193 | weights = weights.cuda() 194 | train_data_bar = tqdm(range(max_iter)) 195 | iter_num = 0 196 | for step_i in train_data_bar: 197 | #if iter_num > max_iter: 198 | # break 199 | try: 200 | clean_images, clean_labels = iter_clean.next() 201 | except: 202 | iter_clean = iter(dset_loaders["train_clean_images"]) 203 | clean_images, clean_labels = iter_clean.next() 204 | try: 205 | bg_images, bg_labels = iter_bg.next() 206 | except: 207 | iter_bg = iter(dset_loaders["train_bg_images"]) 208 | bg_images, bg_labels = iter_bg.next() 209 | try: 210 | mixup_stickers, mixup_labels = iter_sticker.next() 211 | except: 212 | iter_sticker = iter(dset_loaders["train_mixup_stickers"]) 213 | mixup_stickers, mixup_labels = iter_sticker.next() 214 | 215 | if clean_images.size(0) == 1: 216 | continue 217 | 218 | 219 | iter_num += 1 220 | lr_scheduler(optimizer, iter_num=iter_num, max_iter=max_iter) 221 | clean_images = clean_images.cuda() 222 | mixed_up_images, mixed_up_labels = mixup_batch(clean_images, mixup_stickers).cuda(), mixup_labels.cuda()[:len(clean_images)] 223 | 224 | bg_images1, bg_labels1 = bg_images.cuda()[:args.batch_size_bg//2], bg_labels.cuda()[:args.batch_size_bg//2] 225 | bg_mix, bg_mix_labels = mixup_batch(bg_images[args.batch_size_bg//2:],mixup_stickers[args.batch_size:]).cuda(), 
bg_labels[args.batch_size_bg//2:].cuda() 226 | 227 | images = torch.cat([mixed_up_images, bg_images1, bg_mix]) 228 | labels = torch.cat([mixed_up_labels, bg_labels1, bg_mix_labels]) 229 | 230 | outputs = netSC(netSB(netSF(netBB(images)))) 231 | if start_test: 232 | all_output = outputs.float().cpu() 233 | all_label = labels.float() 234 | start_test = False 235 | else: 236 | all_output = torch.cat((all_output, outputs.float().cpu()), 0) 237 | all_label = torch.cat((all_label, labels.float()), 0) 238 | classifier_loss = CrossEntropyLabelSmooth(num_classes=args.sticker_num, weights = weights, epsilon=args.smooth, reduction=True)(outputs, labels) 239 | 240 | #mixed_up_classifier_loss = CrossEntropyLabelSmooth(num_classes=args.sticker_num, epsilon=args.smooth)(mixed_up_outputs, mixed_up_labels) 241 | 242 | optimizer.zero_grad() 243 | classifier_loss.backward() 244 | optimizer.step() 245 | 246 | accumulated_train_loss.update(classifier_loss.item()) 247 | 248 | train_data_bar.set_description("Train: Steps:{} , Loss:{:.4f}".format(iter_num, accumulated_train_loss.avg)) 249 | if ((iter_num) % 500 == 0) or (iter_num == max_iter): 250 | start_test = True 251 | all_output = nn.Softmax(dim=1)(all_output) 252 | _, predict = torch.max(all_output, 1) 253 | 254 | all_label = all_label.cpu() 255 | matrix = confusion_matrix(all_label, torch.squeeze(predict).float()) 256 | matrix = matrix[np.unique(all_label).astype(int),:] 257 | 258 | acc = matrix.diagonal()/matrix.sum(axis=1) * 100 259 | train_ood_acc = acc[-1:].item() 260 | train_class_acc = acc[:-1] 261 | train_mean_acc = np.mean(acc) 262 | train_class_mean_acc = np.mean(acc[:-1]) 263 | netSF.eval() 264 | netSB.eval() 265 | netSC.eval() 266 | 267 | if args.dset=='VISDA-C': 268 | acc_s_te, acc_list = cal_acc(args, dset_loaders['val_images'], dset_loaders['train_mixup_stickers'], netBB, netSF, netSB, netSC) 269 | 270 | else: 271 | args.model = "Val" 272 | val_loss_acc = cal_acc_oda(args, dset_loaders['val_images'], 273 | 
dset_loaders['train_mixup_stickers'], dset_loaders['train_bg_images'], netBB, netSF, netSB, netSC, weights) 274 | 275 | args.model = "Test" 276 | test_loss_acc = cal_acc_oda(args, dset_loaders['test_images'], 277 | dset_loaders['train_mixup_stickers'], dset_loaders['train_bg_images'], netBB, netSF, netSB, netSC, weights) 278 | 279 | # Training Logs 280 | args.logger.info("Train : Step:[{}/{}] , Loss:{}, Per_Class_Acc:{}, Class_mean_acc:{}, OOD_Acc:{}, Mean_Acc:{}".format(iter_num , max_iter, 281 | accumulated_train_loss.avg, train_class_acc, train_class_mean_acc, train_ood_acc, train_mean_acc)) 282 | 283 | # Validation Logs 284 | args.logger.info("Val: Step:[{}/{}], Per_Class_Acc:{} , Class_mean_Acc:{}, OOD_Acc:{}, Mean_Acc: {}".format(iter_num , max_iter, val_loss_acc[0], 285 | val_loss_acc[1], val_loss_acc[2], val_loss_acc[3])) 286 | 287 | # Test Logs 288 | args.logger.info("Test: Per_Class_Acc:{} , Class_mean_Acc:{}, OOD_Acc:{}, Mean_Acc: {}".format(test_loss_acc[0], 289 | test_loss_acc[1], test_loss_acc[2], test_loss_acc[3])) 290 | 291 | 292 | if val_loss_acc[3] >= acc_init: 293 | acc_init = val_loss_acc[3] 294 | args.logger.info("Update Best Mean_Acc: {}".format(acc_init)) 295 | best_netBB = netBB.state_dict() 296 | best_netSF = netSF.state_dict() 297 | best_netSB = netSB.state_dict() 298 | best_netSC = netSC.state_dict() 299 | 300 | torch.save({'netBB':best_netBB, 301 | 'netSF':best_netSF, 302 | 'netSB':best_netSB, 303 | 'netSC':best_netSC}, osp.join(args.output_dir_src, "model.pt")) 304 | accumulated_train_loss = AverageMeter() 305 | 306 | netSF.train() 307 | netSB.train() 308 | netSC.train() 309 | 310 | 311 | def print_args(args): 312 | s = "==========================================\n" 313 | for arg, content in args.__dict__.items(): 314 | s += "{}:{}\n".format(arg, content) 315 | return s 316 | 317 | if __name__ == "__main__": 318 | parser = argparse.ArgumentParser(description='SHOT') 319 | parser.add_argument('--gpu_id', type=str, nargs='?', default='1', 
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='SHOT')
    parser.add_argument('--gpu_id', type=str, nargs='?', default='1', help="device id to run")
    parser.add_argument('--s', type=int, default=3, help="source")
    parser.add_argument('--t', type=int, default=1, help="target")
    parser.add_argument('--max_epoch', type=int, default=1500, help="max iterations")
    parser.add_argument('--batch_size', type=int, default=64, help="batch_size of clean images")
    parser.add_argument('--batch_size_bg', type=int, default=32, help="batch_size of background images")
    parser.add_argument('--worker', type=int, default=4, help="number of workers")
    parser.add_argument('--dset', type=str, default='office-home', choices=['VISDA-C', 'office-31', 'office-home', 'office-caltech'])
    parser.add_argument('--lr', type=float, default=1e-2, help="learning rate")
    parser.add_argument('--net', type=str, default='resnet50', help="vgg16, resnet50, resnet101")
    parser.add_argument('--seed', type=int, default=2020, help="random seed")
    parser.add_argument('--bottleneck', type=int, default=256)
    parser.add_argument('--epsilon', type=float, default=1e-5)
    parser.add_argument('--layer', type=str, default="wn", choices=["linear", "wn"])
    parser.add_argument('--classifier', type=str, default="bn", choices=["ori", "bn"])
    parser.add_argument('--smooth', type=float, default=0.1)
    parser.add_argument('--output', type=str, default='san')
    parser.add_argument('--da', type=str, default='uda', choices=['uda'])
    parser.add_argument('--trte', type=str, default='val', choices=['full', 'val'])
    parser.add_argument('--task_1_path', type=str, default='../data/textured/Task_1_mixup.txt')

    args = parser.parse_args()

    # Per-dataset domain names and main-task class counts.
    if args.dset == 'office-home':
        names = ['Art', 'Clipart', 'Product', 'RealWorld']
        args.class_num = 65
    if args.dset == 'office-31':
        names = ['amazon', 'dslr', 'webcam']
        args.class_num = 31
    if args.dset == 'VISDA-C':
        names = ['train', 'validation']
        args.class_num = 12
    if args.dset == 'office-caltech':
        names = ['amazon', 'caltech', 'dslr', 'webcam']
        args.class_num = 10
    args.sticker_num = 11  # 10 sticker classes + 1 OOD/background class
    args.names = names
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_id
    SEED = args.seed
    torch.manual_seed(SEED)
    torch.cuda.manual_seed(SEED)
    np.random.seed(SEED)
    random.seed(SEED)

    folder = '../data/'
    args.s_dset_path = folder + args.dset + '/' + names[args.s] + '_list.txt'

    args.test_dset_path = folder + args.dset + '/' + names[args.t] + '_list.txt'

    args.task_1_classes = "../data/textured/Task_1_classes.txt"
    args.task_3_classes = "../data/" + args.dset + "/classes.txt"

    if args.dset == 'office-home':

        if args.da == 'oda':
            args.class_num = 25
            args.src_classes = [i for i in range(25)]
            args.tar_classes = [i for i in range(65)]

    args.output_dir_src = osp.join('Checkpoint_Sticker', args.dset, names[args.s][0].upper() + '2' + names[args.t][0].upper())
    # Bugfix: replaces `os.system('mkdir -p ' + path)` (shell-dependent and
    # unsafe for paths with shell metacharacters) plus a redundant os.mkdir
    # fallback with a single portable call.
    os.makedirs(args.output_dir_src, exist_ok=True)

    log_file = args.output_dir_src + '/log.txt'
    args.logger = get_logger('__train__', log_file)
    args.stored_model_dir_src = osp.join('Checkpoint_main', args.dset, names[args.s][0].upper() + '2' + names[args.t][0].upper())

    train_source(args)
from data_list import ImageList, ImageList_idx, StickerList, mixup_batch
import random, pdb, math, copy
from tqdm import tqdm
from logger import get_logger
from scipy.spatial.distance import cdist
from sklearn.metrics import confusion_matrix
from loss import CrossEntropyLabelSmooth

# NOTE(review): this star import is load-bearing — `F` (torch.nn.functional)
# and `sticker_train()` used below are expected to come from utils. Confirm.
from utils import *


def op_copy(optimizer):
    """Stash each param group's initial lr as 'lr0' so lr_scheduler can decay from it."""
    for param_group in optimizer.param_groups:
        param_group['lr0'] = param_group['lr']
    return optimizer


def lr_scheduler(optimizer, iter_num, max_iter, gamma=10, power=0.75):
    """Polynomial decay lr = lr0 * (1 + gamma*t/T)^(-power); also re-pins SGD hyper-params."""
    decay = (1 + gamma * iter_num / max_iter) ** (-power)
    for param_group in optimizer.param_groups:
        param_group['lr'] = param_group['lr0'] * decay
        param_group['weight_decay'] = 1e-3
        param_group['momentum'] = 0.9
        param_group['nesterov'] = True
    return optimizer


def image_train(resize_size=256, crop_size=224, alexnet=False):
    """Training transform: resize -> random crop -> random h-flip -> tensor -> normalize."""
    if not alexnet:
        normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                         std=[0.229, 0.224, 0.225])
    else:
        # NOTE(review): `Normalize(meanfile=...)` is not defined/imported in this
        # file — this branch raises NameError if alexnet=True. Confirm intent.
        normalize = Normalize(meanfile='./ilsvrc_2012_mean.npy')
    return transforms.Compose([
        transforms.Resize((resize_size, resize_size)),
        transforms.RandomCrop(crop_size),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        normalize
    ])


def image_test(resize_size=256, crop_size=224, alexnet=False):
    """Eval transform: resize -> center crop -> tensor -> normalize (no augmentation)."""
    if not alexnet:
        normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                         std=[0.229, 0.224, 0.225])
    else:
        # NOTE(review): same undefined `Normalize` as in image_train — see above.
        normalize = Normalize(meanfile='./ilsvrc_2012_mean.npy')
    return transforms.Compose([
        transforms.Resize((resize_size, resize_size)),
        transforms.CenterCrop(crop_size),
        transforms.ToTensor(),
        normalize
    ])


def data_load(args):
    """Build the target-train, target-test and sticker DataLoaders from args' list files."""
    dset_loaders = {}
    train_bs = args.batch_size  # kept for parity with sibling scripts (unused below)

    with open(args.test_dset_path) as f_test:
        txt_test = f_test.readlines()
    with open(args.task_1_path) as f_t1:
        task_1_list = f_t1.readlines()

    target_dataset = ImageList_idx(txt_test, transform=image_train())
    dset_loaders["target_images"] = DataLoader(target_dataset, batch_size=args.batch_size, shuffle=True,
                                               num_workers=args.worker, drop_last=False, pin_memory=False)

    test_dataset = ImageList_idx(txt_test, transform=image_test())
    dset_loaders["test"] = DataLoader(test_dataset, batch_size=args.batch_size, shuffle=False,
                                      num_workers=args.worker, drop_last=False, pin_memory=False)

    # Sticker images get mixed into the second half of each target batch, hence
    # the half-size batches; length is matched to the target set.
    train_sticker_dataset = StickerList(task_1_list, transform=sticker_train(), mode="RGBA",
                                        specified_len=len(target_dataset))
    dset_loaders["train_mixup_stickers"] = DataLoader(train_sticker_dataset, batch_size=(args.batch_size // 2),
                                                      shuffle=True, num_workers=0, drop_last=True,
                                                      pin_memory=False)

    return dset_loaders


def cal_acc(clean_loader, netBB, netMF, netMB, netMC, flag=False):
    """Evaluate the main branch (BB->MF->MB->MC) on `clean_loader`.

    Returns (accuracy in percent, mean prediction entropy). `flag` is accepted
    for signature parity with sibling scripts and unused here.
    """
    start_test = True
    with torch.no_grad():
        data_bar = tqdm(range(len(clean_loader)))
        for i in data_bar:
            try:
                # FIX: iterator.next() is Python-2 only; DataLoader iterators
                # expose __next__, so use the builtin next().
                clean_images, clean_labels, _ = next(iter_clean)
            except:
                # bare except intentionally catches both the first-pass NameError
                # (iter_clean unbound) and StopIteration to restart the epoch
                iter_clean = iter(clean_loader)
                clean_images, clean_labels, _ = next(iter_clean)

            data_bar.set_description("MainBranch : Step:{}".format(i))
            clean_images1, clean_labels1 = clean_images.cuda(), clean_labels.cuda()

            clean_outputs = netMC(netMB(netMF(netBB(clean_images1))))
            if start_test:
                all_output_clean = clean_outputs.float().cpu()
                all_label_clean = clean_labels1.float()
                start_test = False
            else:
                all_output_clean = torch.cat((all_output_clean, clean_outputs.float().cpu()), 0)
                all_label_clean = torch.cat((all_label_clean, clean_labels1.float()), 0)
        _, predict_clean = torch.max(all_output_clean, 1)
        predict_clean = predict_clean.cuda()
        accuracy_clean = torch.sum(torch.squeeze(predict_clean).float() == all_label_clean).item() / float(all_label_clean.size()[0])
        mean_ent_clean = torch.mean(loss.Entropy(nn.Softmax(dim=1)(all_output_clean))).cpu().data.item()

    return accuracy_clean * 100, mean_ent_clean


def train_target(args):
    """Source-free adaptation of the main branch with an NRC-style neighborhood
    consistency loss, plus a supervised sticker-branch loss on mixed-up batches.

    Loads the source-trained main branch (netBB/MF/MB/MC) and sticker branch
    (netSF/SB/SC), maintains feature/score banks over the target set, and saves
    the best checkpoint (by clean target accuracy) to args.output_dir_src.
    """
    dset_loaders = data_load(args)
    ## set base network
    if args.net[0:3] == 'res':
        netBB = network.ResBase(res_name=args.net).cuda()
        netMF = network.ResBase_Layer4(res_name=args.net).cuda()
        netSF = network.ResBase_Layer4(res_name=args.net).cuda()

    netMB = network.feat_bootleneck(type=args.classifier, feature_dim=netMF.in_features, bottleneck_dim=args.bottleneck).cuda()
    netSB = network.feat_bootleneck(type=args.classifier, feature_dim=netSF.in_features, bottleneck_dim=args.bottleneck).cuda()
    netMC = network.feat_classifier(type=args.layer, class_num=args.class_num, bottleneck_dim=args.bottleneck).cuda()
    netSC = network.feat_classifier(type=args.layer, class_num=args.sticker_num, bottleneck_dim=args.bottleneck).cuda()

    modelpath = args.stored_model_dir_src_main + '/model.pt'
    netBB.load_state_dict(torch.load(modelpath)['netBB'])
    netMB.load_state_dict(torch.load(modelpath)['netMB'])
    netMF.load_state_dict(torch.load(modelpath)['netMF'])
    netMC.load_state_dict(torch.load(modelpath)['netMC'])
    modelpath = args.stored_model_dir_src_st + '/model.pt'
    netSB.load_state_dict(torch.load(modelpath)['netSB'])
    netSF.load_state_dict(torch.load(modelpath)['netSF'])
    netSC.load_state_dict(torch.load(modelpath)['netSC'])

    # Shared backbone gets 10x smaller lr; note only the BN affine parameters of
    # the two bottlenecks are optimized (not their Linear layers).
    optimizer = optim.SGD(
        [
            {'params': netBB.parameters(), 'lr': args.lr * .1},   # 1
            {'params': netMF.parameters(), 'lr': args.lr * 1},    # 10
            {'params': netMB.bn.parameters(), 'lr': args.lr * 1}, # 10
            {'params': netMC.parameters(), 'lr': args.lr * 1},    # 10
            {'params': netSF.parameters(), 'lr': args.lr * 1},    # 10
            {'params': netSB.bn.parameters(), 'lr': args.lr * 1}, # 10
            {'params': netSC.parameters(), 'lr': args.lr * 1}     # 10
        ],
        momentum=0.9,
        weight_decay=5e-4,
        nesterov=True)
    optimizer = op_copy(optimizer)

    max_iter = args.max_epoch * len(dset_loaders["target_images"])
    interval_iter = max_iter // args.interval  # kept for parity; eval below uses a fixed 100-step period
    iter_num = 0
    acc_init = 0

    loader = dset_loaders["target_images"]
    num_sample = len(loader.dataset)
    fea_bank = torch.randn(num_sample, 256)
    score_bank = torch.randn(num_sample, args.class_num).cuda()

    # Initialize the feature / score banks with a full pass over the target set.
    netBB.eval()
    netMF.eval()
    netMB.eval()
    netMC.eval()
    with torch.no_grad():
        iter_test = iter(loader)
        for i in range(len(loader)):
            data = next(iter_test)  # FIX: was iter_test.next() (Python 2 idiom)
            inputs = data[0]
            indx = data[-1]
            # labels = data[1]
            inputs = inputs.cuda()
            output = netMB(netMF(netBB(inputs)))  # a^t
            output_norm = F.normalize(output)
            outputs = netMC(output)
            outputs = nn.Softmax(-1)(outputs)
            fea_bank[indx] = output_norm.detach().clone().cpu()
            score_bank[indx] = outputs.detach().clone()  # .cpu()

    netBB.train()
    netMF.train()
    netMB.train()
    netMC.train()
    train_data_bar = tqdm(range(max_iter))
    for step_i in train_data_bar:
        # if iter_num > max_iter:
        #     break
        if iter_num > 0.5 * max_iter:
            args.K = 5
            args.KK = 4

        netBB.train()
        netMF.train()
        netMB.train()
        netMC.train()
        netSF.train()
        netSB.train()
        netSC.train()

        try:
            test_images, _, tar_idx = next(iter_test)  # FIX: .next() -> next()
        except:
            iter_test = iter(dset_loaders["target_images"])
            test_images, _, tar_idx = next(iter_test)
        try:
            sticker_images, sticker_labels = next(iter_sticker)  # FIX: .next() -> next()
        except:
            iter_sticker = iter(dset_loaders["train_mixup_stickers"])
            sticker_images, sticker_labels = next(iter_sticker)

        if test_images.size(0) == 1:
            continue  # BatchNorm cannot run on a single-sample batch

        iter_num += 1

        # First half of the batch stays clean; second half gets stickers pasted in.
        test_images1, tar_idx1 = test_images.cuda()[:len(test_images) // 2], tar_idx[:len(test_images) // 2]
        test_mixup_images = mixup_batch(test_images[len(test_images) // 2:], sticker_images).cuda()
        test_mixup_labels_SB, test_mixup_tar_MB = sticker_labels[:len(test_images) // 2].cuda(), tar_idx[len(test_images) // 2:]

        MB_images = torch.cat([test_images1, test_mixup_images])
        MB_idx = torch.cat([tar_idx1, test_mixup_tar_MB])

        SB_images = test_mixup_images
        SB_labels = test_mixup_labels_SB

        # lr_scheduler(optimizer, iter_num=iter_num, max_iter=max_iter)  # decay disabled

        features_test = netMB(netMF(netBB(MB_images)))
        output_test = netMC(features_test)
        softmax_out = nn.Softmax(dim=1)(output_test)
        output_re = softmax_out.unsqueeze(1)  # overwritten below before use

        SB_output = netSC(netSB(netSF(netBB(SB_images))))

        # Neighborhood retrieval (no_grad block reconstructed following the NRC
        # reference implementation this file mirrors).
        with torch.no_grad():
            # NOTE(review): SB_loss is computed under no_grad, so it carries no
            # grad_fn — the sticker branch receives NO gradient from it even
            # though its parameters are in the optimizer. Confirm this is intended.
            SB_loss = CrossEntropyLabelSmooth(num_classes=args.sticker_num, epsilon=args.smooth, reduction=True)(SB_output, SB_labels).cuda()

            output_f_norm = F.normalize(features_test)
            output_f_ = output_f_norm.cpu().detach().clone()

            fea_bank[tar_idx] = output_f_.detach().clone().cpu()
            score_bank[tar_idx] = softmax_out.detach().clone()

            distance = output_f_ @ fea_bank.T
            _, idx_near = torch.topk(distance,
                                     dim=-1,
                                     largest=True,
                                     k=args.K + 1)
            idx_near = idx_near[:, 1:]  # batch x K (drop self-match)
            score_near = score_bank[idx_near]  # batch x K x C
            # score_near = score_near.permute(0,2,1)

            fea_near = fea_bank[idx_near]  # batch x K x num_dim
            fea_bank_re = fea_bank.unsqueeze(0).expand(fea_near.shape[0], -1, -1)  # batch x n x dim
            distance_ = torch.bmm(fea_near, fea_bank_re.permute(0, 2, 1))  # batch x K x n
            _, idx_near_near = torch.topk(distance_, dim=-1, largest=True,
                                          k=args.KK + 1)  # M near neighbors for each of above K ones
            idx_near_near = idx_near_near[:, :, 1:]  # batch x K x M
            tar_idx_ = tar_idx.unsqueeze(-1).unsqueeze(-1)
            match = (idx_near_near == tar_idx_).sum(-1).float()  # batch x K
            weight = torch.where(
                match > 0., match,
                torch.ones_like(match).fill_(0.1))  # batch x K

            weight_kk = weight.unsqueeze(-1).expand(-1, -1, args.KK)  # batch x K x M

            # weight_kk[idx_near_near == tar_idx_] = 0

            score_near_kk = score_bank[idx_near_near]  # batch x K x M x C
            weight_kk = weight_kk.contiguous().view(weight_kk.shape[0], -1)  # batch x KM
            weight_kk = weight_kk.fill_(0.1)  # NOTE: overrides the match-based weights above
            score_near_kk = score_near_kk.contiguous().view(
                score_near_kk.shape[0], -1, args.class_num)  # batch x KM x C

        # nn of nn
        output_re = softmax_out.unsqueeze(1).expand(-1, args.K * args.KK, -1)  # batch x KM x C
        const = torch.mean(
            (F.kl_div(output_re, score_near_kk, reduction='none').sum(-1) *
             weight_kk.cuda()).sum(1))
        loss = torch.mean(const)  # * 0.5

        # nn
        softmax_out_un = softmax_out.unsqueeze(1).expand(-1, args.K, -1)  # batch x K x C
        loss += torch.mean(
            (F.kl_div(softmax_out_un, score_near, reduction='none').sum(-1) *
             weight.cuda()).sum(1))

        # diversity term: penalize a collapsed mean prediction
        msoftmax = softmax_out.mean(dim=0)
        im_div = torch.sum(msoftmax * torch.log(msoftmax + 1e-5))
        loss += im_div

        loss += (args.st_wt * SB_loss)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        train_data_bar.set_description("Train : Step:{}, MB_class_loss:{:.4f}, SB_class_loss:{:.4f}".format(iter_num, loss, SB_loss))
        if (iter_num % 100) == 0 or iter_num == max_iter:
            netBB.eval()
            netMF.eval()
            netMB.eval()
            netMC.eval()
            netSF.eval()
            netSB.eval()
            netSC.eval()

            acc, ent = cal_acc(dset_loaders['test'], netBB, netMF, netMB, netMC, False)

            args.logger.info('Step: [{}/{}], Clean_Adapt_Acc.:{:.4f}'.format(iter_num, max_iter, acc))

            if acc >= acc_init:
                acc_init = acc
                args.logger.info("Update Best Acc: {}".format(acc_init))
                best_netBB = netBB.state_dict()
                best_netMB = netMB.state_dict()
                best_netMF = netMF.state_dict()
                best_netMC = netMC.state_dict()
                best_netSF = netSF.state_dict()
                best_netSB = netSB.state_dict()
                best_netSC = netSC.state_dict()

                torch.save({'netBB': best_netBB,
                            'netMF': best_netMF,
                            'netMB': best_netMB,
                            'netMC': best_netMC,
                            'netSF': best_netSF,
                            'netSB': best_netSB,
                            'netSC': best_netSC}, osp.join(args.output_dir_src, "model.pt"))


def print_args(args):
    """Render all parsed args as a 'key:value' block for logging."""
    s = "==========================================\n"
    for arg, content in args.__dict__.items():
        s += "{}:{}\n".format(arg, content)
    return s


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='SHOT')
    parser.add_argument('--gpu_id', type=str, nargs='?', default='3', help="device id to run")
    parser.add_argument('--s', type=int, default=3, help="source")
    parser.add_argument('--t', type=int, default=1, help="target")
    parser.add_argument('--max_epoch', type=int, default=300, help="max iterations")
    parser.add_argument('--interval', type=int, default=15)
    parser.add_argument('--batch_size', type=int, default=64, help="batch_size")
    parser.add_argument('--worker', type=int, default=4, help="number of workers")
    parser.add_argument('--dset', type=str, default='office-home', choices=['VISDA-C', 'office', 'office-home', 'office-caltech'])
    parser.add_argument('--lr', type=float, default=1e-3, help="learning rate")
    parser.add_argument('--net', type=str, default='resnet50', help="alexnet, vgg16, resnet50, res101")
    parser.add_argument('--seed', type=int, default=2020, help="random seed")

    parser.add_argument('--gent', type=bool, default=True)
    parser.add_argument('--ent', type=bool, default=True)
    parser.add_argument('--threshold', type=int, default=0)
    parser.add_argument('--cls_par', type=float, default=0.3)
    parser.add_argument('--ent_par', type=float, default=1.0)
    parser.add_argument('--lr_decay1', type=float, default=0.1)
    parser.add_argument('--lr_decay2', type=float, default=1.0)
    parser.add_argument('--smooth', type=float, default=0.1)
    parser.add_argument('--k', type=int, default=2, help="number of neighborhoods")
    parser.add_argument('--K', type=int, default=4)
    parser.add_argument('--KK', type=int, default=3)
    parser.add_argument('--st_wt', type=int, default=1)

    parser.add_argument('--bottleneck', type=int, default=256)
    parser.add_argument('--epsilon', type=float, default=1e-5)
    parser.add_argument('--layer', type=str, default="wn", choices=["linear", "wn"])
    parser.add_argument('--classifier', type=str, default="bn", choices=["ori", "bn"])
    parser.add_argument('--distance', type=str, default='cosine', choices=["euclidean", "cosine"])
    parser.add_argument('--output', type=str, default='san')
    parser.add_argument('--output_src', type=str, default='san')
    parser.add_argument('--da', type=str, default='uda', choices=['uda', 'pda'])
    parser.add_argument('--issave', type=bool, default=True)
    parser.add_argument('--task_1_path', type=str, default='../data/textured/Task_1_mixup.txt')
    args = parser.parse_args()

    if args.dset == 'office-home':
        names = ['Art', 'Clipart', 'Product', 'RealWorld']
        args.class_num = 65
    if args.dset == 'office':
        names = ['amazon', 'dslr', 'webcam']
        args.class_num = 31
    if args.dset == 'VISDA-C':
        names = ['train', 'validation']
        args.class_num = 12
    if args.dset == 'office-caltech':
        names = ['amazon', 'caltech', 'dslr', 'webcam']
        args.class_num = 10
    args.sticker_num = 11
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_id
    SEED = args.seed
    torch.manual_seed(SEED)
    torch.cuda.manual_seed(SEED)
    np.random.seed(SEED)
    random.seed(SEED)

    folder = '../data/'
    args.t_dset_path = folder + args.dset + '/' + names[args.t] + '_list.txt'
    args.test_dset_path = folder + args.dset + '/' + names[args.t] + '_list.txt'

    args.task_1_classes = "../data/textured/Task_1_classes.txt"
    if args.dset == 'office-home':
        if args.da == 'pda':
            args.class_num = 65
            args.src_classes = [i for i in range(65)]
            args.tar_classes = [i for i in range(25)]

    args.output_dir_src = osp.join('Ckpt_Adapt_unf', args.dset, names[args.s].upper()[0] + '2' + names[args.t].upper()[0])
    if not osp.exists(args.output_dir_src):
        os.system('mkdir -p ' + args.output_dir_src)
    if not osp.exists(args.output_dir_src):
        os.mkdir(args.output_dir_src)

    log_file = args.output_dir_src + '/log.txt'
    args.logger = get_logger('__train__', log_file)

    args.stored_model_dir_src_main = osp.join('Checkpoint_main', args.dset, names[args.s].upper()[0] + '2' + names[args.t].upper()[0])
    args.stored_model_dir_src = osp.join('Checkpoint_Sticker', args.dset, names[args.s].upper()[0] + '2' + names[args.t].upper()[0])
    # FIX: train_target() loads the sticker branch from args.stored_model_dir_src_st,
    # which was never set here (AttributeError at runtime). Point it at the
    # sticker checkpoint directory.
    args.stored_model_dir_src_st = args.stored_model_dir_src

    train_target(args)


# ============================================================
# /SSDA_OH/logger.py
# ============================================================
import logging
import sys


def get_logger(name, log_file=None):
    """Return an INFO-level logger writing to stdout (and optionally a file).

    (Definition continues in the next chunk of this dump.)
    """
    logger = logging.getLogger(name)
    logger.setLevel(logging.INFO)

    formatter = logging.Formatter('')

    stdhandler = logging.StreamHandler(sys.stdout)
    stdhandler.setFormatter(formatter)
logger.addHandler(stdhandler) 14 | 15 | if log_file is not None: 16 | file_handler = logging.FileHandler(log_file) 17 | file_handler.setFormatter(formatter) 18 | 19 | logger.addHandler(file_handler) 20 | 21 | return logger 22 | -------------------------------------------------------------------------------- /SSDA_OH/loss.py: -------------------------------------------------------------------------------- 1 | #from _typeshed import SupportsReadline 2 | import numpy as np 3 | import torch 4 | import torch.nn as nn 5 | from torch.autograd import Variable 6 | import math 7 | import torch.nn.functional as F 8 | import pdb 9 | 10 | def Entropy(input_): 11 | bs = input_.size(0) 12 | epsilon = 1e-5 13 | entropy = -input_ * torch.log(input_ + epsilon) 14 | entropy = torch.sum(entropy, dim=1) 15 | return entropy 16 | 17 | def grl_hook(coeff): 18 | def fun1(grad): 19 | return -coeff*grad.clone() 20 | return fun1 21 | 22 | def CDAN(input_list, ad_net, entropy=None, coeff=None, random_layer=None): 23 | softmax_output = input_list[1].detach() 24 | feature = input_list[0] 25 | if random_layer is None: 26 | op_out = torch.bmm(softmax_output.unsqueeze(2), feature.unsqueeze(1)) 27 | ad_out = ad_net(op_out.view(-1, softmax_output.size(1) * feature.size(1))) 28 | else: 29 | random_out = random_layer.forward([feature, softmax_output]) 30 | ad_out = ad_net(random_out.view(-1, random_out.size(1))) 31 | batch_size = softmax_output.size(0) // 2 32 | dc_target = torch.from_numpy(np.array([[1]] * batch_size + [[0]] * batch_size)).float().cuda() 33 | if entropy is not None: 34 | entropy.register_hook(grl_hook(coeff)) 35 | entropy = 1.0+torch.exp(-entropy) 36 | source_mask = torch.ones_like(entropy) 37 | source_mask[feature.size(0)//2:] = 0 38 | source_weight = entropy*source_mask 39 | target_mask = torch.ones_like(entropy) 40 | target_mask[0:feature.size(0)//2] = 0 41 | target_weight = entropy*target_mask 42 | weight = source_weight / torch.sum(source_weight).detach().item() + \ 43 | 
target_weight / torch.sum(target_weight).detach().item() 44 | return torch.sum(weight.view(-1, 1) * nn.BCELoss(reduction='none')(ad_out, dc_target)) / torch.sum(weight).detach().item() 45 | else: 46 | return nn.BCELoss()(ad_out, dc_target) 47 | 48 | def DANN(features, ad_net): 49 | ad_out = ad_net(features) 50 | batch_size = ad_out.size(0) // 2 51 | dc_target = torch.from_numpy(np.array([[1]] * batch_size + [[0]] * batch_size)).float().cuda() 52 | return nn.BCELoss()(ad_out, dc_target) 53 | 54 | 55 | class CrossEntropyLabelSmooth(nn.Module): 56 | """Cross entropy loss with label smoothing regularizer. 57 | Reference: 58 | Szegedy et al. Rethinking the Inception Architecture for Computer Vision. CVPR 2016. 59 | Equation: y = (1 - epsilon) * y + epsilon / K. 60 | Args: 61 | num_classes (int): number of classes. 62 | epsilon (float): weight. 63 | """ 64 | 65 | def __init__(self, num_classes, weights=None, epsilon=0.1, use_gpu=True, reduction=True): 66 | super(CrossEntropyLabelSmooth, self).__init__() 67 | self.num_classes = num_classes 68 | self.epsilon = epsilon 69 | self.use_gpu = use_gpu 70 | self.reduction = reduction 71 | self.logsoftmax = nn.LogSoftmax(dim=1) 72 | self.weights = weights 73 | 74 | def forward(self, inputs, targets): 75 | """ 76 | Args: 77 | inputs: prediction matrix (before softmax) with shape (batch_size, num_classes) 78 | targets: ground truth labels with shape (num_classes) 79 | """ 80 | log_probs = self.logsoftmax(inputs) 81 | targets = torch.zeros(log_probs.size()).scatter_(1, targets.unsqueeze(1).cpu(), 1) 82 | if self.use_gpu: targets = targets.cuda() 83 | targets = (1 - self.epsilon) * targets + self.epsilon / self.num_classes 84 | loss = (- targets * log_probs) 85 | if self.weights is not None: 86 | loss = loss * self.weights 87 | if self.reduction: 88 | loss = loss.sum(dim=1) 89 | return loss.mean() 90 | else: 91 | return loss 92 | #return loss 93 | 94 | class CrossEntropyWeightedLabelSmooth(nn.Module): 95 | """Cross entropy loss with 
label smoothing regularizer. 96 | Reference: 97 | Szegedy et al. Rethinking the Inception Architecture for Computer Vision. CVPR 2016. 98 | Equation: y = (1 - epsilon) * y + epsilon / K. 99 | Args: 100 | num_classes (int): number of classes. 101 | epsilon (float): weight. 102 | """ 103 | 104 | def __init__(self, num_classes, weights=None, epsilon=0.1, use_gpu=True, reduction=True): 105 | super(CrossEntropyLabelSmooth, self).__init__() 106 | self.num_classes = num_classes 107 | self.epsilon = epsilon 108 | self.use_gpu = use_gpu 109 | self.reduction = reduction 110 | self.logsoftmax = nn.LogSoftmax(dim=1) 111 | self.weights = weights 112 | if len(self.weights) == self.num_classes: 113 | raise AssertionError("Len of Weights should be equal to num_classes") 114 | 115 | def forward(self, inputs, targets): 116 | """ 117 | Args: 118 | inputs: prediction matrix (before softmax) with shape (batch_size, num_classes) 119 | targets: ground truth labels with shape (num_classes) 120 | """ 121 | log_probs = self.logsoftmax(inputs) 122 | targets = torch.zeros(log_probs.size()).scatter_(1, targets.unsqueeze(1).cpu(), 1) 123 | if self.use_gpu: targets = targets.cuda() 124 | targets = (1 - self.epsilon) * targets + self.epsilon / self.num_classes 125 | if self.weights: 126 | for i in range(targets.size(0)): 127 | try: 128 | for j in range(targets.size(1)): 129 | if targets[i][j] == 1: 130 | targets[i] *= self.weights[j] 131 | except: 132 | pass 133 | loss = (- targets * log_probs).sum(dim=1) 134 | if self.reduction: 135 | return loss.mean() 136 | else: 137 | return loss 138 | return losss -------------------------------------------------------------------------------- /SSDA_OH/network.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch 3 | import torch.nn as nn 4 | import torchvision 5 | from torchvision import models 6 | from torch.autograd import Variable 7 | import math 8 | import torch.nn.utils.weight_norm 
as weightNorm 9 | from collections import OrderedDict 10 | 11 | def calc_coeff(iter_num, high=1.0, low=0.0, alpha=10.0, max_iter=10000.0): 12 | return np.float(2.0 * (high - low) / (1.0 + np.exp(-alpha*iter_num / max_iter)) - (high - low) + low) 13 | 14 | def init_weights(m): 15 | classname = m.__class__.__name__ 16 | if classname.find('Conv2d') != -1 or classname.find('ConvTranspose2d') != -1: 17 | nn.init.kaiming_uniform_(m.weight) 18 | nn.init.zeros_(m.bias) 19 | elif classname.find('BatchNorm') != -1: 20 | nn.init.normal_(m.weight, 1.0, 0.02) 21 | nn.init.zeros_(m.bias) 22 | elif classname.find('Linear') != -1: 23 | nn.init.xavier_normal_(m.weight) 24 | nn.init.zeros_(m.bias) 25 | 26 | vgg_dict = {"vgg11":models.vgg11, "vgg13":models.vgg13, "vgg16":models.vgg16, "vgg19":models.vgg19, 27 | "vgg11bn":models.vgg11_bn, "vgg13bn":models.vgg13_bn, "vgg16bn":models.vgg16_bn, "vgg19bn":models.vgg19_bn} 28 | class VGGBase(nn.Module): 29 | def __init__(self, vgg_name): 30 | super(VGGBase, self).__init__() 31 | model_vgg = vgg_dict[vgg_name](pretrained=True) 32 | self.features = model_vgg.features 33 | self.classifier = nn.Sequential() 34 | for i in range(6): 35 | self.classifier.add_module("classifier"+str(i), model_vgg.classifier[i]) 36 | self.in_features = model_vgg.classifier[6].in_features 37 | 38 | def forward(self, x): 39 | x = self.features(x) 40 | x = x.view(x.size(0), -1) 41 | x = self.classifier(x) 42 | return x 43 | 44 | res_dict = {"resnet18":models.resnet18, "resnet34":models.resnet34, "resnet50":models.resnet50, 45 | "resnet101":models.resnet101, "resnet152":models.resnet152, "resnext50":models.resnext50_32x4d, "resnext101":models.resnext101_32x8d} 46 | 47 | class ResBase(nn.Module): 48 | def __init__(self, res_name): 49 | super(ResBase, self).__init__() 50 | model_resnet = res_dict[res_name](pretrained=True) 51 | self.conv1 = model_resnet.conv1 52 | self.bn1 = model_resnet.bn1 53 | self.relu = model_resnet.relu 54 | self.maxpool = model_resnet.maxpool 55 | 
self.layer1 = model_resnet.layer1 56 | self.layer2 = model_resnet.layer2 57 | self.layer3 = model_resnet.layer3 58 | # self.layer4 = model_resnet.layer4 59 | # self.avgpool = model_resnet.avgpool 60 | # self.in_features = model_resnet.fc.in_features 61 | 62 | def forward(self, x): 63 | x = self.conv1(x) 64 | x = self.bn1(x) 65 | x = self.relu(x) 66 | x = self.maxpool(x) 67 | x = self.layer1(x) 68 | x = self.layer2(x) 69 | x = self.layer3(x) 70 | return x 71 | 72 | class ResBase_Layer4(nn.Module): 73 | def __init__(self, res_name): 74 | super(ResBase_Layer4, self).__init__() 75 | model_resnet = res_dict[res_name](pretrained=True) 76 | 77 | self.layer4 = model_resnet.layer4 78 | self.avgpool = model_resnet.avgpool 79 | self.in_features = model_resnet.fc.in_features 80 | 81 | def forward(self, x): 82 | 83 | x = self.layer4(x) 84 | x = self.avgpool(x) 85 | x = x.view(x.size(0), -1) 86 | return x 87 | 88 | 89 | class feat_bootleneck(nn.Module): 90 | def __init__(self, feature_dim, bottleneck_dim=256, type="ori"): 91 | super(feat_bootleneck, self).__init__() 92 | self.bn = nn.BatchNorm1d(bottleneck_dim, affine=True) 93 | self.relu = nn.ReLU(inplace=True) 94 | self.dropout = nn.Dropout(p=0.5) 95 | self.bottleneck = nn.Linear(feature_dim, bottleneck_dim) 96 | self.bottleneck.apply(init_weights) 97 | self.type = type 98 | 99 | def forward(self, x): 100 | x = self.bottleneck(x) 101 | if self.type == "bn": 102 | x = self.bn(x) 103 | return x 104 | 105 | class feat_classifier(nn.Module): 106 | def __init__(self, class_num, bottleneck_dim=256, sticker_class = 11, main = True, type="linear"): 107 | super(feat_classifier, self).__init__() 108 | self.type = type 109 | self.main = main 110 | if type == 'wn': 111 | if self.main: 112 | self.fc = weightNorm(nn.Linear(bottleneck_dim, class_num), name="weight") 113 | self.fc.apply(init_weights) 114 | else: 115 | self.fc = weightNorm(nn.Linear(bottleneck_dim, sticker_class), name="weight") 116 | self.fc.apply(init_weights) 117 | else: 
118 | self.fc = nn.Linear(bottleneck_dim, class_num) 119 | self.fc.apply(init_weights) 120 | 121 | 122 | def forward(self, x): 123 | x = self.fc(x) 124 | return x 125 | 126 | class feat_classifier_two(nn.Module): 127 | def __init__(self, class_num, input_dim, bottleneck_dim=256): 128 | super(feat_classifier_two, self).__init__() 129 | self.type = type 130 | self.fc0 = nn.Linear(input_dim, bottleneck_dim) 131 | self.fc0.apply(init_weights) 132 | self.fc1 = nn.Linear(bottleneck_dim, class_num) 133 | self.fc1.apply(init_weights) 134 | 135 | def forward(self, x): 136 | x = self.fc0(x) 137 | x = self.fc1(x) 138 | return x 139 | 140 | class Res50(nn.Module): 141 | def __init__(self): 142 | super(Res50, self).__init__() 143 | model_resnet = models.resnet50(pretrained=True) 144 | self.conv1 = model_resnet.conv1 145 | self.bn1 = model_resnet.bn1 146 | self.relu = model_resnet.relu 147 | self.maxpool = model_resnet.maxpool 148 | self.layer1 = model_resnet.layer1 149 | self.layer2 = model_resnet.layer2 150 | self.layer3 = model_resnet.layer3 151 | self.layer4 = model_resnet.layer4 152 | self.avgpool = model_resnet.avgpool 153 | self.in_features = model_resnet.fc.in_features 154 | self.fc = model_resnet.fc 155 | 156 | def forward(self, x): 157 | x = self.conv1(x) 158 | x = self.bn1(x) 159 | x = self.relu(x) 160 | x = self.maxpool(x) 161 | x = self.layer1(x) 162 | x = self.layer2(x) 163 | x = self.layer3(x) 164 | x = self.layer4(x) 165 | x = self.avgpool(x) 166 | x = x.view(x.size(0), -1) 167 | y = self.fc(x) 168 | return x, y 169 | -------------------------------------------------------------------------------- /SSDA_OH/run.sh: -------------------------------------------------------------------------------- 1 | ## Source -> Art 2 | # Train Source Model 3 | python image_source.py --s 0 --t 1 2 3 4 | 5 | # Train Sticker Branch 6 | # A2C 7 | python image_source_sticker.py --s 0 --t 1 8 | # A2P 9 | python image_source_sticker.py --s 0 --t 2 10 | # A2R 11 | python 
# (run.sh continued — line 11 `python image_source_sticker.py --s 0 --t 3` ends above)

# Adaptation
# A2C
python image_target.py --s 0 --t 1
# A2P
python image_target.py --s 0 --t 2
# A2R
python image_target.py --s 0 --t 3


## Source -> Clipart
# Train Source Model
python image_source.py --s 1 --t 0 2 3

# Train Sticker Branch
# C2A
python image_source_sticker.py --s 1 --t 0
# C2P
python image_source_sticker.py --s 1 --t 2
# C2R
python image_source_sticker.py --s 1 --t 3

# Adaptation
# C2A
python image_target.py --s 1 --t 0
# C2P
python image_target.py --s 1 --t 2
# C2R
python image_target.py --s 1 --t 3


## Source -> Product
# Train Source Model
python image_source.py --s 2 --t 0 1 3

# Train Sticker Branch
# P2A
python image_source_sticker.py --s 2 --t 0
# P2C
python image_source_sticker.py --s 2 --t 1
# P2R
python image_source_sticker.py --s 2 --t 3

# Adaptation
# P2A
python image_target.py --s 2 --t 0
# P2C
python image_target.py --s 2 --t 1
# P2R
python image_target.py --s 2 --t 3


## Source -> RealWorld
# Train Source Model
python image_source.py --s 3 --t 0 1 2

# Train Sticker Branch
# R2A
python image_source_sticker.py --s 3 --t 0
# R2C
python image_source_sticker.py --s 3 --t 1
# R2P
python image_source_sticker.py --s 3 --t 2

# Adaptation
# R2A
python image_target.py --s 3 --t 0
# R2C
python image_target.py --s 3 --t 1
# R2P
python image_target.py --s 3 --t 2

# ============================================================
# /SSDA_OH/utils.py (head)
# ============================================================
import numpy as np
import torch
import os
from PIL import Image
from torchvision import transforms
import torchvision.transforms.functional as F1 7 | import torch.nn.functional as F 8 | import torch.nn as nn 9 | 10 | 11 | class Denormalize(object): 12 | def __init__(self, mean, std, inplace=False): 13 | self.mean = mean 14 | self.demean = [-m/s for m, s in zip(mean, std)] 15 | self.std = std 16 | self.destd = [1/s for s in std] 17 | self.inplace = inplace 18 | 19 | def __call__(self, tensor): 20 | tensor = F1.normalize(tensor, self.demean, self.destd, self.inplace) 21 | # clamp to get rid of numerical errors 22 | return torch.clamp(tensor, 0.0, 1.0) 23 | 24 | 25 | 26 | class RunningAverage(): 27 | def __init__(self): 28 | self.count = 0 29 | self.sum = 0 30 | 31 | def update(self, value, n_items = 1): 32 | self.sum += value * n_items 33 | self.count += n_items 34 | 35 | def __call__(self): 36 | if self.count: 37 | return self.sum/self.count 38 | else: 39 | return self.sum 40 | 41 | class AverageMeter(object): 42 | """Computes and stores the average and current value""" 43 | 44 | def __init__(self): 45 | self.reset() 46 | 47 | def reset(self): 48 | self.val = 0 49 | self.avg = 0 50 | self.sum = 0 51 | self.count = 0 52 | 53 | def update(self, val, n=1): 54 | self.val = val 55 | self.sum += val * n 56 | self.count += n 57 | self.avg = self.sum / self.count 58 | 59 | 60 | def torch_save(weights, path, **kwargs): 61 | ''' 62 | kwargs can be used to path things like optimizer weights / iteartion number etc. 
63 | ''' 64 | data = {'weights': weights} 65 | data.update(kwargs) 66 | 67 | if not os.path.isdir(os.path.split(path)[0]): os.mkdir(os.path.split(path)[0]) 68 | 69 | torch.save(data, path) 70 | 71 | def torch_load(path, key="weights", device=torch.device('cpu')): 72 | ''' 73 | Possible keys should be known beforehand 74 | 75 | load_state_dict should be done in client code 76 | ''' 77 | if not os.path.exists(path): 78 | raise Exception("Checkpoint doesn't exist at {}".format(path)) 79 | checkpoint = torch.load(path, map_location = device) 80 | if not key in checkpoint: 81 | raise Exception("Key {} doesn't exist".format(key)) 82 | return checkpoint[key] 83 | 84 | 85 | def plot_samples(image_tensors, targets, outputs, step = None, idx_to_class=None, title = "Sample Predictions"): 86 | """ 87 | for plotting on wandb 88 | l_to_t: labels to text 89 | """ 90 | 91 | images = image_tensors.cpu().clone() 92 | 93 | unorm = Denormalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)) 94 | images = unorm(images) 95 | 96 | images = [Image.fromarray(np.transpose(np.uint8(images[i]*255), (1,2,0))) for i in range(images.shape[0])] 97 | outputs = torch.max(nn.Softmax(dim=1)(outputs), 1)[1] 98 | 99 | classes_text = [(targets[i].cpu().item(), outputs[i].cpu().item()) for i, img in enumerate(images)] 100 | if idx_to_class: 101 | classes_text = [(idx_to_class[str(a)], idx_to_class[str(b)]) for a,b in classes_text] 102 | 103 | if step: 104 | wandb.log({title: [wandb.Image(img, caption="Target: %s; Predicted: %s" % (classes_text[i][0], classes_text[i][1])) for i, img in enumerate(images)]}, step=step) 105 | else: 106 | wandb.log({title: [wandb.Image(img, caption="Target: %s; Predicted: %s" % (classes_text[i][0], classes_text[i][1])) for i, img in enumerate(images)]}) 107 | 108 | def image_train(resize_size=256, crop_size=224, alexnet=False): 109 | if not alexnet: 110 | normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], 111 | std=[0.229, 0.224, 0.225]) 112 | else: 113 | 
normalize = Normalize(meanfile='./ilsvrc_2012_mean.npy') 114 | return transforms.Compose([ 115 | transforms.Resize((resize_size, resize_size)), 116 | transforms.RandomCrop(crop_size), 117 | transforms.RandomHorizontalFlip(), 118 | transforms.ToTensor(), 119 | normalize 120 | ]) 121 | 122 | def image_test(resize_size=256, crop_size=224, alexnet=False): 123 | if not alexnet: 124 | normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], 125 | std=[0.229, 0.224, 0.225]) 126 | else: 127 | normalize = Normalize(meanfile='./ilsvrc_2012_mean.npy') 128 | return transforms.Compose([ 129 | transforms.Resize((resize_size, resize_size)), 130 | transforms.CenterCrop(crop_size), 131 | transforms.ToTensor(), 132 | normalize 133 | ]) 134 | 135 | def sticker_train(resize_size=224): 136 | return transforms.Compose([ 137 | transforms.Resize((resize_size, resize_size)), 138 | transforms.ToTensor(), 139 | ]) 140 | 141 | def sticker_test(resize_size=224, alexnet=False): 142 | return transforms.Compose([ 143 | transforms.Resize((resize_size, resize_size)), 144 | transforms.ToTensor(), 145 | ]) 146 | -------------------------------------------------------------------------------- /data/include_tuples.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/include_tuples.npy -------------------------------------------------------------------------------- /data/office-home/classes.txt: -------------------------------------------------------------------------------- 1 | 0 Push_Pin 2 | 1 Radio 3 | 2 Keyboard 4 | 3 Telephone 5 | 4 Fan 6 | 5 Sink 7 | 6 Sneakers 8 | 7 Laptop 9 | 8 Screwdriver 10 | 9 Curtains 11 | 10 Speaker 12 | 11 Kettle 13 | 12 Flipflops 14 | 13 Candles 15 | 14 Monitor 16 | 15 Scissors 17 | 16 Couch 18 | 17 Mop 19 | 18 Clipboards 20 | 19 Toys 21 | 20 Fork 22 | 21 Ruler 23 | 22 Postit_Notes 24 | 23 Alarm_Clock 25 | 24 Calculator 26 | 25 Lamp_Shade 
27 | 26 Computer 28 | 27 Marker 29 | 28 Knives 30 | 29 Spoon 31 | 30 Mug 32 | 31 Chair 33 | 32 Bucket 34 | 33 Batteries 35 | 34 Flowers 36 | 35 Glasses 37 | 36 Desk_Lamp 38 | 37 Backpack 39 | 38 Bed 40 | 39 Bike 41 | 40 Pan 42 | 41 Hammer 43 | 42 Helmet 44 | 43 Calendar 45 | 44 File_Cabinet 46 | 45 Drill 47 | 46 TV 48 | 47 Trash_Can 49 | 48 Refrigerator 50 | 49 Shelf 51 | 50 Eraser 52 | 51 Mouse 53 | 52 Soda 54 | 53 Pencil 55 | 54 Oven 56 | 55 Exit_Sign 57 | 56 Webcam 58 | 57 Printer 59 | 58 Notebook 60 | 59 ToothBrush 61 | 60 Paper_Clip 62 | 61 Bottle 63 | 62 Table 64 | 63 Folder 65 | 64 Pen 66 | -------------------------------------------------------------------------------- /data/textured/Task_1_classes.txt: -------------------------------------------------------------------------------- 1 | 0 A 2 | 1 H 3 | 2 T 4 | 3 X 5 | 4 Z 6 | 5 W 7 | 6 O 8 | 7 I 9 | 8 N 10 | 9 L 11 | -------------------------------------------------------------------------------- /data/textured/Task_1_mixup.txt: -------------------------------------------------------------------------------- 1 | ../data/textured/paste/A/16.png 0 2 | ../data/textured/paste/A/17.png 0 3 | ../data/textured/paste/A/7t.png 0 4 | ../data/textured/paste/A/19.png 0 5 | ../data/textured/paste/A/20.png 0 6 | ../data/textured/paste/A/2t.png 0 7 | ../data/textured/paste/A/9t.png 0 8 | ../data/textured/paste/A/12t.png 0 9 | ../data/textured/paste/A/14.png 0 10 | ../data/textured/paste/A/11t.png 0 11 | ../data/textured/paste/A/13t.png 0 12 | ../data/textured/paste/A/10t.png 0 13 | ../data/textured/paste/A/6t.png 0 14 | ../data/textured/paste/A/8t.png 0 15 | ../data/textured/paste/A/18.png 0 16 | ../data/textured/paste/A/4t.png 0 17 | ../data/textured/paste/A/15.png 0 18 | ../data/textured/paste/A/5t.png 0 19 | ../data/textured/paste/A/1.png 0 20 | ../data/textured/paste/A/3t.png 0 21 | ../data/textured/paste/H/16.png 1 22 | ../data/textured/paste/H/17.png 1 23 | ../data/textured/paste/H/7t.png 1 24 | 
../data/textured/paste/H/19.png 1 25 | ../data/textured/paste/H/20.png 1 26 | ../data/textured/paste/H/2t.png 1 27 | ../data/textured/paste/H/9t.png 1 28 | ../data/textured/paste/H/12t.png 1 29 | ../data/textured/paste/H/14.png 1 30 | ../data/textured/paste/H/11t.png 1 31 | ../data/textured/paste/H/13t.png 1 32 | ../data/textured/paste/H/10t.png 1 33 | ../data/textured/paste/H/6t.png 1 34 | ../data/textured/paste/H/8t.png 1 35 | ../data/textured/paste/H/18.png 1 36 | ../data/textured/paste/H/4t.png 1 37 | ../data/textured/paste/H/15.png 1 38 | ../data/textured/paste/H/5t.png 1 39 | ../data/textured/paste/H/1.png 1 40 | ../data/textured/paste/H/3t.png 1 41 | ../data/textured/paste/T/16.png 2 42 | ../data/textured/paste/T/17.png 2 43 | ../data/textured/paste/T/7t.png 2 44 | ../data/textured/paste/T/19.png 2 45 | ../data/textured/paste/T/20.png 2 46 | ../data/textured/paste/T/2t.png 2 47 | ../data/textured/paste/T/9t.png 2 48 | ../data/textured/paste/T/12t.png 2 49 | ../data/textured/paste/T/14.png 2 50 | ../data/textured/paste/T/11t.png 2 51 | ../data/textured/paste/T/13t.png 2 52 | ../data/textured/paste/T/10t.png 2 53 | ../data/textured/paste/T/6t.png 2 54 | ../data/textured/paste/T/8t.png 2 55 | ../data/textured/paste/T/18.png 2 56 | ../data/textured/paste/T/4t.png 2 57 | ../data/textured/paste/T/15.png 2 58 | ../data/textured/paste/T/5t.png 2 59 | ../data/textured/paste/T/1.png 2 60 | ../data/textured/paste/T/3t.png 2 61 | ../data/textured/paste/X/16.png 3 62 | ../data/textured/paste/X/17.png 3 63 | ../data/textured/paste/X/7t.png 3 64 | ../data/textured/paste/X/19.png 3 65 | ../data/textured/paste/X/20.png 3 66 | ../data/textured/paste/X/2t.png 3 67 | ../data/textured/paste/X/9t.png 3 68 | ../data/textured/paste/X/12t.png 3 69 | ../data/textured/paste/X/14.png 3 70 | ../data/textured/paste/X/11t.png 3 71 | ../data/textured/paste/X/13t.png 3 72 | ../data/textured/paste/X/10t.png 3 73 | ../data/textured/paste/X/6t.png 3 74 | ../data/textured/paste/X/8t.png 3 75 | 
../data/textured/paste/X/18.png 3 76 | ../data/textured/paste/X/4t.png 3 77 | ../data/textured/paste/X/15.png 3 78 | ../data/textured/paste/X/5t.png 3 79 | ../data/textured/paste/X/1.png 3 80 | ../data/textured/paste/X/3t.png 3 81 | ../data/textured/paste/Z/16.png 4 82 | ../data/textured/paste/Z/17.png 4 83 | ../data/textured/paste/Z/7t.png 4 84 | ../data/textured/paste/Z/19.png 4 85 | ../data/textured/paste/Z/20.png 4 86 | ../data/textured/paste/Z/2t.png 4 87 | ../data/textured/paste/Z/9t.png 4 88 | ../data/textured/paste/Z/12t.png 4 89 | ../data/textured/paste/Z/14.png 4 90 | ../data/textured/paste/Z/11t.png 4 91 | ../data/textured/paste/Z/13t.png 4 92 | ../data/textured/paste/Z/10t.png 4 93 | ../data/textured/paste/Z/6t.png 4 94 | ../data/textured/paste/Z/8t.png 4 95 | ../data/textured/paste/Z/18.png 4 96 | ../data/textured/paste/Z/4t.png 4 97 | ../data/textured/paste/Z/15.png 4 98 | ../data/textured/paste/Z/5t.png 4 99 | ../data/textured/paste/Z/1.png 4 100 | ../data/textured/paste/Z/3t.png 4 101 | ../data/textured/paste/W/16.png 5 102 | ../data/textured/paste/W/17.png 5 103 | ../data/textured/paste/W/7t.png 5 104 | ../data/textured/paste/W/19.png 5 105 | ../data/textured/paste/W/20.png 5 106 | ../data/textured/paste/W/2t.png 5 107 | ../data/textured/paste/W/9t.png 5 108 | ../data/textured/paste/W/12t.png 5 109 | ../data/textured/paste/W/14.png 5 110 | ../data/textured/paste/W/11t.png 5 111 | ../data/textured/paste/W/13t.png 5 112 | ../data/textured/paste/W/10t.png 5 113 | ../data/textured/paste/W/6t.png 5 114 | ../data/textured/paste/W/8t.png 5 115 | ../data/textured/paste/W/18.png 5 116 | ../data/textured/paste/W/4t.png 5 117 | ../data/textured/paste/W/15.png 5 118 | ../data/textured/paste/W/5t.png 5 119 | ../data/textured/paste/W/1.png 5 120 | ../data/textured/paste/W/3t.png 5 121 | ../data/textured/paste/O/16.png 6 122 | ../data/textured/paste/O/17.png 6 123 | ../data/textured/paste/O/7t.png 6 124 | ../data/textured/paste/O/19.png 6 125 | 
../data/textured/paste/O/20.png 6 126 | ../data/textured/paste/O/2t.png 6 127 | ../data/textured/paste/O/9t.png 6 128 | ../data/textured/paste/O/12t.png 6 129 | ../data/textured/paste/O/14.png 6 130 | ../data/textured/paste/O/11t.png 6 131 | ../data/textured/paste/O/13t.png 6 132 | ../data/textured/paste/O/10t.png 6 133 | ../data/textured/paste/O/6t.png 6 134 | ../data/textured/paste/O/8t.png 6 135 | ../data/textured/paste/O/18.png 6 136 | ../data/textured/paste/O/4t.png 6 137 | ../data/textured/paste/O/15.png 6 138 | ../data/textured/paste/O/5t.png 6 139 | ../data/textured/paste/O/1.png 6 140 | ../data/textured/paste/O/3t.png 6 141 | ../data/textured/paste/I/16.png 7 142 | ../data/textured/paste/I/17.png 7 143 | ../data/textured/paste/I/7t.png 7 144 | ../data/textured/paste/I/19.png 7 145 | ../data/textured/paste/I/20.png 7 146 | ../data/textured/paste/I/2t.png 7 147 | ../data/textured/paste/I/9t.png 7 148 | ../data/textured/paste/I/12t.png 7 149 | ../data/textured/paste/I/14.png 7 150 | ../data/textured/paste/I/11t.png 7 151 | ../data/textured/paste/I/13t.png 7 152 | ../data/textured/paste/I/10t.png 7 153 | ../data/textured/paste/I/6t.png 7 154 | ../data/textured/paste/I/8t.png 7 155 | ../data/textured/paste/I/18.png 7 156 | ../data/textured/paste/I/4t.png 7 157 | ../data/textured/paste/I/15.png 7 158 | ../data/textured/paste/I/5t.png 7 159 | ../data/textured/paste/I/1.png 7 160 | ../data/textured/paste/I/3t.png 7 161 | ../data/textured/paste/N/16.png 8 162 | ../data/textured/paste/N/17.png 8 163 | ../data/textured/paste/N/7t.png 8 164 | ../data/textured/paste/N/19.png 8 165 | ../data/textured/paste/N/20.png 8 166 | ../data/textured/paste/N/2t.png 8 167 | ../data/textured/paste/N/9t.png 8 168 | ../data/textured/paste/N/12t.png 8 169 | ../data/textured/paste/N/14.png 8 170 | ../data/textured/paste/N/11t.png 8 171 | ../data/textured/paste/N/13t.png 8 172 | ../data/textured/paste/N/10t.png 8 173 | ../data/textured/paste/N/6t.png 8 174 | 
../data/textured/paste/N/8t.png 8 175 | ../data/textured/paste/N/18.png 8 176 | ../data/textured/paste/N/4t.png 8 177 | ../data/textured/paste/N/15.png 8 178 | ../data/textured/paste/N/5t.png 8 179 | ../data/textured/paste/N/1.png 8 180 | ../data/textured/paste/N/3t.png 8 181 | ../data/textured/paste/L/16.png 9 182 | ../data/textured/paste/L/17.png 9 183 | ../data/textured/paste/L/7t.png 9 184 | ../data/textured/paste/L/19.png 9 185 | ../data/textured/paste/L/20.png 9 186 | ../data/textured/paste/L/2t.png 9 187 | ../data/textured/paste/L/9t.png 9 188 | ../data/textured/paste/L/12t.png 9 189 | ../data/textured/paste/L/14.png 9 190 | ../data/textured/paste/L/11t.png 9 191 | ../data/textured/paste/L/13t.png 9 192 | ../data/textured/paste/L/10t.png 9 193 | ../data/textured/paste/L/6t.png 9 194 | ../data/textured/paste/L/8t.png 9 195 | ../data/textured/paste/L/18.png 9 196 | ../data/textured/paste/L/4t.png 9 197 | ../data/textured/paste/L/15.png 9 198 | ../data/textured/paste/L/5t.png 9 199 | ../data/textured/paste/L/1.png 9 200 | ../data/textured/paste/L/3t.png 9 201 | -------------------------------------------------------------------------------- /data/textured/Task_2_classes.txt: -------------------------------------------------------------------------------- 1 | 0 1 2 | 1 9 3 | 2 8 4 | 3 7 5 | 4 3 6 | 5 5 7 | 6 4 8 | 7 2 9 | 8 6 10 | 9 0 11 | -------------------------------------------------------------------------------- /data/textured/Task_2_mixup.txt: -------------------------------------------------------------------------------- 1 | ../data/textured/mixup/1/16.png 0 2 | ../data/textured/mixup/1/17.png 0 3 | ../data/textured/mixup/1/7t.png 0 4 | ../data/textured/mixup/1/19.png 0 5 | ../data/textured/mixup/1/20.png 0 6 | ../data/textured/mixup/1/2t.png 0 7 | ../data/textured/mixup/1/9t.png 0 8 | ../data/textured/mixup/1/12t.png 0 9 | ../data/textured/mixup/1/14.png 0 10 | ../data/textured/mixup/1/11t.png 0 11 | ../data/textured/mixup/1/13t.png 0 12 | 
../data/textured/mixup/1/10t.png 0 13 | ../data/textured/mixup/1/6t.png 0 14 | ../data/textured/mixup/1/8t.png 0 15 | ../data/textured/mixup/1/18.png 0 16 | ../data/textured/mixup/1/4t.png 0 17 | ../data/textured/mixup/1/15.png 0 18 | ../data/textured/mixup/1/5t.png 0 19 | ../data/textured/mixup/1/1.png 0 20 | ../data/textured/mixup/1/3t.png 0 21 | ../data/textured/mixup/9/16.png 1 22 | ../data/textured/mixup/9/17.png 1 23 | ../data/textured/mixup/9/7t.png 1 24 | ../data/textured/mixup/9/19.png 1 25 | ../data/textured/mixup/9/20.png 1 26 | ../data/textured/mixup/9/2t.png 1 27 | ../data/textured/mixup/9/9t.png 1 28 | ../data/textured/mixup/9/12t.png 1 29 | ../data/textured/mixup/9/14.png 1 30 | ../data/textured/mixup/9/11t.png 1 31 | ../data/textured/mixup/9/13t.png 1 32 | ../data/textured/mixup/9/10t.png 1 33 | ../data/textured/mixup/9/6t.png 1 34 | ../data/textured/mixup/9/8t.png 1 35 | ../data/textured/mixup/9/18.png 1 36 | ../data/textured/mixup/9/4t.png 1 37 | ../data/textured/mixup/9/15.png 1 38 | ../data/textured/mixup/9/5t.png 1 39 | ../data/textured/mixup/9/1.png 1 40 | ../data/textured/mixup/9/3t.png 1 41 | ../data/textured/mixup/8/16.png 2 42 | ../data/textured/mixup/8/17.png 2 43 | ../data/textured/mixup/8/7t.png 2 44 | ../data/textured/mixup/8/19.png 2 45 | ../data/textured/mixup/8/20.png 2 46 | ../data/textured/mixup/8/2t.png 2 47 | ../data/textured/mixup/8/9t.png 2 48 | ../data/textured/mixup/8/12t.png 2 49 | ../data/textured/mixup/8/14.png 2 50 | ../data/textured/mixup/8/11t.png 2 51 | ../data/textured/mixup/8/13t.png 2 52 | ../data/textured/mixup/8/10t.png 2 53 | ../data/textured/mixup/8/6t.png 2 54 | ../data/textured/mixup/8/8t.png 2 55 | ../data/textured/mixup/8/18.png 2 56 | ../data/textured/mixup/8/4t.png 2 57 | ../data/textured/mixup/8/15.png 2 58 | ../data/textured/mixup/8/5t.png 2 59 | ../data/textured/mixup/8/1.png 2 60 | ../data/textured/mixup/8/3t.png 2 61 | ../data/textured/mixup/7/16.png 3 62 | ../data/textured/mixup/7/17.png 3 63 | 
../data/textured/mixup/7/7t.png 3 64 | ../data/textured/mixup/7/19.png 3 65 | ../data/textured/mixup/7/20.png 3 66 | ../data/textured/mixup/7/2t.png 3 67 | ../data/textured/mixup/7/9t.png 3 68 | ../data/textured/mixup/7/12t.png 3 69 | ../data/textured/mixup/7/14.png 3 70 | ../data/textured/mixup/7/11t.png 3 71 | ../data/textured/mixup/7/13t.png 3 72 | ../data/textured/mixup/7/10t.png 3 73 | ../data/textured/mixup/7/6t.png 3 74 | ../data/textured/mixup/7/8t.png 3 75 | ../data/textured/mixup/7/18.png 3 76 | ../data/textured/mixup/7/4t.png 3 77 | ../data/textured/mixup/7/15.png 3 78 | ../data/textured/mixup/7/5t.png 3 79 | ../data/textured/mixup/7/1.png 3 80 | ../data/textured/mixup/7/3t.png 3 81 | ../data/textured/mixup/3/16.png 4 82 | ../data/textured/mixup/3/17.png 4 83 | ../data/textured/mixup/3/7t.png 4 84 | ../data/textured/mixup/3/19.png 4 85 | ../data/textured/mixup/3/20.png 4 86 | ../data/textured/mixup/3/2t.png 4 87 | ../data/textured/mixup/3/9t.png 4 88 | ../data/textured/mixup/3/12t.png 4 89 | ../data/textured/mixup/3/14.png 4 90 | ../data/textured/mixup/3/11t.png 4 91 | ../data/textured/mixup/3/13t.png 4 92 | ../data/textured/mixup/3/10t.png 4 93 | ../data/textured/mixup/3/6t.png 4 94 | ../data/textured/mixup/3/8t.png 4 95 | ../data/textured/mixup/3/18.png 4 96 | ../data/textured/mixup/3/4t.png 4 97 | ../data/textured/mixup/3/15.png 4 98 | ../data/textured/mixup/3/5t.png 4 99 | ../data/textured/mixup/3/1.png 4 100 | ../data/textured/mixup/3/3t.png 4 101 | ../data/textured/mixup/5/16.png 5 102 | ../data/textured/mixup/5/17.png 5 103 | ../data/textured/mixup/5/7t.png 5 104 | ../data/textured/mixup/5/19.png 5 105 | ../data/textured/mixup/5/20.png 5 106 | ../data/textured/mixup/5/2t.png 5 107 | ../data/textured/mixup/5/9t.png 5 108 | ../data/textured/mixup/5/12t.png 5 109 | ../data/textured/mixup/5/14.png 5 110 | ../data/textured/mixup/5/11t.png 5 111 | ../data/textured/mixup/5/13t.png 5 112 | ../data/textured/mixup/5/10t.png 5 113 | 
../data/textured/mixup/5/6t.png 5 114 | ../data/textured/mixup/5/8t.png 5 115 | ../data/textured/mixup/5/18.png 5 116 | ../data/textured/mixup/5/4t.png 5 117 | ../data/textured/mixup/5/15.png 5 118 | ../data/textured/mixup/5/5t.png 5 119 | ../data/textured/mixup/5/1.png 5 120 | ../data/textured/mixup/5/3t.png 5 121 | ../data/textured/mixup/4/16.png 6 122 | ../data/textured/mixup/4/17.png 6 123 | ../data/textured/mixup/4/7t.png 6 124 | ../data/textured/mixup/4/19.png 6 125 | ../data/textured/mixup/4/20.png 6 126 | ../data/textured/mixup/4/2t.png 6 127 | ../data/textured/mixup/4/9t.png 6 128 | ../data/textured/mixup/4/12t.png 6 129 | ../data/textured/mixup/4/14.png 6 130 | ../data/textured/mixup/4/11t.png 6 131 | ../data/textured/mixup/4/13t.png 6 132 | ../data/textured/mixup/4/10t.png 6 133 | ../data/textured/mixup/4/6t.png 6 134 | ../data/textured/mixup/4/8t.png 6 135 | ../data/textured/mixup/4/18.png 6 136 | ../data/textured/mixup/4/4t.png 6 137 | ../data/textured/mixup/4/15.png 6 138 | ../data/textured/mixup/4/5t.png 6 139 | ../data/textured/mixup/4/1.png 6 140 | ../data/textured/mixup/4/3t.png 6 141 | ../data/textured/mixup/2/16.png 7 142 | ../data/textured/mixup/2/17.png 7 143 | ../data/textured/mixup/2/7t.png 7 144 | ../data/textured/mixup/2/19.png 7 145 | ../data/textured/mixup/2/20.png 7 146 | ../data/textured/mixup/2/2t.png 7 147 | ../data/textured/mixup/2/9t.png 7 148 | ../data/textured/mixup/2/12t.png 7 149 | ../data/textured/mixup/2/14.png 7 150 | ../data/textured/mixup/2/11t.png 7 151 | ../data/textured/mixup/2/13t.png 7 152 | ../data/textured/mixup/2/10t.png 7 153 | ../data/textured/mixup/2/6t.png 7 154 | ../data/textured/mixup/2/8t.png 7 155 | ../data/textured/mixup/2/18.png 7 156 | ../data/textured/mixup/2/4t.png 7 157 | ../data/textured/mixup/2/15.png 7 158 | ../data/textured/mixup/2/5t.png 7 159 | ../data/textured/mixup/2/1.png 7 160 | ../data/textured/mixup/2/3t.png 7 161 | ../data/textured/mixup/6/16.png 8 162 | ../data/textured/mixup/6/17.png 8 
163 | ../data/textured/mixup/6/7t.png 8 164 | ../data/textured/mixup/6/19.png 8 165 | ../data/textured/mixup/6/20.png 8 166 | ../data/textured/mixup/6/2t.png 8 167 | ../data/textured/mixup/6/9t.png 8 168 | ../data/textured/mixup/6/12t.png 8 169 | ../data/textured/mixup/6/14.png 8 170 | ../data/textured/mixup/6/11t.png 8 171 | ../data/textured/mixup/6/13t.png 8 172 | ../data/textured/mixup/6/10t.png 8 173 | ../data/textured/mixup/6/6t.png 8 174 | ../data/textured/mixup/6/8t.png 8 175 | ../data/textured/mixup/6/18.png 8 176 | ../data/textured/mixup/6/4t.png 8 177 | ../data/textured/mixup/6/15.png 8 178 | ../data/textured/mixup/6/5t.png 8 179 | ../data/textured/mixup/6/1.png 8 180 | ../data/textured/mixup/6/3t.png 8 181 | ../data/textured/mixup/0/16.png 9 182 | ../data/textured/mixup/0/17.png 9 183 | ../data/textured/mixup/0/7t.png 9 184 | ../data/textured/mixup/0/19.png 9 185 | ../data/textured/mixup/0/20.png 9 186 | ../data/textured/mixup/0/2t.png 9 187 | ../data/textured/mixup/0/9t.png 9 188 | ../data/textured/mixup/0/12t.png 9 189 | ../data/textured/mixup/0/14.png 9 190 | ../data/textured/mixup/0/11t.png 9 191 | ../data/textured/mixup/0/13t.png 9 192 | ../data/textured/mixup/0/10t.png 9 193 | ../data/textured/mixup/0/6t.png 9 194 | ../data/textured/mixup/0/8t.png 9 195 | ../data/textured/mixup/0/18.png 9 196 | ../data/textured/mixup/0/4t.png 9 197 | ../data/textured/mixup/0/15.png 9 198 | ../data/textured/mixup/0/5t.png 9 199 | ../data/textured/mixup/0/1.png 9 200 | ../data/textured/mixup/0/3t.png 9 201 | -------------------------------------------------------------------------------- /data/textured/paste/A/1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/A/1.png -------------------------------------------------------------------------------- /data/textured/paste/A/10t.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/A/10t.png -------------------------------------------------------------------------------- /data/textured/paste/A/11t.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/A/11t.png -------------------------------------------------------------------------------- /data/textured/paste/A/12t.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/A/12t.png -------------------------------------------------------------------------------- /data/textured/paste/A/13t.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/A/13t.png -------------------------------------------------------------------------------- /data/textured/paste/A/14.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/A/14.png -------------------------------------------------------------------------------- /data/textured/paste/A/15.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/A/15.png -------------------------------------------------------------------------------- /data/textured/paste/A/16.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/A/16.png -------------------------------------------------------------------------------- /data/textured/paste/A/17.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/A/17.png -------------------------------------------------------------------------------- /data/textured/paste/A/18.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/A/18.png -------------------------------------------------------------------------------- /data/textured/paste/A/19.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/A/19.png -------------------------------------------------------------------------------- /data/textured/paste/A/20.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/A/20.png -------------------------------------------------------------------------------- /data/textured/paste/A/2t.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/A/2t.png -------------------------------------------------------------------------------- /data/textured/paste/A/3t.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/A/3t.png -------------------------------------------------------------------------------- /data/textured/paste/A/4t.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/A/4t.png -------------------------------------------------------------------------------- /data/textured/paste/A/5t.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/A/5t.png -------------------------------------------------------------------------------- /data/textured/paste/A/6t.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/A/6t.png -------------------------------------------------------------------------------- /data/textured/paste/A/7t.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/A/7t.png -------------------------------------------------------------------------------- /data/textured/paste/A/8t.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/A/8t.png -------------------------------------------------------------------------------- /data/textured/paste/A/9t.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/A/9t.png -------------------------------------------------------------------------------- /data/textured/paste/H/1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/H/1.png -------------------------------------------------------------------------------- /data/textured/paste/H/10t.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/H/10t.png -------------------------------------------------------------------------------- /data/textured/paste/H/11t.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/H/11t.png -------------------------------------------------------------------------------- /data/textured/paste/H/12t.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/H/12t.png -------------------------------------------------------------------------------- /data/textured/paste/H/13t.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/H/13t.png -------------------------------------------------------------------------------- /data/textured/paste/H/14.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/H/14.png -------------------------------------------------------------------------------- /data/textured/paste/H/15.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/H/15.png -------------------------------------------------------------------------------- /data/textured/paste/H/16.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/H/16.png -------------------------------------------------------------------------------- /data/textured/paste/H/17.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/H/17.png -------------------------------------------------------------------------------- /data/textured/paste/H/18.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/H/18.png -------------------------------------------------------------------------------- /data/textured/paste/H/19.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/H/19.png -------------------------------------------------------------------------------- /data/textured/paste/H/20.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/H/20.png -------------------------------------------------------------------------------- /data/textured/paste/H/2t.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/H/2t.png -------------------------------------------------------------------------------- /data/textured/paste/H/3t.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/H/3t.png -------------------------------------------------------------------------------- /data/textured/paste/H/4t.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/H/4t.png -------------------------------------------------------------------------------- /data/textured/paste/H/5t.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/H/5t.png -------------------------------------------------------------------------------- /data/textured/paste/H/6t.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/H/6t.png -------------------------------------------------------------------------------- /data/textured/paste/H/7t.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/H/7t.png -------------------------------------------------------------------------------- /data/textured/paste/H/8t.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/H/8t.png -------------------------------------------------------------------------------- /data/textured/paste/H/9t.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/H/9t.png -------------------------------------------------------------------------------- /data/textured/paste/I/1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/I/1.png -------------------------------------------------------------------------------- /data/textured/paste/I/10t.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/I/10t.png -------------------------------------------------------------------------------- /data/textured/paste/I/11t.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/I/11t.png -------------------------------------------------------------------------------- /data/textured/paste/I/12t.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/I/12t.png -------------------------------------------------------------------------------- /data/textured/paste/I/13t.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/I/13t.png -------------------------------------------------------------------------------- /data/textured/paste/I/14.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/I/14.png -------------------------------------------------------------------------------- /data/textured/paste/I/15.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/I/15.png -------------------------------------------------------------------------------- /data/textured/paste/I/16.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/I/16.png -------------------------------------------------------------------------------- /data/textured/paste/I/17.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/I/17.png -------------------------------------------------------------------------------- /data/textured/paste/I/18.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/I/18.png -------------------------------------------------------------------------------- /data/textured/paste/I/19.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/I/19.png -------------------------------------------------------------------------------- /data/textured/paste/I/20.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/I/20.png -------------------------------------------------------------------------------- /data/textured/paste/I/2t.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/I/2t.png -------------------------------------------------------------------------------- /data/textured/paste/I/3t.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/I/3t.png -------------------------------------------------------------------------------- /data/textured/paste/I/4t.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/I/4t.png -------------------------------------------------------------------------------- /data/textured/paste/I/5t.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/I/5t.png -------------------------------------------------------------------------------- /data/textured/paste/I/6t.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/I/6t.png -------------------------------------------------------------------------------- /data/textured/paste/I/7t.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/I/7t.png -------------------------------------------------------------------------------- /data/textured/paste/I/8t.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/I/8t.png -------------------------------------------------------------------------------- /data/textured/paste/I/9t.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/I/9t.png -------------------------------------------------------------------------------- /data/textured/paste/L/1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/L/1.png -------------------------------------------------------------------------------- /data/textured/paste/L/10t.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/L/10t.png -------------------------------------------------------------------------------- /data/textured/paste/L/11t.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/L/11t.png -------------------------------------------------------------------------------- /data/textured/paste/L/12t.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/L/12t.png -------------------------------------------------------------------------------- /data/textured/paste/L/13t.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/L/13t.png -------------------------------------------------------------------------------- /data/textured/paste/L/14.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/L/14.png -------------------------------------------------------------------------------- /data/textured/paste/L/15.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/L/15.png -------------------------------------------------------------------------------- /data/textured/paste/L/16.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/L/16.png -------------------------------------------------------------------------------- /data/textured/paste/L/17.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/L/17.png -------------------------------------------------------------------------------- /data/textured/paste/L/18.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/L/18.png -------------------------------------------------------------------------------- /data/textured/paste/L/19.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/L/19.png -------------------------------------------------------------------------------- /data/textured/paste/L/20.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/L/20.png -------------------------------------------------------------------------------- /data/textured/paste/L/2t.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/L/2t.png -------------------------------------------------------------------------------- /data/textured/paste/L/3t.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/L/3t.png -------------------------------------------------------------------------------- /data/textured/paste/L/4t.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/L/4t.png -------------------------------------------------------------------------------- /data/textured/paste/L/5t.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/L/5t.png -------------------------------------------------------------------------------- /data/textured/paste/L/6t.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/L/6t.png -------------------------------------------------------------------------------- /data/textured/paste/L/7t.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/L/7t.png -------------------------------------------------------------------------------- /data/textured/paste/L/8t.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/L/8t.png -------------------------------------------------------------------------------- /data/textured/paste/L/9t.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/L/9t.png -------------------------------------------------------------------------------- /data/textured/paste/N/1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/N/1.png -------------------------------------------------------------------------------- /data/textured/paste/N/10t.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/N/10t.png -------------------------------------------------------------------------------- /data/textured/paste/N/11t.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/N/11t.png -------------------------------------------------------------------------------- /data/textured/paste/N/12t.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/N/12t.png -------------------------------------------------------------------------------- /data/textured/paste/N/13t.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/N/13t.png -------------------------------------------------------------------------------- /data/textured/paste/N/14.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/N/14.png -------------------------------------------------------------------------------- /data/textured/paste/N/15.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/N/15.png -------------------------------------------------------------------------------- /data/textured/paste/N/16.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/N/16.png -------------------------------------------------------------------------------- /data/textured/paste/N/17.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/N/17.png -------------------------------------------------------------------------------- /data/textured/paste/N/18.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/N/18.png -------------------------------------------------------------------------------- /data/textured/paste/N/19.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/N/19.png -------------------------------------------------------------------------------- /data/textured/paste/N/20.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/N/20.png -------------------------------------------------------------------------------- /data/textured/paste/N/2t.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/N/2t.png -------------------------------------------------------------------------------- /data/textured/paste/N/3t.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/N/3t.png -------------------------------------------------------------------------------- /data/textured/paste/N/4t.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/N/4t.png -------------------------------------------------------------------------------- /data/textured/paste/N/5t.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/N/5t.png -------------------------------------------------------------------------------- /data/textured/paste/N/6t.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/N/6t.png -------------------------------------------------------------------------------- /data/textured/paste/N/7t.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/N/7t.png -------------------------------------------------------------------------------- /data/textured/paste/N/8t.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/N/8t.png -------------------------------------------------------------------------------- /data/textured/paste/N/9t.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/N/9t.png -------------------------------------------------------------------------------- /data/textured/paste/O/1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/O/1.png -------------------------------------------------------------------------------- /data/textured/paste/O/10t.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/O/10t.png -------------------------------------------------------------------------------- /data/textured/paste/O/11t.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/O/11t.png -------------------------------------------------------------------------------- /data/textured/paste/O/12t.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/O/12t.png -------------------------------------------------------------------------------- /data/textured/paste/O/13t.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/O/13t.png -------------------------------------------------------------------------------- /data/textured/paste/O/14.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/O/14.png -------------------------------------------------------------------------------- /data/textured/paste/O/15.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/O/15.png -------------------------------------------------------------------------------- /data/textured/paste/O/16.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/O/16.png -------------------------------------------------------------------------------- /data/textured/paste/O/17.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/O/17.png -------------------------------------------------------------------------------- /data/textured/paste/O/18.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/O/18.png -------------------------------------------------------------------------------- /data/textured/paste/O/19.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/O/19.png -------------------------------------------------------------------------------- /data/textured/paste/O/20.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/O/20.png -------------------------------------------------------------------------------- /data/textured/paste/O/2t.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/O/2t.png -------------------------------------------------------------------------------- /data/textured/paste/O/3t.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/O/3t.png -------------------------------------------------------------------------------- /data/textured/paste/O/4t.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/O/4t.png -------------------------------------------------------------------------------- /data/textured/paste/O/5t.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/O/5t.png -------------------------------------------------------------------------------- /data/textured/paste/O/6t.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/O/6t.png -------------------------------------------------------------------------------- /data/textured/paste/O/7t.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/O/7t.png -------------------------------------------------------------------------------- /data/textured/paste/O/8t.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/O/8t.png -------------------------------------------------------------------------------- /data/textured/paste/O/9t.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/O/9t.png -------------------------------------------------------------------------------- /data/textured/paste/T/1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/T/1.png -------------------------------------------------------------------------------- /data/textured/paste/T/10t.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/T/10t.png -------------------------------------------------------------------------------- /data/textured/paste/T/11t.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/T/11t.png -------------------------------------------------------------------------------- /data/textured/paste/T/12t.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/T/12t.png -------------------------------------------------------------------------------- /data/textured/paste/T/13t.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/T/13t.png -------------------------------------------------------------------------------- /data/textured/paste/T/14.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/T/14.png -------------------------------------------------------------------------------- /data/textured/paste/T/15.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/T/15.png -------------------------------------------------------------------------------- /data/textured/paste/T/16.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/T/16.png -------------------------------------------------------------------------------- /data/textured/paste/T/17.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/T/17.png -------------------------------------------------------------------------------- /data/textured/paste/T/18.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/T/18.png -------------------------------------------------------------------------------- /data/textured/paste/T/19.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/T/19.png -------------------------------------------------------------------------------- /data/textured/paste/T/20.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/T/20.png -------------------------------------------------------------------------------- /data/textured/paste/T/2t.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/T/2t.png -------------------------------------------------------------------------------- /data/textured/paste/T/3t.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/T/3t.png -------------------------------------------------------------------------------- /data/textured/paste/T/4t.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/T/4t.png -------------------------------------------------------------------------------- /data/textured/paste/T/5t.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/T/5t.png -------------------------------------------------------------------------------- /data/textured/paste/T/6t.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/T/6t.png -------------------------------------------------------------------------------- /data/textured/paste/T/7t.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/T/7t.png -------------------------------------------------------------------------------- /data/textured/paste/T/8t.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/T/8t.png -------------------------------------------------------------------------------- /data/textured/paste/T/9t.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/T/9t.png -------------------------------------------------------------------------------- /data/textured/paste/W/1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/W/1.png -------------------------------------------------------------------------------- /data/textured/paste/W/10t.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/W/10t.png -------------------------------------------------------------------------------- /data/textured/paste/W/11t.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/W/11t.png -------------------------------------------------------------------------------- /data/textured/paste/W/12t.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/W/12t.png -------------------------------------------------------------------------------- /data/textured/paste/W/13t.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/W/13t.png -------------------------------------------------------------------------------- /data/textured/paste/W/14.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/W/14.png -------------------------------------------------------------------------------- /data/textured/paste/W/15.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/W/15.png -------------------------------------------------------------------------------- /data/textured/paste/W/16.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/W/16.png -------------------------------------------------------------------------------- /data/textured/paste/W/17.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/W/17.png -------------------------------------------------------------------------------- /data/textured/paste/W/18.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/W/18.png -------------------------------------------------------------------------------- /data/textured/paste/W/19.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/W/19.png -------------------------------------------------------------------------------- /data/textured/paste/W/20.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/W/20.png -------------------------------------------------------------------------------- /data/textured/paste/W/2t.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/W/2t.png -------------------------------------------------------------------------------- /data/textured/paste/W/3t.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/W/3t.png -------------------------------------------------------------------------------- /data/textured/paste/W/4t.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/W/4t.png -------------------------------------------------------------------------------- /data/textured/paste/W/5t.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/W/5t.png -------------------------------------------------------------------------------- /data/textured/paste/W/6t.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/W/6t.png -------------------------------------------------------------------------------- /data/textured/paste/W/7t.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/W/7t.png -------------------------------------------------------------------------------- /data/textured/paste/W/8t.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/W/8t.png -------------------------------------------------------------------------------- /data/textured/paste/W/9t.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/W/9t.png -------------------------------------------------------------------------------- /data/textured/paste/X/1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/X/1.png -------------------------------------------------------------------------------- /data/textured/paste/X/10t.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/X/10t.png -------------------------------------------------------------------------------- /data/textured/paste/X/11t.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/X/11t.png -------------------------------------------------------------------------------- /data/textured/paste/X/12t.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/X/12t.png -------------------------------------------------------------------------------- /data/textured/paste/X/13t.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/X/13t.png -------------------------------------------------------------------------------- /data/textured/paste/X/14.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/X/14.png -------------------------------------------------------------------------------- /data/textured/paste/X/15.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/X/15.png -------------------------------------------------------------------------------- /data/textured/paste/X/16.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/X/16.png -------------------------------------------------------------------------------- /data/textured/paste/X/17.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/X/17.png -------------------------------------------------------------------------------- /data/textured/paste/X/18.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/X/18.png -------------------------------------------------------------------------------- /data/textured/paste/X/19.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/X/19.png -------------------------------------------------------------------------------- /data/textured/paste/X/20.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/X/20.png -------------------------------------------------------------------------------- /data/textured/paste/X/2t.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/X/2t.png -------------------------------------------------------------------------------- /data/textured/paste/X/3t.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/X/3t.png -------------------------------------------------------------------------------- /data/textured/paste/X/4t.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/X/4t.png -------------------------------------------------------------------------------- /data/textured/paste/X/5t.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/X/5t.png -------------------------------------------------------------------------------- /data/textured/paste/X/6t.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/X/6t.png -------------------------------------------------------------------------------- /data/textured/paste/X/7t.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/X/7t.png -------------------------------------------------------------------------------- /data/textured/paste/X/8t.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/X/8t.png -------------------------------------------------------------------------------- /data/textured/paste/X/9t.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/X/9t.png -------------------------------------------------------------------------------- /data/textured/paste/Z/1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/Z/1.png -------------------------------------------------------------------------------- /data/textured/paste/Z/10t.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/Z/10t.png -------------------------------------------------------------------------------- /data/textured/paste/Z/11t.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/Z/11t.png -------------------------------------------------------------------------------- /data/textured/paste/Z/12t.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/Z/12t.png -------------------------------------------------------------------------------- /data/textured/paste/Z/13t.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/Z/13t.png -------------------------------------------------------------------------------- /data/textured/paste/Z/14.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/Z/14.png -------------------------------------------------------------------------------- /data/textured/paste/Z/15.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/Z/15.png -------------------------------------------------------------------------------- /data/textured/paste/Z/16.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/Z/16.png -------------------------------------------------------------------------------- /data/textured/paste/Z/17.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/Z/17.png -------------------------------------------------------------------------------- /data/textured/paste/Z/18.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/Z/18.png -------------------------------------------------------------------------------- /data/textured/paste/Z/19.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/Z/19.png -------------------------------------------------------------------------------- /data/textured/paste/Z/20.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/Z/20.png -------------------------------------------------------------------------------- /data/textured/paste/Z/2t.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/Z/2t.png -------------------------------------------------------------------------------- /data/textured/paste/Z/3t.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/Z/3t.png -------------------------------------------------------------------------------- /data/textured/paste/Z/4t.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/Z/4t.png -------------------------------------------------------------------------------- /data/textured/paste/Z/5t.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/Z/5t.png -------------------------------------------------------------------------------- /data/textured/paste/Z/6t.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/Z/6t.png -------------------------------------------------------------------------------- /data/textured/paste/Z/7t.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/Z/7t.png -------------------------------------------------------------------------------- /data/textured/paste/Z/8t.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/Z/8t.png -------------------------------------------------------------------------------- /data/textured/paste/Z/9t.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/val-iisc/StickerDA/b70dfd18a32dce7f55ac86952512f4278e289ba4/data/textured/paste/Z/9t.png --------------------------------------------------------------------------------