├── framework2.jpg
├── torchattacks
│   ├── attacks
│   │   ├── __init__.py
│   │   ├── pgd.py
│   │   ├── LTA.py
│   │   └── mfgsm.py
│   ├── __init__.py
│   └── attack.py
├── circle_loss.py
├── prepare.py
├── evaluate_rerank.py
├── re_ranking.py
├── evaluate.py
├── evaluate_gpu.py
├── model.py
├── defendAugment.py
├── README.txt
├── multimodal.py
├── test.py
├── aa_SMA.py
├── aa_LTA.py
├── aa_IFGSM.py
└── train.py

/framework2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/finger-monkey/LTA_and_joint-defence/HEAD/framework2.jpg
--------------------------------------------------------------------------------
/torchattacks/attacks/__init__.py:
--------------------------------------------------------------------------------
1 | from .mfgsm import MIFGSM
2 | from .LTA import LTA
3 | from .pgd import PGD
4 |
--------------------------------------------------------------------------------
/torchattacks/__init__.py:
--------------------------------------------------------------------------------
1 | from .attacks.LTA import LTA
2 | from .attacks.mfgsm import MIFGSM
3 | from .attacks.pgd import PGD
4 | __version__ = 2.4
--------------------------------------------------------------------------------
/circle_loss.py:
--------------------------------------------------------------------------------
1 | from typing import Tuple
2 | import torch
3 | from torch import nn, Tensor
4 |
5 | def convert_label_to_similarity(normed_feature: Tensor, label: Tensor) -> Tuple[Tensor, Tensor]:
6 |     similarity_matrix = normed_feature @ normed_feature.transpose(1, 0)
7 |     label_matrix = label.unsqueeze(1) == label.unsqueeze(0)
8 |     positive_matrix = label_matrix.triu(diagonal=1)
9 |     negative_matrix = label_matrix.logical_not().triu(diagonal=1)
10 |     similarity_matrix = similarity_matrix.view(-1)
11 |     positive_matrix = positive_matrix.view(-1)
12 |     negative_matrix = negative_matrix.view(-1)
13 |     return similarity_matrix[positive_matrix], similarity_matrix[negative_matrix]
14 |
15 |
16 | class CircleLoss(nn.Module):
17 |     def __init__(self, m: float, gamma: float) -> None:
18 |         super(CircleLoss, self).__init__()
19 |         self.m = m
20 |         self.gamma = gamma
21 |         self.soft_plus = nn.Softplus()
22 |
23 |     def forward(self, sp: Tensor, sn: Tensor) -> Tensor:
24 |         ap = torch.clamp_min(- sp.detach() + 1 + self.m, min=0.)
25 |         an = torch.clamp_min(sn.detach() + self.m, min=0.)
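        # ap and an are the adaptive weights alpha_p = [1 + m - s_p]_+ and
        # alpha_n = [s_n + m]_+ from the Circle Loss paper (Sun et al., CVPR 2020);
        # detach() keeps gradients from flowing through the weights themselves.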
26 | 27 | delta_p = 1 - self.m 28 | delta_n = self.m 29 | 30 | logit_p = - ap * (sp - delta_p) * self.gamma 31 | logit_n = an * (sn - delta_n) * self.gamma 32 | 33 | loss = self.soft_plus(torch.logsumexp(logit_n, dim=0) + torch.logsumexp(logit_p, dim=0)) 34 | 35 | return loss 36 | 37 | 38 | if __name__ == "__main__": 39 | feat = nn.functional.normalize(torch.rand(256, 64, requires_grad=True)) 40 | lbl = torch.randint(high=10, size=(256,)) 41 | 42 | inp_sp, inp_sn = convert_label_to_similarity(feat, lbl) 43 | 44 | criterion = CircleLoss(m=0.25, gamma=256) 45 | circle_loss = criterion(inp_sp, inp_sn) 46 | 47 | print(circle_loss) 48 | -------------------------------------------------------------------------------- /prepare.py: -------------------------------------------------------------------------------- 1 | import os 2 | from shutil import copyfile 3 | 4 | # You only need to change this line to your dataset download path 5 | download_path = '../Market' 6 | 7 | if not os.path.isdir(download_path): 8 | print('please change the download_path') 9 | 10 | save_path = download_path + '/pytorch' 11 | if not os.path.isdir(save_path): 12 | os.mkdir(save_path) 13 | #----------------------------------------- 14 | #query 15 | query_path = download_path + '/query' 16 | query_save_path = download_path + '/pytorch/query' 17 | if not os.path.isdir(query_save_path): 18 | os.mkdir(query_save_path) 19 | 20 | for root, dirs, files in os.walk(query_path, topdown=True): 21 | for name in files: 22 | if not name[-3:]=='jpg': 23 | continue 24 | ID = name.split('_') 25 | src_path = query_path + '/' + name 26 | dst_path = query_save_path + '/' + ID[0] 27 | if not os.path.isdir(dst_path): 28 | os.mkdir(dst_path) 29 | copyfile(src_path, dst_path + '/' + name) 30 | 31 | #----------------------------------------- 32 | #gallery 33 | gallery_path = download_path + '/bounding_box_test' 34 | gallery_save_path = download_path + '/pytorch/gallery' 35 | if not os.path.isdir(gallery_save_path): 36 | os.mkdir(gallery_save_path) 37 | 38 | for root, dirs, files in os.walk(gallery_path, topdown=True): 39 | for name in files: 40 | if not name[-3:]=='jpg': 41 | continue 42 | ID = name.split('_') 43 | src_path = gallery_path + '/' + name 44 | dst_path = gallery_save_path + '/' + ID[0] 45 | if not os.path.isdir(dst_path): 46 | os.mkdir(dst_path) 47 | copyfile(src_path, dst_path + '/' + name) 48 | 49 | #--------------------------------------- 50 | #train_all 51 | train_path = download_path + '/bounding_box_train' 52 | train_save_path = download_path + '/pytorch/train_all' 53 | if not os.path.isdir(train_save_path): 54 | os.mkdir(train_save_path) 55 | 56 | for root, dirs, files in os.walk(train_path, topdown=True): 57 | for name in files: 58 | if not name[-3:]=='jpg': 59 | continue 60 | ID = name.split('_') 61 | src_path = train_path + '/' + name 62 | dst_path = train_save_path + '/' + ID[0] 63 | if not os.path.isdir(dst_path): 64 | os.mkdir(dst_path) 65 | copyfile(src_path, dst_path + '/' + name) 66 | 67 | 68 | -------------------------------------------------------------------------------- /torchattacks/attacks/pgd.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | from torchvision import transforms 4 | from multimodal import * 5 | from ..attack import Attack 6 | from defendAugment import * 7 | def extract_feature_img(model, data, flip=False): 8 | img = data 9 | # Resize and Normalize 10 | img = torch.nn.functional.interpolate(img, size=(256, 128), 
mode='bilinear', align_corners=False) 11 | img -= torch.cuda.FloatTensor([[[0.485]], [[0.456]], [[0.406]]]) 12 | img /= torch.cuda.FloatTensor([[[0.229]], [[0.224]], [[0.225]]]) 13 | 14 | f1 = model(img) 15 | if flip: 16 | flip_img = fliplr(img) 17 | f2 = model(flip_img, False) 18 | ff = f1 + f2 19 | else: 20 | ff = f1 21 | fnorm = torch.norm(ff, p=2, dim=1, keepdim=True) 22 | ff = ff / fnorm 23 | return ff 24 | 25 | 26 | def criterion(f1s, f2s): 27 | ret = 0 28 | loss = torch.nn.MSELoss() 29 | for f1 in f1s: 30 | for f2 in f2s: 31 | ret += loss(f1, f2) 32 | return ret 33 | 34 | class PGD(Attack): 35 | def __init__(self, model, eps=5/255.0, alpha=1/255.0, steps=15, random_start=False, targeted=False): 36 | super(PGD, self).__init__("PGD", model) 37 | self.eps = eps 38 | self.alpha = alpha 39 | self.steps = steps 40 | self.random_start = random_start 41 | self.sign = 1 42 | if targeted: 43 | self.sign = -1 44 | 45 | def forward(self, images, labels): 46 | r""" 47 | Overridden. 48 | """ 49 | images = images.to(self.device) 50 | labels = labels.to(self.device) 51 | loss = nn.CrossEntropyLoss() 52 | 53 | adv_images = images.clone().detach() 54 | 55 | if self.random_start: 56 | # Starting at a uniformly random point 57 | adv_images = adv_images + \ 58 | torch.empty_like(adv_images).uniform_(-self.eps, self.eps) 59 | adv_images = torch.clamp(adv_images, min=-1, max=1) 60 | 61 | for i in range(self.steps): 62 | adv_images.requires_grad = True 63 | 64 | q_feature1 = extract_feature_img(self.model, adv_images) 65 | q_feature2 = extract_feature_img(self.model, images) 66 | cost = criterion(q_feature1, q_feature2).to(self.device) 67 | 68 | grad = torch.autograd.grad(cost, adv_images, 69 | retain_graph=False, create_graph=False)[0] 70 | 71 | adv_images = adv_images.detach() + self.alpha*grad.sign() 72 | delta = torch.clamp(adv_images - images, 73 | min=-self.eps, max=self.eps) 74 | adv_images = torch.clamp(images + delta, min=-1, max=1).detach() 75 | 76 | return adv_images 77 | -------------------------------------------------------------------------------- /evaluate_rerank.py: -------------------------------------------------------------------------------- 1 | import scipy.io 2 | import torch 3 | import numpy as np 4 | import time 5 | from re_ranking import re_ranking 6 | ####################################################################### 7 | # Evaluate 8 | def evaluate(score,ql,qc,gl,gc): 9 | index = np.argsort(score) #from small to large 10 | #index = index[::-1] 11 | # good index 12 | query_index = np.argwhere(gl==ql) 13 | camera_index = np.argwhere(gc==qc) 14 | 15 | good_index = np.setdiff1d(query_index, camera_index, assume_unique=True) 16 | junk_index1 = np.argwhere(gl==-1) 17 | junk_index2 = np.intersect1d(query_index, camera_index) 18 | junk_index = np.append(junk_index2, junk_index1) #.flatten()) 19 | 20 | CMC_tmp = compute_mAP(index, good_index, junk_index) 21 | return CMC_tmp 22 | 23 | 24 | def compute_mAP(index, good_index, junk_index): 25 | ap = 0 26 | cmc = torch.IntTensor(len(index)).zero_() 27 | if good_index.size==0: # if empty 28 | cmc[0] = -1 29 | return ap,cmc 30 | 31 | # remove junk_index 32 | mask = np.in1d(index, junk_index, invert=True) 33 | index = index[mask] 34 | 35 | # find good_index index 36 | ngood = len(good_index) 37 | mask = np.in1d(index, good_index) 38 | rows_good = np.argwhere(mask==True) 39 | rows_good = rows_good.flatten() 40 | 41 | cmc[rows_good[0]:] = 1 42 | for i in range(ngood): 43 | d_recall = 1.0/ngood 44 | precision = (i+1)*1.0/(rows_good[i]+1) 45 | 
if rows_good[i]!=0: 46 | old_precision = i*1.0/rows_good[i] 47 | else: 48 | old_precision=1.0 49 | ap = ap + d_recall*(old_precision + precision)/2 50 | 51 | return ap, cmc 52 | 53 | ###################################################################### 54 | result = scipy.io.loadmat('pytorch_result.mat') 55 | 56 | query_feature = result['query_f'] 57 | query_cam = result['query_cam'][0] 58 | query_label = result['query_label'][0] 59 | gallery_feature = result['gallery_f'] 60 | gallery_cam = result['gallery_cam'][0] 61 | gallery_label = result['gallery_label'][0] 62 | 63 | CMC = torch.IntTensor(len(gallery_label)).zero_() 64 | ap = 0.0 65 | #re-ranking 66 | print('calculate initial distance') 67 | q_g_dist = np.dot(query_feature, np.transpose(gallery_feature)) 68 | q_q_dist = np.dot(query_feature, np.transpose(query_feature)) 69 | g_g_dist = np.dot(gallery_feature, np.transpose(gallery_feature)) 70 | since = time.time() 71 | re_rank = re_ranking(q_g_dist, q_q_dist, g_g_dist) 72 | time_elapsed = time.time() - since 73 | print('Reranking complete in {:.0f}m {:.0f}s'.format( 74 | time_elapsed // 60, time_elapsed % 60)) 75 | for i in range(len(query_label)): 76 | ap_tmp, CMC_tmp = evaluate(re_rank[i,:],query_label[i],query_cam[i],gallery_label,gallery_cam) 77 | if CMC_tmp[0]==-1: 78 | continue 79 | CMC = CMC + CMC_tmp 80 | ap += ap_tmp 81 | #print(i, CMC_tmp[0]) 82 | 83 | CMC = CMC.float() 84 | CMC = CMC/len(query_label) #average CMC 85 | print('top1:%f top5:%f top10:%f mAP:%f'%(CMC[0],CMC[4],CMC[9],ap/len(query_label))) 86 | -------------------------------------------------------------------------------- /re_ranking.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python2/python3 2 | # -*- coding: utf-8 -*- 3 | import numpy as np 4 | 5 | def k_reciprocal_neigh( initial_rank, i, k1): 6 | forward_k_neigh_index = initial_rank[i,:k1+1] 7 | backward_k_neigh_index = initial_rank[forward_k_neigh_index,:k1+1] 8 | fi = np.where(backward_k_neigh_index==i)[0] 9 | return forward_k_neigh_index[fi] 10 | 11 | def re_ranking(q_g_dist, q_q_dist, g_g_dist, k1=20, k2=6, lambda_value=0.3): 12 | # The following naming, e.g. gallery_num, is different from outer scope. 13 | # Don't care about it. 14 | original_dist = np.concatenate( 15 | [np.concatenate([q_q_dist, q_g_dist], axis=1), 16 | np.concatenate([q_g_dist.T, g_g_dist], axis=1)], 17 | axis=0) 18 | original_dist = 2. - 2 * original_dist # change the cosine similarity metric to euclidean similarity metric 19 | original_dist = np.power(original_dist, 2).astype(np.float32) 20 | original_dist = np.transpose(1. 
* original_dist/np.max(original_dist,axis = 0)) 21 | V = np.zeros_like(original_dist).astype(np.float32) 22 | #initial_rank = np.argsort(original_dist).astype(np.int32) 23 | # top K1+1 24 | initial_rank = np.argpartition( original_dist, range(1,k1+1) ) 25 | 26 | query_num = q_g_dist.shape[0] 27 | all_num = original_dist.shape[0] 28 | 29 | for i in range(all_num): 30 | # k-reciprocal neighbors 31 | k_reciprocal_index = k_reciprocal_neigh( initial_rank, i, k1) 32 | k_reciprocal_expansion_index = k_reciprocal_index 33 | for j in range(len(k_reciprocal_index)): 34 | candidate = k_reciprocal_index[j] 35 | candidate_k_reciprocal_index = k_reciprocal_neigh( initial_rank, candidate, int(np.around(k1/2))) 36 | if len(np.intersect1d(candidate_k_reciprocal_index,k_reciprocal_index))> 2./3*len(candidate_k_reciprocal_index): 37 | k_reciprocal_expansion_index = np.append(k_reciprocal_expansion_index,candidate_k_reciprocal_index) 38 | 39 | k_reciprocal_expansion_index = np.unique(k_reciprocal_expansion_index) 40 | weight = np.exp(-original_dist[i,k_reciprocal_expansion_index]) 41 | V[i,k_reciprocal_expansion_index] = 1.*weight/np.sum(weight) 42 | 43 | original_dist = original_dist[:query_num,] 44 | if k2 != 1: 45 | V_qe = np.zeros_like(V,dtype=np.float32) 46 | for i in range(all_num): 47 | V_qe[i,:] = np.mean(V[initial_rank[i,:k2],:],axis=0) 48 | V = V_qe 49 | del V_qe 50 | del initial_rank 51 | invIndex = [] 52 | for i in range(all_num): 53 | invIndex.append(np.where(V[:,i] != 0)[0]) 54 | 55 | jaccard_dist = np.zeros_like(original_dist,dtype = np.float32) 56 | 57 | for i in range(query_num): 58 | temp_min = np.zeros(shape=[1,all_num],dtype=np.float32) 59 | indNonZero = np.where(V[i,:] != 0)[0] 60 | indImages = [] 61 | indImages = [invIndex[ind] for ind in indNonZero] 62 | for j in range(len(indNonZero)): 63 | temp_min[0,indImages[j]] = temp_min[0,indImages[j]]+ np.minimum(V[i,indNonZero[j]],V[indImages[j],indNonZero[j]]) 64 | jaccard_dist[i] = 1-temp_min/(2.-temp_min) 65 | 66 | final_dist = jaccard_dist*(1-lambda_value) + original_dist*lambda_value 67 | del original_dist 68 | del V 69 | del jaccard_dist 70 | final_dist = final_dist[:query_num,query_num:] 71 | return final_dist 72 | -------------------------------------------------------------------------------- /evaluate.py: -------------------------------------------------------------------------------- 1 | import scipy.io 2 | import torch 3 | import numpy as np 4 | #import time 5 | import os 6 | 7 | ####################################################################### 8 | # Evaluate 9 | def evaluate(qf,ql,qc,gf,gl,gc): 10 | query = qf 11 | score = np.dot(gf,query) 12 | # predict index 13 | index = np.argsort(score) #from small to large 14 | index = index[::-1] 15 | #index = index[0:2000] 16 | # good index 17 | query_index = np.argwhere(gl==ql) 18 | camera_index = np.argwhere(gc==qc) 19 | 20 | good_index = np.setdiff1d(query_index, camera_index, assume_unique=True) 21 | junk_index1 = np.argwhere(gl==-1) 22 | junk_index2 = np.intersect1d(query_index, camera_index) 23 | junk_index = np.append(junk_index2, junk_index1) #.flatten()) 24 | 25 | CMC_tmp = compute_mAP(index, good_index, junk_index) 26 | return CMC_tmp 27 | 28 | 29 | def compute_mAP(index, good_index, junk_index): 30 | ap = 0 31 | cmc = torch.IntTensor(len(index)).zero_() 32 | if good_index.size==0: # if empty 33 | cmc[0] = -1 34 | return ap,cmc 35 | 36 | # remove junk_index 37 | mask = np.in1d(index, junk_index, invert=True) 38 | index = index[mask] 39 | 40 | # find good_index index 41 | 
ngood = len(good_index) 42 | mask = np.in1d(index, good_index) 43 | rows_good = np.argwhere(mask==True) 44 | rows_good = rows_good.flatten() 45 | 46 | cmc[rows_good[0]:] = 1 47 | for i in range(ngood): 48 | d_recall = 1.0/ngood 49 | precision = (i+1)*1.0/(rows_good[i]+1) 50 | if rows_good[i]!=0: 51 | old_precision = i*1.0/rows_good[i] 52 | else: 53 | old_precision=1.0 54 | ap = ap + d_recall*(old_precision + precision)/2 55 | 56 | return ap, cmc 57 | 58 | ###################################################################### 59 | result = scipy.io.loadmat('pytorch_result.mat') 60 | query_feature = result['query_f'] 61 | query_cam = result['query_cam'][0] 62 | query_label = result['query_label'][0] 63 | gallery_feature = result['gallery_f'] 64 | gallery_cam = result['gallery_cam'][0] 65 | gallery_label = result['gallery_label'][0] 66 | 67 | multi = os.path.isfile('multi_query.mat') 68 | 69 | if multi: 70 | m_result = scipy.io.loadmat('multi_query.mat') 71 | mquery_feature = m_result['mquery_f'] 72 | mquery_cam = m_result['mquery_cam'][0] 73 | mquery_label = m_result['mquery_label'][0] 74 | 75 | CMC = torch.IntTensor(len(gallery_label)).zero_() 76 | ap = 0.0 77 | #print(query_label) 78 | for i in range(len(query_label)): 79 | ap_tmp, CMC_tmp = evaluate(query_feature[i],query_label[i],query_cam[i],gallery_feature,gallery_label,gallery_cam) 80 | if CMC_tmp[0]==-1: 81 | continue 82 | CMC = CMC + CMC_tmp 83 | ap += ap_tmp 84 | print(i, CMC_tmp[0]) 85 | 86 | CMC = CMC.float() 87 | CMC = CMC/len(query_label) #average CMC 88 | print('Rank@1:%f Rank@5:%f Rank@10:%f mAP:%f'%(CMC[0],CMC[4],CMC[9],ap/len(query_label))) 89 | 90 | # multiple-query 91 | CMC = torch.IntTensor(len(gallery_label)).zero_() 92 | ap = 0.0 93 | if multi: 94 | for i in range(len(query_label)): 95 | mquery_index1 = np.argwhere(mquery_label==query_label[i]) 96 | mquery_index2 = np.argwhere(mquery_cam==query_cam[i]) 97 | mquery_index = np.intersect1d(mquery_index1, mquery_index2) 98 | mq = np.mean(mquery_feature[mquery_index,:], axis=0) 99 | ap_tmp, CMC_tmp = evaluate(mq,query_label[i],query_cam[i],gallery_feature,gallery_label,gallery_cam) 100 | if CMC_tmp[0]==-1: 101 | continue 102 | CMC = CMC + CMC_tmp 103 | ap += ap_tmp 104 | #print(i, CMC_tmp[0]) 105 | CMC = CMC.float() 106 | CMC = CMC/len(query_label) #average CMC 107 | print('multi Rank@1:%f Rank@5:%f Rank@10:%f mAP:%f'%(CMC[0],CMC[4],CMC[9],ap/len(query_label))) 108 | -------------------------------------------------------------------------------- /torchattacks/attacks/LTA.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | from ..attack import Attack 4 | from collections import OrderedDict 5 | from torchvision import transforms 6 | from multimodal import * 7 | class LoadFromFloder(torch.utils.data.Dataset): 8 | def __init__(self, name_list, transform=None): 9 | self.name_list = name_list 10 | self.transform = transform 11 | 12 | def __len__(self): 13 | return len(self.name_list) 14 | 15 | def __getitem__(self, idx): 16 | from PIL import Image 17 | image = Image.open(self.name_list[idx]) 18 | if self.transform != None: 19 | image = self.transform(image) 20 | sample = image 21 | 22 | return sample 23 | 24 | def extract_feature_img(model, data, flip=False): 25 | img = data 26 | # Resize and Normalize 27 | img = torch.nn.functional.interpolate(img, size=(256, 128), mode='bilinear', align_corners=False) 28 | img -= torch.cuda.FloatTensor([[[0.485]], [[0.456]], [[0.406]]]) 29 | img /= 
torch.cuda.FloatTensor([[[0.229]], [[0.224]], [[0.225]]]) 30 | 31 | f1 = model(img) 32 | if flip: 33 | flip_img = fliplr(img) 34 | f2 = model(flip_img, False) 35 | ff = f1 + f2 36 | else: 37 | ff = f1 38 | fnorm = torch.norm(ff, p=2, dim=1, keepdim=True) 39 | ff = ff / fnorm 40 | return ff 41 | 42 | 43 | def criterion(f1s, f2s): 44 | ret = 0 45 | loss = torch.nn.MSELoss() 46 | for f1 in f1s: 47 | for f2 in f2s: 48 | ret += loss(f1, f2) 49 | return ret 50 | 51 | def get_id(img_path): 52 | camera_id = [] 53 | labels = [] 54 | data_list = OrderedDict() 55 | for path, v in img_path: 56 | label, filename = path.split('/')[-2:] 57 | camera = filename.split('c')[1] 58 | labels.append(int(label)) 59 | camera_id.append(int(camera[0])) 60 | if label in data_list: 61 | data_list[label].append(path) 62 | else: 63 | data_list[label] = [path] 64 | return camera_id, labels, data_list 65 | 66 | 67 | class LTA(Attack): 68 | def __init__(self, model, eps=5/255.0, alpha = 1/255.0, steps = 15, momentum = 1.0 ): 69 | super(LTA, self).__init__("LTA", model) 70 | self.eps = eps 71 | self.model = model 72 | self.alpha = alpha 73 | self.max_iter = steps 74 | self.momentum = momentum 75 | 76 | def forward(self, images, labels): 77 | # print('max_iter=',max_iter,'epsilon=',self.eps) 78 | images = images.to(self.device) 79 | transform = transforms.Compose([ 80 | transforms.ToPILImage(), 81 | LGPR(1), 82 | transforms.ToTensor() 83 | ]) 84 | lower_bound = images.data.cuda() - self.eps 85 | lower_bound[lower_bound < 0.0] = 0.0 86 | upper_bound = images.data.cuda() + self.eps 87 | upper_bound[upper_bound > 1.0] = 1.0 88 | x_adv = images 89 | 90 | x_adv.requires_grad = True 91 | grad = None 92 | for _ in range(self.max_iter): 93 | img_gray = images.clone().detach() 94 | img_gray = img_gray.cpu() 95 | img_gray[0] = transform(img_gray[0]) 96 | img_gray = img_gray.to(self.device) 97 | q_feature = extract_feature_img(self.model, x_adv) 98 | g_feature = extract_feature_img(self.model, img_gray) 99 | g_feature.detach_() 100 | loss = criterion(q_feature, g_feature).to(self.device) 101 | loss.backward() 102 | # get normed x_grad 103 | x_grad = x_adv.grad.data 104 | norm = torch.mean(torch.abs(x_grad).view((x_grad.shape[0], -1)), dim=1).view((-1, 1, 1, 1)) 105 | norm[norm < 1e-12] = 1e-12 106 | x_grad /= norm 107 | 108 | grad = x_grad if grad is None else self.momentum * grad + x_grad 109 | x_adv = x_adv.data + self.alpha * torch.sign(grad) 110 | 111 | x_adv = torch.max(x_adv, lower_bound) 112 | x_adv = torch.min(x_adv, upper_bound) 113 | x_adv.requires_grad = True 114 | return x_adv -------------------------------------------------------------------------------- /evaluate_gpu.py: -------------------------------------------------------------------------------- 1 | import scipy.io 2 | import torch 3 | import numpy as np 4 | #import time 5 | import os 6 | 7 | ####################################################################### 8 | # Evaluate 9 | def evaluate(qf,ql,qc,gf,gl,gc): 10 | query = qf.view(-1,1) 11 | # print(query.shape) 12 | score = torch.mm(gf,query) 13 | score = score.squeeze(1).cpu() 14 | score = score.numpy() 15 | # predict index 16 | index = np.argsort(score) #from small to large 17 | index = index[::-1] 18 | # index = index[0:2000] 19 | # good index 20 | query_index = np.argwhere(gl==ql) 21 | camera_index = np.argwhere(gc==qc) 22 | 23 | good_index = np.setdiff1d(query_index, camera_index, assume_unique=True) 24 | junk_index1 = np.argwhere(gl==-1) 25 | junk_index2 = np.intersect1d(query_index, camera_index) 26 | 
junk_index = np.append(junk_index2, junk_index1) #.flatten()) 27 | 28 | CMC_tmp = compute_mAP(index, good_index, junk_index) 29 | return CMC_tmp 30 | 31 | 32 | def compute_mAP(index, good_index, junk_index): 33 | ap = 0 34 | cmc = torch.IntTensor(len(index)).zero_() 35 | if good_index.size==0: # if empty 36 | cmc[0] = -1 37 | return ap,cmc 38 | 39 | # remove junk_index 40 | mask = np.in1d(index, junk_index, invert=True) 41 | index = index[mask] 42 | 43 | # find good_index index 44 | ngood = len(good_index) 45 | mask = np.in1d(index, good_index) 46 | rows_good = np.argwhere(mask==True) 47 | rows_good = rows_good.flatten() 48 | 49 | cmc[rows_good[0]:] = 1 50 | for i in range(ngood): 51 | d_recall = 1.0/ngood 52 | precision = (i+1)*1.0/(rows_good[i]+1) 53 | if rows_good[i]!=0: 54 | old_precision = i*1.0/rows_good[i] 55 | else: 56 | old_precision=1.0 57 | ap = ap + d_recall*(old_precision + precision)/2 58 | 59 | return ap, cmc 60 | 61 | ###################################################################### 62 | result = scipy.io.loadmat('pytorch_result.mat') 63 | query_feature = torch.FloatTensor(result['query_f']) 64 | query_cam = result['query_cam'][0] 65 | query_label = result['query_label'][0] 66 | gallery_feature = torch.FloatTensor(result['gallery_f']) 67 | gallery_cam = result['gallery_cam'][0] 68 | gallery_label = result['gallery_label'][0] 69 | 70 | multi = os.path.isfile('multi_query.mat') 71 | 72 | if multi: 73 | m_result = scipy.io.loadmat('multi_query.mat') 74 | mquery_feature = torch.FloatTensor(m_result['mquery_f']) 75 | mquery_cam = m_result['mquery_cam'][0] 76 | mquery_label = m_result['mquery_label'][0] 77 | mquery_feature = mquery_feature.cuda() 78 | 79 | query_feature = query_feature.cuda() 80 | gallery_feature = gallery_feature.cuda() 81 | 82 | print(query_feature.shape) 83 | CMC = torch.IntTensor(len(gallery_label)).zero_() 84 | ap = 0.0 85 | #print(query_label) 86 | for i in range(len(query_label)): 87 | ap_tmp, CMC_tmp = evaluate(query_feature[i],query_label[i],query_cam[i],gallery_feature,gallery_label,gallery_cam) 88 | if CMC_tmp[0]==-1: 89 | continue 90 | CMC = CMC + CMC_tmp 91 | ap += ap_tmp 92 | #print(i, CMC_tmp[0]) 93 | 94 | CMC = CMC.float() 95 | CMC = CMC/len(query_label) #average CMC 96 | print('Rank@1:%f Rank@5:%f Rank@10:%f mAP:%f'%(CMC[0],CMC[4],CMC[9],ap/len(query_label))) 97 | 98 | # multiple-query 99 | CMC = torch.IntTensor(len(gallery_label)).zero_() 100 | ap = 0.0 101 | if multi: 102 | for i in range(len(query_label)): 103 | mquery_index1 = np.argwhere(mquery_label==query_label[i]) 104 | mquery_index2 = np.argwhere(mquery_cam==query_cam[i]) 105 | mquery_index = np.intersect1d(mquery_index1, mquery_index2) 106 | mq = torch.mean(mquery_feature[mquery_index,:], dim=0) 107 | ap_tmp, CMC_tmp = evaluate(mq,query_label[i],query_cam[i],gallery_feature,gallery_label,gallery_cam) 108 | if CMC_tmp[0]==-1: 109 | continue 110 | CMC = CMC + CMC_tmp 111 | ap += ap_tmp 112 | #print(i, CMC_tmp[0]) 113 | CMC = CMC.float() 114 | CMC = CMC/len(query_label) #average CMC 115 | print('multi Rank@1:%f Rank@5:%f Rank@10:%f mAP:%f'%(CMC[0],CMC[4],CMC[9],ap/len(query_label))) 116 | -------------------------------------------------------------------------------- /model.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | from torch.nn import init 4 | from torchvision import models 5 | from torch.autograd import Variable 6 | import pretrainedmodels 7 | 8 | 
###################################################################### 9 | def weights_init_kaiming(m): 10 | classname = m.__class__.__name__ 11 | # print(classname) 12 | if classname.find('Conv') != -1: 13 | init.kaiming_normal_(m.weight.data, a=0, mode='fan_in') # For old pytorch, you may use kaiming_normal. 14 | elif classname.find('Linear') != -1: 15 | init.kaiming_normal_(m.weight.data, a=0, mode='fan_out') 16 | init.constant_(m.bias.data, 0.0) 17 | elif classname.find('BatchNorm1d') != -1: 18 | init.normal_(m.weight.data, 1.0, 0.02) 19 | init.constant_(m.bias.data, 0.0) 20 | 21 | def weights_init_classifier(m): 22 | classname = m.__class__.__name__ 23 | if classname.find('Linear') != -1: 24 | init.normal_(m.weight.data, std=0.001) 25 | init.constant_(m.bias.data, 0.0) 26 | 27 | # Defines the new fc layer and classification layer 28 | # |--Linear--|--bn--|--relu--|--Linear--| 29 | class ClassBlock(nn.Module): 30 | def __init__(self, input_dim, class_num, droprate, relu=False, bnorm=True, num_bottleneck=512, linear=True, return_f = False): 31 | super(ClassBlock, self).__init__() 32 | self.return_f = return_f 33 | add_block = [] 34 | if linear: 35 | add_block += [nn.Linear(input_dim, num_bottleneck)] 36 | else: 37 | num_bottleneck = input_dim 38 | if bnorm: 39 | add_block += [nn.BatchNorm1d(num_bottleneck)] 40 | if relu: 41 | add_block += [nn.LeakyReLU(0.1)] 42 | if droprate>0: 43 | add_block += [nn.Dropout(p=droprate)] 44 | add_block = nn.Sequential(*add_block) 45 | add_block.apply(weights_init_kaiming) 46 | 47 | classifier = [] 48 | classifier += [nn.Linear(num_bottleneck, class_num)] 49 | classifier = nn.Sequential(*classifier) 50 | classifier.apply(weights_init_classifier) 51 | 52 | self.add_block = add_block 53 | self.classifier = classifier 54 | def forward(self, x): 55 | x = self.add_block(x) 56 | if self.return_f: 57 | f = x 58 | x = self.classifier(x) 59 | return [x,f] 60 | else: 61 | x = self.classifier(x) 62 | return x 63 | 64 | # Define the ResNet50-based Model 65 | class ft_net(nn.Module): 66 | 67 | def __init__(self, class_num, droprate=0.5, stride=2, circle=False): 68 | super(ft_net, self).__init__() 69 | model_ft = models.resnet50(pretrained=True) 70 | # avg pooling to global pooling 71 | if stride == 1: 72 | model_ft.layer4[0].downsample[0].stride = (1,1) 73 | model_ft.layer4[0].conv2.stride = (1,1) 74 | model_ft.avgpool = nn.AdaptiveAvgPool2d((1,1)) 75 | self.model = model_ft 76 | self.circle = circle 77 | self.classifier = ClassBlock(2048, class_num, droprate, return_f = circle) 78 | 79 | def forward(self, x): 80 | x = self.model.conv1(x) 81 | x = self.model.bn1(x) 82 | x = self.model.relu(x) 83 | x = self.model.maxpool(x) 84 | x = self.model.layer1(x) 85 | x = self.model.layer2(x) 86 | x = self.model.layer3(x) 87 | x = self.model.layer4(x) 88 | x = self.model.avgpool(x) 89 | x = x.view(x.size(0), x.size(1)) 90 | x = self.classifier(x) 91 | return x 92 | 93 | # Define the DenseNet121-based Model 94 | class ft_net_dense(nn.Module): 95 | 96 | def __init__(self, class_num, droprate=0.5, circle=False): 97 | super().__init__() 98 | model_ft = models.densenet121(pretrained=True) 99 | model_ft.features.avgpool = nn.AdaptiveAvgPool2d((1,1)) 100 | model_ft.fc = nn.Sequential() 101 | self.model = model_ft 102 | self.circle = circle 103 | # For DenseNet, the feature dim is 1024 104 | self.classifier = ClassBlock(1024, class_num, droprate, return_f=circle) 105 | 106 | def forward(self, x): 107 | x = self.model.features(x) 108 | x = x.view(x.size(0), x.size(1)) 109 | x = 
self.classifier(x)
110 |         return x
111 |
112 |
--------------------------------------------------------------------------------
/torchattacks/attack.py:
--------------------------------------------------------------------------------
1 | import torch
2 |
3 |
4 | class Attack(object):
5 |     r"""
6 |     Base class for all attacks.
7 |
8 |     .. note::
9 |         It automatically sets the device to the one the given model is on.
10 |         It temporarily changes the model's `training mode` to `test`
11 |         by calling `.eval()` during the attack.
12 |     """
13 |     def __init__(self, name, model):
14 |         r"""
15 |         Initializes internal Attack state.
16 |
17 |         Arguments:
18 |             name (str) : name of the attack.
19 |             model (nn.Module): model to attack.
20 |         """
21 |
22 |         self.attack = name
23 |         self.model = model
24 |         self.model_name = str(model).split("(")[0]
25 |
26 |         self.training = model.training
27 |         self.device = next(model.parameters()).device
28 |
29 |         self.mode = 'float'
30 |
31 |     # Defines the computation performed at every call.
32 |     # Should be overridden by all subclasses.
33 |     def forward(self, *input):
34 |         r"""
35 |         Defines the computation performed at every call.
36 |         Should be overridden by all subclasses.
37 |         """
38 |         raise NotImplementedError
39 |
40 |     # Determines whether adversarial images are returned as 'int' or 'float'.
41 |     def set_mode(self, mode):
42 |         r"""
43 |         Set whether adversarial images are returned as `int` or `float`.
44 |
45 |         Arguments:
46 |             mode (str) : 'float' or 'int'. (DEFAULT : 'float')
47 |
48 |         """
49 |         if mode == 'float':
50 |             self.mode = 'float'
51 |         elif mode == 'int':
52 |             self.mode = 'int'
53 |         else:
54 |             raise ValueError(mode + " is not valid")
55 |
56 |     # Save adversarial images as a torch tensor built from data_loader.
57 |     def save(self, file_name, data_loader, accuracy=True):
58 |         r"""
59 |         Save adversarial images as torch.tensor from data_loader.
60 |
61 |         Arguments:
62 |             file_name (str) : save path.
63 |             data_loader (torch.utils.data.DataLoader) : dataloader.
64 |             accuracy (bool) : if you don't want the accuracy reported,
65 |                 set accuracy to False. (DEFAULT : True)
66 |
67 |         """
68 |         self.model.eval()
69 |
70 |         image_list = []
71 |         label_list = []
72 |
73 |         correct = 0
74 |         total = 0
75 |
76 |         total_batch = len(data_loader)
77 |
78 |         for step, (images, labels) in enumerate(data_loader):
79 |             adv_images = self.__call__(images, labels)
80 |
81 |             image_list.append(adv_images.cpu())
82 |             label_list.append(labels.cpu())
83 |
84 |             if self.mode == 'int':
85 |                 adv_images = adv_images.float()/255
86 |
87 |             if accuracy:
88 |                 outputs = self.model(adv_images)
89 |                 _, predicted = torch.max(outputs.data, 1)
90 |                 total += labels.size(0)
91 |                 correct += (predicted == labels.to(self.device)).sum()
92 |
93 |                 acc = 100 * float(correct) / total
94 |                 print('- Save Progress : %2.2f %% / Accuracy : %2.2f %%' % ((step+1)/total_batch*100, acc), end='\r')
95 |             else:
96 |                 print('- Save Progress : %2.2f %% ' % ((step+1)/total_batch*100), end='\r')
97 |
98 |         x = torch.cat(image_list, 0)
99 |         y = torch.cat(label_list, 0)
100 |         torch.save((x, y), file_name)
101 |         print('\n- Save Complete!')
102 |
103 |         self._switch_model()
104 |
105 |     # The model's full structure is not displayed, to keep the printout short.
106 | def __str__(self): 107 | info = self.__dict__.copy() 108 | del info['model'] 109 | del info['attack'] 110 | return self.attack + "(" + ', '.join('{}={}'.format(key, val) for key, val in info.items()) + ")" 111 | 112 | def __call__(self, *input, **kwargs): 113 | self.model.eval() 114 | images = self.forward(*input, **kwargs) 115 | self._switch_model() 116 | 117 | if self.mode == 'int': 118 | images = self._to_uint(images) 119 | 120 | return images 121 | 122 | def _to_uint(self, images): 123 | return (images*255).type(torch.uint8) 124 | 125 | # It changes model to the original eval/train. 126 | def _switch_model(self): 127 | if self.training: 128 | self.model.train() 129 | else: 130 | self.model.eval() 131 | -------------------------------------------------------------------------------- /torchattacks/attacks/mfgsm.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | from ..attack import Attack 4 | from collections import OrderedDict 5 | from torchvision import transforms 6 | from multimodal import * 7 | class LoadFromFloder(torch.utils.data.Dataset): 8 | def __init__(self, name_list, transform=None): 9 | self.name_list = name_list 10 | self.transform = transform 11 | 12 | def __len__(self): 13 | return len(self.name_list) 14 | 15 | def __getitem__(self, idx): 16 | from PIL import Image 17 | image = Image.open(self.name_list[idx]) 18 | if self.transform != None: 19 | image = self.transform(image) 20 | sample = image 21 | 22 | return sample 23 | 24 | def extract_feature_img(model, data, flip=False): 25 | img = data 26 | # Resize and Normalize 27 | img = torch.nn.functional.interpolate(img, size=(256, 128), mode='bilinear', align_corners=False) 28 | img -= torch.cuda.FloatTensor([[[0.485]], [[0.456]], [[0.406]]]) 29 | img /= torch.cuda.FloatTensor([[[0.229]], [[0.224]], [[0.225]]]) 30 | 31 | f1 = model(img) 32 | if flip: 33 | flip_img = fliplr(img) 34 | f2 = model(flip_img, False) 35 | ff = f1 + f2 36 | else: 37 | ff = f1 38 | fnorm = torch.norm(ff, p=2, dim=1, keepdim=True) 39 | ff = ff / fnorm 40 | return ff 41 | 42 | 43 | def criterion(f1s, f2s): 44 | ret = 0 45 | loss = torch.nn.MSELoss() 46 | for f1 in f1s: 47 | for f2 in f2s: 48 | ret += loss(f1, f2) 49 | return ret 50 | 51 | def get_id(img_path): 52 | camera_id = [] 53 | labels = [] 54 | data_list = OrderedDict() 55 | for path, v in img_path: 56 | label, filename = path.split('/')[-2:] 57 | camera = filename.split('c')[1] 58 | labels.append(int(label)) 59 | camera_id.append(int(camera[0])) 60 | if label in data_list: 61 | data_list[label].append(path) 62 | else: 63 | data_list[label] = [path] 64 | return camera_id, labels, data_list 65 | 66 | 67 | class MIFGSM(Attack): 68 | 69 | def __init__(self, model,image_datasets, eps=5/255.0, alpha = 1/ 255.0, steps = 15,momentum = 0): 70 | super(MIFGSM, self).__init__("MIFGSM", model) 71 | self.eps = eps 72 | self.alpha = alpha 73 | self.image_datasets = image_datasets #add 74 | self.model = model 75 | self.max_iter = steps 76 | self.momentum = momentum 77 | 78 | def forward(self, images, labels): 79 | r""" 80 | Overridden. 
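        For each query image, the gallery images sharing the query's identity
        are looked up through `image_datasets`, and momentum-FGSM steps then
        increase the MSE between the adversarial query feature and those
        gallery features, staying within an L-infinity ball of radius `eps`.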
81 | """ 82 | images = images.to(self.device) 83 | gallery_path = self.image_datasets['gallery'].imgs 84 | 85 | gallery_cam, gallery_label, gallery_dict = get_id(gallery_path) 86 | data_transforms = transforms.Compose([ 87 | transforms.ToTensor(), 88 | ]) 89 | gallery_datasets = LoadFromFloder(gallery_dict[labels],data_transforms) 90 | gallery_dataloader = torch.utils.data.DataLoader(gallery_datasets, batch_size=1,shuffle=False, num_workers=1) 91 | 92 | 93 | for g_data in gallery_dataloader: 94 | g_img = g_data.to(self.device) 95 | g_feature = extract_feature_img(self.model, g_img) 96 | g_feature.detach_() 97 | x_adv = images 98 | lower_bound = images.data.cuda() - self.eps 99 | lower_bound[lower_bound < 0.0] = 0.0 100 | upper_bound = images.data.cuda() + self.eps 101 | upper_bound[upper_bound > 1.0] = 1.0 102 | 103 | x_adv.requires_grad = True 104 | grad = None 105 | for i in range(self.max_iter): 106 | q_feature = extract_feature_img(self.model, x_adv) 107 | loss = criterion(q_feature, g_feature).to(self.device) 108 | loss.backward() 109 | # get normed x_grad 110 | x_grad = x_adv.grad.data 111 | norm = torch.mean(torch.abs(x_grad).view((x_grad.shape[0], -1)), dim=1).view((-1, 1, 1, 1)) 112 | norm[norm < 1e-12] = 1e-12 113 | x_grad /= norm 114 | 115 | grad = x_grad if grad is None else self.momentum * grad + x_grad 116 | x_adv = x_adv.data + self.alpha* torch.sign(grad) 117 | 118 | x_adv = torch.max(x_adv, lower_bound) 119 | x_adv = torch.min(x_adv, upper_bound) 120 | x_adv.requires_grad = True 121 | return x_adv 122 | 123 | -------------------------------------------------------------------------------- /defendAugment.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | from PIL import Image, ImageEnhance, ImageOps 3 | import numpy as np 4 | import random 5 | 6 | class AutoAugPolicy(object): 7 | 8 | def __init__(self, p=1, fillcolor=(128, 128, 128)): 9 | self.p = p 10 | 11 | self.policies = [ 12 | SubPolicy(0.4, "posterize", 8, 0.6, "rotate", 9, fillcolor), 13 | SubPolicy(0.6, "solarize", 5, 0.6, "autocontrast", 5, fillcolor), 14 | SubPolicy(0.8, "equalize", 8, 0.6, "equalize", 3, fillcolor), 15 | SubPolicy(0.6, "posterize", 7, 0.6, "posterize", 6, fillcolor), 16 | SubPolicy(0.4, "equalize", 7, 0.2, "solarize", 4, fillcolor), 17 | 18 | SubPolicy(0.4, "equalize", 4, 0.8, "rotate", 8, fillcolor), 19 | SubPolicy(0.6, "solarize", 3, 0.6, "equalize", 7, fillcolor), 20 | SubPolicy(0.8, "posterize", 5, 1.0, "equalize", 2, fillcolor), 21 | SubPolicy(0.2, "rotate", 3, 0.6, "solarize", 8, fillcolor), 22 | SubPolicy(0.6, "equalize", 8, 0.4, "posterize", 6, fillcolor), 23 | 24 | SubPolicy(0.8, "rotate", 8, 0.4, "color", 0, fillcolor), 25 | SubPolicy(0.4, "rotate", 9, 0.6, "equalize", 2, fillcolor), 26 | SubPolicy(0.0, "equalize", 7, 0.8, "equalize", 8, fillcolor), 27 | SubPolicy(0.6, "invert", 4, 1.0, "equalize", 8, fillcolor), 28 | SubPolicy(0.6, "color", 4, 1.0, "contrast", 8, fillcolor), 29 | 30 | SubPolicy(0.8, "rotate", 8, 1.0, "color", 2, fillcolor), 31 | SubPolicy(0.8, "color", 8, 0.8, "solarize", 7, fillcolor), 32 | SubPolicy(0.4, "sharpness", 7, 0.6, "invert", 8, fillcolor), 33 | SubPolicy(0.6, "shearX", 5, 1.0, "equalize", 9, fillcolor), 34 | SubPolicy(0.4, "color", 0, 0.6, "equalize", 3, fillcolor), 35 | 36 | SubPolicy(0.4, "equalize", 7, 0.2, "solarize", 4, fillcolor), 37 | SubPolicy(0.6, "solarize", 5, 0.6, "autocontrast", 5, fillcolor), 38 | SubPolicy(0.6, "invert", 4, 1.0, "equalize", 8, fillcolor), 39 | SubPolicy(0.6, "color", 
4, 1.0, "contrast", 8, fillcolor), 40 | SubPolicy(0.8, "equalize", 8, 0.6, "equalize", 3, fillcolor) 41 | ] 42 | 43 | def __call__(self, img): 44 | if random.uniform(0, 1) < self.p: 45 | policy_idx = random.randint(0, len(self.policies) - 1) 46 | return self.policies[policy_idx](img) 47 | else: 48 | return img 49 | 50 | def __repr__(self): 51 | return "RandomAug Policy to defend" 52 | 53 | class SubPolicy(object): 54 | def __init__(self, p1, operation1, magnitude_idx1, p2, operation2, magnitude_idx2, fillcolor=(128, 128, 128)): 55 | ranges = { 56 | "shearX": np.linspace(0, 0.3, 10), 57 | "shearY": np.linspace(0, 0.3, 10), 58 | "translateX": np.linspace(0, 150 / 331, 10), 59 | "translateY": np.linspace(0, 150 / 331, 10), 60 | "rotate": np.linspace(0, 30, 10), 61 | "color": np.linspace(0.0, 0.9, 10), 62 | "posterize": np.round(np.linspace(8, 4, 10), 0).astype(np.int), 63 | "solarize": np.linspace(256, 0, 10), 64 | "contrast": np.linspace(0.0, 0.9, 10), 65 | "sharpness": np.linspace(0.0, 0.9, 10), 66 | "brightness": np.linspace(0.0, 0.9, 10), 67 | "autocontrast": [0] * 10, 68 | "equalize": [0] * 10, 69 | "invert": [0] * 10 70 | } 71 | 72 | def rotate_with_fill(img, magnitude): 73 | rot = img.convert("RGBA").rotate(magnitude) 74 | return Image.composite(rot, Image.new("RGBA", rot.size, (128,) * 4), rot).convert(img.mode) 75 | 76 | func = { 77 | "shearX": lambda img, magnitude: img.transform( 78 | img.size, Image.AFFINE, (1, magnitude * random.choice([-1, 1]), 0, 0, 1, 0), 79 | Image.BICUBIC, fillcolor=fillcolor), 80 | "shearY": lambda img, magnitude: img.transform( 81 | img.size, Image.AFFINE, (1, 0, 0, magnitude * random.choice([-1, 1]), 1, 0), 82 | Image.BICUBIC, fillcolor=fillcolor), 83 | "translateX": lambda img, magnitude: img.transform( 84 | img.size, Image.AFFINE, (1, 0, magnitude * img.size[0] * random.choice([-1, 1]), 0, 1, 0), 85 | fillcolor=fillcolor), 86 | "translateY": lambda img, magnitude: img.transform( 87 | img.size, Image.AFFINE, (1, 0, 0, 0, 1, magnitude * img.size[1] * random.choice([-1, 1])), 88 | fillcolor=fillcolor), 89 | "rotate": lambda img, magnitude: rotate_with_fill(img, magnitude), 90 | "color": lambda img, magnitude: ImageEnhance.Color(img).enhance(1 + magnitude * random.choice([-1, 1])), 91 | "posterize": lambda img, magnitude: ImageOps.posterize(img, magnitude), 92 | "solarize": lambda img, magnitude: ImageOps.solarize(img, magnitude), 93 | "contrast": lambda img, magnitude: ImageEnhance.Contrast(img).enhance( 94 | 1 + magnitude * random.choice([-1, 1])), 95 | "sharpness": lambda img, magnitude: ImageEnhance.Sharpness(img).enhance( 96 | 1 + magnitude * random.choice([-1, 1])), 97 | "brightness": lambda img, magnitude: ImageEnhance.Brightness(img).enhance( 98 | 1 + magnitude * random.choice([-1, 1])), 99 | "autocontrast": lambda img, magnitude: ImageOps.autocontrast(img), 100 | "equalize": lambda img, magnitude: ImageOps.equalize(img), 101 | "invert": lambda img, magnitude: ImageOps.invert(img) 102 | } 103 | 104 | self.p1 = p1 105 | self.operation1 = func[operation1] 106 | self.magnitude1 = ranges[operation1][magnitude_idx1] 107 | self.p2 = p2 108 | self.operation2 = func[operation2] 109 | self.magnitude2 = ranges[operation2][magnitude_idx2] 110 | 111 | def __call__(self, img): 112 | if random.random() < self.p1: 113 | img = self.operation1(img, self.magnitude1) 114 | if random.random() < self.p2: 115 | img = self.operation2(img, self.magnitude2) 116 | return img 117 | -------------------------------------------------------------------------------- /README.txt: 
--------------------------------------------------------------------------------
1 | # Code for the CVPR 2022 paper "Person Re-identification Method Based on Color Attack and Joint Defence".
2 |
3 | Prerequisites
4 | ·Python 3.6
5 | ·GPU Memory >= 6G
6 | ·Numpy
7 | ·Pytorch 0.3+ (http://pytorch.org/)
8 | ·Torchvision from the source
9 |
10 | You can run pip install -r requirements.txt to install the required packages, or conda env create -f environment.yml to create a new environment with the required packages installed.
11 |
12 | Since we use the Market1501 and DukeMTMC-reID datasets for our experiments, you must download them beforehand.
13 |
14 | --------------------------------------------------------------------------Getting started-----------------------------------------------------------------------------
15 | Part 1: Training
16 |
17 | Part 1.1: Prepare the Data Folder (python prepare.py)
18 |
19 | You may notice that the downloaded folder is organized as:
20 | ├── Market/
21 | │   ├── bounding_box_test/    /* Files for testing (candidate images pool)
22 | │   ├── bounding_box_train/   /* Files for training
23 | │   ├── gt_bbox/              /* We do not use it
24 | │   ├── gt_query/             /* We do not use it
25 | │   ├── query/                /* Files for testing (query images)
26 | │   ├── readme.txt
27 |
28 | Open prepare.py in an editor and change the fifth line to your download path, such as /home/Download/Market. Then run the script in the terminal:
29 |
30 | python prepare.py
31 |
32 | This creates a subfolder called pytorch under the download folder:
33 | ├── Market/
34 | │   ├── bounding_box_test/    /* Files for testing (candidate images pool)
35 | │   ├── bounding_box_train/   /* Files for training
36 | │   ├── gt_bbox/              /* We do not use it
37 | │   ├── gt_query/             /* We do not use it
38 | │   ├── query/                /* Files for testing (query images)
39 | │   ├── readme.txt
40 | │   ├── pytorch/
41 | │       ├── train_all/        /* train
42 | │           ├── 0002
43 | │           ├── 0007
44 | │           ...
45 | │       ├── val/              /* val
46 | │       ├── query/            /* query files
47 | │       ├── gallery/          /* gallery files
48 | In every subfolder, such as pytorch/train_all/0002, images with the same ID are grouped together. The data is now ready for torchvision to read.
49 | -----------------
50 |
51 | Part 1.2: Training (python train.py)
52 |
53 | We can train a normally trained (baseline) model by:
54 |
55 | python train.py --gpu_ids 0 --name Normally_Trained --data_dir your_data_path --epoch 60
56 |
57 | --gpu_ids: which gpu to run on.
58 | --name: the name of the model.
59 | --data_dir: the path of the training data.
60 | --epoch: the number of training epochs.
61 |
62 | The default backbone is ResNet50; you may apply '--use_dense' to use DenseNet instead.
63 | The trained model will be saved in ./model.
64 |
65 | If you want to train a DL defense model, do as follows:
66 | Change the 65th line in train.py to apply the code 'Fuse_LFusePR(G=0.05, G_rgb=0.01, S_rgb=0.01, Aug=0.05, F=0.1)'.
67 |
68 | python train.py --gpu_ids 0 --name DL --data_dir your_data_path --epoch 120
69 | ---------------------------------------------------------------------------------------------------------------------------------------------------
70 | Part 2: Test
71 |
72 | Part 2.1: Extracting features (python test.py)
73 |
74 | In this part, we load the network weights we just trained to extract the visual feature of every image.
75 | python test.py --gpu_ids 0 --name Normally_Trained --test_dir your_data_path
76 |
77 | --name: the directory name of the trained model.
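As a quick sanity check, you can inspect the saved features directly. The snippet below is illustrative only: the field names are the ones test.py writes to pytorch_result.mat, and evaluate.py implements the full evaluation protocol.

import scipy.io
import numpy as np

result = scipy.io.loadmat('pytorch_result.mat')
qf = result['query_f']        # (num_query, 512), L2-normalized features
gf = result['gallery_f']      # (num_gallery, 512), L2-normalized features
score = np.dot(gf, qf[0])     # cosine similarity of the first query
print('top-5 gallery indices:', np.argsort(score)[::-1][:5])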
78 |
79 | -----------------
80 | Part 2.2: Evaluation (optionally with re-ranking).
81 | python evaluate_gpu.py
82 |
83 | Before using it, you must first run 'python test.py'. To test with re-ranking, run 'python evaluate_rerank.py' instead; re-ranking may take more than 10G of memory, so run it on a powerful machine if possible.
84 |
85 | ---------------------------------------------------------------------------------------------------------------------------------------------------
86 | Part 3: White-Box Attack
87 |
88 | python aa_LTA.py --gpu_ids 0 --name (such as: DL) --test_dir your_data_path
89 | --name: the name of the folder where the model you want to attack is saved.
90 |
91 | The adversarial examples will be saved in ./adv_data, which is the adversarial version of the query set. Use it to replace the original query set, then run 'test.py' and 'evaluate_gpu.py' to measure the effect of the attack.
92 | ---------------------------------------------------------------------------------------------------------------------------------------------------
93 | Part 4: Joint Adversarial Defense (JAD)
94 |
95 | Before this, you need to train a DL model and perform a white-box attack on it to obtain adversarial examples.
96 |
97 | Change lines 78-80 in test.py to apply the code:
98 | ######## JAD
99 | transforms.Resize((110,50), interpolation=3),
100 | transforms.Resize((220,100), interpolation=3),
101 | transforms.Resize((110,50), interpolation=3),
102 |
103 | This is our passive defense, Circuitous Scaling. Then run 'test.py' and 'evaluate_gpu.py' to test the effect of the JAD defence.
104 |
105 | Considering the limitations of DL, it is better to train several DL models and test their attacks and defenses separately, or to use another dataset to test the cross-domain performance of the DL models. In cross-domain tests, models that perform better than the normally trained model will also have better defensive capability.
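For reference, the Circuitous Scaling step above corresponds to the following torchvision pipeline (a sketch assembled from the commented JAD block and the normal branch in test.py; interpolation=3 is PIL's bicubic mode):

from torchvision import transforms

data_transforms = transforms.Compose([
    # Circuitous Scaling: shrink, enlarge, then shrink again ...
    transforms.Resize((110, 50), interpolation=3),
    transforms.Resize((220, 100), interpolation=3),
    transforms.Resize((110, 50), interpolation=3),
    # ... followed by the normal test-time preprocessing
    transforms.Resize((256, 128), interpolation=3),
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])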
106 |
107 |
108 | If you encounter any issues reproducing the adversarial defense, please refer to the earlier open-source version of the code: https://github.com/finger-monkey/ReID_Adversarial_Defense/
109 |
110 | If you use our code, please cite the following paper:
111 |
112 | ```
113 | @inproceedings{colorAttack2022,
114 | title={Person re-identification method based on color attack and joint defence},
115 | author={Gong, Yunpeng and Huang, Liqing and Chen, Lifei},
116 | booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition},
117 | pages={4313--4322},
118 | year={2022}
119 | }
120 | ```
121 |
122 | ## Contact Me
123 |
124 | Email: fmonkey625@gmail.com
125 |
--------------------------------------------------------------------------------
/multimodal.py:
--------------------------------------------------------------------------------
1 | # encoding: utf-8
2 |
3 | import math
4 | from PIL import Image
5 | import random
6 | import numpy as np
7 | import random
8 | import cv2
9 | import torchvision.transforms as T
10 | from defendAugment import *
11 |
12 | ########################### this code is for the Local Grayscale Transformation (LGT = LGPR) #################################
13 | class LGPR(object):  # Used to implement the LTA (Local Transformation Attack)
14 |
15 |     def __init__(self, probability=0.2, sl=0.02, sh=0.4, r1=0.3):
16 |         self.probability = probability
17 |         self.sl = sl
18 |         self.sh = sh
19 |         self.r1 = r1
20 |
21 |     def __call__(self, img):
22 |
23 |         new = img.convert("L")
24 |         np_img = np.array(new, dtype=np.uint8)
25 |         img_gray = np.dstack([np_img, np_img, np_img])
26 |
27 |         if random.uniform(0, 1) >= self.probability:
28 |             return img
29 |
30 |         for attempt in range(100):
31 |             area = img.size[0] * img.size[1]
32 |             target_area = random.uniform(self.sl, self.sh) * area
33 |             aspect_ratio = random.uniform(self.r1, 1 / self.r1)
34 |
35 |             h = int(round(math.sqrt(target_area * aspect_ratio)))
36 |             w = int(round(math.sqrt(target_area / aspect_ratio)))
37 |
38 |             if w < img.size[0] and h < img.size[1]:  # PIL size is (width, height)
39 |                 x1 = random.randint(0, img.size[0] - w)
40 |                 y1 = random.randint(0, img.size[1] - h)
41 |                 img = np.asarray(img).astype('float')
42 |
43 |                 img[y1:y1 + h, x1:x1 + w, 0] = img_gray[y1:y1 + h, x1:x1 + w, 0]
44 |                 img[y1:y1 + h, x1:x1 + w, 1] = img_gray[y1:y1 + h, x1:x1 + w, 1]
45 |                 img[y1:y1 + h, x1:x1 + w, 2] = img_gray[y1:y1 + h, x1:x1 + w, 2]
46 |
47 |                 img = Image.fromarray(img.astype('uint8'))
48 |
49 |                 return img
50 |
51 |         return img
52 | #######################################################################################################################
53 | ################################ this code is for the DL Defense ################################################
54 |
55 | def toSketch(img):  # Convert a visible-light image to a sketch image
56 |     img_np = np.asarray(img)
57 |     img_inv = 255 - img_np
58 |     img_blur = cv2.GaussianBlur(img_inv, ksize=(27, 27), sigmaX=0, sigmaY=0)
59 |     img_blend = cv2.divide(img_np, 255 - img_blur, scale=256)
60 |     img_blend = Image.fromarray(img_blend)
61 |     return img_blend
62 |
63 | """
64 | Randomly select channels from the visible image (R, G, B) and a grayscale (gray) or sketch image (sketch)
65 | and fuse them into a new 3-channel image.
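For example, with gray passed in, the merged result may be (R, gray, B), (gray, G, gray),
and so on, since two copies of the gray/sketch channel enter the shuffle.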
66 | """
67 | def random_choose(r, g, b, gray_or_sketch):
68 |     p = [r, g, b, gray_or_sketch, gray_or_sketch]
69 |     idx = [0, 1, 2, 3, 4]
70 |     random.shuffle(idx)
71 |     return Image.merge('RGB', [p[idx[0]], p[idx[1]], p[idx[2]]])
72 |
73 |
74 | #######################################################################################################
75 | # Diversity Learning (DL = Fuse_LFusePR). G=grayscale, G_rgb=fuse Grayscale-RGB, S_rgb=fuse Sketch-RGB, Aug=more transformations, F=LHT
76 | class Fuse_LFusePR(object):
77 |     def __init__(self, G=0.05, G_rgb=0.01, S_rgb=0.01, Aug=0.05, F=0.1):
78 |         self.G = G
79 |         self.G_rgb = G_rgb
80 |         self.S_rgb = S_rgb
81 |         self.Aug = Aug
82 |         self.F = F
83 |
84 |     def __call__(self, img):
85 |         r, g, b = img.split()
86 |         gray = img.convert('L')  # convert the visible image to grayscale
87 |         p = random.random()
88 |         if p < self.G:  # just Grayscale
89 |             return Image.merge('RGB', [gray, gray, gray])
90 |
91 |         elif p < self.G + self.G_rgb:  # fuse Grayscale-RGB
92 |             img2 = random_choose(r, g, b, gray)
93 |             return img2
94 |
95 |         elif p < self.G + self.G_rgb + self.S_rgb:  # fuse Sketch-RGB
96 |             sketch = toSketch(gray)
97 |             img3 = random_choose(r, g, b, sketch)
98 |             return img3
99 |         elif p < self.G + self.G_rgb + self.S_rgb + self.Aug:  # defendAugment
100 |             policy = AutoAugPolicy()
101 |             imgAug = policy(img)
102 |             return imgAug
103 |         elif p < self.G + self.G_rgb + self.S_rgb + self.Aug + self.F:  # DL with LHT (self.F adds LHT)
104 |             img4 = fusePR(img)
105 |             # pp = random.randint(0,300)
106 |             # img4.save('./temp/'+str(pp)+'.jpg')
107 |             return img4
108 |         else:
109 |             return img
110 |
111 | def fusePR(img1):
112 |     sl = 0.02
113 |     sh = 0.4
114 |     r1 = 0.3
115 |
116 |     G = 0.2
117 |     G_rgb = 0.2
118 |     S_rgb = 0.2
119 |     Aug = 0.4
120 |     r, g, b = img1.split()
121 |     gray = img1.convert('L')  # convert the visible image to grayscale
122 |     p = random.random()
123 |     img = img1
124 |     if p < G:  # just Grayscale
125 |         img = Image.merge('RGB', [gray, gray, gray])
126 |
127 |     elif p < G + G_rgb:  # fuse Grayscale-RGB
128 |         img = random_choose(r, g, b, gray)
129 |
130 |     elif p < G + G_rgb + S_rgb:  # fuse Sketch-RGB
131 |         sketch = toSketch(gray)
132 |         img = random_choose(r, g, b, sketch)
133 |     elif p < G + G_rgb + S_rgb + Aug:
134 |         policy = AutoAugPolicy()
135 |         img = policy(img1)
136 |
137 |     new = img
138 |
139 |     np_img = np.array(new, dtype=np.uint8)
140 |
141 |     for attempt in range(100):
142 |         area = img1.size[0] * img1.size[1]
143 |         target_area = random.uniform(sl, sh) * area
144 |         aspect_ratio = random.uniform(r1, 1 / r1)
145 |
146 |         h = int(round(math.sqrt(target_area * aspect_ratio)))
147 |         w = int(round(math.sqrt(target_area / aspect_ratio)))
148 |
149 |         if w < img1.size[0] and h < img1.size[1]:  # PIL size is (width, height)
150 |             x1 = random.randint(0, img1.size[0] - w)
151 |             y1 = random.randint(0, img1.size[1] - h)
152 |             img1 = np.asarray(img1).astype('float')
153 |
154 |             img1[y1:y1 + h, x1:x1 + w, 0] = np_img[y1:y1 + h, x1:x1 + w, 0]
155 |             img1[y1:y1 + h, x1:x1 + w, 1] = np_img[y1:y1 + h, x1:x1 + w, 1]
156 |             img1[y1:y1 + h, x1:x1 + w, 2] = np_img[y1:y1 + h, x1:x1 + w, 2]
157 |
158 |             img1 = Image.fromarray(img1.astype('uint8'))
159 |
160 |             return img1
161 |     return img1
162 |
163 |
--------------------------------------------------------------------------------
/test.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | from __future__ import print_function, division
3 | import argparse
4 | import torch
5 | import torch.nn as nn
6 | import
torch.optim as optim
7 | from torch.optim import lr_scheduler
8 | from torch.autograd import Variable
9 | import torch.backends.cudnn as cudnn
10 | import numpy as np
11 | import torchvision
12 | from torchvision import datasets, models, transforms
13 | import time
14 | import os
15 | import scipy.io
16 | import yaml
17 | import math
18 | from model import ft_net, ft_net_dense
19 | from multimodal import *
20 | 
21 | # Options
22 | # --------
23 | parser = argparse.ArgumentParser(description='Test')
24 | parser.add_argument('--gpu_ids',default='0', type=str,help='gpu_ids: e.g. 0 0,1,2 0,2')
25 | parser.add_argument('--which_epoch',default='last', type=str, help='0,1,2,3...or last')
26 | parser.add_argument('--test_dir',default='../data/Market/pytorch',type=str, help='./test_data')
27 | # parser.add_argument('--test_dir',default='./adv_data',type=str, help='./test_data')
28 | parser.add_argument('--name', default='testDL', type=str, help='save model path')
29 | parser.add_argument('--batchsize', default=256, type=int, help='batchsize')
30 | parser.add_argument('--use_dense', action='store_true', help='use densenet121')
31 | parser.add_argument('--ms',default='1', type=str,help='multiple_scale: e.g. 1 1,1.1 1,1.1,1.2')
32 | 
33 | opt = parser.parse_args()
34 | ###load config###
35 | # load the training config
36 | config_path = os.path.join('./model',opt.name,'opts.yaml')
37 | with open(config_path, 'r') as stream:
38 |     config = yaml.load(stream, Loader=yaml.FullLoader)
39 | 
40 | opt.use_dense = config['use_dense']
41 | opt.stride = config['stride']
42 | 
43 | if 'nclasses' in config: # to be compatible with old config files
44 |     opt.nclasses = config['nclasses']
45 | else:
46 |     opt.nclasses = 751
47 | 
48 | str_ids = opt.gpu_ids.split(',')
49 | #which_epoch = opt.which_epoch
50 | name = opt.name
51 | test_dir = opt.test_dir
52 | 
53 | gpu_ids = []
54 | for str_id in str_ids:
55 |     id = int(str_id)
56 |     if id >=0:
57 |         gpu_ids.append(id)
58 | 
59 | print('We use the scale: %s'%opt.ms)
60 | str_ms = opt.ms.split(',')
61 | ms = []
62 | for s in str_ms:
63 |     s_f = float(s)
64 |     ms.append(math.sqrt(s_f))
65 | 
66 | # set gpu ids
67 | if len(gpu_ids)>0:
68 |     torch.cuda.set_device(gpu_ids[0])
69 |     cudnn.benchmark = True
70 | 
71 | ######################################################################
72 | # Load Data
73 | # ---------
74 | 
75 | data_transforms = transforms.Compose([
76 | ############## Circuitous Scaling ################################
77 | ######## JAD
78 |         # transforms.Resize((110,50), interpolation=3),
79 |         # transforms.Resize((220,100), interpolation=3),
80 |         # transforms.Resize((110,50), interpolation=3),
81 | ############################
82 | ######## JAD2
83 |         # transforms.Resize((90,40), interpolation=3),
84 |         # transforms.Resize((200,90), interpolation=3),
85 |         # transforms.Resize((90,40), interpolation=3),
86 | ############################
87 | ######## JAD3
88 |         # transforms.Resize((110,50), interpolation=3),
89 |         # transforms.Resize((220,100), interpolation=3),
90 |         # # # transforms.Pad(5),
91 |         # transforms.Resize((110,50), interpolation=3),
92 | ############## Circuitous Scaling #################################
93 | 
94 | ####### normal
95 |         transforms.Resize((256,128), interpolation=3),
96 |         transforms.ToTensor(),
97 |         transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
98 | 
99 | ])
100 | 
101 | data_dir = test_dir
102 | image_datasets = {x: datasets.ImageFolder( os.path.join(data_dir,x) ,data_transforms) for x in ['gallery','query']}
103 | dataloaders = {x: 
torch.utils.data.DataLoader(image_datasets[x], batch_size=opt.batchsize, 104 | shuffle=False, num_workers=16) for x in ['gallery','query']} 105 | 106 | class_names = image_datasets['query'].classes 107 | use_gpu = torch.cuda.is_available() 108 | ###################################################################### 109 | # Load model 110 | #--------------------------- 111 | def load_network(network): 112 | save_path = os.path.join('./model',name,'net_%s.pth'%opt.which_epoch) 113 | network.load_state_dict(torch.load(save_path)) 114 | return network 115 | ###################################################################### 116 | # Extract feature 117 | # ---------------------- 118 | # Extract feature from a trained model. 119 | # 120 | def fliplr(img): 121 | '''flip horizontal''' 122 | inv_idx = torch.arange(img.size(3)-1,-1,-1).long() # N x C x H x W 123 | img_flip = img.index_select(3,inv_idx) 124 | return img_flip 125 | 126 | def extract_feature(model,dataloaders): 127 | features = torch.FloatTensor() 128 | count = 0 129 | for data in dataloaders: 130 | img, label = data 131 | n, c, h, w = img.size() 132 | count += n 133 | print(count) 134 | ff = torch.FloatTensor(n,512).zero_().cuda() 135 | 136 | for i in range(2): 137 | if(i==1): 138 | img = fliplr(img) 139 | input_img = Variable(img.cuda()) 140 | for scale in ms: 141 | if scale != 1: 142 | # bicubic is only available in pytorch>= 1.1 143 | input_img = nn.functional.interpolate(input_img, scale_factor=scale, mode='bicubic', align_corners=False) 144 | outputs = model(input_img) 145 | ff += outputs 146 | 147 | fnorm = torch.norm(ff, p=2, dim=1, keepdim=True) 148 | ff = ff.div(fnorm.expand_as(ff)) 149 | 150 | features = torch.cat((features,ff.data.cpu()), 0) 151 | return features 152 | 153 | def get_id(img_path): 154 | camera_id = [] 155 | labels = [] 156 | for path, v in img_path: 157 | #filename = path.split('/')[-1] 158 | filename = os.path.basename(path) 159 | label = filename[0:4] 160 | camera = filename.split('c')[1] 161 | if label[0:2]=='-1': 162 | labels.append(-1) 163 | else: 164 | labels.append(int(label)) 165 | camera_id.append(int(camera[0])) 166 | return camera_id, labels 167 | 168 | gallery_path = image_datasets['gallery'].imgs 169 | query_path = image_datasets['query'].imgs 170 | 171 | gallery_cam,gallery_label = get_id(gallery_path) 172 | query_cam,query_label = get_id(query_path) 173 | 174 | ###################################################################### 175 | # Load Collected data Trained model 176 | print('-------test-----------') 177 | if opt.use_dense: 178 | model_structure = ft_net_dense(opt.nclasses) 179 | else: 180 | model_structure = ft_net(opt.nclasses, stride = opt.stride) 181 | 182 | model = load_network(model_structure) 183 | 184 | # Remove the final fc layer and classifier layer 185 | model.classifier.classifier = nn.Sequential() 186 | 187 | # Change to test mode 188 | model = model.eval() 189 | if use_gpu: 190 | model = model.cuda() 191 | 192 | # Extract feature 193 | with torch.no_grad(): 194 | gallery_feature = extract_feature(model,dataloaders['gallery']) 195 | query_feature = extract_feature(model,dataloaders['query']) 196 | 197 | # Save to Matlab for check 198 | result = {'gallery_f':gallery_feature.numpy(),'gallery_label':gallery_label,'gallery_cam':gallery_cam,'query_f':query_feature.numpy(),'query_label':query_label,'query_cam':query_cam} 199 | scipy.io.savemat('pytorch_result.mat',result) 200 | 201 | print(opt.name) 202 | result = './model/%s/result.txt'%opt.name 203 | os.system('python 
evaluate_gpu.py | tee -a %s'%result)
204 | 
--------------------------------------------------------------------------------
/aa_SMA.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | from __future__ import print_function, division
3 | import argparse
4 | import torch
5 | import torch.nn as nn
6 | import torch.optim as optim
7 | from torch.optim import lr_scheduler
8 | from torch.autograd import Variable
9 | import torch.backends.cudnn as cudnn
10 | import numpy as np
11 | import torchvision
12 | from torchvision import datasets, models, transforms
13 | import time
14 | import os
15 | import yaml
16 | import math
17 | from model import ft_net, ft_net_dense
18 | import os.path as osp
19 | import torchattacks
20 | from PIL import Image
21 | import cv2
22 | # Options
23 | # --------
24 | parser = argparse.ArgumentParser(description='Training')
25 | parser.add_argument('--gpu_ids',default='1', type=str,help='gpu_ids: e.g. 0 0,1,2 0,2')
26 | parser.add_argument('--which_epoch',default='last', type=str, help='0,1,2,3...or last')
27 | parser.add_argument('--test_dir',default='../data/Market/pytorch',type=str, help='./test_data')
28 | parser.add_argument('--name', default='ft_ResNet50', type=str, help='save model path')
29 | parser.add_argument('--batchsize', default=1, type=int, help='batchsize')  # to save adversarial examples, this code only supports batchsize=1
30 | parser.add_argument('--use_dense', action='store_true', help='use densenet121' )
31 | parser.add_argument('--ms',default='1', type=str,help='multiple_scale: e.g. 1 1,1.1 1,1.1,1.2')
32 | 
33 | opt = parser.parse_args()
34 | 
35 | save_root_path = './adv_data/SMA_to_attack_' + opt.name
36 | 
37 | def mkdir_if_missing(directory):
38 |     if not osp.exists(directory):
39 |         try:
40 |             os.makedirs(directory)
41 |         except FileExistsError:
42 |             # the directory was created concurrently; safe to ignore
43 |             pass
44 | mkdir_if_missing(save_root_path)
45 | toImage = transforms.ToPILImage()
46 | def save_image_tensor2cv2(input_tensor: torch.Tensor, filename):
47 | 
48 |     mean = [0.485, 0.456, 0.406]
49 |     std = [0.229, 0.224, 0.225]
50 |     assert (len(input_tensor.shape) == 4 and input_tensor.shape[0] == 1)
51 |     input_tensor = input_tensor.clone().detach()
52 |     input_tensor = input_tensor.to(torch.device('cpu'))
53 |     input_tensor = input_tensor.squeeze()
54 |     for i in range(len(mean)):
55 |         input_tensor[i] = input_tensor[i] * std[i] + mean[i]
56 |     input_tensor = input_tensor.mul_(255).add_(0.5).clamp_(
57 |         0, 255).permute(1, 2, 0).type(torch.uint8).numpy()
58 |     input_tensor = input_tensor.astype(np.uint8)
59 |     input_tensor = cv2.cvtColor(input_tensor, cv2.COLOR_RGB2BGR)
60 |     cv2.imwrite(filename, input_tensor)
61 |     return input_tensor
62 | 
63 | ###load config###
64 | # load the training config
65 | config_path = os.path.join('./model',opt.name,'opts.yaml')
66 | with open(config_path, 'r') as stream:
67 |     config = yaml.load(stream, Loader=yaml.FullLoader)
68 | 
69 | opt.use_dense = config['use_dense']
70 | 
71 | opt.stride = config['stride']
72 | 
73 | if 'nclasses' in config: # to be compatible with old config files
74 |     opt.nclasses = config['nclasses']
75 | else:
76 |     opt.nclasses = 751
77 | 
78 | str_ids = opt.gpu_ids.split(',')
79 | #which_epoch = opt.which_epoch
80 | name = opt.name
81 | test_dir = opt.test_dir
82 | 
83 | gpu_ids = []
84 | for str_id in str_ids:
85 |     id = int(str_id)
86 |     if id >=0:
87 |         gpu_ids.append(id)
88 | 
89 | print('We use the scale: %s'%opt.ms)
90 | str_ms = opt.ms.split(',')
91 | ms = []
92 | for s in str_ms:
93 |     s_f = float(s)
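    # presumably the sqrt is taken so that a --ms value acts as an area scale:
    # interpolate() below applies the resulting factor to both H and W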
94 | ms.append(math.sqrt(s_f)) 95 | 96 | # set gpu ids 97 | if len(gpu_ids)>0: 98 | torch.cuda.set_device(gpu_ids[0]) 99 | cudnn.benchmark = True 100 | 101 | ###################################################################### 102 | # Load Data 103 | # --------- 104 | data_transforms = transforms.Compose([ 105 | transforms.Resize((256,128), interpolation=3), 106 | transforms.ToTensor(), 107 | ]) 108 | 109 | data_dir = test_dir 110 | 111 | image_datasets = {x: datasets.ImageFolder( os.path.join(data_dir,x) ,data_transforms) for x in ['gallery','query']} 112 | dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=opt.batchsize, 113 | shuffle=False, num_workers=16) for x in ['gallery','query']} 114 | class_names = image_datasets['query'].classes 115 | use_gpu = torch.cuda.is_available() 116 | 117 | ###################################################################### 118 | # Load model 119 | #--------------------------- 120 | def load_network(network): 121 | save_path = os.path.join('./model',name,'net_%s.pth'%opt.which_epoch) 122 | network.load_state_dict(torch.load(save_path)) 123 | return network 124 | ###################################################################### 125 | # Extract feature from a trained model. 126 | # 127 | def fliplr(img): 128 | '''flip horizontal''' 129 | inv_idx = torch.arange(img.size(3)-1,-1,-1).long() # N x C x H x W 130 | img_flip = img.index_select(3,inv_idx) 131 | return img_flip 132 | 133 | def extract_feature2(model,dataloaders): 134 | features = torch.FloatTensor() 135 | count = 0 136 | total = 0 137 | _, query_label,name = get_id2(query_path) 138 | for data in dataloaders: 139 | img, label = data 140 | real_label = name[total][0:4] 141 | # ------------------Generating adversarial examples---------------------------------- 142 | img = SMA(img, label) 143 | path = save_root_path + '/' + real_label + '/' 144 | mkdir_if_missing(path) 145 | save_path = path + name[total] 146 | img = img.cpu() 147 | img = toImage(img[0]) 148 | img.save(save_path) 149 | print('{} --> adversarial example have been saved'.format(save_path)) 150 | trans = transforms.Compose( 151 | [ 152 | transforms.ToTensor(), 153 | transforms.Normalize(mean=[0.485, 0.456, 0.406],std=[0.229, 0.224, 0.225]) 154 | ] 155 | ) 156 | img = trans(img) 157 | img = img.reshape(1,3,256,128) 158 | #------------------------------------------------------------------------ 159 | total += 1 160 | n, c, h, w = img.size() 161 | count += n 162 | print(count) 163 | ff = torch.FloatTensor(n,512).zero_().cuda() 164 | 165 | for i in range(2): 166 | if(i==1): 167 | img = fliplr(img) 168 | input_img = Variable(img.cuda()) 169 | for scale in ms: 170 | if scale != 1: 171 | # bicubic is only available in pytorch>= 1.1 172 | input_img = nn.functional.interpolate(input_img, scale_factor=scale, mode='bicubic', align_corners=False) 173 | outputs = model(input_img) 174 | ff += outputs 175 | 176 | fnorm = torch.norm(ff, p=2, dim=1, keepdim=True) 177 | ff = ff.div(fnorm.expand_as(ff)) 178 | 179 | features = torch.cat((features,ff.data.cpu()), 0) 180 | print('features.size():',features.size()) 181 | 182 | return features 183 | 184 | def get_id(img_path): 185 | camera_id = [] 186 | labels = [] 187 | for path, v in img_path: 188 | #filename = path.split('/')[-1] 189 | filename = os.path.basename(path) 190 | # print('filename:',filename) 191 | label = filename[0:4] 192 | camera = filename.split('c')[1] 193 | if label[0:2]=='-1': 194 | labels.append(-1) 195 | else: 196 | labels.append(int(label)) 197 | 
camera_id.append(int(camera[0])) 198 | return camera_id, labels 199 | 200 | def get_id2(img_path): 201 | camera_id = [] 202 | labels = [] 203 | name = [] 204 | for path, v in img_path: 205 | #filename = path.split('/')[-1] 206 | filename = os.path.basename(path) 207 | # print('filename:',filename) 208 | label = filename[0:4] 209 | camera = filename.split('c')[1] 210 | if label[0:2]=='-1': 211 | labels.append(-1) 212 | else: 213 | labels.append(int(label)) 214 | camera_id.append(int(camera[0])) 215 | name.append(filename) 216 | return camera_id, labels,name 217 | #----------------------------------------------------------------------------------------------------------- 218 | 219 | gallery_path = image_datasets['gallery'].imgs 220 | query_path = image_datasets['query'].imgs 221 | 222 | gallery_cam,gallery_label = get_id(gallery_path) 223 | query_cam,query_label = get_id(query_path) 224 | 225 | 226 | ###################################################################### 227 | # Load Collected data Trained model 228 | if opt.use_dense: 229 | model_structure = ft_net_dense(opt.nclasses) 230 | else: 231 | model_structure = ft_net(opt.nclasses, stride = opt.stride) 232 | 233 | model = load_network(model_structure) 234 | 235 | # Remove the final fc layer and classifier layer 236 | model.classifier.classifier = nn.Sequential() 237 | 238 | # Change to test mode 239 | model = model.eval() 240 | if use_gpu: 241 | model = model.cuda() 242 | ##new add 243 | for p in model.parameters(): 244 | p.requires_grad = False 245 | 246 | print('================>Init Attack...<===================') 247 | 248 | SMA = torchattacks.PGD(model, eps=5/255, alpha=1/255,steps=15, random_start=True) 249 | 250 | # Extract feature 251 | query_feature = extract_feature2(model,dataloaders['query']) 252 | 253 | -------------------------------------------------------------------------------- /aa_LTA.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | from __future__ import print_function, division 3 | import argparse 4 | import torch 5 | import torch.nn as nn 6 | import torch.optim as optim 7 | from torch.optim import lr_scheduler 8 | from torch.autograd import Variable 9 | import torch.backends.cudnn as cudnn 10 | import numpy as np 11 | import torchvision 12 | from torchvision import datasets, models, transforms 13 | import time 14 | import os 15 | import yaml 16 | import math 17 | from model import ft_net, ft_net_dense 18 | import os.path as osp 19 | import torchattacks 20 | from PIL import Image 21 | import cv2 22 | # Options 23 | # -------- 24 | parser = argparse.ArgumentParser(description='Training') 25 | parser.add_argument('--gpu_ids',default='0', type=str,help='gpu_ids: e.g. 0 0,1,2 0,2') 26 | parser.add_argument('--which_epoch',default='last', type=str, help='0,1,2,3...or last') 27 | parser.add_argument('--test_dir',default='../data/Market/pytorch',type=str, help='./test_data') 28 | parser.add_argument('--name', default='newDLe120', type=str, help='save model path') 29 | parser.add_argument('--batchsize', default=1, type=int, help='batchsize') #To save the adversarial sample code only supports batchsize=1 30 | parser.add_argument('--use_dense', action='store_true', help='use densenet121' ) 31 | parser.add_argument('--ms',default='1', type=str,help='multiple_scale: e.g. 
1 1,1.1 1,1.1,1.2')
32 | 
33 | opt = parser.parse_args()
34 | 
35 | save_root_path = './adv_data/LTA_to_attack_' + opt.name
36 | 
37 | def mkdir_if_missing(directory):
38 |     if not osp.exists(directory):
39 |         try:
40 |             os.makedirs(directory)
41 |         except FileExistsError:
42 |             # the directory was created concurrently; safe to ignore
43 |             pass
44 | mkdir_if_missing(save_root_path)
45 | toImage = transforms.ToPILImage()
46 | def save_image_tensor2cv2(input_tensor: torch.Tensor, filename):
47 | 
48 |     mean = [0.485, 0.456, 0.406]
49 |     std = [0.229, 0.224, 0.225]
50 |     assert (len(input_tensor.shape) == 4 and input_tensor.shape[0] == 1)
51 |     input_tensor = input_tensor.clone().detach()
52 |     input_tensor = input_tensor.to(torch.device('cpu'))
53 |     input_tensor = input_tensor.squeeze()
54 |     for i in range(len(mean)):
55 |         input_tensor[i] = input_tensor[i] * std[i] + mean[i]
56 |     input_tensor = input_tensor.mul_(255).add_(0.5).clamp_(
57 |         0, 255).permute(1, 2, 0).type(torch.uint8).numpy()
58 |     input_tensor = input_tensor.astype(np.uint8)
59 |     input_tensor = cv2.cvtColor(input_tensor, cv2.COLOR_RGB2BGR)
60 |     cv2.imwrite(filename, input_tensor)
61 |     return input_tensor
62 | 
63 | ###load config###
64 | # load the training config
65 | config_path = os.path.join('./model',opt.name,'opts.yaml')
66 | with open(config_path, 'r') as stream:
67 |     config = yaml.load(stream, Loader=yaml.FullLoader)
68 | 
69 | opt.use_dense = config['use_dense']
70 | 
71 | opt.stride = config['stride']
72 | 
73 | if 'nclasses' in config: # to be compatible with old config files
74 |     opt.nclasses = config['nclasses']
75 | else:
76 |     opt.nclasses = 751
77 | 
78 | str_ids = opt.gpu_ids.split(',')
79 | #which_epoch = opt.which_epoch
80 | name = opt.name
81 | test_dir = opt.test_dir
82 | 
83 | gpu_ids = []
84 | for str_id in str_ids:
85 |     id = int(str_id)
86 |     if id >=0:
87 |         gpu_ids.append(id)
88 | 
89 | print('We use the scale: %s'%opt.ms)
90 | str_ms = opt.ms.split(',')
91 | ms = []
92 | for s in str_ms:
93 |     s_f = float(s)
94 |     ms.append(math.sqrt(s_f))
95 | 
96 | # set gpu ids
97 | if len(gpu_ids)>0:
98 |     torch.cuda.set_device(gpu_ids[0])
99 |     cudnn.benchmark = True
100 | 
101 | ######################################################################
102 | # Load Data
103 | # ---------
104 | data_transforms = transforms.Compose([
105 |     transforms.Resize((256,128), interpolation=3),
106 |     transforms.ToTensor(),
107 | ])
108 | 
109 | data_dir = test_dir
110 | 
111 | image_datasets = {x: datasets.ImageFolder( os.path.join(data_dir,x) ,data_transforms) for x in ['gallery','query']}
112 | dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=opt.batchsize,
113 |                                               shuffle=False, num_workers=16) for x in ['gallery','query']}
114 | class_names = image_datasets['query'].classes
115 | use_gpu = torch.cuda.is_available()
116 | 
117 | ######################################################################
118 | # Load model
119 | #---------------------------
120 | def load_network(network):
121 |     save_path = os.path.join('./model',name,'net_%s.pth'%opt.which_epoch)
122 |     network.load_state_dict(torch.load(save_path))
123 |     return network
124 | ######################################################################
125 | # Extract feature from a trained model. 
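# Summary of extract_feature2 below: for each query image it (1) runs the LTA
# attack on the raw [0,1] image tensor to craft an adversarial example,
# (2) saves the example under its identity folder in save_root_path,
# (3) re-normalizes it with the ImageNet mean/std, and (4) extracts a 512-dim
# feature from the image and its horizontal flip, summing the two passes and
# L2-normalizing the result.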
126 | # 127 | def fliplr(img): 128 | '''flip horizontal''' 129 | inv_idx = torch.arange(img.size(3)-1,-1,-1).long() # N x C x H x W 130 | img_flip = img.index_select(3,inv_idx) 131 | return img_flip 132 | 133 | def extract_feature2(model,dataloaders): 134 | features = torch.FloatTensor() 135 | count = 0 136 | total = 0 137 | _, query_label,name = get_id2(query_path) 138 | for data in dataloaders: 139 | img, label = data 140 | real_label = name[total][0:4] 141 | # ------------------Generating adversarial examples---------------------------------- 142 | img = LTA(img, label) 143 | path = save_root_path + '/' + real_label + '/' 144 | mkdir_if_missing(path) 145 | save_path = path + name[total] 146 | img = img.cpu() 147 | img = toImage(img[0]) 148 | img.save(save_path) 149 | print('{} --> adversarial example have been saved'.format(save_path)) 150 | trans = transforms.Compose( 151 | [ 152 | transforms.ToTensor(), 153 | transforms.Normalize(mean=[0.485, 0.456, 0.406],std=[0.229, 0.224, 0.225]) 154 | ] 155 | ) 156 | img = trans(img) 157 | img = img.reshape(1,3,256,128) 158 | #------------------------------------------------------------------------ 159 | total += 1 160 | n, c, h, w = img.size() 161 | count += n 162 | print(count) 163 | ff = torch.FloatTensor(n,512).zero_().cuda() 164 | 165 | for i in range(2): 166 | if(i==1): 167 | img = fliplr(img) 168 | input_img = Variable(img.cuda()) 169 | for scale in ms: 170 | if scale != 1: 171 | # bicubic is only available in pytorch>= 1.1 172 | input_img = nn.functional.interpolate(input_img, scale_factor=scale, mode='bicubic', align_corners=False) 173 | outputs = model(input_img) 174 | ff += outputs 175 | 176 | fnorm = torch.norm(ff, p=2, dim=1, keepdim=True) 177 | ff = ff.div(fnorm.expand_as(ff)) 178 | 179 | features = torch.cat((features,ff.data.cpu()), 0) 180 | print('features.size():',features.size()) 181 | 182 | return features 183 | 184 | def get_id(img_path): 185 | camera_id = [] 186 | labels = [] 187 | for path, v in img_path: 188 | #filename = path.split('/')[-1] 189 | filename = os.path.basename(path) 190 | # print('filename:',filename) 191 | label = filename[0:4] 192 | camera = filename.split('c')[1] 193 | if label[0:2]=='-1': 194 | labels.append(-1) 195 | else: 196 | labels.append(int(label)) 197 | camera_id.append(int(camera[0])) 198 | return camera_id, labels 199 | 200 | def get_id2(img_path): 201 | camera_id = [] 202 | labels = [] 203 | name = [] 204 | for path, v in img_path: 205 | #filename = path.split('/')[-1] 206 | filename = os.path.basename(path) 207 | # print('filename:',filename) 208 | label = filename[0:4] 209 | camera = filename.split('c')[1] 210 | if label[0:2]=='-1': 211 | labels.append(-1) 212 | else: 213 | labels.append(int(label)) 214 | camera_id.append(int(camera[0])) 215 | name.append(filename) 216 | return camera_id, labels,name 217 | #----------------------------------------------------------------------------------------------------------- 218 | 219 | gallery_path = image_datasets['gallery'].imgs 220 | query_path = image_datasets['query'].imgs 221 | 222 | gallery_cam,gallery_label = get_id(gallery_path) 223 | query_cam,query_label = get_id(query_path) 224 | 225 | 226 | ###################################################################### 227 | # Load Collected data Trained model 228 | if opt.use_dense: 229 | model_structure = ft_net_dense(opt.nclasses) 230 | else: 231 | model_structure = ft_net(opt.nclasses, stride = opt.stride) 232 | 233 | model = load_network(model_structure) 234 | 235 | # Remove the final fc layer 
and classifier layer 236 | model.classifier.classifier = nn.Sequential() 237 | 238 | # Change to test mode 239 | model = model.eval() 240 | if use_gpu: 241 | model = model.cuda() 242 | ##new add 243 | for p in model.parameters(): 244 | p.requires_grad = False 245 | 246 | print('================>Init Attack...<===================') 247 | 248 | LTA = torchattacks.LTA(model=model,eps=5/255.0,alpha = 1/255.0, steps = 15, momentum = 1.0) 249 | 250 | 251 | # Extract feature 252 | query_feature = extract_feature2(model,dataloaders['query']) 253 | -------------------------------------------------------------------------------- /aa_IFGSM.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | from __future__ import print_function, division 3 | import argparse 4 | import torch 5 | import torch.nn as nn 6 | import torch.optim as optim 7 | from torch.optim import lr_scheduler 8 | from torch.autograd import Variable 9 | import torch.backends.cudnn as cudnn 10 | import numpy as np 11 | import torchvision 12 | from torchvision import datasets, models, transforms 13 | import time 14 | import os 15 | import yaml 16 | import math 17 | from model import ft_net, ft_net_dense 18 | import os.path as osp 19 | import torchattacks 20 | from PIL import Image 21 | import cv2 22 | # Options 23 | # -------- 24 | parser = argparse.ArgumentParser(description='Training') 25 | parser.add_argument('--gpu_ids',default='5', type=str,help='gpu_ids: e.g. 0 0,1,2 0,2') 26 | parser.add_argument('--which_epoch',default='last', type=str, help='0,1,2,3...or last') 27 | parser.add_argument('--test_dir',default='../data/Market/pytorch',type=str, help='./test_data') 28 | parser.add_argument('--name', default='ft_ResNet50', type=str, help='save model path') 29 | parser.add_argument('--batchsize', default=1, type=int, help='batchsize')#To save the adversarial sample code only supports batchsize=1 30 | parser.add_argument('--use_dense', action='store_true', help='use densenet121' ) 31 | parser.add_argument('--ms',default='1', type=str,help='multiple_scale: e.g. 
1 1,1.1 1,1.1,1.2')
32 | 
33 | opt = parser.parse_args()
34 | 
35 | save_root_path = './adv_data/IFGSM_to_attack_' + opt.name
36 | 
37 | def mkdir_if_missing(directory):
38 |     if not osp.exists(directory):
39 |         try:
40 |             os.makedirs(directory)
41 |         except FileExistsError:
42 |             # the directory was created concurrently; safe to ignore
43 |             pass
44 | mkdir_if_missing(save_root_path)
45 | toImage = transforms.ToPILImage()
46 | def save_image_tensor2cv2(input_tensor: torch.Tensor, filename):
47 | 
48 |     mean = [0.485, 0.456, 0.406]
49 |     std = [0.229, 0.224, 0.225]
50 |     assert (len(input_tensor.shape) == 4 and input_tensor.shape[0] == 1)
51 |     input_tensor = input_tensor.clone().detach()
52 |     input_tensor = input_tensor.to(torch.device('cpu'))
53 |     input_tensor = input_tensor.squeeze()
54 |     for i in range(len(mean)):
55 |         input_tensor[i] = input_tensor[i] * std[i] + mean[i]
56 |     input_tensor = input_tensor.mul_(255).add_(0.5).clamp_(
57 |         0, 255).permute(1, 2, 0).type(torch.uint8).numpy()
58 |     input_tensor = input_tensor.astype(np.uint8)
59 |     input_tensor = cv2.cvtColor(input_tensor, cv2.COLOR_RGB2BGR)
60 |     cv2.imwrite(filename, input_tensor)
61 |     return input_tensor
62 | 
63 | ###load config###
64 | # load the training config
65 | config_path = os.path.join('./model',opt.name,'opts.yaml')
66 | with open(config_path, 'r') as stream:
67 |     config = yaml.load(stream, Loader=yaml.FullLoader)
68 | 
69 | opt.use_dense = config['use_dense']
70 | opt.stride = config['stride']
71 | 
72 | if 'nclasses' in config: # to be compatible with old config files
73 |     opt.nclasses = config['nclasses']
74 | else:
75 |     opt.nclasses = 751
76 | 
77 | str_ids = opt.gpu_ids.split(',')
78 | #which_epoch = opt.which_epoch
79 | name = opt.name
80 | test_dir = opt.test_dir
81 | 
82 | gpu_ids = []
83 | for str_id in str_ids:
84 |     id = int(str_id)
85 |     if id >=0:
86 |         gpu_ids.append(id)
87 | 
88 | print('We use the scale: %s'%opt.ms)
89 | str_ms = opt.ms.split(',')
90 | ms = []
91 | for s in str_ms:
92 |     s_f = float(s)
93 |     ms.append(math.sqrt(s_f))
94 | 
95 | # set gpu ids
96 | if len(gpu_ids)>0:
97 |     torch.cuda.set_device(gpu_ids[0])
98 |     cudnn.benchmark = True
99 | 
100 | ######################################################################
101 | # Load Data
102 | # ---------
103 | data_transforms = transforms.Compose([
104 |     transforms.Resize((256,128), interpolation=3),
105 |     transforms.ToTensor(),
106 | ])
107 | 
108 | data_dir = test_dir
109 | 
110 | image_datasets = {x: datasets.ImageFolder( os.path.join(data_dir,x) ,data_transforms) for x in ['gallery','query']}
111 | dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=opt.batchsize,
112 |                                               shuffle=False, num_workers=16) for x in ['gallery','query']}
113 | class_names = image_datasets['query'].classes
114 | use_gpu = torch.cuda.is_available()
115 | 
116 | ######################################################################
117 | # Load model
118 | #---------------------------
119 | def load_network(network):
120 |     save_path = os.path.join('./model',name,'net_%s.pth'%opt.which_epoch)
121 |     network.load_state_dict(torch.load(save_path))
122 |     return network
123 | ######################################################################
124 | # Extract feature from a trained model. 
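# NOTE: the MIFGSM constructed at the end of this file takes an extra
# image_datasets argument, so it appears to be the variant bundled with this
# repo rather than the stock torchattacks MIFGSM API.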
125 | # 126 | def fliplr(img): 127 | '''flip horizontal''' 128 | inv_idx = torch.arange(img.size(3)-1,-1,-1).long() # N x C x H x W 129 | img_flip = img.index_select(3,inv_idx) 130 | return img_flip 131 | 132 | def extract_feature2(model,dataloaders): 133 | features = torch.FloatTensor() 134 | count = 0 135 | total = 0 136 | _, query_label,name = get_id2(query_path) 137 | for data in dataloaders: 138 | img, label = data 139 | real_label = name[total][0:4] 140 | # ------------------Generating adversarial examples---------------------------------- 141 | img = IFGSM(img, real_label) 142 | path = save_root_path + '/' + real_label + '/' 143 | mkdir_if_missing(path) 144 | save_path = path + name[total] 145 | img = img.cpu() 146 | img = toImage(img[0]) 147 | img.save(save_path) 148 | print('{} --> adversarial example have been saved'.format(save_path)) 149 | trans = transforms.Compose( 150 | [ 151 | transforms.ToTensor(), 152 | transforms.Normalize(mean=[0.485, 0.456, 0.406],std=[0.229, 0.224, 0.225]) 153 | ] 154 | ) 155 | img = trans(img) 156 | img = img.reshape(1,3,256,128) 157 | #------------------------------------------------------------------------ 158 | total += 1 159 | n, c, h, w = img.size() 160 | count += n 161 | print(count) 162 | ff = torch.FloatTensor(n,512).zero_().cuda() 163 | 164 | for i in range(2): 165 | if(i==1): 166 | img = fliplr(img) 167 | input_img = Variable(img.cuda()) 168 | for scale in ms: 169 | if scale != 1: 170 | # bicubic is only available in pytorch>= 1.1 171 | input_img = nn.functional.interpolate(input_img, scale_factor=scale, mode='bicubic', align_corners=False) 172 | outputs = model(input_img) 173 | ff += outputs 174 | 175 | fnorm = torch.norm(ff, p=2, dim=1, keepdim=True) 176 | ff = ff.div(fnorm.expand_as(ff)) 177 | 178 | features = torch.cat((features,ff.data.cpu()), 0) 179 | print('features.size():',features.size()) 180 | 181 | return features 182 | 183 | def get_id(img_path): 184 | camera_id = [] 185 | labels = [] 186 | for path, v in img_path: 187 | #filename = path.split('/')[-1] 188 | filename = os.path.basename(path) 189 | # print('filename:',filename) 190 | label = filename[0:4] 191 | camera = filename.split('c')[1] 192 | if label[0:2]=='-1': 193 | labels.append(-1) 194 | else: 195 | labels.append(int(label)) 196 | camera_id.append(int(camera[0])) 197 | return camera_id, labels 198 | 199 | def get_id2(img_path): 200 | camera_id = [] 201 | labels = [] 202 | name = [] 203 | for path, v in img_path: 204 | #filename = path.split('/')[-1] 205 | filename = os.path.basename(path) 206 | # print('filename:',filename) 207 | label = filename[0:4] 208 | camera = filename.split('c')[1] 209 | if label[0:2]=='-1': 210 | labels.append(-1) 211 | else: 212 | labels.append(int(label)) 213 | camera_id.append(int(camera[0])) 214 | name.append(filename) 215 | return camera_id, labels,name 216 | #----------------------------------------------------------------------------------------------------------- 217 | 218 | gallery_path = image_datasets['gallery'].imgs 219 | query_path = image_datasets['query'].imgs 220 | 221 | gallery_cam,gallery_label = get_id(gallery_path) 222 | query_cam,query_label = get_id(query_path) 223 | 224 | 225 | ###################################################################### 226 | # Load Collected data Trained model 227 | if opt.use_dense: 228 | model_structure = ft_net_dense(opt.nclasses) 229 | else: 230 | model_structure = ft_net(opt.nclasses, stride = opt.stride) 231 | 232 | model = load_network(model_structure) 233 | 234 | # Remove the final fc 
layer and classifier layer 235 | model.classifier.classifier = nn.Sequential() 236 | 237 | # Change to test mode 238 | model = model.eval() 239 | if use_gpu: 240 | model = model.cuda() 241 | ##new add 242 | for p in model.parameters(): 243 | p.requires_grad = False 244 | 245 | print('================>Init Attack...<===================') 246 | 247 | IFGSM = torchattacks.MIFGSM(model=model,image_datasets=image_datasets,eps=5/255.0,alpha = 1/255.0, steps = 15) 248 | 249 | 250 | # Extract feature 251 | query_feature = extract_feature2(model,dataloaders['query']) 252 | -------------------------------------------------------------------------------- /train.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | from __future__ import print_function, division 3 | import argparse 4 | import torch 5 | import torch.nn as nn 6 | import torch.optim as optim 7 | from torch.optim import lr_scheduler 8 | from torch.autograd import Variable 9 | from torchvision import datasets, transforms 10 | import torch.backends.cudnn as cudnn 11 | import matplotlib 12 | matplotlib.use('agg') 13 | import matplotlib.pyplot as plt 14 | import time 15 | import os 16 | from model import ft_net, ft_net_dense 17 | import yaml 18 | from shutil import copyfile 19 | from circle_loss import CircleLoss, convert_label_to_similarity 20 | from multimodal import * 21 | import random 22 | from defendAugment import * 23 | version = torch.__version__ 24 | 25 | # Options 26 | # -------- 27 | parser = argparse.ArgumentParser(description='Training') 28 | parser.add_argument('--gpu_ids',default='0', type=str,help='gpu_ids: e.g. 0 0,1,2 0,2') 29 | parser.add_argument('--name',default='Norm_ResNet50', type=str, help='output model name') 30 | parser.add_argument('--data_dir',default='../data/Market/pytorch',type=str, help='training dir path') 31 | parser.add_argument('--batchsize', default=32, type=int, help='batchsize') 32 | parser.add_argument('--stride', default=2, type=int, help='stride') 33 | parser.add_argument('--use_dense', action='store_true', help='use densenet121' ) 34 | parser.add_argument('--warm_epoch', default=0, type=int, help='the first K epoch that needs warm up') 35 | parser.add_argument('--lr', default=0.05, type=float, help='learning rate') 36 | parser.add_argument('--droprate', default=0.5, type=float, help='drop rate') 37 | parser.add_argument('--circle', action='store_true', help='use Circle loss' ) 38 | parser.add_argument('--resume', action='store_true', help='training again' ) 39 | parser.add_argument('--epoch', default=120, type=int, help='training epoch' ) 40 | opt = parser.parse_args() 41 | 42 | data_dir = opt.data_dir 43 | name = opt.name 44 | str_ids = opt.gpu_ids.split(',') 45 | gpu_ids = [] 46 | for str_id in str_ids: 47 | gid = int(str_id) 48 | if gid >=0: 49 | gpu_ids.append(gid) 50 | 51 | # set gpu ids 52 | if len(gpu_ids)>0: 53 | torch.cuda.set_device(gpu_ids[0]) 54 | cudnn.benchmark = True 55 | ###################################################################### 56 | # Load Data 57 | # --------- 58 | transform_train_list = [ 59 | transforms.Resize((256,128), interpolation=3), 60 | transforms.Pad(10), 61 | transforms.RandomCrop((256,128)), 62 | transforms.RandomHorizontalFlip(), 63 | # Fuse_RGB_Gray_Sketch(G=0.05, G_rgb=0.01, S_rgb=0.01,Aug=0), #CF 64 | # Diversity Learning (DL = Fuse_LFusePR). 
Probability setting: G=grayscale, G_rgb=use Grayscale-RGB, S_rgb=fuse Sketch-RGB, Aug=more transformation, F=LHT ( Local Homogeneous Transformation) 65 | # Fuse_LFusePR(G=0.05, G_rgb=0.01, S_rgb=0.01,Aug = 0.05,F = 0.1),#DL 66 | transforms.ToTensor(), 67 | transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) 68 | ] 69 | 70 | transform_val_list = [ 71 | transforms.Resize(size=(256,128),interpolation=3), #Image.BICUBIC 72 | transforms.ToTensor(), 73 | transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) 74 | ] 75 | 76 | print(transform_train_list) 77 | data_transforms = { 78 | 'train': transforms.Compose( transform_train_list ), 79 | 'val': transforms.Compose(transform_val_list), 80 | } 81 | 82 | # train_all = '' 83 | # if opt.train_all: 84 | # train_all = '_all' 85 | 86 | train_all = '_all' 87 | 88 | image_datasets = {} 89 | image_datasets['train'] = datasets.ImageFolder(os.path.join(data_dir, 'train' + train_all), 90 | data_transforms['train']) 91 | image_datasets['val'] = datasets.ImageFolder(os.path.join(data_dir, 'val'), 92 | data_transforms['val']) 93 | 94 | dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=opt.batchsize, 95 | shuffle=True, num_workers=8, pin_memory=True) # 8 workers may work faster 96 | for x in ['train', 'val']} 97 | dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val']} 98 | class_names = image_datasets['train'].classes 99 | 100 | use_gpu = torch.cuda.is_available() 101 | 102 | since = time.time() 103 | inputs, classes = next(iter(dataloaders['train'])) 104 | print(time.time()-since) 105 | ###################################################################### 106 | # Training the model 107 | # ------------------ 108 | 109 | y_loss = {} # loss history 110 | y_loss['train'] = [] 111 | y_loss['val'] = [] 112 | y_err = {} 113 | y_err['train'] = [] 114 | y_err['val'] = [] 115 | 116 | def load_network(network): 117 | save_path = os.path.join('./model',name,'net_%s.pth'%opt.which_epoch) 118 | network.load_state_dict(torch.load(save_path)) 119 | return network 120 | if opt.resume: 121 | # if opt.use_dense: 122 | # model_structure = ft_net_dense(opt.nclasses) 123 | model_structure = ft_net(751, stride = 2) 124 | model = load_network(model_structure) 125 | 126 | def train_model(model, criterion, optimizer, scheduler, num_epochs=60): 127 | since = time.time() 128 | 129 | #best_model_wts = model.state_dict() 130 | #best_acc = 0.0 131 | warm_up = 0.1 # We start from the 0.1*lrRate 132 | warm_iteration = round(dataset_sizes['train']/opt.batchsize)*opt.warm_epoch # first 5 epoch 133 | if opt.circle: 134 | criterion_circle = CircleLoss(m=0.25, gamma=32) 135 | for epoch in range(num_epochs): 136 | print('Epoch {}/{}'.format(epoch, num_epochs - 1)) 137 | print('-' * 10) 138 | 139 | # Each epoch has a training and validation phase 140 | for phase in ['train', 'val']: 141 | if phase == 'train': 142 | scheduler.step() 143 | model.train(True) # Set model to training mode 144 | else: 145 | model.train(False) # Set model to evaluate mode 146 | 147 | running_loss = 0.0 148 | running_corrects = 0.0 149 | # Iterate over data. 
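            # Per batch: skip an undersized final batch, move tensors to the GPU,
            # run the forward pass, and compute cross-entropy (plus circle loss on
            # L2-normalized features when --circle is set); during the first
            # --warm_epoch epochs the loss is scaled up gradually, and in the
            # train phase it is backpropagated before optimizer.step().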
150 |             for data in dataloaders[phase]:
151 |                 # get the inputs
152 |                 inputs, labels = data
153 |                 now_batch_size,c,h,w = inputs.shape
154 |                 if now_batch_size<opt.batchsize: # skip the last batch
155 |                     continue
156 | 
157 |                 # wrap them in Variable
158 |                 if use_gpu:
159 |                     inputs = Variable(inputs.cuda().detach())
160 |                     labels = Variable(labels.cuda().detach())
161 |                 else:
162 |                     inputs, labels = Variable(inputs), Variable(labels)
163 | 
164 |                 # zero the parameter gradients
165 |                 optimizer.zero_grad()
166 | 
167 |                 # forward
168 |                 if phase == 'val':
169 |                     with torch.no_grad():
170 |                         outputs = model(inputs)
171 |                 else:
172 |                     outputs = model(inputs)
173 | 
174 |                 if opt.circle:
175 |                     logits, ff = outputs
176 |                     fnorm = torch.norm(ff, p=2, dim=1, keepdim=True)
177 |                     ff = ff.div(fnorm.expand_as(ff))
178 |                     loss = criterion(logits, labels) + criterion_circle(*convert_label_to_similarity(ff, labels)) / now_batch_size
179 |                     _, preds = torch.max(logits.data, 1)
180 |                 else:
181 |                     _, preds = torch.max(outputs.data, 1)
182 |                     loss = criterion(outputs, labels)
183 | 
184 |                 # backward + optimize only if in training phase
185 |                 if epoch < opt.warm_epoch and phase == 'train':
186 |                     warm_up = min(1.0, warm_up + 0.9 / warm_iteration)
187 |                     loss = loss * warm_up
188 | 
189 |                 if phase == 'train':
190 |                     loss.backward()
191 |                     optimizer.step()
192 | 
193 |                 # statistics
194 |                 # running_loss accumulation differs across PyTorch versions:
195 |                 # loss.item() exists from 0.4 on, older versions use loss.data[0]
196 | 
197 |                 if int(version[0])>0 or int(version[2]) > 3: # for the new version like 0.4.0, 0.5.0 and 1.0.0
198 |                     running_loss += loss.item() * now_batch_size
199 |                 else:  # for the old version like 0.3.0 and 0.3.1
200 |                     running_loss += loss.data[0] * now_batch_size
201 |                 running_corrects += float(torch.sum(preds == labels.data))
202 | 
203 |             epoch_loss = running_loss / dataset_sizes[phase]
204 |             epoch_acc = running_corrects / dataset_sizes[phase]
205 | 
206 |             print('{} Loss: {:.4f} Acc: {:.4f}'.format(
207 |                 phase, epoch_loss, epoch_acc))
208 | 
209 |             y_loss[phase].append(epoch_loss)
210 |             y_err[phase].append(1.0-epoch_acc)
211 |             # deep copy the model
212 |             if phase == 'val':
213 |                 last_model_wts = model.state_dict()
214 |                 if epoch%10 == 9:
215 |                     save_network(model, epoch)
216 |                 draw_curve(epoch)
217 | 
218 |         time_elapsed = time.time() - since
219 |         print('Training complete in {:.0f}m {:.0f}s'.format(
220 |             time_elapsed // 60, time_elapsed % 60))
221 |         print()
222 | 
223 |     time_elapsed = time.time() - since
224 |     print('Training complete in {:.0f}m {:.0f}s'.format(
225 |         time_elapsed // 60, time_elapsed % 60))
226 |     #print('Best val Acc: {:4f}'.format(best_acc))
227 | 
228 |     # load best model weights
229 |     model.load_state_dict(last_model_wts)
230 |     save_network(model, 'last')
231 |     return model
232 | 
233 | ######################################################################
234 | # Draw Curve
235 | #---------------------------
236 | x_epoch = []
237 | fig = plt.figure()
238 | ax0 = fig.add_subplot(121, title="loss")
239 | ax1 = fig.add_subplot(122, title="top1err")
240 | def draw_curve(current_epoch):
241 |     x_epoch.append(current_epoch)
242 |     ax0.plot(x_epoch, y_loss['train'], 'bo-', label='train')
243 |     ax0.plot(x_epoch, y_loss['val'], 'ro-', label='val')
244 |     ax1.plot(x_epoch, y_err['train'], 'bo-', label='train')
245 |     ax1.plot(x_epoch, y_err['val'], 'ro-', label='val')
246 |     if current_epoch == 0:
247 |         ax0.legend()
248 |         ax1.legend()
249 |     fig.savefig( os.path.join('./model',name,'train.jpg'))
250 | 
251 | ######################################################################
252 | # Save model
253 | #---------------------------
254 | def save_network(network, epoch_label):
255 |     save_filename = 'net_%s.pth'% epoch_label
256 |     save_path = os.path.join('./model',name,save_filename)
257 |     torch.save(network.cpu().state_dict(), save_path)
258 |     if torch.cuda.is_available():
259 |         network.cuda(gpu_ids[0])
260 | 
261 | ######################################################################
262 | # Finetuning the convnet
263 | # ----------------------
264 | # Load a pretrained model and reset the final fully connected layer. 
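# The SGD optimizer below uses two parameter groups: the ImageNet-pretrained
# backbone is fine-tuned at 0.1*lr, while the freshly initialized classifier
# head is trained at the full --lr.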
265 | 266 | if opt.use_dense: 267 | model = ft_net_dense(len(class_names), opt.droprate, circle = opt.circle) 268 | else: 269 | model = ft_net(len(class_names), opt.droprate, opt.stride, circle =opt.circle) 270 | 271 | opt.nclasses = len(class_names) 272 | print(model) 273 | 274 | ignored_params = list(map(id, model.classifier.parameters() )) 275 | base_params = filter(lambda p: id(p) not in ignored_params, model.parameters()) 276 | optimizer_ft = optim.SGD([ 277 | {'params': base_params, 'lr': 0.1*opt.lr}, 278 | {'params': model.classifier.parameters(), 'lr': opt.lr} 279 | ], weight_decay=5e-4, momentum=0.9, nesterov=True) 280 | 281 | # Decay LR by a factor of 0.1 every 40 epochs 282 | exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=40, gamma=0.1) 283 | 284 | ###################################################################### 285 | # Train and evaluate 286 | # num_epochs=60 should take around 1-2 hours on GPU. 287 | # 288 | dir_name = os.path.join('./model',name) 289 | if not os.path.isdir(dir_name): 290 | os.mkdir(dir_name) 291 | #record every run 292 | copyfile('./train.py', dir_name+'/train.py') 293 | copyfile('./model.py', dir_name+'/model.py') 294 | 295 | # save opts 296 | with open('%s/opts.yaml'%dir_name,'w') as fp: 297 | yaml.dump(vars(opt), fp, default_flow_style=False) 298 | 299 | # model to gpu 300 | model = model.cuda() 301 | 302 | criterion = nn.CrossEntropyLoss() 303 | 304 | model = train_model(model, criterion, optimizer_ft, exp_lr_scheduler, 305 | num_epochs=opt.epoch) 306 | --------------------------------------------------------------------------------