├── model
│   └── need_to_download
├── result
│   └── need_to_download
├── cppapi
│   ├── README.md
│   └── pytorch2torchscript.py
├── utils
│   ├── theta_distribution_hist.jpg
│   ├── __init__.py
│   ├── generate_train_list.py
│   ├── README.md
│   ├── logging.py
│   ├── visualize.py
│   ├── plot_logit.py
│   ├── load_images_from_bin.py
│   └── plot_theta.py
├── attack
│   ├── state
│   │   ├── (3.4962) random_state.obj
│   │   └── (3.4968) random_state.obj
│   ├── preprocess_dataset.py
│   ├── util.py
│   ├── functions.py
│   ├── attack_dataset.py
│   ├── preprocess_eval.py
│   ├── attack.py
│   └── log
│       └── 64x64, alpha=0.2, 7models, 34epochs, lr=5.0e+05, batch_size=32.log (3.4962)
├── margin
│   ├── __init__.py
│   ├── InnerProduct.py
│   ├── CosineMarginProduct.py
│   ├── ArcMarginProduct.py
│   ├── MultiMarginProduct.py
│   └── SphereMarginProduct.py
├── backbone
│   ├── __init__.py
│   ├── spherenet.py
│   ├── mobilefacenet.py
│   ├── arcfacenet.py
│   ├── resnet.py
│   ├── cbam.py
│   └── attention.py
├── dataset
│   ├── __init__.py
│   ├── casia_webface.py
│   ├── megaface.py
│   ├── lfw_2.py
│   ├── agedb.py
│   ├── cfp.py
│   └── lfw.py
├── lossfunctions
│   ├── __init__.py
│   ├── agentcenterloss.py
│   └── centerloss.py
├── README.md
└── wu
    ├── eval_lfw_blufr.py
    ├── eval_deepglint_merge.py
    ├── eval_megaface.py
    ├── eval_lfw.py
    ├── eval_cfp.py
    ├── eval_agedb30.py
    ├── train.py
    ├── train_softmax.py
    └── train_center.py

/model/need_to_download:
--------------------------------------------------------------------------------
1 | 
--------------------------------------------------------------------------------
/result/need_to_download:
--------------------------------------------------------------------------------
1 | 
--------------------------------------------------------------------------------
/cppapi/README.md:
--------------------------------------------------------------------------------
1 | ## CppAPI
2 | 
3 | libtorch for C++ deployment
4 | 
--------------------------------------------------------------------------------
/utils/theta_distribution_hist.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhang943/Face-Adversarial-Attack/HEAD/utils/theta_distribution_hist.jpg
--------------------------------------------------------------------------------
/attack/state/(3.4962) random_state.obj:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhang943/Face-Adversarial-Attack/HEAD/attack/state/(3.4962) random_state.obj
--------------------------------------------------------------------------------
/attack/state/(3.4968) random_state.obj:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhang943/Face-Adversarial-Attack/HEAD/attack/state/(3.4968) random_state.obj
--------------------------------------------------------------------------------
/margin/__init__.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # encoding: utf-8
3 | '''
4 | @author: wujiyang
5 | @contact: wujiyang@hust.edu.cn
6 | @file: __init__.py.py
7 | @time: 2018/12/25 9:12
8 | @desc:
9 | '''
--------------------------------------------------------------------------------
/utils/__init__.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # encoding: utf-8
3 | '''
4 | @author: wujiyang
5 | @contact: wujiyang@hust.edu.cn
6 | @file: __init__.py.py
7 | @time: 2018/12/22 9:41
8 | @desc:
9 | '''
--------------------------------------------------------------------------------
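The CppAPI note above only records that libtorch is used for C++ deployment, and cppapi/pytorch2torchscript.py further down in this listing is an empty stub. A minimal sketch of what that TorchScript export could look like is shown below; the checkpoint path is hypothetical, and the choice of the MobileFaceNet backbone with a 112x112 aligned input simply mirrors the rest of this repository, so treat it as an illustration rather than the author's script.

```
# Hedged sketch: export a trained backbone to TorchScript so libtorch (C++) can load it.
import torch

from backbone import mobilefacenet

net = mobilefacenet.MobileFaceNet()
# 'model/MobileFace.ckpt' is a hypothetical path; checkpoints in this repo keep the
# weights under the 'net_state_dict' key (see loadModel in attack/functions.py).
ckpt = torch.load('model/MobileFace.ckpt', map_location='cpu')
net.load_state_dict(ckpt['net_state_dict'])
net.eval()

example = torch.rand(1, 3, 112, 112)           # aligned face crops in this repo are 112x112
traced = torch.jit.trace(net, example)         # record the forward pass as a static graph
traced.save('model/mobilefacenet_script.pt')   # load from C++ with torch::jit::load(...)
```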
/backbone/__init__.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # encoding: utf-8
3 | '''
4 | @author: wujiyang
5 | @contact: wujiyang@hust.edu.cn
6 | @file: __init__.py.py
7 | @time: 2018/12/21 15:30
8 | @desc:
9 | '''
--------------------------------------------------------------------------------
/dataset/__init__.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # encoding: utf-8
3 | '''
4 | @author: wujiyang
5 | @contact: wujiyang@hust.edu.cn
6 | @file: __init__.py.py
7 | @time: 2018/12/21 15:31
8 | @desc:
9 | '''
--------------------------------------------------------------------------------
/lossfunctions/__init__.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # encoding: utf-8
3 | '''
4 | @author: wujiyang
5 | @contact: wujiyang@hust.edu.cn
6 | @file: __init__.py.py
7 | @time: 2019/1/4 15:24
8 | @desc:
9 | '''
--------------------------------------------------------------------------------
/cppapi/pytorch2torchscript.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # encoding: utf-8
3 | '''
4 | @author: wujiyang
5 | @contact: wujiyang@hust.edu.cn
6 | @file: pytorch2torchscript.py
7 | @time: 2019/2/18 17:45
8 | @desc: convert your pytorch model to torch script and save to file
9 | '''
10 | 
11 | 
--------------------------------------------------------------------------------
/utils/generate_train_list.py:
--------------------------------------------------------------------------------
1 | import os
2 | import sys
3 | 
4 | root = "/data/FaceRecognition/WebFace/webface_align_112"
5 | 
6 | dirs = os.listdir(root)
7 | dirs.sort()
8 | 
9 | n = 0
10 | 
11 | with open("{}/{}".format("/data/FaceRecognition/WebFace", "align_train.list"), 'w') as f:
12 |     for i, d in enumerate(dirs):
13 |         imgs = os.listdir("{}/{}".format(root, d))
14 |         imgs.sort()
15 |         for img in imgs:
16 |             f.write("{}/{} {}\n".format(d, img, i))
17 | 
18 | 
--------------------------------------------------------------------------------
/utils/README.md:
--------------------------------------------------------------------------------
1 | 
2 | ## MXNET binary tools
3 | 
4 | Tools for restoring the aligned images from the MXNet binary file provided by [insightface](https://github.com/deepinsight/insightface).
5 | 
6 | You should install mxnet-cpu first for image parsing; just running ' **pip install mxnet** ' is enough.
7 | 8 | The processed images are list below: 9 | [LFW @ BaiduNetdisk](https://pan.baidu.com/s/1Rue4FBmGvdGMPkyy2ZqcdQ), [AgeDB-30 @ BaiduNetdisk](https://pan.baidu.com/s/1sdw1lO5JfP6Ja99O7zprUg), [CFP_FP @ BaiduNetdisk](https://pan.baidu.com/s/1gyFAAy427weUd2G-ozMgEg) 10 | 11 | -------------------------------------------------------------------------------- /utils/logging.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # encoding: utf-8 3 | ''' 4 | @author: wujiyang 5 | @contact: wujiyang@hust.edu.cn 6 | @file: logging.py 7 | @time: 2018/12/22 9:42 8 | @desc: logging tools 9 | ''' 10 | 11 | from __future__ import print_function 12 | import os 13 | import logging 14 | 15 | 16 | def init_log(output_dir): 17 | logging.basicConfig(level=logging.DEBUG, 18 | format='%(asctime)s %(message)s', 19 | datefmt='%Y%m%d-%H:%M:%S', 20 | filename=os.path.join(output_dir, 'log.log'), 21 | filemode='w') 22 | console = logging.StreamHandler() 23 | console.setLevel(logging.INFO) 24 | logging.getLogger('').addHandler(console) 25 | return logging 26 | 27 | 28 | if __name__ == '__main__': 29 | pass 30 | -------------------------------------------------------------------------------- /margin/InnerProduct.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # encoding: utf-8 3 | ''' 4 | @author: wujiyang 5 | @contact: wujiyang@hust.edu.cn 6 | @file: InnerProduct.py 7 | @time: 2019/1/4 16:54 8 | @desc: just normal inner product as fully connected layer do. 9 | ''' 10 | import torch 11 | import torch.nn as nn 12 | import torch.nn.functional as F 13 | from torch.nn import Parameter 14 | 15 | class InnerProduct(nn.Module): 16 | def __init__(self, in_feature=128, out_feature=10575): 17 | super(InnerProduct, self).__init__() 18 | self.in_feature = in_feature 19 | self.out_feature = out_feature 20 | 21 | self.weight = Parameter(torch.Tensor(out_feature, in_feature)) 22 | nn.init.xavier_uniform_(self.weight) 23 | 24 | 25 | def forward(self, input, label): 26 | # label not used 27 | output = F.linear(input, self.weight) 28 | return output 29 | 30 | 31 | if __name__ == '__main__': 32 | pass -------------------------------------------------------------------------------- /margin/CosineMarginProduct.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # encoding: utf-8 3 | ''' 4 | @author: wujiyang 5 | @contact: wujiyang@hust.edu.cn 6 | @file: CosineMarginProduct.py 7 | @time: 2018/12/25 9:13 8 | @desc: additive cosine margin for cosface 9 | ''' 10 | 11 | import torch 12 | import torch.nn as nn 13 | import torch.nn.functional as F 14 | from torch.nn import Parameter 15 | 16 | 17 | class CosineMarginProduct(nn.Module): 18 | def __init__(self, in_feature=128, out_feature=10575, s=30.0, m=0.35): 19 | super(CosineMarginProduct, self).__init__() 20 | self.in_feature = in_feature 21 | self.out_feature = out_feature 22 | self.s = s 23 | self.m = m 24 | self.weight = Parameter(torch.Tensor(out_feature, in_feature)) 25 | nn.init.xavier_uniform_(self.weight) 26 | 27 | 28 | def forward(self, input, label): 29 | cosine = F.linear(F.normalize(input), F.normalize(self.weight)) 30 | # one_hot = torch.zeros(cosine.size(), device='cuda' if torch.cuda.is_available() else 'cpu') 31 | one_hot = torch.zeros_like(cosine) 32 | one_hot.scatter_(1, label.view(-1, 1), 1.0) 33 | 34 | output = self.s * (cosine - one_hot * self.m) 35 | return output 36 | 37 | 38 | if 
__name__ == '__main__': 39 | pass -------------------------------------------------------------------------------- /utils/visualize.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # encoding: utf-8 3 | ''' 4 | @author: wujiyang 5 | @contact: wujiyang@hust.edu.cn 6 | @file: visualize.py 7 | @time: 2019/1/7 16:07 8 | @desc: visualize tools 9 | ''' 10 | 11 | import visdom 12 | import numpy as np 13 | import time 14 | 15 | class Visualizer(): 16 | def __init__(self, env='default', **kwargs): 17 | self.vis = visdom.Visdom(env=env, **kwargs) 18 | self.index = 1 19 | 20 | def plot_curves(self, d, iters, title='loss', xlabel='iters', ylabel='accuracy'): 21 | name = list(d.keys()) 22 | val = list(d.values()) 23 | if len(val) == 1: 24 | y = np.array(val) 25 | else: 26 | y = np.array(val).reshape(-1, len(val)) 27 | self.vis.line(Y=y, 28 | X=np.array([self.index]), 29 | win=title, 30 | opts=dict(legend=name, title = title, xlabel=xlabel, ylabel=ylabel), 31 | update=None if self.index == 0 else 'append') 32 | self.index = iters 33 | 34 | 35 | if __name__ == '__main__': 36 | vis = Visualizer(env='test') 37 | for i in range(10): 38 | x = i 39 | y = 2 * i 40 | z = 4 * i 41 | vis.plot_curves({'train': x, 'test': y}, iters=i, title='train') 42 | vis.plot_curves({'train': z, 'test': y, 'val': i}, iters=i, title='test') 43 | time.sleep(1) -------------------------------------------------------------------------------- /lossfunctions/agentcenterloss.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # encoding: utf-8 3 | ''' 4 | @author: wujiyang 5 | @contact: wujiyang@hust.edu.cn 6 | @file: agentcenterloss.py 7 | @time: 2019/1/7 10:53 8 | @desc: the variety of center loss, which use the class weight as the class center and normalize both the weight and feature, 9 | in this way, the cos distance of weight and feature can be used as the supervised signal. 10 | It's similar with torch.nn.CosineEmbeddingLoss, x_1 means weight_i, x_2 means feature_i. 
11 | ''' 12 | 13 | import torch 14 | import torch.nn as nn 15 | import torch.nn.functional as F 16 | 17 | class AgentCenterLoss(nn.Module): 18 | 19 | def __init__(self, num_classes, feat_dim, scale): 20 | super(AgentCenterLoss, self).__init__() 21 | self.num_classes = num_classes 22 | self.feat_dim = feat_dim 23 | self.scale = scale 24 | 25 | self.centers = nn.Parameter(torch.randn(self.num_classes, self.feat_dim)) 26 | 27 | def forward(self, x, labels): 28 | ''' 29 | Parameters: 30 | x: input tensor with shape (batch_size, feat_dim) 31 | labels: ground truth label with shape (batch_size) 32 | Return: 33 | loss of centers 34 | ''' 35 | cos_dis = F.linear(F.normalize(x), F.normalize(self.centers)) * self.scale 36 | 37 | one_hot = torch.zeros_like(cos_dis) 38 | one_hot.scatter_(1, labels.view(-1, 1), 1.0) 39 | 40 | # loss = 1 - cosine(i) 41 | loss = one_hot * self.scale - (one_hot * cos_dis) 42 | 43 | return loss.mean() -------------------------------------------------------------------------------- /attack/preprocess_dataset.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import cv2 4 | import numpy as np 5 | import torch 6 | import torch.utils.data as data 7 | 8 | 9 | def img_loader(path): 10 | try: 11 | with open(path, 'rb') as f: 12 | img = cv2.imread(path) 13 | if len(img.shape) == 2: 14 | img = np.stack([img] * 3, 2) 15 | return img 16 | except IOError: 17 | print('Cannot load image ' + path) 18 | 19 | 20 | class LFW(data.Dataset): 21 | def __init__(self, root, dev_path, transform=None, loader=img_loader, flip=False): 22 | self.LFW_root = "/data/FaceRecognition/LFW/lfw_align_112" 23 | self.root = root 24 | self.transform = transform 25 | self.loader = loader 26 | 27 | self.dev = np.loadtxt(dev_path, dtype=str, delimiter=',', skiprows=1) 28 | 29 | self.flip = flip 30 | 31 | def __getitem__(self, index): 32 | person = self.dev[index, 2] 33 | image_name = self.dev[index, 1] 34 | 35 | imglist = [] 36 | for img in os.listdir("{}/{}".format(self.LFW_root, person)): 37 | imglist.append(os.path.join(self.LFW_root, person, img)) 38 | imglist.sort() 39 | imglist.insert(0, os.path.join(self.root, image_name)) 40 | 41 | imgs = [] 42 | if self.transform is not None: 43 | for img_path in imglist: 44 | img = self.transform(self.loader(img_path)) 45 | if self.flip: 46 | img = torch.flip(img, dims=(2,)) 47 | imgs.append(img) 48 | else: 49 | for img_path in imglist: 50 | imgs.append(torch.from_numpy(self.loader(img_path))) 51 | 52 | return imgs 53 | 54 | def __len__(self): 55 | return self.dev.shape[0] 56 | -------------------------------------------------------------------------------- /lossfunctions/centerloss.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # encoding: utf-8 3 | ''' 4 | @author: wujiyang 5 | @contact: wujiyang@hust.edu.cn 6 | @file: centerloss.py 7 | @time: 2019/1/4 15:24 8 | @desc: the implementation of center loss 9 | ''' 10 | 11 | import torch 12 | import torch.nn as nn 13 | 14 | class CenterLoss(nn.Module): 15 | 16 | def __init__(self, num_classes, feat_dim): 17 | super(CenterLoss, self).__init__() 18 | self.num_classes = num_classes 19 | self.feat_dim = feat_dim 20 | 21 | self.centers = nn.Parameter(torch.randn(self.num_classes, self.feat_dim)) 22 | 23 | def forward(self, x, labels): 24 | ''' 25 | Parameters: 26 | x: input tensor with shape (batch_size, feat_dim) 27 | labels: ground truth label with shape (batch_size) 28 | Return: 29 | loss of centers 30 | 
''' 31 | # compute the distance of (x-center)^2 32 | batch_size = x.size(0) 33 | distmat = torch.pow(x, 2).sum(dim=1, keepdim=True).expand(batch_size, self.num_classes) + \ 34 | torch.pow(self.centers, 2).sum(dim=1, keepdim=True).expand(self.num_classes, batch_size).t() 35 | distmat.addmm_(1, -2, x, self.centers.t()) 36 | 37 | # get one_hot matrix 38 | device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') 39 | classes = torch.arange(self.num_classes).long().to(device) 40 | labels = labels.unsqueeze(1).expand(batch_size, self.num_classes) 41 | mask = labels.eq(classes.expand(batch_size, self.num_classes)) 42 | 43 | dist = [] 44 | for i in range(batch_size): 45 | value = distmat[i][mask[i]] 46 | value = value.clamp(min=1e-12, max=1e+12) # for numerical stability 47 | dist.append(value) 48 | dist = torch.cat(dist) 49 | loss = dist.mean() 50 | 51 | return loss -------------------------------------------------------------------------------- /attack/util.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import os 3 | from pickle import dump, load 4 | 5 | import numpy as np 6 | 7 | 8 | class AverageMeter(object): 9 | 10 | def __init__(self): 11 | self.val = 0 12 | self.avg = 0 13 | self.sum = 0 14 | self.count = 0 15 | 16 | def reset(self): 17 | self.val = 0 18 | self.avg = 0 19 | self.sum = 0 20 | self.count = 0 21 | 22 | def update(self, val, n=1): 23 | self.val = val 24 | self.sum += val * n 25 | self.count += n 26 | self.avg = self.sum / self.count if self.count != 0 else 0 27 | 28 | 29 | def get_log_filename(args): 30 | filename = "{h}x{w}, alpha={alpha}, {n_models}models, {epochs}epochs, lr={lr:.1e}, batch_size={bs}".format( 31 | alpha=args.alpha, n_models=len(args.backbone_net), epochs=args.epochs, lr=args.lr, 32 | bs=args.batch_size, h=args.masks_size[-2], w=args.masks_size[-1] 33 | ) 34 | return filename 35 | 36 | 37 | def init_log(output_dir, filename): 38 | if not os.path.exists(output_dir): 39 | os.mkdir(output_dir) 40 | logging.basicConfig(level=logging.DEBUG, 41 | format='%(asctime)s %(message)s', 42 | datefmt='%Y-%m-%d %H:%M:%S', 43 | filename=os.path.join(output_dir, filename), 44 | filemode='w') 45 | console = logging.StreamHandler() 46 | console.setLevel(logging.INFO) 47 | logging.getLogger('').addHandler(console) 48 | return logging 49 | 50 | 51 | def init_random_state(args): 52 | if args.random_state != "": 53 | with open(args.random_state, 'rb') as f: 54 | random_state = load(f) 55 | np.random.set_state(random_state) 56 | else: 57 | with open("state/random_state.obj", 'wb') as f: 58 | random_state = np.random.get_state() 59 | dump(random_state, f) 60 | -------------------------------------------------------------------------------- /margin/ArcMarginProduct.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # encoding: utf-8 3 | ''' 4 | @author: wujiyang 5 | @contact: wujiyang@hust.edu.cn 6 | @file: ArcMarginProduct.py 7 | @time: 2018/12/25 9:13 8 | @desc: additive angular margin for arcface/insightface 9 | ''' 10 | 11 | import math 12 | import torch 13 | from torch import nn 14 | from torch.nn import Parameter 15 | import torch.nn.functional as F 16 | 17 | class ArcMarginProduct(nn.Module): 18 | def __init__(self, in_feature=128, out_feature=10575, s=32.0, m=0.50, easy_margin=False): 19 | super(ArcMarginProduct, self).__init__() 20 | self.in_feature = in_feature 21 | self.out_feature = out_feature 22 | self.s = s 23 | self.m = m 24 | 
self.weight = Parameter(torch.Tensor(out_feature, in_feature)) 25 | nn.init.xavier_uniform_(self.weight) 26 | 27 | self.easy_margin = easy_margin 28 | self.cos_m = math.cos(m) 29 | self.sin_m = math.sin(m) 30 | 31 | # make the function cos(theta+m) monotonic decreasing while theta in [0°,180°] 32 | self.th = math.cos(math.pi - m) 33 | self.mm = math.sin(math.pi - m) * m 34 | 35 | def forward(self, x, label): 36 | # cos(theta) 37 | cosine = F.linear(F.normalize(x), F.normalize(self.weight)) 38 | # cos(theta + m) 39 | sine = torch.sqrt(1.0 - torch.pow(cosine, 2)) 40 | phi = cosine * self.cos_m - sine * self.sin_m 41 | 42 | if self.easy_margin: 43 | phi = torch.where(cosine > 0, phi, cosine) 44 | else: 45 | phi = torch.where((cosine - self.th) > 0, phi, cosine - self.mm) 46 | 47 | #one_hot = torch.zeros(cosine.size(), device='cuda' if torch.cuda.is_available() else 'cpu') 48 | one_hot = torch.zeros_like(cosine) 49 | one_hot.scatter_(1, label.view(-1, 1), 1) 50 | output = (one_hot * phi) + ((1.0 - one_hot) * cosine) 51 | output = output * self.s 52 | 53 | return output 54 | 55 | 56 | if __name__ == '__main__': 57 | pass -------------------------------------------------------------------------------- /utils/plot_logit.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # encoding: utf-8 3 | ''' 4 | @author: wujiyang 5 | @contact: wujiyang@hust.edu.cn 6 | @file: plot_logit.py 7 | @time: 2019/3/29 14:21 8 | @desc: plot the logit corresponding to shpereface, cosface, arcface and so on. 9 | ''' 10 | 11 | import math 12 | import torch 13 | import matplotlib.pyplot as plt 14 | import numpy as np 15 | 16 | def softmax(theta): 17 | return torch.cos(theta) 18 | 19 | def sphereface(theta, m=4): 20 | return (torch.cos(m * theta) + 20 * torch.cos(theta)) / (20 + 1) 21 | 22 | def cosface(theta, m): 23 | return torch.cos(theta) - m 24 | 25 | def arcface(theta, m): 26 | return torch.cos(theta + m) 27 | 28 | def multimargin(theta, m1, m2): 29 | return torch.cos(theta + m1) - m2 30 | 31 | 32 | theta = torch.arange(0, math.pi, 0.001) 33 | print(theta.type) 34 | 35 | x = theta.numpy() 36 | y_softmax = softmax(theta).numpy() 37 | y_cosface = cosface(theta, 0.35).numpy() 38 | y_arcface = arcface(theta, 0.5).numpy() 39 | 40 | y_multimargin_1 = multimargin(theta, 0.2, 0.3).numpy() 41 | y_multimargin_2 = multimargin(theta, 0.2, 0.4).numpy() 42 | y_multimargin_3 = multimargin(theta, 0.3, 0.2).numpy() 43 | y_multimargin_4 = multimargin(theta, 0.3, 0.3).numpy() 44 | y_multimargin_5 = multimargin(theta, 0.4, 0.2).numpy() 45 | y_multimargin_6 = multimargin(theta, 0.4, 0.3).numpy() 46 | 47 | plt.plot(x, y_softmax, x, y_cosface, x, y_arcface, x, y_multimargin_1, x, y_multimargin_2, x, y_multimargin_3, x, y_multimargin_4, x, y_multimargin_5, x, y_multimargin_6) 48 | plt.legend(['Softmax(0.00, 0.00)', 'CosFace(0.00, 0.35)', 'ArcFace(0.50, 0.00)', 'MultiMargin(0.20, 0.30)', 'MultiMargin(0.20, 0.40)', 'MultiMargin(0.30, 0.20)', 'MultiMargin(0.30, 0.30)', 'MultiMargin(0.40, 0.20)', 'MultiMargin(0.40, 0.30)']) 49 | plt.grid(False) 50 | plt.xlim((0, 3/4*math.pi)) 51 | plt.ylim((-1.2, 1.2)) 52 | 53 | plt.xticks(np.arange(0, 2.4, 0.3)) 54 | plt.yticks(np.arange(-1.2, 1.2, 0.2)) 55 | plt.xlabel('Angular between the Feature and Target Center (Radian: 0 - 3/4 Pi)') 56 | plt.ylabel('Target Logit') 57 | 58 | plt.savefig('target logits') -------------------------------------------------------------------------------- /margin/MultiMarginProduct.py: 
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # encoding: utf-8
3 | '''
4 | @author: wujiyang
5 | @contact: wujiyang@hust.edu.cn
6 | @file: MultiMarginProduct.py
7 | @time: 2019/3/30 10:09
8 | @desc: Combination of additive angular margin and additive cosine margin
9 | '''
10 | 
11 | import math
12 | import torch
13 | from torch import nn
14 | from torch.nn import Parameter
15 | import torch.nn.functional as F
16 | 
17 | class MultiMarginProduct(nn.Module):
18 |     def __init__(self, in_feature=128, out_feature=10575, s=32.0, m1=0.20, m2=0.35, easy_margin=False):
19 |         super(MultiMarginProduct, self).__init__()
20 |         self.in_feature = in_feature
21 |         self.out_feature = out_feature
22 |         self.s = s
23 |         self.m1 = m1
24 |         self.m2 = m2
25 |         self.weight = Parameter(torch.Tensor(out_feature, in_feature))
26 |         nn.init.xavier_uniform_(self.weight)
27 | 
28 |         self.easy_margin = easy_margin
29 |         self.cos_m1 = math.cos(m1)
30 |         self.sin_m1 = math.sin(m1)
31 | 
32 |         # make the function cos(theta+m) monotonic decreasing while theta in [0°,180°]
33 |         self.th = math.cos(math.pi - m1)
34 |         self.mm = math.sin(math.pi - m1) * m1
35 | 
36 |     def forward(self, x, label):
37 |         # cos(theta)
38 |         cosine = F.linear(F.normalize(x), F.normalize(self.weight))
39 |         # cos(theta + m1)
40 |         sine = torch.sqrt(1.0 - torch.pow(cosine, 2))
41 |         phi = cosine * self.cos_m1 - sine * self.sin_m1
42 | 
43 |         if self.easy_margin:
44 |             phi = torch.where(cosine > 0, phi, cosine)
45 |         else:
46 |             phi = torch.where((cosine - self.th) > 0, phi, cosine - self.mm)
47 | 
48 | 
49 |         one_hot = torch.zeros_like(cosine)
50 |         one_hot.scatter_(1, label.view(-1, 1), 1)
51 |         output = (one_hot * phi) + ((1.0 - one_hot) * cosine)  # additive angular margin
52 |         output = output - one_hot * self.m2  # additive cosine margin
53 |         output = output * self.s
54 | 
55 |         return output
56 | 
57 | 
58 | if __name__ == '__main__':
59 |     pass
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Face-Adversarial-Attack
2 | 
3 | ## Introduction
4 | This is an easy approach for the competition "Facial Adversary Examples" in [TIANCHI](https://tianchi.aliyun.com/competition/entrance/231745/introduction?lang=en-us), which can get a score of **3.5** based on the evaluation criterion of
5 | the competition.
6 | 
7 | 
8 | ## Preparation
9 | 1. Download the dataset from [TIANCHI](https://tianchi.aliyun.com/competition/entrance/231745/introduction?lang=en-us). Suppose the directory is $DATA_DIR.
10 | 
11 | 2. Download the pretrained Face-Recognition models from [Baidu](https://pan.baidu.com/s/1g0WNAqNQvqtB86JliYtQeQ) (Extraction code: sjqs).
12 | 
13 | 3. Download the feature files from [Baidu](https://pan.baidu.com/s/1c5qsC5WdOPQFTfE8VMKzSg) (Extraction code: jf2z). Or you can use the script *attack/preprocess_eval.py* to generate these files.
14 | 
15 | 4. Initialize the attack mask directory:
16 | 
17 | ```
18 | mkdir attack/masks
19 | ```
20 | 
21 | Your directory tree should look like this:
22 | 
23 | ```
24 | ${PROJECT_HOME}
25 | ├── attack
26 | │   ├── log
27 | │   ├── masks
28 | │   ├── state
29 | │   └── *.py
30 | ├── model
31 | │   └── downloaded models
32 | ├── result
33 | │   └── downloaded features
34 | ├── ...
35 | └── ...
36 | ```
37 | 
38 | 
39 | ## Dependencies
40 | - python 3.6
41 | - PyTorch 1.0.1
42 | - CUDA 9.0
43 | - CUDNN 7.1.2
44 | - opencv 3.4.2
45 | - numpy 1.15
46 | - scipy 1.2.0
47 | ### Note
48 | - The code is developed using python 3.6 on Ubuntu 18.04. NVIDIA GPUs are needed.
49 | - The code is tested using 1 NVIDIA 1080Ti GPU card. Other platforms or GPU cards are not fully tested.
50 | - OpenCV is installed through anaconda, which is a little different from installing it through pip.
51 | 
52 | 
53 | ## Usage
54 | ```bash
55 | cd $PROJECT_HOME/attack
56 | 
57 | python attack.py \
58 |     --root $DATA_DIR/securityAI_round1_images \
59 |     --dev_path $DATA_DIR/securityAI_round1_dev.csv \
60 |     --output_path $OUTPUT_PATH
61 | ```
62 | 
63 | 
64 | ## Acknowledgement
65 | We developed our attack code based on wujiyang's [Face_Pytorch](https://github.com/wujiyang/Face_Pytorch).
--------------------------------------------------------------------------------
/margin/SphereMarginProduct.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # encoding: utf-8
3 | '''
4 | @author: wujiyang
5 | @contact: wujiyang@hust.edu.cn
6 | @file: SphereMarginProduct.py
7 | @time: 2018/12/25 9:19
8 | @desc: multiplicative angular margin for sphereface
9 | '''
10 | 
11 | import torch
12 | import torch.nn as nn
13 | import torch.nn.functional as F
14 | from torch.nn import Parameter
15 | import math
16 | 
17 | class SphereMarginProduct(nn.Module):
18 |     def __init__(self, in_feature, out_feature, m=4, base=1000.0, gamma=0.0001, power=2, lambda_min=5.0, iter=0):
19 |         super(SphereMarginProduct, self).__init__()
20 |         assert m in [1, 2, 3, 4], 'margin should be 1, 2, 3 or 4'
21 |         self.in_feature = in_feature
22 |         self.out_feature = out_feature
23 |         self.m = m
24 |         self.base = base
25 |         self.gamma = gamma
26 |         self.power = power
27 |         self.lambda_min = lambda_min
28 |         self.iter = 0
29 |         self.weight = Parameter(torch.Tensor(out_feature, in_feature))
30 |         nn.init.xavier_uniform_(self.weight)
31 | 
32 |         # duplication formula
33 |         self.margin_formula = [
34 |             lambda x : x ** 0,
35 |             lambda x : x ** 1,
36 |             lambda x : 2 * x ** 2 - 1,
37 |             lambda x : 4 * x ** 3 - 3 * x,
38 |             lambda x : 8 * x ** 4 - 8 * x ** 2 + 1,
39 |             lambda x : 16 * x ** 5 - 20 * x ** 3 + 5 * x
40 |         ]
41 | 
42 |     def forward(self, input, label):
43 |         self.iter += 1
44 |         self.cur_lambda = max(self.lambda_min, self.base * (1 + self.gamma * self.iter) ** (-1 * self.power))
45 | 
46 |         cos_theta = F.linear(F.normalize(input), F.normalize(self.weight))
47 |         cos_theta = cos_theta.clamp(-1, 1)  # keep cosine values in a valid range for acos
48 | 
49 |         cos_m_theta = self.margin_formula[self.m](cos_theta)
50 |         theta = cos_theta.data.acos()
51 |         k = ((self.m * theta) / math.pi).floor()
52 |         phi_theta = ((-1.0) ** k) * cos_m_theta - 2 * k
53 |         phi_theta_ = (self.cur_lambda * cos_theta + phi_theta) / (1 + self.cur_lambda)
54 |         norm_of_feature = torch.norm(input, 2, 1)
55 | 
56 |         one_hot = torch.zeros_like(cos_theta)
57 |         one_hot.scatter_(1, label.view(-1, 1), 1)
58 | 
59 |         output = one_hot * phi_theta_ + (1 - one_hot) * cos_theta
60 |         output *= norm_of_feature.view(-1, 1)
61 | 
62 |         return output
63 | 
64 | 
65 | if __name__ == '__main__':
66 |     pass
--------------------------------------------------------------------------------
/wu/eval_lfw_blufr.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # encoding: utf-8
3 | '''
4 | @author: wujiyang
5 | @contact: wujiyang@hust.edu.cn
6 | @file: eval_lfw_blufr.py
7 | @time: 2019/1/17 15:52
8 | @desc: test lfw accuracy on blufr protocol
9 
| ''' 10 | ''' 11 | LFW BLUFR TEST PROTOCOL 12 | 13 | Official Website: http://www.cbsr.ia.ac.cn/users/scliao/projects/blufr/ 14 | 15 | When I try to do this, I find that the blufr_lfw_config.mat file provided by above site is too old. 16 | Some image files listed in the mat have been removed in lfw pairs.txt 17 | So this work is suspended for now... 18 | ''' 19 | 20 | import scipy.io as sio 21 | import argparse 22 | 23 | def readName(file='pairs.txt'): 24 | name_list = [] 25 | f = open(file, 'r') 26 | lines = f.readlines() 27 | 28 | for line in lines[1:]: 29 | line_split = line.rstrip().split() 30 | if len(line_split) == 3: 31 | name_list.append(line_split[0]) 32 | elif len(line_split) == 4: 33 | name_list.append(line_split[0]) 34 | name_list.append(line_split[2]) 35 | else: 36 | print('wrong file, please check again') 37 | 38 | return list(set(name_list)) 39 | 40 | 41 | def main(args): 42 | blufr_info = sio.loadmat(args.lfw_blufr_file) 43 | #print(blufr_info) 44 | name_list = readName() 45 | 46 | image = blufr_info['imageList'] 47 | missing_files = [] 48 | for i in range(image.shape[0]): 49 | name = image[i][0][0] 50 | index = name.rfind('_') 51 | name = name[0:index] 52 | if name not in name_list: 53 | print(name) 54 | missing_files.append(name) 55 | print('lfw pairs.txt total persons: ', len(name_list)) 56 | print('blufr_mat_missing persons: ', len(missing_files)) 57 | 58 | ''' 59 | Some of the missing file: 60 | Zdravko_Mucic 61 | Zelma_Novelo 62 | Zeng_Qinghong 63 | Zumrati_Juma 64 | lfw pairs.txt total persons: 4281 65 | blufr_mat_missing persons: 1549 66 | 67 | ''' 68 | 69 | if __name__ == '__main__': 70 | parser = argparse.ArgumentParser(description='lfw blufr test') 71 | parser.add_argument('--lfw_blufr_file', type=str, default='./blufr_lfw_config.mat', help='feature dimension') 72 | parser.add_argument('--lfw_pairs.txt', type=str, default='./pairs.txt', help='feature dimension') 73 | parser.add_argument('--gpus', type=str, default='2,3', help='gpu list') 74 | args = parser.parse_args() 75 | 76 | main(args) -------------------------------------------------------------------------------- /dataset/casia_webface.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # encoding: utf-8 3 | ''' 4 | @author: wujiyang 5 | @contact: wujiyang@hust.edu.cn 6 | @file: casia_webface.py 7 | @time: 2018/12/21 19:09 8 | @desc: CASIA-WebFace dataset loader 9 | ''' 10 | 11 | import torchvision.transforms as transforms 12 | import torch.utils.data as data 13 | import numpy as np 14 | import cv2 15 | import os 16 | import torch 17 | 18 | 19 | def img_loader(path): 20 | try: 21 | with open(path, 'rb') as f: 22 | img = cv2.imread(path) 23 | if len(img.shape) == 2: 24 | img = np.stack([img] * 3, 2) 25 | return img 26 | except IOError: 27 | print('Cannot load image ' + path) 28 | 29 | 30 | class CASIAWebFace(data.Dataset): 31 | def __init__(self, root, file_list, transform=None, loader=img_loader): 32 | 33 | self.root = root 34 | self.transform = transform 35 | self.loader = loader 36 | 37 | image_list = [] 38 | label_list = [] 39 | with open(file_list) as f: 40 | img_label_list = f.read().splitlines() 41 | for info in img_label_list: 42 | image_path, label_name = info.split(' ') 43 | image_list.append(image_path) 44 | label_list.append(int(label_name)) 45 | 46 | self.image_list = image_list 47 | self.label_list = label_list 48 | self.class_nums = len(np.unique(self.label_list)) 49 | print("dataset size: ", len(self.image_list), '/', 
self.class_nums) 50 | 51 | def __getitem__(self, index): 52 | img_path = self.image_list[index] 53 | label = self.label_list[index] 54 | 55 | img = self.loader(os.path.join(self.root, img_path)) 56 | 57 | # random flip with ratio of 0.5 58 | flip = np.random.choice(2) * 2 - 1 59 | if flip == 1: 60 | img = cv2.flip(img, 1) 61 | 62 | if self.transform is not None: 63 | img = self.transform(img) 64 | else: 65 | img = torch.from_numpy(img) 66 | 67 | return img, label 68 | 69 | def __len__(self): 70 | return len(self.image_list) 71 | 72 | 73 | if __name__ == '__main__': 74 | root = 'D:/data/webface_align_112' 75 | file_list = 'D:/data/webface_align_train.list' 76 | 77 | transform = transforms.Compose([ 78 | transforms.ToTensor(), # range [0, 255] -> [0.0,1.0] 79 | transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)) # range [0.0, 1.0] -> [-1.0,1.0] 80 | ]) 81 | dataset = CASIAWebFace(root, file_list, transform=transform) 82 | trainloader = data.DataLoader(dataset, batch_size=64, shuffle=True, num_workers=2, drop_last=False) 83 | print(len(dataset)) 84 | for data in trainloader: 85 | print(data[0].shape) -------------------------------------------------------------------------------- /utils/load_images_from_bin.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # encoding: utf-8 3 | ''' 4 | @author: wujiyang 5 | @contact: wujiyang@hust.edu.cn 6 | @file: load_images_from_bin.py 7 | @time: 2018/12/25 19:21 8 | @desc: For AgeDB-30 and CFP-FP test dataset, we use the mxnet binary file provided by insightface, this is the tool to restore 9 | the aligned images from mxnet binary file. 10 | You should install a mxnet-cpu first, just do 'pip install mxnet==1.2.1' is ok. 11 | ''' 12 | 13 | from PIL import Image 14 | import cv2 15 | import os 16 | import pickle 17 | import mxnet as mx 18 | from tqdm import tqdm 19 | 20 | ''' 21 | For train dataset, insightface provide a mxnet .rec file, just install a mxnet-cpu for extract images 22 | ''' 23 | 24 | def load_mx_rec(rec_path): 25 | save_path = os.path.join(rec_path, 'emore_images_2') 26 | if not os.path.exists(save_path): 27 | os.makedirs(save_path) 28 | 29 | imgrec = mx.recordio.MXIndexedRecordIO(os.path.join(rec_path, 'train.idx'), os.path.join(rec_path, 'train.rec'), 'r') 30 | img_info = imgrec.read_idx(0) 31 | header,_ = mx.recordio.unpack(img_info) 32 | max_idx = int(header.label[0]) 33 | for idx in tqdm(range(1,max_idx)): 34 | img_info = imgrec.read_idx(idx) 35 | header, img = mx.recordio.unpack_img(img_info) 36 | label = int(header.label) 37 | #img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR) 38 | #img = Image.fromarray(img) 39 | label_path = os.path.join(save_path, str(label).zfill(6)) 40 | if not os.path.exists(label_path): 41 | os.makedirs(label_path) 42 | #img.save(os.path.join(label_path, str(idx).zfill(8) + '.jpg'), quality=95) 43 | cv2.imwrite(os.path.join(label_path, str(idx).zfill(8) + '.jpg'), img) 44 | 45 | 46 | def load_image_from_bin(bin_path, save_dir): 47 | if not os.path.exists(save_dir): 48 | os.makedirs(save_dir) 49 | file = open(os.path.join(save_dir, '../', 'lfw_pair.txt'), 'w') 50 | bins, issame_list = pickle.load(open(bin_path, 'rb'), encoding='bytes') 51 | for idx in tqdm(range(len(bins))): 52 | _bin = bins[idx] 53 | img = mx.image.imdecode(_bin).asnumpy() 54 | img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR) 55 | cv2.imwrite(os.path.join(save_dir, str(idx+1).zfill(5)+'.jpg'), img) 56 | if idx % 2 == 0: 57 | label = 1 if issame_list[idx//2] == True else -1 58 | 
file.write(str(idx+1).zfill(5) + '.jpg' + ' ' + str(idx+2).zfill(5) +'.jpg' + ' ' + str(label) + '\n') 59 | 60 | 61 | if __name__ == '__main__': 62 | #bin_path = 'D:/face_data_emore/faces_webface_112x112/lfw.bin' 63 | #save_dir = 'D:/face_data_emore/faces_webface_112x112/lfw' 64 | rec_path = 'D:/face_data_emore/faces_emore' 65 | load_mx_rec(rec_path) 66 | #load_image_from_bin(bin_path, save_dir) 67 | -------------------------------------------------------------------------------- /dataset/megaface.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # encoding: utf-8 3 | ''' 4 | @author: wujiyang 5 | @contact: wujiyang@hust.edu.cn 6 | @file: megaface.py 7 | @time: 2018/12/24 16:29 8 | @desc: 9 | ''' 10 | 11 | import torchvision.transforms as transforms 12 | import torch.utils.data as data 13 | import numpy as np 14 | import cv2 15 | import os 16 | import torch 17 | 18 | def img_loader(path): 19 | try: 20 | with open(path, 'rb') as f: 21 | img = cv2.imread(path) 22 | if len(img.shape) == 2: 23 | img = np.stack([img] * 3, 2) 24 | return img 25 | except IOError: 26 | print('Cannot load image ' + path) 27 | 28 | 29 | class MegaFace(data.Dataset): 30 | def __init__(self, facescrub_dir, megaface_dir, transform=None, loader=img_loader): 31 | 32 | self.transform = transform 33 | self.loader = loader 34 | 35 | test_image_file_list = [] 36 | print('Scanning files under facescrub and megaface...') 37 | for root, dirs, files in os.walk(facescrub_dir): 38 | for e in files: 39 | filename = os.path.join(root, e) 40 | ext = os.path.splitext(filename)[1].lower() 41 | if ext in ('.png', '.bmp', '.jpg', '.jpeg'): 42 | test_image_file_list.append(filename) 43 | for root, dirs, files in os.walk(megaface_dir): 44 | for e in files: 45 | filename = os.path.join(root, e) 46 | ext = os.path.splitext(filename)[1].lower() 47 | if ext in ('.png', '.bmp', '.jpg', '.jpeg'): 48 | test_image_file_list.append(filename) 49 | 50 | self.image_list = test_image_file_list 51 | 52 | def __getitem__(self, index): 53 | img_path = self.image_list[index] 54 | img = self.loader(img_path) 55 | 56 | #水平翻转图像 57 | #img = cv2.flip(img, 1) 58 | 59 | if self.transform is not None: 60 | img = self.transform(img) 61 | else: 62 | img = torch.from_numpy(img) 63 | 64 | return img, img_path 65 | 66 | def __len__(self): 67 | return len(self.image_list) 68 | 69 | 70 | if __name__ == '__main__': 71 | facescrub = '/media/sda/megaface_test_kit/facescrub_align_112/' 72 | megaface = '/media/sda/megaface_test_kit/megaface_align_112/' 73 | 74 | transform = transforms.Compose([ 75 | transforms.ToTensor(), # range [0, 255] -> [0.0,1.0] 76 | transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)) # range [0.0, 1.0] -> [-1.0,1.0] 77 | ]) 78 | dataset = MegaFace(facescrub, megaface, transform=transform) 79 | trainloader = data.DataLoader(dataset, batch_size=64, shuffle=False, num_workers=2, drop_last=False) 80 | print(len(dataset)) 81 | for data in trainloader: 82 | print(data.shape) -------------------------------------------------------------------------------- /dataset/lfw_2.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # encoding: utf-8 3 | ''' 4 | @author: wujiyang 5 | @contact: wujiyang@hust.edu.cn 6 | @file: lfw_2.py 7 | @time: 2019/2/19 16:59 8 | @desc: lfw dataset from insightface ,just like agedb and cfp-fp 9 | ''' 10 | 11 | 12 | import numpy as np 13 | import cv2 14 | import os 15 | import torch.utils.data as 
data 16 | 17 | import torch 18 | import torchvision.transforms as transforms 19 | 20 | def img_loader(path): 21 | try: 22 | with open(path, 'rb') as f: 23 | img = cv2.imread(path) 24 | if len(img.shape) == 2: 25 | img = np.stack([img] * 3, 2) 26 | return img 27 | except IOError: 28 | print('Cannot load image ' + path) 29 | 30 | class LFW_2(data.Dataset): 31 | def __init__(self, root, file_list, transform=None, loader=img_loader): 32 | 33 | self.root = root 34 | self.file_list = file_list 35 | self.transform = transform 36 | self.loader = loader 37 | self.nameLs = [] 38 | self.nameRs = [] 39 | self.folds = [] 40 | self.flags = [] 41 | 42 | with open(file_list) as f: 43 | pairs = f.read().splitlines() 44 | for i, p in enumerate(pairs): 45 | p = p.split(' ') 46 | nameL = p[0] 47 | nameR = p[1] 48 | fold = i // 600 49 | flag = int(p[2]) 50 | 51 | self.nameLs.append(nameL) 52 | self.nameRs.append(nameR) 53 | self.folds.append(fold) 54 | self.flags.append(flag) 55 | 56 | def __getitem__(self, index): 57 | 58 | img_l = self.loader(os.path.join(self.root, self.nameLs[index])) 59 | img_r = self.loader(os.path.join(self.root, self.nameRs[index])) 60 | imglist = [img_l, cv2.flip(img_l, 1), img_r, cv2.flip(img_r, 1)] 61 | 62 | if self.transform is not None: 63 | for i in range(len(imglist)): 64 | imglist[i] = self.transform(imglist[i]) 65 | 66 | imgs = imglist 67 | return imgs 68 | else: 69 | imgs = [torch.from_numpy(i) for i in imglist] 70 | return imgs 71 | 72 | def __len__(self): 73 | return len(self.nameLs) 74 | 75 | 76 | if __name__ == '__main__': 77 | root = '/media/sda/insightface_emore/lfw' 78 | file_list = '/media/sda/insightface_emore/pair_lfw.txt' 79 | 80 | transform = transforms.Compose([ 81 | transforms.ToTensor(), # range [0, 255] -> [0.0,1.0] 82 | transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)) # range [0.0, 1.0] -> [-1.0,1.0] 83 | ]) 84 | 85 | dataset = LFW_2(root, file_list, transform=transform) 86 | trainloader = data.DataLoader(dataset, batch_size=64, shuffle=False, num_workers=2, drop_last=False) 87 | for data in trainloader: 88 | for d in data: 89 | print(d[0].shape) -------------------------------------------------------------------------------- /dataset/agedb.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # encoding: utf-8 3 | ''' 4 | @author: wujiyang 5 | @contact: wujiyang@hust.edu.cn 6 | @file: agedb.py.py 7 | @time: 2018/12/25 18:43 8 | @desc: AgeDB-30 test data loader, agedb test protocol is the same with lfw 9 | ''' 10 | 11 | import numpy as np 12 | import cv2 13 | import os 14 | import torch.utils.data as data 15 | 16 | import torch 17 | import torchvision.transforms as transforms 18 | 19 | def img_loader(path): 20 | try: 21 | with open(path, 'rb') as f: 22 | img = cv2.imread(path) 23 | if len(img.shape) == 2: 24 | img = np.stack([img] * 3, 2) 25 | return img 26 | except IOError: 27 | print('Cannot load image ' + path) 28 | 29 | class AgeDB30(data.Dataset): 30 | def __init__(self, root, file_list, transform=None, loader=img_loader): 31 | 32 | self.root = root 33 | self.file_list = file_list 34 | self.transform = transform 35 | self.loader = loader 36 | self.nameLs = [] 37 | self.nameRs = [] 38 | self.folds = [] 39 | self.flags = [] 40 | 41 | with open(file_list) as f: 42 | pairs = f.read().splitlines() 43 | for i, p in enumerate(pairs): 44 | p = p.split(' ') 45 | nameL = p[0] 46 | nameR = p[1] 47 | fold = i // 600 48 | flag = int(p[2]) 49 | 50 | self.nameLs.append(nameL) 51 | 
self.nameRs.append(nameR) 52 | self.folds.append(fold) 53 | self.flags.append(flag) 54 | 55 | def __getitem__(self, index): 56 | 57 | img_l = self.loader(os.path.join(self.root, self.nameLs[index])) 58 | img_r = self.loader(os.path.join(self.root, self.nameRs[index])) 59 | imglist = [img_l, cv2.flip(img_l, 1), img_r, cv2.flip(img_r, 1)] 60 | 61 | if self.transform is not None: 62 | for i in range(len(imglist)): 63 | imglist[i] = self.transform(imglist[i]) 64 | 65 | imgs = imglist 66 | return imgs 67 | else: 68 | imgs = [torch.from_numpy(i) for i in imglist] 69 | return imgs 70 | 71 | def __len__(self): 72 | return len(self.nameLs) 73 | 74 | 75 | if __name__ == '__main__': 76 | root = '/media/sda/AgeDB-30/agedb30_align_112' 77 | file_list = '/media/sda/AgeDB-30/agedb_30_pair.txt' 78 | 79 | transform = transforms.Compose([ 80 | transforms.ToTensor(), # range [0, 255] -> [0.0,1.0] 81 | transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)) # range [0.0, 1.0] -> [-1.0,1.0] 82 | ]) 83 | 84 | dataset = AgeDB30(root, file_list, transform=transform) 85 | trainloader = data.DataLoader(dataset, batch_size=64, shuffle=False, num_workers=2, drop_last=False) 86 | for data in trainloader: 87 | for d in data: 88 | print(d[0].shape) -------------------------------------------------------------------------------- /dataset/cfp.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # encoding: utf-8 3 | ''' 4 | @author: wujiyang 5 | @contact: wujiyang@hust.edu.cn 6 | @file: cfp.py 7 | @time: 2018/12/26 16:19 8 | @desc: the CFP-FP test dataset loader, it's similar with lfw and adedb, except that it has 700 pairs every fold 9 | ''' 10 | 11 | 12 | import numpy as np 13 | import cv2 14 | import os 15 | import torch.utils.data as data 16 | 17 | import torch 18 | import torchvision.transforms as transforms 19 | 20 | def img_loader(path): 21 | try: 22 | with open(path, 'rb') as f: 23 | img = cv2.imread(path) 24 | if len(img.shape) == 2: 25 | img = np.stack([img] * 3, 2) 26 | return img 27 | except IOError: 28 | print('Cannot load image ' + path) 29 | 30 | class CFP_FP(data.Dataset): 31 | def __init__(self, root, file_list, transform=None, loader=img_loader): 32 | 33 | self.root = root 34 | self.file_list = file_list 35 | self.transform = transform 36 | self.loader = loader 37 | self.nameLs = [] 38 | self.nameRs = [] 39 | self.folds = [] 40 | self.flags = [] 41 | 42 | with open(file_list) as f: 43 | pairs = f.read().splitlines() 44 | for i, p in enumerate(pairs): 45 | p = p.split(' ') 46 | nameL = p[0] 47 | nameR = p[1] 48 | fold = i // 700 49 | flag = int(p[2]) 50 | 51 | self.nameLs.append(nameL) 52 | self.nameRs.append(nameR) 53 | self.folds.append(fold) 54 | self.flags.append(flag) 55 | 56 | def __getitem__(self, index): 57 | 58 | img_l = self.loader(os.path.join(self.root, self.nameLs[index])) 59 | img_r = self.loader(os.path.join(self.root, self.nameRs[index])) 60 | imglist = [img_l, cv2.flip(img_l, 1), img_r, cv2.flip(img_r, 1)] 61 | 62 | if self.transform is not None: 63 | for i in range(len(imglist)): 64 | imglist[i] = self.transform(imglist[i]) 65 | 66 | imgs = imglist 67 | return imgs 68 | else: 69 | imgs = [torch.from_numpy(i) for i in imglist] 70 | return imgs 71 | 72 | def __len__(self): 73 | return len(self.nameLs) 74 | 75 | 76 | if __name__ == '__main__': 77 | root = '/media/sda/CFP-FP/CFP_FP_aligned_112' 78 | file_list = '/media/sda/CFP-FP/cfp-fp-pair.txt' 79 | 80 | transform = transforms.Compose([ 81 | transforms.ToTensor(), # 
range [0, 255] -> [0.0,1.0] 82 | transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)) # range [0.0, 1.0] -> [-1.0,1.0] 83 | ]) 84 | 85 | dataset = CFP_FP(root, file_list, transform=transform) 86 | trainloader = data.DataLoader(dataset, batch_size=64, shuffle=False, num_workers=2, drop_last=False) 87 | for data in trainloader: 88 | for d in data: 89 | print(d[0].shape) -------------------------------------------------------------------------------- /attack/functions.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import cv2 4 | import numpy as np 5 | import torch 6 | from torch.nn import DataParallel 7 | 8 | from attack_dataset import AttackDataset 9 | from backbone import mobilefacenet, cbam, attention 10 | from util import get_log_filename 11 | 12 | device = 'cuda' 13 | 14 | 15 | def loadModel(args, idx): 16 | if args.backbone_net[idx] == 'MobileFace': 17 | net = mobilefacenet.MobileFaceNet() 18 | elif args.backbone_net[idx] == 'CBAM_50': 19 | net = cbam.CBAMResNet(50, feature_dim=args.feature_dim[idx], mode='ir') 20 | elif args.backbone_net[idx] == 'CBAM_50_SE': 21 | net = cbam.CBAMResNet(50, feature_dim=args.feature_dim[idx], mode='ir_se') 22 | elif args.backbone_net[idx] == 'CBAM_100': 23 | net = cbam.CBAMResNet(100, feature_dim=args.feature_dim[idx], mode='ir') 24 | elif args.backbone_net[idx] == 'CBAM_100_SE': 25 | net = cbam.CBAMResNet(100, feature_dim=args.feature_dim[idx], mode='ir_se') 26 | elif args.backbone_net[idx] == 'CBAM_152': 27 | net = cbam.CBAMResNet(152, feature_dim=args.feature_dim[idx], mode='ir') 28 | elif args.backbone_net[idx] == 'CBAM_152_SE': 29 | net = cbam.CBAMResNet(152, feature_dim=args.feature_dim[idx], mode='ir_se') 30 | elif args.backbone_net[idx] == 'Attention_56': 31 | net = attention.ResidualAttentionNet_56(feature_dim=args.feature_dim[idx]) 32 | else: 33 | net = None 34 | print(args.backbone_net[idx], ' is not available!') 35 | assert 1 == 0 36 | 37 | # gpu init 38 | multi_gpus = False 39 | # if len(args.gpus.split(',')) > 1: 40 | # multi_gpus = True 41 | # os.environ['CUDA_VISIBLE_DEVICES'] = args.gpus 42 | # device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') 43 | 44 | net.load_state_dict(torch.load(args.resume[idx])['net_state_dict']) 45 | 46 | if multi_gpus: 47 | net = DataParallel(net).to(device) 48 | else: 49 | net = net.to(device) 50 | 51 | return net.eval() 52 | 53 | 54 | def get_dist(args, load_attack_masks=False): 55 | attack_dataset = AttackDataset(args.root, args.dev_path, args.features_path[0], args.flip_features_path[0], True) 56 | if load_attack_masks: 57 | AttackDataset.init_attack_masks(args.masks_size, args.pt_x, args.pt_y) 58 | log_filename = get_log_filename(args) 59 | path = "{}/{}.pth".format(args.masks_dir, log_filename) 60 | attack_dataset.load_attack_masks(path) 61 | 62 | dists = [] 63 | with torch.no_grad(): 64 | for i, (img, img_after_attack) in enumerate(attack_dataset): 65 | dist = torch.mean(torch.sqrt(torch.sum(torch.pow(img_after_attack - img, 2), dim=(0,)))) 66 | dists.append(dist.cpu().numpy()) 67 | 68 | return np.mean(dists) 69 | 70 | 71 | def get_dist_from_images(root_dir, output_dir): 72 | dists = [] 73 | for filename in sorted(os.listdir(root_dir)): 74 | img = cv2.imread("{}/{}".format(root_dir, filename)).astype(np.float32) 75 | img_a = cv2.imread("{}/{}".format(output_dir, filename)).astype(np.float32) 76 | 77 | dist = np.mean(np.sqrt(np.sum(np.power((img_a - img), 2), axis=-1))) 78 | dists.append(dist) 79 | 80 | return 
np.mean(dists) 81 | -------------------------------------------------------------------------------- /dataset/lfw.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # encoding: utf-8 3 | ''' 4 | @author: wujiyang 5 | @contact: wujiyang@hust.edu.cn 6 | @file: lfw.py.py 7 | @time: 2018/12/22 10:00 8 | @desc: lfw dataset loader 9 | ''' 10 | 11 | import numpy as np 12 | import cv2 13 | import os 14 | import torch.utils.data as data 15 | 16 | import torch 17 | import torchvision.transforms as transforms 18 | 19 | def img_loader(path): 20 | try: 21 | with open(path, 'rb') as f: 22 | img = cv2.imread(path) 23 | if len(img.shape) == 2: 24 | img = np.stack([img] * 3, 2) 25 | return img 26 | except IOError: 27 | print('Cannot load image ' + path) 28 | 29 | class LFW(data.Dataset): 30 | def __init__(self, root, file_list, transform=None, loader=img_loader): 31 | 32 | self.root = root 33 | self.file_list = file_list 34 | self.transform = transform 35 | self.loader = loader 36 | self.nameLs = [] 37 | self.nameRs = [] 38 | self.folds = [] 39 | self.flags = [] 40 | 41 | with open(file_list) as f: 42 | pairs = f.read().splitlines()[1:] 43 | for i, p in enumerate(pairs): 44 | p = p.split('\t') 45 | if len(p) == 3: 46 | nameL = p[0] + '/' + p[0] + '_' + '{:04}.jpg'.format(int(p[1])) 47 | nameR = p[0] + '/' + p[0] + '_' + '{:04}.jpg'.format(int(p[2])) 48 | fold = i // 600 49 | flag = 1 50 | elif len(p) == 4: 51 | nameL = p[0] + '/' + p[0] + '_' + '{:04}.jpg'.format(int(p[1])) 52 | nameR = p[2] + '/' + p[2] + '_' + '{:04}.jpg'.format(int(p[3])) 53 | fold = i // 600 54 | flag = -1 55 | self.nameLs.append(nameL) 56 | self.nameRs.append(nameR) 57 | self.folds.append(fold) 58 | self.flags.append(flag) 59 | 60 | def __getitem__(self, index): 61 | 62 | img_l = self.loader(os.path.join(self.root, self.nameLs[index])) 63 | img_r = self.loader(os.path.join(self.root, self.nameRs[index])) 64 | imglist = [img_l, cv2.flip(img_l, 1), img_r, cv2.flip(img_r, 1)] 65 | 66 | if self.transform is not None: 67 | for i in range(len(imglist)): 68 | imglist[i] = self.transform(imglist[i]) 69 | 70 | imgs = imglist 71 | return imgs 72 | else: 73 | imgs = [torch.from_numpy(i) for i in imglist] 74 | return imgs 75 | 76 | def __len__(self): 77 | return len(self.nameLs) 78 | 79 | 80 | if __name__ == '__main__': 81 | root = 'D:/data/lfw_align_112' 82 | file_list = 'D:/data/pairs.txt' 83 | 84 | transform = transforms.Compose([ 85 | transforms.ToTensor(), # range [0, 255] -> [0.0,1.0] 86 | transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)) # range [0.0, 1.0] -> [-1.0,1.0] 87 | ]) 88 | 89 | dataset = LFW(root, file_list, transform=transform) 90 | #dataset = LFW(root, file_list) 91 | trainloader = data.DataLoader(dataset, batch_size=64, shuffle=False, num_workers=2, drop_last=False) 92 | print(len(dataset)) 93 | for data in trainloader: 94 | for d in data: 95 | print(d[0].shape) -------------------------------------------------------------------------------- /wu/eval_deepglint_merge.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # encoding: utf-8 3 | ''' 4 | @author: wujiyang 5 | @contact: wujiyang@hust.edu.cn 6 | @file: eval_deepglint_merge.py.py 7 | @time: 2019/3/21 11:09 8 | @desc: merge the feature of deepglint test data to one file. original deepglint feature is generated by the protocol of megaface. 
9 | ''' 10 | 11 | """ 12 | We use the same format as Megaface(http://megaface.cs.washington.edu) 13 | except that we merge all files into a single binary file. 14 | 15 | for examples: 16 | 17 | when megaface: N * (512, 1) 18 | while deepglint:(N, 512) 19 | 20 | """ 21 | import struct 22 | import numpy as np 23 | import sys, os 24 | import argparse 25 | 26 | cv_type_to_dtype = { 27 | 5: np.dtype('float32') 28 | } 29 | 30 | dtype_to_cv_type = {v: k for k, v in cv_type_to_dtype.items()} 31 | 32 | 33 | def write_mat(f, m): 34 | """Write mat m to file f""" 35 | if len(m.shape) == 1: 36 | rows = m.shape[0] 37 | cols = 1 38 | else: 39 | rows, cols = m.shape 40 | header = struct.pack('iiii', rows, cols, cols * 4, dtype_to_cv_type[m.dtype]) 41 | f.write(header) 42 | f.write(m.data) 43 | 44 | 45 | def read_mat(f): 46 | """ 47 | Reads an OpenCV mat from the given file opened in binary mode 48 | """ 49 | rows, cols, stride, type_ = struct.unpack('iiii', f.read(4 * 4)) 50 | mat = np.fromstring(f.read(rows * stride), dtype=cv_type_to_dtype[type_]) 51 | return mat.reshape(rows, cols) 52 | 53 | 54 | def load_mat(filename): 55 | """ 56 | Reads a OpenCV Mat from the given filename 57 | """ 58 | return read_mat(open(filename, 'rb')) 59 | 60 | 61 | def save_mat(filename, m): 62 | """Saves mat m to the given filename""" 63 | return write_mat(open(filename, 'wb'), m) 64 | 65 | 66 | 67 | def main(args): 68 | 69 | deepglint_features = args.deepglint_features_path 70 | # merge all features into one file 71 | total_feature = [] 72 | total_files = [] 73 | for root, dirs, files in os.walk(deepglint_features): 74 | for file in files: 75 | filename = os.path.join(root, file) 76 | ext = os.path.splitext(filename)[1] 77 | ext = ext.lower() 78 | if ext in ('.feat'): 79 | total_files.append(filename) 80 | 81 | assert len(total_files) == 1862120 82 | total_files.sort() # important 83 | 84 | for i in range(len(total_files)): 85 | filename = total_files[i] 86 | tmp_feature = load_mat(filename) 87 | # print(filename) 88 | # print(tmp_feature.shape) 89 | tmp_feature = tmp_feature.T 90 | total_feature.append(tmp_feature) 91 | print(i + 1, tmp_feature.shape) 92 | # write_mat(feature_path_out, feature_fusion) 93 | 94 | print('total feature number: ', len(total_feature)) 95 | total_feature = np.array(total_feature).squeeze() 96 | print(total_feature.shape, total_feature.dtype, type(total_feature)) 97 | save_mat('deepglint_test_feature.bin', total_feature) 98 | 99 | 100 | if __name__ == '__main__': 101 | parser = argparse.ArgumentParser() 102 | parser.add_argument("--deepglint_features_path", type=str, default="/home/wujiyang/deepglint/deepglint_feature_ir+ws/") 103 | args = parser.parse_args() 104 | 105 | main(args) 106 | -------------------------------------------------------------------------------- /backbone/spherenet.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # encoding: utf-8 3 | ''' 4 | @author: wujiyang 5 | @contact: wujiyang@hust.edu.cn 6 | @file: spherenet.py 7 | @time: 2018/12/26 10:14 8 | @desc: A 64 layer residual network struture used in sphereface and cosface, for fast convergence, I add BN after every Conv layer. 
9 | ''' 10 | 11 | import torch 12 | import torch.nn as nn 13 | 14 | class Block(nn.Module): 15 | def __init__(self, channels): 16 | super(Block, self).__init__() 17 | self.conv1 = nn.Conv2d(channels, channels, 3, 1, 1, bias=False) 18 | self.bn1 = nn.BatchNorm2d(channels) 19 | self.prelu1 = nn.PReLU(channels) 20 | self.conv2 = nn.Conv2d(channels, channels, 3, 1, 1, bias=False) 21 | self.bn2 = nn.BatchNorm2d(channels) 22 | self.prelu2 = nn.PReLU(channels) 23 | 24 | def forward(self, x): 25 | short_cut = x 26 | x = self.conv1(x) 27 | x = self.bn1(x) 28 | x = self.prelu1(x) 29 | x = self.conv2(x) 30 | x = self.bn2(x) 31 | x = self.prelu2(x) 32 | 33 | return x + short_cut 34 | 35 | 36 | class SphereNet(nn.Module): 37 | def __init__(self, num_layers = 20, feature_dim=512): 38 | super(SphereNet, self).__init__() 39 | assert num_layers in [20, 64], 'SphereNet num_layers should be 20 or 64' 40 | if num_layers == 20: 41 | layers = [1, 2, 4, 1] 42 | elif num_layers == 64: 43 | layers = [3, 7, 16, 3] 44 | else: 45 | raise ValueError('sphere' + str(num_layers) + " IS NOT SUPPORTED! (sphere20 or sphere64)") 46 | 47 | filter_list = [3, 64, 128, 256, 512] 48 | block = Block 49 | self.layer1 = self._make_layer(block, filter_list[0], filter_list[1], layers[0], stride=2) 50 | self.layer2 = self._make_layer(block, filter_list[1], filter_list[2], layers[1], stride=2) 51 | self.layer3 = self._make_layer(block, filter_list[2], filter_list[3], layers[2], stride=2) 52 | self.layer4 = self._make_layer(block, filter_list[3], filter_list[4], layers[3], stride=2) 53 | self.fc = nn.Linear(512 * 7 * 7, feature_dim) 54 | self.last_bn = nn.BatchNorm1d(feature_dim) 55 | 56 | for m in self.modules(): 57 | if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear): 58 | if m.bias is not None: 59 | nn.init.xavier_uniform_(m.weight) 60 | nn.init.constant_(m.bias, 0) 61 | else: 62 | nn.init.normal_(m.weight, 0, 0.01) 63 | 64 | def _make_layer(self, block, inplanes, planes, num_units, stride): 65 | layers = [] 66 | layers.append(nn.Conv2d(inplanes, planes, 3, stride, 1)) 67 | layers.append(nn.BatchNorm2d(planes)) 68 | layers.append(nn.PReLU(planes)) 69 | for i in range(num_units): 70 | layers.append(block(planes)) 71 | 72 | return nn.Sequential(*layers) 73 | 74 | 75 | def forward(self, x): 76 | x = self.layer1(x) 77 | x = self.layer2(x) 78 | x = self.layer3(x) 79 | x = self.layer4(x) 80 | 81 | x = x.view(x.size(0), -1) 82 | x = self.fc(x) 83 | x = self.last_bn(x) 84 | 85 | return x 86 | 87 | 88 | if __name__ == '__main__': 89 | input = torch.Tensor(2, 3, 112, 112) 90 | net = SphereNet(num_layers=64, feature_dim=512) 91 | 92 | out = net(input) 93 | print(out.shape) 94 | 95 | -------------------------------------------------------------------------------- /backbone/mobilefacenet.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # encoding: utf-8 3 | ''' 4 | @author: wujiyang 5 | @contact: wujiyang@hust.edu.cn 6 | @file: mobilefacenet.py 7 | @time: 2018/12/21 15:45 8 | @desc: mobilefacenet backbone 9 | ''' 10 | 11 | import torch 12 | from torch import nn 13 | import math 14 | 15 | MobileFaceNet_BottleNeck_Setting = [ 16 | # t, c , n ,s 17 | [2, 64, 5, 2], 18 | [4, 128, 1, 2], 19 | [2, 128, 6, 1], 20 | [4, 128, 1, 2], 21 | [2, 128, 2, 1] 22 | ] 23 | 24 | class BottleNeck(nn.Module): 25 | def __init__(self, inp, oup, stride, expansion): 26 | super(BottleNeck, self).__init__() 27 | self.connect = stride == 1 and inp == oup 28 | 29 | self.conv = nn.Sequential( 30 | # 1*1 
conv 31 | nn.Conv2d(inp, inp * expansion, 1, 1, 0, bias=False), 32 | nn.BatchNorm2d(inp * expansion), 33 | nn.PReLU(inp * expansion), 34 | 35 | # 3*3 depth wise conv 36 | nn.Conv2d(inp * expansion, inp * expansion, 3, stride, 1, groups=inp * expansion, bias=False), 37 | nn.BatchNorm2d(inp * expansion), 38 | nn.PReLU(inp * expansion), 39 | 40 | # 1*1 conv 41 | nn.Conv2d(inp * expansion, oup, 1, 1, 0, bias=False), 42 | nn.BatchNorm2d(oup), 43 | ) 44 | 45 | def forward(self, x): 46 | if self.connect: 47 | return x + self.conv(x) 48 | else: 49 | return self.conv(x) 50 | 51 | 52 | class ConvBlock(nn.Module): 53 | def __init__(self, inp, oup, k, s, p, dw=False, linear=False): 54 | super(ConvBlock, self).__init__() 55 | self.linear = linear 56 | if dw: 57 | self.conv = nn.Conv2d(inp, oup, k, s, p, groups=inp, bias=False) 58 | else: 59 | self.conv = nn.Conv2d(inp, oup, k, s, p, bias=False) 60 | 61 | self.bn = nn.BatchNorm2d(oup) 62 | if not linear: 63 | self.prelu = nn.PReLU(oup) 64 | 65 | def forward(self, x): 66 | x = self.conv(x) 67 | x = self.bn(x) 68 | if self.linear: 69 | return x 70 | else: 71 | return self.prelu(x) 72 | 73 | 74 | class MobileFaceNet(nn.Module): 75 | def __init__(self, feature_dim=128, bottleneck_setting=MobileFaceNet_BottleNeck_Setting): 76 | super(MobileFaceNet, self).__init__() 77 | self.conv1 = ConvBlock(3, 64, 3, 2, 1) 78 | self.dw_conv1 = ConvBlock(64, 64, 3, 1, 1, dw=True) 79 | 80 | self.cur_channel = 64 81 | block = BottleNeck 82 | self.blocks = self._make_layer(block, bottleneck_setting) 83 | 84 | self.conv2 = ConvBlock(128, 512, 1, 1, 0) 85 | self.linear7 = ConvBlock(512, 512, 7, 1, 0, dw=True, linear=True) 86 | self.linear1 = ConvBlock(512, feature_dim, 1, 1, 0, linear=True) 87 | 88 | for m in self.modules(): 89 | if isinstance(m, nn.Conv2d): 90 | n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels 91 | m.weight.data.normal_(0, math.sqrt(2. 
/ n)) 92 | elif isinstance(m, nn.BatchNorm2d): 93 | m.weight.data.fill_(1) 94 | m.bias.data.zero_() 95 | 96 | def _make_layer(self, block, setting): 97 | layers = [] 98 | for t, c, n, s in setting: 99 | for i in range(n): 100 | if i == 0: 101 | layers.append(block(self.cur_channel, c, s, t)) 102 | else: 103 | layers.append(block(self.cur_channel, c, 1, t)) 104 | self.cur_channel = c 105 | 106 | return nn.Sequential(*layers) 107 | 108 | def forward(self, x): 109 | x = self.conv1(x) 110 | x = self.dw_conv1(x) 111 | x = self.blocks(x) 112 | x = self.conv2(x) 113 | x = self.linear7(x) 114 | x = self.linear1(x) 115 | x = x.view(x.size(0), -1) 116 | 117 | return x 118 | 119 | 120 | if __name__ == "__main__": 121 | input = torch.Tensor(2, 3, 112, 112) 122 | net = MobileFaceNet() 123 | print(net) 124 | 125 | x = net(input) 126 | print(x.shape) -------------------------------------------------------------------------------- /attack/attack_dataset.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | import numpy as np 3 | import scipy.io 4 | import torch 5 | import torchvision.transforms as transforms 6 | from torch.utils.data.dataset import Dataset 7 | 8 | device = 'cuda' 9 | 10 | 11 | class AttackDataset(Dataset): 12 | attack_masks = None 13 | x, y, w, h = None, None, None, None 14 | 15 | def __init__(self, root, dev_path, features_path, flip_features_path, test=False): 16 | super(AttackDataset, self).__init__() 17 | 18 | self.root = root 19 | 20 | self.dev = np.loadtxt(dev_path, dtype=str, delimiter=',', skiprows=1) 21 | self.to_be_attacked = self.dev[:, 1] 22 | 23 | features = scipy.io.loadmat(features_path) 24 | self.features_query = features['features_query'] 25 | self.features_avg = features['features_avg'] 26 | 27 | flip_features = scipy.io.loadmat(flip_features_path) 28 | self.flip_features_query = flip_features['features_query'] 29 | self.flip_features_avg = flip_features['features_avg'] 30 | 31 | self.num_features = self.features_query.shape[0] 32 | 33 | self.transform = transforms.Compose([ 34 | transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)) 35 | ]) 36 | 37 | self.test = test 38 | 39 | def __getitem__(self, index): 40 | img = self.img_loader("{}/{}".format(self.root, self.to_be_attacked[index])) 41 | img = np.transpose(img, axes=[2, 0, 1]) 42 | img = torch.from_numpy(img).float().to(device) 43 | 44 | attack_mask = torch.clamp(self.attack_masks[index], -25.5, 25.5) 45 | img_after_attack = img.clone() 46 | img_after_attack[:, self.y:self.y + self.h, self.x:self.x + self.w] = \ 47 | img_after_attack[:, self.y:self.y + self.h, self.x:self.x + self.w] + attack_mask 48 | img_after_attack = torch.clamp(img_after_attack, 0, 255) 49 | 50 | if self.test: 51 | return img, img_after_attack 52 | 53 | is_flip = np.random.random() < 0.5 54 | 55 | img_t = self.transform(img_after_attack / 255) 56 | img_t = self.augment(img_t, is_flip) 57 | 58 | if is_flip: 59 | feature = self.flip_features_avg[index] 60 | else: 61 | feature = self.features_avg[index] 62 | 63 | return img_t, feature, img, img_after_attack 64 | 65 | def __len__(self): 66 | return len(self.to_be_attacked) 67 | 68 | def eval(self, features_after_attack=None): 69 | features_query = self.features_query 70 | if features_after_attack is not None: 71 | assert features_after_attack.shape == self.features_query.shape 72 | features_query = features_after_attack 73 | 74 | norm_query = np.linalg.norm(features_query, axis=1, keepdims=True) 75 | norm_avg = 
np.linalg.norm(self.features_avg, axis=1, keepdims=True) 76 | 77 | cos_dist = np.matmul(features_query, self.features_avg.T) / np.matmul(norm_query, norm_avg.T) 78 | preds = np.argmax(cos_dist, axis=1) 79 | 80 | acc = np.sum(preds == np.arange(0, self.num_features)) / self.num_features 81 | return acc, cos_dist 82 | 83 | @staticmethod 84 | def augment(img, flag_flip): 85 | if flag_flip: 86 | img = torch.flip(img, (2,)) 87 | return img 88 | 89 | @staticmethod 90 | def img_loader(path): 91 | try: 92 | img = cv2.imread(path) 93 | if len(img.shape) == 2: 94 | img = np.stack([img] * 3, 2) 95 | return img 96 | except IOError: 97 | print('Cannot load image ' + path) 98 | 99 | @staticmethod 100 | def init_attack_masks(masks_size, x, y): 101 | if AttackDataset.attack_masks is not None: 102 | return 103 | 104 | AttackDataset.x = x 105 | AttackDataset.y = y 106 | AttackDataset.w = masks_size[-1] 107 | AttackDataset.h = masks_size[-2] 108 | 109 | assert AttackDataset.x + AttackDataset.w <= 112 110 | assert AttackDataset.y + AttackDataset.h <= 112 111 | 112 | AttackDataset.attack_masks = [] 113 | for i in range(masks_size[0]): 114 | AttackDataset.attack_masks.append(torch.zeros(size=masks_size[1:], device=device, requires_grad=True)) 115 | 116 | @staticmethod 117 | def load_attack_masks(path): 118 | AttackDataset.attack_masks = torch.load(path) 119 | -------------------------------------------------------------------------------- /wu/eval_megaface.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # encoding: utf-8 3 | ''' 4 | @author: wujiyang 5 | @contact: wujiyang@hust.edu.cn 6 | @file: eval_megaface.py 7 | @time: 2018/12/24 16:28 8 | @desc: megaface feature extractor 9 | ''' 10 | import numpy as np 11 | import struct 12 | import os 13 | import torch.utils.data 14 | from backbone import mobilefacenet, cbam, self_attention 15 | from dataset.megaface import MegaFace 16 | import torchvision.transforms as transforms 17 | import argparse 18 | from torch.nn import DataParallel 19 | 20 | 21 | cv_type_to_dtype = {5: np.dtype('float32'), 6: np.dtype('float64')} 22 | dtype_to_cv_type = {v: k for k, v in cv_type_to_dtype.items()} 23 | 24 | def write_mat(filename, m): 25 | """Write mat m to file f""" 26 | if len(m.shape) == 1: 27 | rows = m.shape[0] 28 | cols = 1 29 | else: 30 | rows, cols = m.shape 31 | header = struct.pack('iiii', rows, cols, cols * 4, dtype_to_cv_type[m.dtype]) 32 | 33 | with open(filename, 'wb') as outfile: 34 | outfile.write(header) 35 | outfile.write(m.data) 36 | 37 | 38 | def read_mat(filename): 39 | """ 40 | Reads an OpenCV mat from the given file opened in binary mode 41 | """ 42 | with open(filename, 'rb') as fin: 43 | rows, cols, stride, type_ = struct.unpack('iiii', fin.read(4 * 4)) 44 | mat = np.fromstring(str(fin.read(rows * stride)), dtype=cv_type_to_dtype[type_]) 45 | return mat.reshape(rows, cols) 46 | 47 | 48 | def extract_feature(model_path, backbone_net, face_scrub_path, megaface_path, batch_size=32, gpus='0', do_norm=False): 49 | 50 | if backbone_net == 'MobileFace': 51 | net = mobilefacenet.MobileFaceNet() 52 | elif backbone_net == 'CBAM_50': 53 | net = cbam.CBAMResNet(50, feature_dim=args.feature_dim, mode='ir') 54 | elif backbone_net == 'CBAM_50_SE': 55 | net = cbam.CBAMResNet(50, feature_dim=args.feature_dim, mode='ir_se') 56 | elif backbone_net == 'CBAM_100': 57 | net = cbam.CBAMResNet(100, feature_dim=args.feature_dim, mode='ir') 58 | elif backbone_net == 'CBAM_100_SE': 59 | net = cbam.CBAMResNet(100, 
feature_dim=args.feature_dim, mode='ir_se') 60 | else: 61 | print(args.backbone, ' is not available!') 62 | 63 | multi_gpus = False 64 | if len(gpus.split(',')) > 1: 65 | multi_gpus = True 66 | os.environ['CUDA_VISIBLE_DEVICES'] = gpus 67 | device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') 68 | 69 | net.load_state_dict(torch.load(model_path)['net_state_dict']) 70 | if multi_gpus: 71 | net = DataParallel(net).to(device) 72 | else: 73 | net = net.to(device) 74 | net.eval() 75 | 76 | transform = transforms.Compose([ 77 | transforms.ToTensor(), # range [0, 255] -> [0.0,1.0] 78 | transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)) # range [0.0, 1.0] -> [-1.0,1.0] 79 | ]) 80 | megaface_dataset = MegaFace(face_scrub_path, megaface_path, transform=transform) 81 | megaface_loader = torch.utils.data.DataLoader(megaface_dataset, batch_size=batch_size, 82 | shuffle=False, num_workers=12, drop_last=False) 83 | 84 | for data in megaface_loader: 85 | img, img_path= data[0].to(device), data[1] 86 | with torch.no_grad(): 87 | output = net(img).data.cpu().numpy() 88 | 89 | if do_norm is False: 90 | for i in range(len(img_path)): 91 | abs_path = img_path[i] + '.feat' 92 | write_mat(abs_path, output[i]) 93 | print('extract 1 batch...without feature normalization') 94 | else: 95 | for i in range(len(img_path)): 96 | abs_path = img_path[i] + '.feat' 97 | feat = output[i] 98 | feat = feat / np.sqrt((np.dot(feat, feat))) 99 | write_mat(abs_path, feat) 100 | print('extract 1 batch...with feature normalization') 101 | print('all images have been processed!') 102 | 103 | 104 | if __name__ == '__main__': 105 | parser = argparse.ArgumentParser(description='Testing') 106 | parser.add_argument('--model_path', type=str, default='./model/RES100_RES100_IR_20190423_100728/Iter_333000_net.ckpt', help='The path of trained model') 107 | parser.add_argument('--backbone_net', type=str, default='CBAM_100', help='MobileFace, CBAM_50, CBAM_50_SE, CBAM_100, CBAM_100_SE') 108 | parser.add_argument('--facescrub_dir', type=str, default='/media/sda/megaface_test_kit/facescrub_align_112/', help='facescrub data') 109 | parser.add_argument('--megaface_dir', type=str, default='/media/sda/megaface_test_kit/megaface_align_112/', help='megaface data') 110 | parser.add_argument('--batch_size', type=int, default=1024, help='batch size') 111 | parser.add_argument('--feature_dim', type=int, default=512, help='feature dimension') 112 | parser.add_argument('--gpus', type=str, default='0,1,2,3', help='gpu list') 113 | parser.add_argument("--do_norm", type=int, default=1, help="1 if normalize feature, 0 do nothing(Default case)") 114 | args = parser.parse_args() 115 | 116 | extract_feature(args.model_path, args.backbone_net, args.facescrub_dir, args.megaface_dir, args.batch_size, args.gpus, args.do_norm) -------------------------------------------------------------------------------- /utils/plot_theta.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # encoding: utf-8 3 | ''' 4 | @author: wujiyang 5 | @contact: wujiyang@hust.edu.cn 6 | @file: plot_theta.py 7 | @time: 2019/1/2 19:08 8 | @desc: plot theta distribution between weight and feature vector 9 | ''' 10 | 11 | from matplotlib import pyplot as plt 12 | plt.switch_backend('agg') 13 | 14 | import argparse 15 | from backbone.mobilefacenet import MobileFaceNet 16 | from margin.ArcMarginProduct import ArcMarginProduct 17 | from torch.utils.data import DataLoader 18 | import torch 19 | 20 | from 
torchvision import transforms 21 | import torch.nn.functional as F 22 | import os 23 | import numpy as np 24 | from dataset.casia_webface import CASIAWebFace 25 | 26 | 27 | def get_train_loader(img_folder, filelist): 28 | print('Loading dataset...') 29 | transform = transforms.Compose([ 30 | transforms.ToTensor(), 31 | transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)) 32 | ]) 33 | trainset = CASIAWebFace(img_folder, filelist, transform=transform) 34 | trainloader = torch.utils.data.DataLoader(trainset, batch_size=100, 35 | shuffle=False, num_workers=8, drop_last=False) 36 | return trainloader 37 | 38 | def load_model(backbone_state_dict, margin_state_dict, device): 39 | 40 | # load model 41 | net = MobileFaceNet() 42 | net.load_state_dict(torch.load(backbone_state_dict)['net_state_dict']) 43 | margin = ArcMarginProduct(in_feature=128, out_feature=10575) 44 | margin.load_state_dict(torch.load(margin_state_dict)['net_state_dict']) 45 | 46 | net = net.to(device) 47 | margin = margin.to(device) 48 | 49 | return net.eval(), margin.eval() 50 | 51 | 52 | if __name__ == '__main__': 53 | parser = argparse.ArgumentParser(description='plot theta distribution of trained model') 54 | parser.add_argument('--img_root', type=str, default='/media/ramdisk/webface_align_112', help='train image root') 55 | parser.add_argument('--file_list', type=str, default='/media/ramdisk/webface_align_train.list', help='train list') 56 | parser.add_argument('--backbone_file', type=str, default='../model/Paper_MOBILEFACE_20190103_111830/Iter_088000_net.ckpt', help='backbone state dict file') 57 | parser.add_argument('--margin_file', type=str, default='../model/Paper_MOBILEFACE_20190103_111830/Iter_088000_margin.ckpt', help='backbone state dict file') 58 | parser.add_argument('--gpus', type=str, default='0', help='model prefix, single gpu only') 59 | args = parser.parse_args() 60 | 61 | # gpu init 62 | os.environ['CUDA_VISIBLE_DEVICES'] = args.gpus 63 | device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') 64 | 65 | # load pretrain model 66 | trained_net, trained_margin = load_model(args.backbone_file, args.margin_file, device) 67 | 68 | # initial model 69 | initial_net = MobileFaceNet() 70 | initial_margin = ArcMarginProduct() 71 | initial_net = initial_net.to(device).eval() 72 | initial_margin = initial_margin.to(device).eval() 73 | 74 | # image dataloader 75 | image_loader = get_train_loader(args.img_root, args.file_list) 76 | theta_trained = [] 77 | theta_initial = [] 78 | for data in image_loader: 79 | img, label = data[0].to(device), data[1].to(device) 80 | # pretrained 81 | embedding = trained_net(img) 82 | cos_theta = F.linear(F.normalize(embedding), F.normalize(trained_margin.weight)) 83 | cos_theta = cos_theta.clamp(-1, 1).detach().cpu().numpy() 84 | for i in range(img.shape[0]): 85 | cos_trget = cos_theta[i][label[i]] 86 | theta_trained.append(np.arccos(cos_trget) / np.pi * 180) 87 | # initial 88 | embedding = initial_net(img) 89 | cos_theta = F.linear(F.normalize(embedding), F.normalize(initial_margin.weight)) 90 | cos_theta = cos_theta.clamp(-1, 1).detach().cpu().numpy() 91 | for i in range(img.shape[0]): 92 | cos_trget = cos_theta[i][label[i]] 93 | theta_initial.append(np.arccos(cos_trget) / np.pi * 180) 94 | ''' 95 | # write theta list to txt file 96 | trained_theta_file = open('arcface_theta.txt', 'w') 97 | initial_theta_file = open('initial_theta.txt', 'w') 98 | for item in theta_trained: 99 | trained_theta_file.write(str(item)) 100 | trained_theta_file.write('\n') 101 | for 
item in theta_initial: 102 | initial_theta_file.write(str(item)) 103 | initial_theta_file.write('\n') 104 | 105 | # plot the theta, read theta from txt first 106 | theta_trained = [] 107 | theta_initial = [] 108 | trained_theta_file = open('arcface_theta.txt', 'r') 109 | initial_theta_file = open('initial_theta.txt', 'r') 110 | lines = trained_theta_file.readlines() 111 | for line in lines: 112 | theta_trained.append(float(line.strip('\n')[0])) 113 | lines = initial_theta_file.readlines() 114 | for line in lines: 115 | theta_initial.append(float(line.split('\n')[0])) 116 | ''' 117 | print(len(theta_trained), len(theta_initial)) 118 | plt.figure() 119 | plt.xlabel('Theta') 120 | plt.ylabel('Numbers') 121 | plt.title('Theta Distribution') 122 | plt.hist(theta_trained, bins=180, normed=0) 123 | plt.hist(theta_initial, bins=180, normed=0) 124 | plt.legend(['trained theta distribution', 'initial theta distribution']) 125 | plt.savefig('theta_distribution_hist.jpg') 126 | -------------------------------------------------------------------------------- /wu/eval_lfw.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # encoding: utf-8 3 | ''' 4 | @author: wujiyang 5 | @contact: wujiyang@hust.edu.cn 6 | @file: eval_lfw.py 7 | @time: 2018/12/22 9:47 8 | @desc: 9 | ''' 10 | 11 | import numpy as np 12 | import scipy.io 13 | import os 14 | import json 15 | import torch.utils.data 16 | from backbone import mobilefacenet, resnet, arcfacenet, cbam 17 | from dataset.lfw import LFW 18 | import torchvision.transforms as transforms 19 | from torch.nn import DataParallel 20 | import argparse 21 | 22 | def getAccuracy(scores, flags, threshold): 23 | p = np.sum(scores[flags == 1] > threshold) 24 | n = np.sum(scores[flags == -1] < threshold) 25 | return 1.0 * (p + n) / len(scores) 26 | 27 | def getThreshold(scores, flags, thrNum): 28 | accuracys = np.zeros((2 * thrNum + 1, 1)) 29 | thresholds = np.arange(-thrNum, thrNum + 1) * 1.0 / thrNum 30 | for i in range(2 * thrNum + 1): 31 | accuracys[i] = getAccuracy(scores, flags, thresholds[i]) 32 | max_index = np.squeeze(accuracys == np.max(accuracys)) 33 | bestThreshold = np.mean(thresholds[max_index]) 34 | return bestThreshold 35 | 36 | def evaluation_10_fold(feature_path='./result/cur_epoch_result.mat'): 37 | ACCs = np.zeros(10) 38 | result = scipy.io.loadmat(feature_path) 39 | 40 | for i in range(10): 41 | fold = result['fold'] 42 | flags = result['flag'] 43 | featureLs = result['fl'] 44 | featureRs = result['fr'] 45 | 46 | valFold = fold != i 47 | testFold = fold == i 48 | flags = np.squeeze(flags) 49 | 50 | mu = np.mean(np.concatenate((featureLs[valFold[0], :], featureRs[valFold[0], :]), 0), 0) 51 | mu = np.expand_dims(mu, 0) 52 | featureLs = featureLs - mu 53 | featureRs = featureRs - mu 54 | featureLs = featureLs / np.expand_dims(np.sqrt(np.sum(np.power(featureLs, 2), 1)), 1) 55 | featureRs = featureRs / np.expand_dims(np.sqrt(np.sum(np.power(featureRs, 2), 1)), 1) 56 | 57 | scores = np.sum(np.multiply(featureLs, featureRs), 1) 58 | threshold = getThreshold(scores[valFold[0]], flags[valFold[0]], 10000) 59 | ACCs[i] = getAccuracy(scores[testFold[0]], flags[testFold[0]], threshold) 60 | 61 | return ACCs 62 | 63 | def loadModel(data_root, file_list, backbone_net, gpus='0', resume=None): 64 | 65 | if backbone_net == 'MobileFace': 66 | net = mobilefacenet.MobileFaceNet() 67 | elif backbone_net == 'CBAM_50': 68 | net = cbam.CBAMResNet(50, feature_dim=args.feature_dim, mode='ir') 69 | elif backbone_net == 
'CBAM_50_SE': 70 | net = cbam.CBAMResNet(50, feature_dim=args.feature_dim, mode='ir_se') 71 | elif backbone_net == 'CBAM_100': 72 | net = cbam.CBAMResNet(100, feature_dim=args.feature_dim, mode='ir') 73 | elif backbone_net == 'CBAM_100_SE': 74 | net = cbam.CBAMResNet(100, feature_dim=args.feature_dim, mode='ir_se') 75 | else: 76 | print(backbone_net, ' is not available!') 77 | 78 | # gpu init 79 | multi_gpus = False 80 | if len(gpus.split(',')) > 1: 81 | multi_gpus = True 82 | os.environ['CUDA_VISIBLE_DEVICES'] = gpus 83 | device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') 84 | 85 | net.load_state_dict(torch.load(resume)['net_state_dict']) 86 | 87 | if multi_gpus: 88 | net = DataParallel(net).to(device) 89 | else: 90 | net = net.to(device) 91 | 92 | transform = transforms.Compose([ 93 | transforms.ToTensor(), # range [0, 255] -> [0.0,1.0] 94 | transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)) # range [0.0, 1.0] -> [-1.0,1.0] 95 | ]) 96 | lfw_dataset = LFW(data_root, file_list, transform=transform) 97 | lfw_loader = torch.utils.data.DataLoader(lfw_dataset, batch_size=128, 98 | shuffle=False, num_workers=2, drop_last=False) 99 | 100 | return net.eval(), device, lfw_dataset, lfw_loader 101 | 102 | def getFeatureFromTorch(feature_save_dir, net, device, data_set, data_loader): 103 | featureLs = None 104 | featureRs = None 105 | count = 0 106 | for data in data_loader: 107 | for i in range(len(data)): 108 | data[i] = data[i].to(device) 109 | count += data[0].size(0) 110 | #print('extracing deep features from the face pair {}...'.format(count)) 111 | with torch.no_grad(): 112 | res = [net(d).data.cpu().numpy() for d in data] 113 | featureL = np.concatenate((res[0], res[1]), 1) 114 | featureR = np.concatenate((res[2], res[3]), 1) 115 | # print(featureL.shape, featureR.shape) 116 | if featureLs is None: 117 | featureLs = featureL 118 | else: 119 | featureLs = np.concatenate((featureLs, featureL), 0) 120 | if featureRs is None: 121 | featureRs = featureR 122 | else: 123 | featureRs = np.concatenate((featureRs, featureR), 0) 124 | # print(featureLs.shape, featureRs.shape) 125 | 126 | result = {'fl': featureLs, 'fr': featureRs, 'fold': data_set.folds, 'flag': data_set.flags} 127 | scipy.io.savemat(feature_save_dir, result) 128 | 129 | if __name__ == '__main__': 130 | parser = argparse.ArgumentParser(description='Testing') 131 | parser.add_argument('--root', type=str, default='/data/FaceRecognition/LFW/lfw_align_112', help='The path of lfw data') 132 | parser.add_argument('--file_list', type=str, default='/data/FaceRecognition/LFW/pairs.txt', help='The path of lfw data') 133 | parser.add_argument('--backbone_net', type=str, default='MobileFace', help='MobileFace, CBAM_50, CBAM_50_SE, CBAM_100, CBAM_100_SE') 134 | parser.add_argument('--feature_dim', type=int, default=128, help='feature dimension') 135 | parser.add_argument('--resume', type=str, default='model/Mobile_MOBILEFACE_20190813_112144/Iter_054000_net.ckpt',help='The path pf save model') 136 | parser.add_argument('--feature_save_path', type=str, default='result/cur_epoch_lfw_result.mat',help='The path of the extract features save, must be .mat file') 137 | parser.add_argument('--gpus', type=str, default='0', help='gpu list') 138 | args = parser.parse_args() 139 | 140 | net, device, lfw_dataset, lfw_loader = loadModel(args.root, args.file_list, args.backbone_net, args.gpus, args.resume) 141 | getFeatureFromTorch(args.feature_save_path, net, device, lfw_dataset, lfw_loader) 142 | ACCs = 
evaluation_10_fold(args.feature_save_path) 143 | for i in range(len(ACCs)): 144 | print('{} {:.2f}'.format(i+1, ACCs[i] * 100)) 145 | print('--------') 146 | print('AVE {:.4f}'.format(np.mean(ACCs) * 100)) 147 | -------------------------------------------------------------------------------- /wu/eval_cfp.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # encoding: utf-8 3 | ''' 4 | @author: wujiyang 5 | @contact: wujiyang@hust.edu.cn 6 | @file: eval_cfp.py 7 | @time: 2018/12/26 16:23 8 | @desc: this code is very similar with eval_lfw.py and eval_agedb30.py 9 | ''' 10 | 11 | 12 | import numpy as np 13 | import scipy.io 14 | import os 15 | import torch.utils.data 16 | from backbone import mobilefacenet, resnet, arcfacenet, cbam 17 | from dataset.cfp import CFP_FP 18 | import torchvision.transforms as transforms 19 | from torch.nn import DataParallel 20 | import argparse 21 | 22 | def getAccuracy(scores, flags, threshold): 23 | p = np.sum(scores[flags == 1] > threshold) 24 | n = np.sum(scores[flags == -1] < threshold) 25 | return 1.0 * (p + n) / len(scores) 26 | 27 | def getThreshold(scores, flags, thrNum): 28 | accuracys = np.zeros((2 * thrNum + 1, 1)) 29 | thresholds = np.arange(-thrNum, thrNum + 1) * 1.0 / thrNum 30 | for i in range(2 * thrNum + 1): 31 | accuracys[i] = getAccuracy(scores, flags, thresholds[i]) 32 | max_index = np.squeeze(accuracys == np.max(accuracys)) 33 | bestThreshold = np.mean(thresholds[max_index]) 34 | return bestThreshold 35 | 36 | def evaluation_10_fold(feature_path='./result/cur_epoch_cfp_result.mat'): 37 | ACCs = np.zeros(10) 38 | result = scipy.io.loadmat(feature_path) 39 | for i in range(10): 40 | fold = result['fold'] 41 | flags = result['flag'] 42 | featureLs = result['fl'] 43 | featureRs = result['fr'] 44 | 45 | valFold = fold != i 46 | testFold = fold == i 47 | flags = np.squeeze(flags) 48 | 49 | mu = np.mean(np.concatenate((featureLs[valFold[0], :], featureRs[valFold[0], :]), 0), 0) 50 | mu = np.expand_dims(mu, 0) 51 | featureLs = featureLs - mu 52 | featureRs = featureRs - mu 53 | featureLs = featureLs / np.expand_dims(np.sqrt(np.sum(np.power(featureLs, 2), 1)), 1) 54 | featureRs = featureRs / np.expand_dims(np.sqrt(np.sum(np.power(featureRs, 2), 1)), 1) 55 | 56 | scores = np.sum(np.multiply(featureLs, featureRs), 1) 57 | threshold = getThreshold(scores[valFold[0]], flags[valFold[0]], 10000) 58 | ACCs[i] = getAccuracy(scores[testFold[0]], flags[testFold[0]], threshold) 59 | 60 | return ACCs 61 | 62 | def loadModel(data_root, file_list, backbone_net, gpus='0', resume=None): 63 | 64 | if backbone_net == 'MobileFace': 65 | net = mobilefacenet.MobileFaceNet() 66 | elif backbone_net == 'CBAM_50': 67 | net = cbam.CBAMResNet(50, feature_dim=args.feature_dim, mode='ir') 68 | elif backbone_net == 'CBAM_50_SE': 69 | net = cbam.CBAMResNet(50, feature_dim=args.feature_dim, mode='ir_se') 70 | elif backbone_net == 'CBAM_100': 71 | net = cbam.CBAMResNet(100, feature_dim=args.feature_dim, mode='ir') 72 | elif backbone_net == 'CBAM_100_SE': 73 | net = cbam.CBAMResNet(100, feature_dim=args.feature_dim, mode='ir_se') 74 | else: 75 | print(backbone_net, ' is not available!') 76 | 77 | # gpu init 78 | multi_gpus = False 79 | if len(gpus.split(',')) > 1: 80 | multi_gpus = True 81 | os.environ['CUDA_VISIBLE_DEVICES'] = gpus 82 | device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') 83 | 84 | net.load_state_dict(torch.load(resume)['net_state_dict']) 85 | 86 | if multi_gpus: 87 | net = 
DataParallel(net).to(device) 88 | else: 89 | net = net.to(device) 90 | 91 | transform = transforms.Compose([ 92 | transforms.ToTensor(), # range [0, 255] -> [0.0,1.0] 93 | transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)) # range [0.0, 1.0] -> [-1.0,1.0] 94 | ]) 95 | cfp_dataset = CFP_FP(data_root, file_list, transform=transform) 96 | cfp_loader = torch.utils.data.DataLoader(cfp_dataset, batch_size=128, 97 | shuffle=False, num_workers=4, drop_last=False) 98 | 99 | return net.eval(), device, cfp_dataset, cfp_loader 100 | 101 | def getFeatureFromTorch(feature_save_dir, net, device, data_set, data_loader): 102 | featureLs = None 103 | featureRs = None 104 | count = 0 105 | for data in data_loader: 106 | for i in range(len(data)): 107 | data[i] = data[i].to(device) 108 | count += data[0].size(0) 109 | #print('extracing deep features from the face pair {}...'.format(count)) 110 | with torch.no_grad(): 111 | res = [net(d).data.cpu().numpy() for d in data] 112 | featureL = np.concatenate((res[0], res[1]), 1) 113 | featureR = np.concatenate((res[2], res[3]), 1) 114 | # print(featureL.shape, featureR.shape) 115 | if featureLs is None: 116 | featureLs = featureL 117 | else: 118 | featureLs = np.concatenate((featureLs, featureL), 0) 119 | if featureRs is None: 120 | featureRs = featureR 121 | else: 122 | featureRs = np.concatenate((featureRs, featureR), 0) 123 | # print(featureLs.shape, featureRs.shape) 124 | 125 | result = {'fl': featureLs, 'fr': featureRs, 'fold': data_set.folds, 'flag': data_set.flags} 126 | scipy.io.savemat(feature_save_dir, result) 127 | 128 | 129 | if __name__ == '__main__': 130 | parser = argparse.ArgumentParser(description='Testing') 131 | parser.add_argument('--root', type=str, default='/media/sda/CFP-FP/cfp_fp_aligned_112', help='The path of lfw data') 132 | parser.add_argument('--file_list', type=str, default='/media/sda/CFP-FP/cfp_fp_pair.txt', help='The path of lfw data') 133 | parser.add_argument('--resume', type=str, default='./model/SERES100_SERES100_IR_20190528_132635/Iter_342000_net.ckpt', help='The path pf save model') 134 | parser.add_argument('--backbone_net', type=str, default='CBAM_100_SE', help='MobileFace, CBAM_50, CBAM_50_SE, CBAM_100, CBAM_100_SE') 135 | parser.add_argument('--feature_dim', type=int, default=512, help='feature dimension') 136 | parser.add_argument('--feature_save_path', type=str, default='./result/cur_epoch_cfp_result.mat', 137 | help='The path of the extract features save, must be .mat file') 138 | parser.add_argument('--gpus', type=str, default='2,3', help='gpu list') 139 | args = parser.parse_args() 140 | 141 | net, device, agedb_dataset, agedb_loader = loadModel(args.root, args.file_list, args.backbone_net, args.gpus, args.resume) 142 | getFeatureFromTorch(args.feature_save_path, net, device, agedb_dataset, agedb_loader) 143 | ACCs = evaluation_10_fold(args.feature_save_path) 144 | for i in range(len(ACCs)): 145 | print('{} {:.2f}'.format(i + 1, ACCs[i] * 100)) 146 | print('--------') 147 | print('AVE {:.4f}'.format(np.mean(ACCs) * 100)) -------------------------------------------------------------------------------- /wu/eval_agedb30.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # encoding: utf-8 3 | ''' 4 | @author: wujiyang 5 | @contact: wujiyang@hust.edu.cn 6 | @file: eval_agedb30.py 7 | @time: 2018/12/25 19:05 8 | @desc: The AgeDB-30 test protocol is same with LFW, so I just copy the code from eval_lfw.py 9 | ''' 10 | 11 | 12 | import numpy as np 13 | 
import scipy.io 14 | import os 15 | import torch.utils.data 16 | from backbone import mobilefacenet, resnet, arcfacenet, cbam 17 | from dataset.agedb import AgeDB30 18 | import torchvision.transforms as transforms 19 | from torch.nn import DataParallel 20 | import argparse 21 | 22 | def getAccuracy(scores, flags, threshold): 23 | p = np.sum(scores[flags == 1] > threshold) 24 | n = np.sum(scores[flags == -1] < threshold) 25 | return 1.0 * (p + n) / len(scores) 26 | 27 | def getThreshold(scores, flags, thrNum): 28 | accuracys = np.zeros((2 * thrNum + 1, 1)) 29 | thresholds = np.arange(-thrNum, thrNum + 1) * 1.0 / thrNum 30 | for i in range(2 * thrNum + 1): 31 | accuracys[i] = getAccuracy(scores, flags, thresholds[i]) 32 | max_index = np.squeeze(accuracys == np.max(accuracys)) 33 | bestThreshold = np.mean(thresholds[max_index]) 34 | return bestThreshold 35 | 36 | def evaluation_10_fold(feature_path='./result/cur_epoch_agedb_result.mat'): 37 | ACCs = np.zeros(10) 38 | result = scipy.io.loadmat(feature_path) 39 | for i in range(10): 40 | fold = result['fold'] 41 | flags = result['flag'] 42 | featureLs = result['fl'] 43 | featureRs = result['fr'] 44 | 45 | valFold = fold != i 46 | testFold = fold == i 47 | flags = np.squeeze(flags) 48 | 49 | mu = np.mean(np.concatenate((featureLs[valFold[0], :], featureRs[valFold[0], :]), 0), 0) 50 | mu = np.expand_dims(mu, 0) 51 | featureLs = featureLs - mu 52 | featureRs = featureRs - mu 53 | featureLs = featureLs / np.expand_dims(np.sqrt(np.sum(np.power(featureLs, 2), 1)), 1) 54 | featureRs = featureRs / np.expand_dims(np.sqrt(np.sum(np.power(featureRs, 2), 1)), 1) 55 | 56 | scores = np.sum(np.multiply(featureLs, featureRs), 1) 57 | threshold = getThreshold(scores[valFold[0]], flags[valFold[0]], 10000) 58 | ACCs[i] = getAccuracy(scores[testFold[0]], flags[testFold[0]], threshold) 59 | 60 | return ACCs 61 | 62 | def loadModel(data_root, file_list, backbone_net, gpus='0', resume=None): 63 | 64 | if backbone_net == 'MobileFace': 65 | net = mobilefacenet.MobileFaceNet() 66 | elif backbone_net == 'CBAM_50': 67 | net = cbam.CBAMResNet(50, feature_dim=args.feature_dim, mode='ir') 68 | elif backbone_net == 'CBAM_50_SE': 69 | net = cbam.CBAMResNet(50, feature_dim=args.feature_dim, mode='ir_se') 70 | elif backbone_net == 'CBAM_100': 71 | net = cbam.CBAMResNet(100, feature_dim=args.feature_dim, mode='ir') 72 | elif backbone_net == 'CBAM_100_SE': 73 | net = cbam.CBAMResNet(100, feature_dim=args.feature_dim, mode='ir_se') 74 | else: 75 | print(backbone_net, ' is not available!') 76 | 77 | # gpu init 78 | multi_gpus = False 79 | if len(gpus.split(',')) > 1: 80 | multi_gpus = True 81 | os.environ['CUDA_VISIBLE_DEVICES'] = gpus 82 | device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') 83 | 84 | net.load_state_dict(torch.load(resume)['net_state_dict']) 85 | 86 | if multi_gpus: 87 | net = DataParallel(net).to(device) 88 | else: 89 | net = net.to(device) 90 | 91 | transform = transforms.Compose([ 92 | transforms.ToTensor(), # range [0, 255] -> [0.0,1.0] 93 | transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)) # range [0.0, 1.0] -> [-1.0,1.0] 94 | ]) 95 | agedb_dataset = AgeDB30(data_root, file_list, transform=transform) 96 | agedb_loader = torch.utils.data.DataLoader(agedb_dataset, batch_size=128, 97 | shuffle=False, num_workers=2, drop_last=False) 98 | 99 | return net.eval(), device, agedb_dataset, agedb_loader 100 | 101 | def getFeatureFromTorch(feature_save_dir, net, device, data_set, data_loader): 102 | featureLs = None 103 | featureRs = None 
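# The loop below mirrors getFeatureFromTorch in eval_lfw.py: for each test pair the
# loader yields [img_l, flip(img_l), img_r, flip(img_r)], the two embeddings of each
# side are concatenated along dim 1, so featureLs / featureRs grow to shape
# (num_pairs, 2 * feature_dim) before being written to the .mat result file together
# with the fold indices and match flags.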
104 | count = 0 105 | for data in data_loader: 106 | for i in range(len(data)): 107 | data[i] = data[i].to(device) 108 | count += data[0].size(0) 109 | #print('extracing deep features from the face pair {}...'.format(count)) 110 | with torch.no_grad(): 111 | res = [net(d).data.cpu().numpy() for d in data] 112 | featureL = np.concatenate((res[0], res[1]), 1) 113 | featureR = np.concatenate((res[2], res[3]), 1) 114 | # print(featureL.shape, featureR.shape) 115 | if featureLs is None: 116 | featureLs = featureL 117 | else: 118 | featureLs = np.concatenate((featureLs, featureL), 0) 119 | if featureRs is None: 120 | featureRs = featureR 121 | else: 122 | featureRs = np.concatenate((featureRs, featureR), 0) 123 | # print(featureLs.shape, featureRs.shape) 124 | 125 | result = {'fl': featureLs, 'fr': featureRs, 'fold': data_set.folds, 'flag': data_set.flags} 126 | scipy.io.savemat(feature_save_dir, result) 127 | 128 | 129 | if __name__ == '__main__': 130 | parser = argparse.ArgumentParser(description='Testing') 131 | parser.add_argument('--root', type=str, default='/media/sda/AgeDB-30/agedb30_align_112', help='The path of lfw data') 132 | parser.add_argument('--file_list', type=str, default='/media/sda/AgeDB-30/agedb_30_pair.txt', help='The path of lfw data') 133 | parser.add_argument('--resume', type=str, default='./model/SERES100_SERES100_IR_20190528_132635/Iter_342000_net.ckpt', help='The path pf save model') 134 | parser.add_argument('--backbone_net', type=str, default='CBAM_100_SE', help='MobileFace, CBAM_50, CBAM_50_SE, CBAM_100, CBAM_100_SE') 135 | parser.add_argument('--feature_dim', type=int, default=512, help='feature dimension') 136 | parser.add_argument('--feature_save_path', type=str, default='./result/cur_epoch_agedb_result.mat', 137 | help='The path of the extract features save, must be .mat file') 138 | parser.add_argument('--gpus', type=str, default='2,3', help='gpu list') 139 | args = parser.parse_args() 140 | 141 | net, device, agedb_dataset, agedb_loader = loadModel(args.root, args.file_list, args.backbone_net, args.gpus, args.resume) 142 | getFeatureFromTorch(args.feature_save_path, net, device, agedb_dataset, agedb_loader) 143 | ACCs = evaluation_10_fold(args.feature_save_path) 144 | for i in range(len(ACCs)): 145 | print('{} {:.2f}'.format(i + 1, ACCs[i] * 100)) 146 | print('--------') 147 | print('AVE {:.4f}'.format(np.mean(ACCs) * 100)) 148 | 149 | -------------------------------------------------------------------------------- /backbone/arcfacenet.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # encoding: utf-8 3 | ''' 4 | @author: wujiyang 5 | @contact: wujiyang@hust.edu.cn 6 | @file: arcfacenet.py 7 | @time: 2018/12/26 10:15 8 | @desc: Network structures used in the arcface paper, including ResNet50-IR, ResNet101-IR, SEResNet50-IR, SEResNet101-IR 9 | 10 | '''''' 11 | Update: This file has been deprecated, all the models build in this class have been rebuild in cbam.py 12 | Yet the code in this file still works. 
13 | ''' 14 | 15 | 16 | import torch 17 | from torch import nn 18 | from collections import namedtuple 19 | 20 | class Flatten(nn.Module): 21 | def forward(self, input): 22 | return input.view(input.size(0), -1) 23 | 24 | 25 | class SEModule(nn.Module): 26 | def __init__(self, channels, reduction): 27 | super(SEModule, self).__init__() 28 | self.avg_pool = nn.AdaptiveAvgPool2d(1) 29 | self.fc1 = nn.Conv2d(channels, channels // reduction, kernel_size=1, padding=0, bias=False) 30 | self.relu = nn.ReLU(inplace=True) 31 | self.fc2 = nn.Conv2d(channels // reduction, channels, kernel_size=1, padding=0, bias=False) 32 | self.sigmoid = nn.Sigmoid() 33 | 34 | def forward(self, x): 35 | input = x 36 | x = self.avg_pool(x) 37 | x = self.fc1(x) 38 | x = self.relu(x) 39 | x = self.fc2(x) 40 | x = self.sigmoid(x) 41 | 42 | return input * x 43 | 44 | 45 | class BottleNeck_IR(nn.Module): 46 | def __init__(self, in_channel, out_channel, stride): 47 | super(BottleNeck_IR, self).__init__() 48 | if in_channel == out_channel: 49 | self.shortcut_layer = nn.MaxPool2d(1, stride) 50 | else: 51 | self.shortcut_layer = nn.Sequential( 52 | nn.Conv2d(in_channel, out_channel, kernel_size=(1, 1), stride=stride, bias=False), 53 | nn.BatchNorm2d(out_channel) 54 | ) 55 | 56 | self.res_layer = nn.Sequential(nn.BatchNorm2d(in_channel), 57 | nn.Conv2d(in_channel, out_channel, (3, 3), 1, 1, bias=False), 58 | nn.BatchNorm2d(out_channel), 59 | nn.PReLU(out_channel), 60 | nn.Conv2d(out_channel, out_channel, (3, 3), stride, 1, bias=False), 61 | nn.BatchNorm2d(out_channel)) 62 | 63 | def forward(self, x): 64 | shortcut = self.shortcut_layer(x) 65 | res = self.res_layer(x) 66 | 67 | return shortcut + res 68 | 69 | class BottleNeck_IR_SE(nn.Module): 70 | def __init__(self, in_channel, out_channel, stride): 71 | super(BottleNeck_IR_SE, self).__init__() 72 | if in_channel == out_channel: 73 | self.shortcut_layer = nn.MaxPool2d(1, stride) 74 | else: 75 | self.shortcut_layer = nn.Sequential( 76 | nn.Conv2d(in_channel, out_channel, kernel_size=(1, 1), stride=stride, bias=False), 77 | nn.BatchNorm2d(out_channel) 78 | ) 79 | 80 | self.res_layer = nn.Sequential(nn.BatchNorm2d(in_channel), 81 | nn.Conv2d(in_channel, out_channel, (3, 3), 1, 1, bias=False), 82 | nn.BatchNorm2d(out_channel), 83 | nn.PReLU(out_channel), 84 | nn.Conv2d(out_channel, out_channel, (3, 3), stride, 1, bias=False), 85 | nn.BatchNorm2d(out_channel), 86 | SEModule(out_channel, 16)) 87 | 88 | def forward(self, x): 89 | shortcut = self.shortcut_layer(x) 90 | res = self.res_layer(x) 91 | 92 | return shortcut + res 93 | 94 | 95 | class Bottleneck(namedtuple('Block', ['in_channel', 'out_channel', 'stride'])): 96 | '''A named tuple describing a ResNet block.''' 97 | 98 | 99 | def get_block(in_channel, out_channel, num_units, stride=2): 100 | return [Bottleneck(in_channel, out_channel, stride)] + [Bottleneck(out_channel, out_channel, 1) for i in range(num_units - 1)] 101 | 102 | 103 | def get_blocks(num_layers): 104 | if num_layers == 50: 105 | blocks = [ 106 | get_block(in_channel=64, out_channel=64, num_units=3), 107 | get_block(in_channel=64, out_channel=128, num_units=4), 108 | get_block(in_channel=128, out_channel=256, num_units=14), 109 | get_block(in_channel=256, out_channel=512, num_units=3) 110 | ] 111 | elif num_layers == 100: 112 | blocks = [ 113 | get_block(in_channel=64, out_channel=64, num_units=3), 114 | get_block(in_channel=64, out_channel=128, num_units=13), 115 | get_block(in_channel=128, out_channel=256, num_units=30), 116 | get_block(in_channel=256, 
out_channel=512, num_units=3) 117 | ] 118 | elif num_layers == 152: 119 | blocks = [ 120 | get_block(in_channel=64, out_channel=64, num_units=3), 121 | get_block(in_channel=64, out_channel=128, num_units=8), 122 | get_block(in_channel=128, out_channel=256, num_units=36), 123 | get_block(in_channel=256, out_channel=512, num_units=3) 124 | ] 125 | return blocks 126 | 127 | 128 | class SEResNet_IR(nn.Module): 129 | def __init__(self, num_layers, feature_dim=512, drop_ratio=0.4, mode = 'ir'): 130 | super(SEResNet_IR, self).__init__() 131 | assert num_layers in [50, 100, 152], 'num_layers should be 50, 100 or 152' 132 | assert mode in ['ir', 'se_ir'], 'mode should be ir or se_ir' 133 | blocks = get_blocks(num_layers) 134 | if mode == 'ir': 135 | unit_module = BottleNeck_IR 136 | elif mode == 'se_ir': 137 | unit_module = BottleNeck_IR_SE 138 | self.input_layer = nn.Sequential(nn.Conv2d(3, 64, (3, 3), 1, 1, bias=False), 139 | nn.BatchNorm2d(64), 140 | nn.PReLU(64)) 141 | 142 | self.output_layer = nn.Sequential(nn.BatchNorm2d(512), 143 | nn.Dropout(drop_ratio), 144 | Flatten(), 145 | nn.Linear(512 * 7 * 7, feature_dim), 146 | nn.BatchNorm1d(feature_dim)) 147 | modules = [] 148 | for block in blocks: 149 | for bottleneck in block: 150 | modules.append( 151 | unit_module(bottleneck.in_channel, 152 | bottleneck.out_channel, 153 | bottleneck.stride)) 154 | self.body = nn.Sequential(*modules) 155 | 156 | def forward(self, x): 157 | x = self.input_layer(x) 158 | x = self.body(x) 159 | x = self.output_layer(x) 160 | 161 | return x 162 | 163 | 164 | if __name__ == '__main__': 165 | input = torch.Tensor(2, 3, 112, 112) 166 | net = SEResNet_IR(100, mode='se_ir') 167 | print(net) 168 | 169 | x = net(input) 170 | print(x.shape) -------------------------------------------------------------------------------- /backbone/resnet.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # encoding: utf-8 3 | ''' 4 | @author: wujiyang 5 | @contact: wujiyang@hust.edu.cn 6 | @file: resnet.py 7 | @time: 2018/12/24 14:40 8 | @desc: Original ResNet backbone, including ResNet18, ResNet34, ResNet50, ResNet101 and ResNet152, we removed the last global average pooling layer 9 | and replaced it with a fully connected layer with dimension of 512. BN is used for fast convergence. 
10 | ''' 11 | import torch 12 | import torch.nn as nn 13 | 14 | def ResNet18(): 15 | model = ResNet(BasicBlock, [2, 2, 2, 2]) 16 | return model 17 | 18 | def ResNet34(): 19 | model = ResNet(BasicBlock, [3, 4, 6, 3]) 20 | return model 21 | 22 | def ResNet50(): 23 | model = ResNet(Bottleneck, [3, 4, 6, 3]) 24 | return model 25 | 26 | def ResNet101(): 27 | model = ResNet(Bottleneck, [3, 4, 23, 3]) 28 | return model 29 | 30 | def ResNet152(): 31 | model = ResNet(Bottleneck, [3, 8, 36, 3]) 32 | return model 33 | 34 | __all__ = ['ResNet', 'ResNet18', 'ResNet34', 'ResNet50', 'ResNet101', 'ResNet152'] 35 | 36 | 37 | def conv3x3(in_planes, out_planes, stride=1): 38 | """3x3 convolution with padding""" 39 | return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False) 40 | 41 | 42 | def conv1x1(in_planes, out_planes, stride=1): 43 | """1x1 convolution""" 44 | return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False) 45 | 46 | 47 | class BasicBlock(nn.Module): 48 | expansion = 1 49 | 50 | def __init__(self, inplanes, planes, stride=1, downsample=None): 51 | super(BasicBlock, self).__init__() 52 | self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3, stride=stride, padding=1, bias=False) 53 | self.bn1 = nn.BatchNorm2d(planes) 54 | self.relu = nn.ReLU(inplace=True) 55 | self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False) 56 | self.bn2 = nn.BatchNorm2d(planes) 57 | self.downsample = downsample 58 | self.stride = stride 59 | 60 | def forward(self, x): 61 | identity = x 62 | 63 | out = self.conv1(x) 64 | out = self.bn1(out) 65 | out = self.relu(out) 66 | out = self.conv2(out) 67 | out = self.bn2(out) 68 | 69 | if self.downsample is not None: 70 | identity = self.downsample(x) 71 | 72 | out += identity 73 | out = self.relu(out) 74 | 75 | return out 76 | 77 | 78 | class Bottleneck(nn.Module): 79 | expansion = 4 80 | def __init__(self, inplanes, planes, stride=1, downsample=None): 81 | super(Bottleneck, self).__init__() 82 | self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, stride=1, bias=False) 83 | self.bn1 = nn.BatchNorm2d(planes) 84 | self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False) 85 | self.bn2 = nn.BatchNorm2d(planes) 86 | self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1, stride=1, bias=False) 87 | self.bn3 = nn.BatchNorm2d(planes * self.expansion) 88 | self.relu = nn.ReLU(inplace=True) 89 | self.downsample = downsample 90 | self.stride = stride 91 | 92 | def forward(self, x): 93 | identity = x 94 | 95 | out = self.conv1(x) 96 | out = self.bn1(out) 97 | out = self.relu(out) 98 | 99 | out = self.conv2(out) 100 | out = self.bn2(out) 101 | out = self.relu(out) 102 | 103 | out = self.conv3(out) 104 | out = self.bn3(out) 105 | 106 | if self.downsample is not None: 107 | identity = self.downsample(x) 108 | 109 | out += identity 110 | out = self.relu(out) 111 | 112 | return out 113 | 114 | 115 | class Flatten(nn.Module): 116 | def forward(self, input): 117 | return input.view(input.size(0), -1) 118 | 119 | 120 | class ResNet(nn.Module): 121 | 122 | def __init__(self, block, layers, feature_dim=512, drop_ratio=0.4, zero_init_residual=False): 123 | super(ResNet, self).__init__() 124 | self.inplanes = 64 125 | self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False) 126 | self.bn1 = nn.BatchNorm2d(64) 127 | self.relu = nn.ReLU(inplace=True) 128 | self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) 129 | self.layer1 = 
self._make_layer(block, 64, layers[0]) 130 | self.layer2 = self._make_layer(block, 128, layers[1], stride=2) 131 | self.layer3 = self._make_layer(block, 256, layers[2], stride=2) 132 | self.layer4 = self._make_layer(block, 512, layers[3], stride=2) 133 | 134 | self.output_layer = nn.Sequential(nn.BatchNorm2d(512 * block.expansion), 135 | nn.Dropout(drop_ratio), 136 | Flatten(), 137 | nn.Linear(512 * block.expansion * 7 * 7, feature_dim), 138 | nn.BatchNorm1d(feature_dim)) 139 | 140 | for m in self.modules(): 141 | if isinstance(m, nn.Conv2d): 142 | nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') 143 | elif isinstance(m, nn.BatchNorm2d): 144 | nn.init.constant_(m.weight, 1) 145 | nn.init.constant_(m.bias, 0) 146 | 147 | # Zero-initialize the last BN in each residual branch, 148 | # so that the residual branch starts with zeros, and each residual block behaves like an identity. 149 | # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677 150 | if zero_init_residual: 151 | for m in self.modules(): 152 | if isinstance(m, Bottleneck): 153 | nn.init.constant_(m.bn3.weight, 0) 154 | elif isinstance(m, BasicBlock): 155 | nn.init.constant_(m.bn2.weight, 0) 156 | 157 | def _make_layer(self, block, planes, blocks, stride=1): 158 | downsample = None 159 | if stride != 1 or self.inplanes != planes * block.expansion: 160 | downsample = nn.Sequential( 161 | conv1x1(self.inplanes, planes * block.expansion, stride), 162 | nn.BatchNorm2d(planes * block.expansion), 163 | ) 164 | 165 | layers = [] 166 | layers.append(block(self.inplanes, planes, stride, downsample)) 167 | self.inplanes = planes * block.expansion 168 | for _ in range(1, blocks): 169 | layers.append(block(self.inplanes, planes)) 170 | 171 | return nn.Sequential(*layers) 172 | 173 | def forward(self, x): 174 | x = self.conv1(x) 175 | x = self.bn1(x) 176 | x = self.relu(x) 177 | x = self.maxpool(x) 178 | 179 | x = self.layer1(x) 180 | x = self.layer2(x) 181 | x = self.layer3(x) 182 | x = self.layer4(x) 183 | 184 | x = self.output_layer(x) 185 | 186 | return x 187 | 188 | 189 | if __name__ == "__main__": 190 | input = torch.Tensor(2, 3, 112, 112) 191 | net = ResNet50() 192 | print(net) 193 | 194 | x = net(input) 195 | print(x.shape) -------------------------------------------------------------------------------- /attack/preprocess_eval.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import os 3 | 4 | import numpy as np 5 | import scipy.io 6 | import torch 7 | import torchvision.transforms as transforms 8 | from torch.nn import DataParallel 9 | from torch.utils.data.dataloader import DataLoader 10 | from tqdm import tqdm 11 | 12 | from backbone import mobilefacenet, cbam, attention 13 | from preprocess_dataset import LFW 14 | 15 | 16 | def loadModel(backbone_net, feature_dim, gpus, resume, root, dev_path, flip): 17 | if backbone_net == 'MobileFace': 18 | net = mobilefacenet.MobileFaceNet() 19 | elif backbone_net == 'CBAM_50': 20 | net = cbam.CBAMResNet(50, feature_dim=feature_dim, mode='ir') 21 | elif backbone_net == 'CBAM_50_SE': 22 | net = cbam.CBAMResNet(50, feature_dim=feature_dim, mode='ir_se') 23 | elif backbone_net == 'CBAM_100': 24 | net = cbam.CBAMResNet(100, feature_dim=feature_dim, mode='ir') 25 | elif backbone_net == 'CBAM_100_SE': 26 | net = cbam.CBAMResNet(100, feature_dim=feature_dim, mode='ir_se') 27 | elif backbone_net == 'CBAM_152': 28 | net = cbam.CBAMResNet(152, feature_dim=feature_dim, mode='ir') 29 | elif 
backbone_net == 'CBAM_152_SE': 30 | net = cbam.CBAMResNet(152, feature_dim=feature_dim, mode='ir_se') 31 | elif backbone_net == 'Attention_56': 32 | net = attention.ResidualAttentionNet_56(feature_dim=feature_dim) 33 | else: 34 | net = None 35 | print(backbone_net, ' is not available!') 36 | assert 1 == 0 37 | 38 | # gpu init 39 | multi_gpus = False 40 | if len(gpus.split(',')) > 1: 41 | multi_gpus = True 42 | os.environ['CUDA_VISIBLE_DEVICES'] = gpus 43 | device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') 44 | 45 | net.load_state_dict(torch.load(resume)['net_state_dict']) 46 | 47 | if multi_gpus: 48 | net = DataParallel(net).to(device) 49 | else: 50 | net = net.to(device) 51 | 52 | transform = transforms.Compose([ 53 | transforms.ToTensor(), # range [0, 255] -> [0.0,1.0] 54 | transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)) # range [0.0, 1.0] -> [-1.0,1.0] 55 | ]) 56 | lfw_dataset = LFW(root, dev_path, transform=transform, flip=flip) 57 | lfw_loader = DataLoader(lfw_dataset, batch_size=1, shuffle=False) 58 | 59 | return net.eval(), device, lfw_dataset, lfw_loader 60 | 61 | 62 | def getFeatureFromTorch(net, device, data_loader, feature_save_path, is_hard, is_flip, start_idx=1): 63 | features_query = [] 64 | features_avg = [] 65 | with torch.no_grad(): 66 | for imgs in tqdm(data_loader): 67 | feature_query = net(imgs[0].to(device)).cpu().numpy() 68 | feature_avg = [net(img.to(device)).cpu().numpy() for img in imgs[start_idx:]] 69 | feature_avg = np.concatenate(feature_avg, axis=0) 70 | feature_avg = np.mean(feature_avg, axis=0, keepdims=True) 71 | 72 | features_query.append(feature_query) 73 | features_avg.append(feature_avg) 74 | 75 | features_query = np.concatenate(features_query, axis=0) 76 | features_avg = np.concatenate(features_avg, axis=0) 77 | 78 | result = {'features_query': features_query, 'features_avg': features_avg} 79 | 80 | tokens = list(os.path.split(feature_save_path)) 81 | if is_flip: 82 | tokens[-1] = "flip_" + tokens[-1] 83 | if is_hard: 84 | tokens.insert(1, "hard/") 85 | 86 | save_path = os.path.join(*tokens) 87 | scipy.io.savemat(save_path, result) 88 | 89 | return save_path 90 | 91 | 92 | def main(): 93 | args = argparser() 94 | n_models = len(args.backbone_net) 95 | 96 | for idx in range(n_models): 97 | for is_flip in [False, True]: 98 | net, device, lfw_dataset, lfw_loader = loadModel(args.backbone_net[idx], args.feature_dim[idx], args.gpus, 99 | args.resume[idx], args.root, args.dev_path, is_flip) 100 | 101 | is_hard = "hard" in args.dev_path 102 | save_path = getFeatureFromTorch(net, device, lfw_loader, args.feature_save_path[idx], is_hard, is_flip) 103 | 104 | acc = predict(save_path) 105 | 106 | print(args.backbone_net[idx], acc) 107 | 108 | 109 | def predict(feature_save_path): 110 | np.set_printoptions(precision=3, linewidth=1000) 111 | features_path = feature_save_path 112 | features = scipy.io.loadmat(features_path) 113 | features_query = features['features_query'] 114 | features_avg = features['features_avg'] 115 | 116 | num_features = features_query.shape[0] 117 | 118 | norm_query = np.linalg.norm(features_query, axis=1, keepdims=True) 119 | norm_avg = np.linalg.norm(features_avg, axis=1, keepdims=True) 120 | 121 | cos_dist = np.matmul(features_query, features_avg.T) / np.matmul(norm_query, norm_avg.T) 122 | 123 | preds = np.argmax(cos_dist, axis=1) 124 | 125 | acc = np.sum(preds == np.arange(0, num_features)) / num_features 126 | 127 | return acc 128 | 129 | 130 | def argparser(): 131 | parser = 
argparse.ArgumentParser(description='Testing LFW') 132 | parser.add_argument('--root', type=str, default='/data/FaceRecognition/securityAI/securityAI_round1_images', 133 | help='The path of lfw data') 134 | parser.add_argument('--dev_path', type=str, default='/data/FaceRecognition/securityAI/securityAI_round1_dev.csv', 135 | help='The path of lfw data') 136 | parser.add_argument('--backbone_net', type=list, 137 | default=['MobileFace', 'CBAM_50', 'CBAM_50_SE', 'CBAM_100_SE', 'Attention_56', 'CBAM_100', 138 | 'CBAM_152'], 139 | help='MobileFace, CBAM_50, CBAM_50_SE, CBAM_100, CBAM_100_SE, Attention_56') 140 | parser.add_argument('--feature_dim', type=list, 141 | default=[128, 512, 512, 512, 512, 512, 512], 142 | help='feature dimension') 143 | parser.add_argument('--resume', type=list, 144 | default=['../model/Mobile_MOBILEFACE_20190813_112144/Iter_054000_net.ckpt', 145 | '../model/Res50_IR_RES50_IR_20190821_181502/Iter_108000_net.ckpt', 146 | '../model/SERes50_IR_SERES50_IR_20190819_165550/Iter_120000_net.ckpt', 147 | '../model/SERes100_IR_SERES100_IR_20190820_161900/Iter_078000_net.ckpt', 148 | '../model/Attention_56_ATTENTION_56_20190822_164221/Iter_093000_net.ckpt', 149 | '../model/Res100_IR_RES100_IR_20190824_180052/Iter_111000_net.ckpt', 150 | '../model/Res152_IR_RES152_IR_20190904_094041/Iter_099000_net.ckpt', ], 151 | help='The path pf save model') 152 | parser.add_argument('--feature_save_path', type=list, 153 | default=['../result/features_attacked_Mobile.mat', 154 | '../result/features_attacked_Res50.mat', 155 | '../result/features_attacked_SERes50.mat', 156 | '../result/features_attacked_SERes100.mat', 157 | '../result/features_attacked_Attention56.mat', 158 | '../result/features_attacked_Res100.mat', 159 | '../result/features_attacked_Res152.mat', ], 160 | help='The path of the extract features save, must be .mat file') 161 | parser.add_argument('--gpus', type=str, default='0', help='gpu list') 162 | args = parser.parse_args() 163 | return args 164 | 165 | 166 | if __name__ == '__main__': 167 | main() 168 | -------------------------------------------------------------------------------- /attack/attack.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import os 3 | import shutil 4 | import sys 5 | 6 | sys.path.append(os.path.join(os.path.dirname(__file__), '..')) 7 | 8 | import cv2 9 | import numpy as np 10 | import torch 11 | import torch.nn as nn 12 | import torch.optim as optim 13 | from torch.utils.data.dataloader import DataLoader 14 | 15 | from attack_dataset import AttackDataset 16 | from functions import loadModel, get_dist, get_dist_from_images 17 | from util import AverageMeter, init_log, get_log_filename, init_random_state 18 | 19 | device = 'cuda' 20 | 21 | 22 | def argparser(): 23 | parser = argparse.ArgumentParser(description='securityAI') 24 | parser.add_argument('--root', type=str, default='/data/FaceRecognition/securityAI/securityAI_round1_images') 25 | parser.add_argument('--dev_path', type=str, default='/data/FaceRecognition/securityAI/securityAI_round1_dev.csv') 26 | 27 | parser.add_argument('--backbone_net', type=list, 28 | default=['CBAM_50_SE', 'MobileFace', "CBAM_50", 'CBAM_100_SE', 'CBAM_100', 29 | 'CBAM_152', "Attention_56", ]) 30 | 31 | parser.add_argument('--feature_dim', type=list, 32 | default=[512, 128, 512, 512, 512, 512, 512]) 33 | 34 | parser.add_argument('--resume', type=list, 35 | default=['../model/SERes50_IR_SERES50_IR_20190819_165550/Iter_120000_net.ckpt', 36 | 
'../model/Mobile_MOBILEFACE_20190813_112144/Iter_054000_net.ckpt', 37 | '../model/Res50_IR_RES50_IR_20190821_181502/Iter_108000_net.ckpt', 38 | '../model/SERes100_IR_SERES100_IR_20190820_161900/Iter_078000_net.ckpt', 39 | '../model/Res100_IR_RES100_IR_20190824_180052/Iter_111000_net.ckpt', 40 | '../model/Res152_IR_RES152_IR_20190904_094041/Iter_099000_net.ckpt', 41 | '../model/Attention_56_ATTENTION_56_20190822_164221/Iter_093000_net.ckpt', ]) 42 | 43 | parser.add_argument('--features_path', type=list, 44 | default=['../result/features_attacked_SERes50.mat', 45 | '../result/features_attacked_Mobile.mat', 46 | '../result/features_attacked_Res50.mat', 47 | '../result/features_attacked_SERes100.mat', 48 | '../result/features_attacked_Res100.mat', 49 | '../result/features_attacked_Res152.mat', 50 | '../result/features_attacked_Attention56.mat', ]) 51 | 52 | parser.add_argument('--flip_features_path', type=list, 53 | default=['../result/flip_features_attacked_SERes50.mat', 54 | '../result/flip_features_attacked_Mobile.mat', 55 | '../result/flip_features_attacked_Res50.mat', 56 | '../result/flip_features_attacked_SERes100.mat', 57 | '../result/flip_features_attacked_Res100.mat', 58 | '../result/flip_features_attacked_Res152.mat', 59 | '../result/flip_features_attacked_Attention56.mat', ]) 60 | 61 | parser.add_argument('--masks_size', type=list, default=[712, 3, 64, 64]) 62 | parser.add_argument('--pt_x', type=int, default=22) 63 | parser.add_argument('--pt_y', type=int, default=36) 64 | 65 | parser.add_argument('--batch_size', type=int, default=32, help='batch_size') 66 | parser.add_argument('--epochs', type=int, default=17 * 2, help='epochs') 67 | parser.add_argument('--lr', type=float, default=1000000.0 / 2, help='learning_rate') 68 | parser.add_argument('--wd', type=float, default=0.0, help='weight_decay') 69 | parser.add_argument('--alpha', type=float, default=0.2, help='weight_decay') 70 | 71 | parser.add_argument('--output_path', type=str, default='/data/FaceRecognition/securityAI/images') 72 | parser.add_argument('--log_dir', type=str, default='log/') 73 | parser.add_argument('--masks_dir', type=str, default='masks/') 74 | parser.add_argument('--random_state', type=str, default='state/(3.4962) random_state.obj') 75 | 76 | args = parser.parse_args() 77 | return args 78 | 79 | 80 | def eval_after_attack(net, attack_loader): 81 | all_features = [] 82 | with torch.no_grad(): 83 | for imgs_t, _, _, _ in attack_loader: 84 | all_features.append(net(imgs_t).cpu().numpy()) 85 | 86 | all_features = np.concatenate(all_features, axis=0) 87 | acc, cos_dist = attack_loader.dataset.eval(all_features) 88 | 89 | return acc, cos_dist 90 | 91 | 92 | def attack(net, attack_loader, optimizer): 93 | loss_meter = AverageMeter() 94 | cosine_similarity = nn.CosineSimilarity(dim=1, eps=1e-6).to(device) 95 | 96 | for imgs_t, features, imgs, imgs_after_attack in attack_loader: 97 | features = features.to(device) 98 | outputs = net(imgs_t) 99 | 100 | similarity = cosine_similarity(outputs, features) 101 | loss = torch.mean(similarity) 102 | loss_meter.update(loss.item(), imgs_t.shape[0]) 103 | 104 | optimizer.zero_grad() 105 | loss.backward() 106 | optimizer.step() 107 | 108 | return loss_meter.avg 109 | 110 | 111 | def attack_with_dist_constraint(net, attack_loader, optimizer, alpha=0.2): 112 | loss_meter = AverageMeter() 113 | cos_criterion = nn.CosineEmbeddingLoss(margin=-1.0).to(device) 114 | dist_criterion = nn.L1Loss().to(device) 115 | 116 | for imgs_t, features, imgs, imgs_after_attack in attack_loader: 117 | 
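# cos_loss (CosineEmbeddingLoss with margin=-1.0 and target -1) drives the cosine similarity
# between the attacked features and the stored clean features toward -1, while dist_loss (L1
# between the original and perturbed images) keeps the perturbation small; the two are combined
# as cos_loss + alpha * dist_loss. Adding the margin back when logging recovers the raw mean
# cosine similarity.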
features = features.to(device) 118 | outputs = net(imgs_t) 119 | 120 | cos_loss = cos_criterion(outputs, features, torch.tensor([-1.0]).to(device)) 121 | dist_loss = dist_criterion(imgs, imgs_after_attack) 122 | loss = torch.add(cos_loss, dist_loss, alpha=alpha) 123 | loss_meter.update(cos_loss.item() + cos_criterion.margin, imgs_t.shape[0]) 124 | 125 | optimizer.zero_grad() 126 | loss.backward() 127 | optimizer.step() 128 | 129 | return loss_meter.avg 130 | 131 | 132 | def main(): 133 | args = argparser() 134 | 135 | log_filename = get_log_filename(args) 136 | logger = init_log(args.log_dir, "{}.log".format(log_filename)) 137 | 138 | init_random_state(args) 139 | 140 | n_models = len(args.backbone_net) 141 | 142 | nets = [] 143 | attack_loaders = [] 144 | for idx in range(n_models): 145 | net = loadModel(args, idx) 146 | nets.append(net) 147 | attack_dataset = AttackDataset(args.root, args.dev_path, args.features_path[idx], args.flip_features_path[idx]) 148 | attack_loaders.append(DataLoader(attack_dataset, batch_size=args.batch_size, shuffle=False)) 149 | 150 | AttackDataset.init_attack_masks(args.masks_size, args.pt_x, args.pt_y) 151 | 152 | the_last_batch_size = args.masks_size[0] % args.batch_size 153 | the_last_batch_lr = args.lr * (the_last_batch_size / args.batch_size) 154 | optimizer = optim.SGD([ 155 | {'params': AttackDataset.attack_masks[:-the_last_batch_size]}, 156 | {'params': AttackDataset.attack_masks[-the_last_batch_size:], 'lr': the_last_batch_lr}], 157 | lr=args.lr, weight_decay=args.wd 158 | ) 159 | 160 | for epoch in range(args.epochs): 161 | for idx in range(n_models): 162 | cos_similarity = attack_with_dist_constraint(nets[idx], attack_loaders[idx], optimizer, args.alpha) 163 | acc, _ = eval_after_attack(nets[idx], attack_loaders[idx]) 164 | 165 | l2_dist = get_dist(args) 166 | 167 | logger.info("Model{}, Epoch {:02d}, Acc: {:.4f}, Cos_Similarity: {:6.4f}, L2_dist: {:6.4f}".format( 168 | idx, epoch, acc, cos_similarity, l2_dist)) 169 | 170 | torch.save(AttackDataset.attack_masks, "{}/{}.pth".format(args.masks_dir, log_filename)) 171 | 172 | 173 | def generate_imgs(load_attack_masks=False): 174 | args = argparser() 175 | 176 | attack_dataset = AttackDataset(args.root, args.dev_path, args.features_path[0], args.flip_features_path[0], True) 177 | 178 | if load_attack_masks: 179 | attack_dataset.init_attack_masks(args.masks_size, args.pt_x, args.pt_y) 180 | attack_dataset.load_attack_masks("{}/{}.pth".format(args.masks_dir, get_log_filename(args))) 181 | 182 | print("L2: {}".format(get_dist(args))) 183 | 184 | if os.path.exists(args.output_path): 185 | shutil.rmtree(args.output_path) 186 | os.mkdir(args.output_path) 187 | 188 | with torch.no_grad(): 189 | for i, (img, img_after_attack) in enumerate(attack_dataset): 190 | img_after_attack = np.transpose(img_after_attack.cpu().numpy(), [1, 2, 0]) 191 | cv2.imwrite("{}/{:05}.png".format(args.output_path, i + 1), img_after_attack) 192 | shutil.move("{}/{:05}.png".format(args.output_path, i + 1), "{}/{:05}.jpg".format(args.output_path, i + 1)) 193 | 194 | print("L2: {}".format(get_dist_from_images(args.root, args.output_path))) 195 | 196 | 197 | if __name__ == '__main__': 198 | main() 199 | generate_imgs(load_attack_masks=True) 200 | -------------------------------------------------------------------------------- /backbone/cbam.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # encoding: utf-8 3 | ''' 4 | @author: wujiyang 5 | @contact: wujiyang@hust.edu.cn 6 | 
@file: cbam.py 7 | @time: 2019/1/14 15:33 8 | @desc: Convolutional Block Attention Module in ECCV 2018, including channel attention module and spatial attention module. 9 | ''' 10 | 11 | import torch 12 | from torch import nn 13 | import time 14 | 15 | class Flatten(nn.Module): 16 | def forward(self, input): 17 | return input.view(input.size(0), -1) 18 | 19 | class SEModule(nn.Module): 20 | '''Squeeze and Excitation Module''' 21 | def __init__(self, channels, reduction): 22 | super(SEModule, self).__init__() 23 | self.avg_pool = nn.AdaptiveAvgPool2d(1) 24 | self.fc1 = nn.Conv2d(channels, channels // reduction, kernel_size=1, padding=0, bias=False) 25 | self.relu = nn.ReLU(inplace=True) 26 | self.fc2 = nn.Conv2d(channels // reduction, channels, kernel_size=1, padding=0, bias=False) 27 | self.sigmoid = nn.Sigmoid() 28 | 29 | def forward(self, x): 30 | input = x 31 | x = self.avg_pool(x) 32 | x = self.fc1(x) 33 | x = self.relu(x) 34 | x = self.fc2(x) 35 | x = self.sigmoid(x) 36 | 37 | return input * x 38 | 39 | class CAModule(nn.Module): 40 | '''Channel Attention Module''' 41 | def __init__(self, channels, reduction): 42 | super(CAModule, self).__init__() 43 | self.avg_pool = nn.AdaptiveAvgPool2d(1) 44 | self.max_pool = nn.AdaptiveMaxPool2d(1) 45 | self.shared_mlp = nn.Sequential(nn.Conv2d(channels, channels // reduction, kernel_size=1, padding=0, bias=False), 46 | nn.ReLU(inplace=True), 47 | nn.Conv2d(channels // reduction, channels, kernel_size=1, padding=0, bias=False)) 48 | self.sigmoid = nn.Sigmoid() 49 | 50 | def forward(self, x): 51 | input = x 52 | avg_pool = self.avg_pool(x) 53 | max_pool = self.max_pool(x) 54 | x = self.shared_mlp(avg_pool) + self.shared_mlp(max_pool) 55 | x = self.sigmoid(x) 56 | 57 | return input * x 58 | 59 | class SAModule(nn.Module): 60 | '''Spatial Attention Module''' 61 | def __init__(self): 62 | super(SAModule, self).__init__() 63 | self.conv = nn.Conv2d(2, 1, kernel_size=3, padding=1, bias=False) 64 | self.sigmoid = nn.Sigmoid() 65 | 66 | def forward(self, x): 67 | input = x 68 | avg_c = torch.mean(x, 1, True) 69 | max_c, _ = torch.max(x, 1, True) 70 | x = torch.cat((avg_c, max_c), 1) 71 | x = self.conv(x) 72 | x = self.sigmoid(x) 73 | return input * x 74 | 75 | class BottleNeck_IR(nn.Module): 76 | '''Improved Residual Bottlenecks''' 77 | def __init__(self, in_channel, out_channel, stride, dim_match): 78 | super(BottleNeck_IR, self).__init__() 79 | self.res_layer = nn.Sequential(nn.BatchNorm2d(in_channel), 80 | nn.Conv2d(in_channel, out_channel, (3, 3), 1, 1, bias=False), 81 | nn.BatchNorm2d(out_channel), 82 | nn.PReLU(out_channel), 83 | nn.Conv2d(out_channel, out_channel, (3, 3), stride, 1, bias=False), 84 | nn.BatchNorm2d(out_channel)) 85 | if dim_match: 86 | self.shortcut_layer = None 87 | else: 88 | self.shortcut_layer = nn.Sequential( 89 | nn.Conv2d(in_channel, out_channel, kernel_size=(1, 1), stride=stride, bias=False), 90 | nn.BatchNorm2d(out_channel) 91 | ) 92 | 93 | def forward(self, x): 94 | shortcut = x 95 | res = self.res_layer(x) 96 | 97 | if self.shortcut_layer is not None: 98 | shortcut = self.shortcut_layer(x) 99 | 100 | return shortcut + res 101 | 102 | class BottleNeck_IR_SE(nn.Module): 103 | '''Improved Residual Bottlenecks with Squeeze and Excitation Module''' 104 | def __init__(self, in_channel, out_channel, stride, dim_match): 105 | super(BottleNeck_IR_SE, self).__init__() 106 | self.res_layer = nn.Sequential(nn.BatchNorm2d(in_channel), 107 | nn.Conv2d(in_channel, out_channel, (3, 3), 1, 1, bias=False), 108 | 
nn.BatchNorm2d(out_channel), 109 | nn.PReLU(out_channel), 110 | nn.Conv2d(out_channel, out_channel, (3, 3), stride, 1, bias=False), 111 | nn.BatchNorm2d(out_channel), 112 | SEModule(out_channel, 16)) 113 | if dim_match: 114 | self.shortcut_layer = None 115 | else: 116 | self.shortcut_layer = nn.Sequential( 117 | nn.Conv2d(in_channel, out_channel, kernel_size=(1, 1), stride=stride, bias=False), 118 | nn.BatchNorm2d(out_channel) 119 | ) 120 | 121 | def forward(self, x): 122 | shortcut = x 123 | res = self.res_layer(x) 124 | 125 | if self.shortcut_layer is not None: 126 | shortcut = self.shortcut_layer(x) 127 | 128 | return shortcut + res 129 | 130 | class BottleNeck_IR_CAM(nn.Module): 131 | '''Improved Residual Bottlenecks with Channel Attention Module''' 132 | def __init__(self, in_channel, out_channel, stride, dim_match): 133 | super(BottleNeck_IR_CAM, self).__init__() 134 | self.res_layer = nn.Sequential(nn.BatchNorm2d(in_channel), 135 | nn.Conv2d(in_channel, out_channel, (3, 3), 1, 1, bias=False), 136 | nn.BatchNorm2d(out_channel), 137 | nn.PReLU(out_channel), 138 | nn.Conv2d(out_channel, out_channel, (3, 3), stride, 1, bias=False), 139 | nn.BatchNorm2d(out_channel), 140 | CAModule(out_channel, 16)) 141 | if dim_match: 142 | self.shortcut_layer = None 143 | else: 144 | self.shortcut_layer = nn.Sequential( 145 | nn.Conv2d(in_channel, out_channel, kernel_size=(1, 1), stride=stride, bias=False), 146 | nn.BatchNorm2d(out_channel) 147 | ) 148 | 149 | def forward(self, x): 150 | shortcut = x 151 | res = self.res_layer(x) 152 | 153 | if self.shortcut_layer is not None: 154 | shortcut = self.shortcut_layer(x) 155 | 156 | return shortcut + res 157 | 158 | class BottleNeck_IR_SAM(nn.Module): 159 | '''Improved Residual Bottlenecks with Spatial Attention Module''' 160 | def __init__(self, in_channel, out_channel, stride, dim_match): 161 | super(BottleNeck_IR_SAM, self).__init__() 162 | self.res_layer = nn.Sequential(nn.BatchNorm2d(in_channel), 163 | nn.Conv2d(in_channel, out_channel, (3, 3), 1, 1, bias=False), 164 | nn.BatchNorm2d(out_channel), 165 | nn.PReLU(out_channel), 166 | nn.Conv2d(out_channel, out_channel, (3, 3), stride, 1, bias=False), 167 | nn.BatchNorm2d(out_channel), 168 | SAModule()) 169 | if dim_match: 170 | self.shortcut_layer = None 171 | else: 172 | self.shortcut_layer = nn.Sequential( 173 | nn.Conv2d(in_channel, out_channel, kernel_size=(1, 1), stride=stride, bias=False), 174 | nn.BatchNorm2d(out_channel) 175 | ) 176 | 177 | def forward(self, x): 178 | shortcut = x 179 | res = self.res_layer(x) 180 | 181 | if self.shortcut_layer is not None: 182 | shortcut = self.shortcut_layer(x) 183 | 184 | return shortcut + res 185 | 186 | class BottleNeck_IR_CBAM(nn.Module): 187 | '''Improved Residual Bottleneck with Channel Attention Module and Spatial Attention Module''' 188 | def __init__(self, in_channel, out_channel, stride, dim_match): 189 | super(BottleNeck_IR_CBAM, self).__init__() 190 | self.res_layer = nn.Sequential(nn.BatchNorm2d(in_channel), 191 | nn.Conv2d(in_channel, out_channel, (3, 3), 1, 1, bias=False), 192 | nn.BatchNorm2d(out_channel), 193 | nn.PReLU(out_channel), 194 | nn.Conv2d(out_channel, out_channel, (3, 3), stride, 1, bias=False), 195 | nn.BatchNorm2d(out_channel), 196 | CAModule(out_channel, 16), 197 | SAModule() 198 | ) 199 | if dim_match: 200 | self.shortcut_layer = None 201 | else: 202 | self.shortcut_layer = nn.Sequential( 203 | nn.Conv2d(in_channel, out_channel, kernel_size=(1, 1), stride=stride, bias=False), 204 | nn.BatchNorm2d(out_channel) 205 | ) 206 | 207 | 
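# As in the other IR bottlenecks: the shortcut is the identity when dim_match is True,
# otherwise a strided 1x1 conv + BN projection, and forward() adds it to the residual branch.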
def forward(self, x): 208 | shortcut = x 209 | res = self.res_layer(x) 210 | 211 | if self.shortcut_layer is not None: 212 | shortcut = self.shortcut_layer(x) 213 | 214 | return shortcut + res 215 | 216 | 217 | filter_list = [64, 64, 128, 256, 512] 218 | def get_layers(num_layers): 219 | if num_layers == 50: 220 | return [3, 4, 14, 3] 221 | elif num_layers == 100: 222 | return [3, 13, 30, 3] 223 | elif num_layers == 152: 224 | return [3, 8, 36, 3] 225 | 226 | class CBAMResNet(nn.Module): 227 | def __init__(self, num_layers, feature_dim=512, drop_ratio=0.4, mode='ir',filter_list=filter_list): 228 | super(CBAMResNet, self).__init__() 229 | assert num_layers in [50, 100, 152], 'num_layers should be 50, 100 or 152' 230 | assert mode in ['ir', 'ir_se', 'ir_cam', 'ir_sam', 'ir_cbam'], 'mode should be ir, ir_se, ir_cam, ir_sam or ir_cbam' 231 | layers = get_layers(num_layers) 232 | if mode == 'ir': 233 | block = BottleNeck_IR 234 | elif mode == 'ir_se': 235 | block = BottleNeck_IR_SE 236 | elif mode == 'ir_cam': 237 | block = BottleNeck_IR_CAM 238 | elif mode == 'ir_sam': 239 | block = BottleNeck_IR_SAM 240 | elif mode == 'ir_cbam': 241 | block = BottleNeck_IR_CBAM 242 | 243 | self.input_layer = nn.Sequential(nn.Conv2d(3, 64, (3, 3), stride=1, padding=1, bias=False), 244 | nn.BatchNorm2d(64), 245 | nn.PReLU(64)) 246 | self.layer1 = self._make_layer(block, filter_list[0], filter_list[1], layers[0], stride=2) 247 | self.layer2 = self._make_layer(block, filter_list[1], filter_list[2], layers[1], stride=2) 248 | self.layer3 = self._make_layer(block, filter_list[2], filter_list[3], layers[2], stride=2) 249 | self.layer4 = self._make_layer(block, filter_list[3], filter_list[4], layers[3], stride=2) 250 | 251 | self.output_layer = nn.Sequential(nn.BatchNorm2d(512), 252 | nn.Dropout(drop_ratio), 253 | Flatten(), 254 | nn.Linear(512 * 7 * 7, feature_dim), 255 | nn.BatchNorm1d(feature_dim)) 256 | 257 | # weight initialization 258 | for m in self.modules(): 259 | if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear): 260 | nn.init.xavier_uniform_(m.weight) 261 | if m.bias is not None: 262 | nn.init.constant_(m.bias, 0.0) 263 | elif isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.BatchNorm1d): 264 | nn.init.constant_(m.weight, 1) 265 | nn.init.constant_(m.bias, 0) 266 | 267 | def _make_layer(self, block, in_channel, out_channel, blocks, stride): 268 | layers = [] 269 | layers.append(block(in_channel, out_channel, stride, False)) 270 | for i in range(1, blocks): 271 | layers.append(block(out_channel, out_channel, 1, True)) 272 | 273 | return nn.Sequential(*layers) 274 | 275 | def forward(self, x): 276 | x = self.input_layer(x) 277 | x = self.layer1(x) 278 | x = self.layer2(x) 279 | x = self.layer3(x) 280 | x = self.layer4(x) 281 | x = self.output_layer(x) 282 | 283 | return x 284 | 285 | if __name__ == '__main__': 286 | input = torch.Tensor(2, 3, 112, 112) 287 | net = CBAMResNet(50, mode='ir') 288 | 289 | out = net(input) 290 | print(out.shape) 291 | -------------------------------------------------------------------------------- /wu/train.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # encoding: utf-8 3 | ''' 4 | @author: wujiyang 5 | @contact: wujiyang@hust.edu.cn 6 | @file: train.py.py 7 | @time: 2018/12/21 17:37 8 | @desc: train script for deep face recognition 9 | ''' 10 | 11 | import os 12 | import sys 13 | 14 | sys.path.append(os.path.join(os.path.dirname(__file__), '..')) 15 | 16 | import torch.utils.data 17 | from torch.nn 
import DataParallel 18 | from datetime import datetime 19 | from backbone.mobilefacenet import MobileFaceNet 20 | from backbone.cbam import CBAMResNet 21 | from backbone.attention import ResidualAttentionNet_56, ResidualAttentionNet_92 22 | from margin.ArcMarginProduct import ArcMarginProduct 23 | from margin.MultiMarginProduct import MultiMarginProduct 24 | from margin.CosineMarginProduct import CosineMarginProduct 25 | from margin.InnerProduct import InnerProduct 26 | # from utils.visualize import Visualizer 27 | from utils.logging import init_log 28 | from dataset.casia_webface import CASIAWebFace 29 | from dataset.lfw import LFW 30 | from dataset.agedb import AgeDB30 31 | from dataset.cfp import CFP_FP 32 | from torch.optim import lr_scheduler 33 | import torch.optim as optim 34 | import time 35 | from eval_lfw import evaluation_10_fold, getFeatureFromTorch 36 | import numpy as np 37 | import torchvision.transforms as transforms 38 | import argparse 39 | 40 | 41 | def train(args): 42 | # gpu init 43 | multi_gpus = False 44 | if len(args.gpus.split(',')) > 1: 45 | multi_gpus = True 46 | os.environ['CUDA_VISIBLE_DEVICES'] = args.gpus 47 | device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') 48 | 49 | # log init 50 | save_dir = os.path.join(args.save_dir, args.model_pre + args.backbone.upper() + '_' + datetime.now().strftime('%Y%m%d_%H%M%S')) 51 | if os.path.exists(save_dir): 52 | raise NameError('model dir exists!') 53 | os.makedirs(save_dir) 54 | logging = init_log(save_dir) 55 | _print = logging.info 56 | 57 | # dataset loader 58 | transform = transforms.Compose([ 59 | transforms.ToTensor(), # range [0, 255] -> [0.0,1.0] 60 | transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)) # range [0.0, 1.0] -> [-1.0,1.0] 61 | ]) 62 | # validation dataset 63 | trainset = CASIAWebFace(args.train_root, args.train_file_list, transform=transform) 64 | trainloader = torch.utils.data.DataLoader(trainset, batch_size=args.batch_size, 65 | shuffle=True, num_workers=8, drop_last=False) 66 | # test dataset 67 | lfwdataset = LFW(args.lfw_test_root, args.lfw_file_list, transform=transform) 68 | lfwloader = torch.utils.data.DataLoader(lfwdataset, batch_size=128, 69 | shuffle=False, num_workers=4, drop_last=False) 70 | # agedbdataset = AgeDB30(args.agedb_test_root, args.agedb_file_list, transform=transform) 71 | # agedbloader = torch.utils.data.DataLoader(agedbdataset, batch_size=128, 72 | # shuffle=False, num_workers=4, drop_last=False) 73 | # cfpfpdataset = CFP_FP(args.cfpfp_test_root, args.cfpfp_file_list, transform=transform) 74 | # cfpfploader = torch.utils.data.DataLoader(cfpfpdataset, batch_size=128, 75 | # shuffle=False, num_workers=4, drop_last=False) 76 | 77 | # define backbone and margin layer 78 | if args.backbone == 'MobileFace': 79 | net = MobileFaceNet() 80 | elif args.backbone == 'Res50_IR': 81 | net = CBAMResNet(50, feature_dim=args.feature_dim, mode='ir') 82 | elif args.backbone == 'SERes50_IR': 83 | net = CBAMResNet(50, feature_dim=args.feature_dim, mode='ir_se') 84 | elif args.backbone == 'Res100_IR': 85 | net = CBAMResNet(100, feature_dim=args.feature_dim, mode='ir') 86 | elif args.backbone == 'SERes100_IR': 87 | net = CBAMResNet(100, feature_dim=args.feature_dim, mode='ir_se') 88 | elif args.backbone == 'Res152_IR': 89 | net = CBAMResNet(152, feature_dim=args.feature_dim, mode='ir') 90 | elif args.backbone == 'SERes152_IR': 91 | net = CBAMResNet(152, feature_dim=args.feature_dim, mode='ir_se') 92 | elif args.backbone == 'Attention_56': 93 | net = 
ResidualAttentionNet_56(feature_dim=args.feature_dim) 94 | elif args.backbone == 'Attention_92': 95 | net = ResidualAttentionNet_92(feature_dim=args.feature_dim) 96 | else: 97 | print(args.backbone, ' is not available!') 98 | 99 | if args.margin_type == 'ArcFace': 100 | margin = ArcMarginProduct(args.feature_dim, trainset.class_nums, s=args.scale_size) 101 | elif args.margin_type == 'MultiMargin': 102 | margin = MultiMarginProduct(args.feature_dim, trainset.class_nums, s=args.scale_size) 103 | elif args.margin_type == 'CosFace': 104 | margin = CosineMarginProduct(args.feature_dim, trainset.class_nums, s=args.scale_size) 105 | elif args.margin_type == 'Softmax': 106 | margin = InnerProduct(args.feature_dim, trainset.class_nums) 107 | elif args.margin_type == 'SphereFace': 108 | pass 109 | else: 110 | print(args.margin_type, 'is not available!') 111 | 112 | if args.resume: 113 | print('resume the model parameters from: ', args.net_path, args.margin_path) 114 | net.load_state_dict(torch.load(args.net_path)['net_state_dict']) 115 | margin.load_state_dict(torch.load(args.margin_path)['net_state_dict']) 116 | 117 | # define optimizers for different layer 118 | criterion = torch.nn.CrossEntropyLoss().to(device) 119 | optimizer_ft = optim.SGD([ 120 | {'params': net.parameters(), 'weight_decay': 5e-4}, 121 | {'params': margin.parameters(), 'weight_decay': 5e-4} 122 | ], lr=0.1, momentum=0.9, nesterov=True) 123 | exp_lr_scheduler = lr_scheduler.MultiStepLR(optimizer_ft, milestones=[6, 11, 16], gamma=0.1) 124 | 125 | if multi_gpus: 126 | net = DataParallel(net).to(device) 127 | margin = DataParallel(margin).to(device) 128 | else: 129 | net = net.to(device) 130 | margin = margin.to(device) 131 | 132 | 133 | best_lfw_acc = 0.0 134 | best_lfw_iters = 0 135 | # best_agedb30_acc = 0.0 136 | # best_agedb30_iters = 0 137 | # best_cfp_fp_acc = 0.0 138 | # best_cfp_fp_iters = 0 139 | total_iters = 0 140 | # vis = Visualizer(env=args.model_pre + args.backbone) 141 | for epoch in range(1, args.total_epoch + 1): 142 | exp_lr_scheduler.step() 143 | # train model 144 | _print('Train Epoch: {}/{} ...'.format(epoch, args.total_epoch)) 145 | net.train() 146 | 147 | since = time.time() 148 | for data in trainloader: 149 | img, label = data[0].to(device), data[1].to(device) 150 | optimizer_ft.zero_grad() 151 | 152 | raw_logits = net(img) 153 | output = margin(raw_logits, label) 154 | total_loss = criterion(output, label) 155 | total_loss.backward() 156 | optimizer_ft.step() 157 | 158 | total_iters += 1 159 | # print train information 160 | if total_iters % 100 == 0: 161 | # current training accuracy 162 | _, predict = torch.max(output.data, 1) 163 | total = label.size(0) 164 | correct = (np.array(predict.cpu()) == np.array(label.data.cpu())).sum() 165 | time_cur = (time.time() - since) / 100 166 | since = time.time() 167 | # vis.plot_curves({'softmax loss': total_loss.item()}, iters=total_iters, title='train loss', 168 | # xlabel='iters', ylabel='train loss') 169 | # vis.plot_curves({'train accuracy': correct / total}, iters=total_iters, title='train accuracy', xlabel='iters', 170 | # ylabel='train accuracy') 171 | 172 | _print("Iters: {:0>6d}/[{:0>2d}], loss: {:.4f}, train_accuracy: {:.4f}, time: {:.2f} s/iter, learning rate: {}".format(total_iters, epoch, total_loss.item(), correct/total, time_cur, exp_lr_scheduler.get_lr()[0])) 173 | 174 | # save model 175 | if total_iters % args.save_freq == 0: 176 | msg = 'Saving checkpoint: {}'.format(total_iters) 177 | _print(msg) 178 | if multi_gpus: 179 | net_state_dict = 
net.module.state_dict() 180 | margin_state_dict = margin.module.state_dict() 181 | else: 182 | net_state_dict = net.state_dict() 183 | margin_state_dict = margin.state_dict() 184 | if not os.path.exists(save_dir): 185 | os.mkdir(save_dir) 186 | torch.save({ 187 | 'iters': total_iters, 188 | 'net_state_dict': net_state_dict}, 189 | os.path.join(save_dir, 'Iter_%06d_net.ckpt' % total_iters)) 190 | torch.save({ 191 | 'iters': total_iters, 192 | 'net_state_dict': margin_state_dict}, 193 | os.path.join(save_dir, 'Iter_%06d_margin.ckpt' % total_iters)) 194 | 195 | # test accuracy 196 | if total_iters % args.test_freq == 0: 197 | 198 | # test model on lfw 199 | net.eval() 200 | getFeatureFromTorch('../result/cur_lfw_result.mat', net, device, lfwdataset, lfwloader) 201 | lfw_accs = evaluation_10_fold('../result/cur_lfw_result.mat') 202 | _print('LFW Ave Accuracy: {:.4f}'.format(np.mean(lfw_accs) * 100)) 203 | if best_lfw_acc <= np.mean(lfw_accs) * 100: 204 | best_lfw_acc = np.mean(lfw_accs) * 100 205 | best_lfw_iters = total_iters 206 | 207 | # # test model on AgeDB30 208 | # getFeatureFromTorch('./result/cur_agedb30_result.mat', net, device, agedbdataset, agedbloader) 209 | # age_accs = evaluation_10_fold('./result/cur_agedb30_result.mat') 210 | # _print('AgeDB-30 Ave Accuracy: {:.4f}'.format(np.mean(age_accs) * 100)) 211 | # if best_agedb30_acc <= np.mean(age_accs) * 100: 212 | # best_agedb30_acc = np.mean(age_accs) * 100 213 | # best_agedb30_iters = total_iters 214 | # 215 | # # test model on CFP-FP 216 | # getFeatureFromTorch('./result/cur_cfpfp_result.mat', net, device, cfpfpdataset, cfpfploader) 217 | # cfp_accs = evaluation_10_fold('./result/cur_cfpfp_result.mat') 218 | # _print('CFP-FP Ave Accuracy: {:.4f}'.format(np.mean(cfp_accs) * 100)) 219 | # if best_cfp_fp_acc <= np.mean(cfp_accs) * 100: 220 | # best_cfp_fp_acc = np.mean(cfp_accs) * 100 221 | # best_cfp_fp_iters = total_iters 222 | _print('Current Best Accuracy: LFW: {:.4f} in iters: {}'.format(best_lfw_acc, best_lfw_iters)) 223 | 224 | # vis.plot_curves({'lfw': np.mean(lfw_accs), 'agedb-30': np.mean(age_accs), 'cfp-fp': np.mean(cfp_accs)}, iters=total_iters, 225 | # title='test accuracy', xlabel='iters', ylabel='test accuracy') 226 | net.train() 227 | 228 | _print('Finally Best Accuracy: LFW: {:.4f} in iters: {}'.format(best_lfw_acc, best_lfw_iters)) 229 | print('finishing training') 230 | 231 | 232 | if __name__ == '__main__': 233 | parser = argparse.ArgumentParser(description='PyTorch for deep face recognition') 234 | parser.add_argument('--train_root', type=str, default='/data/FaceRecognition/WebFace/webface_align_112', help='train image root') 235 | parser.add_argument('--train_file_list', type=str, default='/data/FaceRecognition/WebFace/align_train.list', help='train list') 236 | parser.add_argument('--lfw_test_root', type=str, default='/data/FaceRecognition/LFW/lfw_align_112', help='lfw image root') 237 | parser.add_argument('--lfw_file_list', type=str, default='/data/FaceRecognition/LFW/pairs.txt', help='lfw pair file list') 238 | # parser.add_argument('--agedb_test_root', type=str, default='/media/sda/AgeDB-30/agedb30_align_112', help='agedb image root') 239 | # parser.add_argument('--agedb_file_list', type=str, default='/media/sda/AgeDB-30/agedb_30_pair.txt', help='agedb pair file list') 240 | # parser.add_argument('--cfpfp_test_root', type=str, default='/media/sda/CFP-FP/cfp_fp_aligned_112', help='agedb image root') 241 | # parser.add_argument('--cfpfp_file_list', type=str, default='/media/sda/CFP-FP/cfp_fp_pair.txt', 
help='agedb pair file list') 242 | 243 | parser.add_argument('--backbone', type=str, default='SERes152_IR', help='MobileFace, Res50_IR, SERes50_IR, Res100_IR, SERes100_IR, Attention_56, Attention_92') 244 | parser.add_argument('--margin_type', type=str, default='ArcFace', help='ArcFace, CosFace, SphereFace, MultiMargin, Softmax') 245 | parser.add_argument('--feature_dim', type=int, default=512, help='feature dimension, 128 or 512') 246 | parser.add_argument('--scale_size', type=float, default=32.0, help='scale size') 247 | parser.add_argument('--batch_size', type=int, default=64, help='batch size') 248 | parser.add_argument('--total_epoch', type=int, default=18, help='total epochs') 249 | 250 | parser.add_argument('--save_freq', type=int, default=3000, help='save frequency') 251 | parser.add_argument('--test_freq', type=int, default=3000, help='test frequency') 252 | parser.add_argument('--resume', type=int, default=False, help='resume model') 253 | parser.add_argument('--net_path', type=str, default='', help='resume model') 254 | parser.add_argument('--margin_path', type=str, default='', help='resume model') 255 | parser.add_argument('--save_dir', type=str, default='../model', help='model save dir') 256 | parser.add_argument('--model_pre', type=str, default='SERes152_IR_', help='model prefix') 257 | parser.add_argument('--gpus', type=str, default='0', help='model prefix') 258 | 259 | args = parser.parse_args() 260 | 261 | train(args) 262 | 263 | 264 | -------------------------------------------------------------------------------- /wu/train_softmax.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # encoding: utf-8 3 | ''' 4 | @author: wujiyang 5 | @contact: wujiyang@hust.edu.cn 6 | @file: train_softmax.py 7 | @time: 2019/1/7 8:33 8 | @desc: original softmax training with Casia-Webface 9 | ''' 10 | ''' 11 | Pleause use the train.py for your training process. 
12 | ''' 13 | 14 | import os 15 | import torch.utils.data 16 | from torch.nn import DataParallel 17 | from datetime import datetime 18 | from backbone.mobilefacenet import MobileFaceNet 19 | from backbone.resnet import ResNet50, ResNet101 20 | from backbone.arcfacenet import SEResNet_IR 21 | from backbone.spherenet import SphereNet 22 | from margin.ArcMarginProduct import ArcMarginProduct 23 | from margin.InnerProduct import InnerProduct 24 | from utils.visualize import Visualizer 25 | from utils.logging import init_log 26 | from dataset.casia_webface import CASIAWebFace 27 | from dataset.lfw import LFW 28 | from dataset.agedb import AgeDB30 29 | from dataset.cfp import CFP_FP 30 | from torch.optim import lr_scheduler 31 | import torch.optim as optim 32 | import time 33 | from eval_lfw import evaluation_10_fold, getFeatureFromTorch 34 | import numpy as np 35 | import torchvision.transforms as transforms 36 | import argparse 37 | 38 | def train(args): 39 | # gpu init 40 | multi_gpus = False 41 | if len(args.gpus.split(',')) > 1: 42 | multi_gpus = True 43 | os.environ['CUDA_VISIBLE_DEVICES'] = args.gpus 44 | device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') 45 | 46 | # log init 47 | save_dir = os.path.join(args.save_dir, args.model_pre + args.backbone.upper() + '_' + datetime.now().strftime('%Y%m%d_%H%M%S')) 48 | if os.path.exists(save_dir): 49 | raise NameError('model dir exists!') 50 | os.makedirs(save_dir) 51 | logging = init_log(save_dir) 52 | _print = logging.info 53 | 54 | # dataset loader 55 | transform = transforms.Compose([ 56 | transforms.ToTensor(), # range [0, 255] -> [0.0,1.0] 57 | transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)) # range [0.0, 1.0] -> [-1.0,1.0] 58 | ]) 59 | # validation dataset 60 | trainset = CASIAWebFace(args.train_root, args.train_file_list, transform=transform) 61 | trainloader = torch.utils.data.DataLoader(trainset, batch_size=args.batch_size, 62 | shuffle=True, num_workers=8, drop_last=False) 63 | # test dataset 64 | lfwdataset = LFW(args.lfw_test_root, args.lfw_file_list, transform=transform) 65 | lfwloader = torch.utils.data.DataLoader(lfwdataset, batch_size=128, 66 | shuffle=False, num_workers=4, drop_last=False) 67 | agedbdataset = AgeDB30(args.agedb_test_root, args.agedb_file_list, transform=transform) 68 | agedbloader = torch.utils.data.DataLoader(agedbdataset, batch_size=128, 69 | shuffle=False, num_workers=4, drop_last=False) 70 | cfpfpdataset = CFP_FP(args.cfpfp_test_root, args.cfpfp_file_list, transform=transform) 71 | cfpfploader = torch.utils.data.DataLoader(cfpfpdataset, batch_size=128, 72 | shuffle=False, num_workers=4, drop_last=False) 73 | 74 | # define backbone and margin layer 75 | if args.backbone == 'MobileFace': 76 | net = MobileFaceNet() 77 | elif args.backbone == 'Res50': 78 | net = ResNet50() 79 | elif args.backbone == 'Res101': 80 | net = ResNet101() 81 | elif args.backbone == 'Res50_IR': 82 | net = SEResNet_IR(50, feature_dim=args.feature_dim, mode='ir') 83 | elif args.backbone == 'SERes50_IR': 84 | net = SEResNet_IR(50, feature_dim=args.feature_dim, mode='se_ir') 85 | elif args.backbone == 'SphereNet': 86 | net = SphereNet(num_layers=64, feature_dim=args.feature_dim) 87 | else: 88 | print(args.backbone, ' is not available!') 89 | 90 | if args.margin_type == 'ArcFace': 91 | margin = ArcMarginProduct(args.feature_dim, trainset.class_nums, s=args.scale_size) 92 | elif args.margin_type == 'CosFace': 93 | pass 94 | elif args.margin_type == 'SphereFace': 95 | pass 96 | elif args.margin_type == 
'InnerProduct': 97 | margin = InnerProduct(args.feature_dim, trainset.class_nums) 98 | else: 99 | print(args.margin_type, 'is not available!') 100 | 101 | if args.resume: 102 | print('resume the model parameters from: ', args.net_path, args.margin_path) 103 | net.load_state_dict(torch.load(args.net_path)['net_state_dict']) 104 | margin.load_state_dict(torch.load(args.margin_path)['net_state_dict']) 105 | 106 | # define optimizers for different layer 107 | 108 | criterion_classi = torch.nn.CrossEntropyLoss().to(device) 109 | optimizer_classi = optim.SGD([ 110 | {'params': net.parameters(), 'weight_decay': 5e-4}, 111 | {'params': margin.parameters(), 'weight_decay': 5e-4} 112 | ], lr=0.1, momentum=0.9, nesterov=True) 113 | scheduler_classi = lr_scheduler.MultiStepLR(optimizer_classi, milestones=[20, 35, 45], gamma=0.1) 114 | 115 | if multi_gpus: 116 | net = DataParallel(net).to(device) 117 | margin = DataParallel(margin).to(device) 118 | else: 119 | net = net.to(device) 120 | margin = margin.to(device) 121 | 122 | best_lfw_acc = 0.0 123 | best_lfw_iters = 0 124 | best_agedb30_acc = 0.0 125 | best_agedb30_iters = 0 126 | best_cfp_fp_acc = 0.0 127 | best_cfp_fp_iters = 0 128 | total_iters = 0 129 | vis = Visualizer(env='softmax_train') 130 | for epoch in range(1, args.total_epoch + 1): 131 | scheduler_classi.step() 132 | # train model 133 | _print('Train Epoch: {}/{} ...'.format(epoch, args.total_epoch)) 134 | net.train() 135 | 136 | since = time.time() 137 | for data in trainloader: 138 | img, label = data[0].to(device), data[1].to(device) 139 | feature = net(img) 140 | output = margin(feature) 141 | loss_classi = criterion_classi(output, label) 142 | total_loss = loss_classi 143 | 144 | optimizer_classi.zero_grad() 145 | total_loss.backward() 146 | optimizer_classi.step() 147 | 148 | total_iters += 1 149 | # print train information 150 | if total_iters % 100 == 0: 151 | #current training accuracy 152 | _, predict = torch.max(output.data, 1) 153 | total = label.size(0) 154 | correct = (np.array(predict) == np.array(label.data)).sum() 155 | time_cur = (time.time() - since) / 100 156 | since = time.time() 157 | vis.plot_curves({'train loss': loss_classi.item()}, iters=total_iters, title='train loss', xlabel='iters', ylabel='train loss') 158 | vis.plot_curves({'train accuracy': correct/total}, iters=total_iters, title='train accuracy', xlabel='iters', ylabel='train accuracy') 159 | print("Iters: {:0>6d}/[{:0>2d}], loss_classi: {:.4f}, train_accuracy: {:.4f}, time: {:.2f} s/iter, learning rate: {}".format(total_iters, 160 | epoch, 161 | loss_classi.item(), 162 | correct/total, 163 | time_cur, 164 | scheduler_classi.get_lr()[ 165 | 0])) 166 | # save model 167 | if total_iters % args.save_freq == 0: 168 | msg = 'Saving checkpoint: {}'.format(total_iters) 169 | _print(msg) 170 | if multi_gpus: 171 | net_state_dict = net.module.state_dict() 172 | margin_state_dict = margin.module.state_dict() 173 | else: 174 | net_state_dict = net.state_dict() 175 | margin_state_dict = margin.state_dict() 176 | 177 | if not os.path.exists(save_dir): 178 | os.mkdir(save_dir) 179 | torch.save({ 180 | 'iters': total_iters, 181 | 'net_state_dict': net_state_dict}, 182 | os.path.join(save_dir, 'Iter_%06d_net.ckpt' % total_iters)) 183 | torch.save({ 184 | 'iters': total_iters, 185 | 'net_state_dict': margin_state_dict}, 186 | os.path.join(save_dir, 'Iter_%06d_margin.ckpt' % total_iters)) 187 | 188 | # test accuracy 189 | if total_iters % args.test_freq == 0: 190 | # test model on lfw 191 | net.eval() 192 | 
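# with the backbone in eval mode, extract features on LFW, AgeDB-30 and CFP-FP and score them
# with the 10-fold verification protocol; the best accuracy and its iteration are tracked below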
getFeatureFromTorch('./result/cur_lfw_result.mat', net, device, lfwdataset, lfwloader) 193 | lfw_accs = evaluation_10_fold('./result/cur_lfw_result.mat') 194 | _print('LFW Ave Accuracy: {:.4f}'.format(np.mean(lfw_accs) * 100)) 195 | if best_lfw_acc < np.mean(lfw_accs) * 100: 196 | best_lfw_acc = np.mean(lfw_accs) * 100 197 | best_lfw_iters = total_iters 198 | # test model on AgeDB30 199 | getFeatureFromTorch('./result/cur_agedb30_result.mat', net, device, agedbdataset, agedbloader) 200 | age_accs = evaluation_10_fold('./result/cur_agedb30_result.mat') 201 | _print('AgeDB-30 Ave Accuracy: {:.4f}'.format(np.mean(age_accs) * 100)) 202 | if best_agedb30_acc < np.mean(age_accs) * 100: 203 | best_agedb30_acc = np.mean(age_accs) * 100 204 | best_agedb30_iters = total_iters 205 | # test model on CFP-FP 206 | getFeatureFromTorch('./result/cur_cfpfp_result.mat', net, device, cfpfpdataset, cfpfploader) 207 | cfp_accs = evaluation_10_fold('./result/cur_cfpfp_result.mat') 208 | _print('CFP-FP Ave Accuracy: {:.4f}'.format(np.mean(cfp_accs) * 100)) 209 | if best_cfp_fp_acc < np.mean(cfp_accs) * 100: 210 | best_cfp_fp_acc = np.mean(cfp_accs) * 100 211 | best_cfp_fp_iters = total_iters 212 | _print('Current Best Accuracy: LFW: {:.4f} in iters: {}, AgeDB-30: {:.4f} in iters: {} and CFP-FP: {:.4f} in iters: {}'.format( 213 | best_lfw_acc, best_lfw_iters, best_agedb30_acc, best_agedb30_iters, best_cfp_fp_acc, best_cfp_fp_iters)) 214 | vis.plot_curves({'lfw': np.mean(lfw_accs), 'agedb-30': np.mean(age_accs), 'cfp-fp': np.mean(cfp_accs)}, iters=total_iters, title='test accuracy', xlabel='iters', ylabel='test accuracy') 215 | net.train() 216 | 217 | _print('Finally Best Accuracy: LFW: {:.4f} in iters: {}, AgeDB-30: {:.4f} in iters: {} and CFP-FP: {:.4f} in iters: {}'.format( 218 | best_lfw_acc, best_lfw_iters, best_agedb30_acc, best_agedb30_iters, best_cfp_fp_acc, best_cfp_fp_iters)) 219 | print('finishing training') 220 | 221 | 222 | if __name__ == '__main__': 223 | parser = argparse.ArgumentParser(description='PyTorch for deep face recognition') 224 | parser.add_argument('--train_root', type=str, default='/media/ramdisk/webface_align_112', help='train image root') 225 | parser.add_argument('--train_file_list', type=str, default='/media/ramdisk/webface_align_train.list', help='train list') 226 | parser.add_argument('--lfw_test_root', type=str, default='/media/ramdisk/lfw_align_112', help='lfw image root') 227 | parser.add_argument('--lfw_file_list', type=str, default='/media/ramdisk/pairs.txt', help='lfw pair file list') 228 | parser.add_argument('--agedb_test_root', type=str, default='/media/sda/AgeDB-30/agedb30_align_112', help='agedb image root') 229 | parser.add_argument('--agedb_file_list', type=str, default='/media/sda/AgeDB-30/agedb_30_pair.txt', help='agedb pair file list') 230 | parser.add_argument('--cfpfp_test_root', type=str, default='/media/sda/CFP-FP/cfp_fp_aligned_112', help='agedb image root') 231 | parser.add_argument('--cfpfp_file_list', type=str, default='/media/sda/CFP-FP/cfp_fp_pair.txt', help='agedb pair file list') 232 | 233 | parser.add_argument('--backbone', type=str, default='MobileFace', help='MobileFace, Res50, Res101, Res50_IR, SERes50_IR, SphereNet') 234 | parser.add_argument('--margin_type', type=str, default='InnerProduct', help='InnerProduct, ArcFace, CosFace, SphereFace') 235 | parser.add_argument('--feature_dim', type=int, default=128, help='feature dimension, 128 or 512') 236 | parser.add_argument('--scale_size', type=float, default=32.0, help='scale size') 237 | 
parser.add_argument('--batch_size', type=int, default=256, help='batch size') 238 | parser.add_argument('--total_epoch', type=int, default=50, help='total epochs') 239 | parser.add_argument('--weight_center', type=float, default=1.0, help='center loss weight') 240 | 241 | parser.add_argument('--save_freq', type=int, default=2000, help='save frequency') 242 | parser.add_argument('--test_freq', type=int, default=2000, help='test frequency') 243 | parser.add_argument('--resume', type=int, default=False, help='resume model') 244 | parser.add_argument('--net_path', type=str, default='', help='resume model') 245 | parser.add_argument('--margin_path', type=str, default='', help='resume model') 246 | parser.add_argument('--save_dir', type=str, default='./model', help='model save dir') 247 | parser.add_argument('--model_pre', type=str, default='Softmax_', help='model prefix') 248 | parser.add_argument('--gpus', type=str, default='2,3', help='model prefix') 249 | 250 | args = parser.parse_args() 251 | 252 | train(args) 253 | 254 | 255 | -------------------------------------------------------------------------------- /backbone/attention.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # encoding: utf-8 3 | ''' 4 | @author: wujiyang 5 | @contact: wujiyang@hust.edu.cn 6 | @file: attention.py 7 | @time: 2019/2/14 14:12 8 | @desc: Residual Attention Network for Image Classification, CVPR 2017. 9 | Attention 56 and Attention 92. 10 | ''' 11 | 12 | 13 | import torch 14 | import torch.nn as nn 15 | import numpy as np 16 | 17 | class Flatten(nn.Module): 18 | def forward(self, input): 19 | return input.view(input.size(0), -1) 20 | 21 | class ResidualBlock(nn.Module): 22 | 23 | def __init__(self, in_channel, out_channel, stride=1): 24 | super(ResidualBlock, self).__init__() 25 | self.in_channel = in_channel 26 | self.out_channel = out_channel 27 | self.stride = stride 28 | 29 | self.res_bottleneck = nn.Sequential(nn.BatchNorm2d(in_channel), 30 | nn.ReLU(inplace=True), 31 | nn.Conv2d(in_channel, out_channel//4, 1, 1, bias=False), 32 | nn.BatchNorm2d(out_channel//4), 33 | nn.ReLU(inplace=True), 34 | nn.Conv2d(out_channel//4, out_channel//4, 3, stride, padding=1, bias=False), 35 | nn.BatchNorm2d(out_channel//4), 36 | nn.ReLU(inplace=True), 37 | nn.Conv2d(out_channel//4, out_channel, 1, 1, bias=False)) 38 | self.shortcut = nn.Conv2d(in_channel, out_channel, 1, stride, bias=False) 39 | 40 | def forward(self, x): 41 | res = x 42 | out = self.res_bottleneck(x) 43 | if self.in_channel != self.out_channel or self.stride != 1: 44 | res = self.shortcut(x) 45 | 46 | out += res 47 | return out 48 | 49 | class AttentionModule_stage1(nn.Module): 50 | 51 | # input size is 56*56 52 | def __init__(self, in_channel, out_channel, size1=(56, 56), size2=(28, 28), size3=(14, 14)): 53 | super(AttentionModule_stage1, self).__init__() 54 | self.share_residual_block = ResidualBlock(in_channel, out_channel) 55 | self.trunk_branches = nn.Sequential(ResidualBlock(in_channel, out_channel), 56 | ResidualBlock(in_channel, out_channel)) 57 | 58 | self.mpool1 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) 59 | self.mask_block1 = ResidualBlock(in_channel, out_channel) 60 | self.skip_connect1 = ResidualBlock(in_channel, out_channel) 61 | 62 | self.mpool2 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) 63 | self.mask_block2 = ResidualBlock(in_channel, out_channel) 64 | self.skip_connect2 = ResidualBlock(in_channel, out_channel) 65 | 66 | self.mpool3 = 
nn.MaxPool2d(kernel_size=3, stride=2, padding=1) 67 | self.mask_block3 = nn.Sequential(ResidualBlock(in_channel, out_channel), 68 | ResidualBlock(in_channel, out_channel)) 69 | 70 | self.interpolation3 = nn.UpsamplingBilinear2d(size=size3) 71 | self.mask_block4 = ResidualBlock(in_channel, out_channel) 72 | 73 | self.interpolation2 = nn.UpsamplingBilinear2d(size=size2) 74 | self.mask_block5 = ResidualBlock(in_channel, out_channel) 75 | 76 | self.interpolation1 = nn.UpsamplingBilinear2d(size=size1) 77 | self.mask_block6 = nn.Sequential(nn.BatchNorm2d(out_channel), 78 | nn.ReLU(inplace=True), 79 | nn.Conv2d(out_channel, out_channel, 1, 1, bias=False), 80 | nn.BatchNorm2d(out_channel), 81 | nn.ReLU(inplace=True), 82 | nn.Conv2d(out_channel, out_channel, 1, 1, bias=False), 83 | nn.Sigmoid()) 84 | 85 | self.last_block = ResidualBlock(in_channel, out_channel) 86 | 87 | def forward(self, x): 88 | x = self.share_residual_block(x) 89 | out_trunk = self.trunk_branches(x) 90 | 91 | out_pool1 = self.mpool1(x) 92 | out_block1 = self.mask_block1(out_pool1) 93 | out_skip_connect1 = self.skip_connect1(out_block1) 94 | 95 | out_pool2 = self.mpool2(out_block1) 96 | out_block2 = self.mask_block2(out_pool2) 97 | out_skip_connect2 = self.skip_connect2(out_block2) 98 | 99 | out_pool3 = self.mpool3(out_block2) 100 | out_block3 = self.mask_block3(out_pool3) 101 | # 102 | out_inter3 = self.interpolation3(out_block3) + out_block2 103 | out = out_inter3 + out_skip_connect2 104 | out_block4 = self.mask_block4(out) 105 | 106 | out_inter2 = self.interpolation2(out_block4) + out_block1 107 | out = out_inter2 + out_skip_connect1 108 | out_block5 = self.mask_block5(out) 109 | 110 | out_inter1 = self.interpolation1(out_block5) + out_trunk 111 | out_block6 = self.mask_block6(out_inter1) 112 | 113 | out = (1 + out_block6) + out_trunk 114 | out_last = self.last_block(out) 115 | 116 | return out_last 117 | 118 | class AttentionModule_stage2(nn.Module): 119 | 120 | # input image size is 28*28 121 | def __init__(self, in_channels, out_channels, size1=(28, 28), size2=(14, 14)): 122 | super(AttentionModule_stage2, self).__init__() 123 | self.first_residual_blocks = ResidualBlock(in_channels, out_channels) 124 | 125 | self.trunk_branches = nn.Sequential( 126 | ResidualBlock(in_channels, out_channels), 127 | ResidualBlock(in_channels, out_channels) 128 | ) 129 | 130 | self.mpool1 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) 131 | self.softmax1_blocks = ResidualBlock(in_channels, out_channels) 132 | self.skip1_connection_residual_block = ResidualBlock(in_channels, out_channels) 133 | 134 | self.mpool2 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) 135 | self.softmax2_blocks = nn.Sequential( 136 | ResidualBlock(in_channels, out_channels), 137 | ResidualBlock(in_channels, out_channels) 138 | ) 139 | 140 | self.interpolation2 = nn.UpsamplingBilinear2d(size=size2) 141 | self.softmax3_blocks = ResidualBlock(in_channels, out_channels) 142 | self.interpolation1 = nn.UpsamplingBilinear2d(size=size1) 143 | self.softmax4_blocks = nn.Sequential( 144 | nn.BatchNorm2d(out_channels), 145 | nn.ReLU(inplace=True), 146 | nn.Conv2d(out_channels, out_channels, kernel_size=1, stride=1, bias=False), 147 | nn.BatchNorm2d(out_channels), 148 | nn.ReLU(inplace=True), 149 | nn.Conv2d(out_channels, out_channels, kernel_size=1, stride=1, bias=False), 150 | nn.Sigmoid() 151 | ) 152 | self.last_blocks = ResidualBlock(in_channels, out_channels) 153 | 154 | def forward(self, x): 155 | x = self.first_residual_blocks(x) 156 | out_trunk = 
self.trunk_branches(x) 157 | out_mpool1 = self.mpool1(x) 158 | out_softmax1 = self.softmax1_blocks(out_mpool1) 159 | out_skip1_connection = self.skip1_connection_residual_block(out_softmax1) 160 | 161 | out_mpool2 = self.mpool2(out_softmax1) 162 | out_softmax2 = self.softmax2_blocks(out_mpool2) 163 | 164 | out_interp2 = self.interpolation2(out_softmax2) + out_softmax1 165 | out = out_interp2 + out_skip1_connection 166 | 167 | out_softmax3 = self.softmax3_blocks(out) 168 | out_interp1 = self.interpolation1(out_softmax3) + out_trunk 169 | out_softmax4 = self.softmax4_blocks(out_interp1) 170 | out = (1 + out_softmax4) * out_trunk 171 | out_last = self.last_blocks(out) 172 | 173 | return out_last 174 | 175 | class AttentionModule_stage3(nn.Module): 176 | 177 | # input image size is 14*14 178 | def __init__(self, in_channels, out_channels, size1=(14, 14)): 179 | super(AttentionModule_stage3, self).__init__() 180 | self.first_residual_blocks = ResidualBlock(in_channels, out_channels) 181 | 182 | self.trunk_branches = nn.Sequential( 183 | ResidualBlock(in_channels, out_channels), 184 | ResidualBlock(in_channels, out_channels) 185 | ) 186 | 187 | self.mpool1 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) 188 | self.softmax1_blocks = nn.Sequential( 189 | ResidualBlock(in_channels, out_channels), 190 | ResidualBlock(in_channels, out_channels) 191 | ) 192 | 193 | self.interpolation1 = nn.UpsamplingBilinear2d(size=size1) 194 | 195 | self.softmax2_blocks = nn.Sequential( 196 | nn.BatchNorm2d(out_channels), 197 | nn.ReLU(inplace=True), 198 | nn.Conv2d(out_channels, out_channels, kernel_size=1, stride=1, bias=False), 199 | nn.BatchNorm2d(out_channels), 200 | nn.ReLU(inplace=True), 201 | nn.Conv2d(out_channels, out_channels, kernel_size=1, stride=1, bias=False), 202 | nn.Sigmoid() 203 | ) 204 | 205 | self.last_blocks = ResidualBlock(in_channels, out_channels) 206 | 207 | def forward(self, x): 208 | x = self.first_residual_blocks(x) 209 | out_trunk = self.trunk_branches(x) 210 | out_mpool1 = self.mpool1(x) 211 | out_softmax1 = self.softmax1_blocks(out_mpool1) 212 | 213 | out_interp1 = self.interpolation1(out_softmax1) + out_trunk 214 | out_softmax2 = self.softmax2_blocks(out_interp1) 215 | out = (1 + out_softmax2) * out_trunk 216 | out_last = self.last_blocks(out) 217 | 218 | return out_last 219 | 220 | class ResidualAttentionNet_56(nn.Module): 221 | 222 | # for input size 112 223 | def __init__(self, feature_dim=512, drop_ratio=0.4): 224 | super(ResidualAttentionNet_56, self).__init__() 225 | self.conv1 = nn.Sequential( 226 | nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias = False), 227 | nn.BatchNorm2d(64), 228 | nn.ReLU(inplace=True) 229 | ) 230 | self.mpool1 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) 231 | self.residual_block1 = ResidualBlock(64, 256) 232 | self.attention_module1 = AttentionModule_stage1(256, 256) 233 | self.residual_block2 = ResidualBlock(256, 512, 2) 234 | self.attention_module2 = AttentionModule_stage2(512, 512) 235 | self.residual_block3 = ResidualBlock(512, 512, 2) 236 | self.attention_module3 = AttentionModule_stage3(512, 512) 237 | self.residual_block4 = ResidualBlock(512, 512, 2) 238 | self.residual_block5 = ResidualBlock(512, 512) 239 | self.residual_block6 = ResidualBlock(512, 512) 240 | self.output_layer = nn.Sequential(nn.BatchNorm2d(512), 241 | nn.Dropout(drop_ratio), 242 | Flatten(), 243 | nn.Linear(512 * 7 * 7, feature_dim), 244 | nn.BatchNorm1d(feature_dim)) 245 | 246 | def forward(self, x): 247 | out = self.conv1(x) 248 | out = self.mpool1(out) 
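# the stride-2 max pool brings the 112x112 input down to 56x56; the attention stages below then
# operate at 56x56, 28x28 and 14x14, and the final stride-2 residual blocks reach 7x7 before the
# 512*7*7 linear output layer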
249 | # print(out.data) 250 | out = self.residual_block1(out) 251 | out = self.attention_module1(out) 252 | out = self.residual_block2(out) 253 | out = self.attention_module2(out) 254 | out = self.residual_block3(out) 255 | # print(out.data) 256 | out = self.attention_module3(out) 257 | out = self.residual_block4(out) 258 | out = self.residual_block5(out) 259 | out = self.residual_block6(out) 260 | out = self.output_layer(out) 261 | 262 | return out 263 | 264 | class ResidualAttentionNet_92(nn.Module): 265 | 266 | # for input size 112 267 | def __init__(self, feature_dim=512, drop_ratio=0.4): 268 | super(ResidualAttentionNet_92, self).__init__() 269 | self.conv1 = nn.Sequential( 270 | nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias = False), 271 | nn.BatchNorm2d(64), 272 | nn.ReLU(inplace=True) 273 | ) 274 | self.mpool1 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) 275 | self.residual_block1 = ResidualBlock(64, 256) 276 | self.attention_module1 = AttentionModule_stage1(256, 256) 277 | self.residual_block2 = ResidualBlock(256, 512, 2) 278 | self.attention_module2 = AttentionModule_stage2(512, 512) 279 | self.attention_module2_2 = AttentionModule_stage2(512, 512) # tbq add 280 | self.residual_block3 = ResidualBlock(512, 1024, 2) 281 | self.attention_module3 = AttentionModule_stage3(1024, 1024) 282 | self.attention_module3_2 = AttentionModule_stage3(1024, 1024) # tbq add 283 | self.attention_module3_3 = AttentionModule_stage3(1024, 1024) # tbq add 284 | self.residual_block4 = ResidualBlock(1024, 2048, 2) 285 | self.residual_block5 = ResidualBlock(2048, 2048) 286 | self.residual_block6 = ResidualBlock(2048, 2048) 287 | self.output_layer = nn.Sequential(nn.BatchNorm2d(2048), 288 | nn.Dropout(drop_ratio), 289 | Flatten(), 290 | nn.Linear(2048 * 7 * 7, feature_dim), 291 | nn.BatchNorm1d(feature_dim)) 292 | 293 | def forward(self, x): 294 | out = self.conv1(x) 295 | out = self.mpool1(out) 296 | # print(out.data) 297 | out = self.residual_block1(out) 298 | out = self.attention_module1(out) 299 | out = self.residual_block2(out) 300 | out = self.attention_module2(out) 301 | out = self.attention_module2_2(out) 302 | out = self.residual_block3(out) 303 | # print(out.data) 304 | out = self.attention_module3(out) 305 | out = self.attention_module3_2(out) 306 | out = self.attention_module3_3(out) 307 | out = self.residual_block4(out) 308 | out = self.residual_block5(out) 309 | out = self.residual_block6(out) 310 | out = self.output_layer(out) 311 | 312 | return out 313 | 314 | 315 | if __name__ == '__main__': 316 | input = torch.Tensor(2, 3, 112, 112) 317 | net = ResidualAttentionNet_56() 318 | print(net) 319 | 320 | x = net(input) 321 | print(x.shape) -------------------------------------------------------------------------------- /wu/train_center.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # encoding: utf-8 3 | ''' 4 | @author: wujiyang 5 | @contact: wujiyang@hust.edu.cn 6 | @file: train_center.py 7 | @time: 2019/1/3 11:12 8 | @desc: train script for my attention net and center loss 9 | ''' 10 | 11 | ''' 12 | Pleause use the train.py for your training process. 
13 | ''' 14 | 15 | import os 16 | import torch.utils.data 17 | from torch.nn import DataParallel 18 | from datetime import datetime 19 | from backbone.mobilefacenet import MobileFaceNet 20 | from backbone.resnet import ResNet50, ResNet101 21 | from backbone.arcfacenet import SEResNet_IR 22 | from backbone.spherenet import SphereNet 23 | from margin.ArcMarginProduct import ArcMarginProduct 24 | from margin.InnerProduct import InnerProduct 25 | from lossfunctions.centerloss import CenterLoss 26 | from utils.logging import init_log 27 | from dataset.casia_webface import CASIAWebFace 28 | from dataset.lfw import LFW 29 | from dataset.agedb import AgeDB30 30 | from dataset.cfp import CFP_FP 31 | from utils.visualize import Visualizer 32 | from torch.optim import lr_scheduler 33 | import torch.optim as optim 34 | import time 35 | from eval_lfw import evaluation_10_fold, getFeatureFromTorch 36 | import numpy as np 37 | import torchvision.transforms as transforms 38 | import argparse 39 | 40 | 41 | def train(args): 42 | # gpu init 43 | multi_gpus = False 44 | if len(args.gpus.split(',')) > 1: 45 | multi_gpus = True 46 | os.environ['CUDA_VISIBLE_DEVICES'] = args.gpus 47 | device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') 48 | 49 | # log init 50 | save_dir = os.path.join(args.save_dir, args.model_pre + args.backbone.upper() + '_' + datetime.now().strftime('%Y%m%d_%H%M%S')) 51 | if os.path.exists(save_dir): 52 | raise NameError('model dir exists!') 53 | os.makedirs(save_dir) 54 | logging = init_log(save_dir) 55 | _print = logging.info 56 | 57 | # dataset loader 58 | transform = transforms.Compose([ 59 | transforms.ToTensor(), # range [0, 255] -> [0.0,1.0] 60 | transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)) # range [0.0, 1.0] -> [-1.0,1.0] 61 | ]) 62 | # train dataset 63 | trainset = CASIAWebFace(args.train_root, args.train_file_list, transform=transform) 64 | trainloader = torch.utils.data.DataLoader(trainset, batch_size=args.batch_size, 65 | shuffle=True, num_workers=8, drop_last=False) 66 | # test dataset 67 | lfwdataset = LFW(args.lfw_test_root, args.lfw_file_list, transform=transform) 68 | lfwloader = torch.utils.data.DataLoader(lfwdataset, batch_size=128, 69 | shuffle=False, num_workers=4, drop_last=False) 70 | agedbdataset = AgeDB30(args.agedb_test_root, args.agedb_file_list, transform=transform) 71 | agedbloader = torch.utils.data.DataLoader(agedbdataset, batch_size=128, 72 | shuffle=False, num_workers=4, drop_last=False) 73 | cfpfpdataset = CFP_FP(args.cfpfp_test_root, args.cfpfp_file_list, transform=transform) 74 | cfpfploader = torch.utils.data.DataLoader(cfpfpdataset, batch_size=128, 75 | shuffle=False, num_workers=4, drop_last=False) 76 | 77 | # define backbone and margin layer 78 | if args.backbone == 'MobileFace': 79 | net = MobileFaceNet() 80 | elif args.backbone == 'Res50': 81 | net = ResNet50() 82 | elif args.backbone == 'Res101': 83 | net = ResNet101() 84 | elif args.backbone == 'Res50_IR': 85 | net = SEResNet_IR(50, feature_dim=args.feature_dim, mode='ir') 86 | elif args.backbone == 'SERes50_IR': 87 | net = SEResNet_IR(50, feature_dim=args.feature_dim, mode='se_ir') 88 | elif args.backbone == 'SphereNet': 89 | net = SphereNet(num_layers=64, feature_dim=args.feature_dim) 90 | else: 91 | print(args.backbone, ' is not available!') 92 | 93 | if args.margin_type == 'ArcFace': 94 | margin = ArcMarginProduct(args.feature_dim, trainset.class_nums, s=args.scale_size) 95 | elif args.margin_type == 'CosFace': 96 | pass 97 | elif args.margin_type ==
'SphereFace': 98 | pass 99 | elif args.margin_type == 'InnerProduct': 100 | margin = InnerProduct(args.feature_dim, trainset.class_nums) 101 | else: 102 | print(args.margin_type, 'is not available!') 103 | 104 | if args.resume: 105 | print('resume the model parameters from: ', args.net_path, args.margin_path) 106 | net.load_state_dict(torch.load(args.net_path)['net_state_dict']) 107 | margin.load_state_dict(torch.load(args.margin_path)['net_state_dict']) 108 | 109 | # define optimizers for different layers 110 | criterion_classi = torch.nn.CrossEntropyLoss().to(device) 111 | optimizer_classi = optim.SGD([ 112 | {'params': net.parameters(), 'weight_decay': 5e-4}, 113 | {'params': margin.parameters(), 'weight_decay': 5e-4} 114 | ], lr=0.1, momentum=0.9, nesterov=True) 115 | 116 | #criterion_center = CenterLoss(trainset.class_nums, args.feature_dim).to(device) 117 | #optimizer_center = optim.SGD(criterion_center.parameters(), lr=0.5) 118 | 119 | scheduler_classi = lr_scheduler.MultiStepLR(optimizer_classi, milestones=[25, 50, 65], gamma=0.1) 120 | 121 | if multi_gpus: 122 | net = DataParallel(net).to(device) 123 | margin = DataParallel(margin).to(device) 124 | else: 125 | net = net.to(device) 126 | margin = margin.to(device) 127 | 128 | best_lfw_acc = 0.0 129 | best_lfw_iters = 0 130 | best_agedb30_acc = 0.0 131 | best_agedb30_iters = 0 132 | best_cfp_fp_acc = 0.0 133 | best_cfp_fp_iters = 0 134 | total_iters = 0 135 | #vis = Visualizer(env='softmax_center_xavier') 136 | for epoch in range(1, args.total_epoch + 1): 137 | scheduler_classi.step() 138 | # train model 139 | _print('Train Epoch: {}/{} ...'.format(epoch, args.total_epoch)) 140 | net.train() 141 | 142 | since = time.time() 143 | for data in trainloader: 144 | img, label = data[0].to(device), data[1].to(device) 145 | feature = net(img) 146 | output = margin(feature) 147 | loss_classi = criterion_classi(output, label) 148 | #loss_center = criterion_center(feature, label) 149 | total_loss = loss_classi #+ loss_center * args.weight_center 150 | 151 | optimizer_classi.zero_grad() 152 | #optimizer_center.zero_grad() 153 | total_loss.backward() 154 | optimizer_classi.step() 155 | #optimizer_center.step() 156 | 157 | total_iters += 1 158 | # print train information 159 | if total_iters % 100 == 0: 160 | # current training accuracy 161 | _, predict = torch.max(output.data, 1) 162 | total = label.size(0) 163 | correct = (predict == label).sum().item() # count correct predictions on the device (avoids converting CUDA tensors to numpy) 164 | time_cur = (time.time() - since) / 100 165 | since = time.time() 166 | #vis.plot_curves({'softmax loss': loss_classi.item(), 'center loss': loss_center.item()}, iters=total_iters, title='train loss', xlabel='iters', ylabel='train loss') 167 | #vis.plot_curves({'train accuracy': correct / total}, iters=total_iters, title='train accuracy', xlabel='iters', ylabel='train accuracy') 168 | # the center loss terms are commented out above, so only the classification loss is reported here 169 | print("Iters: {:0>6d}/[{:0>2d}], loss_classi: {:.4f}, train_accuracy: {:.4f}, time: {:.2f} s/iter, learning rate: {}".format(total_iters, 170 | epoch, 171 | loss_classi.item(), 172 | correct/total, 173 | time_cur, 174 | scheduler_classi.get_lr()[ 175 | 0])) 176 | # save model 177 | if total_iters % args.save_freq == 0: 178 | msg = 'Saving checkpoint: {}'.format(total_iters) 179 | _print(msg) 180 | if multi_gpus: 181 | net_state_dict = net.module.state_dict() 182 | margin_state_dict = margin.module.state_dict() 183 | else: 184 | net_state_dict = net.state_dict() 185 | margin_state_dict = margin.state_dict() 186 | 187 | if not
os.path.exists(save_dir): 188 | os.mkdir(save_dir) 189 | torch.save({ 190 | 'iters': total_iters, 191 | 'net_state_dict': net_state_dict}, 192 | os.path.join(save_dir, 'Iter_%06d_net.ckpt' % total_iters)) 193 | torch.save({ 194 | 'iters': total_iters, 195 | 'net_state_dict': margin_state_dict}, 196 | os.path.join(save_dir, 'Iter_%06d_margin.ckpt' % total_iters)) 197 | #torch.save({ 198 | # 'iters': total_iters, 199 | # 'net_state_dict': criterion_center.state_dict()}, 200 | # os.path.join(save_dir, 'Iter_%06d_center.ckpt' % total_iters)) 201 | 202 | # test accuracy 203 | if total_iters % args.test_freq == 0: 204 | 205 | # test model on lfw 206 | net.eval() 207 | getFeatureFromTorch('./result/cur_lfw_result.mat', net, device, lfwdataset, lfwloader) 208 | lfw_accs = evaluation_10_fold('./result/cur_lfw_result.mat') 209 | _print('LFW Ave Accuracy: {:.4f}'.format(np.mean(lfw_accs) * 100)) 210 | if best_lfw_acc < np.mean(lfw_accs) * 100: 211 | best_lfw_acc = np.mean(lfw_accs) * 100 212 | best_lfw_iters = total_iters 213 | 214 | # test model on AgeDB30 215 | getFeatureFromTorch('./result/cur_agedb30_result.mat', net, device, agedbdataset, agedbloader) 216 | age_accs = evaluation_10_fold('./result/cur_agedb30_result.mat') 217 | _print('AgeDB-30 Ave Accuracy: {:.4f}'.format(np.mean(age_accs) * 100)) 218 | if best_agedb30_acc < np.mean(age_accs) * 100: 219 | best_agedb30_acc = np.mean(age_accs) * 100 220 | best_agedb30_iters = total_iters 221 | 222 | # test model on CFP-FP 223 | getFeatureFromTorch('./result/cur_cfpfp_result.mat', net, device, cfpfpdataset, cfpfploader) 224 | cfp_accs = evaluation_10_fold('./result/cur_cfpfp_result.mat') 225 | _print('CFP-FP Ave Accuracy: {:.4f}'.format(np.mean(cfp_accs) * 100)) 226 | if best_cfp_fp_acc < np.mean(cfp_accs) * 100: 227 | best_cfp_fp_acc = np.mean(cfp_accs) * 100 228 | best_cfp_fp_iters = total_iters 229 | _print('Current Best Accuracy: LFW: {:.4f} in iters: {}, AgeDB-30: {:.4f} in iters: {} and CFP-FP: {:.4f} in iters: {}'.format( 230 | best_lfw_acc, best_lfw_iters, best_agedb30_acc, best_agedb30_iters, best_cfp_fp_acc, best_cfp_fp_iters)) 231 | 232 | #vis.plot_curves({'lfw': np.mean(lfw_accs), 'agedb-30': np.mean(age_accs), 'cfp-fp': np.mean(cfp_accs)}, iters=total_iters, 233 | # title='test accuracy', xlabel='iters', ylabel='test accuracy') 234 | net.train() 235 | 236 | _print('Finally Best Accuracy: LFW: {:.4f} in iters: {}, AgeDB-30: {:.4f} in iters: {} and CFP-FP: {:.4f} in iters: {}'.format( 237 | best_lfw_acc, best_lfw_iters, best_agedb30_acc, best_agedb30_iters, best_cfp_fp_acc, best_cfp_fp_iters)) 238 | print('finishing training') 239 | 240 | 241 | if __name__ == '__main__': 242 | parser = argparse.ArgumentParser(description='PyTorch for deep face recognition') 243 | parser.add_argument('--train_root', type=str, default='/media/ramdisk/webface_align_112', help='train image root') 244 | parser.add_argument('--train_file_list', type=str, default='/media/ramdisk/webface_align_train.list', help='train list') 245 | parser.add_argument('--lfw_test_root', type=str, default='/media/ramdisk/lfw_align_112', help='lfw image root') 246 | parser.add_argument('--lfw_file_list', type=str, default='/media/ramdisk/pairs.txt', help='lfw pair file list') 247 | parser.add_argument('--agedb_test_root', type=str, default='/media/sda/AgeDB-30/agedb30_align_112', help='agedb image root') 248 | parser.add_argument('--agedb_file_list', type=str, default='/media/sda/AgeDB-30/agedb_30_pair.txt', help='agedb pair file list') 249 | 
parser.add_argument('--cfpfp_test_root', type=str, default='/media/sda/CFP-FP/cfp_fp_aligned_112', help='cfp-fp image root') 250 | parser.add_argument('--cfpfp_file_list', type=str, default='/media/sda/CFP-FP/cfp_fp_pair.txt', help='cfp-fp pair file list') 251 | 252 | parser.add_argument('--backbone', type=str, default='MobileFace', help='MobileFace, Res50, Res101, Res50_IR, SERes50_IR, SphereNet') 253 | parser.add_argument('--margin_type', type=str, default='InnerProduct', help='InnerProduct, ArcFace, CosFace, SphereFace') 254 | parser.add_argument('--feature_dim', type=int, default=128, help='feature dimension, 128 or 512') 255 | parser.add_argument('--scale_size', type=float, default=32.0, help='scale size') 256 | parser.add_argument('--batch_size', type=int, default=256, help='batch size') 257 | parser.add_argument('--total_epoch', type=int, default=80, help='total epochs') 258 | parser.add_argument('--weight_center', type=float, default=0.01, help='center loss weight') 259 | 260 | parser.add_argument('--save_freq', type=int, default=2000, help='save frequency') 261 | parser.add_argument('--test_freq', type=int, default=2000, help='test frequency') 262 | parser.add_argument('--resume', type=int, default=False, help='resume model') 263 | parser.add_argument('--net_path', type=str, default='', help='path of the backbone checkpoint to resume from') 264 | parser.add_argument('--margin_path', type=str, default='', help='path of the margin checkpoint to resume from') 265 | parser.add_argument('--save_dir', type=str, default='./model', help='model save dir') 266 | parser.add_argument('--model_pre', type=str, default='Softmax_Center_', help='model prefix') 267 | parser.add_argument('--gpus', type=str, default='0,1', help='gpu ids, e.g. 0,1') 268 | 269 | args = parser.parse_args() 270 | 271 | train(args) 272 | 273 | 274 | -------------------------------------------------------------------------------- /attack/log/64x64, alpha=0.2, 7models, 34epochs, lr=5.0e+05, batch_size=32.log (3.4962): -------------------------------------------------------------------------------- 1 | 2019-09-08 16:46:26 Model0, Epoch 00, Acc: 0.9986, Cos_Similarity: 0.8756, L2_dist: 0.0961 2 | 2019-09-08 16:46:29 Model1, Epoch 00, Acc: 0.9930, Cos_Similarity: 0.8488, L2_dist: 0.1364 3 | 2019-09-08 16:46:36 Model2, Epoch 00, Acc: 0.9846, Cos_Similarity: 0.8392, L2_dist: 0.1854 4 | 2019-09-08 16:46:48 Model3, Epoch 00, Acc: 0.9747, Cos_Similarity: 0.8192, L2_dist: 0.2241 5 | 2019-09-08 16:46:59 Model4, Epoch 00, Acc: 0.9452, Cos_Similarity: 0.7851, L2_dist: 0.2849 6 | 2019-09-08 16:47:11 Model5, Epoch 00, Acc: 0.9073, Cos_Similarity: 0.7474, L2_dist: 0.3411 7 | 2019-09-08 16:47:20 Model6, Epoch 00, Acc: 0.9242, Cos_Similarity: 0.7601, L2_dist: 0.3831 8 | 2019-09-08 16:47:28 Model0, Epoch 01, Acc: 0.8301, Cos_Similarity: 0.6751, L2_dist: 0.4437 9 | 2019-09-08 16:47:30 Model1, Epoch 01, Acc: 0.8469, Cos_Similarity: 0.6895, L2_dist: 0.4825 10 | 2019-09-08 16:47:37 Model2, Epoch 01, Acc: 0.7739, Cos_Similarity: 0.6138, L2_dist: 0.5417 11 | 2019-09-08 16:47:50 Model3, Epoch 01, Acc: 0.7753, Cos_Similarity: 0.6115, L2_dist: 0.5857 12 | 2019-09-08 16:48:01 Model4, Epoch 01, Acc: 0.6868, Cos_Similarity: 0.5483, L2_dist: 0.6449 13 | 2019-09-08 16:48:12 Model5, Epoch 01, Acc: 0.6433, Cos_Similarity: 0.5193, L2_dist: 0.6978 14 | 2019-09-08 16:48:22 Model6, Epoch 01, Acc: 0.6994, Cos_Similarity: 0.5780, L2_dist: 0.7388 15 | 2019-09-08 16:48:29 Model0, Epoch 02, Acc: 0.5632, Cos_Similarity: 0.4573, L2_dist: 0.7913 16 | 2019-09-08 16:48:32 Model1, Epoch 02, Acc: 0.6194, Cos_Similarity: 0.5206, L2_dist: 0.8289
17 | 2019-09-08 16:48:39 Model2, Epoch 02, Acc: 0.4677, Cos_Similarity: 0.4075, L2_dist: 0.8786 18 | 2019-09-08 16:48:51 Model3, Epoch 02, Acc: 0.5197, Cos_Similarity: 0.4338, L2_dist: 0.9184 19 | 2019-09-08 16:49:02 Model4, Epoch 02, Acc: 0.4017, Cos_Similarity: 0.3628, L2_dist: 0.9668 20 | 2019-09-08 16:49:14 Model5, Epoch 02, Acc: 0.3919, Cos_Similarity: 0.3403, L2_dist: 1.0105 21 | 2019-09-08 16:49:23 Model6, Epoch 02, Acc: 0.4747, Cos_Similarity: 0.4301, L2_dist: 1.0466 22 | 2019-09-08 16:49:31 Model0, Epoch 03, Acc: 0.3202, Cos_Similarity: 0.2946, L2_dist: 1.0900 23 | 2019-09-08 16:49:33 Model1, Epoch 03, Acc: 0.3961, Cos_Similarity: 0.3821, L2_dist: 1.1241 24 | 2019-09-08 16:49:40 Model2, Epoch 03, Acc: 0.2697, Cos_Similarity: 0.2542, L2_dist: 1.1651 25 | 2019-09-08 16:49:53 Model3, Epoch 03, Acc: 0.3287, Cos_Similarity: 0.2993, L2_dist: 1.1994 26 | 2019-09-08 16:50:04 Model4, Epoch 03, Acc: 0.2528, Cos_Similarity: 0.2218, L2_dist: 1.2387 27 | 2019-09-08 16:50:15 Model5, Epoch 03, Acc: 0.2500, Cos_Similarity: 0.2083, L2_dist: 1.2745 28 | 2019-09-08 16:50:25 Model6, Epoch 03, Acc: 0.3258, Cos_Similarity: 0.3176, L2_dist: 1.3064 29 | 2019-09-08 16:50:33 Model0, Epoch 04, Acc: 0.2219, Cos_Similarity: 0.1699, L2_dist: 1.3424 30 | 2019-09-08 16:50:35 Model1, Epoch 04, Acc: 0.2697, Cos_Similarity: 0.2756, L2_dist: 1.3733 31 | 2019-09-08 16:50:42 Model2, Epoch 04, Acc: 0.1770, Cos_Similarity: 0.1386, L2_dist: 1.4069 32 | 2019-09-08 16:50:54 Model3, Epoch 04, Acc: 0.2191, Cos_Similarity: 0.1945, L2_dist: 1.4372 33 | 2019-09-08 16:51:06 Model4, Epoch 04, Acc: 0.1573, Cos_Similarity: 0.1156, L2_dist: 1.4692 34 | 2019-09-08 16:51:17 Model5, Epoch 04, Acc: 0.1573, Cos_Similarity: 0.1055, L2_dist: 1.4990 35 | 2019-09-08 16:51:27 Model6, Epoch 04, Acc: 0.2331, Cos_Similarity: 0.2259, L2_dist: 1.5272 36 | 2019-09-08 16:51:34 Model0, Epoch 05, Acc: 0.1306, Cos_Similarity: 0.0706, L2_dist: 1.5575 37 | 2019-09-08 16:51:37 Model1, Epoch 05, Acc: 0.1896, Cos_Similarity: 0.1876, L2_dist: 1.5852 38 | 2019-09-08 16:51:44 Model2, Epoch 05, Acc: 0.1124, Cos_Similarity: 0.0475, L2_dist: 1.6134 39 | 2019-09-08 16:51:56 Model3, Epoch 05, Acc: 0.1447, Cos_Similarity: 0.1086, L2_dist: 1.6395 40 | 2019-09-08 16:52:07 Model4, Epoch 05, Acc: 0.0927, Cos_Similarity: 0.0301, L2_dist: 1.6663 41 | 2019-09-08 16:52:19 Model5, Epoch 05, Acc: 0.1025, Cos_Similarity: 0.0235, L2_dist: 1.6913 42 | 2019-09-08 16:52:28 Model6, Epoch 05, Acc: 0.1629, Cos_Similarity: 0.1525, L2_dist: 1.7168 43 | 2019-09-08 16:52:36 Model0, Epoch 06, Acc: 0.0843, Cos_Similarity: -0.0068, L2_dist: 1.7416 44 | 2019-09-08 16:52:39 Model1, Epoch 06, Acc: 0.1194, Cos_Similarity: 0.1194, L2_dist: 1.7666 45 | 2019-09-08 16:52:45 Model2, Epoch 06, Acc: 0.0730, Cos_Similarity: -0.0263, L2_dist: 1.7900 46 | 2019-09-08 16:52:58 Model3, Epoch 06, Acc: 0.0927, Cos_Similarity: 0.0396, L2_dist: 1.8125 47 | 2019-09-08 16:53:09 Model4, Epoch 06, Acc: 0.0618, Cos_Similarity: -0.0408, L2_dist: 1.8347 48 | 2019-09-08 16:53:20 Model5, Epoch 06, Acc: 0.0562, Cos_Similarity: -0.0438, L2_dist: 1.8556 49 | 2019-09-08 16:53:30 Model6, Epoch 06, Acc: 0.1081, Cos_Similarity: 0.0883, L2_dist: 1.8784 50 | 2019-09-08 16:53:38 Model0, Epoch 07, Acc: 0.0379, Cos_Similarity: -0.0684, L2_dist: 1.8989 51 | 2019-09-08 16:53:40 Model1, Epoch 07, Acc: 0.0843, Cos_Similarity: 0.0619, L2_dist: 1.9216 52 | 2019-09-08 16:53:47 Model2, Epoch 07, Acc: 0.0449, Cos_Similarity: -0.0862, L2_dist: 1.9411 53 | 2019-09-08 16:54:00 Model3, Epoch 07, Acc: 0.0618, Cos_Similarity: -0.0157, L2_dist: 
1.9609 54 | 2019-09-08 16:54:11 Model4, Epoch 07, Acc: 0.0365, Cos_Similarity: -0.0989, L2_dist: 1.9795 55 | 2019-09-08 16:54:22 Model5, Epoch 07, Acc: 0.0337, Cos_Similarity: -0.0961, L2_dist: 1.9970 56 | 2019-09-08 16:54:32 Model6, Epoch 07, Acc: 0.0829, Cos_Similarity: 0.0379, L2_dist: 2.0176 57 | 2019-09-08 16:54:39 Model0, Epoch 08, Acc: 0.0295, Cos_Similarity: -0.1206, L2_dist: 2.0351 58 | 2019-09-08 16:54:42 Model1, Epoch 08, Acc: 0.0646, Cos_Similarity: 0.0151, L2_dist: 2.0560 59 | 2019-09-08 16:54:49 Model2, Epoch 08, Acc: 0.0239, Cos_Similarity: -0.1364, L2_dist: 2.0723 60 | 2019-09-08 16:55:01 Model3, Epoch 08, Acc: 0.0463, Cos_Similarity: -0.0619, L2_dist: 2.0899 61 | 2019-09-08 16:55:12 Model4, Epoch 08, Acc: 0.0211, Cos_Similarity: -0.1447, L2_dist: 2.1055 62 | 2019-09-08 16:55:24 Model5, Epoch 08, Acc: 0.0197, Cos_Similarity: -0.1426, L2_dist: 2.1205 63 | 2019-09-08 16:55:33 Model6, Epoch 08, Acc: 0.0576, Cos_Similarity: -0.0046, L2_dist: 2.1395 64 | 2019-09-08 16:55:41 Model0, Epoch 09, Acc: 0.0183, Cos_Similarity: -0.1660, L2_dist: 2.1541 65 | 2019-09-08 16:55:44 Model1, Epoch 09, Acc: 0.0421, Cos_Similarity: -0.0266, L2_dist: 2.1730 66 | 2019-09-08 16:55:50 Model2, Epoch 09, Acc: 0.0197, Cos_Similarity: -0.1769, L2_dist: 2.1865 67 | 2019-09-08 16:56:03 Model3, Epoch 09, Acc: 0.0281, Cos_Similarity: -0.1023, L2_dist: 2.2021 68 | 2019-09-08 16:56:14 Model4, Epoch 09, Acc: 0.0126, Cos_Similarity: -0.1847, L2_dist: 2.2147 69 | 2019-09-08 16:56:26 Model5, Epoch 09, Acc: 0.0169, Cos_Similarity: -0.1822, L2_dist: 2.2274 70 | 2019-09-08 16:56:35 Model6, Epoch 09, Acc: 0.0393, Cos_Similarity: -0.0392, L2_dist: 2.2450 71 | 2019-09-08 16:56:43 Model0, Epoch 10, Acc: 0.0098, Cos_Similarity: -0.2008, L2_dist: 2.2577 72 | 2019-09-08 16:56:45 Model1, Epoch 10, Acc: 0.0337, Cos_Similarity: -0.0612, L2_dist: 2.2750 73 | 2019-09-08 16:56:52 Model2, Epoch 10, Acc: 0.0084, Cos_Similarity: -0.2109, L2_dist: 2.2865 74 | 2019-09-08 16:57:05 Model3, Epoch 10, Acc: 0.0126, Cos_Similarity: -0.1372, L2_dist: 2.3002 75 | 2019-09-08 16:57:16 Model4, Epoch 10, Acc: 0.0098, Cos_Similarity: -0.2187, L2_dist: 2.3110 76 | 2019-09-08 16:57:27 Model5, Epoch 10, Acc: 0.0070, Cos_Similarity: -0.2153, L2_dist: 2.3219 77 | 2019-09-08 16:57:37 Model6, Epoch 10, Acc: 0.0309, Cos_Similarity: -0.0739, L2_dist: 2.3381 78 | 2019-09-08 16:57:45 Model0, Epoch 11, Acc: 0.0070, Cos_Similarity: -0.2360, L2_dist: 2.3489 79 | 2019-09-08 16:57:47 Model1, Epoch 11, Acc: 0.0211, Cos_Similarity: -0.0920, L2_dist: 2.3652 80 | 2019-09-08 16:57:54 Model2, Epoch 11, Acc: 0.0028, Cos_Similarity: -0.2421, L2_dist: 2.3749 81 | 2019-09-08 16:58:07 Model3, Epoch 11, Acc: 0.0084, Cos_Similarity: -0.1666, L2_dist: 2.3874 82 | 2019-09-08 16:58:18 Model4, Epoch 11, Acc: 0.0042, Cos_Similarity: -0.2467, L2_dist: 2.3962 83 | 2019-09-08 16:58:29 Model5, Epoch 11, Acc: 0.0028, Cos_Similarity: -0.2416, L2_dist: 2.4057 84 | 2019-09-08 16:58:39 Model6, Epoch 11, Acc: 0.0239, Cos_Similarity: -0.1022, L2_dist: 2.4208 85 | 2019-09-08 16:58:46 Model0, Epoch 12, Acc: 0.0014, Cos_Similarity: -0.2616, L2_dist: 2.4303 86 | 2019-09-08 16:58:49 Model1, Epoch 12, Acc: 0.0098, Cos_Similarity: -0.1171, L2_dist: 2.4454 87 | 2019-09-08 16:58:56 Model2, Epoch 12, Acc: 0.0000, Cos_Similarity: -0.2685, L2_dist: 2.4540 88 | 2019-09-08 16:59:08 Model3, Epoch 12, Acc: 0.0028, Cos_Similarity: -0.1911, L2_dist: 2.4652 89 | 2019-09-08 16:59:19 Model4, Epoch 12, Acc: 0.0000, Cos_Similarity: -0.2727, L2_dist: 2.4727 90 | 2019-09-08 16:59:31 Model5, Epoch 12, Acc: 0.0000, 
Cos_Similarity: -0.2658, L2_dist: 2.4809 91 | 2019-09-08 16:59:41 Model6, Epoch 12, Acc: 0.0140, Cos_Similarity: -0.1246, L2_dist: 2.4952 92 | 2019-09-08 16:59:48 Model0, Epoch 13, Acc: 0.0000, Cos_Similarity: -0.2858, L2_dist: 2.5033 93 | 2019-09-08 16:59:51 Model1, Epoch 13, Acc: 0.0042, Cos_Similarity: -0.1382, L2_dist: 2.5176 94 | 2019-09-08 16:59:58 Model2, Epoch 13, Acc: 0.0000, Cos_Similarity: -0.2931, L2_dist: 2.5246 95 | 2019-09-08 17:00:10 Model3, Epoch 13, Acc: 0.0028, Cos_Similarity: -0.2144, L2_dist: 2.5345 96 | 2019-09-08 17:00:21 Model4, Epoch 13, Acc: 0.0000, Cos_Similarity: -0.2957, L2_dist: 2.5410 97 | 2019-09-08 17:00:33 Model5, Epoch 13, Acc: 0.0000, Cos_Similarity: -0.2892, L2_dist: 2.5479 98 | 2019-09-08 17:00:42 Model6, Epoch 13, Acc: 0.0056, Cos_Similarity: -0.1458, L2_dist: 2.5615 99 | 2019-09-08 17:00:50 Model0, Epoch 14, Acc: 0.0000, Cos_Similarity: -0.3057, L2_dist: 2.5684 100 | 2019-09-08 17:00:53 Model1, Epoch 14, Acc: 0.0000, Cos_Similarity: -0.1581, L2_dist: 2.5819 101 | 2019-09-08 17:00:59 Model2, Epoch 14, Acc: 0.0000, Cos_Similarity: -0.3123, L2_dist: 2.5876 102 | 2019-09-08 17:01:12 Model3, Epoch 14, Acc: 0.0014, Cos_Similarity: -0.2335, L2_dist: 2.5966 103 | 2019-09-08 17:01:23 Model4, Epoch 14, Acc: 0.0000, Cos_Similarity: -0.3140, L2_dist: 2.6022 104 | 2019-09-08 17:01:35 Model5, Epoch 14, Acc: 0.0000, Cos_Similarity: -0.3084, L2_dist: 2.6081 105 | 2019-09-08 17:01:44 Model6, Epoch 14, Acc: 0.0042, Cos_Similarity: -0.1647, L2_dist: 2.6210 106 | 2019-09-08 17:01:52 Model0, Epoch 15, Acc: 0.0000, Cos_Similarity: -0.3257, L2_dist: 2.6271 107 | 2019-09-08 17:01:54 Model1, Epoch 15, Acc: 0.0000, Cos_Similarity: -0.1764, L2_dist: 2.6395 108 | 2019-09-08 17:02:01 Model2, Epoch 15, Acc: 0.0000, Cos_Similarity: -0.3299, L2_dist: 2.6446 109 | 2019-09-08 17:02:14 Model3, Epoch 15, Acc: 0.0000, Cos_Similarity: -0.2486, L2_dist: 2.6526 110 | 2019-09-08 17:02:25 Model4, Epoch 15, Acc: 0.0000, Cos_Similarity: -0.3325, L2_dist: 2.6574 111 | 2019-09-08 17:02:36 Model5, Epoch 15, Acc: 0.0000, Cos_Similarity: -0.3255, L2_dist: 2.6627 112 | 2019-09-08 17:02:46 Model6, Epoch 15, Acc: 0.0014, Cos_Similarity: -0.1828, L2_dist: 2.6751 113 | 2019-09-08 17:02:54 Model0, Epoch 16, Acc: 0.0000, Cos_Similarity: -0.3428, L2_dist: 2.6806 114 | 2019-09-08 17:02:56 Model1, Epoch 16, Acc: 0.0000, Cos_Similarity: -0.1938, L2_dist: 2.6926 115 | 2019-09-08 17:03:03 Model2, Epoch 16, Acc: 0.0000, Cos_Similarity: -0.3447, L2_dist: 2.6965 116 | 2019-09-08 17:03:16 Model3, Epoch 16, Acc: 0.0000, Cos_Similarity: -0.2660, L2_dist: 2.7040 117 | 2019-09-08 17:03:27 Model4, Epoch 16, Acc: 0.0000, Cos_Similarity: -0.3498, L2_dist: 2.7078 118 | 2019-09-08 17:03:38 Model5, Epoch 16, Acc: 0.0000, Cos_Similarity: -0.3403, L2_dist: 2.7124 119 | 2019-09-08 17:03:48 Model6, Epoch 16, Acc: 0.0014, Cos_Similarity: -0.1998, L2_dist: 2.7242 120 | 2019-09-08 17:03:55 Model0, Epoch 17, Acc: 0.0000, Cos_Similarity: -0.3576, L2_dist: 2.7292 121 | 2019-09-08 17:03:58 Model1, Epoch 17, Acc: 0.0000, Cos_Similarity: -0.2065, L2_dist: 2.7406 122 | 2019-09-08 17:04:05 Model2, Epoch 17, Acc: 0.0000, Cos_Similarity: -0.3591, L2_dist: 2.7440 123 | 2019-09-08 17:04:17 Model3, Epoch 17, Acc: 0.0000, Cos_Similarity: -0.2794, L2_dist: 2.7510 124 | 2019-09-08 17:04:28 Model4, Epoch 17, Acc: 0.0000, Cos_Similarity: -0.3627, L2_dist: 2.7542 125 | 2019-09-08 17:04:40 Model5, Epoch 17, Acc: 0.0000, Cos_Similarity: -0.3532, L2_dist: 2.7579 126 | 2019-09-08 17:04:50 Model6, Epoch 17, Acc: 0.0000, Cos_Similarity: -0.2114, L2_dist: 
2.7694 127 | 2019-09-08 17:04:57 Model0, Epoch 18, Acc: 0.0000, Cos_Similarity: -0.3715, L2_dist: 2.7731 128 | 2019-09-08 17:05:00 Model1, Epoch 18, Acc: 0.0000, Cos_Similarity: -0.2190, L2_dist: 2.7841 129 | 2019-09-08 17:05:06 Model2, Epoch 18, Acc: 0.0000, Cos_Similarity: -0.3718, L2_dist: 2.7869 130 | 2019-09-08 17:05:19 Model3, Epoch 18, Acc: 0.0000, Cos_Similarity: -0.2932, L2_dist: 2.7934 131 | 2019-09-08 17:05:30 Model4, Epoch 18, Acc: 0.0000, Cos_Similarity: -0.3762, L2_dist: 2.7960 132 | 2019-09-08 17:05:42 Model5, Epoch 18, Acc: 0.0000, Cos_Similarity: -0.3666, L2_dist: 2.7992 133 | 2019-09-08 17:05:51 Model6, Epoch 18, Acc: 0.0000, Cos_Similarity: -0.2263, L2_dist: 2.8104 134 | 2019-09-08 17:05:59 Model0, Epoch 19, Acc: 0.0000, Cos_Similarity: -0.3810, L2_dist: 2.8140 135 | 2019-09-08 17:06:01 Model1, Epoch 19, Acc: 0.0000, Cos_Similarity: -0.2305, L2_dist: 2.8246 136 | 2019-09-08 17:06:08 Model2, Epoch 19, Acc: 0.0000, Cos_Similarity: -0.3858, L2_dist: 2.8268 137 | 2019-09-08 17:06:21 Model3, Epoch 19, Acc: 0.0000, Cos_Similarity: -0.3040, L2_dist: 2.8325 138 | 2019-09-08 17:06:32 Model4, Epoch 19, Acc: 0.0000, Cos_Similarity: -0.3876, L2_dist: 2.8346 139 | 2019-09-08 17:06:43 Model5, Epoch 19, Acc: 0.0000, Cos_Similarity: -0.3765, L2_dist: 2.8375 140 | 2019-09-08 17:06:53 Model6, Epoch 19, Acc: 0.0000, Cos_Similarity: -0.2365, L2_dist: 2.8481 141 | 2019-09-08 17:07:01 Model0, Epoch 20, Acc: 0.0000, Cos_Similarity: -0.3931, L2_dist: 2.8510 142 | 2019-09-08 17:07:03 Model1, Epoch 20, Acc: 0.0000, Cos_Similarity: -0.2406, L2_dist: 2.8613 143 | 2019-09-08 17:07:10 Model2, Epoch 20, Acc: 0.0000, Cos_Similarity: -0.3956, L2_dist: 2.8628 144 | 2019-09-08 17:07:22 Model3, Epoch 20, Acc: 0.0000, Cos_Similarity: -0.3136, L2_dist: 2.8681 145 | 2019-09-08 17:07:33 Model4, Epoch 20, Acc: 0.0000, Cos_Similarity: -0.3974, L2_dist: 2.8698 146 | 2019-09-08 17:07:45 Model5, Epoch 20, Acc: 0.0000, Cos_Similarity: -0.3897, L2_dist: 2.8721 147 | 2019-09-08 17:07:55 Model6, Epoch 20, Acc: 0.0000, Cos_Similarity: -0.2478, L2_dist: 2.8824 148 | 2019-09-08 17:08:02 Model0, Epoch 21, Acc: 0.0000, Cos_Similarity: -0.4030, L2_dist: 2.8851 149 | 2019-09-08 17:08:05 Model1, Epoch 21, Acc: 0.0000, Cos_Similarity: -0.2508, L2_dist: 2.8950 150 | 2019-09-08 17:08:12 Model2, Epoch 21, Acc: 0.0000, Cos_Similarity: -0.4039, L2_dist: 2.8962 151 | 2019-09-08 17:08:24 Model3, Epoch 21, Acc: 0.0000, Cos_Similarity: -0.3242, L2_dist: 2.9009 152 | 2019-09-08 17:08:35 Model4, Epoch 21, Acc: 0.0000, Cos_Similarity: -0.4075, L2_dist: 2.9019 153 | 2019-09-08 17:08:47 Model5, Epoch 21, Acc: 0.0000, Cos_Similarity: -0.3963, L2_dist: 2.9039 154 | 2019-09-08 17:08:56 Model6, Epoch 21, Acc: 0.0000, Cos_Similarity: -0.2580, L2_dist: 2.9142 155 | 2019-09-08 17:09:04 Model0, Epoch 22, Acc: 0.0000, Cos_Similarity: -0.4121, L2_dist: 2.9167 156 | 2019-09-08 17:09:07 Model1, Epoch 22, Acc: 0.0000, Cos_Similarity: -0.2607, L2_dist: 2.9263 157 | 2019-09-08 17:09:13 Model2, Epoch 22, Acc: 0.0000, Cos_Similarity: -0.4125, L2_dist: 2.9271 158 | 2019-09-08 17:09:26 Model3, Epoch 22, Acc: 0.0000, Cos_Similarity: -0.3298, L2_dist: 2.9317 159 | 2019-09-08 17:09:37 Model4, Epoch 22, Acc: 0.0000, Cos_Similarity: -0.4165, L2_dist: 2.9325 160 | 2019-09-08 17:09:48 Model5, Epoch 22, Acc: 0.0000, Cos_Similarity: -0.4049, L2_dist: 2.9341 161 | 2019-09-08 17:09:58 Model6, Epoch 22, Acc: 0.0000, Cos_Similarity: -0.2639, L2_dist: 2.9442 162 | 2019-09-08 17:10:06 Model0, Epoch 23, Acc: 0.0000, Cos_Similarity: -0.4232, L2_dist: 2.9461 163 | 2019-09-08 
17:10:08 Model1, Epoch 23, Acc: 0.0000, Cos_Similarity: -0.2653, L2_dist: 2.9554 164 | 2019-09-08 17:10:15 Model2, Epoch 23, Acc: 0.0000, Cos_Similarity: -0.4216, L2_dist: 2.9561 165 | 2019-09-08 17:10:28 Model3, Epoch 23, Acc: 0.0000, Cos_Similarity: -0.3386, L2_dist: 2.9604 166 | 2019-09-08 17:10:39 Model4, Epoch 23, Acc: 0.0000, Cos_Similarity: -0.4241, L2_dist: 2.9610 167 | 2019-09-08 17:10:50 Model5, Epoch 23, Acc: 0.0000, Cos_Similarity: -0.4128, L2_dist: 2.9624 168 | 2019-09-08 17:11:00 Model6, Epoch 23, Acc: 0.0000, Cos_Similarity: -0.2778, L2_dist: 2.9719 169 | 2019-09-08 17:11:07 Model0, Epoch 24, Acc: 0.0000, Cos_Similarity: -0.4262, L2_dist: 2.9736 170 | 2019-09-08 17:11:10 Model1, Epoch 24, Acc: 0.0000, Cos_Similarity: -0.2754, L2_dist: 2.9825 171 | 2019-09-08 17:11:17 Model2, Epoch 24, Acc: 0.0000, Cos_Similarity: -0.4287, L2_dist: 2.9826 172 | 2019-09-08 17:11:29 Model3, Epoch 24, Acc: 0.0000, Cos_Similarity: -0.3464, L2_dist: 2.9867 173 | 2019-09-08 17:11:40 Model4, Epoch 24, Acc: 0.0000, Cos_Similarity: -0.4312, L2_dist: 2.9870 174 | 2019-09-08 17:11:52 Model5, Epoch 24, Acc: 0.0000, Cos_Similarity: -0.4212, L2_dist: 2.9880 175 | 2019-09-08 17:12:02 Model6, Epoch 24, Acc: 0.0000, Cos_Similarity: -0.2818, L2_dist: 2.9975 176 | 2019-09-08 17:12:09 Model0, Epoch 25, Acc: 0.0000, Cos_Similarity: -0.4350, L2_dist: 2.9990 177 | 2019-09-08 17:12:12 Model1, Epoch 25, Acc: 0.0000, Cos_Similarity: -0.2812, L2_dist: 3.0078 178 | 2019-09-08 17:12:18 Model2, Epoch 25, Acc: 0.0000, Cos_Similarity: -0.4351, L2_dist: 3.0081 179 | 2019-09-08 17:12:31 Model3, Epoch 25, Acc: 0.0000, Cos_Similarity: -0.3529, L2_dist: 3.0118 180 | 2019-09-08 17:12:42 Model4, Epoch 25, Acc: 0.0000, Cos_Similarity: -0.4370, L2_dist: 3.0121 181 | 2019-09-08 17:12:54 Model5, Epoch 25, Acc: 0.0000, Cos_Similarity: -0.4262, L2_dist: 3.0129 182 | 2019-09-08 17:13:03 Model6, Epoch 25, Acc: 0.0000, Cos_Similarity: -0.2925, L2_dist: 3.0221 183 | 2019-09-08 17:13:11 Model0, Epoch 26, Acc: 0.0000, Cos_Similarity: -0.4408, L2_dist: 3.0234 184 | 2019-09-08 17:13:13 Model1, Epoch 26, Acc: 0.0000, Cos_Similarity: -0.2876, L2_dist: 3.0319 185 | 2019-09-08 17:13:20 Model2, Epoch 26, Acc: 0.0000, Cos_Similarity: -0.4430, L2_dist: 3.0319 186 | 2019-09-08 17:13:33 Model3, Epoch 26, Acc: 0.0000, Cos_Similarity: -0.3595, L2_dist: 3.0354 187 | 2019-09-08 17:13:44 Model4, Epoch 26, Acc: 0.0000, Cos_Similarity: -0.4449, L2_dist: 3.0353 188 | 2019-09-08 17:13:55 Model5, Epoch 26, Acc: 0.0000, Cos_Similarity: -0.4318, L2_dist: 3.0360 189 | 2019-09-08 17:14:05 Model6, Epoch 26, Acc: 0.0000, Cos_Similarity: -0.2981, L2_dist: 3.0450 190 | 2019-09-08 17:14:12 Model0, Epoch 27, Acc: 0.0000, Cos_Similarity: -0.4485, L2_dist: 3.0460 191 | 2019-09-08 17:14:15 Model1, Epoch 27, Acc: 0.0000, Cos_Similarity: -0.2944, L2_dist: 3.0544 192 | 2019-09-08 17:14:22 Model2, Epoch 27, Acc: 0.0000, Cos_Similarity: -0.4479, L2_dist: 3.0542 193 | 2019-09-08 17:14:34 Model3, Epoch 27, Acc: 0.0000, Cos_Similarity: -0.3657, L2_dist: 3.0573 194 | 2019-09-08 17:14:45 Model4, Epoch 27, Acc: 0.0000, Cos_Similarity: -0.4515, L2_dist: 3.0570 195 | 2019-09-08 17:14:57 Model5, Epoch 27, Acc: 0.0000, Cos_Similarity: -0.4384, L2_dist: 3.0577 196 | 2019-09-08 17:15:07 Model6, Epoch 27, Acc: 0.0000, Cos_Similarity: -0.3039, L2_dist: 3.0666 197 | 2019-09-08 17:15:14 Model0, Epoch 28, Acc: 0.0000, Cos_Similarity: -0.4517, L2_dist: 3.0674 198 | 2019-09-08 17:15:17 Model1, Epoch 28, Acc: 0.0000, Cos_Similarity: -0.3008, L2_dist: 3.0754 199 | 2019-09-08 17:15:24 Model2, Epoch 28, 
Acc: 0.0000, Cos_Similarity: -0.4534, L2_dist: 3.0749 200 | 2019-09-08 17:15:36 Model3, Epoch 28, Acc: 0.0000, Cos_Similarity: -0.3696, L2_dist: 3.0780 201 | 2019-09-08 17:15:47 Model4, Epoch 28, Acc: 0.0000, Cos_Similarity: -0.4572, L2_dist: 3.0774 202 | 2019-09-08 17:15:59 Model5, Epoch 28, Acc: 0.0000, Cos_Similarity: -0.4437, L2_dist: 3.0779 203 | 2019-09-08 17:16:08 Model6, Epoch 28, Acc: 0.0000, Cos_Similarity: -0.3077, L2_dist: 3.0868 204 | 2019-09-08 17:16:16 Model0, Epoch 29, Acc: 0.0000, Cos_Similarity: -0.4598, L2_dist: 3.0876 205 | 2019-09-08 17:16:18 Model1, Epoch 29, Acc: 0.0000, Cos_Similarity: -0.3043, L2_dist: 3.0956 206 | 2019-09-08 17:16:25 Model2, Epoch 29, Acc: 0.0000, Cos_Similarity: -0.4584, L2_dist: 3.0949 207 | 2019-09-08 17:16:38 Model3, Epoch 29, Acc: 0.0000, Cos_Similarity: -0.3752, L2_dist: 3.0978 208 | 2019-09-08 17:16:49 Model4, Epoch 29, Acc: 0.0000, Cos_Similarity: -0.4608, L2_dist: 3.0973 209 | 2019-09-08 17:17:00 Model5, Epoch 29, Acc: 0.0000, Cos_Similarity: -0.4495, L2_dist: 3.0974 210 | 2019-09-08 17:17:10 Model6, Epoch 29, Acc: 0.0000, Cos_Similarity: -0.3148, L2_dist: 3.1062 211 | 2019-09-08 17:17:18 Model0, Epoch 30, Acc: 0.0000, Cos_Similarity: -0.4635, L2_dist: 3.1069 212 | 2019-09-08 17:17:20 Model1, Epoch 30, Acc: 0.0000, Cos_Similarity: -0.3095, L2_dist: 3.1146 213 | 2019-09-08 17:17:27 Model2, Epoch 30, Acc: 0.0000, Cos_Similarity: -0.4625, L2_dist: 3.1138 214 | 2019-09-08 17:17:39 Model3, Epoch 30, Acc: 0.0000, Cos_Similarity: -0.3820, L2_dist: 3.1166 215 | 2019-09-08 17:17:50 Model4, Epoch 30, Acc: 0.0000, Cos_Similarity: -0.4663, L2_dist: 3.1159 216 | 2019-09-08 17:18:02 Model5, Epoch 30, Acc: 0.0000, Cos_Similarity: -0.4540, L2_dist: 3.1159 217 | 2019-09-08 17:18:12 Model6, Epoch 30, Acc: 0.0000, Cos_Similarity: -0.3223, L2_dist: 3.1243 218 | 2019-09-08 17:18:19 Model0, Epoch 31, Acc: 0.0000, Cos_Similarity: -0.4686, L2_dist: 3.1249 219 | 2019-09-08 17:18:22 Model1, Epoch 31, Acc: 0.0000, Cos_Similarity: -0.3144, L2_dist: 3.1326 220 | 2019-09-08 17:18:29 Model2, Epoch 31, Acc: 0.0000, Cos_Similarity: -0.4682, L2_dist: 3.1316 221 | 2019-09-08 17:18:41 Model3, Epoch 31, Acc: 0.0000, Cos_Similarity: -0.3842, L2_dist: 3.1342 222 | 2019-09-08 17:18:52 Model4, Epoch 31, Acc: 0.0000, Cos_Similarity: -0.4718, L2_dist: 3.1332 223 | 2019-09-08 17:19:04 Model5, Epoch 31, Acc: 0.0000, Cos_Similarity: -0.4599, L2_dist: 3.1331 224 | 2019-09-08 17:19:13 Model6, Epoch 31, Acc: 0.0000, Cos_Similarity: -0.3253, L2_dist: 3.1417 225 | 2019-09-08 17:19:21 Model0, Epoch 32, Acc: 0.0000, Cos_Similarity: -0.4727, L2_dist: 3.1421 226 | 2019-09-08 17:19:23 Model1, Epoch 32, Acc: 0.0000, Cos_Similarity: -0.3168, L2_dist: 3.1497 227 | 2019-09-08 17:19:30 Model2, Epoch 32, Acc: 0.0000, Cos_Similarity: -0.4722, L2_dist: 3.1485 228 | 2019-09-08 17:19:43 Model3, Epoch 32, Acc: 0.0000, Cos_Similarity: -0.3876, L2_dist: 3.1511 229 | 2019-09-08 17:19:54 Model4, Epoch 32, Acc: 0.0000, Cos_Similarity: -0.4744, L2_dist: 3.1504 230 | 2019-09-08 17:20:05 Model5, Epoch 32, Acc: 0.0000, Cos_Similarity: -0.4623, L2_dist: 3.1500 231 | 2019-09-08 17:20:15 Model6, Epoch 32, Acc: 0.0000, Cos_Similarity: -0.3289, L2_dist: 3.1586 232 | 2019-09-08 17:20:23 Model0, Epoch 33, Acc: 0.0000, Cos_Similarity: -0.4777, L2_dist: 3.1588 233 | 2019-09-08 17:20:25 Model1, Epoch 33, Acc: 0.0000, Cos_Similarity: -0.3214, L2_dist: 3.1662 234 | 2019-09-08 17:20:32 Model2, Epoch 33, Acc: 0.0000, Cos_Similarity: -0.4762, L2_dist: 3.1648 235 | 2019-09-08 17:20:45 Model3, Epoch 33, Acc: 0.0000, 
Cos_Similarity: -0.3917, L2_dist: 3.1671 236 | 2019-09-08 17:20:56 Model4, Epoch 33, Acc: 0.0000, Cos_Similarity: -0.4791, L2_dist: 3.1663 237 | 2019-09-08 17:21:07 Model5, Epoch 33, Acc: 0.0000, Cos_Similarity: -0.4659, L2_dist: 3.1659 238 | 2019-09-08 17:21:17 Model6, Epoch 33, Acc: 0.0000, Cos_Similarity: -0.3336, L2_dist: 3.1742 239 | --------------------------------------------------------------------------------
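
For reference, the checkpoints that wu/train_center.py writes above are plain dicts with the keys 'iters' and 'net_state_dict' (see its torch.save calls). Below is a minimal sketch, not part of the repository, of how one might reload such a backbone checkpoint; the checkpoint path is a placeholder and the default MobileFace backbone is assumed.

```python
# Sketch only: reload a backbone checkpoint saved by wu/train_center.py.
# The save format is a dict with keys 'iters' and 'net_state_dict'.
# The path below is a placeholder; substitute a real run directory.
import torch
from backbone.mobilefacenet import MobileFaceNet

ckpt_path = './model/Softmax_Center_MOBILEFACE_20190103_111200/Iter_002000_net.ckpt'  # placeholder
checkpoint = torch.load(ckpt_path, map_location='cpu')

net = MobileFaceNet()                               # default backbone in train_center.py
net.load_state_dict(checkpoint['net_state_dict'])   # weights are stored under 'net_state_dict'
net.eval()
print('loaded checkpoint from iteration', checkpoint['iters'])
```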