├── LICENSE
├── README.md
├── dataset.py
├── layer.py
├── lfw_eval.py
├── log
│   ├── LResnet50E-IR_s30_m0.35.log
│   ├── README.md
│   ├── sphereface20_s30_m0.35.log
│   └── sphereface20_s30_m0.40.log
├── main.py
├── net.py
└── train.sh
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 | 
3 | Copyright (c) 2018 Muggle Wang
4 | 
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 | 
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 | 
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # CosFace_pytorch
2 | 
3 | ***PyTorch implementation of CosFace***
4 | 
5 | ------------
6 | 
7 | - Deep Learning Platform: PyTorch 0.4.1
8 | - OS: CentOS Linux release 7.5
9 | - Language: Python 2.7
10 | - CUDA: 8.0
11 | 
12 | ------------
13 | 
14 | - Database: `WebFace` or `VggFace2` (first complete the data preprocessing steps described at https://github.com/wy1iu/sphereface#part-1-preprocessing)
15 | - Network: `sphere20`, `sphere64`, `LResnet50E-IR` (from the ArcFace paper)
16 | 
17 | ------------
18 | 
19 | ### Result (new)
20 | 
21 | A single model trained on CASIA-WebFace achieves **~99.2%** accuracy on LFW (Link: https://pan.baidu.com/s/1uOBATynzBTzZwrIKC4kcAA Password: 69e6)
22 | 
23 | Note: PyTorch 0.4 behaves quite differently from 0.3, so the previous results have not been fully reproduced yet; the hyper-parameters are still being tuned.
24 | 
25 | The fully connected layer is initialized with a scheme other than Xavier, one that proved more conducive to model convergence.
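26 | 
27 | For reference, the loss follows the CosFace formulation (see `MarginCosineProduct` in `layer.py`): the logit of the ground-truth class is `s * (cos(theta) - m)` while every other class keeps `s * cos(theta)`, so the `s` and `m` below denote the feature scale and the cosine margin.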
28 | 
29 | ### Result (old)
30 | 
31 | Network | Hyper-parameter | Accuracy on LFW
32 | ------------- | ------------- | -------------
33 | Sphere20 | s=30, m=0.35 | 99.08%
34 | Sphere20 | s=30, m=0.40 | 99.23%
35 | LResnet50E-IR (from the ArcFace paper) | s=30, m=0.35 | 99.45%
36 | 
--------------------------------------------------------------------------------
/dataset.py:
--------------------------------------------------------------------------------
1 | import torch.utils.data as data
2 | from PIL import Image, ImageFile
3 | import os
4 | 
5 | ImageFile.LOAD_TRUNCATED_IMAGES = True
6 | 
7 | 
8 | # https://github.com/pytorch/vision/issues/81
9 | 
10 | def PIL_loader(path):
11 |     try:
12 |         with open(path, 'rb') as f:
13 |             return Image.open(f).convert('RGB')
14 |     except IOError:
15 |         print('Cannot load image ' + path)
16 | 
17 | 
18 | def default_reader(fileList):
19 |     imgList = []
20 |     with open(fileList, 'r') as file:
21 |         for line in file.readlines():
22 |             imgPath, label = line.strip().split(' ')
23 |             imgList.append((imgPath, int(label)))
24 |     return imgList
25 | 
26 | 
27 | class ImageList(data.Dataset):
28 |     def __init__(self, root, fileList, transform=None, list_reader=default_reader, loader=PIL_loader):
29 |         self.root = root
30 |         self.imgList = list_reader(fileList)
31 |         self.transform = transform
32 |         self.loader = loader
33 | 
34 |     def __getitem__(self, index):
35 |         imgPath, target = self.imgList[index]
36 |         img = self.loader(os.path.join(self.root, imgPath))
37 | 
38 |         if self.transform is not None:
39 |             img = self.transform(img)
40 |         return img, target
41 | 
42 |     def __len__(self):
43 |         return len(self.imgList)
44 | 
--------------------------------------------------------------------------------
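A minimal usage sketch for `ImageList` (the root directory and list file below are hypothetical; the list format is one `relative/path.jpg label` pair per line, as parsed by `default_reader`):

```python
import torchvision.transforms as transforms
from torch.utils.data import DataLoader

from dataset import ImageList

# Hypothetical paths; the list file holds one "relative/path.jpg label" per line.
train_dataset = ImageList(
    root='/data/webface-112X96/',
    fileList='/data/webface_train.list',
    transform=transforms.Compose([
        transforms.ToTensor(),                                             # [0, 255] -> [0.0, 1.0]
        transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)),   # -> [-1.0, 1.0]
    ]))
train_loader = DataLoader(train_dataset, batch_size=512, shuffle=True, num_workers=4)

for img, target in train_loader:
    pass  # feed each (img, target) batch to the training step
```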
/layer.py:
--------------------------------------------------------------------------------
1 | from __future__ import print_function
2 | from __future__ import division
3 | import torch
4 | import torch.nn as nn
5 | import torch.nn.functional as F
6 | from torch.nn import Parameter
7 | import math
8 | 
9 | def cosine_sim(x1, x2, dim=1, eps=1e-8):
10 |     ip = torch.mm(x1, x2.t())
11 |     w1 = torch.norm(x1, 2, dim)
12 |     w2 = torch.norm(x2, 2, dim)
13 |     return ip / torch.ger(w1, w2).clamp(min=eps)
14 | 
15 | class MarginCosineProduct(nn.Module):
16 |     r"""Implementation of the large margin cosine distance (CosFace):
17 |     Args:
18 |         in_features: size of each input sample
19 |         out_features: size of each output sample
20 |         s: norm of input feature
21 |         m: margin
22 |     """
23 | 
24 |     def __init__(self, in_features, out_features, s=30.0, m=0.40):
25 |         super(MarginCosineProduct, self).__init__()
26 |         self.in_features = in_features
27 |         self.out_features = out_features
28 |         self.s = s
29 |         self.m = m
30 |         self.weight = Parameter(torch.Tensor(out_features, in_features))
31 |         nn.init.xavier_uniform_(self.weight)
32 |         #stdv = 1. / math.sqrt(self.weight.size(1))
33 |         #self.weight.data.uniform_(-stdv, stdv)
34 | 
35 |     def forward(self, input, label):
36 |         cosine = cosine_sim(input, self.weight)
37 |         # cosine = F.linear(F.normalize(input), F.normalize(self.weight))
38 |         # --------------------------- convert label to one-hot ---------------------------
39 |         # https://discuss.pytorch.org/t/convert-int-into-one-hot-format/507
40 |         one_hot = torch.zeros_like(cosine)
41 |         one_hot.scatter_(1, label.view(-1, 1), 1.0)
42 |         # torch.where(condition, x, y): out_i = x_i if condition_i else y_i
43 |         output = self.s * (cosine - one_hot * self.m)
44 | 
45 |         return output
46 | 
47 |     def __repr__(self):
48 |         return self.__class__.__name__ + '(' \
49 |                + 'in_features=' + str(self.in_features) \
50 |                + ', out_features=' + str(self.out_features) \
51 |                + ', s=' + str(self.s) \
52 |                + ', m=' + str(self.m) + ')'
53 | 
54 | 
55 | class AngleLinear(nn.Module):
56 |     def __init__(self, in_features, out_features, m=4):
57 |         super(AngleLinear, self).__init__()
58 |         self.in_features = in_features
59 |         self.out_features = out_features
60 |         self.m = m
61 |         self.base = 1000.0
62 |         self.gamma = 0.12
63 |         self.power = 1
64 |         self.LambdaMin = 5.0
65 |         self.iter = 0
66 |         self.weight = Parameter(torch.Tensor(out_features, in_features))
67 |         nn.init.xavier_uniform_(self.weight)
68 | 
69 |         # multiple-angle formulas: cos(m*theta) as a polynomial in cos(theta)
70 |         self.mlambda = [
71 |             lambda x: x ** 0,
72 |             lambda x: x ** 1,
73 |             lambda x: 2 * x ** 2 - 1,
74 |             lambda x: 4 * x ** 3 - 3 * x,
75 |             lambda x: 8 * x ** 4 - 8 * x ** 2 + 1,
76 |             lambda x: 16 * x ** 5 - 20 * x ** 3 + 5 * x
77 |         ]
78 | 
79 |     def forward(self, input, label):
80 |         # lambda = max(lambda_min, base * (1 + gamma * iteration) ** (-power))
81 |         self.iter += 1
82 |         self.lamb = max(self.LambdaMin, self.base * (1 + self.gamma * self.iter) ** (-1 * self.power))
83 | 
84 |         # --------------------------- cos(theta) & phi(theta) ---------------------------
85 |         cos_theta = F.linear(F.normalize(input), F.normalize(self.weight))
86 |         cos_theta = cos_theta.clamp(-1, 1)
87 |         cos_m_theta = self.mlambda[self.m](cos_theta)
88 |         theta = cos_theta.data.acos()
89 |         k = (self.m * theta / math.pi).floor()
90 |         phi_theta = ((-1.0) ** k) * cos_m_theta - 2 * k
91 |         NormOfFeature = torch.norm(input, 2, 1)
92 | 
93 |         # --------------------------- convert label to one-hot ---------------------------
94 |         one_hot = torch.zeros_like(cos_theta)
95 |         one_hot.scatter_(1, label.view(-1, 1), 1)
96 | 
97 |         # --------------------------- Calculate output ---------------------------
98 |         output = (one_hot * (phi_theta - cos_theta) / (1 + self.lamb)) + cos_theta
99 |         output *= NormOfFeature.view(-1, 1)
100 | 
101 |         return output
102 | 
103 |     def __repr__(self):
104 |         return self.__class__.__name__ + '(' \
105 |                + 'in_features=' + str(self.in_features) \
106 |                + ', out_features=' + str(self.out_features) \
107 |                + ', m=' + str(self.m) + ')'
108 | 
--------------------------------------------------------------------------------
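A minimal training-step sketch showing how `MarginCosineProduct` is wired behind a feature extractor (the 512-d feature size, the 10572-class count from the log below, and the SGD hyper-parameters are illustrative assumptions; note that the layer's weight must be handed to the optimizer together with the backbone's parameters):

```python
import torch
import torch.nn.functional as F

import net                      # backbone definitions, e.g. net.sphere()
from layer import MarginCosineProduct

model = net.sphere().to('cuda')                      # images -> 512-d features (assumed)
mcp = MarginCosineProduct(512, 10572, s=30.0, m=0.35).to('cuda')
optimizer = torch.optim.SGD(list(model.parameters()) + list(mcp.parameters()),
                            lr=0.1, momentum=0.9, weight_decay=5e-4)  # illustrative values

def train_step(img, label):
    output = mcp(model(img), label)        # scaled, margin-adjusted logits
    loss = F.cross_entropy(output, label)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    return loss.item()
```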
/lfw_eval.py:
--------------------------------------------------------------------------------
1 | from PIL import Image
2 | import numpy as np
3 | 
4 | from torchvision.transforms import functional as F
5 | import torchvision.transforms as transforms
6 | import torch
7 | from torch.autograd import Variable
8 | import torch.backends.cudnn as cudnn
9 | 
10 | cudnn.benchmark = True
11 | 
12 | import net
13 | 
14 | 
15 | def extractDeepFeature(img, model, is_gray):
16 |     if is_gray:
17 |         transform = transforms.Compose([
18 |             transforms.Grayscale(),
19 |             transforms.ToTensor(),  # range [0, 255] -> [0.0, 1.0]
20 |             transforms.Normalize(mean=(0.5,), std=(0.5,))  # range [0.0, 1.0] -> [-1.0, 1.0]
21 |         ])
22 |     else:
23 |         transform = transforms.Compose([
24 |             transforms.ToTensor(),  # range [0, 255] -> [0.0, 1.0]
25 |             transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))  # range [0.0, 1.0] -> [-1.0, 1.0]
26 |         ])
27 |     img, img_ = transform(img), transform(F.hflip(img))
28 |     img, img_ = img.unsqueeze(0).to('cuda'), img_.unsqueeze(0).to('cuda')
29 |     ft = torch.cat((model(img), model(img_)), 1)[0].to('cpu')
30 |     return ft
31 | 
32 | 
33 | def KFold(n=6000, n_folds=10):
34 |     folds = []
35 |     base = list(range(n))
36 |     for i in range(n_folds):
37 |         test = base[i * n // n_folds:(i + 1) * n // n_folds]
38 |         train = list(set(base) - set(test))
39 |         folds.append([train, test])
40 |     return folds
41 | 
42 | 
43 | def eval_acc(threshold, diff):
44 |     y_true = []
45 |     y_predict = []
46 |     for d in diff:
47 |         same = 1 if float(d[2]) > threshold else 0
48 |         y_predict.append(same)
49 |         y_true.append(int(d[3]))
50 |     y_true = np.array(y_true)
51 |     y_predict = np.array(y_predict)
52 |     accuracy = 1.0 * np.count_nonzero(y_true == y_predict) / len(y_true)
53 |     return accuracy
54 | 
55 | 
56 | def find_best_threshold(thresholds, predicts):
57 |     best_threshold = best_acc = 0
58 |     for threshold in thresholds:
59 |         accuracy = eval_acc(threshold, predicts)
60 |         if accuracy >= best_acc:
61 |             best_acc = accuracy
62 |             best_threshold = threshold
63 |     return best_threshold
64 | 
65 | 
66 | def eval(model, model_path=None, is_gray=False):
67 |     predicts = []
68 |     model.load_state_dict(torch.load(model_path))
69 |     model.eval()
70 |     root = '/home/wangyf/dataset/lfw/lfw-112X96/'
71 |     with open('/home/wangyf/Project/sphereface/test/data/pairs.txt') as f:
72 |         pairs_lines = f.readlines()[1:]
73 | 
74 |     with torch.no_grad():
75 |         for i in range(6000):
76 |             p = pairs_lines[i].replace('\n', '').split('\t')
77 | 
78 |             if 3 == len(p):
79 |                 sameflag = 1
80 |                 name1 = p[0] + '/' + p[0] + '_' + '{:04}.jpg'.format(int(p[1]))
81 |                 name2 = p[0] + '/' + p[0] + '_' + '{:04}.jpg'.format(int(p[2]))
82 |             elif 4 == len(p):
83 |                 sameflag = 0
84 |                 name1 = p[0] + '/' + p[0] + '_' + '{:04}.jpg'.format(int(p[1]))
85 |                 name2 = p[2] + '/' + p[2] + '_' + '{:04}.jpg'.format(int(p[3]))
86 |             else:
87 |                 raise ValueError("WRONG LINE IN 'pairs.txt'!")
88 | 
89 |             with open(root + name1, 'rb') as f:
90 |                 img1 = Image.open(f).convert('RGB')
91 |             with open(root + name2, 'rb') as f:
92 |                 img2 = Image.open(f).convert('RGB')
93 |             f1 = extractDeepFeature(img1, model, is_gray)
94 |             f2 = extractDeepFeature(img2, model, is_gray)
95 | 
96 |             distance = f1.dot(f2) / (f1.norm() * f2.norm() + 1e-5)
97 |             predicts.append('{}\t{}\t{}\t{}\n'.format(name1, name2, distance, sameflag))
98 | 
99 |     accuracy = []
100 |     thd = []
101 |     folds = KFold(n=6000, n_folds=10)
102 |     thresholds = np.arange(-1.0, 1.0, 0.005)
103 |     predicts = np.array(list(map(lambda line: line.strip('\n').split(), predicts)))
104 |     for idx, (train, test) in enumerate(folds):
105 |         best_thresh = find_best_threshold(thresholds, predicts[train])
106 |         accuracy.append(eval_acc(best_thresh, predicts[test]))
107 |         thd.append(best_thresh)
108 |     print('LFWACC={:.4f} std={:.4f} thd={:.4f}'.format(np.mean(accuracy), np.std(accuracy), np.mean(thd)))
109 | 
110 |     return np.mean(accuracy), predicts
111 | 
112 | 
113 | if __name__ == '__main__':
114 |     _, result = eval(net.sphere().to('cuda'), model_path='checkpoint/CosFace_24_checkpoint.pth')
115 |     np.savetxt("result.txt", result, '%s')
116 | 
--------------------------------------------------------------------------------
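A minimal usage sketch for scoring a single face pair with the helpers above (the two image paths are hypothetical; the checkpoint path matches the `__main__` block):

```python
from PIL import Image
import torch

import net
from lfw_eval import extractDeepFeature

model = net.sphere().to('cuda')
model.load_state_dict(torch.load('checkpoint/CosFace_24_checkpoint.pth'))
model.eval()

with torch.no_grad():
    f1 = extractDeepFeature(Image.open('a.jpg').convert('RGB'), model, is_gray=False)
    f2 = extractDeepFeature(Image.open('b.jpg').convert('RGB'), model, is_gray=False)

# Cosine similarity of the concatenated (original, flipped) features.
score = f1.dot(f2) / (f1.norm() * f2.norm() + 1e-5)
print('similarity: {:.4f}'.format(score.item()))
```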
") 88 | 89 | with open(root + name1, 'rb') as f: 90 | img1 = Image.open(f).convert('RGB') 91 | with open(root + name2, 'rb') as f: 92 | img2 = Image.open(f).convert('RGB') 93 | f1 = extractDeepFeature(img1, model, is_gray) 94 | f2 = extractDeepFeature(img2, model, is_gray) 95 | 96 | distance = f1.dot(f2) / (f1.norm() * f2.norm() + 1e-5) 97 | predicts.append('{}\t{}\t{}\t{}\n'.format(name1, name2, distance, sameflag)) 98 | 99 | accuracy = [] 100 | thd = [] 101 | folds = KFold(n=6000, n_folds=10) 102 | thresholds = np.arange(-1.0, 1.0, 0.005) 103 | predicts = np.array(map(lambda line: line.strip('\n').split(), predicts)) 104 | for idx, (train, test) in enumerate(folds): 105 | best_thresh = find_best_threshold(thresholds, predicts[train]) 106 | accuracy.append(eval_acc(best_thresh, predicts[test])) 107 | thd.append(best_thresh) 108 | print('LFWACC={:.4f} std={:.4f} thd={:.4f}'.format(np.mean(accuracy), np.std(accuracy), np.mean(thd))) 109 | 110 | return np.mean(accuracy), predicts 111 | 112 | 113 | if __name__ == '__main__': 114 | _, result = eval(net.sphere().to('cuda'), model_path='checkpoint/CosFace_24_checkpoint.pth') 115 | np.savetxt("result.txt", result, '%s') 116 | -------------------------------------------------------------------------------- /log/LResnet50E-IR_s30_m0.35.log: -------------------------------------------------------------------------------- 1 | DataParallel( 2 | (module): LResNet( 3 | (conv1): Conv2d(3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) 4 | (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True) 5 | (prelu1): PReLU(num_parameters=64) 6 | (layer1): Sequential( 7 | (0): BlockIR( 8 | (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True) 9 | (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) 10 | (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True) 11 | (prelu1): PReLU(num_parameters=64) 12 | (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False) 13 | (bn3): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True) 14 | (downsample): Sequential( 15 | (0): Conv2d(64, 64, kernel_size=(1, 1), stride=(2, 2), bias=False) 16 | (1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True) 17 | ) 18 | ) 19 | (1): BlockIR( 20 | (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True) 21 | (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) 22 | (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True) 23 | (prelu1): PReLU(num_parameters=64) 24 | (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) 25 | (bn3): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True) 26 | ) 27 | (2): BlockIR( 28 | (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True) 29 | (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) 30 | (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True) 31 | (prelu1): PReLU(num_parameters=64) 32 | (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) 33 | (bn3): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True) 34 | ) 35 | ) 36 | (layer2): Sequential( 37 | (0): BlockIR( 38 | (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True) 39 | (conv1): Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) 40 | (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True) 41 | (prelu1): PReLU(num_parameters=128) 42 | (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(2, 2), 
padding=(1, 1), bias=False) 43 | (bn3): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True) 44 | (downsample): Sequential( 45 | (0): Conv2d(64, 128, kernel_size=(1, 1), stride=(2, 2), bias=False) 46 | (1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True) 47 | ) 48 | ) 49 | (1): BlockIR( 50 | (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True) 51 | (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) 52 | (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True) 53 | (prelu1): PReLU(num_parameters=128) 54 | (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) 55 | (bn3): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True) 56 | ) 57 | (2): BlockIR( 58 | (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True) 59 | (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) 60 | (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True) 61 | (prelu1): PReLU(num_parameters=128) 62 | (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) 63 | (bn3): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True) 64 | ) 65 | (3): BlockIR( 66 | (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True) 67 | (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) 68 | (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True) 69 | (prelu1): PReLU(num_parameters=128) 70 | (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) 71 | (bn3): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True) 72 | ) 73 | ) 74 | (layer3): Sequential( 75 | (0): BlockIR( 76 | (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True) 77 | (conv1): Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) 78 | (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True) 79 | (prelu1): PReLU(num_parameters=256) 80 | (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False) 81 | (bn3): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True) 82 | (downsample): Sequential( 83 | (0): Conv2d(128, 256, kernel_size=(1, 1), stride=(2, 2), bias=False) 84 | (1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True) 85 | ) 86 | ) 87 | (1): BlockIR( 88 | (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True) 89 | (conv1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) 90 | (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True) 91 | (prelu1): PReLU(num_parameters=256) 92 | (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) 93 | (bn3): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True) 94 | ) 95 | (2): BlockIR( 96 | (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True) 97 | (conv1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) 98 | (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True) 99 | (prelu1): PReLU(num_parameters=256) 100 | (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) 101 | (bn3): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True) 102 | ) 103 | (3): BlockIR( 104 | (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True) 105 | (conv1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) 106 | (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True) 107 | (prelu1): PReLU(num_parameters=256) 108 | (conv2): Conv2d(256, 256, 
kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) 109 | (bn3): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True) 110 | ) 111 | (4): BlockIR( 112 | (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True) 113 | (conv1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) 114 | (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True) 115 | (prelu1): PReLU(num_parameters=256) 116 | (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) 117 | (bn3): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True) 118 | ) 119 | (5): BlockIR( 120 | (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True) 121 | (conv1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) 122 | (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True) 123 | (prelu1): PReLU(num_parameters=256) 124 | (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) 125 | (bn3): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True) 126 | ) 127 | (6): BlockIR( 128 | (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True) 129 | (conv1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) 130 | (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True) 131 | (prelu1): PReLU(num_parameters=256) 132 | (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) 133 | (bn3): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True) 134 | ) 135 | (7): BlockIR( 136 | (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True) 137 | (conv1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) 138 | (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True) 139 | (prelu1): PReLU(num_parameters=256) 140 | (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) 141 | (bn3): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True) 142 | ) 143 | (8): BlockIR( 144 | (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True) 145 | (conv1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) 146 | (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True) 147 | (prelu1): PReLU(num_parameters=256) 148 | (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) 149 | (bn3): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True) 150 | ) 151 | (9): BlockIR( 152 | (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True) 153 | (conv1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) 154 | (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True) 155 | (prelu1): PReLU(num_parameters=256) 156 | (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) 157 | (bn3): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True) 158 | ) 159 | (10): BlockIR( 160 | (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True) 161 | (conv1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) 162 | (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True) 163 | (prelu1): PReLU(num_parameters=256) 164 | (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) 165 | (bn3): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True) 166 | ) 167 | (11): BlockIR( 168 | (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True) 169 | (conv1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), 
padding=(1, 1), bias=False) 170 | (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True) 171 | (prelu1): PReLU(num_parameters=256) 172 | (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) 173 | (bn3): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True) 174 | ) 175 | (12): BlockIR( 176 | (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True) 177 | (conv1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) 178 | (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True) 179 | (prelu1): PReLU(num_parameters=256) 180 | (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) 181 | (bn3): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True) 182 | ) 183 | (13): BlockIR( 184 | (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True) 185 | (conv1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) 186 | (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True) 187 | (prelu1): PReLU(num_parameters=256) 188 | (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) 189 | (bn3): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True) 190 | ) 191 | ) 192 | (layer4): Sequential( 193 | (0): BlockIR( 194 | (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True) 195 | (conv1): Conv2d(256, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) 196 | (bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True) 197 | (prelu1): PReLU(num_parameters=512) 198 | (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False) 199 | (bn3): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True) 200 | (downsample): Sequential( 201 | (0): Conv2d(256, 512, kernel_size=(1, 1), stride=(2, 2), bias=False) 202 | (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True) 203 | ) 204 | ) 205 | (1): BlockIR( 206 | (bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True) 207 | (conv1): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) 208 | (bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True) 209 | (prelu1): PReLU(num_parameters=512) 210 | (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) 211 | (bn3): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True) 212 | ) 213 | (2): BlockIR( 214 | (bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True) 215 | (conv1): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) 216 | (bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True) 217 | (prelu1): PReLU(num_parameters=512) 218 | (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) 219 | (bn3): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True) 220 | ) 221 | ) 222 | (fc): Sequential( 223 | (0): BatchNorm1d(21504, eps=1e-05, momentum=0.1, affine=True) 224 | (1): Dropout(p=0.4) 225 | (2): Linear(in_features=21504, out_features=512, bias=True) 226 | (3): BatchNorm1d(512, eps=1e-05, momentum=0.1, affine=True) 227 | ) 228 | ) 229 | ) 230 | length of train Dataset: 490606 231 | Number of Classses: 10572 232 | 2018-05-23 16:54:26 Epoch 1 start training 233 | 2018-05-23 16:56:41 Train Epoch: 1 [51200/490606 (10%)]100, Loss: 19.839719, Elapsed time: 135.1265s(100 iters) Margin: 0.3500, Scale: 30.00 234 | 2018-05-23 16:58:37 Train Epoch: 1 [102400/490606 (21%)]200, Loss: 17.907286, Elapsed time: 116.1707s(100 iters) Margin: 0.3500, Scale: 30.00 235 | 
2018-05-23 17:00:33 Train Epoch: 1 [153600/490606 (31%)]300, Loss: 16.510734, Elapsed time: 115.7828s(100 iters) Margin: 0.3500, Scale: 30.00 236 | 2018-05-23 17:02:27 Train Epoch: 1 [204800/490606 (42%)]400, Loss: 15.390483, Elapsed time: 114.3559s(100 iters) Margin: 0.3500, Scale: 30.00 237 | 2018-05-23 17:04:21 Train Epoch: 1 [256000/490606 (52%)]500, Loss: 14.372923, Elapsed time: 113.7888s(100 iters) Margin: 0.3500, Scale: 30.00 238 | 2018-05-23 17:06:16 Train Epoch: 1 [307200/490606 (63%)]600, Loss: 13.514383, Elapsed time: 114.6223s(100 iters) Margin: 0.3500, Scale: 30.00 239 | 2018-05-23 17:08:11 Train Epoch: 1 [358400/490606 (73%)]700, Loss: 12.712846, Elapsed time: 114.7875s(100 iters) Margin: 0.3500, Scale: 30.00 240 | 2018-05-23 17:10:04 Train Epoch: 1 [409600/490606 (84%)]800, Loss: 12.023481, Elapsed time: 113.8546s(100 iters) Margin: 0.3500, Scale: 30.00 241 | 2018-05-23 17:11:58 Train Epoch: 1 [460800/490606 (94%)]900, Loss: 11.434706, Elapsed time: 113.3382s(100 iters) Margin: 0.3500, Scale: 30.00 242 | LFWACC=0.9415 std=0.0125 thd=0.3385 243 | 2018-05-23 17:18:02 Epoch 2 start training 244 | 2018-05-23 17:19:58 Train Epoch: 2 [51200/490606 (10%)]1058, Loss: 10.109621, Elapsed time: 116.0335s(100 iters) Margin: 0.3500, Scale: 30.00 245 | 2018-05-23 17:21:54 Train Epoch: 2 [102400/490606 (21%)]1158, Loss: 9.896629, Elapsed time: 116.3538s(100 iters) Margin: 0.3500, Scale: 30.00 246 | 2018-05-23 17:23:50 Train Epoch: 2 [153600/490606 (31%)]1258, Loss: 9.637585, Elapsed time: 115.1183s(100 iters) Margin: 0.3500, Scale: 30.00 247 | 2018-05-23 17:25:44 Train Epoch: 2 [204800/490606 (42%)]1358, Loss: 9.355494, Elapsed time: 114.0129s(100 iters) Margin: 0.3500, Scale: 30.00 248 | 2018-05-23 17:27:37 Train Epoch: 2 [256000/490606 (52%)]1458, Loss: 9.119333, Elapsed time: 113.3256s(100 iters) Margin: 0.3500, Scale: 30.00 249 | 2018-05-23 17:29:32 Train Epoch: 2 [307200/490606 (63%)]1558, Loss: 8.828196, Elapsed time: 114.6317s(100 iters) Margin: 0.3500, Scale: 30.00 250 | 2018-05-23 17:31:26 Train Epoch: 2 [358400/490606 (73%)]1658, Loss: 8.666209, Elapsed time: 114.1582s(100 iters) Margin: 0.3500, Scale: 30.00 251 | 2018-05-23 17:33:20 Train Epoch: 2 [409600/490606 (84%)]1758, Loss: 8.452412, Elapsed time: 113.8725s(100 iters) Margin: 0.3500, Scale: 30.00 252 | 2018-05-23 17:35:13 Train Epoch: 2 [460800/490606 (94%)]1858, Loss: 8.208908, Elapsed time: 113.7600s(100 iters) Margin: 0.3500, Scale: 30.00 253 | LFWACC=0.9712 std=0.0083 thd=0.3325 254 | 2018-05-23 17:41:32 Epoch 3 start training 255 | 2018-05-23 17:43:26 Train Epoch: 3 [51200/490606 (10%)]2016, Loss: 7.346916, Elapsed time: 113.5997s(100 iters) Margin: 0.3500, Scale: 30.00 256 | 2018-05-23 17:45:22 Train Epoch: 3 [102400/490606 (21%)]2116, Loss: 7.424247, Elapsed time: 116.2026s(100 iters) Margin: 0.3500, Scale: 30.00 257 | 2018-05-23 17:47:17 Train Epoch: 3 [153600/490606 (31%)]2216, Loss: 7.412981, Elapsed time: 115.5125s(100 iters) Margin: 0.3500, Scale: 30.00 258 | 2018-05-23 17:49:12 Train Epoch: 3 [204800/490606 (42%)]2316, Loss: 7.304135, Elapsed time: 114.9517s(100 iters) Margin: 0.3500, Scale: 30.00 259 | 2018-05-23 17:51:07 Train Epoch: 3 [256000/490606 (52%)]2416, Loss: 7.213265, Elapsed time: 114.4825s(100 iters) Margin: 0.3500, Scale: 30.00 260 | 2018-05-23 17:53:02 Train Epoch: 3 [307200/490606 (63%)]2516, Loss: 7.152587, Elapsed time: 115.4336s(100 iters) Margin: 0.3500, Scale: 30.00 261 | 2018-05-23 17:54:57 Train Epoch: 3 [358400/490606 (73%)]2616, Loss: 7.084430, Elapsed time: 114.9885s(100 iters) 
Margin: 0.3500, Scale: 30.00 262 | 2018-05-23 17:56:52 Train Epoch: 3 [409600/490606 (84%)]2716, Loss: 7.007657, Elapsed time: 114.4569s(100 iters) Margin: 0.3500, Scale: 30.00 263 | 2018-05-23 17:58:45 Train Epoch: 3 [460800/490606 (94%)]2816, Loss: 6.933078, Elapsed time: 113.8153s(100 iters) Margin: 0.3500, Scale: 30.00 264 | LFWACC=0.9825 std=0.0060 thd=0.3080 265 | 2018-05-23 18:04:34 Epoch 4 start training 266 | 2018-05-23 18:06:28 Train Epoch: 4 [51200/490606 (10%)]2974, Loss: 6.190636, Elapsed time: 114.2435s(100 iters) Margin: 0.3500, Scale: 30.00 267 | 2018-05-23 18:08:25 Train Epoch: 4 [102400/490606 (21%)]3074, Loss: 6.372652, Elapsed time: 116.7259s(100 iters) Margin: 0.3500, Scale: 30.00 268 | 2018-05-23 18:10:21 Train Epoch: 4 [153600/490606 (31%)]3174, Loss: 6.366093, Elapsed time: 116.2019s(100 iters) Margin: 0.3500, Scale: 30.00 269 | 2018-05-23 18:12:16 Train Epoch: 4 [204800/490606 (42%)]3274, Loss: 6.465160, Elapsed time: 115.2730s(100 iters) Margin: 0.3500, Scale: 30.00 270 | 2018-05-23 18:14:11 Train Epoch: 4 [256000/490606 (52%)]3374, Loss: 6.421598, Elapsed time: 114.5138s(100 iters) Margin: 0.3500, Scale: 30.00 271 | 2018-05-23 18:16:05 Train Epoch: 4 [307200/490606 (63%)]3474, Loss: 6.384879, Elapsed time: 114.5349s(100 iters) Margin: 0.3500, Scale: 30.00 272 | 2018-05-23 18:18:00 Train Epoch: 4 [358400/490606 (73%)]3574, Loss: 6.411107, Elapsed time: 114.5010s(100 iters) Margin: 0.3500, Scale: 30.00 273 | 2018-05-23 18:19:54 Train Epoch: 4 [409600/490606 (84%)]3674, Loss: 6.300952, Elapsed time: 114.1113s(100 iters) Margin: 0.3500, Scale: 30.00 274 | 2018-05-23 18:21:47 Train Epoch: 4 [460800/490606 (94%)]3774, Loss: 6.268885, Elapsed time: 113.4159s(100 iters) Margin: 0.3500, Scale: 30.00 275 | LFWACC=0.9852 std=0.0043 thd=0.3320 276 | 2018-05-23 18:27:50 Epoch 5 start training 277 | 2018-05-23 18:29:44 Train Epoch: 5 [51200/490606 (10%)]3932, Loss: 5.646365, Elapsed time: 113.9083s(100 iters) Margin: 0.3500, Scale: 30.00 278 | 2018-05-23 18:31:40 Train Epoch: 5 [102400/490606 (21%)]4032, Loss: 5.816934, Elapsed time: 115.8667s(100 iters) Margin: 0.3500, Scale: 30.00 279 | 2018-05-23 18:33:35 Train Epoch: 5 [153600/490606 (31%)]4132, Loss: 5.912141, Elapsed time: 115.3982s(100 iters) Margin: 0.3500, Scale: 30.00 280 | 2018-05-23 18:35:30 Train Epoch: 5 [204800/490606 (42%)]4232, Loss: 5.908324, Elapsed time: 114.6472s(100 iters) Margin: 0.3500, Scale: 30.00 281 | 2018-05-23 18:37:23 Train Epoch: 5 [256000/490606 (52%)]4332, Loss: 5.973635, Elapsed time: 113.5598s(100 iters) Margin: 0.3500, Scale: 30.00 282 | 2018-05-23 18:39:17 Train Epoch: 5 [307200/490606 (63%)]4432, Loss: 5.947036, Elapsed time: 113.9827s(100 iters) Margin: 0.3500, Scale: 30.00 283 | 2018-05-23 18:41:12 Train Epoch: 5 [358400/490606 (73%)]4532, Loss: 5.958269, Elapsed time: 114.7438s(100 iters) Margin: 0.3500, Scale: 30.00 284 | 2018-05-23 18:43:06 Train Epoch: 5 [409600/490606 (84%)]4632, Loss: 5.943605, Elapsed time: 114.3731s(100 iters) Margin: 0.3500, Scale: 30.00 285 | 2018-05-23 18:45:00 Train Epoch: 5 [460800/490606 (94%)]4732, Loss: 5.938157, Elapsed time: 113.7440s(100 iters) Margin: 0.3500, Scale: 30.00 286 | LFWACC=0.9875 std=0.0047 thd=0.3020 287 | 2018-05-23 18:50:12 Epoch 6 start training 288 | 2018-05-23 18:52:07 Train Epoch: 6 [51200/490606 (10%)]4890, Loss: 5.245087, Elapsed time: 115.3927s(100 iters) Margin: 0.3500, Scale: 30.00 289 | 2018-05-23 18:54:05 Train Epoch: 6 [102400/490606 (21%)]4990, Loss: 5.460998, Elapsed time: 117.7866s(100 iters) Margin: 0.3500, Scale: 30.00 
290 | 2018-05-23 18:56:03 Train Epoch: 6 [153600/490606 (31%)]5090, Loss: 5.629939, Elapsed time: 117.8101s(100 iters) Margin: 0.3500, Scale: 30.00 291 | 2018-05-23 18:57:59 Train Epoch: 6 [204800/490606 (42%)]5190, Loss: 5.625062, Elapsed time: 116.1092s(100 iters) Margin: 0.3500, Scale: 30.00 292 | 2018-05-23 18:59:54 Train Epoch: 6 [256000/490606 (52%)]5290, Loss: 5.663412, Elapsed time: 115.6658s(100 iters) Margin: 0.3500, Scale: 30.00 293 | 2018-05-23 19:01:50 Train Epoch: 6 [307200/490606 (63%)]5390, Loss: 5.679355, Elapsed time: 115.2119s(100 iters) Margin: 0.3500, Scale: 30.00 294 | 2018-05-23 19:03:45 Train Epoch: 6 [358400/490606 (73%)]5490, Loss: 5.704135, Elapsed time: 115.1987s(100 iters) Margin: 0.3500, Scale: 30.00 295 | 2018-05-23 19:05:41 Train Epoch: 6 [409600/490606 (84%)]5590, Loss: 5.675297, Elapsed time: 115.7467s(100 iters) Margin: 0.3500, Scale: 30.00 296 | 2018-05-23 19:07:36 Train Epoch: 6 [460800/490606 (94%)]5690, Loss: 5.682498, Elapsed time: 115.9070s(100 iters) Margin: 0.3500, Scale: 30.00 297 | LFWACC=0.9873 std=0.0059 thd=0.3250 298 | 2018-05-23 19:13:11 Epoch 7 start training 299 | 2018-05-23 19:15:06 Train Epoch: 7 [51200/490606 (10%)]5848, Loss: 5.014815, Elapsed time: 115.0687s(100 iters) Margin: 0.3500, Scale: 30.00 300 | 2018-05-23 19:17:02 Train Epoch: 7 [102400/490606 (21%)]5948, Loss: 5.188016, Elapsed time: 115.2664s(100 iters) Margin: 0.3500, Scale: 30.00 301 | 2018-05-23 19:18:56 Train Epoch: 7 [153600/490606 (31%)]6048, Loss: 5.404969, Elapsed time: 114.1985s(100 iters) Margin: 0.3500, Scale: 30.00 302 | 2018-05-23 19:20:51 Train Epoch: 7 [204800/490606 (42%)]6148, Loss: 5.425106, Elapsed time: 115.3635s(100 iters) Margin: 0.3500, Scale: 30.00 303 | 2018-05-23 19:22:46 Train Epoch: 7 [256000/490606 (52%)]6248, Loss: 5.442390, Elapsed time: 115.1659s(100 iters) Margin: 0.3500, Scale: 30.00 304 | 2018-05-23 19:24:41 Train Epoch: 7 [307200/490606 (63%)]6348, Loss: 5.455317, Elapsed time: 114.7140s(100 iters) Margin: 0.3500, Scale: 30.00 305 | 2018-05-23 19:26:35 Train Epoch: 7 [358400/490606 (73%)]6448, Loss: 5.475979, Elapsed time: 113.8504s(100 iters) Margin: 0.3500, Scale: 30.00 306 | 2018-05-23 19:28:30 Train Epoch: 7 [409600/490606 (84%)]6548, Loss: 5.502140, Elapsed time: 114.9464s(100 iters) Margin: 0.3500, Scale: 30.00 307 | 2018-05-23 19:30:25 Train Epoch: 7 [460800/490606 (94%)]6648, Loss: 5.493289, Elapsed time: 115.3134s(100 iters) Margin: 0.3500, Scale: 30.00 308 | LFWACC=0.9888 std=0.0040 thd=0.3190 309 | 2018-05-23 19:36:11 Epoch 8 start training 310 | 2018-05-23 19:38:06 Train Epoch: 8 [51200/490606 (10%)]6806, Loss: 4.873902, Elapsed time: 114.6484s(100 iters) Margin: 0.3500, Scale: 30.00 311 | 2018-05-23 19:40:02 Train Epoch: 8 [102400/490606 (21%)]6906, Loss: 5.080276, Elapsed time: 116.2699s(100 iters) Margin: 0.3500, Scale: 30.00 312 | 2018-05-23 19:41:57 Train Epoch: 8 [153600/490606 (31%)]7006, Loss: 5.222646, Elapsed time: 114.7664s(100 iters) Margin: 0.3500, Scale: 30.00 313 | 2018-05-23 19:43:52 Train Epoch: 8 [204800/490606 (42%)]7106, Loss: 5.244015, Elapsed time: 115.3822s(100 iters) Margin: 0.3500, Scale: 30.00 314 | 2018-05-23 19:45:47 Train Epoch: 8 [256000/490606 (52%)]7206, Loss: 5.289430, Elapsed time: 115.1516s(100 iters) Margin: 0.3500, Scale: 30.00 315 | 2018-05-23 19:47:42 Train Epoch: 8 [307200/490606 (63%)]7306, Loss: 5.288183, Elapsed time: 114.2911s(100 iters) Margin: 0.3500, Scale: 30.00 316 | 2018-05-23 19:49:35 Train Epoch: 8 [358400/490606 (73%)]7406, Loss: 5.297619, Elapsed time: 113.2877s(100 iters) 
Margin: 0.3500, Scale: 30.00 317 | 2018-05-23 19:51:29 Train Epoch: 8 [409600/490606 (84%)]7506, Loss: 5.339325, Elapsed time: 114.1270s(100 iters) Margin: 0.3500, Scale: 30.00 318 | 2018-05-23 19:53:23 Train Epoch: 8 [460800/490606 (94%)]7606, Loss: 5.307746, Elapsed time: 114.2470s(100 iters) Margin: 0.3500, Scale: 30.00 319 | LFWACC=0.9880 std=0.0044 thd=0.3250 320 | 2018-05-23 19:59:22 Epoch 9 start training 321 | 2018-05-23 20:01:16 Train Epoch: 9 [51200/490606 (10%)]7764, Loss: 4.673285, Elapsed time: 114.2080s(100 iters) Margin: 0.3500, Scale: 30.00 322 | 2018-05-23 20:03:12 Train Epoch: 9 [102400/490606 (21%)]7864, Loss: 4.923491, Elapsed time: 115.5802s(100 iters) Margin: 0.3500, Scale: 30.00 323 | 2018-05-23 20:05:07 Train Epoch: 9 [153600/490606 (31%)]7964, Loss: 5.044543, Elapsed time: 114.6124s(100 iters) Margin: 0.3500, Scale: 30.00 324 | 2018-05-23 20:07:02 Train Epoch: 9 [204800/490606 (42%)]8064, Loss: 5.101215, Elapsed time: 115.3873s(100 iters) Margin: 0.3500, Scale: 30.00 325 | 2018-05-23 20:08:57 Train Epoch: 9 [256000/490606 (52%)]8164, Loss: 5.179439, Elapsed time: 114.9231s(100 iters) Margin: 0.3500, Scale: 30.00 326 | 2018-05-23 20:10:51 Train Epoch: 9 [307200/490606 (63%)]8264, Loss: 5.208477, Elapsed time: 114.3796s(100 iters) Margin: 0.3500, Scale: 30.00 327 | 2018-05-23 20:12:45 Train Epoch: 9 [358400/490606 (73%)]8364, Loss: 5.209121, Elapsed time: 113.3983s(100 iters) Margin: 0.3500, Scale: 30.00 328 | 2018-05-23 20:14:40 Train Epoch: 9 [409600/490606 (84%)]8464, Loss: 5.210257, Elapsed time: 114.7546s(100 iters) Margin: 0.3500, Scale: 30.00 329 | 2018-05-23 20:16:34 Train Epoch: 9 [460800/490606 (94%)]8564, Loss: 5.260453, Elapsed time: 114.9132s(100 iters) Margin: 0.3500, Scale: 30.00 330 | LFWACC=0.9878 std=0.0062 thd=0.3290 331 | 2018-05-23 20:22:29 Epoch 10 start training 332 | 2018-05-23 20:24:24 Train Epoch: 10 [51200/490606 (10%)]8722, Loss: 4.641458, Elapsed time: 114.5786s(100 iters) Margin: 0.3500, Scale: 30.00 333 | 2018-05-23 20:26:21 Train Epoch: 10 [102400/490606 (21%)]8822, Loss: 4.815282, Elapsed time: 116.7388s(100 iters) Margin: 0.3500, Scale: 30.00 334 | 2018-05-23 20:28:15 Train Epoch: 10 [153600/490606 (31%)]8922, Loss: 4.939459, Elapsed time: 114.5794s(100 iters) Margin: 0.3500, Scale: 30.00 335 | 2018-05-23 20:30:11 Train Epoch: 10 [204800/490606 (42%)]9022, Loss: 5.014611, Elapsed time: 115.6252s(100 iters) Margin: 0.3500, Scale: 30.00 336 | 2018-05-23 20:32:06 Train Epoch: 10 [256000/490606 (52%)]9122, Loss: 5.125206, Elapsed time: 115.4168s(100 iters) Margin: 0.3500, Scale: 30.00 337 | 2018-05-23 20:34:01 Train Epoch: 10 [307200/490606 (63%)]9222, Loss: 5.071279, Elapsed time: 115.0166s(100 iters) Margin: 0.3500, Scale: 30.00 338 | 2018-05-23 20:35:55 Train Epoch: 10 [358400/490606 (73%)]9322, Loss: 5.121540, Elapsed time: 113.5832s(100 iters) Margin: 0.3500, Scale: 30.00 339 | 2018-05-23 20:37:49 Train Epoch: 10 [409600/490606 (84%)]9422, Loss: 5.110349, Elapsed time: 114.3080s(100 iters) Margin: 0.3500, Scale: 30.00 340 | 2018-05-23 20:39:44 Train Epoch: 10 [460800/490606 (94%)]9522, Loss: 5.073607, Elapsed time: 114.9454s(100 iters) Margin: 0.3500, Scale: 30.00 341 | LFWACC=0.9897 std=0.0041 thd=0.3100 342 | 2018-05-23 20:45:34 Epoch 11 start training 343 | 2018-05-23 20:47:29 Train Epoch: 11 [51200/490606 (10%)]9680, Loss: 4.531180, Elapsed time: 114.8832s(100 iters) Margin: 0.3500, Scale: 30.00 344 | 2018-05-23 20:49:26 Train Epoch: 11 [102400/490606 (21%)]9780, Loss: 4.737419, Elapsed time: 116.5707s(100 iters) Margin: 0.3500, 
Scale: 30.00 345 | 2018-05-23 20:51:21 Train Epoch: 11 [153600/490606 (31%)]9880, Loss: 4.865227, Elapsed time: 115.0135s(100 iters) Margin: 0.3500, Scale: 30.00 346 | 2018-05-23 20:53:16 Train Epoch: 11 [204800/490606 (42%)]9980, Loss: 4.954676, Elapsed time: 115.6578s(100 iters) Margin: 0.3500, Scale: 30.00 347 | 2018-05-23 20:55:12 Train Epoch: 11 [256000/490606 (52%)]10080, Loss: 4.956310, Elapsed time: 115.4802s(100 iters) Margin: 0.3500, Scale: 30.00 348 | 2018-05-23 20:57:07 Train Epoch: 11 [307200/490606 (63%)]10180, Loss: 4.974073, Elapsed time: 114.8682s(100 iters) Margin: 0.3500, Scale: 30.00 349 | 2018-05-23 20:59:01 Train Epoch: 11 [358400/490606 (73%)]10280, Loss: 4.980053, Elapsed time: 113.9168s(100 iters) Margin: 0.3500, Scale: 30.00 350 | 2018-05-23 21:00:55 Train Epoch: 11 [409600/490606 (84%)]10380, Loss: 5.001472, Elapsed time: 114.7497s(100 iters) Margin: 0.3500, Scale: 30.00 351 | 2018-05-23 21:02:50 Train Epoch: 11 [460800/490606 (94%)]10480, Loss: 5.088610, Elapsed time: 114.5405s(100 iters) Margin: 0.3500, Scale: 30.00 352 | LFWACC=0.9892 std=0.0045 thd=0.2995 353 | 2018-05-23 21:08:37 Epoch 12 start training 354 | 2018-05-23 21:10:32 Train Epoch: 12 [51200/490606 (10%)]10638, Loss: 4.430387, Elapsed time: 115.0907s(100 iters) Margin: 0.3500, Scale: 30.00 355 | 2018-05-23 21:12:29 Train Epoch: 12 [102400/490606 (21%)]10738, Loss: 4.676767, Elapsed time: 116.9357s(100 iters) Margin: 0.3500, Scale: 30.00 356 | 2018-05-23 21:14:24 Train Epoch: 12 [153600/490606 (31%)]10838, Loss: 4.767740, Elapsed time: 115.1309s(100 iters) Margin: 0.3500, Scale: 30.00 357 | 2018-05-23 21:16:20 Train Epoch: 12 [204800/490606 (42%)]10938, Loss: 4.867957, Elapsed time: 115.3868s(100 iters) Margin: 0.3500, Scale: 30.00 358 | 2018-05-23 21:18:14 Train Epoch: 12 [256000/490606 (52%)]11038, Loss: 4.890486, Elapsed time: 114.6754s(100 iters) Margin: 0.3500, Scale: 30.00 359 | 2018-05-23 21:20:09 Train Epoch: 12 [307200/490606 (63%)]11138, Loss: 4.949662, Elapsed time: 114.6340s(100 iters) Margin: 0.3500, Scale: 30.00 360 | 2018-05-23 21:22:03 Train Epoch: 12 [358400/490606 (73%)]11238, Loss: 4.933944, Elapsed time: 114.4409s(100 iters) Margin: 0.3500, Scale: 30.00 361 | 2018-05-23 21:23:58 Train Epoch: 12 [409600/490606 (84%)]11338, Loss: 5.001973, Elapsed time: 114.7263s(100 iters) Margin: 0.3500, Scale: 30.00 362 | 2018-05-23 21:25:53 Train Epoch: 12 [460800/490606 (94%)]11438, Loss: 4.980888, Elapsed time: 115.1311s(100 iters) Margin: 0.3500, Scale: 30.00 363 | LFWACC=0.9883 std=0.0055 thd=0.3160 364 | 2018-05-23 21:31:46 Epoch 13 start training 365 | 2018-05-23 21:33:41 Train Epoch: 13 [51200/490606 (10%)]11596, Loss: 4.361892, Elapsed time: 115.4490s(100 iters) Margin: 0.3500, Scale: 30.00 366 | 2018-05-23 21:35:37 Train Epoch: 13 [102400/490606 (21%)]11696, Loss: 4.579671, Elapsed time: 116.3190s(100 iters) Margin: 0.3500, Scale: 30.00 367 | 2018-05-23 21:37:32 Train Epoch: 13 [153600/490606 (31%)]11796, Loss: 4.682249, Elapsed time: 115.1342s(100 iters) Margin: 0.3500, Scale: 30.00 368 | 2018-05-23 21:39:28 Train Epoch: 13 [204800/490606 (42%)]11896, Loss: 4.781169, Elapsed time: 115.3091s(100 iters) Margin: 0.3500, Scale: 30.00 369 | 2018-05-23 21:41:23 Train Epoch: 13 [256000/490606 (52%)]11996, Loss: 4.828560, Elapsed time: 115.5525s(100 iters) Margin: 0.3500, Scale: 30.00 370 | 2018-05-23 21:43:19 Train Epoch: 13 [307200/490606 (63%)]12096, Loss: 4.873653, Elapsed time: 115.1873s(100 iters) Margin: 0.3500, Scale: 30.00 371 | 2018-05-23 21:45:13 Train Epoch: 13 [358400/490606 
(73%)]12196, Loss: 4.885928, Elapsed time: 114.0126s(100 iters) Margin: 0.3500, Scale: 30.00 372 | 2018-05-23 21:47:07 Train Epoch: 13 [409600/490606 (84%)]12296, Loss: 4.922477, Elapsed time: 114.5610s(100 iters) Margin: 0.3500, Scale: 30.00 373 | 2018-05-23 21:49:03 Train Epoch: 13 [460800/490606 (94%)]12396, Loss: 4.945636, Elapsed time: 115.5906s(100 iters) Margin: 0.3500, Scale: 30.00 374 | LFWACC=0.9903 std=0.0037 thd=0.3195 375 | 2018-05-23 21:54:50 Epoch 14 start training 376 | 2018-05-23 21:56:45 Train Epoch: 14 [51200/490606 (10%)]12554, Loss: 4.351317, Elapsed time: 115.5930s(100 iters) Margin: 0.3500, Scale: 30.00 377 | 2018-05-23 21:58:42 Train Epoch: 14 [102400/490606 (21%)]12654, Loss: 4.452397, Elapsed time: 116.8281s(100 iters) Margin: 0.3500, Scale: 30.00 378 | 2018-05-23 22:00:37 Train Epoch: 14 [153600/490606 (31%)]12754, Loss: 4.642189, Elapsed time: 114.8725s(100 iters) Margin: 0.3500, Scale: 30.00 379 | 2018-05-23 22:02:32 Train Epoch: 14 [204800/490606 (42%)]12854, Loss: 4.717890, Elapsed time: 114.8387s(100 iters) Margin: 0.3500, Scale: 30.00 380 | 2018-05-23 22:04:27 Train Epoch: 14 [256000/490606 (52%)]12954, Loss: 4.788893, Elapsed time: 115.2531s(100 iters) Margin: 0.3500, Scale: 30.00 381 | 2018-05-23 22:06:22 Train Epoch: 14 [307200/490606 (63%)]13054, Loss: 4.857114, Elapsed time: 114.9479s(100 iters) Margin: 0.3500, Scale: 30.00 382 | 2018-05-23 22:08:16 Train Epoch: 14 [358400/490606 (73%)]13154, Loss: 4.912225, Elapsed time: 114.0901s(100 iters) Margin: 0.3500, Scale: 30.00 383 | 2018-05-23 22:10:10 Train Epoch: 14 [409600/490606 (84%)]13254, Loss: 4.906728, Elapsed time: 114.1588s(100 iters) Margin: 0.3500, Scale: 30.00 384 | 2018-05-23 22:12:05 Train Epoch: 14 [460800/490606 (94%)]13354, Loss: 4.890937, Elapsed time: 114.5931s(100 iters) Margin: 0.3500, Scale: 30.00 385 | LFWACC=0.9875 std=0.0042 thd=0.3350 386 | 2018-05-23 22:18:00 Epoch 15 start training 387 | 2018-05-23 22:19:55 Train Epoch: 15 [51200/490606 (10%)]13512, Loss: 4.233685, Elapsed time: 114.7942s(100 iters) Margin: 0.3500, Scale: 30.00 388 | 2018-05-23 22:21:51 Train Epoch: 15 [102400/490606 (21%)]13612, Loss: 4.441443, Elapsed time: 116.0109s(100 iters) Margin: 0.3500, Scale: 30.00 389 | 2018-05-23 22:23:46 Train Epoch: 15 [153600/490606 (31%)]13712, Loss: 4.617605, Elapsed time: 115.4818s(100 iters) Margin: 0.3500, Scale: 30.00 390 | 2018-05-23 22:25:42 Train Epoch: 15 [204800/490606 (42%)]13812, Loss: 4.734369, Elapsed time: 115.7649s(100 iters) Margin: 0.3500, Scale: 30.00 391 | 2018-05-23 22:27:37 Train Epoch: 15 [256000/490606 (52%)]13912, Loss: 4.775582, Elapsed time: 114.8409s(100 iters) Margin: 0.3500, Scale: 30.00 392 | 2018-05-23 22:29:31 Train Epoch: 15 [307200/490606 (63%)]14012, Loss: 4.808717, Elapsed time: 114.6342s(100 iters) Margin: 0.3500, Scale: 30.00 393 | 2018-05-23 22:31:25 Train Epoch: 15 [358400/490606 (73%)]14112, Loss: 4.855674, Elapsed time: 113.7436s(100 iters) Margin: 0.3500, Scale: 30.00 394 | 2018-05-23 22:33:19 Train Epoch: 15 [409600/490606 (84%)]14212, Loss: 4.770391, Elapsed time: 114.2885s(100 iters) Margin: 0.3500, Scale: 30.00 395 | 2018-05-23 22:35:14 Train Epoch: 15 [460800/490606 (94%)]14312, Loss: 4.893708, Elapsed time: 114.9271s(100 iters) Margin: 0.3500, Scale: 30.00 396 | LFWACC=0.9880 std=0.0045 thd=0.3315 397 | 2018-05-23 22:41:03 Epoch 16 start training 398 | 2018-05-23 22:42:58 Train Epoch: 16 [51200/490606 (10%)]14470, Loss: 4.270619, Elapsed time: 114.7901s(100 iters) Margin: 0.3500, Scale: 30.00 399 | 2018-05-23 22:44:55 Train Epoch: 
16 [102400/490606 (21%)]14570, Loss: 4.443030, Elapsed time: 116.5432s(100 iters) Margin: 0.3500, Scale: 30.00 400 | 2018-05-23 22:46:50 Train Epoch: 16 [153600/490606 (31%)]14670, Loss: 4.546895, Elapsed time: 115.6250s(100 iters) Margin: 0.3500, Scale: 30.00 401 | 2018-05-23 22:48:45 Train Epoch: 16 [204800/490606 (42%)]14770, Loss: 4.657546, Elapsed time: 115.0307s(100 iters) Margin: 0.3500, Scale: 30.00 402 | 2018-05-23 22:50:40 Train Epoch: 16 [256000/490606 (52%)]14870, Loss: 4.704675, Elapsed time: 115.0638s(100 iters) Margin: 0.3500, Scale: 30.00 403 | 2018-05-23 22:52:35 Train Epoch: 16 [307200/490606 (63%)]14970, Loss: 4.832542, Elapsed time: 114.5356s(100 iters) Margin: 0.3500, Scale: 30.00 404 | 2018-05-23 22:54:29 Train Epoch: 16 [358400/490606 (73%)]15070, Loss: 4.768936, Elapsed time: 113.7819s(100 iters) Margin: 0.3500, Scale: 30.00 405 | 2018-05-23 22:57:04 Train Epoch: 16 [409600/490606 (84%)]15170, Loss: 4.828149, Elapsed time: 154.8046s(100 iters) Margin: 0.3500, Scale: 30.00 406 | 2018-05-23 23:00:41 Train Epoch: 16 [460800/490606 (94%)]15270, Loss: 4.823737, Elapsed time: 217.6718s(100 iters) Margin: 0.3500, Scale: 30.00 407 | LFWACC=0.9888 std=0.0048 thd=0.3290 408 | 2018-05-23 23:07:18 Epoch 17 start training 409 | 2018-05-23 23:09:19 Train Epoch: 17 [51200/490606 (10%)]15428, Loss: 4.205311, Elapsed time: 120.2520s(100 iters) Margin: 0.3500, Scale: 30.00 410 | 2018-05-23 23:11:16 Train Epoch: 17 [102400/490606 (21%)]15528, Loss: 4.420421, Elapsed time: 117.0755s(100 iters) Margin: 0.3500, Scale: 30.00 411 | 2018-05-23 23:13:12 Train Epoch: 17 [153600/490606 (31%)]15628, Loss: 4.574052, Elapsed time: 116.6817s(100 iters) Margin: 0.3500, Scale: 30.00 412 | 2018-05-23 23:15:09 Train Epoch: 17 [204800/490606 (42%)]15728, Loss: 4.687677, Elapsed time: 116.0828s(100 iters) Margin: 0.3500, Scale: 30.00 413 | 2018-05-23 23:17:04 Train Epoch: 17 [256000/490606 (52%)]15828, Loss: 4.735198, Elapsed time: 115.7833s(100 iters) Margin: 0.3500, Scale: 30.00 414 | 2018-05-23 23:19:00 Train Epoch: 17 [307200/490606 (63%)]15928, Loss: 4.720624, Elapsed time: 115.2515s(100 iters) Margin: 0.3500, Scale: 30.00 415 | 2018-05-23 23:20:21 Adjust learning rate to 0.01 416 | 2018-05-23 23:20:55 Train Epoch: 17 [358400/490606 (73%)]16028, Loss: 4.697576, Elapsed time: 114.9917s(100 iters) Margin: 0.3500, Scale: 30.00 417 | 2018-05-23 23:22:49 Train Epoch: 17 [409600/490606 (84%)]16128, Loss: 4.013065, Elapsed time: 114.3425s(100 iters) Margin: 0.3500, Scale: 30.00 418 | 2018-05-23 23:24:43 Train Epoch: 17 [460800/490606 (94%)]16228, Loss: 3.782262, Elapsed time: 114.2828s(100 iters) Margin: 0.3500, Scale: 30.00 419 | LFWACC=0.9923 std=0.0036 thd=0.2660 420 | 2018-05-23 23:29:55 Epoch 18 start training 421 | 2018-05-23 23:31:50 Train Epoch: 18 [51200/490606 (10%)]16386, Loss: 2.952711, Elapsed time: 115.1212s(100 iters) Margin: 0.3500, Scale: 30.00 422 | 2018-05-23 23:33:47 Train Epoch: 18 [102400/490606 (21%)]16486, Loss: 2.913076, Elapsed time: 117.0816s(100 iters) Margin: 0.3500, Scale: 30.00 423 | 2018-05-23 23:35:44 Train Epoch: 18 [153600/490606 (31%)]16586, Loss: 2.879539, Elapsed time: 116.9111s(100 iters) Margin: 0.3500, Scale: 30.00 424 | 2018-05-23 23:37:40 Train Epoch: 18 [204800/490606 (42%)]16686, Loss: 2.888622, Elapsed time: 115.7631s(100 iters) Margin: 0.3500, Scale: 30.00 425 | 2018-05-23 23:39:34 Train Epoch: 18 [256000/490606 (52%)]16786, Loss: 2.775572, Elapsed time: 114.5934s(100 iters) Margin: 0.3500, Scale: 30.00 426 | 2018-05-23 23:41:28 Train Epoch: 18 [307200/490606 
(63%)]16886, Loss: 2.780477, Elapsed time: 113.3488s(100 iters) Margin: 0.3500, Scale: 30.00 427 | 2018-05-23 23:43:21 Train Epoch: 18 [358400/490606 (73%)]16986, Loss: 2.792198, Elapsed time: 113.6050s(100 iters) Margin: 0.3500, Scale: 30.00 428 | 2018-05-23 23:45:15 Train Epoch: 18 [409600/490606 (84%)]17086, Loss: 2.771859, Elapsed time: 114.0890s(100 iters) Margin: 0.3500, Scale: 30.00 429 | 2018-05-23 23:47:10 Train Epoch: 18 [460800/490606 (94%)]17186, Loss: 2.723979, Elapsed time: 114.5659s(100 iters) Margin: 0.3500, Scale: 30.00 430 | LFWACC=0.9925 std=0.0038 thd=0.2665 431 | 2018-05-23 23:52:21 Epoch 19 start training 432 | 2018-05-23 23:54:16 Train Epoch: 19 [51200/490606 (10%)]17344, Loss: 2.379973, Elapsed time: 114.5830s(100 iters) Margin: 0.3500, Scale: 30.00 433 | 2018-05-23 23:56:12 Train Epoch: 19 [102400/490606 (21%)]17444, Loss: 2.426001, Elapsed time: 116.3409s(100 iters) Margin: 0.3500, Scale: 30.00 434 | 2018-05-23 23:58:09 Train Epoch: 19 [153600/490606 (31%)]17544, Loss: 2.427925, Elapsed time: 116.4748s(100 iters) Margin: 0.3500, Scale: 30.00 435 | 2018-05-24 00:00:04 Train Epoch: 19 [204800/490606 (42%)]17644, Loss: 2.417889, Elapsed time: 114.9382s(100 iters) Margin: 0.3500, Scale: 30.00 436 | 2018-05-24 00:01:58 Train Epoch: 19 [256000/490606 (52%)]17744, Loss: 2.414182, Elapsed time: 114.2060s(100 iters) Margin: 0.3500, Scale: 30.00 437 | 2018-05-24 00:03:53 Train Epoch: 19 [307200/490606 (63%)]17844, Loss: 2.416475, Elapsed time: 115.0367s(100 iters) Margin: 0.3500, Scale: 30.00 438 | 2018-05-24 00:05:48 Train Epoch: 19 [358400/490606 (73%)]17944, Loss: 2.437770, Elapsed time: 115.0830s(100 iters) Margin: 0.3500, Scale: 30.00 439 | 2018-05-24 00:07:42 Train Epoch: 19 [409600/490606 (84%)]18044, Loss: 2.408043, Elapsed time: 114.4364s(100 iters) Margin: 0.3500, Scale: 30.00 440 | 2018-05-24 00:09:37 Train Epoch: 19 [460800/490606 (94%)]18144, Loss: 2.414138, Elapsed time: 114.1501s(100 iters) Margin: 0.3500, Scale: 30.00 441 | LFWACC=0.9922 std=0.0037 thd=0.2905 442 | 2018-05-24 00:14:49 Epoch 20 start training 443 | 2018-05-24 00:16:44 Train Epoch: 20 [51200/490606 (10%)]18302, Loss: 2.119445, Elapsed time: 114.7519s(100 iters) Margin: 0.3500, Scale: 30.00 444 | 2018-05-24 00:18:40 Train Epoch: 20 [102400/490606 (21%)]18402, Loss: 2.131431, Elapsed time: 115.8732s(100 iters) Margin: 0.3500, Scale: 30.00 445 | 2018-05-24 00:20:35 Train Epoch: 20 [153600/490606 (31%)]18502, Loss: 2.163558, Elapsed time: 115.0972s(100 iters) Margin: 0.3500, Scale: 30.00 446 | 2018-05-24 00:22:30 Train Epoch: 20 [204800/490606 (42%)]18602, Loss: 2.144411, Elapsed time: 115.4670s(100 iters) Margin: 0.3500, Scale: 30.00 447 | 2018-05-24 00:24:33 Train Epoch: 20 [256000/490606 (52%)]18702, Loss: 2.168069, Elapsed time: 123.3886s(100 iters) Margin: 0.3500, Scale: 30.00 448 | 2018-05-24 00:26:29 Train Epoch: 20 [307200/490606 (63%)]18802, Loss: 2.181481, Elapsed time: 115.9769s(100 iters) Margin: 0.3500, Scale: 30.00 449 | 2018-05-24 00:28:26 Train Epoch: 20 [358400/490606 (73%)]18902, Loss: 2.226052, Elapsed time: 116.9559s(100 iters) Margin: 0.3500, Scale: 30.00 450 | 2018-05-24 00:30:23 Train Epoch: 20 [409600/490606 (84%)]19002, Loss: 2.201263, Elapsed time: 116.9298s(100 iters) Margin: 0.3500, Scale: 30.00 451 | 2018-05-24 00:32:20 Train Epoch: 20 [460800/490606 (94%)]19102, Loss: 2.228261, Elapsed time: 116.7926s(100 iters) Margin: 0.3500, Scale: 30.00 452 | LFWACC=0.9928 std=0.0037 thd=0.2920 453 | 2018-05-24 00:39:08 Epoch 21 start training 454 | 2018-05-24 00:41:05 Train 
Epoch: 21 [51200/490606 (10%)]19260, Loss: 1.914494, Elapsed time: 116.6634s(100 iters) Margin: 0.3500, Scale: 30.00 455 | 2018-05-24 00:43:02 Train Epoch: 21 [102400/490606 (21%)]19360, Loss: 1.927755, Elapsed time: 116.7601s(100 iters) Margin: 0.3500, Scale: 30.00 456 | 2018-05-24 00:44:59 Train Epoch: 21 [153600/490606 (31%)]19460, Loss: 1.973473, Elapsed time: 116.7904s(100 iters) Margin: 0.3500, Scale: 30.00 457 | 2018-05-24 00:46:55 Train Epoch: 21 [204800/490606 (42%)]19560, Loss: 1.976277, Elapsed time: 116.4491s(100 iters) Margin: 0.3500, Scale: 30.00 458 | 2018-05-24 00:48:52 Train Epoch: 21 [256000/490606 (52%)]19660, Loss: 1.978515, Elapsed time: 117.1231s(100 iters) Margin: 0.3500, Scale: 30.00 459 | 2018-05-24 00:50:49 Train Epoch: 21 [307200/490606 (63%)]19760, Loss: 2.003073, Elapsed time: 116.6744s(100 iters) Margin: 0.3500, Scale: 30.00 460 | 2018-05-24 00:52:45 Train Epoch: 21 [358400/490606 (73%)]19860, Loss: 2.041111, Elapsed time: 116.5662s(100 iters) Margin: 0.3500, Scale: 30.00 461 | 2018-05-24 00:54:41 Train Epoch: 21 [409600/490606 (84%)]19960, Loss: 2.042741, Elapsed time: 116.0505s(100 iters) Margin: 0.3500, Scale: 30.00 462 | 2018-05-24 00:56:38 Train Epoch: 21 [460800/490606 (94%)]20060, Loss: 2.056493, Elapsed time: 116.8527s(100 iters) Margin: 0.3500, Scale: 30.00 463 | LFWACC=0.9945 std=0.0034 thd=0.2950 464 | 2018-05-24 01:03:31 Epoch 22 start training 465 | 2018-05-24 01:05:28 Train Epoch: 22 [51200/490606 (10%)]20218, Loss: 1.750655, Elapsed time: 116.9996s(100 iters) Margin: 0.3500, Scale: 30.00 466 | 2018-05-24 01:07:26 Train Epoch: 22 [102400/490606 (21%)]20318, Loss: 1.780399, Elapsed time: 117.4899s(100 iters) Margin: 0.3500, Scale: 30.00 467 | 2018-05-24 01:09:22 Train Epoch: 22 [153600/490606 (31%)]20418, Loss: 1.789397, Elapsed time: 116.4760s(100 iters) Margin: 0.3500, Scale: 30.00 468 | 2018-05-24 01:11:18 Train Epoch: 22 [204800/490606 (42%)]20518, Loss: 1.822021, Elapsed time: 116.2720s(100 iters) Margin: 0.3500, Scale: 30.00 469 | 2018-05-24 01:13:15 Train Epoch: 22 [256000/490606 (52%)]20618, Loss: 1.871975, Elapsed time: 116.5980s(100 iters) Margin: 0.3500, Scale: 30.00 470 | 2018-05-24 01:15:11 Train Epoch: 22 [307200/490606 (63%)]20718, Loss: 1.873389, Elapsed time: 116.3756s(100 iters) Margin: 0.3500, Scale: 30.00 471 | 2018-05-24 01:17:08 Train Epoch: 22 [358400/490606 (73%)]20818, Loss: 1.890845, Elapsed time: 116.5515s(100 iters) Margin: 0.3500, Scale: 30.00 472 | 2018-05-24 01:19:04 Train Epoch: 22 [409600/490606 (84%)]20918, Loss: 1.903723, Elapsed time: 116.6333s(100 iters) Margin: 0.3500, Scale: 30.00 473 | 2018-05-24 01:21:01 Train Epoch: 22 [460800/490606 (94%)]21018, Loss: 1.929470, Elapsed time: 116.8167s(100 iters) Margin: 0.3500, Scale: 30.00 474 | LFWACC=0.9933 std=0.0044 thd=0.2915 475 | 2018-05-24 01:28:00 Epoch 23 start training 476 | 2018-05-24 01:29:56 Train Epoch: 23 [51200/490606 (10%)]21176, Loss: 1.600109, Elapsed time: 116.5558s(100 iters) Margin: 0.3500, Scale: 30.00 477 | 2018-05-24 01:31:53 Train Epoch: 23 [102400/490606 (21%)]21276, Loss: 1.643251, Elapsed time: 116.5849s(100 iters) Margin: 0.3500, Scale: 30.00 478 | 2018-05-24 01:33:49 Train Epoch: 23 [153600/490606 (31%)]21376, Loss: 1.663968, Elapsed time: 116.2513s(100 iters) Margin: 0.3500, Scale: 30.00 479 | 2018-05-24 01:35:46 Train Epoch: 23 [204800/490606 (42%)]21476, Loss: 1.704895, Elapsed time: 116.7058s(100 iters) Margin: 0.3500, Scale: 30.00 480 | 2018-05-24 01:37:42 Train Epoch: 23 [256000/490606 (52%)]21576, Loss: 1.738491, Elapsed time: 
116.8299s(100 iters) Margin: 0.3500, Scale: 30.00 481 | 2018-05-24 01:39:39 Train Epoch: 23 [307200/490606 (63%)]21676, Loss: 1.780882, Elapsed time: 116.7695s(100 iters) Margin: 0.3500, Scale: 30.00 482 | 2018-05-24 01:41:35 Train Epoch: 23 [358400/490606 (73%)]21776, Loss: 1.788507, Elapsed time: 116.2791s(100 iters) Margin: 0.3500, Scale: 30.00 483 | 2018-05-24 01:43:32 Train Epoch: 23 [409600/490606 (84%)]21876, Loss: 1.827848, Elapsed time: 116.3561s(100 iters) Margin: 0.3500, Scale: 30.00 484 | 2018-05-24 01:45:29 Train Epoch: 23 [460800/490606 (94%)]21976, Loss: 1.858745, Elapsed time: 117.0017s(100 iters) Margin: 0.3500, Scale: 30.00 485 | LFWACC=0.9927 std=0.0039 thd=0.2995 486 | 2018-05-24 01:52:22 Epoch 24 start training 487 | 2018-05-24 01:54:18 Train Epoch: 24 [51200/490606 (10%)]22134, Loss: 1.517243, Elapsed time: 116.4546s(100 iters) Margin: 0.3500, Scale: 30.00 488 | 2018-05-24 01:56:15 Train Epoch: 24 [102400/490606 (21%)]22234, Loss: 1.550956, Elapsed time: 116.5953s(100 iters) Margin: 0.3500, Scale: 30.00 489 | 2018-05-24 01:58:12 Train Epoch: 24 [153600/490606 (31%)]22334, Loss: 1.577678, Elapsed time: 116.9071s(100 iters) Margin: 0.3500, Scale: 30.00 490 | 2018-05-24 02:00:09 Train Epoch: 24 [204800/490606 (42%)]22434, Loss: 1.620689, Elapsed time: 116.6850s(100 iters) Margin: 0.3500, Scale: 30.00 491 | 2018-05-24 02:02:05 Train Epoch: 24 [256000/490606 (52%)]22534, Loss: 1.636586, Elapsed time: 116.5446s(100 iters) Margin: 0.3500, Scale: 30.00 492 | 2018-05-24 02:04:02 Train Epoch: 24 [307200/490606 (63%)]22634, Loss: 1.696062, Elapsed time: 116.6138s(100 iters) Margin: 0.3500, Scale: 30.00 493 | 2018-05-24 02:05:58 Train Epoch: 24 [358400/490606 (73%)]22734, Loss: 1.713690, Elapsed time: 116.0922s(100 iters) Margin: 0.3500, Scale: 30.00 494 | 2018-05-24 02:07:54 Train Epoch: 24 [409600/490606 (84%)]22834, Loss: 1.716282, Elapsed time: 116.5362s(100 iters) Margin: 0.3500, Scale: 30.00 495 | 2018-05-24 02:09:51 Train Epoch: 24 [460800/490606 (94%)]22934, Loss: 1.792434, Elapsed time: 116.8492s(100 iters) Margin: 0.3500, Scale: 30.00 496 | LFWACC=0.9928 std=0.0029 thd=0.2405 497 | 2018-05-24 02:16:47 Epoch 25 start training 498 | 2018-05-24 02:18:44 Train Epoch: 25 [51200/490606 (10%)]23092, Loss: 1.411527, Elapsed time: 117.2679s(100 iters) Margin: 0.3500, Scale: 30.00 499 | 2018-05-24 02:20:41 Train Epoch: 25 [102400/490606 (21%)]23192, Loss: 1.483703, Elapsed time: 116.9931s(100 iters) Margin: 0.3500, Scale: 30.00 500 | 2018-05-24 02:22:38 Train Epoch: 25 [153600/490606 (31%)]23292, Loss: 1.522952, Elapsed time: 116.8111s(100 iters) Margin: 0.3500, Scale: 30.00 501 | 2018-05-24 02:24:35 Train Epoch: 25 [204800/490606 (42%)]23392, Loss: 1.548755, Elapsed time: 116.8387s(100 iters) Margin: 0.3500, Scale: 30.00 502 | 2018-05-24 02:26:32 Train Epoch: 25 [256000/490606 (52%)]23492, Loss: 1.572608, Elapsed time: 117.0387s(100 iters) Margin: 0.3500, Scale: 30.00 503 | 2018-05-24 02:28:29 Train Epoch: 25 [307200/490606 (63%)]23592, Loss: 1.631073, Elapsed time: 117.2995s(100 iters) Margin: 0.3500, Scale: 30.00 504 | 2018-05-24 02:30:26 Train Epoch: 25 [358400/490606 (73%)]23692, Loss: 1.663401, Elapsed time: 116.5786s(100 iters) Margin: 0.3500, Scale: 30.00 505 | 2018-05-24 02:32:23 Train Epoch: 25 [409600/490606 (84%)]23792, Loss: 1.726326, Elapsed time: 116.7313s(100 iters) Margin: 0.3500, Scale: 30.00 506 | 2018-05-24 02:34:19 Train Epoch: 25 [460800/490606 (94%)]23892, Loss: 1.709272, Elapsed time: 116.4989s(100 iters) Margin: 0.3500, Scale: 30.00 507 | LFWACC=0.9925 
std=0.0042 thd=0.2770 508 | 2018-05-24 02:41:11 Epoch 26 start training 509 | 2018-05-24 02:42:08 Adjust learning rate to 0.001 510 | 2018-05-24 02:43:07 Train Epoch: 26 [51200/490606 (10%)]24050, Loss: 1.375641, Elapsed time: 116.4720s(100 iters) Margin: 0.3500, Scale: 30.00 511 | 2018-05-24 02:45:04 Train Epoch: 26 [102400/490606 (21%)]24150, Loss: 1.286147, Elapsed time: 116.9846s(100 iters) Margin: 0.3500, Scale: 30.00 512 | 2018-05-24 02:47:01 Train Epoch: 26 [153600/490606 (31%)]24250, Loss: 1.264700, Elapsed time: 116.8042s(100 iters) Margin: 0.3500, Scale: 30.00 513 | 2018-05-24 02:48:57 Train Epoch: 26 [204800/490606 (42%)]24350, Loss: 1.257010, Elapsed time: 116.0751s(100 iters) Margin: 0.3500, Scale: 30.00 514 | 2018-05-24 02:50:54 Train Epoch: 26 [256000/490606 (52%)]24450, Loss: 1.214559, Elapsed time: 116.4940s(100 iters) Margin: 0.3500, Scale: 30.00 515 | 2018-05-24 02:52:50 Train Epoch: 26 [307200/490606 (63%)]24550, Loss: 1.213641, Elapsed time: 116.4407s(100 iters) Margin: 0.3500, Scale: 30.00 516 | 2018-05-24 02:54:47 Train Epoch: 26 [358400/490606 (73%)]24650, Loss: 1.220372, Elapsed time: 116.5195s(100 iters) Margin: 0.3500, Scale: 30.00 517 | 2018-05-24 02:56:43 Train Epoch: 26 [409600/490606 (84%)]24750, Loss: 1.195007, Elapsed time: 116.8226s(100 iters) Margin: 0.3500, Scale: 30.00 518 | 2018-05-24 02:58:40 Train Epoch: 26 [460800/490606 (94%)]24850, Loss: 1.188704, Elapsed time: 116.7465s(100 iters) Margin: 0.3500, Scale: 30.00 519 | LFWACC=0.9917 std=0.0045 thd=0.2630 520 | 2018-05-24 03:05:39 Epoch 27 start training 521 | 2018-05-24 03:07:36 Train Epoch: 27 [51200/490606 (10%)]25008, Loss: 1.128318, Elapsed time: 116.9388s(100 iters) Margin: 0.3500, Scale: 30.00 522 | 2018-05-24 03:09:33 Train Epoch: 27 [102400/490606 (21%)]25108, Loss: 1.085890, Elapsed time: 116.8888s(100 iters) Margin: 0.3500, Scale: 30.00 523 | 2018-05-24 03:11:30 Train Epoch: 27 [153600/490606 (31%)]25208, Loss: 1.120802, Elapsed time: 116.8872s(100 iters) Margin: 0.3500, Scale: 30.00 524 | 2018-05-24 03:13:26 Train Epoch: 27 [204800/490606 (42%)]25308, Loss: 1.115994, Elapsed time: 116.1157s(100 iters) Margin: 0.3500, Scale: 30.00 525 | 2018-05-24 03:15:22 Train Epoch: 27 [256000/490606 (52%)]25408, Loss: 1.107667, Elapsed time: 116.3587s(100 iters) Margin: 0.3500, Scale: 30.00 526 | 2018-05-24 03:17:19 Train Epoch: 27 [307200/490606 (63%)]25508, Loss: 1.132571, Elapsed time: 116.6650s(100 iters) Margin: 0.3500, Scale: 30.00 527 | 2018-05-24 03:19:15 Train Epoch: 27 [358400/490606 (73%)]25608, Loss: 1.102952, Elapsed time: 116.3048s(100 iters) Margin: 0.3500, Scale: 30.00 528 | 2018-05-24 03:21:12 Train Epoch: 27 [409600/490606 (84%)]25708, Loss: 1.099058, Elapsed time: 116.5058s(100 iters) Margin: 0.3500, Scale: 30.00 529 | 2018-05-24 03:23:08 Train Epoch: 27 [460800/490606 (94%)]25808, Loss: 1.109199, Elapsed time: 116.7298s(100 iters) Margin: 0.3500, Scale: 30.00 530 | LFWACC=0.9932 std=0.0036 thd=0.2650 531 | 2018-05-24 03:30:05 Epoch 28 start training 532 | 2018-05-24 03:32:02 Train Epoch: 28 [51200/490606 (10%)]25966, Loss: 1.033090, Elapsed time: 117.0469s(100 iters) Margin: 0.3500, Scale: 30.00 533 | 2018-05-24 03:33:58 Train Epoch: 28 [102400/490606 (21%)]26066, Loss: 1.029544, Elapsed time: 116.4177s(100 iters) Margin: 0.3500, Scale: 30.00 534 | 2018-05-24 03:35:55 Train Epoch: 28 [153600/490606 (31%)]26166, Loss: 1.044958, Elapsed time: 116.6385s(100 iters) Margin: 0.3500, Scale: 30.00 535 | 2018-05-24 03:37:52 Train Epoch: 28 [204800/490606 (42%)]26266, Loss: 1.058541, Elapsed 
time: 117.1109s(100 iters) Margin: 0.3500, Scale: 30.00 536 | 2018-05-24 03:39:49 Train Epoch: 28 [256000/490606 (52%)]26366, Loss: 1.056152, Elapsed time: 116.6373s(100 iters) Margin: 0.3500, Scale: 30.00 537 | 2018-05-24 03:41:45 Train Epoch: 28 [307200/490606 (63%)]26466, Loss: 1.033831, Elapsed time: 116.2741s(100 iters) Margin: 0.3500, Scale: 30.00 538 | 2018-05-24 03:43:42 Train Epoch: 28 [358400/490606 (73%)]26566, Loss: 1.057201, Elapsed time: 117.1233s(100 iters) Margin: 0.3500, Scale: 30.00 539 | 2018-05-24 03:45:38 Train Epoch: 28 [409600/490606 (84%)]26666, Loss: 1.045169, Elapsed time: 116.5377s(100 iters) Margin: 0.3500, Scale: 30.00 540 | 2018-05-24 03:47:35 Train Epoch: 28 [460800/490606 (94%)]26766, Loss: 1.050477, Elapsed time: 116.7261s(100 iters) Margin: 0.3500, Scale: 30.00 541 | LFWACC=0.9913 std=0.0048 thd=0.2620 542 | 2018-05-24 03:54:26 Epoch 29 start training 543 | 2018-05-24 03:56:23 Train Epoch: 29 [51200/490606 (10%)]26924, Loss: 0.983345, Elapsed time: 117.0121s(100 iters) Margin: 0.3500, Scale: 30.00 544 | 2018-05-24 03:58:20 Train Epoch: 29 [102400/490606 (21%)]27024, Loss: 0.979861, Elapsed time: 116.8907s(100 iters) Margin: 0.3500, Scale: 30.00 545 | 2018-05-24 04:00:17 Train Epoch: 29 [153600/490606 (31%)]27124, Loss: 1.005594, Elapsed time: 116.9933s(100 iters) Margin: 0.3500, Scale: 30.00 546 | 2018-05-24 04:02:14 Train Epoch: 29 [204800/490606 (42%)]27224, Loss: 1.005924, Elapsed time: 116.7850s(100 iters) Margin: 0.3500, Scale: 30.00 547 | 2018-05-24 04:04:11 Train Epoch: 29 [256000/490606 (52%)]27324, Loss: 0.995283, Elapsed time: 116.6555s(100 iters) Margin: 0.3500, Scale: 30.00 548 | 2018-05-24 04:06:07 Train Epoch: 29 [307200/490606 (63%)]27424, Loss: 0.992834, Elapsed time: 116.5564s(100 iters) Margin: 0.3500, Scale: 30.00 549 | 2018-05-24 04:08:04 Train Epoch: 29 [358400/490606 (73%)]27524, Loss: 0.982448, Elapsed time: 116.3870s(100 iters) Margin: 0.3500, Scale: 30.00 550 | 2018-05-24 04:10:00 Train Epoch: 29 [409600/490606 (84%)]27624, Loss: 1.025727, Elapsed time: 116.3292s(100 iters) Margin: 0.3500, Scale: 30.00 551 | 2018-05-24 04:11:57 Train Epoch: 29 [460800/490606 (94%)]27724, Loss: 1.013516, Elapsed time: 116.7582s(100 iters) Margin: 0.3500, Scale: 30.00 552 | LFWACC=0.9917 std=0.0048 thd=0.2740 553 | 2018-05-24 04:18:54 Epoch 30 start training 554 | 2018-05-24 04:20:52 Train Epoch: 30 [51200/490606 (10%)]27882, Loss: 0.953823, Elapsed time: 117.5633s(100 iters) Margin: 0.3500, Scale: 30.00 555 | 2018-05-24 04:22:49 Train Epoch: 30 [102400/490606 (21%)]27982, Loss: 0.936406, Elapsed time: 116.9526s(100 iters) Margin: 0.3500, Scale: 30.00 556 | 2018-05-24 04:23:09 Adjust learning rate to 0.0001 557 | 2018-05-24 04:24:45 Train Epoch: 30 [153600/490606 (31%)]28082, Loss: 0.949901, Elapsed time: 116.5543s(100 iters) Margin: 0.3500, Scale: 30.00 558 | 2018-05-24 04:26:42 Train Epoch: 30 [204800/490606 (42%)]28182, Loss: 0.947401, Elapsed time: 116.6222s(100 iters) Margin: 0.3500, Scale: 30.00 559 | 2018-05-24 04:28:38 Train Epoch: 30 [256000/490606 (52%)]28282, Loss: 0.949636, Elapsed time: 115.9811s(100 iters) Margin: 0.3500, Scale: 30.00 560 | 2018-05-24 04:30:35 Train Epoch: 30 [307200/490606 (63%)]28382, Loss: 0.943749, Elapsed time: 116.5809s(100 iters) Margin: 0.3500, Scale: 30.00 561 | 2018-05-24 04:32:31 Train Epoch: 30 [358400/490606 (73%)]28482, Loss: 0.965019, Elapsed time: 116.5996s(100 iters) Margin: 0.3500, Scale: 30.00 562 | 2018-05-24 04:34:28 Train Epoch: 30 [409600/490606 (84%)]28582, Loss: 0.929861, Elapsed time: 
117.1071s(100 iters) Margin: 0.3500, Scale: 30.00 563 | 2018-05-24 04:36:25 Train Epoch: 30 [460800/490606 (94%)]28682, Loss: 0.960359, Elapsed time: 116.6442s(100 iters) Margin: 0.3500, Scale: 30.00 564 | LFWACC=0.9912 std=0.0051 thd=0.2720 565 | Finished Training 566 | -------------------------------------------------------------------------------- /log/README.md: -------------------------------------------------------------------------------- 1 | Upload training logs here. 2 | -------------------------------------------------------------------------------- /log/sphereface20_s30_m0.35.log: -------------------------------------------------------------------------------- 1 | Best Acc: 99.08% 2 | s = 30 3 | m = 0.35 4 | DataParallel( 5 | (module): sphere20( 6 | (conv1_1): Conv2d(3, 64, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1)) 7 | (relu1_1): PReLU(num_parameters=64) 8 | (conv1_2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) 9 | (relu1_2): PReLU(num_parameters=64) 10 | (conv1_3): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) 11 | (relu1_3): PReLU(num_parameters=64) 12 | (conv2_1): Conv2d(64, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1)) 13 | (relu2_1): PReLU(num_parameters=128) 14 | (conv2_2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) 15 | (relu2_2): PReLU(num_parameters=128) 16 | (conv2_3): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) 17 | (relu2_3): PReLU(num_parameters=128) 18 | (conv2_4): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) 19 | (relu2_4): PReLU(num_parameters=128) 20 | (conv2_5): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) 21 | (relu2_5): PReLU(num_parameters=128) 22 | (conv3_1): Conv2d(128, 256, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1)) 23 | (relu3_1): PReLU(num_parameters=256) 24 | (conv3_2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) 25 | (relu3_2): PReLU(num_parameters=256) 26 | (conv3_3): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) 27 | (relu3_3): PReLU(num_parameters=256) 28 | (conv3_4): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) 29 | (relu3_4): PReLU(num_parameters=256) 30 | (conv3_5): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) 31 | (relu3_5): PReLU(num_parameters=256) 32 | (conv3_6): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) 33 | (relu3_6): PReLU(num_parameters=256) 34 | (conv3_7): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) 35 | (relu3_7): PReLU(num_parameters=256) 36 | (conv3_8): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) 37 | (relu3_8): PReLU(num_parameters=256) 38 | (conv3_9): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) 39 | (relu3_9): PReLU(num_parameters=256) 40 | (conv4_1): Conv2d(256, 512, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1)) 41 | (relu4_1): PReLU(num_parameters=512) 42 | (conv4_2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) 43 | (relu4_2): PReLU(num_parameters=512) 44 | (conv4_3): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) 45 | (relu4_3): PReLU(num_parameters=512) 46 | (fc5): Linear(in_features=21504, out_features=512, bias=True) 47 | ) 48 | ) 49 | length
of train Dataset: 490606 50 | Number of Classses: 10572 51 | 2018-05-22 21:45:22 Epoch 1 start training 52 | 2018-05-22 21:46:07 Train Epoch: 1 [51200/490606 (10%)]100, Loss: 22.600954, Elapsed time: 45.0899s(100 iters) Margin: 0.3500, Scale: 30.00 53 | 2018-05-22 21:46:40 Train Epoch: 1 [102400/490606 (21%)]200, Loss: 21.635449, Elapsed time: 33.0874s(100 iters) Margin: 0.3500, Scale: 30.00 54 | 2018-05-22 21:47:14 Train Epoch: 1 [153600/490606 (31%)]300, Loss: 21.067246, Elapsed time: 33.7100s(100 iters) Margin: 0.3500, Scale: 30.00 55 | 2018-05-22 21:47:48 Train Epoch: 1 [204800/490606 (42%)]400, Loss: 20.728319, Elapsed time: 34.4642s(100 iters) Margin: 0.3500, Scale: 30.00 56 | 2018-05-22 21:48:23 Train Epoch: 1 [256000/490606 (52%)]500, Loss: 20.514118, Elapsed time: 34.5442s(100 iters) Margin: 0.3500, Scale: 30.00 57 | 2018-05-22 21:48:57 Train Epoch: 1 [307200/490606 (63%)]600, Loss: 20.296195, Elapsed time: 34.7273s(100 iters) Margin: 0.3500, Scale: 30.00 58 | 2018-05-22 21:49:32 Train Epoch: 1 [358400/490606 (73%)]700, Loss: 19.980296, Elapsed time: 34.9502s(100 iters) Margin: 0.3500, Scale: 30.00 59 | 2018-05-22 21:50:07 Train Epoch: 1 [409600/490606 (84%)]800, Loss: 19.786006, Elapsed time: 34.9108s(100 iters) Margin: 0.3500, Scale: 30.00 60 | 2018-05-22 21:50:42 Train Epoch: 1 [460800/490606 (94%)]900, Loss: 19.543513, Elapsed time: 35.0684s(100 iters) Margin: 0.3500, Scale: 30.00 61 | LFWACC=0.7613 std=0.0166 thd=0.6495 62 | 2018-05-22 21:53:01 Epoch 2 start training 63 | 2018-05-22 21:53:35 Train Epoch: 2 [51200/490606 (10%)]1058, Loss: 18.982758, Elapsed time: 34.3254s(100 iters) Margin: 0.3500, Scale: 30.00 64 | 2018-05-22 21:54:10 Train Epoch: 2 [102400/490606 (21%)]1158, Loss: 18.653946, Elapsed time: 35.0302s(100 iters) Margin: 0.3500, Scale: 30.00 65 | 2018-05-22 21:54:45 Train Epoch: 2 [153600/490606 (31%)]1258, Loss: 18.255760, Elapsed time: 35.4200s(100 iters) Margin: 0.3500, Scale: 30.00 66 | 2018-05-22 21:55:21 Train Epoch: 2 [204800/490606 (42%)]1358, Loss: 17.850857, Elapsed time: 35.5379s(100 iters) Margin: 0.3500, Scale: 30.00 67 | 2018-05-22 21:55:56 Train Epoch: 2 [256000/490606 (52%)]1458, Loss: 17.440493, Elapsed time: 35.4471s(100 iters) Margin: 0.3500, Scale: 30.00 68 | 2018-05-22 21:56:32 Train Epoch: 2 [307200/490606 (63%)]1558, Loss: 17.037631, Elapsed time: 35.3990s(100 iters) Margin: 0.3500, Scale: 30.00 69 | 2018-05-22 21:57:07 Train Epoch: 2 [358400/490606 (73%)]1658, Loss: 16.577483, Elapsed time: 35.2975s(100 iters) Margin: 0.3500, Scale: 30.00 70 | 2018-05-22 21:57:42 Train Epoch: 2 [409600/490606 (84%)]1758, Loss: 16.167652, Elapsed time: 35.3091s(100 iters) Margin: 0.3500, Scale: 30.00 71 | 2018-05-22 21:58:18 Train Epoch: 2 [460800/490606 (94%)]1858, Loss: 15.735231, Elapsed time: 35.4039s(100 iters) Margin: 0.3500, Scale: 30.00 72 | LFWACC=0.8923 std=0.0127 thd=0.3850 73 | 2018-05-22 22:00:34 Epoch 3 start training 74 | 2018-05-22 22:01:08 Train Epoch: 3 [51200/490606 (10%)]2016, Loss: 14.670126, Elapsed time: 34.3753s(100 iters) Margin: 0.3500, Scale: 30.00 75 | 2018-05-22 22:01:43 Train Epoch: 3 [102400/490606 (21%)]2116, Loss: 14.273571, Elapsed time: 35.0561s(100 iters) Margin: 0.3500, Scale: 30.00 76 | 2018-05-22 22:02:19 Train Epoch: 3 [153600/490606 (31%)]2216, Loss: 13.689304, Elapsed time: 35.4784s(100 iters) Margin: 0.3500, Scale: 30.00 77 | 2018-05-22 22:02:54 Train Epoch: 3 [204800/490606 (42%)]2316, Loss: 13.096583, Elapsed time: 35.3116s(100 iters) Margin: 0.3500, Scale: 30.00 78 | 2018-05-22 22:03:29 Train Epoch: 3 
[256000/490606 (52%)]2416, Loss: 12.476149, Elapsed time: 35.4366s(100 iters) Margin: 0.3500, Scale: 30.00 79 | 2018-05-22 22:04:05 Train Epoch: 3 [307200/490606 (63%)]2516, Loss: 11.934728, Elapsed time: 35.3232s(100 iters) Margin: 0.3500, Scale: 30.00 80 | 2018-05-22 22:04:40 Train Epoch: 3 [358400/490606 (73%)]2616, Loss: 11.412854, Elapsed time: 35.4174s(100 iters) Margin: 0.3500, Scale: 30.00 81 | 2018-05-22 22:05:15 Train Epoch: 3 [409600/490606 (84%)]2716, Loss: 10.971903, Elapsed time: 35.3072s(100 iters) Margin: 0.3500, Scale: 30.00 82 | 2018-05-22 22:05:51 Train Epoch: 3 [460800/490606 (94%)]2816, Loss: 10.577932, Elapsed time: 35.3240s(100 iters) Margin: 0.3500, Scale: 30.00 83 | LFWACC=0.9553 std=0.0103 thd=0.2995 84 | 2018-05-22 22:08:04 Epoch 4 start training 85 | 2018-05-22 22:08:39 Train Epoch: 4 [51200/490606 (10%)]2974, Loss: 9.375189, Elapsed time: 34.3327s(100 iters) Margin: 0.3500, Scale: 30.00 86 | 2018-05-22 22:09:14 Train Epoch: 4 [102400/490606 (21%)]3074, Loss: 9.274599, Elapsed time: 34.8808s(100 iters) Margin: 0.3500, Scale: 30.00 87 | 2018-05-22 22:09:49 Train Epoch: 4 [153600/490606 (31%)]3174, Loss: 9.088168, Elapsed time: 35.3627s(100 iters) Margin: 0.3500, Scale: 30.00 88 | 2018-05-22 22:10:24 Train Epoch: 4 [204800/490606 (42%)]3274, Loss: 9.011744, Elapsed time: 35.2787s(100 iters) Margin: 0.3500, Scale: 30.00 89 | 2018-05-22 22:11:00 Train Epoch: 4 [256000/490606 (52%)]3374, Loss: 8.822991, Elapsed time: 35.2338s(100 iters) Margin: 0.3500, Scale: 30.00 90 | 2018-05-22 22:11:35 Train Epoch: 4 [307200/490606 (63%)]3474, Loss: 8.670180, Elapsed time: 35.1527s(100 iters) Margin: 0.3500, Scale: 30.00 91 | 2018-05-22 22:12:10 Train Epoch: 4 [358400/490606 (73%)]3574, Loss: 8.482811, Elapsed time: 35.0152s(100 iters) Margin: 0.3500, Scale: 30.00 92 | 2018-05-22 22:12:45 Train Epoch: 4 [409600/490606 (84%)]3674, Loss: 8.339943, Elapsed time: 35.1037s(100 iters) Margin: 0.3500, Scale: 30.00 93 | 2018-05-22 22:13:20 Train Epoch: 4 [460800/490606 (94%)]3774, Loss: 8.169740, Elapsed time: 34.9259s(100 iters) Margin: 0.3500, Scale: 30.00 94 | LFWACC=0.9740 std=0.0067 thd=0.3040 95 | 2018-05-22 22:15:35 Epoch 5 start training 96 | 2018-05-22 22:16:09 Train Epoch: 5 [51200/490606 (10%)]3932, Loss: 7.288937, Elapsed time: 34.1931s(100 iters) Margin: 0.3500, Scale: 30.00 97 | 2018-05-22 22:16:44 Train Epoch: 5 [102400/490606 (21%)]4032, Loss: 7.379745, Elapsed time: 35.2267s(100 iters) Margin: 0.3500, Scale: 30.00 98 | 2018-05-22 22:17:20 Train Epoch: 5 [153600/490606 (31%)]4132, Loss: 7.449170, Elapsed time: 35.6028s(100 iters) Margin: 0.3500, Scale: 30.00 99 | 2018-05-22 22:17:55 Train Epoch: 5 [204800/490606 (42%)]4232, Loss: 7.447756, Elapsed time: 35.3010s(100 iters) Margin: 0.3500, Scale: 30.00 100 | 2018-05-22 22:18:30 Train Epoch: 5 [256000/490606 (52%)]4332, Loss: 7.408133, Elapsed time: 35.0234s(100 iters) Margin: 0.3500, Scale: 30.00 101 | 2018-05-22 22:19:05 Train Epoch: 5 [307200/490606 (63%)]4432, Loss: 7.355981, Elapsed time: 34.8612s(100 iters) Margin: 0.3500, Scale: 30.00 102 | 2018-05-22 22:19:40 Train Epoch: 5 [358400/490606 (73%)]4532, Loss: 7.270527, Elapsed time: 34.8908s(100 iters) Margin: 0.3500, Scale: 30.00 103 | 2018-05-22 22:20:15 Train Epoch: 5 [409600/490606 (84%)]4632, Loss: 7.239150, Elapsed time: 34.7472s(100 iters) Margin: 0.3500, Scale: 30.00 104 | 2018-05-22 22:20:50 Train Epoch: 5 [460800/490606 (94%)]4732, Loss: 7.233232, Elapsed time: 34.9264s(100 iters) Margin: 0.3500, Scale: 30.00 105 | LFWACC=0.9775 std=0.0076 thd=0.2775 106 | 
2018-05-22 22:23:05 Epoch 6 start training 107 | 2018-05-22 22:23:40 Train Epoch: 6 [51200/490606 (10%)]4890, Loss: 6.359903, Elapsed time: 34.4088s(100 iters) Margin: 0.3500, Scale: 30.00 108 | 2018-05-22 22:24:15 Train Epoch: 6 [102400/490606 (21%)]4990, Loss: 6.555435, Elapsed time: 34.7548s(100 iters) Margin: 0.3500, Scale: 30.00 109 | 2018-05-22 22:24:50 Train Epoch: 6 [153600/490606 (31%)]5090, Loss: 6.672913, Elapsed time: 35.1948s(100 iters) Margin: 0.3500, Scale: 30.00 110 | 2018-05-22 22:25:25 Train Epoch: 6 [204800/490606 (42%)]5190, Loss: 6.736073, Elapsed time: 35.1223s(100 iters) Margin: 0.3500, Scale: 30.00 111 | 2018-05-22 22:26:00 Train Epoch: 6 [256000/490606 (52%)]5290, Loss: 6.746384, Elapsed time: 35.1058s(100 iters) Margin: 0.3500, Scale: 30.00 112 | 2018-05-22 22:26:35 Train Epoch: 6 [307200/490606 (63%)]5390, Loss: 6.757408, Elapsed time: 35.0717s(100 iters) Margin: 0.3500, Scale: 30.00 113 | 2018-05-22 22:27:10 Train Epoch: 6 [358400/490606 (73%)]5490, Loss: 6.674938, Elapsed time: 35.0432s(100 iters) Margin: 0.3500, Scale: 30.00 114 | 2018-05-22 22:27:45 Train Epoch: 6 [409600/490606 (84%)]5590, Loss: 6.701476, Elapsed time: 35.0503s(100 iters) Margin: 0.3500, Scale: 30.00 115 | 2018-05-22 22:28:20 Train Epoch: 6 [460800/490606 (94%)]5690, Loss: 6.726889, Elapsed time: 34.9406s(100 iters) Margin: 0.3500, Scale: 30.00 116 | LFWACC=0.9828 std=0.0058 thd=0.2715 117 | 2018-05-22 22:30:39 Epoch 7 start training 118 | 2018-05-22 22:31:13 Train Epoch: 7 [51200/490606 (10%)]5848, Loss: 5.889477, Elapsed time: 34.3826s(100 iters) Margin: 0.3500, Scale: 30.00 119 | 2018-05-22 22:31:48 Train Epoch: 7 [102400/490606 (21%)]5948, Loss: 6.168547, Elapsed time: 34.7548s(100 iters) Margin: 0.3500, Scale: 30.00 120 | 2018-05-22 22:32:23 Train Epoch: 7 [153600/490606 (31%)]6048, Loss: 6.198733, Elapsed time: 35.1798s(100 iters) Margin: 0.3500, Scale: 30.00 121 | 2018-05-22 22:32:59 Train Epoch: 7 [204800/490606 (42%)]6148, Loss: 6.335482, Elapsed time: 35.1067s(100 iters) Margin: 0.3500, Scale: 30.00 122 | 2018-05-22 22:33:34 Train Epoch: 7 [256000/490606 (52%)]6248, Loss: 6.339082, Elapsed time: 35.1257s(100 iters) Margin: 0.3500, Scale: 30.00 123 | 2018-05-22 22:34:09 Train Epoch: 7 [307200/490606 (63%)]6348, Loss: 6.341745, Elapsed time: 35.0927s(100 iters) Margin: 0.3500, Scale: 30.00 124 | 2018-05-22 22:34:44 Train Epoch: 7 [358400/490606 (73%)]6448, Loss: 6.348263, Elapsed time: 35.0882s(100 iters) Margin: 0.3500, Scale: 30.00 125 | 2018-05-22 22:35:19 Train Epoch: 7 [409600/490606 (84%)]6548, Loss: 6.336185, Elapsed time: 34.9500s(100 iters) Margin: 0.3500, Scale: 30.00 126 | 2018-05-22 22:35:54 Train Epoch: 7 [460800/490606 (94%)]6648, Loss: 6.331119, Elapsed time: 35.0763s(100 iters) Margin: 0.3500, Scale: 30.00 127 | LFWACC=0.9850 std=0.0058 thd=0.2620 128 | 2018-05-22 22:38:12 Epoch 8 start training 129 | 2018-05-22 22:38:46 Train Epoch: 8 [51200/490606 (10%)]6806, Loss: 5.530350, Elapsed time: 34.2303s(100 iters) Margin: 0.3500, Scale: 30.00 130 | 2018-05-22 22:39:21 Train Epoch: 8 [102400/490606 (21%)]6906, Loss: 5.815683, Elapsed time: 34.7794s(100 iters) Margin: 0.3500, Scale: 30.00 131 | 2018-05-22 22:39:56 Train Epoch: 8 [153600/490606 (31%)]7006, Loss: 5.971776, Elapsed time: 35.2423s(100 iters) Margin: 0.3500, Scale: 30.00 132 | 2018-05-22 22:40:32 Train Epoch: 8 [204800/490606 (42%)]7106, Loss: 5.997392, Elapsed time: 35.2328s(100 iters) Margin: 0.3500, Scale: 30.00 133 | 2018-05-22 22:41:07 Train Epoch: 8 [256000/490606 (52%)]7206, Loss: 6.054575, Elapsed time: 
35.1988s(100 iters) Margin: 0.3500, Scale: 30.00 134 | 2018-05-22 22:41:42 Train Epoch: 8 [307200/490606 (63%)]7306, Loss: 6.068667, Elapsed time: 35.2065s(100 iters) Margin: 0.3500, Scale: 30.00 135 | 2018-05-22 22:42:17 Train Epoch: 8 [358400/490606 (73%)]7406, Loss: 6.058443, Elapsed time: 35.0308s(100 iters) Margin: 0.3500, Scale: 30.00 136 | 2018-05-22 22:42:52 Train Epoch: 8 [409600/490606 (84%)]7506, Loss: 6.108605, Elapsed time: 35.0202s(100 iters) Margin: 0.3500, Scale: 30.00 137 | 2018-05-22 22:43:27 Train Epoch: 8 [460800/490606 (94%)]7606, Loss: 6.110675, Elapsed time: 34.9316s(100 iters) Margin: 0.3500, Scale: 30.00 138 | LFWACC=0.9868 std=0.0050 thd=0.2595 139 | 2018-05-22 22:45:41 Epoch 9 start training 140 | 2018-05-22 22:46:15 Train Epoch: 9 [51200/490606 (10%)]7764, Loss: 5.321429, Elapsed time: 34.3351s(100 iters) Margin: 0.3500, Scale: 30.00 141 | 2018-05-22 22:46:50 Train Epoch: 9 [102400/490606 (21%)]7864, Loss: 5.573366, Elapsed time: 34.8284s(100 iters) Margin: 0.3500, Scale: 30.00 142 | 2018-05-22 22:47:25 Train Epoch: 9 [153600/490606 (31%)]7964, Loss: 5.738787, Elapsed time: 35.2380s(100 iters) Margin: 0.3500, Scale: 30.00 143 | 2018-05-22 22:48:00 Train Epoch: 9 [204800/490606 (42%)]8064, Loss: 5.782237, Elapsed time: 35.2815s(100 iters) Margin: 0.3500, Scale: 30.00 144 | 2018-05-22 22:48:36 Train Epoch: 9 [256000/490606 (52%)]8164, Loss: 5.830540, Elapsed time: 35.3485s(100 iters) Margin: 0.3500, Scale: 30.00 145 | 2018-05-22 22:49:11 Train Epoch: 9 [307200/490606 (63%)]8264, Loss: 5.865085, Elapsed time: 35.2456s(100 iters) Margin: 0.3500, Scale: 30.00 146 | 2018-05-22 22:49:46 Train Epoch: 9 [358400/490606 (73%)]8364, Loss: 5.907460, Elapsed time: 35.1378s(100 iters) Margin: 0.3500, Scale: 30.00 147 | 2018-05-22 22:50:21 Train Epoch: 9 [409600/490606 (84%)]8464, Loss: 5.886113, Elapsed time: 35.0700s(100 iters) Margin: 0.3500, Scale: 30.00 148 | 2018-05-22 22:50:56 Train Epoch: 9 [460800/490606 (94%)]8564, Loss: 5.946792, Elapsed time: 35.0518s(100 iters) Margin: 0.3500, Scale: 30.00 149 | LFWACC=0.9858 std=0.0057 thd=0.2550 150 | 2018-05-22 22:53:10 Epoch 10 start training 151 | 2018-05-22 22:53:44 Train Epoch: 10 [51200/490606 (10%)]8722, Loss: 5.142571, Elapsed time: 34.3763s(100 iters) Margin: 0.3500, Scale: 30.00 152 | 2018-05-22 22:54:19 Train Epoch: 10 [102400/490606 (21%)]8822, Loss: 5.366993, Elapsed time: 35.1353s(100 iters) Margin: 0.3500, Scale: 30.00 153 | 2018-05-22 22:54:55 Train Epoch: 10 [153600/490606 (31%)]8922, Loss: 5.550301, Elapsed time: 35.4777s(100 iters) Margin: 0.3500, Scale: 30.00 154 | 2018-05-22 22:55:30 Train Epoch: 10 [204800/490606 (42%)]9022, Loss: 5.680198, Elapsed time: 35.3487s(100 iters) Margin: 0.3500, Scale: 30.00 155 | 2018-05-22 22:56:06 Train Epoch: 10 [256000/490606 (52%)]9122, Loss: 5.656593, Elapsed time: 35.3255s(100 iters) Margin: 0.3500, Scale: 30.00 156 | 2018-05-22 22:56:41 Train Epoch: 10 [307200/490606 (63%)]9222, Loss: 5.695698, Elapsed time: 35.3045s(100 iters) Margin: 0.3500, Scale: 30.00 157 | 2018-05-22 22:57:16 Train Epoch: 10 [358400/490606 (73%)]9322, Loss: 5.766745, Elapsed time: 35.3371s(100 iters) Margin: 0.3500, Scale: 30.00 158 | 2018-05-22 22:57:52 Train Epoch: 10 [409600/490606 (84%)]9422, Loss: 5.768542, Elapsed time: 35.3369s(100 iters) Margin: 0.3500, Scale: 30.00 159 | 2018-05-22 22:58:27 Train Epoch: 10 [460800/490606 (94%)]9522, Loss: 5.763000, Elapsed time: 35.2162s(100 iters) Margin: 0.3500, Scale: 30.00 160 | LFWACC=0.9855 std=0.0064 thd=0.2805 161 | 2018-05-22 23:00:42 Epoch 11 
start training 162 | 2018-05-22 23:01:17 Train Epoch: 11 [51200/490606 (10%)]9680, Loss: 5.067659, Elapsed time: 34.5888s(100 iters) Margin: 0.3500, Scale: 30.00 163 | 2018-05-22 23:01:52 Train Epoch: 11 [102400/490606 (21%)]9780, Loss: 5.214870, Elapsed time: 35.1515s(100 iters) Margin: 0.3500, Scale: 30.00 164 | 2018-05-22 23:02:27 Train Epoch: 11 [153600/490606 (31%)]9880, Loss: 5.334287, Elapsed time: 35.5170s(100 iters) Margin: 0.3500, Scale: 30.00 165 | 2018-05-22 23:03:03 Train Epoch: 11 [204800/490606 (42%)]9980, Loss: 5.477006, Elapsed time: 35.2556s(100 iters) Margin: 0.3500, Scale: 30.00 166 | 2018-05-22 23:03:38 Train Epoch: 11 [256000/490606 (52%)]10080, Loss: 5.493786, Elapsed time: 35.1654s(100 iters) Margin: 0.3500, Scale: 30.00 167 | 2018-05-22 23:04:13 Train Epoch: 11 [307200/490606 (63%)]10180, Loss: 5.639891, Elapsed time: 35.0905s(100 iters) Margin: 0.3500, Scale: 30.00 168 | 2018-05-22 23:04:48 Train Epoch: 11 [358400/490606 (73%)]10280, Loss: 5.634824, Elapsed time: 35.1074s(100 iters) Margin: 0.3500, Scale: 30.00 169 | 2018-05-22 23:05:23 Train Epoch: 11 [409600/490606 (84%)]10380, Loss: 5.585205, Elapsed time: 35.1950s(100 iters) Margin: 0.3500, Scale: 30.00 170 | 2018-05-22 23:05:58 Train Epoch: 11 [460800/490606 (94%)]10480, Loss: 5.687141, Elapsed time: 35.0830s(100 iters) Margin: 0.3500, Scale: 30.00 171 | LFWACC=0.9862 std=0.0051 thd=0.2645 172 | 2018-05-22 23:08:16 Epoch 12 start training 173 | 2018-05-22 23:08:51 Train Epoch: 12 [51200/490606 (10%)]10638, Loss: 4.917343, Elapsed time: 34.3082s(100 iters) Margin: 0.3500, Scale: 30.00 174 | 2018-05-22 23:09:26 Train Epoch: 12 [102400/490606 (21%)]10738, Loss: 5.148166, Elapsed time: 35.1818s(100 iters) Margin: 0.3500, Scale: 30.00 175 | 2018-05-22 23:10:01 Train Epoch: 12 [153600/490606 (31%)]10838, Loss: 5.270394, Elapsed time: 35.5913s(100 iters) Margin: 0.3500, Scale: 30.00 176 | 2018-05-22 23:10:37 Train Epoch: 12 [204800/490606 (42%)]10938, Loss: 5.381381, Elapsed time: 35.4713s(100 iters) Margin: 0.3500, Scale: 30.00 177 | 2018-05-22 23:11:12 Train Epoch: 12 [256000/490606 (52%)]11038, Loss: 5.455194, Elapsed time: 35.4538s(100 iters) Margin: 0.3500, Scale: 30.00 178 | 2018-05-22 23:11:48 Train Epoch: 12 [307200/490606 (63%)]11138, Loss: 5.521281, Elapsed time: 35.4404s(100 iters) Margin: 0.3500, Scale: 30.00 179 | 2018-05-22 23:12:23 Train Epoch: 12 [358400/490606 (73%)]11238, Loss: 5.463669, Elapsed time: 35.2922s(100 iters) Margin: 0.3500, Scale: 30.00 180 | 2018-05-22 23:12:58 Train Epoch: 12 [409600/490606 (84%)]11338, Loss: 5.489256, Elapsed time: 35.2772s(100 iters) Margin: 0.3500, Scale: 30.00 181 | 2018-05-22 23:13:34 Train Epoch: 12 [460800/490606 (94%)]11438, Loss: 5.572077, Elapsed time: 35.2020s(100 iters) Margin: 0.3500, Scale: 30.00 182 | LFWACC=0.9870 std=0.0051 thd=0.2550 183 | 2018-05-22 23:15:47 Epoch 13 start training 184 | 2018-05-22 23:16:21 Train Epoch: 13 [51200/490606 (10%)]11596, Loss: 4.789563, Elapsed time: 34.1861s(100 iters) Margin: 0.3500, Scale: 30.00 185 | 2018-05-22 23:16:56 Train Epoch: 13 [102400/490606 (21%)]11696, Loss: 5.035744, Elapsed time: 35.1321s(100 iters) Margin: 0.3500, Scale: 30.00 186 | 2018-05-22 23:17:32 Train Epoch: 13 [153600/490606 (31%)]11796, Loss: 5.169541, Elapsed time: 35.5848s(100 iters) Margin: 0.3500, Scale: 30.00 187 | 2018-05-22 23:18:07 Train Epoch: 13 [204800/490606 (42%)]11896, Loss: 5.290563, Elapsed time: 35.5094s(100 iters) Margin: 0.3500, Scale: 30.00 188 | 2018-05-22 23:18:43 Train Epoch: 13 [256000/490606 (52%)]11996, Loss: 
5.377353, Elapsed time: 35.4856s(100 iters) Margin: 0.3500, Scale: 30.00 189 | 2018-05-22 23:19:18 Train Epoch: 13 [307200/490606 (63%)]12096, Loss: 5.406241, Elapsed time: 35.2799s(100 iters) Margin: 0.3500, Scale: 30.00 190 | 2018-05-22 23:19:53 Train Epoch: 13 [358400/490606 (73%)]12196, Loss: 5.407582, Elapsed time: 35.3432s(100 iters) Margin: 0.3500, Scale: 30.00 191 | 2018-05-22 23:20:29 Train Epoch: 13 [409600/490606 (84%)]12296, Loss: 5.430258, Elapsed time: 35.1904s(100 iters) Margin: 0.3500, Scale: 30.00 192 | 2018-05-22 23:21:04 Train Epoch: 13 [460800/490606 (94%)]12396, Loss: 5.482270, Elapsed time: 35.0904s(100 iters) Margin: 0.3500, Scale: 30.00 193 | LFWACC=0.9873 std=0.0064 thd=0.2605 194 | 2018-05-22 23:23:18 Epoch 14 start training 195 | 2018-05-22 23:23:52 Train Epoch: 14 [51200/490606 (10%)]12554, Loss: 4.700509, Elapsed time: 34.3754s(100 iters) Margin: 0.3500, Scale: 30.00 196 | 2018-05-22 23:24:27 Train Epoch: 14 [102400/490606 (21%)]12654, Loss: 4.907626, Elapsed time: 35.0799s(100 iters) Margin: 0.3500, Scale: 30.00 197 | 2018-05-22 23:25:03 Train Epoch: 14 [153600/490606 (31%)]12754, Loss: 5.103592, Elapsed time: 35.6084s(100 iters) Margin: 0.3500, Scale: 30.00 198 | 2018-05-22 23:25:39 Train Epoch: 14 [204800/490606 (42%)]12854, Loss: 5.215897, Elapsed time: 35.4522s(100 iters) Margin: 0.3500, Scale: 30.00 199 | 2018-05-22 23:26:14 Train Epoch: 14 [256000/490606 (52%)]12954, Loss: 5.232947, Elapsed time: 35.4237s(100 iters) Margin: 0.3500, Scale: 30.00 200 | 2018-05-22 23:26:49 Train Epoch: 14 [307200/490606 (63%)]13054, Loss: 5.311103, Elapsed time: 35.4532s(100 iters) Margin: 0.3500, Scale: 30.00 201 | 2018-05-22 23:27:25 Train Epoch: 14 [358400/490606 (73%)]13154, Loss: 5.336309, Elapsed time: 35.3234s(100 iters) Margin: 0.3500, Scale: 30.00 202 | 2018-05-22 23:28:00 Train Epoch: 14 [409600/490606 (84%)]13254, Loss: 5.437646, Elapsed time: 35.2710s(100 iters) Margin: 0.3500, Scale: 30.00 203 | 2018-05-22 23:28:35 Train Epoch: 14 [460800/490606 (94%)]13354, Loss: 5.393375, Elapsed time: 35.2365s(100 iters) Margin: 0.3500, Scale: 30.00 204 | LFWACC=0.9877 std=0.0042 thd=0.2475 205 | 2018-05-22 23:30:49 Epoch 15 start training 206 | 2018-05-22 23:31:23 Train Epoch: 15 [51200/490606 (10%)]13512, Loss: 4.655903, Elapsed time: 34.2714s(100 iters) Margin: 0.3500, Scale: 30.00 207 | 2018-05-22 23:31:58 Train Epoch: 15 [102400/490606 (21%)]13612, Loss: 4.849881, Elapsed time: 35.3053s(100 iters) Margin: 0.3500, Scale: 30.00 208 | 2018-05-22 23:32:34 Train Epoch: 15 [153600/490606 (31%)]13712, Loss: 4.989246, Elapsed time: 35.6771s(100 iters) Margin: 0.3500, Scale: 30.00 209 | 2018-05-22 23:33:09 Train Epoch: 15 [204800/490606 (42%)]13812, Loss: 5.143055, Elapsed time: 35.5542s(100 iters) Margin: 0.3500, Scale: 30.00 210 | 2018-05-22 23:33:45 Train Epoch: 15 [256000/490606 (52%)]13912, Loss: 5.169729, Elapsed time: 35.5004s(100 iters) Margin: 0.3500, Scale: 30.00 211 | 2018-05-22 23:34:20 Train Epoch: 15 [307200/490606 (63%)]14012, Loss: 5.214453, Elapsed time: 35.4116s(100 iters) Margin: 0.3500, Scale: 30.00 212 | 2018-05-22 23:34:56 Train Epoch: 15 [358400/490606 (73%)]14112, Loss: 5.240951, Elapsed time: 35.3782s(100 iters) Margin: 0.3500, Scale: 30.00 213 | 2018-05-22 23:35:31 Train Epoch: 15 [409600/490606 (84%)]14212, Loss: 5.270314, Elapsed time: 35.4207s(100 iters) Margin: 0.3500, Scale: 30.00 214 | 2018-05-22 23:36:06 Train Epoch: 15 [460800/490606 (94%)]14312, Loss: 5.345285, Elapsed time: 35.2293s(100 iters) Margin: 0.3500, Scale: 30.00 215 | LFWACC=0.9870 
std=0.0059 thd=0.2405 216 | 2018-05-22 23:38:21 Epoch 16 start training 217 | 2018-05-22 23:38:55 Train Epoch: 16 [51200/490606 (10%)]14470, Loss: 4.567621, Elapsed time: 34.2929s(100 iters) Margin: 0.3500, Scale: 30.00 218 | 2018-05-22 23:39:30 Train Epoch: 16 [102400/490606 (21%)]14570, Loss: 4.797665, Elapsed time: 35.2119s(100 iters) Margin: 0.3500, Scale: 30.00 219 | 2018-05-22 23:40:06 Train Epoch: 16 [153600/490606 (31%)]14670, Loss: 4.945240, Elapsed time: 35.6355s(100 iters) Margin: 0.3500, Scale: 30.00 220 | 2018-05-22 23:40:42 Train Epoch: 16 [204800/490606 (42%)]14770, Loss: 5.050015, Elapsed time: 35.6600s(100 iters) Margin: 0.3500, Scale: 30.00 221 | 2018-05-22 23:41:17 Train Epoch: 16 [256000/490606 (52%)]14870, Loss: 5.128154, Elapsed time: 35.4247s(100 iters) Margin: 0.3500, Scale: 30.00 222 | 2018-05-22 23:41:53 Train Epoch: 16 [307200/490606 (63%)]14970, Loss: 5.157999, Elapsed time: 35.3828s(100 iters) Margin: 0.3500, Scale: 30.00 223 | 2018-05-22 23:42:28 Train Epoch: 16 [358400/490606 (73%)]15070, Loss: 5.167270, Elapsed time: 35.1905s(100 iters) Margin: 0.3500, Scale: 30.00 224 | 2018-05-22 23:43:03 Train Epoch: 16 [409600/490606 (84%)]15170, Loss: 5.229934, Elapsed time: 35.3342s(100 iters) Margin: 0.3500, Scale: 30.00 225 | 2018-05-22 23:43:38 Train Epoch: 16 [460800/490606 (94%)]15270, Loss: 5.259656, Elapsed time: 35.2687s(100 iters) Margin: 0.3500, Scale: 30.00 226 | LFWACC=0.9882 std=0.0047 thd=0.2240 227 | 2018-05-22 23:45:58 Epoch 17 start training 228 | 2018-05-22 23:46:32 Train Epoch: 17 [51200/490606 (10%)]15428, Loss: 4.512006, Elapsed time: 34.1700s(100 iters) Margin: 0.3500, Scale: 30.00 229 | 2018-05-22 23:47:07 Train Epoch: 17 [102400/490606 (21%)]15528, Loss: 4.710182, Elapsed time: 34.8363s(100 iters) Margin: 0.3500, Scale: 30.00 230 | 2018-05-22 23:47:42 Train Epoch: 17 [153600/490606 (31%)]15628, Loss: 4.887441, Elapsed time: 35.3376s(100 iters) Margin: 0.3500, Scale: 30.00 231 | 2018-05-22 23:48:17 Train Epoch: 17 [204800/490606 (42%)]15728, Loss: 4.957819, Elapsed time: 35.2749s(100 iters) Margin: 0.3500, Scale: 30.00 232 | 2018-05-22 23:48:52 Train Epoch: 17 [256000/490606 (52%)]15828, Loss: 5.078830, Elapsed time: 35.2719s(100 iters) Margin: 0.3500, Scale: 30.00 233 | 2018-05-22 23:49:28 Train Epoch: 17 [307200/490606 (63%)]15928, Loss: 5.158377, Elapsed time: 35.2370s(100 iters) Margin: 0.3500, Scale: 30.00 234 | 2018-05-22 23:49:53 Adjust learning rate to 0.01 235 | 2018-05-22 23:50:03 Train Epoch: 17 [358400/490606 (73%)]16028, Loss: 5.070725, Elapsed time: 35.1548s(100 iters) Margin: 0.3500, Scale: 30.00 236 | 2018-05-22 23:50:38 Train Epoch: 17 [409600/490606 (84%)]16128, Loss: 4.411720, Elapsed time: 35.2391s(100 iters) Margin: 0.3500, Scale: 30.00 237 | 2018-05-22 23:51:13 Train Epoch: 17 [460800/490606 (94%)]16228, Loss: 4.249382, Elapsed time: 35.1592s(100 iters) Margin: 0.3500, Scale: 30.00 238 | LFWACC=0.9903 std=0.0050 thd=0.2370 239 | 2018-05-22 23:53:32 Epoch 18 start training 240 | 2018-05-22 23:54:06 Train Epoch: 18 [51200/490606 (10%)]16386, Loss: 3.292109, Elapsed time: 34.2940s(100 iters) Margin: 0.3500, Scale: 30.00 241 | 2018-05-22 23:54:41 Train Epoch: 18 [102400/490606 (21%)]16486, Loss: 3.274331, Elapsed time: 34.6985s(100 iters) Margin: 0.3500, Scale: 30.00 242 | 2018-05-22 23:55:16 Train Epoch: 18 [153600/490606 (31%)]16586, Loss: 3.231943, Elapsed time: 35.2813s(100 iters) Margin: 0.3500, Scale: 30.00 243 | 2018-05-22 23:55:51 Train Epoch: 18 [204800/490606 (42%)]16686, Loss: 3.223001, Elapsed time: 35.0915s(100 
iters) Margin: 0.3500, Scale: 30.00 244 | 2018-05-22 23:56:26 Train Epoch: 18 [256000/490606 (52%)]16786, Loss: 3.202021, Elapsed time: 35.1575s(100 iters) Margin: 0.3500, Scale: 30.00 245 | 2018-05-22 23:57:01 Train Epoch: 18 [307200/490606 (63%)]16886, Loss: 3.183747, Elapsed time: 35.0470s(100 iters) Margin: 0.3500, Scale: 30.00 246 | 2018-05-22 23:57:36 Train Epoch: 18 [358400/490606 (73%)]16986, Loss: 3.155177, Elapsed time: 34.9893s(100 iters) Margin: 0.3500, Scale: 30.00 247 | 2018-05-22 23:58:11 Train Epoch: 18 [409600/490606 (84%)]17086, Loss: 3.132971, Elapsed time: 34.9572s(100 iters) Margin: 0.3500, Scale: 30.00 248 | 2018-05-22 23:58:46 Train Epoch: 18 [460800/490606 (94%)]17186, Loss: 3.140817, Elapsed time: 34.9012s(100 iters) Margin: 0.3500, Scale: 30.00 249 | LFWACC=0.9903 std=0.0049 thd=0.2235 250 | 2018-05-23 00:01:06 Epoch 19 start training 251 | 2018-05-23 00:01:40 Train Epoch: 19 [51200/490606 (10%)]17344, Loss: 2.854041, Elapsed time: 34.2458s(100 iters) Margin: 0.3500, Scale: 30.00 252 | 2018-05-23 00:02:15 Train Epoch: 19 [102400/490606 (21%)]17444, Loss: 2.783621, Elapsed time: 35.0830s(100 iters) Margin: 0.3500, Scale: 30.00 253 | 2018-05-23 00:02:50 Train Epoch: 19 [153600/490606 (31%)]17544, Loss: 2.843280, Elapsed time: 35.5041s(100 iters) Margin: 0.3500, Scale: 30.00 254 | 2018-05-23 00:03:26 Train Epoch: 19 [204800/490606 (42%)]17644, Loss: 2.860288, Elapsed time: 35.5899s(100 iters) Margin: 0.3500, Scale: 30.00 255 | 2018-05-23 00:04:01 Train Epoch: 19 [256000/490606 (52%)]17744, Loss: 2.839766, Elapsed time: 35.4159s(100 iters) Margin: 0.3500, Scale: 30.00 256 | 2018-05-23 00:04:37 Train Epoch: 19 [307200/490606 (63%)]17844, Loss: 2.880108, Elapsed time: 35.3900s(100 iters) Margin: 0.3500, Scale: 30.00 257 | 2018-05-23 00:05:12 Train Epoch: 19 [358400/490606 (73%)]17944, Loss: 2.900396, Elapsed time: 35.2266s(100 iters) Margin: 0.3500, Scale: 30.00 258 | 2018-05-23 00:05:47 Train Epoch: 19 [409600/490606 (84%)]18044, Loss: 2.867618, Elapsed time: 35.3016s(100 iters) Margin: 0.3500, Scale: 30.00 259 | 2018-05-23 00:06:23 Train Epoch: 19 [460800/490606 (94%)]18144, Loss: 2.875468, Elapsed time: 35.1908s(100 iters) Margin: 0.3500, Scale: 30.00 260 | LFWACC=0.9903 std=0.0050 thd=0.2125 261 | 2018-05-23 00:08:36 Epoch 20 start training 262 | 2018-05-23 00:09:10 Train Epoch: 20 [51200/490606 (10%)]18302, Loss: 2.554133, Elapsed time: 34.3287s(100 iters) Margin: 0.3500, Scale: 30.00 263 | 2018-05-23 00:09:45 Train Epoch: 20 [102400/490606 (21%)]18402, Loss: 2.607153, Elapsed time: 35.3701s(100 iters) Margin: 0.3500, Scale: 30.00 264 | 2018-05-23 00:10:21 Train Epoch: 20 [153600/490606 (31%)]18502, Loss: 2.612296, Elapsed time: 35.6037s(100 iters) Margin: 0.3500, Scale: 30.00 265 | 2018-05-23 00:10:57 Train Epoch: 20 [204800/490606 (42%)]18602, Loss: 2.642180, Elapsed time: 35.5793s(100 iters) Margin: 0.3500, Scale: 30.00 266 | 2018-05-23 00:11:32 Train Epoch: 20 [256000/490606 (52%)]18702, Loss: 2.685085, Elapsed time: 35.5143s(100 iters) Margin: 0.3500, Scale: 30.00 267 | 2018-05-23 00:12:07 Train Epoch: 20 [307200/490606 (63%)]18802, Loss: 2.684187, Elapsed time: 35.2929s(100 iters) Margin: 0.3500, Scale: 30.00 268 | 2018-05-23 00:12:42 Train Epoch: 20 [358400/490606 (73%)]18902, Loss: 2.683294, Elapsed time: 35.0651s(100 iters) Margin: 0.3500, Scale: 30.00 269 | 2018-05-23 00:13:18 Train Epoch: 20 [409600/490606 (84%)]19002, Loss: 2.721427, Elapsed time: 35.2448s(100 iters) Margin: 0.3500, Scale: 30.00 270 | 2018-05-23 00:13:53 Train Epoch: 20 [460800/490606 
(94%)]19102, Loss: 2.778452, Elapsed time: 34.9152s(100 iters) Margin: 0.3500, Scale: 30.00 271 | LFWACC=0.9908 std=0.0052 thd=0.2365 272 | 2018-05-23 00:16:06 Epoch 21 start training 273 | 2018-05-23 00:16:41 Train Epoch: 21 [51200/490606 (10%)]19260, Loss: 2.414846, Elapsed time: 34.1809s(100 iters) Margin: 0.3500, Scale: 30.00 274 | 2018-05-23 00:17:16 Train Epoch: 21 [102400/490606 (21%)]19360, Loss: 2.458174, Elapsed time: 34.9075s(100 iters) Margin: 0.3500, Scale: 30.00 275 | 2018-05-23 00:17:51 Train Epoch: 21 [153600/490606 (31%)]19460, Loss: 2.482589, Elapsed time: 35.4105s(100 iters) Margin: 0.3500, Scale: 30.00 276 | 2018-05-23 00:18:26 Train Epoch: 21 [204800/490606 (42%)]19560, Loss: 2.530692, Elapsed time: 35.2552s(100 iters) Margin: 0.3500, Scale: 30.00 277 | 2018-05-23 00:19:02 Train Epoch: 21 [256000/490606 (52%)]19660, Loss: 2.546041, Elapsed time: 35.3687s(100 iters) Margin: 0.3500, Scale: 30.00 278 | 2018-05-23 00:19:37 Train Epoch: 21 [307200/490606 (63%)]19760, Loss: 2.540463, Elapsed time: 35.3726s(100 iters) Margin: 0.3500, Scale: 30.00 279 | 2018-05-23 00:20:12 Train Epoch: 21 [358400/490606 (73%)]19860, Loss: 2.604936, Elapsed time: 35.2030s(100 iters) Margin: 0.3500, Scale: 30.00 280 | 2018-05-23 00:20:47 Train Epoch: 21 [409600/490606 (84%)]19960, Loss: 2.611385, Elapsed time: 35.2878s(100 iters) Margin: 0.3500, Scale: 30.00 281 | 2018-05-23 00:21:23 Train Epoch: 21 [460800/490606 (94%)]20060, Loss: 2.583186, Elapsed time: 35.1266s(100 iters) Margin: 0.3500, Scale: 30.00 282 | LFWACC=0.9900 std=0.0044 thd=0.2235 283 | 2018-05-23 00:23:38 Epoch 22 start training 284 | 2018-05-23 00:24:12 Train Epoch: 22 [51200/490606 (10%)]20218, Loss: 2.312288, Elapsed time: 34.3688s(100 iters) Margin: 0.3500, Scale: 30.00 285 | 2018-05-23 00:24:47 Train Epoch: 22 [102400/490606 (21%)]20318, Loss: 2.322858, Elapsed time: 35.1162s(100 iters) Margin: 0.3500, Scale: 30.00 286 | 2018-05-23 00:25:23 Train Epoch: 22 [153600/490606 (31%)]20418, Loss: 2.376300, Elapsed time: 35.5070s(100 iters) Margin: 0.3500, Scale: 30.00 287 | 2018-05-23 00:25:58 Train Epoch: 22 [204800/490606 (42%)]20518, Loss: 2.400634, Elapsed time: 35.3175s(100 iters) Margin: 0.3500, Scale: 30.00 288 | 2018-05-23 00:26:33 Train Epoch: 22 [256000/490606 (52%)]20618, Loss: 2.447151, Elapsed time: 35.4576s(100 iters) Margin: 0.3500, Scale: 30.00 289 | 2018-05-23 00:27:09 Train Epoch: 22 [307200/490606 (63%)]20718, Loss: 2.463280, Elapsed time: 35.3186s(100 iters) Margin: 0.3500, Scale: 30.00 290 | 2018-05-23 00:27:44 Train Epoch: 22 [358400/490606 (73%)]20818, Loss: 2.495648, Elapsed time: 35.2715s(100 iters) Margin: 0.3500, Scale: 30.00 291 | 2018-05-23 00:28:19 Train Epoch: 22 [409600/490606 (84%)]20918, Loss: 2.510246, Elapsed time: 35.2716s(100 iters) Margin: 0.3500, Scale: 30.00 292 | 2018-05-23 00:28:54 Train Epoch: 22 [460800/490606 (94%)]21018, Loss: 2.538454, Elapsed time: 35.2471s(100 iters) Margin: 0.3500, Scale: 30.00 293 | LFWACC=0.9900 std=0.0051 thd=0.2140 294 | 2018-05-23 00:31:09 Epoch 23 start training 295 | 2018-05-23 00:31:43 Train Epoch: 23 [51200/490606 (10%)]21176, Loss: 2.197756, Elapsed time: 34.3331s(100 iters) Margin: 0.3500, Scale: 30.00 296 | 2018-05-23 00:32:18 Train Epoch: 23 [102400/490606 (21%)]21276, Loss: 2.232229, Elapsed time: 34.9979s(100 iters) Margin: 0.3500, Scale: 30.00 297 | 2018-05-23 00:32:53 Train Epoch: 23 [153600/490606 (31%)]21376, Loss: 2.257729, Elapsed time: 35.4569s(100 iters) Margin: 0.3500, Scale: 30.00 298 | 2018-05-23 00:33:29 Train Epoch: 23 [204800/490606 
(42%)]21476, Loss: 2.333342, Elapsed time: 35.1632s(100 iters) Margin: 0.3500, Scale: 30.00 299 | 2018-05-23 00:34:04 Train Epoch: 23 [256000/490606 (52%)]21576, Loss: 2.363562, Elapsed time: 35.2012s(100 iters) Margin: 0.3500, Scale: 30.00 300 | 2018-05-23 00:34:39 Train Epoch: 23 [307200/490606 (63%)]21676, Loss: 2.413824, Elapsed time: 35.1760s(100 iters) Margin: 0.3500, Scale: 30.00 301 | 2018-05-23 00:35:14 Train Epoch: 23 [358400/490606 (73%)]21776, Loss: 2.418349, Elapsed time: 34.8895s(100 iters) Margin: 0.3500, Scale: 30.00 302 | 2018-05-23 00:35:49 Train Epoch: 23 [409600/490606 (84%)]21876, Loss: 2.444954, Elapsed time: 34.8233s(100 iters) Margin: 0.3500, Scale: 30.00 303 | 2018-05-23 00:36:23 Train Epoch: 23 [460800/490606 (94%)]21976, Loss: 2.462782, Elapsed time: 34.7011s(100 iters) Margin: 0.3500, Scale: 30.00 304 | LFWACC=0.9892 std=0.0047 thd=0.2380 305 | 2018-05-23 00:38:36 Epoch 24 start training 306 | 2018-05-23 00:39:10 Train Epoch: 24 [51200/490606 (10%)]22134, Loss: 2.120939, Elapsed time: 34.1980s(100 iters) Margin: 0.3500, Scale: 30.00 307 | 2018-05-23 00:39:46 Train Epoch: 24 [102400/490606 (21%)]22234, Loss: 2.181790, Elapsed time: 35.0652s(100 iters) Margin: 0.3500, Scale: 30.00 308 | 2018-05-23 00:40:21 Train Epoch: 24 [153600/490606 (31%)]22334, Loss: 2.196826, Elapsed time: 35.3712s(100 iters) Margin: 0.3500, Scale: 30.00 309 | 2018-05-23 00:40:56 Train Epoch: 24 [204800/490606 (42%)]22434, Loss: 2.279552, Elapsed time: 35.1335s(100 iters) Margin: 0.3500, Scale: 30.00 310 | 2018-05-23 00:41:31 Train Epoch: 24 [256000/490606 (52%)]22534, Loss: 2.260789, Elapsed time: 35.1803s(100 iters) Margin: 0.3500, Scale: 30.00 311 | 2018-05-23 00:42:06 Train Epoch: 24 [307200/490606 (63%)]22634, Loss: 2.321239, Elapsed time: 35.1143s(100 iters) Margin: 0.3500, Scale: 30.00 312 | 2018-05-23 00:42:41 Train Epoch: 24 [358400/490606 (73%)]22734, Loss: 2.340150, Elapsed time: 35.0766s(100 iters) Margin: 0.3500, Scale: 30.00 313 | 2018-05-23 00:43:17 Train Epoch: 24 [409600/490606 (84%)]22834, Loss: 2.402619, Elapsed time: 35.1346s(100 iters) Margin: 0.3500, Scale: 30.00 314 | 2018-05-23 00:43:52 Train Epoch: 24 [460800/490606 (94%)]22934, Loss: 2.407765, Elapsed time: 35.0243s(100 iters) Margin: 0.3500, Scale: 30.00 315 | LFWACC=0.9898 std=0.0042 thd=0.2300 316 | 2018-05-23 00:46:08 Epoch 25 start training 317 | 2018-05-23 00:46:42 Train Epoch: 25 [51200/490606 (10%)]23092, Loss: 2.066123, Elapsed time: 34.2343s(100 iters) Margin: 0.3500, Scale: 30.00 318 | 2018-05-23 00:47:17 Train Epoch: 25 [102400/490606 (21%)]23192, Loss: 2.068774, Elapsed time: 34.8117s(100 iters) Margin: 0.3500, Scale: 30.00 319 | 2018-05-23 00:47:52 Train Epoch: 25 [153600/490606 (31%)]23292, Loss: 2.130189, Elapsed time: 35.2171s(100 iters) Margin: 0.3500, Scale: 30.00 320 | 2018-05-23 00:48:27 Train Epoch: 25 [204800/490606 (42%)]23392, Loss: 2.184955, Elapsed time: 35.0019s(100 iters) Margin: 0.3500, Scale: 30.00 321 | 2018-05-23 00:49:02 Train Epoch: 25 [256000/490606 (52%)]23492, Loss: 2.249126, Elapsed time: 35.0431s(100 iters) Margin: 0.3500, Scale: 30.00 322 | 2018-05-23 00:49:37 Train Epoch: 25 [307200/490606 (63%)]23592, Loss: 2.283695, Elapsed time: 34.9946s(100 iters) Margin: 0.3500, Scale: 30.00 323 | 2018-05-23 00:50:12 Train Epoch: 25 [358400/490606 (73%)]23692, Loss: 2.311159, Elapsed time: 34.9964s(100 iters) Margin: 0.3500, Scale: 30.00 324 | 2018-05-23 00:50:47 Train Epoch: 25 [409600/490606 (84%)]23792, Loss: 2.312220, Elapsed time: 35.0199s(100 iters) Margin: 0.3500, Scale: 30.00 
325 | 2018-05-23 00:51:22 Train Epoch: 25 [460800/490606 (94%)]23892, Loss: 2.394800, Elapsed time: 35.0061s(100 iters) Margin: 0.3500, Scale: 30.00 326 | LFWACC=0.9887 std=0.0045 thd=0.2190 327 | 2018-05-23 00:53:37 Epoch 26 start training 328 | 2018-05-23 00:53:54 Adjust learning rate to 0.001 329 | 2018-05-23 00:54:11 Train Epoch: 26 [51200/490606 (10%)]24050, Loss: 1.950384, Elapsed time: 34.1717s(100 iters) Margin: 0.3500, Scale: 30.00 330 | 2018-05-23 00:54:46 Train Epoch: 26 [102400/490606 (21%)]24150, Loss: 1.888394, Elapsed time: 34.9734s(100 iters) Margin: 0.3500, Scale: 30.00 331 | 2018-05-23 00:55:21 Train Epoch: 26 [153600/490606 (31%)]24250, Loss: 1.875045, Elapsed time: 35.4192s(100 iters) Margin: 0.3500, Scale: 30.00 332 | 2018-05-23 00:55:57 Train Epoch: 26 [204800/490606 (42%)]24350, Loss: 1.864290, Elapsed time: 35.2158s(100 iters) Margin: 0.3500, Scale: 30.00 333 | 2018-05-23 00:56:32 Train Epoch: 26 [256000/490606 (52%)]24450, Loss: 1.888378, Elapsed time: 35.2611s(100 iters) Margin: 0.3500, Scale: 30.00 334 | 2018-05-23 00:57:07 Train Epoch: 26 [307200/490606 (63%)]24550, Loss: 1.873999, Elapsed time: 35.1157s(100 iters) Margin: 0.3500, Scale: 30.00 335 | 2018-05-23 00:57:42 Train Epoch: 26 [358400/490606 (73%)]24650, Loss: 1.878325, Elapsed time: 35.1907s(100 iters) Margin: 0.3500, Scale: 30.00 336 | 2018-05-23 00:58:17 Train Epoch: 26 [409600/490606 (84%)]24750, Loss: 1.856046, Elapsed time: 35.0270s(100 iters) Margin: 0.3500, Scale: 30.00 337 | 2018-05-23 00:58:52 Train Epoch: 26 [460800/490606 (94%)]24850, Loss: 1.856536, Elapsed time: 34.9795s(100 iters) Margin: 0.3500, Scale: 30.00 338 | LFWACC=0.9903 std=0.0052 thd=0.2305 339 | 2018-05-23 01:01:07 Epoch 27 start training 340 | 2018-05-23 01:01:41 Train Epoch: 27 [51200/490606 (10%)]25008, Loss: 1.738820, Elapsed time: 34.2393s(100 iters) Margin: 0.3500, Scale: 30.00 341 | 2018-05-23 01:02:16 Train Epoch: 27 [102400/490606 (21%)]25108, Loss: 1.790705, Elapsed time: 35.0232s(100 iters) Margin: 0.3500, Scale: 30.00 342 | 2018-05-23 01:02:51 Train Epoch: 27 [153600/490606 (31%)]25208, Loss: 1.802848, Elapsed time: 35.2499s(100 iters) Margin: 0.3500, Scale: 30.00 343 | 2018-05-23 01:03:27 Train Epoch: 27 [204800/490606 (42%)]25308, Loss: 1.773464, Elapsed time: 35.2591s(100 iters) Margin: 0.3500, Scale: 30.00 344 | 2018-05-23 01:04:02 Train Epoch: 27 [256000/490606 (52%)]25408, Loss: 1.787410, Elapsed time: 35.2097s(100 iters) Margin: 0.3500, Scale: 30.00 345 | 2018-05-23 01:04:37 Train Epoch: 27 [307200/490606 (63%)]25508, Loss: 1.809478, Elapsed time: 34.8425s(100 iters) Margin: 0.3500, Scale: 30.00 346 | 2018-05-23 01:05:12 Train Epoch: 27 [358400/490606 (73%)]25608, Loss: 1.822759, Elapsed time: 34.8035s(100 iters) Margin: 0.3500, Scale: 30.00 347 | 2018-05-23 01:05:46 Train Epoch: 27 [409600/490606 (84%)]25708, Loss: 1.804524, Elapsed time: 34.8979s(100 iters) Margin: 0.3500, Scale: 30.00 348 | 2018-05-23 01:06:21 Train Epoch: 27 [460800/490606 (94%)]25808, Loss: 1.825293, Elapsed time: 34.7194s(100 iters) Margin: 0.3500, Scale: 30.00 349 | LFWACC=0.9905 std=0.0047 thd=0.2245 350 | 2018-05-23 01:08:35 Epoch 28 start training 351 | 2018-05-23 01:09:09 Train Epoch: 28 [51200/490606 (10%)]25966, Loss: 1.765702, Elapsed time: 34.1676s(100 iters) Margin: 0.3500, Scale: 30.00 352 | 2018-05-23 01:09:44 Train Epoch: 28 [102400/490606 (21%)]26066, Loss: 1.766602, Elapsed time: 35.0128s(100 iters) Margin: 0.3500, Scale: 30.00 353 | 2018-05-23 01:10:19 Train Epoch: 28 [153600/490606 (31%)]26166, Loss: 1.745782, Elapsed 
time: 35.3007s(100 iters) Margin: 0.3500, Scale: 30.00 354 | 2018-05-23 01:10:55 Train Epoch: 28 [204800/490606 (42%)]26266, Loss: 1.763547, Elapsed time: 35.0860s(100 iters) Margin: 0.3500, Scale: 30.00 355 | 2018-05-23 01:11:30 Train Epoch: 28 [256000/490606 (52%)]26366, Loss: 1.759258, Elapsed time: 35.0722s(100 iters) Margin: 0.3500, Scale: 30.00 356 | 2018-05-23 01:12:04 Train Epoch: 28 [307200/490606 (63%)]26466, Loss: 1.742912, Elapsed time: 34.8199s(100 iters) Margin: 0.3500, Scale: 30.00 357 | 2018-05-23 01:12:39 Train Epoch: 28 [358400/490606 (73%)]26566, Loss: 1.784234, Elapsed time: 34.8042s(100 iters) Margin: 0.3500, Scale: 30.00 358 | 2018-05-23 01:13:14 Train Epoch: 28 [409600/490606 (84%)]26666, Loss: 1.764379, Elapsed time: 34.7352s(100 iters) Margin: 0.3500, Scale: 30.00 359 | 2018-05-23 01:13:49 Train Epoch: 28 [460800/490606 (94%)]26766, Loss: 1.776731, Elapsed time: 34.7032s(100 iters) Margin: 0.3500, Scale: 30.00 360 | LFWACC=0.9898 std=0.0051 thd=0.2220 361 | 2018-05-23 01:16:03 Epoch 29 start training 362 | 2018-05-23 01:16:37 Train Epoch: 29 [51200/490606 (10%)]26924, Loss: 1.726539, Elapsed time: 34.1740s(100 iters) Margin: 0.3500, Scale: 30.00 363 | 2018-05-23 01:17:12 Train Epoch: 29 [102400/490606 (21%)]27024, Loss: 1.734869, Elapsed time: 34.8935s(100 iters) Margin: 0.3500, Scale: 30.00 364 | 2018-05-23 01:17:47 Train Epoch: 29 [153600/490606 (31%)]27124, Loss: 1.743535, Elapsed time: 35.2736s(100 iters) Margin: 0.3500, Scale: 30.00 365 | 2018-05-23 01:18:22 Train Epoch: 29 [204800/490606 (42%)]27224, Loss: 1.719379, Elapsed time: 35.2191s(100 iters) Margin: 0.3500, Scale: 30.00 366 | 2018-05-23 01:18:58 Train Epoch: 29 [256000/490606 (52%)]27324, Loss: 1.712499, Elapsed time: 35.3052s(100 iters) Margin: 0.3500, Scale: 30.00 367 | 2018-05-23 01:19:33 Train Epoch: 29 [307200/490606 (63%)]27424, Loss: 1.747275, Elapsed time: 35.2560s(100 iters) Margin: 0.3500, Scale: 30.00 368 | 2018-05-23 01:20:08 Train Epoch: 29 [358400/490606 (73%)]27524, Loss: 1.763670, Elapsed time: 35.2012s(100 iters) Margin: 0.3500, Scale: 30.00 369 | 2018-05-23 01:20:43 Train Epoch: 29 [409600/490606 (84%)]27624, Loss: 1.763478, Elapsed time: 35.1901s(100 iters) Margin: 0.3500, Scale: 30.00 370 | 2018-05-23 01:21:18 Train Epoch: 29 [460800/490606 (94%)]27724, Loss: 1.750996, Elapsed time: 35.0691s(100 iters) Margin: 0.3500, Scale: 30.00 371 | LFWACC=0.9888 std=0.0057 thd=0.2240 372 | 2018-05-23 01:23:33 Epoch 30 start training 373 | 2018-05-23 01:24:07 Train Epoch: 30 [51200/490606 (10%)]27882, Loss: 1.693540, Elapsed time: 34.2031s(100 iters) Margin: 0.3500, Scale: 30.00 374 | 2018-05-23 01:24:43 Train Epoch: 30 [102400/490606 (21%)]27982, Loss: 1.718465, Elapsed time: 35.1225s(100 iters) Margin: 0.3500, Scale: 30.00 375 | 2018-05-23 01:25:18 Train Epoch: 30 [153600/490606 (31%)]28082, Loss: 1.708006, Elapsed time: 35.4531s(100 iters) Margin: 0.3500, Scale: 30.00 376 | 2018-05-23 01:25:53 Train Epoch: 30 [204800/490606 (42%)]28182, Loss: 1.715883, Elapsed time: 35.4763s(100 iters) Margin: 0.3500, Scale: 30.00 377 | 2018-05-23 01:26:29 Train Epoch: 30 [256000/490606 (52%)]28282, Loss: 1.726821, Elapsed time: 35.4548s(100 iters) Margin: 0.3500, Scale: 30.00 378 | 2018-05-23 01:27:04 Train Epoch: 30 [307200/490606 (63%)]28382, Loss: 1.739319, Elapsed time: 35.2314s(100 iters) Margin: 0.3500, Scale: 30.00 379 | 2018-05-23 01:27:39 Train Epoch: 30 [358400/490606 (73%)]28482, Loss: 1.736377, Elapsed time: 35.2901s(100 iters) Margin: 0.3500, Scale: 30.00 380 | 2018-05-23 01:28:15 Train Epoch: 
30 [409600/490606 (84%)]28582, Loss: 1.727251, Elapsed time: 35.2052s(100 iters) Margin: 0.3500, Scale: 30.00 381 | 2018-05-23 01:28:50 Train Epoch: 30 [460800/490606 (94%)]28682, Loss: 1.746549, Elapsed time: 35.2006s(100 iters) Margin: 0.3500, Scale: 30.00 382 | LFWACC=0.9893 std=0.0047 thd=0.2235 383 | Finished Training 384 | -------------------------------------------------------------------------------- /log/sphereface20_s30_m0.40.log: -------------------------------------------------------------------------------- 1 | DataParallel( 2 | (module): sphere20( 3 | (conv1_1): Conv2d(3, 64, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1)) 4 | (relu1_1): PReLU(num_parameters=64) 5 | (conv1_2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) 6 | (relu1_2): PReLU(num_parameters=64) 7 | (conv1_3): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) 8 | (relu1_3): PReLU(num_parameters=64) 9 | (conv2_1): Conv2d(64, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1)) 10 | (relu2_1): PReLU(num_parameters=128) 11 | (conv2_2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) 12 | (relu2_2): PReLU(num_parameters=128) 13 | (conv2_3): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) 14 | (relu2_3): PReLU(num_parameters=128) 15 | (conv2_4): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) 16 | (relu2_4): PReLU(num_parameters=128) 17 | (conv2_5): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) 18 | (relu2_5): PReLU(num_parameters=128) 19 | (conv3_1): Conv2d(128, 256, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1)) 20 | (relu3_1): PReLU(num_parameters=256) 21 | (conv3_2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) 22 | (relu3_2): PReLU(num_parameters=256) 23 | (conv3_3): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) 24 | (relu3_3): PReLU(num_parameters=256) 25 | (conv3_4): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) 26 | (relu3_4): PReLU(num_parameters=256) 27 | (conv3_5): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) 28 | (relu3_5): PReLU(num_parameters=256) 29 | (conv3_6): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) 30 | (relu3_6): PReLU(num_parameters=256) 31 | (conv3_7): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) 32 | (relu3_7): PReLU(num_parameters=256) 33 | (conv3_8): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) 34 | (relu3_8): PReLU(num_parameters=256) 35 | (conv3_9): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) 36 | (relu3_9): PReLU(num_parameters=256) 37 | (conv4_1): Conv2d(256, 512, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1)) 38 | (relu4_1): PReLU(num_parameters=512) 39 | (conv4_2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) 40 | (relu4_2): PReLU(num_parameters=512) 41 | (conv4_3): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) 42 | (relu4_3): PReLU(num_parameters=512) 43 | (fc5): Linear(in_features=21504, out_features=512, bias=True) 44 | ) 45 | ) 46 | length of train Dataset: 490606 47 | Number of Classses: 10572 48 | 2018-05-23 19:11:19 Epoch 1 start training 49 | 2018-05-23 19:12:04 Train Epoch: 1 [51200/490606 (10%)]100, Loss: 24.126947, Elapsed time: 
44.7278s(100 iters) Margin: 0.4000, Scale: 30.00 50 | 2018-05-23 19:12:37 Train Epoch: 1 [102400/490606 (21%)]200, Loss: 23.085680, Elapsed time: 32.9913s(100 iters) Margin: 0.4000, Scale: 30.00 51 | 2018-05-23 19:13:13 Train Epoch: 1 [153600/490606 (31%)]300, Loss: 22.335430, Elapsed time: 35.6989s(100 iters) Margin: 0.4000, Scale: 30.00 52 | 2018-05-23 19:13:49 Train Epoch: 1 [204800/490606 (42%)]400, Loss: 21.910509, Elapsed time: 35.9493s(100 iters) Margin: 0.4000, Scale: 30.00 53 | 2018-05-23 19:14:25 Train Epoch: 1 [256000/490606 (52%)]500, Loss: 21.596847, Elapsed time: 35.9335s(100 iters) Margin: 0.4000, Scale: 30.00 54 | 2018-05-23 19:15:01 Train Epoch: 1 [307200/490606 (63%)]600, Loss: 21.271390, Elapsed time: 35.9589s(100 iters) Margin: 0.4000, Scale: 30.00 55 | 2018-05-23 19:15:37 Train Epoch: 1 [358400/490606 (73%)]700, Loss: 20.849685, Elapsed time: 36.0220s(100 iters) Margin: 0.4000, Scale: 30.00 56 | 2018-05-23 19:16:13 Train Epoch: 1 [409600/490606 (84%)]800, Loss: 20.403136, Elapsed time: 35.9300s(100 iters) Margin: 0.4000, Scale: 30.00 57 | 2018-05-23 19:16:49 Train Epoch: 1 [460800/490606 (94%)]900, Loss: 19.928651, Elapsed time: 35.9860s(100 iters) Margin: 0.4000, Scale: 30.00 58 | LFWACC=0.8275 std=0.0115 thd=0.4150 59 | 2018-05-23 19:19:11 Epoch 2 start training 60 | 2018-05-23 19:19:45 Train Epoch: 2 [51200/490606 (10%)]1058, Loss: 19.063481, Elapsed time: 34.2460s(100 iters) Margin: 0.4000, Scale: 30.00 61 | 2018-05-23 19:20:20 Train Epoch: 2 [102400/490606 (21%)]1158, Loss: 18.698709, Elapsed time: 35.3193s(100 iters) Margin: 0.4000, Scale: 30.00 62 | 2018-05-23 19:20:56 Train Epoch: 2 [153600/490606 (31%)]1258, Loss: 18.295765, Elapsed time: 35.9927s(100 iters) Margin: 0.4000, Scale: 30.00 63 | 2018-05-23 19:21:32 Train Epoch: 2 [204800/490606 (42%)]1358, Loss: 17.849183, Elapsed time: 35.9956s(100 iters) Margin: 0.4000, Scale: 30.00 64 | 2018-05-23 19:22:08 Train Epoch: 2 [256000/490606 (52%)]1458, Loss: 17.417370, Elapsed time: 35.9591s(100 iters) Margin: 0.4000, Scale: 30.00 65 | 2018-05-23 19:22:44 Train Epoch: 2 [307200/490606 (63%)]1558, Loss: 16.919949, Elapsed time: 35.9746s(100 iters) Margin: 0.4000, Scale: 30.00 66 | 2018-05-23 19:23:20 Train Epoch: 2 [358400/490606 (73%)]1658, Loss: 16.335805, Elapsed time: 35.9729s(100 iters) Margin: 0.4000, Scale: 30.00 67 | 2018-05-23 19:23:56 Train Epoch: 2 [409600/490606 (84%)]1758, Loss: 15.764087, Elapsed time: 35.9849s(100 iters) Margin: 0.4000, Scale: 30.00 68 | 2018-05-23 19:24:32 Train Epoch: 2 [460800/490606 (94%)]1858, Loss: 15.063875, Elapsed time: 35.9373s(100 iters) Margin: 0.4000, Scale: 30.00 69 | LFWACC=0.9233 std=0.0117 thd=0.3670 70 | 2018-05-23 19:26:57 Epoch 3 start training 71 | 2018-05-23 19:27:31 Train Epoch: 3 [51200/490606 (10%)]2016, Loss: 13.618626, Elapsed time: 34.1148s(100 iters) Margin: 0.4000, Scale: 30.00 72 | 2018-05-23 19:28:07 Train Epoch: 3 [102400/490606 (21%)]2116, Loss: 13.121652, Elapsed time: 35.4327s(100 iters) Margin: 0.4000, Scale: 30.00 73 | 2018-05-23 19:28:43 Train Epoch: 3 [153600/490606 (31%)]2216, Loss: 12.713780, Elapsed time: 36.0354s(100 iters) Margin: 0.4000, Scale: 30.00 74 | 2018-05-23 19:29:19 Train Epoch: 3 [204800/490606 (42%)]2316, Loss: 12.266833, Elapsed time: 36.0122s(100 iters) Margin: 0.4000, Scale: 30.00 75 | 2018-05-23 19:29:55 Train Epoch: 3 [256000/490606 (52%)]2416, Loss: 11.883569, Elapsed time: 36.0241s(100 iters) Margin: 0.4000, Scale: 30.00 76 | 2018-05-23 19:30:31 Train Epoch: 3 [307200/490606 (63%)]2516, Loss: 11.463918, Elapsed time: 
36.0170s(100 iters) Margin: 0.4000, Scale: 30.00 77 | 2018-05-23 19:31:07 Train Epoch: 3 [358400/490606 (73%)]2616, Loss: 11.128602, Elapsed time: 36.0190s(100 iters) Margin: 0.4000, Scale: 30.00 78 | 2018-05-23 19:31:43 Train Epoch: 3 [409600/490606 (84%)]2716, Loss: 10.812119, Elapsed time: 35.9626s(100 iters) Margin: 0.4000, Scale: 30.00 79 | 2018-05-23 19:32:19 Train Epoch: 3 [460800/490606 (94%)]2816, Loss: 10.578463, Elapsed time: 35.9703s(100 iters) Margin: 0.4000, Scale: 30.00 80 | LFWACC=0.9702 std=0.0076 thd=0.2950 81 | 2018-05-23 19:34:34 Epoch 4 start training 82 | 2018-05-23 19:35:08 Train Epoch: 4 [51200/490606 (10%)]2974, Loss: 9.457474, Elapsed time: 34.2395s(100 iters) Margin: 0.4000, Scale: 30.00 83 | 2018-05-23 19:35:44 Train Epoch: 4 [102400/490606 (21%)]3074, Loss: 9.541868, Elapsed time: 35.5723s(100 iters) Margin: 0.4000, Scale: 30.00 84 | 2018-05-23 19:36:20 Train Epoch: 4 [153600/490606 (31%)]3174, Loss: 9.497403, Elapsed time: 35.9900s(100 iters) Margin: 0.4000, Scale: 30.00 85 | 2018-05-23 19:36:56 Train Epoch: 4 [204800/490606 (42%)]3274, Loss: 9.347027, Elapsed time: 36.0266s(100 iters) Margin: 0.4000, Scale: 30.00 86 | 2018-05-23 19:37:32 Train Epoch: 4 [256000/490606 (52%)]3374, Loss: 9.233130, Elapsed time: 36.0077s(100 iters) Margin: 0.4000, Scale: 30.00 87 | 2018-05-23 19:38:08 Train Epoch: 4 [307200/490606 (63%)]3474, Loss: 9.117838, Elapsed time: 35.9983s(100 iters) Margin: 0.4000, Scale: 30.00 88 | 2018-05-23 19:38:44 Train Epoch: 4 [358400/490606 (73%)]3574, Loss: 9.118716, Elapsed time: 35.9918s(100 iters) Margin: 0.4000, Scale: 30.00 89 | 2018-05-23 19:39:20 Train Epoch: 4 [409600/490606 (84%)]3674, Loss: 8.936196, Elapsed time: 35.9953s(100 iters) Margin: 0.4000, Scale: 30.00 90 | 2018-05-23 19:39:56 Train Epoch: 4 [460800/490606 (94%)]3774, Loss: 8.825119, Elapsed time: 36.0084s(100 iters) Margin: 0.4000, Scale: 30.00 91 | LFWACC=0.9803 std=0.0041 thd=0.2750 92 | 2018-05-23 19:42:19 Epoch 5 start training 93 | 2018-05-23 19:42:53 Train Epoch: 5 [51200/490606 (10%)]3932, Loss: 7.975343, Elapsed time: 34.2936s(100 iters) Margin: 0.4000, Scale: 30.00 94 | 2018-05-23 19:43:29 Train Epoch: 5 [102400/490606 (21%)]4032, Loss: 8.176207, Elapsed time: 35.5250s(100 iters) Margin: 0.4000, Scale: 30.00 95 | 2018-05-23 19:44:05 Train Epoch: 5 [153600/490606 (31%)]4132, Loss: 8.215052, Elapsed time: 36.0704s(100 iters) Margin: 0.4000, Scale: 30.00 96 | 2018-05-23 19:44:41 Train Epoch: 5 [204800/490606 (42%)]4232, Loss: 8.252319, Elapsed time: 36.0169s(100 iters) Margin: 0.4000, Scale: 30.00 97 | 2018-05-23 19:45:17 Train Epoch: 5 [256000/490606 (52%)]4332, Loss: 8.244916, Elapsed time: 35.9891s(100 iters) Margin: 0.4000, Scale: 30.00 98 | 2018-05-23 19:45:53 Train Epoch: 5 [307200/490606 (63%)]4432, Loss: 8.189108, Elapsed time: 35.9863s(100 iters) Margin: 0.4000, Scale: 30.00 99 | 2018-05-23 19:46:29 Train Epoch: 5 [358400/490606 (73%)]4532, Loss: 8.111030, Elapsed time: 35.9836s(100 iters) Margin: 0.4000, Scale: 30.00 100 | 2018-05-23 19:47:05 Train Epoch: 5 [409600/490606 (84%)]4632, Loss: 8.137013, Elapsed time: 35.9499s(100 iters) Margin: 0.4000, Scale: 30.00 101 | 2018-05-23 19:47:41 Train Epoch: 5 [460800/490606 (94%)]4732, Loss: 8.104407, Elapsed time: 35.9972s(100 iters) Margin: 0.4000, Scale: 30.00 102 | LFWACC=0.9833 std=0.0050 thd=0.2610 103 | 2018-05-23 19:50:07 Epoch 6 start training 104 | 2018-05-23 19:50:41 Train Epoch: 6 [51200/490606 (10%)]4890, Loss: 7.283485, Elapsed time: 34.3658s(100 iters) Margin: 0.4000, Scale: 30.00 105 | 2018-05-23 
19:51:17 Train Epoch: 6 [102400/490606 (21%)]4990, Loss: 7.462644, Elapsed time: 35.3778s(100 iters) Margin: 0.4000, Scale: 30.00 106 | 2018-05-23 19:51:53 Train Epoch: 6 [153600/490606 (31%)]5090, Loss: 7.583212, Elapsed time: 36.0255s(100 iters) Margin: 0.4000, Scale: 30.00 107 | 2018-05-23 19:52:29 Train Epoch: 6 [204800/490606 (42%)]5190, Loss: 7.667313, Elapsed time: 36.1216s(100 iters) Margin: 0.4000, Scale: 30.00 108 | 2018-05-23 19:53:05 Train Epoch: 6 [256000/490606 (52%)]5290, Loss: 7.673120, Elapsed time: 36.0225s(100 iters) Margin: 0.4000, Scale: 30.00 109 | 2018-05-23 19:53:41 Train Epoch: 6 [307200/490606 (63%)]5390, Loss: 7.724990, Elapsed time: 36.0475s(100 iters) Margin: 0.4000, Scale: 30.00 110 | 2018-05-23 19:54:17 Train Epoch: 6 [358400/490606 (73%)]5490, Loss: 7.708799, Elapsed time: 35.9940s(100 iters) Margin: 0.4000, Scale: 30.00 111 | 2018-05-23 19:54:53 Train Epoch: 6 [409600/490606 (84%)]5590, Loss: 7.613770, Elapsed time: 36.0046s(100 iters) Margin: 0.4000, Scale: 30.00 112 | 2018-05-23 19:55:29 Train Epoch: 6 [460800/490606 (94%)]5690, Loss: 7.596351, Elapsed time: 35.9959s(100 iters) Margin: 0.4000, Scale: 30.00 113 | LFWACC=0.9833 std=0.0055 thd=0.2850 114 | 2018-05-23 19:57:46 Epoch 7 start training 115 | 2018-05-23 19:58:20 Train Epoch: 7 [51200/490606 (10%)]5848, Loss: 6.883032, Elapsed time: 34.2158s(100 iters) Margin: 0.4000, Scale: 30.00 116 | 2018-05-23 19:58:55 Train Epoch: 7 [102400/490606 (21%)]5948, Loss: 7.070419, Elapsed time: 35.5407s(100 iters) Margin: 0.4000, Scale: 30.00 117 | 2018-05-23 19:59:31 Train Epoch: 7 [153600/490606 (31%)]6048, Loss: 7.202237, Elapsed time: 36.0301s(100 iters) Margin: 0.4000, Scale: 30.00 118 | 2018-05-23 20:00:07 Train Epoch: 7 [204800/490606 (42%)]6148, Loss: 7.269621, Elapsed time: 36.0324s(100 iters) Margin: 0.4000, Scale: 30.00 119 | 2018-05-23 20:00:44 Train Epoch: 7 [256000/490606 (52%)]6248, Loss: 7.259326, Elapsed time: 36.0684s(100 iters) Margin: 0.4000, Scale: 30.00 120 | 2018-05-23 20:01:20 Train Epoch: 7 [307200/490606 (63%)]6348, Loss: 7.282149, Elapsed time: 36.0494s(100 iters) Margin: 0.4000, Scale: 30.00 121 | 2018-05-23 20:01:56 Train Epoch: 7 [358400/490606 (73%)]6448, Loss: 7.338722, Elapsed time: 35.9955s(100 iters) Margin: 0.4000, Scale: 30.00 122 | 2018-05-23 20:02:32 Train Epoch: 7 [409600/490606 (84%)]6548, Loss: 7.352073, Elapsed time: 36.0790s(100 iters) Margin: 0.4000, Scale: 30.00 123 | 2018-05-23 20:03:08 Train Epoch: 7 [460800/490606 (94%)]6648, Loss: 7.323556, Elapsed time: 35.9563s(100 iters) Margin: 0.4000, Scale: 30.00 124 | LFWACC=0.9845 std=0.0055 thd=0.2595 125 | 2018-05-23 20:05:32 Epoch 8 start training 126 | 2018-05-23 20:06:07 Train Epoch: 8 [51200/490606 (10%)]6806, Loss: 6.518858, Elapsed time: 34.2755s(100 iters) Margin: 0.4000, Scale: 30.00 127 | 2018-05-23 20:06:42 Train Epoch: 8 [102400/490606 (21%)]6906, Loss: 6.757237, Elapsed time: 35.6220s(100 iters) Margin: 0.4000, Scale: 30.00 128 | 2018-05-23 20:07:18 Train Epoch: 8 [153600/490606 (31%)]7006, Loss: 6.922932, Elapsed time: 36.0293s(100 iters) Margin: 0.4000, Scale: 30.00 129 | 2018-05-23 20:07:54 Train Epoch: 8 [204800/490606 (42%)]7106, Loss: 6.964305, Elapsed time: 36.0323s(100 iters) Margin: 0.4000, Scale: 30.00 130 | 2018-05-23 20:08:30 Train Epoch: 8 [256000/490606 (52%)]7206, Loss: 7.057364, Elapsed time: 36.0452s(100 iters) Margin: 0.4000, Scale: 30.00 131 | 2018-05-23 20:09:06 Train Epoch: 8 [307200/490606 (63%)]7306, Loss: 7.079147, Elapsed time: 35.9717s(100 iters) Margin: 0.4000, Scale: 30.00 132 | 
2018-05-23 20:09:42 Train Epoch: 8 [358400/490606 (73%)]7406, Loss: 7.086933, Elapsed time: 35.9631s(100 iters) Margin: 0.4000, Scale: 30.00 133 | 2018-05-23 20:10:18 Train Epoch: 8 [409600/490606 (84%)]7506, Loss: 7.058536, Elapsed time: 36.0568s(100 iters) Margin: 0.4000, Scale: 30.00 134 | 2018-05-23 20:10:54 Train Epoch: 8 [460800/490606 (94%)]7606, Loss: 7.173009, Elapsed time: 36.0088s(100 iters) Margin: 0.4000, Scale: 30.00 135 | LFWACC=0.9858 std=0.0057 thd=0.2540 136 | 2018-05-23 20:13:19 Epoch 9 start training 137 | 2018-05-23 20:13:53 Train Epoch: 9 [51200/490606 (10%)]7764, Loss: 6.341177, Elapsed time: 34.1438s(100 iters) Margin: 0.4000, Scale: 30.00 138 | 2018-05-23 20:14:29 Train Epoch: 9 [102400/490606 (21%)]7864, Loss: 6.574517, Elapsed time: 35.5881s(100 iters) Margin: 0.4000, Scale: 30.00 139 | 2018-05-23 20:15:05 Train Epoch: 9 [153600/490606 (31%)]7964, Loss: 6.693040, Elapsed time: 36.0941s(100 iters) Margin: 0.4000, Scale: 30.00 140 | 2018-05-23 20:15:41 Train Epoch: 9 [204800/490606 (42%)]8064, Loss: 6.794232, Elapsed time: 36.0516s(100 iters) Margin: 0.4000, Scale: 30.00 141 | 2018-05-23 20:16:17 Train Epoch: 9 [256000/490606 (52%)]8164, Loss: 6.827219, Elapsed time: 36.0137s(100 iters) Margin: 0.4000, Scale: 30.00 142 | 2018-05-23 20:16:53 Train Epoch: 9 [307200/490606 (63%)]8264, Loss: 6.856205, Elapsed time: 35.9791s(100 iters) Margin: 0.4000, Scale: 30.00 143 | 2018-05-23 20:17:29 Train Epoch: 9 [358400/490606 (73%)]8364, Loss: 6.907160, Elapsed time: 36.0321s(100 iters) Margin: 0.4000, Scale: 30.00 144 | 2018-05-23 20:18:05 Train Epoch: 9 [409600/490606 (84%)]8464, Loss: 6.809432, Elapsed time: 35.9543s(100 iters) Margin: 0.4000, Scale: 30.00 145 | 2018-05-23 20:18:41 Train Epoch: 9 [460800/490606 (94%)]8564, Loss: 6.894050, Elapsed time: 35.9482s(100 iters) Margin: 0.4000, Scale: 30.00 146 | LFWACC=0.9875 std=0.0061 thd=0.2585 147 | 2018-05-23 20:20:58 Epoch 10 start training 148 | 2018-05-23 20:21:32 Train Epoch: 10 [51200/490606 (10%)]8722, Loss: 6.146353, Elapsed time: 34.1100s(100 iters) Margin: 0.4000, Scale: 30.00 149 | 2018-05-23 20:22:08 Train Epoch: 10 [102400/490606 (21%)]8822, Loss: 6.370776, Elapsed time: 35.6021s(100 iters) Margin: 0.4000, Scale: 30.00 150 | 2018-05-23 20:22:44 Train Epoch: 10 [153600/490606 (31%)]8922, Loss: 6.509662, Elapsed time: 36.0019s(100 iters) Margin: 0.4000, Scale: 30.00 151 | 2018-05-23 20:23:20 Train Epoch: 10 [204800/490606 (42%)]9022, Loss: 6.593780, Elapsed time: 36.0084s(100 iters) Margin: 0.4000, Scale: 30.00 152 | 2018-05-23 20:23:56 Train Epoch: 10 [256000/490606 (52%)]9122, Loss: 6.688924, Elapsed time: 35.9761s(100 iters) Margin: 0.4000, Scale: 30.00 153 | 2018-05-23 20:24:32 Train Epoch: 10 [307200/490606 (63%)]9222, Loss: 6.723807, Elapsed time: 35.9641s(100 iters) Margin: 0.4000, Scale: 30.00 154 | 2018-05-23 20:25:08 Train Epoch: 10 [358400/490606 (73%)]9322, Loss: 6.724404, Elapsed time: 35.9614s(100 iters) Margin: 0.4000, Scale: 30.00 155 | 2018-05-23 20:25:44 Train Epoch: 10 [409600/490606 (84%)]9422, Loss: 6.753703, Elapsed time: 35.9535s(100 iters) Margin: 0.4000, Scale: 30.00 156 | 2018-05-23 20:26:20 Train Epoch: 10 [460800/490606 (94%)]9522, Loss: 6.762195, Elapsed time: 35.9634s(100 iters) Margin: 0.4000, Scale: 30.00 157 | LFWACC=0.9875 std=0.0046 thd=0.2485 158 | 2018-05-23 20:28:46 Epoch 11 start training 159 | 2018-05-23 20:29:20 Train Epoch: 11 [51200/490606 (10%)]9680, Loss: 6.019210, Elapsed time: 34.2957s(100 iters) Margin: 0.4000, Scale: 30.00 160 | 2018-05-23 20:29:56 Train Epoch: 11 
[102400/490606 (21%)]9780, Loss: 6.258551, Elapsed time: 35.5094s(100 iters) Margin: 0.4000, Scale: 30.00 161 | 2018-05-23 20:30:32 Train Epoch: 11 [153600/490606 (31%)]9880, Loss: 6.381166, Elapsed time: 36.0586s(100 iters) Margin: 0.4000, Scale: 30.00 162 | 2018-05-23 20:31:08 Train Epoch: 11 [204800/490606 (42%)]9980, Loss: 6.420512, Elapsed time: 36.0161s(100 iters) Margin: 0.4000, Scale: 30.00 163 | 2018-05-23 20:31:44 Train Epoch: 11 [256000/490606 (52%)]10080, Loss: 6.456196, Elapsed time: 36.0215s(100 iters) Margin: 0.4000, Scale: 30.00 164 | 2018-05-23 20:32:20 Train Epoch: 11 [307200/490606 (63%)]10180, Loss: 6.540070, Elapsed time: 36.0166s(100 iters) Margin: 0.4000, Scale: 30.00 165 | 2018-05-23 20:32:56 Train Epoch: 11 [358400/490606 (73%)]10280, Loss: 6.612662, Elapsed time: 36.0512s(100 iters) Margin: 0.4000, Scale: 30.00 166 | 2018-05-23 20:33:32 Train Epoch: 11 [409600/490606 (84%)]10380, Loss: 6.599439, Elapsed time: 35.9883s(100 iters) Margin: 0.4000, Scale: 30.00 167 | 2018-05-23 20:34:08 Train Epoch: 11 [460800/490606 (94%)]10480, Loss: 6.671313, Elapsed time: 36.0013s(100 iters) Margin: 0.4000, Scale: 30.00 168 | LFWACC=0.9875 std=0.0064 thd=0.2440 169 | 2018-05-23 20:36:29 Epoch 12 start training 170 | 2018-05-23 20:37:04 Train Epoch: 12 [51200/490606 (10%)]10638, Loss: 5.909317, Elapsed time: 34.2472s(100 iters) Margin: 0.4000, Scale: 30.00 171 | 2018-05-23 20:37:39 Train Epoch: 12 [102400/490606 (21%)]10738, Loss: 6.078120, Elapsed time: 35.5797s(100 iters) Margin: 0.4000, Scale: 30.00 172 | 2018-05-23 20:38:15 Train Epoch: 12 [153600/490606 (31%)]10838, Loss: 6.207509, Elapsed time: 36.0700s(100 iters) Margin: 0.4000, Scale: 30.00 173 | 2018-05-23 20:38:51 Train Epoch: 12 [204800/490606 (42%)]10938, Loss: 6.333955, Elapsed time: 36.0800s(100 iters) Margin: 0.4000, Scale: 30.00 174 | 2018-05-23 20:39:27 Train Epoch: 12 [256000/490606 (52%)]11038, Loss: 6.396277, Elapsed time: 36.0462s(100 iters) Margin: 0.4000, Scale: 30.00 175 | 2018-05-23 20:40:03 Train Epoch: 12 [307200/490606 (63%)]11138, Loss: 6.491610, Elapsed time: 36.0692s(100 iters) Margin: 0.4000, Scale: 30.00 176 | 2018-05-23 20:40:40 Train Epoch: 12 [358400/490606 (73%)]11238, Loss: 6.472386, Elapsed time: 36.0143s(100 iters) Margin: 0.4000, Scale: 30.00 177 | 2018-05-23 20:41:15 Train Epoch: 12 [409600/490606 (84%)]11338, Loss: 6.480846, Elapsed time: 35.9454s(100 iters) Margin: 0.4000, Scale: 30.00 178 | 2018-05-23 20:41:51 Train Epoch: 12 [460800/490606 (94%)]11438, Loss: 6.510494, Elapsed time: 36.0071s(100 iters) Margin: 0.4000, Scale: 30.00 179 | LFWACC=0.9862 std=0.0063 thd=0.2380 180 | 2018-05-23 20:44:11 Epoch 13 start training 181 | 2018-05-23 20:44:45 Train Epoch: 13 [51200/490606 (10%)]11596, Loss: 5.720556, Elapsed time: 34.1528s(100 iters) Margin: 0.4000, Scale: 30.00 182 | 2018-05-23 20:45:21 Train Epoch: 13 [102400/490606 (21%)]11696, Loss: 5.996943, Elapsed time: 35.6931s(100 iters) Margin: 0.4000, Scale: 30.00 183 | 2018-05-23 20:45:57 Train Epoch: 13 [153600/490606 (31%)]11796, Loss: 6.114484, Elapsed time: 36.0215s(100 iters) Margin: 0.4000, Scale: 30.00 184 | 2018-05-23 20:46:33 Train Epoch: 13 [204800/490606 (42%)]11896, Loss: 6.226144, Elapsed time: 36.0102s(100 iters) Margin: 0.4000, Scale: 30.00 185 | 2018-05-23 20:47:09 Train Epoch: 13 [256000/490606 (52%)]11996, Loss: 6.261764, Elapsed time: 35.9950s(100 iters) Margin: 0.4000, Scale: 30.00 186 | 2018-05-23 20:47:45 Train Epoch: 13 [307200/490606 (63%)]12096, Loss: 6.391793, Elapsed time: 35.9827s(100 iters) Margin: 0.4000, 
Scale: 30.00 187 | 2018-05-23 20:48:21 Train Epoch: 13 [358400/490606 (73%)]12196, Loss: 6.326674, Elapsed time: 35.9750s(100 iters) Margin: 0.4000, Scale: 30.00 188 | 2018-05-23 20:48:57 Train Epoch: 13 [409600/490606 (84%)]12296, Loss: 6.416332, Elapsed time: 35.9841s(100 iters) Margin: 0.4000, Scale: 30.00 189 | 2018-05-23 20:49:33 Train Epoch: 13 [460800/490606 (94%)]12396, Loss: 6.438344, Elapsed time: 35.9856s(100 iters) Margin: 0.4000, Scale: 30.00 190 | LFWACC=0.9875 std=0.0067 thd=0.2455 191 | 2018-05-23 20:51:55 Epoch 14 start training 192 | 2018-05-23 20:52:29 Train Epoch: 14 [51200/490606 (10%)]12554, Loss: 5.633001, Elapsed time: 34.1787s(100 iters) Margin: 0.4000, Scale: 30.00 193 | 2018-05-23 20:53:05 Train Epoch: 14 [102400/490606 (21%)]12654, Loss: 5.907697, Elapsed time: 35.5436s(100 iters) Margin: 0.4000, Scale: 30.00 194 | 2018-05-23 20:53:41 Train Epoch: 14 [153600/490606 (31%)]12754, Loss: 6.060194, Elapsed time: 36.0258s(100 iters) Margin: 0.4000, Scale: 30.00 195 | 2018-05-23 20:54:17 Train Epoch: 14 [204800/490606 (42%)]12854, Loss: 6.146974, Elapsed time: 35.9897s(100 iters) Margin: 0.4000, Scale: 30.00 196 | 2018-05-23 20:54:53 Train Epoch: 14 [256000/490606 (52%)]12954, Loss: 6.179490, Elapsed time: 35.9865s(100 iters) Margin: 0.4000, Scale: 30.00 197 | 2018-05-23 20:55:29 Train Epoch: 14 [307200/490606 (63%)]13054, Loss: 6.261081, Elapsed time: 35.9876s(100 iters) Margin: 0.4000, Scale: 30.00 198 | 2018-05-23 20:56:05 Train Epoch: 14 [358400/490606 (73%)]13154, Loss: 6.285656, Elapsed time: 35.9946s(100 iters) Margin: 0.4000, Scale: 30.00 199 | 2018-05-23 20:56:41 Train Epoch: 14 [409600/490606 (84%)]13254, Loss: 6.283706, Elapsed time: 35.9870s(100 iters) Margin: 0.4000, Scale: 30.00 200 | 2018-05-23 20:57:17 Train Epoch: 14 [460800/490606 (94%)]13354, Loss: 6.333812, Elapsed time: 35.9504s(100 iters) Margin: 0.4000, Scale: 30.00 201 | LFWACC=0.9885 std=0.0051 thd=0.2450 202 | 2018-05-23 20:59:40 Epoch 15 start training 203 | 2018-05-23 21:00:14 Train Epoch: 15 [51200/490606 (10%)]13512, Loss: 5.604967, Elapsed time: 34.1728s(100 iters) Margin: 0.4000, Scale: 30.00 204 | 2018-05-23 21:00:50 Train Epoch: 15 [102400/490606 (21%)]13612, Loss: 5.814933, Elapsed time: 35.4953s(100 iters) Margin: 0.4000, Scale: 30.00 205 | 2018-05-23 21:01:26 Train Epoch: 15 [153600/490606 (31%)]13712, Loss: 5.942712, Elapsed time: 36.0638s(100 iters) Margin: 0.4000, Scale: 30.00 206 | 2018-05-23 21:02:02 Train Epoch: 15 [204800/490606 (42%)]13812, Loss: 6.017539, Elapsed time: 36.0276s(100 iters) Margin: 0.4000, Scale: 30.00 207 | 2018-05-23 21:02:38 Train Epoch: 15 [256000/490606 (52%)]13912, Loss: 6.177509, Elapsed time: 36.0272s(100 iters) Margin: 0.4000, Scale: 30.00 208 | 2018-05-23 21:03:14 Train Epoch: 15 [307200/490606 (63%)]14012, Loss: 6.184337, Elapsed time: 36.0582s(100 iters) Margin: 0.4000, Scale: 30.00 209 | 2018-05-23 21:03:50 Train Epoch: 15 [358400/490606 (73%)]14112, Loss: 6.239037, Elapsed time: 35.9641s(100 iters) Margin: 0.4000, Scale: 30.00 210 | 2018-05-23 21:04:26 Train Epoch: 15 [409600/490606 (84%)]14212, Loss: 6.151098, Elapsed time: 36.0412s(100 iters) Margin: 0.4000, Scale: 30.00 211 | 2018-05-23 21:05:02 Train Epoch: 15 [460800/490606 (94%)]14312, Loss: 6.229830, Elapsed time: 35.9628s(100 iters) Margin: 0.4000, Scale: 30.00 212 | LFWACC=0.9875 std=0.0067 thd=0.2310 213 | 2018-05-23 21:07:20 Epoch 16 start training 214 | 2018-05-23 21:07:55 Train Epoch: 16 [51200/490606 (10%)]14470, Loss: 5.556650, Elapsed time: 34.1821s(100 iters) Margin: 0.4000, 
Scale: 30.00 215 | 2018-05-23 21:08:30 Train Epoch: 16 [102400/490606 (21%)]14570, Loss: 5.657033, Elapsed time: 35.5504s(100 iters) Margin: 0.4000, Scale: 30.00 216 | 2018-05-23 21:09:06 Train Epoch: 16 [153600/490606 (31%)]14670, Loss: 5.889253, Elapsed time: 36.1067s(100 iters) Margin: 0.4000, Scale: 30.00 217 | 2018-05-23 21:09:42 Train Epoch: 16 [204800/490606 (42%)]14770, Loss: 5.989058, Elapsed time: 36.0825s(100 iters) Margin: 0.4000, Scale: 30.00 218 | 2018-05-23 21:10:18 Train Epoch: 16 [256000/490606 (52%)]14870, Loss: 6.012755, Elapsed time: 36.0556s(100 iters) Margin: 0.4000, Scale: 30.00 219 | 2018-05-23 21:10:54 Train Epoch: 16 [307200/490606 (63%)]14970, Loss: 6.100257, Elapsed time: 36.0014s(100 iters) Margin: 0.4000, Scale: 30.00 220 | 2018-05-23 21:11:30 Train Epoch: 16 [358400/490606 (73%)]15070, Loss: 6.107724, Elapsed time: 35.9479s(100 iters) Margin: 0.4000, Scale: 30.00 221 | 2018-05-23 21:12:06 Train Epoch: 16 [409600/490606 (84%)]15170, Loss: 6.121854, Elapsed time: 36.0512s(100 iters) Margin: 0.4000, Scale: 30.00 222 | 2018-05-23 21:12:42 Train Epoch: 16 [460800/490606 (94%)]15270, Loss: 6.221931, Elapsed time: 35.9819s(100 iters) Margin: 0.4000, Scale: 30.00 223 | LFWACC=0.9883 std=0.0046 thd=0.2310 224 | 2018-05-23 21:15:07 Epoch 17 start training 225 | 2018-05-23 21:15:42 Train Epoch: 17 [51200/490606 (10%)]15428, Loss: 5.366854, Elapsed time: 34.4522s(100 iters) Margin: 0.4000, Scale: 30.00 226 | 2018-05-23 21:16:17 Train Epoch: 17 [102400/490606 (21%)]15528, Loss: 5.650527, Elapsed time: 35.5099s(100 iters) Margin: 0.4000, Scale: 30.00 227 | 2018-05-23 21:16:53 Train Epoch: 17 [153600/490606 (31%)]15628, Loss: 5.815484, Elapsed time: 36.0421s(100 iters) Margin: 0.4000, Scale: 30.00 228 | 2018-05-23 21:17:29 Train Epoch: 17 [204800/490606 (42%)]15728, Loss: 5.889817, Elapsed time: 36.0160s(100 iters) Margin: 0.4000, Scale: 30.00 229 | 2018-05-23 21:18:05 Train Epoch: 17 [256000/490606 (52%)]15828, Loss: 5.966596, Elapsed time: 36.0015s(100 iters) Margin: 0.4000, Scale: 30.00 230 | 2018-05-23 21:18:41 Train Epoch: 17 [307200/490606 (63%)]15928, Loss: 6.043575, Elapsed time: 36.1007s(100 iters) Margin: 0.4000, Scale: 30.00 231 | 2018-05-23 21:19:07 Adjust learning rate to 0.01 232 | 2018-05-23 21:19:17 Train Epoch: 17 [358400/490606 (73%)]16028, Loss: 5.964264, Elapsed time: 35.9844s(100 iters) Margin: 0.4000, Scale: 30.00 233 | 2018-05-23 21:19:54 Train Epoch: 17 [409600/490606 (84%)]16128, Loss: 5.271739, Elapsed time: 36.1476s(100 iters) Margin: 0.4000, Scale: 30.00 234 | 2018-05-23 21:20:30 Train Epoch: 17 [460800/490606 (94%)]16228, Loss: 5.070320, Elapsed time: 35.9479s(100 iters) Margin: 0.4000, Scale: 30.00 235 | LFWACC=0.9913 std=0.0045 thd=0.2250 236 | 2018-05-23 21:23:00 Epoch 18 start training 237 | 2018-05-23 21:23:34 Train Epoch: 18 [51200/490606 (10%)]16386, Loss: 4.068153, Elapsed time: 34.3613s(100 iters) Margin: 0.4000, Scale: 30.00 238 | 2018-05-23 21:24:10 Train Epoch: 18 [102400/490606 (21%)]16486, Loss: 3.999995, Elapsed time: 35.5394s(100 iters) Margin: 0.4000, Scale: 30.00 239 | 2018-05-23 21:24:46 Train Epoch: 18 [153600/490606 (31%)]16586, Loss: 3.985728, Elapsed time: 36.1158s(100 iters) Margin: 0.4000, Scale: 30.00 240 | 2018-05-23 21:25:22 Train Epoch: 18 [204800/490606 (42%)]16686, Loss: 3.922392, Elapsed time: 36.0689s(100 iters) Margin: 0.4000, Scale: 30.00 241 | 2018-05-23 21:25:58 Train Epoch: 18 [256000/490606 (52%)]16786, Loss: 3.889177, Elapsed time: 36.0607s(100 iters) Margin: 0.4000, Scale: 30.00 242 | 2018-05-23 21:26:34 
Train Epoch: 18 [307200/490606 (63%)]16886, Loss: 3.873768, Elapsed time: 36.0275s(100 iters) Margin: 0.4000, Scale: 30.00 243 | 2018-05-23 21:27:10 Train Epoch: 18 [358400/490606 (73%)]16986, Loss: 3.863787, Elapsed time: 35.9610s(100 iters) Margin: 0.4000, Scale: 30.00 244 | 2018-05-23 21:27:46 Train Epoch: 18 [409600/490606 (84%)]17086, Loss: 3.856158, Elapsed time: 36.0257s(100 iters) Margin: 0.4000, Scale: 30.00 245 | 2018-05-23 21:28:22 Train Epoch: 18 [460800/490606 (94%)]17186, Loss: 3.842379, Elapsed time: 35.9140s(100 iters) Margin: 0.4000, Scale: 30.00 246 | LFWACC=0.9913 std=0.0038 thd=0.2030 247 | 2018-05-23 21:30:35 Epoch 19 start training 248 | 2018-05-23 21:31:09 Train Epoch: 19 [51200/490606 (10%)]17344, Loss: 3.459191, Elapsed time: 34.1803s(100 iters) Margin: 0.4000, Scale: 30.00 249 | 2018-05-23 21:31:45 Train Epoch: 19 [102400/490606 (21%)]17444, Loss: 3.538409, Elapsed time: 35.6680s(100 iters) Margin: 0.4000, Scale: 30.00 250 | 2018-05-23 21:32:21 Train Epoch: 19 [153600/490606 (31%)]17544, Loss: 3.506114, Elapsed time: 36.0558s(100 iters) Margin: 0.4000, Scale: 30.00 251 | 2018-05-23 21:32:57 Train Epoch: 19 [204800/490606 (42%)]17644, Loss: 3.571455, Elapsed time: 35.9935s(100 iters) Margin: 0.4000, Scale: 30.00 252 | 2018-05-23 21:33:33 Train Epoch: 19 [256000/490606 (52%)]17744, Loss: 3.575744, Elapsed time: 36.0146s(100 iters) Margin: 0.4000, Scale: 30.00 253 | 2018-05-23 21:34:09 Train Epoch: 19 [307200/490606 (63%)]17844, Loss: 3.560832, Elapsed time: 35.9722s(100 iters) Margin: 0.4000, Scale: 30.00 254 | 2018-05-23 21:34:45 Train Epoch: 19 [358400/490606 (73%)]17944, Loss: 3.518319, Elapsed time: 35.8639s(100 iters) Margin: 0.4000, Scale: 30.00 255 | 2018-05-23 21:35:21 Train Epoch: 19 [409600/490606 (84%)]18044, Loss: 3.533097, Elapsed time: 36.0289s(100 iters) Margin: 0.4000, Scale: 30.00 256 | 2018-05-23 21:35:57 Train Epoch: 19 [460800/490606 (94%)]18144, Loss: 3.559964, Elapsed time: 35.9101s(100 iters) Margin: 0.4000, Scale: 30.00 257 | LFWACC=0.9912 std=0.0041 thd=0.2340 258 | 2018-05-23 21:38:18 Epoch 20 start training 259 | 2018-05-23 21:38:52 Train Epoch: 20 [51200/490606 (10%)]18302, Loss: 3.219106, Elapsed time: 34.1652s(100 iters) Margin: 0.4000, Scale: 30.00 260 | 2018-05-23 21:39:28 Train Epoch: 20 [102400/490606 (21%)]18402, Loss: 3.260992, Elapsed time: 35.6308s(100 iters) Margin: 0.4000, Scale: 30.00 261 | 2018-05-23 21:40:04 Train Epoch: 20 [153600/490606 (31%)]18502, Loss: 3.303162, Elapsed time: 36.0723s(100 iters) Margin: 0.4000, Scale: 30.00 262 | 2018-05-23 21:40:40 Train Epoch: 20 [204800/490606 (42%)]18602, Loss: 3.322323, Elapsed time: 36.0742s(100 iters) Margin: 0.4000, Scale: 30.00 263 | 2018-05-23 21:41:16 Train Epoch: 20 [256000/490606 (52%)]18702, Loss: 3.327122, Elapsed time: 36.0417s(100 iters) Margin: 0.4000, Scale: 30.00 264 | 2018-05-23 21:41:52 Train Epoch: 20 [307200/490606 (63%)]18802, Loss: 3.370712, Elapsed time: 36.0534s(100 iters) Margin: 0.4000, Scale: 30.00 265 | 2018-05-23 21:42:28 Train Epoch: 20 [358400/490606 (73%)]18902, Loss: 3.374115, Elapsed time: 35.9189s(100 iters) Margin: 0.4000, Scale: 30.00 266 | 2018-05-23 21:43:04 Train Epoch: 20 [409600/490606 (84%)]19002, Loss: 3.339902, Elapsed time: 36.0231s(100 iters) Margin: 0.4000, Scale: 30.00 267 | 2018-05-23 21:43:40 Train Epoch: 20 [460800/490606 (94%)]19102, Loss: 3.367363, Elapsed time: 35.9528s(100 iters) Margin: 0.4000, Scale: 30.00 268 | LFWACC=0.9912 std=0.0043 thd=0.2280 269 | 2018-05-23 21:46:02 Epoch 21 start training 270 | 2018-05-23 21:46:36 
Train Epoch: 21 [51200/490606 (10%)]19260, Loss: 3.065654, Elapsed time: 34.3261s(100 iters) Margin: 0.4000, Scale: 30.00 271 | 2018-05-23 21:47:12 Train Epoch: 21 [102400/490606 (21%)]19360, Loss: 3.087423, Elapsed time: 35.5919s(100 iters) Margin: 0.4000, Scale: 30.00 272 | 2018-05-23 21:47:48 Train Epoch: 21 [153600/490606 (31%)]19460, Loss: 3.115926, Elapsed time: 36.1019s(100 iters) Margin: 0.4000, Scale: 30.00 273 | 2018-05-23 21:48:24 Train Epoch: 21 [204800/490606 (42%)]19560, Loss: 3.165975, Elapsed time: 35.9941s(100 iters) Margin: 0.4000, Scale: 30.00 274 | 2018-05-23 21:49:00 Train Epoch: 21 [256000/490606 (52%)]19660, Loss: 3.188658, Elapsed time: 36.0730s(100 iters) Margin: 0.4000, Scale: 30.00 275 | 2018-05-23 21:49:36 Train Epoch: 21 [307200/490606 (63%)]19760, Loss: 3.221699, Elapsed time: 35.9748s(100 iters) Margin: 0.4000, Scale: 30.00 276 | 2018-05-23 21:50:12 Train Epoch: 21 [358400/490606 (73%)]19860, Loss: 3.258297, Elapsed time: 35.9744s(100 iters) Margin: 0.4000, Scale: 30.00 277 | 2018-05-23 21:50:48 Train Epoch: 21 [409600/490606 (84%)]19960, Loss: 3.241807, Elapsed time: 36.0216s(100 iters) Margin: 0.4000, Scale: 30.00 278 | 2018-05-23 21:51:24 Train Epoch: 21 [460800/490606 (94%)]20060, Loss: 3.259449, Elapsed time: 35.9584s(100 iters) Margin: 0.4000, Scale: 30.00 279 | LFWACC=0.9925 std=0.0039 thd=0.2300 280 | 2018-05-23 21:53:39 Epoch 22 start training 281 | 2018-05-23 21:54:14 Train Epoch: 22 [51200/490606 (10%)]20218, Loss: 2.922102, Elapsed time: 34.2337s(100 iters) Margin: 0.4000, Scale: 30.00 282 | 2018-05-23 21:54:49 Train Epoch: 22 [102400/490606 (21%)]20318, Loss: 2.952286, Elapsed time: 35.8667s(100 iters) Margin: 0.4000, Scale: 30.00 283 | 2018-05-23 21:55:26 Train Epoch: 22 [153600/490606 (31%)]20418, Loss: 2.982787, Elapsed time: 36.1352s(100 iters) Margin: 0.4000, Scale: 30.00 284 | 2018-05-23 21:56:01 Train Epoch: 22 [204800/490606 (42%)]20518, Loss: 3.026738, Elapsed time: 35.9384s(100 iters) Margin: 0.4000, Scale: 30.00 285 | 2018-05-23 21:56:38 Train Epoch: 22 [256000/490606 (52%)]20618, Loss: 3.118796, Elapsed time: 36.1309s(100 iters) Margin: 0.4000, Scale: 30.00 286 | 2018-05-23 21:57:14 Train Epoch: 22 [307200/490606 (63%)]20718, Loss: 3.104802, Elapsed time: 35.9991s(100 iters) Margin: 0.4000, Scale: 30.00 287 | 2018-05-23 21:57:50 Train Epoch: 22 [358400/490606 (73%)]20818, Loss: 3.126713, Elapsed time: 35.9491s(100 iters) Margin: 0.4000, Scale: 30.00 288 | 2018-05-23 21:58:26 Train Epoch: 22 [409600/490606 (84%)]20918, Loss: 3.128467, Elapsed time: 36.0358s(100 iters) Margin: 0.4000, Scale: 30.00 289 | 2018-05-23 21:59:02 Train Epoch: 22 [460800/490606 (94%)]21018, Loss: 3.204703, Elapsed time: 35.9242s(100 iters) Margin: 0.4000, Scale: 30.00 290 | LFWACC=0.9908 std=0.0045 thd=0.2185 291 | 2018-05-23 22:01:29 Epoch 23 start training 292 | 2018-05-23 22:02:03 Train Epoch: 23 [51200/490606 (10%)]21176, Loss: 2.807432, Elapsed time: 34.2499s(100 iters) Margin: 0.4000, Scale: 30.00 293 | 2018-05-23 22:02:38 Train Epoch: 23 [102400/490606 (21%)]21276, Loss: 2.858192, Elapsed time: 35.4849s(100 iters) Margin: 0.4000, Scale: 30.00 294 | 2018-05-23 22:03:14 Train Epoch: 23 [153600/490606 (31%)]21376, Loss: 2.885761, Elapsed time: 36.0666s(100 iters) Margin: 0.4000, Scale: 30.00 295 | 2018-05-23 22:03:50 Train Epoch: 23 [204800/490606 (42%)]21476, Loss: 2.952499, Elapsed time: 35.9498s(100 iters) Margin: 0.4000, Scale: 30.00 296 | 2018-05-23 22:04:26 Train Epoch: 23 [256000/490606 (52%)]21576, Loss: 2.993947, Elapsed time: 36.0281s(100 iters) 
Margin: 0.4000, Scale: 30.00 297 | 2018-05-23 22:05:02 Train Epoch: 23 [307200/490606 (63%)]21676, Loss: 3.008468, Elapsed time: 36.0045s(100 iters) Margin: 0.4000, Scale: 30.00 298 | 2018-05-23 22:05:38 Train Epoch: 23 [358400/490606 (73%)]21776, Loss: 3.075610, Elapsed time: 36.0116s(100 iters) Margin: 0.4000, Scale: 30.00 299 | 2018-05-23 22:06:15 Train Epoch: 23 [409600/490606 (84%)]21876, Loss: 3.069281, Elapsed time: 36.1270s(100 iters) Margin: 0.4000, Scale: 30.00 300 | 2018-05-23 22:06:51 Train Epoch: 23 [460800/490606 (94%)]21976, Loss: 3.123618, Elapsed time: 35.9815s(100 iters) Margin: 0.4000, Scale: 30.00 301 | LFWACC=0.9923 std=0.0040 thd=0.2245 302 | 2018-05-23 22:09:14 Epoch 24 start training 303 | 2018-05-23 22:09:48 Train Epoch: 24 [51200/490606 (10%)]22134, Loss: 2.691407, Elapsed time: 34.1015s(100 iters) Margin: 0.4000, Scale: 30.00 304 | 2018-05-23 22:10:24 Train Epoch: 24 [102400/490606 (21%)]22234, Loss: 2.810353, Elapsed time: 35.5100s(100 iters) Margin: 0.4000, Scale: 30.00 305 | 2018-05-23 22:11:00 Train Epoch: 24 [153600/490606 (31%)]22334, Loss: 2.808768, Elapsed time: 36.0167s(100 iters) Margin: 0.4000, Scale: 30.00 306 | 2018-05-23 22:11:36 Train Epoch: 24 [204800/490606 (42%)]22434, Loss: 2.868716, Elapsed time: 35.9744s(100 iters) Margin: 0.4000, Scale: 30.00 307 | 2018-05-23 22:12:12 Train Epoch: 24 [256000/490606 (52%)]22534, Loss: 2.919757, Elapsed time: 36.0570s(100 iters) Margin: 0.4000, Scale: 30.00 308 | 2018-05-23 22:12:48 Train Epoch: 24 [307200/490606 (63%)]22634, Loss: 2.959254, Elapsed time: 35.9358s(100 iters) Margin: 0.4000, Scale: 30.00 309 | 2018-05-23 22:13:24 Train Epoch: 24 [358400/490606 (73%)]22734, Loss: 3.001285, Elapsed time: 36.0712s(100 iters) Margin: 0.4000, Scale: 30.00 310 | 2018-05-23 22:14:00 Train Epoch: 24 [409600/490606 (84%)]22834, Loss: 2.997495, Elapsed time: 36.0941s(100 iters) Margin: 0.4000, Scale: 30.00 311 | 2018-05-23 22:14:36 Train Epoch: 24 [460800/490606 (94%)]22934, Loss: 3.059158, Elapsed time: 35.9824s(100 iters) Margin: 0.4000, Scale: 30.00 312 | LFWACC=0.9910 std=0.0037 thd=0.2225 313 | 2018-05-23 22:16:52 Epoch 25 start training 314 | 2018-05-23 22:17:26 Train Epoch: 25 [51200/490606 (10%)]23092, Loss: 2.641940, Elapsed time: 34.2178s(100 iters) Margin: 0.4000, Scale: 30.00 315 | 2018-05-23 22:18:02 Train Epoch: 25 [102400/490606 (21%)]23192, Loss: 2.711390, Elapsed time: 35.6883s(100 iters) Margin: 0.4000, Scale: 30.00 316 | 2018-05-23 22:18:38 Train Epoch: 25 [153600/490606 (31%)]23292, Loss: 2.755289, Elapsed time: 36.1016s(100 iters) Margin: 0.4000, Scale: 30.00 317 | 2018-05-23 22:19:14 Train Epoch: 25 [204800/490606 (42%)]23392, Loss: 2.789568, Elapsed time: 35.9956s(100 iters) Margin: 0.4000, Scale: 30.00 318 | 2018-05-23 22:19:50 Train Epoch: 25 [256000/490606 (52%)]23492, Loss: 2.842773, Elapsed time: 36.0698s(100 iters) Margin: 0.4000, Scale: 30.00 319 | 2018-05-23 22:20:26 Train Epoch: 25 [307200/490606 (63%)]23592, Loss: 2.916521, Elapsed time: 35.9936s(100 iters) Margin: 0.4000, Scale: 30.00 320 | 2018-05-23 22:21:02 Train Epoch: 25 [358400/490606 (73%)]23692, Loss: 2.942162, Elapsed time: 36.0118s(100 iters) Margin: 0.4000, Scale: 30.00 321 | 2018-05-23 22:21:38 Train Epoch: 25 [409600/490606 (84%)]23792, Loss: 2.917640, Elapsed time: 36.0422s(100 iters) Margin: 0.4000, Scale: 30.00 322 | 2018-05-23 22:22:14 Train Epoch: 25 [460800/490606 (94%)]23892, Loss: 3.022196, Elapsed time: 35.9415s(100 iters) Margin: 0.4000, Scale: 30.00 323 | LFWACC=0.9905 std=0.0038 thd=0.2245 324 | 2018-05-23 
22:24:39 Epoch 26 start training 325 | 2018-05-23 22:24:57 Adjust learning rate to 0.001 326 | 2018-05-23 22:25:13 Train Epoch: 26 [51200/490606 (10%)]24050, Loss: 2.551205, Elapsed time: 34.3868s(100 iters) Margin: 0.4000, Scale: 30.00 327 | 2018-05-23 22:25:49 Train Epoch: 26 [102400/490606 (21%)]24150, Loss: 2.445069, Elapsed time: 35.5165s(100 iters) Margin: 0.4000, Scale: 30.00 328 | 2018-05-23 22:26:25 Train Epoch: 26 [153600/490606 (31%)]24250, Loss: 2.453759, Elapsed time: 36.0173s(100 iters) Margin: 0.4000, Scale: 30.00 329 | 2018-05-23 22:27:01 Train Epoch: 26 [204800/490606 (42%)]24350, Loss: 2.444307, Elapsed time: 35.9456s(100 iters) Margin: 0.4000, Scale: 30.00 330 | 2018-05-23 22:27:37 Train Epoch: 26 [256000/490606 (52%)]24450, Loss: 2.438290, Elapsed time: 36.0195s(100 iters) Margin: 0.4000, Scale: 30.00 331 | 2018-05-23 22:28:13 Train Epoch: 26 [307200/490606 (63%)]24550, Loss: 2.421530, Elapsed time: 35.9486s(100 iters) Margin: 0.4000, Scale: 30.00 332 | 2018-05-23 22:28:49 Train Epoch: 26 [358400/490606 (73%)]24650, Loss: 2.425142, Elapsed time: 36.0274s(100 iters) Margin: 0.4000, Scale: 30.00 333 | 2018-05-23 22:29:25 Train Epoch: 26 [409600/490606 (84%)]24750, Loss: 2.411506, Elapsed time: 36.0143s(100 iters) Margin: 0.4000, Scale: 30.00 334 | 2018-05-23 22:30:01 Train Epoch: 26 [460800/490606 (94%)]24850, Loss: 2.414110, Elapsed time: 36.0176s(100 iters) Margin: 0.4000, Scale: 30.00 335 | LFWACC=0.9915 std=0.0043 thd=0.2230 336 | 2018-05-23 22:32:25 Epoch 27 start training 337 | 2018-05-23 22:33:00 Train Epoch: 27 [51200/490606 (10%)]25008, Loss: 2.328808, Elapsed time: 34.2826s(100 iters) Margin: 0.4000, Scale: 30.00 338 | 2018-05-23 22:33:35 Train Epoch: 27 [102400/490606 (21%)]25108, Loss: 2.334921, Elapsed time: 35.5975s(100 iters) Margin: 0.4000, Scale: 30.00 339 | 2018-05-23 22:34:11 Train Epoch: 27 [153600/490606 (31%)]25208, Loss: 2.341867, Elapsed time: 36.0002s(100 iters) Margin: 0.4000, Scale: 30.00 340 | 2018-05-23 22:34:47 Train Epoch: 27 [204800/490606 (42%)]25308, Loss: 2.360965, Elapsed time: 35.9519s(100 iters) Margin: 0.4000, Scale: 30.00 341 | 2018-05-23 22:35:23 Train Epoch: 27 [256000/490606 (52%)]25408, Loss: 2.331785, Elapsed time: 36.0566s(100 iters) Margin: 0.4000, Scale: 30.00 342 | 2018-05-23 22:35:59 Train Epoch: 27 [307200/490606 (63%)]25508, Loss: 2.359483, Elapsed time: 35.8897s(100 iters) Margin: 0.4000, Scale: 30.00 343 | 2018-05-23 22:36:35 Train Epoch: 27 [358400/490606 (73%)]25608, Loss: 2.345174, Elapsed time: 35.9170s(100 iters) Margin: 0.4000, Scale: 30.00 344 | 2018-05-23 22:37:11 Train Epoch: 27 [409600/490606 (84%)]25708, Loss: 2.331788, Elapsed time: 36.0671s(100 iters) Margin: 0.4000, Scale: 30.00 345 | 2018-05-23 22:37:47 Train Epoch: 27 [460800/490606 (94%)]25808, Loss: 2.356719, Elapsed time: 36.0207s(100 iters) Margin: 0.4000, Scale: 30.00 346 | LFWACC=0.9923 std=0.0042 thd=0.2100 347 | 2018-05-23 22:40:07 Epoch 28 start training 348 | 2018-05-23 22:40:41 Train Epoch: 28 [51200/490606 (10%)]25966, Loss: 2.307153, Elapsed time: 34.1551s(100 iters) Margin: 0.4000, Scale: 30.00 349 | 2018-05-23 22:41:16 Train Epoch: 28 [102400/490606 (21%)]26066, Loss: 2.274802, Elapsed time: 35.6277s(100 iters) Margin: 0.4000, Scale: 30.00 350 | 2018-05-23 22:41:52 Train Epoch: 28 [153600/490606 (31%)]26166, Loss: 2.292232, Elapsed time: 35.9855s(100 iters) Margin: 0.4000, Scale: 30.00 351 | 2018-05-23 22:42:28 Train Epoch: 28 [204800/490606 (42%)]26266, Loss: 2.288551, Elapsed time: 35.9658s(100 iters) Margin: 0.4000, Scale: 30.00 352 | 
2018-05-23 22:43:04 Train Epoch: 28 [256000/490606 (52%)]26366, Loss: 2.322403, Elapsed time: 36.0355s(100 iters) Margin: 0.4000, Scale: 30.00 353 | 2018-05-23 22:43:40 Train Epoch: 28 [307200/490606 (63%)]26466, Loss: 2.312792, Elapsed time: 35.9256s(100 iters) Margin: 0.4000, Scale: 30.00 354 | 2018-05-23 22:44:16 Train Epoch: 28 [358400/490606 (73%)]26566, Loss: 2.337278, Elapsed time: 35.9649s(100 iters) Margin: 0.4000, Scale: 30.00 355 | 2018-05-23 22:44:52 Train Epoch: 28 [409600/490606 (84%)]26666, Loss: 2.306321, Elapsed time: 35.9730s(100 iters) Margin: 0.4000, Scale: 30.00 356 | 2018-05-23 22:45:28 Train Epoch: 28 [460800/490606 (94%)]26766, Loss: 2.331279, Elapsed time: 35.9543s(100 iters) Margin: 0.4000, Scale: 30.00 357 | LFWACC=0.9917 std=0.0045 thd=0.2110 358 | 2018-05-23 22:47:51 Epoch 29 start training 359 | 2018-05-23 22:48:25 Train Epoch: 29 [51200/490606 (10%)]26924, Loss: 2.245058, Elapsed time: 34.0651s(100 iters) Margin: 0.4000, Scale: 30.00 360 | 2018-05-23 22:49:01 Train Epoch: 29 [102400/490606 (21%)]27024, Loss: 2.264334, Elapsed time: 35.6141s(100 iters) Margin: 0.4000, Scale: 30.00 361 | 2018-05-23 22:49:37 Train Epoch: 29 [153600/490606 (31%)]27124, Loss: 2.311448, Elapsed time: 35.9998s(100 iters) Margin: 0.4000, Scale: 30.00 362 | 2018-05-23 22:50:13 Train Epoch: 29 [204800/490606 (42%)]27224, Loss: 2.265077, Elapsed time: 35.9665s(100 iters) Margin: 0.4000, Scale: 30.00 363 | 2018-05-23 22:50:49 Train Epoch: 29 [256000/490606 (52%)]27324, Loss: 2.324094, Elapsed time: 36.0864s(100 iters) Margin: 0.4000, Scale: 30.00 364 | 2018-05-23 22:51:25 Train Epoch: 29 [307200/490606 (63%)]27424, Loss: 2.267978, Elapsed time: 35.9455s(100 iters) Margin: 0.4000, Scale: 30.00 365 | 2018-05-23 22:52:01 Train Epoch: 29 [358400/490606 (73%)]27524, Loss: 2.305675, Elapsed time: 36.0461s(100 iters) Margin: 0.4000, Scale: 30.00 366 | 2018-05-23 22:52:37 Train Epoch: 29 [409600/490606 (84%)]27624, Loss: 2.298694, Elapsed time: 36.0304s(100 iters) Margin: 0.4000, Scale: 30.00 367 | 2018-05-23 22:53:13 Train Epoch: 29 [460800/490606 (94%)]27724, Loss: 2.307750, Elapsed time: 36.1599s(100 iters) Margin: 0.4000, Scale: 30.00 368 | LFWACC=0.9912 std=0.0048 thd=0.2225 369 | 2018-05-23 22:55:47 Epoch 30 start training 370 | 2018-05-23 22:59:24 Train Epoch: 30 [51200/490606 (10%)]27882, Loss: 2.260411, Elapsed time: 217.2888s(100 iters) Margin: 0.4000, Scale: 30.00 371 | 2018-05-23 23:02:47 Train Epoch: 30 [102400/490606 (21%)]27982, Loss: 2.235316, Elapsed time: 202.9995s(100 iters) Margin: 0.4000, Scale: 30.00 372 | 2018-05-23 23:03:14 Adjust learning rate to 0.0001 373 | 2018-05-23 23:05:09 Train Epoch: 30 [153600/490606 (31%)]28082, Loss: 2.238195, Elapsed time: 141.9803s(100 iters) Margin: 0.4000, Scale: 30.00 374 | 2018-05-23 23:07:34 Train Epoch: 30 [204800/490606 (42%)]28182, Loss: 2.258540, Elapsed time: 144.9503s(100 iters) Margin: 0.4000, Scale: 30.00 375 | 2018-05-23 23:10:20 Train Epoch: 30 [256000/490606 (52%)]28282, Loss: 2.249819, Elapsed time: 165.5039s(100 iters) Margin: 0.4000, Scale: 30.00 376 | 2018-05-23 23:12:32 Train Epoch: 30 [307200/490606 (63%)]28382, Loss: 2.229275, Elapsed time: 131.7649s(100 iters) Margin: 0.4000, Scale: 30.00 377 | 2018-05-23 23:14:22 Train Epoch: 30 [358400/490606 (73%)]28482, Loss: 2.241612, Elapsed time: 110.8490s(100 iters) Margin: 0.4000, Scale: 30.00 378 | 2018-05-23 23:15:57 Train Epoch: 30 [409600/490606 (84%)]28582, Loss: 2.215702, Elapsed time: 94.4214s(100 iters) Margin: 0.4000, Scale: 30.00 379 | 2018-05-23 23:17:15 Train 
Epoch: 30 [460800/490606 (94%)]28682, Loss: 2.251951, Elapsed time: 78.3591s(100 iters) Margin: 0.4000, Scale: 30.00 380 | LFWACC=0.9913 std=0.0049 thd=0.2130 381 | Finished Training 382 | -------------------------------------------------------------------------------- /main.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function 2 | from __future__ import division 3 | import argparse 4 | import os 5 | import time 6 | 7 | import torch 8 | import torch.utils.data 9 | import torch.optim 10 | import torchvision.transforms as transforms 11 | import torch.backends.cudnn as cudnn 12 | 13 | cudnn.benchmark = True 14 | 15 | import net 16 | from dataset import ImageList 17 | import lfw_eval 18 | import layer 19 | 20 | #os.environ["CUDA_VISIBLE_DEVICES"] = "0,1,2,3" 21 | 22 | # Training settings 23 | parser = argparse.ArgumentParser(description='PyTorch CosFace') 24 | 25 | # DATA 26 | parser.add_argument('--root_path', type=str, default='', 27 | help='path to the root directory of images') 28 | parser.add_argument('--database', type=str, default='WebFace', 29 | help='which database to train on (WebFace, VggFace2)') 30 | parser.add_argument('--train_list', type=str, default=None, 31 | help='path to training list') 32 | parser.add_argument('--batch_size', type=int, default=512, 33 | help='input batch size for training (default: 512)') 34 | parser.add_argument('--is_gray', action='store_true', 35 | help='transform input images to grayscale (default: False)') 36 | # Network 37 | parser.add_argument('--network', type=str, default='sphere20', 38 | help='which network to train (sphere20, sphere64, LResNet50E_IR)') 39 | # Classifier 40 | parser.add_argument('--num_class', type=int, default=None, 41 | help='number of identities (classes)') 42 | parser.add_argument('--classifier_type', type=str, default='MCP', 43 | help='which classifier to train with 
(MCP, AL, L)') 44 | # LR policy 45 | parser.add_argument('--epochs', type=int, default=30, 46 | help='number of epochs to train (default: 30)') 47 | parser.add_argument('--lr', type=float, default=0.1, 48 | help='learning rate (default: 0.1)') 49 | parser.add_argument('--step_size', type=int, nargs='+', default=None, 50 | help='iterations at which the lr is decayed') # [15000, 22000, 26000][80000,120000,140000][100000, 140000, 160000] 51 | parser.add_argument('--momentum', type=float, default=0.9, 52 | help='SGD momentum (default: 0.9)') 53 | parser.add_argument('--weight_decay', type=float, default=5e-4, 54 | metavar='W', help='weight decay (default: 0.0005)') 55 | # Common settings 56 | parser.add_argument('--log_interval', type=int, default=100, 57 | help='how many batches to wait before logging training status') 58 | parser.add_argument('--save_path', type=str, default='checkpoint/', 59 | help='path to save checkpoints') 60 | parser.add_argument('--no_cuda', action='store_true', 61 | help='disables CUDA training') 62 | parser.add_argument('--workers', type=int, default=4, 63 | help='number of data-loading workers') 64 | args = parser.parse_args() 65 | args.cuda = not args.no_cuda and torch.cuda.is_available() 66 | device = torch.device("cuda" if args.cuda else "cpu") 67 | 68 | if args.database == 'WebFace': 69 | args.train_list = '/home/wangyf/dataset/CASIA-WebFace/CASIA-WebFace-112X96.txt' 70 | args.num_class = 10572 71 | args.step_size = [16000, 24000] 72 | elif args.database == 'VggFace2': 73 | args.train_list = '/home/wangyf/dataset/VGG-Face2/VGG-Face2-112X96.txt' 74 | args.num_class = 8069 75 | args.step_size = [80000, 120000, 140000] 76 | else: 77 | raise ValueError("UNSUPPORTED DATABASE!") 78 | 79 | 80 | def main(): 81 | # --------------------------------------model---------------------------------------- 82 | if args.network == 'sphere20': 83 | model = net.sphere(type=20, is_gray=args.is_gray) 84 | model_eval = net.sphere(type=20, is_gray=args.is_gray) 85 | elif args.network == 'sphere64': 86 | model = net.sphere(type=64, is_gray=args.is_gray) 87 | model_eval = net.sphere(type=64, is_gray=args.is_gray) 88 | elif args.network == 'LResNet50E_IR': 89 | model = net.LResNet50E_IR(is_gray=args.is_gray) 90 | model_eval = net.LResNet50E_IR(is_gray=args.is_gray) 91 | else: 92 | raise ValueError("UNSUPPORTED NETWORK!") 93 | 94 | 95 | model = torch.nn.DataParallel(model).to(device) 96 | model_eval = model_eval.to(device) 97 | print(model) 98 | if not os.path.exists(args.save_path): 99 | os.makedirs(args.save_path) 100 | model.module.save(args.save_path + 'CosFace_0_checkpoint.pth') 101 | 102 | # 512 is the dimension of the feature embedding 103 | classifier = { 104 | 'MCP': layer.MarginCosineProduct(512, args.num_class).to(device), 105 | 'AL' : layer.AngleLinear(512, args.num_class).to(device), 106 | 'L' : torch.nn.Linear(512, args.num_class, bias=False).to(device) 107 | }[args.classifier_type]
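# Descriptive note on the three classifier heads above: 'MCP' = layer.MarginCosineProduct, the large-margin cosine layer implementing the CosFace loss; 'AL' = layer.AngleLinear, a SphereFace-style angular-margin (A-Softmax) layer; 'L' = a plain inner-product layer, i.e. an ordinary softmax baseline.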
") 93 | 94 | 95 | model = torch.nn.DataParallel(model).to(device) 96 | model_eval = model_eval.to(device) 97 | print(model) 98 | if not os.path.exists(args.save_path): 99 | os.makedirs(args.save_path) 100 | model.module.save(args.save_path + 'CosFace_0_checkpoint.pth') 101 | 102 | # 512 is dimension of feature 103 | classifier = { 104 | 'MCP': layer.MarginCosineProduct(512, args.num_class).to(device), 105 | 'AL' : layer.AngleLinear(512, args.num_class).to(device), 106 | 'L' : torch.nn.Linear(512, args.num_class, bias=False).to(device) 107 | }[args.classifier_type] 108 | 109 | # ------------------------------------load image--------------------------------------- 110 | if args.is_gray: 111 | train_transform = transforms.Compose([ 112 | transforms.Grayscale(), 113 | transforms.RandomHorizontalFlip(), 114 | transforms.ToTensor(), # range [0, 255] -> [0.0,1.0] 115 | transforms.Normalize(mean=(0.5,), std=(0.5,)) 116 | ]) # gray 117 | else: 118 | train_transform = transforms.Compose([ 119 | transforms.RandomHorizontalFlip(), 120 | transforms.ToTensor(), # range [0, 255] -> [0.0,1.0] 121 | transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)) # range [0.0, 1.0] -> [-1.0,1.0] 122 | ]) 123 | train_loader = torch.utils.data.DataLoader( 124 | ImageList(root=args.root_path, fileList=args.train_list, 125 | transform=train_transform), 126 | batch_size=args.batch_size, shuffle=True, 127 | num_workers=args.workers, pin_memory=True, drop_last=True) 128 | 129 | print('length of train Database: ' + str(len(train_loader.dataset))) 130 | print('Number of Identities: ' + str(args.num_class)) 131 | 132 | # --------------------------------loss function and optimizer----------------------------- 133 | criterion = torch.nn.CrossEntropyLoss().to(device) 134 | optimizer = torch.optim.SGD([{'params': model.parameters()}, {'params': classifier.parameters()}], 135 | lr=args.lr, 136 | momentum=args.momentum, 137 | weight_decay=args.weight_decay) 138 | 139 | # ----------------------------------------train---------------------------------------- 140 | # lfw_eval.eval(args.save_path + 'CosFace_0_checkpoint.pth') 141 | for epoch in range(1, args.epochs + 1): 142 | train(train_loader, model, classifier, criterion, optimizer, epoch) 143 | model.module.save(args.save_path + 'CosFace_' + str(epoch) + '_checkpoint.pth') 144 | lfw_eval.eval(model_eval, args.save_path + 'CosFace_' + str(epoch) + '_checkpoint.pth', args.is_gray) 145 | print('Finished Training') 146 | 147 | 148 | def train(train_loader, model, classifier, criterion, optimizer, epoch): 149 | model.train() 150 | print_with_time('Epoch {} start training'.format(epoch)) 151 | time_curr = time.time() 152 | loss_display = 0.0 153 | 154 | for batch_idx, (data, target) in enumerate(train_loader, 1): 155 | iteration = (epoch - 1) * len(train_loader) + batch_idx 156 | adjust_learning_rate(optimizer, iteration, args.step_size) 157 | data, target = data.to(device), target.to(device) 158 | # compute output 159 | output = model(data) 160 | if isinstance(classifier, torch.nn.Linear): 161 | output = classifier(output) 162 | else: 163 | output = classifier(output, target) 164 | loss = criterion(output, target) 165 | loss_display += loss.item() 166 | # compute gradient and do SGD step 167 | optimizer.zero_grad() 168 | loss.backward() 169 | optimizer.step() 170 | 171 | if batch_idx % args.log_interval == 0: 172 | time_used = time.time() - time_curr 173 | loss_display /= args.log_interval 174 | if args.classifier_type is 'MCP': 175 | INFO = ' Margin: {:.4f}, Scale: 
 202 | 203 | 204 | if __name__ == '__main__': 205 | print(args) 206 | main() 207 | -------------------------------------------------------------------------------- /net.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | 4 | 5 | 6 | # -------------------------------------- sphere network Begin -------------------------------------- 7 | class Block(nn.Module): 8 | def __init__(self, planes): 9 | super(Block, self).__init__() 10 | self.conv1 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False) 11 | self.prelu1 = nn.PReLU(planes) 12 | self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False) 13 | self.prelu2 = nn.PReLU(planes) 14 | 15 | def forward(self, x): 16 | return x + self.prelu2(self.conv2(self.prelu1(self.conv1(x)))) 17 | 18 | 19 | class sphere(nn.Module): 20 | def __init__(self, type=20, is_gray=False): 21 | super(sphere, self).__init__() 22 | block = Block 23 | if type == 20: 24 | layers = [1, 2, 4, 1] 25 | elif type == 64: 26 | layers = [3, 7, 16, 3] 27 | else: 28 | raise ValueError('sphere' + str(type) + " IS NOT SUPPORTED! 
(sphere20 or sphere64)") 29 | filter_list = [3, 64, 128, 256, 512] 30 | if is_gray: 31 | filter_list[0] = 1 32 | 33 | self.layer1 = self._make_layer(block, filter_list[0], filter_list[1], layers[0], stride=2) 34 | self.layer2 = self._make_layer(block, filter_list[1], filter_list[2], layers[1], stride=2) 35 | self.layer3 = self._make_layer(block, filter_list[2], filter_list[3], layers[2], stride=2) 36 | self.layer4 = self._make_layer(block, filter_list[3], filter_list[4], layers[3], stride=2) 37 | self.fc = nn.Linear(512 * 7 * 6, 512) 38 | 39 | # Weight initialization 40 | for m in self.modules(): 41 | if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear): 42 | if m.bias is not None: 43 | nn.init.xavier_uniform_(m.weight) 44 | nn.init.constant_(m.bias, 0.0) 45 | else: 46 | nn.init.normal_(m.weight, 0, 0.01) 47 | 48 | 49 | def _make_layer(self, block, inplanes, planes, blocks, stride): 50 | layers = [] 51 | layers.append(nn.Conv2d(inplanes, planes, 3, stride, 1)) 52 | layers.append(nn.PReLU(planes)) 53 | for i in range(blocks): 54 | layers.append(block(planes)) 55 | 56 | return nn.Sequential(*layers) 57 | 58 | def forward(self, x): 59 | x = self.layer1(x) 60 | x = self.layer2(x) 61 | x = self.layer3(x) 62 | x = self.layer4(x) 63 | 64 | x = x.view(x.size(0), -1) 65 | x = self.fc(x) 66 | 67 | return x 68 | 69 | def save(self, file_path): 70 | with open(file_path, 'wb') as f: 71 | torch.save(self.state_dict(), f) 72 | 73 | 74 | # -------------------------------------- sphere network END -------------------------------------- 75 | 76 | # ---------------------------------- LResNet50E-IR network Begin ---------------------------------- 77 | 78 | class BlockIR(nn.Module): 79 | def __init__(self, inplanes, planes, stride, dim_match): 80 | super(BlockIR, self).__init__() 81 | self.bn1 = nn.BatchNorm2d(inplanes) 82 | self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3, stride=1, padding=1, bias=False) 83 | self.bn2 = nn.BatchNorm2d(planes) 84 | self.prelu1 = nn.PReLU(planes) 85 | self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False) 86 | self.bn3 = nn.BatchNorm2d(planes) 87 | 88 | if dim_match: 89 | self.downsample = None 90 | else: 91 | self.downsample = nn.Sequential( 92 | nn.Conv2d(inplanes, planes, kernel_size=1, stride=stride, bias=False), 93 | nn.BatchNorm2d(planes), 94 | ) 95 | 96 | def forward(self, x): 97 | residual = x 98 | 99 | out = self.bn1(x) 100 | out = self.conv1(out) 101 | out = self.bn2(out) 102 | out = self.prelu1(out) 103 | out = self.conv2(out) 104 | out = self.bn3(out) 105 | 106 | if self.downsample is not None: 107 | residual = self.downsample(x) 108 | 109 | out += residual 110 | 111 | return out 112 | 113 | 114 | class LResNet(nn.Module): 115 | 116 | def __init__(self, block, layers, filter_list, is_gray=False): 117 | self.inplanes = 64 118 | super(LResNet, self).__init__() 119 | # input is (mini-batch,3 or 1,112,96) 120 | # use (conv3x3, stride=1, padding=1) instead of (conv7x7, stride=2, padding=3) 121 | if is_gray: 122 | self.conv1 = nn.Conv2d(1, filter_list[0], kernel_size=3, stride=1, padding=1, bias=False) # gray 123 | else: 124 | self.conv1 = nn.Conv2d(3, filter_list[0], kernel_size=3, stride=1, padding=1, bias=False) 125 | self.bn1 = nn.BatchNorm2d(filter_list[0]) 126 | self.prelu1 = nn.PReLU(filter_list[0]) 127 | self.layer1 = self._make_layer(block, filter_list[0], filter_list[1], layers[0], stride=2) 128 | self.layer2 = self._make_layer(block, filter_list[1], filter_list[2], layers[1], stride=2) 129 | self.layer3 = 
self._make_layer(block, filter_list[2], filter_list[3], layers[2], stride=2) 130 | self.layer4 = self._make_layer(block, filter_list[3], filter_list[4], layers[3], stride=2) 131 | self.fc = nn.Sequential( 132 | nn.BatchNorm1d(filter_list[4] * 7 * 6), # 112x96 input downsampled by 2^4 leaves a 7x6 feature map 133 | nn.Dropout(p=0.4), 134 | nn.Linear(filter_list[4] * 7 * 6, 512), 135 | nn.BatchNorm1d(512), # fix gamma ??? 136 | ) 137 | 138 | # Weight initialization 139 | for m in self.modules(): 140 | if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear): 141 | nn.init.xavier_uniform_(m.weight) 142 | if m.bias is not None: 143 | nn.init.constant_(m.bias, 0.0) 144 | elif isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.BatchNorm1d): 145 | nn.init.constant_(m.weight, 1) 146 | nn.init.constant_(m.bias, 0) 147 | 148 | 149 | def _make_layer(self, block, inplanes, planes, blocks, stride): 150 | layers = [] 151 | layers.append(block(inplanes, planes, stride, False)) 152 | for _ in range(1, blocks): 153 | layers.append(block(planes, planes, stride=1, dim_match=True)) 154 | 155 | return nn.Sequential(*layers) 156 | 157 | def forward(self, x): 158 | x = self.conv1(x) 159 | x = self.bn1(x) 160 | x = self.prelu1(x) 161 | 162 | x = self.layer1(x) 163 | x = self.layer2(x) 164 | x = self.layer3(x) 165 | x = self.layer4(x) 166 | 167 | x = x.view(x.size(0), -1) 168 | x = self.fc(x) 169 | 170 | return x 171 | 172 | def save(self, file_path): 173 | with open(file_path, 'wb') as f: 174 | torch.save(self.state_dict(), f) 175 | 176 | 177 | def LResNet50E_IR(is_gray=False): 178 | filter_list = [64, 64, 128, 256, 512] 179 | layers = [3, 4, 14, 3] 180 | return LResNet(BlockIR, layers, filter_list, is_gray) 181 | # ---------------------------------- LResNet50E-IR network End ---------------------------------- -------------------------------------------------------------------------------- /train.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Usage: 3 | # ./train.sh GPU_ID 4 | # 5 | # Example: 6 | # ./train.sh 0,1,2,3 7 | 8 | GPU_ID=$1 9 | CUDA_VISIBLE_DEVICES=${GPU_ID} python -u main.py 2>&1 | tee ./log/cosface_trainlog_$(date +%Y%m%d%H%M).log --------------------------------------------------------------------------------