├── .gitignore ├── AgeDB-30_Evaluation.py ├── Backbones ├── Backbone │ ├── CBAM.py │ ├── MobileFaceNet.py │ ├── __init__.py │ └── __pycache__ │ │ ├── CBAM.cpython-37.pyc │ │ ├── MobileFaceNet.cpython-36.pyc │ │ ├── MobileFaceNet.cpython-37.pyc │ │ ├── __init__.cpython-36.pyc │ │ └── __init__.cpython-37.pyc └── Margin │ ├── ArcMarginProduct.py │ ├── CosineMarginProduct.py │ ├── InnerProduct.py │ ├── __init__.py │ └── __pycache__ │ ├── ArcMarginProduct.cpython-36.pyc │ ├── ArcMarginProduct.cpython-37.pyc │ ├── CosineMarginProduct.cpython-37.pyc │ ├── InnerProduct.cpython-37.pyc │ ├── __init__.cpython-36.pyc │ └── __init__.cpython-37.pyc ├── CFP-FP_Evaluation.py ├── Config ├── __init__.py ├── __pycache__ │ ├── __init__.cpython-36.pyc │ ├── __init__.cpython-37.pyc │ ├── config.cpython-36.pyc │ └── config.cpython-37.pyc └── config.py ├── Datasets ├── __init__.py ├── __pycache__ │ ├── __init__.cpython-36.pyc │ ├── __init__.cpython-37.pyc │ ├── agedb.cpython-37.pyc │ ├── cfp.cpython-37.pyc │ ├── lfw.cpython-36.pyc │ ├── lfw.cpython-37.pyc │ ├── megaface.cpython-37.pyc │ ├── ms1m.cpython-37.pyc │ ├── webface.cpython-36.pyc │ └── webface.cpython-37.pyc ├── agedb.py ├── cfp.py ├── lfw.py ├── megaface.py ├── ms1m.py └── webface.py ├── LFW_Evaluation.py ├── LICENSE ├── MegaFace_Evaluation ├── Extract_MegaFace_Features.py ├── Get_Evaluation_Results.py ├── Plot_Evaluation_Results.py ├── bin │ ├── FuseResults │ └── Identification ├── models │ ├── jb_LBP.bin │ └── jb_identity.bin ├── scripts │ └── matio.py └── templatelists │ ├── facescrub_features_list.json │ └── facescrub_uncropped_features_list.json ├── README.md ├── Test_Data └── readme.txt ├── Train.py ├── Trained_Models └── readme.txt ├── Utils ├── Datasets_Utils │ ├── generate_dataset_list.py │ └── load_images_from_bin.py ├── Other_Utils │ ├── ChangeTimeFormat.py │ ├── Logging.py │ ├── Visualizer.py │ └── __pycache__ │ │ ├── ChangeTimeFormat.cpython-36.pyc │ │ ├── ChangeTimeFormat.cpython-37.pyc │ │ ├── Logging.cpython-37.pyc │ │ └── Visualizer.cpython-37.pyc ├── __init__.py └── __pycache__ │ ├── __init__.cpython-36.pyc │ └── __init__.cpython-37.pyc └── __pycache__ └── LFW_Evaluation.cpython-37.pyc /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | pip-wheel-metadata/ 24 | share/python-wheels/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | MANIFEST 29 | 30 | # PyInstaller 31 | # Usually these files are written by a python script from a template 32 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
33 | *.manifest 34 | *.spec 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .nox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *.cover 50 | *.py,cover 51 | .hypothesis/ 52 | .pytest_cache/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | target/ 76 | 77 | # Jupyter Notebook 78 | .ipynb_checkpoints 79 | 80 | # IPython 81 | profile_default/ 82 | ipython_config.py 83 | 84 | # pyenv 85 | .python-version 86 | 87 | # pipenv 88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 91 | # install all needed dependencies. 92 | #Pipfile.lock 93 | 94 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow 95 | __pypackages__/ 96 | 97 | # Celery stuff 98 | celerybeat-schedule 99 | celerybeat.pid 100 | 101 | # SageMath parsed files 102 | *.sage.py 103 | 104 | # Environments 105 | .env 106 | .venv 107 | env/ 108 | venv/ 109 | ENV/ 110 | env.bak/ 111 | venv.bak/ 112 | 113 | # Spyder project settings 114 | .spyderproject 115 | .spyproject 116 | 117 | # Rope project settings 118 | .ropeproject 119 | 120 | # mkdocs documentation 121 | /site 122 | 123 | # mypy 124 | .mypy_cache/ 125 | .dmypy.json 126 | dmypy.json 127 | 128 | # Pyre type checker 129 | .pyre/ 130 | -------------------------------------------------------------------------------- /AgeDB-30_Evaluation.py: -------------------------------------------------------------------------------- 1 | import os 2 | import torch 3 | import scipy.io 4 | import numpy as np 5 | from Config import args 6 | from Datasets import AgeDB30 7 | from torch.nn import DataParallel 8 | from torch.utils.data import DataLoader 9 | from Backbones.Backbone import MobileFacenet, CBAMResNet 10 | 11 | def getAccuracy(scores, flags, threshold): 12 | p = np.sum(scores[flags == 1] > threshold) 13 | n = np.sum(scores[flags == -1] < threshold) 14 | return 1.0 * (p + n) / len(scores) 15 | 16 | def getThreshold(scores, flags, thrNum): 17 | accuracys = np.zeros((2 * thrNum + 1, 1)) 18 | thresholds = np.arange(-thrNum, thrNum + 1) * 1.0 / thrNum 19 | for i in range(2 * thrNum + 1): 20 | accuracys[i] = getAccuracy(scores, flags, thresholds[i]) 21 | max_index = np.squeeze(accuracys == np.max(accuracys)) 22 | bestThreshold = np.mean(thresholds[max_index]) 23 | return bestThreshold 24 | 25 | def evaluation_10_fold(feature_path='./result/cur_epoch_agedb_result.mat'): 26 | ACCs = np.zeros(10) 27 | result = scipy.io.loadmat(feature_path) 28 | for i in range(10): 29 | fold = result['fold'] 30 | flags = result['flag'] 31 | featureLs = result['fl'] 32 | featureRs = result['fr'] 33 | 34 | valFold = fold != i 35 | testFold = fold == i 36 | flags = np.squeeze(flags) 37 | 38 | mu = np.mean(np.concatenate((featureLs[valFold[0], :], featureRs[valFold[0], :]), 0), 0) 39 | mu = np.expand_dims(mu, 0) 40 | featureLs = featureLs - mu 41 | featureRs = featureRs - mu 42 | featureLs = featureLs / np.expand_dims(np.sqrt(np.sum(np.power(featureLs, 2), 1)), 1) 43 | 
featureRs = featureRs / np.expand_dims(np.sqrt(np.sum(np.power(featureRs, 2), 1)), 1)
44 |
45 |         scores = np.sum(np.multiply(featureLs, featureRs), 1)
46 |         threshold = getThreshold(scores[valFold[0]], flags[valFold[0]], 10000)
47 |         ACCs[i] = getAccuracy(scores[testFold[0]], flags[testFold[0]], threshold)
48 |
49 |     return ACCs
50 |
51 | def loadModel(data_root, file_list, backbone_net, gpus='0', model_para_path=None):  # note: the gpus argument is unused; device selection follows args.gpus
52 |     # gpu init
53 |     os.environ['CUDA_VISIBLE_DEVICES'] = ','.join(map(str, args.gpus))
54 |     device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
55 |
56 |     # backbone
57 |     backbones = {'MobileFaceNet': MobileFacenet(),
58 |                  'ResNet50_IR': CBAMResNet(50, feature_dim=args.feature_dim, mode='ir'),
59 |                  'SEResNet50_IR': CBAMResNet(50, feature_dim=args.feature_dim, mode='ir_se'),
60 |                  'ResNet100_IR': CBAMResNet(100, feature_dim=args.feature_dim, mode='ir'),
61 |                  'SEResNet100_IR': CBAMResNet(100, feature_dim=args.feature_dim, mode='ir_se')}
62 |     if backbone_net in backbones:
63 |         net = backbones[backbone_net]
64 |     else:
65 |         raise ValueError(backbone_net + ' is not available!')  # fail fast: printing and falling through would hit a NameError on 'net' below
66 |
67 |     # load parameter
68 |     net.load_state_dict(torch.load(model_para_path))
69 |
70 |     if args.use_multi_gpus:
71 |         net = DataParallel(net).to(device)
72 |     else:
73 |         net = net.to(device)
74 |
75 |     # dataset and dataloader
76 |     agedb_dataset = AgeDB30(data_root, file_list)
77 |     agedb_loader = DataLoader(agedb_dataset, batch_size=128, shuffle=False, num_workers=4, drop_last=False)
78 |
79 |     return net.eval(), device, agedb_dataset, agedb_loader
80 |
81 | def getFeatureFromTorch(feature_save_dir, net, device, data_set, data_loader):
82 |     featureLs = None
83 |     featureRs = None
84 |     count = 0
85 |     for data in data_loader:
86 |         for i in range(len(data)):
87 |             data[i] = data[i].to(device)
88 |         count += data[0].size(0)
89 |         #print('extracting deep features from the face pair {}...'.format(count))
90 |         with torch.no_grad():
91 |             res = [net(d).data.cpu().numpy() for d in data]
92 |         featureL = np.concatenate((res[0], res[1]), 1)
93 |         featureR = np.concatenate((res[2], res[3]), 1)
94 |         # print(featureL.shape, featureR.shape)
95 |         if featureLs is None:
96 |             featureLs = featureL
97 |         else:
98 |             featureLs = np.concatenate((featureLs, featureL), 0)
99 |         if featureRs is None:
100 |             featureRs = featureR
101 |         else:
102 |             featureRs = np.concatenate((featureRs, featureR), 0)
103 |         # print(featureLs.shape, featureRs.shape)
104 |
105 |     result = {'fl': featureLs, 'fr': featureRs, 'fold': data_set.folds, 'flag': data_set.labels}
106 |     scipy.io.savemat(feature_save_dir, result)
107 |
108 | if __name__ == '__main__':
109 |     model_para_path = 'Trained_Models/CASIA_WebFace_MobileFace_2020-08-12 16:24:48/Iter_53400_net.pth'
110 |     net, device, agedb_dataset, agedb_loader = loadModel(args.agedb_dataset_path, args.agedb_file_list, args.backbone, args.gpus, model_para_path)
111 |     getFeatureFromTorch('Test_Data/cur_agedb_result.mat', net, device, agedb_dataset, agedb_loader)
112 |     ACCs = evaluation_10_fold('Test_Data/cur_agedb_result.mat')
113 |     for i in range(len(ACCs)):
114 |         print('{} {:.2f}'.format(i + 1, ACCs[i] * 100))
115 |     print('--------')
116 |     print('AVE {:.4f}'.format(np.mean(ACCs) * 100))
117 |
118 |
--------------------------------------------------------------------------------
/Backbones/Backbone/CBAM.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 |
4 | class Flatten(nn.Module):
5 |     def forward(self, input):
6 |         return
input.view(input.size(0), -1) 7 | 8 | class SEModule(nn.Module): 9 | '''Squeeze and Excitation Module''' 10 | def __init__(self, channels, reduction): 11 | super(SEModule, self).__init__() 12 | self.avg_pool = nn.AdaptiveAvgPool2d(1) 13 | self.fc1 = nn.Conv2d(channels, channels // reduction, kernel_size=1, padding=0, bias=False) 14 | self.relu = nn.ReLU(inplace=True) 15 | self.fc2 = nn.Conv2d(channels // reduction, channels, kernel_size=1, padding=0, bias=False) 16 | self.sigmoid = nn.Sigmoid() 17 | 18 | def forward(self, x): 19 | input = x 20 | x = self.avg_pool(x) 21 | x = self.fc1(x) 22 | x = self.relu(x) 23 | x = self.fc2(x) 24 | x = self.sigmoid(x) 25 | 26 | return input * x 27 | 28 | class CAModule(nn.Module): 29 | '''Channel Attention Module''' 30 | def __init__(self, channels, reduction): 31 | super(CAModule, self).__init__() 32 | self.avg_pool = nn.AdaptiveAvgPool2d(1) 33 | self.max_pool = nn.AdaptiveMaxPool2d(1) 34 | self.shared_mlp = nn.Sequential(nn.Conv2d(channels, channels // reduction, kernel_size=1, padding=0, bias=False), 35 | nn.ReLU(inplace=True), 36 | nn.Conv2d(channels // reduction, channels, kernel_size=1, padding=0, bias=False)) 37 | self.sigmoid = nn.Sigmoid() 38 | 39 | def forward(self, x): 40 | input = x 41 | avg_pool = self.avg_pool(x) 42 | max_pool = self.max_pool(x) 43 | x = self.shared_mlp(avg_pool) + self.shared_mlp(max_pool) 44 | x = self.sigmoid(x) 45 | 46 | return input * x 47 | 48 | class SAModule(nn.Module): 49 | '''Spatial Attention Module''' 50 | def __init__(self): 51 | super(SAModule, self).__init__() 52 | self.conv = nn.Conv2d(2, 1, kernel_size=3, padding=1, bias=False) 53 | self.sigmoid = nn.Sigmoid() 54 | 55 | def forward(self, x): 56 | input = x 57 | avg_c = torch.mean(x, 1, True) 58 | max_c, _ = torch.max(x, 1, True) 59 | x = torch.cat((avg_c, max_c), 1) 60 | x = self.conv(x) 61 | x = self.sigmoid(x) 62 | return input * x 63 | 64 | class BottleNeck_IR(nn.Module): 65 | '''Improved Residual Bottlenecks''' 66 | def __init__(self, in_channel, out_channel, stride, dim_match): 67 | super(BottleNeck_IR, self).__init__() 68 | self.res_layer = nn.Sequential(nn.BatchNorm2d(in_channel), 69 | nn.Conv2d(in_channel, out_channel, (3, 3), 1, 1, bias=False), 70 | nn.BatchNorm2d(out_channel), 71 | nn.PReLU(out_channel), 72 | nn.Conv2d(out_channel, out_channel, (3, 3), stride, 1, bias=False), 73 | nn.BatchNorm2d(out_channel)) 74 | if dim_match: 75 | self.shortcut_layer = None 76 | else: 77 | self.shortcut_layer = nn.Sequential( 78 | nn.Conv2d(in_channel, out_channel, kernel_size=(1, 1), stride=stride, bias=False), 79 | nn.BatchNorm2d(out_channel) 80 | ) 81 | 82 | def forward(self, x): 83 | shortcut = x 84 | res = self.res_layer(x) 85 | 86 | if self.shortcut_layer is not None: 87 | shortcut = self.shortcut_layer(x) 88 | 89 | return shortcut + res 90 | 91 | class BottleNeck_IR_SE(nn.Module): 92 | '''Improved Residual Bottlenecks with Squeeze and Excitation Module''' 93 | def __init__(self, in_channel, out_channel, stride, dim_match): 94 | super(BottleNeck_IR_SE, self).__init__() 95 | self.res_layer = nn.Sequential(nn.BatchNorm2d(in_channel), 96 | nn.Conv2d(in_channel, out_channel, (3, 3), 1, 1, bias=False), 97 | nn.BatchNorm2d(out_channel), 98 | nn.PReLU(out_channel), 99 | nn.Conv2d(out_channel, out_channel, (3, 3), stride, 1, bias=False), 100 | nn.BatchNorm2d(out_channel), 101 | SEModule(out_channel, 16)) 102 | if dim_match: 103 | self.shortcut_layer = None 104 | else: 105 | self.shortcut_layer = nn.Sequential( 106 | nn.Conv2d(in_channel, out_channel, 
kernel_size=(1, 1), stride=stride, bias=False), 107 | nn.BatchNorm2d(out_channel) 108 | ) 109 | 110 | def forward(self, x): 111 | shortcut = x 112 | res = self.res_layer(x) 113 | 114 | if self.shortcut_layer is not None: 115 | shortcut = self.shortcut_layer(x) 116 | 117 | return shortcut + res 118 | 119 | class BottleNeck_IR_CAM(nn.Module): 120 | '''Improved Residual Bottlenecks with Channel Attention Module''' 121 | def __init__(self, in_channel, out_channel, stride, dim_match): 122 | super(BottleNeck_IR_CAM, self).__init__() 123 | self.res_layer = nn.Sequential(nn.BatchNorm2d(in_channel), 124 | nn.Conv2d(in_channel, out_channel, (3, 3), 1, 1, bias=False), 125 | nn.BatchNorm2d(out_channel), 126 | nn.PReLU(out_channel), 127 | nn.Conv2d(out_channel, out_channel, (3, 3), stride, 1, bias=False), 128 | nn.BatchNorm2d(out_channel), 129 | CAModule(out_channel, 16)) 130 | if dim_match: 131 | self.shortcut_layer = None 132 | else: 133 | self.shortcut_layer = nn.Sequential( 134 | nn.Conv2d(in_channel, out_channel, kernel_size=(1, 1), stride=stride, bias=False), 135 | nn.BatchNorm2d(out_channel) 136 | ) 137 | 138 | def forward(self, x): 139 | shortcut = x 140 | res = self.res_layer(x) 141 | 142 | if self.shortcut_layer is not None: 143 | shortcut = self.shortcut_layer(x) 144 | 145 | return shortcut + res 146 | 147 | class BottleNeck_IR_SAM(nn.Module): 148 | '''Improved Residual Bottlenecks with Spatial Attention Module''' 149 | def __init__(self, in_channel, out_channel, stride, dim_match): 150 | super(BottleNeck_IR_SAM, self).__init__() 151 | self.res_layer = nn.Sequential(nn.BatchNorm2d(in_channel), 152 | nn.Conv2d(in_channel, out_channel, (3, 3), 1, 1, bias=False), 153 | nn.BatchNorm2d(out_channel), 154 | nn.PReLU(out_channel), 155 | nn.Conv2d(out_channel, out_channel, (3, 3), stride, 1, bias=False), 156 | nn.BatchNorm2d(out_channel), 157 | SAModule()) 158 | if dim_match: 159 | self.shortcut_layer = None 160 | else: 161 | self.shortcut_layer = nn.Sequential( 162 | nn.Conv2d(in_channel, out_channel, kernel_size=(1, 1), stride=stride, bias=False), 163 | nn.BatchNorm2d(out_channel) 164 | ) 165 | 166 | def forward(self, x): 167 | shortcut = x 168 | res = self.res_layer(x) 169 | 170 | if self.shortcut_layer is not None: 171 | shortcut = self.shortcut_layer(x) 172 | 173 | return shortcut + res 174 | 175 | class BottleNeck_IR_CBAM(nn.Module): 176 | '''Improved Residual Bottleneck with Channel Attention Module and Spatial Attention Module''' 177 | def __init__(self, in_channel, out_channel, stride, dim_match): 178 | super(BottleNeck_IR_CBAM, self).__init__() 179 | self.res_layer = nn.Sequential(nn.BatchNorm2d(in_channel), 180 | nn.Conv2d(in_channel, out_channel, (3, 3), 1, 1, bias=False), 181 | nn.BatchNorm2d(out_channel), 182 | nn.PReLU(out_channel), 183 | nn.Conv2d(out_channel, out_channel, (3, 3), stride, 1, bias=False), 184 | nn.BatchNorm2d(out_channel), 185 | CAModule(out_channel, 16), 186 | SAModule() 187 | ) 188 | if dim_match: 189 | self.shortcut_layer = None 190 | else: 191 | self.shortcut_layer = nn.Sequential( 192 | nn.Conv2d(in_channel, out_channel, kernel_size=(1, 1), stride=stride, bias=False), 193 | nn.BatchNorm2d(out_channel) 194 | ) 195 | 196 | def forward(self, x): 197 | shortcut = x 198 | res = self.res_layer(x) 199 | 200 | if self.shortcut_layer is not None: 201 | shortcut = self.shortcut_layer(x) 202 | 203 | return shortcut + res 204 | 205 | 206 | filter_list = [64, 64, 128, 256, 512] 207 | def get_layers(num_layers): 208 | if num_layers == 50: 209 | return [3, 4, 14, 3] 210 | elif 
num_layers == 100: 211 | return [3, 13, 30, 3] 212 | elif num_layers == 152: 213 | return [3, 8, 36, 3] 214 | 215 | class CBAMResNet(nn.Module): 216 | def __init__(self, num_layers, feature_dim=512, drop_ratio=0.4, mode='ir',filter_list=filter_list): 217 | super(CBAMResNet, self).__init__() 218 | assert num_layers in [50, 100, 152], 'num_layers should be 50, 100 or 152' 219 | assert mode in ['ir', 'ir_se', 'ir_cam', 'ir_sam', 'ir_cbam'], 'mode should be ir, ir_se, ir_cam, ir_sam or ir_cbam' 220 | layers = get_layers(num_layers) 221 | if mode == 'ir': 222 | block = BottleNeck_IR 223 | elif mode == 'ir_se': 224 | block = BottleNeck_IR_SE 225 | elif mode == 'ir_cam': 226 | block = BottleNeck_IR_CAM 227 | elif mode == 'ir_sam': 228 | block = BottleNeck_IR_SAM 229 | elif mode == 'ir_cbam': 230 | block = BottleNeck_IR_CBAM 231 | 232 | self.input_layer = nn.Sequential(nn.Conv2d(3, 64, (3, 3), stride=1, padding=1, bias=False), 233 | nn.BatchNorm2d(64), 234 | nn.PReLU(64)) 235 | self.layer1 = self._make_layer(block, filter_list[0], filter_list[1], layers[0], stride=2) 236 | self.layer2 = self._make_layer(block, filter_list[1], filter_list[2], layers[1], stride=2) 237 | self.layer3 = self._make_layer(block, filter_list[2], filter_list[3], layers[2], stride=2) 238 | self.layer4 = self._make_layer(block, filter_list[3], filter_list[4], layers[3], stride=2) 239 | 240 | self.output_layer = nn.Sequential(nn.BatchNorm2d(512), 241 | nn.Dropout(drop_ratio), 242 | Flatten(), 243 | nn.Linear(512 * 7 * 7, feature_dim), 244 | nn.BatchNorm1d(feature_dim)) 245 | 246 | # weight initialization 247 | for m in self.modules(): 248 | if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear): 249 | nn.init.xavier_uniform_(m.weight) 250 | if m.bias is not None: 251 | nn.init.constant_(m.bias, 0.0) 252 | elif isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.BatchNorm1d): 253 | nn.init.constant_(m.weight, 1) 254 | nn.init.constant_(m.bias, 0) 255 | 256 | def _make_layer(self, block, in_channel, out_channel, blocks, stride): 257 | layers = [] 258 | layers.append(block(in_channel, out_channel, stride, False)) 259 | for i in range(1, blocks): 260 | layers.append(block(out_channel, out_channel, 1, True)) 261 | 262 | return nn.Sequential(*layers) 263 | 264 | def forward(self, x): 265 | x = self.input_layer(x) 266 | x = self.layer1(x) 267 | x = self.layer2(x) 268 | x = self.layer3(x) 269 | x = self.layer4(x) 270 | x = self.output_layer(x) 271 | 272 | return x 273 | 274 | if __name__ == '__main__': 275 | input = torch.Tensor(256, 3, 112, 112) 276 | net = CBAMResNet(50, mode='ir') 277 | out = net(input) 278 | print(out.shape) 279 | -------------------------------------------------------------------------------- /Backbones/Backbone/MobileFaceNet.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | import torch 3 | import math 4 | from apex import amp 5 | from Config import args 6 | 7 | Mobilefacenet_bottleneck_setting = [ 8 | # t, c , n ,s 9 | [2, 64, 5, 2], 10 | [4, 128, 1, 2], 11 | [2, 128, 6, 1], 12 | [4, 128, 1, 2], 13 | [2, 128, 2, 1] 14 | ] 15 | 16 | class Bottleneck(nn.Module): 17 | def __init__(self, in_channels, out_channels, stride, expansion): 18 | super(Bottleneck, self).__init__() 19 | self.connect = stride == 1 and in_channels == out_channels 20 | 21 | self.conv = nn.Sequential( 22 | #1*1 conv 23 | nn.Conv2d(in_channels, in_channels * expansion, 1, 1, 0, bias=False), 24 | nn.BatchNorm2d(in_channels * expansion), 25 | nn.PReLU(in_channels * expansion), 26 | 
27 | #3*3 depth wise conv 28 | nn.Conv2d(in_channels * expansion, in_channels * expansion, 3, stride, 1, groups=in_channels * expansion, bias=False), 29 | nn.BatchNorm2d(in_channels * expansion), 30 | nn.PReLU(in_channels * expansion), 31 | 32 | #1*1 conv linear 33 | nn.Conv2d(in_channels * expansion, out_channels, 1, 1, 0, bias=False), 34 | nn.BatchNorm2d(out_channels), 35 | ) 36 | 37 | def forward(self, x): 38 | if self.connect: 39 | return x + self.conv(x) 40 | else: 41 | return self.conv(x) 42 | 43 | class ConvBlock(nn.Module): 44 | def __init__(self, in_channels, out_channels, k, s, p, dw=False, linear=False): 45 | super(ConvBlock, self).__init__() 46 | self.linear = linear 47 | 48 | # conv, bn, prelu 49 | if dw: 50 | self.conv = nn.Conv2d(in_channels, out_channels, k, s, p, groups = in_channels, bias = False) 51 | else: 52 | self.conv = nn.Conv2d(in_channels, out_channels, k, s, p, bias = False) 53 | self.bn = nn.BatchNorm2d(out_channels) 54 | if not linear: 55 | self.prelu = nn.PReLU(out_channels) 56 | 57 | def forward(self, x): 58 | x = self.conv(x) 59 | x = self.bn(x) 60 | if self.linear: 61 | return x 62 | else: 63 | return self.prelu(x) 64 | 65 | class MobileFacenet(nn.Module): 66 | def __init__(self, bottleneck_setting=Mobilefacenet_bottleneck_setting): 67 | super(MobileFacenet, self).__init__() 68 | 69 | self.conv3 = ConvBlock(3, 64, 3, 2, 1) 70 | 71 | self.dw_conv3 = ConvBlock(64, 64, 3, 1, 1, dw=True) 72 | 73 | self.in_channels = 64 74 | bottleneck = Bottleneck 75 | self.bottlenecks = self._make_layer(bottleneck, bottleneck_setting) 76 | 77 | self.conv1 = ConvBlock(128, 512, 1, 1, 0) 78 | 79 | self.linear_GDConv7 = ConvBlock(512, 512, 7, 1, 0, dw=True, linear=True) 80 | 81 | self.linear_conv1 = ConvBlock(512, 128, 1, 1, 0, linear=True) 82 | 83 | # parameter init 84 | for m in self.modules(): 85 | if isinstance(m, nn.Conv2d): 86 | # kaiming_normal 87 | n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels 88 | m.weight.data.normal_(0, math.sqrt(2. 
/ n)) 89 | elif isinstance(m, nn.BatchNorm2d): 90 | m.weight.data.fill_(1) 91 | m.bias.data.zero_() 92 | 93 | # prevent overflow errors 94 | if args.use_amp == True: 95 | amp.register_float_function(torch, 'sigmoid') 96 | amp.register_float_function(torch, 'softmax') 97 | 98 | def _make_layer(self, block, setting): 99 | layers = [] 100 | for t, c, n, s in setting: 101 | for i in range(n): 102 | if i == 0: 103 | layers.append(block(self.in_channels, c, s, t)) 104 | else: 105 | layers.append(block(self.in_channels, c, 1, t)) 106 | self.in_channels = c 107 | 108 | return nn.Sequential(*layers) 109 | 110 | def forward(self, x): 111 | x = self.conv3(x) 112 | x = self.dw_conv3(x) 113 | x = self.bottlenecks(x) 114 | x = self.conv1(x) 115 | x = self.linear_GDConv7(x) 116 | x = self.linear_conv1(x) 117 | x = x.view(x.shape[0], -1) 118 | return x 119 | 120 | if __name__ == "__main__": 121 | input = torch.Tensor(256, 3, 112, 112) 122 | net = MobileFacenet() 123 | x = net(input) 124 | print(input.shape) 125 | print(x.shape) -------------------------------------------------------------------------------- /Backbones/Backbone/__init__.py: -------------------------------------------------------------------------------- 1 | from Backbones.Backbone.MobileFaceNet import MobileFacenet 2 | from Backbones.Backbone.CBAM import CBAMResNet -------------------------------------------------------------------------------- /Backbones/Backbone/__pycache__/CBAM.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuasarLight/Pytorch_Face_Recognition/abdd11b0067658f3857b94f52b136ff6839ffece/Backbones/Backbone/__pycache__/CBAM.cpython-37.pyc -------------------------------------------------------------------------------- /Backbones/Backbone/__pycache__/MobileFaceNet.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuasarLight/Pytorch_Face_Recognition/abdd11b0067658f3857b94f52b136ff6839ffece/Backbones/Backbone/__pycache__/MobileFaceNet.cpython-36.pyc -------------------------------------------------------------------------------- /Backbones/Backbone/__pycache__/MobileFaceNet.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuasarLight/Pytorch_Face_Recognition/abdd11b0067658f3857b94f52b136ff6839ffece/Backbones/Backbone/__pycache__/MobileFaceNet.cpython-37.pyc -------------------------------------------------------------------------------- /Backbones/Backbone/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuasarLight/Pytorch_Face_Recognition/abdd11b0067658f3857b94f52b136ff6839ffece/Backbones/Backbone/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /Backbones/Backbone/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuasarLight/Pytorch_Face_Recognition/abdd11b0067658f3857b94f52b136ff6839ffece/Backbones/Backbone/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /Backbones/Margin/ArcMarginProduct.py: -------------------------------------------------------------------------------- 1 | import math 2 | import torch 3 | import torch.nn as nn 4 | import 
torch.nn.functional as F
5 | from apex import amp
6 | from Config import args
7 | from torch.nn import Parameter
8 |
9 | class ArcMarginProduct(nn.Module):
10 |     def __init__(self, in_feature=128, out_feature=10575, s=32.0, m=0.50, easy_margin=False):
11 |         super(ArcMarginProduct, self).__init__()
12 |         self.in_feature = in_feature
13 |         self.out_feature = out_feature
14 |         self.s = s
15 |         self.m = m
16 |         self.weight = Parameter(torch.Tensor(out_feature, in_feature))
17 |         nn.init.xavier_uniform_(self.weight)
18 |
19 |         self.easy_margin = easy_margin
20 |         self.cos_m = math.cos(m)
21 |         self.sin_m = math.sin(m)
22 |
23 |         # keep cos(theta + m) monotonically decreasing while theta is in [0°, 180°]
24 |         self.th = math.cos(math.pi - m)
25 |         self.mm = math.sin(math.pi - m) * m
26 |
27 |         if args.use_amp:
28 |             amp.register_half_function(torch, 'where')
29 |
30 |     def forward(self, x, label):
31 |         # cos(theta)
32 |         cosine = F.linear(F.normalize(x), F.normalize(self.weight))
33 |
34 |         # cos(theta + m) = cos(theta)cos(m) - sin(theta)sin(m)
35 |         sine = torch.sqrt((1.0 - torch.pow(cosine, 2)).clamp(0, 1))  # clamp guards against NaN when cos(theta) is exactly +/-1
36 |         phi = cosine * self.cos_m - sine * self.sin_m
37 |
38 |         if self.easy_margin:
39 |             phi = torch.where(cosine > 0, phi, cosine)
40 |         else:
41 |             phi = torch.where((cosine - self.th) > 0, phi, cosine - self.mm)
42 |
43 |         # one_hot = torch.zeros(cosine.size(), device='cuda' if torch.cuda.is_available() else 'cpu')
44 |         one_hot = torch.zeros_like(cosine)
45 |         one_hot.scatter_(1, label.view(-1, 1), 1)
46 |         output = (one_hot * phi) + ((1.0 - one_hot) * cosine)
47 |         output = output * self.s
48 |
49 |         return output
50 |
51 | if __name__ == '__main__':
52 |     input = torch.randn(256, 128)  # random features; an uninitialised torch.Tensor can contain NaNs that break the sqrt above
53 |     label = torch.LongTensor(256).zero_()
54 |     margin = ArcMarginProduct()
55 |     output = margin(input, label)
56 |     print(output.shape)
57 |
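For context, a minimal sketch of how a margin head like this is typically wired to a backbone during training. The repo's Train.py is not shown in this section, so the variable names and exact wiring below are illustrative assumptions, not the repo's code:

    # Illustrative only: assumes a 128-d MobileFaceNet embedding and CASIA-WebFace's 10575 classes.
    # 'images' is an (N, 3, 112, 112) float tensor, 'labels' an (N,) LongTensor of class ids.
    backbone = MobileFacenet()
    margin = ArcMarginProduct(in_feature=128, out_feature=10575, s=32.0)
    criterion = torch.nn.CrossEntropyLoss()

    embeddings = backbone(images)        # (N, 128) face embeddings
    logits = margin(embeddings, labels)  # s * cos(theta), with cos(theta + m) substituted at each target class
    loss = criterion(logits, labels)     # standard softmax cross-entropy on the margin-adjusted logits

At evaluation time only the backbone is kept (see the evaluation scripts, which load just the net checkpoint); the margin head exists purely to shape the training loss.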
--------------------------------------------------------------------------------
/Backbones/Margin/CosineMarginProduct.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | import torch.nn.functional as F
4 | from torch.nn import Parameter
5 |
6 | class CosineMarginProduct(nn.Module):
7 |     def __init__(self, in_feature=128, out_feature=10575, s=30.0, m=0.35):
8 |         super(CosineMarginProduct, self).__init__()
9 |         self.in_feature = in_feature
10 |         self.out_feature = out_feature
11 |         self.s = s
12 |         self.m = m
13 |         self.weight = Parameter(torch.Tensor(out_feature, in_feature))
14 |         nn.init.xavier_uniform_(self.weight)
15 |
16 |
17 |     def forward(self, input, label):
18 |         cosine = F.linear(F.normalize(input), F.normalize(self.weight))
19 |         # one_hot = torch.zeros(cosine.size(), device='cuda' if torch.cuda.is_available() else 'cpu')
20 |         one_hot = torch.zeros_like(cosine)
21 |         one_hot.scatter_(1, label.view(-1, 1), 1.0)
22 |
23 |         output = self.s * (cosine - one_hot * self.m)
24 |         return output
25 |
26 | if __name__ == '__main__':
27 |     input = torch.randn(256, 128)  # random features; an uninitialised torch.Tensor can contain NaNs
28 |     label = torch.LongTensor(256).zero_()
29 |     margin = CosineMarginProduct()
30 |     output = margin(input, label)
31 |     print(output.shape)
--------------------------------------------------------------------------------
/Backbones/Margin/InnerProduct.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # encoding: utf-8
3 | '''
4 | @author: wujiyang
5 | @contact: wujiyang@hust.edu.cn
6 | @file: InnerProduct.py
7 | @time: 2019/1/4 16:54
8 | @desc: just a normal inner product, as a fully connected layer does.
9 | '''
10 | import torch
11 | import torch.nn as nn
12 | import torch.nn.functional as F
13 | from torch.nn import Parameter
14 |
15 | class InnerProduct(nn.Module):
16 |     def __init__(self, in_feature=128, out_feature=10575):
17 |         super(InnerProduct, self).__init__()
18 |         self.in_feature = in_feature
19 |         self.out_feature = out_feature
20 |
21 |         self.weight = Parameter(torch.Tensor(out_feature, in_feature))
22 |         nn.init.xavier_uniform_(self.weight)
23 |
24 |
25 |     def forward(self, input, label):
26 |         # label is not used here
27 |         output = F.linear(input, self.weight)
28 |         return output
29 |
30 |
31 | if __name__ == '__main__':
32 |     pass
--------------------------------------------------------------------------------
/Backbones/Margin/__init__.py:
--------------------------------------------------------------------------------
1 | from Backbones.Margin.ArcMarginProduct import ArcMarginProduct
2 | from Backbones.Margin.CosineMarginProduct import CosineMarginProduct
3 | from Backbones.Margin.InnerProduct import InnerProduct
--------------------------------------------------------------------------------
/Backbones/Margin/__pycache__/ArcMarginProduct.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/QuasarLight/Pytorch_Face_Recognition/abdd11b0067658f3857b94f52b136ff6839ffece/Backbones/Margin/__pycache__/ArcMarginProduct.cpython-36.pyc
--------------------------------------------------------------------------------
/Backbones/Margin/__pycache__/ArcMarginProduct.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/QuasarLight/Pytorch_Face_Recognition/abdd11b0067658f3857b94f52b136ff6839ffece/Backbones/Margin/__pycache__/ArcMarginProduct.cpython-37.pyc
--------------------------------------------------------------------------------
/Backbones/Margin/__pycache__/CosineMarginProduct.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/QuasarLight/Pytorch_Face_Recognition/abdd11b0067658f3857b94f52b136ff6839ffece/Backbones/Margin/__pycache__/CosineMarginProduct.cpython-37.pyc
--------------------------------------------------------------------------------
/Backbones/Margin/__pycache__/InnerProduct.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/QuasarLight/Pytorch_Face_Recognition/abdd11b0067658f3857b94f52b136ff6839ffece/Backbones/Margin/__pycache__/InnerProduct.cpython-37.pyc
--------------------------------------------------------------------------------
/Backbones/Margin/__pycache__/__init__.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/QuasarLight/Pytorch_Face_Recognition/abdd11b0067658f3857b94f52b136ff6839ffece/Backbones/Margin/__pycache__/__init__.cpython-36.pyc
--------------------------------------------------------------------------------
/Backbones/Margin/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/QuasarLight/Pytorch_Face_Recognition/abdd11b0067658f3857b94f52b136ff6839ffece/Backbones/Margin/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/CFP-FP_Evaluation.py:
--------------------------------------------------------------------------------
1 | import os
2 | import torch
3 | import scipy.io
4 | import numpy as np
5 | from Config import args
6 | from Datasets import CFP_FP
7 | from torch.nn import DataParallel
8 | from torch.utils.data import DataLoader
9 | from Backbones.Backbone import MobileFacenet, CBAMResNet
10 |
11 | def getAccuracy(scores, flags, threshold):
12 |     p = np.sum(scores[flags == 1] > threshold)
13 |     n = np.sum(scores[flags == -1] < threshold)
14 |     return 1.0 * (p + n) / len(scores)
15 |
16 | def getThreshold(scores, flags, thrNum):
17 |     accuracys = np.zeros((2 * thrNum + 1, 1))
18 |     thresholds = np.arange(-thrNum, thrNum + 1) * 1.0 / thrNum
19 |     for i in range(2 * thrNum + 1):
20 |         accuracys[i] = getAccuracy(scores, flags, thresholds[i])
21 |     max_index = np.squeeze(accuracys == np.max(accuracys))
22 |     bestThreshold = np.mean(thresholds[max_index])
23 |     return bestThreshold
24 |
25 | def evaluation_10_fold(feature_path='./result/cur_epoch_cfp_result.mat'):
26 |     ACCs = np.zeros(10)
27 |     result = scipy.io.loadmat(feature_path)
28 |     for i in range(10):
29 |         fold = result['fold']
30 |         flags = result['flag']
31 |         featureLs = result['fl']
32 |         featureRs = result['fr']
33 |
34 |         valFold = fold != i
35 |         testFold = fold == i
36 |         flags = np.squeeze(flags)
37 |
38 |         mu = np.mean(np.concatenate((featureLs[valFold[0], :], featureRs[valFold[0], :]), 0), 0)
39 |         mu = np.expand_dims(mu, 0)
40 |         featureLs = featureLs - mu
41 |         featureRs = featureRs - mu
42 |         featureLs = featureLs / np.expand_dims(np.sqrt(np.sum(np.power(featureLs, 2), 1)), 1)
43 |         featureRs = featureRs / np.expand_dims(np.sqrt(np.sum(np.power(featureRs, 2), 1)), 1)
44 |
45 |         scores = np.sum(np.multiply(featureLs, featureRs), 1)
46 |         threshold = getThreshold(scores[valFold[0]], flags[valFold[0]], 10000)
47 |         ACCs[i] = getAccuracy(scores[testFold[0]], flags[testFold[0]], threshold)
48 |
49 |     return ACCs
50 |
51 | def loadModel(data_root, file_list, backbone_net, gpus='0', model_para_path=None):  # note: the gpus argument is unused; device selection follows args.gpus
52 |     # gpu init
53 |     os.environ['CUDA_VISIBLE_DEVICES'] = ','.join(map(str, args.gpus))
54 |     device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
55 |
56 |     # backbone
57 |     backbones = {'MobileFaceNet': MobileFacenet(),
58 |                  'ResNet50_IR': CBAMResNet(50, feature_dim=args.feature_dim, mode='ir'),
59 |                  'SEResNet50_IR': CBAMResNet(50, feature_dim=args.feature_dim, mode='ir_se'),
60 |                  'ResNet100_IR': CBAMResNet(100, feature_dim=args.feature_dim, mode='ir'),
61 |                  'SEResNet100_IR': CBAMResNet(100, feature_dim=args.feature_dim, mode='ir_se')}
62 |     if backbone_net in backbones:
63 |         net = backbones[backbone_net]
64 |     else:
65 |         raise ValueError(backbone_net + ' is not available!')  # fail fast: printing and falling through would hit a NameError on 'net' below
66 |
67 |     # load parameter
68 |     net.load_state_dict(torch.load(model_para_path))
69 |
70 |     if args.use_multi_gpus:
71 |         net = DataParallel(net).to(device)
72 |     else:
73 |         net = net.to(device)
74 |
75 |     # dataset and dataloader
76 |     cfp_dataset = CFP_FP(data_root, file_list)
77 |     cfp_loader = DataLoader(cfp_dataset, batch_size=128, shuffle=False, num_workers=4, drop_last=False)
78 |
79 |     return net.eval(), device, cfp_dataset, cfp_loader
80 |
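A note on the feature-extraction function that follows (the same pattern is used by the AgeDB-30 and LFW scripts): getFeatureFromTorch() embeds each image of a pair together with its horizontal flip and concatenates the two embeddings, so every face is described by a 2 x feature_dim vector. evaluation_10_fold() above then mean-centres and L2-normalises these vectors per fold, which turns the inner product on line 45 into a cosine similarity. A minimal sketch of that scoring step (NumPy, names illustrative):

    fl = (fl - mu) / np.linalg.norm(fl - mu, axis=1, keepdims=True)  # centre and L2-normalise the left features
    fr = (fr - mu) / np.linalg.norm(fr - mu, axis=1, keepdims=True)  # ... and the right features
    scores = np.sum(fl * fr, axis=1)                                 # per-pair cosine similarity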
81 | def getFeatureFromTorch(feature_save_dir, net, device, data_set, data_loader):
82 |     featureLs = None
83 |     featureRs = None
84 |     count = 0
85 |     for data in data_loader:
86 |         for i in range(len(data)):
87 |             data[i] = data[i].to(device)
88 |         count += data[0].size(0)
89 |         #print('extracting deep features from the face pair {}...'.format(count))
90 |         with torch.no_grad():
91 |             res = [net(d).data.cpu().numpy() for d in data]
92 |         featureL = np.concatenate((res[0], res[1]), 1)
93 |         featureR = np.concatenate((res[2], res[3]), 1)
94 |         # print(featureL.shape, featureR.shape)
95 |         if featureLs is None:
96 |             featureLs = featureL
97 |         else:
98 |             featureLs = np.concatenate((featureLs, featureL), 0)
99 |         if featureRs is None:
100 |             featureRs = featureR
101 |         else:
102 |             featureRs = np.concatenate((featureRs, featureR), 0)
103 |         # print(featureLs.shape, featureRs.shape)
104 |
105 |     result = {'fl': featureLs, 'fr': featureRs, 'fold': data_set.folds, 'flag': data_set.labels}
106 |     scipy.io.savemat(feature_save_dir, result)
107 |
108 | if __name__ == '__main__':
109 |     model_para_path = 'Trained_Models/CASIA_WebFace_ResNet50_IR_2020-08-18 15:29:15/Iter_64000_net.pth'
110 |     net, device, cfp_dataset, cfp_loader = loadModel(args.cfp_dataset_path, args.cfp_file_list, args.backbone, args.gpus, model_para_path)
111 |     getFeatureFromTorch('Test_Data/cur_cfp_result.mat', net, device, cfp_dataset, cfp_loader)
112 |     ACCs = evaluation_10_fold('Test_Data/cur_cfp_result.mat')
113 |     for i in range(len(ACCs)):
114 |         print('{} {:.2f}'.format(i + 1, ACCs[i] * 100))
115 |     print('--------')
116 |     print('AVE {:.4f}'.format(np.mean(ACCs) * 100))
117 |
--------------------------------------------------------------------------------
/Config/__init__.py:
--------------------------------------------------------------------------------
1 | from Config.config import args
--------------------------------------------------------------------------------
/Config/__pycache__/__init__.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/QuasarLight/Pytorch_Face_Recognition/abdd11b0067658f3857b94f52b136ff6839ffece/Config/__pycache__/__init__.cpython-36.pyc
--------------------------------------------------------------------------------
/Config/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/QuasarLight/Pytorch_Face_Recognition/abdd11b0067658f3857b94f52b136ff6839ffece/Config/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/Config/__pycache__/config.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/QuasarLight/Pytorch_Face_Recognition/abdd11b0067658f3857b94f52b136ff6839ffece/Config/__pycache__/config.cpython-36.pyc
--------------------------------------------------------------------------------
/Config/__pycache__/config.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/QuasarLight/Pytorch_Face_Recognition/abdd11b0067658f3857b94f52b136ff6839ffece/Config/__pycache__/config.cpython-37.pyc
--------------------------------------------------------------------------------
/Config/config.py:
--------------------------------------------------------------------------------
1 | import argparse
2 |
3 | parser = argparse.ArgumentParser(description='Pytorch For Deep Face Recognition')
4 |
5 | # parameter adjustment mode (boolean flags use a str -> bool lambda, since argparse's type=bool treats any non-empty string as True)
6 | parser.add_argument('--para_adj_mode', type=lambda s: str(s).lower() in ('true', '1'), default=False, help='parameter adjustment mode')
7 |
8 | # visualizer
9 | parser.add_argument('--use_visdom', type=lambda s: str(s).lower() in ('true', '1'), default=False, help='whether to use visdom')
10 |
11 | # device parameters
12 | parser.add_argument('--use_amp', type=lambda s: str(s).lower() in ('true', '1'), default=True, help='whether to use automatic mixed precision (AMP)')
13 | parser.add_argument('--use_multi_gpus', type=lambda s: str(s).lower() in ('true', '1'), default=True, help='whether to use multiple GPU devices')
14 | parser.add_argument('--gpus', nargs='+', type=int, default=[0, 1], help='appoint GPU devices')
15 |
16 | # dataset parameters
17 | parser.add_argument('--train_dataset', type=str, default='MS_Celeb_1M', help='CASIA_WebFace, MS_Celeb_1M')
18 | parser.add_argument('--webface_dataset_path', type=str, default='/home/CaiMao/Face_Pytorch-master/dataset/webface-112x112/casia-112x112', help='webface dataset path')
19 | parser.add_argument('--webface_file_list', type=str, default='/home/CaiMao/Face_Pytorch-master/dataset/webface-112x112/casia-112x112.list', help='webface files list')
20 | parser.add_argument('--ms1m_dataset_path', type=str, default='/home/CaiMao/MS1M_112x112/MS1M_112x112', help='ms1m dataset path')
21 | parser.add_argument('--ms1m_file_list', type=str, default='/home/CaiMao/MS1M_112x112/MS1M-112x112.txt', help='ms1m files list')
22 | parser.add_argument('--lfw_dataset_path', type=str, default='/home/CaiMao/Face_Pytorch-master/dataset/lfw-112x112/lfw-112x112', help='lfw dataset path')
23 | parser.add_argument('--lfw_file_list', type=str, default='/home/CaiMao/Face_Pytorch-master/dataset/lfw-112x112/pairs.txt', help='lfw pair file list')
24 | parser.add_argument('--cfp_dataset_path', type=str, default='/data/face_datasets/train_datasets/MS1M_112x112/cfp_fp', help='cfp-fp dataset path')
25 | parser.add_argument('--cfp_file_list', type=str, default='/data/face_datasets/train_datasets/MS1M_112x112/cfp_fp_pair.txt', help='cfp-fp pair file list')
26 | parser.add_argument('--agedb_dataset_path', type=str, default='/data/face_datasets/train_datasets/MS1M_112x112/agedb_30', help='agedb-30 dataset path')
27 | parser.add_argument('--agedb_file_list', type=str, default='/data/face_datasets/train_datasets/MS1M_112x112/agedb_30_pair.txt', help='agedb-30 pair file list')
28 |
29 | # training parameters
30 | parser.add_argument('--initial_lr', type=float, default=0.1, help='initial learning rate')
31 | parser.add_argument('--weight_decay', type=float, default=5e-4, help='weight decay')
32 | parser.add_argument('--batch_size', type=int, default=256, help='batch size')
33 | parser.add_argument('--total_epoch', type=int, default=25, help='total epochs')
34 | parser.add_argument('--backbone', type=str, default='ResNet50_IR', help='MobileFaceNet, ResNet50_IR, SEResNet50_IR, ResNet100_IR, SEResNet100_IR')
35 | parser.add_argument('--margin', type=str, default='ArcFace', help='ArcFace, CosFace, Softmax')
36 | parser.add_argument('--feature_dim', type=int, default=512, help='feature dimension, 128 or 512; if backbone is MobileFaceNet, this option must be 128')
37 | parser.add_argument('--scale_size', type=float, default=32.0, help='scale size')
38 |
39 | # testing parameters
40 | parser.add_argument('--test_freq', type=int, default=1000, help='the frequency of testing the model')
41 | parser.add_argument('--test_on_megaface', type=lambda s: str(s).lower() in ('true', '1'), default=True, help='whether to test the model on megaface at the end of the iteration')
42 |
43 | # saving parameters
44 | parser.add_argument('--save_freq', type=int, default=1000, help='the frequency of saving the model')
45 | parser.add_argument('--save_dir', type=str, default='./Trained_Models', help='model save dir')
46 |
47 | # resume model parameters
48 | parser.add_argument('--resume', type=lambda s: str(s).lower() in ('true', '1'), default=False, help='resume model')
49 | parser.add_argument('--resume_backbone_path', type=str,
default='Trained_Models/CASIA_WebFace_MobileFace_2020-08-12 16:24:48/Iter_53400_net.pth', help='resume backbone path') 50 | parser.add_argument('--resume_margin_path', type=str, default='Trained_Models/CASIA_WebFace_MobileFace_2020-08-12 16:24:48/Iter_53400_margin.pth', help='resume margin path') 51 | 52 | args = parser.parse_args() -------------------------------------------------------------------------------- /Datasets/__init__.py: -------------------------------------------------------------------------------- 1 | from Datasets.webface import CASIA_WebFace 2 | from Datasets.lfw import LFW 3 | from Datasets.cfp import CFP_FP 4 | from Datasets.agedb import AgeDB30 5 | from Datasets.megaface import MegaFace 6 | from Datasets.ms1m import MS_Celeb_1M -------------------------------------------------------------------------------- /Datasets/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuasarLight/Pytorch_Face_Recognition/abdd11b0067658f3857b94f52b136ff6839ffece/Datasets/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /Datasets/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuasarLight/Pytorch_Face_Recognition/abdd11b0067658f3857b94f52b136ff6839ffece/Datasets/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /Datasets/__pycache__/agedb.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuasarLight/Pytorch_Face_Recognition/abdd11b0067658f3857b94f52b136ff6839ffece/Datasets/__pycache__/agedb.cpython-37.pyc -------------------------------------------------------------------------------- /Datasets/__pycache__/cfp.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuasarLight/Pytorch_Face_Recognition/abdd11b0067658f3857b94f52b136ff6839ffece/Datasets/__pycache__/cfp.cpython-37.pyc -------------------------------------------------------------------------------- /Datasets/__pycache__/lfw.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuasarLight/Pytorch_Face_Recognition/abdd11b0067658f3857b94f52b136ff6839ffece/Datasets/__pycache__/lfw.cpython-36.pyc -------------------------------------------------------------------------------- /Datasets/__pycache__/lfw.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuasarLight/Pytorch_Face_Recognition/abdd11b0067658f3857b94f52b136ff6839ffece/Datasets/__pycache__/lfw.cpython-37.pyc -------------------------------------------------------------------------------- /Datasets/__pycache__/megaface.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuasarLight/Pytorch_Face_Recognition/abdd11b0067658f3857b94f52b136ff6839ffece/Datasets/__pycache__/megaface.cpython-37.pyc -------------------------------------------------------------------------------- /Datasets/__pycache__/ms1m.cpython-37.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/QuasarLight/Pytorch_Face_Recognition/abdd11b0067658f3857b94f52b136ff6839ffece/Datasets/__pycache__/ms1m.cpython-37.pyc -------------------------------------------------------------------------------- /Datasets/__pycache__/webface.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuasarLight/Pytorch_Face_Recognition/abdd11b0067658f3857b94f52b136ff6839ffece/Datasets/__pycache__/webface.cpython-36.pyc -------------------------------------------------------------------------------- /Datasets/__pycache__/webface.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuasarLight/Pytorch_Face_Recognition/abdd11b0067658f3857b94f52b136ff6839ffece/Datasets/__pycache__/webface.cpython-37.pyc -------------------------------------------------------------------------------- /Datasets/agedb.py: -------------------------------------------------------------------------------- 1 | import os 2 | import cv2 3 | import numpy as np 4 | import torchvision.transforms as transforms 5 | from torch.utils.data import Dataset, DataLoader 6 | 7 | def image_loader(image_path): 8 | try: 9 | image = cv2.imread(image_path) 10 | if len(image.shape) == 2: 11 | image = np.stack([image]*3, 2) 12 | return image 13 | except IOError: 14 | print('fail to load image:' + image_path) 15 | 16 | transform = transforms.Compose([ 17 | transforms.ToTensor(), 18 | transforms.Normalize(mean = (0.5, 0.5, 0.5), std = (0.5, 0.5, 0.5)) 19 | ]) 20 | 21 | class AgeDB30(Dataset): 22 | def __init__(self, root, file_list): 23 | 24 | self.root = root 25 | self.file_list = file_list 26 | self.nameLs = [] 27 | self.nameRs = [] 28 | self.folds = [] 29 | self.labels = [] 30 | 31 | with open(file_list) as f: 32 | pairs = f.read().splitlines() 33 | for i, p in enumerate(pairs): 34 | p = p.split(' ') 35 | nameL = p[0] 36 | nameR = p[1] 37 | fold = i // 600 38 | label = int(p[2]) 39 | 40 | self.nameLs.append(nameL) 41 | self.nameRs.append(nameR) 42 | self.folds.append(fold) 43 | self.labels.append(label) 44 | 45 | def __getitem__(self, index): 46 | 47 | img_l = image_loader(os.path.join(self.root, self.nameLs[index])) 48 | img_r = image_loader(os.path.join(self.root, self.nameRs[index])) 49 | image_list = [img_l, cv2.flip(img_l, 1), img_r, cv2.flip(img_r, 1)] 50 | 51 | for i in range(len(image_list)): 52 | image_list[i] = transform(image_list[i]) 53 | 54 | return image_list 55 | 56 | def __len__(self): 57 | return len(self.nameLs) 58 | 59 | 60 | if __name__ == '__main__': 61 | dataset_path = '/data/face_datasets/test_datasets/face_verification/AgeDB-30/agedb30_align_112' 62 | file_list = '/data/face_datasets/test_datasets/face_verification/AgeDB-30/agedb_30_pair.txt' 63 | 64 | agedb_dataset = AgeDB30(dataset_path, file_list) 65 | agedb_dataloader = DataLoader(agedb_dataset, batch_size=128, shuffle=False, num_workers=4, drop_last=False) 66 | print(len(agedb_dataset)) 67 | print(len(agedb_dataloader)) 68 | for data in agedb_dataloader: 69 | print(len(data)) -------------------------------------------------------------------------------- /Datasets/cfp.py: -------------------------------------------------------------------------------- 1 | import os 2 | import cv2 3 | import numpy as np 4 | import torchvision.transforms as transforms 5 | from torch.utils.data import Dataset, DataLoader 6 | 7 | def image_loader(image_path): 8 | try: 9 | image = cv2.imread(image_path) 10 | if 
len(image.shape) == 2: 11 | image = np.stack([image]*3, 2) 12 | return image 13 | except IOError: 14 | print('fail to load image:' + image_path) 15 | 16 | transform = transforms.Compose([ 17 | transforms.ToTensor(), 18 | transforms.Normalize(mean = (0.5, 0.5, 0.5), std = (0.5, 0.5, 0.5)) 19 | ]) 20 | 21 | class CFP_FP(Dataset): 22 | def __init__(self, root, file_list): 23 | 24 | self.root = root 25 | self.file_list = file_list 26 | self.nameLs = [] 27 | self.nameRs = [] 28 | self.folds = [] 29 | self.labels = [] 30 | 31 | with open(file_list) as f: 32 | pairs = f.read().splitlines() 33 | for i, p in enumerate(pairs): 34 | p = p.split(' ') 35 | nameL = p[0] 36 | nameR = p[1] 37 | fold = i // 700 38 | label = int(p[2]) 39 | 40 | self.nameLs.append(nameL) 41 | self.nameRs.append(nameR) 42 | self.folds.append(fold) 43 | self.labels.append(label) 44 | 45 | def __getitem__(self, index): 46 | 47 | img_l = image_loader(os.path.join(self.root, self.nameLs[index])) 48 | img_r = image_loader(os.path.join(self.root, self.nameRs[index])) 49 | image_list = [img_l, cv2.flip(img_l, 1), img_r, cv2.flip(img_r, 1)] 50 | 51 | for i in range(len(image_list)): 52 | image_list[i] = transform(image_list[i]) 53 | 54 | return image_list 55 | 56 | def __len__(self): 57 | return len(self.nameLs) 58 | 59 | if __name__ == '__main__': 60 | dataset_path = '/data/face_datasets/test_datasets/face_verification/CFP-FP/CFP_FP_aligned_112' 61 | file_list = '/data/face_datasets/test_datasets/face_verification/CFP-FP/cfp_fp_pair.txt' 62 | 63 | cfp_dataset = CFP_FP(dataset_path, file_list) 64 | cfp_dataloader = DataLoader(cfp_dataset, batch_size=128, shuffle=False, num_workers=4, drop_last=False) 65 | print(len(cfp_dataset)) 66 | print(len(cfp_dataloader)) 67 | for data in cfp_dataloader: 68 | print(len(data)) -------------------------------------------------------------------------------- /Datasets/lfw.py: -------------------------------------------------------------------------------- 1 | from torch.utils.data import Dataset, DataLoader 2 | import torchvision.transforms as transforms 3 | import numpy as np 4 | import cv2 5 | import os 6 | 7 | def image_loader(image_path): 8 | try: 9 | image = cv2.imread(image_path) 10 | if len(image.shape) == 2: 11 | image = np.stack([image]*3, 2) 12 | return image 13 | except IOError: 14 | print('fail to load image:' + image_path) 15 | 16 | transform = transforms.Compose([ 17 | transforms.ToTensor(), 18 | transforms.Normalize(mean = (0.5, 0.5, 0.5), std = (0.5, 0.5, 0.5)) 19 | ]) 20 | 21 | class LFW(Dataset): 22 | def __init__(self, dataset_path, file_list): 23 | self.dataset_path = dataset_path 24 | self.file_list = file_list 25 | self.left_images = [] 26 | self.right_images = [] 27 | self.folds = [] 28 | self.labels = [] 29 | 30 | with open(file_list) as f: 31 | pairs = f.read().splitlines()[1:] 32 | for i, p in enumerate(pairs): 33 | p = p.split('\t') 34 | if len(p) == 3: 35 | left_image = p[0] + '/' + p[0] + '_' + '{:04}.jpg'.format(int(p[1])) 36 | right_image = p[0] + '/' + p[0] + '_' + '{:04}.jpg'.format(int(p[2])) 37 | fold = i // 600 38 | label = 1 39 | elif len(p) == 4: 40 | left_image = p[0] + '/' + p[0] + '_' + '{:04}.jpg'.format(int(p[1])) 41 | right_image = p[2] + '/' + p[2] + '_' + '{:04}.jpg'.format(int(p[3])) 42 | fold = i // 600 43 | label = -1 44 | self.left_images.append(left_image) 45 | self.right_images.append(right_image) 46 | self.folds.append(fold) 47 | self.labels.append(label) 48 | 49 | def __getitem__(self, index): 50 | 51 | image_left = 
image_loader(os.path.join(self.dataset_path, self.left_images[index])) 52 | image_right = image_loader(os.path.join(self.dataset_path, self.right_images[index])) 53 | image_list = [image_left, cv2.flip(image_left, 1), image_right, cv2.flip(image_right, 1)] 54 | 55 | for i in range(len(image_list)): 56 | image_list[i] = transform(image_list[i]) 57 | 58 | return image_list 59 | 60 | def __len__(self): 61 | return len(self.left_images) 62 | 63 | 64 | if __name__ == '__main__': 65 | dataset_path = '/home/CaiMao/Face_Pytorch-master/dataset/lfw-112x112/lfw-112x112' 66 | file_list = '/home/CaiMao/Face_Pytorch-master/dataset/lfw-112x112/pairs.txt' 67 | 68 | lfw_dataset = LFW(dataset_path, file_list) 69 | lfw_dataloader = DataLoader(lfw_dataset, batch_size=128, shuffle=False, num_workers=4, drop_last=False) 70 | print(len(lfw_dataset)) 71 | print(len(lfw_dataloader)) 72 | for data in lfw_dataloader: 73 | print(len(data)) -------------------------------------------------------------------------------- /Datasets/megaface.py: -------------------------------------------------------------------------------- 1 | import os 2 | import cv2 3 | import numpy as np 4 | import torchvision.transforms as transforms 5 | from torch.utils.data import Dataset, DataLoader 6 | 7 | def image_loader(image_path): 8 | try: 9 | image = cv2.imread(image_path) 10 | if len(image.shape) == 2: 11 | image = np.stack([image]*3, 2) 12 | return image 13 | except IOError: 14 | print('fail to load image:' + image_path) 15 | 16 | transform = transforms.Compose([ 17 | transforms.ToTensor(), 18 | transforms.Normalize(mean = (0.5, 0.5, 0.5), std = (0.5, 0.5, 0.5)) 19 | ]) 20 | 21 | class MegaFace(Dataset): 22 | def __init__(self, facescrub_dir, megaface_dir): 23 | test_image_file_list = [] 24 | print('Scanning files under facescrub and megaface...') 25 | for root, dirs, files in os.walk(facescrub_dir): 26 | for e in files: 27 | filename = os.path.join(root, e) 28 | ext = os.path.splitext(filename)[1].lower() 29 | if ext in ('.png', '.bmp', '.jpg', '.jpeg'): 30 | test_image_file_list.append(filename) 31 | for root, dirs, files in os.walk(megaface_dir): 32 | for e in files: 33 | filename = os.path.join(root, e) 34 | ext = os.path.splitext(filename)[1].lower() 35 | if ext in ('.png', '.bmp', '.jpg', '.jpeg'): 36 | test_image_file_list.append(filename) 37 | 38 | self.image_list = test_image_file_list 39 | 40 | def __getitem__(self, index): 41 | img_path = self.image_list[index] 42 | img = image_loader(img_path) 43 | 44 | #random flip with ratio of 0.5 45 | if np.random.choice(2) == 1: 46 | img = cv2.flip(img, 1) 47 | 48 | img = transform(img) 49 | 50 | return img, img_path 51 | 52 | def __len__(self): 53 | return len(self.image_list) 54 | 55 | 56 | if __name__ == '__main__': 57 | facescrub = '/data/face_datasets/test_datasets/face_recognition/MegaFace/facescrub_align_112/' 58 | megaface = '/data/face_datasets/test_datasets/face_recognition/MegaFace/megaface_align_112/' 59 | 60 | megaface_dataset = MegaFace(facescrub, megaface) 61 | megaface_dataloader = DataLoader(megaface_dataset, batch_size=128, shuffle=False, num_workers=4, drop_last=False) 62 | print(len(megaface_dataset)) 63 | print(len(megaface_dataloader)) 64 | for data in megaface_dataloader: 65 | print(len(data)) -------------------------------------------------------------------------------- /Datasets/ms1m.py: -------------------------------------------------------------------------------- 1 | from torch.utils.data import Dataset, DataLoader 2 | import torchvision.transforms as 
transforms 3 | import numpy as np 4 | import cv2 5 | 6 | def image_loader(image_path): 7 | try: 8 | image = cv2.imread(image_path) 9 | if len(image.shape) == 2: 10 | image = np.stack([image]*3, 2) 11 | return image 12 | except AttributeError: # cv2.imread returns None on a missing or unreadable file 13 | raise IOError('failed to load image: ' + image_path) 14 | 15 | transform = transforms.Compose([ 16 | transforms.ToTensor(), 17 | transforms.Normalize(mean = (0.5, 0.5, 0.5), std = (0.5, 0.5, 0.5)) 18 | ]) 19 | 20 | class MS_Celeb_1M(Dataset): 21 | def __init__(self, dataset_path, file_list): 22 | self.dataset_path = dataset_path 23 | 24 | img_list = [] 25 | label_list = [] 26 | with open(file_list) as f: 27 | img_label_list = f.read().splitlines() 28 | for img_label in img_label_list: 29 | img_path, label = img_label.split(' ') 30 | img_list.append(img_path) 31 | label_list.append(int(label)) 32 | 33 | self.img_list = img_list 34 | self.label_list = label_list 35 | self.num_images = len(self.img_list) 36 | self.num_classes = len(np.unique(self.label_list)) 37 | print('dataset size: ', 'num_images/num_classes ', self.num_images, '/', self.num_classes) 38 | 39 | def __getitem__(self, index): 40 | image_path = self.img_list[index] 41 | label = self.label_list[index] 42 | 43 | # load image 44 | image = image_loader(image_path) 45 | 46 | # random flip with ratio of 0.5 47 | if np.random.choice(2) == 1: 48 | image = cv2.flip(image, 1) 49 | 50 | # transform numpy.ndarray to tensor and normalize it 51 | image = transform(image) 52 | 53 | return image, label 54 | 55 | def __len__(self): 56 | return self.num_images 57 | 58 | if __name__ == '__main__': 59 | dataset_path = '/home/CaiMao/MS1M_112x112/MS1M_112x112' 60 | file_list = '/home/CaiMao/MS1M_112x112/MS1M-112x112.txt' 61 | 62 | dataset = MS_Celeb_1M(dataset_path, file_list) 63 | trainloader = DataLoader(dataset, batch_size = 256, shuffle = False, num_workers = 32, drop_last = False, pin_memory = True) 64 | 65 | print(len(trainloader)) 66 | for data in trainloader: 67 | image_batch = data[0] 68 | label = data[1] 69 | print(type(data[0]),type(data[1])) 70 | print(image_batch.shape) 71 | print(len(label)) 72 | 73 | -------------------------------------------------------------------------------- /Datasets/webface.py: -------------------------------------------------------------------------------- 1 | from torch.utils.data import Dataset, DataLoader 2 | import torchvision.transforms as transforms 3 | import numpy as np 4 | import cv2 5 | 6 | def image_loader(image_path): 7 | try: 8 | image = cv2.imread(image_path) 9 | if len(image.shape) == 2: 10 | image = np.stack([image]*3, 2) 11 | return image 12 | except AttributeError: # cv2.imread returns None on a missing or unreadable file 13 | raise IOError('failed to load image: ' + image_path) 14 | 15 | transform = transforms.Compose([ 16 | transforms.ToTensor(), 17 | transforms.Normalize(mean = (0.5, 0.5, 0.5), std = (0.5, 0.5, 0.5)) 18 | ]) 19 | 20 | class CASIA_WebFace(Dataset): 21 | def __init__(self, dataset_path, file_list): 22 | self.dataset_path = dataset_path 23 | 24 | img_list = [] 25 | label_list = [] 26 | with open(file_list) as f: 27 | img_label_list = f.read().splitlines() 28 | for img_label in img_label_list: 29 | img_path, label = img_label.split(' ') 30 | img_list.append(img_path) 31 | label_list.append(int(label)) 32 | 33 | self.img_list = img_list 34 | self.label_list = label_list 35 | self.num_images = len(self.img_list) 36 | self.num_classes = len(np.unique(self.label_list)) 37 | print('dataset size: ', 'num_images/num_classes ', self.num_images, '/', self.num_classes) 38 | 39 | def __getitem__(self, index): 40 | image_path = 
self.img_list[index] 41 | label = self.label_list[index] 42 | 43 | # load image 44 | image = image_loader(image_path) 45 | 46 | # random flip with ratio of 0.5 47 | if np.random.choice(2) == 1: 48 | image = cv2.flip(image, 1) 49 | 50 | # transform numpy.ndarray to tensor and normalize it 51 | image = transform(image) 52 | 53 | return image, label 54 | 55 | def __len__(self): 56 | return self.num_images 57 | 58 | if __name__ == '__main__': 59 | dataset_path = '/home/CaiMao/Face_Pytorch-master/dataset/webface-112x112/casia-112x112' 60 | file_list = '/home/CaiMao/Face_Pytorch-master/dataset/webface-112x112/casia-112x112.list' 61 | 62 | dataset = CASIA_WebFace(dataset_path, file_list) 63 | trainloader = DataLoader(dataset, batch_size = 256, shuffle = True, num_workers = 32, drop_last = False) 64 | 65 | print(len(trainloader)) 66 | for data in trainloader: 67 | image_batch = data[0] 68 | label = data[1] 69 | print(type(data[0]),type(data[1])) 70 | # print(image_batch.shape) 71 | # print(len(label)) 72 | 73 | -------------------------------------------------------------------------------- /LFW_Evaluation.py: -------------------------------------------------------------------------------- 1 | import os 2 | import torch 3 | import scipy.io 4 | import numpy as np 5 | from Config import args 6 | from Datasets import LFW 7 | from torch.nn import DataParallel 8 | from torch.utils.data import DataLoader 9 | from Backbones.Backbone import MobileFacenet, CBAMResNet 10 | 11 | def getAccuracy(scores, flags, threshold): 12 | p = np.sum(scores[flags == 1] > threshold) 13 | n = np.sum(scores[flags == -1] < threshold) 14 | return 1.0 * (p + n) / len(scores) 15 | 16 | def getThreshold(scores, flags, thrNum): 17 | accuracys = np.zeros((2 * thrNum + 1, 1)) 18 | thresholds = np.arange(-thrNum, thrNum + 1) * 1.0 / thrNum 19 | for i in range(2 * thrNum + 1): 20 | accuracys[i] = getAccuracy(scores, flags, thresholds[i]) 21 | max_index = np.squeeze(accuracys == np.max(accuracys)) 22 | bestThreshold = np.mean(thresholds[max_index]) 23 | return bestThreshold 24 | 25 | def evaluation_10_fold(feature_path='./result/cur_epoch_result.mat'): 26 | ACCs = np.zeros(10) 27 | result = scipy.io.loadmat(feature_path) 28 | for i in range(10): 29 | fold = result['fold'] 30 | flags = result['flag'] 31 | featureLs = result['fl'] 32 | featureRs = result['fr'] 33 | 34 | valFold = fold != i 35 | testFold = fold == i 36 | flags = np.squeeze(flags) 37 | 38 | mu = np.mean(np.concatenate((featureLs[valFold[0], :], featureRs[valFold[0], :]), 0), 0) 39 | mu = np.expand_dims(mu, 0) 40 | featureLs = featureLs - mu 41 | featureRs = featureRs - mu 42 | featureLs = featureLs / np.expand_dims(np.sqrt(np.sum(np.power(featureLs, 2), 1)), 1) 43 | featureRs = featureRs / np.expand_dims(np.sqrt(np.sum(np.power(featureRs, 2), 1)), 1) 44 | 45 | scores = np.sum(np.multiply(featureLs, featureRs), 1) 46 | threshold = getThreshold(scores[valFold[0]], flags[valFold[0]], 10000) 47 | ACCs[i] = getAccuracy(scores[testFold[0]], flags[testFold[0]], threshold) 48 | 49 | return ACCs 50 | 51 | def loadModel(data_root, file_list, backbone_net, gpus='0', model_para_path=None): 52 | # gpu init 53 | os.environ['CUDA_VISIBLE_DEVICES'] = ','.join(map(str, args.gpus)) 54 | device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') 55 | 56 | # backbone 57 | backbones = {'MobileFaceNet': MobileFacenet(), 58 | 'ResNet50_IR': CBAMResNet(50, feature_dim=args.feature_dim, mode='ir'), 59 | 'SEResNet50_IR': CBAMResNet(50, feature_dim=args.feature_dim, mode='ir_se'), 60 | 
'ResNet100_IR': CBAMResNet(100, feature_dim=args.feature_dim, mode='ir'), 60 | 'SEResNet100_IR': CBAMResNet(100, feature_dim=args.feature_dim, mode='ir_se')} 61 | if backbone_net in backbones: 62 | net = backbones[backbone_net] 63 | else: 64 | raise ValueError(backbone_net + ' is not available!') 65 | 66 | 67 | # load parameter 68 | net.load_state_dict(torch.load(model_para_path)) 69 | 70 | if args.use_multi_gpus == True: 71 | net = DataParallel(net).to(device) 72 | else: 73 | net = net.to(device) 74 | 75 | # dataset and dataloader 76 | lfw_dataset = LFW(data_root, file_list) 77 | lfw_loader = DataLoader(lfw_dataset, batch_size=128, shuffle=False, num_workers=4, drop_last=False) 78 | 79 | return net.eval(), device, lfw_dataset, lfw_loader 80 | 81 | def getFeatureFromTorch(feature_save_dir, net, device, data_set, data_loader): 82 | featureLs = None 83 | featureRs = None 84 | count = 0 85 | for data in data_loader: 86 | for i in range(len(data)): 87 | data[i] = data[i].to(device) 88 | count += data[0].size(0) 89 | #print('extracting deep features from the face pair {}...'.format(count)) 90 | with torch.no_grad(): 91 | res = [net(d).data.cpu().numpy() for d in data] 92 | featureL = np.concatenate((res[0], res[1]), 1) 93 | featureR = np.concatenate((res[2], res[3]), 1) 94 | # print(featureL.shape, featureR.shape) 95 | if featureLs is None: 96 | featureLs = featureL 97 | else: 98 | featureLs = np.concatenate((featureLs, featureL), 0) 99 | if featureRs is None: 100 | featureRs = featureR 101 | else: 102 | featureRs = np.concatenate((featureRs, featureR), 0) 103 | # print(featureLs.shape, featureRs.shape) 104 | 105 | result = {'fl': featureLs, 'fr': featureRs, 'fold': data_set.folds, 'flag': data_set.labels} 106 | scipy.io.savemat(feature_save_dir, result) 107 | 108 | if __name__ == '__main__': 109 | model_para_path = 'Trained_Models/CASIA_WebFace_MobileFace_2020-08-12 16:24:48/Iter_53400_net.pth' 110 | net, device, lfw_dataset, lfw_loader = loadModel(args.lfw_dataset_path, args.lfw_file_list, args.backbone, args.gpus, model_para_path) 111 | getFeatureFromTorch('Test_Data/cur_lfw_result.mat', net, device, lfw_dataset, lfw_loader) 112 | ACCs = evaluation_10_fold('Test_Data/cur_lfw_result.mat') 113 | for i in range(len(ACCs)): 114 | print('{} {:.2f}'.format(i + 1, ACCs[i] * 100)) 115 | print('--------') 116 | print('AVE {:.4f}'.format(np.mean(ACCs) * 100)) --------------------------------------------------------------------------------
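A small worked example of the threshold sweep implemented by getThreshold/getAccuracy above (the numbers are illustrative):

import numpy as np
scores = np.array([0.9, 0.7, 0.2, -0.1])  # cosine similarities of four validation pairs
flags = np.array([1, 1, -1, -1])          # 1 = same identity, -1 = different identities
# getAccuracy counts positive pairs above and negative pairs below the threshold, so every
# grid threshold strictly between 0.2 and 0.7 classifies all four pairs correctly; with
# thrNum=10000, getThreshold returns the mean of those grid points, approximately 0.45.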
/LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2020 MaoCai 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /MegaFace_Evaluation/Extract_MegaFace_Features.py: -------------------------------------------------------------------------------- 1 | import os 2 | import torch 3 | import struct 4 | import argparse 5 | import numpy as np 6 | from Datasets import MegaFace 7 | from torch.nn import DataParallel 8 | from torch.utils.data import DataLoader 9 | from Backbones.Backbone import MobileFacenet, CBAMResNet 10 | 11 | cv_type_to_dtype = {5: np.dtype('float32'), 6: np.dtype('float64')} 12 | dtype_to_cv_type = {v: k for k, v in cv_type_to_dtype.items()} 13 | 14 | def write_mat(filename, m): 15 | """Write mat m to the file at the given path""" 16 | if len(m.shape) == 1: 17 | rows = m.shape[0] 18 | cols = 1 19 | else: 20 | rows, cols = m.shape 21 | header = struct.pack('iiii', rows, cols, cols * 4, dtype_to_cv_type[m.dtype]) 22 | 23 | with open(filename, 'wb') as outfile: 24 | outfile.write(header) 25 | outfile.write(m.data) 26 | 27 | def read_mat(filename): 28 | """ 29 | Reads an OpenCV mat from the file at the given path 30 | """ 31 | with open(filename, 'rb') as fin: 32 | rows, cols, stride, type_ = struct.unpack('iiii', fin.read(4 * 4)) 33 | mat = np.frombuffer(fin.read(rows * stride), dtype=cv_type_to_dtype[type_]) # np.fromstring(str(bytes)) corrupts the data under Python 3 34 | return mat.reshape(rows, cols) 35 | 36 | def extract_feature(model_path, backbone_net, face_scrub_path, megaface_path, batch_size=1024, gpus='0', do_norm=False): 37 | # gpu init 38 | multi_gpus = False 39 | if len(gpus.split(',')) > 1: 40 | multi_gpus = True 41 | os.environ['CUDA_VISIBLE_DEVICES'] = gpus 42 | device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') 43 | 44 | # backbone 45 | backbones = {'MobileFaceNet': MobileFacenet(), 46 | 'ResNet50_IR': CBAMResNet(50, feature_dim=args.feature_dim, mode='ir'), 47 | 'SEResNet50_IR': CBAMResNet(50, feature_dim=args.feature_dim, mode='ir_se'), 48 | 'ResNet100_IR': CBAMResNet(100, feature_dim=args.feature_dim, mode='ir'), 49 | 'SEResNet100_IR': CBAMResNet(100, feature_dim=args.feature_dim, mode='ir_se')} 50 | if backbone_net in backbones: 51 | net = backbones[backbone_net] 52 | else: 53 | raise ValueError(backbone_net + ' is not available!') 54 | 55 | # load parameter 56 | net.load_state_dict(torch.load(model_path)) 57 | 58 | if multi_gpus == True: 59 | net = DataParallel(net).to(device) 60 | else: 61 | net = net.to(device) 62 | net.eval() 63 | 64 | # dataset and dataloader 65 | megaface_dataset = MegaFace(face_scrub_path, megaface_path) 66 | megaface_dataloader = DataLoader(megaface_dataset, batch_size=batch_size, shuffle=False, num_workers=12, drop_last=False) 67 | 68 | for data in megaface_dataloader: 69 | img, img_path = data[0].to(device), data[1] 70 | with torch.no_grad(): 71 | output = net(img).data.cpu().numpy() 72 | 73 | if not do_norm: 74 | for i in range(len(img_path)): 75 | abs_path = img_path[i] + '.feat' 76 | write_mat(abs_path, output[i]) 77 | print('extract 1 batch...without feature normalization') 78 | else: 79 | for i in range(len(img_path)): 80 | abs_path = img_path[i] + '.feat' 81 | feat = output[i] 82 | feat = feat / np.sqrt((np.dot(feat, feat))) 83 | write_mat(abs_path, feat) 84 | print('extract 1 batch...with feature normalization') 85 | print('all images have been processed!') 86 | 
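# Note on the .feat files written by write_mat above: each one starts with a header of four
# int32 values in native byte order -- rows, cols, stride (= cols * 4) and the OpenCV dtype
# code (5 = float32, 6 = float64) -- followed by the raw matrix bytes, so a 128-d float32
# feature occupies 16 + 128 * 4 = 528 bytes. read_mat reverses exactly this layout.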
87 | if __name__ == '__main__': 88 | parser = argparse.ArgumentParser(description='Test model on MegaFace') 89 | parser.add_argument('--model_path', type=str, default='../Trained_Models/CASIA_WebFace_MobileFace_2020-08-12 16:24:48/Iter_53400_net.pth', help='The path of trained model') 90 | parser.add_argument('--backbone', type=str, default='MobileFaceNet', help='MobileFaceNet, ResNet50_IR, SEResNet50_IR, ResNet100_IR, SEResNet100_IR') 91 | parser.add_argument('--facescrub_dir', type=str, default='/data/face_datasets/test_datasets/face_recognition/MegaFace/facescrub_align_112/', help='facescrub data') 92 | parser.add_argument('--megaface_dir', type=str, default='/data/face_datasets/test_datasets/face_recognition/MegaFace/megaface_align_112/', help='megaface data') 93 | parser.add_argument('--batch_size', type=int, default=1024, help='batch size') 94 | parser.add_argument('--feature_dim', type=int, default=128, help='feature dimension') 95 | parser.add_argument('--gpus', type=str, default='0,1', help='gpu list') 96 | parser.add_argument("--do_norm", type=int, default=1, help="1 to normalize features (default), 0 to skip normalization") 97 | args = parser.parse_args() 98 | 99 | extract_feature(args.model_path, args.backbone, args.facescrub_dir, args.megaface_dir, args.batch_size, args.gpus, args.do_norm) -------------------------------------------------------------------------------- /MegaFace_Evaluation/Get_Evaluation_Results.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import json 4 | import argparse 5 | import subprocess 6 | 7 | MODEL = './models/jb_identity.bin' 8 | IDENTIFICATION_EXE = './bin/Identification' 9 | FUSE_RESULTS_EXE = './bin/FuseResults' 10 | MEGAFACE_LIST_BASENAME = './templatelists/megaface_features_list.json' 11 | PROBE_LIST_BASENAME = './templatelists/facescrub_features_list.json' 12 | 13 | def main(): 14 | parser = argparse.ArgumentParser(description='Get MegaFace evaluation results with the provided feature files') 15 | parser.add_argument('--distractor_feature_path',default='/data/face_datasets/test_datasets/face_recognition/MegaFace/megaface_align_112', help='Path to MegaFace Features') 16 | parser.add_argument('--probe_feature_path',default='/data/face_datasets/test_datasets/face_recognition/MegaFace/facescrub_align_112', help='Path to FaceScrub Features') 17 | parser.add_argument('--file_ending',default='.feat',help='Ending appended to original photo files') 18 | parser.add_argument('--out_root',default='/data/face_datasets/test_datasets/face_recognition/MegaFace/results/', help='File output directory, outputs results files, score matrix files, and feature lists used') 19 | parser.add_argument('-s', '--sizes', type=int, nargs='+',help='(optional) Size(s) of feature list(s) to create. Default: 10 100 1000 10000 100000 1000000') 20 | parser.add_argument('-m', '--model', type=str,help='(optional) Scoring model to use. Default: ./models/jb_identity.bin') 21 | parser.add_argument('-ns','--num_sets', help='Set to change number of sets to run on. Default: 1') 22 | parser.add_argument('-d','--delete_matrices', dest='delete_matrices', action='store_true', help='Deletes matrices used while computing results. Reduces space needed to run test.') 23 | parser.add_argument('-p','--probe_list', help='Set to use different probe list. 
Default: ./templatelists/facescrub_features_list.json') 24 | parser.add_argument('-dlp','--distractor_list_path', help='Set to change path used for distractor lists') 25 | parser.set_defaults(model=MODEL, num_sets=1, sizes=[10, 100, 1000, 10000, 100000, 1000000], probe_list=PROBE_LIST_BASENAME, distractor_list_path=os.path.dirname(MEGAFACE_LIST_BASENAME)) 26 | args = parser.parse_args() 27 | 28 | distractor_feature_path = args.distractor_feature_path 29 | out_root = args.out_root 30 | probe_feature_path = args.probe_feature_path 31 | model = args.model 32 | num_sets = args.num_sets 33 | sizes = args.sizes 34 | file_ending = args.file_ending 35 | alg_name = file_ending.split('.')[0].strip('_') 36 | delete_matrices = args.delete_matrices 37 | probe_list_basename = args.probe_list 38 | megaface_list_basename = os.path.join(args.distractor_list_path,os.path.basename(MEGAFACE_LIST_BASENAME)) 39 | set_indices = range(1,int(num_sets) + 1) 40 | 41 | assert os.path.exists(distractor_feature_path) 42 | assert os.path.exists(probe_feature_path) 43 | if not os.path.exists(out_root): 44 | os.makedirs(out_root) 45 | if(not os.path.exists(os.path.join(out_root, "otherFiles"))): 46 | os.makedirs(os.path.join(out_root, "otherFiles")) 47 | other_out_root = os.path.join(out_root, "otherFiles") 48 | 49 | probe_name = os.path.basename(probe_list_basename).split('_')[0] 50 | distractor_name = os.path.basename(megaface_list_basename).split('_')[0] 51 | 52 | #Create feature lists for megaface for all sets and sizes and verifies all features exist 53 | missing = False 54 | for index in set_indices: 55 | for size in sizes: 56 | print('Creating feature list of {} photos for set {}'.format(size,str(index))) 57 | cur_list_name = megaface_list_basename + "_{}_{}".format(str(size), str(index)) 58 | with open(cur_list_name) as fp: 59 | featureFile = json.load(fp) 60 | path_list = featureFile["path"] 61 | for i in range(len(path_list)): 62 | path_list[i] = os.path.join(distractor_feature_path,path_list[i] + file_ending) 63 | if(not os.path.isfile(path_list[i])): 64 | print(path_list[i] + " is missing") 65 | missing = True 66 | if (i % 10000 == 0 and i > 0): 67 | print(str(i) + " / " + str(len(path_list))) 68 | featureFile["path"] = path_list 69 | json.dump(featureFile, open(os.path.join( 70 | other_out_root, '{}_features_{}_{}_{}'.format(distractor_name,alg_name,size,index)), 'w'), sort_keys=True, indent=4) 71 | if(missing): 72 | sys.exit("Features are missing...") 73 | 74 | #Create feature list for probe set 75 | with open(probe_list_basename) as fp: 76 | featureFile = json.load(fp) 77 | path_list = featureFile["path"] 78 | for i in range(len(path_list)): 79 | path_list[i] = os.path.join(probe_feature_path,path_list[i] + file_ending) 80 | if(not os.path.isfile(path_list[i])): 81 | print(path_list[i] + " is missing") 82 | missing = True 83 | featureFile["path"] = path_list 84 | json.dump(featureFile, open(os.path.join( 85 | other_out_root, '{}_features_{}'.format(probe_name,alg_name)), 'w'), sort_keys=True, indent=4) 86 | probe_feature_list = os.path.join(other_out_root, '{}_features_{}'.format(probe_name,alg_name)) 87 | if(missing): 88 | sys.exit("Features are missing...") 89 | 90 | print('Running probe to probe comparison') 91 | probe_score_filename = os.path.join( 92 | other_out_root, '{}_{}_{}.bin'.format(probe_name, probe_name, alg_name)) 93 | proc = subprocess.Popen( 94 | [IDENTIFICATION_EXE, model, "path", probe_feature_list, probe_feature_list, probe_score_filename]) 95 | proc.communicate() 96 | 97 | for index in 
set_indices: 98 | for size in sizes: 99 | print('Running test with size {} images for set {}'.format( 100 | str(size), str(index))) 101 | args = [IDENTIFICATION_EXE, model, "path", os.path.join(other_out_root, '{}_features_{}_{}_{}'.format(distractor_name,alg_name,size,index) 102 | ), probe_feature_list, os.path.join(other_out_root, '{}_{}_{}_{}_{}.bin'.format(probe_name, distractor_name, alg_name, str(size),str(index)))] 103 | proc = subprocess.Popen(args) 104 | proc.communicate() 105 | 106 | print('Computing test results with {} images for set {}'.format( 107 | str(size), str(index))) 108 | args = [FUSE_RESULTS_EXE] 109 | args += [os.path.join(other_out_root, '{}_{}_{}_{}_{}.bin'.format( 110 | probe_name, distractor_name, alg_name, str(size), str(index)))] 111 | args += [os.path.join(other_out_root, '{}_{}_{}.bin'.format( 112 | probe_name, probe_name, alg_name)), probe_feature_list, str(size)] 113 | args += [os.path.join(out_root, "cmc_{}_{}_{}_{}_{}.json".format( 114 | probe_name, distractor_name, alg_name, str(size), str(index)))] 115 | args += [os.path.join(out_root, "matches_{}_{}_{}_{}_{}.json".format( 116 | probe_name, distractor_name, alg_name, str(size), str(index)))] 117 | proc = subprocess.Popen(args) 118 | proc.communicate() 119 | 120 | if(delete_matrices): 121 | os.remove(os.path.join(other_out_root, '{}_{}_{}_{}_{}.bin'.format( 122 | probe_name, distractor_name, alg_name, str(size), str(index)))) 123 | 124 | if __name__ == '__main__': 125 | main() 126 | -------------------------------------------------------------------------------- /MegaFace_Evaluation/Plot_Evaluation_Results.py: -------------------------------------------------------------------------------- 1 | from MegaFace_Evaluation.tools.plot_megaface_result import plot_megaface_result 2 | 3 | if __name__ == '__main__': 4 | Evaluation_Results = ['/data/face_datasets/test_datasets/face_recognition/MegaFace/results/'] 5 | 6 | Margin = ['ArcFace'] 7 | 8 | probe = 'facescrub' 9 | 10 | other_methods_dir = None 11 | save_tpr_and_rank1_for_others = False 12 | target_fpr = 1e-6 13 | 14 | save_dir = './visualization_results' 15 | plot_megaface_result(Evaluation_Results, Margin, 16 | probe, 17 | save_dir, 18 | other_methods_dir, 19 | save_tpr_and_rank1_for_others, 20 | target_fpr = target_fpr 21 | ) 22 | -------------------------------------------------------------------------------- /MegaFace_Evaluation/bin/FuseResults: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuasarLight/Pytorch_Face_Recognition/abdd11b0067658f3857b94f52b136ff6839ffece/MegaFace_Evaluation/bin/FuseResults -------------------------------------------------------------------------------- /MegaFace_Evaluation/bin/Identification: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuasarLight/Pytorch_Face_Recognition/abdd11b0067658f3857b94f52b136ff6839ffece/MegaFace_Evaluation/bin/Identification -------------------------------------------------------------------------------- /MegaFace_Evaluation/models/jb_LBP.bin: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuasarLight/Pytorch_Face_Recognition/abdd11b0067658f3857b94f52b136ff6839ffece/MegaFace_Evaluation/models/jb_LBP.bin -------------------------------------------------------------------------------- /MegaFace_Evaluation/models/jb_identity.bin: 
-------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /MegaFace_Evaluation/scripts/matio.py: -------------------------------------------------------------------------------- 1 | import struct 2 | import numpy as np 3 | 4 | cv_type_to_dtype = { 5 | 5 : np.dtype('float32'), 6 | 6 : np.dtype('float64') 7 | } 8 | 9 | dtype_to_cv_type = {v : k for k,v in cv_type_to_dtype.items()} 10 | 11 | def write_mat(f, m): 12 | """Write mat m to file f""" 13 | if len(m.shape) == 1: 14 | rows = m.shape[0] 15 | cols = 1 16 | else: 17 | rows, cols = m.shape 18 | header = struct.pack('iiii', rows, cols, cols * 4, dtype_to_cv_type[m.dtype]) 19 | f.write(header) 20 | f.write(m.data) 21 | 22 | 23 | def read_mat(f): 24 | """ 25 | Reads an OpenCV mat from the given file opened in binary mode 26 | """ 27 | rows, cols, stride, type_ = struct.unpack('iiii', f.read(4*4)) 28 | mat = np.frombuffer(f.read(rows*stride), dtype=cv_type_to_dtype[type_]) 29 | return mat.reshape(rows,cols) 30 | 31 | def read_mkl_vec(f): 32 | """ 33 | Reads an MKL vector from the given file opened in binary mode 34 | """ 35 | # Read past the header information 36 | f.read(4*4) 37 | 38 | length, stride, type_ = struct.unpack('iii', f.read(3*4)) 39 | mat = np.frombuffer(f.read(length*4), dtype=np.float32) 40 | return mat 41 | 42 | def load_mkl_vec(filename): 43 | """ 44 | Reads an MKL vector from the given filename 45 | """ 46 | return read_mkl_vec(open(filename,'rb')) 47 | 48 | def load_mat(filename): 49 | """ 50 | Reads an OpenCV mat from the given filename 51 | """ 52 | return read_mat(open(filename,'rb')) 53 | 54 | def save_mat(filename, m): 55 | """Saves mat m to the given filename""" 56 | return write_mat(open(filename,'wb'), m) 57 | 58 | def main(): 59 | f = open('1_to_0.bin','rb') 60 | vx = read_mat(f) 61 | vy = read_mat(f) 62 | 63 | if __name__ == '__main__': 64 | main() 65 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Pytorch_Face_Recognition 2 | Pytorch implementation of mainstream face recognition algorithms (ArcFace, CosFace). 3 | ## 1. Introduction 4 | (1) Pytorch implementation of ArcFace and CosFace. 5 | (2) Cleaned datasets are provided, including WebFace, MS-Celeb-1M, LFW, AgeDB-30, CFP-FP and MegaFace. 6 | (3) Pretrained models are provided. See **3. Results and Pretrained Models** for further details. 7 | (4) Automatic Mixed Precision (AMP) training is supported to accelerate the training process. 8 | (5) Visdom is supported to visualize the changes of loss and accuracy during the training process. 9 | ## 2. Usage 10 | (1) **Environment Preparation** 11 | > Python 3.7 12 | > Pytorch 1.4 13 | > torchvision 0.5 14 | > cudatoolkit 10.0 15 | > apex 0.1 (optional) 16 | > visdom 0.1.8.9 (optional) 17 | 18 | (2) Download this project to your machine. 19 | 20 | (3) **Datasets Preparation** 21 | * Training Datasets : 22 | > CASIA-WebFace (453580/10575) 23 | MS-Celeb-1M (3923399/86876) 24 | * Test Datasets : 25 | > LFW 26 | CFP-FP 27 | AgeDB-30 28 | MegaFace 29 | 30 | (4) For training datasets, use Utils/Datasets_Utils/generate_dataset_list.py to generate the dataset file list (see the format sketch below). 31 | 32 | (5) Set training hyperparameters like *batch size*, *backbone* and *initial learning rate* in Config/config.py. 33 | 34 | (6) Run Train.py to start the training process; training information will be saved in the log file. 
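For reference, each line of the list written by generate_dataset_list.py is an absolute image path and an integer identity label, separated by a single space (the paths below are illustrative):
```
/path/to/casia-112x112/0000045/001.jpg 0
/path/to/casia-112x112/0000045/002.jpg 0
/path/to/casia-112x112/0000099/001.jpg 1
```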
35 | 36 | (7) Use LFW_Evaluation.py, AgeDB-30_Evaluation.py, CFP-FP_Evaluation.py and MegaFace_Evaluation to run individual evaluation. 37 | 38 | **Tips** : 39 | (1) In config.py, you can enable parameter adjustment mode. In this mode, training information and models will 40 | not be saved, which is convenient while you are tuning the training hyperparameters. 41 | 42 | (2) When you want to visualize the training process, set the option 'use_visdom' in config.py to True. Before using 43 | it, make sure that you have installed visdom and started the server. 44 | 45 | (3) You can use Automatic Mixed Precision (AMP) to accelerate the training process. It allows a bigger batch size, and 46 | the dynamic loss scaling also avoids fp16 gradient underflow. To use it, just set the option 'use_amp' in config.py to True. 47 | Make sure that you have installed apex and that your GPU has Tensor Cores before using AMP (see the sketch after these tips). 48 | 49 | (4) If you have multiple GPU devices and want to run parallel training, just set the option 'use_multi_gpus' in config.py to True. 50 | 
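As a reference for tips (2) and (3): the visdom server is started with `python -m visdom.server` before launching Train.py, and the apex AMP calls used by Train.py follow the pattern sketched below (`net`, `margin`, `optimizer` and `loss` stand for the objects built in Train.py):
```python
from apex import amp

# patch the models and the optimizer once, before the training loop ("O1" = mixed precision)
[net, margin], optimizer = amp.initialize([net, margin], optimizer, opt_level="O1")

# inside the training loop: backpropagate through the dynamically scaled loss
optimizer.zero_grad()
with amp.scale_loss(loss, optimizer) as scaled_loss:
    scaled_loss.backward()
optimizer.step()
```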
51 | ## 3. Results and Pretrained Models 52 | ### (1) LFW, AgeDB-30 and CFP-FP Evaluation Results 53 | |Training Dataset|Backbone |Model Size|Loss |LFW |AgeDB-30|CFP-FP |Pretrained Models | 54 | |:--------------:|:-----------:|:--------:|:-----:|:-----:|:------:|:-----:|:----------------------------------------------------------------------------------:| 55 | |CASIA-WebFace |MobileFaceNet|4MB |ArcFace|99.3333|92.5833 |94.0143|[BaiduNetDisk](https://pan.baidu.com/s/1wU7F8w-jYgJpjbZGFaJJtA) Extraction code:e3qm| 56 | |CASIA-WebFace |ResNet50-IR |170MB |ArcFace|99.4667|93.9333 |95.5571|[BaiduNetDisk](https://pan.baidu.com/s/1H6vgckjqqAer9Rp2pHU_cQ) Extraction code:byqs| 57 | |CASIA-WebFace |SEResNet50-IR|171MB |ArcFace|99.3833|93.9333 |95.5857|[BaiduNetDisk](https://pan.baidu.com/s/19YoVDVB_N6MPR6VGI6tyQg) Extraction code:c355| 58 | |CASIA-WebFace |ResNet100-IR |256MB |ArcFace|99.5833|94.3500 |96.0429|[BaiduNetDisk](https://pan.baidu.com/s/14NoOJjKZar9JUp6fjruB_A) Extraction code:kqsi| 59 | 60 | ### (2) MegaFace Rank 1 Identification Accuracy and Verification TPR@FPR=1e-6 Results 61 | |Training Dataset|Backbone |Model Size|Loss |Identification Rank1 Acc|Verification TPR@FPR=1e-6| 62 | |:--------------:|:-----------:|:--------:|:-----:|:----------------------:|:----------------------:| 63 | |CASIA-WebFace |MobileFaceNet|4MB |ArcFace| 68.46 | 83.49 | 64 | |CASIA-WebFace |ResNet50-IR |170MB |ArcFace| 74.50 | 89.89 | 65 | |CASIA-WebFace |SEResNet50-IR|171MB |ArcFace| 74.72 | 89.41 | 66 | |CASIA-WebFace |ResNet100-IR |256MB |ArcFace| 74.39 | 90.86 | 67 | 68 | ### (3) The experimental conditions: 69 | CPU :   Intel(R) Xeon(R) Silver 4114 CPU @ 2.20GHz 10 cores 20 threads × 2 70 | Memory :  128GB 71 | GPU :   RTX2080ti × 2 72 | ### (4) Training hyperparameters: 73 | CASIA-WebFace: Batch size: 256 74 |          Initial learning rate: 0.05 75 |          Total epoch = 36 76 |          Learning rate scheduler = [22, 31] 77 |          S = 32 78 | 79 | MS-Celeb-1M:    Batch size: 256 80 |          Initial learning rate: 0.05 81 |          Total epoch = 25 82 |          Learning rate scheduler = [13, 21] 83 |          S = 32 84 | ## 4. References 85 | [wujiyang/Face_Pytorch](https://github.com/wujiyang/Face_Pytorch) 86 | [deepinsight/insightface](https://github.com/deepinsight/insightface) 87 | [Xiaoccer/MobileFaceNet_Pytorch](https://github.com/Xiaoccer/MobileFaceNet_Pytorch) 88 | [TreB1eN/InsightFace_Pytorch](https://github.com/TreB1eN/InsightFace_Pytorch) 89 | ## 5. If this project is useful to you, please give me a star, love you! 90 | 91 | -------------------------------------------------------------------------------- /Test_Data/readme.txt: -------------------------------------------------------------------------------- 1 | The embeddings extracted from test datasets will be saved here. 2 | -------------------------------------------------------------------------------- /Train.py: -------------------------------------------------------------------------------- 1 | import os 2 | import time 3 | import torch 4 | import numpy as np 5 | import torch.optim as optim 6 | from apex import amp 7 | from Config import args 8 | from Datasets import LFW, CFP_FP, AgeDB30 9 | from Datasets import CASIA_WebFace, MS_Celeb_1M 10 | from Utils import Visualizer 11 | from Utils import init_logger 12 | from Utils import ChangeTimeFormat 13 | from torch.nn import DataParallel 14 | from torch.optim import lr_scheduler 15 | from torch.utils.data import DataLoader 16 | from Backbones.Backbone import MobileFacenet, CBAMResNet 17 | from Backbones.Margin import ArcMarginProduct, CosineMarginProduct, InnerProduct 18 | from LFW_Evaluation import evaluation_10_fold, getFeatureFromTorch 19 | 20 | # device initialization 21 | os.environ['CUDA_VISIBLE_DEVICES'] = ','.join(map(str, args.gpus)) 22 | device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') 23 | print('The current device is:', device) 24 | 25 | # model_save_dir and log initialization 26 | if not args.para_adj_mode: 27 | save_dir = os.path.join(args.save_dir, args.train_dataset + '_' + args.backbone + '_' + time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())) 28 | if os.path.exists(save_dir): 29 | raise NameError('model dir exists!') 30 | os.makedirs(save_dir) 31 | logger = init_logger(save_dir) 32 | _print = logger.info 33 | else: 34 | _print = print 35 | 36 | # visualizer initialization 37 | if args.use_visdom == True: 38 | vis = Visualizer(env=args.model_prefix + '_' + args.backbone) 39 | 40 | # train_dataset and train_dataloader 41 | if args.train_dataset == 'CASIA_WebFace': 42 | train_dataset = CASIA_WebFace(args.webface_dataset_path, args.webface_file_list) 43 | elif args.train_dataset == 'MS_Celeb_1M': 44 | train_dataset = MS_Celeb_1M(args.ms1m_dataset_path, args.ms1m_file_list) 45 | train_dataloader = DataLoader(train_dataset, args.batch_size, shuffle = True, num_workers = 32, drop_last = False, pin_memory = True) 46 | # test_dataset and test_dataloader 47 | lfw_dataset = LFW(args.lfw_dataset_path, args.lfw_file_list) 48 | lfw_dataloader = DataLoader(lfw_dataset, batch_size = 128, shuffle = False, num_workers = 32, drop_last = False) 49 | cfp_dataset = CFP_FP(args.cfp_dataset_path, args.cfp_file_list) 50 | cfp_dataloader = DataLoader(cfp_dataset, batch_size = 128, shuffle = False, num_workers = 32, drop_last = False) 51 | agedb_dataset = AgeDB30(args.agedb_dataset_path, args.agedb_file_list) 52 | agedb_dataloader = DataLoader(agedb_dataset, batch_size = 128, shuffle = False, num_workers = 32, drop_last = False) 53 | 
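# The margin heads chosen below are the training-time classification layers: with
# L2-normalized embeddings and class weights, InnerProduct yields plain softmax logits,
# CosineMarginProduct implements the CosFace logit s * (cos(theta_y) - m), and
# ArcMarginProduct implements the ArcFace logit s * cos(theta_y + m), where s is the
# scale factor passed in as args.scale_size and theta_y is the angle to the true class.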
54 | # select backbone and margin 55 | # backbone 56 | backbones = {'MobileFaceNet': MobileFacenet(), 57 | 'ResNet50_IR': CBAMResNet(50, feature_dim=args.feature_dim, mode='ir'), 58 | 'SEResNet50_IR': CBAMResNet(50, feature_dim=args.feature_dim, mode='ir_se'), 59 | 'ResNet100_IR': CBAMResNet(100, feature_dim=args.feature_dim, mode='ir'), 60 | 'SEResNet100_IR': CBAMResNet(100, feature_dim=args.feature_dim, mode='ir_se')} 61 | if args.backbone in backbones: 62 | net = backbones[args.backbone] 63 | else: 64 | raise ValueError(args.backbone + ' is not available!') 65 | # margin 66 | margins = {'ArcFace': ArcMarginProduct(args.feature_dim, train_dataset.num_classes, s=args.scale_size), 67 | 'CosFace': CosineMarginProduct(args.feature_dim, train_dataset.num_classes, s=args.scale_size), 68 | 'Softmax': InnerProduct(args.feature_dim, train_dataset.num_classes)} 69 | if args.margin in margins: 70 | margin = margins[args.margin] 71 | else: 72 | raise ValueError(args.margin + ' is not available!') 73 | # resume model 74 | if args.resume == True: 75 | _print('resume the model from: '+args.resume_backbone_path+'\n\t\t\t\t\t\t'+args.resume_margin_path) 76 | net.load_state_dict(torch.load(args.resume_backbone_path)) 77 | margin.load_state_dict(torch.load(args.resume_margin_path)) 78 | # put tensor on device 79 | net = net.to(device) 80 | margin = margin.to(device) 81 | 82 | # loss function 83 | loss_fn = torch.nn.CrossEntropyLoss() 84 | loss_fn = loss_fn.to(device) 85 | 86 | # optimizer 87 | optimizer = optim.SGD([ 88 | {'params': net.parameters()}, 89 | {'params': margin.parameters()}, 90 | ],lr = args.initial_lr, momentum = 0.9, nesterov = True, weight_decay = args.weight_decay) 91 | 92 | # learning rate scheduler 93 | scheduler = lr_scheduler.MultiStepLR(optimizer, milestones = [13, 21], gamma = 0.1) 94 | 95 | # use amp 96 | if args.use_amp == True: 97 | [net, margin], optimizer = amp.initialize([net, margin], optimizer, opt_level="O1") # the patched optimizer must be used in the training loop 98 | 99 | # use multiple GPU devices 100 | if args.use_multi_gpus == True: 101 | net = DataParallel(net).to(device) 102 | margin = DataParallel(margin).to(device) 103 | 104 | # best test accuracy and corresponding iteration times 105 | best_lfw_accuracy = 0.0 106 | best_lfw_iters = 0 107 | best_agedb_accuracy = 0.0 108 | best_agedb_iters = 0 109 | best_cfp_accuracy = 0.0 110 | best_cfp_iters = 0 111 | 112 | # training network 113 | current_iters = 0 114 | total_iters = args.total_epoch*len(train_dataloader) 115 | since_time = time.time() 116 | for epoch in range(1, args.total_epoch + 1): 117 | # train model 118 | _print('Training Epoch: '+str(epoch)+'/'+str(args.total_epoch)) 119 | net.train() 120 | for train_data in train_dataloader: 121 | # get images and labels from dataloader 122 | images, labels = train_data[0], train_data[1] 123 | images = images.to(device) 124 | labels = labels.to(device) 125 | # forward propagation 126 | embeddings = net(images) 127 | output = margin(embeddings, labels) 128 | # calculate loss 129 | loss = loss_fn(output, labels) 130 | # back propagation 131 | optimizer.zero_grad() 132 | if args.use_amp == True: 133 | if not torch.isnan(loss): 134 | with amp.scale_loss(loss, optimizer) as scaled_loss: 135 | scaled_loss.backward() 136 | optimizer.step() 137 | else: 138 | loss.backward() 139 | optimizer.step() 140 | current_iters += 1 141 | 142 | # print train information 143 | if current_iters % 100 == 0: 144 | # calculate train accuracy 145 | _, prediction = torch.max(output.data, 1) 146 | num_correct_classified = float((np.array(prediction.cpu()) == np.array(labels.data.cpu())).sum()) 147 | train_accuracy = num_correct_classified / labels.size(0) # use the actual batch size (the last batch can be smaller) 148 | # calculate remaining training time 149 | 
current_time = time.time() 150 | remaining_time = (total_iters - current_iters)*(current_time - since_time)/100 151 | remaining_time = ChangeTimeFormat(remaining_time) 152 | since_time = time.time() 153 | # draw softmax loss curve and train accuracy curve 154 | if args.use_visdom == True: 155 | vis.plot_curves({'softmax loss': loss.item()}, x=current_iters, title='train loss', 156 | xlabel='iterations', ylabel='train loss') 157 | vis.plot_curves({'train accuracy': train_accuracy}, x=current_iters, title='train accuracy', 158 | xlabel='iterations', ylabel='train accuracy') 159 | # print train information, including current epoch, current iteration count, loss, train accuracy, learning rate and remaining training time 160 | _print('Iters:'+str(epoch)+'/'+str(current_iters)+' loss: %.4f' % (loss.item())+' train accuracy:'+ 161 | str(train_accuracy)+' learning rate:'+str(optimizer.param_groups[0]['lr'])+' remaining time:'+ 162 | str(remaining_time)) 163 | 164 | # test model 165 | if current_iters % args.test_freq == 0: 166 | net.eval() 167 | # test model on lfw 168 | getFeatureFromTorch('./Test_Data/cur_lfw_result.mat', net, device, lfw_dataset, lfw_dataloader) 169 | lfw_accuracy = evaluation_10_fold('./Test_Data/cur_lfw_result.mat') 170 | lfw_accuracy = np.mean(lfw_accuracy) 171 | _print('LFW Average Accuracy: {:.4f}%'.format(lfw_accuracy * 100)) 172 | if best_lfw_accuracy <= lfw_accuracy * 100: 173 | best_lfw_accuracy = lfw_accuracy * 100 174 | best_lfw_iters = current_iters 175 | # test model on AgeDB30 176 | getFeatureFromTorch('./Test_Data/cur_agedb_result.mat', net, device, agedb_dataset, agedb_dataloader) 177 | agedb_accuracy = evaluation_10_fold('./Test_Data/cur_agedb_result.mat') 178 | agedb_accuracy = np.mean(agedb_accuracy) 179 | _print('AgeDB-30 Average Accuracy: {:.4f}%'.format(agedb_accuracy * 100)) 180 | if best_agedb_accuracy <= agedb_accuracy * 100: 181 | best_agedb_accuracy = agedb_accuracy * 100 182 | best_agedb_iters = current_iters 183 | # test model on CFP-FP 184 | getFeatureFromTorch('./Test_Data/cur_cfp_result.mat', net, device, cfp_dataset, cfp_dataloader) 185 | cfp_accuracy = evaluation_10_fold('./Test_Data/cur_cfp_result.mat') 186 | cfp_accuracy = np.mean(cfp_accuracy) 187 | _print('CFP-FP Average Accuracy: {:.4f}%'.format(cfp_accuracy * 100)) 188 | if best_cfp_accuracy <= cfp_accuracy * 100: 189 | best_cfp_accuracy = cfp_accuracy * 100 190 | best_cfp_iters = current_iters 191 | # draw test accuracy curve 192 | if args.use_visdom == True: 193 | vis.plot_curves({'lfw_accuracy': lfw_accuracy,'agedb_accuracy':agedb_accuracy,'cfp_accuracy':cfp_accuracy}, x=current_iters, title='test accuracy', xlabel='iterations', 194 | ylabel='test accuracy') 195 | # print current best test accuracy 196 | _print('Current Best Test Accuracy: LFW: {:.4f}% in iters: {}, AgeDB-30: {:.4f}% in iters: {} and CFP-FP: {:.4f}% in iters: {}'.format( 197 | best_lfw_accuracy, best_lfw_iters, best_agedb_accuracy, best_agedb_iters, best_cfp_accuracy, best_cfp_iters)) 198 | net.train() 199 | 200 | # save model 201 | if epoch == args.total_epoch and current_iters % args.save_freq == 0: 202 | if not args.para_adj_mode: 203 | _print('Saving model: {}'.format(current_iters)) 204 | if not os.path.exists(save_dir): 205 | os.mkdir(save_dir) 206 | torch.save(net.module.state_dict() if args.use_multi_gpus else net.state_dict(), os.path.join(save_dir, 'Iter_%d_net.pth' % current_iters)) 207 | torch.save(margin.module.state_dict() if args.use_multi_gpus else margin.state_dict(), os.path.join(save_dir, 'Iter_%d_margin.pth' % current_iters)) 208 | 209 | # adjust learning rate 
210 | scheduler.step() 211 | 212 | # test model on megaface 213 | if args.test_on_megaface == True: 214 | pass 215 | 216 | -------------------------------------------------------------------------------- /Trained_Models/readme.txt: -------------------------------------------------------------------------------- 1 | The trained models will be saved here. 2 | -------------------------------------------------------------------------------- /Utils/Datasets_Utils/generate_dataset_list.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | def dataset_list(dataset_path, list_path): 4 | label_list = os.listdir(dataset_path) 5 | f = open(list_path, 'w') 6 | k = 0 7 | for i in label_list: 8 | label_path = os.path.join(dataset_path, i) 9 | if os.listdir(label_path): 10 | image_list = os.listdir(label_path) 11 | for j in image_list: 12 | image_path = os.path.join(label_path, j) 13 | f.write(image_path + ' ' + str(k) + '\n') 14 | k = k + 1 15 | f.close() 16 | 17 | if __name__ == '__main__': 18 | dataset = '/data/face_datasets/train_datasets/webface-112x112/casia-112x112' 19 | list_file = '/data/face_datasets/train_datasets/webface-112x112/casia-112x112.list' 20 | dataset_list(dataset, list_file) 21 | -------------------------------------------------------------------------------- /Utils/Datasets_Utils/load_images_from_bin.py: -------------------------------------------------------------------------------- 1 | import os 2 | import cv2 3 | import pickle 4 | import mxnet as mx 5 | from tqdm import tqdm 6 | 7 | # For the MS1M dataset, insightface provides a mxnet .rec file; just install mxnet (the CPU version is enough) to extract the images 8 | 9 | def load_mx_rec(rec_path): 10 | save_path = os.path.join(rec_path, 'MS1M_112x112') 11 | if not os.path.exists(save_path): 12 | os.makedirs(save_path) 13 | 14 | imgrec = mx.recordio.MXIndexedRecordIO(os.path.join(rec_path, 'train.idx'), os.path.join(rec_path, 'train.rec'), 'r') 15 | img_info = imgrec.read_idx(0) 16 | header,_ = mx.recordio.unpack(img_info) 17 | max_idx = int(header.label[0]) 18 | for idx in tqdm(range(1,max_idx)): 19 | img_info = imgrec.read_idx(idx) 20 | header, img = mx.recordio.unpack_img(img_info) 21 | label = int(header.label) 22 | label_path = os.path.join(save_path, str(label).zfill(6)) 23 | if not os.path.exists(label_path): 24 | os.makedirs(label_path) 25 | cv2.imwrite(os.path.join(label_path, str(idx).zfill(8) + '.jpg'), img) 26 | 27 | def load_image_from_bin(bin_path, save_dir): 28 | if not os.path.exists(save_dir): 29 | os.makedirs(save_dir) 30 | file = open(os.path.join(save_dir, '../', 'agedb_30_pair.txt'), 'w') 31 | bins, issame_list = pickle.load(open(bin_path, 'rb'), encoding='bytes') 32 | for idx in tqdm(range(len(bins))): 33 | _bin = bins[idx] 34 | img = mx.image.imdecode(_bin).asnumpy() 35 | img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR) 36 | cv2.imwrite(os.path.join(save_dir, str(idx+1).zfill(5)+'.jpg'), img) 37 | if idx % 2 == 0: 38 | label = 1 if issame_list[idx//2] else -1 39 | file.write(str(idx+1).zfill(5) + '.jpg' + ' ' + str(idx+2).zfill(5) +'.jpg' + ' ' + str(label) + '\n') 40 | 41 | if __name__ == '__main__': 42 | bin_path = '/data/face_datasets/train_datasets/MS1M_112x112/agedb_30.bin' 43 | save_dir = '/data/face_datasets/train_datasets/MS1M_112x112/agedb_30' 44 | load_image_from_bin(bin_path, save_dir) 45 | # rec_path = '/data/face_datasets/train_datasets/MS1M_112x112' 46 | # load_mx_rec(rec_path) 47 | --------------------------------------------------------------------------------
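For reference, load_image_from_bin above also writes agedb_30_pair.txt next to the extracted images; each line holds two consecutive image names and a 1/-1 same/different flag (the lines below are illustrative), which is the same space-separated pair format that the evaluation Dataset classes parse:

00001.jpg 00002.jpg 1
00003.jpg 00004.jpg -1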
/Utils/Other_Utils/ChangeTimeFormat.py: -------------------------------------------------------------------------------- 1 | import math 2 | 3 | def ChangeTimeFormat(time): 4 | day = 24*60*60 5 | hour = 60*60 6 | minute = 60 7 | if time < 60: 8 | return "%d sec"%math.ceil(time) 9 | elif time > day: 10 | days = divmod(time,day) 11 | return "%d days %s"%(int(days[0]),ChangeTimeFormat(days[1])) 12 | elif time > hour: 13 | hours = divmod(time,hour) 14 | return '%d hours %s'%(int(hours[0]),ChangeTimeFormat(hours[1])) 15 | else: 16 | mins = divmod(time,minute) 17 | return "%d mins %d sec"%(int(mins[0]),math.ceil(mins[1])) 18 | 19 | if __name__ == '__main__': 20 | time = 15611561561 21 | time = ChangeTimeFormat(time) 22 | print(time) -------------------------------------------------------------------------------- /Utils/Other_Utils/Logging.py: -------------------------------------------------------------------------------- 1 | import os 2 | import logging 3 | 4 | def init_logger(output_dir): 5 | # create logger 6 | logger = logging.getLogger() 7 | logger.setLevel(logging.DEBUG) 8 | 9 | # create stream handler 10 | sh = logging.StreamHandler() 11 | sh.setLevel(logging.INFO) 12 | 13 | # create file handler 14 | fh = logging.FileHandler(os.path.join(output_dir, 'Training_Log.txt')) 15 | fh.setLevel(logging.INFO) 16 | 17 | # create formatter 18 | formatter = logging.Formatter('%(asctime)s %(message)s', datefmt='%Y%m%d-%H:%M:%S') 19 | 20 | # add formatter to fh 21 | fh.setFormatter(formatter) 22 | 23 | # add sh and fh to logger 24 | # The final log level is the higher one between the default and the one in handler 25 | logger.addHandler(sh) 26 | logger.addHandler(fh) 27 | return logger 28 | -------------------------------------------------------------------------------- /Utils/Other_Utils/Visualizer.py: -------------------------------------------------------------------------------- 1 | import visdom 2 | import numpy as np 3 | 4 | class Visualizer(): 5 | def __init__(self, env='default', **kwargs): 6 | self.vis = visdom.Visdom(env=env, **kwargs) 7 | self.wins = set() # titles of the windows created so far 8 | 9 | def plot_curves(self, Y, x, title='loss', xlabel='iterations', ylabel='accuracy'): 10 | keys = list(Y.keys()) 11 | vals = list(Y.values()) 12 | if len(vals) == 1: 13 | y = np.array(vals) 14 | else: 15 | y = np.array(vals).reshape(-1, len(vals)) 16 | self.vis.line(Y=y, 17 | X=np.array([x]), 18 | win=title, 19 | opts=dict(legend=keys, title = title, xlabel=xlabel, ylabel=ylabel), 20 | update='append' if title in self.wins else None) 21 | self.wins.add(title) 22 | 23 | 24 | if __name__ == '__main__': 25 | vis = Visualizer(env='test1') 26 | for i in range(10): 27 | y1 = i 28 | y2 = 2 * i 29 | y3 = 6 * i 30 | vis.plot_curves({'acc_lfw': y1, 'acc_agedb': y2}, x=i, title='train') 31 | vis.plot_curves({'acc_lfw': y1, 'acc_agedb': y2, 'acc_cfpfp': y3}, x=i, title='test') -------------------------------------------------------------------------------- /Utils/Other_Utils/__pycache__/ChangeTimeFormat.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuasarLight/Pytorch_Face_Recognition/abdd11b0067658f3857b94f52b136ff6839ffece/Utils/Other_Utils/__pycache__/ChangeTimeFormat.cpython-36.pyc -------------------------------------------------------------------------------- /Utils/Other_Utils/__pycache__/ChangeTimeFormat.cpython-37.pyc: --------------------------------------------------------------------------------
https://raw.githubusercontent.com/QuasarLight/Pytorch_Face_Recognition/abdd11b0067658f3857b94f52b136ff6839ffece/Utils/Other_Utils/__pycache__/ChangeTimeFormat.cpython-37.pyc -------------------------------------------------------------------------------- /Utils/Other_Utils/__pycache__/Logging.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuasarLight/Pytorch_Face_Recognition/abdd11b0067658f3857b94f52b136ff6839ffece/Utils/Other_Utils/__pycache__/Logging.cpython-37.pyc -------------------------------------------------------------------------------- /Utils/Other_Utils/__pycache__/Visualizer.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuasarLight/Pytorch_Face_Recognition/abdd11b0067658f3857b94f52b136ff6839ffece/Utils/Other_Utils/__pycache__/Visualizer.cpython-37.pyc -------------------------------------------------------------------------------- /Utils/__init__.py: -------------------------------------------------------------------------------- 1 | from Utils.Other_Utils.ChangeTimeFormat import ChangeTimeFormat 2 | from Utils.Other_Utils.Logging import init_logger 3 | from Utils.Other_Utils.Visualizer import Visualizer -------------------------------------------------------------------------------- /Utils/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuasarLight/Pytorch_Face_Recognition/abdd11b0067658f3857b94f52b136ff6839ffece/Utils/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /Utils/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuasarLight/Pytorch_Face_Recognition/abdd11b0067658f3857b94f52b136ff6839ffece/Utils/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /__pycache__/LFW_Evaluation.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuasarLight/Pytorch_Face_Recognition/abdd11b0067658f3857b94f52b136ff6839ffece/__pycache__/LFW_Evaluation.cpython-37.pyc --------------------------------------------------------------------------------
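As a closing reference, a minimal usage sketch for the helpers re-exported by Utils/__init__.py (the log directory and environment name are illustrative, and the directory must already exist):

from Utils import ChangeTimeFormat, init_logger, Visualizer

logger = init_logger('./Trained_Models/demo_run')         # writes Training_Log.txt into that directory
logger.info('remaining time: ' + ChangeTimeFormat(3661))  # -> '1 hours 1 mins 1 sec'
vis = Visualizer(env='demo')                              # requires a running visdom server
vis.plot_curves({'loss': 0.5}, x=1, title='train loss')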