├── LICENSE ├── Learner.py ├── README.md ├── __init__.py ├── config.py ├── data ├── __init__.py └── data_pipe.py ├── evaluate_model.ipynb ├── face_verify.py ├── infer_on_video.py ├── model.py ├── mtcnn.py ├── mtcnn_pytorch ├── .gitignore ├── LICENSE ├── README.md ├── caffe_models │ ├── det1.caffemodel │ ├── det1.prototxt │ ├── det2.caffemodel │ ├── det2.prototxt │ ├── det3.caffemodel │ ├── det3.prototxt │ ├── det4.caffemodel │ └── det4.prototxt ├── extract_weights_from_caffe_models.py ├── get_aligned_face_from_mtcnn.ipynb ├── images │ ├── example.png │ ├── face0.jpg │ ├── jf.jpg │ ├── office1.jpg │ ├── office2.jpg │ ├── office3.jpg │ ├── office4.jpg │ └── office5.jpg ├── refine_faces.ipynb ├── src │ ├── __init__.py │ ├── align_trans.py │ ├── box_utils.py │ ├── detector.py │ ├── first_stage.py │ ├── get_nets.py │ ├── matlab_cp2tform.py │ ├── visualization_utils.py │ └── weights │ │ ├── onet.npy │ │ ├── pnet.npy │ │ └── rnet.npy ├── test_on_images.ipynb └── try_mtcnn_step_by_step.ipynb ├── prepare_data.py ├── requirements.txt ├── take_pic.py ├── train.py ├── utils.py ├── verifacation.py └── work_space ├── history ├── log ├── models └── save /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2018 TreB1eN 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /Learner.py: -------------------------------------------------------------------------------- 1 | from data.data_pipe import de_preprocess, get_train_loader, get_val_data 2 | from model import Backbone, Arcface, MobileFaceNet, Am_softmax, l2_norm 3 | from verifacation import evaluate 4 | import torch 5 | from torch import optim 6 | import numpy as np 7 | from tqdm import tqdm 8 | from tensorboardX import SummaryWriter 9 | from matplotlib import pyplot as plt 10 | plt.switch_backend('agg') 11 | from utils import get_time, gen_plot, hflip_batch, separate_bn_paras 12 | from PIL import Image 13 | from torchvision import transforms as trans 14 | import math 15 | import bcolz 16 | 17 | class face_learner(object): 18 | def __init__(self, conf, inference=False): 19 | print(conf) 20 | if conf.use_mobilfacenet: 21 | self.model = MobileFaceNet(conf.embedding_size).to(conf.device) 22 | print('MobileFaceNet model generated') 23 | else: 24 | self.model = Backbone(conf.net_depth, conf.drop_ratio, conf.net_mode).to(conf.device) 25 | print('{}_{} model generated'.format(conf.net_mode, conf.net_depth)) 26 | 27 | if not inference: 28 | self.milestones = conf.milestones 29 | self.loader, self.class_num = get_train_loader(conf) 30 | 31 | self.writer = SummaryWriter(conf.log_path) 32 | self.step = 0 33 | self.head = Arcface(embedding_size=conf.embedding_size, classnum=self.class_num).to(conf.device) 34 | 35 | print('two model heads generated') 36 | 37 | paras_only_bn, paras_wo_bn = separate_bn_paras(self.model) 38 | 39 | if conf.use_mobilfacenet: 40 | self.optimizer = optim.SGD([ 41 | {'params': paras_wo_bn[:-1], 'weight_decay': 4e-5}, 42 | {'params': [paras_wo_bn[-1]] + [self.head.kernel], 'weight_decay': 4e-4}, 43 | {'params': paras_only_bn} 44 | ], lr = conf.lr, momentum = conf.momentum) 45 | else: 46 | self.optimizer = optim.SGD([ 47 | {'params': paras_wo_bn + [self.head.kernel], 'weight_decay': 5e-4}, 48 | {'params': paras_only_bn} 49 | ], lr = conf.lr, momentum = conf.momentum) 50 | print(self.optimizer) 51 | # self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(self.optimizer, patience=40, verbose=True) 52 | 53 | print('optimizers generated') 54 | self.board_loss_every = len(self.loader)//100 55 | self.evaluate_every = len(self.loader)//10 56 | self.save_every = len(self.loader)//5 57 | self.agedb_30, self.cfp_fp, self.lfw, self.agedb_30_issame, self.cfp_fp_issame, self.lfw_issame = get_val_data(self.loader.dataset.root.parent) 58 | else: 59 | self.threshold = conf.threshold 60 | 61 | def save_state(self, conf, accuracy, to_save_folder=False, extra=None, model_only=False): 62 | if to_save_folder: 63 | save_path = conf.save_path 64 | else: 65 | save_path = conf.model_path 66 | torch.save( 67 | self.model.state_dict(), save_path / 68 | ('model_{}_accuracy:{}_step:{}_{}.pth'.format(get_time(), accuracy, self.step, extra))) 69 | if not model_only: 70 | torch.save( 71 | self.head.state_dict(), save_path / 72 | ('head_{}_accuracy:{}_step:{}_{}.pth'.format(get_time(), accuracy, self.step, extra))) 73 | torch.save( 74 | self.optimizer.state_dict(), save_path / 75 | ('optimizer_{}_accuracy:{}_step:{}_{}.pth'.format(get_time(), accuracy, self.step, extra))) 76 | 77 | def load_state(self, conf, fixed_str, from_save_folder=False, model_only=False): 78 | if from_save_folder: 79 | save_path = conf.save_path 80 | else: 81 | save_path = conf.model_path 82 | 
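        # (annotation, not in the original source) load_state expects checkpoints named with the
        # same scheme save_state produces: 'model_{fixed_str}', 'head_{fixed_str}' and
        # 'optimizer_{fixed_str}'. fixed_str is everything after the leading 'model_' prefix,
        # e.g. 'final.pth' or 'cpu_final.pth' as used by face_verify.py and infer_on_video.py;
        # with model_only=False the matching head and optimizer files are loaded as well.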
self.model.load_state_dict(torch.load(save_path/'model_{}'.format(fixed_str))) 83 | if not model_only: 84 | self.head.load_state_dict(torch.load(save_path/'head_{}'.format(fixed_str))) 85 | self.optimizer.load_state_dict(torch.load(save_path/'optimizer_{}'.format(fixed_str))) 86 | 87 | def board_val(self, db_name, accuracy, best_threshold, roc_curve_tensor): 88 | self.writer.add_scalar('{}_accuracy'.format(db_name), accuracy, self.step) 89 | self.writer.add_scalar('{}_best_threshold'.format(db_name), best_threshold, self.step) 90 | self.writer.add_image('{}_roc_curve'.format(db_name), roc_curve_tensor, self.step) 91 | # self.writer.add_scalar('{}_val:true accept ratio'.format(db_name), val, self.step) 92 | # self.writer.add_scalar('{}_val_std'.format(db_name), val_std, self.step) 93 | # self.writer.add_scalar('{}_far:False Acceptance Ratio'.format(db_name), far, self.step) 94 | 95 | def evaluate(self, conf, carray, issame, nrof_folds = 5, tta = False): 96 | self.model.eval() 97 | idx = 0 98 | embeddings = np.zeros([len(carray), conf.embedding_size]) 99 | with torch.no_grad(): 100 | while idx + conf.batch_size <= len(carray): 101 | batch = torch.tensor(carray[idx:idx + conf.batch_size]) 102 | if tta: 103 | fliped = hflip_batch(batch) 104 | emb_batch = self.model(batch.to(conf.device)) + self.model(fliped.to(conf.device)) 105 | embeddings[idx:idx + conf.batch_size] = l2_norm(emb_batch) 106 | else: 107 | embeddings[idx:idx + conf.batch_size] = self.model(batch.to(conf.device)).cpu() 108 | idx += conf.batch_size 109 | if idx < len(carray): 110 | batch = torch.tensor(carray[idx:]) 111 | if tta: 112 | fliped = hflip_batch(batch) 113 | emb_batch = self.model(batch.to(conf.device)) + self.model(fliped.to(conf.device)) 114 | embeddings[idx:] = l2_norm(emb_batch) 115 | else: 116 | embeddings[idx:] = self.model(batch.to(conf.device)).cpu() 117 | tpr, fpr, accuracy, best_thresholds = evaluate(embeddings, issame, nrof_folds) 118 | buf = gen_plot(fpr, tpr) 119 | roc_curve = Image.open(buf) 120 | roc_curve_tensor = trans.ToTensor()(roc_curve) 121 | return accuracy.mean(), best_thresholds.mean(), roc_curve_tensor 122 | 123 | def find_lr(self, 124 | conf, 125 | init_value=1e-8, 126 | final_value=10., 127 | beta=0.98, 128 | bloding_scale=3., 129 | num=None): 130 | if not num: 131 | num = len(self.loader) 132 | mult = (final_value / init_value)**(1 / num) 133 | lr = init_value 134 | for params in self.optimizer.param_groups: 135 | params['lr'] = lr 136 | self.model.train() 137 | avg_loss = 0. 138 | best_loss = 0. 
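        # (annotation, not in the original source) find_lr is an LR range test: the learning rate
        # grows geometrically by `mult` each batch from init_value towards final_value, the loss is
        # tracked with an exponential moving average (bias-corrected by 1 - beta**batch_num), and
        # the sweep stops early once the smoothed loss exceeds bloding_scale * best_loss.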
139 | batch_num = 0 140 | losses = [] 141 | log_lrs = [] 142 | for i, (imgs, labels) in tqdm(enumerate(self.loader), total=num): 143 | 144 | imgs = imgs.to(conf.device) 145 | labels = labels.to(conf.device) 146 | batch_num += 1 147 | 148 | self.optimizer.zero_grad() 149 | 150 | embeddings = self.model(imgs) 151 | thetas = self.head(embeddings, labels) 152 | loss = conf.ce_loss(thetas, labels) 153 | 154 | #Compute the smoothed loss 155 | avg_loss = beta * avg_loss + (1 - beta) * loss.item() 156 | self.writer.add_scalar('avg_loss', avg_loss, batch_num) 157 | smoothed_loss = avg_loss / (1 - beta**batch_num) 158 | self.writer.add_scalar('smoothed_loss', smoothed_loss,batch_num) 159 | #Stop if the loss is exploding 160 | if batch_num > 1 and smoothed_loss > bloding_scale * best_loss: 161 | print('exited with best_loss at {}'.format(best_loss)) 162 | plt.plot(log_lrs[10:-5], losses[10:-5]) 163 | return log_lrs, losses 164 | #Record the best loss 165 | if smoothed_loss < best_loss or batch_num == 1: 166 | best_loss = smoothed_loss 167 | #Store the values 168 | losses.append(smoothed_loss) 169 | log_lrs.append(math.log10(lr)) 170 | self.writer.add_scalar('log_lr', math.log10(lr), batch_num) 171 | #Do the SGD step 172 | #Update the lr for the next step 173 | 174 | loss.backward() 175 | self.optimizer.step() 176 | 177 | lr *= mult 178 | for params in self.optimizer.param_groups: 179 | params['lr'] = lr 180 | if batch_num > num: 181 | plt.plot(log_lrs[10:-5], losses[10:-5]) 182 | return log_lrs, losses 183 | 184 | def train(self, conf, epochs): 185 | self.model.train() 186 | running_loss = 0. 187 | for e in range(epochs): 188 | print('epoch {} started'.format(e)) 189 | if e == self.milestones[0]: 190 | self.schedule_lr() 191 | if e == self.milestones[1]: 192 | self.schedule_lr() 193 | if e == self.milestones[2]: 194 | self.schedule_lr() 195 | for imgs, labels in tqdm(iter(self.loader)): 196 | imgs = imgs.to(conf.device) 197 | labels = labels.to(conf.device) 198 | self.optimizer.zero_grad() 199 | embeddings = self.model(imgs) 200 | thetas = self.head(embeddings, labels) 201 | loss = conf.ce_loss(thetas, labels) 202 | loss.backward() 203 | running_loss += loss.item() 204 | self.optimizer.step() 205 | 206 | if self.step % self.board_loss_every == 0 and self.step != 0: 207 | loss_board = running_loss / self.board_loss_every 208 | self.writer.add_scalar('train_loss', loss_board, self.step) 209 | running_loss = 0. 
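            # (annotation, not in the original source) Cadence set in __init__: train loss is
            # logged every len(loader)//100 steps, the agedb_30 / lfw / cfp_fp validation sets are
            # evaluated every len(loader)//10 steps (evaluate() puts the model in eval mode, hence
            # the model.train() call below), and a checkpoint is written every len(loader)//5 steps.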
210 | 211 | if self.step % self.evaluate_every == 0 and self.step != 0: 212 | accuracy, best_threshold, roc_curve_tensor = self.evaluate(conf, self.agedb_30, self.agedb_30_issame) 213 | self.board_val('agedb_30', accuracy, best_threshold, roc_curve_tensor) 214 | accuracy, best_threshold, roc_curve_tensor = self.evaluate(conf, self.lfw, self.lfw_issame) 215 | self.board_val('lfw', accuracy, best_threshold, roc_curve_tensor) 216 | accuracy, best_threshold, roc_curve_tensor = self.evaluate(conf, self.cfp_fp, self.cfp_fp_issame) 217 | self.board_val('cfp_fp', accuracy, best_threshold, roc_curve_tensor) 218 | self.model.train() 219 | if self.step % self.save_every == 0 and self.step != 0: 220 | self.save_state(conf, accuracy) 221 | 222 | self.step += 1 223 | 224 | self.save_state(conf, accuracy, to_save_folder=True, extra='final') 225 | 226 | def schedule_lr(self): 227 | for params in self.optimizer.param_groups: 228 | params['lr'] /= 10 229 | print(self.optimizer) 230 | 231 | def infer(self, conf, faces, target_embs, tta=False): 232 | ''' 233 | faces : list of PIL Image 234 | target_embs : [n, 512] computed embeddings of faces in facebank 235 | names : recorded names of faces in facebank 236 | tta : test time augmentation (hfilp, that's all) 237 | ''' 238 | embs = [] 239 | for img in faces: 240 | if tta: 241 | mirror = trans.functional.hflip(img) 242 | emb = self.model(conf.test_transform(img).to(conf.device).unsqueeze(0)) 243 | emb_mirror = self.model(conf.test_transform(mirror).to(conf.device).unsqueeze(0)) 244 | embs.append(l2_norm(emb + emb_mirror)) 245 | else: 246 | embs.append(self.model(conf.test_transform(img).to(conf.device).unsqueeze(0))) 247 | source_embs = torch.cat(embs) 248 | 249 | diff = source_embs.unsqueeze(-1) - target_embs.transpose(1,0).unsqueeze(0) 250 | dist = torch.sum(torch.pow(diff, 2), dim=1) 251 | minimum, min_idx = torch.min(dist, dim=1) 252 | min_idx[minimum > self.threshold] = -1 # if no match, set idx to -1 253 | return min_idx, minimum -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # InsightFace_Pytorch 2 | 3 | Pytorch0.4.1 codes for InsightFace 4 | 5 | ------ 6 | 7 | ## 1. Intro 8 | 9 | - This repo is a reimplementation of Arcface[(paper)](https://arxiv.org/abs/1801.07698), or Insightface[(github)](https://github.com/deepinsight/insightface) 10 | - For models, including the pytorch implementation of the backbone modules of Arcface and MobileFacenet 11 | - Codes for transform MXNET data records in Insightface[(github)](https://github.com/deepinsight/insightface) to Image Datafolders are provided 12 | - Pretrained models are posted, include the [MobileFacenet](https://arxiv.org/abs/1804.07573) and IR-SE50 in the original paper 13 | 14 | ------ 15 | 16 | ## 2. 
Pretrained Models & Performance 17 | 18 | [IR-SE50 @ BaiduNetdisk](https://pan.baidu.com/s/12BUjjwy1uUTEF9HCx5qvoQ), [IR-SE50 @ Onedrive](https://1drv.ms/u/s!AhMqVPD44cDOhkPsOU2S_HFpY9dC) 19 | 20 | | LFW(%) | CFP-FF(%) | CFP-FP(%) | AgeDB-30(%) | calfw(%) | cplfw(%) | vgg2_fp(%) | 21 | | ------ | --------- | --------- | ----------- | -------- | -------- | ---------- | 22 | | 0.9952 | 0.9962 | 0.9504 | 0.9622 | 0.9557 | 0.9107 | 0.9386 | 23 | 24 | [Mobilefacenet @ BaiduNetDisk](https://pan.baidu.com/s/1hqNNkcAjQOSxUjofboN6qg), [Mobilefacenet @ OneDrive](https://1drv.ms/u/s!AhMqVPD44cDOhkSMHodSH4rhfb5u) 25 | 26 | | LFW(%) | CFP-FF(%) | CFP-FP(%) | AgeDB-30(%) | calfw(%) | cplfw(%) | vgg2_fp(%) | 27 | | ------ | --------- | --------- | ----------- | -------- | -------- | ---------- | 28 | | 0.9918 | 0.9891 | 0.8986 | 0.9347 | 0.9402 | 0.866 | 0.9100 | 29 | 30 | ## 3. How to use 31 | 32 | - clone 33 | 34 | ``` 35 | git clone https://github.com/TropComplique/mtcnn-pytorch.git 36 | ``` 37 | 38 | ### 3.1 Data Preparation 39 | 40 | #### 3.1.1 Prepare Facebank (For testing over camera or video) 41 | 42 | Provide the face images your want to detect in the data/face_bank folder, and guarantee it have a structure like following: 43 | 44 | ``` 45 | data/facebank/ 46 | ---> id1/ 47 | ---> id1_1.jpg 48 | ---> id2/ 49 | ---> id2_1.jpg 50 | ---> id3/ 51 | ---> id3_1.jpg 52 | ---> id3_2.jpg 53 | ``` 54 | 55 | #### 3.1.2 download the pretrained model to work_space/model 56 | 57 | If more than 1 image appears in one folder, an average embedding will be calculated 58 | 59 | #### 3.2.3 Prepare Dataset ( For training) 60 | 61 | download the refined dataset: (emore recommended) 62 | 63 | - [emore dataset @ BaiduDrive](https://pan.baidu.com/s/1eXohwNBHbbKXh5KHyItVhQ), [emore dataset @ Dropbox](https://www.dropbox.com/s/wpx6tqjf0y5mf6r/faces_ms1m-refine-v2_112x112.zip?dl=0) 64 | - More Dataset please refer to the [original post](https://github.com/deepinsight/insightface/wiki/Dataset-Zoo) 65 | 66 | **Note:** If you use the refined [MS1M](https://arxiv.org/abs/1607.08221) dataset and the cropped [VGG2](https://arxiv.org/abs/1710.08092) dataset, please cite the original papers. 67 | 68 | - after unzip the files to 'data' path, run : 69 | 70 | ``` 71 | python prepare_data.py 72 | ``` 73 | 74 | after the execution, you should find following structure: 75 | 76 | ``` 77 | faces_emore/ 78 | ---> agedb_30 79 | ---> calfw 80 | ---> cfp_ff 81 | ---> cfp_fp 82 | ---> cfp_fp 83 | ---> cplfw 84 | --->imgs 85 | ---> lfw 86 | ---> vgg2_fp 87 | ``` 88 | 89 | ------ 90 | 91 | ### 3.2 detect over camera: 92 | 93 | - 1. download the desired weights to model folder: 94 | 95 | - [IR-SE50 @ BaiduNetdisk](https://pan.baidu.com/s/12BUjjwy1uUTEF9HCx5qvoQ) 96 | - [IR-SE50 @ Onedrive](https://1drv.ms/u/s!AhMqVPD44cDOhkPsOU2S_HFpY9dC) 97 | - [Mobilefacenet @ BaiduNetDisk](https://pan.baidu.com/s/1hqNNkcAjQOSxUjofboN6qg) 98 | - [Mobilefacenet @ OneDrive](https://1drv.ms/u/s!AhMqVPD44cDOhkSMHodSH4rhfb5u) 99 | 100 | - 2 to take a picture, run 101 | 102 | ``` 103 | python take_pic.py -n name 104 | ``` 105 | 106 | press q to take a picture, it will only capture 1 highest possibility face if more than 1 person appear in the camera 107 | 108 | - 3 or you can put any preexisting photo into the facebank directory, the file structure is as following: 109 | 110 | ``` 111 | - facebank/ 112 | name1/ 113 | photo1.jpg 114 | photo2.jpg 115 | ... 116 | name2/ 117 | photo1.jpg 118 | photo2.jpg 119 | ... 120 | ..... 
121 | if more than 1 image appears in the directory, average embedding will be calculated 122 | ``` 123 | 124 | - 4 to start 125 | 126 | ``` 127 | python face_verify.py 128 | ``` 129 | 130 | - - - 131 | 132 | ### 3.3 detect over video: 133 | 134 | ``` 135 | ​``` 136 | python infer_on_video.py -f [video file name] -s [save file name] 137 | ​``` 138 | ``` 139 | 140 | the video file should be inside the data/face_bank folder 141 | 142 | - Video Detection Demo [@Youtube](https://www.youtube.com/watch?v=6r9RCRmxtHE) 143 | 144 | ### 3.4 Training: 145 | 146 | ``` 147 | ​``` 148 | python train.py -b [batch_size] -lr [learning rate] -e [epochs] 149 | 150 | # python train.py -net mobilefacenet -b 200 -w 4 151 | ​``` 152 | ``` 153 | 154 | ## 4. References 155 | 156 | - This repo is mainly inspired by [deepinsight/insightface](https://github.com/deepinsight/insightface) and [InsightFace_TF](https://github.com/auroua/InsightFace_TF) 157 | 158 | ## PS 159 | 160 | - PRs are welcome, in case that I don't have the resource to train some large models like the 100 and 151 layers model 161 | - Email : treb1en@qq.com 162 | -------------------------------------------------------------------------------- /__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TreB1eN/InsightFace_Pytorch/350ff7aa9c9db8d369d1932e14d2a4d11a3e9553/__init__.py -------------------------------------------------------------------------------- /config.py: -------------------------------------------------------------------------------- 1 | from easydict import EasyDict as edict 2 | from pathlib import Path 3 | import torch 4 | from torch.nn import CrossEntropyLoss 5 | from torchvision import transforms as trans 6 | 7 | def get_config(training = True): 8 | conf = edict() 9 | conf.data_path = Path('data') 10 | conf.work_path = Path('work_space/') 11 | conf.model_path = conf.work_path/'models' 12 | conf.log_path = conf.work_path/'log' 13 | conf.save_path = conf.work_path/'save' 14 | conf.input_size = [112,112] 15 | conf.embedding_size = 512 16 | conf.use_mobilfacenet = False 17 | conf.net_depth = 50 18 | conf.drop_ratio = 0.6 19 | conf.net_mode = 'ir_se' # or 'ir' 20 | conf.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") 21 | conf.test_transform = trans.Compose([ 22 | trans.ToTensor(), 23 | trans.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]) 24 | ]) 25 | conf.data_mode = 'emore' 26 | conf.vgg_folder = conf.data_path/'faces_vgg_112x112' 27 | conf.ms1m_folder = conf.data_path/'faces_ms1m_112x112' 28 | conf.emore_folder = conf.data_path/'faces_emore' 29 | conf.batch_size = 100 # irse net depth 50 30 | # conf.batch_size = 200 # mobilefacenet 31 | #--------------------Training Config ------------------------ 32 | if training: 33 | conf.log_path = conf.work_path/'log' 34 | conf.save_path = conf.work_path/'save' 35 | # conf.weight_decay = 5e-4 36 | conf.lr = 1e-3 37 | conf.milestones = [12,15,18] 38 | conf.momentum = 0.9 39 | conf.pin_memory = True 40 | # conf.num_workers = 4 # when batchsize is 200 41 | conf.num_workers = 3 42 | conf.ce_loss = CrossEntropyLoss() 43 | #--------------------Inference Config ------------------------ 44 | else: 45 | conf.facebank_path = conf.data_path/'facebank' 46 | conf.threshold = 1.5 47 | conf.face_limit = 10 48 | #when inference, at maximum detect 10 faces in one image, my laptop is slow 49 | conf.min_face_size = 30 50 | # the larger this value, the faster deduction, comes with tradeoff in small faces 51 | return conf 
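# Usage sketch (annotation, not part of config.py): how the config object above is consumed by
# the rest of the repo. The epoch count is an illustrative assumption; pick it to cover the
# conf.milestones schedule.
from config import get_config
from Learner import face_learner

conf = get_config(training=True)      # training=False returns the inference-time config instead
learner = face_learner(conf)          # builds backbone, Arcface head, optimizer and data loader
learner.train(conf, epochs=20)        # runs training with TensorBoard logging and periodic eval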
-------------------------------------------------------------------------------- /data/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TreB1eN/InsightFace_Pytorch/350ff7aa9c9db8d369d1932e14d2a4d11a3e9553/data/__init__.py -------------------------------------------------------------------------------- /data/data_pipe.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | from torch.utils.data import Dataset, ConcatDataset, DataLoader 3 | from torchvision import transforms as trans 4 | from torchvision.datasets import ImageFolder 5 | from PIL import Image, ImageFile 6 | ImageFile.LOAD_TRUNCATED_IMAGES = True 7 | import numpy as np 8 | import cv2 9 | import bcolz 10 | import pickle 11 | import torch 12 | import mxnet as mx 13 | from tqdm import tqdm 14 | 15 | def de_preprocess(tensor): 16 | return tensor*0.5 + 0.5 17 | 18 | def get_train_dataset(imgs_folder): 19 | train_transform = trans.Compose([ 20 | trans.RandomHorizontalFlip(), 21 | trans.ToTensor(), 22 | trans.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]) 23 | ]) 24 | ds = ImageFolder(imgs_folder, train_transform) 25 | class_num = ds[-1][1] + 1 26 | return ds, class_num 27 | 28 | def get_train_loader(conf): 29 | if conf.data_mode in ['ms1m', 'concat']: 30 | ms1m_ds, ms1m_class_num = get_train_dataset(conf.ms1m_folder/'imgs') 31 | print('ms1m loader generated') 32 | if conf.data_mode in ['vgg', 'concat']: 33 | vgg_ds, vgg_class_num = get_train_dataset(conf.vgg_folder/'imgs') 34 | print('vgg loader generated') 35 | if conf.data_mode == 'vgg': 36 | ds = vgg_ds 37 | class_num = vgg_class_num 38 | elif conf.data_mode == 'ms1m': 39 | ds = ms1m_ds 40 | class_num = ms1m_class_num 41 | elif conf.data_mode == 'concat': 42 | for i,(url,label) in enumerate(vgg_ds.imgs): 43 | vgg_ds.imgs[i] = (url, label + ms1m_class_num) 44 | ds = ConcatDataset([ms1m_ds,vgg_ds]) 45 | class_num = vgg_class_num + ms1m_class_num 46 | elif conf.data_mode == 'emore': 47 | ds, class_num = get_train_dataset(conf.emore_folder/'imgs') 48 | loader = DataLoader(ds, batch_size=conf.batch_size, shuffle=True, pin_memory=conf.pin_memory, num_workers=conf.num_workers) 49 | return loader, class_num 50 | 51 | def load_bin(path, rootdir, transform, image_size=[112,112]): 52 | if not rootdir.exists(): 53 | rootdir.mkdir() 54 | bins, issame_list = pickle.load(open(path, 'rb'), encoding='bytes') 55 | data = bcolz.fill([len(bins), 3, image_size[0], image_size[1]], dtype=np.float32, rootdir=rootdir, mode='w') 56 | for i in range(len(bins)): 57 | _bin = bins[i] 58 | img = mx.image.imdecode(_bin).asnumpy() 59 | img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR) 60 | img = Image.fromarray(img.astype(np.uint8)) 61 | data[i, ...] 
= transform(img) 62 | i += 1 63 | if i % 1000 == 0: 64 | print('loading bin', i) 65 | print(data.shape) 66 | np.save(str(rootdir)+'_list', np.array(issame_list)) 67 | return data, issame_list 68 | 69 | def get_val_pair(path, name): 70 | carray = bcolz.carray(rootdir = path/name, mode='r') 71 | issame = np.load(path/'{}_list.npy'.format(name)) 72 | return carray, issame 73 | 74 | def get_val_data(data_path): 75 | agedb_30, agedb_30_issame = get_val_pair(data_path, 'agedb_30') 76 | cfp_fp, cfp_fp_issame = get_val_pair(data_path, 'cfp_fp') 77 | lfw, lfw_issame = get_val_pair(data_path, 'lfw') 78 | return agedb_30, cfp_fp, lfw, agedb_30_issame, cfp_fp_issame, lfw_issame 79 | 80 | def load_mx_rec(rec_path): 81 | save_path = rec_path/'imgs' 82 | if not save_path.exists(): 83 | save_path.mkdir() 84 | imgrec = mx.recordio.MXIndexedRecordIO(str(rec_path/'train.idx'), str(rec_path/'train.rec'), 'r') 85 | img_info = imgrec.read_idx(0) 86 | header,_ = mx.recordio.unpack(img_info) 87 | max_idx = int(header.label[0]) 88 | for idx in tqdm(range(1,max_idx)): 89 | img_info = imgrec.read_idx(idx) 90 | header, img = mx.recordio.unpack_img(img_info) 91 | label = int(header.label) 92 | img = Image.fromarray(img) 93 | label_path = save_path/str(label) 94 | if not label_path.exists(): 95 | label_path.mkdir() 96 | img.save(label_path/'{}.jpg'.format(idx), quality=95) 97 | 98 | # class train_dataset(Dataset): 99 | # def __init__(self, imgs_bcolz, label_bcolz, h_flip=True): 100 | # self.imgs = bcolz.carray(rootdir = imgs_bcolz) 101 | # self.labels = bcolz.carray(rootdir = label_bcolz) 102 | # self.h_flip = h_flip 103 | # self.length = len(self.imgs) - 1 104 | # if h_flip: 105 | # self.transform = trans.Compose([ 106 | # trans.ToPILImage(), 107 | # trans.RandomHorizontalFlip(), 108 | # trans.ToTensor(), 109 | # trans.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]) 110 | # ]) 111 | # self.class_num = self.labels[-1] + 1 112 | 113 | # def __len__(self): 114 | # return self.length 115 | 116 | # def __getitem__(self, index): 117 | # img = torch.tensor(self.imgs[index+1], dtype=torch.float) 118 | # label = torch.tensor(self.labels[index+1], dtype=torch.long) 119 | # if self.h_flip: 120 | # img = de_preprocess(img) 121 | # img = self.transform(img) 122 | # return img, label -------------------------------------------------------------------------------- /face_verify.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | from PIL import Image 3 | import argparse 4 | from pathlib import Path 5 | from multiprocessing import Process, Pipe,Value,Array 6 | import torch 7 | from config import get_config 8 | from mtcnn import MTCNN 9 | from Learner import face_learner 10 | from utils import load_facebank, draw_box_name, prepare_facebank 11 | 12 | if __name__ == '__main__': 13 | parser = argparse.ArgumentParser(description='for face verification') 14 | parser.add_argument("-s", "--save", help="whether save",action="store_true") 15 | parser.add_argument('-th','--threshold',help='threshold to decide identical faces',default=1.54, type=float) 16 | parser.add_argument("-u", "--update", help="whether perform update the facebank",action="store_true") 17 | parser.add_argument("-tta", "--tta", help="whether test time augmentation",action="store_true") 18 | parser.add_argument("-c", "--score", help="whether show the confidence score",action="store_true") 19 | args = parser.parse_args() 20 | 21 | conf = get_config(False) 22 | 23 | mtcnn = MTCNN() 24 | print('mtcnn loaded') 25 | 26 | learner = 
face_learner(conf, True) 27 | learner.threshold = args.threshold 28 | if conf.device.type == 'cpu': 29 | learner.load_state(conf, 'cpu_final.pth', True, True) 30 | else: 31 | learner.load_state(conf, 'final.pth', True, True) 32 | learner.model.eval() 33 | print('learner loaded') 34 | 35 | if args.update: 36 | targets, names = prepare_facebank(conf, learner.model, mtcnn, tta = args.tta) 37 | print('facebank updated') 38 | else: 39 | targets, names = load_facebank(conf) 40 | print('facebank loaded') 41 | 42 | # inital camera 43 | cap = cv2.VideoCapture(0) 44 | cap.set(3,1280) 45 | cap.set(4,720) 46 | if args.save: 47 | video_writer = cv2.VideoWriter(conf.data_path/'recording.avi', cv2.VideoWriter_fourcc(*'XVID'), 6, (1280,720)) 48 | # frame rate 6 due to my laptop is quite slow... 49 | while cap.isOpened(): 50 | isSuccess,frame = cap.read() 51 | if isSuccess: 52 | try: 53 | # image = Image.fromarray(frame[...,::-1]) #bgr to rgb 54 | image = Image.fromarray(frame) 55 | bboxes, faces = mtcnn.align_multi(image, conf.face_limit, conf.min_face_size) 56 | bboxes = bboxes[:,:-1] #shape:[10,4],only keep 10 highest possibiity faces 57 | bboxes = bboxes.astype(int) 58 | bboxes = bboxes + [-1,-1,1,1] # personal choice 59 | results, score = learner.infer(conf, faces, targets, args.tta) 60 | for idx,bbox in enumerate(bboxes): 61 | if args.score: 62 | frame = draw_box_name(bbox, names[results[idx] + 1] + '_{:.2f}'.format(score[idx]), frame) 63 | else: 64 | frame = draw_box_name(bbox, names[results[idx] + 1], frame) 65 | except: 66 | print('detect error') 67 | 68 | cv2.imshow('face Capture', frame) 69 | 70 | if args.save: 71 | video_writer.write(frame) 72 | 73 | if cv2.waitKey(1)&0xFF == ord('q'): 74 | break 75 | 76 | cap.release() 77 | if args.save: 78 | video_writer.release() 79 | cv2.destroyAllWindows() -------------------------------------------------------------------------------- /infer_on_video.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | from PIL import Image 3 | import argparse 4 | from pathlib import Path 5 | import torch 6 | from config import get_config 7 | from mtcnn import MTCNN 8 | from Learner import face_learner 9 | from utils import load_facebank, draw_box_name, prepare_facebank 10 | 11 | if __name__ == '__main__': 12 | parser = argparse.ArgumentParser(description='for face verification') 13 | parser.add_argument("-f", "--file_name", help="video file name",default='video.mp4', type=str) 14 | parser.add_argument("-s", "--save_name", help="output file name",default='recording', type=str) 15 | parser.add_argument('-th','--threshold',help='threshold to decide identical faces',default=1.54, type=float) 16 | parser.add_argument("-u", "--update", help="whether perform update the facebank",action="store_true") 17 | parser.add_argument("-tta", "--tta", help="whether test time augmentation",action="store_true") 18 | parser.add_argument("-c", "--score", help="whether show the confidence score",action="store_true") 19 | parser.add_argument("-b", "--begin", help="from when to start detection(in seconds)", default=0, type=int) 20 | parser.add_argument("-d", "--duration", help="perform detection for how long(in seconds)", default=0, type=int) 21 | 22 | args = parser.parse_args() 23 | 24 | conf = get_config(False) 25 | 26 | mtcnn = MTCNN() 27 | print('mtcnn loaded') 28 | 29 | learner = face_learner(conf, True) 30 | learner.threshold = args.threshold 31 | if conf.device.type == 'cpu': 32 | learner.load_state(conf, 'cpu_final.pth', True, True) 33 | 
else: 34 | learner.load_state(conf, 'final.pth', True, True) 35 | learner.model.eval() 36 | print('learner loaded') 37 | 38 | if args.update: 39 | targets, names = prepare_facebank(conf, learner.model, mtcnn, tta = args.tta) 40 | print('facebank updated') 41 | else: 42 | targets, names = load_facebank(conf) 43 | print('facebank loaded') 44 | 45 | cap = cv2.VideoCapture(str(conf.facebank_path/args.file_name)) 46 | 47 | cap.set(cv2.CAP_PROP_POS_MSEC, args.begin * 1000) 48 | 49 | fps = cap.get(cv2.CAP_PROP_FPS) 50 | video_writer = cv2.VideoWriter(str(conf.facebank_path/'{}.avi'.format(args.save_name)), 51 | cv2.VideoWriter_fourcc(*'XVID'), int(fps), (1280,720)) 52 | 53 | if args.duration != 0: 54 | i = 0 55 | 56 | while cap.isOpened(): 57 | isSuccess,frame = cap.read() 58 | if isSuccess: 59 | # image = Image.fromarray(frame[...,::-1]) #bgr to rgb 60 | image = Image.fromarray(frame) 61 | try: 62 | bboxes, faces = mtcnn.align_multi(image, conf.face_limit, 16) 63 | except: 64 | bboxes = [] 65 | faces = [] 66 | if len(bboxes) == 0: 67 | print('no face') 68 | continue 69 | else: 70 | bboxes = bboxes[:,:-1] #shape:[10,4],only keep 10 highest possibiity faces 71 | bboxes = bboxes.astype(int) 72 | bboxes = bboxes + [-1,-1,1,1] # personal choice 73 | results, score = learner.infer(conf, faces, targets, True) 74 | for idx,bbox in enumerate(bboxes): 75 | if args.score: 76 | frame = draw_box_name(bbox, names[results[idx] + 1] + '_{:.2f}'.format(score[idx]), frame) 77 | else: 78 | frame = draw_box_name(bbox, names[results[idx] + 1], frame) 79 | video_writer.write(frame) 80 | else: 81 | break 82 | if args.duration != 0: 83 | i += 1 84 | if i % 25 == 0: 85 | print('{} second'.format(i // 25)) 86 | if i > 25 * args.duration: 87 | break 88 | cap.release() 89 | video_writer.release() 90 | 91 | -------------------------------------------------------------------------------- /model.py: -------------------------------------------------------------------------------- 1 | from torch.nn import Linear, Conv2d, BatchNorm1d, BatchNorm2d, PReLU, ReLU, Sigmoid, Dropout2d, Dropout, AvgPool2d, MaxPool2d, AdaptiveAvgPool2d, Sequential, Module, Parameter 2 | import torch.nn.functional as F 3 | import torch 4 | from collections import namedtuple 5 | import math 6 | import pdb 7 | 8 | ################################## Original Arcface Model ############################################################# 9 | 10 | class Flatten(Module): 11 | def forward(self, input): 12 | return input.view(input.size(0), -1) 13 | 14 | def l2_norm(input,axis=1): 15 | norm = torch.norm(input,2,axis,True) 16 | output = torch.div(input, norm) 17 | return output 18 | 19 | class SEModule(Module): 20 | def __init__(self, channels, reduction): 21 | super(SEModule, self).__init__() 22 | self.avg_pool = AdaptiveAvgPool2d(1) 23 | self.fc1 = Conv2d( 24 | channels, channels // reduction, kernel_size=1, padding=0 ,bias=False) 25 | self.relu = ReLU(inplace=True) 26 | self.fc2 = Conv2d( 27 | channels // reduction, channels, kernel_size=1, padding=0 ,bias=False) 28 | self.sigmoid = Sigmoid() 29 | 30 | def forward(self, x): 31 | module_input = x 32 | x = self.avg_pool(x) 33 | x = self.fc1(x) 34 | x = self.relu(x) 35 | x = self.fc2(x) 36 | x = self.sigmoid(x) 37 | return module_input * x 38 | 39 | class bottleneck_IR(Module): 40 | def __init__(self, in_channel, depth, stride): 41 | super(bottleneck_IR, self).__init__() 42 | if in_channel == depth: 43 | self.shortcut_layer = MaxPool2d(1, stride) 44 | else: 45 | self.shortcut_layer = Sequential( 46 | 
Conv2d(in_channel, depth, (1, 1), stride ,bias=False), BatchNorm2d(depth)) 47 | self.res_layer = Sequential( 48 | BatchNorm2d(in_channel), 49 | Conv2d(in_channel, depth, (3, 3), (1, 1), 1 ,bias=False), PReLU(depth), 50 | Conv2d(depth, depth, (3, 3), stride, 1 ,bias=False), BatchNorm2d(depth)) 51 | 52 | def forward(self, x): 53 | shortcut = self.shortcut_layer(x) 54 | res = self.res_layer(x) 55 | return res + shortcut 56 | 57 | class bottleneck_IR_SE(Module): 58 | def __init__(self, in_channel, depth, stride): 59 | super(bottleneck_IR_SE, self).__init__() 60 | if in_channel == depth: 61 | self.shortcut_layer = MaxPool2d(1, stride) 62 | else: 63 | self.shortcut_layer = Sequential( 64 | Conv2d(in_channel, depth, (1, 1), stride ,bias=False), 65 | BatchNorm2d(depth)) 66 | self.res_layer = Sequential( 67 | BatchNorm2d(in_channel), 68 | Conv2d(in_channel, depth, (3,3), (1,1),1 ,bias=False), 69 | PReLU(depth), 70 | Conv2d(depth, depth, (3,3), stride, 1 ,bias=False), 71 | BatchNorm2d(depth), 72 | SEModule(depth,16) 73 | ) 74 | def forward(self,x): 75 | shortcut = self.shortcut_layer(x) 76 | res = self.res_layer(x) 77 | return res + shortcut 78 | 79 | class Bottleneck(namedtuple('Block', ['in_channel', 'depth', 'stride'])): 80 | '''A named tuple describing a ResNet block.''' 81 | 82 | def get_block(in_channel, depth, num_units, stride = 2): 83 | return [Bottleneck(in_channel, depth, stride)] + [Bottleneck(depth, depth, 1) for i in range(num_units-1)] 84 | 85 | def get_blocks(num_layers): 86 | if num_layers == 50: 87 | blocks = [ 88 | get_block(in_channel=64, depth=64, num_units = 3), 89 | get_block(in_channel=64, depth=128, num_units=4), 90 | get_block(in_channel=128, depth=256, num_units=14), 91 | get_block(in_channel=256, depth=512, num_units=3) 92 | ] 93 | elif num_layers == 100: 94 | blocks = [ 95 | get_block(in_channel=64, depth=64, num_units=3), 96 | get_block(in_channel=64, depth=128, num_units=13), 97 | get_block(in_channel=128, depth=256, num_units=30), 98 | get_block(in_channel=256, depth=512, num_units=3) 99 | ] 100 | elif num_layers == 152: 101 | blocks = [ 102 | get_block(in_channel=64, depth=64, num_units=3), 103 | get_block(in_channel=64, depth=128, num_units=8), 104 | get_block(in_channel=128, depth=256, num_units=36), 105 | get_block(in_channel=256, depth=512, num_units=3) 106 | ] 107 | return blocks 108 | 109 | class Backbone(Module): 110 | def __init__(self, num_layers, drop_ratio, mode='ir'): 111 | super(Backbone, self).__init__() 112 | assert num_layers in [50, 100, 152], 'num_layers should be 50,100, or 152' 113 | assert mode in ['ir', 'ir_se'], 'mode should be ir or ir_se' 114 | blocks = get_blocks(num_layers) 115 | if mode == 'ir': 116 | unit_module = bottleneck_IR 117 | elif mode == 'ir_se': 118 | unit_module = bottleneck_IR_SE 119 | self.input_layer = Sequential(Conv2d(3, 64, (3, 3), 1, 1 ,bias=False), 120 | BatchNorm2d(64), 121 | PReLU(64)) 122 | self.output_layer = Sequential(BatchNorm2d(512), 123 | Dropout(drop_ratio), 124 | Flatten(), 125 | Linear(512 * 7 * 7, 512), 126 | BatchNorm1d(512)) 127 | modules = [] 128 | for block in blocks: 129 | for bottleneck in block: 130 | modules.append( 131 | unit_module(bottleneck.in_channel, 132 | bottleneck.depth, 133 | bottleneck.stride)) 134 | self.body = Sequential(*modules) 135 | 136 | def forward(self,x): 137 | x = self.input_layer(x) 138 | x = self.body(x) 139 | x = self.output_layer(x) 140 | return l2_norm(x) 141 | 142 | ################################## MobileFaceNet 
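# (annotation, not in the original source) The Backbone above is what face_learner builds when
# conf.use_mobilfacenet is False, e.g. Backbone(50, 0.6, 'ir_se'); given a batch of 112x112 face
# crops it returns L2-normalised 512-dimensional embeddings.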
############################################################# 143 | 144 | class Conv_block(Module): 145 | def __init__(self, in_c, out_c, kernel=(1, 1), stride=(1, 1), padding=(0, 0), groups=1): 146 | super(Conv_block, self).__init__() 147 | self.conv = Conv2d(in_c, out_channels=out_c, kernel_size=kernel, groups=groups, stride=stride, padding=padding, bias=False) 148 | self.bn = BatchNorm2d(out_c) 149 | self.prelu = PReLU(out_c) 150 | def forward(self, x): 151 | x = self.conv(x) 152 | x = self.bn(x) 153 | x = self.prelu(x) 154 | return x 155 | 156 | class Linear_block(Module): 157 | def __init__(self, in_c, out_c, kernel=(1, 1), stride=(1, 1), padding=(0, 0), groups=1): 158 | super(Linear_block, self).__init__() 159 | self.conv = Conv2d(in_c, out_channels=out_c, kernel_size=kernel, groups=groups, stride=stride, padding=padding, bias=False) 160 | self.bn = BatchNorm2d(out_c) 161 | def forward(self, x): 162 | x = self.conv(x) 163 | x = self.bn(x) 164 | return x 165 | 166 | class Depth_Wise(Module): 167 | def __init__(self, in_c, out_c, residual = False, kernel=(3, 3), stride=(2, 2), padding=(1, 1), groups=1): 168 | super(Depth_Wise, self).__init__() 169 | self.conv = Conv_block(in_c, out_c=groups, kernel=(1, 1), padding=(0, 0), stride=(1, 1)) 170 | self.conv_dw = Conv_block(groups, groups, groups=groups, kernel=kernel, padding=padding, stride=stride) 171 | self.project = Linear_block(groups, out_c, kernel=(1, 1), padding=(0, 0), stride=(1, 1)) 172 | self.residual = residual 173 | def forward(self, x): 174 | if self.residual: 175 | short_cut = x 176 | x = self.conv(x) 177 | x = self.conv_dw(x) 178 | x = self.project(x) 179 | if self.residual: 180 | output = short_cut + x 181 | else: 182 | output = x 183 | return output 184 | 185 | class Residual(Module): 186 | def __init__(self, c, num_block, groups, kernel=(3, 3), stride=(1, 1), padding=(1, 1)): 187 | super(Residual, self).__init__() 188 | modules = [] 189 | for _ in range(num_block): 190 | modules.append(Depth_Wise(c, c, residual=True, kernel=kernel, padding=padding, stride=stride, groups=groups)) 191 | self.model = Sequential(*modules) 192 | def forward(self, x): 193 | return self.model(x) 194 | 195 | class MobileFaceNet(Module): 196 | def __init__(self, embedding_size): 197 | super(MobileFaceNet, self).__init__() 198 | self.conv1 = Conv_block(3, 64, kernel=(3, 3), stride=(2, 2), padding=(1, 1)) 199 | self.conv2_dw = Conv_block(64, 64, kernel=(3, 3), stride=(1, 1), padding=(1, 1), groups=64) 200 | self.conv_23 = Depth_Wise(64, 64, kernel=(3, 3), stride=(2, 2), padding=(1, 1), groups=128) 201 | self.conv_3 = Residual(64, num_block=4, groups=128, kernel=(3, 3), stride=(1, 1), padding=(1, 1)) 202 | self.conv_34 = Depth_Wise(64, 128, kernel=(3, 3), stride=(2, 2), padding=(1, 1), groups=256) 203 | self.conv_4 = Residual(128, num_block=6, groups=256, kernel=(3, 3), stride=(1, 1), padding=(1, 1)) 204 | self.conv_45 = Depth_Wise(128, 128, kernel=(3, 3), stride=(2, 2), padding=(1, 1), groups=512) 205 | self.conv_5 = Residual(128, num_block=2, groups=256, kernel=(3, 3), stride=(1, 1), padding=(1, 1)) 206 | self.conv_6_sep = Conv_block(128, 512, kernel=(1, 1), stride=(1, 1), padding=(0, 0)) 207 | self.conv_6_dw = Linear_block(512, 512, groups=512, kernel=(7,7), stride=(1, 1), padding=(0, 0)) 208 | self.conv_6_flatten = Flatten() 209 | self.linear = Linear(512, embedding_size, bias=False) 210 | self.bn = BatchNorm1d(embedding_size) 211 | 212 | def forward(self, x): 213 | out = self.conv1(x) 214 | 215 | out = self.conv2_dw(out) 216 | 217 | out = 
self.conv_23(out) 218 | 219 | out = self.conv_3(out) 220 | 221 | out = self.conv_34(out) 222 | 223 | out = self.conv_4(out) 224 | 225 | out = self.conv_45(out) 226 | 227 | out = self.conv_5(out) 228 | 229 | out = self.conv_6_sep(out) 230 | 231 | out = self.conv_6_dw(out) 232 | 233 | out = self.conv_6_flatten(out) 234 | 235 | out = self.linear(out) 236 | 237 | out = self.bn(out) 238 | return l2_norm(out) 239 | 240 | ################################## Arcface head ############################################################# 241 | 242 | class Arcface(Module): 243 | # implementation of additive margin softmax loss in https://arxiv.org/abs/1801.05599 244 | def __init__(self, embedding_size=512, classnum=51332, s=64., m=0.5): 245 | super(Arcface, self).__init__() 246 | self.classnum = classnum 247 | self.kernel = Parameter(torch.Tensor(embedding_size,classnum)) 248 | # initial kernel 249 | self.kernel.data.uniform_(-1, 1).renorm_(2,1,1e-5).mul_(1e5) 250 | self.m = m # the margin value, default is 0.5 251 | self.s = s # scalar value default is 64, see normface https://arxiv.org/abs/1704.06369 252 | self.cos_m = math.cos(m) 253 | self.sin_m = math.sin(m) 254 | self.mm = self.sin_m * m # issue 1 255 | self.threshold = math.cos(math.pi - m) 256 | def forward(self, embbedings, label): 257 | # weights norm 258 | nB = len(embbedings) 259 | kernel_norm = l2_norm(self.kernel,axis=0) 260 | # cos(theta+m) 261 | cos_theta = torch.mm(embbedings,kernel_norm) 262 | # output = torch.mm(embbedings,kernel_norm) 263 | cos_theta = cos_theta.clamp(-1,1) # for numerical stability 264 | cos_theta_2 = torch.pow(cos_theta, 2) 265 | sin_theta_2 = 1 - cos_theta_2 266 | sin_theta = torch.sqrt(sin_theta_2) 267 | cos_theta_m = (cos_theta * self.cos_m - sin_theta * self.sin_m) 268 | # this condition controls the theta+m should in range [0, pi] 269 | # 0<=theta+m<=pi 270 | # -m<=theta<=pi-m 271 | cond_v = cos_theta - self.threshold 272 | cond_mask = cond_v <= 0 273 | keep_val = (cos_theta - self.mm) # when theta not in [0,pi], use cosface instead 274 | cos_theta_m[cond_mask] = keep_val[cond_mask] 275 | output = cos_theta * 1.0 # a little bit hacky way to prevent in_place operation on cos_theta 276 | idx_ = torch.arange(0, nB, dtype=torch.long) 277 | output[idx_, label] = cos_theta_m[idx_, label] 278 | output *= self.s # scale up in order to make softmax work, first introduced in normface 279 | return output 280 | 281 | ################################## Cosface head ############################################################# 282 | 283 | class Am_softmax(Module): 284 | # implementation of additive margin softmax loss in https://arxiv.org/abs/1801.05599 285 | def __init__(self,embedding_size=512,classnum=51332): 286 | super(Am_softmax, self).__init__() 287 | self.classnum = classnum 288 | self.kernel = Parameter(torch.Tensor(embedding_size,classnum)) 289 | # initial kernel 290 | self.kernel.data.uniform_(-1, 1).renorm_(2,1,1e-5).mul_(1e5) 291 | self.m = 0.35 # additive margin recommended by the paper 292 | self.s = 30. 
# see normface https://arxiv.org/abs/1704.06369 293 | def forward(self,embbedings,label): 294 | kernel_norm = l2_norm(self.kernel,axis=0) 295 | cos_theta = torch.mm(embbedings,kernel_norm) 296 | cos_theta = cos_theta.clamp(-1,1) # for numerical stability 297 | phi = cos_theta - self.m 298 | label = label.view(-1,1) #size=(B,1) 299 | index = cos_theta.data * 0.0 #size=(B,Classnum) 300 | index.scatter_(1,label.data.view(-1,1),1) 301 | index = index.byte() 302 | output = cos_theta * 1.0 303 | output[index] = phi[index] #only change the correct predicted output 304 | output *= self.s # scale up in order to make softmax work, first introduced in normface 305 | return output 306 | 307 | -------------------------------------------------------------------------------- /mtcnn.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch 3 | from PIL import Image 4 | from torch.autograd import Variable 5 | from mtcnn_pytorch.src.get_nets import PNet, RNet, ONet 6 | from mtcnn_pytorch.src.box_utils import nms, calibrate_box, get_image_boxes, convert_to_square 7 | from mtcnn_pytorch.src.first_stage import run_first_stage 8 | from mtcnn_pytorch.src.align_trans import get_reference_facial_points, warp_and_crop_face 9 | device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") 10 | # device = 'cpu' 11 | 12 | class MTCNN(): 13 | def __init__(self): 14 | self.pnet = PNet().to(device) 15 | self.rnet = RNet().to(device) 16 | self.onet = ONet().to(device) 17 | self.pnet.eval() 18 | self.rnet.eval() 19 | self.onet.eval() 20 | self.refrence = get_reference_facial_points(default_square= True) 21 | 22 | def align(self, img): 23 | _, landmarks = self.detect_faces(img) 24 | facial5points = [[landmarks[0][j],landmarks[0][j+5]] for j in range(5)] 25 | warped_face = warp_and_crop_face(np.array(img), facial5points, self.refrence, crop_size=(112,112)) 26 | return Image.fromarray(warped_face) 27 | 28 | def align_multi(self, img, limit=None, min_face_size=30.0): 29 | boxes, landmarks = self.detect_faces(img, min_face_size) 30 | if limit: 31 | boxes = boxes[:limit] 32 | landmarks = landmarks[:limit] 33 | faces = [] 34 | for landmark in landmarks: 35 | facial5points = [[landmark[j],landmark[j+5]] for j in range(5)] 36 | warped_face = warp_and_crop_face(np.array(img), facial5points, self.refrence, crop_size=(112,112)) 37 | faces.append(Image.fromarray(warped_face)) 38 | return boxes, faces 39 | 40 | def detect_faces(self, image, min_face_size=20.0, 41 | thresholds=[0.6, 0.7, 0.8], 42 | nms_thresholds=[0.7, 0.7, 0.7]): 43 | """ 44 | Arguments: 45 | image: an instance of PIL.Image. 46 | min_face_size: a float number. 47 | thresholds: a list of length 3. 48 | nms_thresholds: a list of length 3. 49 | 50 | Returns: 51 | two float numpy arrays of shapes [n_boxes, 4] and [n_boxes, 10], 52 | bounding boxes and facial landmarks. 
53 | """ 54 | 55 | # BUILD AN IMAGE PYRAMID 56 | width, height = image.size 57 | min_length = min(height, width) 58 | 59 | min_detection_size = 12 60 | factor = 0.707 # sqrt(0.5) 61 | 62 | # scales for scaling the image 63 | scales = [] 64 | 65 | # scales the image so that 66 | # minimum size that we can detect equals to 67 | # minimum face size that we want to detect 68 | m = min_detection_size/min_face_size 69 | min_length *= m 70 | 71 | factor_count = 0 72 | while min_length > min_detection_size: 73 | scales.append(m*factor**factor_count) 74 | min_length *= factor 75 | factor_count += 1 76 | 77 | # STAGE 1 78 | 79 | # it will be returned 80 | bounding_boxes = [] 81 | 82 | with torch.no_grad(): 83 | # run P-Net on different scales 84 | for s in scales: 85 | boxes = run_first_stage(image, self.pnet, scale=s, threshold=thresholds[0]) 86 | bounding_boxes.append(boxes) 87 | 88 | # collect boxes (and offsets, and scores) from different scales 89 | bounding_boxes = [i for i in bounding_boxes if i is not None] 90 | bounding_boxes = np.vstack(bounding_boxes) 91 | 92 | keep = nms(bounding_boxes[:, 0:5], nms_thresholds[0]) 93 | bounding_boxes = bounding_boxes[keep] 94 | 95 | # use offsets predicted by pnet to transform bounding boxes 96 | bounding_boxes = calibrate_box(bounding_boxes[:, 0:5], bounding_boxes[:, 5:]) 97 | # shape [n_boxes, 5] 98 | 99 | bounding_boxes = convert_to_square(bounding_boxes) 100 | bounding_boxes[:, 0:4] = np.round(bounding_boxes[:, 0:4]) 101 | 102 | # STAGE 2 103 | 104 | img_boxes = get_image_boxes(bounding_boxes, image, size=24) 105 | img_boxes = torch.FloatTensor(img_boxes).to(device) 106 | 107 | output = self.rnet(img_boxes) 108 | offsets = output[0].cpu().data.numpy() # shape [n_boxes, 4] 109 | probs = output[1].cpu().data.numpy() # shape [n_boxes, 2] 110 | 111 | keep = np.where(probs[:, 1] > thresholds[1])[0] 112 | bounding_boxes = bounding_boxes[keep] 113 | bounding_boxes[:, 4] = probs[keep, 1].reshape((-1,)) 114 | offsets = offsets[keep] 115 | 116 | keep = nms(bounding_boxes, nms_thresholds[1]) 117 | bounding_boxes = bounding_boxes[keep] 118 | bounding_boxes = calibrate_box(bounding_boxes, offsets[keep]) 119 | bounding_boxes = convert_to_square(bounding_boxes) 120 | bounding_boxes[:, 0:4] = np.round(bounding_boxes[:, 0:4]) 121 | 122 | # STAGE 3 123 | 124 | img_boxes = get_image_boxes(bounding_boxes, image, size=48) 125 | if len(img_boxes) == 0: 126 | return [], [] 127 | img_boxes = torch.FloatTensor(img_boxes).to(device) 128 | output = self.onet(img_boxes) 129 | landmarks = output[0].cpu().data.numpy() # shape [n_boxes, 10] 130 | offsets = output[1].cpu().data.numpy() # shape [n_boxes, 4] 131 | probs = output[2].cpu().data.numpy() # shape [n_boxes, 2] 132 | 133 | keep = np.where(probs[:, 1] > thresholds[2])[0] 134 | bounding_boxes = bounding_boxes[keep] 135 | bounding_boxes[:, 4] = probs[keep, 1].reshape((-1,)) 136 | offsets = offsets[keep] 137 | landmarks = landmarks[keep] 138 | 139 | # compute landmark points 140 | width = bounding_boxes[:, 2] - bounding_boxes[:, 0] + 1.0 141 | height = bounding_boxes[:, 3] - bounding_boxes[:, 1] + 1.0 142 | xmin, ymin = bounding_boxes[:, 0], bounding_boxes[:, 1] 143 | landmarks[:, 0:5] = np.expand_dims(xmin, 1) + np.expand_dims(width, 1)*landmarks[:, 0:5] 144 | landmarks[:, 5:10] = np.expand_dims(ymin, 1) + np.expand_dims(height, 1)*landmarks[:, 5:10] 145 | 146 | bounding_boxes = calibrate_box(bounding_boxes, offsets) 147 | keep = nms(bounding_boxes, nms_thresholds[2], mode='min') 148 | bounding_boxes = bounding_boxes[keep] 149 
| landmarks = landmarks[keep] 150 | 151 | return bounding_boxes, landmarks 152 | -------------------------------------------------------------------------------- /mtcnn_pytorch/.gitignore: -------------------------------------------------------------------------------- 1 | .ipynb_checkpoints 2 | __pycache__ 3 | 4 | -------------------------------------------------------------------------------- /mtcnn_pytorch/LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2017 Dan Antoshchenko 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /mtcnn_pytorch/README.md: -------------------------------------------------------------------------------- 1 | # MTCNN 2 | 3 | `pytorch` implementation of **inference stage** of face detection algorithm described in 4 | [Joint Face Detection and Alignment using Multi-task Cascaded Convolutional Networks](https://arxiv.org/abs/1604.02878). 5 | 6 | ## Example 7 | ![example of a face detection](images/example.png) 8 | 9 | ## How to use it 10 | Just download the repository and then do this 11 | ```python 12 | from src import detect_faces 13 | from PIL import Image 14 | 15 | image = Image.open('image.jpg') 16 | bounding_boxes, landmarks = detect_faces(image) 17 | ``` 18 | For examples see `test_on_images.ipynb`. 
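The parent repo wraps these networks in the `MTCNN` class in `mtcnn.py` (one directory up), which also aligns the detected faces to 112x112 crops. A minimal sketch of how `face_verify.py` and `infer_on_video.py` call it (the image path is illustrative):

```python
from PIL import Image
from mtcnn import MTCNN

mtcnn = MTCNN()                       # loads PNet / RNet / ONet and the reference landmarks
img = Image.open('image.jpg')         # illustrative path
boxes, faces = mtcnn.align_multi(img, limit=10, min_face_size=30.0)
# boxes: [n, 5] array (x1, y1, x2, y2, score); faces: list of aligned 112x112 PIL images
```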
19 | 20 | ## Requirements 21 | * pytorch 0.2 22 | * Pillow, numpy 23 | 24 | ## Credit 25 | This implementation is heavily inspired by: 26 | * [pangyupo/mxnet_mtcnn_face_detection](https://github.com/pangyupo/mxnet_mtcnn_face_detection) 27 | -------------------------------------------------------------------------------- /mtcnn_pytorch/caffe_models/det1.caffemodel: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TreB1eN/InsightFace_Pytorch/350ff7aa9c9db8d369d1932e14d2a4d11a3e9553/mtcnn_pytorch/caffe_models/det1.caffemodel -------------------------------------------------------------------------------- /mtcnn_pytorch/caffe_models/det1.prototxt: -------------------------------------------------------------------------------- 1 | name: "PNet" 2 | input: "data" 3 | input_dim: 1 4 | input_dim: 3 5 | input_dim: 12 6 | input_dim: 12 7 | 8 | layer { 9 | name: "conv1" 10 | type: "Convolution" 11 | bottom: "data" 12 | top: "conv1" 13 | param { 14 | lr_mult: 1 15 | decay_mult: 1 16 | } 17 | param { 18 | lr_mult: 2 19 | decay_mult: 0 20 | } 21 | convolution_param { 22 | num_output: 10 23 | kernel_size: 3 24 | stride: 1 25 | weight_filler { 26 | type: "xavier" 27 | } 28 | bias_filler { 29 | type: "constant" 30 | value: 0 31 | } 32 | } 33 | } 34 | layer { 35 | name: "PReLU1" 36 | type: "PReLU" 37 | bottom: "conv1" 38 | top: "conv1" 39 | } 40 | layer { 41 | name: "pool1" 42 | type: "Pooling" 43 | bottom: "conv1" 44 | top: "pool1" 45 | pooling_param { 46 | pool: MAX 47 | kernel_size: 2 48 | stride: 2 49 | } 50 | } 51 | 52 | layer { 53 | name: "conv2" 54 | type: "Convolution" 55 | bottom: "pool1" 56 | top: "conv2" 57 | param { 58 | lr_mult: 1 59 | decay_mult: 1 60 | } 61 | param { 62 | lr_mult: 2 63 | decay_mult: 0 64 | } 65 | convolution_param { 66 | num_output: 16 67 | kernel_size: 3 68 | stride: 1 69 | weight_filler { 70 | type: "xavier" 71 | } 72 | bias_filler { 73 | type: "constant" 74 | value: 0 75 | } 76 | } 77 | } 78 | layer { 79 | name: "PReLU2" 80 | type: "PReLU" 81 | bottom: "conv2" 82 | top: "conv2" 83 | } 84 | 85 | layer { 86 | name: "conv3" 87 | type: "Convolution" 88 | bottom: "conv2" 89 | top: "conv3" 90 | param { 91 | lr_mult: 1 92 | decay_mult: 1 93 | } 94 | param { 95 | lr_mult: 2 96 | decay_mult: 0 97 | } 98 | convolution_param { 99 | num_output: 32 100 | kernel_size: 3 101 | stride: 1 102 | weight_filler { 103 | type: "xavier" 104 | } 105 | bias_filler { 106 | type: "constant" 107 | value: 0 108 | } 109 | } 110 | } 111 | layer { 112 | name: "PReLU3" 113 | type: "PReLU" 114 | bottom: "conv3" 115 | top: "conv3" 116 | } 117 | 118 | 119 | layer { 120 | name: "conv4-1" 121 | type: "Convolution" 122 | bottom: "conv3" 123 | top: "conv4-1" 124 | param { 125 | lr_mult: 1 126 | decay_mult: 1 127 | } 128 | param { 129 | lr_mult: 2 130 | decay_mult: 0 131 | } 132 | convolution_param { 133 | num_output: 2 134 | kernel_size: 1 135 | stride: 1 136 | weight_filler { 137 | type: "xavier" 138 | } 139 | bias_filler { 140 | type: "constant" 141 | value: 0 142 | } 143 | } 144 | } 145 | 146 | layer { 147 | name: "conv4-2" 148 | type: "Convolution" 149 | bottom: "conv3" 150 | top: "conv4-2" 151 | param { 152 | lr_mult: 1 153 | decay_mult: 1 154 | } 155 | param { 156 | lr_mult: 2 157 | decay_mult: 0 158 | } 159 | convolution_param { 160 | num_output: 4 161 | kernel_size: 1 162 | stride: 1 163 | weight_filler { 164 | type: "xavier" 165 | } 166 | bias_filler { 167 | type: "constant" 168 | value: 0 169 | } 170 | } 171 | } 172 | layer { 173 | name: 
"prob1" 174 | type: "Softmax" 175 | bottom: "conv4-1" 176 | top: "prob1" 177 | } 178 | -------------------------------------------------------------------------------- /mtcnn_pytorch/caffe_models/det2.caffemodel: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TreB1eN/InsightFace_Pytorch/350ff7aa9c9db8d369d1932e14d2a4d11a3e9553/mtcnn_pytorch/caffe_models/det2.caffemodel -------------------------------------------------------------------------------- /mtcnn_pytorch/caffe_models/det2.prototxt: -------------------------------------------------------------------------------- 1 | name: "RNet" 2 | input: "data" 3 | input_dim: 1 4 | input_dim: 3 5 | input_dim: 24 6 | input_dim: 24 7 | 8 | 9 | ########################## 10 | ###################### 11 | layer { 12 | name: "conv1" 13 | type: "Convolution" 14 | bottom: "data" 15 | top: "conv1" 16 | param { 17 | lr_mult: 0 18 | decay_mult: 0 19 | } 20 | param { 21 | lr_mult: 0 22 | decay_mult: 0 23 | } 24 | convolution_param { 25 | num_output: 28 26 | kernel_size: 3 27 | stride: 1 28 | weight_filler { 29 | type: "xavier" 30 | } 31 | bias_filler { 32 | type: "constant" 33 | value: 0 34 | } 35 | } 36 | } 37 | layer { 38 | name: "prelu1" 39 | type: "PReLU" 40 | bottom: "conv1" 41 | top: "conv1" 42 | propagate_down: true 43 | } 44 | layer { 45 | name: "pool1" 46 | type: "Pooling" 47 | bottom: "conv1" 48 | top: "pool1" 49 | pooling_param { 50 | pool: MAX 51 | kernel_size: 3 52 | stride: 2 53 | } 54 | } 55 | 56 | layer { 57 | name: "conv2" 58 | type: "Convolution" 59 | bottom: "pool1" 60 | top: "conv2" 61 | param { 62 | lr_mult: 0 63 | decay_mult: 0 64 | } 65 | param { 66 | lr_mult: 0 67 | decay_mult: 0 68 | } 69 | convolution_param { 70 | num_output: 48 71 | kernel_size: 3 72 | stride: 1 73 | weight_filler { 74 | type: "xavier" 75 | } 76 | bias_filler { 77 | type: "constant" 78 | value: 0 79 | } 80 | } 81 | } 82 | layer { 83 | name: "prelu2" 84 | type: "PReLU" 85 | bottom: "conv2" 86 | top: "conv2" 87 | propagate_down: true 88 | } 89 | layer { 90 | name: "pool2" 91 | type: "Pooling" 92 | bottom: "conv2" 93 | top: "pool2" 94 | pooling_param { 95 | pool: MAX 96 | kernel_size: 3 97 | stride: 2 98 | } 99 | } 100 | #################################### 101 | 102 | ################################## 103 | layer { 104 | name: "conv3" 105 | type: "Convolution" 106 | bottom: "pool2" 107 | top: "conv3" 108 | param { 109 | lr_mult: 0 110 | decay_mult: 0 111 | } 112 | param { 113 | lr_mult: 0 114 | decay_mult: 0 115 | } 116 | convolution_param { 117 | num_output: 64 118 | kernel_size: 2 119 | stride: 1 120 | weight_filler { 121 | type: "xavier" 122 | } 123 | bias_filler { 124 | type: "constant" 125 | value: 0 126 | } 127 | } 128 | } 129 | layer { 130 | name: "prelu3" 131 | type: "PReLU" 132 | bottom: "conv3" 133 | top: "conv3" 134 | propagate_down: true 135 | } 136 | ############################### 137 | 138 | ############################### 139 | 140 | layer { 141 | name: "conv4" 142 | type: "InnerProduct" 143 | bottom: "conv3" 144 | top: "conv4" 145 | param { 146 | lr_mult: 0 147 | decay_mult: 0 148 | } 149 | param { 150 | lr_mult: 0 151 | decay_mult: 0 152 | } 153 | inner_product_param { 154 | num_output: 128 155 | weight_filler { 156 | type: "xavier" 157 | } 158 | bias_filler { 159 | type: "constant" 160 | value: 0 161 | } 162 | } 163 | } 164 | layer { 165 | name: "prelu4" 166 | type: "PReLU" 167 | bottom: "conv4" 168 | top: "conv4" 169 | } 170 | 171 | layer { 172 | name: "conv5-1" 173 | type: 
"InnerProduct" 174 | bottom: "conv4" 175 | top: "conv5-1" 176 | param { 177 | lr_mult: 0 178 | decay_mult: 0 179 | } 180 | param { 181 | lr_mult: 0 182 | decay_mult: 0 183 | } 184 | inner_product_param { 185 | num_output: 2 186 | #kernel_size: 1 187 | #stride: 1 188 | weight_filler { 189 | type: "xavier" 190 | } 191 | bias_filler { 192 | type: "constant" 193 | value: 0 194 | } 195 | } 196 | } 197 | layer { 198 | name: "conv5-2" 199 | type: "InnerProduct" 200 | bottom: "conv4" 201 | top: "conv5-2" 202 | param { 203 | lr_mult: 1 204 | decay_mult: 1 205 | } 206 | param { 207 | lr_mult: 2 208 | decay_mult: 1 209 | } 210 | inner_product_param { 211 | num_output: 4 212 | #kernel_size: 1 213 | #stride: 1 214 | weight_filler { 215 | type: "xavier" 216 | } 217 | bias_filler { 218 | type: "constant" 219 | value: 0 220 | } 221 | } 222 | } 223 | layer { 224 | name: "prob1" 225 | type: "Softmax" 226 | bottom: "conv5-1" 227 | top: "prob1" 228 | } -------------------------------------------------------------------------------- /mtcnn_pytorch/caffe_models/det3.caffemodel: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TreB1eN/InsightFace_Pytorch/350ff7aa9c9db8d369d1932e14d2a4d11a3e9553/mtcnn_pytorch/caffe_models/det3.caffemodel -------------------------------------------------------------------------------- /mtcnn_pytorch/caffe_models/det3.prototxt: -------------------------------------------------------------------------------- 1 | name: "ONet" 2 | input: "data" 3 | input_dim: 1 4 | input_dim: 3 5 | input_dim: 48 6 | input_dim: 48 7 | ################################## 8 | layer { 9 | name: "conv1" 10 | type: "Convolution" 11 | bottom: "data" 12 | top: "conv1" 13 | param { 14 | lr_mult: 1 15 | decay_mult: 1 16 | } 17 | param { 18 | lr_mult: 2 19 | decay_mult: 1 20 | } 21 | convolution_param { 22 | num_output: 32 23 | kernel_size: 3 24 | stride: 1 25 | weight_filler { 26 | type: "xavier" 27 | } 28 | bias_filler { 29 | type: "constant" 30 | value: 0 31 | } 32 | } 33 | } 34 | layer { 35 | name: "prelu1" 36 | type: "PReLU" 37 | bottom: "conv1" 38 | top: "conv1" 39 | } 40 | layer { 41 | name: "pool1" 42 | type: "Pooling" 43 | bottom: "conv1" 44 | top: "pool1" 45 | pooling_param { 46 | pool: MAX 47 | kernel_size: 3 48 | stride: 2 49 | } 50 | } 51 | layer { 52 | name: "conv2" 53 | type: "Convolution" 54 | bottom: "pool1" 55 | top: "conv2" 56 | param { 57 | lr_mult: 1 58 | decay_mult: 1 59 | } 60 | param { 61 | lr_mult: 2 62 | decay_mult: 1 63 | } 64 | convolution_param { 65 | num_output: 64 66 | kernel_size: 3 67 | stride: 1 68 | weight_filler { 69 | type: "xavier" 70 | } 71 | bias_filler { 72 | type: "constant" 73 | value: 0 74 | } 75 | } 76 | } 77 | 78 | layer { 79 | name: "prelu2" 80 | type: "PReLU" 81 | bottom: "conv2" 82 | top: "conv2" 83 | } 84 | layer { 85 | name: "pool2" 86 | type: "Pooling" 87 | bottom: "conv2" 88 | top: "pool2" 89 | pooling_param { 90 | pool: MAX 91 | kernel_size: 3 92 | stride: 2 93 | } 94 | } 95 | 96 | layer { 97 | name: "conv3" 98 | type: "Convolution" 99 | bottom: "pool2" 100 | top: "conv3" 101 | param { 102 | lr_mult: 1 103 | decay_mult: 1 104 | } 105 | param { 106 | lr_mult: 2 107 | decay_mult: 1 108 | } 109 | convolution_param { 110 | num_output: 64 111 | kernel_size: 3 112 | weight_filler { 113 | type: "xavier" 114 | } 115 | bias_filler { 116 | type: "constant" 117 | value: 0 118 | } 119 | } 120 | } 121 | layer { 122 | name: "prelu3" 123 | type: "PReLU" 124 | bottom: "conv3" 125 | top: "conv3" 126 | } 127 | 
layer { 128 | name: "pool3" 129 | type: "Pooling" 130 | bottom: "conv3" 131 | top: "pool3" 132 | pooling_param { 133 | pool: MAX 134 | kernel_size: 2 135 | stride: 2 136 | } 137 | } 138 | layer { 139 | name: "conv4" 140 | type: "Convolution" 141 | bottom: "pool3" 142 | top: "conv4" 143 | param { 144 | lr_mult: 1 145 | decay_mult: 1 146 | } 147 | param { 148 | lr_mult: 2 149 | decay_mult: 1 150 | } 151 | convolution_param { 152 | num_output: 128 153 | kernel_size: 2 154 | weight_filler { 155 | type: "xavier" 156 | } 157 | bias_filler { 158 | type: "constant" 159 | value: 0 160 | } 161 | } 162 | } 163 | layer { 164 | name: "prelu4" 165 | type: "PReLU" 166 | bottom: "conv4" 167 | top: "conv4" 168 | } 169 | 170 | 171 | layer { 172 | name: "conv5" 173 | type: "InnerProduct" 174 | bottom: "conv4" 175 | top: "conv5" 176 | param { 177 | lr_mult: 1 178 | decay_mult: 1 179 | } 180 | param { 181 | lr_mult: 2 182 | decay_mult: 1 183 | } 184 | inner_product_param { 185 | #kernel_size: 3 186 | num_output: 256 187 | weight_filler { 188 | type: "xavier" 189 | } 190 | bias_filler { 191 | type: "constant" 192 | value: 0 193 | } 194 | } 195 | } 196 | 197 | layer { 198 | name: "drop5" 199 | type: "Dropout" 200 | bottom: "conv5" 201 | top: "conv5" 202 | dropout_param { 203 | dropout_ratio: 0.25 204 | } 205 | } 206 | layer { 207 | name: "prelu5" 208 | type: "PReLU" 209 | bottom: "conv5" 210 | top: "conv5" 211 | } 212 | 213 | 214 | layer { 215 | name: "conv6-1" 216 | type: "InnerProduct" 217 | bottom: "conv5" 218 | top: "conv6-1" 219 | param { 220 | lr_mult: 1 221 | decay_mult: 1 222 | } 223 | param { 224 | lr_mult: 2 225 | decay_mult: 1 226 | } 227 | inner_product_param { 228 | #kernel_size: 1 229 | num_output: 2 230 | weight_filler { 231 | type: "xavier" 232 | } 233 | bias_filler { 234 | type: "constant" 235 | value: 0 236 | } 237 | } 238 | } 239 | layer { 240 | name: "conv6-2" 241 | type: "InnerProduct" 242 | bottom: "conv5" 243 | top: "conv6-2" 244 | param { 245 | lr_mult: 1 246 | decay_mult: 1 247 | } 248 | param { 249 | lr_mult: 2 250 | decay_mult: 1 251 | } 252 | inner_product_param { 253 | #kernel_size: 1 254 | num_output: 4 255 | weight_filler { 256 | type: "xavier" 257 | } 258 | bias_filler { 259 | type: "constant" 260 | value: 0 261 | } 262 | } 263 | } 264 | layer { 265 | name: "conv6-3" 266 | type: "InnerProduct" 267 | bottom: "conv5" 268 | top: "conv6-3" 269 | param { 270 | lr_mult: 1 271 | decay_mult: 1 272 | } 273 | param { 274 | lr_mult: 2 275 | decay_mult: 1 276 | } 277 | inner_product_param { 278 | #kernel_size: 1 279 | num_output: 10 280 | weight_filler { 281 | type: "xavier" 282 | } 283 | bias_filler { 284 | type: "constant" 285 | value: 0 286 | } 287 | } 288 | } 289 | layer { 290 | name: "prob1" 291 | type: "Softmax" 292 | bottom: "conv6-1" 293 | top: "prob1" 294 | } 295 | -------------------------------------------------------------------------------- /mtcnn_pytorch/caffe_models/det4.caffemodel: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TreB1eN/InsightFace_Pytorch/350ff7aa9c9db8d369d1932e14d2a4d11a3e9553/mtcnn_pytorch/caffe_models/det4.caffemodel -------------------------------------------------------------------------------- /mtcnn_pytorch/caffe_models/det4.prototxt: -------------------------------------------------------------------------------- 1 | name: "LNet" 2 | input: "data" 3 | input_dim: 1 4 | input_dim: 15 5 | input_dim: 24 6 | input_dim: 24 7 | 8 | layer { 9 | name: "slicer_data" 10 | type: "Slice" 11 | 
bottom: "data" 12 | top: "data241" 13 | top: "data242" 14 | top: "data243" 15 | top: "data244" 16 | top: "data245" 17 | slice_param { 18 | axis: 1 19 | slice_point: 3 20 | slice_point: 6 21 | slice_point: 9 22 | slice_point: 12 23 | } 24 | } 25 | layer { 26 | name: "conv1_1" 27 | type: "Convolution" 28 | bottom: "data241" 29 | top: "conv1_1" 30 | param { 31 | lr_mult: 1 32 | decay_mult: 1 33 | } 34 | param { 35 | lr_mult: 2 36 | decay_mult: 1 37 | } 38 | convolution_param { 39 | num_output: 28 40 | kernel_size: 3 41 | stride: 1 42 | weight_filler { 43 | type: "xavier" 44 | } 45 | bias_filler { 46 | type: "constant" 47 | value: 0 48 | } 49 | } 50 | 51 | } 52 | layer { 53 | name: "prelu1_1" 54 | type: "PReLU" 55 | bottom: "conv1_1" 56 | top: "conv1_1" 57 | 58 | } 59 | layer { 60 | name: "pool1_1" 61 | type: "Pooling" 62 | bottom: "conv1_1" 63 | top: "pool1_1" 64 | pooling_param { 65 | pool: MAX 66 | kernel_size: 3 67 | stride: 2 68 | } 69 | } 70 | 71 | layer { 72 | name: "conv2_1" 73 | type: "Convolution" 74 | bottom: "pool1_1" 75 | top: "conv2_1" 76 | param { 77 | lr_mult: 1 78 | decay_mult: 1 79 | } 80 | param { 81 | lr_mult: 2 82 | decay_mult: 1 83 | } 84 | convolution_param { 85 | num_output: 48 86 | kernel_size: 3 87 | stride: 1 88 | weight_filler { 89 | type: "xavier" 90 | } 91 | bias_filler { 92 | type: "constant" 93 | value: 0 94 | } 95 | } 96 | 97 | } 98 | layer { 99 | name: "prelu2_1" 100 | type: "PReLU" 101 | bottom: "conv2_1" 102 | top: "conv2_1" 103 | } 104 | layer { 105 | name: "pool2_1" 106 | type: "Pooling" 107 | bottom: "conv2_1" 108 | top: "pool2_1" 109 | pooling_param { 110 | pool: MAX 111 | kernel_size: 3 112 | stride: 2 113 | } 114 | 115 | } 116 | layer { 117 | name: "conv3_1" 118 | type: "Convolution" 119 | bottom: "pool2_1" 120 | top: "conv3_1" 121 | param { 122 | lr_mult: 1 123 | decay_mult: 1 124 | } 125 | param { 126 | lr_mult: 2 127 | decay_mult: 1 128 | } 129 | convolution_param { 130 | num_output: 64 131 | kernel_size: 2 132 | stride: 1 133 | weight_filler { 134 | type: "xavier" 135 | } 136 | bias_filler { 137 | type: "constant" 138 | value: 0 139 | } 140 | } 141 | 142 | } 143 | layer { 144 | name: "prelu3_1" 145 | type: "PReLU" 146 | bottom: "conv3_1" 147 | top: "conv3_1" 148 | } 149 | ########################## 150 | layer { 151 | name: "conv1_2" 152 | type: "Convolution" 153 | bottom: "data242" 154 | top: "conv1_2" 155 | param { 156 | lr_mult: 1 157 | decay_mult: 1 158 | } 159 | param { 160 | lr_mult: 2 161 | decay_mult: 1 162 | } 163 | convolution_param { 164 | num_output: 28 165 | kernel_size: 3 166 | stride: 1 167 | weight_filler { 168 | type: "xavier" 169 | } 170 | bias_filler { 171 | type: "constant" 172 | value: 0 173 | } 174 | } 175 | 176 | } 177 | layer { 178 | name: "prelu1_2" 179 | type: "PReLU" 180 | bottom: "conv1_2" 181 | top: "conv1_2" 182 | 183 | } 184 | layer { 185 | name: "pool1_2" 186 | type: "Pooling" 187 | bottom: "conv1_2" 188 | top: "pool1_2" 189 | pooling_param { 190 | pool: MAX 191 | kernel_size: 3 192 | stride: 2 193 | } 194 | } 195 | 196 | layer { 197 | name: "conv2_2" 198 | type: "Convolution" 199 | bottom: "pool1_2" 200 | top: "conv2_2" 201 | param { 202 | lr_mult: 1 203 | decay_mult: 1 204 | } 205 | param { 206 | lr_mult: 2 207 | decay_mult: 1 208 | } 209 | convolution_param { 210 | num_output: 48 211 | kernel_size: 3 212 | stride: 1 213 | weight_filler { 214 | type: "xavier" 215 | } 216 | bias_filler { 217 | type: "constant" 218 | value: 0 219 | } 220 | } 221 | 222 | } 223 | layer { 224 | name: "prelu2_2" 225 | type: "PReLU" 226 | 
bottom: "conv2_2" 227 | top: "conv2_2" 228 | } 229 | layer { 230 | name: "pool2_2" 231 | type: "Pooling" 232 | bottom: "conv2_2" 233 | top: "pool2_2" 234 | pooling_param { 235 | pool: MAX 236 | kernel_size: 3 237 | stride: 2 238 | } 239 | 240 | } 241 | layer { 242 | name: "conv3_2" 243 | type: "Convolution" 244 | bottom: "pool2_2" 245 | top: "conv3_2" 246 | param { 247 | lr_mult: 1 248 | decay_mult: 1 249 | } 250 | param { 251 | lr_mult: 2 252 | decay_mult: 1 253 | } 254 | convolution_param { 255 | num_output: 64 256 | kernel_size: 2 257 | stride: 1 258 | weight_filler { 259 | type: "xavier" 260 | } 261 | bias_filler { 262 | type: "constant" 263 | value: 0 264 | } 265 | } 266 | 267 | } 268 | layer { 269 | name: "prelu3_2" 270 | type: "PReLU" 271 | bottom: "conv3_2" 272 | top: "conv3_2" 273 | } 274 | ########################## 275 | ########################## 276 | layer { 277 | name: "conv1_3" 278 | type: "Convolution" 279 | bottom: "data243" 280 | top: "conv1_3" 281 | param { 282 | lr_mult: 1 283 | decay_mult: 1 284 | } 285 | param { 286 | lr_mult: 2 287 | decay_mult: 1 288 | } 289 | convolution_param { 290 | num_output: 28 291 | kernel_size: 3 292 | stride: 1 293 | weight_filler { 294 | type: "xavier" 295 | } 296 | bias_filler { 297 | type: "constant" 298 | value: 0 299 | } 300 | } 301 | 302 | } 303 | layer { 304 | name: "prelu1_3" 305 | type: "PReLU" 306 | bottom: "conv1_3" 307 | top: "conv1_3" 308 | 309 | } 310 | layer { 311 | name: "pool1_3" 312 | type: "Pooling" 313 | bottom: "conv1_3" 314 | top: "pool1_3" 315 | pooling_param { 316 | pool: MAX 317 | kernel_size: 3 318 | stride: 2 319 | } 320 | } 321 | 322 | layer { 323 | name: "conv2_3" 324 | type: "Convolution" 325 | bottom: "pool1_3" 326 | top: "conv2_3" 327 | param { 328 | lr_mult: 1 329 | decay_mult: 1 330 | } 331 | param { 332 | lr_mult: 2 333 | decay_mult: 1 334 | } 335 | convolution_param { 336 | num_output: 48 337 | kernel_size: 3 338 | stride: 1 339 | weight_filler { 340 | type: "xavier" 341 | } 342 | bias_filler { 343 | type: "constant" 344 | value: 0 345 | } 346 | } 347 | 348 | } 349 | layer { 350 | name: "prelu2_3" 351 | type: "PReLU" 352 | bottom: "conv2_3" 353 | top: "conv2_3" 354 | } 355 | layer { 356 | name: "pool2_3" 357 | type: "Pooling" 358 | bottom: "conv2_3" 359 | top: "pool2_3" 360 | pooling_param { 361 | pool: MAX 362 | kernel_size: 3 363 | stride: 2 364 | } 365 | 366 | } 367 | layer { 368 | name: "conv3_3" 369 | type: "Convolution" 370 | bottom: "pool2_3" 371 | top: "conv3_3" 372 | param { 373 | lr_mult: 1 374 | decay_mult: 1 375 | } 376 | param { 377 | lr_mult: 2 378 | decay_mult: 1 379 | } 380 | convolution_param { 381 | num_output: 64 382 | kernel_size: 2 383 | stride: 1 384 | weight_filler { 385 | type: "xavier" 386 | } 387 | bias_filler { 388 | type: "constant" 389 | value: 0 390 | } 391 | } 392 | 393 | } 394 | layer { 395 | name: "prelu3_3" 396 | type: "PReLU" 397 | bottom: "conv3_3" 398 | top: "conv3_3" 399 | } 400 | ########################## 401 | ########################## 402 | layer { 403 | name: "conv1_4" 404 | type: "Convolution" 405 | bottom: "data244" 406 | top: "conv1_4" 407 | param { 408 | lr_mult: 1 409 | decay_mult: 1 410 | } 411 | param { 412 | lr_mult: 2 413 | decay_mult: 1 414 | } 415 | convolution_param { 416 | num_output: 28 417 | kernel_size: 3 418 | stride: 1 419 | weight_filler { 420 | type: "xavier" 421 | } 422 | bias_filler { 423 | type: "constant" 424 | value: 0 425 | } 426 | } 427 | 428 | } 429 | layer { 430 | name: "prelu1_4" 431 | type: "PReLU" 432 | bottom: "conv1_4" 433 | 
top: "conv1_4" 434 | 435 | } 436 | layer { 437 | name: "pool1_4" 438 | type: "Pooling" 439 | bottom: "conv1_4" 440 | top: "pool1_4" 441 | pooling_param { 442 | pool: MAX 443 | kernel_size: 3 444 | stride: 2 445 | } 446 | } 447 | 448 | layer { 449 | name: "conv2_4" 450 | type: "Convolution" 451 | bottom: "pool1_4" 452 | top: "conv2_4" 453 | param { 454 | lr_mult: 1 455 | decay_mult: 1 456 | } 457 | param { 458 | lr_mult: 2 459 | decay_mult: 1 460 | } 461 | convolution_param { 462 | num_output: 48 463 | kernel_size: 3 464 | stride: 1 465 | weight_filler { 466 | type: "xavier" 467 | } 468 | bias_filler { 469 | type: "constant" 470 | value: 0 471 | } 472 | } 473 | 474 | } 475 | layer { 476 | name: "prelu2_4" 477 | type: "PReLU" 478 | bottom: "conv2_4" 479 | top: "conv2_4" 480 | } 481 | layer { 482 | name: "pool2_4" 483 | type: "Pooling" 484 | bottom: "conv2_4" 485 | top: "pool2_4" 486 | pooling_param { 487 | pool: MAX 488 | kernel_size: 3 489 | stride: 2 490 | } 491 | 492 | } 493 | layer { 494 | name: "conv3_4" 495 | type: "Convolution" 496 | bottom: "pool2_4" 497 | top: "conv3_4" 498 | param { 499 | lr_mult: 1 500 | decay_mult: 1 501 | } 502 | param { 503 | lr_mult: 2 504 | decay_mult: 1 505 | } 506 | convolution_param { 507 | num_output: 64 508 | kernel_size: 2 509 | stride: 1 510 | weight_filler { 511 | type: "xavier" 512 | } 513 | bias_filler { 514 | type: "constant" 515 | value: 0 516 | } 517 | } 518 | 519 | } 520 | layer { 521 | name: "prelu3_4" 522 | type: "PReLU" 523 | bottom: "conv3_4" 524 | top: "conv3_4" 525 | } 526 | ########################## 527 | ########################## 528 | layer { 529 | name: "conv1_5" 530 | type: "Convolution" 531 | bottom: "data245" 532 | top: "conv1_5" 533 | param { 534 | lr_mult: 1 535 | decay_mult: 1 536 | } 537 | param { 538 | lr_mult: 2 539 | decay_mult: 1 540 | } 541 | convolution_param { 542 | num_output: 28 543 | kernel_size: 3 544 | stride: 1 545 | weight_filler { 546 | type: "xavier" 547 | } 548 | bias_filler { 549 | type: "constant" 550 | value: 0 551 | } 552 | } 553 | 554 | } 555 | layer { 556 | name: "prelu1_5" 557 | type: "PReLU" 558 | bottom: "conv1_5" 559 | top: "conv1_5" 560 | 561 | } 562 | layer { 563 | name: "pool1_5" 564 | type: "Pooling" 565 | bottom: "conv1_5" 566 | top: "pool1_5" 567 | pooling_param { 568 | pool: MAX 569 | kernel_size: 3 570 | stride: 2 571 | } 572 | } 573 | 574 | layer { 575 | name: "conv2_5" 576 | type: "Convolution" 577 | bottom: "pool1_5" 578 | top: "conv2_5" 579 | param { 580 | lr_mult: 1 581 | decay_mult: 1 582 | } 583 | param { 584 | lr_mult: 2 585 | decay_mult: 1 586 | } 587 | convolution_param { 588 | num_output: 48 589 | kernel_size: 3 590 | stride: 1 591 | weight_filler { 592 | type: "xavier" 593 | } 594 | bias_filler { 595 | type: "constant" 596 | value: 0 597 | } 598 | } 599 | 600 | } 601 | layer { 602 | name: "prelu2_5" 603 | type: "PReLU" 604 | bottom: "conv2_5" 605 | top: "conv2_5" 606 | } 607 | layer { 608 | name: "pool2_5" 609 | type: "Pooling" 610 | bottom: "conv2_5" 611 | top: "pool2_5" 612 | pooling_param { 613 | pool: MAX 614 | kernel_size: 3 615 | stride: 2 616 | } 617 | 618 | } 619 | layer { 620 | name: "conv3_5" 621 | type: "Convolution" 622 | bottom: "pool2_5" 623 | top: "conv3_5" 624 | param { 625 | lr_mult: 1 626 | decay_mult: 1 627 | } 628 | param { 629 | lr_mult: 2 630 | decay_mult: 1 631 | } 632 | convolution_param { 633 | num_output: 64 634 | kernel_size: 2 635 | stride: 1 636 | weight_filler { 637 | type: "xavier" 638 | } 639 | bias_filler { 640 | type: "constant" 641 | value: 0 642 
| } 643 | } 644 | 645 | } 646 | layer { 647 | name: "prelu3_5" 648 | type: "PReLU" 649 | bottom: "conv3_5" 650 | top: "conv3_5" 651 | } 652 | ########################## 653 | layer { 654 | name: "concat" 655 | bottom: "conv3_1" 656 | bottom: "conv3_2" 657 | bottom: "conv3_3" 658 | bottom: "conv3_4" 659 | bottom: "conv3_5" 660 | top: "conv3" 661 | type: "Concat" 662 | concat_param { 663 | axis: 1 664 | } 665 | } 666 | ########################## 667 | layer { 668 | name: "fc4" 669 | type: "InnerProduct" 670 | bottom: "conv3" 671 | top: "fc4" 672 | param { 673 | lr_mult: 1 674 | decay_mult: 1 675 | } 676 | param { 677 | lr_mult: 2 678 | decay_mult: 1 679 | } 680 | inner_product_param { 681 | num_output: 256 682 | weight_filler { 683 | type: "xavier" 684 | } 685 | bias_filler { 686 | type: "constant" 687 | value: 0 688 | } 689 | } 690 | 691 | } 692 | layer { 693 | name: "prelu4" 694 | type: "PReLU" 695 | bottom: "fc4" 696 | top: "fc4" 697 | } 698 | ############################ 699 | layer { 700 | name: "fc4_1" 701 | type: "InnerProduct" 702 | bottom: "fc4" 703 | top: "fc4_1" 704 | param { 705 | lr_mult: 1 706 | decay_mult: 1 707 | } 708 | param { 709 | lr_mult: 2 710 | decay_mult: 1 711 | } 712 | inner_product_param { 713 | num_output: 64 714 | weight_filler { 715 | type: "xavier" 716 | } 717 | bias_filler { 718 | type: "constant" 719 | value: 0 720 | } 721 | } 722 | 723 | } 724 | layer { 725 | name: "prelu4_1" 726 | type: "PReLU" 727 | bottom: "fc4_1" 728 | top: "fc4_1" 729 | } 730 | layer { 731 | name: "fc5_1" 732 | type: "InnerProduct" 733 | bottom: "fc4_1" 734 | top: "fc5_1" 735 | param { 736 | lr_mult: 1 737 | decay_mult: 1 738 | } 739 | param { 740 | lr_mult: 2 741 | decay_mult: 1 742 | } 743 | inner_product_param { 744 | num_output: 2 745 | weight_filler { 746 | type: "xavier" 747 | #type: "constant" 748 | #value: 0 749 | } 750 | bias_filler { 751 | type: "constant" 752 | value: 0 753 | } 754 | } 755 | } 756 | 757 | 758 | ######################### 759 | layer { 760 | name: "fc4_2" 761 | type: "InnerProduct" 762 | bottom: "fc4" 763 | top: "fc4_2" 764 | param { 765 | lr_mult: 1 766 | decay_mult: 1 767 | } 768 | param { 769 | lr_mult: 2 770 | decay_mult: 1 771 | } 772 | inner_product_param { 773 | num_output: 64 774 | weight_filler { 775 | type: "xavier" 776 | } 777 | bias_filler { 778 | type: "constant" 779 | value: 0 780 | } 781 | } 782 | 783 | } 784 | layer { 785 | name: "prelu4_2" 786 | type: "PReLU" 787 | bottom: "fc4_2" 788 | top: "fc4_2" 789 | } 790 | layer { 791 | name: "fc5_2" 792 | type: "InnerProduct" 793 | bottom: "fc4_2" 794 | top: "fc5_2" 795 | param { 796 | lr_mult: 1 797 | decay_mult: 1 798 | } 799 | param { 800 | lr_mult: 2 801 | decay_mult: 1 802 | } 803 | inner_product_param { 804 | num_output: 2 805 | weight_filler { 806 | type: "xavier" 807 | #type: "constant" 808 | #value: 0 809 | } 810 | bias_filler { 811 | type: "constant" 812 | value: 0 813 | } 814 | } 815 | } 816 | 817 | ######################### 818 | layer { 819 | name: "fc4_3" 820 | type: "InnerProduct" 821 | bottom: "fc4" 822 | top: "fc4_3" 823 | param { 824 | lr_mult: 1 825 | decay_mult: 1 826 | } 827 | param { 828 | lr_mult: 2 829 | decay_mult: 1 830 | } 831 | inner_product_param { 832 | num_output: 64 833 | weight_filler { 834 | type: "xavier" 835 | } 836 | bias_filler { 837 | type: "constant" 838 | value: 0 839 | } 840 | } 841 | 842 | } 843 | layer { 844 | name: "prelu4_3" 845 | type: "PReLU" 846 | bottom: "fc4_3" 847 | top: "fc4_3" 848 | } 849 | layer { 850 | name: "fc5_3" 851 | type: "InnerProduct" 852 | 
bottom: "fc4_3" 853 | top: "fc5_3" 854 | param { 855 | lr_mult: 1 856 | decay_mult: 1 857 | } 858 | param { 859 | lr_mult: 2 860 | decay_mult: 1 861 | } 862 | inner_product_param { 863 | num_output: 2 864 | weight_filler { 865 | type: "xavier" 866 | #type: "constant" 867 | #value: 0 868 | } 869 | bias_filler { 870 | type: "constant" 871 | value: 0 872 | } 873 | } 874 | } 875 | 876 | ######################### 877 | layer { 878 | name: "fc4_4" 879 | type: "InnerProduct" 880 | bottom: "fc4" 881 | top: "fc4_4" 882 | param { 883 | lr_mult: 1 884 | decay_mult: 1 885 | } 886 | param { 887 | lr_mult: 2 888 | decay_mult: 1 889 | } 890 | inner_product_param { 891 | num_output: 64 892 | weight_filler { 893 | type: "xavier" 894 | } 895 | bias_filler { 896 | type: "constant" 897 | value: 0 898 | } 899 | } 900 | 901 | } 902 | layer { 903 | name: "prelu4_4" 904 | type: "PReLU" 905 | bottom: "fc4_4" 906 | top: "fc4_4" 907 | } 908 | layer { 909 | name: "fc5_4" 910 | type: "InnerProduct" 911 | bottom: "fc4_4" 912 | top: "fc5_4" 913 | param { 914 | lr_mult: 1 915 | decay_mult: 1 916 | } 917 | param { 918 | lr_mult: 2 919 | decay_mult: 1 920 | } 921 | inner_product_param { 922 | num_output: 2 923 | weight_filler { 924 | type: "xavier" 925 | #type: "constant" 926 | #value: 0 927 | } 928 | bias_filler { 929 | type: "constant" 930 | value: 0 931 | } 932 | } 933 | } 934 | 935 | ######################### 936 | layer { 937 | name: "fc4_5" 938 | type: "InnerProduct" 939 | bottom: "fc4" 940 | top: "fc4_5" 941 | param { 942 | lr_mult: 1 943 | decay_mult: 1 944 | } 945 | param { 946 | lr_mult: 2 947 | decay_mult: 1 948 | } 949 | inner_product_param { 950 | num_output: 64 951 | weight_filler { 952 | type: "xavier" 953 | } 954 | bias_filler { 955 | type: "constant" 956 | value: 0 957 | } 958 | } 959 | 960 | } 961 | layer { 962 | name: "prelu4_5" 963 | type: "PReLU" 964 | bottom: "fc4_5" 965 | top: "fc4_5" 966 | } 967 | layer { 968 | name: "fc5_5" 969 | type: "InnerProduct" 970 | bottom: "fc4_5" 971 | top: "fc5_5" 972 | param { 973 | lr_mult: 1 974 | decay_mult: 1 975 | } 976 | param { 977 | lr_mult: 2 978 | decay_mult: 1 979 | } 980 | inner_product_param { 981 | num_output: 2 982 | weight_filler { 983 | type: "xavier" 984 | #type: "constant" 985 | #value: 0 986 | } 987 | bias_filler { 988 | type: "constant" 989 | value: 0 990 | } 991 | } 992 | } 993 | 994 | ######################### 995 | 996 | -------------------------------------------------------------------------------- /mtcnn_pytorch/extract_weights_from_caffe_models.py: -------------------------------------------------------------------------------- 1 | import caffe 2 | import numpy as np 3 | 4 | """ 5 | The purpose of this script is to convert pretrained weights taken from 6 | official implementation here: 7 | https://github.com/kpzhang93/MTCNN_face_detection_alignment/tree/master/code/codes/MTCNNv2 8 | to required format. 9 | 10 | In a nutshell, it just renames and transposes some of the weights. 11 | You don't have to use this script because weights are already in `src/weights`. 12 | """ 13 | 14 | 15 | def get_all_weights(net): 16 | all_weights = {} 17 | for p in net.params: 18 | if 'conv' in p: 19 | name = 'features.' 
+ p 20 | if '-' in p: 21 | s = list(p) 22 | s[-2] = '_' 23 | s = ''.join(s) 24 | all_weights[s + '.weight'] = net.params[p][0].data 25 | all_weights[s + '.bias'] = net.params[p][1].data 26 | elif len(net.params[p][0].data.shape) == 4: 27 | all_weights[name + '.weight'] = net.params[p][0].data.transpose((0, 1, 3, 2)) 28 | all_weights[name + '.bias'] = net.params[p][1].data 29 | else: 30 | all_weights[name + '.weight'] = net.params[p][0].data 31 | all_weights[name + '.bias'] = net.params[p][1].data 32 | elif 'prelu' in p.lower(): 33 | all_weights['features.' + p.lower() + '.weight'] = net.params[p][0].data 34 | return all_weights 35 | 36 | 37 | # P-Net 38 | net = caffe.Net('caffe_models/det1.prototxt', 'caffe_models/det1.caffemodel', caffe.TEST) 39 | np.save('src/weights/pnet.npy', get_all_weights(net)) 40 | 41 | # R-Net 42 | net = caffe.Net('caffe_models/det2.prototxt', 'caffe_models/det2.caffemodel', caffe.TEST) 43 | np.save('src/weights/rnet.npy', get_all_weights(net)) 44 | 45 | # O-Net 46 | net = caffe.Net('caffe_models/det3.prototxt', 'caffe_models/det3.caffemodel', caffe.TEST) 47 | np.save('src/weights/onet.npy', get_all_weights(net)) 48 | -------------------------------------------------------------------------------- /mtcnn_pytorch/images/example.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TreB1eN/InsightFace_Pytorch/350ff7aa9c9db8d369d1932e14d2a4d11a3e9553/mtcnn_pytorch/images/example.png -------------------------------------------------------------------------------- /mtcnn_pytorch/images/face0.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TreB1eN/InsightFace_Pytorch/350ff7aa9c9db8d369d1932e14d2a4d11a3e9553/mtcnn_pytorch/images/face0.jpg -------------------------------------------------------------------------------- /mtcnn_pytorch/images/jf.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TreB1eN/InsightFace_Pytorch/350ff7aa9c9db8d369d1932e14d2a4d11a3e9553/mtcnn_pytorch/images/jf.jpg -------------------------------------------------------------------------------- /mtcnn_pytorch/images/office1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TreB1eN/InsightFace_Pytorch/350ff7aa9c9db8d369d1932e14d2a4d11a3e9553/mtcnn_pytorch/images/office1.jpg -------------------------------------------------------------------------------- /mtcnn_pytorch/images/office2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TreB1eN/InsightFace_Pytorch/350ff7aa9c9db8d369d1932e14d2a4d11a3e9553/mtcnn_pytorch/images/office2.jpg -------------------------------------------------------------------------------- /mtcnn_pytorch/images/office3.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TreB1eN/InsightFace_Pytorch/350ff7aa9c9db8d369d1932e14d2a4d11a3e9553/mtcnn_pytorch/images/office3.jpg -------------------------------------------------------------------------------- /mtcnn_pytorch/images/office4.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TreB1eN/InsightFace_Pytorch/350ff7aa9c9db8d369d1932e14d2a4d11a3e9553/mtcnn_pytorch/images/office4.jpg 
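A note on the `.npy` files written by `extract_weights_from_caffe_models.py` above: each file stores a plain `{parameter_name: ndarray}` dict for one detector (P-Net, R-Net, O-Net), with keys in the `features.conv1.weight` style produced by `get_all_weights()`. A minimal sketch of reading one of these files back into a PyTorch module is shown below; the helper name `load_npy_weights` and the `allow_pickle` handling are illustrative assumptions rather than code from this repo, which presumably does the equivalent inside `src/get_nets.py`.

```python
import numpy as np
import torch

def load_npy_weights(model, npy_path):
    # np.save stored a Python dict inside a 0-d object array; indexing with ()
    # unwraps it. allow_pickle=True is required on newer NumPy versions.
    weights = np.load(npy_path, allow_pickle=True)[()]
    for name, param in model.named_parameters():
        # Assumes the module's parameter names match the saved keys,
        # e.g. 'features.conv1.weight', 'features.prelu1.weight'.
        param.data = torch.from_numpy(weights[name]).float()
    return model
```

Usage would look like `load_npy_weights(PNet(), 'src/weights/pnet.npy')`, assuming a module (such as the P-Net defined in `src/get_nets.py`) whose parameter names line up with the dict keys.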
-------------------------------------------------------------------------------- /mtcnn_pytorch/images/office5.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TreB1eN/InsightFace_Pytorch/350ff7aa9c9db8d369d1932e14d2a4d11a3e9553/mtcnn_pytorch/images/office5.jpg -------------------------------------------------------------------------------- /mtcnn_pytorch/refine_faces.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 2, 6 | "metadata": { 7 | "ExecuteTime": { 8 | "end_time": "2018-07-21T07:06:15.533290Z", 9 | "start_time": "2018-07-21T07:06:15.509560Z" 10 | } 11 | }, 12 | "outputs": [ 13 | { 14 | "name": "stdout", 15 | "output_type": "stream", 16 | "text": [ 17 | "The autoreload extension is already loaded. To reload it, use:\n", 18 | " %reload_ext autoreload\n" 19 | ] 20 | } 21 | ], 22 | "source": [ 23 | "%load_ext autoreload\n", 24 | "%autoreload 2\n", 25 | "\n", 26 | "from src import detect_faces, show_bboxes\n", 27 | "from PIL import Image\n", 28 | "import cv2\n", 29 | "import numpy as np\n", 30 | "from src.align_trans import get_reference_facial_points, warp_and_crop_face\n", 31 | "import mxnet as mx\n", 32 | "import io\n", 33 | "from pathlib import Path" 34 | ] 35 | }, 36 | { 37 | "cell_type": "code", 38 | "execution_count": 3, 39 | "metadata": { 40 | "ExecuteTime": { 41 | "end_time": "2018-07-21T07:08:15.237357Z", 42 | "start_time": "2018-07-21T07:08:15.214563Z" 43 | } 44 | }, 45 | "outputs": [], 46 | "source": [ 47 | "face_folder = Path('/home/f/learning/Dataset/faces_vgg_112x112')\n", 48 | "bin_path = face_folder/'train.rec'\n", 49 | "idx_path = face_folder/'train.idx'" 50 | ] 51 | }, 52 | { 53 | "cell_type": "code", 54 | "execution_count": 4, 55 | "metadata": { 56 | "ExecuteTime": { 57 | "end_time": "2018-07-21T07:08:20.176501Z", 58 | "start_time": "2018-07-21T07:08:17.337626Z" 59 | } 60 | }, 61 | "outputs": [], 62 | "source": [ 63 | "imgrec = mx.recordio.MXIndexedRecordIO(str(idx_path), str(bin_path), 'r')" 64 | ] 65 | }, 66 | { 67 | "cell_type": "code", 68 | "execution_count": 25, 69 | "metadata": { 70 | "ExecuteTime": { 71 | "end_time": "2018-07-21T07:10:25.708722Z", 72 | "start_time": "2018-07-21T07:10:25.687476Z" 73 | } 74 | }, 75 | "outputs": [ 76 | { 77 | "name": "stdout", 78 | "output_type": "stream", 79 | "text": [ 80 | "HEADER(flag=0, label=2.0, id=813, id2=0)\n" 81 | ] 82 | }, 83 | { 84 | "data": { 85 | "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAHAAAABwCAIAAABJgmMcAABQ2UlEQVR4nGX9Z5NkSZYlBl6m+t4z5ixoZtGuLirdgAyAnVngv+MDdoEVwYpMTwPTmOmqnqpKHpERGeHEyHuqegk+qLln9MA+uAQxdzdTU73k3HOO4m92u6AorRoCCQLTuBqzJNeaWNA8zDNyYgYAAgThMh/BY5Q0sIgQABRTRwjCEBqGgZnBkQFzzuimqurmEY7AgMKckSeRBMQEZu3hsC9a8jgAInkehgGZJCUZxMLfvn+nZhEhIhAREYLiamAuIiDjaVmMWQmd0InVjYjClSIgDAAQAwgNwiESACLDv3oQALh7RPSvEfH0f4gYEYjYn9P/xd37MxHRzMZxbK0NwxARYmAACIgpEWYBgohorSTmUAtzDkcCV4cICKCQi2kaJYV5NCUPImIWSmIEFgHm7EAACIS1UsQgnJhKawAgIhwQtQ45Z+JwhYCVCEFkkTyNqCQiJOwAYZqFn11e1FpbayklIkrEQsnMCHCapuZwd9ifaisRFUDBw1UtEBEiEAIAHADdHcIhmgVRIGJfI0QkQiJ6WrhPl7X/GRHp8XH+BIhqrcwcEUTUn6mqZiYOAeBBEQiECAj9V2FAuAriSCkTEmIiZqT1atqtV6s8WG1aKrhZuEFUr4GoDrU16b/bFWsEISi6O4WP47hZjZnYS8tE6yExZBEKviy1BuE4ZsG0LEsQElFtTd3Wq01LLSL63o8IIWLmMQ+r1erkfrlffdjvPzwcojVGimBVhfNGi77NIhAACQBA+wbsXxERwPsaPS3o0xr540NVn5aSmfsTiMjdRaT/OxGpqqg3AALCILQwCKQgFta5oFsmYiYJ2uRxt12vhnGdebNaT5IFghDdzd2BsJlV1+Myz/Ps7oKEARhQwoEQER0hAHL4Jo/rzXY1DKs8JMacc0psEMiAzHWux+OxtWYQTWQpZVyN7iMiXl5ejuOoWt2dmYUYAFYcq+EyCdR2qk0r0CAYDoEQThAY6J+eX+LzeY8fH/bpMcfHx9Oi961nZn2Jn57GzGbWvz59ixg4IZIwCQGhu9da0YjDOCAFJoCVyM1m9fLqZruedoOs1+uBJUzJIyKIgIQNwsEtPCLCQVsr86KqKKm5AeIwDGkcMktOaZA0cBqYhpRyTkSEgkBYSjFrrV3u9/v7+3sl9jFfXFwSkUP0Be3Hvy+Iuh2ijCkRrGrbtVYe5lqAEqMDOYC5QxAA+CfL+una9T+4u5k9rebT6e5P6AEhpfS0W/uGba2pKiKqqoj0JZY8DH3VIwICIsJUSUGYB6Z1Hq7X6+eb7Yvrq5vtdhzyduTtapVEbKkIPqacBwEiC7dQd++vxjXcHQOquSMwMzIhMCIK48CSWDJxZhIRAKckgX7wpkRpNdxMed5uaq3LUpmSDHmappQSCbeIJVTVACESQmALkPUKIszM3/8AxYGwmkVAxL9auOhvEqCf7sdt6BHxlJqewuin39VPuohEBDP3XPQUIp6yFhFJSqm1pu5uDoQIgQFZJDGNKe/Wq+dXl59dXV5vt9thGFK+2qUxZyG2BGCeE4uwmQ1CANkBmDkxm5mrIaLwYGYOgYiURJAAgCymMRMguEc0IhIwZE6rySGrqoWMCAvhCEgku90ugAAAwpkACQp4cw31gQHD3P1iyM+36+NxNj2VUlvV6hgAKMzECBxh9JjT+1rFOWP14NhXGSIcACPoKcg+hYb+1x40eyhIKT0tqKpGhLi6ewBiuPVwJ4g5pfUw7PKwXa13q2k9jqPwKLyZMoGFKQuO42DaAIDAAN2sEREjUjiEJ4QQwIjQRQiZWYQ9wLUhYk5pYMwphRqCi4iDRVhOpAANHJkBhkUSbzZmkceRiCy8qnrplQOys2qEmzuQaTLYSr7ZbJZqD6dS5kWD+nbqZ5Yet9vT3uxr+l8Fzcf/Oi+0GTwlqKc/mNkwDKqacwYAZu7ZEgDEVZko3DJLEFLAahqnlNZ5WI/Ds8uLm6vLy836YhgHJkYgDASr1Q2RERhR1SICwAmFkDAcmqs7EbEIQQCEkBChu0PCxNyfR6CBbmZhjgDhDgyIwAQU6O7rIRMRBIlkCwdKzGRePdgiXB09WEEcRmCH2A7TzRYPs97lsp3iw+G01MqmrUlKIgQpMaI8ZaSnoPlpYP2vQi2zmFkPqWZWa621juPYE2Nr7dOfQ0QSBuCBSFkGySzEq5wHgTGn9WrcbdcX2812PU1JBEIAwR2ZGSPMFD0AiUCIATgn6UVvuBMC92Kr1zjMzIwoPV4TwuNJ8gg3CApABEYOAjAHRGbE6PWbuysLGzhir2msv8nVarXMSqQAqO5KMYmsclrldKxKgKq6tIpEQ05DYvchCUUE4Lm6BDhHzE+z0NNWjYierD7NYz2e9jNeSmHmXs/1qCoIjBhJJOecs2ThKaccNg15O43bzWo1DrmXAO5mStGQEIiQAh0CwtWVghDCmJFcDXoUBxTElHNKqb/Q/joAkAh7DgGEXlrE+e15b2MeNw6Ch3s4WphrOCCMqwETPhwPdVmqFvKMgRQgBBlpnYbtarXbtPcPp3MENAyA1hTMXQ3Xjz+f/PEXwVPF/vT4NCb0iNHXq3/taerTcrWU0kt9ISISTomFgCCS0JglA1+sx+1mXE85JyQOxEAMoCDDiAB3AEciDA8AN3MIBnSi6IkSICyA4CnvAwDnxMznsOUeAI5IQWDnstkAQ8MdEP2x2wsPZ2BOGc0DIUtGpqKttubNHc/FNgdwOBMMTFOSIUtOiZpGFAhwQyAEoGVZer5mwh6pALCX6PD/eCAi83nhehJ/WutaK37yeIqw0qvuHiBSkjGnaUwDxmY9bFbDOLBQIDhSEAYgJWBCdNfHDwoIkZnPFX4P+UDhYGCISINwTsMwEFHOQ28neqPGiOjGgOHJ3dGt17AR8Gm9HRGOodUjAJjAmvuPhTQgAgYLsRtFMPogvFlN05CZT4zkgBjoAaoOHhZVhIdhyJAlIQBFuJkhPsVQ+q/W9OnPIpJz7vVmrVVEnsqppycLCgeGhqaglHic8iA8SYyDJAEMNXd1aEYGDh6AICTIFO6O3ncbEBCQR0CEsBCzuweCQzTT0ioSCzORMrP3mMWciNz96T2pKpqDeyBFhDk81YwQptqQkzCnlBLlIGyqqmoB7mAR/WMkjCHLJnAcx0Tcv51Q3N0B1AEZVC1icfcBUkqJmQAgwnrQ/H8kqPh08/ayqR/5vr7MDABPUVjOIQuAc5pWgwgHNObMjABh1hzJAowJwiNCgbD3s8xIPXKDQwABBrqHQQAEMgVAEIZDLQ0CRSQAhmFCZEQkYUR0NXdFRAKEZo59gxCAAQA9AkPeG+cwrRARLAkAmIiJLALCAR3QA9xVzTwiri52b+728HDU5sEKEYIJkRGjdzsREWARkbOISD8WANCDwNPe7Mf5Mf64mYlIa+1p9Xu46Emfmc+LzcLjmMcx998njOCqLbRSC2aiIEYERkyciMggEFxIqH++7hhBiIGP8Ez/3CLArIcbIorAp1fQD0uLACdE8HN956
QB5v3DI0IijAgP7QGNzmBY09Z6B91qNW/gARBEgD1AQQzDkFISEWEmllZqCzVQNGXm/mN70ROR3J35Xy3ip4nx8cX/WGkBwDAMZtZa601n37/uLhjACBPLBAzHYhTjIGyYOLvZqbWcGCU5oSNAgCE5ICMkJgpgs15vIpCqEmFQEDEAaGvMxMMoIiLS+2DqjTUGhhPQkKS6lVLQg5M0U4cIDkQmYHdXD0M0JyYBAEMkhAg3cABD9AmSOlZTC2ekTCiwiJYLltebfLjDdLk+tbDVpmjUZhFGERRAZuRG3prZeoqUzqHQ3SMMBJk5AIgCyQMcEUJtGBLAGQxtrQlnRjEzDCLg8JDr68t5nlU1IosI47mvr7Uyd5QQVZWYmJjwv+qLIR5hV6bzvzOzsCAiIT6Gp/PRePrkRYSZ+z4VES21mZ4/dqitdQTrnC4FCAWb6vkA8rnt4TOKgcxM4RFA/bcLpZQa4NXVxe7+oKeKGRtIBp6XqlrDFcDB7YxhEKr2AyEpCRFoOICH9/PvEQGBEeEI6RGyE5FHkKlXK9ETtSCiEAKipP7ygJnOcVdYOAOARohDMDg+wTDxiJ2el7hjXD0VJpZ+XiIiJel5MKWUUoJHrOGc6Puqfdr8ATJSECBiVdXWgACYMMAhzAwNA0FVa63Wmgd5GIAjIkIQQ2YZEizNVtO0Xa/vFmUQTlNpkRKM4+jW3BW8qjaIFhFz0UGE0AkRhQnPEImBCiDAGVIBIGCy1s5dH0DPsYFAzOCBiOJh45gJEQCaViAMQTULT+eTayGBQQhBfbf2tO6PRUVfCEJ+ggl6eO5oaQ/qPZY/bVUWcbMf61NmIjKzZVlcNSKIGREZ0YgcHD0AIMzP2ybQPSAQApvZUzKJgDAnDGEkjYz48vnzd/enj7cPPODiIHkicCKCEIbBvIQZAqhWIG5GvjSRSCmRIFCAgWojoiechJkXiJySanU3APIzhkkWhoiSCIkITFtTDuHMEdzx1NbaDBBEaRwAyAAAwAK4VzJOZwAoAgDMrS9lPw7MjCKIgMQ9CdZan85779s6Vm5mPee21sq8oHlEkDsyEVHOWV1rrbWjvI8VjLubati522HmcANXcAtXRswExeFit7q5unyYWyOWHkICiNg1DJ0wozgREab+U00jHDw8AZ/7T5IACggM78H7jDQjAFMQajPssJU7IopaJUMIG5iFkM8VA5GwG1RvnFI4eqAbwGOgROy/L6w3wxHhkFJi5sQSEW6Wc05JiNgeazQA6B9VrdXd6bHHsNpU1dX6N0ZEM40aABCI6lZr1TgPxhARgMzcrPdUCAAdHjw3uIACkRDIm5uPmcYxL0tjmUprjJFzdgAIQkR3BEMPfgxl7hHe3MyREQByogCKMEAOdPUAZAgC8KcOCogcIJA9QhiBEASlv3t3s9o0J+YU5+meG0TzCPfE6BjI6BEdVQALcHOL3rB/+ngaszz1Emfs+fHRARJ6bJYRcRgGGUYL71GiqkYEI+ScrRYiIkSiMwTTpwNzc3eHnh8gGI0ACIIRhJHMB5FpTPuqwZg5leNpGIbAXtwyBAR2bAERETAALBDNDcEBYNEgogDr8I6bMSdksarMZN6RHo5+zs1lyCki+g5EOn/gYW5mCGFuwmwWihqAPcsTCRESBkYAnsN1r5bdPbH0ngwf54h955jZU4WMiPIYN9FDRAiwQkGCCNCmvbHpkw9y0PCcs0cgcv9Hbw2MAGLIubUGgWZGxL0ACHcOB62rvH714nr78uX7Q3vzw91+rtFazkm1qTZEdDMiJiKkDmg6khAFovSs7eFufZDp5qTWmNG8EBAKuzvEIwgMGADCGCmnME9ARIDmvaNSVYBIhKp6PJ1oWq2HDITqjmbgQBiMQYDIIhhPCKu6uTs9blgRflpEM1PEnPPThAwRAbzWEo9xFh36xsSO3gF0+MO8DypC3QCASNy11hqAECiSRQDroiSEit6yUGJcyvH2sFx89vObNJaIz7cXf/7jl6q62WwOD/v7+/vr62sKXJYlJe6Z013DnAeu5ikxgUQEAkO4mYWjhpshgJemzJxSsoCckqois4R5kCdCQUSPLLyaJhEJc8ewAIMwOh98ifDeKjICBAIwE0ZnQPQFPDe27h7hANTP/lOK71huz+wRwQAgQkSt1nMfjciPg0mHeJqL9QVFJn7Mcj23AnJEIHIgECkiE2JiKXURhN12c1cbhP3xn/94N9df/Ob345h/97v/5uuvv/5W62r9slVTVRmyqxIRMDS1lKRHKrNAoKdTGNExN0MARCbA1pp7B561n0tJhIPwZr2eJLsZQ6zHSfBxehM/8il6Qu8bBAOYAIkdnbF3PwAA+DghaKrM1Kv3p1oq5xz/am8+oraP/Zy7Mwk+jsks/Ckcd/IIQ4dN4fyxMltzh2BmDAAgdw9HAEgiuKiVJTMxmmu9urrQulxcXPyv/9v/5+/+7u9+8YufTdOU0vDll1++f/dh2G5Op1OP9RGuTXsz2ptU7LOE/oKDws3BUCRQkAWBqlrVCgAyDkPOmZGYKBP3Cc+YJNwSAoKjKfb2OELDJcLQERA8sEMiFASP7RMhGTRoiMjU89J5uz0hsn2Je31H0dEgJKI+p0UPAtD+NHdg6n89T9RcUTsgG+BmpoBJeuAEIqIkg0dEc8UYhqGaD5L2h+Plbnv5+idfvXnzhz/8D3e3H6ZhxID/8B/+wx/+8Idf/epXt7e3v//971pr9/f3Hz58eHh4mKap1kVE/LGm78VNBAVEYBCJublDNGUiZmmtmpmAR2IJtRp1M0zCzIBMlESEMVy9BgI+NrBmHZ8KAgyHsACMcISwnnDcer5srTejKZ0Broh4KjnP0L25f7I33Z0B1Q0RhYgAGgAEWHio9SPn1skY52GkqwEEU0KyZuchNiK6Q2vNzZk4Qpno5voaCL3p++/fvXjxoqMhL18+j7Db2w8PD3en+TDkabPZ5Jyfv7j5+uuvH8+lwydLCo8bxzSIKefcWtE+JkEYplEEaUzZzdA7xscQoKrjNIkQGjRWip7H1d0lkCKQAQmQoPcu2Fv3T8JoDw5kJGgdi+4A19ORZ2Z3BYBOAeukAUYU+rHjEiIAICACTHD+IRp9zs9C5yjGEoDgoRH+dA4SJhUjj9U0vXn7wz/86S83r3/+56++ef/DqT/nJz/57Pe///3bt2+J4vXr15vN6vb2/v3797/5zW8eHh4QMeXRQxH9x5N+bskI8VyE9pct8iO9STabjTCrWniEeacAxXlIfR7Adhy+J4diPdsAASEAIsQnkNdjvfxjb25m6N5hrnEcH9fUWzOw88tCACIaJDFzVFNV7/AdIDIlosi5V10Okc7gIyGmLNK8ErEHAzN1tAqhmUarS6tJeHk4XF/ePL/ef/f27c3V9WFuzIiIX3311ZdffklEn3/++bNn1wBweXn58PCw3W7/4z/9k0f0OgxAAQKAAiyiY6AIQK3VNA7hIUMG8NPxMI5jGpPklPpJGcdpyAO49mbD3cO8DyMAPSI8QrUKUz8LjsC9v0WkM5QQeAYN8cf1fZx0x+OEC/v0D
aD3oPZYJFHP+8znvwZpPM7RAKxPHKNnPjAzZiACjOg/273zj34k1TBgay3n/LA0dxdKD/eHaXtTypxzPhwO0zTVtvzLv/wLAGw2u5SSO/zxT38ahmFZlsfj7YjU2Q8/5l7ENE4RNs/z8xc3z55d395+VNWH/Z24uyAFUYDVdkKPnARJwmsAEmHOCQAcaQktfbjIjCICIgESLN4jmjmCgwJQr10kiB2NKRAIKSJqrRSBwh36DUJzJ4CcBgLojXnRYn3QicjEiBgYwORe+tTTEYKRUIiIUICnpgWpDNJqW2q1qtQ8OSZ1EwzBxtZGTplxoahxalATkuQBMbkCAAjS/vaYMhPRw8PdI0YOZg0gEB2REahvVABgYggDtPVKklSR080zPB5Pz19uhYiY+qaACIA4Y+nMHWGzcBMkRw1zDABz8D5GD0QC8AiMsJ4q4mljAhgEhiNKZ3ecy0YAq9bpqT1ZoQc4PBF6e67ve5mIkDvDQN06/tIjxnnchIjmEQjgaAERwMwJRCPmeWFmUBWkaZqGlCPakHIFFGDTFB6tAAQDALAwBxAHBQZ39uNjxISIc9GNPwa3QMQAWJbTVMldA+L6+jrApLfSfS/jY3HTAQh3DzdyV1R2wug9MhMiPY78e81rHakkwiCHcDzX5ErA7szcSb8RYZ8c/6dAGxEWIUSEhEwZAZnMDII01MyaaevwrYCIqBqeG4+iJNz5f27MiQPUyKx4qLUKCCJCTQmYkdEQbSTMrhARas6cAQMAkROAd4omAHUCGQJjR0CCAOkxLwAiABoTrlbj5eVuHIe7+31tlnMSd3XH3tAwEQMBgGlYlFGYCM4DXvYkwmkYMDIS97EaOCI5WHhHOiMQHcDCJSg6KtBxT2TEQMRE3MHjvg37J1e9YWuJWYiNAoFIBEjc/THHOgYRnWEUBuxv0dyQEUiQEM2RDMgNznMtRMyMwIygWYSRwuLMUAHoU7BAAAQLB7RHHLlH5fM0CcIh4rxJP5kdpczzckS0u7u7gLrdrdUKYkife3WcUYQRwMy9KRMYgnQsBWlMacyCiBlAiMA7QYWcIMD1vDTnLAQAgIEEQCgszEwYjGmQ1Olq7ra0euZiBHYsJjFnSTwmooBAi/MUIhAROOeOVBmYRwQjpcREpIzmDs3dwB16jUaEALFeDw3cNTbTarNSin1iLqBnpM6dGCKMEDzUoW8Jfxy7BgQBYG+WH99Wn/J6hEVwKeV3v/+b3cU0z4eHh4/3Dx/v74/SA1mnrPv5vAKYDeMgZ9w8Bk7rIQtjRIw9JJzxVEDqaR0MguO8oGEe/UUHJKGUmJkTcQdMe9kEAMWLmnq4qYVZcw9zDGVmQg4ERHYAd2+t5UGIyEwhgjp3HQCxNws1zC3AAsxCVZsbknVYLBEkoHHMWdJcLaUAQA0HDCQM92AKBwjv7wIRKBCBzgxy6DXDmTPSO+FedT5//nwcVvd3D9vdqLZar9fPni3SB/z4OCk1MyZKzOeMgTByGgVz4kSEAZnpnEnAAaGT2RAR3VEI+9C9R2wPR6+1CpJIypKE5Vx0MFlKfSzj3uf6EBGllGW/5JwlDyLCKcNjt9rjg5baW/iqtWoDgApnYmYP+g5h4R46jtmioWAGIoOUKWWi0pgwELA1ZDG3oF7ZPVEbGAA6qBPR62+gR8JEf0ZPxa3VCEDkq6sbQIWQq6vrWpczrMLIRMSAGsGAImKthSByToSJJSMNwiICpuhhj/TJxxANRCQoQsRIDEhA6IAY3lRZzYwG6jyWftJzHgBQJLm7iZqZ1tpaw4D+qZiZRWVm6wHRI9RKKR27OpWlc5KUpI/X3aFpIGJK3IDHVXaUxRo45ElyFiSbVqm2yijFIWU2B0J29/MgrsfHiJ6jMboS6Ecqzqd5nykxwzdfv0Hyq+utanz/9mPOWVS1Nbu6uCQiLUsSQQBwY8LEMiUZMo9JBDHMPSqcyS2IiICojzOyiHAzcGFiAowIJEwsRuFu7ue1eELvtRrnxJAAINRaa1UYmQtia7r4QpIt1Myij6EeQ1jRVsMCsJmCQUUzMzcw86WpAoZQHlMINnMgrLUaIKX42S8/e/fx+PbP3758eS2pnUpJkhwpZz6eTogI6H16Gh7MYKZIHQc6x1HoBKiIgHAPcD4djjnLx798u9mu9vv7Z8+eiVmIcCnF3RPhKAnChWg95dUwrBKn3ga5AZ1PREQYAPnjZBXOScg0DJshEQh4NG/u7gjG3ty93ffuc7u9kJyZUydNm5m6AwplZkowzy3Aq6J5s3iEJ+Aphlo4BVWwU2utteZBJBBoFsXcGcOjlhbVHVTDa2Babw/Lh3cfbteXzz77/NVqvbkgOS01gIP466+/3u12t7e36810PO77ZL/W0k9qc4NPHh2UQaCcUtMqPNaiwtMyG+F0PFTprEZVBXNZjUQY6kPK0zBOWbIwh1I8VrMevR0EAH8EQXraJaKwKABEBo5E4MCp8xQIm9vheHL3nPOxqOSEnDog3x4hKOqUq6DqoQam2loLMER0balySqnzbABQAw61nuaiEUyJiAJJPdSjFC1tCQpOVE0bsNBptV2t5vlPX/zzaaa0H0jyOE7TarfdrL7GdjzcSQIRFOFPscTmhv+ajBefCBXCIYCYU2st5zHQtIU09awxJMopp5QggjHGLExIGGEaGEQgJNjZd58ArggAQX6mx4QjgsVcWmUVkYEYEANBI8BgcVfVk/q+GQByEg/U8KbaF3oYBiaKubbmBtFaOS0zuCdmd01GQwQgGiBaFMCHUvfz0hTMTiJZ8uAQs9ZjWZa6mLVAkCFjylbs4JHGdP3s4lfXL/I07h9O33339n5/ePv9l0x2cXV9e3s/zzVJQqRaC5EgnAHyT6NnP5SI4N5nC6KqScZwSJLcVRDZAgA5pUQBGJ4l9bETAQACBiB2zAkMgojC8RErpHOZiGSqCGAQTRsHZxG3VsK0RURo8+V0cgsNEJEAOpUFkEnYEcKRqDLPAJDdm1tE1Losy4kBc2KOyImDBYksoLnPGh8PZX88FafWGsAiIi18Lsv98bAsJxnyMAyxn4HloZRY5evXr9JAMvo//If/369//dvNll+++klK+asvvwOgIW8/3h1qWUQGZoboIiyGT9LvI7LjABQROefWWp/jPi49ieSxE0taa0gw5bQaE2EQBiP0UsndNaznX9O+F/GprTbvhe5Z5oREQeiuVkNVj8vSgatlqaquqsQJOVl4VXOIAHJ3YOr0iAzQORZNK5hOScYhZUKHRE1JpEGo0sNSb4+n+4fT+8P8qHILDbfQqm2plYiGlFbTJq+lWqD5YT7NtaT5dPP8WR6Gm+f5iy//i1nc3Ny8fPHZ/f1+msavv/qutSK8giBE1hbEZ/VIR0ke17aP1vKyLKvVqstBalVmlpQSY5iZdyYyBp0Fqr2WiD4hNTCCzlT+cc7jEGEBAOGPLFHAwKha69zmMhdtD/tTL6pU3TWW0jTcPJbSqrkhiHTM+gy/roeMiK0Va5oFxyHtdNyMqc/4BaA6ztruD8cfbvd3h4d3R1XV1lqttWlBIhkyM794
9vxqd/Xs2Ys37954VWv2xT//5zQNF5cvnt189ubN+4uLi9cvP1PVh4eHb77+YprWTJ4H8dlba4QkLCmR+ZmrD51X9kh0JiLVOk2Duw/DUEpZr9dmIYgIgYDQJ6IAHuEp53NYNEcMISZiAiSidKZLkENAdCmvQILWWpgFoUXMy+nDw93heNTwpk5EiTMAISEwWbWltMXstCzNHIn0UbnGzHeIKSUIM2uj8FoThg9p6wgabs2Wpodmd4fj7f3Dx4f7D/M5pyFiAA55uHr27Orq6mJ7OaTh9uOdG4pkGfBXv/rV+9sPd/fl/uOteVvm9tOffJ7SsN1uu+7o/bs3rbWcJ1QiTKYeXVSBGGeZ+I+DxS6N7tPcUs7aXjMTw46tmKIoBiMwM6GEVYhgIiHicDAARsQwQkQKDfBgJvBw166BoZyU6OPdx4+Hw2HRElyKIuTn18+JJCLGaTUFlqWN2tzhuzdv9h8+dN229So9sNYqEiKS0wREAclDSouqIIk8oDqVZsclPj6Uw0Je0RxAIJhmrVVZ5jjNd8dN2W6m4+EDJqqh2KaLdPPTV88+PhzX6/X1zeX333/37fffPezvVtNms7n4cPfhtCDSypxT4tZm6OM4Sp0S3SWHiNzhZ226Xq93l5ettap3rz57SUS3t7diZkjQ9yN+InogInxMcME/CsYRmAL6cBIRg6A1K/NczI/LPJsdllLUaqvAPI7TOOzOhcUwusP+eKhVT3M5nU6Hw6Fvrkf6lTOzMHf63Ho1Xmw2q8xj5rXQdrNipGa+niYexu/v7k/HvRvmNI2SqlvxKsQQ9nB7F60+fKDVmHKmabcaNqv11U2X1u12Fyh8dXPz4e6Hm5ur6+trs2DKrZyW+cicI1AVEFNEJywxRBCSB4pkAOiF5jAMm82GkfaneT2Nq3H65ptvcs5S2wIswthHksCpT4z7Zo/HaravZq/j+9nsNbcGtPBjqbPW+4fD3BQkcc4TsyN5BAHVpX68vd0fTyR5Lsv9/hgRrRkzd5Cxg4eJOOdMtuSc1+v11eXF1W67mfJuPW6nfLlZE+BpLg3gYW4CkBC229Xm2es8Dg+n/cf9/dxqrRrqJEnArGler1fjmtNUZz0U213dBOL11c3DwyGn6f7+cHNzczzM47jx7fDu+z1SLqW5e0qJKFFvBR2JWdU7v7UsZbVaaW3g8Zvf/Oavf/6XaZpqrS+fP7u+vpbzBvFohDEkfJwOuZ917vHImzkjxB4AQMIAUJo201OpJ6uHeWlElAcgquqn03ycy3E+SeSHw2GYxqW0/XKq6lXt6vr6YruDiDIv8+G4GXe7zXqzXjOz6LFzgcZBGIzBRuGLabq5uNisprnUovHu7uHl1cWvfvYTGcfd9edpSId69Wzen5Z6nOcwn1JG98P+Xr0NMqzXl2/vDg/7+XB6f4p2dXVBDMO4CTCR9Xo1rFa77775c20xTcM4DF0L0Exba5IzsiAzhjInRBSpzNxqvby8/PIvf725uSHA4erq/fv3zy6uhJldLeDc7fStp6pC4Y5B5Oeu86x3ZKSuzuw04lNdTks5LVUtzOE4n45zOZyOZkGSCVibX+2uZBrwcBo3W0jp7v5+s9kMw5BT8vXmwOnVs5uXN89evXg5phz1ttZayuzumXFAzOAcBnXhaRBXkrRO/PrmqvMYIk0GEcBJthdrK20LBm42Hw+75y8p0XRxFXko39+5cVOcI46nH9brSa0wwv3t4g7rVTGX1bSFENXamYXMQpmLKxGpOwDWPujOQ5d71lovt7v337/72c9+lpmvLy7fv38v3hQ8hiwdnG+tSdCUMiJEh76jy7moL3eiVHrnQCgipGdw0MzMw8xyzlvkAOoER4Zhvd1sLi4d4Yfbjx/uH6ZXU5f3rMZpFF4226vt5np3cb3bZklU27IsR4imJRFOvXdxy4RsJhDIMApfbKZhGA7L8vFgagW0MXkArhI5wVLmFXPOediseBhPzqYYnogGByTCpRjxVOY5wKzpMt+36szJ3ZEJAZp5z+xMiZk9lIRczQGGnJbFPeKrr75Z/fpvD/vTfr9/++136/X62bMbCTsLbMDDmkangJ4HYRA9tyOeZ5lEAJ4Y1TEAxpwAVgg8DAMPY6l6fziyJLVwBw2otWrFl89fjKsJEl9cXNzsH+KR2qiteS1TklF4TELe0EPApkQ0ZVUSgpw4E5LHfDoKnvUJjD4wHI9zm09WCMEyIaTkEc09PDDJuJmImVNykbJf3AOAiJJXzGNurbiZ5G0rmvNGW1uWinT2FzAIJOiaLFdERLMAcYcI89aJFMPInKr5r//whzqfdpfXr1+/3m63Mg0DRpc6OAOOKQuf52jhgcxEIp1c5wEGxJhS7jwO5DQN43a1dqRiiiw3F0qSTrW0Zktpx+NRBwgrrSI5t1rF+1SNzaAZNDcR1vk4E7KVzWo9shECCgcTMydBIQxTaOxNE7HWJoG71fp0WlpODnmuZW4LIgR6lAYA6yElkc1uvTQ34TAXzmx0mOfgdamEIaXqRgZKuZRCJJvtVWtFVVHY64JMHoaBAAkAALv2jCzCIUhSVcvCQfzlV9/M83y5uwjOLz//mYR5EjZvncFkZkjSmQMWrhEE2Dm0fbKGKVLmlFKniDBzIDUzBzCHkl0DNqupmR9Ox8wxFzMtRUtKwyiSh4QBRFRMwZsIjYNMkpEcvYEWAEuSh9XK3VWrEAmhVS1QECCJSM6MDSOmPCxzHVhogJRpadWarXJaDeMwpK6IjowVXRIwwel0Gla7En2BcBzQA7U0llxb6RJHBwIzTmc9XEcwPPRpYnamuhCSsAJ88/YteABQ/XCbxhWlQbIkIsgyZEJ319pCuHPZGRCR+pTaH804+gM9EJ0CE3LOmYiW0pbWADwzB1KlkNW0HfJhKYfDoZTK0MjNW6tLizBCvFmv19NEAbUtBLjZTOv1enAlIhYxs8XN1ZSAAMy8459TytOwMqynuRJAq6VZdYQxyWY9EZEQJmZE6OYxHoBuzJQyqypKhLVxHFXdwzjJspxyzubNzzppRsAIhTNDG+iTQd2Z3YbcLJjZ/MzULGbff/g4N5dE3LRgF/EhMTMTQTgiOpznOe6OyI/CLUAPREwsBJiQOJwBE6ETBKCBm7u4deUXAmdctTEhIpgvEA0ijDabzXgWWcZu2kzTNIyJiFY4qapGIGLOWVvrUnJ3j25l1an/em5VwRuGUxhjHpgQEdyFkRDMWyeQJ4rEiO7MMI10OtlSjmYt54xIIqLaCZPnoXsARDzOyp6E5mepGwGd1a6A3NrCKJKEgPZzOZb30mUZiqCMubM5EJ8GKRHR7McpHlHnGT62VcQR0cqiiACUEDCRejhGFukj4tVmjVvsGqRlKatEiBMiWlMAo4DNatput+v1upcZNhdVUG1PIksEoBDX+gRSQB/TIgrxZstEU9Vm4UlQkMAxMbnrwABhED4NeUpzEpp22yNjWTSlYSnqrkvTnLM3B0T3DvUiBTxOeYhDgc7yIe10EERA9kAmTGlAxKZnzhahiPUCk1BVm3CfSQhTYKcqBuKZdtYXkoH
dwZorKlAwdp7XmVkrQZ1OBYidK63ekNkRURATKnYViKWcRGizWm+32/U0IGIr1WqtdQkPDFOHR1IawKPWtbU2AozjWB2qBzNvxzQMqblVbcMw5JxR3a21tlgwtAqlgToj7VbTtJo4yXH/cLFZj2M+HOdEWKqeWcHoiNjJL50HjoEOROfFDToPk89M4ohHtrtDt11o1c76BMTQsK4UA+oj5SctD3WiT8euTYMIncMsIlQRu9MQMYE54tnJDM6dFTtwRKhDpnEQnmuxR1BxmqbNej3lxNhfPgRC7ZRoIowQIXf0dhY9qnopbVKdhklEVsM4pLxEHadpM6zMjCllFlPVBU66gLswjcJJ4PnV1eXLS6PxICwkgLgCrO1dqEGzznLofPq+aj1DPcpWqDO/EACZEAMQRXJrDQDCXCR1F4vqRVJKpRSnEAQL916ydOcKhHCzcERwoj48d7XgICVEIyBEB4uoIIkJMBESUhJKxHhOi9BaO5bF1REiC1POXXq1Wa2GlEOtloLuCCAAKTNqVHVEYBZE9KYRwUSdYHLmkcXZX2EpM7hPOVMXEHpwEIn4MPiiVIMA2UFb06j3p1NcXr568brU+vHuPgJr0ZQGh+js306kRwAE6gZZ2OeyDhFn3RvimZOccybA4/EIHrXWnPMgSbpOLQ+CRKWUZVmmxMCMwmDhHuhhBPyj6QaGRY3qLp2xf3ZnQx9SDkQzY0AcmAJaa4EGEAIohEOSMQ0gKQgzSZ/RC9E4DmiupbbuFxbRCfwdrzkTvf2RPkwkIqzeR4cEXuosC0/TRCiAgAiJeTEP8zIviElEluPhw7LcHdrxh9sXrz7L4/Ddt28b+DiOyLTUQkn6tBExCBghAJAAPQgCACFCI/pCBwCshqm7XILHuB57+2Nh8rHeOTdwO57Ks+0WHW0xGMUCGcUxFDSTEJ3JigsYP/LuUgA7U/dHaWhgGB2gciiN+0lu2cEQeEwpPbJtCQgDsUGE5ZyQSV1dYCkGi7XFhTJAeETmVNRra+CRJM+tO1kyASQEBm2tAMAqTY9JXxGwmmooZIkhqVJ1AOGHwx3xdHnzk+bw/vu3ljTCizew5JASJwcDLRAGHIAehI5s3SfClvVmBFO3Ak6hsN1d//TVL83inbzT0ELH1haIJtYspzyI9FDVWYnRMSe3MKBHco171No6X0OIgIM6+zEoItQNzyYsZ3PT1NOJexA+7u5e5xERgQcxPbJxHIGJgJlbAi3RWu00qwBSN0eQM0qbeq4z81IKBWg1RlO1psqU3Mysdb5u+Jkda6Ey5M1mEzLhNH339tvD/KDY+hg43EUGfzITBAQIIIY49zoBSmws+pOfvSjL4bvvvru5fob08J//9L/vtleXl9dzVRJ5dfHaTKUtBdgMK1RzAVVV9UbGWeJszeCtNXQR7rsK2CnOwg6MiEgsXd2HIXjut/AsLoV+YHo1RohAT1h1l5z3Uhdbq3Mtx+M8WzvMx6ZmEEgizADIREystXblGSEPw1CqMnUbETYNVbfktZlrJY6n2q5bTZijIajp6e5hLgWIEVytRAAEMZmZY5c1Yiczcl9XokCiQCCozR62F+k3F68GGe7v9s3m6xfXp+Pbjx9vEfi0vIsIIZLWrDbdDMMwjA6k4eagDgTAyBjYZXRMFAHq2lez12zuHpFcvOs1Oz/H4cyPDEQR7pUw8o9qht5BlNLcjTkhRGu23x/3+/2H+WBmnRVKJF3enIkbGAKauvUhFXVDgzTmMYIi0Bxas2Z6lqqGuwMihweQzOX4cDguTgdMwzBYawE0jqtmYQqlFEROnYEO5BFI/XsBg5HQvZnZ+/fv9Wq6ut7Utj+cPlzdXN3vvxvy9PL1er1en06nDx8+SOLsrsEwTeuUEgB2r9uqDcOBOXe1JoE5uDUMlT6LdwACdLTuYhsuDt0OqyslkEiYMfHjMsITYSAgau3FELVqzfS0lMPxtD8c96f9NK0kiTm4OziitwBtbqtxioju3+kQTEREKQ3LXDt8VWpVU2FozdzOXNGmkYaxtMPcVDEZurpZALEggHsFIGamoO4D5w4e7oZEoAHs0ErlBBcXV9c362nCVvZDXrf6bjVN1uzqcgMA//7f//9/+/vfvnh5IaHGSI4CAGYRDOZezXsB5ADNjMPDkSHCgPi8Np1e10vUeKSImhkwECB84kRwnmRjn3p1t4JQVSKBoMN8vN8fTku5fzjcH45CtBrGYZjUoaf47npi6jxAuKk2jGAixEAKQTGb+8Q/CCOcCJ5GVWZUiwWhWjgxSg7n41zymIKs1obESMjAj9wQQgwINIwANPMhBRKs1hMzvnv/lslYwlsdx6kW22wuv397+8tf/vJnP/ub1bh7eHgQjHOb6WomBpCqGZTCq5G7zapp79YDIbEQRW9sqbOrAdQtSgwjqmofFKazdwu4x/n5CHh2Kuj7zt3BrJnD/ni6fdgf5tP+eJpre7bZTDwkykMiGEhVQ5tCsx5jIlwNETixz0bdJZu5+/pREsQ4n3ZAM+jtVm2ujkjJSIgwzNXBVRFpkKyqrVWR7GoASCxE0PMhYAQUwHb97Hq3Sc0gp0DyL/78l2c3L+7vb93p44c7opTS6usv3202G8EAQCAAC19qZUZz9ZTHyOBAbgghwgyEbhYO7t3sCCiQJUi68JgqGQVI14Qlw6heETGlJ5ShU1jPb9gNlqWqx2Fejsv8cJpPpQJS4kyRwkCYhbOANEBXYxYCZOEAU62OvpRTgHV7mNLqXGPs9t8Bnc9y9s/goVYHYiAJR0MdV1OtFZGBYZ7nzgk0VUDvokgRcSTosDo1DM5pfDg+7LaJxFpd/ubXvwADutodD/NqNW7W6zdv3kbE+x++l2VZhmFoqjlLa00T73bbcG1mQZAQutg40IGJkRCCmc+IVkR3EnnU1wO4EImQt9bsbMt19gUws14/hYYFttbUo6q++/DDsdS5tuq22W3NqXkIEpFAEiCwhg4IBOGWCVhErRJ6hLlr0aZuc1lIZBgSEak2d1f11qwEmlNTm5dKJGZADKqVGatFaHQ7qNbaj/48XRwL3X8RWiWiQRsP+eJw3F/uVmZaFlutVn/9619++tlPKtXmJxn85z//+dvv3ggJu3unjTfV5gkRKYl6QxICoK78fbSCTY/JxQN6VypnPxECcIsws1qRkQQ7Hd86TV3P2jswMwcsS1P3peqstbTqiJilqp/YrBYOg1KBqWvlDIyiu5aFaL0/nlLipZZaazUvrZbWEqGFo6O6gcVpqU58OtaTmzqJiC6GSBFGhKrWGzAg0la715j7jxhbAPQ+lyLGcfXV19+xwJDo4w9zK6ftdv3eH/7w2/9RtX28/fLVq+c31y8f9nd5nKTPS0XEIFprZmYQA6Vwde8mkdgcACEhQydPIpkZmAdF4jMzycIJ0B1aNcdgAhPJLCzSnthcqGZmAV3Vfirl/uHw8fYeCDeXVzLkh/1xJj+VpR6beuC5PrOceBQSiAkZW4vTKWeptdXaiuH98XA6nVaxESnMj+wjwtNpDk4k6fhwf3+4b8GQpgA1g+4K2uEiBwjoyc
8DDIAYiRAcnTBShsPx42q1qlVxmL7//t3l7uLDB52m1f/1H982XaZp/ea7RRhKrc+fv5QnVbQ2DEI1m+eFp7NAwiAQgJiim4UxBwQgIbrhuRQFAPUIUwZkoYgQpAhGjIahy6JuZoFMHV3t5cRclvm0PByPiLjd7W6ePzeIt2/ePcTJEX74cPvm/TuStF6vp2nYbdcD03aaNlPe17pRG8fcP/599e9vb1UVU4b5lLnbwumxVENc3G6Pd4qxu1qf7meDCiGEZEhgjtwTQDCL24+0RcQABIZAhKrLertelpKHbOFXlzcfP96/fPH6dDoVAlO6uz8yY86pzMvth/dS1QEA1RR0SNksjsdjSpL5rOt3Aj8blVFn2joAklBv25EDwdxczYhScDCAILijebg6m7urOwOrm6o7gKruD8f9fl9Km1abq6ur3Wa71JKFPn6435+O+3k5zst+fjD4IALjOLy8uX71/Nk17EaLgnVoZmbM/N3Hj9/f3U15vEZRg96vLtpaQAErGpBQiF5e37yb/3o4POCw5ZRNDUAEqVnT8AHRwKhbQZ6nPNYBzJy2h/2cc3ajbt41TMNcjw4+z6f1tFml1AWIeUpFTQIJAJoZAeQMqt6quYejB1HvFonIEc1Bw/uR6rrWLl8IQHPvfDwNByByR0Yw1fPkK9xdAZtZZ6iWWiy8mRLzbre72O7GlMdx/PWv/vZw/M/H/eHZ1fXLV5992D/MrTbz43H/9bv3SmREq3HaNwfTWjWltF+Oikx5MCRyYATrulaKqlbDnOS7775Nu9l8SYMbWSuLOa/Xm2VRROzkL2YMiDA3bN3RNEAB0WwgISDqt6RUU85ctIgIBVZbRCSnIcICfZiydDQeA4jALMx1HMdwNHBhdiAGUDdwYkEPOVulISJTqEWEPkoBAcgDzYLCIEIBKKCoA4SFi4i6mRsj1KZ3d3fufrHbbbfrnLNbY0hXu4u//83vrncXxjybUc6yWinEx48fv/rm6/vDjHS7XlcicjVVRab1athd32ymlQFKoDmEqgPU1lzweDoVyc9ePYdhNaN/+Hh/sbmYi97ezcxsVjgzM+o5zeqjXUecVR/gHtod1rvGUpsxc86pNyZNNRxdCwCYNvMqZu5uQiQh3d6rJ2IMcOfOygzAICei/HQFDndbTw8LBLBwQuIARwizCGKLLq6hgR3CmzW1oue7Hk7Ho5ptN5ubm5vddjuk3EM5E23HYT2Mi7knXg3jfl6q+Xq7e/Hi1ffff//9h4/jsTN4pctz8jRcXt+s0qCnpXcTy1xCwMKPpyI5NWYBev9w+5Of/vT62Y3FLiB/+If/OJ9OAI4oy7JM65XVAh6u8aTp7A7mJKa6nJn2SCTJHUPRXcwMgiHY+sQwp9aaaDgxN3cLFWcRPFgdVJMQh1P32HJLyAy8uKJ2UiMAABKbB7p108tu9tMzPj9e6ZK1FG0pj1UbQSzLUuellsIozy5unu0uE6eEEBCtLQrxcDpGQmWYTWdUTDjmjMDTMErO98f9vMxAMeaB6jykHIDEwmNe5mP3swmOIGzBhnR/PNaBTxDFjtVX65vVf/rjP//8Z3+bJlVcigZKSnljFhqZCEMQEIMgogVyeBDyPM+r1ep4PF6M4+lURDpWa0OWWnu32q07JTzkzGIEGHI6F4mPHne9TWTE7kjUSWR2NrzCfgkFRHTBXJ/aP8LqT9ZNEQRqEE2XZqXW/eGkqoR0fXm93l6koV+hAEBSvS7Lcjgd7u/vj6CVqJS5NgemnMbVarXb7YLxUI4iCQiZEzK5+3GZkzAAmHuYAWF41NaKtmGaHk6HzbOr689e3Z0ON+vty5exLJWZCTgGdnfTmsdERG7WbytxtwiXxIGxWa8IYplPY06Hh/ucBm81pcTCtbbN1IfeDuClzCIi1bqNKHX/SKLUR4wO0FQpGBiZ0MyqWdfBMXPE2YCZ8NElCjHcDYAfm6gON1STau7utdlpqadmGDiN4+7Zs2G1dmKLqKUEwLzU41KOy3wop4JQCUurVT1TRgohubza5VXOp6EbB3e0sA+oS+IIBWtMxEw1nEVAW2t2/7C/b2357k0FuDvMpY13d+/cQbUGjD9W8hERQRxI4c27jDzC//aXvwCA169f7/f7N2/efPHFF8DkZq217W5ba72/u728uj4tZZqms312RBhjV0yOQ3b341w2q1EJOJyDNALdOtxcTdN5G3I6s28JCcLOZllOyI+gPSIWDwNStVPVw1ItMKWUVhsZV4emi5qpLsvSJY7VtKAbo0MUr0ExroYxT4ho1jbTKJlX63Gp1cxaKYlFw9V1qcVbFQjJjEyh3q1citrl5TWuN3998+Ynf/OLQDi+Xw4PJ5GNBWIECgGBqeIn8mE3i4CmM0FYm//yl798/vLZh+/f/v3vfxet5pxrre/ev1+thm8f7q4udwA2ZA7QsJA0jLVWC8BwhAjA0gxxFgITcmZIKSfuF6UYAAUaIISjIyJjBCMisnYDs24w1Pm6QYigFoHYPA5LmZuKSJpWw3pV3dpyQoAyL+7Wu7XW2hKmhDXc3PMwiAghaC2mwTlnoETkoEur1JwRgkKbHeJEbushB2Hpdz0gUx6mLICE0+QhZYHrF8/Habj7WJY5CEUkAYudvampg6GI2FsDQr55dv3y+fM///GPrSx//fOfPnv5YmAKbc+vrz5/9fIwnz7/7NXt/d2H29vN7kZVP97diuR8OJ0wgseRiDQ8zEnp4TQPTJoEIogzImOAWjB3MxHSADAD4H6XSAdvMM53T1CcHXUCobV2nE+llJzztB6n1UjCx7JoKQw4l6VrDlv4YZmPWpcw781M+HI4gCMjYhAhCid1S05LMQ6yojGmuZayeBZIQuzJ3YPYiZeiBzOcVpebm7/5ZT6pauNaLOcpPMDTvDT3xjl1H7GIcLeUhrNagdKzm+eHw+HFq1edQvH+3fevX7/64x//+POf/+zNmzffvX3zb/7Nv3G3aRqb2+X11ev66sm/20W1MUc09cCAACGUFNRU54LO5iyCcHZ86TAcUYQGM0NId5/j8yAkiFTV1NxaKbO2EmHTtBnHERFLK6fTES1SShZOENXdVefWmoEpKliZKzIdj0cC3q7WQ0rTMAnnpRYnzGmMiONxdqqIgWYLQpinIUsa1KGoHuf27uFYcf9235yTAs/lIZS1oBuEh4gwp2q6LKcnINysmUGE1Vpvb++vd/kXf/NLZP5//0//09dffrksy69//euPHz8eDodf/Ozn+/3+3bt3L1+/0mV+8+bNb3/7WxnGNE5ZVaMbrXQbilIRMbN44NK01lqIpnGcIgtSQCC6IPU1BbAgArMu2gfshgtogM2j1sW8kpAEk1CtpV9GOR9PRDwNIzNb09OyNNV+pYhZO52Ww/E4rCbwPliSzfZys9mYhQEeiw7rzdK0npZuSRCmXqvWenVznYaplHLYz4EyTrt50XrS/bIsGsj3U17PpeU0dszbXc2s37HVDeQiIjDGPB2Pxw8fbr/55r9M0zTmYTOt5tPpK/oWPU6n0+vXrz98vB1W09XV9eXlVV6m+e2b0
3EWQbrYbE+nU3Nr3siH0qqzZM9LVUbicASPJFhKl5at8xgQFiiEfWjobsYc3R6IM/X5O6KaNVODUG15TAF2nEseh493DyKSg5qHRlQrh+PsCCLJncpiZjjkdTimnBDxVI2Op8PSROT9Dx/v9g8asbQqKaG3w8M+J96sBnU4zmV3nX1pBmJomBhK3R+K0bC7uChVm1r06zIgIkyjG28CccLHWw16ahqGIRzzZuMBs9nHt28vtjsrSgHq9O2b92oVEcdp+stXX0eE5FSrShIWpggPNXevfmaIRETV5loxYJXTIMkAltaISD06O9wBIwxV4TH1MyBxAISbuYMhOWHVAEm12eILIVe101wAWhI/VWdOJDIrNI8BMNQXg6V6adURJHfTF39/++Dagc7ltMwtALOM0wTzg6qunl3X0mgiIP7h4y3mddPTXHRWsGAkVqfWEGggan1iHxFB59oPzgYS0GtYIjII6lV5YjMdhoxpKBZC6TSfhNgBZFgjYtFmpXoEM8+nIlNOVmWP2G+L6Ds/EDQ8IZemBC4EpwLuufVLtphjGEBYKOis9kRQB+iYrbiDekQEMjWkakZJillrMU5y2s/vP9wD4Ga9GwbwaED88eHhtFTJ6eFw36lhZ8EzUx+xo4c11aWYRvMgQY54mJfLddrtdtM0galwauaJWD0swIObhwcDMQWZdcE2sAAiGAQE9PvnelPprkTk/eLaDuCBeXAAqCHJWLVhYgUSTqa1dy9mSkTMAgDLUkUYkxCFm7XucI+IrTVGIgY1FcBmQVUJJchOSwWmCPCcBiaCEApGbGb9FIkDgJ2l+8QgyZCW2mrz1po5PTzsT6cZHXVx1buHw7F5LGqOwJIXWmo1BqA+GSc0M7BYjaOeFlPIABdT2lxejKvpVMtqnTbrCRHRpZouS91cpA/3+2aBLF6tmgMkSUPn1WiYh1NAgCMxATGJI4X3UY8DdaAHEZCZ1QOBa9Gcs1nUZpIGYDTrbjBILH2OS4ySk6yHHKbTmEspwJJSIpZlWSxcw5saEAWCBagbh6hbLSpYwANyyoRgAdz9/sLDizYIVD/HeCAhyeWw7ydKa9uO683nazDYPxwPh5MEqjsHELCWWsKYISUy81YNGc7kmKagsEt0s7u8ur68uLrkIVvoHEuEUUBrkTlLTg/Hw7KU0rAZNnU1NHKkR2mLARAHWkD3PjTEBN0cBxncmBkesTcC6I09PjrCakBCMIdhGmutWg0xcs4U4GGqer6UeMzn0S8R+ePtOGmSYRg6MamZYetXKEgpjYgIkAk5sYcjkRBDv1RS7YzNALlbcYMzX9UIWQCfXV3vVlt03N8fzMEAD/P8/vbu/rA/HOcdt4vt7nK3C4dqSpxqWK21HufxGd1sLnar1bQahim38OJ4mo9hurQWEavtRrU+3B8bkvFUWluaIY0AoKotlEiQu6oFDRzP10ZDnCeJEEwi1CG3AIsg7tN5FtNuFUK1FmZuaiKSBrF+HQ9BZ7LIkBIRPbu+rlVrMyMyNQCota7GKaXcGYXckZEYWmsNWoSBZ0JgTBgumB3OJkzdmwKBA6yazvPSuTdqhuBDHi+macp5M6yuplW/hLZG9AU9zovVOUuapnW/q8EJ70+Hu/v78eWrzTAJgLfqYM0bJPTahJDT2Frpls9LKU3bYsirUXsdJmgRZs2CrA8aATsaj0wYQMzRtR0BiOxuDl1ZCACA6pk4AlwdCPtOd3dgtAhwc3ek851G3SaVO5i0Px6Pp7rUNqtGxDiuOGWDWJaFEYeU1FVKIUrn+4vcu8ct9dvoAhnPVlQAIMwGoarzPBMRI1qrKeXddnt9edWB6IGZERhivdmsVqu51dIqz6XV7gOHLWxx5/Vut9ky8ygy7x8UvarVVkhSEIgQIqSUzOx4PDa11nx/qhNPtVZVoOT9ntaICPeuD3q6jupHhlCQuyJF04YYrsGcENBMp3E8zTMigkegppQ81Dz6/VadDXeGiTFk7QIAmfFKsmMBgpMrETrF4qqqTjiwqCl5oBpkscDzHcjavMKYpZRT9pSzBIRZd5NuGFhdW2ullGlIY8o315c3V5vwExG7qWrosQ7DiD4PedykZJSaB44IAIk4Iuba8jA08Op2LHMInhy1mpqvQsABaHAEQluiLEuc1DGvVtc3IeN/93d/mC5uvn3zw7/89Yurm5dffPUVIYBVbH0WjtqCOHnnLZJHGIMBBfiSRww/AITg8Ovf/+2HH27v7w/7h6MrCHMEAbqwlFY7vdkdUkrmIe1xc+dp3Lj7aWZAhCBErV1vnxMTuJOZARznEwG2FrUupgPDKiLCWnd978eh380TQK21DsVHBOUzqNpaC/JM7GBF22kpD4fTOK52u11KKQ0JAM2shavpertq6kny8bgvrWm4uhkEMlXVeZ5hoFZb0dapERSQh2HY7O5OVYb84cOHd+/e/bt/9+8+3O7/8sVfO8s10JBFEPV8qacBkLWahIjAzQFiyLwstpQTk1a9f/5y8+zZjkm+/fbbzWZTtdVWlmXBU1sWjUAi7p4AApK6gmEcV7W4xDKQMFEEH0tVbDnnhojh3VeJm67GQUTUo1Q98qzWGGEu1R7vFDUH1Y6gmroBITD1ixMNomhzsNoNzAECobS6aJtbZeZBhpSS1vMNacZ4nGcLN4K74/3D8TBr1fA0DpQIgz2ihSOTBxQtRSOxpGl1tbqYlzqupvvDAxH98x//U9cPR0BYWGggOVCgdq7vkMW1Opjr8uLF7mI3Sbo+nY6Hw+HNmz+XUsZhVWtV1eOSU+Zpmn7925/+wz/8Y9Wy3VzPRUUyeEg9G+ACU8oiCWTiwQCOqq7miIoOhADORCzJam1dZcPYwA+neSGchgxAOQsjAsCAhMgerhYOlDPlYcrj0O1q3UDRukGwYQSjY5j5Mh/dPcw6d92arzebkzcNPyzz/njczydIzCLA4oTFVCFKLXNZarPZfa7mMoKIEztQIBGnn/zsZ2/fv7u7v+9jxMTJ+1X3FBAeCEgEYWEe3tzrepWE/eHu/ec/eblZb5+/2P3TP/3Tf/v3f8/MX3311Wq1Wa+n+8P+T3/6J5JFchnNa9tr9WmdtFV5OM1THgQQkYc0rqapVj0sS3c16LMnILZWPSChcB4BWd0jMDEahJoFArKcRRIBQDIMYgCtH3aRnHMaByKM84UURuergEI9gNAINcIxgqAsp+PxeDgchoehm0AurZp7GofdaiNDbuEO0VqrrqdlnstiIMjCQy7Nv/ru++Xrd05Jhul+P19cP4/A9Xr98PDwSPNHJA7qgpkItwj38DGLWbn9+P7Fs5+mafzTn/7P169fTqvd5W7z8eO777///uWr58MIX3z1x1//+lfj3//q9ec/2V1M7vnq6vWXX3z35//y1TBt5XgqmbMQUwRTmvLU1l7MQFv3VVcPcNWmGMDILCkYzMNM1YEJBmFD0YDqnd0UXisgLaW01qAzT4SZufugIYZ7NPd+U0FxDUckMQoQBofT4aQEC1htS0qptJbzSITb3S6NGYnHPCDicT7NZQmEqt6iArATOZGGBpIBafVh
XLXWSmmdGfWojcHwwK427OObAIKYTw/L/LBe0cVuHbD8+m9+wYIBIsTXV1c/+fz1l19+gSNPQyaCPPD/93/5n3/zu7+7vLr84ot/fvXqF3/8039ORnI6LpthzWLmjhGc0nq9XtzuamnamkfXs2GEIKP6SVvXW3TVNTsgiQRWc0ejAITw2hywtoaIXaPoBgGUZAA3FE50Zu55uKmrOwh0LhkldqFhHKewTmQbhzTkySDSOFh4prSaVup2OB1LKQ+HQ1UzEI3mTArJggPFAhGpqUVbmBM9CifcvN856moaDojSxbJh7n55tfvFz16L0H/5018lRcp4v19Op9O0St98c/x4+8O/ef3fpsT/xz/+4+9+99v/4f/133/7zdthmJ49v7y/fycM5rMcDqfVuG5ME/OQJFEOIVokjwkOBSAASbVJECAupXqm1to4pLzekIg1NY8WAIYBfSrC4I7qajGOQ84cdnZfQyYMT0kYsrbSr1tN4RHuEaU2AMCU56arlIMpr6b9fr/dbjFIEEWkNWXmzFJrjRaJMxBLTgjcaiCzpEyqVQFAltpSGoiktda9KNrSHCNhIuGg6NeEd8QuMQ3DsBz3InI6LT/9xS8B62m/F5GffPby889e/vWvf35xc/3lX//8/v33KPyP//iPu4tLItkfy2Z99dcvvmVmU5O52f3heLPbnmqNiMSkbuM4TGUcawU1NaMAIgyCIQ0zWJax1XaYFwRnpDTkYymZbRqHxGweQrioIVDXFClYa0oky1JXmadxJYRlAUlprk3NJMlpnvtQ73A4tNYsfCkLZllavRIBIGtKAYl4O61W09SKJmJb6iPYyMwYMlqIatMWTudrBeNsmYTd0dlDLRgcPbxrOiO62ZyZLjfX18KjamOSeamvPv/p8fBut9v9+3/43//tv/23rbVvv/328vLyV7/61fc/vH///v2LF5+FS1M+7QvLQJRFUR5KpcNpEmJmc42IzDKlNJ0vQXIIJIIIaG4azkgkqZqC+ZiHuTRVtQy2RGbJSRBFm2IEMpUWQoyIy7LIOHQLj2kcwCMQRBoxB2Kptc9mEYRI5rm4x36/N7P7+3tGWQ0jAkzDKMRRFdxBDTwAoDUjkSQZh6Eu/VogauoEARxdzxAR0GW+QhHWzo0NRL96GCNJqmWe57LfH8zb+w/3V9e7//Sf/vLi5TS0+OlPfnn7cR8Rr199/tlnny3LcrG7Oh7KOGymceeQhL8Cz4QiFbBVIyoAeQ3UIyMCJKCMWCM0wAm6nbu5t6YNYLVaoQYwkbBGIFMgW0BRc3dLEW5E5EDaHBOrxf5wTCw6emk1MXGSlFLO2RHmpWhrVpZamqHknKFZQNRSiGie5yFlk2RmeSUIUJZFazEzRowI1cYoBpYBXKO7HwJEgEGge4tAD0gkzAzcryANZj63jBFEtCzLOI5lqf/yp78Se8r0w/u7Uua7j/c5/7DdbiMOL14+P82nh/3Hq6vLnPOv/uZ337/9MA3pmy/fIAyIuS4uikLgiwGXtl5ZSpkJoOl6tTrfjb3MZmHRFPDMJFV1d8mJiLr8bkiCJGlIYa02dWyJGJnUDSS7exBqeDNdahMk6pcBp9TdCVup0GxMmYMpT/1ii8NpTzQ4xJDyOI5JSKgfFK9aLTxnGVbDmqZmJ7UGnFprtS2tqXogy9nfol8xHhTdczLcXXtzj0z9TjF0TCxlqUmoGZDF/thvssinA+sgD/cHN/3um/uANozy7u19beXVy9fvP3w8nfDu9uSGQx4aNAlMXQGyrzXJHmO9HiXllGkgosd7qRY3pbOQqwtwrFNIaq1MBMTnaXBgIHSXfbIoUecIRFyv1yxSmx7nOXUpuLYOyoA5Im5W681qXdWSTOoG6K2kcWRHX6/XUx4QUTKrqoWXtjR3mbJYTpDhsK9zldUY1oiABdnACQI1gAIgAgJQHS080OBMb/F+RXVHHZdSj/vTej1NeZjrLDKauoGPvD0dlmHYJfH94X4chuXUKnmAfPnFW7XY3/9VeLIaiy8ig5CniOaEFrpfWsTeYXW5XgnzOE0X5mq94YnzPciIKSVVLaUBkFnkSSJiLlVkDlcCHADAgTIlTlUNwEurSWhpJkzNdH8sBHgqS78Cfj1NY86EMs+zqiPhJBl2uxaKTJx4GFL37jBT16hWjUhyylNe09TsAmRxpCBfrcZodry/R86IZK5AbOZE1EyDkN2BkBjAwdApziWTqu92G6uttQYG43q9398Pw6Dh42pda42qQ14ToRpi9w/lzNSxCx/HqWq4q6BjBCMDkLTwQy10BEC/XG9zTqvVqpo2D91bNFU1yMLMrVkppdeSImLWhmFAxNacMBCj30fKzAgolEqzLBboLAIkx+NpYDqdTomYqIufsBf9AgiBqyGnMc1aNRSiG12qW7h7NS2uxAm8Gjgy5CzTaqjGkIYpbweHk9ZjqcAEYURsEMR9M0hEnK/aeBRcRTyyEJCKKiIPw/T+/Yfr60tVVT/Wk4tkDWdgBjYFHrIH9gtDuqh/WZacRwMTBvJ+nYwFkqPbsS5xsMTCtEbhaZo2zWpriDXM56dCBJ9ksESUNptNYupkNndHjGZKtWaiIQ0OUVp1iq6scffFGgZk4ZQySGqt9YsrwaMui1M4Y2I0A2R0NwBo1pjZLQIgBAKCc1qPcpwXRCTCQDwcHhpSKbN79OuazxewEwGdX/aZyBZnVi1EAMIyF0jQ7QKJaBxHs1D16vvtdqulEQMAqipzMnUidgPE6IxXkRRh5vZ/A6WGzyuMZ43wAAAAAElFTkSuQmCC\n", 86 | "text/plain": [ 87 | "" 88 | ] 89 | }, 90 | "execution_count": 25, 91 | "metadata": {}, 92 | "output_type": "execute_result" 93 | } 94 | ], 95 | "source": [ 96 | "i =813\n", 97 | "\n", 98 | "img_info = imgrec.read_idx(i)\n", 99 | "\n", 100 | "header, img = mx.recordio.unpack(img_info)\n", 101 | "\n", 102 | "encoded_jpg_io = io.BytesIO(img)\n", 103 | "\n", 104 | "image = Image.open(encoded_jpg_io)\n", 105 | "\n", 106 | "print(header)\n", 107 | "image" 108 | ] 109 | }, 110 | { 111 | "cell_type": "code", 112 | "execution_count": 26, 113 | "metadata": { 114 | "ExecuteTime": { 115 | "end_time": "2018-07-21T07:10:26.732578Z", 116 | "start_time": "2018-07-21T07:10:26.711066Z" 117 | } 118 | }, 119 | "outputs": [ 120 | { 121 | "data": { 122 | 
"text/plain": [ 123 | "(112, 112)" 124 | ] 125 | }, 126 | "execution_count": 26, 127 | "metadata": {}, 128 | "output_type": "execute_result" 129 | } 130 | ], 131 | "source": [ 132 | "image.size" 133 | ] 134 | }, 135 | { 136 | "cell_type": "code", 137 | "execution_count": 27, 138 | "metadata": { 139 | "ExecuteTime": { 140 | "end_time": "2018-07-21T07:10:29.714824Z", 141 | "start_time": "2018-07-21T07:10:29.676756Z" 142 | } 143 | }, 144 | "outputs": [], 145 | "source": [ 146 | "bounding_boxes, landmarks = detect_faces(image)" 147 | ] 148 | }, 149 | { 150 | "cell_type": "code", 151 | "execution_count": 28, 152 | "metadata": { 153 | "ExecuteTime": { 154 | "end_time": "2018-07-21T07:10:30.404858Z", 155 | "start_time": "2018-07-21T07:10:30.386340Z" 156 | } 157 | }, 158 | "outputs": [ 159 | { 160 | "data": { 161 | "text/plain": [ 162 | "(array([[ 13.36201936, 5.58984986, 78.93511893, 104.44713098,\n", 163 | " 0.99996698]]),\n", 164 | " array([[45.040733, 73.22949 , 67.01588 , 46.294598, 68.35203 , 47.975132,\n", 165 | " 46.75182 , 68.91486 , 85.37722 , 84.38674 ]], dtype=float32))" 166 | ] 167 | }, 168 | "execution_count": 28, 169 | "metadata": {}, 170 | "output_type": "execute_result" 171 | } 172 | ], 173 | "source": [ 174 | "bounding_boxes,landmarks" 175 | ] 176 | }, 177 | { 178 | "cell_type": "code", 179 | "execution_count": 36, 180 | "metadata": { 181 | "ExecuteTime": { 182 | "end_time": "2018-07-21T07:14:20.172835Z", 183 | "start_time": "2018-07-21T07:14:20.138160Z" 184 | } 185 | }, 186 | "outputs": [ 187 | { 188 | "name": "stderr", 189 | "output_type": "stream", 190 | "text": [ 191 | " 0%| | 0/1 [00:00\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 14\u001b[0m \u001b[0mlandmark\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mlandmarks\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mi\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 15\u001b[0m \u001b[0mfacial5points\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mlandmark\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mj\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m-\u001b[0m \u001b[0mbox\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mlandmark\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mj\u001b[0m\u001b[0;34m+\u001b[0m\u001b[0;36m5\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m-\u001b[0m \u001b[0mbox\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mj\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mrange\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;36m5\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 16\u001b[0;31m \u001b[0mdst_img\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mwarp_and_crop_face\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mface\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mfacial5points\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mcrop_size\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;36m112\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;36m112\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 17\u001b[0m 
\u001b[0mfaces\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mappend\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mImage\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mfromarray\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdst_img\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m...\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m-\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", 202 | "\u001b[0;32m~/Notebooks/face/mtcnn-pytorch/src/align_trans.py\u001b[0m in \u001b[0;36mwarp_and_crop_face\u001b[0;34m(src_img, facial_pts, reference_pts, crop_size, align_type)\u001b[0m\n\u001b[1;32m 258\u001b[0m \u001b[0minner_padding_factor\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 259\u001b[0m \u001b[0mouter_padding\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 260\u001b[0;31m default_square)\n\u001b[0m\u001b[1;32m 261\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 262\u001b[0m \u001b[0mref_pts\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mnp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mfloat32\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mreference_pts\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", 203 | "\u001b[0;32m~/Notebooks/face/mtcnn-pytorch/src/align_trans.py\u001b[0m in \u001b[0;36mget_reference_facial_points\u001b[0;34m(output_size, inner_padding_factor, outer_padding, default_square)\u001b[0m\n\u001b[1;32m 102\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 103\u001b[0m raise FaceWarpException(\n\u001b[0;32m--> 104\u001b[0;31m 'No paddings to do, output_size must be None or {}'.format(tmp_crop_size))\n\u001b[0m\u001b[1;32m 105\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 106\u001b[0m \u001b[0;31m# check output size\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", 204 | "\u001b[0;31mFaceWarpException\u001b[0m: In File /root/Notebooks/face/mtcnn-pytorch/src/align_trans.py:FaceWarpException('No paddings to do, output_size must be None or [ 96 112]',)" 205 | ] 206 | } 207 | ], 208 | "source": [ 209 | "from tqdm import tqdm\n", 210 | "faces = []\n", 211 | "img_cv2 = np.array(image)[...,::-1]\n", 212 | "for i in tqdm(range(len(bounding_boxes))):\n", 213 | " box = bounding_boxes[i][:4].astype(np.int32).tolist()\n", 214 | " for idx, coord in enumerate(box[:2]):\n", 215 | " if coord > 1:\n", 216 | " box[idx] -= 1\n", 217 | " if box[2] + 1 < img_cv2.shape[1]:\n", 218 | " box[2] += 1\n", 219 | " if box[3] + 1 < img_cv2.shape[0]:\n", 220 | " box[3] += 1\n", 221 | " face = img_cv2[box[1]:box[3],box[0]:box[2]]\n", 222 | " landmark = landmarks[i]\n", 223 | " facial5points = [[landmark[j] - box[0],landmark[j+5] - box[1]] for j in range(5)]\n", 224 | " dst_img = warp_and_crop_face(face,facial5points, crop_size=(112,112))\n", 225 | " faces.append(Image.fromarray(dst_img[...,::-1]))" 226 | ] 227 | }, 228 | { 229 | "cell_type": "code", 230 | "execution_count": 38, 231 | "metadata": { 232 | "ExecuteTime": { 233 | "end_time": "2018-07-21T07:21:45.873749Z", 234 | "start_time": "2018-07-21T07:21:45.857902Z" 235 | } 236 | }, 237 | "outputs": [], 238 | "source": [ 239 | "reference_pts = get_reference_facial_points(default_square= True)" 240 | ] 241 | }, 242 | { 243 | "cell_type": "code", 244 | "execution_count": 40, 245 | "metadata": { 246 | "ExecuteTime": { 247 | "end_time": "2018-07-21T07:22:21.544120Z", 248 | "start_time": "2018-07-21T07:22:21.517479Z" 249 | } 250 | }, 251 | "outputs": [ 
252 | { 253 | "name": "stderr", 254 | "output_type": "stream", 255 | "text": [ 256 | "/root/Notebooks/face/mtcnn-pytorch/src/matlab_cp2tform.py:90: FutureWarning: `rcond` parameter will change to the default of machine precision times ``max(M, N)`` where M and N are the input matrix dimensions.\n", 257 | "To use the future default and silence this warning we advise to pass `rcond=None`, to keep using the old, explicitly pass `rcond=-1`.\n", 258 | " r, _, _, _ = lstsq(X, U)\n" 259 | ] 260 | } 261 | ], 262 | "source": [ 263 | "dst_img = warp_and_crop_face(face, facial5points, reference_pts, crop_size=(112,112))" 264 | ] 265 | }, 266 | { 267 | "cell_type": "code", 268 | "execution_count": 43, 269 | "metadata": { 270 | "ExecuteTime": { 271 | "end_time": "2018-07-21T07:22:31.344783Z", 272 | "start_time": "2018-07-21T07:22:31.313710Z" 273 | } 274 | }, 275 | "outputs": [ 276 | { 277 | "data": { 278 | "image/png": "iVBORw0KGgoAAAANSUhEUgAAAHAAAABwCAIAAABJgmMcAAAzBUlEQVR4nL29TZNjS44deAC430syPjJfVXVPtaSWmWx+zvz/zZjNTtNSS/U+MjNI3usO4GiBy3ivW4vZTJGWi4zMCAYJwvFxcHBc/nI5nXpvZmszUz31vrZG5mLa1QAsTUWkmbamJJtpV2VSgXBXoLeWoKma2WldTuvS1M7rclmWru3Sezd9Oa0vp1VE11ODiAkVdPc5/Xa9/vj4iJkJruvpdDqJqjV7fX1dliUi5pyZTpJkIG8ck7yP+cvH9V9//fY/fvv22/W+BSYRRCQikSQACBIJ4P/5rz9/v254yqP9X//nf/rzl69fXl9fz8vr2i/n82KNEZopgJmaKUwIJCJJqVdKZIRPB5mQFKiqmrXeVFsz66aLtaW1Ra2bLc2amQi0GZVj7GMMtmbnZZyW/fXFp48xQLXW+rqe1rW1pqYuGIw5kUwKUkigEdYbLhdmurtP5+ZJekQZlICoiKhAyRR5jjEBoP3Tn//y09vr2/l8Wtvr2l7O526W05HRzZberBkEKUwmmSKiagrJSDIFkhQCEIiqqoqaqaqgQZbWuplBVCCAKEwhpuuyuAmZmbmKnE3GrkOFiWVZz+czoBAR0AQqUOFEeoZkLgYhmXFWvK/Ldjlv23C/79sec05HAFBVMQMAEYg+0aLtL1/f3s7ny7Ke1/b+0te+NNXsgjRT6c1EJJlqCihBUW1qImBmRgrEtJEgKCKiKqoKADDI0mxpTQXIBGhmIoCyi6S1YIY7VNnbsMZ1JWHW+nISFZKegRmBbAIxMVEHkZGkZVrmSnld1p8uL/vIj23kDPcMglbWhIgo+DRrAmiXdTn3dl7aeWkqBMNUFmvIJClCFYKZ4aqiIsIEU0UFpEFI5Cz3NGuq5T2pqt10MekGU0UATDMBMkmAVAihJqJKookKAKqqmTUKKDLDychUwIRkpJIaaImkJpAm7+tlvshtjx/3sZ84b/d9H3NOi2ZmrVlTwRNt2l5Op3Pva7OmokhheBAiAiiIRCJJAiliChWBZDAiSRGx1gQUQAWmUp4FlaZqWj9ARSQymRWXhYQAIiqoX0TAmgoUEIGq1scIUip8SBKEqq7rKiMEASQZIVxMzr2fezv3fmve1EjOOfc5ILL0tnRj5hMNejmdzLqKCYUposgIQEACKhChQgFpqnWayQQpFamAZpWQTE1NDRAy8fhZgBUQyAyKoowpInK4jYiqHD5EkklCoElCaE07eyB9JpMEFWqCFJpIVyywl768nk5vl/n9PhQQCCrGCNwDyYgnGnRdexcYKCSzjrOACZBgEgpQIYAAhzmDJFXFICbSWmutmVmZSVVF2sNoQDIzIYAKmSkiIgYABASf3yfH7yOZAiWSQUHrJu0EE0fO8AgXNoEoxERM0IXdbO3tsixrb83MzNSVdACZQjlM/CSDNq2jHcwQBkKgCqGAIHC8RwpoLqKGsjsAKISiAFAeKiIQsdbKkQ97RjBAgaqgfq58k2AyEyIpIgDKOUmaGEzrm9VUgM7WZxuqEUIRKBVqpDFUaMLV7LT087osvTUzESUFkEwkSD4xhopQUHahZr2xFB5h9HiQBMMdkhABoSJMUpiRbISK9WZmqtr6oqp8PEQVKpmpkEp04Od7JIkK0J/fTyCDzqyII3U0QK2a7AgUAhU10YSCJlyaXk7LeV16b6YiEAICYSLwzJyEBkkRKCHQBqhIZuRRb4oIVMRUK7QlUqAqClGCmSkiwUxAVLU1U2utqWpmZvlypZnMChckGcFMRhJUSbDsXLY94m2EQ820TrBZbxTxCHevur2+UwQKCrKZrks7n0+nZWlqADKpCuZnuHqWQcOHAIKUJABTExVASSZS6uAqJIVgAgZUscms1gWROX2O2QihHakfAAmImooamUlSAWYG67+gwoQSiUemohy+CgHJiABmIivQmqqpBohMCCGEJJkMr4/vtCxvLy/9+7UqkToKlQ2eaNCYAigShECQYqoQERVR1NlJsAImyQAAKiimBCggEZ5jzExGaxARbaoqQoioqUAiPCOOEglyWC0JovytCigRICVQUZVStUGARLrXgcj0Gc5MZgqoClFK+WnT3ns7sqSKtnCPwFNjaKbzePWiaiKSVe6oHGELACl8fBdRbwyVSYDIhHslazPLBMDPCKyHaY15xMoKnZmUPL5EZfnjpEg+6mCKQAAyycggQ4SNirQIBAGmqXVFk9Eyz+BPq/7Dpe9XaXLZAyHnmTLuG/zjSQYtLzv8Q41ylDIKUaD+iKriyEIAhJXAERHMrKBZGSkzyQAowmr8halqS7OZMUYwo9y/nIrKKvAfKb4MqlXsgJRMiJTdVSAC0yMqVkmnAlM006baLc+n/tP767fbPc0tZMI6bGTi/hx7olE0RRIAqn457FjHs95P1ZeUrIJO9fBlkKplkOMhj88DwGdlmpllcROd6fVlX3picmZmQlVEqlBTCMw8omJrWR9AnfEKh3U8RFDHRE2a2dJyQBbIy8vl5XK5+S1EVbvDmj0JuwPQIkKpVIU8KqVqWwjIEXzIgppIUiprVwVQiJzo53mvCv/f/Y6oKPio4Y82KT8xNgOQZEZGBgQwfRRUlaCEoH
pfeXy/nlcj6dVlVlhHvM9OLHFiv/0dAcRzQiagmhr7K2ZSYm0e77SUR7VxNneniVukogwn1EwFPpUxnK7Kqvl/PL+fwsg8ahCqpA1toPkcGQqC1jrfV5/WxPpT223iBCIiODkQhRTRFJUeoUK04NINZb73Y5X17Ol/NpaaZMurOwfDA/A5yqJARxsHAz0z2m+5JsvbWWp76c+hLcrPF0OWnTiGzWuzVkxpwTsQcImulq1g0vl7Oez9+d+L+fYlA+WIapevBvICnMZERSUkSaqZmKHbuAR++vWsvEbCSNSBJOdMU02+bwONRYRPR0Ol3O51Pvpc8oSQObSClCJEULIZSS0CmlA5BwD3fPDAAm0lvrvatvYDaVdV0PiIzCFGNKdqbnCA0qYOXn4TGfhtgfi4JIEsz0FJOQ0qZJlQdfNKQJKtbWSrU1bf9GboTuvo3BoIPN1Hqz1ltrUHk5n5fWkczpjJBMEEa0pgjOKHbFA9j+nWxfDNMDj1VRM1ozVUmG+1iyt94PeiMhqhQZxQ7aB2gqypjXHz9u16ep4hBJ6pFLJUl5aBmSFJXS5yKQyPZg7BSTTJuZSOk6gFlO10TS1LqJGVqr5YwiippI703MEOFjesQnt0kSBANx6GYc2woHzbHESU1TkwoIM2LMoV5PpyYEKAQcTBCmqZopKZKUfY59f5YG84go9qw+NuJI1OiXrBUDlUCUogMka/SGFEpIQLR4hsd6O1o3USVEUFRsEYWqA8gi5lHplDTsSI6IncKHIC2oKilZ9EcTBWREFkNUAGWaJNN9TBPLHhRKEzDx2HMmWIeIkARg2nrTp5VNI+m1NykUcEha6cxChFRCKJLHyU4wELBPNiOa6rG1EEpRgJ/c1oMgexxdKRLzQzLCRCE60xCC4ZFkrSKQ4swE9ZjACKCZx/7ddEc+iLuR7uktzJKRGZ4MPvIlBKKaDALarC/9SQY9NnkhmZx0QEqVhWowlRqiU0j9nDIeMbcotVq1gAjzc5KiNex/LHLVNP9gnGVJ08accwwf4bex73PG77uMRfpWFcGnuBEp0N6XOVOlqbZav8mge7ZWxnXQeTBd+IlOROaMeNYEBG13Dw9liSSIA0Qe/GPYoffVKhMo8JB8kFoURvmdHtILMDk2m47sXtWAVuJGFZiZHGPe7vfb7f5jbNu+HYwnSBXCptZEQlFHpFZCtBglvffel7b0tjzY9XA/FF/wYK9n0fMAiiaxjXHf9ycZdHpGUEF5KDBSxA5+E/nggzO1tmDUDnAqa4nmELS03vQAUx7LgGVYlWIs5OcUIiLH8Ovt/vHx8dv9GswKugQQaWqpSdEEmoqIZWa4R0RthZhqa920RZRQec45k0GkkB6ZiXAyGUFVg9p0zqcN6SrY8CAtZ2SYgigN+Fp7K4xEwBSKkSyayYMDUkvy0uxzcoqHLFktytSBLjpnRGxjv9632+1+vd23fWu9995ErZJj7fjU6pSY9kWZkeHMCkUQhaqo2PA9DuItyVRJID9nqxHiM2vZInhIzj7DoDNTRFnqs5ngQWYlUVJ3Wnx7Uh9iAYfqw0G3tULYPvUxWOR6kERCin9bKkDD5/V2+7jeb9v24+P2/XYXwWr9vJzUehBFVhImPJJ5CISi4mOqVkSBaalnwz1Ep9IgoBzHqRbuH9GAEUiq2rOSEkmP0BQrLb/fV46PdcDSInnMPsQzNWr6dLSf+PyRg/l57DzwELGppa5IxrbtP6637x/X63b/uN+3Md9Op9WWRRezXovlEUH3wAzIkc5ARmRmEzCDGcXkV7OI4ByGpqooryhpp0QNPrKqalG1rg/Jg7+vQUeEh9c2bNMCwpCAkHrI7lbhd6iugF5tCcxMq91GJgtRZnU4BwpYiopkYE735H0ft22/btvHfbuPmRDVZmiSioSZmbZEOo4lXpVjHTwZEdNdpu8eE5LFNZtz95mLQqRF4HHMMoJBJCyTRbYXNVHF319/vc2kNDOoZHzqISuJJJQdknK0pZWoUrM04hUSSCRCQlUyDQ8E8JO2l8FIRuScMSM/brdvH9frtt3H3MPX04liQQkKxFRMWoNEhiYlS4wrqSZqFjklJDPIiPSR0zP2McXCuhkJqYiRc6ZHzpAZ8IAHk5SnDekOCFTyseheXUZt+YGkz4mHGKi7C4DWRMJUBZGRCohKZlS5lI9NEJIMBmXO6cHpcdv3j+22DfdMqqTIJLaI8KmgeOicRGZEIgmWxiEDFv7jvi3h0+c+x5g+I4bP4dPQIiNpOLj63OYMyPAYkTPk2HpKV5EnACRtRj4SDgVqD5i5gM5gSlWXAI6tSwaZmT6DSoPWdnUcdMeSpSymPCOZgI+Y4ds+r/f7fd89U/uy2JLJPYM+JSNrR7x4QExBmqAJTRCq4hPbtmTLzDF9c98jPu632/2+LKuaZVI/F7hF9jkCAlt8+jbu9/2W+aQZSKsVbeHjIopDOr54V6xNWJbmTOkEQDLhMxluJQlqDYCa+UPAqiDBjIwjjGIf4+N2//FxvW9bX9bz5WK9//bt+23sd/fhfr3ftzEo2lpTxdJt7W0xXZuds+2ZG7nOhcg5fXh87P7z9+9jzhcCZktkM1ORZNzHdHIC13G/T5cmr+/n+4/tOct0bUZGLaCjFNAeK8jHocu6CCRRrVH1RbUJnCnaaCIJkRhRBi1Q4BANASK4j3HftuvtPny01t/eXr/+9KdA/vrLb98+rjPzvu+//vb952+/JXRZlr6083k9r8vL6fT+cnnN0zL9klhnyKHyHr98XH/+8UMgy3r2oCBImOqIOTInOYIOSpP1df3pbfl5+5fn4CPHkX84n5SGeZG2a7eD8lk5sRpNo1JBKkoiKyQT0IjMSEIFoJfcJxDu1/v9dr3e7zshl5fL2/v768vLPsfSzef89uPjum3Xfffgx3aL601VrMl5Xb++v+2ZE3Ja+kbtY1YnHOTPPz4+9nFe1oQGRbPm4PREAJM5mSnYY367fcf6Skw85dA3ALWMkJBSAT8YeGoPOrMSStTpZargIWhRYlPwGuNlDSoLlyyJWkhlJB/hgVz66fX19e3ysrS+LMt/+g//cU7e77ubv/z57a9/XX69fuw+Z+Q+9m27/+uvv4Vqqp7m2oYr6e6ZFJURQ5a1nc40y9IULd1RwJkO7jlHMoXaBZ2X1+U5u1/NI1WtVtJCqRATEDWhE5BQoQBHqSwAAgUJH/pYVS17aTswC7vy9IhU1el+37Y5p6ldLufL5dz7Aoayvb28/vM//YdFl++3D7a2J3VZZFlS5b5tP//886/ffvv+cYfo6XxuZhlHZ6lNXy6nl/cvL6ezivEo4OmMFEaEI4PhoPZ27qeN+PNfvlal/Hc36Mzc3E9qao9rtADFobDESB63PxyK1oAkRZK1YqhVIxEuoBw3/RDwjJKx3ca+bRvJ0+X09vb2cjkvvdW4pam+XS7z/R3kAJScmffMmGHa3r989eT379/+9utvy+1WkK1aUxEzW8+n1/f3l+XM6fAgSpXLaXBw9+k1hxBGzuu2//mvf+3LMybJDUR4ukFE7NHeQOShjJHHlUo
kmSJqBARxgPQHlkdQuxYhIRPJnOFzekbu93tEnk+nL6/v7y+vp2XtarN2i8AMh1C7sUp2A5mqUGkAzufzj+v1tt83JkoUl2aCk52SEDFdmqfPiKgaT0kVp6bo7uNG35tOgdjw/FB7Sgydmbe5q3ahKFXzQD3qOB+h/iFgIZks2U4ARIjUvX0ke4wZrtoeoFXe79vcd5+zaX85vby/vr+czqaFWovndPdt3+77tsfYMnbmHmNEEFKyoefT6Xy5hHCPUfJxdc8YhSS3OZbZ66OOqs4ETLr7CBc7Sv3XL28/vV6+3a58CoLXAM7MrPFG5jzuCTiay4Liarm7tEDIQy1RPmWvAAAURkCYQY7p277f7pu7q8jr5eXl7X05nbUvEPFkQjy5Db9t2227f9yuG2Kqjjn26RBtja310+n0/v5qi133O8G6ZbAubIuIfd9Hb0aSLswinTgoZhKR6WOfv92v2PbxrzGI6c+Qw2oeVDnk5txdSDOrmUf7hDcfD5EKkwfE9IkZkwwxT9bdUmPGffjdQ1KWZTm9vp1e32xZUzQzx5wk933exriPcRvb5vsAhmLGjMd1GKJoqm+vL33t61w9ShMjkWxqnjl87mNvAoQb0M3ElIWRgNMDkMvlJdfTt19//fqPfzmdfgFuf3+DZo4aeAlKOkRJU4UB+lDDKPCdR293jIh+h+kIwBNO8eT0vA/fhs+U1szWcztdXPQ6huyjPMsjMtMjdqYrUiXBmZPCvljvq2kDmemn3tRkXfvwOef06UlvagQjYp/DQSXXEn+qBSsRqGprvS0wi3XJX76DHXhK2QRgrzsNBGKWLEXzasetOlEBUDje41aPGoHI45IuEfFkUCIxIu9j7l5Xm6z9fKLp7r7tO8m5Dw8vkczM3HwOpCsmGWTrTWt8xXT3DJpIh5gYkJkzPCVqIsjpvm1UcDFtpgdDCkLRFA0axaytp8vb169Tirz/HIOWUs+x0lkjYQgeV++U+HmV+Mdgs1z0IbFWKkBBBjEjtjGGO1T72pfzqS2LM+9jT3cmx76ramRAZPq8j33LGGSN+0nMMRh7resL1UXMmpCWUIc4lZIjuMo+Z7gbkkszE+1GgiIhMoO3bdwTCLmsOK1vm+dzLvdsAIZHFE+D8KxVLgrVpa7HVCAfZ7+6p6M7PUjkxx0odI8xxhgjM9fTup7WvjQKtrnXxdoCzPAuPcDM3N139+mMRCDnDErMOX1G03Za1t5s7UuzxSOQs7clA5l5v++UCXCCQs6pJPu6tr4EMT234bf7+LbN+WPoLRzq1OnPaj2Hx3CnakIpUIixrktjKYsxJVVNPkXDj0uURCRFApEeGek+3AcZKljW3lojOH3sW8Gm0lor0k5pZYwxZzBTwrn73O47TCv8mMCsXc6v5/NFRPcxR1CM/WS7+9xGTE+Bgem+kyC/fP3alpPPuc/pAW1rW/o2c7/7deQIzPkUMUEA1/t267q0tvQGNBMlHgJCydqlP0SuqzqBotTDBVSJECemj4gBSbW671PmHAxB0udMz2ZtWRZVm9O3fUyvvSfJFJ8ce8Qk8mClQswTTuweAG/b/u3jVljqNmdmkL5vdxF007Xb8Lxv4/Ju9ExYSqOqNOSY19sIWU7n196fIb7eAFy3/bZY9GS1H8pFH/DNIeiAkqyKuhM2XWl2TDkFIiXGHGRkALTeknHfR+t927bwaNoIlaBkzozazRFVFfPMMekBoGUWIKsz4Pd9n0lCRW+3+/ePj22MSaagLx1z8znW3uy0eEhkBvBxvae2oAznPmKGJgxiQRWX52X56TE9VNQsLEND+6J5UDuL+8A4LqjSx+sqVA+PKZykwHH8JT0GCYhn7mOObYp6t7SRqk2b7Y5tHp1ZzLjN2HefPp2pNd/KPMS1POgxxtzHHOEUkaXZmAumCqy1Zq3m1pEY7mitLt+JhAdIExUJTWpS/j+M8f+XQceMGWyNIwJjquj0QGvHpLhKzyAyqprSuhOMx+JIElBJVSccJGS6h0fri2/z+4/77XY37acTeyfhEP1xv9/ue5AJXu/XuijoEGs/NODApJIxI8ZIz0yIiS2tQX3Odm6X03o+n5ZmQEJkeqyiY8bwSAhhQXqSbFpivU+Z0zUApSoDMCLDj1t3SFQdipIMFEhNd2u1oPZYSK9bGkSpVqpWgZyeM2Khbtv+cb3dr5vp3O9Oyn0bM7nNuXsQQsOWu88QQB4KuglkJJLndY1txEwjTt1eLufL2+t6Pm1z9JOdT701E6JE2dwjEvd930dJMiOSXqIeZjy0KZ5i0BGcHgIVUWhs+4DWILkvZk1hgClMJCKkGCUHogvPECgEMIO1GL67u2dEgjM917bq2RBwz7GN++2+l1odQYiDGybAbpJg+idIwLrZD86Tytvp/PX97f3L++v7W1uXYOzcg35IGYJmqk2v99t9j+GYIR6cnpmamsBDAvU5Bo1jUS4jU9McMYfX0ixaZ6tbdVjDz7p0ZWaEVyRIVRaDHGIUrXt1BcrI87J+Ob8pNT2vH/dxnl9e/T7nNuc2fJ9zxOwi59P6ejkDxRSHmCYYHpzeX+V1Wb+8vL6cT5eX83JeApwpc+x1GVvdSLSua4RfP7aRcF08dXjMoOiK2ibB72y1v7tBS/1nRqqGmgV0DC9CokJUmlo1mmKlych8SMuWvJNk5mAGE8clMinQJvp+eXl/eT21E4PbfRdRiu3u32+37x8f1/t9n4Pwl/Pl5fLSrEGUKglsc9zud4OcWrss66JqNUsQqgEeqmy9AYzwktedY0z3PUgTynIMDFuJAHhRyZ9k0Dh0B3KKtJaejJLjZkVMCkyYhl4Y7QEj10wESqRn7mPMCGYqxCME2ft6WfrLsp6WddGel0tvi7UlVT+27cftdt/3fd/hs6n1vpg1iFBlZN7Gfr9svfXzsmhmzhE+HeExxIxy8HxBjVCS+75Pd/fcdk9DGnx6uEjLLM7ap7rfEww6PTzDUJva7RAay+NuRwXBpqDhMbbn0WsCMNUEI2PMsY9ZY6j02Vq/nE9vL2+vl4sRktlKqBp5Xtbz6fTl/W34jDl1n+6HynKSzhyZ76dT6heIqGBs9w1BBOcMj97NoF1bIKkQTXffNt8iAk2WVW09v/70f/yXf5Tl8u1j+2//8+e2nO/buP32NA/NmBEhEpk6XaxlflKOpZiCTSXJxpIkOFS5zSwYSPF0d9/3ncxu2s3eXi9f3i7rApEBlHxF5t17XxDnvqyvrUu31O6ErAKg7mGPTCdaay7cw+9j+2Eyazp0iAkLnZ+hJSkBddJTpPfl9JK6tNPp65/+NFJ//X7/z//8z9D+//63/46nxVAXHSKSaRCJoCiThozwdKSCiqWb+FzYeo0ieBDw3Gd59gifPplp0pfWlt5bs+OK+EOONXcf29hv96335XS+nE4nU2tLE0i1DZEpKqdlnZFmFj6G+3CfGV5cNpNgzvCQrL8kKSq1r9asLeuJbbVlKSm8Hx8//vLXv377cXWfdanjMwxqZn1ZkamZAdz3rUT75xSfylwVzGwlgVvL4Hzc8eXTM1lKq3WnBURa77
Wh4xHIrD2bcq+ImJ7bnPc5l21rZl27mtUFqiSttZV53/dAOnjdbz/uP277tsdMoC2LNAGUwXAGahCP4XMmVFRa7+eL9NWTfT1Zb2OMX3755cfHD3laUhoe25jCLMdT97X13iwFHtzHFKGHFZOJhOnRQLVEPNSXPeKIqa31ZVHVGu0CSESRKRKkllC6jxG3udcUcFmWcHcPU1vWtfk+w29jv+/7j/t1D4cKVGmSKiPDyRG+z30fc0RumfvM0B6CkenTkSPtri1Pl/Ov3779uH6MsU9/hkBjA+CR0wOgiYgaOZ0psAakYGTk3XeVtTcSHtmaFXYHUYGRmYmgQLT11pe1r4u2WnQqHYhjmeTYuBMk6Hlcw8JMnduYc993EOXdntzmvs8ZYF/XZT1pt1pkighn7D62fd/dE6badJHh/OW3H/eff0wqrIstH7f95f2rSKs1nedINzUAB6WJGZAuptZQshgJPLghGiQIVaoEgBrWiy2LpYgXCUeg1nrvbeml/MTHbXdCRkYmCjhnAamlGSCy+djnuG236V7kXy9xbaCvaz8tfe1iZqYQ7GOnzyRLPa5uwqAYBY7MAygRgMtyysjpW+2LPMOcwP8CdBbcY3UBMa0AAAAASUVORK5CYII=\n", 279 | "text/plain": [ 280 | "" 281 | ] 282 | }, 283 | "execution_count": 43, 284 | "metadata": {}, 285 | "output_type": "execute_result" 286 | } 287 | ], 288 | "source": [ 289 | "Image.fromarray(dst_img[...,::-1])" 290 | ] 291 | } 292 | ], 293 | "metadata": { 294 | "hide_input": false, 295 | "kernelspec": { 296 | "display_name": "Python 3", 297 | "language": "python", 298 | "name": "python3" 299 | }, 300 | "language_info": { 301 | "codemirror_mode": { 302 | "name": "ipython", 303 | "version": 3 304 | }, 305 | "file_extension": ".py", 306 | "mimetype": "text/x-python", 307 | "name": "python", 308 | "nbconvert_exporter": "python", 309 | "pygments_lexer": "ipython3", 310 | "version": "3.6.4" 311 | } 312 | }, 313 | "nbformat": 4, 314 | "nbformat_minor": 2 315 | } 316 | -------------------------------------------------------------------------------- /mtcnn_pytorch/src/__init__.py: -------------------------------------------------------------------------------- 1 | from .visualization_utils import show_bboxes 2 | from .detector import detect_faces 3 | -------------------------------------------------------------------------------- /mtcnn_pytorch/src/align_trans.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Created on Mon Apr 24 15:43:29 2017 4 | @author: zhaoy 5 | """ 6 | import numpy as np 7 | import cv2 8 | 9 | # from scipy.linalg import lstsq 10 | # from scipy.ndimage import geometric_transform # , map_coordinates 11 | 12 | from mtcnn_pytorch.src.matlab_cp2tform import get_similarity_transform_for_cv2 13 | 14 | # reference facial points, a list of coordinates (x,y) 15 | REFERENCE_FACIAL_POINTS = [ 16 | [30.29459953, 51.69630051], 17 | [65.53179932, 51.50139999], 18 | [48.02519989, 71.73660278], 19 | [33.54930115, 92.3655014], 20 | [62.72990036, 92.20410156] 21 | ] 22 | 23 | DEFAULT_CROP_SIZE = (96, 112) 24 | 25 | 26 | class FaceWarpException(Exception): 27 | def __str__(self): 28 | return 'In File {}:{}'.format( 29 | __file__, super.__str__(self)) 30 | 31 | 32 | def get_reference_facial_points(output_size=None, 33 | inner_padding_factor=0.0, 34 | outer_padding=(0, 0), 35 | default_square=False): 36 | """ 37 | Function: 38 | ---------- 39 | get reference 5 key points according to crop settings: 40 | 0. Set default crop_size: 41 | if default_square: 42 | crop_size = (112, 112) 43 | else: 44 | crop_size = (96, 112) 45 | 1. Pad the crop_size by inner_padding_factor in each side; 46 | 2. Resize crop_size into (output_size - outer_padding*2), 47 | pad into output_size with outer_padding; 48 | 3. 
Output reference_5point; 49 | Parameters: 50 | ---------- 51 | @output_size: (w, h) or None 52 | size of aligned face image 53 | @inner_padding_factor: (w_factor, h_factor) 54 | padding factor for inner (w, h) 55 | @outer_padding: (w_pad, h_pad) 56 | each row is a pair of coordinates (x, y) 57 | @default_square: True or False 58 | if True: 59 | default crop_size = (112, 112) 60 | else: 61 | default crop_size = (96, 112); 62 | !!! make sure, if output_size is not None: 63 | (output_size - outer_padding) 64 | = some_scale * (default crop_size * (1.0 + inner_padding_factor)) 65 | Returns: 66 | ---------- 67 | @reference_5point: 5x2 np.array 68 | each row is a pair of transformed coordinates (x, y) 69 | """ 70 | #print('\n===> get_reference_facial_points():') 71 | 72 | #print('---> Params:') 73 | #print(' output_size: ', output_size) 74 | #print(' inner_padding_factor: ', inner_padding_factor) 75 | #print(' outer_padding:', outer_padding) 76 | #print(' default_square: ', default_square) 77 | 78 | tmp_5pts = np.array(REFERENCE_FACIAL_POINTS) 79 | tmp_crop_size = np.array(DEFAULT_CROP_SIZE) 80 | 81 | # 0) make the inner region a square 82 | if default_square: 83 | size_diff = max(tmp_crop_size) - tmp_crop_size 84 | tmp_5pts += size_diff / 2 85 | tmp_crop_size += size_diff 86 | 87 | #print('---> default:') 88 | #print(' crop_size = ', tmp_crop_size) 89 | #print(' reference_5pts = ', tmp_5pts) 90 | 91 | if (output_size and 92 | output_size[0] == tmp_crop_size[0] and 93 | output_size[1] == tmp_crop_size[1]): 94 | #print('output_size == DEFAULT_CROP_SIZE {}: return default reference points'.format(tmp_crop_size)) 95 | return tmp_5pts 96 | 97 | if (inner_padding_factor == 0 and 98 | outer_padding == (0, 0)): 99 | if output_size is None: 100 | #print('No paddings to do: return default reference points') 101 | return tmp_5pts 102 | else: 103 | raise FaceWarpException( 104 | 'No paddings to do, output_size must be None or {}'.format(tmp_crop_size)) 105 | 106 | # check output size 107 | if not (0 <= inner_padding_factor <= 1.0): 108 | raise FaceWarpException('Not (0 <= inner_padding_factor <= 1.0)') 109 | 110 | if ((inner_padding_factor > 0 or outer_padding[0] > 0 or outer_padding[1] > 0) 111 | and output_size is None): 112 | output_size = tmp_crop_size * \ 113 | (1 + inner_padding_factor * 2).astype(np.int32) 114 | output_size += np.array(outer_padding) 115 | #print(' deduced from paddings, output_size = ', output_size) 116 | 117 | if not (outer_padding[0] < output_size[0] 118 | and outer_padding[1] < output_size[1]): 119 | raise FaceWarpException('Not (outer_padding[0] < output_size[0]' 120 | 'and outer_padding[1] < output_size[1])') 121 | 122 | # 1) pad the inner region according inner_padding_factor 123 | #print('---> STEP1: pad the inner region according inner_padding_factor') 124 | if inner_padding_factor > 0: 125 | size_diff = tmp_crop_size * inner_padding_factor * 2 126 | tmp_5pts += size_diff / 2 127 | tmp_crop_size += np.round(size_diff).astype(np.int32) 128 | 129 | #print(' crop_size = ', tmp_crop_size) 130 | #print(' reference_5pts = ', tmp_5pts) 131 | 132 | # 2) resize the padded inner region 133 | #print('---> STEP2: resize the padded inner region') 134 | size_bf_outer_pad = np.array(output_size) - np.array(outer_padding) * 2 135 | #print(' crop_size = ', tmp_crop_size) 136 | #print(' size_bf_outer_pad = ', size_bf_outer_pad) 137 | 138 | if size_bf_outer_pad[0] * tmp_crop_size[1] != size_bf_outer_pad[1] * tmp_crop_size[0]: 139 | raise FaceWarpException('Must have (output_size - 
outer_padding)' 140 | '= some_scale * (crop_size * (1.0 + inner_padding_factor)') 141 | 142 | scale_factor = size_bf_outer_pad[0].astype(np.float32) / tmp_crop_size[0] 143 | #print(' resize scale_factor = ', scale_factor) 144 | tmp_5pts = tmp_5pts * scale_factor 145 | # size_diff = tmp_crop_size * (scale_factor - min(scale_factor)) 146 | # tmp_5pts = tmp_5pts + size_diff / 2 147 | tmp_crop_size = size_bf_outer_pad 148 | #print(' crop_size = ', tmp_crop_size) 149 | #print(' reference_5pts = ', tmp_5pts) 150 | 151 | # 3) add outer_padding to make output_size 152 | reference_5point = tmp_5pts + np.array(outer_padding) 153 | tmp_crop_size = output_size 154 | #print('---> STEP3: add outer_padding to make output_size') 155 | #print(' crop_size = ', tmp_crop_size) 156 | #print(' reference_5pts = ', tmp_5pts) 157 | 158 | #print('===> end get_reference_facial_points\n') 159 | 160 | return reference_5point 161 | 162 | 163 | def get_affine_transform_matrix(src_pts, dst_pts): 164 | """ 165 | Function: 166 | ---------- 167 | get affine transform matrix 'tfm' from src_pts to dst_pts 168 | Parameters: 169 | ---------- 170 | @src_pts: Kx2 np.array 171 | source points matrix, each row is a pair of coordinates (x, y) 172 | @dst_pts: Kx2 np.array 173 | destination points matrix, each row is a pair of coordinates (x, y) 174 | Returns: 175 | ---------- 176 | @tfm: 2x3 np.array 177 | transform matrix from src_pts to dst_pts 178 | """ 179 | 180 | tfm = np.float32([[1, 0, 0], [0, 1, 0]]) 181 | n_pts = src_pts.shape[0] 182 | ones = np.ones((n_pts, 1), src_pts.dtype) 183 | src_pts_ = np.hstack([src_pts, ones]) 184 | dst_pts_ = np.hstack([dst_pts, ones]) 185 | 186 | # #print(('src_pts_:\n' + str(src_pts_)) 187 | # #print(('dst_pts_:\n' + str(dst_pts_)) 188 | 189 | A, res, rank, s = np.linalg.lstsq(src_pts_, dst_pts_) 190 | 191 | # #print(('np.linalg.lstsq return A: \n' + str(A)) 192 | # #print(('np.linalg.lstsq return res: \n' + str(res)) 193 | # #print(('np.linalg.lstsq return rank: \n' + str(rank)) 194 | # #print(('np.linalg.lstsq return s: \n' + str(s)) 195 | 196 | if rank == 3: 197 | tfm = np.float32([ 198 | [A[0, 0], A[1, 0], A[2, 0]], 199 | [A[0, 1], A[1, 1], A[2, 1]] 200 | ]) 201 | elif rank == 2: 202 | tfm = np.float32([ 203 | [A[0, 0], A[1, 0], 0], 204 | [A[0, 1], A[1, 1], 0] 205 | ]) 206 | 207 | return tfm 208 | 209 | 210 | def warp_and_crop_face(src_img, 211 | facial_pts, 212 | reference_pts=None, 213 | crop_size=(96, 112), 214 | align_type='smilarity'): 215 | """ 216 | Function: 217 | ---------- 218 | apply affine transform 'trans' to uv 219 | Parameters: 220 | ---------- 221 | @src_img: 3x3 np.array 222 | input image 223 | @facial_pts: could be 224 | 1)a list of K coordinates (x,y) 225 | or 226 | 2) Kx2 or 2xK np.array 227 | each row or col is a pair of coordinates (x, y) 228 | @reference_pts: could be 229 | 1) a list of K coordinates (x,y) 230 | or 231 | 2) Kx2 or 2xK np.array 232 | each row or col is a pair of coordinates (x, y) 233 | or 234 | 3) None 235 | if None, use default reference facial points 236 | @crop_size: (w, h) 237 | output face image size 238 | @align_type: transform type, could be one of 239 | 1) 'similarity': use similarity transform 240 | 2) 'cv2_affine': use the first 3 points to do affine transform, 241 | by calling cv2.getAffineTransform() 242 | 3) 'affine': use all points to do affine transform 243 | Returns: 244 | ---------- 245 | @face_img: output face image with size (w, h) = @crop_size 246 | """ 247 | 248 | if reference_pts is None: 249 | if crop_size[0] == 96 and 
crop_size[1] == 112: 250 | reference_pts = REFERENCE_FACIAL_POINTS 251 | else: 252 | default_square = False 253 | inner_padding_factor = 0 254 | outer_padding = (0, 0) 255 | output_size = crop_size 256 | 257 | reference_pts = get_reference_facial_points(output_size, 258 | inner_padding_factor, 259 | outer_padding, 260 | default_square) 261 | 262 | ref_pts = np.float32(reference_pts) 263 | ref_pts_shp = ref_pts.shape 264 | if max(ref_pts_shp) < 3 or min(ref_pts_shp) != 2: 265 | raise FaceWarpException( 266 | 'reference_pts.shape must be (K,2) or (2,K) and K>2') 267 | 268 | if ref_pts_shp[0] == 2: 269 | ref_pts = ref_pts.T 270 | 271 | src_pts = np.float32(facial_pts) 272 | src_pts_shp = src_pts.shape 273 | if max(src_pts_shp) < 3 or min(src_pts_shp) != 2: 274 | raise FaceWarpException( 275 | 'facial_pts.shape must be (K,2) or (2,K) and K>2') 276 | 277 | if src_pts_shp[0] == 2: 278 | src_pts = src_pts.T 279 | 280 | # #print('--->src_pts:\n', src_pts 281 | # #print('--->ref_pts\n', ref_pts 282 | 283 | if src_pts.shape != ref_pts.shape: 284 | raise FaceWarpException( 285 | 'facial_pts and reference_pts must have the same shape') 286 | 287 | if align_type is 'cv2_affine': 288 | tfm = cv2.getAffineTransform(src_pts[0:3], ref_pts[0:3]) 289 | # #print(('cv2.getAffineTransform() returns tfm=\n' + str(tfm)) 290 | elif align_type is 'affine': 291 | tfm = get_affine_transform_matrix(src_pts, ref_pts) 292 | # #print(('get_affine_transform_matrix() returns tfm=\n' + str(tfm)) 293 | else: 294 | tfm = get_similarity_transform_for_cv2(src_pts, ref_pts) 295 | # #print(('get_similarity_transform_for_cv2() returns tfm=\n' + str(tfm)) 296 | 297 | # #print('--->Transform matrix: ' 298 | # #print(('type(tfm):' + str(type(tfm))) 299 | # #print(('tfm.dtype:' + str(tfm.dtype)) 300 | # #print( tfm 301 | 302 | face_img = cv2.warpAffine(src_img, tfm, (crop_size[0], crop_size[1])) 303 | 304 | return face_img -------------------------------------------------------------------------------- /mtcnn_pytorch/src/box_utils.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from PIL import Image 3 | 4 | 5 | def nms(boxes, overlap_threshold=0.5, mode='union'): 6 | """Non-maximum suppression. 7 | 8 | Arguments: 9 | boxes: a float numpy array of shape [n, 5], 10 | where each row is (xmin, ymin, xmax, ymax, score). 11 | overlap_threshold: a float number. 12 | mode: 'union' or 'min'. 
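    Example (an illustrative sketch; the coordinates and scores below are invented):
        >>> boxes = np.array([[10., 10., 50., 50., 0.9],
        ...                   [12., 12., 52., 52., 0.8],
        ...                   [100., 100., 140., 140., 0.7]])
        >>> keep = nms(boxes, overlap_threshold=0.5)
        >>> # keep == [0, 2]: the 0.8 box overlaps the higher-scoring 0.9 box
        >>> # with IoU > 0.5 and is suppressed; the distant 0.7 box survives.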
13 | 14 | Returns: 15 | list with indices of the selected boxes 16 | """ 17 | 18 | # if there are no boxes, return the empty list 19 | if len(boxes) == 0: 20 | return [] 21 | 22 | # list of picked indices 23 | pick = [] 24 | 25 | # grab the coordinates of the bounding boxes 26 | x1, y1, x2, y2, score = [boxes[:, i] for i in range(5)] 27 | 28 | area = (x2 - x1 + 1.0)*(y2 - y1 + 1.0) 29 | ids = np.argsort(score) # in increasing order 30 | 31 | while len(ids) > 0: 32 | 33 | # grab index of the largest value 34 | last = len(ids) - 1 35 | i = ids[last] 36 | pick.append(i) 37 | 38 | # compute intersections 39 | # of the box with the largest score 40 | # with the rest of boxes 41 | 42 | # left top corner of intersection boxes 43 | ix1 = np.maximum(x1[i], x1[ids[:last]]) 44 | iy1 = np.maximum(y1[i], y1[ids[:last]]) 45 | 46 | # right bottom corner of intersection boxes 47 | ix2 = np.minimum(x2[i], x2[ids[:last]]) 48 | iy2 = np.minimum(y2[i], y2[ids[:last]]) 49 | 50 | # width and height of intersection boxes 51 | w = np.maximum(0.0, ix2 - ix1 + 1.0) 52 | h = np.maximum(0.0, iy2 - iy1 + 1.0) 53 | 54 | # intersections' areas 55 | inter = w * h 56 | if mode == 'min': 57 | overlap = inter/np.minimum(area[i], area[ids[:last]]) 58 | elif mode == 'union': 59 | # intersection over union (IoU) 60 | overlap = inter/(area[i] + area[ids[:last]] - inter) 61 | 62 | # delete all boxes where overlap is too big 63 | ids = np.delete( 64 | ids, 65 | np.concatenate([[last], np.where(overlap > overlap_threshold)[0]]) 66 | ) 67 | 68 | return pick 69 | 70 | 71 | def convert_to_square(bboxes): 72 | """Convert bounding boxes to a square form. 73 | 74 | Arguments: 75 | bboxes: a float numpy array of shape [n, 5]. 76 | 77 | Returns: 78 | a float numpy array of shape [n, 5], 79 | squared bounding boxes. 80 | """ 81 | 82 | square_bboxes = np.zeros_like(bboxes) 83 | x1, y1, x2, y2 = [bboxes[:, i] for i in range(4)] 84 | h = y2 - y1 + 1.0 85 | w = x2 - x1 + 1.0 86 | max_side = np.maximum(h, w) 87 | square_bboxes[:, 0] = x1 + w*0.5 - max_side*0.5 88 | square_bboxes[:, 1] = y1 + h*0.5 - max_side*0.5 89 | square_bboxes[:, 2] = square_bboxes[:, 0] + max_side - 1.0 90 | square_bboxes[:, 3] = square_bboxes[:, 1] + max_side - 1.0 91 | return square_bboxes 92 | 93 | 94 | def calibrate_box(bboxes, offsets): 95 | """Transform bounding boxes to be more like true bounding boxes. 96 | 'offsets' is one of the outputs of the nets. 97 | 98 | Arguments: 99 | bboxes: a float numpy array of shape [n, 5]. 100 | offsets: a float numpy array of shape [n, 4]. 101 | 102 | Returns: 103 | a float numpy array of shape [n, 5]. 104 | """ 105 | x1, y1, x2, y2 = [bboxes[:, i] for i in range(4)] 106 | w = x2 - x1 + 1.0 107 | h = y2 - y1 + 1.0 108 | w = np.expand_dims(w, 1) 109 | h = np.expand_dims(h, 1) 110 | 111 | # this is what happening here: 112 | # tx1, ty1, tx2, ty2 = [offsets[:, i] for i in range(4)] 113 | # x1_true = x1 + tx1*w 114 | # y1_true = y1 + ty1*h 115 | # x2_true = x2 + tx2*w 116 | # y2_true = y2 + ty2*h 117 | # below is just more compact form of this 118 | 119 | # are offsets always such that 120 | # x1 < x2 and y1 < y2 ? 121 | 122 | translation = np.hstack([w, h, w, h])*offsets 123 | bboxes[:, 0:4] = bboxes[:, 0:4] + translation 124 | return bboxes 125 | 126 | 127 | def get_image_boxes(bounding_boxes, img, size=24): 128 | """Cut out boxes from the image. 129 | 130 | Arguments: 131 | bounding_boxes: a float numpy array of shape [n, 5]. 132 | img: an instance of PIL.Image. 133 | size: an integer, size of cutouts. 
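    Example (an illustrative sketch; `bounding_boxes` would come from the previous
    detection stage and `img` is any PIL.Image):
        >>> crops = get_image_boxes(bounding_boxes, img, size=24)   # R-Net input size
        >>> # crops.shape == (len(bounding_boxes), 3, 24, 24), with pixel values
        >>> # already rescaled to roughly [-1, 1] by _preprocess().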
134 | 135 | Returns: 136 | a float numpy array of shape [n, 3, size, size]. 137 | """ 138 | 139 | num_boxes = len(bounding_boxes) 140 | width, height = img.size 141 | 142 | [dy, edy, dx, edx, y, ey, x, ex, w, h] = correct_bboxes(bounding_boxes, width, height) 143 | img_boxes = np.zeros((num_boxes, 3, size, size), 'float32') 144 | 145 | for i in range(num_boxes): 146 | img_box = np.zeros((h[i], w[i], 3), 'uint8') 147 | 148 | img_array = np.asarray(img, 'uint8') 149 | img_box[dy[i]:(edy[i] + 1), dx[i]:(edx[i] + 1), :] =\ 150 | img_array[y[i]:(ey[i] + 1), x[i]:(ex[i] + 1), :] 151 | 152 | # resize 153 | img_box = Image.fromarray(img_box) 154 | img_box = img_box.resize((size, size), Image.BILINEAR) 155 | img_box = np.asarray(img_box, 'float32') 156 | 157 | img_boxes[i, :, :, :] = _preprocess(img_box) 158 | 159 | return img_boxes 160 | 161 | 162 | def correct_bboxes(bboxes, width, height): 163 | """Crop boxes that are too big and get coordinates 164 | with respect to cutouts. 165 | 166 | Arguments: 167 | bboxes: a float numpy array of shape [n, 5], 168 | where each row is (xmin, ymin, xmax, ymax, score). 169 | width: a float number. 170 | height: a float number. 171 | 172 | Returns: 173 | dy, dx, edy, edx: a int numpy arrays of shape [n], 174 | coordinates of the boxes with respect to the cutouts. 175 | y, x, ey, ex: a int numpy arrays of shape [n], 176 | corrected ymin, xmin, ymax, xmax. 177 | h, w: a int numpy arrays of shape [n], 178 | just heights and widths of boxes. 179 | 180 | in the following order: 181 | [dy, edy, dx, edx, y, ey, x, ex, w, h]. 182 | """ 183 | 184 | x1, y1, x2, y2 = [bboxes[:, i] for i in range(4)] 185 | w, h = x2 - x1 + 1.0, y2 - y1 + 1.0 186 | num_boxes = bboxes.shape[0] 187 | 188 | # 'e' stands for end 189 | # (x, y) -> (ex, ey) 190 | x, y, ex, ey = x1, y1, x2, y2 191 | 192 | # we need to cut out a box from the image. 193 | # (x, y, ex, ey) are corrected coordinates of the box 194 | # in the image. 195 | # (dx, dy, edx, edy) are coordinates of the box in the cutout 196 | # from the image. 197 | dx, dy = np.zeros((num_boxes,)), np.zeros((num_boxes,)) 198 | edx, edy = w.copy() - 1.0, h.copy() - 1.0 199 | 200 | # if box's bottom right corner is too far right 201 | ind = np.where(ex > width - 1.0)[0] 202 | edx[ind] = w[ind] + width - 2.0 - ex[ind] 203 | ex[ind] = width - 1.0 204 | 205 | # if box's bottom right corner is too low 206 | ind = np.where(ey > height - 1.0)[0] 207 | edy[ind] = h[ind] + height - 2.0 - ey[ind] 208 | ey[ind] = height - 1.0 209 | 210 | # if box's top left corner is too far left 211 | ind = np.where(x < 0.0)[0] 212 | dx[ind] = 0.0 - x[ind] 213 | x[ind] = 0.0 214 | 215 | # if box's top left corner is too high 216 | ind = np.where(y < 0.0)[0] 217 | dy[ind] = 0.0 - y[ind] 218 | y[ind] = 0.0 219 | 220 | return_list = [dy, edy, dx, edx, y, ey, x, ex, w, h] 221 | return_list = [i.astype('int32') for i in return_list] 222 | 223 | return return_list 224 | 225 | 226 | def _preprocess(img): 227 | """Preprocessing step before feeding the network. 228 | 229 | Arguments: 230 | img: a float numpy array of shape [h, w, c]. 231 | 232 | Returns: 233 | a float numpy array of shape [1, c, h, w]. 
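    Example (an illustrative check on an all-zero input):
        >>> x = _preprocess(np.zeros((12, 12, 3), 'float32'))
        >>> x.shape                    # channels moved first, batch axis added
        (1, 3, 12, 12)
        >>> float(x[0, 0, 0, 0])       # (0 - 127.5) * 0.0078125
        -0.99609375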
234 | """ 235 | img = img.transpose((2, 0, 1)) 236 | img = np.expand_dims(img, 0) 237 | img = (img - 127.5)*0.0078125 238 | return img 239 | -------------------------------------------------------------------------------- /mtcnn_pytorch/src/detector.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch 3 | from torch.autograd import Variable 4 | from .get_nets import PNet, RNet, ONet 5 | from .box_utils import nms, calibrate_box, get_image_boxes, convert_to_square 6 | from .first_stage import run_first_stage 7 | 8 | 9 | def detect_faces(image, min_face_size=20.0, 10 | thresholds=[0.6, 0.7, 0.8], 11 | nms_thresholds=[0.7, 0.7, 0.7]): 12 | """ 13 | Arguments: 14 | image: an instance of PIL.Image. 15 | min_face_size: a float number. 16 | thresholds: a list of length 3. 17 | nms_thresholds: a list of length 3. 18 | 19 | Returns: 20 | two float numpy arrays of shapes [n_boxes, 4] and [n_boxes, 10], 21 | bounding boxes and facial landmarks. 22 | """ 23 | 24 | # LOAD MODELS 25 | pnet = PNet() 26 | rnet = RNet() 27 | onet = ONet() 28 | onet.eval() 29 | 30 | # BUILD AN IMAGE PYRAMID 31 | width, height = image.size 32 | min_length = min(height, width) 33 | 34 | min_detection_size = 12 35 | factor = 0.707 # sqrt(0.5) 36 | 37 | # scales for scaling the image 38 | scales = [] 39 | 40 | # scales the image so that 41 | # minimum size that we can detect equals to 42 | # minimum face size that we want to detect 43 | m = min_detection_size/min_face_size 44 | min_length *= m 45 | 46 | factor_count = 0 47 | while min_length > min_detection_size: 48 | scales.append(m*factor**factor_count) 49 | min_length *= factor 50 | factor_count += 1 51 | 52 | # STAGE 1 53 | 54 | # it will be returned 55 | bounding_boxes = [] 56 | 57 | with torch.no_grad(): 58 | # run P-Net on different scales 59 | for s in scales: 60 | boxes = run_first_stage(image, pnet, scale=s, threshold=thresholds[0]) 61 | bounding_boxes.append(boxes) 62 | 63 | # collect boxes (and offsets, and scores) from different scales 64 | bounding_boxes = [i for i in bounding_boxes if i is not None] 65 | bounding_boxes = np.vstack(bounding_boxes) 66 | 67 | keep = nms(bounding_boxes[:, 0:5], nms_thresholds[0]) 68 | bounding_boxes = bounding_boxes[keep] 69 | 70 | # use offsets predicted by pnet to transform bounding boxes 71 | bounding_boxes = calibrate_box(bounding_boxes[:, 0:5], bounding_boxes[:, 5:]) 72 | # shape [n_boxes, 5] 73 | 74 | bounding_boxes = convert_to_square(bounding_boxes) 75 | bounding_boxes[:, 0:4] = np.round(bounding_boxes[:, 0:4]) 76 | 77 | # STAGE 2 78 | 79 | img_boxes = get_image_boxes(bounding_boxes, image, size=24) 80 | img_boxes = torch.FloatTensor(img_boxes) 81 | 82 | output = rnet(img_boxes) 83 | offsets = output[0].data.numpy() # shape [n_boxes, 4] 84 | probs = output[1].data.numpy() # shape [n_boxes, 2] 85 | 86 | keep = np.where(probs[:, 1] > thresholds[1])[0] 87 | bounding_boxes = bounding_boxes[keep] 88 | bounding_boxes[:, 4] = probs[keep, 1].reshape((-1,)) 89 | offsets = offsets[keep] 90 | 91 | keep = nms(bounding_boxes, nms_thresholds[1]) 92 | bounding_boxes = bounding_boxes[keep] 93 | bounding_boxes = calibrate_box(bounding_boxes, offsets[keep]) 94 | bounding_boxes = convert_to_square(bounding_boxes) 95 | bounding_boxes[:, 0:4] = np.round(bounding_boxes[:, 0:4]) 96 | 97 | # STAGE 3 98 | 99 | img_boxes = get_image_boxes(bounding_boxes, image, size=48) 100 | if len(img_boxes) == 0: 101 | return [], [] 102 | img_boxes = torch.FloatTensor(img_boxes) 103 | output = 
onet(img_boxes) 104 | landmarks = output[0].data.numpy() # shape [n_boxes, 10] 105 | offsets = output[1].data.numpy() # shape [n_boxes, 4] 106 | probs = output[2].data.numpy() # shape [n_boxes, 2] 107 | 108 | keep = np.where(probs[:, 1] > thresholds[2])[0] 109 | bounding_boxes = bounding_boxes[keep] 110 | bounding_boxes[:, 4] = probs[keep, 1].reshape((-1,)) 111 | offsets = offsets[keep] 112 | landmarks = landmarks[keep] 113 | 114 | # compute landmark points 115 | width = bounding_boxes[:, 2] - bounding_boxes[:, 0] + 1.0 116 | height = bounding_boxes[:, 3] - bounding_boxes[:, 1] + 1.0 117 | xmin, ymin = bounding_boxes[:, 0], bounding_boxes[:, 1] 118 | landmarks[:, 0:5] = np.expand_dims(xmin, 1) + np.expand_dims(width, 1)*landmarks[:, 0:5] 119 | landmarks[:, 5:10] = np.expand_dims(ymin, 1) + np.expand_dims(height, 1)*landmarks[:, 5:10] 120 | 121 | bounding_boxes = calibrate_box(bounding_boxes, offsets) 122 | keep = nms(bounding_boxes, nms_thresholds[2], mode='min') 123 | bounding_boxes = bounding_boxes[keep] 124 | landmarks = landmarks[keep] 125 | 126 | return bounding_boxes, landmarks 127 | -------------------------------------------------------------------------------- /mtcnn_pytorch/src/first_stage.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from torch.autograd import Variable 3 | import math 4 | from PIL import Image 5 | import numpy as np 6 | from .box_utils import nms, _preprocess 7 | device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") 8 | # device = 'cpu' 9 | 10 | def run_first_stage(image, net, scale, threshold): 11 | """Run P-Net, generate bounding boxes, and do NMS. 12 | 13 | Arguments: 14 | image: an instance of PIL.Image. 15 | net: an instance of pytorch's nn.Module, P-Net. 16 | scale: a float number, 17 | scale width and height of the image by this number. 18 | threshold: a float number, 19 | threshold on the probability of a face when generating 20 | bounding boxes from predictions of the net. 21 | 22 | Returns: 23 | a float numpy array of shape [n_boxes, 9], 24 | bounding boxes with scores and offsets (4 + 1 + 4). 25 | """ 26 | 27 | # scale the image and convert it to a float array 28 | width, height = image.size 29 | sw, sh = math.ceil(width*scale), math.ceil(height*scale) 30 | img = image.resize((sw, sh), Image.BILINEAR) 31 | img = np.asarray(img, 'float32') 32 | 33 | img = torch.FloatTensor(_preprocess(img)).to(device) 34 | with torch.no_grad(): 35 | output = net(img) 36 | probs = output[1].cpu().data.numpy()[0, 1, :, :] 37 | offsets = output[0].cpu().data.numpy() 38 | # probs: probability of a face at each sliding window 39 | # offsets: transformations to true bounding boxes 40 | 41 | boxes = _generate_bboxes(probs, offsets, scale, threshold) 42 | if len(boxes) == 0: 43 | return None 44 | 45 | keep = nms(boxes[:, 0:5], overlap_threshold=0.5) 46 | return boxes[keep] 47 | 48 | 49 | def _generate_bboxes(probs, offsets, scale, threshold): 50 | """Generate bounding boxes at places 51 | where there is probably a face. 52 | 53 | Arguments: 54 | probs: a float numpy array of shape [n, m]. 55 | offsets: a float numpy array of shape [1, 4, n, m]. 56 | scale: a float number, 57 | width and height of the image were scaled by this number. 58 | threshold: a float number. 
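    Example (a worked sketch with invented numbers): a probability above the
    threshold at feature-map row 3, column 5, found at scale 0.6, maps back onto
    the original image roughly as
        >>> x1, y1 = round((2*5 + 1) / 0.6), round((2*3 + 1) / 0.6)             # 18, 12
        >>> x2, y2 = round((2*5 + 1 + 12) / 0.6), round((2*3 + 1 + 12) / 0.6)   # 38, 32
    i.e. a 12x12 window in the scaled image, mapped back by dividing by the scale.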
59 | 60 | Returns: 61 | a float numpy array of shape [n_boxes, 9] 62 | """ 63 | 64 | # applying P-Net is equivalent, in some sense, to 65 | # moving 12x12 window with stride 2 66 | stride = 2 67 | cell_size = 12 68 | 69 | # indices of boxes where there is probably a face 70 | inds = np.where(probs > threshold) 71 | 72 | if inds[0].size == 0: 73 | return np.array([]) 74 | 75 | # transformations of bounding boxes 76 | tx1, ty1, tx2, ty2 = [offsets[0, i, inds[0], inds[1]] for i in range(4)] 77 | # they are defined as: 78 | # w = x2 - x1 + 1 79 | # h = y2 - y1 + 1 80 | # x1_true = x1 + tx1*w 81 | # x2_true = x2 + tx2*w 82 | # y1_true = y1 + ty1*h 83 | # y2_true = y2 + ty2*h 84 | 85 | offsets = np.array([tx1, ty1, tx2, ty2]) 86 | score = probs[inds[0], inds[1]] 87 | 88 | # P-Net is applied to scaled images 89 | # so we need to rescale bounding boxes back 90 | bounding_boxes = np.vstack([ 91 | np.round((stride*inds[1] + 1.0)/scale), 92 | np.round((stride*inds[0] + 1.0)/scale), 93 | np.round((stride*inds[1] + 1.0 + cell_size)/scale), 94 | np.round((stride*inds[0] + 1.0 + cell_size)/scale), 95 | score, offsets 96 | ]) 97 | # why one is added? 98 | 99 | return bounding_boxes.T 100 | -------------------------------------------------------------------------------- /mtcnn_pytorch/src/get_nets.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import torch.nn.functional as F 4 | from collections import OrderedDict 5 | import numpy as np 6 | 7 | 8 | class Flatten(nn.Module): 9 | 10 | def __init__(self): 11 | super(Flatten, self).__init__() 12 | 13 | def forward(self, x): 14 | """ 15 | Arguments: 16 | x: a float tensor with shape [batch_size, c, h, w]. 17 | Returns: 18 | a float tensor with shape [batch_size, c*h*w]. 19 | """ 20 | 21 | # without this pretrained model isn't working 22 | x = x.transpose(3, 2).contiguous() 23 | 24 | return x.view(x.size(0), -1) 25 | 26 | 27 | class PNet(nn.Module): 28 | 29 | def __init__(self): 30 | 31 | super(PNet, self).__init__() 32 | 33 | # suppose we have input with size HxW, then 34 | # after first layer: H - 2, 35 | # after pool: ceil((H - 2)/2), 36 | # after second conv: ceil((H - 2)/2) - 2, 37 | # after last conv: ceil((H - 2)/2) - 4, 38 | # and the same for W 39 | 40 | self.features = nn.Sequential(OrderedDict([ 41 | ('conv1', nn.Conv2d(3, 10, 3, 1)), 42 | ('prelu1', nn.PReLU(10)), 43 | ('pool1', nn.MaxPool2d(2, 2, ceil_mode=True)), 44 | 45 | ('conv2', nn.Conv2d(10, 16, 3, 1)), 46 | ('prelu2', nn.PReLU(16)), 47 | 48 | ('conv3', nn.Conv2d(16, 32, 3, 1)), 49 | ('prelu3', nn.PReLU(32)) 50 | ])) 51 | 52 | self.conv4_1 = nn.Conv2d(32, 2, 1, 1) 53 | self.conv4_2 = nn.Conv2d(32, 4, 1, 1) 54 | 55 | weights = np.load('mtcnn_pytorch/src/weights/pnet.npy')[()] 56 | for n, p in self.named_parameters(): 57 | p.data = torch.FloatTensor(weights[n]) 58 | 59 | def forward(self, x): 60 | """ 61 | Arguments: 62 | x: a float tensor with shape [batch_size, 3, h, w]. 63 | Returns: 64 | b: a float tensor with shape [batch_size, 4, h', w']. 65 | a: a float tensor with shape [batch_size, 2, h', w']. 
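        Example (a minimal sketch; it assumes the .npy weight files exist and the
        process runs from the repository root, since the weights path above is
        hard-coded; with NumPy >= 1.16.3 that np.load call typically also needs
        allow_pickle=True):
        >>> net = PNet()
        >>> b, a = net(torch.randn(1, 3, 12, 12))   # 12x12 is the smallest valid input
        >>> # b.shape == (1, 4, 1, 1)   box offsets
        >>> # a.shape == (1, 2, 1, 1)   face probabilities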
66 | """ 67 | x = self.features(x) 68 | a = self.conv4_1(x) 69 | b = self.conv4_2(x) 70 | a = F.softmax(a, dim=-1) 71 | return b, a 72 | 73 | 74 | class RNet(nn.Module): 75 | 76 | def __init__(self): 77 | 78 | super(RNet, self).__init__() 79 | 80 | self.features = nn.Sequential(OrderedDict([ 81 | ('conv1', nn.Conv2d(3, 28, 3, 1)), 82 | ('prelu1', nn.PReLU(28)), 83 | ('pool1', nn.MaxPool2d(3, 2, ceil_mode=True)), 84 | 85 | ('conv2', nn.Conv2d(28, 48, 3, 1)), 86 | ('prelu2', nn.PReLU(48)), 87 | ('pool2', nn.MaxPool2d(3, 2, ceil_mode=True)), 88 | 89 | ('conv3', nn.Conv2d(48, 64, 2, 1)), 90 | ('prelu3', nn.PReLU(64)), 91 | 92 | ('flatten', Flatten()), 93 | ('conv4', nn.Linear(576, 128)), 94 | ('prelu4', nn.PReLU(128)) 95 | ])) 96 | 97 | self.conv5_1 = nn.Linear(128, 2) 98 | self.conv5_2 = nn.Linear(128, 4) 99 | 100 | weights = np.load('mtcnn_pytorch/src/weights/rnet.npy')[()] 101 | for n, p in self.named_parameters(): 102 | p.data = torch.FloatTensor(weights[n]) 103 | 104 | def forward(self, x): 105 | """ 106 | Arguments: 107 | x: a float tensor with shape [batch_size, 3, h, w]. 108 | Returns: 109 | b: a float tensor with shape [batch_size, 4]. 110 | a: a float tensor with shape [batch_size, 2]. 111 | """ 112 | x = self.features(x) 113 | a = self.conv5_1(x) 114 | b = self.conv5_2(x) 115 | a = F.softmax(a, dim=-1) 116 | return b, a 117 | 118 | 119 | class ONet(nn.Module): 120 | 121 | def __init__(self): 122 | 123 | super(ONet, self).__init__() 124 | 125 | self.features = nn.Sequential(OrderedDict([ 126 | ('conv1', nn.Conv2d(3, 32, 3, 1)), 127 | ('prelu1', nn.PReLU(32)), 128 | ('pool1', nn.MaxPool2d(3, 2, ceil_mode=True)), 129 | 130 | ('conv2', nn.Conv2d(32, 64, 3, 1)), 131 | ('prelu2', nn.PReLU(64)), 132 | ('pool2', nn.MaxPool2d(3, 2, ceil_mode=True)), 133 | 134 | ('conv3', nn.Conv2d(64, 64, 3, 1)), 135 | ('prelu3', nn.PReLU(64)), 136 | ('pool3', nn.MaxPool2d(2, 2, ceil_mode=True)), 137 | 138 | ('conv4', nn.Conv2d(64, 128, 2, 1)), 139 | ('prelu4', nn.PReLU(128)), 140 | 141 | ('flatten', Flatten()), 142 | ('conv5', nn.Linear(1152, 256)), 143 | ('drop5', nn.Dropout(0.25)), 144 | ('prelu5', nn.PReLU(256)), 145 | ])) 146 | 147 | self.conv6_1 = nn.Linear(256, 2) 148 | self.conv6_2 = nn.Linear(256, 4) 149 | self.conv6_3 = nn.Linear(256, 10) 150 | 151 | weights = np.load('mtcnn_pytorch/src/weights/onet.npy')[()] 152 | for n, p in self.named_parameters(): 153 | p.data = torch.FloatTensor(weights[n]) 154 | 155 | def forward(self, x): 156 | """ 157 | Arguments: 158 | x: a float tensor with shape [batch_size, 3, h, w]. 159 | Returns: 160 | c: a float tensor with shape [batch_size, 10]. 161 | b: a float tensor with shape [batch_size, 4]. 162 | a: a float tensor with shape [batch_size, 2]. 
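        Example (a minimal sketch under the same assumptions as for P-Net above;
        detector.py feeds O-Net 48x48 crops):
        >>> crops = torch.randn(5, 3, 48, 48)
        >>> c, b, a = ONet()(crops)
        >>> # c.shape == (5, 10)   landmark coordinates
        >>> # b.shape == (5, 4)    box offsets
        >>> # a.shape == (5, 2)    face probabilities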
163 | """ 164 | x = self.features(x) 165 | a = self.conv6_1(x) 166 | b = self.conv6_2(x) 167 | c = self.conv6_3(x) 168 | a = F.softmax(a, dim = -1) 169 | return c, b, a 170 | -------------------------------------------------------------------------------- /mtcnn_pytorch/src/matlab_cp2tform.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Created on Tue Jul 11 06:54:28 2017 4 | 5 | @author: zhaoyafei 6 | """ 7 | 8 | import numpy as np 9 | from numpy.linalg import inv, norm, lstsq 10 | from numpy.linalg import matrix_rank as rank 11 | 12 | class MatlabCp2tormException(Exception): 13 | def __str__(self): 14 | return 'In File {}:{}'.format( 15 | __file__, super.__str__(self)) 16 | 17 | def tformfwd(trans, uv): 18 | """ 19 | Function: 20 | ---------- 21 | apply affine transform 'trans' to uv 22 | 23 | Parameters: 24 | ---------- 25 | @trans: 3x3 np.array 26 | transform matrix 27 | @uv: Kx2 np.array 28 | each row is a pair of coordinates (x, y) 29 | 30 | Returns: 31 | ---------- 32 | @xy: Kx2 np.array 33 | each row is a pair of transformed coordinates (x, y) 34 | """ 35 | uv = np.hstack(( 36 | uv, np.ones((uv.shape[0], 1)) 37 | )) 38 | xy = np.dot(uv, trans) 39 | xy = xy[:, 0:-1] 40 | return xy 41 | 42 | 43 | def tforminv(trans, uv): 44 | """ 45 | Function: 46 | ---------- 47 | apply the inverse of affine transform 'trans' to uv 48 | 49 | Parameters: 50 | ---------- 51 | @trans: 3x3 np.array 52 | transform matrix 53 | @uv: Kx2 np.array 54 | each row is a pair of coordinates (x, y) 55 | 56 | Returns: 57 | ---------- 58 | @xy: Kx2 np.array 59 | each row is a pair of inverse-transformed coordinates (x, y) 60 | """ 61 | Tinv = inv(trans) 62 | xy = tformfwd(Tinv, uv) 63 | return xy 64 | 65 | 66 | def findNonreflectiveSimilarity(uv, xy, options=None): 67 | 68 | options = {'K': 2} 69 | 70 | K = options['K'] 71 | M = xy.shape[0] 72 | x = xy[:, 0].reshape((-1, 1)) # use reshape to keep a column vector 73 | y = xy[:, 1].reshape((-1, 1)) # use reshape to keep a column vector 74 | # print('--->x, y:\n', x, y 75 | 76 | tmp1 = np.hstack((x, y, np.ones((M, 1)), np.zeros((M, 1)))) 77 | tmp2 = np.hstack((y, -x, np.zeros((M, 1)), np.ones((M, 1)))) 78 | X = np.vstack((tmp1, tmp2)) 79 | # print('--->X.shape: ', X.shape 80 | # print('X:\n', X 81 | 82 | u = uv[:, 0].reshape((-1, 1)) # use reshape to keep a column vector 83 | v = uv[:, 1].reshape((-1, 1)) # use reshape to keep a column vector 84 | U = np.vstack((u, v)) 85 | # print('--->U.shape: ', U.shape 86 | # print('U:\n', U 87 | 88 | # We know that X * r = U 89 | if rank(X) >= 2 * K: 90 | r, _, _, _ = lstsq(X, U) 91 | r = np.squeeze(r) 92 | else: 93 | raise Exception('cp2tform:twoUniquePointsReq') 94 | 95 | # print('--->r:\n', r 96 | 97 | sc = r[0] 98 | ss = r[1] 99 | tx = r[2] 100 | ty = r[3] 101 | 102 | Tinv = np.array([ 103 | [sc, -ss, 0], 104 | [ss, sc, 0], 105 | [tx, ty, 1] 106 | ]) 107 | 108 | # print('--->Tinv:\n', Tinv 109 | 110 | T = inv(Tinv) 111 | # print('--->T:\n', T 112 | 113 | T[:, 2] = np.array([0, 0, 1]) 114 | 115 | return T, Tinv 116 | 117 | 118 | def findSimilarity(uv, xy, options=None): 119 | 120 | options = {'K': 2} 121 | 122 | # uv = np.array(uv) 123 | # xy = np.array(xy) 124 | 125 | # Solve for trans1 126 | trans1, trans1_inv = findNonreflectiveSimilarity(uv, xy, options) 127 | 128 | # Solve for trans2 129 | 130 | # manually reflect the xy data across the Y-axis 131 | xyR = xy 132 | xyR[:, 0] = -1 * xyR[:, 0] 133 | 134 | trans2r, trans2r_inv = 
findNonreflectiveSimilarity(uv, xyR, options) 135 | 136 | # manually reflect the tform to undo the reflection done on xyR 137 | TreflectY = np.array([ 138 | [-1, 0, 0], 139 | [0, 1, 0], 140 | [0, 0, 1] 141 | ]) 142 | 143 | trans2 = np.dot(trans2r, TreflectY) 144 | 145 | # Figure out if trans1 or trans2 is better 146 | xy1 = tformfwd(trans1, uv) 147 | norm1 = norm(xy1 - xy) 148 | 149 | xy2 = tformfwd(trans2, uv) 150 | norm2 = norm(xy2 - xy) 151 | 152 | if norm1 <= norm2: 153 | return trans1, trans1_inv 154 | else: 155 | trans2_inv = inv(trans2) 156 | return trans2, trans2_inv 157 | 158 | 159 | def get_similarity_transform(src_pts, dst_pts, reflective=True): 160 | """ 161 | Function: 162 | ---------- 163 | Find Similarity Transform Matrix 'trans': 164 | u = src_pts[:, 0] 165 | v = src_pts[:, 1] 166 | x = dst_pts[:, 0] 167 | y = dst_pts[:, 1] 168 | [x, y, 1] = [u, v, 1] * trans 169 | 170 | Parameters: 171 | ---------- 172 | @src_pts: Kx2 np.array 173 | source points, each row is a pair of coordinates (x, y) 174 | @dst_pts: Kx2 np.array 175 | destination points, each row is a pair of transformed 176 | coordinates (x, y) 177 | @reflective: True or False 178 | if True: 179 | use reflective similarity transform 180 | else: 181 | use non-reflective similarity transform 182 | 183 | Returns: 184 | ---------- 185 | @trans: 3x3 np.array 186 | transform matrix from uv to xy 187 | trans_inv: 3x3 np.array 188 | inverse of trans, transform matrix from xy to uv 189 | """ 190 | 191 | if reflective: 192 | trans, trans_inv = findSimilarity(src_pts, dst_pts) 193 | else: 194 | trans, trans_inv = findNonreflectiveSimilarity(src_pts, dst_pts) 195 | 196 | return trans, trans_inv 197 | 198 | 199 | def cvt_tform_mat_for_cv2(trans): 200 | """ 201 | Function: 202 | ---------- 203 | Convert Transform Matrix 'trans' into 'cv2_trans' which could be 204 | directly used by cv2.warpAffine(): 205 | u = src_pts[:, 0] 206 | v = src_pts[:, 1] 207 | x = dst_pts[:, 0] 208 | y = dst_pts[:, 1] 209 | [x, y].T = cv_trans * [u, v, 1].T 210 | 211 | Parameters: 212 | ---------- 213 | @trans: 3x3 np.array 214 | transform matrix from uv to xy 215 | 216 | Returns: 217 | ---------- 218 | @cv2_trans: 2x3 np.array 219 | transform matrix from src_pts to dst_pts, could be directly used 220 | for cv2.warpAffine() 221 | """ 222 | cv2_trans = trans[:, 0:2].T 223 | 224 | return cv2_trans 225 | 226 | 227 | def get_similarity_transform_for_cv2(src_pts, dst_pts, reflective=True): 228 | """ 229 | Function: 230 | ---------- 231 | Find Similarity Transform Matrix 'cv2_trans' which could be 232 | directly used by cv2.warpAffine(): 233 | u = src_pts[:, 0] 234 | v = src_pts[:, 1] 235 | x = dst_pts[:, 0] 236 | y = dst_pts[:, 1] 237 | [x, y].T = cv_trans * [u, v, 1].T 238 | 239 | Parameters: 240 | ---------- 241 | @src_pts: Kx2 np.array 242 | source points, each row is a pair of coordinates (x, y) 243 | @dst_pts: Kx2 np.array 244 | destination points, each row is a pair of transformed 245 | coordinates (x, y) 246 | reflective: True or False 247 | if True: 248 | use reflective similarity transform 249 | else: 250 | use non-reflective similarity transform 251 | 252 | Returns: 253 | ---------- 254 | @cv2_trans: 2x3 np.array 255 | transform matrix from src_pts to dst_pts, could be directly used 256 | for cv2.warpAffine() 257 | """ 258 | trans, trans_inv = get_similarity_transform(src_pts, dst_pts, reflective) 259 | cv2_trans = cvt_tform_mat_for_cv2(trans) 260 | 261 | return cv2_trans 262 | 263 | 264 | if __name__ == '__main__': 265 | """ 266 | u = [0, 6, -2] 267 
| v = [0, 3, 5] 268 | x = [-1, 0, 4] 269 | y = [-1, -10, 4] 270 | 271 | # In Matlab, run: 272 | # 273 | # uv = [u'; v']; 274 | # xy = [x'; y']; 275 | # tform_sim=cp2tform(uv,xy,'similarity'); 276 | # 277 | # trans = tform_sim.tdata.T 278 | # ans = 279 | # -0.0764 -1.6190 0 280 | # 1.6190 -0.0764 0 281 | # -3.2156 0.0290 1.0000 282 | # trans_inv = tform_sim.tdata.Tinv 283 | # ans = 284 | # 285 | # -0.0291 0.6163 0 286 | # -0.6163 -0.0291 0 287 | # -0.0756 1.9826 1.0000 288 | # xy_m=tformfwd(tform_sim, u,v) 289 | # 290 | # xy_m = 291 | # 292 | # -3.2156 0.0290 293 | # 1.1833 -9.9143 294 | # 5.0323 2.8853 295 | # uv_m=tforminv(tform_sim, x,y) 296 | # 297 | # uv_m = 298 | # 299 | # 0.5698 1.3953 300 | # 6.0872 2.2733 301 | # -2.6570 4.3314 302 | """ 303 | u = [0, 6, -2] 304 | v = [0, 3, 5] 305 | x = [-1, 0, 4] 306 | y = [-1, -10, 4] 307 | 308 | uv = np.array((u, v)).T 309 | xy = np.array((x, y)).T 310 | 311 | print('\n--->uv:') 312 | print(uv) 313 | print('\n--->xy:') 314 | print(xy) 315 | 316 | trans, trans_inv = get_similarity_transform(uv, xy) 317 | 318 | print('\n--->trans matrix:') 319 | print(trans) 320 | 321 | print('\n--->trans_inv matrix:') 322 | print(trans_inv) 323 | 324 | print('\n---> apply transform to uv') 325 | print('\nxy_m = uv_augmented * trans') 326 | uv_aug = np.hstack(( 327 | uv, np.ones((uv.shape[0], 1)) 328 | )) 329 | xy_m = np.dot(uv_aug, trans) 330 | print(xy_m) 331 | 332 | print('\nxy_m = tformfwd(trans, uv)') 333 | xy_m = tformfwd(trans, uv) 334 | print(xy_m) 335 | 336 | print('\n---> apply inverse transform to xy') 337 | print('\nuv_m = xy_augmented * trans_inv') 338 | xy_aug = np.hstack(( 339 | xy, np.ones((xy.shape[0], 1)) 340 | )) 341 | uv_m = np.dot(xy_aug, trans_inv) 342 | print(uv_m) 343 | 344 | print('\nuv_m = tformfwd(trans_inv, xy)') 345 | uv_m = tformfwd(trans_inv, xy) 346 | print(uv_m) 347 | 348 | uv_m = tforminv(trans, xy) 349 | print('\nuv_m = tforminv(trans, xy)') 350 | print(uv_m) 351 | -------------------------------------------------------------------------------- /mtcnn_pytorch/src/visualization_utils.py: -------------------------------------------------------------------------------- 1 | from PIL import ImageDraw 2 | 3 | 4 | def show_bboxes(img, bounding_boxes, facial_landmarks=[]): 5 | """Draw bounding boxes and facial landmarks. 6 | 7 | Arguments: 8 | img: an instance of PIL.Image. 9 | bounding_boxes: a float numpy array of shape [n, 5]. 10 | facial_landmarks: a float numpy array of shape [n, 10]. 11 | 12 | Returns: 13 | an instance of PIL.Image. 
14 | """ 15 | 16 | img_copy = img.copy() 17 | draw = ImageDraw.Draw(img_copy) 18 | 19 | for b in bounding_boxes: 20 | draw.rectangle([ 21 | (b[0], b[1]), (b[2], b[3]) 22 | ], outline='white') 23 | 24 | for p in facial_landmarks: 25 | for i in range(5): 26 | draw.ellipse([ 27 | (p[i] - 1.0, p[i + 5] - 1.0), 28 | (p[i] + 1.0, p[i + 5] + 1.0) 29 | ], outline='blue') 30 | 31 | return img_copy 32 | -------------------------------------------------------------------------------- /mtcnn_pytorch/src/weights/onet.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TreB1eN/InsightFace_Pytorch/350ff7aa9c9db8d369d1932e14d2a4d11a3e9553/mtcnn_pytorch/src/weights/onet.npy -------------------------------------------------------------------------------- /mtcnn_pytorch/src/weights/pnet.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TreB1eN/InsightFace_Pytorch/350ff7aa9c9db8d369d1932e14d2a4d11a3e9553/mtcnn_pytorch/src/weights/pnet.npy -------------------------------------------------------------------------------- /mtcnn_pytorch/src/weights/rnet.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TreB1eN/InsightFace_Pytorch/350ff7aa9c9db8d369d1932e14d2a4d11a3e9553/mtcnn_pytorch/src/weights/rnet.npy -------------------------------------------------------------------------------- /prepare_data.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | from config import get_config 3 | from data.data_pipe import load_bin, load_mx_rec 4 | import argparse 5 | 6 | if __name__ == '__main__': 7 | parser = argparse.ArgumentParser(description='for face verification') 8 | parser.add_argument("-r", "--rec_path", help="mxnet record file path",default='faces_emore', type=str) 9 | args = parser.parse_args() 10 | conf = get_config() 11 | rec_path = conf.data_path/args.rec_path 12 | load_mx_rec(rec_path) 13 | 14 | bin_files = ['agedb_30', 'cfp_fp', 'lfw', 'calfw', 'cfp_ff', 'cplfw', 'vgg2_fp'] 15 | 16 | for i in range(len(bin_files)): 17 | load_bin(rec_path/(bin_files[i]+'.bin'), rec_path/bin_files[i], conf.test_transform) -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | torch==0.4.0 2 | numpy==1.14.5 3 | matplotlib==2.1.2 4 | tqdm==4.23.4 5 | mxnet_cu90==1.2.1 6 | scipy==1.0.0 7 | bcolz==1.2.1 8 | easydict==1.7 9 | opencv_python==3.4.0.12 10 | Pillow==5.2.0 11 | mxnet==1.2.1.post1 12 | scikit_learn==0.19.2 13 | tensorboardX==1.2 14 | torchvision==0.2.1 15 | -------------------------------------------------------------------------------- /take_pic.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | import argparse 3 | from pathlib import Path 4 | from PIL import Image 5 | from mtcnn import MTCNN 6 | from datetime import datetime 7 | 8 | from PIL import Image 9 | import numpy as np 10 | from mtcnn_pytorch.src.align_trans import get_reference_facial_points, warp_and_crop_face 11 | 12 | parser = argparse.ArgumentParser(description='take a picture') 13 | parser.add_argument('--name','-n', default='unknown', type=str,help='input the name of the recording person') 14 | args = parser.parse_args() 15 | from pathlib import Path 16 | data_path = Path('data') 17 | save_path = 
data_path/'facebank'/args.name 18 | if not save_path.exists(): 19 | save_path.mkdir(parents=True) 20 | 21 | # initialize the camera 22 | cap = cv2.VideoCapture(0) 23 | # my camera defaults to 640*480; adjust the resolution to whatever your camera supports 24 | cap.set(3,1280) 25 | cap.set(4,720) 26 | mtcnn = MTCNN() 27 | 28 | while cap.isOpened(): 29 | # read frames from the camera one by one 30 | isSuccess,frame = cap.read() 31 | # show the captured frame on screen in real time 32 | if isSuccess: 33 | frame_text = cv2.putText(frame, 34 | 'Press t to take a picture,q to quit.....', 35 | (10,100), 36 | cv2.FONT_HERSHEY_SIMPLEX, 37 | 2, 38 | (0,255,0), 39 | 3, 40 | cv2.LINE_AA) 41 | cv2.imshow("My Capture",frame_text) 42 | # press 't' to take a picture 43 | if cv2.waitKey(1)&0xFF == ord('t'): 44 | p = Image.fromarray(frame[...,::-1]) 45 | try: 46 | warped_face = np.array(mtcnn.align(p))[...,::-1] 47 | cv2.imwrite(str(save_path/'{}.jpg'.format(str(datetime.now())[:-7].replace(":","-").replace(" ","-"))), warped_face) 48 | except: 49 | print('no face captured') 50 | 51 | if cv2.waitKey(1)&0xFF == ord('q'): 52 | break 53 | 54 | # release the camera resource 55 | cap.release() 56 | cv2.destroyAllWindows() 57 | -------------------------------------------------------------------------------- /train.py: -------------------------------------------------------------------------------- 1 | from config import get_config 2 | from Learner import face_learner 3 | import argparse 4 | 5 | # python train.py -net mobilefacenet -b 200 -w 4 6 | 7 | if __name__ == '__main__': 8 | parser = argparse.ArgumentParser(description='for face verification') 9 | parser.add_argument("-e", "--epochs", help="training epochs", default=20, type=int) 10 | parser.add_argument("-net", "--net_mode", help="which network, [ir, ir_se, mobilefacenet]",default='ir_se', type=str) 11 | parser.add_argument("-depth", "--net_depth", help="how many layers [50,100,152]", default=50, type=int) 12 | parser.add_argument('-lr','--lr',help='learning rate',default=1e-3, type=float) 13 | parser.add_argument("-b", "--batch_size", help="batch_size", default=96, type=int) 14 | parser.add_argument("-w", "--num_workers", help="workers number", default=3, type=int) 15 | parser.add_argument("-d", "--data_mode", help="use which database, [vgg, ms1m, emore, concat]",default='emore', type=str) 16 | args = parser.parse_args() 17 | 18 | conf = get_config() 19 | 20 | if args.net_mode == 'mobilefacenet': 21 | conf.use_mobilfacenet = True 22 | else: 23 | conf.net_mode = args.net_mode 24 | conf.net_depth = args.net_depth 25 | 26 | conf.lr = args.lr 27 | conf.batch_size = args.batch_size 28 | conf.num_workers = args.num_workers 29 | conf.data_mode = args.data_mode 30 | learner = face_learner(conf) 31 | 32 | learner.train(conf, args.epochs) -------------------------------------------------------------------------------- /utils.py: -------------------------------------------------------------------------------- 1 | from datetime import datetime 2 | from PIL import Image 3 | import numpy as np 4 | import matplotlib.pyplot as plt 5 | plt.switch_backend('agg') 6 | import io 7 | from torchvision import transforms as trans 8 | from data.data_pipe import de_preprocess 9 | import torch 10 | from model import l2_norm 11 | import pdb 12 | import cv2 13 | 14 | def separate_bn_paras(modules): 15 | if not isinstance(modules, list): 16 | modules = [*modules.modules()] 17 | paras_only_bn = [] 18 | paras_wo_bn = [] 19 | for layer in modules: 20 | if 'model' in str(layer.__class__): 21 | continue 22 | if 'container' in str(layer.__class__): 23 | continue 24 | else: 25 | if 'batchnorm' in str(layer.__class__): 26 | 
paras_only_bn.extend([*layer.parameters()]) 27 | else: 28 | paras_wo_bn.extend([*layer.parameters()]) 29 | return paras_only_bn, paras_wo_bn 30 | 31 | def prepare_facebank(conf, model, mtcnn, tta = True): 32 | model.eval() 33 | embeddings = [] 34 | names = ['Unknown'] 35 | for path in conf.facebank_path.iterdir(): 36 | if path.is_file(): 37 | continue 38 | else: 39 | embs = [] 40 | for file in path.iterdir(): 41 | if not file.is_file(): 42 | continue 43 | else: 44 | try: 45 | img = Image.open(file) 46 | except: 47 | continue 48 | if img.size != (112, 112): 49 | img = mtcnn.align(img) 50 | with torch.no_grad(): 51 | if tta: 52 | mirror = trans.functional.hflip(img) 53 | emb = model(conf.test_transform(img).to(conf.device).unsqueeze(0)) 54 | emb_mirror = model(conf.test_transform(mirror).to(conf.device).unsqueeze(0)) 55 | embs.append(l2_norm(emb + emb_mirror)) 56 | else: 57 | embs.append(model(conf.test_transform(img).to(conf.device).unsqueeze(0))) 58 | if len(embs) == 0: 59 | continue 60 | embedding = torch.cat(embs).mean(0,keepdim=True) 61 | embeddings.append(embedding) 62 | names.append(path.name) 63 | embeddings = torch.cat(embeddings) 64 | names = np.array(names) 65 | torch.save(embeddings, conf.facebank_path/'facebank.pth') 66 | np.save(conf.facebank_path/'names', names) 67 | return embeddings, names 68 | 69 | def load_facebank(conf): 70 | embeddings = torch.load(conf.facebank_path/'facebank.pth') 71 | names = np.load(conf.facebank_path/'names.npy') 72 | return embeddings, names 73 | 74 | def face_reader(conf, conn, flag, boxes_arr, result_arr, learner, mtcnn, targets, tta): 75 | while True: 76 | try: 77 | image = conn.recv() 78 | except: 79 | continue 80 | try: 81 | bboxes, faces = mtcnn.align_multi(image, limit=conf.face_limit) 82 | except: 83 | bboxes, faces = [], [] # keep faces defined when detection fails 84 | 85 | results = learner.infer(conf, faces, targets, tta) if len(faces) > 0 else [] 86 | 87 | if len(bboxes) > 0: 88 | print('bboxes in reader : {}'.format(bboxes)) 89 | bboxes = bboxes[:,:-1] # shape:[10,4], only keep the 10 highest-probability faces 90 | bboxes = bboxes.astype(int) 91 | bboxes = bboxes + [-1,-1,1,1] # personal choice 92 | assert bboxes.shape[0] == results.shape[0],'bbox and faces number not same' 93 | bboxes = bboxes.reshape([-1]) 94 | for i in range(len(boxes_arr)): 95 | if i < len(bboxes): 96 | boxes_arr[i] = bboxes[i] 97 | else: 98 | boxes_arr[i] = 0 99 | for i in range(len(result_arr)): 100 | if i < len(results): 101 | result_arr[i] = results[i] 102 | else: 103 | result_arr[i] = -1 104 | else: 105 | for i in range(len(boxes_arr)): 106 | boxes_arr[i] = 0 # by default, it's all 0 107 | for i in range(len(result_arr)): 108 | result_arr[i] = -1 # by default, it's all -1 109 | print('boxes_arr : {}'.format(boxes_arr[:4])) 110 | print('result_arr : {}'.format(result_arr[:4])) 111 | flag.value = 0 112 | 113 | hflip = trans.Compose([ 114 | de_preprocess, 115 | trans.ToPILImage(), 116 | trans.functional.hflip, 117 | trans.ToTensor(), 118 | trans.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]) 119 | ]) 120 | 121 | def hflip_batch(imgs_tensor): 122 | hfliped_imgs = torch.empty_like(imgs_tensor) 123 | for i, img_ten in enumerate(imgs_tensor): 124 | hfliped_imgs[i] = hflip(img_ten) 125 | return hfliped_imgs 126 | 127 | def get_time(): 128 | return (str(datetime.now())[:-10]).replace(' ','-').replace(':','-') 129 | 130 | def gen_plot(fpr, tpr): 131 | """Create a pyplot plot and save to buffer.""" 132 | plt.figure() 133 | plt.xlabel("FPR", fontsize=14) 134 | plt.ylabel("TPR", fontsize=14) 135 | plt.title("ROC Curve", fontsize=14) 136 | plot = plt.plot(fpr, 
tpr, linewidth=2) 137 | buf = io.BytesIO() 138 | plt.savefig(buf, format='jpeg') 139 | buf.seek(0) 140 | plt.close() 141 | return buf 142 | 143 | def draw_box_name(bbox,name,frame): 144 | frame = cv2.rectangle(frame,(bbox[0],bbox[1]),(bbox[2],bbox[3]),(0,0,255),6) 145 | frame = cv2.putText(frame, 146 | name, 147 | (bbox[0],bbox[1]), 148 | cv2.FONT_HERSHEY_SIMPLEX, 149 | 2, 150 | (0,255,0), 151 | 3, 152 | cv2.LINE_AA) 153 | return frame -------------------------------------------------------------------------------- /verifacation.py: -------------------------------------------------------------------------------- 1 | """Helper for evaluation on the Labeled Faces in the Wild dataset 2 | """ 3 | 4 | # MIT License 5 | # 6 | # Copyright (c) 2016 David Sandberg 7 | # 8 | # Permission is hereby granted, free of charge, to any person obtaining a copy 9 | # of this software and associated documentation files (the "Software"), to deal 10 | # in the Software without restriction, including without limitation the rights 11 | # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 12 | # copies of the Software, and to permit persons to whom the Software is 13 | # furnished to do so, subject to the following conditions: 14 | # 15 | # The above copyright notice and this permission notice shall be included in all 16 | # copies or substantial portions of the Software. 17 | # 18 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 19 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 20 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 21 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 22 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 23 | # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 24 | # SOFTWARE. 
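# Illustrative usage of evaluate() defined at the bottom of this file (these comments are not part
# of the original source): embeddings must be interleaved so that rows 0, 2, 4, ... hold the first
# image of each pair and rows 1, 3, 5, ... the second, with one boolean issame entry per pair.
# The 512-dimensional embedding size below is an assumption (it stands in for conf.embedding_size).
#
#   import numpy as np
#   emb = np.random.randn(2 * 600, 512).astype(np.float32)       # 600 hypothetical pairs
#   emb /= np.linalg.norm(emb, axis=1, keepdims=True)            # unit length, like l2_norm()
#   issame = np.random.rand(600) > 0.5                           # hypothetical pair labels
#   tpr, fpr, accuracy, best_thresholds = evaluate(emb, issame, nrof_folds=10)
#   print(accuracy.mean(), best_thresholds.mean())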
25 | 26 | import numpy as np 27 | from sklearn.model_selection import KFold 28 | from sklearn.decomposition import PCA 29 | import sklearn 30 | from scipy import interpolate 31 | import datetime 32 | import mxnet as mx 33 | 34 | def calculate_roc(thresholds, embeddings1, embeddings2, actual_issame, nrof_folds=10, pca=0): 35 | assert (embeddings1.shape[0] == embeddings2.shape[0]) 36 | assert (embeddings1.shape[1] == embeddings2.shape[1]) 37 | nrof_pairs = min(len(actual_issame), embeddings1.shape[0]) 38 | nrof_thresholds = len(thresholds) 39 | k_fold = KFold(n_splits=nrof_folds, shuffle=False) 40 | 41 | tprs = np.zeros((nrof_folds, nrof_thresholds)) 42 | fprs = np.zeros((nrof_folds, nrof_thresholds)) 43 | accuracy = np.zeros((nrof_folds)) 44 | best_thresholds = np.zeros((nrof_folds)) 45 | indices = np.arange(nrof_pairs) 46 | # print('pca', pca) 47 | 48 | if pca == 0: 49 | diff = np.subtract(embeddings1, embeddings2) 50 | dist = np.sum(np.square(diff), 1) 51 | 52 | for fold_idx, (train_set, test_set) in enumerate(k_fold.split(indices)): 53 | # print('train_set', train_set) 54 | # print('test_set', test_set) 55 | if pca > 0: 56 | print('doing pca on', fold_idx) 57 | embed1_train = embeddings1[train_set] 58 | embed2_train = embeddings2[train_set] 59 | _embed_train = np.concatenate((embed1_train, embed2_train), axis=0) 60 | # print(_embed_train.shape) 61 | pca_model = PCA(n_components=pca) 62 | pca_model.fit(_embed_train) 63 | embed1 = pca_model.transform(embeddings1) 64 | embed2 = pca_model.transform(embeddings2) 65 | embed1 = sklearn.preprocessing.normalize(embed1) 66 | embed2 = sklearn.preprocessing.normalize(embed2) 67 | # print(embed1.shape, embed2.shape) 68 | diff = np.subtract(embed1, embed2) 69 | dist = np.sum(np.square(diff), 1) 70 | 71 | # Find the best threshold for the fold 72 | acc_train = np.zeros((nrof_thresholds)) 73 | for threshold_idx, threshold in enumerate(thresholds): 74 | _, _, acc_train[threshold_idx] = calculate_accuracy(threshold, dist[train_set], actual_issame[train_set]) 75 | best_threshold_index = np.argmax(acc_train) 76 | # print('best_threshold_index', best_threshold_index, acc_train[best_threshold_index]) 77 | best_thresholds[fold_idx] = thresholds[best_threshold_index] 78 | for threshold_idx, threshold in enumerate(thresholds): 79 | tprs[fold_idx, threshold_idx], fprs[fold_idx, threshold_idx], _ = calculate_accuracy(threshold, 80 | dist[test_set], 81 | actual_issame[ 82 | test_set]) 83 | _, _, accuracy[fold_idx] = calculate_accuracy(thresholds[best_threshold_index], dist[test_set], 84 | actual_issame[test_set]) 85 | 86 | tpr = np.mean(tprs, 0) 87 | fpr = np.mean(fprs, 0) 88 | return tpr, fpr, accuracy, best_thresholds 89 | 90 | 91 | def calculate_accuracy(threshold, dist, actual_issame): 92 | predict_issame = np.less(dist, threshold) 93 | tp = np.sum(np.logical_and(predict_issame, actual_issame)) 94 | fp = np.sum(np.logical_and(predict_issame, np.logical_not(actual_issame))) 95 | tn = np.sum(np.logical_and(np.logical_not(predict_issame), np.logical_not(actual_issame))) 96 | fn = np.sum(np.logical_and(np.logical_not(predict_issame), actual_issame)) 97 | 98 | tpr = 0 if (tp + fn == 0) else float(tp) / float(tp + fn) 99 | fpr = 0 if (fp + tn == 0) else float(fp) / float(fp + tn) 100 | acc = float(tp + tn) / dist.size 101 | return tpr, fpr, acc 102 | 103 | 104 | def calculate_val(thresholds, embeddings1, embeddings2, actual_issame, far_target, nrof_folds=10): 105 | ''' 106 | Copy from [insightface](https://github.com/deepinsight/insightface) 107 | :param thresholds: 
108 | :param embeddings1: 109 | :param embeddings2: 110 | :param actual_issame: 111 | :param far_target: 112 | :param nrof_folds: 113 | :return: 114 | ''' 115 | assert (embeddings1.shape[0] == embeddings2.shape[0]) 116 | assert (embeddings1.shape[1] == embeddings2.shape[1]) 117 | nrof_pairs = min(len(actual_issame), embeddings1.shape[0]) 118 | nrof_thresholds = len(thresholds) 119 | k_fold = KFold(n_splits=nrof_folds, shuffle=False) 120 | 121 | val = np.zeros(nrof_folds) 122 | far = np.zeros(nrof_folds) 123 | 124 | diff = np.subtract(embeddings1, embeddings2) 125 | dist = np.sum(np.square(diff), 1) 126 | indices = np.arange(nrof_pairs) 127 | 128 | for fold_idx, (train_set, test_set) in enumerate(k_fold.split(indices)): 129 | 130 | # Find the threshold that gives FAR = far_target 131 | far_train = np.zeros(nrof_thresholds) 132 | for threshold_idx, threshold in enumerate(thresholds): 133 | _, far_train[threshold_idx] = calculate_val_far(threshold, dist[train_set], actual_issame[train_set]) 134 | if np.max(far_train) >= far_target: 135 | f = interpolate.interp1d(far_train, thresholds, kind='slinear') 136 | threshold = f(far_target) 137 | else: 138 | threshold = 0.0 139 | 140 | val[fold_idx], far[fold_idx] = calculate_val_far(threshold, dist[test_set], actual_issame[test_set]) 141 | 142 | val_mean = np.mean(val) 143 | far_mean = np.mean(far) 144 | val_std = np.std(val) 145 | return val_mean, val_std, far_mean 146 | 147 | 148 | def calculate_val_far(threshold, dist, actual_issame): 149 | predict_issame = np.less(dist, threshold) 150 | true_accept = np.sum(np.logical_and(predict_issame, actual_issame)) 151 | false_accept = np.sum(np.logical_and(predict_issame, np.logical_not(actual_issame))) 152 | n_same = np.sum(actual_issame) 153 | n_diff = np.sum(np.logical_not(actual_issame)) 154 | val = float(true_accept) / float(n_same) 155 | far = float(false_accept) / float(n_diff) 156 | return val, far 157 | 158 | 159 | def evaluate(embeddings, actual_issame, nrof_folds=10, pca=0): 160 | # Calculate evaluation metrics 161 | thresholds = np.arange(0, 4, 0.01) 162 | embeddings1 = embeddings[0::2] 163 | embeddings2 = embeddings[1::2] 164 | tpr, fpr, accuracy, best_thresholds = calculate_roc(thresholds, embeddings1, embeddings2, 165 | np.asarray(actual_issame), nrof_folds=nrof_folds, pca=pca) 166 | # thresholds = np.arange(0, 4, 0.001) 167 | # val, val_std, far = calculate_val(thresholds, embeddings1, embeddings2, 168 | # np.asarray(actual_issame), 1e-3, nrof_folds=nrof_folds) 169 | # return tpr, fpr, accuracy, best_thresholds, val, val_std, far 170 | return tpr, fpr, accuracy, best_thresholds -------------------------------------------------------------------------------- /work_space/history: -------------------------------------------------------------------------------- 1 | /home/f/learning/face_studio/history -------------------------------------------------------------------------------- /work_space/log: -------------------------------------------------------------------------------- 1 | /home/f/learning/face_studio/log -------------------------------------------------------------------------------- /work_space/models: -------------------------------------------------------------------------------- 1 | /home/f/learning/face_studio/models -------------------------------------------------------------------------------- /work_space/save: -------------------------------------------------------------------------------- 1 | /home/f/learning/face_studio/save 
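The RNet and ONet classes shown earlier are thin wrappers around the pretrained .npy weight files, so they can be exercised in isolation. The sketch below is a minimal, illustrative forward pass through ONet on random 48x48 crops; it assumes the classes live in mtcnn_pytorch/src/get_nets.py, that the script is run from the repository root so the relative weight paths in the constructors resolve, and that numpy matches the pinned 1.14.x from requirements.txt (which still loads pickled .npy files by default).

import torch
from mtcnn_pytorch.src.get_nets import ONet

onet = ONet().eval()                          # __init__ loads mtcnn_pytorch/src/weights/onet.npy
crops = torch.randn(8, 3, 48, 48)             # stand-in for 8 candidate faces resized to 48x48 (ONet's input size)
with torch.no_grad():
    landmarks, offsets, probs = onet(crops)   # shapes [8, 10], [8, 4], [8, 2], matching the docstring
face_scores = probs[:, 1]                     # softmax is applied inside forward(); column 1 is the face probability
keep = face_scores > 0.9                      # hypothetical confidence cutoff
print(landmarks[keep].shape, offsets[keep].shape)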
--------------------------------------------------------------------------------
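Putting the pieces together, the helpers in matlab_cp2tform.py are what turn the 5-point landmarks predicted by ONet into an aligned 112x112 face crop for the embedding model. The sketch below is illustrative only: the landmark coordinates are made up, the 112x112 reference template is the commonly used ArcFace layout rather than values taken from this repository (the repo derives its own via get_reference_facial_points), and the image path is just one of the bundled test images.

import cv2
import numpy as np
from mtcnn_pytorch.src.matlab_cp2tform import get_similarity_transform_for_cv2

# made-up landmarks for one detected face: (x, y) of left eye, right eye, nose tip, mouth corners
src_pts = np.array([[130.0, 120.0], [190.0, 118.0], [160.0, 155.0],
                    [138.0, 190.0], [185.0, 188.0]], dtype=np.float32)

# widely used 112x112 reference template (assumption, not taken from this repo)
ref_pts = np.array([[38.2946, 51.6963], [73.5318, 51.5014], [56.0252, 71.7366],
                    [41.5493, 92.3655], [70.7299, 92.2041]], dtype=np.float32)

img = cv2.imread('mtcnn_pytorch/images/office1.jpg')          # any BGR test image; adjust the path as needed
tfm = get_similarity_transform_for_cv2(src_pts, ref_pts)      # 2x3 affine matrix, ready for cv2.warpAffine
aligned = cv2.warpAffine(img, tfm, (112, 112))                # aligned face chip for the embedding network
cv2.imwrite('aligned_face.jpg', aligned)

In the repository itself this step is wrapped by warp_and_crop_face from mtcnn_pytorch/src/align_trans.py, which take_pic.py imports alongside get_reference_facial_points.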