├── README.md
├── mask_img
    ├── mask_b.png
    └── test.jpg
├── mtcnn.py
├── pb
    └── mtcnn.pb
├── res.png
├── res
    ├── 001.jpg
    ├── 002.jpg
    ├── 003.jpg
    └── 004.jpg
├── test.py
├── test_img
    ├── 001.jpg
    ├── 002.jpg
    ├── 003.jpg
    ├── 004.jpg
    ├── 005.jpg
    ├── 006.jpg
    ├── 20190102164654147.png
    └── model.jpg
└── video.py

/README.md:
--------------------------------------------------------------------------------
1 | 
2 | This project implements face stickers:
3 | >(1) Face detection uses MTCNN. (tensorflow = 1.13.1)
4 | 
5 | >(2) The sticker is mapped onto the face with an affine transformation.
6 | 
7 | >(3) numba is used so that the face sticker runs in real time.
8 | 
9 | Running the code:
10 | ```
11 | git clone https://github.com/MachineLP/face_stickers
12 | cd face_stickers
13 | Test on an image: python test.py
14 | Test on video:    python video.py
15 | ```
16 | 
17 | 
18 | 
19 | The result looks like this:
20 | 
21 | ![show](./res.png)
22 | 
23 | 
24 | 
25 | 
26 | 
27 | 
28 | 
29 | 
30 | 
31 | 
--------------------------------------------------------------------------------
/mask_img/mask_b.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MachineLP/face_stickers/cf38263ede4008be2546c41277c24cd31d9538f8/mask_img/mask_b.png
--------------------------------------------------------------------------------
/mask_img/test.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MachineLP/face_stickers/cf38263ede4008be2546c41277c24cd31d9538f8/mask_img/test.jpg
--------------------------------------------------------------------------------
/mtcnn.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | 
3 | import tensorflow as tf
4 | import cv2
5 | 
6 | 
7 | # tensorflow = 1.13.1
8 | class MTCNN:
9 | 
10 |     def __init__(self, model_path, min_size=40, factor=0.709, thresholds=[0.6, 0.7, 0.7]):
11 |         self.min_size = min_size
12 |         self.factor = factor
13 |         self.thresholds = thresholds
14 | 
15 |         graph = tf.Graph()
16 |         with graph.as_default():
17 |             with open(model_path, 'rb') as f:
18 |                 graph_def = tf.GraphDef.FromString(f.read())
19 |                 tf.import_graph_def(graph_def, name='')
20 |         self.graph = graph
21 |         config = tf.ConfigProto(
22 |             allow_soft_placement=True,
23 |             intra_op_parallelism_threads=4,
24 |             inter_op_parallelism_threads=4)
25 |         config.gpu_options.allow_growth = True
26 |         self.sess = tf.Session(graph=graph, config=config)
27 | 
28 |     def detect(self, img):
29 |         feeds = {
30 |             self.graph.get_operation_by_name('input').outputs[0]: img,
31 |             self.graph.get_operation_by_name('min_size').outputs[0]: self.min_size,
32 |             self.graph.get_operation_by_name('thresholds').outputs[0]: self.thresholds,
33 |             self.graph.get_operation_by_name('factor').outputs[0]: self.factor
34 |         }
35 |         fetches = [self.graph.get_operation_by_name('prob').outputs[0],
36 |                    self.graph.get_operation_by_name('landmarks').outputs[0],
37 |                    self.graph.get_operation_by_name('box').outputs[0]]
38 |         prob, landmarks, box = self.sess.run(fetches, feeds)
39 |         return box, prob, landmarks
40 | 
41 | 
42 | def main(args):
43 |     mtcnn = MTCNN('./pb/mtcnn.pb')  # the frozen graph lives under pb/ in this repo
44 |     img = cv2.imread(args.image)
45 | 
46 |     bbox, scores, landmarks = mtcnn.detect(img)
47 | 
48 |     print('total box:', len(bbox))
49 |     for box, pts in zip(bbox, landmarks):
50 |         box = box.astype('int32')
51 |         img = cv2.rectangle(img, (box[1], box[0]), (box[3], box[2]), (255, 0, 0), 3)
52 | 
53 |         pts = pts.astype('int32')
54 |         for i in range(5):
55 |             img = cv2.circle(img, (pts[i + 5], pts[i]), 1, (0, 255, 0), 2)
56 |     cv2.imshow('image', img)
57 |     cv2.waitKey(0)
58 | 
59 | 
60 | if __name__ == '__main__':
61 |     parser = argparse.ArgumentParser(description='tensorflow mtcnn')
62 |     parser.add_argument('image', help='image path')
63 |     args = parser.parse_args()
64 |     main(args)
65 | 
66 | 
--------------------------------------------------------------------------------
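The frozen graph returns detections in (y, x) order: each box is [y1, x1, y2, x2] and each landmark vector is [y1..y5, x1..x5], which is why the drawing code above (and test.py below) swaps the indices before calling OpenCV. Below is a minimal usage sketch, not part of the repo, that assumes the bundled pb/mtcnn.pb graph and one of the bundled test images; the output filename detections.jpg is made up.

```python
# Minimal usage sketch for the MTCNN wrapper above (not part of the repo).
import cv2
from mtcnn import MTCNN

detector = MTCNN('./pb/mtcnn.pb')
img = cv2.imread('./test_img/001.jpg')

# boxes: [y1, x1, y2, x2] per face; landmarks: [y1..y5, x1..x5] per face
boxes, scores, landmarks = detector.detect(img)
for box, score, pts in zip(boxes, scores, landmarks):
    y1, x1, y2, x2 = box.astype('int32')
    cv2.rectangle(img, (x1, y1), (x2, y2), (0, 0, 255), 2)
    for i in range(5):  # five landmarks: eyes, nose, mouth corners
        cv2.circle(img, (int(pts[i + 5]), int(pts[i])), 2, (0, 255, 0), -1)
    print('face score:', score)
cv2.imwrite('detections.jpg', img)
```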
/pb/mtcnn.pb:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MachineLP/face_stickers/cf38263ede4008be2546c41277c24cd31d9538f8/pb/mtcnn.pb
--------------------------------------------------------------------------------
/res.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MachineLP/face_stickers/cf38263ede4008be2546c41277c24cd31d9538f8/res.png
--------------------------------------------------------------------------------
/res/001.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MachineLP/face_stickers/cf38263ede4008be2546c41277c24cd31d9538f8/res/001.jpg
--------------------------------------------------------------------------------
/res/002.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MachineLP/face_stickers/cf38263ede4008be2546c41277c24cd31d9538f8/res/002.jpg
--------------------------------------------------------------------------------
/res/003.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MachineLP/face_stickers/cf38263ede4008be2546c41277c24cd31d9538f8/res/003.jpg
--------------------------------------------------------------------------------
/res/004.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MachineLP/face_stickers/cf38263ede4008be2546c41277c24cd31d9538f8/res/004.jpg
--------------------------------------------------------------------------------
/test.py:
--------------------------------------------------------------------------------
1 | import cv2
2 | import time
3 | from numba import njit
4 | import numpy as np
5 | from PIL import Image
6 | from mtcnn import MTCNN
7 | from numba import jit, prange
8 | 
9 | 
10 | # Affine transform used to map sticker points onto face points
11 | def get_text_trans_matrix(x1, y1, x2, y2, x3, y3, tx1, ty1, tx2, ty2, tx3, ty3):
12 |     # Affine transform
13 |     return cv2.getAffineTransform( np.float32([ [tx1, ty1], [tx2, ty2], [tx3, ty3] ]), np.float32( [ [x1, y1], [x2, y2], [x3, y3] ]) ).flatten()
14 |     # Perspective transform (alternative)
15 |     # return cv2.getPerspectiveTransform( np.float32([ [tx1, ty1], [tx2, ty2], [tx3, ty3] ]), np.float32( [ [x1, y1], [x2, y2], [x3, y3] ]) ).flatten()
16 | 
17 | @jit(nopython=True)
18 | def sticker(srcData, width, height, stride, mask, maskWidth, maskHeight, maskStride, srcFacePoints, maskFacePoints, H):
19 |     def CLIP3(x, a, b):
20 |         return min(max(a, x), b)
21 |     # For every image pixel, look up the corresponding sticker pixel and alpha-blend it
22 |     for i in range(height):
23 |         # for i in prange(height):
24 |         for j in range(width):
25 |             x = float(i)
26 |             y = float(j)
27 |             tx = (int)((H[0] * (x) + H[1] * (y) + H[2]) + 0.5)
28 |             ty = (int)((H[3] * (x) + H[4] * (y) + H[5]) + 0.5)
29 |             tx = CLIP3(tx, 0, maskHeight - 1)
30 |             ty = CLIP3(ty, 0, maskWidth - 1)
31 |             mr = int( mask[ int(tx), int(ty), 0 ] )
32 |             mg = int( mask[ int(tx), int(ty), 1 ] )
33 |             mb = int( mask[ int(tx), int(ty), 2 ] )
34 |             alpha = int( mask[ int(tx), int(ty), 3 ] )
35 |             #if alpha!=0:
36 |             #    print( '>>>', alpha )
37 |             b = srcData[i, j, 0]
38 |             g = srcData[i, j, 1]
39 |             r = srcData[i, j, 2]
40 |             srcData[i, j, 0] = CLIP3((b * (255 - alpha) + mb * alpha) / 255, 0, 255)
41 |             srcData[i, j, 1] = CLIP3((g * (255 - alpha) + mg * alpha) / 255, 0, 255)
42 |             srcData[i, j, 2] = CLIP3((r * (255 - alpha) + mr * alpha) / 255, 0, 255)
43 |     return srcData
44 | 
45 | 
46 | # @jit(parallel=True,nogil=True)
47 | # @njit(parallel=True,nogil=True)
48 | def trent_sticker(srcData, width, height, stride, mask, maskWidth, maskHeight, maskStride, srcFacePoints, maskFacePoints, ratio):
49 |     ret = 0
50 |     H = get_text_trans_matrix( maskFacePoints[0], maskFacePoints[1], maskFacePoints[2], maskFacePoints[3], maskFacePoints[4], maskFacePoints[5], srcFacePoints[0], srcFacePoints[1], srcFacePoints[2], srcFacePoints[3], srcFacePoints[4], srcFacePoints[5] )
51 |     #print ('H', H)
52 |     srcData = sticker(srcData, width, height, stride, mask, maskWidth, maskHeight, maskStride, srcFacePoints, maskFacePoints, H)
53 |     return srcData, ret
54 | 
55 | 
56 | 
57 | img = Image.open('./mask_img/mask_b.png')
58 | r, g, b, a = img.split()  # split the 4 channels
59 | print (r, g, b, a)
60 | im_array = np.array(img)
61 | # height, width, channels (coordinates are indexed as y, x)
62 | mask_h, mask_w, mask_c = im_array.shape
63 | print ('>>>>>>>', mask_h, mask_w, mask_c)
64 | 
65 | 
66 | mtcnn = MTCNN('./pb/mtcnn.pb')
67 | img = cv2.imread( './test_img/004.jpg' )
68 | # height, width, channels (coordinates are indexed as y, x)
69 | h, w, c = img.shape
70 | print ('>>>>>>>', h, w, c)
71 | bbox, scores, landmarks = mtcnn.detect(img)
72 | print ('>>>>>', len( bbox) )
73 | for box, pts in zip(bbox, landmarks):
74 |     faceInfos = np.array( [ 1, box[1], box[0], box[3] - box[1], box[2] - box[0], pts[5], pts[0], pts[6], pts[1], pts[7], pts[2], pts[8], pts[3], pts[9], pts[4] ] )
75 | 
76 | 
77 | # left eye
78 | # right eye
79 | # midpoint between the two mouth corners
80 | # coordinates as x, y
81 | # srcFacePoints = np.array( [faceInfos[5], faceInfos[6], faceInfos[7], faceInfos[8], (faceInfos[11] + faceInfos[13])/2.0, (faceInfos[12]+faceInfos[14])/2.0 ])
82 | # coordinates converted to y, x
83 | srcFacePoints = np.array( [faceInfos[6], faceInfos[5], faceInfos[8], faceInfos[7], (faceInfos[12]+faceInfos[14])/2.0, (faceInfos[11] + faceInfos[13])/2.0 ] )
84 | print ('srcFacePoints:', srcFacePoints)
85 | 
86 | # 307, 364, 423, 364, 365, 490
87 | # left eye
88 | # right eye
89 | # midpoint between the two mouth corners
90 | # coordinates as x, y
91 | # maskFacePoints = np.array( [ 307.0, 364.0, 423.0, 364.0, 365.0, 490.0 ] )
92 | # coordinates as y, x
93 | maskFacePoints = np.array( [ 364.0, 307.0, 364.0, 423.0, 490.0, 365.0 ] )
94 | print ('maskFacePoints:', maskFacePoints)
95 | 
96 | 
97 | ##### trent_sticker(srcData, width, height, stride, mask, maskWidth, maskHeight, maskStride, srcFacePoints, maskFacePoints, ratio)
98 | start_time = time.time()
99 | srcData, ret = trent_sticker( img, w, h, 3, im_array, mask_w, mask_h, 4, srcFacePoints, maskFacePoints, 100 )
100 | print ( 'time >>>>', time.time() - start_time )
101 | img_mask = np.array(srcData, dtype=np.uint8)
102 | cv2.imwrite('res.jpg', img_mask)
103 | 
104 | 
105 | 
106 | 
107 | 
108 | #global pts
109 | #global box
110 | pts = [0,0,0,0,0,0,0,0,0,0]
111 | box = [0,0,0,0]
112 | def interface(img):
113 |     global pts, box
114 |     h, w, c = img.shape
115 |     bbox, scores, landmarks = mtcnn.detect(img)
116 | 
117 |     try:
118 |         # for box, pts in zip(bbox, landmarks):
119 |         # box = bbox[0]; pts = landmarks[0]
120 |         if abs( bbox[0][0] - box[0]) > 7 or abs( bbox[0][1] - box[1]) > 7 or abs( bbox[0][2] - box[2]) > 7 or abs( bbox[0][3] - box[3]) > 7:
121 |             box = bbox[0]; pts = landmarks[0]
122 |         faceInfos_t = np.array( [ 1, box[1], box[0], box[3] - box[1], box[2] - box[0], pts[5], pts[0], pts[6], pts[1], pts[7], pts[2], pts[8], pts[3], pts[9], pts[4] ] )
123 |         srcFacePoints = np.array( [faceInfos_t[6], faceInfos_t[5], faceInfos_t[8], faceInfos_t[7], (faceInfos_t[12]+faceInfos_t[14])/2.0, (faceInfos_t[11] + faceInfos_t[13])/2.0 ] )
124 |         srcData, ret = trent_sticker( img, w, h, 3, im_array, mask_w, mask_h, 4, srcFacePoints, maskFacePoints, 100 )
125 |     except Exception:  # no face detected: return the frame unchanged
126 |         srcData = img
127 | 
128 |     return srcData
129 | 
130 | 
131 | 
--------------------------------------------------------------------------------
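In test.py the @jit-compiled `sticker` kernel visits every pixel of the face image, maps it into sticker coordinates with the flattened affine matrix `H` (built from three point pairs: left eye, right eye, and the midpoint of the mouth corners), and alpha-blends the RGBA sticker over the BGR frame, swapping the red and blue channels along the way. The same result can be obtained with OpenCV's built-in warp, which is a handy cross-check of the per-pixel loop. The sketch below is not part of the repo; the function name `overlay_sticker` is made up, and unlike test.py it expects the three anchor points in (x, y) order.

```python
# Vectorized equivalent of the sticker blend (a sketch, not the repo's implementation).
import cv2
import numpy as np

def overlay_sticker(face_bgr, sticker_rgba, face_pts_xy, sticker_pts_xy):
    """face_pts_xy / sticker_pts_xy: three (x, y) anchor points
    (left eye, right eye, mouth centre) in the face image / sticker."""
    h, w = face_bgr.shape[:2]
    # Affine transform that carries sticker coordinates onto face coordinates.
    M = cv2.getAffineTransform(np.float32(sticker_pts_xy), np.float32(face_pts_xy))
    warped = cv2.warpAffine(sticker_rgba, M, (w, h))       # RGBA sticker in the face frame
    alpha = warped[:, :, 3:4].astype(np.float32) / 255.0   # per-pixel opacity, 0 outside the sticker
    # The sticker is RGBA (loaded with PIL), the frame is BGR (loaded with cv2),
    # so reverse the colour channels before blending.
    sticker_bgr = warped[:, :, 2::-1].astype(np.float32)
    blended = face_bgr.astype(np.float32) * (1.0 - alpha) + sticker_bgr * alpha
    return blended.astype(np.uint8)
```

Note that test.py builds the matrix in the opposite direction (face pixel to sticker pixel) because its loop iterates over face pixels; both formulations describe the same transform. The `interface` function above also keeps the previous box and landmarks and only updates them when the new box moves by more than 7 pixels, which reduces sticker jitter between video frames.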
/test_img/001.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MachineLP/face_stickers/cf38263ede4008be2546c41277c24cd31d9538f8/test_img/001.jpg
--------------------------------------------------------------------------------
/test_img/002.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MachineLP/face_stickers/cf38263ede4008be2546c41277c24cd31d9538f8/test_img/002.jpg
--------------------------------------------------------------------------------
/test_img/003.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MachineLP/face_stickers/cf38263ede4008be2546c41277c24cd31d9538f8/test_img/003.jpg
--------------------------------------------------------------------------------
/test_img/004.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MachineLP/face_stickers/cf38263ede4008be2546c41277c24cd31d9538f8/test_img/004.jpg
--------------------------------------------------------------------------------
/test_img/005.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MachineLP/face_stickers/cf38263ede4008be2546c41277c24cd31d9538f8/test_img/005.jpg
--------------------------------------------------------------------------------
/test_img/006.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MachineLP/face_stickers/cf38263ede4008be2546c41277c24cd31d9538f8/test_img/006.jpg
--------------------------------------------------------------------------------
/test_img/20190102164654147.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MachineLP/face_stickers/cf38263ede4008be2546c41277c24cd31d9538f8/test_img/20190102164654147.png
--------------------------------------------------------------------------------
/test_img/model.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MachineLP/face_stickers/cf38263ede4008be2546c41277c24cd31d9538f8/test_img/model.jpg
--------------------------------------------------------------------------------
/video.py:
--------------------------------------------------------------------------------
1 | import cv2
2 | import sys
3 | from test import interface
4 | 
5 | 
6 | if __name__ == '__main__' :
7 | 
8 |     # Read video
9 |     # video = cv2.VideoCapture("video/WeChatSight1395.mp4")
10 |     video = cv2.VideoCapture(0)
11 | 
12 |     # Exit if video not opened.
13 |     if not video.isOpened():
14 |         print ("Could not open video")
15 |         sys.exit()
16 | 
17 |     # Read the first few frames (lets the camera settle before grabbing the frame size).
18 |     for i in range(10):
19 |         ok, frame = video.read()
20 | 
21 |     h, w, c = frame.shape
22 | 
23 |     while True:
24 |         # Read a new frame
25 |         ok, frame = video.read()
26 |         if not ok:
27 |             break
28 |         frame = cv2.resize( frame, (w//2, h//2) )
29 |         frame = interface(frame)
30 | 
31 |         # Display result
32 |         cv2.imshow("Tracking", frame)
33 | 
34 |         # Exit if ESC pressed
35 |         k = cv2.waitKey(1) & 0xff
36 |         if k == 27 : break
37 | 
38 | 
--------------------------------------------------------------------------------
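A practical note on the numba path promised in the README: the `sticker` kernel is JIT-compiled the first time it is called, so the timing printed by test.py and the first sticker frame of the video loop include compilation time. A warm-up call on tiny dummy arrays with the same dtypes, sketched below, moves that cost out of the live loop. This snippet is not part of the repo, and importing test also runs its image demo once, which video.py already relies on.

```python
# Hypothetical warm-up sketch (not in the repo): trigger JIT compilation of the
# numba kernel once, before any latency-sensitive per-frame processing.
import numpy as np
from test import sticker, get_text_trans_matrix

dummy_frame = np.zeros((8, 8, 3), dtype=np.uint8)   # tiny BGR frame
dummy_mask = np.zeros((8, 8, 4), dtype=np.uint8)    # tiny RGBA sticker
dummy_pts = np.zeros(6, dtype=np.float64)           # not read inside the kernel
# Affine matrix from three non-collinear point pairs (identity here).
H = get_text_trans_matrix(0, 0, 4, 0, 0, 4, 0, 0, 4, 0, 0, 4)
sticker(dummy_frame, 8, 8, 3, dummy_mask, 8, 8, 4, dummy_pts, dummy_pts, H)
```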