├── head.jpg
├── onet.onnx
├── pnet.onnx
├── rnet.onnx
├── Points5_Net1.onnx
├── Points5_Net2.onnx
├── Points81_Net1.onnx
├── Points81_Net2.onnx
├── points5_result.jpg
├── points81_result.jpg
├── Points5_Net_all.onnx
├── Points81_Net_all.onnx
├── README.md
├── test_points5_net.py
└── test_points81_net.py

/head.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zjd1988/seetaface2_onnx_model/HEAD/head.jpg
--------------------------------------------------------------------------------
/onet.onnx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zjd1988/seetaface2_onnx_model/HEAD/onet.onnx
--------------------------------------------------------------------------------
/pnet.onnx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zjd1988/seetaface2_onnx_model/HEAD/pnet.onnx
--------------------------------------------------------------------------------
/rnet.onnx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zjd1988/seetaface2_onnx_model/HEAD/rnet.onnx
--------------------------------------------------------------------------------
/Points5_Net1.onnx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zjd1988/seetaface2_onnx_model/HEAD/Points5_Net1.onnx
--------------------------------------------------------------------------------
/Points5_Net2.onnx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zjd1988/seetaface2_onnx_model/HEAD/Points5_Net2.onnx
--------------------------------------------------------------------------------
/Points81_Net1.onnx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zjd1988/seetaface2_onnx_model/HEAD/Points81_Net1.onnx
--------------------------------------------------------------------------------
/Points81_Net2.onnx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zjd1988/seetaface2_onnx_model/HEAD/Points81_Net2.onnx
--------------------------------------------------------------------------------
/points5_result.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zjd1988/seetaface2_onnx_model/HEAD/points5_result.jpg
--------------------------------------------------------------------------------
/points81_result.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zjd1988/seetaface2_onnx_model/HEAD/points81_result.jpg
--------------------------------------------------------------------------------
/Points5_Net_all.onnx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zjd1988/seetaface2_onnx_model/HEAD/Points5_Net_all.onnx
--------------------------------------------------------------------------------
/Points81_Net_all.onnx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zjd1988/seetaface2_onnx_model/HEAD/Points81_Net_all.onnx
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# seetaface2_onnx_model
Contains the SeetaFace2 face detection, 5/81-point facial landmark, and face recognition models in ONNX format.

1. pnet.onnx, rnet.onnx, and onet.onnx handle face detection.

2. The 5-point and 81-point landmark detectors are each split into two models, Points5/81_Net1.onnx and Points5/81_Net2.onnx. The split is needed because the network contains a shapeIndexPatch operator (for details, see the seetaface2 source code or the Python implementation in the test scripts). The split models can be converted directly to MNN or NCNN; the full network structure is in Points5/81_Net_all.onnx.

3. The face feature extraction model is too large to host here, so it is on Baidu Netdisk: https://pan.baidu.com/s/1R4rEpYxN3_GlZBBsOcWViQ (extraction code: awb8).
--------------------------------------------------------------------------------
/test_points5_net.py:
--------------------------------------------------------------------------------
import onnxruntime
import numpy as np
import cv2

# patch size and input size of the original 112x112 network
m_origin_patch = [15, 15]
m_origin = [112, 112]


class HypeShape:
    """Row-major (C-order) flat indexing for an n-dimensional shape."""
    def __init__(self, shape):
        self.m_shape = shape
        self.m_weights = [0] * len(self.m_shape)
        size = len(self.m_shape)
        # m_weights[i] is the product of shape[i:]
        self.m_weights[size - 1] = self.m_shape[size - 1]
        for times in range(size - 1):
            self.m_weights[size - 1 - times - 1] = self.m_weights[size - 1 - times] * self.m_shape[size - 1 - times - 1]

    def to_index(self, coordinate):
        if len(coordinate) == 0:
            return 0
        size = len(coordinate)
        weight_start = len(self.m_weights) - size + 1
        index = 0
        for times in range(size - 1):
            index += self.m_weights[weight_start + times] * coordinate[times]
        index += coordinate[size - 1]
        return index


def shape_index_process(feat_data, pos_data):
    """Python port of SeetaFace2's shapeIndexPatch operator: crop a small
    feature-map patch around each predicted landmark and pack the patches
    into one tensor for the second-stage network."""
    feat_h = feat_data.shape[2]
    feat_w = feat_data.shape[3]

    landmarkx2 = pos_data.shape[1]
    # patch size scaled from the 112x112 input down to the feature map
    x_patch_h = int(m_origin_patch[0] * feat_data.shape[2] / float(m_origin[0]) + 0.5)
    x_patch_w = int(m_origin_patch[1] * feat_data.shape[3] / float(m_origin[1]) + 0.5)

    feat_patch_h = x_patch_h
    feat_patch_w = x_patch_w

    num = feat_data.shape[0]
    channels = feat_data.shape[1]

    r_h = (feat_patch_h - 1) / 2.0
    r_w = (feat_patch_w - 1) / 2.0
    landmark_num = int(landmarkx2 * 0.5)

    pos_offset = HypeShape([pos_data.shape[0], pos_data.shape[1]])
    feat_offset = HypeShape([feat_data.shape[0], feat_data.shape[1], feat_data.shape[2], feat_data.shape[3]])
    nmarks = int(landmarkx2 * 0.5)
    out_shape = [feat_data.shape[0], feat_data.shape[1], x_patch_h, nmarks, x_patch_w]
    out_offset = HypeShape(out_shape)
    buff = np.zeros(out_shape)
    zero = 0

    buff = buff.reshape((-1))
    pos_data = pos_data.reshape((-1))
    feat_data = feat_data.reshape((-1))

    for i in range(landmark_num):
        for n in range(num):
            # top-left corner of the patch, scaled to feature-map coordinates
            y = int(pos_data[pos_offset.to_index([n, 2 * i + 1])] * (feat_h - 1) - r_h + 0.5)
            x = int(pos_data[pos_offset.to_index([n, 2 * i])] * (feat_w - 1) - r_w + 0.5)

            for c in range(channels):
                for ph in range(feat_patch_h):
                    for pw in range(feat_patch_w):
                        y_p = y + ph
                        x_p = x + pw
                        # zero-fill where the patch exceeds the feature-map bounds
                        if y_p < 0 or y_p >= feat_h or x_p < 0 or x_p >= feat_w:
                            buff[out_offset.to_index([n, c, ph, i, pw])] = zero
                        else:
                            buff[out_offset.to_index([n, c, ph, i, pw])] = feat_data[feat_offset.to_index([n, c, y_p, x_p])]

    return buff.reshape((1, -1, 1, 1)).astype(np.float32)

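# Added sanity check (illustration, not part of the original script):
# HypeShape.to_index computes a row-major flat index, so for a full-rank
# coordinate it must agree with numpy's ravel_multi_index.
_hs = HypeShape([2, 3, 4, 5])
assert _hs.to_index([1, 2, 3, 4]) == np.ravel_multi_index((1, 2, 3, 4), (2, 3, 4, 5))
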
devices = onnxruntime.get_device()  # e.g. 'CPU' or 'GPU' (unused below)
session = onnxruntime.InferenceSession("./Points5_Net1.onnx")
first_input_name = session.get_inputs()[0].name

# head.jpg is expected to be a 112x112 face crop
test_img = cv2.imread("./head.jpg")
gray_img = cv2.cvtColor(test_img, cv2.COLOR_BGR2GRAY)
gray_img = gray_img.reshape((1, 1, 112, 112)).astype(np.float32)
# points5 net1: feature map (output 0) and coarse landmarks (output 1)
results_1 = session.run([], {first_input_name: gray_img})

# shape index process
feat_data = results_1[0]
pos_data = results_1[1]
shape_index_results = shape_index_process(feat_data, pos_data)

# points5 net2: refine the landmarks from the cropped patches
session = onnxruntime.InferenceSession("./Points5_Net2.onnx")
first_input_name = session.get_inputs()[0].name
results_2 = session.run([], {first_input_name: shape_index_results})

# net2 predicts offsets relative to net1's coarse positions; scale back to pixels
landmarks = (results_2[0] + results_1[1]) * 112
landmarks = landmarks.reshape((-1)).astype(np.int32)

point_size = 1
point_color = (0, 0, 255)  # BGR
thickness = 4  # can be 0, 4, or 8
for i in range(landmarks.size // 2):
    point = (int(landmarks[2 * i]), int(landmarks[2 * i + 1]))
    cv2.circle(test_img, point, point_size, point_color, thickness)

cv2.imwrite("points5_result.jpg", test_img)
print(landmarks)
--------------------------------------------------------------------------------
/test_points81_net.py:
--------------------------------------------------------------------------------
import onnxruntime
import numpy as np
import cv2

# patch size and input size of the original 112x112 network
m_origin_patch = [15, 15]
m_origin = [112, 112]


class HypeShape:
    """Row-major (C-order) flat indexing for an n-dimensional shape."""
    def __init__(self, shape):
        self.m_shape = shape
        self.m_weights = [0] * len(self.m_shape)
        size = len(self.m_shape)
        # m_weights[i] is the product of shape[i:]
        self.m_weights[size - 1] = self.m_shape[size - 1]
        for times in range(size - 1):
            self.m_weights[size - 1 - times - 1] = self.m_weights[size - 1 - times] * self.m_shape[size - 1 - times - 1]

    def to_index(self, coordinate):
        if len(coordinate) == 0:
            return 0
        size = len(coordinate)
        weight_start = len(self.m_weights) - size + 1
        index = 0
        for times in range(size - 1):
            index += self.m_weights[weight_start + times] * coordinate[times]
        index += coordinate[size - 1]
        return index


def shape_index_process(feat_data, pos_data):
    """Python port of SeetaFace2's shapeIndexPatch operator: crop a small
    feature-map patch around each predicted landmark and pack the patches
    into one tensor for the second-stage network."""
    feat_h = feat_data.shape[2]
    feat_w = feat_data.shape[3]

    landmarkx2 = pos_data.shape[1]
    # patch size scaled from the 112x112 input down to the feature map
    x_patch_h = int(m_origin_patch[0] * feat_data.shape[2] / float(m_origin[0]) + 0.5)
    x_patch_w = int(m_origin_patch[1] * feat_data.shape[3] / float(m_origin[1]) + 0.5)

    feat_patch_h = x_patch_h
    feat_patch_w = x_patch_w

    num = feat_data.shape[0]
    channels = feat_data.shape[1]

    r_h = (feat_patch_h - 1) / 2.0
    r_w = (feat_patch_w - 1) / 2.0
    landmark_num = int(landmarkx2 * 0.5)

    pos_offset = HypeShape([pos_data.shape[0], pos_data.shape[1]])
    feat_offset = HypeShape([feat_data.shape[0], feat_data.shape[1], feat_data.shape[2], feat_data.shape[3]])
    nmarks = int(landmarkx2 * 0.5)
    out_shape = [feat_data.shape[0], feat_data.shape[1], x_patch_h, nmarks, x_patch_w]
    out_offset = HypeShape(out_shape)
    buff = np.zeros(out_shape)
    zero = 0

    buff = buff.reshape((-1))
    pos_data = pos_data.reshape((-1))
    feat_data = feat_data.reshape((-1))

    for i in range(landmark_num):
        for n in range(num):
            # top-left corner of the patch, scaled to feature-map coordinates
            y = int(pos_data[pos_offset.to_index([n, 2 * i + 1])] * (feat_h - 1) - r_h + 0.5)
            x = int(pos_data[pos_offset.to_index([n, 2 * i])] * (feat_w - 1) - r_w + 0.5)

            for c in range(channels):
                for ph in range(feat_patch_h):
                    for pw in range(feat_patch_w):
                        y_p = y + ph
                        x_p = x + pw
                        # zero-fill where the patch exceeds the feature-map bounds
                        if y_p < 0 or y_p >= feat_h or x_p < 0 or x_p >= feat_w:
                            buff[out_offset.to_index([n, c, ph, i, pw])] = zero
                        else:
                            buff[out_offset.to_index([n, c, ph, i, pw])] = feat_data[feat_offset.to_index([n, c, y_p, x_p])]

    return buff.reshape((1, -1, 1, 1)).astype(np.float32)

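# Added illustration (a sketch, not part of the original script): a vectorized
# equivalent of shape_index_process using zero-padding and array slicing.
# It assumes the normalized landmark coordinates lie roughly in [0, 1], so a
# pad of one patch size on each side covers every out-of-bounds read; under
# that assumption it should produce the same tensor as shape_index_process.
def shape_index_process_fast(feat_data, pos_data):
    n, c, feat_h, feat_w = feat_data.shape
    nmarks = pos_data.shape[1] // 2
    patch_h = int(m_origin_patch[0] * feat_h / float(m_origin[0]) + 0.5)
    patch_w = int(m_origin_patch[1] * feat_w / float(m_origin[1]) + 0.5)
    r_h = (patch_h - 1) / 2.0
    r_w = (patch_w - 1) / 2.0
    # top-left corner of each patch on the feature map (may be negative);
    # float-to-int cast truncates toward zero, matching int() in the loop version
    ys = (pos_data[:, 1::2] * (feat_h - 1) - r_h + 0.5).astype(np.int64)
    xs = (pos_data[:, 0::2] * (feat_w - 1) - r_w + 0.5).astype(np.int64)
    # zero-pad so out-of-bounds reads come back as 0, then shift the corners
    pad = max(patch_h, patch_w)
    padded = np.pad(feat_data, ((0, 0), (0, 0), (pad, pad), (pad, pad)))
    out = np.zeros((n, c, patch_h, nmarks, patch_w), dtype=np.float32)
    for b in range(n):
        for i in range(nmarks):
            y0 = ys[b, i] + pad
            x0 = xs[b, i] + pad
            out[b, :, :, i, :] = padded[b, :, y0:y0 + patch_h, x0:x0 + patch_w]
    return out.reshape((1, -1, 1, 1)).astype(np.float32)
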
devices = onnxruntime.get_device()  # e.g. 'CPU' or 'GPU' (unused below)
session = onnxruntime.InferenceSession("./Points81_Net1.onnx")
first_input_name = session.get_inputs()[0].name

# head.jpg is expected to be a 112x112 face crop
test_img = cv2.imread("./head.jpg")
gray_img = cv2.cvtColor(test_img, cv2.COLOR_BGR2GRAY)
gray_img = gray_img.reshape((1, 1, 112, 112)).astype(np.float32)
# points81 net1: feature map (output 0) and coarse landmarks (output 1)
results_1 = session.run([], {first_input_name: gray_img})

# shape index process
feat_data = results_1[0]
pos_data = results_1[1]
shape_index_results = shape_index_process(feat_data, pos_data)

# points81 net2: refine the landmarks from the cropped patches
session = onnxruntime.InferenceSession("./Points81_Net2.onnx")
first_input_name = session.get_inputs()[0].name
results_2 = session.run([], {first_input_name: shape_index_results})

# net2 predicts offsets relative to net1's coarse positions; scale back to pixels
landmarks = (results_2[0] + results_1[1]) * 112
landmarks = landmarks.reshape((-1)).astype(np.int32)

point_size = 1
point_color = (0, 0, 255)  # BGR
thickness = 4  # can be 0, 4, or 8
for i in range(landmarks.size // 2):
    point = (int(landmarks[2 * i]), int(landmarks[2 * i + 1]))
    cv2.circle(test_img, point, point_size, point_color, thickness)

cv2.imwrite("points81_result.jpg", test_img)
print(landmarks)
--------------------------------------------------------------------------------