├── .gitignore
├── LICENSE
├── README.md
├── data
│   ├── 1.jpeg
│   └── result.jpg
├── demo.py
├── face_detector.py
├── face_landmark.py
└── model
    ├── FaceDetector.onnx
    └── FaceLandmark.onnx
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
pip-wheel-metadata/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
.python-version

# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock

# PEP 582; used by e.g. github.com/David-OConnor/pyflow
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
MIT License

Copyright (c) 2020 you-old

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# face_landmark_1000

1000-point facial landmark localization. The project bundles a face detector, a facial landmark model, and the pretrained ONNX weights for both ([original author](https://github.com/you-old/FaceLandmark1000)).

## The result:

![result](data/result.jpg)

## How to Run

> `python3 demo.py cap`  # run on the webcam

> `python3 demo.py`  # run on the sample image

Any extra argument (such as `cap`) selects the webcam/video path; with no argument the demo runs on `data/1.jpeg`.

## Model

Pretrained ONNX models live in `./model`.
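
## Programmatic usage

A minimal sketch of driving the two modules directly, mirroring what `demo.py` does (it assumes the repo root as working directory so the `model/*.onnx` paths resolve):

```python
import cv2
from face_detector import FaceDetector
from face_landmark import FaceLandmark

detector = FaceDetector()
landmarker = FaceLandmark()

image = cv2.imread('data/1.jpeg')
detections, _ = detector.run(image)  # rows are [x1, y1, x2, y2, score]
for detection in detections:
    landmarks, states = landmarker.run(image, detection)
    if landmarks is None:
        continue  # face smaller than the landmark model's minimum size
    print(landmarks.shape)  # (num_points, 2) in original image coordinates
```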
## To Do List

- [x] Face detection module
- [x] Face landmark module
- [x] Web service
- [ ] Face tracking module
- [ ] Train module

## Reference:

1. RetinaFace https://github.com/biubug6/Pytorch_Retinaface
--------------------------------------------------------------------------------
/data/1.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Single430/FaceLandmark1000/1a951b612e7138d490fa8fd8b932506b7dd6451f/data/1.jpeg
--------------------------------------------------------------------------------
/data/result.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Single430/FaceLandmark1000/1a951b612e7138d490fa8fd8b932506b7dd6451f/data/result.jpg
--------------------------------------------------------------------------------
/demo.py:
--------------------------------------------------------------------------------
import sys

import cv2

from face_detector import FaceDetector
from face_landmark import FaceLandmark


def camera_run():
    face_detector_handle = FaceDetector()
    face_landmark_handle = FaceLandmark()

    cap = cv2.VideoCapture(0)
    while True:
        ret, image = cap.read()
        if image is None:
            continue
        detections, _ = face_detector_handle.run(image)

        if len(detections) == 0:
            continue
        for detection in detections:
            landmarks, states = face_landmark_handle.run(image, detection)
            if landmarks is None:
                continue
            face_landmark_handle.show_result(image, landmarks)


def image_run():
    face_detector_handle = FaceDetector()
    face_landmark_handle = FaceLandmark()

    image = cv2.imread('data/1.jpeg')
    detections, _ = face_detector_handle.run(image)

    face_detector_handle.show_result(image, detections)

    if len(detections) == 0:
        return

    for detection in detections:
        landmarks, states = face_landmark_handle.run(image, detection)
        if landmarks is None:
            continue
        face_landmark_handle.show_result(image, landmarks)


def video_capture_run():
    face_detector_handle = FaceDetector()
    face_landmark_handle = FaceLandmark()
    cap = cv2.VideoCapture(0)
    fourcc = cv2.VideoWriter_fourcc(*'XVID')  # XVID pairs with an .avi container
    out = cv2.VideoWriter('out.avi', fourcc, 10, (640, 480))
    while True:
        t1 = cv2.getTickCount()
        ret, frame = cap.read()
        if not ret:
            break
        detections, landmarks = face_detector_handle.run(frame)
        t2 = cv2.getTickCount()
        t = (t2 - t1) / cv2.getTickFrequency()
        fps = 1.0 / t
        for i in range(detections.shape[0]):
            bbox = detections[i, :4]
            score = detections[i, 4]
            corpbbox = [int(bbox[0]), int(bbox[1]), int(bbox[2]), int(bbox[3])]

            # draw the face bounding box
            cv2.rectangle(frame, (corpbbox[0], corpbbox[1]),
                          (corpbbox[2], corpbbox[3]), (255, 0, 0), 1)
            # draw the confidence score
            cv2.putText(frame, '{:.2f}'.format(score),
                        (corpbbox[0], corpbbox[1] - 2),
                        cv2.FONT_HERSHEY_SIMPLEX,
                        0.5, (0, 0, 255), 2)
        # draw the per-frame latency and FPS
        cv2.putText(frame, '{:.4f}'.format(t) + " " + '{:.3f}'.format(fps), (10, 20),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 255), 2)
        # draw the landmarks
        for detection in detections:
            landmarks, states = face_landmark_handle.run(frame, detection)
            if landmarks is None:
                continue
            for i in range(landmarks.shape[0]):
                cv2.circle(frame, (int(landmarks[i][0]), int(landmarks[i][1])), 2, (0, 0, 255))
        out.write(frame)
        cv2.imshow("result", frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cap.release()
    out.release()
    cv2.destroyAllWindows()
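

# Headless variant of image_run for machines without a display: draw the
# detections and landmarks, then save the result with cv2.imwrite instead of
# opening cv2.imshow windows. Sketch only: save_result() is an illustrative
# helper, not part of the original project, and is never called by the demo.
def save_result(input_path='data/1.jpeg', output_path='out.jpg'):
    face_detector_handle = FaceDetector()
    face_landmark_handle = FaceLandmark()

    image = cv2.imread(input_path)
    detections, _ = face_detector_handle.run(image)
    for d in detections:
        cv2.rectangle(image, (int(d[0]), int(d[1])), (int(d[2]), int(d[3])), (0, 0, 255), 2)
        landmarks, _ = face_landmark_handle.run(image, d)
        if landmarks is None:
            continue
        for x, y in landmarks:
            cv2.circle(image, (int(x), int(y)), 1, (255, 122, 122), 1)
    cv2.imwrite(output_path, image)  # write to disk instead of showing a window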


if __name__ == '__main__':
    if len(sys.argv) == 1:
        image_run()
    else:
        video_capture_run()
--------------------------------------------------------------------------------
/face_detector.py:
--------------------------------------------------------------------------------
import torch
import onnx
import onnxruntime
import numpy as np
from itertools import product
from math import ceil
import cv2


class FaceDetector(object):
    def __init__(self):
        self.model_path = r'model/FaceDetector.onnx'
        self.onnx_model = onnx.load(self.model_path)
        onnx.checker.check_model(self.onnx_model)
        self.ort_session = onnxruntime.InferenceSession(self.model_path)
        self.cfg = self.config()
        self.conf_threshold = 0.5
        self.top_k = 5000
        self.nms_threshold = 0.4
        self.keep_top_k = 750
        self.vis_threshold = 0.6
        self.image_size = (640, 640)
        # fall back to CPU when CUDA is unavailable
        self.device = 'cuda' if torch.cuda.is_available() else 'cpu'

    def run(self, image):
        ori_height, ori_width = image.shape[:2]
        processed_image, scale, img_height, img_width = self.preprocess(image)
        ort_inputs = {self.ort_session.get_inputs()[0].name: self.to_numpy(processed_image)}
        locations, confidences, landmarks = self.ort_session.run(None, ort_inputs)
        detections, landmarks = self.postprocess(processed_image, locations, confidences, landmarks, scale,
                                                 img_height, img_width)

        # map boxes from the 640x640 network input back to the original image size
        detections[:, 0] = detections[:, 0] * ori_width / self.image_size[0]
        detections[:, 1] = detections[:, 1] * ori_height / self.image_size[1]
        detections[:, 2] = detections[:, 2] * ori_width / self.image_size[0]
        detections[:, 3] = detections[:, 3] * ori_height / self.image_size[1]

        return detections, landmarks

    def show_result(self, image, detections):
        for d in detections:
            if self.vis_threshold > d[4]:
                continue
            image = cv2.rectangle(image, (int(d[0]), int(d[1])), (int(d[2]), int(d[3])), (0, 0, 255), 2)
        cv2.imshow('', image)
        cv2.waitKey(1)

    def preprocess(self, image):
        image = cv2.resize(image, self.image_size)
        img = np.float32(image)
        img_height, img_width, _ = img.shape
        scale = torch.Tensor([img_width, img_height, img_width, img_height])
        img -= (104, 117, 123)  # subtract the BGR channel means
        img = img.transpose(2, 0, 1)
        img = torch.from_numpy(img).unsqueeze(0)
        img = img.detach()
        img = img.to(self.device)
        scale = scale.to(self.device)
        return img, scale, img_height, img_width

    def postprocess(self, image, locations, confidences, landmarks, scale, img_height, img_width):
        priorbox = PriorBox(self.cfg, image_size=self.image_size)
        priors = priorbox.forward()
        priors = priors.to(self.device)
        resize = 1
        prior_data = priors.data
        locations = torch.from_numpy(locations).to(self.device)
        confidences = torch.from_numpy(confidences).to(self.device)
        landmarks = torch.from_numpy(landmarks).to(self.device)
        boxes = self.decode(locations.data.squeeze(0), prior_data, self.cfg['variance'])
        boxes = boxes * scale / resize
        boxes = boxes.cpu().numpy()
        scores = confidences.squeeze(0).data.cpu().numpy()[:, 1]
        landmarks = self.decode_landmarks(landmarks.data.squeeze(0), prior_data, self.cfg['variance'])
        scale1 = torch.Tensor([image.shape[3], image.shape[2], image.shape[3], image.shape[2],
                               image.shape[3], image.shape[2], image.shape[3], image.shape[2],
                               image.shape[3], image.shape[2]])
        scale1 = scale1.to(self.device)
        landmarks = landmarks * scale1 / resize
        landmarks = landmarks.cpu().numpy()
        inds = np.where(scores > self.conf_threshold)[0]
        boxes = boxes[inds]
        landmarks = landmarks[inds]
        scores = scores[inds]

        # keep the top-K highest-scoring boxes before NMS
        order = scores.argsort()[::-1][:self.top_k]
        boxes = boxes[order]
        landmarks = landmarks[order]
        scores = scores[order]

        detections = np.hstack((boxes, scores[:, np.newaxis])).astype(np.float32, copy=False)
        keep = self.py_cpu_nms(detections, self.nms_threshold)
        detections = detections[keep, :]
        landmarks = landmarks[keep]

        # keep the top-K detections after NMS
        detections = detections[:self.keep_top_k, :]
        landmarks = landmarks[:self.keep_top_k, :]

        return detections, landmarks

    def decode(self, loc, priors, variances):
        # SSD-style decoding: offsets are relative to prior centers and sizes
        boxes = torch.cat((
            priors[:, :2] + loc[:, :2] * variances[0] * priors[:, 2:],
            priors[:, 2:] * torch.exp(loc[:, 2:] * variances[1])), 1)
        boxes[:, :2] -= boxes[:, 2:] / 2  # (cx, cy, w, h) -> (x1, y1, w, h)
        boxes[:, 2:] += boxes[:, :2]      # (x1, y1, w, h) -> (x1, y1, x2, y2)
        return boxes

    def decode_landmarks(self, pre, priors, variances):
        landms = torch.cat((priors[:, :2] + pre[:, :2] * variances[0] * priors[:, 2:],
                            priors[:, :2] + pre[:, 2:4] * variances[0] * priors[:, 2:],
                            priors[:, :2] + pre[:, 4:6] * variances[0] * priors[:, 2:],
                            priors[:, :2] + pre[:, 6:8] * variances[0] * priors[:, 2:],
                            priors[:, :2] + pre[:, 8:10] * variances[0] * priors[:, 2:],
                            ), dim=1)
        return landms

    def config(self):
        cfg = {
            'min_sizes': [[16, 32], [64, 128], [256, 512]],
            'steps': [8, 16, 32],
            'variance': [0.1, 0.2],
            'clip': False,
        }

        return cfg

    def to_numpy(self, tensor):
        return tensor.detach().cpu().numpy() if tensor.requires_grad else tensor.cpu().numpy()

    def py_cpu_nms(self, dets, thresh):
        """Pure Python NMS baseline."""
        x1 = dets[:, 0]
        y1 = dets[:, 1]
        x2 = dets[:, 2]
        y2 = dets[:, 3]
        scores = dets[:, 4]

        areas = (x2 - x1 + 1) * (y2 - y1 + 1)
        order = scores.argsort()[::-1]

        keep = []
        while order.size > 0:
            i = order[0]
            keep.append(i)
            xx1 = np.maximum(x1[i], x1[order[1:]])
            yy1 = np.maximum(y1[i], y1[order[1:]])
            xx2 = np.minimum(x2[i], x2[order[1:]])
            yy2 = np.minimum(y2[i], y2[order[1:]])

            w = np.maximum(0.0, xx2 - xx1 + 1)
            h = np.maximum(0.0, yy2 - yy1 + 1)
            inter = w * h
            ovr = inter / (areas[i] + areas[order[1:]] - inter)

            inds = np.where(ovr <= thresh)[0]
            order = order[inds + 1]

        return keep


class PriorBox(object):
    def __init__(self, cfg, image_size=None):
        super(PriorBox, self).__init__()
        self.min_sizes = cfg['min_sizes']
        self.steps = cfg['steps']
        self.clip = cfg['clip']
        self.image_size = image_size
        self.feature_maps = [[ceil(self.image_size[0] / step), ceil(self.image_size[1] / step)] for step in self.steps]
        self.name = "s"

    def forward(self):
        anchors = []
        for k, f in enumerate(self.feature_maps):
            min_sizes = self.min_sizes[k]
            for i, j in product(range(f[0]), range(f[1])):
                for min_size in min_sizes:
                    s_kx = min_size / self.image_size[1]
                    s_ky = min_size / self.image_size[0]
                    dense_cx = [x * self.steps[k] / self.image_size[1] for x in [j + 0.5]]
                    dense_cy = [y * self.steps[k] / self.image_size[0] for y in [i + 0.5]]
                    for cy, cx in product(dense_cy, dense_cx):
                        anchors += [cx, cy, s_kx, s_ky]

        # back to torch land
        output = torch.Tensor(anchors).view(-1, 4)
        if self.clip:
            output.clamp_(max=1, min=0)
        return output
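

# Sanity check for the prior layout: a 640x640 input with steps (8, 16, 32)
# yields 80x80, 40x40 and 20x20 feature maps with two anchor sizes per cell,
# i.e. 2 * (80**2 + 40**2 + 20**2) = 16800 priors of 4 coordinates each.
# Illustrative helper only; it is not used by FaceDetector itself.
def check_prior_count(image_size=(640, 640)):
    cfg = {'min_sizes': [[16, 32], [64, 128], [256, 512]],
           'steps': [8, 16, 32], 'variance': [0.1, 0.2], 'clip': False}
    priors = PriorBox(cfg, image_size=image_size).forward()
    expected = sum(len(sizes) * ceil(image_size[0] / step) * ceil(image_size[1] / step)
                   for sizes, step in zip(cfg['min_sizes'], cfg['steps']))
    assert priors.shape == (expected, 4)  # (16800, 4) for the default config
    return priors.shape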


if __name__ == '__main__':
    image = cv2.imread('data/1.jpeg')
    handle = FaceDetector()
    handle.run(image)
--------------------------------------------------------------------------------
/face_landmark.py:
--------------------------------------------------------------------------------
import torch
import numpy as np
import cv2
import onnx
import onnxruntime
import math


class FaceLandmark(object):
    def __init__(self):
        self.model_path = r'model/FaceLandmark.onnx'
        self.onnx_model = onnx.load(self.model_path)
        onnx.checker.check_model(self.onnx_model)
        self.ort_session = onnxruntime.InferenceSession(self.model_path)
        self.image_size = 128
        self.min_face = 100
        self.iou_thres = 0.5
        self.thres = 1
        self.filter = OneEuroFilter()
        self.previous_landmarks_set = None

    def run(self, image, bbox):
        processed_image, details = self.preprocess(image, bbox)
        if processed_image is None:
            # face smaller than min_face; nothing to predict
            return None, None
        ort_inputs = {self.ort_session.get_inputs()[0].name: self.to_numpy(processed_image)}
        result = self.ort_session.run(None, ort_inputs)
        landmarks = result[0][0, :1946].reshape(-1, 2)
        states = result[(1946 + 3):]
        landmarks = self.postprocess(landmarks, details)
        return np.array(landmarks), np.array(states)

    def show_result(self, image, landmark):
        for point in landmark:
            cv2.circle(image, center=(int(point[0]), int(point[1])),
                       color=(255, 122, 122), radius=1, thickness=1)
        cv2.imshow('', image)
        cv2.waitKey(1)

    def preprocess(self, image, bbox):
        bbox_width = bbox[2] - bbox[0]
        bbox_height = bbox[3] - bbox[1]
        if bbox_width <= self.min_face or bbox_height <= self.min_face:
            return None, None
        add = int(max(bbox_width, bbox_height))
        # pad the image so the expanded crop cannot fall outside the frame
        bimg = cv2.copyMakeBorder(image, add, add, add, add,
                                  borderType=cv2.BORDER_CONSTANT,
                                  value=np.array([127., 127., 127.]))
        bbox = bbox + add  # shift into padded coordinates (copy, not in place)

        face_width = (1 + 2 * 0.1) * bbox_width
        face_height = (1 + 2 * 0.2) * bbox_height
        center = [(bbox[0] + bbox[2]) // 2, (bbox[1] + bbox[3]) // 2]

        bbox[0] = center[0] - face_width // 2
        bbox[1] = center[1] - face_height // 2
        bbox[2] = center[0] + face_width // 2
        bbox[3] = center[1] + face_height // 2

        # crop
        bbox = bbox.astype(np.int32)  # np.int was removed in NumPy 1.24
        crop_image = bimg[bbox[1]:bbox[3], bbox[0]:bbox[2], :]

        h, w, _ = crop_image.shape
        crop_image = cv2.resize(crop_image, (self.image_size, self.image_size))
        crop_image = cv2.cvtColor(crop_image, cv2.COLOR_RGB2GRAY)
        crop_image = np.expand_dims(crop_image, axis=0)
        crop_image = np.expand_dims(crop_image, axis=0)
        crop_image = torch.from_numpy(crop_image).detach().float()
        return crop_image, [h, w, bbox[1], bbox[0], add]

    def postprocess(self, landmark, detail):
        # map normalized crop coordinates back to the original image:
        # scale by the crop size, shift by the crop origin, subtract the padding
        landmark[:, 0] = landmark[:, 0] * detail[1] + detail[3] - detail[4]
        landmark[:, 1] = landmark[:, 1] * detail[0] + detail[2] - detail[4]
        return landmark

    def to_numpy(self, tensor):
        return tensor.detach().cpu().numpy() if tensor.requires_grad else tensor.cpu().numpy()

    def calculate(self, now_landmarks_set):
        if self.previous_landmarks_set is None or self.previous_landmarks_set.shape[0] == 0:
            self.previous_landmarks_set = now_landmarks_set
            result = now_landmarks_set
        else:
            result = []
            for i in range(now_landmarks_set.shape[0]):
                not_in_flag = True
                for j in range(self.previous_landmarks_set.shape[0]):
                    # same face as in the previous frame: smooth its landmarks
                    if self.iou(now_landmarks_set[i], self.previous_landmarks_set[j]) > self.iou_thres:
                        result.append(self.smooth(now_landmarks_set[i], self.previous_landmarks_set[j]))
                        not_in_flag = False
                        break
                if not_in_flag:
                    result.append(now_landmarks_set[i])

            result = np.array(result)
            self.previous_landmarks_set = result

        return result

    def iou(self, p_set0, p_set1):
        # IoU of the bounding rectangles of the two landmark sets
        rec1 = [np.min(p_set0[:, 0]), np.min(p_set0[:, 1]), np.max(p_set0[:, 0]), np.max(p_set0[:, 1])]
        rec2 = [np.min(p_set1[:, 0]), np.min(p_set1[:, 1]), np.max(p_set1[:, 0]), np.max(p_set1[:, 1])]

        # compute the area of each rectangle
        S_rec1 = (rec1[2] - rec1[0]) * (rec1[3] - rec1[1])
        S_rec2 = (rec2[2] - rec2[0]) * (rec2[3] - rec2[1])

        # compute the summed area
        sum_area = S_rec1 + S_rec2

        # find each edge of the intersecting rectangle
        x1 = max(rec1[0], rec2[0])
        y1 = max(rec1[1], rec2[1])
        x2 = min(rec1[2], rec2[2])
        y2 = min(rec1[3], rec2[3])

        # judge whether there is an intersection
        intersect = max(0, x2 - x1) * max(0, y2 - y1)

        return intersect / (sum_area - intersect)

    def smooth(self, now_landmarks, previous_landmarks):
        result = []
        for i in range(now_landmarks.shape[0]):
            dis = np.sqrt(np.square(now_landmarks[i][0] - previous_landmarks[i][0]) +
                          np.square(now_landmarks[i][1] - previous_landmarks[i][1]))

            if dis < self.thres:
                result.append(previous_landmarks[i])
            else:
                result.append(self.filter(now_landmarks[i], previous_landmarks[i]))

        return np.array(result)


class OneEuroFilter:
    def __init__(self, dx0=0.0, min_cutoff=1.0, beta=0.0, d_cutoff=1.0):
        """Initialize the one euro filter."""
        # The parameters.
        self.min_cutoff = float(min_cutoff)
        self.beta = float(beta)
        self.d_cutoff = float(d_cutoff)
        # Previous values.
        self.dx_prev = float(dx0)

    def __call__(self, x, x_prev):
        """Compute the filtered signal."""
        if x_prev is None:
            return x

        t_e = 1  # fixed time step between frames

        # The filtered derivative of the signal.
        a_d = self.smoothing_factor(t_e, self.d_cutoff)
        dx = (x - x_prev) / t_e
        dx_hat = self.exponential_smoothing(a_d, dx, self.dx_prev)

        # The filtered signal.
        cutoff = self.min_cutoff + self.beta * abs(dx_hat)
        a = self.smoothing_factor(t_e, cutoff)
        x_hat = self.exponential_smoothing(a, x, x_prev)

        # Memorize the previous values.
        self.dx_prev = dx_hat
        return x_hat

    def smoothing_factor(self, t_e, cutoff):
        r = 2 * math.pi * cutoff * t_e
        return r / (r + 1)

    def exponential_smoothing(self, a, x, x_prev):
        return a * x + (1 - a) * x_prev
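

# Tiny demonstration of the filter on a noisy 1-D ramp (illustrative helper,
# not used by FaceLandmark): the first call passes x through because x_prev
# is None; later calls blend x with x_prev using the adaptive cutoff.
def demo_one_euro_filter(steps=5):
    f = OneEuroFilter(min_cutoff=1.0, beta=0.0)
    rng = np.random.default_rng(0)
    x_prev = None
    for t in range(steps):
        x = np.array([float(t)]) + rng.normal(0, 0.1, 1)  # noisy ramp
        x_prev = f(x, x_prev)
        print(t, x, x_prev)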


if __name__ == '__main__':
    image = cv2.imread('data/1.jpeg')
    bbox = np.array([117.58737, 58.62614, 354.0737, 401.39395])
    handle = FaceLandmark()
    handle.run(image, bbox)
--------------------------------------------------------------------------------
/model/FaceDetector.onnx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Single430/FaceLandmark1000/1a951b612e7138d490fa8fd8b932506b7dd6451f/model/FaceDetector.onnx
--------------------------------------------------------------------------------
/model/FaceLandmark.onnx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Single430/FaceLandmark1000/1a951b612e7138d490fa8fd8b932506b7dd6451f/model/FaceLandmark.onnx
--------------------------------------------------------------------------------
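
Both wrapper classes hard-code the tensor layouts they feed and read. If you swap in different ONNX files, first confirm the expected shapes; a standalone sketch using onnxruntime's session introspection (run from the repo root):

```python
import onnxruntime

for path in ('model/FaceDetector.onnx', 'model/FaceLandmark.onnx'):
    session = onnxruntime.InferenceSession(path)
    # print the declared name, shape and dtype of every input and output
    for tensor in session.get_inputs() + session.get_outputs():
        print(path, tensor.name, tensor.shape, tensor.type)
```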