├── .gitignore
├── license.txt
├── requirements.txt
├── libfacesdk1.so
├── gradio
├── live_examples
│ ├── 1.jpg
│ ├── 2.jpg
│ ├── 3.jpg
│ └── 4.jpg
└── demo.py
├── Dockerfile
├── facebox.py
├── facesdk.py
├── header
└── facesdk.h
├── postman
└── kby-ai-live.postman_collection.json
├── app.py
└── README.md
/.gitignore:
--------------------------------------------------------------------------------
1 | /data
--------------------------------------------------------------------------------
/license.txt:
--------------------------------------------------------------------------------
1 | None
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | flask
2 | flask-cors
3 | Pillow
4 | numpy
--------------------------------------------------------------------------------
/libfacesdk1.so:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kby-ai/FaceLivenessDetection-Docker/HEAD/libfacesdk1.so
--------------------------------------------------------------------------------
/gradio/live_examples/1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kby-ai/FaceLivenessDetection-Docker/HEAD/gradio/live_examples/1.jpg
--------------------------------------------------------------------------------
/gradio/live_examples/2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kby-ai/FaceLivenessDetection-Docker/HEAD/gradio/live_examples/2.jpg
--------------------------------------------------------------------------------
/gradio/live_examples/3.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kby-ai/FaceLivenessDetection-Docker/HEAD/gradio/live_examples/3.jpg
--------------------------------------------------------------------------------
/gradio/live_examples/4.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kby-ai/FaceLivenessDetection-Docker/HEAD/gradio/live_examples/4.jpg
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM openvino/ubuntu20_runtime:2022.3.0
2 |
3 | RUN mkdir -p /home/openvino/kby-ai-live
4 | WORKDIR /home/openvino/kby-ai-live
5 | COPY ./libfacesdk1.so .
6 | COPY ./facesdk.py .
7 | COPY ./facebox.py .
8 | COPY ./app.py .
9 | COPY ./requirements.txt .
10 | COPY ./data ./data
11 | RUN pip3 install -r requirements.txt
12 | CMD [ "python3", "app.py"]
13 | EXPOSE 8080
--------------------------------------------------------------------------------
/facebox.py:
--------------------------------------------------------------------------------
1 | from ctypes import *
2 | 
3 | class FaceBox(Structure): # Python mirror of the C FaceBox struct in header/facesdk.h; field order and types must match the native layout exactly
4 |     _fields_ = [("x1", c_int32), ("y1", c_int32), ("x2", c_int32), ("y2", c_int32), # face bounding box corners, in pixels
5 |                 ("liveness", c_float), # liveness score (compared against livenessThreshold in app.py)
6 |                 ("yaw", c_float), ("roll", c_float), ("pitch", c_float), # head pose angles — presumably degrees; TODO confirm units
7 |                 ("face_quality", c_float), ("face_luminance", c_float), ("eye_dist", c_float), # quality score, brightness, inter-eye distance
8 |                 ("left_eye_closed", c_float), ("right_eye_closed", c_float), # per-eye closure scores
9 |                 ("face_occlusion", c_float), ("mouth_opened", c_float), # occlusion / mouth-opening scores
10 |                 ("landmark_68", c_float * 136) # 68 facial landmarks stored as interleaved (x, y) pairs
11 |                 ]
--------------------------------------------------------------------------------
/facesdk.py:
--------------------------------------------------------------------------------
1 | import os
2 | 
3 | from ctypes import *
4 | from numpy.ctypeslib import ndpointer
5 | from facebox import FaceBox
6 | 
7 | libPath = os.path.abspath(os.path.dirname(__file__)) + '/libfacesdk1.so' # native SDK shared library shipped next to this file
8 | facesdk = cdll.LoadLibrary(libPath)
9 | 
10 | getMachineCode = facesdk.getMachineCode # () -> bytes: machine code used to request a license from the vendor
11 | getMachineCode.argtypes = []
12 | getMachineCode.restype = c_char_p
13 | 
14 | setActivation = facesdk.setActivation # (license: bytes) -> int: SDK_ERROR status code (see header/facesdk.h)
15 | setActivation.argtypes = [c_char_p]
16 | setActivation.restype = c_int32
17 | 
18 | initSDK = facesdk.initSDK # (modelPath: bytes) -> int: SDK_ERROR status code
19 | initSDK.argtypes = [c_char_p]
20 | initSDK.restype = c_int32
21 | 
22 | faceDetection = facesdk.faceDetection # (rgbData, width, height, faceBoxes, faceBoxCount) -> int: number of faces written into faceBoxes
23 | faceDetection.argtypes = [ndpointer(c_ubyte, flags='C_CONTIGUOUS'), c_int32, c_int32, POINTER(FaceBox), c_int32]
24 | faceDetection.restype = c_int32
25 | 
26 | 
--------------------------------------------------------------------------------
/header/facesdk.h:
--------------------------------------------------------------------------------
1 | #pragma once
2 | 
3 | #ifdef __cplusplus
4 | extern "C" {
5 | #endif
6 | 
7 | /* Status codes returned by setActivation and initSDK. */
8 | enum SDK_ERROR
9 | {
9 |     SDK_SUCCESS = 0,              // operation completed successfully
10 |     SDK_LICENSE_KEY_ERROR = -1,   // invalid license key
11 |     SDK_LICENSE_APPID_ERROR = -2, // license was issued for a different application id
12 |     SDK_LICENSE_EXPIRED = -3,     // license has expired
13 |     SDK_NO_ACTIVATED = -4,        // SDK used before a successful activation
14 |     SDK_INIT_ERROR = -5,          // initialization failed (e.g. model loading)
15 | };
16 | 
17 | /* Per-face detection result; mirrored by facebox.py (field order/types must match). */
18 | typedef struct _tagFaceBox
19 | {
19 |     int x1, y1, x2, y2;           // face bounding box corners, in pixels
20 |     float liveness;               // liveness score
21 |     float yaw, roll, pitch;       // head pose angles
22 |     float face_quality, face_luminance, eye_dist; // quality score, brightness, inter-eye distance
23 |     float left_eye_closed, right_eye_closed, face_occlusion, mouth_opened; // per-attribute scores
24 |     float landmark_68[68 * 2];    // 68 facial landmarks as interleaved (x, y) pairs
25 | } FaceBox;
26 | 
27 | /*
28 |  * Get the machine code for SDK activation
29 |  */
30 | const char* getMachineCode();
31 | 
32 | /*
33 |  * Activate the SDK using the provided license; returns an SDK_ERROR code
34 |  */
35 | 
36 | int setActivation(char* license);
37 | 
38 | /*
39 |  * Initialize the SDK with the specified model path; returns an SDK_ERROR code
40 |  */
41 | int initSDK(char* modelPath);
42 | 
43 | /*
44 |  * Detect faces, perform liveness detection, determine face orientation (yaw, roll, pitch),
45 |  * assess face quality, detect facial occlusion, eye closure, mouth opening, and identify facial landmarks.
46 |  * Fills up to faceBoxCount entries in faceBoxes and returns the number of faces found.
47 |  */
48 | int faceDetection(unsigned char* rgbData, int width, int height, FaceBox* faceBoxes, int faceBoxCount);
49 | 
50 | #ifdef __cplusplus
51 | }
52 | #endif
--------------------------------------------------------------------------------
/postman/kby-ai-live.postman_collection.json:
--------------------------------------------------------------------------------
1 | {
2 | "info": {
3 | "_postman_id": "26c7273b-38a7-4db0-8e7d-b1a84589128e",
4 | "name": "kby-ai-live",
5 | "schema": "https://schema.getpostman.com/json/collection/v2.1.0/collection.json",
6 | "_exporter_id": "2379931"
7 | },
8 | "item": [
9 | {
10 | "name": "check_liveness",
11 | "request": {
12 | "method": "POST",
13 | "header": [],
14 | "body": {
15 | "mode": "formdata",
16 | "formdata": [
17 | {
18 | "key": "file",
19 | "type": "file",
20 | "src": []
21 | }
22 | ]
23 | },
24 | "url": {
25 | "raw": "http://18.221.33.238:8080/check_liveness",
26 | "protocol": "http",
27 | "host": [
28 | "18",
29 | "221",
30 | "33",
31 | "238"
32 | ],
33 | "port": "8080",
34 | "path": [
35 | "check_liveness"
36 | ]
37 | }
38 | },
39 | "response": []
40 | },
41 | {
42 | "name": "check_liveness_base64",
43 | "request": {
44 | "method": "POST",
45 | "header": [],
46 | "body": {
47 | "mode": "raw",
48 | "raw": "{\r\n \"base64\":\"xxx\"\r\n}",
49 | "options": {
50 | "raw": {
51 | "language": "json"
52 | }
53 | }
54 | },
55 | "url": {
56 | "raw": "http://18.221.33.238:8080/check_liveness_base64",
57 | "protocol": "http",
58 | "host": [
59 | "18",
60 | "221",
61 | "33",
62 | "238"
63 | ],
64 | "port": "8080",
65 | "path": [
66 | "check_liveness_base64"
67 | ]
68 | }
69 | },
70 | "response": []
71 | }
72 | ]
73 | }
--------------------------------------------------------------------------------
/gradio/demo.py:
--------------------------------------------------------------------------------
1 | import gradio as gr
2 | import requests
3 | from PIL import Image
4 |
5 | def check_liveness(frame):
6 | url = "http://127.0.0.1:8080/check_liveness"
7 | file = {'file': open(frame, 'rb')}
8 |
9 | r = requests.post(url=url, files=file)
10 | result = r.json().get('face_state').get('result')
11 |
12 | html = None
13 | faces = None
14 | if r.json().get('face_state').get('is_not_front') is not None:
15 | liveness_score = r.json().get('face_state').get('liveness_score')
16 | eye_closed = r.json().get('face_state').get('eye_closed')
17 | is_boundary_face = r.json().get('face_state').get('is_boundary_face')
18 | is_not_front = r.json().get('face_state').get('is_not_front')
19 | is_occluded = r.json().get('face_state').get('is_occluded')
20 | is_small = r.json().get('face_state').get('is_small')
21 | luminance = r.json().get('face_state').get('luminance')
22 | mouth_opened = r.json().get('face_state').get('mouth_opened')
23 | quality = r.json().get('face_state').get('quality')
24 |
25 | html = ("
"
26 | ""
27 | "| Face State | "
28 | "Value | "
29 | "
"
30 | ""
31 | "| Result | "
32 | "{result} | "
33 | "
"
34 | ""
35 | "| Liveness Score | "
36 | "{liveness_score} | "
37 | "
"
38 | ""
39 | "| Quality | "
40 | "{quality} | "
41 | "
"
42 | ""
43 | "| Luminance | "
44 | "{luminance} | "
45 | "
"
46 | ""
47 | "| Is Small | "
48 | "{is_small} | "
49 | "
"
50 | ""
51 | "| Is Boundary | "
52 | "{is_boundary_face} | "
53 | "
"
54 | ""
55 | "| Is Not Front | "
56 | "{is_not_front} | "
57 | "
"
58 | ""
59 | "| Face Occluded | "
60 | "{is_occluded} | "
61 | "
"
62 | ""
63 | "| Eye Closed | "
64 | "{eye_closed} | "
65 | "
"
66 | ""
67 | "| Mouth Opened | "
68 | "{mouth_opened} | "
69 | "
"
70 | "
".format(liveness_score=liveness_score, quality=quality, luminance=luminance, is_small=is_small, is_boundary_face=is_boundary_face,
71 | is_not_front=is_not_front, is_occluded=is_occluded, eye_closed=eye_closed, mouth_opened=mouth_opened, result=result))
72 |
73 | else:
74 | html = (""
75 | ""
76 | "| Face State | "
77 | "Value | "
78 | "
"
79 | ""
80 | "| Result | "
81 | "{result} | "
82 | "
"
83 | "
".format(result=result))
84 |
85 | try:
86 | image = Image.open(frame)
87 |
88 | for face in r.json().get('faces'):
89 | x1 = face.get('x1')
90 | y1 = face.get('y1')
91 | x2 = face.get('x2')
92 | y2 = face.get('y2')
93 |
94 | if x1 < 0:
95 | x1 = 0
96 | if y1 < 0:
97 | y1 = 0
98 | if x2 >= image.width:
99 | x2 = image.width - 1
100 | if y2 >= image.height:
101 | y2 = image.height - 1
102 |
103 | face_image = image.crop((x1, y1, x2, y2))
104 | face_image_ratio = face_image.width / float(face_image.height)
105 | resized_w = int(face_image_ratio * 150)
106 | resized_h = 150
107 |
108 | face_image = face_image.resize((int(resized_w), int(resized_h)))
109 |
110 | if faces is None:
111 | faces = face_image
112 | else:
113 | new_image = Image.new('RGB',(faces.width + face_image.width + 10, 150), (80,80,80))
114 |
115 | new_image.paste(faces,(0,0))
116 | new_image.paste(face_image,(faces.width + 10, 0))
117 | faces = new_image.copy()
118 | except:
119 | pass
120 |
121 | return [faces, html]
122 |
123 | def compare_face(frame1, frame2):
124 | url = "http://127.0.0.1:8081/compare_face"
125 | files = {'file1': open(frame1, 'rb'), 'file2': open(frame2, 'rb')}
126 |
127 | r = requests.post(url=url, files=files)
128 |
129 | html = None
130 | faces = None
131 |
132 | compare_result = r.json().get('compare_result')
133 | compare_similarity = r.json().get('compare_similarity')
134 |
135 | html = (""
136 | ""
137 | "| Face State | "
138 | "Value | "
139 | "
"
140 | ""
141 | "| Result | "
142 | "{compare_result} | "
143 | "
"
144 | ""
145 | "| Similarity | "
146 | "{compare_similarity} | "
147 | "
"
148 | "
".format(compare_result=compare_result, compare_similarity=compare_similarity))
149 |
150 | try:
151 | image1 = Image.open(frame1)
152 | image2 = Image.open(frame2)
153 |
154 | face1 = None
155 | face2 = None
156 |
157 | if r.json().get('face1') is not None:
158 | face = r.json().get('face1')
159 | x1 = face.get('x1')
160 | y1 = face.get('y1')
161 | x2 = face.get('x2')
162 | y2 = face.get('y2')
163 |
164 | if x1 < 0:
165 | x1 = 0
166 | if y1 < 0:
167 | y1 = 0
168 | if x2 >= image1.width:
169 | x2 = image1.width - 1
170 | if y2 >= image1.height:
171 | y2 = image1.height - 1
172 |
173 | face1 = image1.crop((x1, y1, x2, y2))
174 | face_image_ratio = face1.width / float(face1.height)
175 | resized_w = int(face_image_ratio * 150)
176 | resized_h = 150
177 |
178 | face1 = face1.resize((int(resized_w), int(resized_h)))
179 |
180 | if r.json().get('face2') is not None:
181 | face = r.json().get('face2')
182 | x1 = face.get('x1')
183 | y1 = face.get('y1')
184 | x2 = face.get('x2')
185 | y2 = face.get('y2')
186 |
187 | if x1 < 0:
188 | x1 = 0
189 | if y1 < 0:
190 | y1 = 0
191 | if x2 >= image2.width:
192 | x2 = image2.width - 1
193 | if y2 >= image2.height:
194 | y2 = image2.height - 1
195 |
196 | face2 = image2.crop((x1, y1, x2, y2))
197 | face_image_ratio = face2.width / float(face2.height)
198 | resized_w = int(face_image_ratio * 150)
199 | resized_h = 150
200 |
201 | face2 = face2.resize((int(resized_w), int(resized_h)))
202 |
203 | if face1 is not None and face2 is not None:
204 | new_image = Image.new('RGB',(face1.width + face2.width + 10, 150), (80,80,80))
205 |
206 | new_image.paste(face1,(0,0))
207 | new_image.paste(face2,(face1.width + 10, 0))
208 | faces = new_image.copy()
209 | elif face1 is not None and face2 is None:
210 | new_image = Image.new('RGB',(face1.width + face1.width + 10, 150), (80,80,80))
211 |
212 | new_image.paste(face1,(0,0))
213 | faces = new_image.copy()
214 | elif face1 is None and face2 is not None:
215 | new_image = Image.new('RGB',(face2.width + face2.width + 10, 150), (80,80,80))
216 |
217 | new_image.paste(face2,(face2.width + 10, 0))
218 | faces = new_image.copy()
219 |
220 | except:
221 | pass
222 |
223 | return [faces, html]
224 |
225 |
226 | with gr.Blocks() as demo: # two-tab demo UI: liveness check (backend API on :8080) and face comparison (backend API on :8081)
227 |     gr.Markdown(
228 |         """
229 |         # KBY-AI Technology
230 |         """
231 |     )
232 |     with gr.TabItem("Face Liveness Detection"):
233 |         with gr.Row():
234 |             with gr.Column():
235 |                 live_image_input = gr.Image(type='filepath') # filepath type so check_liveness can re-open and POST the file
236 |                 gr.Examples(['live_examples/1.jpg', 'live_examples/2.jpg', 'live_examples/3.jpg', 'live_examples/4.jpg'],
237 |                             inputs=live_image_input)
238 |                 check_liveness_button = gr.Button("Check Liveness")
239 |             with gr.Column():
240 |                 liveness_face_output = gr.Image(type="pil").style(height=150) # NOTE(review): .style() was removed in Gradio 4.x — pin gradio<4 or pass height=150 to gr.Image
241 |                 livness_result_output = gr.HTML() # NOTE(review): variable name misspelled ("livness"); kept as-is
242 | 
243 |         check_liveness_button.click(check_liveness, inputs=live_image_input, outputs=[liveness_face_output, livness_result_output])
244 |     with gr.TabItem("Face Recognition"):
245 |         with gr.Row():
246 |             with gr.Column():
247 |                 compare_face_input1 = gr.Image(type='filepath')
248 |                 gr.Examples(['face_examples/1.jpg', 'face_examples/3.jpg', 'face_examples/5.jpg', 'face_examples/7.jpg', 'face_examples/9.jpg'],
249 |                             inputs=compare_face_input1) # NOTE(review): face_examples/ is not in this repo's tree — TODO confirm these paths exist
250 | 
251 |                 compare_face_input2 = gr.Image(type='filepath')
252 |                 gr.Examples(['face_examples/2.jpg', 'face_examples/4.jpg', 'face_examples/6.jpg', 'face_examples/8.jpg', 'face_examples/10.jpg'],
253 |                             inputs=compare_face_input2)
254 | 
255 |                 compare_face_button = gr.Button("Compare Face")
256 |             with gr.Column():
257 |                 compare_face_output = gr.Image(type="pil").style(height=150)
258 |                 compare_result_output = gr.HTML(label='Result')
259 | 
260 |         compare_face_button.click(compare_face, inputs=[compare_face_input1, compare_face_input2], outputs=[compare_face_output, compare_result_output])
261 | 
262 | demo.launch(server_name="0.0.0.0", server_port=9000) # bind on all interfaces; README documents the demo at port 9000
--------------------------------------------------------------------------------
/app.py:
--------------------------------------------------------------------------------
1 | import sys
2 | sys.path.append('.')
3 | 
4 | import os
5 | import numpy as np
6 | import base64
7 | import io
8 | 
9 | from PIL import Image
10 | from flask import Flask, request, jsonify
11 | from facesdk import getMachineCode
12 | from facesdk import setActivation
13 | from facesdk import faceDetection
14 | from facesdk import initSDK
15 | from facebox import FaceBox
16 | 
17 | livenessThreshold = 0.7 # liveness score above this => "Real", otherwise "Spoof"
18 | yawThreshold = 10 # max |yaw| still considered frontal — presumably degrees; TODO confirm units
19 | pitchThreshold = 10 # max |pitch| still considered frontal
20 | rollThreshold = 10 # max |roll| still considered frontal
21 | occlusionThreshold = 0.9 # occlusion score above this => face considered occluded
22 | eyeClosureThreshold = 0.8 # per-eye closure score above this => eye considered closed
23 | mouthOpeningThreshold = 0.5 # score above this => mouth considered opened
24 | borderRate = 0.05 # fraction of image width/height treated as the boundary margin
25 | smallFaceThreshold = 100 # inter-eye distance below this => face considered too small
26 | lowQualityThreshold = 0.3 # quality below this => "Low"
27 | hightQualityThreshold = 0.7 # quality at/above this => "High" (NOTE(review): name misspelled "hight"; kept as-is)
28 | luminanceDarkThreshold = 50 # luminance below this => "Dark"
29 | luminanceLightThreshold = 200 # luminance at/above this => "Light"
30 | 
31 | maxFaceCount = 10 # capacity of the FaceBox array handed to the native SDK
32 | 
33 | licensePath = "license.txt" # mounted into the container at runtime (see README)
34 | license = ""
35 | 
36 | machineCode = getMachineCode() # bytes; send this value to the vendor to obtain a license
37 | print("machineCode: ", machineCode.decode('utf-8'))
38 | 
39 | try:
40 |     with open(licensePath, 'r') as file:
41 |         license = file.read()
42 | except IOError as exc:
43 |     print("failed to open license.txt: ", exc.errno) # non-fatal: activation below will simply fail
44 | print("license: ", license)
45 | 
46 | ret = setActivation(license.encode('utf-8'))
47 | print("activation: ", ret) # 0 (SDK_SUCCESS) on success; see header/facesdk.h for error codes
48 | 
49 | ret = initSDK("data".encode('utf-8'))
50 | print("init: ", ret) # "data" is the model directory downloaded per the README
51 | 
52 | app = Flask(__name__)
53 |
54 | @app.route('/check_liveness', methods=['POST'])
55 | def check_liveness():
56 | faces = []
57 | isNotFront = None
58 | isOcclusion = None
59 | isEyeClosure = None
60 | isMouthOpening = None
61 | isBoundary = None
62 | isSmall = None
63 | quality = None
64 | luminance = None
65 | livenessScore = None
66 |
67 | file = request.files['file']
68 |
69 | try:
70 | image = Image.open(file)
71 | except:
72 | result = "Failed to open file"
73 | faceState = {"is_not_front": isNotFront, "is_occluded": isOcclusion, "eye_closed": isEyeClosure, "mouth_opened": isMouthOpening,
74 | "is_boundary_face": isBoundary, "is_small": isSmall, "quality": quality, "luminance": luminance, "result": result, "liveness_score": livenessScore}
75 | response = jsonify({"face_state": faceState, "faces": faces})
76 |
77 | response.status_code = 200
78 | response.headers["Content-Type"] = "application/json; charset=utf-8"
79 | return response
80 |
81 |
82 | image_np = np.asarray(image)
83 |
84 | faceBoxes = (FaceBox * maxFaceCount)()
85 | faceCount = faceDetection(image_np, image_np.shape[1], image_np.shape[0], faceBoxes, maxFaceCount)
86 |
87 | for i in range(faceCount):
88 | landmark_68 = []
89 | for j in range(68):
90 | landmark_68.append({"x": faceBoxes[i].landmark_68[j * 2], "y": faceBoxes[i].landmark_68[j * 2 + 1]})
91 | faces.append({"x1": faceBoxes[i].x1, "y1": faceBoxes[i].y1, "x2": faceBoxes[i].x2, "y2": faceBoxes[i].y2,
92 | "liveness": faceBoxes[i].liveness,
93 | "yaw": faceBoxes[i].yaw, "roll": faceBoxes[i].roll, "pitch": faceBoxes[i].pitch,
94 | "face_quality": faceBoxes[i].face_quality, "face_luminance": faceBoxes[i].face_luminance, "eye_dist": faceBoxes[i].eye_dist,
95 | "left_eye_closed": faceBoxes[i].left_eye_closed, "right_eye_closed": faceBoxes[i].right_eye_closed,
96 | "face_occlusion": faceBoxes[i].face_occlusion, "mouth_opened": faceBoxes[i].mouth_opened,
97 | "landmark_68": landmark_68})
98 |
99 | result = ""
100 | if faceCount == 0:
101 | result = "No face"
102 | elif faceCount > 1:
103 | result = "Multiple face"
104 | else:
105 | livenessScore = faceBoxes[0].liveness
106 | if livenessScore > livenessThreshold:
107 | result = "Real"
108 | else:
109 | result = "Spoof"
110 |
111 | isNotFront = True
112 | isOcclusion = False
113 | isEyeClosure = False
114 | isMouthOpening = False
115 | isBoundary = False
116 | isSmall = False
117 | quality = "Low"
118 | luminance = "Dark"
119 | if abs(faceBoxes[0].yaw) < yawThreshold and abs(faceBoxes[0].roll) < rollThreshold and abs(faceBoxes[0].pitch) < pitchThreshold:
120 | isNotFront = False
121 |
122 | if faceBoxes[0].face_occlusion > occlusionThreshold:
123 | isOcclusion = True
124 |
125 | if faceBoxes[0].left_eye_closed > eyeClosureThreshold or faceBoxes[0].right_eye_closed > eyeClosureThreshold:
126 | isEyeClosure = True
127 |
128 | if faceBoxes[0].mouth_opened > mouthOpeningThreshold:
129 | isMouthOpening = True
130 |
131 | if (faceBoxes[0].x1 < image_np.shape[1] * borderRate or
132 | faceBoxes[0].y1 < image_np.shape[0] * borderRate or
133 | faceBoxes[0].x1 > image_np.shape[1] - image_np.shape[1] * borderRate or
134 | faceBoxes[0].x1 > image_np.shape[0] - image_np.shape[0] * borderRate):
135 | isBoundary = True
136 |
137 | if faceBoxes[0].eye_dist < smallFaceThreshold:
138 | isSmall = True
139 |
140 | if faceBoxes[0].face_quality < lowQualityThreshold:
141 | quality = "Low"
142 | elif faceBoxes[0].face_quality < hightQualityThreshold:
143 | quality = "Medium"
144 | else:
145 | quality = "High"
146 |
147 | if faceBoxes[0].face_luminance < luminanceDarkThreshold:
148 | luminance = "Dark"
149 | elif faceBoxes[0].face_luminance < luminanceLightThreshold:
150 | luminance = "Normal"
151 | else:
152 | luminance = "Light"
153 |
154 | faceState = {"is_not_front": isNotFront, "is_occluded": isOcclusion, "eye_closed": isEyeClosure, "mouth_opened": isMouthOpening,
155 | "is_boundary_face": isBoundary, "is_small": isSmall, "quality": quality, "luminance": luminance, "result": result, "liveness_score": livenessScore}
156 | response = jsonify({"face_state": faceState, "faces": faces})
157 |
158 | response.status_code = 200
159 | response.headers["Content-Type"] = "application/json; charset=utf-8"
160 | return response
161 |
162 | @app.route('/check_liveness_base64', methods=['POST'])
163 | def check_liveness_base64():
164 | faces = []
165 | isNotFront = None
166 | isOcclusion = None
167 | isEyeClosure = None
168 | isMouthOpening = None
169 | isBoundary = None
170 | isSmall = None
171 | quality = None
172 | luminance = None
173 | livenessScore = None
174 |
175 | content = request.get_json()
176 |
177 | try:
178 | imageBase64 = content['base64']
179 | image_data = base64.b64decode(imageBase64)
180 | image = Image.open(io.BytesIO(image_data))
181 | except:
182 | result = "Failed to open file"
183 | faceState = {"is_not_front": isNotFront, "is_occluded": isOcclusion, "eye_closed": isEyeClosure, "mouth_opened": isMouthOpening,
184 | "is_boundary_face": isBoundary, "is_small": isSmall, "quality": quality, "luminance": luminance, "result": result, "liveness_score": livenessScore}
185 | response = jsonify({"face_state": faceState, "faces": faces})
186 |
187 | response.status_code = 200
188 | response.headers["Content-Type"] = "application/json; charset=utf-8"
189 | return response
190 |
191 |
192 | image_np = np.asarray(image)
193 |
194 | faceBoxes = (FaceBox * maxFaceCount)()
195 | faceCount = faceDetection(image_np, image_np.shape[1], image_np.shape[0], faceBoxes, maxFaceCount)
196 |
197 | for i in range(faceCount):
198 | landmark_68 = []
199 | for j in range(68):
200 | landmark_68.append({"x": faceBoxes[i].landmark_68[j * 2], "y": faceBoxes[i].landmark_68[j * 2 + 1]})
201 | faces.append({"x1": faceBoxes[i].x1, "y1": faceBoxes[i].y1, "x2": faceBoxes[i].x2, "y2": faceBoxes[i].y2,
202 | "liveness": faceBoxes[i].liveness,
203 | "yaw": faceBoxes[i].yaw, "roll": faceBoxes[i].roll, "pitch": faceBoxes[i].pitch,
204 | "face_quality": faceBoxes[i].face_quality, "face_luminance": faceBoxes[i].face_luminance, "eye_dist": faceBoxes[i].eye_dist,
205 | "left_eye_closed": faceBoxes[i].left_eye_closed, "right_eye_closed": faceBoxes[i].right_eye_closed,
206 | "face_occlusion": faceBoxes[i].face_occlusion, "mouth_opened": faceBoxes[i].mouth_opened,
207 | "landmark_68": landmark_68})
208 |
209 | result = ""
210 | if faceCount == 0:
211 | result = "No face"
212 | elif faceCount > 1:
213 | result = "Multiple face"
214 | else:
215 | livenessScore = faceBoxes[0].liveness
216 | if livenessScore > livenessThreshold:
217 | result = "Real"
218 | else:
219 | result = "Spoof"
220 |
221 | isNotFront = True
222 | isOcclusion = False
223 | isEyeClosure = False
224 | isMouthOpening = False
225 | isBoundary = False
226 | isSmall = False
227 | quality = "Low"
228 | luminance = "Dark"
229 | if abs(faceBoxes[0].yaw) < yawThreshold and abs(faceBoxes[0].roll) < rollThreshold and abs(faceBoxes[0].pitch) < pitchThreshold:
230 | isNotFront = False
231 |
232 | if faceBoxes[0].face_occlusion > occlusionThreshold:
233 | isOcclusion = True
234 |
235 | if faceBoxes[0].left_eye_closed > eyeClosureThreshold or faceBoxes[0].right_eye_closed > eyeClosureThreshold:
236 | isEyeClosure = True
237 |
238 | if faceBoxes[0].mouth_opened > mouthOpeningThreshold:
239 | isMouthOpening = True
240 |
241 | if (faceBoxes[0].x1 < image_np.shape[1] * borderRate or
242 | faceBoxes[0].y1 < image_np.shape[0] * borderRate or
243 | faceBoxes[0].x1 > image_np.shape[1] - image_np.shape[1] * borderRate or
244 | faceBoxes[0].x1 > image_np.shape[0] - image_np.shape[0] * borderRate):
245 | isBoundary = True
246 |
247 | if faceBoxes[0].eye_dist < smallFaceThreshold:
248 | isSmall = True
249 |
250 | if faceBoxes[0].face_quality < lowQualityThreshold:
251 | quality = "Low"
252 | elif faceBoxes[0].face_quality < hightQualityThreshold:
253 | quality = "Medium"
254 | else:
255 | quality = "High"
256 |
257 | if faceBoxes[0].face_luminance < luminanceDarkThreshold:
258 | luminance = "Dark"
259 | elif faceBoxes[0].face_luminance < luminanceLightThreshold:
260 | luminance = "Normal"
261 | else:
262 | luminance = "Light"
263 |
264 | faceState = {"is_not_front": isNotFront, "is_occluded": isOcclusion, "eye_closed": isEyeClosure, "mouth_opened": isMouthOpening,
265 | "is_boundary_face": isBoundary, "is_small": isSmall, "quality": quality, "luminance": luminance, "result": result, "liveness_score": livenessScore}
266 | response = jsonify({"face_state": faceState, "faces": faces})
267 |
268 | response.status_code = 200
269 | response.headers["Content-Type"] = "application/json; charset=utf-8"
270 | return response
271 |
272 |
273 | if __name__ == '__main__':
274 |     port = int(os.environ.get("PORT", 8080)) # overridable via PORT env var; Dockerfile EXPOSEs 8080
275 |     app.run(host='0.0.0.0', port=port) # bind on all interfaces so the container port mapping works
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 | ### Our facial recognition algorithm is globally top-ranked by NIST in the FRVT 1:1 leaderboards.
8 | [Latest NIST FRVT evaluation report 2024-12-20](https://pages.nist.gov/frvt/html/frvt11.html)
9 |
10 | 
11 |
12 | #### 🆔 ID Document Liveness Detection - Linux - [Here](https://web.kby-ai.com)
13 | #### 🤗 Hugging Face - [Here](https://huggingface.co/kby-ai)
14 | #### 📚 Product & Resources - [Here](https://github.com/kby-ai/Product)
15 | #### 🛟 Help Center - [Here](https://docs.kby-ai.com)
16 | #### 💼 KYC Verification Demo - [Here](https://github.com/kby-ai/KYC-Verification-Demo-Android)
17 | #### 🙋‍♀️ Docker Hub - [Here](https://hub.docker.com/r/kbyai/face-liveness-detection)
18 | ```bash
19 | sudo docker pull kbyai/face-liveness-detection:latest
20 | sudo docker run -e LICENSE="xxxxx" -p 8080:8080 -p 9000:9000 kbyai/face-liveness-detection:latest
21 | ```
22 | # FaceLivenessDetection-Docker
23 | ## Overview
24 |
25 | This repository demonstrates an advanced `face liveness detection` technology implemented via a `Dockerized Flask API`.
26 | It includes features that allow for testing `face liveness detection` using both image files and `base64-encoded` images.
27 |
28 | > In this repo, we integrated `KBY-AI`'s `Face Liveness Detection` solution into `Linux Server SDK` by `docker container`.
29 | > We can customize the `SDK` to align with your specific requirements.
30 |
31 | ### ◾FaceSDK(Server) Details
32 | | 🔽 Face Liveness Detection | Face Recognition |
33 | |------------------|------------------|
34 | | Face Detection | Face Detection |
35 | | Face Liveness Detection | Face Recognition(Face Matching or Face Comparison) |
36 | | Pose Estimation | Pose Estimation |
37 | | 68 points Face Landmark Detection | 68 points Face Landmark Detection |
38 | | Face Quality Calculation | Face Quality Calculation |
39 | | Face Occlusion Detection | Face Occlusion Detection |
40 | | Eye Closure Detection | Eye Closure Detection |
41 | | Mouth Opening Check | Mouth Opening Check |
42 |
43 | ### ◾FaceSDK(Server) Product List
44 | | No. | Repository | SDK Details |
45 | |------------------|------------------|------------------|
46 | | ➡️ | [Face Liveness Detection - Linux](https://github.com/kby-ai/FaceLivenessDetection-Docker) | Face Liveness Detection |
47 | | 2 | [Face Liveness Detection - Windows](https://github.com/kby-ai/FaceLivenessDetection-Windows) | Face Liveness Detection |
48 | | 3 | [Face Liveness Detection - C#](https://github.com/kby-ai/FaceLivenessDetection-CSharp-.Net) | Face Liveness Detection |
49 | | 4 | [Face Recognition - Linux](https://github.com/kby-ai/FaceRecognition-Docker) | Face Recognition |
50 | | 5 | [Face Recognition - Windows](https://github.com/kby-ai/FaceRecognition-Windows) | Face Recognition |
51 | | 6 | [Face Recognition - C#](https://github.com/kby-ai/FaceRecognition-CSharp-.NET) | Face Recognition |
52 |
53 | > To get `Face SDK(mobile)`, please visit products [here](https://github.com/kby-ai/Product):
54 |
55 | ## Try the API
56 | ### Online Demo
57 | You can test the SDK using images from the following URL:
58 | `https://web.kby-ai.com`
59 |
60 | 
61 |
62 | ### Postman
63 | To test the `API`, you can use `Postman`. Here are the endpoints for testing:
64 | - Test with an image file: Send a `POST` request to `http://18.221.33.238:8080/check_liveness`.
65 | - Test with a `base64-encoded` image: Send a `POST` request to `http://18.221.33.238:8080/check_liveness_base64`.
66 |
67 | You can download the `Postman` collection to easily access and use these endpoints. [click here](https://github.com/kby-ai/FaceLivenessDetection-Docker/blob/main/postman/kby-ai-live.postman_collection.json)
68 |
69 | 
70 |
71 |
72 | ## SDK License
73 |
74 | This project uses `KBY-AI`'s `Face Liveness Detection` `Server SDK`, which requires a license per machine.
75 |
76 | - The code below shows how to use the license: https://github.com/kby-ai/FaceLivenessDetection-Docker/blob/6aafd08dba5093600008ec66df39f362e53f9bb8/app.py#L36-L48
77 |
78 | - To request the license, please provide us with the `machine code` obtained from the `getMachineCode` function.
79 |
80 | #### Please contact us:
81 | 🧙`Email:` contact@kby-ai.com
82 | 🧙`Telegram:` [@kbyaisupport](https://t.me/kbyaisupport)
83 | 🧙`WhatsApp:` [+19092802609](https://wa.me/+19092802609)
84 | 🧙`Discord:` [KBY-AI](https://discord.gg/vBUMRJJe)
85 | 🧙`Teams:` [KBY-AI](https://teams.live.com/l/invite/FBAYGB1-IlXkuQM3AY)
86 |
87 | ## How to run
88 |
89 | ### 1. System Requirements
90 | - CPU: 2 cores or more (Recommended: 8 cores)
91 | - RAM: 4 GB or more (Recommended: 8 GB)
92 | - HDD: 4 GB or more (Recommended: 8 GB)
93 | - OS: Ubuntu 20.04 or later
94 | - Dependency: OpenVINO™ Runtime (Version: 2022.3)
95 |
96 | ### 2. Setup and Test
97 | - Clone the project:
98 | ```bash
99 | git clone https://github.com/kby-ai/FaceLivenessDetection-Docker.git
100 | ```
101 | - Download the model from `Google Drive`: [click here](https://drive.google.com/file/d/1bYl0p5uHXuTQoETdbRwYLpd3huOqA3wY/view?usp=share_link)
102 | ```bash
103 | cd FaceLivenessDetection-Docker
104 |
105 | wget --load-cookies /tmp/cookies.txt "https://docs.google.com/uc?export=download&confirm=$(wget --quiet --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id=1bYl0p5uHXuTQoETdbRwYLpd3huOqA3wY' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\1\n/p')&id=1bYl0p5uHXuTQoETdbRwYLpd3huOqA3wY" -O data.zip && rm -rf /tmp/cookies.txt
106 |
107 | unzip data.zip
108 | ```
109 | - Build the `Docker` image:
110 | ```bash
111 | sudo docker build --pull --rm -f Dockerfile -t kby-ai-live:latest .
112 | ```
113 | - Run the `Docker` container:
114 | ```bash
115 | sudo docker run -v ./license.txt:/home/openvino/kby-ai-live/license.txt -p 8080:8080 kby-ai-live
116 | ```
117 | - Send us the `machine code` and then we will give you a `license key`.
118 |
119 | After that, update the `license.txt` file by overwriting the `license key` that you received. Then, run the `Docker` container again.
120 |
121 | 
122 |
123 | 
124 |
125 |
126 | - To test the API, you can use `Postman`. Here are the endpoints for testing:
127 |
128 | Test with an image file: Send a `POST` request to `http://{xx.xx.xx.xx}:8080/check_liveness`.
129 |
130 | Test with a `base64-encoded` image: Send a `POST` request to `http://{xx.xx.xx.xx}:8080/check_liveness_base64`.
131 |
132 | You can download the `Postman` collection to easily access and use these endpoints. [click here](https://github.com/kby-ai/FaceLivenessDetection-Docker/blob/main/postman/kby-ai-live.postman_collection.json)
133 |
134 | ### 3. Execute the Gradio demo
135 | - Set up Gradio
136 | Ensure that you have the necessary dependencies installed.
137 |
138 | `Gradio` requires `Python 3.6` or above.
139 |
140 | You can install `Gradio` using `pip` by running the following command:
141 | ```bash
142 | pip install gradio
143 | ```
144 | - Run the demo
145 | Run it using the following command:
146 | ```bash
147 | cd gradio
148 | python demo.py
149 | ```
150 | - You can test it via the following URL:
151 | `http://127.0.0.1:9000`
152 | ## About SDK
153 |
154 | ### 1. Initializing the SDK
155 |
156 | - Step One
157 |
158 | First, obtain the machine code for activation and request a license based on the `machine code`.
159 | ```python
160 | machineCode = getMachineCode()
161 | print("machineCode: ", machineCode.decode('utf-8'))
162 | ```
163 |
164 | - Step Two
165 |
166 | Next, activate the SDK using the received license.
167 | ```python
168 | setActivation(license.encode('utf-8'))
169 | ```
170 | If activation is successful, the return value will be `SDK_SUCCESS`. Otherwise, an error value will be returned.
171 |
172 | - Step Three
173 |
174 | After activation, call the initialization function of the SDK.
175 | ```python
176 | initSDK("data".encode('utf-8'))
177 | ```
178 | The first parameter is the path to the model.
179 |
180 | If initialization is successful, the return value will be `SDK_SUCCESS`. Otherwise, an error value will be returned.
181 |
182 | ### 2. Enum and Structure
183 | - SDK_ERROR
184 |
185 | This enumeration represents the return value of the `initSDK` and `setActivation` functions.
186 |
187 | | Feature| Value | Name |
188 | |------------------|------------------|------------------|
189 | | Successful activation or initialization | 0 | SDK_SUCCESS |
190 | | License key error | -1 | SDK_LICENSE_KEY_ERROR |
191 | | AppID error (Not used in Server SDK) | -2 | SDK_LICENSE_APPID_ERROR |
192 | | License expiration | -3 | SDK_LICENSE_EXPIRED |
193 | | Not activated | -4 | SDK_NO_ACTIVATED |
194 | | Failed to initialize SDK | -5 | SDK_INIT_ERROR |
195 |
196 | - FaceBox
197 |
198 | This structure represents the output of the face detection function.
199 |
200 | | Feature| Type | Name |
201 | |------------------|------------------|------------------|
202 | | Face rectangle | int | x1, y1, x2, y2 |
203 | | Liveness score (0 ~ 1) | float | liveness |
204 | | Face angles (-45 ~ 45) | float | yaw, roll, pitch |
205 | | Face quality (0 ~ 1) | float | face_quality |
206 | | Face luminance (0 ~ 255) | float | face_luminance |
207 | | Eye distance (pixels) | float | eye_dist |
208 | | Eye closure (0 ~ 1) | float | left_eye_closed, right_eye_closed |
209 | | Face occlusion (0 ~ 1) | float | face_occlusion |
210 | | Mouth opening (0 ~ 1) | float | mouth_opened |
211 | | 68 points facial landmark | float[] | landmarks_68 |
212 |
213 | > 68 points facial landmark
214 |
215 |
216 |
217 | ### 3. APIs
218 | - Face Detection
219 |
220 | The `Face SDK` provides a single API for detecting faces, performing `liveness detection`, determining `face orientation` (yaw, roll, pitch), assessing `face quality`, detecting `facial occlusion`, `eye closure`, `mouth opening`, and identifying `facial landmarks`.
221 |
222 | The function can be used as follows:
223 |
224 | ```python
225 | faceBoxes = (FaceBox * maxFaceCount)()
226 | faceCount = faceDetection(image_np, image_np.shape[1], image_np.shape[0], faceBoxes, maxFaceCount)
227 | ```
228 |
229 | This function requires 5 parameters.
230 | * The first parameter: the byte array of the `RGB` image buffer.
231 | * The second parameter: the width of the image.
232 | * The third parameter: the height of the image.
233 | * The fourth parameter: the `FaceBox` array allocated with `maxFaceCount` for storing the detected faces.
234 | * The fifth parameter: the count allocated for the maximum `FaceBox` objects.
235 |
236 | The function returns the count of detected faces.
237 |
238 | ### 4. Thresholds
239 | The default thresholds are as follows:
240 | https://github.com/kby-ai/FaceLivenessDetection-Docker/blob/1e89ec05f49d55807164a92d19abc5149054ce2a/app.py#L17-L29
241 |
242 |
--------------------------------------------------------------------------------