├── .gitignore
├── LICENSE
├── README.md
├── demo.py
├── face_mesh
│   ├── __init__.py
│   └── face_mesh.py
├── iris_landmark
│   ├── __init__.py
│   ├── iris_landmark.py
│   └── iris_landmark.tflite
└── utils
    ├── __init__.py
    └── cvfpscalc.py
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | pip-wheel-metadata/
24 | share/python-wheels/
25 | *.egg-info/
26 | .installed.cfg
27 | *.egg
28 | MANIFEST
29 |
30 | # PyInstaller
31 | # Usually these files are written by a python script from a template
32 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
33 | *.manifest
34 | *.spec
35 |
36 | # Installer logs
37 | pip-log.txt
38 | pip-delete-this-directory.txt
39 |
40 | # Unit test / coverage reports
41 | htmlcov/
42 | .tox/
43 | .nox/
44 | .coverage
45 | .coverage.*
46 | .cache
47 | nosetests.xml
48 | coverage.xml
49 | *.cover
50 | *.py,cover
51 | .hypothesis/
52 | .pytest_cache/
53 |
54 | # Translations
55 | *.mo
56 | *.pot
57 |
58 | # Django stuff:
59 | *.log
60 | local_settings.py
61 | db.sqlite3
62 | db.sqlite3-journal
63 |
64 | # Flask stuff:
65 | instance/
66 | .webassets-cache
67 |
68 | # Scrapy stuff:
69 | .scrapy
70 |
71 | # Sphinx documentation
72 | docs/_build/
73 |
74 | # PyBuilder
75 | target/
76 |
77 | # Jupyter Notebook
78 | .ipynb_checkpoints
79 |
80 | # IPython
81 | profile_default/
82 | ipython_config.py
83 |
84 | # pyenv
85 | .python-version
86 |
87 | # pipenv
88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
91 | # install all needed dependencies.
92 | #Pipfile.lock
93 |
94 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow
95 | __pypackages__/
96 |
97 | # Celery stuff
98 | celerybeat-schedule
99 | celerybeat.pid
100 |
101 | # SageMath parsed files
102 | *.sage.py
103 |
104 | # Environments
105 | .env
106 | .venv
107 | env/
108 | venv/
109 | ENV/
110 | env.bak/
111 | venv.bak/
112 |
113 | # Spyder project settings
114 | .spyderproject
115 | .spyproject
116 |
117 | # Rope project settings
118 | .ropeproject
119 |
120 | # mkdocs documentation
121 | /site
122 |
123 | # mypy
124 | .mypy_cache/
125 | .dmypy.json
126 | dmypy.json
127 |
128 | # Pyre type checker
129 | .pyre/
130 |
131 | # bat
132 | *.bat
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright [yyyy] [name of copyright owner]
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # ⚠Attention⚠
2 | As of MediaPipe 0.8.8, a refine_landmarks option has been added to FaceMesh.
3 | Enabling this option lets FaceMesh detect the irises at the same time as the face landmarks.
4 | Unless you have a specific reason not to, we recommend following the FaceMesh sample in [Kazuhito00/mediapipe-python-sample](https://github.com/Kazuhito00/mediapipe-python-sample) instead.
5 |
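A minimal sketch of that newer route, assuming MediaPipe 0.8.8 or later and a local image file `sample.jpg` (both assumptions, not part of this repository):

```python
import cv2 as cv
import mediapipe as mp

# With refine_landmarks=True, FaceMesh appends the iris landmarks
# (indices 468-477) to the 468 face mesh landmarks.
face_mesh = mp.solutions.face_mesh.FaceMesh(
    max_num_faces=1,
    refine_landmarks=True,
    min_detection_confidence=0.7,
    min_tracking_confidence=0.7,
)

image = cv.imread('sample.jpg')
results = face_mesh.process(cv.cvtColor(image, cv.COLOR_BGR2RGB))
if results.multi_face_landmarks is not None:
    for face_landmarks in results.multi_face_landmarks:
        for landmark in face_landmarks.landmark[468:478]:
            print(landmark.x, landmark.y, landmark.z)  # normalized iris coordinates
```
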
6 | # iris-detection-using-py-mediapipe
7 | A demo that runs MediaPipe Iris (iris detection) in Python.
8 | It detects face landmarks with MediaPipe Face Mesh, then detects the irises using "[iris_landmark.tflite](https://github.com/google/mediapipe/blob/master/mediapipe/modules/iris_landmark/iris_landmark.tflite)".
9 |
10 | 
11 | # Requirements
12 | * mediapipe 0.8.1 or later
13 | * OpenCV 3.4.2 or later
14 | * TensorFlow 2.3.0 or later
15 |
16 | mediapipe can be installed via pip.
17 | ```bash
18 | pip install mediapipe
19 | ```
20 |
21 | # Demo
22 | Run the demo as follows.
23 | ```bash
24 | python demo.py
25 | ```
26 | The following options can be specified when running the demo.
27 |
28 | * --device
29 | Camera device number
30 | Default: 0
31 | * --width
32 | Capture width
33 | Default: 960
34 | * --height
35 | Capture height
36 | Default: 540
37 | * --max_num_faces
38 | Maximum number of faces to detect
39 | Default: 1
40 | * --min_detection_confidence
41 | Detection confidence threshold
42 | Default: 0.7
43 | * --min_tracking_confidence
44 | Tracking confidence threshold
45 | Default: 0.7
46 |
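For example, to capture from camera 1 and track up to two faces (the values here are illustrative, using the flags listed above):

```bash
python demo.py --device 1 --max_num_faces 2 --min_detection_confidence 0.7
```
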
47 | # ToDo
48 | - [ ] Add an option to estimate depth from the focal length (see the sketch below)
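
This is not implemented in the repository; below is a rough sketch under the assumptions that the camera's focal length in pixels is known and that the average human iris diameter is about 11.7 mm (the figure MediaPipe Iris relies on):

```python
def estimate_depth_mm(focal_length_px, iris_diameter_px, real_iris_mm=11.7):
    # Pinhole-camera similar triangles: depth / real size = focal length / pixel size
    return focal_length_px * real_iris_mm / iris_diameter_px

# Example: a 1000 px focal length and an iris radius of 12 px from demo.py
# give a 24 px diameter, i.e. roughly 490 mm from the camera.
print(estimate_depth_mm(1000, 24))  # 487.5
```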
49 |
50 | # Reference
51 | * [MediaPipe](https://github.com/google/mediapipe)
52 |
53 | # Author
54 | Kazuhito Takahashi(https://twitter.com/KzhtTkhs)
55 |
56 | # License
57 | iris-detection-using-py-mediapipe is under [Apache-2.0 License](LICENSE).
58 |
59 | The photographs of the woman are from the free stock photo site [Pakutaso](https://www.pakutaso.com).
60 |
--------------------------------------------------------------------------------
/demo.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 | import copy
4 | import argparse
5 |
6 | import cv2 as cv
7 | import numpy as np
8 |
9 | from utils import CvFpsCalc
10 | from face_mesh.face_mesh import FaceMesh
11 | from iris_landmark.iris_landmark import IrisLandmark
12 |
13 |
14 | def get_args():
15 | parser = argparse.ArgumentParser()
16 |
17 | parser.add_argument("--device", type=int, default=0)
18 | parser.add_argument("--width", help='cap width', type=int, default=960)
19 | parser.add_argument("--height", help='cap height', type=int, default=540)
20 |
21 | parser.add_argument("--max_num_faces", type=int, default=1)
22 | parser.add_argument("--min_detection_confidence",
23 | help='min_detection_confidence',
24 | type=float,
25 | default=0.7)
26 | parser.add_argument("--min_tracking_confidence",
27 | help='min_tracking_confidence',
28 |                         type=float,
29 | default=0.7)
30 |
31 | args = parser.parse_args()
32 |
33 | return args
34 |
35 |
36 | def main():
37 |     # Arguments ###############################################################
38 | args = get_args()
39 |
40 | cap_device = args.device
41 | cap_width = args.width
42 | cap_height = args.height
43 |
44 | max_num_faces = args.max_num_faces
45 | min_detection_confidence = args.min_detection_confidence
46 | min_tracking_confidence = args.min_tracking_confidence
47 |
48 |     # Prepare camera ##########################################################
49 | cap = cv.VideoCapture(cap_device)
50 | cap.set(cv.CAP_PROP_FRAME_WIDTH, cap_width)
51 | cap.set(cv.CAP_PROP_FRAME_HEIGHT, cap_height)
52 |
53 |     # Load models ##############################################################
54 | face_mesh = FaceMesh(
55 | max_num_faces,
56 | min_detection_confidence,
57 | min_tracking_confidence,
58 | )
59 | iris_detector = IrisLandmark()
60 |
61 |     # FPS measurement module ###################################################
62 | cvFpsCalc = CvFpsCalc(buffer_len=10)
63 |
64 | while True:
65 | display_fps = cvFpsCalc.get()
66 |
67 |         # Camera capture ######################################################
68 | ret, image = cap.read()
69 | if not ret:
70 | break
71 |         image = cv.flip(image, 1)  # Mirror display
72 | debug_image = copy.deepcopy(image)
73 |
74 |         # Run detection #######################################################
75 |         # Face Mesh detection
76 | face_results = face_mesh(image)
77 | for face_result in face_results:
78 |             # Compute bounding boxes around the eyes
79 | left_eye, right_eye = face_mesh.calc_around_eye_bbox(face_result)
80 |
81 |             # Iris detection
82 | left_iris, right_iris = detect_iris(image, iris_detector, left_eye,
83 | right_eye)
84 |
85 |             # Compute the minimum enclosing circle of each iris
86 |             left_center, left_radius = calc_min_enclosing_circle(left_iris)
87 |             right_center, right_radius = calc_min_enclosing_circle(right_iris)
88 |
89 |             # Draw debug info
90 | debug_image = draw_debug_image(
91 | debug_image,
92 | left_iris,
93 | right_iris,
94 | left_center,
95 | left_radius,
96 | right_center,
97 | right_radius,
98 | )
99 |
100 | cv.putText(debug_image, "FPS:" + str(display_fps), (10, 30),
101 | cv.FONT_HERSHEY_SIMPLEX, 1.0, (0, 255, 0), 2, cv.LINE_AA)
102 |
103 |         # Key handling (ESC: quit) ############################################
104 | key = cv.waitKey(1)
105 | if key == 27: # ESC
106 | break
107 |
108 |         # Show frame ##########################################################
109 | cv.imshow('Iris(tflite) Demo', debug_image)
110 |
111 | cap.release()
112 | cv.destroyAllWindows()
113 |
114 | return
115 |
116 |
117 | def detect_iris(image, iris_detector, left_eye, right_eye):
118 | image_width, image_height = image.shape[1], image.shape[0]
119 | input_shape = iris_detector.get_input_shape()
120 |
121 |     # Left eye
122 |     # Crop the image around the eye
123 | left_eye_x1 = max(left_eye[0], 0)
124 | left_eye_y1 = max(left_eye[1], 0)
125 | left_eye_x2 = min(left_eye[2], image_width)
126 | left_eye_y2 = min(left_eye[3], image_height)
127 | left_eye_image = copy.deepcopy(image[left_eye_y1:left_eye_y2,
128 | left_eye_x1:left_eye_x2])
129 |     # Iris detection
130 | eye_contour, iris = iris_detector(left_eye_image)
131 |     # Convert coordinates from relative to absolute
132 | left_iris = calc_iris_point(left_eye, eye_contour, iris, input_shape)
133 |
134 |     # Right eye
135 |     # Crop the image around the eye
136 | right_eye_x1 = max(right_eye[0], 0)
137 | right_eye_y1 = max(right_eye[1], 0)
138 | right_eye_x2 = min(right_eye[2], image_width)
139 | right_eye_y2 = min(right_eye[3], image_height)
140 | right_eye_image = copy.deepcopy(image[right_eye_y1:right_eye_y2,
141 | right_eye_x1:right_eye_x2])
142 |     # Iris detection
143 | eye_contour, iris = iris_detector(right_eye_image)
144 |     # Convert coordinates from relative to absolute
145 | right_iris = calc_iris_point(right_eye, eye_contour, iris, input_shape)
146 |
147 | return left_iris, right_iris
148 |
149 |
150 | def calc_iris_point(eye_bbox, eye_contour, iris, input_shape):
151 | iris_list = []
152 | for index in range(5):
153 | point_x = int(iris[index * 3] *
154 | ((eye_bbox[2] - eye_bbox[0]) / input_shape[0]))
155 | point_y = int(iris[index * 3 + 1] *
156 | ((eye_bbox[3] - eye_bbox[1]) / input_shape[1]))
157 | point_x += eye_bbox[0]
158 | point_y += eye_bbox[1]
159 |
160 | iris_list.append((point_x, point_y))
161 |
162 | return iris_list
163 |
164 |
165 | def calc_min_enclosing_circle(landmark_list):
166 | center, radius = cv.minEnclosingCircle(np.array(landmark_list))
167 | center = (int(center[0]), int(center[1]))
168 | radius = int(radius)
169 |
170 | return center, radius
171 |
172 |
173 | def draw_debug_image(
174 | debug_image,
175 | left_iris,
176 | right_iris,
177 | left_center,
178 | left_radius,
179 | right_center,
180 | right_radius,
181 | ):
182 |     # Iris: enclosing circle
183 | cv.circle(debug_image, left_center, left_radius, (0, 255, 0), 2)
184 | cv.circle(debug_image, right_center, right_radius, (0, 255, 0), 2)
185 |
186 |     # Iris: landmarks
187 | for point in left_iris:
188 | cv.circle(debug_image, (point[0], point[1]), 1, (0, 0, 255), 2)
189 | for point in right_iris:
190 | cv.circle(debug_image, (point[0], point[1]), 1, (0, 0, 255), 2)
191 |
192 |     # Iris: radius
193 | cv.putText(debug_image, 'r:' + str(left_radius) + 'px',
194 | (left_center[0] + int(left_radius * 1.5),
195 | left_center[1] + int(left_radius * 0.5)),
196 | cv.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 1)
197 | cv.putText(debug_image, 'r:' + str(right_radius) + 'px',
198 | (right_center[0] + int(right_radius * 1.5),
199 | right_center[1] + int(right_radius * 0.5)),
200 | cv.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 1)
201 |
202 | return debug_image
203 |
204 |
205 | if __name__ == '__main__':
206 | main()
207 |
--------------------------------------------------------------------------------
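As a quick sanity check of the relative-to-absolute mapping in calc_iris_point, a sketch with made-up values (the bbox, input shape, and iris values below are hypothetical; run it from the repository root):

```python
from demo import calc_iris_point

# Hypothetical inputs: a 64x64 eye crop whose top-left corner sits at
# (100, 100) in the full frame, a 64x64 model input, and five identical
# raw iris points at (32, 32) in crop pixels, flattened as (x, y, z).
eye_bbox = [100, 100, 164, 164]
input_shape = [64, 64]
iris = [32.0, 32.0, 0.0] * 5
eye_contour = []  # not used by calc_iris_point

print(calc_iris_point(eye_bbox, eye_contour, iris, input_shape))
# [(132, 132), (132, 132), (132, 132), (132, 132), (132, 132)]
```
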
/face_mesh/__init__.py:
--------------------------------------------------------------------------------
1 | # FACE MESH
--------------------------------------------------------------------------------
/face_mesh/face_mesh.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 | import cv2 as cv
4 | import numpy as np
5 | import mediapipe as mp
6 |
7 |
8 | class FaceMesh(object):
9 | def __init__(
10 | self,
11 | max_num_faces=1,
12 | min_detection_confidence=0.7,
13 | min_tracking_confidence=0.7,
14 | ):
15 | mp_face_mesh = mp.solutions.face_mesh
16 | self._face_mesh = mp_face_mesh.FaceMesh(
17 | max_num_faces=max_num_faces,
18 | min_detection_confidence=min_detection_confidence,
19 | min_tracking_confidence=min_tracking_confidence,
20 | )
21 |
22 | def __call__(
23 | self,
24 | image,
25 | ):
26 |         # Inference
27 | image = cv.cvtColor(image, cv.COLOR_BGR2RGB)
28 | results = self._face_mesh.process(image)
29 |
30 |         # Convert X and Y from relative to absolute coordinates,
31 |         # storing each landmark as [X, Y, Z, visibility, presence]
32 | face_mesh_results = []
33 | if results.multi_face_landmarks is not None:
34 | for face_landmarks in results.multi_face_landmarks:
35 | face_mesh_results.append(
36 | self._calc_landmarks(image, face_landmarks.landmark))
37 | return face_mesh_results
38 |
39 | def _calc_landmarks(self, image, landmarks):
40 | image_width, image_height = image.shape[1], image.shape[0]
41 |
42 | landmark_list = []
43 | for _, landmark in enumerate(landmarks):
44 | landmark_x = min(int(landmark.x * image_width), image_width - 1)
45 | landmark_y = min(int(landmark.y * image_height), image_height - 1)
46 |
47 | landmark_list.append((landmark_x, landmark_y, landmark.z,
48 | landmark.visibility, landmark.presence))
49 | return landmark_list
50 |
51 | def _calc_bounding_rect(self, landmarks):
52 | landmark_array = np.empty((0, 2), int)
53 |
54 | for _, landmark in enumerate(landmarks):
55 | landmark_x = int(landmark[0])
56 | landmark_y = int(landmark[1])
57 |
58 | landmark_point = [np.array((landmark_x, landmark_y))]
59 | landmark_array = np.append(landmark_array, landmark_point, axis=0)
60 |
61 | x, y, w, h = cv.boundingRect(landmark_array)
62 |
63 | return [x, y, x + w, y + h]
64 |
65 | def get_eye_landmarks(self, landmarks):
66 |         # Get the coordinate sequences of the eye contours
67 |
68 | left_eye_landmarks = []
69 | right_eye_landmarks = []
70 |
71 | if len(landmarks) > 0:
72 |             # Reference: https://github.com/tensorflow/tfjs-models/blob/master/facemesh/mesh_map.jpg
73 |             # Left eye
74 | left_eye_landmarks.append((landmarks[133][0], landmarks[133][1]))
75 | left_eye_landmarks.append((landmarks[173][0], landmarks[173][1]))
76 | left_eye_landmarks.append((landmarks[157][0], landmarks[157][1]))
77 | left_eye_landmarks.append((landmarks[158][0], landmarks[158][1]))
78 | left_eye_landmarks.append((landmarks[159][0], landmarks[159][1]))
79 | left_eye_landmarks.append((landmarks[160][0], landmarks[160][1]))
80 | left_eye_landmarks.append((landmarks[161][0], landmarks[161][1]))
81 | left_eye_landmarks.append((landmarks[246][0], landmarks[246][1]))
82 | left_eye_landmarks.append((landmarks[163][0], landmarks[163][1]))
83 | left_eye_landmarks.append((landmarks[144][0], landmarks[144][1]))
84 | left_eye_landmarks.append((landmarks[145][0], landmarks[145][1]))
85 | left_eye_landmarks.append((landmarks[153][0], landmarks[153][1]))
86 | left_eye_landmarks.append((landmarks[154][0], landmarks[154][1]))
87 | left_eye_landmarks.append((landmarks[155][0], landmarks[155][1]))
88 |
89 |             # Right eye
90 | right_eye_landmarks.append((landmarks[362][0], landmarks[362][1]))
91 | right_eye_landmarks.append((landmarks[398][0], landmarks[398][1]))
92 | right_eye_landmarks.append((landmarks[384][0], landmarks[384][1]))
93 | right_eye_landmarks.append((landmarks[385][0], landmarks[385][1]))
94 | right_eye_landmarks.append((landmarks[386][0], landmarks[386][1]))
95 | right_eye_landmarks.append((landmarks[387][0], landmarks[387][1]))
96 | right_eye_landmarks.append((landmarks[388][0], landmarks[388][1]))
97 | right_eye_landmarks.append((landmarks[466][0], landmarks[466][1]))
98 | right_eye_landmarks.append((landmarks[390][0], landmarks[390][1]))
99 | right_eye_landmarks.append((landmarks[373][0], landmarks[373][1]))
100 | right_eye_landmarks.append((landmarks[374][0], landmarks[374][1]))
101 | right_eye_landmarks.append((landmarks[380][0], landmarks[380][1]))
102 | right_eye_landmarks.append((landmarks[381][0], landmarks[381][1]))
103 | right_eye_landmarks.append((landmarks[382][0], landmarks[382][1]))
104 |
105 | return left_eye_landmarks, right_eye_landmarks
106 |
107 | def calc_eye_bbox(self, landmarks):
108 |         # Get the bounding box tightly enclosing each eye
109 |
110 | left_eye_lm, right_eye_lm = self.get_eye_landmarks(landmarks)
111 |
112 | left_eye_bbox = self._calc_bounding_rect(left_eye_lm)
113 | right_eye_bbox = self._calc_bounding_rect(right_eye_lm)
114 |
115 | return left_eye_bbox, right_eye_bbox
116 |
117 | def calc_around_eye_bbox(self, landmarks, around_ratio=0.5):
118 |         # Get an expanded bounding box around each eye
119 |
120 | left_eye_bbox, right_eye_bbox = self.calc_eye_bbox(landmarks)
121 |
122 | left_eye_bbox = self._calc_around_eye(left_eye_bbox, around_ratio)
123 | right_eye_bbox = self._calc_around_eye(right_eye_bbox, around_ratio)
124 |
125 | return left_eye_bbox, right_eye_bbox
126 |
127 | def _calc_around_eye(self, bbox, around_ratio=0.5):
128 | x1, y1, x2, y2 = bbox
129 | x = x1
130 | y = y1
131 | w = x2 - x1
132 | h = y2 - y1
133 |
134 | cx = int(x + (w / 2))
135 | cy = int(y + (h / 2))
136 | square_length = max(w, h)
137 | x = int(cx - (square_length / 2))
138 | y = int(cy - (square_length / 2))
139 | w = square_length
140 | h = square_length
141 |
142 |         # Expand the square by around_ratio on each side
143 | x = int(x - (square_length * around_ratio))
144 | y = int(y - (square_length * around_ratio))
145 | w = int(square_length * (1 + (around_ratio * 2)))
146 | h = int(square_length * (1 + (around_ratio * 2)))
147 |
148 | return [x, y, x + w, y + h]
149 |
--------------------------------------------------------------------------------
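A minimal sketch of using this class on its own, mirroring the calls demo.py makes (`sample.jpg` is a hypothetical input image):

```python
import cv2 as cv
from face_mesh.face_mesh import FaceMesh

face_mesh = FaceMesh(max_num_faces=1)
image = cv.imread('sample.jpg')

# One landmark list per detected face: (x_px, y_px, z, visibility, presence)
for face_result in face_mesh(image):
    left_eye, right_eye = face_mesh.calc_around_eye_bbox(face_result)
    print(left_eye, right_eye)  # [x1, y1, x2, y2] boxes around each eye
```
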
/iris_landmark/__init__.py:
--------------------------------------------------------------------------------
1 | # IRIS LANDMARK
--------------------------------------------------------------------------------
/iris_landmark/iris_landmark.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 | import cv2 as cv
4 | import numpy as np
5 | import tensorflow as tf
6 |
7 |
8 | class IrisLandmark(object):
9 | def __init__(
10 | self,
11 | model_path='iris_landmark/iris_landmark.tflite',
12 | num_threads=1,
13 | ):
14 | self._interpreter = tf.lite.Interpreter(model_path=model_path,
15 | num_threads=num_threads)
16 | self._interpreter.allocate_tensors()
17 | self._input_details = self._interpreter.get_input_details()
18 | self._output_details = self._interpreter.get_output_details()
19 |
20 | def __call__(
21 | self,
22 | image,
23 | ):
24 | input_shape = self._input_details[0]['shape']
25 |
26 |         # Normalize and resize
27 | img = cv.cvtColor(image, cv.COLOR_BGR2RGB)
28 | img = img / 255.0
29 | img_resized = tf.image.resize(img, [input_shape[1], input_shape[2]],
30 | method='bicubic',
31 | preserve_aspect_ratio=False)
32 | img_input = img_resized.numpy()
33 | img_input = (img_input - 0.5) / 0.5
34 |
35 | reshape_img = img_input.reshape(1, input_shape[1], input_shape[2],
36 | input_shape[3])
37 | tensor = tf.convert_to_tensor(reshape_img, dtype=tf.float32)
38 |
39 |         # Run inference
40 | input_details_tensor_index = self._input_details[0]['index']
41 | self._interpreter.set_tensor(input_details_tensor_index, tensor)
42 | self._interpreter.invoke()
43 |
44 |         # Get inference results
45 | output_details_tensor_index0 = self._output_details[0]['index']
46 | output_details_tensor_index1 = self._output_details[1]['index']
47 | eye_contour = self._interpreter.get_tensor(
48 | output_details_tensor_index0)
49 | iris = self._interpreter.get_tensor(output_details_tensor_index1)
50 |
51 | return np.squeeze(eye_contour), np.squeeze(iris)
52 |
53 | def get_input_shape(self):
54 | input_shape = self._input_details[0]['shape']
55 | return [input_shape[1], input_shape[2]]
--------------------------------------------------------------------------------
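A minimal usage sketch, assuming the model file sits at its default path and `eye_crop.jpg` is a hypothetical BGR crop around one eye (demo.py produces such crops in detect_iris):

```python
import cv2 as cv
from iris_landmark.iris_landmark import IrisLandmark

iris_detector = IrisLandmark()  # loads iris_landmark/iris_landmark.tflite

eye_image = cv.imread('eye_crop.jpg')
eye_contour, iris = iris_detector(eye_image)

# iris holds 5 landmarks flattened as (x, y, z) in model-input pixels;
# demo.py's calc_iris_point maps them back into the original frame.
print(iris.reshape(-1, 3))  # 5 rows of (x, y, z)
```
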
/iris_landmark/iris_landmark.tflite:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Kazuhito00/iris-detection-using-py-mediapipe/85ead970598017967937d3f5d7ffe6238aa3fe9b/iris_landmark/iris_landmark.tflite
--------------------------------------------------------------------------------
/utils/__init__.py:
--------------------------------------------------------------------------------
1 | from utils.cvfpscalc import CvFpsCalc
--------------------------------------------------------------------------------
/utils/cvfpscalc.py:
--------------------------------------------------------------------------------
1 | from collections import deque
2 | import cv2 as cv
3 |
4 |
5 | class CvFpsCalc(object):
6 | def __init__(self, buffer_len=1):
7 | self._start_tick = cv.getTickCount()
8 | self._freq = 1000.0 / cv.getTickFrequency()
9 | self._difftimes = deque(maxlen=buffer_len)
10 |
11 | def get(self):
12 | current_tick = cv.getTickCount()
13 | different_time = (current_tick - self._start_tick) * self._freq
14 | self._start_tick = current_tick
15 |
16 | self._difftimes.append(different_time)
17 |
18 | fps = 1000.0 / (sum(self._difftimes) / len(self._difftimes))
19 | fps_rounded = round(fps, 2)
20 |
21 | return fps_rounded
22 |
--------------------------------------------------------------------------------
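A minimal usage sketch, matching how demo.py drives this class (the capture source is an assumption):

```python
import cv2 as cv
from utils import CvFpsCalc

fps_calc = CvFpsCalc(buffer_len=10)  # average over the last 10 frames
cap = cv.VideoCapture(0)

while True:
    fps = fps_calc.get()  # call once per loop iteration
    ret, frame = cap.read()
    if not ret or cv.waitKey(1) == 27:  # ESC to quit
        break
    cv.putText(frame, "FPS:" + str(fps), (10, 30),
               cv.FONT_HERSHEY_SIMPLEX, 1.0, (0, 255, 0), 2, cv.LINE_AA)
    cv.imshow('frame', frame)

cap.release()
cv.destroyAllWindows()
```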