├── .gitignore
├── LICENSE
├── README.md
├── README.template
├── augmentor.py
├── config.py
├── data
├── lfw_val_triplets.json
├── pairs.txt
└── people.txt
├── data_generator.py
├── demo.py
├── images
├── 0_a_image.png
├── 0_n_image.png
├── 0_p_image.png
├── 1_a_image.png
├── 1_n_image.png
├── 1_p_image.png
├── 2_a_image.png
├── 2_n_image.png
├── 2_p_image.png
├── 3_a_image.png
├── 3_n_image.png
├── 3_p_image.png
├── 4_a_image.png
├── 4_n_image.png
├── 4_p_image.png
├── 5_a_image.png
├── 5_n_image.png
├── 5_p_image.png
├── 6_a_image.png
├── 6_n_image.png
├── 6_p_image.png
├── 7_a_image.png
├── 7_n_image.png
├── 7_p_image.png
├── 8_a_image.png
├── 8_n_image.png
├── 8_p_image.png
├── 9_a_image.png
├── 9_n_image.png
├── 9_p_image.png
├── CelebA.png
├── imgaug_after_0.png
├── imgaug_after_1.png
├── imgaug_after_10.png
├── imgaug_after_11.png
├── imgaug_after_12.png
├── imgaug_after_13.png
├── imgaug_after_14.png
├── imgaug_after_15.png
├── imgaug_after_16.png
├── imgaug_after_17.png
├── imgaug_after_18.png
├── imgaug_after_19.png
├── imgaug_after_2.png
├── imgaug_after_20.png
├── imgaug_after_21.png
├── imgaug_after_22.png
├── imgaug_after_23.png
├── imgaug_after_24.png
├── imgaug_after_25.png
├── imgaug_after_26.png
├── imgaug_after_27.png
├── imgaug_after_28.png
├── imgaug_after_29.png
├── imgaug_after_3.png
├── imgaug_after_4.png
├── imgaug_after_5.png
├── imgaug_after_6.png
├── imgaug_after_7.png
├── imgaug_after_8.png
├── imgaug_after_9.png
├── imgaug_before_0.png
├── imgaug_before_1.png
├── imgaug_before_10.png
├── imgaug_before_11.png
├── imgaug_before_12.png
├── imgaug_before_13.png
├── imgaug_before_14.png
├── imgaug_before_15.png
├── imgaug_before_16.png
├── imgaug_before_17.png
├── imgaug_before_18.png
├── imgaug_before_19.png
├── imgaug_before_2.png
├── imgaug_before_20.png
├── imgaug_before_21.png
├── imgaug_before_22.png
├── imgaug_before_23.png
├── imgaug_before_24.png
├── imgaug_before_25.png
├── imgaug_before_26.png
├── imgaug_before_27.png
├── imgaug_before_28.png
├── imgaug_before_29.png
├── imgaug_before_3.png
├── imgaug_before_4.png
├── imgaug_before_5.png
├── imgaug_before_6.png
├── imgaug_before_7.png
├── imgaug_before_8.png
├── imgaug_before_9.png
├── model.png
├── sample_a_0.jpg
├── sample_a_1.jpg
├── sample_a_2.jpg
├── sample_a_3.jpg
├── sample_a_4.jpg
├── sample_a_5.jpg
├── sample_a_6.jpg
├── sample_a_7.jpg
├── sample_a_8.jpg
├── sample_a_9.jpg
├── sample_n_0.jpg
├── sample_n_1.jpg
├── sample_n_2.jpg
├── sample_n_3.jpg
├── sample_n_4.jpg
├── sample_n_5.jpg
├── sample_n_6.jpg
├── sample_n_7.jpg
├── sample_n_8.jpg
├── sample_n_9.jpg
├── sample_p_0.jpg
├── sample_p_1.jpg
├── sample_p_2.jpg
├── sample_p_3.jpg
├── sample_p_4.jpg
├── sample_p_5.jpg
├── sample_p_6.jpg
├── sample_p_7.jpg
├── sample_p_8.jpg
├── sample_p_9.jpg
└── summary.jpg
├── inference.py
├── lfw_eval.py
├── model.py
├── model.svg
├── pre_process.py
├── requirements.txt
├── result.json
├── start_train.sh
├── template.py
├── train.py
├── train_eval.py
├── triplets.py
└── utils.py
/.gitignore:
--------------------------------------------------------------------------------
1 | .idea
2 | __pycache__/
3 | logs/
4 | models/
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright [yyyy] [name of copyright owner]
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # FaceNet
2 |
3 | 这是 FaceNet 的Keras实现 [FaceNet: A Unified Embedding for Face Recognition and Clustering](https://arxiv.org/abs/1503.03832).
4 |
5 | ## 依赖项
6 | - [NumPy](http://docs.scipy.org/doc/numpy-1.10.1/user/install.html)
7 | - [Tensorflow](https://www.tensorflow.org/versions/r0.8/get_started/os_setup.html)
8 | - [Keras](https://keras.io/#installation)
9 | - [OpenCV](https://opencv-python-tutroals.readthedocs.io/en/latest/)
10 |
11 | ## 数据集
12 |
13 | CelebFaces Attributes Dataset (CelebA) 是一个大型的人脸数据集,有10,177个身份和202,599张人脸图像。
14 |
15 | 
16 |
17 | 按照 [说明](http://mmlab.ie.cuhk.edu.hk/projects/CelebA.html) 下载 CelebFaces Attributes (CelebA) 数据集.
18 |
19 | ## 模型结构
20 | 
21 |
22 | ## 工作流程
23 | 处理单个输入图像的工作流程如下:
24 |
25 | 1. 人脸检测:使用 Dlib 中预先训练的模型检测面部。
26 | 2. 人脸校准:使用 Dlib 的实时姿势估计与 OpenCV 的仿射变换来尝试使眼睛和下唇在每个图像上出现在相同位置。
27 | 3. 卷积网络:使用深度神经网络把人脸图片映射为 128 维单位超球面上的一个点。
28 |
29 | 
30 | [图片来源](https://cmusatyalab.github.io/openface/)
31 |
32 | ## 预训练模型
33 |
34 | 下载预训练模型,放在 models 目录下:
35 |
36 | 1. Dlib 人脸校准模型 [shape_predictor_5_face_landmarks.dat.bz2](http://dlib.net/files/shape_predictor_5_face_landmarks.dat.bz2)
37 | 2. FaceNet 人脸识别模型 [model.10-0.0156.hdf5](https://github.com/foamliu/FaceNet/releases/download/v1.0/model.10-0.0156.hdf5)
38 |
39 | ## 性能评估
40 |
41 | 使用 Labeled Faces in the Wild (LFW) 数据集做性能评估:
42 |
43 | - 13233 人脸图片
44 | - 5749 人物身份
45 | - 1680 人有两张以上照片
46 |
47 | ### 准备数据
48 | 下载 [LFW database](http://vis-www.cs.umass.edu/lfw/lfw-funneled.tgz) 放在 data 目录下:
49 |
50 | ```bash
51 | $ wget http://vis-www.cs.umass.edu/lfw/lfw-funneled.tgz
52 | $ tar -xvf lfw-funneled.tgz
53 | $ wget http://vis-www.cs.umass.edu/lfw/pairs.txt
54 | $ wget http://vis-www.cs.umass.edu/lfw/people.txt
55 | ```
56 |
57 | ### 评估脚本
58 | ```bash
59 | $ python lfw_eval.py
60 | ```
61 |
62 | ### 测得结果
63 | 准确度: **89.27 %**.
64 |
65 | ## 如何使用
66 | ### 数据预处理
67 | 提取训练图像:
68 | ```bash
69 | $ python pre_process.py
70 | ```
71 | 总共 202,599张人脸图像中,5600张无法被 dlib 标定。因此 202599 - 5600 = 196999 张被用于训练。
72 |
73 | ### 训练
74 | ```bash
75 | $ python train.py
76 | ```
77 |
78 | 要想可视化训练过程,执行下面命令:
79 | ```bash
80 | $ tensorboard --logdir path_to_current_dir/logs
81 | ```
82 |
83 | ### DEMO
84 |
85 | ```bash
86 | $ python demo.py
87 | ```
88 |
89 | 正(P) | 欧式距离 | 锚(A) | 欧式距离 | 反(N) |
90 | |---|---|---|---|---|
91 | ||0.1716||1.6495||
92 | |1.2839|---|1.1502|---|1.1636|
93 | ||0.3566||0.9795||
94 | |1.6029|---|1.5733|---|1.2582|
95 | ||0.7500||1.2708||
96 | |1.4815|---|1.0065|---|1.7432|
97 | ||0.2974||1.2198||
98 | |2.0759|---|1.6838|---|1.3330|
99 | ||0.3072||1.2609||
100 | |0.5769|---|0.7416|---|0.8989|
101 | ||0.3422||0.4381||
102 | |1.4096|---|1.7690|---|1.0634|
103 | ||0.5896||1.3287||
104 | |1.7525|---|1.5093|---|0.9600|
105 | ||0.5894||1.4106||
106 | |1.5781|---|0.7706|---|1.7681|
107 | ||0.6818||0.8294||
108 | |1.1007|---|0.8181|---|1.1559|
109 | ||0.3873||0.9675||
110 |
111 |
112 | ## 附录
113 |
114 | ### 样本数据
115 | 执行下面命令查看样本数据:
116 | ```bash
117 | $ python data_generator.py
118 | ```
119 | 正(P) | 锚(A) | 反(N) |
120 | |---|---|---|
121 | ||||
122 | ||||
123 | ||||
124 | ||||
125 | ||||
126 | ||||
127 | ||||
128 | ||||
129 | ||||
130 | ||||
131 |
132 |
133 | ### 数据增强
134 | 执行下面命令查看数据增强效果:
135 | ```bash
136 | $ python augmentor.py
137 | ```
138 | 之前 | 之后 | 之前 | 之后 | 之前 | 之后 |
139 | |---|---|---|---|---|---|
140 | |||||||
141 | |||||||
142 | |||||||
143 | |||||||
144 | |||||||
145 | |||||||
146 | |||||||
147 | |||||||
148 | |||||||
149 | |||||||
150 |
--------------------------------------------------------------------------------
/README.template:
--------------------------------------------------------------------------------
1 | # FaceNet
2 |
3 | 这是 FaceNet 的Keras实现 [FaceNet: A Unified Embedding for Face Recognition and Clustering](https://arxiv.org/abs/1503.03832).
4 |
5 | ## 依赖项
6 | - [NumPy](http://docs.scipy.org/doc/numpy-1.10.1/user/install.html)
7 | - [Tensorflow](https://www.tensorflow.org/versions/r0.8/get_started/os_setup.html)
8 | - [Keras](https://keras.io/#installation)
9 | - [OpenCV](https://opencv-python-tutroals.readthedocs.io/en/latest/)
10 |
11 | ## 数据集
12 |
13 | CelebFaces Attributes Dataset (CelebA) 是一个大型的人脸数据集,有10,177个身份和202,599张人脸图像。
14 |
15 | 
16 |
17 | 按照 [说明](http://mmlab.ie.cuhk.edu.hk/projects/CelebA.html) 下载 CelebFaces Attributes (CelebA) 数据集.
18 |
19 | ## 模型结构
20 | 
21 |
22 | ## 工作流程
23 | 处理单个输入图像的工作流程如下:
24 |
25 | 1. 人脸检测:使用 Dlib 中预先训练的模型检测面部。
26 | 2. 人脸校准:使用 Dlib 的实时姿势估计与 OpenCV 的仿射变换来尝试使眼睛和下唇在每个图像上出现在相同位置。
27 | 3. 卷积网络:使用深度神经网络把人脸图片映射为 128 维单位超球面上的一个点。
28 |
29 | 
30 | [图片来源](https://cmusatyalab.github.io/openface/)
31 |
32 | ## 预训练模型
33 |
34 | 下载预训练模型,放在 models 目录下:
35 |
36 | 1. Dlib 人脸校准模型 [shape_predictor_5_face_landmarks.dat.bz2](http://dlib.net/files/shape_predictor_5_face_landmarks.dat.bz2)
37 | 2. FaceNet 人脸识别模型 [model.10-0.0156.hdf5](https://github.com/foamliu/FaceNet/releases/download/v1.0/model.10-0.0156.hdf5)
38 |
39 | ## 性能评估
40 |
41 | 使用 Labeled Faces in the Wild (LFW) 数据集做性能评估:
42 |
43 | - 13233 人脸图片
44 | - 5749 人物身份
45 | - 1680 人有两张以上照片
46 |
47 | ### 准备数据
48 | 下载 [LFW database](http://vis-www.cs.umass.edu/lfw/lfw-funneled.tgz) 放在 data 目录下:
49 |
50 | ```bash
51 | $ wget http://vis-www.cs.umass.edu/lfw/lfw-funneled.tgz
52 | $ tar -xvf lfw-funneled.tgz
53 | $ wget http://vis-www.cs.umass.edu/lfw/pairs.txt
54 | $ wget http://vis-www.cs.umass.edu/lfw/people.txt
55 | ```
56 |
57 | ### 评估脚本
58 | ```bash
59 | $ python lfw_eval.py
60 | ```
61 |
62 | ### 测得结果
63 | 准确度: **89.27 %**.
64 |
65 | ## 如何使用
66 | ### 数据预处理
67 | 提取训练图像:
68 | ```bash
69 | $ python pre_process.py
70 | ```
71 | 总共 202,599张人脸图像中,5600张无法被 dlib 标定。因此 202599 - 5600 = 196999 张被用于训练。
72 |
73 | ### 训练
74 | ```bash
75 | $ python train.py
76 | ```
77 |
78 | 要想可视化训练过程,执行下面命令:
79 | ```bash
80 | $ tensorboard --logdir path_to_current_dir/logs
81 | ```
82 |
83 | ### DEMO
84 |
85 | ```bash
86 | $ python demo.py
87 | ```
88 |
89 | 正(P) | 欧式距离 | 锚(A) | 欧式距离 | 反(N) |
90 | |---|---|---|---|---|
91 | ||$(distance_0_a_p)||$(distance_0_a_n)||
92 | |$(distance_0_1_p)|---|$(distance_0_1_a)|---|$(distance_0_1_n)|
93 | ||$(distance_1_a_p)||$(distance_1_a_n)||
94 | |$(distance_1_2_p)|---|$(distance_1_2_a)|---|$(distance_1_2_n)|
95 | ||$(distance_2_a_p)||$(distance_2_a_n)||
96 | |$(distance_2_3_p)|---|$(distance_2_3_a)|---|$(distance_2_3_n)|
97 | ||$(distance_3_a_p)||$(distance_3_a_n)||
98 | |$(distance_3_4_p)|---|$(distance_3_4_a)|---|$(distance_3_4_n)|
99 | ||$(distance_4_a_p)||$(distance_4_a_n)||
100 | |$(distance_4_5_p)|---|$(distance_4_5_a)|---|$(distance_4_5_n)|
101 | ||$(distance_5_a_p)||$(distance_5_a_n)||
102 | |$(distance_5_6_p)|---|$(distance_5_6_a)|---|$(distance_5_6_n)|
103 | ||$(distance_6_a_p)||$(distance_6_a_n)||
104 | |$(distance_6_7_p)|---|$(distance_6_7_a)|---|$(distance_6_7_n)|
105 | ||$(distance_7_a_p)||$(distance_7_a_n)||
106 | |$(distance_7_8_p)|---|$(distance_7_8_a)|---|$(distance_7_8_n)|
107 | ||$(distance_8_a_p)||$(distance_8_a_n)||
108 | |$(distance_8_9_p)|---|$(distance_8_9_a)|---|$(distance_8_9_n)|
109 | ||$(distance_9_a_p)||$(distance_9_a_n)||
110 |
111 |
112 | ## 附录
113 |
114 | ### 样本数据
115 | 执行下面命令查看样本数据:
116 | ```bash
117 | $ python data_generator.py
118 | ```
119 | 正(P) | 锚(A) | 反(N) |
120 | |---|---|---|
121 | ||||
122 | ||||
123 | ||||
124 | ||||
125 | ||||
126 | ||||
127 | ||||
128 | ||||
129 | ||||
130 | ||||
131 |
132 |
133 | ### 数据增强
134 | 执行下面命令查看数据增强效果:
135 | ```bash
136 | $ python augmentor.py
137 | ```
138 | 之前 | 之后 | 之前 | 之后 | 之前 | 之后 |
139 | |---|---|---|---|---|---|
140 | |||||||
141 | |||||||
142 | |||||||
143 | |||||||
144 | |||||||
145 | |||||||
146 | |||||||
147 | |||||||
148 | |||||||
149 | |||||||
150 |
--------------------------------------------------------------------------------
/augmentor.py:
--------------------------------------------------------------------------------
from imgaug import augmenters as iaa

### augmentors by https://github.com/aleju/imgaug
# Helper: apply the wrapped augmenter to only ~50% of the images it sees.
sometimes = lambda aug: iaa.Sometimes(0.5, aug)

# Training-time augmentation pipeline. After an optional horizontal flip,
# between 0 and 5 of the photometric/noise augmenters below are applied per
# image, in random order. Commented-out branches are kept from the original
# imgaug example for reference.
aug_pipe = iaa.Sequential(
    [
        iaa.Fliplr(0.5),  # horizontally flip 50% of all images

        # execute 0 to 5 of the following (less important) augmenters per image
        # don't execute all of them, as that would often be way too strong
        iaa.SomeOf((0, 5),
                   [
                       # sometimes(iaa.Superpixels(p_replace=(0, 1.0), n_segments=(20, 200))),  # convert images into their superpixel representation
                       iaa.OneOf([
                           iaa.GaussianBlur((0, 0.5)),  # blur images with a sigma between 0 and 0.5
                           # blur image using local medians with kernel sizes between 2 and 7
                       ]),
                       iaa.Sharpen(alpha=(0, 1.0), lightness=(0.75, 1.5)),  # sharpen images
                       # iaa.Emboss(alpha=(0, 1.0), strength=(0, 2.0)),  # emboss images
                       # search either for all edges or for directed edges
                       # sometimes(iaa.OneOf([
                       #    iaa.EdgeDetect(alpha=(0, 0.7)),
                       #    iaa.DirectedEdgeDetect(alpha=(0, 0.7), direction=(0.0, 1.0)),
                       # ])),
                       iaa.AdditiveGaussianNoise(loc=0, scale=(0.0, 0.05 * 255), per_channel=0.5),
                       # add gaussian noise to images
                       iaa.OneOf([
                           iaa.Dropout((0.01, 0.1), per_channel=0.5),  # randomly remove up to 10% of the pixels
                           # iaa.CoarseDropout((0.03, 0.15), size_percent=(0.02, 0.05), per_channel=0.2),
                       ]),
                       # iaa.Invert(0.05, per_channel=True),  # invert color channels
                       iaa.Add((-10, 10), per_channel=0.5),
                       # change brightness of images (by -10 to 10 of original value)
                       iaa.Multiply((0.5, 1.5), per_channel=0.5),
                       # change brightness of images (50-150% of original value)
                       iaa.ContrastNormalization((0.5, 2.0), per_channel=0.5),  # improve or worsen the contrast
                       # iaa.Grayscale(alpha=(0.0, 1.0)),
                       # sometimes(iaa.ElasticTransformation(alpha=(0.5, 3.5), sigma=0.25)),  # move pixels locally around (with random strengths)
                       # sometimes(iaa.PiecewiseAffine(scale=(0.01, 0.05)))  # sometimes move parts of the image around
                   ],
                   random_order=True
                   )
    ],
    random_order=True
)
47 |
if __name__ == '__main__':
    import json
    import os
    import random

    import cv2 as cv

    from config import image_folder, img_size

    # Visual sanity check for the pipeline: dump 30 random anchor images
    # before/after augmentation into images/.
    print('loading train samples')
    with open('data/train_triplets.json', 'r') as file:
        samples = json.load(file)
    samples = random.sample(samples, 30)

    for i, sample in enumerate(samples):
        image_name = sample['a']
        filename = os.path.join(image_folder, image_name)
        image_bgr = cv.imread(filename)
        if image_bgr is None:
            # cv.imread returns None (no exception) for missing/corrupt files.
            print('skipping unreadable image: {}'.format(filename))
            continue
        # Bug fix: cv.resize's third positional parameter is `dst`, not the
        # interpolation flag, so cv.INTER_CUBIC was being ignored. Pass it as
        # a keyword argument.
        image_bgr = cv.resize(image_bgr, (img_size, img_size), interpolation=cv.INTER_CUBIC)
        cv.imwrite('images/imgaug_before_{}.png'.format(i), image_bgr)
        image_bgr = aug_pipe.augment_image(image_bgr)
        cv.imwrite('images/imgaug_after_{}.png'.format(i), image_bgr)
--------------------------------------------------------------------------------
/config.py:
--------------------------------------------------------------------------------
# Global training/evaluation configuration for the FaceNet implementation.

img_size = 139  # side length (pixels) of the square images fed to the network
channel = 3  # RGB
batch_size = 128
triplets_selection_batch_size = 1800  # candidate pool size when mining triplets
epochs = 15
patience = 10  # early-stopping patience, in epochs
embedding_size = 128  # dimensionality of the face embedding
num_images = 202599  # total face images in CelebA
num_identities = 10177  # total identities in CelebA
valid_ratio = 0.005  # fraction of aligned images held out for validation
# 5,600 were excluded as they cannot be aligned by dlib
# 202,599 - 5,600 = 196,999, separate into two classes: train and valid.
num_train_samples = 196998
num_lfw_valid_samples = 2185  # LFW data set: 6000 pairs => 2185 triplets
predictor_path = 'models/shape_predictor_5_face_landmarks.dat'  # dlib landmark model
alpha = 0.2  # presumably the triplet-loss margin (FaceNet's alpha) — confirm in train.py
SENTINEL = 1
threshold = 0.8  # NOTE(review): looks like the same/different-person distance threshold — confirm in lfw_eval.py

# Data locations (see README for download instructions).
image_folder = 'data/img_align_celeba'
identity_annot_filename = 'data/identity_CelebA.txt'
bbox_annot_filename = 'data/list_bbox_celeba.txt'
lfw_folder = 'data/lfw_funneled'

# Triplet selection strategies.
semi_hard_mode = 'semi-hard'
hard_mode = 'hard'
triplet_select_mode = hard_mode

best_model = 'models/model.01-0.0087.hdf5'
--------------------------------------------------------------------------------
/data_generator.py:
--------------------------------------------------------------------------------
1 | # encoding=utf-8
2 | import json
3 | import os
4 |
5 | import cv2 as cv
6 | import dlib
7 | import numpy as np
8 | from keras.applications.inception_resnet_v2 import preprocess_input
9 | from keras.utils import Sequence
10 |
11 | from augmentor import aug_pipe
12 | from config import batch_size, img_size, channel, embedding_size, image_folder, lfw_folder, predictor_path
13 | from utils import get_random_triplets
14 |
15 |
class DataGenSequence(Sequence):
    """Keras ``Sequence`` yielding aligned (anchor, positive, negative) batches.

    For ``usage == 'train'`` triplets are read from ``data/train_triplets.json``
    (or generated via ``get_random_triplets`` when the file is absent) and
    images come from the CelebA folder; otherwise the LFW validation triplets
    are used. Each face is aligned with dlib when a face is detected, else the
    raw image is resized. Training images are additionally augmented.
    """

    def __init__(self, usage):
        self.usage = usage
        if self.usage == 'train':
            print('loading train samples')
            self.image_folder = image_folder
            if os.path.isfile('data/train_triplets.json'):
                with open('data/train_triplets.json', 'r') as file:
                    self.samples = json.load(file)
            else:
                self.samples = get_random_triplets('train')
        else:
            print('loading valid samples(LFW)')
            self.image_folder = lfw_folder
            with open('data/lfw_val_triplets.json', 'r') as file:
                self.samples = json.load(file)

        self.detector = dlib.get_frontal_face_detector()
        self.sp = dlib.shape_predictor(predictor_path)

    def __len__(self):
        # Number of batches per epoch; the last batch may be partial.
        return int(np.ceil(len(self.samples) / float(batch_size)))

    def __getitem__(self, idx):
        """Return ([anchors, positives, negatives], dummy_targets) for batch ``idx``."""
        i = idx * batch_size

        length = min(batch_size, (len(self.samples) - i))
        batch_inputs = np.empty((3, length, img_size, img_size, channel), dtype=np.float32)
        # The triplet loss is computed inside the model, so targets are dummies.
        batch_dummy_target = np.zeros((length, embedding_size * 3), dtype=np.float32)

        for i_batch in range(length):
            sample = self.samples[i + i_batch]
            for j, role in enumerate(['a', 'p', 'n']):
                image_name = sample[role]
                filename = os.path.join(self.image_folder, image_name)
                image = cv.imread(filename)  # BGR
                # NOTE(review): negative-stride view for BGR->RGB; dlib appears
                # to accept it — confirm, else wrap in np.ascontiguousarray.
                image = image[:, :, ::-1]  # RGB
                dets = self.detector(image, 1)

                num_faces = len(dets)
                if num_faces > 0:
                    # Find the 5 face landmarks we need to do the alignment,
                    # then crop an aligned img_size x img_size face chip.
                    faces = dlib.full_object_detections()
                    for detection in dets:
                        faces.append(self.sp(image, detection))
                    image = dlib.get_face_chip(image, faces[0], size=img_size)
                else:
                    # No face found: fall back to a plain resize.
                    # Bug fix: cv.resize's third positional parameter is `dst`,
                    # not the interpolation flag — pass it as a keyword so
                    # cv.INTER_CUBIC is actually used.
                    image = cv.resize(image, (img_size, img_size), interpolation=cv.INTER_CUBIC)

                if self.usage == 'train':
                    image = aug_pipe.augment_image(image)

                batch_inputs[j, i_batch] = preprocess_input(image)

        return [batch_inputs[0], batch_inputs[1], batch_inputs[2]], batch_dummy_target

    def on_epoch_end(self):
        # Reshuffle the triplets between epochs.
        np.random.shuffle(self.samples)
74 |
75 |
def revert_pre_process(x):
    """Invert Inception-ResNet-v2 ``preprocess_input``: map [-1, 1] floats back to uint8 pixels."""
    rescaled = (x + 1) * 127.5
    return rescaled.astype(np.uint8)
78 |
79 |
if __name__ == '__main__':
    # Dump the first ten triplets of the first training batch to images/
    # for visual inspection.
    data_gen = DataGenSequence('train')
    x, y = data_gen[0]
    a = revert_pre_process(x[0])
    p = revert_pre_process(x[1])
    n = revert_pre_process(x[2])
    for idx in range(10):
        # [:, :, ::-1] converts RGB back to BGR for cv.imwrite.
        cv.imwrite('images/sample_a_{}.jpg'.format(idx), a[idx][:, :, ::-1])
        cv.imwrite('images/sample_p_{}.jpg'.format(idx), p[idx][:, :, ::-1])
        cv.imwrite('images/sample_n_{}.jpg'.format(idx), n[idx][:, :, ::-1])
91 |
--------------------------------------------------------------------------------
/demo.py:
--------------------------------------------------------------------------------
1 | # import the necessary packages
2 | import json
3 | import os
4 | import random
5 |
6 | import cv2 as cv
7 | import keras.backend as K
8 | import numpy as np
9 | from keras.applications.inception_resnet_v2 import preprocess_input
10 |
11 | from config import img_size, channel, embedding_size, lfw_folder
12 | from model import build_model
13 | from utils import get_best_model
14 |
if __name__ == '__main__':
    # Embed 10 random LFW triplets and write all pairwise squared Euclidean
    # distances to result.json, which template.replace() folds into README.md.
    model = build_model()
    model.load_weights(get_best_model())

    num_samples = 10
    print('loading valid samples(LFW)')
    with open('data/lfw_val_triplets.json', 'r') as file:
        samples = json.load(file)
    image_folder = lfw_folder
    samples = random.sample(samples, num_samples)

    # One embedding per role per triplet (was hard-coded 10/128; use the
    # configured sizes for consistency).
    a_list = np.empty((num_samples, embedding_size), dtype=np.float32)
    p_list = np.empty((num_samples, embedding_size), dtype=np.float32)
    n_list = np.empty((num_samples, embedding_size), dtype=np.float32)

    for i in range(num_samples):
        sample = samples[i]
        batch_inputs = np.empty((3, 1, img_size, img_size, channel), dtype=np.float32)

        for j, role in enumerate(['a', 'p', 'n']):
            image_name = sample[role]
            filename = os.path.join(image_folder, image_name)
            image_bgr = cv.imread(filename)
            # Bug fix: cv.resize's third positional parameter is `dst`, not the
            # interpolation flag — pass it as a keyword so INTER_CUBIC is used.
            image_bgr = cv.resize(image_bgr, (img_size, img_size), interpolation=cv.INTER_CUBIC)
            image_rgb = cv.cvtColor(image_bgr, cv.COLOR_BGR2RGB)
            batch_inputs[j, 0] = preprocess_input(image_rgb)
            cv.imwrite('images/{}_{}_image.png'.format(i, role), image_bgr)

        # The model concatenates the three embeddings along the last axis.
        y_pred = model.predict([batch_inputs[0], batch_inputs[1], batch_inputs[2]])
        a_list[i] = y_pred[0, 0:embedding_size]
        p_list[i] = y_pred[0, embedding_size:embedding_size * 2]
        n_list[i] = y_pred[0, embedding_size * 2:embedding_size * 3]

    result = {}

    for i in range(num_samples):
        a_i = a_list[i]
        p_i = p_list[i]
        n_i = n_list[i]

        # Cast to Python float: json cannot serialize numpy float32 scalars.
        result['distance_{}_a_p'.format(i)] = float(np.linalg.norm(a_i - p_i) ** 2)
        result['distance_{}_a_n'.format(i)] = float(np.linalg.norm(a_i - n_i) ** 2)

        for j in range(num_samples):
            result['distance_{}_{}_a'.format(i, j)] = float(np.linalg.norm(a_i - a_list[j]) ** 2)
            result['distance_{}_{}_p'.format(i, j)] = float(np.linalg.norm(p_i - p_list[j]) ** 2)
            result['distance_{}_{}_n'.format(i, j)] = float(np.linalg.norm(n_i - n_list[j]) ** 2)

    with open('result.json', 'w') as file:
        json.dump(result, file, indent=4)

    from template import replace

    replace()

    K.clear_session()
87 |
--------------------------------------------------------------------------------
/images/0_a_image.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/0_a_image.png
--------------------------------------------------------------------------------
/images/0_n_image.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/0_n_image.png
--------------------------------------------------------------------------------
/images/0_p_image.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/0_p_image.png
--------------------------------------------------------------------------------
/images/1_a_image.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/1_a_image.png
--------------------------------------------------------------------------------
/images/1_n_image.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/1_n_image.png
--------------------------------------------------------------------------------
/images/1_p_image.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/1_p_image.png
--------------------------------------------------------------------------------
/images/2_a_image.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/2_a_image.png
--------------------------------------------------------------------------------
/images/2_n_image.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/2_n_image.png
--------------------------------------------------------------------------------
/images/2_p_image.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/2_p_image.png
--------------------------------------------------------------------------------
/images/3_a_image.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/3_a_image.png
--------------------------------------------------------------------------------
/images/3_n_image.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/3_n_image.png
--------------------------------------------------------------------------------
/images/3_p_image.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/3_p_image.png
--------------------------------------------------------------------------------
/images/4_a_image.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/4_a_image.png
--------------------------------------------------------------------------------
/images/4_n_image.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/4_n_image.png
--------------------------------------------------------------------------------
/images/4_p_image.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/4_p_image.png
--------------------------------------------------------------------------------
/images/5_a_image.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/5_a_image.png
--------------------------------------------------------------------------------
/images/5_n_image.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/5_n_image.png
--------------------------------------------------------------------------------
/images/5_p_image.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/5_p_image.png
--------------------------------------------------------------------------------
/images/6_a_image.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/6_a_image.png
--------------------------------------------------------------------------------
/images/6_n_image.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/6_n_image.png
--------------------------------------------------------------------------------
/images/6_p_image.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/6_p_image.png
--------------------------------------------------------------------------------
/images/7_a_image.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/7_a_image.png
--------------------------------------------------------------------------------
/images/7_n_image.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/7_n_image.png
--------------------------------------------------------------------------------
/images/7_p_image.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/7_p_image.png
--------------------------------------------------------------------------------
/images/8_a_image.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/8_a_image.png
--------------------------------------------------------------------------------
/images/8_n_image.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/8_n_image.png
--------------------------------------------------------------------------------
/images/8_p_image.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/8_p_image.png
--------------------------------------------------------------------------------
/images/9_a_image.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/9_a_image.png
--------------------------------------------------------------------------------
/images/9_n_image.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/9_n_image.png
--------------------------------------------------------------------------------
/images/9_p_image.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/9_p_image.png
--------------------------------------------------------------------------------
/images/CelebA.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/CelebA.png
--------------------------------------------------------------------------------
/images/imgaug_after_0.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/imgaug_after_0.png
--------------------------------------------------------------------------------
/images/imgaug_after_1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/imgaug_after_1.png
--------------------------------------------------------------------------------
/images/imgaug_after_10.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/imgaug_after_10.png
--------------------------------------------------------------------------------
/images/imgaug_after_11.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/imgaug_after_11.png
--------------------------------------------------------------------------------
/images/imgaug_after_12.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/imgaug_after_12.png
--------------------------------------------------------------------------------
/images/imgaug_after_13.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/imgaug_after_13.png
--------------------------------------------------------------------------------
/images/imgaug_after_14.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/imgaug_after_14.png
--------------------------------------------------------------------------------
/images/imgaug_after_15.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/imgaug_after_15.png
--------------------------------------------------------------------------------
/images/imgaug_after_16.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/imgaug_after_16.png
--------------------------------------------------------------------------------
/images/imgaug_after_17.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/imgaug_after_17.png
--------------------------------------------------------------------------------
/images/imgaug_after_18.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/imgaug_after_18.png
--------------------------------------------------------------------------------
/images/imgaug_after_19.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/imgaug_after_19.png
--------------------------------------------------------------------------------
/images/imgaug_after_2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/imgaug_after_2.png
--------------------------------------------------------------------------------
/images/imgaug_after_20.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/imgaug_after_20.png
--------------------------------------------------------------------------------
/images/imgaug_after_21.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/imgaug_after_21.png
--------------------------------------------------------------------------------
/images/imgaug_after_22.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/imgaug_after_22.png
--------------------------------------------------------------------------------
/images/imgaug_after_23.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/imgaug_after_23.png
--------------------------------------------------------------------------------
/images/imgaug_after_24.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/imgaug_after_24.png
--------------------------------------------------------------------------------
/images/imgaug_after_25.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/imgaug_after_25.png
--------------------------------------------------------------------------------
/images/imgaug_after_26.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/imgaug_after_26.png
--------------------------------------------------------------------------------
/images/imgaug_after_27.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/imgaug_after_27.png
--------------------------------------------------------------------------------
/images/imgaug_after_28.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/imgaug_after_28.png
--------------------------------------------------------------------------------
/images/imgaug_after_29.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/imgaug_after_29.png
--------------------------------------------------------------------------------
/images/imgaug_after_3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/imgaug_after_3.png
--------------------------------------------------------------------------------
/images/imgaug_after_4.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/imgaug_after_4.png
--------------------------------------------------------------------------------
/images/imgaug_after_5.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/imgaug_after_5.png
--------------------------------------------------------------------------------
/images/imgaug_after_6.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/imgaug_after_6.png
--------------------------------------------------------------------------------
/images/imgaug_after_7.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/imgaug_after_7.png
--------------------------------------------------------------------------------
/images/imgaug_after_8.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/imgaug_after_8.png
--------------------------------------------------------------------------------
/images/imgaug_after_9.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/imgaug_after_9.png
--------------------------------------------------------------------------------
/images/imgaug_before_0.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/imgaug_before_0.png
--------------------------------------------------------------------------------
/images/imgaug_before_1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/imgaug_before_1.png
--------------------------------------------------------------------------------
/images/imgaug_before_10.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/imgaug_before_10.png
--------------------------------------------------------------------------------
/images/imgaug_before_11.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/imgaug_before_11.png
--------------------------------------------------------------------------------
/images/imgaug_before_12.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/imgaug_before_12.png
--------------------------------------------------------------------------------
/images/imgaug_before_13.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/imgaug_before_13.png
--------------------------------------------------------------------------------
/images/imgaug_before_14.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/imgaug_before_14.png
--------------------------------------------------------------------------------
/images/imgaug_before_15.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/imgaug_before_15.png
--------------------------------------------------------------------------------
/images/imgaug_before_16.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/imgaug_before_16.png
--------------------------------------------------------------------------------
/images/imgaug_before_17.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/imgaug_before_17.png
--------------------------------------------------------------------------------
/images/imgaug_before_18.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/imgaug_before_18.png
--------------------------------------------------------------------------------
/images/imgaug_before_19.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/imgaug_before_19.png
--------------------------------------------------------------------------------
/images/imgaug_before_2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/imgaug_before_2.png
--------------------------------------------------------------------------------
/images/imgaug_before_20.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/imgaug_before_20.png
--------------------------------------------------------------------------------
/images/imgaug_before_21.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/imgaug_before_21.png
--------------------------------------------------------------------------------
/images/imgaug_before_22.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/imgaug_before_22.png
--------------------------------------------------------------------------------
/images/imgaug_before_23.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/imgaug_before_23.png
--------------------------------------------------------------------------------
/images/imgaug_before_24.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/imgaug_before_24.png
--------------------------------------------------------------------------------
/images/imgaug_before_25.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/imgaug_before_25.png
--------------------------------------------------------------------------------
/images/imgaug_before_26.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/imgaug_before_26.png
--------------------------------------------------------------------------------
/images/imgaug_before_27.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/imgaug_before_27.png
--------------------------------------------------------------------------------
/images/imgaug_before_28.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/imgaug_before_28.png
--------------------------------------------------------------------------------
/images/imgaug_before_29.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/imgaug_before_29.png
--------------------------------------------------------------------------------
/images/imgaug_before_3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/imgaug_before_3.png
--------------------------------------------------------------------------------
/images/imgaug_before_4.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/imgaug_before_4.png
--------------------------------------------------------------------------------
/images/imgaug_before_5.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/imgaug_before_5.png
--------------------------------------------------------------------------------
/images/imgaug_before_6.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/imgaug_before_6.png
--------------------------------------------------------------------------------
/images/imgaug_before_7.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/imgaug_before_7.png
--------------------------------------------------------------------------------
/images/imgaug_before_8.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/imgaug_before_8.png
--------------------------------------------------------------------------------
/images/imgaug_before_9.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/imgaug_before_9.png
--------------------------------------------------------------------------------
/images/model.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/model.png
--------------------------------------------------------------------------------
/images/sample_a_0.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/sample_a_0.jpg
--------------------------------------------------------------------------------
/images/sample_a_1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/sample_a_1.jpg
--------------------------------------------------------------------------------
/images/sample_a_2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/sample_a_2.jpg
--------------------------------------------------------------------------------
/images/sample_a_3.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/sample_a_3.jpg
--------------------------------------------------------------------------------
/images/sample_a_4.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/sample_a_4.jpg
--------------------------------------------------------------------------------
/images/sample_a_5.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/sample_a_5.jpg
--------------------------------------------------------------------------------
/images/sample_a_6.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/sample_a_6.jpg
--------------------------------------------------------------------------------
/images/sample_a_7.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/sample_a_7.jpg
--------------------------------------------------------------------------------
/images/sample_a_8.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/sample_a_8.jpg
--------------------------------------------------------------------------------
/images/sample_a_9.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/sample_a_9.jpg
--------------------------------------------------------------------------------
/images/sample_n_0.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/sample_n_0.jpg
--------------------------------------------------------------------------------
/images/sample_n_1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/sample_n_1.jpg
--------------------------------------------------------------------------------
/images/sample_n_2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/sample_n_2.jpg
--------------------------------------------------------------------------------
/images/sample_n_3.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/sample_n_3.jpg
--------------------------------------------------------------------------------
/images/sample_n_4.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/sample_n_4.jpg
--------------------------------------------------------------------------------
/images/sample_n_5.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/sample_n_5.jpg
--------------------------------------------------------------------------------
/images/sample_n_6.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/sample_n_6.jpg
--------------------------------------------------------------------------------
/images/sample_n_7.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/sample_n_7.jpg
--------------------------------------------------------------------------------
/images/sample_n_8.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/sample_n_8.jpg
--------------------------------------------------------------------------------
/images/sample_n_9.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/sample_n_9.jpg
--------------------------------------------------------------------------------
/images/sample_p_0.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/sample_p_0.jpg
--------------------------------------------------------------------------------
/images/sample_p_1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/sample_p_1.jpg
--------------------------------------------------------------------------------
/images/sample_p_2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/sample_p_2.jpg
--------------------------------------------------------------------------------
/images/sample_p_3.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/sample_p_3.jpg
--------------------------------------------------------------------------------
/images/sample_p_4.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/sample_p_4.jpg
--------------------------------------------------------------------------------
/images/sample_p_5.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/sample_p_5.jpg
--------------------------------------------------------------------------------
/images/sample_p_6.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/sample_p_6.jpg
--------------------------------------------------------------------------------
/images/sample_p_7.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/sample_p_7.jpg
--------------------------------------------------------------------------------
/images/sample_p_8.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/sample_p_8.jpg
--------------------------------------------------------------------------------
/images/sample_p_9.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/sample_p_9.jpg
--------------------------------------------------------------------------------
/images/summary.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/FaceNet/8ebce490c1c873b17f117e783239784267b090dd/images/summary.jpg
--------------------------------------------------------------------------------
/inference.py:
--------------------------------------------------------------------------------
1 | # encoding=utf-8
2 | # import the necessary packages
3 | import json
4 | import multiprocessing as mp
5 | import os
6 | import queue
7 | from multiprocessing import Process
8 |
9 | import cv2 as cv
10 | import numpy as np
11 | from keras.applications.inception_resnet_v2 import preprocess_input
12 | from tqdm import tqdm
13 |
14 | from config import img_size, image_folder, identity_annot_filename, num_images
15 |
16 |
class InferenceWorker(Process):
    """Worker process that embeds (anchor, positive, negative) image triplets.

    Pulls image-name triplets from ``in_queue``, runs them through the
    triplet model on its assigned GPU, and pushes one
    ``{'image_name': ..., 'embedding': ...}`` dict per image onto ``out_queue``.
    """

    def __init__(self, gpuid, in_queue, out_queue, signal_queue):
        Process.__init__(self, name='ImageProcessor')

        self.gpuid = gpuid
        self.in_queue = in_queue
        self.out_queue = out_queue
        self.signal_queue = signal_queue

    def _load_image(self, image_name):
        # Read, resize and BGR->RGB convert one image, then apply the
        # InceptionResNetV2 preprocessing.
        filename = os.path.join(image_folder, image_name)
        image_bgr = cv.imread(filename)
        # FIX: the third positional argument of cv.resize is `dst`, not the
        # interpolation flag — it must be passed by keyword to take effect.
        image_bgr = cv.resize(image_bgr, (img_size, img_size), interpolation=cv.INTER_CUBIC)
        image_rgb = cv.cvtColor(image_bgr, cv.COLOR_BGR2RGB)
        return preprocess_input(image_rgb)

    def run(self):
        # Pin this worker to its GPU before Keras/TensorFlow is imported.
        os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
        os.environ["CUDA_VISIBLE_DEVICES"] = str(self.gpuid)
        print("InferenceWorker init, GPU ID: {}".format(self.gpuid))

        from model import build_model

        # load model weights
        model_weights_path = 'models/model.00-0.0296.hdf5'
        model = build_model()
        model.load_weights(model_weights_path)

        while True:
            try:
                try:
                    item = self.in_queue.get(block=False)
                except queue.Empty:
                    # FIX: the queue is filled before workers start, so Empty
                    # means it was drained (possibly by a sibling worker).
                    # The original `continue` busy-spun forever here.
                    break

                image_name_0, image_name_1, image_name_2 = item

                batch_inputs = np.empty((3, 1, img_size, img_size, 3), dtype=np.float32)
                batch_inputs[0] = self._load_image(image_name_0)
                batch_inputs[1] = self._load_image(image_name_1)
                batch_inputs[2] = self._load_image(image_name_2)

                # Model output is [anchor | positive | negative], 128 dims each.
                y_pred = model.predict([batch_inputs[0], batch_inputs[1], batch_inputs[2]])

                a = y_pred[0, 0:128]
                p = y_pred[0, 128:256]
                n = y_pred[0, 256:384]

                self.out_queue.put({'image_name': image_name_0, 'embedding': a})
                self.out_queue.put({'image_name': image_name_1, 'embedding': p})
                self.out_queue.put({'image_name': image_name_2, 'embedding': n})
                if self.in_queue.qsize() == 0:
                    break
            except Exception as e:
                print(e)

        import keras.backend as K
        K.clear_session()
        print('InferenceWorker done, GPU ID {}'.format(self.gpuid))
85 |
86 |
class Scheduler:
    """Fans image-name triplets out to one InferenceWorker per GPU."""

    def __init__(self, gpuids, signal_queue):
        self.signal_queue = signal_queue
        # FIX: create our own Manager instead of relying on a module-level
        # `manager` global, which only exists when this file runs as a script
        # (importing the module and using Scheduler raised NameError). This
        # also matches the Scheduler in lfw_eval.py.
        manager = mp.Manager()
        self.in_queue = manager.Queue()
        self.out_queue = manager.Queue()
        self._gpuids = gpuids

        self.__init_workers()

    def __init_workers(self):
        # One worker per GPU id, all sharing the same queues.
        self._workers = list()
        for gpuid in self._gpuids:
            self._workers.append(InferenceWorker(gpuid, self.in_queue, self.out_queue, self.signal_queue))

    def start(self, names):
        """Enqueue all items, run the workers to completion, return out_queue."""
        # put all of the image names into the queue before any worker starts
        for name in names:
            self.in_queue.put(name)

        # start the workers
        for worker in self._workers:
            worker.start()

        # wait for all of the workers to finish
        for worker in self._workers:
            worker.join()
        print("all of workers have been done")
        return self.out_queue
115 |
116 |
def run(gpuids, q):
    """Build (anchor, positive, negative) triplets from the annotation file
    and process them all on the given GPUs; returns the output queue."""
    with open(identity_annot_filename, 'r') as file:
        lines = file.readlines()

    # The image name is the first whitespace-separated token of each line;
    # consecutive groups of three lines form one triplet.
    names = [line.split(' ')[0].strip() for line in lines]
    items = [(names[3 * i], names[3 * i + 1], names[3 * i + 2])
             for i in range(num_images // 3)]

    # init scheduler, then start processing and wait for completion
    scheduler = Scheduler(gpuids, q)
    return scheduler.start(items)
133 |
134 |
135 | SENTINEL = 1
136 |
137 |
def listener(q):
    """Advance a progress bar once per queued item until a None sentinel."""
    progress = tqdm(total=num_images)
    for _ in iter(q.get, None):
        progress.update()
142 |
143 |
144 | if __name__ == "__main__":
145 | gpuids = ['0', '1', '2', '3']
146 | print(gpuids)
147 |
148 | manager = mp.Manager()
149 | q = manager.Queue()
150 | proc = mp.Process(target=listener, args=(q,))
151 | proc.start()
152 |
153 | out_queue = run(gpuids, q)
154 | out_list = []
155 | while out_queue.qsize() > 0:
156 | out_list.append(out_queue.get())
157 |
158 | with open("data/preds.p", "w") as file:
159 | json.dump(out_list, file, indent=4)
160 |
161 | q.put(None)
162 | proc.join()
163 |
--------------------------------------------------------------------------------
/lfw_eval.py:
--------------------------------------------------------------------------------
1 | import multiprocessing as mp
2 | import os
3 | import pickle
4 | import queue
5 | from multiprocessing import Process
6 | from multiprocessing import Process
7 |
8 | import cv2 as cv
9 | import dlib
10 | import numpy as np
11 | from keras.applications.inception_resnet_v2 import preprocess_input
12 | from tqdm import tqdm
13 |
14 | from config import lfw_folder, img_size, channel, threshold, predictor_path
15 | from utils import get_lfw_images, get_lfw_pairs, get_best_model
16 |
17 |
class InferenceWorker(Process):
    """Worker process that aligns LFW faces and embeds them on one GPU.

    Consumes image names from ``in_queue`` three at a time (anchor, positive,
    negative), emits ``{'image_name': ..., 'embedding': ...}`` dicts on
    ``out_queue``, and ticks ``signal_queue`` once per processed triplet.
    """

    def __init__(self, gpuid, in_queue, out_queue, signal_queue):
        Process.__init__(self, name='ImageProcessor')

        self.gpuid = gpuid
        self.in_queue = in_queue
        self.out_queue = out_queue
        self.signal_queue = signal_queue

    def run(self):
        # Pin this worker to its GPU before Keras/TensorFlow is imported.
        os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
        os.environ["CUDA_VISIBLE_DEVICES"] = str(self.gpuid)
        print("InferenceWorker init, GPU ID: {}".format(self.gpuid))

        # FIX: build the dlib models inside the child process. They were
        # created in __init__ in the parent, but dlib's detector and shape
        # predictor are not picklable, which breaks worker startup under the
        # 'spawn' multiprocessing start method.
        self.detector = dlib.get_frontal_face_detector()
        self.sp = dlib.shape_predictor(predictor_path)

        from model import build_model

        # load models
        model = build_model()
        model.load_weights(get_best_model())

        while True:
            try:
                sample = {}
                try:
                    sample['a'] = self.in_queue.get(block=False)
                    sample['p'] = self.in_queue.get(block=False)
                    sample['n'] = self.in_queue.get(block=False)

                except queue.Empty:
                    break

                batch_inputs = np.empty((3, 1, img_size, img_size, channel), dtype=np.float32)

                for j, role in enumerate(['a', 'p', 'n']):
                    image_name = sample[role]
                    filename = os.path.join(lfw_folder, image_name)
                    image = cv.imread(filename)
                    image = image[:, :, ::-1]  # BGR -> RGB
                    dets = self.detector(image, 1)

                    num_faces = len(dets)
                    if num_faces > 0:
                        # Find the 5 face landmarks we need to do the alignment.
                        faces = dlib.full_object_detections()
                        for detection in dets:
                            faces.append(self.sp(image, detection))
                        # Align and crop using the first detected face.
                        image = dlib.get_face_chip(image, faces[0], size=img_size)
                    else:
                        # No face found: fall back to a plain resize.
                        # FIX: the third positional argument of cv.resize is
                        # `dst`, not the interpolation flag — pass by keyword.
                        image = cv.resize(image, (img_size, img_size), interpolation=cv.INTER_CUBIC)

                    batch_inputs[j, 0] = preprocess_input(image)

                # Model output is [anchor | positive | negative], 128 dims each.
                y_pred = model.predict([batch_inputs[0], batch_inputs[1], batch_inputs[2]])
                a = y_pred[0, 0:128]
                p = y_pred[0, 128:256]
                n = y_pred[0, 256:384]

                self.out_queue.put({'image_name': sample['a'], 'embedding': a})
                self.out_queue.put({'image_name': sample['p'], 'embedding': p})
                self.out_queue.put({'image_name': sample['n'], 'embedding': n})
                # Tick the progress listener once per processed triplet.
                self.signal_queue.put(SENTINEL)

                if self.in_queue.qsize() == 0:
                    break
            except Exception as e:
                print(e)

        import keras.backend as K
        K.clear_session()
        print('InferenceWorker done, GPU ID {}'.format(self.gpuid))
91 |
92 |
class Scheduler:
    """Distributes LFW image names across one InferenceWorker per GPU."""

    def __init__(self, gpuids, signal_queue):
        self.signal_queue = signal_queue
        manager = mp.Manager()
        self.in_queue = manager.Queue()
        self.out_queue = manager.Queue()
        self._gpuids = gpuids

        self.__init_workers()

    def __init_workers(self):
        # Build one worker per GPU, all sharing the same queues.
        self._workers = [
            InferenceWorker(gpuid, self.in_queue, self.out_queue, self.signal_queue)
            for gpuid in self._gpuids
        ]

    def start(self, names):
        """Enqueue every name, run all workers to completion, return out_queue."""
        # Fill the input queue up front, before any worker starts.
        for name in names:
            self.in_queue.put(name)

        # Launch every worker...
        for worker in self._workers:
            worker.start()

        # ...then block until they have all finished.
        for worker in self._workers:
            worker.join()
        print("all of workers have been done")
        return self.out_queue
122 |
123 |
def run(gpuids, q):
    """Embed every LFW image on the given GPUs; returns the output queue."""
    # collect all image names under the LFW folder
    names = get_lfw_images()

    # init the scheduler, then start processing and wait for completion
    scheduler = Scheduler(gpuids, q)
    return scheduler.start(names)
133 |
134 |
135 | SENTINEL = 1
136 |
137 |
def listener(q):
    """Advance a progress bar once per queued item until a None sentinel."""
    # One tick per triplet of the 13233 LFW images.
    progress = tqdm(total=13233 // 3)
    for _ in iter(q.get, None):
        progress.update()
142 |
143 |
def create_lfw_embeddings():
    """Compute embeddings for all LFW images and pickle them to disk."""
    gpuids = ['0', '1', '2', '3']
    print(gpuids)

    # The progress listener runs in its own process, fed via a managed queue.
    manager = mp.Manager()
    q = manager.Queue()
    proc = mp.Process(target=listener, args=(q,))
    proc.start()

    # Run inference on all GPUs, then drain every result off the queue.
    out_queue = run(gpuids, q)
    results = []
    while out_queue.qsize() > 0:
        results.append(out_queue.get())

    with open("data/lfw_embeddings.p", "wb") as file:
        pickle.dump(results, file)

    # Stop the listener and wait for it to exit.
    q.put(None)
    proc.join()
163 |
164 |
165 | if __name__ == "__main__":
166 | print('creating lfw embeddings')
167 | create_lfw_embeddings()
168 | with open('data/lfw_embeddings.p', 'rb') as file:
169 | embeddings = pickle.load(file)
170 |
171 | pairs = get_lfw_pairs()
172 | y_true_list = []
173 | y_pred_list = []
174 |
175 | print('evaluating lfw database')
176 | for pair in tqdm(pairs):
177 | image_name_1 = pair['image_name_1']
178 | image_name_2 = pair['image_name_2']
179 | y_true = pair['same_person']
180 | y_true_list.append(y_true)
181 | embedding_1 = np.array([x['embedding'] for x in embeddings if x['image_name'] == image_name_1][0])
182 | embedding_2 = np.array([x['embedding'] for x in embeddings if x['image_name'] == image_name_2][0])
183 | dist = np.square(np.linalg.norm(embedding_1 - embedding_2))
184 | y_pred = dist <= threshold
185 | y_pred_list.append(y_pred)
186 |
187 | y = np.array(y_true_list).astype(np.int32)
188 | pred = np.array(y_pred_list).astype(np.int32)
189 | from sklearn import metrics
190 |
191 | print(y)
192 | print(pred)
193 |
194 | fpr, tpr, thresholds = metrics.roc_curve(y, pred)
195 | print('showing lfw accuracy: ' + str(metrics.auc(fpr, tpr)))
196 |
--------------------------------------------------------------------------------
/model.py:
--------------------------------------------------------------------------------
1 | import keras.backend as K
2 | import tensorflow as tf
3 | from keras.applications.inception_resnet_v2 import InceptionResNetV2
4 | from keras.layers import Input, Dense, concatenate, Lambda
5 | from keras.models import Model
6 | from keras.utils import plot_model
7 |
8 | from config import img_size, channel, embedding_size
9 |
10 |
def build_model():
    """Build the triplet network.

    A single InceptionResNetV2 embedder (shared weights) is applied to the
    anchor, positive and negative inputs; each embedding is L2-normalized and
    the three are concatenated into one output vector.
    """
    backbone = InceptionResNetV2(include_top=False, weights='imagenet',
                                 input_shape=(img_size, img_size, channel),
                                 pooling='avg')
    embedding = Dense(embedding_size)(backbone.layers[-1].output)
    image_embedder = Model(backbone.input, embedding)

    input_a = Input((img_size, img_size, channel), name='anchor')
    input_p = Input((img_size, img_size, channel), name='positive')
    input_n = Input((img_size, img_size, channel), name='negative')

    # Project each embedding onto the unit hypersphere.
    normalize = Lambda(lambda v: K.l2_normalize(v, axis=-1), name='normalize')

    output_a = normalize(image_embedder(input_a))
    output_p = normalize(image_embedder(input_p))
    output_n = normalize(image_embedder(input_n))

    # Single output tensor: [anchor | positive | negative] on the last axis.
    merged_vector = concatenate([output_a, output_p, output_n], axis=-1)

    return Model(inputs=[input_a, input_p, input_n], outputs=merged_vector)
37 |
38 |
if __name__ == '__main__':
    # Build the model on CPU just to inspect it: print the layer summary and
    # render the architecture diagram to model.svg.
    with tf.device("/cpu:0"):
        model = build_model()
    print(model.summary())
    plot_model(model, to_file='model.svg', show_layer_names=True, show_shapes=True)

    K.clear_session()
46 |
--------------------------------------------------------------------------------
/model.svg:
--------------------------------------------------------------------------------
1 |
2 |
4 |
6 |
7 |
117 |
--------------------------------------------------------------------------------
/pre_process.py:
--------------------------------------------------------------------------------
1 | import bz2
2 | import os
3 | import zipfile
4 | from multiprocessing import Pool
5 |
6 | import cv2 as cv
7 | import dlib
8 | from tqdm import tqdm
9 |
10 | from config import img_size, identity_annot_filename, image_folder, predictor_path
11 | from utils import ensure_folder
12 |
13 |
def ensure_dlib_model():
    """Download the dlib 5-point landmark predictor archive if needed.

    Only fetches the ``.bz2`` archive; extraction is done separately by
    ``extract_bz2(predictor_path)``.
    """
    if not os.path.isfile(predictor_path):
        import urllib.request
        # FIX: derive the archive name from predictor_path so the existence
        # check above and the download target can never get out of sync
        # (the target was previously hard-coded).
        urllib.request.urlretrieve("http://dlib.net/files/shape_predictor_5_face_landmarks.dat.bz2",
                                   filename='{}.bz2'.format(predictor_path))
19 |
20 |
def extract(folder):
    """Unpack ``<folder>.zip`` into the ``data`` directory."""
    archive = '{}.zip'.format(folder)
    print('Extracting {}...'.format(archive))
    with zipfile.ZipFile(archive, 'r') as zip_ref:
        zip_ref.extractall('data')
26 |
27 |
def extract_bz2(new):
    """Decompress ``<new>.bz2`` into the file ``new``, streaming in 100 KiB chunks."""
    old = '{}.bz2'.format(new)
    print('Extracting {}...'.format(old))
    with open(new, 'wb') as target, bz2.BZ2File(old, 'rb') as source:
        for chunk in iter(lambda: source.read(100 * 1024), b''):
            target.write(chunk)
34 |
35 |
def check_one_image(line):
    """Validate one annotation line; used as a Pool worker.

    Returns the image name when the image should be excluded (unreadable file
    or no detectable face), and None when it passes. Relies on the
    module-level ``detector`` and ``sp`` dlib models created in ``__main__``.
    """
    line = line.strip()
    if len(line) > 0:
        tokens = line.split(' ')
        image_name = tokens[0].strip()
        filename = os.path.join(image_folder, image_name)
        img = cv.imread(filename)
        if img is None:
            # FIX: cv.imread returns None for missing/unreadable files, which
            # crashed the worker pool on the slice below — exclude instead.
            return image_name
        img = img[:, :, ::-1]  # BGR -> RGB
        dets = detector(img, 1)

        num_faces = len(dets)
        if num_faces == 0:
            return image_name

        # Find the 5 face landmarks we need to do the alignment.
        faces = dlib.full_object_detections()
        for detection in dets:
            faces.append(sp(img, detection))

        # Exercise the alignment path so we know get_face_chip succeeds on
        # this image; the resulting chip itself is discarded.
        image = dlib.get_face_chip(img, faces[0], size=img_size)
        image = image[:, :, ::-1]
60 |
61 |
def check_image():
    """Scan every annotation line in parallel and write the names of images
    with no detectable face to data/exclude.txt."""
    with open(identity_annot_filename, 'r') as file:
        lines = file.readlines()

    pool = Pool(24)
    results = []
    for item in tqdm(pool.imap_unordered(check_one_image, lines), total=len(lines)):
        results.append(item)
    pool.close()
    pool.join()

    # Keep only the names flagged for exclusion (None means the image passed).
    excluded = [name for name in results if name is not None]
    print(len(excluded))
    with open('data/exclude.txt', 'w') as file:
        file.write('\n'.join(excluded))
78 |
79 |
if __name__ == '__main__':
    ensure_folder('data')
    ensure_folder('models')
    # Fetch the dlib landmark archive if the predictor is missing, then unpack it.
    # NOTE(review): extract_bz2 runs unconditionally — it assumes the .bz2
    # archive is still present even when the .dat already exists; verify.
    ensure_dlib_model()
    extract_bz2(predictor_path)

    # Load all the models we need: a detector to find the faces, a shape predictor
    # to find face landmarks so we can precisely localize the face
    detector = dlib.get_frontal_face_detector()
    sp = dlib.shape_predictor(predictor_path)

    # Unzip the image archive on first run.
    if not os.path.isdir(image_folder):
        extract(image_folder)

    check_image()
95 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | imgaug
2 | dlib
--------------------------------------------------------------------------------
/result.json:
--------------------------------------------------------------------------------
1 | {
2 | "distance_6_a_p": 0.5895593609896856,
3 | "distance_7_a_p": 0.589355993996751,
4 | "distance_0_7_a": 0.9216119751365568,
5 | "distance_9_9_p": 0.0,
6 | "distance_6_5_a": 1.769031394632762,
7 | "distance_3_0_p": 1.73171759495132,
8 | "distance_7_8_a": 0.7705714319189418,
9 | "distance_2_2_n": 0.0,
10 | "distance_1_a_n": 0.979476751163034,
11 | "distance_0_9_n": 1.0228653047889225,
12 | "distance_8_9_p": 1.1006829340051496,
13 | "distance_0_4_a": 0.7017801523672382,
14 | "distance_6_1_n": 0.7422552106658173,
15 | "distance_4_a_p": 0.30724229003911674,
16 | "distance_1_6_a": 1.8648480299546009,
17 | "distance_6_1_a": 1.8648480299546009,
18 | "distance_1_5_n": 0.9396923095874286,
19 | "distance_7_9_a": 1.196575405445003,
20 | "distance_1_1_p": 0.0,
21 | "distance_5_0_a": 0.9160862273760451,
22 | "distance_8_8_p": 0.0,
23 | "distance_7_5_n": 1.1180937265763191,
24 | "distance_5_0_n": 1.0743794031539124,
25 | "distance_7_1_n": 1.5588787342495039,
26 | "distance_8_2_n": 0.6946369476722261,
27 | "distance_4_3_a": 1.6837698620270345,
28 | "distance_9_8_n": 1.1559434490211515,
29 | "distance_9_a_p": 0.3872792846044746,
30 | "distance_2_7_n": 1.3931714006418048,
31 | "distance_0_a_n": 1.6494668308786373,
32 | "distance_7_4_p": 0.797200567882328,
33 | "distance_3_0_a": 1.3753716326666563,
34 | "distance_5_7_p": 0.9000133893362126,
35 | "distance_8_1_a": 0.4831989088163482,
36 | "distance_1_7_p": 0.5701372320815246,
37 | "distance_6_6_n": 0.0,
38 | "distance_9_4_a": 0.586161196410103,
39 | "distance_0_6_p": 1.8098653377592768,
40 | "distance_1_8_a": 0.4831989088163482,
41 | "distance_1_6_p": 1.428479878261001,
42 | "distance_9_7_n": 1.5910036117489312,
43 | "distance_0_6_a": 1.9163290478081905,
44 | "distance_9_3_a": 1.3556085654421537,
45 | "distance_1_2_n": 1.2582430909856726,
46 | "distance_9_1_p": 1.2679971832849333,
47 | "distance_2_8_p": 0.5733410122621194,
48 | "distance_5_2_a": 0.475253235380535,
49 | "distance_6_7_a": 1.509290475661146,
50 | "distance_8_0_p": 0.8519567973697271,
51 | "distance_9_7_a": 1.196575405445003,
52 | "distance_0_1_a": 1.150248047032676,
53 | "distance_2_4_p": 1.188184534050606,
54 | "distance_6_7_n": 0.9599646795629191,
55 | "distance_5_9_p": 1.00228296730171,
56 | "distance_2_a_n": 1.2708354465203797,
57 | "distance_0_4_n": 1.3066880195983686,
58 | "distance_0_4_p": 0.6286827002822157,
59 | "distance_8_2_a": 1.5109194657416083,
60 | "distance_8_5_n": 1.454071359950845,
61 | "distance_2_1_a": 1.5732924096585208,
62 | "distance_6_0_a": 1.9163290478081905,
63 | "distance_8_1_p": 1.1752557684385465,
64 | "distance_8_5_a": 0.9253115739572912,
65 | "distance_5_6_p": 1.4096090281515359,
66 | "distance_4_6_p": 2.022193991360055,
67 | "distance_7_4_a": 0.7667287689094024,
68 | "distance_3_6_p": 0.7215708586532834,
69 | "distance_5_6_n": 1.0633578110111017,
70 | "distance_3_4_a": 1.6837698620270345,
71 | "distance_8_0_a": 0.7574569408788854,
72 | "distance_0_5_a": 0.9160862273760451,
73 | "distance_4_8_p": 1.1621579125865225,
74 | "distance_1_9_n": 0.9274840307914225,
75 | "distance_7_2_n": 1.3931714006418048,
76 | "distance_3_6_n": 1.170603310811714,
77 | "distance_2_0_a": 1.0992423887082623,
78 | "distance_5_2_p": 0.9853346795821949,
79 | "distance_2_0_p": 0.9518766640312073,
80 | "distance_3_1_a": 1.9143265190010084,
81 | "distance_3_7_n": 1.058133668892026,
82 | "distance_3_5_n": 0.7835299084287186,
83 | "distance_0_5_p": 0.8610416909991869,
84 | "distance_6_9_n": 1.3585787615284204,
85 | "distance_1_4_p": 0.6074559840001257,
86 | "distance_9_5_n": 0.9014304327779996,
87 | "distance_8_8_a": 0.0,
88 | "distance_2_5_a": 0.475253235380535,
89 | "distance_1_0_a": 1.150248047032676,
90 | "distance_9_2_a": 1.4890938771993802,
91 | "distance_4_3_p": 2.075934042660265,
92 | "distance_6_9_a": 1.964927705387609,
93 | "distance_8_7_n": 1.768148674748332,
94 | "distance_3_8_n": 1.8408278489693117,
95 | "distance_7_5_a": 0.6696159773982266,
96 | "distance_0_3_p": 1.73171759495132,
97 | "distance_6_8_n": 2.2515126624097235,
98 | "distance_0_3_n": 0.8459312454920678,
99 | "distance_2_9_a": 1.4890938771993802,
100 | "distance_4_5_a": 0.7416181727183044,
101 | "distance_6_7_p": 1.7524709729867851,
102 | "distance_2_8_a": 1.5109194657416083,
103 | "distance_0_8_p": 0.8519567973697271,
104 | "distance_0_7_p": 1.5466340886088261,
105 | "distance_0_2_n": 1.8970771848785404,
106 | "distance_0_8_a": 0.7574569408788854,
107 | "distance_1_0_n": 1.1635678563021798,
108 | "distance_2_3_n": 1.7432304490398707,
109 | "distance_7_0_n": 0.7388840958073253,
110 | "distance_9_8_p": 1.1006829340051496,
111 | "distance_6_4_a": 2.2333358279587543,
112 | "distance_8_9_n": 1.1559434490211515,
113 | "distance_6_a_n": 1.328689686270991,
114 | "distance_4_5_p": 0.5769072011617915,
115 | "distance_3_4_n": 1.3329982639159113,
116 | "distance_1_6_n": 0.7422552106658173,
117 | "distance_8_7_a": 0.7705714319189418,
118 | "distance_2_1_p": 1.6028562938586788,
119 | "distance_6_5_p": 1.4096090281515359,
120 | "distance_5_3_a": 1.0787134176128035,
121 | "distance_2_4_a": 1.0743739663826233,
122 | "distance_7_7_p": 0.0,
123 | "distance_5_3_p": 1.435137054743393,
124 | "distance_1_9_p": 1.2679971832849333,
125 | "distance_7_6_n": 0.9599646795629191,
126 | "distance_9_a_n": 0.9675245794651346,
127 | "distance_9_6_p": 1.3661617049720576,
128 | "distance_7_1_a": 0.895247880832958,
129 | "distance_3_3_p": 0.0,
130 | "distance_1_3_n": 1.1417305525311008,
131 | "distance_9_4_n": 1.006218534236325,
132 | "distance_0_8_n": 2.1494397062883905,
133 | "distance_8_5_p": 0.7090511403499136,
134 | "distance_0_1_n": 1.1635678563021798,
135 | "distance_2_2_p": 0.0,
136 | "distance_3_8_a": 1.7120993844002896,
137 | "distance_8_a_p": 0.6818280083283668,
138 | "distance_0_3_a": 1.3753716326666563,
139 | "distance_2_a_p": 0.7500225824294517,
140 | "distance_1_3_p": 1.708809433476418,
141 | "distance_3_a_n": 1.2198440067149932,
142 | "distance_6_1_p": 1.428479878261001,
143 | "distance_2_0_n": 1.8970771848785404,
144 | "distance_6_2_p": 1.628770562676948,
145 | "distance_3_a_p": 0.2974470771232838,
146 | "distance_4_6_a": 2.2333358279587543,
147 | "distance_5_8_p": 0.7090511403499136,
148 | "distance_5_4_p": 0.5769072011617915,
149 | "distance_5_5_a": 0.0,
150 | "distance_5_a_p": 0.34221556040597534,
151 | "distance_4_7_p": 0.797200567882328,
152 | "distance_3_7_p": 1.7372040229247432,
153 | "distance_4_9_n": 1.006218534236325,
154 | "distance_7_8_p": 1.578112007510768,
155 | "distance_3_3_a": 0.0,
156 | "distance_2_4_n": 0.9711378399718029,
157 | "distance_1_3_a": 1.9143265190010084,
158 | "distance_5_8_a": 0.9253115739572912,
159 | "distance_0_0_a": 0.0,
160 | "distance_3_9_n": 0.9623285019175434,
161 | "distance_3_0_n": 0.8459312454920678,
162 | "distance_8_9_a": 0.8180991302766074,
163 | "distance_8_3_n": 1.8408278489693117,
164 | "distance_9_6_n": 1.3585787615284204,
165 | "distance_9_3_n": 0.9623285019175434,
166 | "distance_1_7_n": 1.5588787342495039,
167 | "distance_4_4_p": 0.0,
168 | "distance_6_0_p": 1.8098653377592768,
169 | "distance_3_9_a": 1.3556085654421537,
170 | "distance_5_0_p": 0.8610416909991869,
171 | "distance_2_9_p": 1.5347064887510555,
172 | "distance_3_6_a": 1.0803064767565616,
173 | "distance_7_8_n": 1.768148674748332,
174 | "distance_2_7_a": 1.197702855102989,
175 | "distance_1_9_a": 1.5088083928372384,
176 | "distance_8_6_a": 1.7674141940097456,
177 | "distance_2_6_a": 1.7475365650907406,
178 | "distance_0_2_a": 1.0992423887082623,
179 | "distance_3_8_p": 1.1076674406685925,
180 | "distance_1_a_p": 0.3566377027031855,
181 | "distance_4_2_n": 0.9711378399718029,
182 | "distance_7_5_p": 0.9000133893362126,
183 | "distance_5_6_a": 1.769031394632762,
184 | "distance_9_4_p": 1.2217048442844316,
185 | "distance_7_3_p": 1.7372040229247432,
186 | "distance_8_4_a": 0.8763262963975791,
187 | "distance_5_9_a": 1.1163732770297798,
188 | "distance_6_0_n": 0.900708365015138,
189 | "distance_5_1_a": 0.9322994743186577,
190 | "distance_2_5_n": 0.8022785571405997,
191 | "distance_9_8_a": 0.8180991302766074,
192 | "distance_2_3_p": 1.4814534761862888,
193 | "distance_5_5_n": 0.0,
194 | "distance_4_4_n": 0.0,
195 | "distance_6_8_p": 1.243962029845136,
196 | "distance_2_2_a": 0.0,
197 | "distance_7_9_n": 1.5910036117489312,
198 | "distance_6_8_a": 1.7674141940097456,
199 | "distance_8_0_n": 2.1494397062883905,
200 | "distance_7_0_a": 0.9216119751365568,
201 | "distance_0_5_n": 1.0743794031539124,
202 | "distance_4_9_p": 1.2217048442844316,
203 | "distance_7_7_n": 0.0,
204 | "distance_4_2_a": 1.0743739663826233,
205 | "distance_0_a_p": 0.17160003892478048,
206 | "distance_5_9_n": 0.9014304327779996,
207 | "distance_3_1_n": 1.1417305525311008,
208 | "distance_1_2_p": 1.6028562938586788,
209 | "distance_4_7_a": 0.7667287689094024,
210 | "distance_4_8_n": 1.5146275176097248,
211 | "distance_9_0_a": 0.7940329531837875,
212 | "distance_2_9_n": 1.1526721088817027,
213 | "distance_8_3_a": 1.7120993844002896,
214 | "distance_0_2_p": 0.9518766640312073,
215 | "distance_6_3_a": 1.0803064767565616,
216 | "distance_1_5_a": 0.9322994743186577,
217 | "distance_3_7_a": 1.5292893405050023,
218 | "distance_1_8_p": 1.1752557684385465,
219 | "distance_0_1_p": 1.2838908304729557,
220 | "distance_6_5_n": 1.0633578110111017,
221 | "distance_8_3_p": 1.1076674406685925,
222 | "distance_4_3_n": 1.3329982639159113,
223 | "distance_7_2_a": 1.197702855102989,
224 | "distance_5_4_a": 0.7416181727183044,
225 | "distance_6_9_p": 1.3661617049720576,
226 | "distance_5_3_n": 0.7835299084287186,
227 | "distance_1_1_a": 0.0,
228 | "distance_7_7_a": 0.0,
229 | "distance_8_8_n": 0.0,
230 | "distance_9_1_a": 1.5088083928372384,
231 | "distance_9_7_p": 1.738347426767234,
232 | "distance_0_7_n": 0.7388840958073253,
233 | "distance_2_6_p": 1.628770562676948,
234 | "distance_1_7_a": 0.895247880832958,
235 | "distance_9_5_p": 1.00228296730171,
236 | "distance_7_1_p": 0.5701372320815246,
237 | "distance_1_8_n": 1.7707280145280038,
238 | "distance_1_5_p": 0.4828491145537015,
239 | "distance_0_0_n": 0.0,
240 | "distance_6_4_n": 0.6800542901757503,
241 | "distance_5_7_n": 1.1180937265763191,
242 | "distance_5_8_n": 1.454071359950845,
243 | "distance_1_4_a": 1.2373792481458281,
244 | "distance_7_4_n": 1.038589736005079,
245 | "distance_6_3_n": 1.170603310811714,
246 | "distance_6_3_p": 0.7215708586532834,
247 | "distance_4_a_n": 1.260944856638858,
248 | "distance_9_0_p": 0.7606279547980002,
249 | "distance_2_8_n": 0.6946369476722261,
250 | "distance_7_3_n": 1.058133668892026,
251 | "distance_7_6_a": 1.509290475661146,
252 | "distance_2_1_n": 1.2582430909856726,
253 | "distance_6_2_a": 1.7475365650907406,
254 | "distance_5_7_a": 0.6696159773982266,
255 | "distance_4_0_a": 0.7017801523672382,
256 | "distance_4_7_n": 1.038589736005079,
257 | "distance_6_6_a": 0.0,
258 | "distance_9_5_a": 1.1163732770297798,
259 | "distance_7_9_p": 1.738347426767234,
260 | "distance_5_5_p": 0.0,
261 | "distance_6_6_p": 0.0,
262 | "distance_5_4_n": 0.8989421549204621,
263 | "distance_5_1_n": 0.9396923095874286,
264 | "distance_0_9_a": 0.7940329531837875,
265 | "distance_3_2_n": 1.7432304490398707,
266 | "distance_3_4_p": 2.075934042660265,
267 | "distance_2_7_p": 1.4853521436453434,
268 | "distance_3_5_a": 1.0787134176128035,
269 | "distance_4_8_a": 0.8763262963975791,
270 | "distance_3_2_p": 1.4814534761862888,
271 | "distance_0_0_p": 0.0,
272 | "distance_9_9_a": 0.0,
273 | "distance_8_6_n": 2.2515126624097235,
274 | "distance_9_3_p": 1.1921328705248015,
275 | "distance_4_5_n": 0.8989421549204621,
276 | "distance_4_0_n": 1.3066880195983686,
277 | "distance_3_9_p": 1.1921328705248015,
278 | "distance_2_5_p": 0.9853346795821949,
279 | "distance_9_1_n": 0.9274840307914225,
280 | "distance_4_2_p": 1.188184534050606,
281 | "distance_9_6_a": 1.964927705387609,
282 | "distance_8_7_p": 1.578112007510768,
283 | "distance_6_2_n": 1.751304632916245,
284 | "distance_9_9_n": 0.0,
285 | "distance_4_1_n": 0.9201085964182454,
286 | "distance_3_1_p": 1.708809433476418,
287 | "distance_1_4_n": 0.9201085964182454,
288 | "distance_3_5_p": 1.435137054743393,
289 | "distance_4_4_a": 0.0,
290 | "distance_4_1_a": 1.2373792481458281,
291 | "distance_9_2_p": 1.5347064887510555,
292 | "distance_8_1_n": 1.7707280145280038,
293 | "distance_1_0_p": 1.2838908304729557,
294 | "distance_0_6_n": 0.900708365015138,
295 | "distance_0_9_p": 0.7606279547980002,
296 | "distance_5_2_n": 0.8022785571405997,
297 | "distance_4_6_n": 0.6800542901757503,
298 | "distance_7_3_a": 1.5292893405050023,
299 | "distance_5_a_n": 0.438130210353993,
300 | "distance_7_6_p": 1.7524709729867851,
301 | "distance_1_2_a": 1.5732924096585208,
302 | "distance_3_2_a": 1.0065065019562667,
303 | "distance_2_6_n": 1.751304632916245,
304 | "distance_8_2_p": 0.5733410122621194,
305 | "distance_7_a_n": 1.4106084321775967,
306 | "distance_8_4_p": 1.1621579125865225,
307 | "distance_7_2_p": 1.4853521436453434,
308 | "distance_9_2_n": 1.1526721088817027,
309 | "distance_8_a_n": 0.8293850629416397,
310 | "distance_9_0_n": 1.0228653047889225,
311 | "distance_4_1_p": 0.6074559840001257,
312 | "distance_6_4_p": 2.022193991360055,
313 | "distance_4_9_a": 0.586161196410103,
314 | "distance_7_0_p": 1.5466340886088261,
315 | "distance_3_3_n": 0.0,
316 | "distance_2_3_a": 1.0065065019562667,
317 | "distance_4_0_p": 0.6286827002822157,
318 | "distance_8_4_n": 1.5146275176097248,
319 | "distance_5_1_p": 0.4828491145537015,
320 | "distance_1_1_n": 0.0,
321 | "distance_8_6_p": 1.243962029845136
322 | }
--------------------------------------------------------------------------------
/start_train.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
# Alternate training, LFW evaluation and triplet re-selection for 100 rounds.
# The shebang was missing: "{1..100}" brace expansion is a bash feature and
# silently fails (runs once with the literal string) under plain /bin/sh.
for i in {1..100}
do
    echo "Looping ... number $i"
    python train.py
    python lfw_eval.py
    python train_eval.py
done
--------------------------------------------------------------------------------
/template.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | import json
3 |
4 |
def replace():
    """Fill the $(distance_...) placeholders in README.template with values
    from result.json and write the result to README.md.

    Placeholders look like ``$(distance_3_a_p)``; values are rendered with
    four decimal places. Raises KeyError if result.json lacks a key that the
    key grid below expects.
    """
    with open('result.json', 'r', encoding="utf-8") as file:
        result = json.load(file)

    # read() replaces the original readlines()+join round-trip.
    with open('README.template', 'r', encoding="utf-8") as file:
        text = file.read()

    # Build the full key grid once instead of three copy-pasted replace loops:
    # per-digit anchor/positive and anchor/negative distances, plus the full
    # 10x10 grid for each of the three roles.
    keys = []
    for i in range(10):
        keys.append('distance_{}_a_p'.format(i))
        keys.append('distance_{}_a_n'.format(i))
        for j in range(10):
            for role in ('a', 'p', 'n'):
                keys.append('distance_{}_{}_{}'.format(i, j, role))

    for key in keys:
        text = text.replace('$({})'.format(key), "{0:.4f}".format(result[key]))

    with open('README.md', 'w', encoding="utf-8") as file:
        file.write(text)


if __name__ == '__main__':
    replace()
34 |
--------------------------------------------------------------------------------
/train.py:
--------------------------------------------------------------------------------
1 | import argparse
2 |
3 | import keras
4 | import tensorflow as tf
5 | from keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau
6 | from keras.utils import multi_gpu_model
7 |
8 | from config import patience, epochs, num_train_samples, num_lfw_valid_samples, batch_size
9 | from data_generator import DataGenSequence
10 | from model import build_model
11 | from utils import get_available_gpus, get_available_cpus, ensure_folder, triplet_loss, get_smallest_loss, get_best_model
12 |
if __name__ == '__main__':
    # Parse arguments
    ap = argparse.ArgumentParser()
    # NOTE(review): no arguments are registered, so args is always empty —
    # presumably a leftover scaffold; verify before removing.
    args = vars(ap.parse_args())
    checkpoint_models_path = 'models/'
    # Resume from the checkpoint with the lowest recorded val_loss, if any.
    pretrained_path = get_best_model()
    ensure_folder('models/')

    # Callbacks
    tensor_board = keras.callbacks.TensorBoard(log_dir='./logs', histogram_freq=0, write_graph=True, write_images=True)
    model_names = checkpoint_models_path + 'model.{epoch:02d}-{val_loss:.4f}.hdf5'
    model_checkpoint = ModelCheckpoint(model_names, monitor='val_loss', verbose=1, save_best_only=True)
    early_stop = EarlyStopping('val_loss', patience=patience)
    reduce_lr = ReduceLROnPlateau('val_loss', factor=0.5, patience=int(patience / 2), verbose=1)


    class MyCbk(keras.callbacks.Callback):
        # Checkpoint callback that saves the original (single-GPU) model;
        # saving through the multi_gpu_model wrapper would store the wrapped
        # topology instead of the shareable weights.
        def __init__(self, model):
            keras.callbacks.Callback.__init__(self)
            self.model_to_save = model

        def on_epoch_end(self, epoch, logs=None):
            # Save only when this epoch beats every loss already on disk
            # (get_smallest_loss scans existing checkpoint filenames).
            fmt = checkpoint_models_path + 'model.%02d-%.4f.hdf5'
            smallest_loss = get_smallest_loss()
            if float(logs['val_loss']) < smallest_loss:
                self.model_to_save.save(fmt % (epoch, logs['val_loss']))


    # Load our model, added support for Multi-GPUs
    num_gpu = len(get_available_gpus())
    if num_gpu >= 2:
        # Build weights on the CPU so every GPU replica shares the same variables.
        with tf.device("/cpu:0"):
            model = build_model()
            if pretrained_path is not None:
                model.load_weights(pretrained_path)

        new_model = multi_gpu_model(model, gpus=num_gpu)
        # rewrite the callback: saving through the original model and not the multi-gpu model.
        model_checkpoint = MyCbk(model)
    else:
        new_model = build_model()
        if pretrained_path is not None:
            new_model.load_weights(pretrained_path)

    sgd = keras.optimizers.SGD(lr=1e-5, momentum=0.9, nesterov=True, decay=1e-6)
    # adam = keras.optimizers.Adam(lr=0.001)
    new_model.compile(optimizer=sgd, loss=triplet_loss)

    print(new_model.summary())

    # Final callbacks
    callbacks = [tensor_board, model_checkpoint, early_stop, reduce_lr]

    # Start Fine-tuning
    new_model.fit_generator(DataGenSequence('train'),
                            steps_per_epoch=num_train_samples // batch_size,
                            validation_data=DataGenSequence('valid'),
                            validation_steps=num_lfw_valid_samples // batch_size,
                            epochs=epochs,
                            verbose=1,
                            callbacks=callbacks,
                            use_multiprocessing=True,
                            workers=get_available_cpus() // 2
                            )
77 |
--------------------------------------------------------------------------------
/train_eval.py:
--------------------------------------------------------------------------------
1 | import json
2 | import multiprocessing as mp
3 | import os
4 | import pickle
5 | import queue
6 | from multiprocessing import Process
7 | from multiprocessing import Process
8 |
9 | import cv2 as cv
10 | import numpy as np
11 | from keras.applications.inception_resnet_v2 import preprocess_input
12 | from tqdm import tqdm
13 |
14 | from config import image_folder, img_size, channel, num_train_samples, SENTINEL, semi_hard_mode
15 | from utils import get_best_model, get_train_images
16 |
17 |
class InferenceWorker(Process):
    """GPU-pinned worker process that embeds (a, p, n) image triplets.

    Pops three image names at a time from in_queue, runs them through the
    best saved model, and pushes {'image_name', 'embedding'} dicts to
    out_queue. One SENTINEL is put on signal_queue per processed triplet so
    the listener process can advance its progress bar.
    """

    def __init__(self, gpuid, in_queue, out_queue, signal_queue):
        Process.__init__(self, name='ImageProcessor')

        self.gpuid = gpuid
        self.in_queue = in_queue
        self.out_queue = out_queue
        self.signal_queue = signal_queue

    def run(self):
        # Pin this process to a single GPU before keras/TF are imported.
        os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
        os.environ["CUDA_VISIBLE_DEVICES"] = str(self.gpuid)
        print("InferenceWorker init, GPU ID: {}".format(self.gpuid))

        # Imported here (not at module top) so the CUDA env vars above take effect.
        from model import build_model

        # load models
        model = build_model()
        model.load_weights(get_best_model())

        while True:
            try:
                sample = {}
                try:
                    # Names were enqueued in a/p/n order, three per triplet.
                    sample['a'] = self.in_queue.get(block=False)
                    sample['p'] = self.in_queue.get(block=False)
                    sample['n'] = self.in_queue.get(block=False)
                except queue.Empty:
                    break

                batch_inputs = np.empty((3, 1, img_size, img_size, channel), dtype=np.float32)

                for j, role in enumerate(['a', 'p', 'n']):
                    image_name = sample[role]
                    filename = os.path.join(image_folder, image_name)
                    image_bgr = cv.imread(filename)
                    # Bug fix: INTER_CUBIC was passed positionally, landing in
                    # cv2.resize's `dst` parameter; it must be the
                    # `interpolation` keyword argument.
                    image_bgr = cv.resize(image_bgr, (img_size, img_size), interpolation=cv.INTER_CUBIC)
                    image_rgb = cv.cvtColor(image_bgr, cv.COLOR_BGR2RGB)
                    batch_inputs[j, 0] = preprocess_input(image_rgb)

                y_pred = model.predict([batch_inputs[0], batch_inputs[1], batch_inputs[2]])
                # The model outputs the three 128-d embeddings concatenated per row.
                a = y_pred[0, 0:128]
                p = y_pred[0, 128:256]
                n = y_pred[0, 256:384]

                self.out_queue.put({'image_name': sample['a'], 'embedding': a})
                self.out_queue.put({'image_name': sample['p'], 'embedding': p})
                self.out_queue.put({'image_name': sample['n'], 'embedding': n})
                self.signal_queue.put(SENTINEL)

                if self.in_queue.qsize() == 0:
                    break
            except Exception as e:
                # NOTE(review): deliberate best-effort — a failed triplet is
                # logged and skipped so one bad image does not kill the worker.
                print(e)

        import keras.backend as K
        K.clear_session()
        print('InferenceWorker done, GPU ID {}'.format(self.gpuid))
77 |
78 |
class Scheduler:
    """Fans image names out to one InferenceWorker per GPU id and collects
    the resulting embeddings on a shared output queue."""

    def __init__(self, gpuids, signal_queue):
        self.signal_queue = signal_queue
        self._gpuids = gpuids
        manager = mp.Manager()
        self.in_queue = manager.Queue()
        self.out_queue = manager.Queue()
        self.__init_workers()

    def __init_workers(self):
        # One worker process per GPU, all sharing the same three queues.
        self._workers = [
            InferenceWorker(gpuid, self.in_queue, self.out_queue, self.signal_queue)
            for gpuid in self._gpuids
        ]

    def start(self, names):
        """Enqueue every name, run all workers to completion and return the
        queue holding their output."""
        for name in names:
            self.in_queue.put(name)

        for worker in self._workers:
            worker.start()

        for worker in self._workers:
            worker.join()
        print("all of workers have been done")
        return self.out_queue
108 |
109 |
def run(gpuids, q):
    """Embed every training image across the given GPUs; returns the
    scheduler's output queue once all workers have finished."""
    image_names = get_train_images()
    scheduler = Scheduler(gpuids, q)
    return scheduler.start(image_names)
119 |
120 |
def listener(q):
    """Progress-bar process: advance one tick per sentinel received on q,
    stopping when None arrives."""
    progress = tqdm(total=num_train_samples // 3)
    for _ in iter(q.get, None):
        progress.update()
125 |
126 |
def create_train_embeddings():
    """Compute embeddings for all train images on 4 GPUs and pickle them.

    Spawns a listener process that drives a progress bar from the workers'
    signal queue, runs the scheduler, then dumps {image_name: embedding}
    to data/train_embeddings.p.
    """
    gpuids = ['0', '1', '2', '3']
    print('GPU IDs: ' + str(gpuids))

    manager = mp.Manager()
    q = manager.Queue()
    proc = mp.Process(target=listener, args=(q,))
    proc.start()

    out_queue = run(gpuids, q)
    out_dict = {}
    # Drain the output queue into a dict keyed by image name.
    while out_queue.qsize() > 0:
        item = out_queue.get()
        out_dict[item['image_name']] = item['embedding']

    with open("data/train_embeddings.p", "wb") as file:
        pickle.dump(out_dict, file)

    # None is the listener's stop sentinel; join it before returning.
    q.put(None)
    proc.join()
147 |
148 |
# Loaded once at import time; calculate_distance_list iterates this
# module-level list.
train_images = get_train_images()
150 |
151 |
def calculate_distance_list(image_i):
    """Squared L2 distances from image_i's embedding to every train image.

    Reads the module-level `embeddings` dict and `train_images` list;
    returns a float32 array of length num_train_samples.
    """
    anchor = embeddings[image_i]
    distances = np.empty(shape=(num_train_samples,), dtype=np.float32)
    for idx, other in enumerate(train_images):
        delta = anchor - embeddings[other]
        distances[idx] = np.square(np.linalg.norm(delta))
    return distances
160 |
161 |
if __name__ == '__main__':
    print('creating train embeddings')
    create_train_embeddings()

    print('loading train embeddings')
    with open('data/train_embeddings.p', 'rb') as file:
        embeddings = pickle.load(file)

    print('selecting train triplets')
    # Imported late on purpose: triplets.py loads data/train_embeddings.p at
    # import time, so it must not be imported before create_train_embeddings()
    # has written that file.
    from triplets import select_train_triplets

    train_triplets = select_train_triplets(semi_hard_mode)
    print('number of train triplets: ' + str(len(train_triplets)))

    print('saving train triplets')
    with open('data/train_triplets.json', 'w') as file:
        json.dump(train_triplets, file)

    print('loading train triplets')
    with open('data/train_triplets.json', 'r') as file:
        train_triplets = json.load(file)

    print('calculate distances')
    # Sanity statistics: squared anchor-positive and anchor-negative distances
    # over the freshly selected triplets.
    distance_a_p_list = []
    distance_a_n_list = []
    for triplet in tqdm(train_triplets):
        embedding_a = embeddings[triplet['a']]
        embedding_p = embeddings[triplet['p']]
        embedding_n = embeddings[triplet['n']]
        distance_a_p = np.square(np.linalg.norm(embedding_a - embedding_p))
        distance_a_p_list.append(distance_a_p)
        distance_a_n = np.square(np.linalg.norm(embedding_a - embedding_n))
        distance_a_n_list.append(distance_a_n)

    print('np.mean(distance_a_p_list)' + str(np.mean(distance_a_p_list)))
    print('np.max(distance_a_p_list)' + str(np.max(distance_a_p_list)))
    print('np.min(distance_a_p_list)' + str(np.min(distance_a_p_list)))
    print('np.std(distance_a_p_list)' + str(np.std(distance_a_p_list)))
    print('np.mean(distance_a_n_list)' + str(np.mean(distance_a_n_list)))
    print('np.max(distance_a_n_list)' + str(np.max(distance_a_n_list)))
    print('np.min(distance_a_n_list)' + str(np.min(distance_a_n_list)))
    print('np.std(distance_a_n_list)' + str(np.std(distance_a_n_list)))
204 |
--------------------------------------------------------------------------------
/triplets.py:
--------------------------------------------------------------------------------
1 | import json
2 | import math
3 | import pickle
4 | import random
5 | from multiprocessing import Pool
6 |
7 | import numpy as np
8 | from tqdm import tqdm
9 |
10 | from config import alpha, num_train_samples, triplets_selection_batch_size, semi_hard_mode, hard_mode
11 | from utils import get_data_stats
12 |
# Module-level state shared by every selection helper below.
# NOTE: loaded at import time — importing this module requires
# data/train_embeddings.p to already exist (see train_eval.py's late import).
ids, images, image2id, id2images = get_data_stats()
train_images = images
with open('data/train_embeddings.p', 'rb') as file:
    embeddings = pickle.load(file)
17 |
18 |
def select_hard(batch, distance_mat, a_image, a_id, p_image):
    """Hardest-negative selection: the image nearest the anchor that is
    neither the anchor, the positive, nor of the anchor's identity."""
    anchor_index = batch.index(a_image)
    # Walk candidates from nearest to farthest; keep the first valid one.
    for candidate_index in np.argsort(distance_mat[anchor_index]):
        n_image = batch[candidate_index]
        if n_image != a_image and n_image != p_image and image2id[n_image] != a_id:
            break

    return n_image
29 |
30 |
def select_semi_hard(batch, distance_mat, a_image, a_id, p_image):
    """Semi-hard negative selection with two fallbacks.

    Prefers negatives with d(a,p) < d(a,n) <= d(a,p) + alpha; if none exist,
    loosens to d(a,n) <= d(a,p) + alpha; if still none, falls back to the
    hardest valid negative. Returns one candidate chosen at random.
    """
    anchor_index = batch.index(a_image)

    d_a_p = np.square(np.linalg.norm(embeddings[a_image] - embeddings[p_image]))

    size = len(batch)

    # Semi-hard condition: d(a,p) < d(a,n) <= d(a,p) + alpha, other identity.
    candidates = [batch[k] for k in range(size)
                  if image2id[batch[k]] != a_id
                  and d_a_p < distance_mat[anchor_index, k] <= d_a_p + alpha]
    if not candidates:
        # Loosened condition: d(a,n) <= d(a,p) + alpha.
        candidates = [batch[k] for k in range(size)
                      if image2id[batch[k]] != a_id
                      and distance_mat[anchor_index, k] <= d_a_p + alpha]
    if not candidates:
        # Still nothing suitable: take the hardest valid negative.
        candidates = [select_hard(batch, distance_mat, a_image, a_id, p_image)]

    return random.choice(candidates)
56 |
57 |
def select_one_triplet(batch, a_index, distance_mat, select_mode):
    """Build one (anchor, positive, negative) triplet for batch[a_index].

    Raises ValueError when the anchor's identity has no other image to serve
    as a positive.
    """
    anchor = batch[a_index]
    anchor_id = image2id[anchor]
    if len(id2images[anchor_id]) < 2:
        raise ValueError('Cannot find any positives for the specified anchor image.')

    # Positive: any other image of the same identity.
    positive = random.choice([img for img in id2images[anchor_id] if img != anchor])

    # Negative: strategy depends on the selection mode.
    if select_mode == semi_hard_mode:
        negative = select_semi_hard(batch, distance_mat, anchor, anchor_id, positive)
    else:
        negative = select_hard(batch, distance_mat, anchor, anchor_id, positive)

    return anchor, positive, negative
75 |
76 |
def select_one_batch(config):
    """Select triplets for one slice [start, end) of train_images.

    config: (start, end, select_mode) tuple (Pool.imap passes one argument).
    Returns a list of {'a', 'p', 'n'} dicts; anchors whose identity has no
    positive are skipped.
    """
    start, end, select_mode = config
    length = end - start
    batch = train_images[start:end]

    # Pairwise squared L2 distances. Improvements over the original:
    # the row embedding is hoisted out of the inner loop, and the symmetric
    # value d(i, j) == d(j, i) is computed only once.
    distance_mat = np.empty(shape=(length, length), dtype=np.float32)
    for i in range(length):
        embedding_i = embeddings[batch[i]]
        distance_mat[i, i] = 0.0
        for j in range(i + 1, length):
            dist = np.square(np.linalg.norm(embedding_i - embeddings[batch[j]]))
            distance_mat[i, j] = dist
            distance_mat[j, i] = dist

    batch_triplets = []
    for a_index in range(length):
        try:
            a_image, p_image, n_image = select_one_triplet(batch, a_index, distance_mat, select_mode)
            batch_triplets.append({'a': a_image, 'p': p_image, 'n': n_image})
        except ValueError:
            # Anchor has no positive image of the same identity.
            pass

    return batch_triplets
98 |
99 |
def select_train_triplets(select_mode):
    """Select triplets over all train samples, batch-parallel across 20
    worker processes.

    select_mode: semi_hard_mode or hard_mode (forwarded to select_one_batch).
    Returns the flattened list of triplet dicts from every batch.
    """
    num_batches = int(math.ceil(num_train_samples / triplets_selection_batch_size))
    remain = num_train_samples
    batch_configs = []
    for i in range(num_batches):
        start = i * triplets_selection_batch_size
        if remain >= triplets_selection_batch_size:
            end = start + triplets_selection_batch_size
            remain -= triplets_selection_batch_size
        else:
            # Last, possibly partial batch.
            end = start + remain
        batch_configs.append((start, end, select_mode))

    # Context manager closes/joins the pool even on error; the original
    # leaked the Pool's worker processes.
    with Pool(20) as pool:
        result = list(tqdm(pool.imap(select_one_batch, batch_configs), total=num_batches))

    train_triplets = []
    for triplet_list in result:
        train_triplets.extend(triplet_list)

    return train_triplets
120 |
121 |
if __name__ == '__main__':
    # Shuffle in place so each selection batch mixes identities, then select
    # hard triplets and persist them for the data generator.
    np.random.shuffle(train_images)
    train_triplets = select_train_triplets(hard_mode)

    with open('data/train_triplets.json', 'w') as file:
        json.dump(train_triplets, file)

    print('len(train_triplets): ' + str(len(train_triplets)))
130 |
--------------------------------------------------------------------------------
/utils.py:
--------------------------------------------------------------------------------
1 | import multiprocessing
2 | import os
3 | import random
4 |
5 | import cv2 as cv
6 | import keras.backend as K
7 | import numpy as np
8 | import tensorflow as tf
9 | from tensorflow.python.client import device_lib
10 | from tqdm import tqdm
11 |
12 | from config import alpha, identity_annot_filename, num_train_samples, lfw_folder
13 |
14 |
def ensure_folder(folder):
    """Create folder (including parents) if it does not already exist."""
    # exist_ok avoids the TOCTOU race between the original exists() check and
    # makedirs() when several processes create the same folder.
    os.makedirs(folder, exist_ok=True)
18 |
19 |
# getting the number of GPUs
def get_available_gpus():
    """Names of all GPU devices visible to TensorFlow."""
    gpu_names = []
    for device in device_lib.list_local_devices():
        if device.device_type == 'GPU':
            gpu_names.append(device.name)
    return gpu_names
24 |
25 |
# getting the number of CPUs
def get_available_cpus():
    """Number of logical CPUs on this machine."""
    cpu_count = multiprocessing.cpu_count()
    return cpu_count
29 |
30 |
def draw_str(dst, target, s):
    """Draw string s on image dst at target=(x, y): a dark shadow offset by
    one pixel, then white text on top for legibility on any background."""
    x, y = target
    shadow_origin = (x + 1, y + 1)
    cv.putText(dst, s, shadow_origin, cv.FONT_HERSHEY_PLAIN, 1.0, (0, 0, 0), thickness=2, lineType=cv.LINE_AA)
    cv.putText(dst, s, (x, y), cv.FONT_HERSHEY_PLAIN, 1.0, (255, 255, 255), lineType=cv.LINE_AA)
35 |
36 |
def get_excludes():
    """Image names listed in data/exclude.txt, one per line, whitespace
    stripped."""
    with open('data/exclude.txt') as file:
        return [line.strip() for line in file]
42 |
43 |
# Get statistics for train data
def get_data_stats():
    """Parse the identity annotation file into train-data lookup structures.

    Returns:
        ids: list of all identity labels (order unspecified, built from a set).
        images: sorted list of usable image file names.
        image2id: image name -> identity label.
        id2images: identity label -> list of image names.
    """
    with open(identity_annot_filename, 'r') as file:
        lines = file.readlines()

    ids = set()
    images = []
    image2id = {}
    id2images = {}
    # Set for O(1) membership; the original's list made the loop O(n * m).
    excludes = set(get_excludes())

    for line in lines:
        line = line.strip()
        if not line:
            continue
        tokens = line.split(' ')
        image_name = tokens[0].strip()
        # '202599.jpg' is skipped alongside the explicit excludes list —
        # presumably a known-bad image; confirm against the dataset notes.
        if image_name in excludes or image_name == '202599.jpg':
            continue
        # Renamed from `id`, which shadowed the builtin.
        identity = tokens[1].strip()
        ids.add(identity)
        images.append(image_name)
        image2id[image_name] = identity
        id2images.setdefault(identity, []).append(image_name)

    return list(ids), sorted(images), image2id, id2images
71 |
72 |
def triplet_loss(y_true, y_pred):
    """Triplet loss over rows of concatenated (anchor|positive|negative)
    128-d embeddings.

    y_pred packs three 128-d embeddings per row; y_true is ignored (Keras
    requires the argument). Loss is mean(max(0, d(a,p) - d(a,n) + alpha))
    with squared L2 distances.
    """
    anchor = y_pred[:, 0:128]
    positive = y_pred[:, 128:256]
    negative = y_pred[:, 256:384]
    d_pos = K.square(tf.norm(anchor - positive, axis=-1))
    d_neg = K.square(tf.norm(anchor - negative, axis=-1))
    return K.mean(K.maximum(0.0, d_pos - d_neg + alpha))
81 |
82 |
def get_random_triplets():
    """Build num_train_samples fully random (a, p, n) triplets.

    Anchors are drawn only from identities with at least two images; the
    positive shares the anchor's identity, the negative does not.
    Returns a list of {'a', 'p', 'n'} image-name dicts.
    """
    # Random selection of validation set samples
    ids, images, image2id, id2images = get_data_stats()

    images = images[:num_train_samples]
    # (removed unused local num_random_triplets from the original)

    data_set = []

    for _ in tqdm(range(num_train_samples)):
        # choose a_image: any image whose identity has at least one other image
        while True:
            a_image = random.choice(images)
            a_id = image2id[a_image]
            if len(id2images[a_id]) >= 2:
                break

        # choose p_image: same identity, different image
        while True:
            p_image = random.choice(id2images[a_id])
            if p_image != a_image:
                break

        # choose n_image: any image of a different identity
        while True:
            n_image = random.choice(images)
            n_id = image2id[n_image]
            if n_id != a_id:
                break

        data_set.append({'a': a_image, 'p': p_image, 'n': n_image})

    return data_set
113 |
114 |
def get_train_images():
    """Sorted list of all usable train image names (see get_data_stats)."""
    stats = get_data_stats()
    return stats[1]
118 |
119 |
def get_lfw_images():
    """List every LFW image referenced by data/people.txt, verifying each
    file exists under lfw_folder.

    Raises Exception on the first referenced image that is missing on disk.
    """
    with open('data/people.txt', 'r') as file:
        lines = file.readlines()

    names = []

    # The first two lines are header/count lines; data starts at index 2.
    for line in lines[2:]:
        tokens = line.strip().split()
        if len(tokens) <= 1:
            continue
        person_name = tokens[0]
        count = int(tokens[1])
        for j in range(1, count + 1):
            name = '{0}/{0}_{1}.jpg'.format(person_name, str(j).zfill(4))
            filename = os.path.join(lfw_folder, name)
            if not os.path.isfile(filename):
                raise Exception('File Not Found: {}'.format(filename))
            names.append(name)

    return names
141 |
142 |
def get_lfw_pairs():
    """Parse data/pairs.txt into match/mismatch image pairs.

    3-token lines describe same-person pairs, 4-token lines different-person
    pairs; the first (header) line is skipped. Returns dicts with keys
    'image_name_1', 'image_name_2' and 'same_person'.
    """
    def image_name(person, index):
        # LFW layout: <person>/<person>_<zero-padded index>.jpg
        return '{0}/{0}_{1}.jpg'.format(person, str(index).zfill(4))

    with open('data/pairs.txt', 'r') as file:
        lines = file.readlines()

    pairs = []

    for line in lines[1:]:
        tokens = line.strip().split()
        if len(tokens) == 3:
            name_1 = image_name(tokens[0], int(tokens[1]))
            name_2 = image_name(tokens[0], int(tokens[2]))
            pairs.append({'image_name_1': name_1, 'image_name_2': name_2, 'same_person': True})
        elif len(tokens) == 4:
            name_1 = image_name(tokens[0], int(tokens[1]))
            name_2 = image_name(tokens[2], int(tokens[3]))
            pairs.append({'image_name_1': name_1, 'image_name_2': name_2, 'same_person': False})

    return pairs
169 |
170 |
def get_smallest_loss():
    """Smallest validation loss among checkpoints in models/.

    Checkpoint files are named model.<epoch>-<val_loss>.hdf5. Returns
    sys.float_info.max when no checkpoint matches.
    """
    import re
    # Bug fix: the original pattern's named groups had no names ("(?P\d+)"),
    # which makes re.compile raise. Restored as a raw string with the group
    # names and escaped dots.
    pattern = r'model\.(?P<epoch>\d+)-(?P<val_loss>[0-9]*\.?[0-9]*)\.hdf5'
    p = re.compile(pattern)
    losses = [float(p.match(f).groups()[1]) for f in os.listdir('models/') if p.match(f)]
    if len(losses) == 0:
        import sys
        return sys.float_info.max
    else:
        return np.min(losses)
181 |
182 |
def get_latest_model():
    """Path of the most recently modified checkpoint in models/, or None."""
    import glob
    import os
    checkpoints = sorted(glob.glob('models/*.hdf5'), key=os.path.getmtime)
    return checkpoints[-1] if checkpoints else None
192 |
193 |
def get_best_model():
    """Path of the checkpoint with the lowest validation loss, or None.

    Checkpoint files are named model.<epoch>-<val_loss>.hdf5 under models/.
    Prints the chosen path (or None) before returning it, matching the
    original behavior.
    """
    import re
    # Bug fix: the original pattern's named groups had no names ("(?P\d+)"),
    # which makes re.compile raise. Restored as a raw string with the group
    # names and escaped dots.
    pattern = r'model\.(?P<epoch>\d+)-(?P<val_loss>[0-9]*\.?[0-9]*)\.hdf5'
    p = re.compile(pattern)
    files = [f for f in os.listdir('models/') if p.match(f)]
    filename = None
    if len(files) > 0:
        losses = [float(p.match(f).groups()[1]) for f in files]
        best_index = int(np.argmin(losses))
        filename = os.path.join('models', files[best_index])
    print('loading best model: {}'.format(filename))
    return filename
206 |
--------------------------------------------------------------------------------