├── .gitignore ├── LICENSE ├── README.md ├── README_EN.md ├── align_faces.py ├── align_facescrub.py ├── align_megaface.py ├── config.py ├── data_gen.py ├── demo.py ├── export.py ├── extract.py ├── focal_loss.py ├── images ├── 0_fn_0.jpg ├── 0_fn_0_aligned.jpg ├── 0_fn_1.jpg ├── 0_fn_1_aligned.jpg ├── 0_fp_0.jpg ├── 0_fp_0_aligned.jpg ├── 0_fp_1.jpg ├── 0_fp_1_aligned.jpg ├── 10_fn_0.jpg ├── 10_fn_0_aligned.jpg ├── 10_fn_1.jpg ├── 10_fn_1_aligned.jpg ├── 11_fn_0.jpg ├── 11_fn_0_aligned.jpg ├── 11_fn_1.jpg ├── 11_fn_1_aligned.jpg ├── 1_fn_0.jpg ├── 1_fn_0_aligned.jpg ├── 1_fn_1.jpg ├── 1_fn_1_aligned.jpg ├── 1_fp_0.jpg ├── 1_fp_0_aligned.jpg ├── 1_fp_1.jpg ├── 1_fp_1_aligned.jpg ├── 2_fn_0.jpg ├── 2_fn_0_aligned.jpg ├── 2_fn_1.jpg ├── 2_fn_1_aligned.jpg ├── 2_fp_0.jpg ├── 2_fp_1.jpg ├── 3_fn_0.jpg ├── 3_fn_0_aligned.jpg ├── 3_fn_1.jpg ├── 3_fn_1_aligned.jpg ├── 4_fn_0.jpg ├── 4_fn_0_aligned.jpg ├── 4_fn_1.jpg ├── 4_fn_1_aligned.jpg ├── 5_fn_0.jpg ├── 5_fn_0_aligned.jpg ├── 5_fn_1.jpg ├── 5_fn_1_aligned.jpg ├── 6_fn_0.jpg ├── 6_fn_0_aligned.jpg ├── 6_fn_1.jpg ├── 6_fn_1_aligned.jpg ├── 7_fn_0.jpg ├── 7_fn_0_aligned.jpg ├── 7_fn_1.jpg ├── 7_fn_1_aligned.jpg ├── 8_fn_0.jpg ├── 8_fn_0_aligned.jpg ├── 8_fn_1.jpg ├── 8_fn_1_aligned.jpg ├── 9_fn_0.jpg ├── 9_fn_0_aligned.jpg ├── 9_fn_1.jpg ├── 9_fn_1_aligned.jpg ├── megaface_cmc.jpg ├── megaface_cmc_2.jpg ├── megaface_roc.jpg ├── megaface_roc_2.jpg ├── megaface_stats.png ├── sample.jpg └── theta_dist.png ├── megaface ├── README.md ├── __init__.py ├── devkit │ └── experiments │ │ └── run_experiment.py ├── draw_curve.m ├── facescrub_noises.txt ├── match_result.py ├── megaface_noises.txt └── results │ ├── cmc_facescrub_megaface_0_1000000_1.json │ └── matches_facescrub_megaface_0_1000000_1.json ├── megaface_eval.py ├── megaface_utils.py ├── models.py ├── optimizer.py ├── pre_process.py ├── requirements.txt ├── retinaface ├── data │ ├── FDDB │ │ └── img_list.txt │ ├── __init__.py │ ├── config.py │ ├── data_augment.py │ └── wider_face.py ├── detector.py ├── layers │ ├── __init__.py │ ├── functions │ │ └── prior_box.py │ └── modules │ │ ├── __init__.py │ │ └── multibox_loss.py ├── loader.py ├── models │ ├── __init__.py │ ├── net.py │ └── retinaface.py ├── utils │ ├── __init__.py │ ├── box_utils.py │ ├── nms │ │ ├── __init__.py │ │ └── py_cpu_nms.py │ └── timer.py └── weights │ └── mobilenet0.25_Final.pth ├── silu.py ├── sponsor.jpg ├── test ├── Aaron Eckhart_1.jpg ├── Aaron Eckhart_10.jpg ├── Aaron Eckhart_11.jpg ├── Aaron Eckhart_12.jpg ├── Aaron Eckhart_13.jpg ├── Aaron Eckhart_14.jpg ├── Aaron Eckhart_153.jpg ├── Aaron Eckhart_2.jpg ├── Aaron Eckhart_3.jpg ├── Aaron Eckhart_30.jpg ├── Aaron Eckhart_4.jpg ├── Aaron Eckhart_5.jpg ├── Aaron Eckhart_6.jpg ├── Aaron Eckhart_7.jpg ├── Aaron Eckhart_8.jpg ├── Aaron Eckhart_9.jpg ├── Aaron_Eckhart_153.jpg ├── Jason Behr_27968.JPG └── test_align.py ├── train.py └── utils.py /.gitignore: -------------------------------------------------------------------------------- 1 | .idea/ 2 | runs/ 3 | data/CASIA-WebFace 4 | data/lfw_funneled 5 | __pycache__/ 6 | *.tar -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 
8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. 
Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 
179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # InsightFace 2 | 3 | PyTorch implementation of Additive Angular Margin Loss for Deep Face Recognition. 4 | [paper](https://arxiv.org/pdf/1801.07698.pdf). 5 | ``` 6 | @article{deng2018arcface, 7 | title={ArcFace: Additive Angular Margin Loss for Deep Face Recognition}, 8 | author={Deng, Jiankang and Guo, Jia and Niannan, Xue and Zafeiriou, Stefanos}, 9 | journal={arXiv:1801.07698}, 10 | year={2018} 11 | } 12 | ``` 13 | ## Performance 14 | 15 | - SGD with momentum 16 | - margin-m = 0.6 17 | - margin-s = 64.0 18 | - batch size = 256 19 | - input images are normalized with mean=[0.485, 0.456, 0.406] and std=[0.229, 0.224, 0.225] 20 | 21 | |Models|MegaFace|LFW|Download| 22 | |---|---|---|---| 23 | |SE-LResNet101E-IR|98.06%|99.80%|[Link](https://github.com/foamliu/InsightFace-v3/releases/download/v1.0/insight-face-v3.pt)| 24 | 25 | 26 | ## Dataset 27 | 28 | |Function|Dataset| 29 | |---|---| 30 | |Train|MS-Celeb-1M| 31 | |Test|MegaFace| 32 | 33 | ### Introduction 34 | 35 | The MS-Celeb-1M dataset is used for training: 3,804,846 faces of 85,164 identities. 36 | 37 | 38 | ## Dependencies 39 | - Python 3.6.8 40 | - PyTorch 1.3.0 41 | 42 | ## Usage 43 | 44 | ### Data wrangling 45 | Extract the images and scan them to get bounding boxes and landmarks: 46 | ```bash 47 | $ python extract.py 48 | $ python pre_process.py 49 | ``` 50 | 51 | Image alignment (a minimal sketch of the core transform follows below): 52 | 1. Face detection (RetinaFace MobileNet0.25). 53 | 2. Face alignment (similarity transformation). 54 | 3. Central face selection. 55 | 4. Resize -> 112x112.
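The similarity-transform step above is what align_faces.py and utils.align_face implement in this repo. Below is a minimal illustrative sketch of the idea, assuming the five RetinaFace landmarks (eye centers, nose tip, mouth corners) are already available; `align_112` is a hypothetical helper, not a function in this repo, and the template values are the repo's 96x112 reference points shifted by +8 px in x (the common 112x112 ArcFace template).

```python
import cv2 as cv
import numpy as np
from skimage import transform as trans

# Five-point template for a 112x112 ArcFace crop (the 96x112 reference
# points in align_faces.py, shifted by +8 px in x).
REFERENCE_112 = np.array([
    [38.2946, 51.6963], [73.5318, 51.5014], [56.0252, 71.7366],
    [41.5493, 92.3655], [70.7299, 92.2041]], dtype=np.float32)


def align_112(img, landmarks):
    """Warp a face to 112x112 given its five detected landmarks."""
    tform = trans.SimilarityTransform()
    tform.estimate(np.asarray(landmarks, dtype=np.float32), REFERENCE_112)
    # Keep only the 2x3 affine part of the 3x3 similarity matrix.
    return cv.warpAffine(img, tform.params[0:2, :], (112, 112))
```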
56 | 57 | |Original | Aligned & Resized | Original | Aligned & Resized | 58 | |---|---|---|---| 59 | |![image](https://github.com/foamliu/InsightFace/raw/master/images/0_raw.jpg)|![image](https://github.com/foamliu/InsightFace/raw/master/images/0_img.jpg)|![image](https://github.com/foamliu/InsightFace/raw/master/images/1_raw.jpg)|![image](https://github.com/foamliu/InsightFace/raw/master/images/1_img.jpg)| 60 | |![image](https://github.com/foamliu/InsightFace/raw/master/images/2_raw.jpg)|![image](https://github.com/foamliu/InsightFace/raw/master/images/2_img.jpg)|![image](https://github.com/foamliu/InsightFace/raw/master/images/3_raw.jpg)|![image](https://github.com/foamliu/InsightFace/raw/master/images/3_img.jpg)| 61 | |![image](https://github.com/foamliu/InsightFace/raw/master/images/4_raw.jpg)|![image](https://github.com/foamliu/InsightFace/raw/master/images/4_img.jpg)|![image](https://github.com/foamliu/InsightFace/raw/master/images/5_raw.jpg)|![image](https://github.com/foamliu/InsightFace/raw/master/images/5_img.jpg)| 62 | |![image](https://github.com/foamliu/InsightFace/raw/master/images/6_raw.jpg)|![image](https://github.com/foamliu/InsightFace/raw/master/images/6_img.jpg)|![image](https://github.com/foamliu/InsightFace/raw/master/images/7_raw.jpg)|![image](https://github.com/foamliu/InsightFace/raw/master/images/7_img.jpg)| 63 | |![image](https://github.com/foamliu/InsightFace/raw/master/images/8_raw.jpg)|![image](https://github.com/foamliu/InsightFace/raw/master/images/8_img.jpg)|![image](https://github.com/foamliu/InsightFace/raw/master/images/9_raw.jpg)|![image](https://github.com/foamliu/InsightFace/raw/master/images/9_img.jpg)| 64 | 65 | ### Train 66 | ```bash 67 | $ python train.py 68 | ``` 69 | 70 | To visualize the training process: 71 | ```bash 72 | $ tensorboard --logdir=runs 73 | ``` 74 | 75 | ## Performance evaluation 76 | 77 | ### MegaFace 78 | 79 | #### Introduction 80 | 81 | The [MegaFace](http://megaface.cs.washington.edu/) dataset includes 1,027,060 faces of 690,572 identities. 82 | 83 | Challenge 1 is used to test our model against 1 million distractors. 84 | 85 | ![image](https://github.com/foamliu/InsightFace-v2/raw/master/images/megaface_stats.png) 86 | 87 | #### Download 88 | 89 | 1. Download the MegaFace and FaceScrub images. 90 | 2. Download the FaceScrub annotation files: 91 | - facescrub_actors.txt 92 | - facescrub_actresses.txt 93 | 3. Download the Linux DevKit from the [MegaFace website](http://megaface.cs.washington.edu/) and extract it to the megaface folder: 94 | 95 | ```bash 96 | $ tar -vxf linux-devkit.tar.gz 97 | ``` 98 | 99 | #### Face Alignment 100 | 101 | 1. Align MegaFace images: 102 | 103 | ```bash 104 | $ python3 align_megaface.py 105 | ``` 106 | 107 | 2. Align FaceScrub images with annotations: 108 | 109 | ```bash 110 | $ python3 align_facescrub.py 111 | ``` 112 | 113 | #### Evaluation 114 | 115 | ```bash 116 | $ python3 megaface_eval.py 117 | ``` 118 | 119 | It does the following: 120 | 1. Generate features for FaceScrub and MegaFace. 121 | 2. Remove noisy images. 122 |
Note: we used the noise lists provided by InsightFace at https://github.com/deepinsight/insightface. 123 | 3. Run the MegaFace evaluation through the devkit. 124 | 125 | #### Results 126 | 127 | ##### Curves 128 | 129 | Draw the curves with the MATLAB script megaface/draw_curve.m. 130 | 131 | |CMC|ROC| 132 | |---|---| 133 | |![image](https://github.com/foamliu/InsightFace-v3/raw/master/images/megaface_cmc.jpg)|![image](https://github.com/foamliu/InsightFace-v3/raw/master/images/megaface_roc.jpg)| 134 | |![image](https://github.com/foamliu/InsightFace-v3/raw/master/images/megaface_cmc_2.jpg)|![image](https://github.com/foamliu/InsightFace-v3/raw/master/images/megaface_roc_2.jpg)| 135 | 136 | ##### Textual results 137 |
138 | Done matching! Score matrix size: 3359 966804
139 | Saving to results/otherFiles/facescrub_megaface_0_1000000_1.bin
140 | Loaded 3359 probes spanning 80 classes
141 | Loading from results/otherFiles/facescrub_facescrub_0.bin
142 | Probe score matrix size: 3359 3359
143 | distractor score matrix size: 3359 966804
144 | Done loading. Time to compute some stats!
145 | Finding top distractors!
146 | Done sorting distractor scores
147 | Making gallery!
148 | Done Making Gallery!
149 | Allocating ranks (966884)
150 | 
151 | Rank 1: 0.980616
152 | 
153 | 
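The numbers above are produced by the devkit (megaface/devkit/experiments/run_experiment.py). As a rough, illustrative sketch of what Rank-1 means here, assuming similarity score matrices shaped as in the log; `probe_scores`, `distractor_scores`, and `labels` are hypothetical names, not the devkit's API:

```python
import numpy as np

def rank1(probe_scores, distractor_scores, labels):
    """Fraction of (probe, gallery-mate) pairs in which the true mate
    outscores every one of the ~1M distractors."""
    labels = np.asarray(labels)
    top_distractor = distractor_scores.max(axis=1)  # hardest distractor per probe
    hits = trials = 0
    for i in range(len(labels)):
        mates = np.where(labels == labels[i])[0]
        mates = mates[mates != i]  # other images of the same identity
        hits += int(np.sum(probe_scores[i, mates] > top_distractor[i]))
        trials += len(mates)
    return hits / trials
```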
154 | 155 | 156 | ## A Little Sponsorship~ 157 | 158 | [image: sponsor.jpg] 159 | 160 | If this project helps you, feel free to offer a small sponsorship~ 161 | 162 | 163 |
-------------------------------------------------------------------------------- /README_EN.md: -------------------------------------------------------------------------------- 1 | # InsightFace 2 | 3 | PyTorch implementation of Additive Angular Margin Loss for Deep Face Recognition. 4 | [paper](https://arxiv.org/pdf/1801.07698.pdf). 5 | ``` 6 | @article{deng2018arcface, 7 | title={ArcFace: Additive Angular Margin Loss for Deep Face Recognition}, 8 | author={Deng, Jiankang and Guo, Jia and Niannan, Xue and Zafeiriou, Stefanos}, 9 | journal={arXiv:1801.07698}, 10 | year={2018} 11 | } 12 | ``` 13 | ## Performance 14 | 15 | - SGD with momentum 16 | - margin-m = 0.6 17 | - margin-s = 64.0 18 | - batch size = 256 19 | - input images are normalized with mean=[0.485, 0.456, 0.406] and std=[0.229, 0.224, 0.225] 20 | 21 | |Models|MegaFace|LFW|Download| 22 | |---|---|---|---| 23 | |SE-LResNet101E-IR|97.43%|99.80%|[Link](https://github.com/foamliu/InsightFace-v3/releases/download/v1.0/insight-face-v3.pt)| 24 | 25 | 26 | ## Dataset 27 | 28 | |Function|Dataset| 29 | |---|---| 30 | |Train|MS-Celeb-1M| 31 | |Test|MegaFace| 32 | 33 | ### Introduction 34 | 35 | The MS-Celeb-1M dataset is used for training: 3,804,846 faces of 85,164 identities. 36 | 37 | 38 | ## Dependencies 39 | - Python 3.6.8 40 | - PyTorch 1.3.0 41 | 42 | ## Usage 43 | 44 | ### Data wrangling 45 | Extract the images and scan them to get bounding boxes and landmarks: 46 | ```bash 47 | $ python extract.py 48 | $ python pre_process.py 49 | ``` 50 | 51 | Image alignment: 52 | 1. Face detection (RetinaFace MobileNet0.25). 53 | 2. Face alignment (similarity transformation). 54 | 3. Central face selection. 55 | 4. Resize -> 112x112. 56 | 57 | |Original | Aligned & Resized | Original | Aligned & Resized | 58 | |---|---|---|---| 59 | |![image](https://github.com/foamliu/InsightFace/raw/master/images/0_raw.jpg)|![image](https://github.com/foamliu/InsightFace/raw/master/images/0_img.jpg)|![image](https://github.com/foamliu/InsightFace/raw/master/images/1_raw.jpg)|![image](https://github.com/foamliu/InsightFace/raw/master/images/1_img.jpg)| 60 | |![image](https://github.com/foamliu/InsightFace/raw/master/images/2_raw.jpg)|![image](https://github.com/foamliu/InsightFace/raw/master/images/2_img.jpg)|![image](https://github.com/foamliu/InsightFace/raw/master/images/3_raw.jpg)|![image](https://github.com/foamliu/InsightFace/raw/master/images/3_img.jpg)| 61 | |![image](https://github.com/foamliu/InsightFace/raw/master/images/4_raw.jpg)|![image](https://github.com/foamliu/InsightFace/raw/master/images/4_img.jpg)|![image](https://github.com/foamliu/InsightFace/raw/master/images/5_raw.jpg)|![image](https://github.com/foamliu/InsightFace/raw/master/images/5_img.jpg)| 62 | |![image](https://github.com/foamliu/InsightFace/raw/master/images/6_raw.jpg)|![image](https://github.com/foamliu/InsightFace/raw/master/images/6_img.jpg)|![image](https://github.com/foamliu/InsightFace/raw/master/images/7_raw.jpg)|![image](https://github.com/foamliu/InsightFace/raw/master/images/7_img.jpg)| 63 | |![image](https://github.com/foamliu/InsightFace/raw/master/images/8_raw.jpg)|![image](https://github.com/foamliu/InsightFace/raw/master/images/8_img.jpg)|![image](https://github.com/foamliu/InsightFace/raw/master/images/9_raw.jpg)|![image](https://github.com/foamliu/InsightFace/raw/master/images/9_img.jpg)| 64 | 65 | ### Train 66 | ```bash 67 | $ python train.py 68 | ``` 69 | 70 | To visualize the training process: 71 | ```bash 72 | $ tensorboard --logdir=runs 73 | ``` 74 | 75 | ## Performance evaluation
76 | 77 | ### MegaFace 78 | 79 | #### Introduction 80 | 81 | The [MegaFace](http://megaface.cs.washington.edu/) dataset includes 1,027,060 faces of 690,572 identities. 82 | 83 | Challenge 1 is used to test our model against 1 million distractors. 84 | 85 | ![image](https://github.com/foamliu/InsightFace-v2/raw/master/images/megaface_stats.png) 86 | 87 | #### Download 88 | 89 | 1. Download the MegaFace and FaceScrub images. 90 | 2. Download the FaceScrub annotation files: 91 | - facescrub_actors.txt 92 | - facescrub_actresses.txt 93 | 3. Download the Linux DevKit from the [MegaFace website](http://megaface.cs.washington.edu/) and extract it to the megaface folder: 94 | 95 | ```bash 96 | $ tar -vxf linux-devkit.tar.gz 97 | ``` 98 | 99 | #### Face Alignment 100 | 101 | 1. Align MegaFace images: 102 | 103 | ```bash 104 | $ python3 align_megaface.py 105 | ``` 106 | 107 | 2. Align FaceScrub images with annotations: 108 | 109 | ```bash 110 | $ python3 align_facescrub.py 111 | ``` 112 | 113 | #### Evaluation 114 | 115 | ```bash 116 | $ python3 megaface_eval.py 117 | ``` 118 | 119 | It does the following: 120 | 1. Generate features for FaceScrub and MegaFace. 121 | 2. Remove noisy images. 122 |
Note: we used the noise lists provided by InsightFace at https://github.com/deepinsight/insightface. 123 | 3. Run the MegaFace evaluation through the devkit. 124 | 125 | #### Results 126 | 127 | ##### Curves 128 | 129 | Draw the curves with the MATLAB script megaface/draw_curve.m. 130 | 131 | |CMC|ROC| 132 | |---|---| 133 | |![image](https://github.com/foamliu/InsightFace-v3/raw/master/images/megaface_cmc.jpg)|![image](https://github.com/foamliu/InsightFace-v3/raw/master/images/megaface_roc.jpg)| 134 | |![image](https://github.com/foamliu/InsightFace-v3/raw/master/images/megaface_cmc_2.jpg)|![image](https://github.com/foamliu/InsightFace-v3/raw/master/images/megaface_roc_2.jpg)| 135 | 136 | ##### Textual results 137 |
138 | Done matching! Score matrix size: 3379 972313
139 | Saving to results/otherFiles/facescrub_megaface_0_1000000_1.bin
140 | Loaded 3379 probes spanning 80 classes
141 | Loading from results/otherFiles/facescrub_facescrub_0.bin
142 | Probe score matrix size: 3379 3379
143 | distractor score matrix size: 3379 972313
144 | Done loading. Time to compute some stats!
145 | Finding top distractors!
146 | Done sorting distractor scores
147 | Making gallery!
148 | Done Making Gallery!
149 | Allocating ranks (972393)
150 | 
151 | Rank 1: 0.974266
152 | 
153 | 
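For completeness, this is how the margin-m / margin-s hyper-parameters from the Performance section enter the loss: a minimal sketch of the additive angular margin from the cited paper, not a copy of this repo's models.py; `arcface_logits` is an illustrative name.

```python
import torch
import torch.nn.functional as F

def arcface_logits(embeddings, weight, labels, s=64.0, m=0.6):
    """cos(theta) against every class center, with the additive angular
    margin m applied to each sample's target class, then scaled by s."""
    cosine = F.linear(F.normalize(embeddings), F.normalize(weight))  # (N, C)
    theta = torch.acos(cosine.clamp(-1 + 1e-7, 1 - 1e-7))
    target = torch.zeros_like(cosine).scatter_(1, labels.view(-1, 1), 1.0)
    logits = s * torch.where(target.bool(), torch.cos(theta + m), cosine)
    return logits  # feed to CrossEntropyLoss / FocalLoss
```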
154 | -------------------------------------------------------------------------------- /align_faces.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Created on Mon Apr 24 15:43:29 2017 4 | @author: zhaoy 5 | """ 6 | import cv2 7 | import numpy as np 8 | from skimage import transform as trans 9 | 10 | # reference facial points, a list of coordinates (x,y) 11 | REFERENCE_FACIAL_POINTS = [ 12 | [30.29459953, 51.69630051], 13 | [65.53179932, 51.50139999], 14 | [48.02519989, 71.73660278], 15 | [33.54930115, 92.3655014], 16 | [62.72990036, 92.20410156] 17 | ] 18 | 19 | DEFAULT_CROP_SIZE = (96, 112) 20 | 21 | 22 | class FaceWarpException(Exception): 23 | def __str__(self): 24 | return 'In File {}:{}'.format( 25 | __file__, super().__str__()) 26 | 27 | 28 | def get_reference_facial_points(output_size=None, 29 | inner_padding_factor=0.0, 30 | outer_padding=(0, 0), 31 | default_square=False): 32 | tmp_5pts = np.array(REFERENCE_FACIAL_POINTS) 33 | tmp_crop_size = np.array(DEFAULT_CROP_SIZE) 34 | 35 | # 0) make the inner region a square 36 | if default_square: 37 | size_diff = max(tmp_crop_size) - tmp_crop_size 38 | tmp_5pts += size_diff / 2 39 | tmp_crop_size += size_diff 40 | 41 | # print('---> default:') 42 | # print(' crop_size = ', tmp_crop_size) 43 | # print(' reference_5pts = ', tmp_5pts) 44 | 45 | if (output_size and 46 | output_size[0] == tmp_crop_size[0] and 47 | output_size[1] == tmp_crop_size[1]): 48 | # print('output_size == DEFAULT_CROP_SIZE {}: return default reference points'.format(tmp_crop_size)) 49 | return tmp_5pts 50 | 51 | if (inner_padding_factor == 0 and 52 | outer_padding == (0, 0)): 53 | if output_size is None: 54 | print('No paddings to do: return default reference points') 55 | return tmp_5pts 56 | else: 57 | raise FaceWarpException( 58 | 'No paddings to do, output_size must be None or {}'.format(tmp_crop_size)) 59 | 60 | # check output size 61 | if not (0 <= inner_padding_factor <= 1.0): 62 | raise FaceWarpException('Not (0 <= inner_padding_factor <= 1.0)') 63 | 64 | if ((inner_padding_factor > 0 or outer_padding[0] > 0 or outer_padding[1] > 0) 65 | and output_size is None): 66 | output_size = (tmp_crop_size * \ 67 | (1 + inner_padding_factor * 2)).astype(np.int32) 68 | output_size += np.array(outer_padding) 69 | print(' deduced from paddings, output_size = ', output_size) 70 | 71 | if not (outer_padding[0] < output_size[0] 72 | and outer_padding[1] < output_size[1]): 73 | raise FaceWarpException('Not (outer_padding[0] < output_size[0]' 74 | 'and outer_padding[1] < output_size[1])') 75 | 76 | # 1) pad the inner region according to inner_padding_factor 77 | # print('---> STEP1: pad the inner region according to inner_padding_factor') 78 | if inner_padding_factor > 0: 79 | size_diff = tmp_crop_size * inner_padding_factor * 2 80 | tmp_5pts += size_diff / 2 81 | tmp_crop_size += np.round(size_diff).astype(np.int32) 82 | 83 | # print(' crop_size = ', tmp_crop_size) 84 | # print(' reference_5pts = ', tmp_5pts) 85 | 86 | # 2) resize the padded inner region 87 | # print('---> STEP2: resize the padded inner region') 88 | size_bf_outer_pad = np.array(output_size) - np.array(outer_padding) * 2 89 | # print(' crop_size = ', tmp_crop_size) 90 | # print(' size_bf_outer_pad = ', size_bf_outer_pad) 91 | 92 | if size_bf_outer_pad[0] * tmp_crop_size[1] != size_bf_outer_pad[1] * tmp_crop_size[0]: 93 | raise FaceWarpException('Must have (output_size - outer_padding)' 94 | ' = some_scale * (crop_size * (1.0 + inner_padding_factor)') 95 | 96 | scale_factor = size_bf_outer_pad[0].astype(np.float32) / tmp_crop_size[0] 97 | # print(' resize scale_factor = ', scale_factor) 98 | tmp_5pts = tmp_5pts * scale_factor 99 | # size_diff = tmp_crop_size * (scale_factor - min(scale_factor)) 100 | # tmp_5pts = tmp_5pts + size_diff / 2 101 | tmp_crop_size = size_bf_outer_pad 102 | # print(' crop_size = ', tmp_crop_size) 103 | # print(' reference_5pts = ', tmp_5pts) 104 | 105 | # 3) add outer_padding to make output_size 106 | reference_5point = tmp_5pts + np.array(outer_padding) 107 | tmp_crop_size = output_size 108 | # print('---> STEP3: add outer_padding to make output_size') 109 | # print(' crop_size = ', tmp_crop_size) 110 | # print(' reference_5pts = ', tmp_5pts) 111 | # 112 | # print('===> end get_reference_facial_points\n') 113 | 114 | return reference_5point 115 | 116 | 117 | def get_affine_transform_matrix(src_pts, dst_pts): 118 | tfm = np.float32([[1, 0, 0], [0, 1, 0]]) 119 | n_pts = src_pts.shape[0] 120 | ones = np.ones((n_pts, 1), src_pts.dtype) 121 | src_pts_ = np.hstack([src_pts, ones]) 122 | dst_pts_ = np.hstack([dst_pts, ones]) 123 | 124 | A, res, rank, s = np.linalg.lstsq(src_pts_, dst_pts_) 125 | 126 | if rank == 3: 127 | tfm = np.float32([ 128 | [A[0, 0], A[1, 0], A[2, 0]], 129 | [A[0, 1], A[1, 1], A[2, 1]] 130 | ]) 131 | elif rank == 2: 132 | tfm = np.float32([ 133 | [A[0, 0], A[1, 0], 0], 134 | [A[0, 1], A[1, 1], 0] 135 | ]) 136 | 137 | return tfm 138 | 139 | 140 | def warp_and_crop_face(src_img, # BGR 141 | facial_pts, 142 | reference_pts=None, 143 | crop_size=(96, 112), 144 | align_type='similarity'): 145 | if reference_pts is None: 146 | if crop_size[0] == 96 and crop_size[1] == 112: 147 | reference_pts = REFERENCE_FACIAL_POINTS 148 | else: 149 | default_square = False 150 | inner_padding_factor = 0 151 | outer_padding = (0, 0) 152 | output_size = crop_size 153 | 154 | reference_pts = get_reference_facial_points(output_size, 155 | inner_padding_factor, 156 | outer_padding, 157 | default_square) 158 | 159 | ref_pts = np.float32(reference_pts) 160 | ref_pts_shp = ref_pts.shape 161 | if max(ref_pts_shp) < 3 or min(ref_pts_shp) != 2: 162 | raise FaceWarpException( 163 | 'reference_pts.shape must be (K,2) or (2,K) and K>2') 164 | 165 | if ref_pts_shp[0] == 2: 166 | ref_pts = ref_pts.T 167 | 168 | src_pts = np.float32(facial_pts) 169 | src_pts_shp = src_pts.shape 170 | if max(src_pts_shp) < 3 or min(src_pts_shp) != 2: 171 | raise FaceWarpException( 172 | 'facial_pts.shape must be (K,2) or (2,K) and K>2') 173 | 174 | if src_pts_shp[0] == 2: 175 | src_pts = src_pts.T 176 | 177 | if src_pts.shape != ref_pts.shape: 178 | raise FaceWarpException( 179 | 'facial_pts and reference_pts must have the same shape') 180 | 181 | if align_type == 'cv2_affine': 182 | tfm = cv2.getAffineTransform(src_pts[0:3], ref_pts[0:3]) 183 | # print('cv2.getAffineTransform() returns tfm=\n' + str(tfm)) 184 | elif align_type == 'affine': 185 | tfm = get_affine_transform_matrix(src_pts, ref_pts) 186 | # print('get_affine_transform_matrix() returns tfm=\n' + str(tfm)) 187 | else: 188 | # tfm = get_similarity_transform_for_cv2(src_pts, ref_pts) 189 | tform = trans.SimilarityTransform() 190 | tform.estimate(src_pts, ref_pts) 191 | tfm = tform.params[0:2, :] 192 | 193 | face_img = cv2.warpAffine(src_img, tfm, (crop_size[0], crop_size[1])) 194 | 195 | return face_img # BGR 196 | -------------------------------------------------------------------------------- /align_facescrub.py: 
-------------------------------------------------------------------------------- 1 | import argparse 2 | import os 3 | import re 4 | from multiprocessing import Pool 5 | 6 | import cv2 as cv 7 | import numpy as np 8 | from tqdm import tqdm 9 | 10 | 11 | def resize(img): 12 | max_size = 800 13 | ratio = 1 14 | h, w = img.shape[:2] 15 | 16 | if h > max_size or w > max_size: 17 | if h > w: 18 | ratio = max_size / h 19 | else: 20 | ratio = max_size / w 21 | 22 | img = cv.resize(img, (int(round(w * ratio)), int(round(h * ratio))), interpolation=cv.INTER_CUBIC) 23 | return img, ratio 24 | 25 | 26 | def get_files(): 27 | annotation_files = ['facescrub_actors.txt', 'facescrub_actresses.txt'] 28 | 29 | samples = [] 30 | 31 | for anno in annotation_files: 32 | anno_file = os.path.join('megaface', anno) 33 | 34 | with open(anno_file, 'r') as fp: 35 | lines = fp.readlines() 36 | 37 | for line in lines[1:]: 38 | tokens = line.split('\t') 39 | name = tokens[0] 40 | face_id = tokens[2] 41 | url = tokens[3] 42 | # print(url) 43 | ext = url.split('.')[-1] 44 | # print(ext) 45 | 46 | bbox = tokens[4] 47 | filename = '{0}/{0}_{1}.{2}'.format(name, face_id, ext) 48 | full_path = 'megaface/FaceScrub/{}'.format(filename) 49 | if os.path.isfile(full_path): 50 | samples.append({'filename': filename, 'bbox': bbox}) 51 | 52 | # print(len(samples)) 53 | return samples 54 | 55 | 56 | def bb_intersection_over_union(boxA, boxB): 57 | # determine the (x, y)-coordinates of the intersection rectangle 58 | xA = max(boxA[0], boxB[0]) 59 | yA = max(boxA[1], boxB[1]) 60 | xB = min(boxA[2], boxB[2]) 61 | yB = min(boxA[3], boxB[3]) 62 | 63 | # compute the area of intersection rectangle 64 | interArea = max(0, xB - xA + 1) * max(0, yB - yA + 1) 65 | 66 | # compute the area of both the prediction and ground-truth 67 | # rectangles 68 | boxAArea = (boxA[2] - boxA[0] + 1) * (boxA[3] - boxA[1] + 1) 69 | boxBArea = (boxB[2] - boxB[0] + 1) * (boxB[3] - boxB[1] + 1) 70 | 71 | # compute the intersection over union by taking the intersection 72 | # area and dividing it by the sum of prediction + ground-truth 73 | # areas - the interesection area 74 | iou = interArea / float(boxAArea + boxBArea - interArea) 75 | 76 | # return the intersection over union value 77 | return iou 78 | 79 | 80 | def select_face(bboxes, boxB): 81 | max_iou = 0 82 | max_idx = 0 83 | 84 | for idx, boxA in enumerate(bboxes): 85 | iou = bb_intersection_over_union(boxA, boxB) 86 | # print(iou) 87 | 88 | if iou > max_iou: 89 | max_iou = max(iou, max_iou) 90 | max_idx = idx 91 | 92 | return max_idx 93 | 94 | 95 | def detect_face(data): 96 | from retinaface.detector import detector 97 | from utils import align_face 98 | 99 | src_path = data['src_path'] 100 | dst_path = data['dst_path'] 101 | boxB = np.array(data['boxB']) 102 | 103 | img = cv.imread(src_path) 104 | if img is not None: 105 | img, ratio = resize(img) 106 | boxB = boxB * ratio 107 | 108 | try: 109 | bboxes, landmarks = detector.detect_faces(img) 110 | 111 | if len(bboxes) > 0: 112 | i = select_face(bboxes, boxB) 113 | bbox, landms = bboxes[i], landmarks[i] 114 | img = align_face(img, [landms]) 115 | dirname = os.path.dirname(dst_path) 116 | os.makedirs(dirname, exist_ok=True) 117 | cv.imwrite(dst_path, img) 118 | except ValueError as err: 119 | print(err) 120 | except cv.error as err: 121 | print(err) 122 | 123 | return True 124 | 125 | 126 | def align_facescrub(src, dst): 127 | image_paths = [] 128 | for sample in get_files(): 129 | fname = sample['filename'] 130 | boxB = eval(sample['bbox']) 131 | 
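# NOTE (added comment): sample['bbox'] is a comma-separated 'x1,y1,x2,y2' string
# taken straight from the FaceScrub annotation file; eval() turns it into a tuple,
# but ast.literal_eval would be a safer parser for untrusted text. The resulting
# boxB is later matched against the detector's boxes via IoU in select_face().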
src_path = os.path.join(src, fname) 132 | dst_path = os.path.join(dst, fname).replace(' ', '_') 133 | pattern = re.compile(re.escape('.png'), re.IGNORECASE) 134 | dst_path = pattern.sub('.jpg', dst_path) 135 | image_paths.append({'src_path': src_path, 'dst_path': dst_path, 'boxB': boxB}) 136 | 137 | # print(image_paths[:20]) 138 | num_images = len(image_paths) 139 | print('num_images: ' + str(num_images)) 140 | 141 | with Pool(4) as p: 142 | r = list(tqdm(p.imap(detect_face, image_paths), total=num_images)) 143 | 144 | # for image_path in tqdm(image_paths): 145 | # detect_face(image_path) 146 | 147 | print('Completed!') 148 | 149 | 150 | def parse_args(): 151 | parser = argparse.ArgumentParser(description='Train face network') 152 | # general 153 | parser.add_argument('--src', type=str, default='megaface/FaceScrub', help='src path') 154 | parser.add_argument('--dst', type=str, default='megaface/FaceScrub_aligned', help='dst path') 155 | 156 | args = parser.parse_args() 157 | return args 158 | 159 | 160 | if __name__ == '__main__': 161 | args = parse_args() 162 | 163 | src = args.src 164 | dst = args.dst 165 | 166 | align_facescrub(src, dst) 167 | 168 | # python3 align_facescrub.py --src megaface/FaceScrub --dst megaface/FaceScrub_aligned 169 | -------------------------------------------------------------------------------- /align_megaface.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import os 3 | from multiprocessing import Pool 4 | 5 | import cv2 as cv 6 | import tqdm 7 | from tqdm import tqdm 8 | 9 | 10 | def resize(img): 11 | max_size = 800 12 | ratio = 1 13 | h, w = img.shape[:2] 14 | 15 | if h > max_size or w > max_size: 16 | if h > w: 17 | ratio = max_size / h 18 | else: 19 | ratio = max_size / w 20 | 21 | img = cv.resize(img, (int(round(w * ratio)), int(round(h * ratio)))) 22 | return img, ratio 23 | 24 | 25 | def detect_face(data): 26 | from retinaface.detector import detector 27 | from utils import align_face 28 | 29 | src_path = data['src_path'] 30 | dst_path = data['dst_path'] 31 | # print(src_path) 32 | 33 | img_raw = cv.imread(src_path) 34 | if img_raw is not None: 35 | img, _ = resize(img_raw) 36 | 37 | try: 38 | bboxes, landmarks = detector.detect_faces(img) 39 | 40 | if len(bboxes) > 0: 41 | bbox, landms = bboxes[0], landmarks[0] 42 | img = align_face(img, [landms]) 43 | dirname = os.path.dirname(dst_path) 44 | os.makedirs(dirname, exist_ok=True) 45 | cv.imwrite(dst_path, img) 46 | return True 47 | 48 | except ValueError as err: 49 | print(err) 50 | 51 | return False 52 | 53 | 54 | def align_megaface(src, dst): 55 | image_paths = [] 56 | for dirName, subdirList, fileList in tqdm(os.walk(src)): 57 | for fname in fileList: 58 | if fname.lower().endswith(('.jpg', '.png')): 59 | src_path = os.path.join(dirName, fname) 60 | dst_path = os.path.join(dirName.replace(src, dst), fname).replace(' ', '_') 61 | image_paths.append({'src_path': src_path, 'dst_path': dst_path}) 62 | 63 | # print(image_paths[:20]) 64 | num_images = len(image_paths) 65 | print('num_images: ' + str(num_images)) 66 | 67 | with Pool(4) as p: 68 | r = list(tqdm(p.imap(detect_face, image_paths), total=num_images)) 69 | 70 | # for image_path in tqdm(image_paths): 71 | # detect_face(image_path) 72 | 73 | print('Completed!') 74 | 75 | 76 | def parse_args(): 77 | parser = argparse.ArgumentParser(description='Train face network') 78 | # general 79 | parser.add_argument('--src', type=str, default='megaface/MegaFace', help='src path') 80 | 
parser.add_argument('--dst', type=str, default='megaface/MegaFace_aligned', help='dst path') 81 | 82 | args = parser.parse_args() 83 | return args 84 | 85 | 86 | if __name__ == '__main__': 87 | args = parse_args() 88 | 89 | src = args.src 90 | dst = args.dst 91 | 92 | align_megaface(src, dst) 93 | 94 | # python3 align_megaface.py --src megaface/MegaFace --dst megaface/MegaFace_aligned 95 | -------------------------------------------------------------------------------- /config.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import os 3 | 4 | import torch 5 | 6 | device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') # sets device for model and PyTorch tensors 7 | 8 | # Model parameters 9 | im_size = 112 10 | channel = 3 11 | emb_size = 512 12 | 13 | # Training parameters 14 | num_workers = 4 # for data-loading; right now, only 1 works with h5py 15 | grad_clip = 5. # clip gradients at an absolute value of 16 | print_freq = 100 # print training/validation stats every __ batches 17 | checkpoint = None # path to checkpoint, None if none 18 | 19 | # Data parameters 20 | num_classes = 85742 21 | num_samples = 5822653 22 | DATA_DIR = 'data' 23 | # faces_ms1m_folder = 'data/faces_ms1m_112x112' 24 | # faces_ms1m_folder = 'data/ms1m-retinaface-t1' 25 | faces_ms1m_folder = 'data/faces_emore' 26 | path_imgidx = os.path.join(faces_ms1m_folder, 'train.idx') 27 | path_imgrec = os.path.join(faces_ms1m_folder, 'train.rec') 28 | IMG_DIR = 'data/images' 29 | pickle_file = 'data/faces_ms1m_112x112.pickle' 30 | 31 | 32 | def get_logger(): 33 | logger = logging.getLogger() 34 | handler = logging.StreamHandler() 35 | formatter = logging.Formatter("%(asctime)s %(levelname)s \t%(message)s") 36 | handler.setFormatter(formatter) 37 | logger.addHandler(handler) 38 | logger.setLevel(logging.INFO) 39 | return logger 40 | 41 | 42 | logger = get_logger() 43 | -------------------------------------------------------------------------------- /data_gen.py: -------------------------------------------------------------------------------- 1 | import os 2 | import pickle 3 | 4 | from PIL import Image 5 | from torch.utils.data import Dataset 6 | from torchvision import transforms 7 | 8 | from config import IMG_DIR 9 | from config import pickle_file 10 | 11 | # Data augmentation and normalization for training 12 | # Just normalization for validation 13 | data_transforms = { 14 | 'train': transforms.Compose([ 15 | transforms.RandomHorizontalFlip(), 16 | transforms.ColorJitter(brightness=0.125, contrast=0.125, saturation=0.125), 17 | transforms.ToTensor(), 18 | transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]), 19 | ]), 20 | 'val': transforms.Compose([ 21 | transforms.ToTensor(), 22 | transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) 23 | ]), 24 | } 25 | 26 | 27 | class ArcFaceDataset(Dataset): 28 | def __init__(self, split): 29 | with open(pickle_file, 'rb') as file: 30 | data = pickle.load(file) 31 | 32 | self.split = split 33 | self.samples = data 34 | self.transformer = data_transforms['train'] 35 | 36 | def __getitem__(self, i): 37 | sample = self.samples[i] 38 | filename = sample['img'] 39 | label = sample['label'] 40 | 41 | filename = os.path.join(IMG_DIR, filename) 42 | img = Image.open(filename) 43 | img = self.transformer(img) 44 | 45 | return img, label 46 | 47 | def __len__(self): 48 | return len(self.samples) 49 | -------------------------------------------------------------------------------- /demo.py: 
-------------------------------------------------------------------------------- 1 | import cv2 as cv 2 | import numpy as np 3 | import torch 4 | from torchvision import transforms 5 | 6 | data_transforms = { 7 | 'train': transforms.Compose([ 8 | transforms.RandomHorizontalFlip(), 9 | transforms.ToTensor(), 10 | transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]) 11 | ]), 12 | 'val': transforms.Compose([ 13 | transforms.ToTensor(), 14 | transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) 15 | ]), 16 | } 17 | transformer = data_transforms['train'] 18 | 19 | if __name__ == "__main__": 20 | img = cv.imread('images/0_fn_0.jpg') 21 | img = transforms.ToPILImage()(img) 22 | arr = np.array(img) 23 | print(arr) 24 | print(np.max(arr)) 25 | print(np.min(arr)) 26 | print(np.mean(arr)) 27 | print(np.std(arr)) 28 | 29 | arr = arr.astype(np.float) 30 | arr = (arr - 127.5) / 128 31 | print(arr) 32 | print(np.max(arr)) 33 | print(np.min(arr)) 34 | print(np.mean(arr)) 35 | print(np.std(arr)) 36 | 37 | img = transformer(img) 38 | print(img) 39 | print(torch.max(img)) 40 | print(torch.min(img)) 41 | print(torch.mean(img)) 42 | print(torch.std(img)) 43 | -------------------------------------------------------------------------------- /export.py: -------------------------------------------------------------------------------- 1 | import time 2 | 3 | import torch 4 | 5 | if __name__ == '__main__': 6 | checkpoint = 'BEST_checkpoint.tar' 7 | print('loading {}...'.format(checkpoint)) 8 | start = time.time() 9 | checkpoint = torch.load(checkpoint) 10 | print('elapsed {} sec'.format(time.time() - start)) 11 | model = checkpoint['model'].module 12 | print(type(model)) 13 | print('use_se: ' + str(model.use_se)) 14 | print('fc: ' + str(model.fc)) 15 | # print('layer1: ' + str(model.layer1)) 16 | # print('layer2: ' + str(model.layer2)) 17 | # print('layer3: ' + str(model.layer3)) 18 | # print('layer4: ' + str(model.layer4)) 19 | 20 | # model.eval() 21 | filename = 'insight-face-v3.pt' 22 | print('saving {}...'.format(filename)) 23 | start = time.time() 24 | torch.save(model.state_dict(), filename) 25 | print('elapsed {} sec'.format(time.time() - start)) 26 | 27 | 28 | class HParams: 29 | def __init__(self): 30 | self.pretrained = False 31 | self.use_se = True 32 | 33 | 34 | config = HParams() 35 | 36 | print('loading {}...'.format(filename)) 37 | start = time.time() 38 | from models import resnet101 39 | 40 | model = resnet101(config) 41 | model.load_state_dict(torch.load(filename)) 42 | print('elapsed {} sec'.format(time.time() - start)) 43 | -------------------------------------------------------------------------------- /extract.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import zipfile 4 | 5 | 6 | def extract(filename): 7 | print('Extracting {}...'.format(filename)) 8 | zip_ref = zipfile.ZipFile(filename, 'r') 9 | zip_ref.extractall('data') 10 | zip_ref.close() 11 | 12 | 13 | if __name__ == "__main__": 14 | if not os.path.isdir('data/faces_emore'): 15 | extract('data/faces_emore.zip') 16 | -------------------------------------------------------------------------------- /focal_loss.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | 4 | 5 | class FocalLoss(nn.Module): 6 | 7 | def __init__(self, gamma=0): 8 | super(FocalLoss, self).__init__() 9 | self.gamma = gamma 10 | self.ce = torch.nn.CrossEntropyLoss() 11 | 12 | def forward(self, input, target): 13 | 
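# Focal loss (Lin et al., 2017): down-weight well-classified examples.
# CrossEntropyLoss returns -log p_t, so p_t = exp(-logp), and the modulating
# factor (1 - p_t) ** gamma suppresses easy examples. Note that self.ce
# already averages over the batch, so p here is batch-level, not per-sample.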
logp = self.ce(input, target) 14 | p = torch.exp(-logp) 15 | loss = (1 - p) ** self.gamma * logp 16 | return loss.mean() 17 | 18 | # class FocalLoss(nn.Module): 19 | # def __init__(self, gamma=0, size_average=True): 20 | # super(FocalLoss, self).__init__() 21 | # self.gamma = gamma 22 | # 23 | # def forward(self, input, target): 24 | # if input.dim() > 2: 25 | # input = input.view(input.size(0), input.size(1), -1) # N,C,H,W => N,C,H*W 26 | # input = input.transpose(1, 2) # N,C,H*W => N,H*W,C 27 | # input = input.contiguous().view(-1, input.size(2)) # N,H*W,C => N*H*W,C 28 | # target = target.view(-1, 1) 29 | # 30 | # logpt = F.log_softmax(input) 31 | # logpt = logpt.gather(1, target) 32 | # logpt = logpt.view(-1) 33 | # pt = Variable(logpt.data.exp()) 34 | # 35 | # loss = -1 * (1 - pt) ** self.gamma * logpt 36 | # return loss.mean() 37 | -------------------------------------------------------------------------------- /images/0_fn_0.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/foamliu/InsightFace-PyTorch/5b6dba6078b4fb8a9b83a1ec83ff02e308e8e658/images/0_fn_0.jpg -------------------------------------------------------------------------------- /images/0_fn_0_aligned.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/foamliu/InsightFace-PyTorch/5b6dba6078b4fb8a9b83a1ec83ff02e308e8e658/images/0_fn_0_aligned.jpg -------------------------------------------------------------------------------- /images/0_fn_1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/foamliu/InsightFace-PyTorch/5b6dba6078b4fb8a9b83a1ec83ff02e308e8e658/images/0_fn_1.jpg -------------------------------------------------------------------------------- /images/0_fn_1_aligned.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/foamliu/InsightFace-PyTorch/5b6dba6078b4fb8a9b83a1ec83ff02e308e8e658/images/0_fn_1_aligned.jpg -------------------------------------------------------------------------------- /images/0_fp_0.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/foamliu/InsightFace-PyTorch/5b6dba6078b4fb8a9b83a1ec83ff02e308e8e658/images/0_fp_0.jpg -------------------------------------------------------------------------------- /images/0_fp_0_aligned.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/foamliu/InsightFace-PyTorch/5b6dba6078b4fb8a9b83a1ec83ff02e308e8e658/images/0_fp_0_aligned.jpg -------------------------------------------------------------------------------- /images/0_fp_1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/foamliu/InsightFace-PyTorch/5b6dba6078b4fb8a9b83a1ec83ff02e308e8e658/images/0_fp_1.jpg -------------------------------------------------------------------------------- /images/0_fp_1_aligned.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/foamliu/InsightFace-PyTorch/5b6dba6078b4fb8a9b83a1ec83ff02e308e8e658/images/0_fp_1_aligned.jpg -------------------------------------------------------------------------------- /images/10_fn_0.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/foamliu/InsightFace-PyTorch/5b6dba6078b4fb8a9b83a1ec83ff02e308e8e658/images/10_fn_0.jpg -------------------------------------------------------------------------------- /images/10_fn_0_aligned.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/foamliu/InsightFace-PyTorch/5b6dba6078b4fb8a9b83a1ec83ff02e308e8e658/images/10_fn_0_aligned.jpg -------------------------------------------------------------------------------- /images/10_fn_1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/foamliu/InsightFace-PyTorch/5b6dba6078b4fb8a9b83a1ec83ff02e308e8e658/images/10_fn_1.jpg -------------------------------------------------------------------------------- /images/10_fn_1_aligned.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/foamliu/InsightFace-PyTorch/5b6dba6078b4fb8a9b83a1ec83ff02e308e8e658/images/10_fn_1_aligned.jpg -------------------------------------------------------------------------------- /images/11_fn_0.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/foamliu/InsightFace-PyTorch/5b6dba6078b4fb8a9b83a1ec83ff02e308e8e658/images/11_fn_0.jpg -------------------------------------------------------------------------------- /images/11_fn_0_aligned.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/foamliu/InsightFace-PyTorch/5b6dba6078b4fb8a9b83a1ec83ff02e308e8e658/images/11_fn_0_aligned.jpg -------------------------------------------------------------------------------- /images/11_fn_1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/foamliu/InsightFace-PyTorch/5b6dba6078b4fb8a9b83a1ec83ff02e308e8e658/images/11_fn_1.jpg -------------------------------------------------------------------------------- /images/11_fn_1_aligned.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/foamliu/InsightFace-PyTorch/5b6dba6078b4fb8a9b83a1ec83ff02e308e8e658/images/11_fn_1_aligned.jpg -------------------------------------------------------------------------------- /images/1_fn_0.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/foamliu/InsightFace-PyTorch/5b6dba6078b4fb8a9b83a1ec83ff02e308e8e658/images/1_fn_0.jpg -------------------------------------------------------------------------------- /images/1_fn_0_aligned.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/foamliu/InsightFace-PyTorch/5b6dba6078b4fb8a9b83a1ec83ff02e308e8e658/images/1_fn_0_aligned.jpg -------------------------------------------------------------------------------- /images/1_fn_1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/foamliu/InsightFace-PyTorch/5b6dba6078b4fb8a9b83a1ec83ff02e308e8e658/images/1_fn_1.jpg -------------------------------------------------------------------------------- /images/1_fn_1_aligned.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/foamliu/InsightFace-PyTorch/5b6dba6078b4fb8a9b83a1ec83ff02e308e8e658/images/1_fn_1_aligned.jpg -------------------------------------------------------------------------------- /images/1_fp_0.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/foamliu/InsightFace-PyTorch/5b6dba6078b4fb8a9b83a1ec83ff02e308e8e658/images/1_fp_0.jpg -------------------------------------------------------------------------------- /images/1_fp_0_aligned.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/foamliu/InsightFace-PyTorch/5b6dba6078b4fb8a9b83a1ec83ff02e308e8e658/images/1_fp_0_aligned.jpg -------------------------------------------------------------------------------- /images/1_fp_1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/foamliu/InsightFace-PyTorch/5b6dba6078b4fb8a9b83a1ec83ff02e308e8e658/images/1_fp_1.jpg -------------------------------------------------------------------------------- /images/1_fp_1_aligned.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/foamliu/InsightFace-PyTorch/5b6dba6078b4fb8a9b83a1ec83ff02e308e8e658/images/1_fp_1_aligned.jpg -------------------------------------------------------------------------------- /images/2_fn_0.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/foamliu/InsightFace-PyTorch/5b6dba6078b4fb8a9b83a1ec83ff02e308e8e658/images/2_fn_0.jpg -------------------------------------------------------------------------------- /images/2_fn_0_aligned.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/foamliu/InsightFace-PyTorch/5b6dba6078b4fb8a9b83a1ec83ff02e308e8e658/images/2_fn_0_aligned.jpg -------------------------------------------------------------------------------- /images/2_fn_1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/foamliu/InsightFace-PyTorch/5b6dba6078b4fb8a9b83a1ec83ff02e308e8e658/images/2_fn_1.jpg -------------------------------------------------------------------------------- /images/2_fn_1_aligned.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/foamliu/InsightFace-PyTorch/5b6dba6078b4fb8a9b83a1ec83ff02e308e8e658/images/2_fn_1_aligned.jpg -------------------------------------------------------------------------------- /images/2_fp_0.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/foamliu/InsightFace-PyTorch/5b6dba6078b4fb8a9b83a1ec83ff02e308e8e658/images/2_fp_0.jpg -------------------------------------------------------------------------------- /images/2_fp_1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/foamliu/InsightFace-PyTorch/5b6dba6078b4fb8a9b83a1ec83ff02e308e8e658/images/2_fp_1.jpg -------------------------------------------------------------------------------- /images/3_fn_0.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/foamliu/InsightFace-PyTorch/5b6dba6078b4fb8a9b83a1ec83ff02e308e8e658/images/3_fn_0.jpg 
-------------------------------------------------------------------------------- /images/3_fn_0_aligned.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/foamliu/InsightFace-PyTorch/5b6dba6078b4fb8a9b83a1ec83ff02e308e8e658/images/3_fn_0_aligned.jpg -------------------------------------------------------------------------------- /images/3_fn_1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/foamliu/InsightFace-PyTorch/5b6dba6078b4fb8a9b83a1ec83ff02e308e8e658/images/3_fn_1.jpg -------------------------------------------------------------------------------- /images/3_fn_1_aligned.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/foamliu/InsightFace-PyTorch/5b6dba6078b4fb8a9b83a1ec83ff02e308e8e658/images/3_fn_1_aligned.jpg -------------------------------------------------------------------------------- /images/4_fn_0.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/foamliu/InsightFace-PyTorch/5b6dba6078b4fb8a9b83a1ec83ff02e308e8e658/images/4_fn_0.jpg -------------------------------------------------------------------------------- /images/4_fn_0_aligned.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/foamliu/InsightFace-PyTorch/5b6dba6078b4fb8a9b83a1ec83ff02e308e8e658/images/4_fn_0_aligned.jpg -------------------------------------------------------------------------------- /images/4_fn_1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/foamliu/InsightFace-PyTorch/5b6dba6078b4fb8a9b83a1ec83ff02e308e8e658/images/4_fn_1.jpg -------------------------------------------------------------------------------- /images/4_fn_1_aligned.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/foamliu/InsightFace-PyTorch/5b6dba6078b4fb8a9b83a1ec83ff02e308e8e658/images/4_fn_1_aligned.jpg -------------------------------------------------------------------------------- /images/5_fn_0.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/foamliu/InsightFace-PyTorch/5b6dba6078b4fb8a9b83a1ec83ff02e308e8e658/images/5_fn_0.jpg -------------------------------------------------------------------------------- /images/5_fn_0_aligned.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/foamliu/InsightFace-PyTorch/5b6dba6078b4fb8a9b83a1ec83ff02e308e8e658/images/5_fn_0_aligned.jpg -------------------------------------------------------------------------------- /images/5_fn_1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/foamliu/InsightFace-PyTorch/5b6dba6078b4fb8a9b83a1ec83ff02e308e8e658/images/5_fn_1.jpg -------------------------------------------------------------------------------- /images/5_fn_1_aligned.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/foamliu/InsightFace-PyTorch/5b6dba6078b4fb8a9b83a1ec83ff02e308e8e658/images/5_fn_1_aligned.jpg -------------------------------------------------------------------------------- /images/6_fn_0.jpg: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/foamliu/InsightFace-PyTorch/5b6dba6078b4fb8a9b83a1ec83ff02e308e8e658/images/6_fn_0.jpg -------------------------------------------------------------------------------- /images/6_fn_0_aligned.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/foamliu/InsightFace-PyTorch/5b6dba6078b4fb8a9b83a1ec83ff02e308e8e658/images/6_fn_0_aligned.jpg -------------------------------------------------------------------------------- /images/6_fn_1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/foamliu/InsightFace-PyTorch/5b6dba6078b4fb8a9b83a1ec83ff02e308e8e658/images/6_fn_1.jpg -------------------------------------------------------------------------------- /images/6_fn_1_aligned.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/foamliu/InsightFace-PyTorch/5b6dba6078b4fb8a9b83a1ec83ff02e308e8e658/images/6_fn_1_aligned.jpg -------------------------------------------------------------------------------- /images/7_fn_0.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/foamliu/InsightFace-PyTorch/5b6dba6078b4fb8a9b83a1ec83ff02e308e8e658/images/7_fn_0.jpg -------------------------------------------------------------------------------- /images/7_fn_0_aligned.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/foamliu/InsightFace-PyTorch/5b6dba6078b4fb8a9b83a1ec83ff02e308e8e658/images/7_fn_0_aligned.jpg -------------------------------------------------------------------------------- /images/7_fn_1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/foamliu/InsightFace-PyTorch/5b6dba6078b4fb8a9b83a1ec83ff02e308e8e658/images/7_fn_1.jpg -------------------------------------------------------------------------------- /images/7_fn_1_aligned.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/foamliu/InsightFace-PyTorch/5b6dba6078b4fb8a9b83a1ec83ff02e308e8e658/images/7_fn_1_aligned.jpg -------------------------------------------------------------------------------- /images/8_fn_0.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/foamliu/InsightFace-PyTorch/5b6dba6078b4fb8a9b83a1ec83ff02e308e8e658/images/8_fn_0.jpg -------------------------------------------------------------------------------- /images/8_fn_0_aligned.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/foamliu/InsightFace-PyTorch/5b6dba6078b4fb8a9b83a1ec83ff02e308e8e658/images/8_fn_0_aligned.jpg -------------------------------------------------------------------------------- /images/8_fn_1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/foamliu/InsightFace-PyTorch/5b6dba6078b4fb8a9b83a1ec83ff02e308e8e658/images/8_fn_1.jpg -------------------------------------------------------------------------------- /images/8_fn_1_aligned.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/foamliu/InsightFace-PyTorch/5b6dba6078b4fb8a9b83a1ec83ff02e308e8e658/images/8_fn_1_aligned.jpg -------------------------------------------------------------------------------- /images/9_fn_0.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/foamliu/InsightFace-PyTorch/5b6dba6078b4fb8a9b83a1ec83ff02e308e8e658/images/9_fn_0.jpg -------------------------------------------------------------------------------- /images/9_fn_0_aligned.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/foamliu/InsightFace-PyTorch/5b6dba6078b4fb8a9b83a1ec83ff02e308e8e658/images/9_fn_0_aligned.jpg -------------------------------------------------------------------------------- /images/9_fn_1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/foamliu/InsightFace-PyTorch/5b6dba6078b4fb8a9b83a1ec83ff02e308e8e658/images/9_fn_1.jpg -------------------------------------------------------------------------------- /images/9_fn_1_aligned.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/foamliu/InsightFace-PyTorch/5b6dba6078b4fb8a9b83a1ec83ff02e308e8e658/images/9_fn_1_aligned.jpg -------------------------------------------------------------------------------- /images/megaface_cmc.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/foamliu/InsightFace-PyTorch/5b6dba6078b4fb8a9b83a1ec83ff02e308e8e658/images/megaface_cmc.jpg -------------------------------------------------------------------------------- /images/megaface_cmc_2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/foamliu/InsightFace-PyTorch/5b6dba6078b4fb8a9b83a1ec83ff02e308e8e658/images/megaface_cmc_2.jpg -------------------------------------------------------------------------------- /images/megaface_roc.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/foamliu/InsightFace-PyTorch/5b6dba6078b4fb8a9b83a1ec83ff02e308e8e658/images/megaface_roc.jpg -------------------------------------------------------------------------------- /images/megaface_roc_2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/foamliu/InsightFace-PyTorch/5b6dba6078b4fb8a9b83a1ec83ff02e308e8e658/images/megaface_roc_2.jpg -------------------------------------------------------------------------------- /images/megaface_stats.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/foamliu/InsightFace-PyTorch/5b6dba6078b4fb8a9b83a1ec83ff02e308e8e658/images/megaface_stats.png -------------------------------------------------------------------------------- /images/sample.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/foamliu/InsightFace-PyTorch/5b6dba6078b4fb8a9b83a1ec83ff02e308e8e658/images/sample.jpg -------------------------------------------------------------------------------- /images/theta_dist.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/foamliu/InsightFace-PyTorch/5b6dba6078b4fb8a9b83a1ec83ff02e308e8e658/images/theta_dist.png -------------------------------------------------------------------------------- /megaface/README.md: -------------------------------------------------------------------------------- 1 | # MegaFace 2 | 3 | Delete feature files (each first command previews the matches; the `-delete` variant removes them): 4 | ```bash 5 | find facescrub_images -name "*.bin" -type f 6 | find facescrub_images -name "*.bin" -type f -delete 7 | find MegaFace/FlickrFinal2 -name "*.bin" -type f 8 | find MegaFace/FlickrFinal2 -name "*.bin" -type f -delete 9 | ``` 10 | -------------------------------------------------------------------------------- /megaface/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/foamliu/InsightFace-PyTorch/5b6dba6078b4fb8a9b83a1ec83ff02e308e8e658/megaface/__init__.py -------------------------------------------------------------------------------- /megaface/devkit/experiments/run_experiment.py: -------------------------------------------------------------------------------- 1 | # Created by MegaFace Team 2 | # Please cite our paper if you use our code, results, or dataset in a publication 3 | # http://megaface.cs.washington.edu/ 4 | 5 | import argparse 6 | import json 7 | import os 8 | import subprocess 9 | import sys 10 | 11 | ROOT = 'megaface/devkit' 12 | MODEL = os.path.join(ROOT, 'models', 'jb_identity.bin') 13 | IDENTIFICATION_EXE = os.path.join(ROOT, 'bin', 'Identification') 14 | FUSE_RESULTS_EXE = os.path.join(ROOT, 'bin', 'FuseResults') 15 | MEGAFACE_LIST_BASENAME = os.path.join(ROOT, 'templatelists', 'megaface_features_list.json') 16 | PROBE_LIST_BASENAME = os.path.join(ROOT, 'templatelists', 'facescrub_features_list.json') 17 | 18 | 19 | def main(): 20 | parser = argparse.ArgumentParser(description= 21 | 'Runs the MegaFace challenge experiment with the provided feature files') 22 | parser.add_argument('distractor_feature_path', help='Path to MegaFace Features') 23 | parser.add_argument('probe_feature_path', help='Path to FaceScrub Features') 24 | parser.add_argument('file_ending', 25 | help='Ending appended to original photo files. e.g. 11084833664_0.jpg_LBP_100x100.bin => _LBP_100x100.bin') 26 | parser.add_argument( 27 | 'out_root', help='File output directory, outputs results files, score matrix files, and feature lists used') 28 | parser.add_argument('-s', '--sizes', type=int, nargs='+', 29 | help='(optional) Size(s) of feature list(s) to create. Default: 10 100 1000 10000 100000 1000000') 30 | parser.add_argument('-m', '--model', type=str, 31 | help='(optional) Scoring model to use. Default: ../models/jb_identity.bin') 32 | parser.add_argument('-ns', '--num_sets', help='Set to change number of sets to run on. Default: 1') 33 | parser.add_argument('-d', '--delete_matrices', dest='delete_matrices', action='store_true', 34 | help='Deletes matrices used while computing results. Reduces space needed to run test.') 35 | parser.add_argument('-p', '--probe_list', 36 | help='Set to use different probe list.
Default: ../templatelists/facescrub_features_list.json') 37 | parser.add_argument('-dlp', '--distractor_list_path', help='Set to change path used for distractor lists') 38 | parser.set_defaults(model=MODEL, num_sets=1, sizes=[10, 100, 1000, 10000, 100000, 1000000], 39 | probe_list=PROBE_LIST_BASENAME, distractor_list_path=os.path.dirname(MEGAFACE_LIST_BASENAME)) 40 | args = parser.parse_args() 41 | 42 | distractor_feature_path = args.distractor_feature_path 43 | out_root = args.out_root 44 | probe_feature_path = args.probe_feature_path 45 | model = args.model 46 | num_sets = args.num_sets 47 | sizes = args.sizes 48 | file_ending = args.file_ending 49 | alg_name = file_ending.split('.')[0].strip('_') 50 | delete_matrices = args.delete_matrices 51 | probe_list_basename = args.probe_list 52 | megaface_list_basename = os.path.join(args.distractor_list_path, os.path.basename(MEGAFACE_LIST_BASENAME)) 53 | set_indices = range(1, int(num_sets) + 1) 54 | 55 | assert os.path.exists(distractor_feature_path) 56 | assert os.path.exists(probe_feature_path) 57 | if not os.path.exists(out_root): 58 | os.makedirs(out_root) 59 | if not os.path.exists(os.path.join(out_root, "otherFiles")): 60 | os.makedirs(os.path.join(out_root, "otherFiles")) 61 | other_out_root = os.path.join(out_root, "otherFiles") 62 | 63 | probe_name = os.path.basename(probe_list_basename).split('_')[0] 64 | distractor_name = os.path.basename(megaface_list_basename).split('_')[0] 65 | 66 | # Create feature lists for megaface for all sets and sizes and verifies all features exist 67 | missing = False 68 | for index in set_indices: 69 | for size in sizes: 70 | print('Creating feature list of {} photos for set {}'.format(size, str(index))) 71 | cur_list_name = megaface_list_basename + "_{}_{}".format(str(size), str(index)) 72 | print(cur_list_name) 73 | with open(cur_list_name) as fp: 74 | featureFile = json.load(fp) 75 | path_list = featureFile["path"] 76 | path_list_f = [] 77 | for i in range(len(path_list)): 78 | path_list[i] = os.path.join(distractor_feature_path, path_list[i] + file_ending) 79 | if os.path.isfile(path_list[i]): 80 | path_list_f.append(path_list[i]) 81 | # print path_list[i] + " is missing" 82 | # missing = True 83 | if i % 10000 == 0 and i > 0: 84 | print(str(i) + " / " + str(len(path_list))) 85 | featureFile["path"] = path_list_f 86 | filename = os.path.join(other_out_root, 87 | '{}_features_{}_{}_{}'.format(distractor_name, alg_name, size, index)) 88 | print(filename) 89 | json.dump(featureFile, open(filename, 'w'), sort_keys=True, indent=4) 90 | if missing: 91 | sys.exit("Features are missing...") 92 | 93 | # Create feature list for probe set 94 | probeidx = 0 95 | probelist = open('probelist', 'w') 96 | with open(probe_list_basename) as fp: 97 | featureFile = json.load(fp) 98 | path_list = featureFile["path"] 99 | id_list = featureFile["id"] 100 | path_list_f = [] 101 | id_list_f = [] 102 | for i in range(len(path_list)): 103 | path_list[i] = os.path.join(probe_feature_path, path_list[i].replace(' ', '_') + file_ending) 104 | print(path_list[i]) 105 | if os.path.isfile(path_list[i]): 106 | path_list_f.append(path_list[i]) 107 | id_list_f.append(id_list[i]) 108 | probelist.write(str(probeidx) + '\t' + id_list[i] + '\t' + path_list[i] + '\n') 109 | probeidx += 1 110 | # print path_list[i] + " is missing" 111 | # missing = True 112 | featureFile["path"] = path_list_f 113 | featureFile["id"] = id_list_f 114 | filename = os.path.join( 115 | other_out_root, '{}_features_{}'.format(probe_name, alg_name)) 116 | 
print(filename) 117 | json.dump(featureFile, open(filename, 'w'), sort_keys=True, indent=4) 118 | probe_feature_list = os.path.join(other_out_root, '{}_features_{}'.format(probe_name, alg_name)) 119 | probelist.close() 120 | if missing: 121 | sys.exit("Features are missing...") 122 | 123 | print('Running probe to probe comparison') 124 | probe_score_filename = os.path.join( 125 | other_out_root, '{}_{}_{}.bin'.format(probe_name, probe_name, alg_name)) 126 | args = [IDENTIFICATION_EXE, model, "path", probe_feature_list, probe_feature_list, probe_score_filename] 127 | print(args) 128 | proc = subprocess.Popen(args) 129 | proc.communicate() 130 | 131 | for index in set_indices: 132 | for size in sizes: 133 | print('Running test with size {} images for set {}'.format( 134 | str(size), str(index))) 135 | args = [IDENTIFICATION_EXE, model, "path", 136 | os.path.join(other_out_root, 137 | '{}_features_{}_{}_{}'.format(distractor_name, alg_name, size, index) 138 | ), probe_feature_list, os.path.join(other_out_root, 139 | '{}_{}_{}_{}_{}.bin'.format(probe_name, 140 | distractor_name, 141 | alg_name, 142 | str(size), 143 | str(index)))] 144 | print(args) 145 | proc = subprocess.Popen(args) 146 | proc.communicate() 147 | 148 | print('Computing test results with {} images for set {}'.format( 149 | str(size), str(index))) 150 | args = [FUSE_RESULTS_EXE] 151 | args += [os.path.join(other_out_root, '{}_{}_{}_{}_{}.bin'.format( 152 | probe_name, distractor_name, alg_name, str(size), str(index)))] 153 | args += [os.path.join(other_out_root, '{}_{}_{}.bin'.format( 154 | probe_name, probe_name, alg_name)), probe_feature_list, str(size)] 155 | args += [os.path.join(out_root, "cmc_{}_{}_{}_{}_{}.json".format( 156 | probe_name, distractor_name, alg_name, str(size), str(index)))] 157 | args += [os.path.join(out_root, "matches_{}_{}_{}_{}_{}.json".format( 158 | probe_name, distractor_name, alg_name, str(size), str(index)))] 159 | print(args) 160 | proc = subprocess.Popen(args) 161 | proc.communicate() 162 | 163 | if delete_matrices: 164 | os.remove(os.path.join(other_out_root, '{}_{}_{}_{}_{}.bin'.format( 165 | probe_name, distractor_name, alg_name, str(size), str(index)))) 166 | 167 | 168 | if __name__ == '__main__': 169 | main() 170 | -------------------------------------------------------------------------------- /megaface/draw_curve.m: -------------------------------------------------------------------------------- 1 | % requirement: JSONLab: https://cn.mathworks.com/matlabcentral/fileexchange/33381-jsonlab--a-toolbox-to-encode-decode-json-files 2 | 3 | format long 4 | addpath('D:\Users\foamliu\code\jsonlab'); 5 | facescrub_cmc_file = 'D:\Users\foamliu\code\InsightFace-v3\megaface\results\cmc_facescrub_megaface_0_1000000_1.json' 6 | facescrub_cmc_json = loadjson(fileread(facescrub_cmc_file)); 7 | facescrub_cmc_json 8 | 9 | 10 | figure(1); 11 | semilogx(facescrub_cmc_json.cmc(1,:)+1,facescrub_cmc_json.cmc(2,:)*100,'LineWidth',2); 12 | title(['Identification @ 1e6 distractors = ' num2str(facescrub_cmc_json.cmc(2,1))]); 13 | xlabel('Rank'); 14 | ylabel('Identification Rate %'); 15 | %ylim([0 100]); 16 | grid on; 17 | box on; 18 | hold on; 19 | 20 | facescrub_cmc_json.roc(1,:) 21 | 22 | figure(2); 23 | %semilogx(facescrub_cmc_json.roc(1,:),facescrub_cmc_json.roc(2,:),'LineWidth',2); 24 | xdata=[0.0, 1.034335816996190e-08, 4.137343267984761e-08, 5.171678907345267e-08, 7.240350896609016e-08, 1.137769416459378e-07, 1.965237999002056e-07, 1.841117750700505e-06, 0.001009718631394207, 1.0 ], 25 | ydata=[
0.8872767686843872, 0.9050645828247070, 0.92467862367630, 0.9406545758247375, 0.9531793594360352, 0.9637916684150696, 0.9753674268722534, 0.9853757619857788, 0.9953840970993042, 1.0] 26 | semilogx(xdata,ydata,'LineWidth',2); 27 | %semilogx(facescrub_cmc_json.roc{1},facescrub_cmc_json.roc{2},'LineWidth',2); 28 | title(['Verification @ 1e-6 = ' num2str(interp1(xdata, ydata, 1e-6))]); 29 | xlim([1e-6 1]); 30 | ylim([0 1]); 31 | xlabel('False Positive Rate'); 32 | ylabel('True Positive Rate'); 33 | grid on; 34 | box on; 35 | hold on; -------------------------------------------------------------------------------- /megaface/facescrub_noises.txt: -------------------------------------------------------------------------------- 1 | #Please strictly follow the rules in https://github.com/deepinsight/insightface/tree/master/src/megaface/README.md if you want to use this list. 2 | Aaron_Eckhart_136.png 3 | Aaron_Eckhart_221.png 4 | Adam_McKay_468.png 5 | Adam_McKay_478.png 6 | Adam_Sandler_558.png 7 | Adrienne_Barbeau_4259.png 8 | Adrienne_Frantz_27683.png 9 | Adrienne_Frantz_27693.png 10 | Adrienne_Frantz_27699.png 11 | Adrienne_Frantz_27701.png 12 | Adrienne_Frantz_27829.png 13 | Aisha_Hinds_35768.png 14 | Alec_Baldwin_2121.png 15 | Alice_Krige_40717.png 16 | Alley_Mills_52029.png 17 | Alley_Mills_52039.png 18 | Alley_Mills_52044.png 19 | Allison_Janney_37357.png 20 | Allison_Janney_37447.png 21 | Alyson_Hannigan_32288.png 22 | Alyssa_Milano_51641.png 23 | America_Ferrera_25859.png 24 | America_Ferrera_25870.png 25 | Amy_Davidson_18553.png 26 | Andrea_Bogart_8427.png 27 | Andrea_Bowen_8992.png 28 | Andrea_Bowen_9090.png 29 | Andy_Richter_3141.png 30 | Angell_Conwell_15508.png 31 | Anne_Hathaway_33648.png 32 | Anne_Hathaway_33685.png 33 | Annie_Ilonzeh_37123.png 34 | Anthony_Hopkins_3562.png 35 | Anthony_Hopkins_3595.png 36 | Anthony_Hopkins_3694.png 37 | Antonio_Banderas_4016.png 38 | Arnold_Vosloo_4287.png 39 | Ashley_Benson_4144.png 40 | Ashley_Johnson_37631.png 41 | Ashley_Johnson_37649.png 42 | Audra_McDonald_49657.png 43 | Audrey_Landers_42201.png 44 | Audrey_Landers_42246.png 45 | Barbara_Carrera_11961.png 46 | Ben_Affleck_4700.png 47 | Ben_Affleck_4706.png 48 | Ben_Affleck_4762.png 49 | Ben_Affleck_4885.png 50 | Ben_Kingsley_5146.png 51 | Ben_McKenzie_5219.png 52 | Ben_Stiller_5419.png 53 | Ben_Stiller_5432.png 54 | Bernard_Hill_5869.png 55 | Bernard_Hill_5969.png 56 | Bernie_Mac_6083.png 57 | Billy_Bob_Thornton_7010.png 58 | Billy_Bob_Thornton_7016.png 59 | Billy_Bob_Thornton_7028.png 60 | Billy_Bob_Thornton_7047.png 61 | Billy_Bob_Thornton_7069.png 62 | Billy_Bob_Thornton_7093.png 63 | Billy_Bob_Thornton_7106.png 64 | Billy_Bob_Thornton_7140.png 65 | Billy_Bob_Thornton_7161.png 66 | Billy_Boyd_7281.png 67 | Billy_Boyd_7334.png 68 | Billy_Burke_7418.png 69 | Billy_Burke_7556.png 70 | Billy_Burke_7558.png 71 | Billy_Zane_7799.png 72 | Billy_Zane_7821.png 73 | Bobbie_Eakes_23690.png 74 | Bobbie_Eakes_23702.png 75 | Brad_Pitt_8285.png 76 | Bradley_Cooper_8416.png 77 | Brianna_Brown_10455.png 78 | Brianna_Brown_10482.png 79 | Brooke_Langton_42515.png 80 | Brooke_Langton_42603.png 81 | Brooke_Langton_42604.png 82 | Brooke_Langton_42628.png 83 | Brooke_Langton_42634.png 84 | Brooke_Langton_42643.png 85 | Bruce_Greenwood_8831.png 86 | Candice_Bergen_6028.png 87 | Candice_Bergen_6055.png 88 | Candice_Bergen_6080.png 89 | Carla_Gallo_28085.png 90 | Carla_Gallo_28139.png 91 | Carmen_Electra_24315.png 92 | Caroline_Dhavernas_21720.png 93 | Cary_Elwes_9746.png 94 | Casey_Affleck_9925.png 95 | 
Cathy_Lee_Crosby_16548.png 96 | Cathy_Lee_Crosby_16564.png 97 | Chase_Masterson_48256.png 98 | Chazz_Palminteri_10758.png 99 | Cheryl_Hines_35834.png 100 | Chris_Evans_10942.png 101 | Chris_Evans_10943.png 102 | Chris_Evans_10990.png 103 | Chris_Evans_11000.png 104 | Chris_Evans_11039.png 105 | Chris_Kattan_11206.png 106 | Chris_Klein_11329.png 107 | Chris_Klein_11346.png 108 | Chris_Klein_11471.png 109 | Christa_Miller_51872.png 110 | Christian_Bale_11961.png 111 | Christina_Applegate_2776.png 112 | Christopher_Lloyd_12504.png 113 | Christopher_Reeve_12692.png 114 | Christopher_Reeve_12737.png 115 | Chyler_Leigh_43366.png 116 | Chyler_Leigh_43374.png 117 | Ciara_Bravo_10094.png 118 | Ciara_Bravo_10161.png 119 | Ciara_Bravo_10214.png 120 | Clint_Eastwood_12918.png 121 | Colin_Farrell_13196.png 122 | Colin_Farrell_13268.png 123 | Colin_Farrell_13282.png 124 | Colin_Firth_13455.png 125 | Colin_Firth_13566.png 126 | Courteney_Cox_16360.png 127 | Crystal_Chappell_13859.png 128 | Crystal_Chappell_13921.png 129 | Crystal_Chappell_13932.png 130 | Crystal_Chappell_13949.png 131 | Crystal_Chappell_13961.png 132 | Dan_Lauria_13992.png 133 | Dan_Lauria_14073.png 134 | Dana_Delany_20425.png 135 | Dana_Delany_20561.png 136 | Dana_Delany_20621.png 137 | Dana_Delany_20632.png 138 | Daniel_Day-Lewis_14356.png 139 | Daniel_Day-Lewis_14361.png 140 | Daniel_Day-Lewis_14397.png 141 | Daniel_Day-Lewis_14545.png 142 | Daniel_Radcliffe_14770.png 143 | David_Schwimmer_15942.png 144 | David_Wenham_16210.png 145 | David_Wenham_16221.png 146 | David_Wenham_16224.png 147 | David_Wenham_16235.png 148 | David_Wenham_16241.png 149 | David_Wenham_16261.png 150 | David_Wenham_16295.png 151 | David_Wenham_16300.png 152 | David_Wenham_16327.png 153 | Dean_Cain_16435.png 154 | Debra_Messing_50555.png 155 | Debra_Messing_50597.png 156 | Debra_Messing_50703.png 157 | Debra_Messing_50746.png 158 | Delta_Burke_11111.png 159 | Delta_Burke_11176.png 160 | Denzel_Washington_16698.png 161 | Desmond_Harrington_17178.png 162 | Diahann_Carroll_12351.png 163 | Diahann_Carroll_12397.png 164 | Dianna_Agron_269.png 165 | Dianna_Agron_363.png 166 | Dianna_Agron_375.png 167 | Dianna_Agron_396.png 168 | Dustin_Hoffman_17932.png 169 | Elizabeth_Berkley_6214.png 170 | Elizabeth_Berkley_6259.png 171 | Elizabeth_Hendrickson_34413.png 172 | Ellen_Greene_30978.png 173 | Ellen_Greene_31003.png 174 | Ellen_Greene_31005.png 175 | Emily_Deschanel_20666.png 176 | Erin_Cummings_17368.png 177 | Erin_Cummings_17400.png 178 | Erin_Cummings_17457.png 179 | Ethan_Hawke_19548.png 180 | Eva_Longoria_45027.png 181 | Farah_Fath_25299.png 182 | Farah_Fath_25343.png 183 | Farah_Fath_25365.png 184 | Farah_Fath_25401.png 185 | Farrah_Fawcett_25594.png 186 | Florencia_Lozano_45592.png 187 | Florencia_Lozano_45594.png 188 | Florencia_Lozano_45606.png 189 | Florencia_Lozano_45632.png 190 | Florencia_Lozano_45640.png 191 | Fran_Drescher_22955.png 192 | Fran_Drescher_23030.png 193 | Gabrielle_Carteris_12657.png 194 | Gates_McFadden_49864.png 195 | Gates_McFadden_49868.png 196 | Glenn_Close_14672.png 197 | Glenn_Close_14739.png 198 | Harrison_Ford_22656.png 199 | Hayden_Christensen_23089.png 200 | Heather_Locklear_44410.png 201 | Heather_Locklear_44498.png 202 | Heather_Locklear_44504.png 203 | Heather_Locklear_44527.png 204 | Holly_Marie_Combs_15225.png 205 | Ian_Holm_24214.png 206 | Jackee_Harry_32964.png 207 | Jackee_Harry_33011.png 208 | James_Brolin_26065.png 209 | James_Frain_26352.png 210 | James_Franco_26574.png 211 | James_Marsden_26731.png 212 | 
James_Marsden_26756.png 213 | James_McAvoy_26932.png 214 | Jamie_Lee_Curtis_17869.png 215 | Jamie_Lee_Curtis_18018.png 216 | Jamie_Lee_Curtis_18056.png 217 | Jamie_Luner_46356.png 218 | Jamie_Luner_46423.png 219 | Jane_Curtin_17787.png 220 | Jane_Curtin_17820.png 221 | Jane_Leeves_42833.png 222 | Jane_Lynch_46639.png 223 | Jane_Lynch_46743.png 224 | January_Jones_38213.png 225 | January_Jones_38384.png 226 | January_Jones_38414.png 227 | Jasmine_Guy_31496.png 228 | Jasmine_Guy_31562.png 229 | Jasmine_Guy_31594.png 230 | Jasmine_Guy_31618.png 231 | Jasmine_Guy_31678.png 232 | Jason_Biggs_28339.png 233 | Jason_Lee_55690.png 234 | Jason_Lee_55703.png 235 | Jeanne_Cooper_15551.png 236 | Jeanne_Cooper_15652.png 237 | Jeanne_Cooper_15696.png 238 | Jenilee_Harrison_32833.png 239 | Jenilee_Harrison_32864.png 240 | Jenilee_Harrison_32895.png 241 | Jennette_McCurdy_49314.png 242 | Jennie_Garth_28672.png 243 | Jennie_Garth_28686.png 244 | Jeremy_Sisto_29948.png 245 | Jessica_Biel_7413.png 246 | Jessica_Capshaw_11891.png 247 | Jessica_Capshaw_11909.png 248 | Jessica_Leccia_42714.png 249 | Jessica_Leccia_42719.png 250 | Jill_Eikenberry_24108.png 251 | Jill_Eikenberry_24159.png 252 | Jill_Eikenberry_24166.png 253 | Jill_Eikenberry_24179.png 254 | Jill_Eikenberry_24188.png 255 | Jill_Hennessy_35087.png 256 | Jim_Carrey_30976.png 257 | Jim_Carrey_31137.png 258 | Jim_Carrey_31150.png 259 | Joan_Collins_14963.png 260 | Joanna_Kerns_40119.png 261 | Joanna_Kerns_40127.png 262 | Joanna_Kerns_40145.png 263 | Joanna_Kerns_40162.png 264 | Joanna_Kerns_40170.png 265 | Joanna_Kerns_40177.png 266 | John_Malkovich_32908.png 267 | John_Malkovich_32934.png 268 | Jon_Hamm_33859.png 269 | Jonathan_Rhys_Meyers_34545.png 270 | Jonathan_Sadowski_55304.png 271 | Josh_Brolin_34866.png 272 | Josh_Duhamel_35073.png 273 | Josie_Bissett_7155.png 274 | Josie_Bissett_7222.png 275 | Josie_Bissett_7243.png 276 | Joyce_DeWitt_20190.png 277 | Julia_Louis-Dreyfus_45687.png 278 | Julia_Louis-Dreyfus_45785.png 279 | Julie_Marie_Berman_6437.png 280 | Justin_Long_35979.png 281 | Justin_Long_35983.png 282 | Justine_Bateman_4879.png 283 | Kassie_DePaiva_19841.png 284 | Katherine_Helmond_34045.png 285 | Kathy_Baker_3934.png 286 | Kathy_Baker_3984.png 287 | Kathy_Griffin_31087.png 288 | Kathy_Griffin_31113.png 289 | Kellan_Lutz_36883.png 290 | Kevin_Costner_37976.png 291 | Kim_Cattrall_13087.png 292 | Kim_Delaney_20329.png 293 | Kim_Delaney_20363.png 294 | Kim_Fields_26573.png 295 | Kimberlin_Brown_10564.png 296 | Kimberly_McCullough_49225.png 297 | Kirstie_Alley_1565.png 298 | Kit_Harington_38601.png 299 | Kit_Harington_38638.png 300 | Kristen_Alderson_615.png 301 | Kristen_Alderson_714.png 302 | Kristen_Johnston_37751.png 303 | Kristin_Chenoweth_14161.png 304 | Kristin_Chenoweth_14220.png 305 | Kristin_Chenoweth_14242.png 306 | Kristy_McNichol_50183.png 307 | Kristy_McNichol_50205.png 308 | Kristy_McNichol_50256.png 309 | Kristy_McNichol_50272.png 310 | Kristy_McNichol_50295.png 311 | Kristy_McNichol_50304.png 312 | Kristy_McNichol_50314.png 313 | Lacey_Chabert_13239.png 314 | Lacey_Chabert_13291.png 315 | Laura_Innes_37302.png 316 | Laura_Innes_37329.png 317 | Laura_Leighton_43101.png 318 | Laura_Leighton_43169.png 319 | Lauralee_Bell_5169.png 320 | Lauralee_Bell_5206.png 321 | Lauralee_Bell_5272.png 322 | Lauren_Holly_36179.png 323 | Lauren_Holly_36191.png 324 | Lauren_Holly_36211.png 325 | Lauren_Koslow_40373.png 326 | Lauren_Koslow_40426.png 327 | Lauren_Koslow_40428.png 328 | Lauren_Koslow_40434.png 329 | Lauren_Koslow_40453.png 330 | 
Laurie_Metcalf_50846.png 331 | Laurie_Metcalf_50878.png 332 | Laurie_Metcalf_50883.png 333 | Laurie_Metcalf_50897.png 334 | Laurie_Metcalf_50916.png 335 | Laurie_Metcalf_50917.png 336 | Laurie_Metcalf_50920.png 337 | Laurie_Metcalf_50946.png 338 | Laurie_Metcalf_50973.png 339 | Laurie_Metcalf_50984.png 340 | Laurie_Metcalf_50986.png 341 | Laurie_Metcalf_50989.png 342 | Laurie_Metcalf_51019.png 343 | Lea_Michele_51327.png 344 | Lea_Michele_51337.png 345 | Lea_Michele_51362.png 346 | Lea_Michele_51363.png 347 | Lea_Michele_51427.png 348 | Lecy_Goranson_30511.png 349 | Lecy_Goranson_30538.png 350 | Lecy_Goranson_30539.png 351 | Lesley-Anne_Down_22118.png 352 | Lesley-Anne_Down_22144.png 353 | Lesley-Anne_Down_22249.png 354 | Lexi_Ainsworth_126.png 355 | Lexi_Ainsworth_129.png 356 | Lexi_Ainsworth_150.png 357 | Lexi_Ainsworth_28.png 358 | Lexi_Ainsworth_47.png 359 | Lexi_Ainsworth_54.png 360 | Liev_Schreiber_39673.png 361 | Linda_Evans_24646.png 362 | Linda_Evans_24670.png 363 | Linda_Gray_30789.png 364 | Linda_Gray_30823.png 365 | Linda_Gray_30895.png 366 | Lindsay_Hartley_33089.png 367 | Lindsay_Hartley_33091.png 368 | Lindsay_Hartley_33182.png 369 | Lindsay_Hartley_33188.png 370 | Lindsay_Hartley_33192.png 371 | Lisa_Bonet_8489.png 372 | Lisa_Bonet_8547.png 373 | Lisa_Kudrow_40922.png 374 | Lisa_LoCicero_44328.png 375 | Lisa_LoCicero_44334.png 376 | Loni_Anderson_2152.png 377 | Loni_Anderson_2190.png 378 | Lorraine_Bracco_9632.png 379 | Lorraine_Bracco_9746.png 380 | Lourdes_Benedicto_5324.png 381 | Lourdes_Benedicto_5360.png 382 | Mary_Beth_Evans_24742.png 383 | Mary_Crosby_16814.png 384 | Mary_Crosby_16826.png 385 | Mary_Crosby_16827.png 386 | Mary_Crosby_16835.png 387 | Mary_Crosby_16856.png 388 | Mary_Crosby_16867.png 389 | Mary_Crosby_16876.png 390 | Matt_Czuchry_41100.png 391 | Matt_Dillon_41441.png 392 | Matt_Dillon_41584.png 393 | Melina_Kanakaredes_39464.png 394 | Melissa_Archer_3127.png 395 | Melissa_Archer_3169.png 396 | Melissa_Benoist_5425.png 397 | Melissa_Benoist_5490.png 398 | Melissa_Benoist_5507.png 399 | Melissa_Benoist_5523.png 400 | Melissa_Benoist_5538.png 401 | Michael_Douglas_43514.png 402 | Michael_Douglas_43520.png 403 | Michael_Douglas_43593.png 404 | Michael_Landes_43643.png 405 | Michael_Landes_43712.png 406 | Michael_Vartan_43752.png 407 | Michael_Vartan_43873.png 408 | Mila_Kunis_41190.png 409 | Miranda_Cosgrove_15892.png 410 | Molly_Burnett_11287.png 411 | Molly_Burnett_11323.png 412 | Morena_Baccarin_3662.png 413 | Natalia_Livingston_44183.png 414 | Natalie_Hall_31866.png 415 | Natalie_Hall_31875.png 416 | Natalie_Hall_31898.png 417 | Natalie_Hall_31914.png 418 | Natalie_Martinez_48106.png 419 | Natalie_Martinez_48117.png 420 | Natalie_Martinez_48191.png 421 | Neve_Campbell_11584.png 422 | Nicole_Eggert_23928.png 423 | Nicole_Eggert_24088.png 424 | Nicole_de_Boer_19422.png 425 | Nicole_de_Boer_19448.png 426 | Olivia_d'Abo_18077.png 427 | Olivia_d'Abo_18126.png 428 | Olivia_d'Abo_18133.png 429 | Olivia_d'Abo_18166.png 430 | Olivia_d'Abo_18188.png 431 | Olivia_d'Abo_18202.png 432 | Olivia_d'Abo_18224.png 433 | Olivia_d'Abo_18228.png 434 | Olivia_d'Abo_18229.png 435 | Pamela_Sue_Martin_47876.png 436 | Pamela_Sue_Martin_48018.png 437 | Pamela_Sue_Martin_48027.png 438 | Patricia_Arquette_3360.png 439 | Patricia_Kalember_39346.png 440 | Patricia_Kalember_39361.png 441 | Patricia_Kalember_39365.png 442 | Patricia_Kalember_39366.png 443 | Patricia_Kalember_39367.png 444 | Patricia_Kalember_39369.png 445 | Peggy_Lipton_43900.png 446 | Peggy_Lipton_43935.png 447 | 
Peggy_Lipton_43962.png 448 | Peggy_Lipton_44032.png 449 | Peggy_Lipton_44070.png 450 | Peggy_Lipton_44071.png 451 | Peggy_McCay_48867.png 452 | Peggy_McCay_48870.png 453 | Peggy_McCay_48883.png 454 | Peggy_McCay_48897.png 455 | Peri_Gilpin_29456.png 456 | Philip_Seymour_Hoffman_47737.png 457 | Philip_Seymour_Hoffman_47778.png 458 | Portia_Doubleday_22036.png 459 | Portia_Doubleday_22101.png 460 | Portia_de_Rossi_19903.png 461 | Portia_de_Rossi_19908.png 462 | Portia_de_Rossi_19957.png 463 | Portia_de_Rossi_19963.png 464 | Portia_de_Rossi_19997.png 465 | Portia_de_Rossi_20036.png 466 | Portia_de_Rossi_20069.png 467 | Rachel_Dratch_22646.png 468 | Rachel_Dratch_22751.png 469 | Rebecca_Budig_10668.png 470 | Rebecca_Budig_10714.png 471 | Rebecca_Budig_10756.png 472 | Rebecca_Budig_10785.png 473 | Rebecca_Budig_10792.png 474 | Rebecca_Budig_10805.png 475 | Rebecca_Herbst_35381.png 476 | Rebecca_Herbst_35391.png 477 | Rebecca_Herbst_35407.png 478 | Rebecca_Herbst_35454.png 479 | Richard_E._Grant_48348.png 480 | Richard_Gere_48555.png 481 | Richard_Gere_48559.png 482 | Richard_Madden_48806.png 483 | Robert_Di_Niro_49242.png 484 | Robert_Downey_Jr._49395.png 485 | Robert_Downey_Jr._49485.png 486 | Robert_Duvall_49683.png 487 | Robert_Duvall_49684.png 488 | Robert_Duvall_49744.png 489 | Robert_Knepper_49865.png 490 | Roma_Downey_22357.png 491 | Roma_Downey_22378.png 492 | Roseanne_Barr_4520.png 493 | Roseanne_Barr_4546.png 494 | Roseanne_Barr_4622.png 495 | Rue_McClanahan_48968.png 496 | Rue_McClanahan_49092.png 497 | Rue_McClanahan_49151.png 498 | Rupert_Friend_50825.png 499 | Ryan_Gosling_51576.png 500 | S._Epatha_Merkerson_50436.png 501 | S._Epatha_Merkerson_50464.png 502 | Sara_Gilbert_29260.png 503 | Sara_Gilbert_29333.png 504 | Sarah_Drew_23164.png 505 | Sarah_Drew_23166.png 506 | Sarah_Drew_23174.png 507 | Sarah_Drew_23209.png 508 | Sarah_Drew_23245.png 509 | Sarah_Drew_23272.png 510 | Sarah_Drew_23286.png 511 | Sarah_Hyland_36867.png 512 | Sean_Bean_52769.png 513 | Selena_Gomez_30458.png 514 | Selena_Gomez_30471.png 515 | Seth_Rogen_52915.png 516 | Seth_Rogen_53117.png 517 | Seth_Rogen_53123.png 518 | Shannen_Doherty_21826.png 519 | Shannon_Kane_39638.png 520 | Shannon_Kane_39729.png 521 | Shannon_Kane_39732.png 522 | Sharon_Case_12719.png 523 | Sharon_Case_12779.png 524 | Sharon_Case_12889.png 525 | Sharon_Gless_29936.png 526 | Sharon_Gless_29976.png 527 | Sharon_Gless_29987.png 528 | Sharon_Gless_29989.png 529 | Shelley_Hack_31728.png 530 | Shelley_Hack_31736.png 531 | Shelley_Hack_31826.png 532 | Sherilyn_Fenn_25694.png 533 | Shirley_Jones_38512.png 534 | Shirley_Jones_38556.png 535 | Staci_Keanan_39993.png 536 | Staci_Keanan_40018.png 537 | Staci_Keanan_40038.png 538 | Staci_Keanan_40068.png 539 | Stana_Katic_39947.png 540 | Susan_Flannery_27075.png 541 | Susan_Flannery_27095.png 542 | Susan_Flannery_27128.png 543 | Swoosie_Kurtz_41518.png 544 | Swoosie_Kurtz_41605.png 545 | Tamala_Jones_38648.png 546 | Tamara_Braun_9874.png 547 | Tatyana_M._Ali_1228.png 548 | Tatyana_M._Ali_1266.png 549 | Tatyana_M._Ali_1290.png 550 | Tatyana_M._Ali_1349.png 551 | Taylor_Atelian_3507.png 552 | Taylor_Atelian_3517.png 553 | Taylor_Atelian_3526.png 554 | Taylor_Atelian_3532.png 555 | Taylor_Atelian_3533.png 556 | Taylor_Atelian_3553.png 557 | Taylor_Atelian_3559.png 558 | Taylor_Atelian_3566.png 559 | Taylor_Atelian_3581.png 560 | Taylor_Lautner_54170.png 561 | Taylor_Lautner_54238.png 562 | Taylor_Lautner_54258.png 563 | Taylor_Lautner_54279.png 564 | Taylor_Lautner_54292.png 565 | 
Taylor_Lautner_54296.png 566 | Taylor_Lautner_54328.png 567 | Tempestt_Bledsoe_8014.png 568 | Tempestt_Bledsoe_8062.png 569 | Tempestt_Bledsoe_8063.png 570 | Teri_Hatcher_33393.png 571 | Teri_Hatcher_33395.png 572 | Teri_Hatcher_33401.png 573 | Terry_Farrell_25136.png 574 | Terry_Farrell_25203.png 575 | Tia_Carrere_12136.png 576 | Tia_Carrere_12240.png 577 | Tobey_Maguire_54398.png 578 | Tom_Hanks_54730.png 579 | Tyne_Daly_18276.png 580 | Tyne_Daly_18293.png 581 | Tyne_Daly_18322.png 582 | Tyne_Daly_18362.png 583 | Tyne_Daly_18370.png 584 | Tyne_Daly_18399.png 585 | Tyne_Daly_18420.png 586 | Valerie_Cruz_17158.png 587 | Valerie_Harper_32638.png 588 | Valerie_Harper_32705.png 589 | Valerie_Harper_32771.png 590 | Vanessa_Marcil_47131.png 591 | Vanessa_Marcil_47158.png 592 | Vanessa_Marcil_47182.png 593 | Veronica_Hamel_32010.png 594 | Veronica_Hamel_32016.png 595 | Veronica_Hamel_32020.png 596 | Veronica_Hamel_32030.png 597 | Victor_Garber_54809.png 598 | Victor_Garber_54894.png 599 | Victoria_Justice_39219.png 600 | Wanda_De_Jesus_19687.png 601 | Wendie_Malick_46848.png 602 | Yasmine_Bleeth_8219.png 603 | Yasmine_Bleeth_8238.png 604 | Yasmine_Bleeth_8259.png 605 | Yasmine_Bleeth_8350.png 606 | Zooey_Deschanel_20937.png 607 | -------------------------------------------------------------------------------- /megaface/match_result.py: -------------------------------------------------------------------------------- 1 | import json 2 | import os 3 | 4 | 5 | def match_result(): 6 | with open('results/matches_facescrub_megaface_0_1000000_1.json', 'r') as file: 7 | data = json.load(file) 8 | 9 | print(len(data)) 10 | num_total = 0 11 | num_incorrect = 0 12 | 13 | # # print(load_dict) 14 | for i, item in enumerate(data): 15 | probes = item['probes'] 16 | idx = probes['idx'] 17 | rank = probes['rank'] 18 | incorrect = [] 19 | for j in range(len(idx)): 20 | if rank[j] > 0: 21 | incorrect.append(str(idx[j])) 22 | if len(incorrect) > 0: 23 | print(i) 24 | print(' '.join(incorrect)) 25 | num_total += len(idx) 26 | num_incorrect += len(incorrect) 27 | 28 | print(num_incorrect) 29 | print(num_total) 30 | print(num_incorrect / num_total) 31 | 32 | 33 | def check_facescrub(): 34 | num_bins = 0 35 | folder = 'facescrub_images' 36 | dir_list = [d for d in os.listdir(folder)] 37 | for d in dir_list: 38 | str_dir = os.path.join(folder, d) 39 | bin_list = [f for f in os.listdir(str_dir) if f.endswith('.bin')] 40 | num_bins += len(bin_list) 41 | print('num_bins: ' + str(num_bins)) 42 | 43 | 44 | if __name__ == '__main__': 45 | match_result() 46 | # check_facescrub() 47 | -------------------------------------------------------------------------------- /megaface/megaface_noises.txt: -------------------------------------------------------------------------------- 1 | #Please strictly follow the rules in https://github.com/deepinsight/insightface/tree/master/src/megaface/README.md if you want to use this list. 
2 | 375/37531198@N00/4174611825_0.jpg 3 | 375/37531198@N00/4174611825_1.jpg 4 | 925/92596048@N00/3910487430_2.jpg 5 | 587/58736276@N00/3023685783_2.jpg 6 | 905/90553008@N00/824787574_0.jpg 7 | 350/35034345966@N01/3086429616_0.jpg 8 | 388/38847947@N03/4351812234_0.jpg 9 | 788/78865207@N05/8267101710_0.jpg 10 | 495/49503172960@N01/2674784671_0.jpg 11 | 481/48122140@N00/3488870947_0.jpg 12 | 770/77029195@N00/490108118_3.jpg 13 | 566/56692742@N05/8202697602_1.jpg 14 | 546/54613470@N00/6155519468_0.jpg 15 | 486/48600102544@N01/206574155_6.jpg 16 | 857/85705006@N00/2052818739_0.jpg 17 | 345/34531071@N07/3374444062_1.jpg 18 | 714/71490499@N06/6460912773_0.jpg 19 | 790/79045485@N00/8312851597_9.jpg 20 | 497/49703429@N00/487110767_5.jpg 21 | 588/58819758@N00/502852994_0.jpg 22 | 603/60370779@N07/12733436605_1.jpg 23 | 351/35197415@N06/3392955585_0.jpg 24 | 414/41464693@N02/4431128027_1.jpg 25 | 745/74536253@N07/7744893278_0.jpg 26 | 745/74536253@N07/7744893278_1.jpg 27 | 745/74536253@N07/7744893278_2.jpg 28 | 745/74536253@N07/7746445288_0.jpg 29 | 720/72093892@N00/8394540214_0.jpg 30 | 943/94306126@N05/8585087758_0.jpg 31 | 316/31635293@N06/10300923803_0.jpg 32 | 316/31635293@N06/10300923803_1.jpg 33 | 461/46151867@N00/1361048872_0.jpg 34 | 316/31658559@N04/11248071845_2.jpg 35 | 510/51035763730@N01/3487351916_0.jpg 36 | 381/38176611@N04/6845223441_1.jpg 37 | 812/8127814@N07/1169159373_0.jpg 38 | 596/59689442@N00/397368405_0.jpg 39 | 457/45702277@N04/4404748345_2.jpg 40 | 457/45702277@N04/4405512456_0.jpg 41 | 315/31557769@N04/5183029470_0.jpg 42 | 105/105042823@N05/10224905845_1.jpg 43 | 233/23302147@N08/6717536683_0.jpg 44 | 943/94324445@N02/8585299005_5.jpg 45 | 865/86525452@N06/9026905203_1.jpg 46 | 575/57593906@N00/183103326_0.jpg 47 | 575/57593906@N00/183103326_1.jpg 48 | 513/51313580@N08/6077728920_10.jpg 49 | 794/79447530@N00/2646557330_3.jpg 50 | 838/8385182@N06/8791791352_2.jpg 51 | 834/83452322@N00/1379291226_0.jpg 52 | 299/29932833@N06/4605882096_0.jpg 53 | 125/12523672@N02/1368391674_0.jpg 54 | 251/25193498@N00/4015722351_2.jpg 55 | 207/20741443@N00/5045852254_2.jpg 56 | 313/31369133@N04/5050193084_3.jpg 57 | 313/31369133@N04/5050193084_5.jpg 58 | 504/50440126@N03/8061480293_0.jpg 59 | 353/35336901@N00/4353383267_0.jpg 60 | 102/10249456@N08/839447586_7.jpg 61 | 621/62158648@N07/5765099754_0.jpg 62 | 779/77991982@N00/3351774300_0.jpg 63 | 779/77991982@N00/3351774300_2.jpg 64 | 207/20741443@N00/5045852254_0.jpg 65 | 579/57954142@N07/6082189380_0.jpg 66 | 210/21091679@N08/5811404199_1.jpg 67 | 555/55530018@N04/5481680731_1.jpg 68 | 830/8302818@N06/7180492423_4.jpg 69 | 108/108788939@N08/11207124956_0.jpg 70 | 358/35803015@N03/5571012744_5.jpg 71 | 125/12508217@N08/3458308016_3.jpg 72 | 526/52617239@N07/9550573423_3.jpg 73 | 760/7600622@N07/442516245_2.jpg 74 | 838/83855583@N00/139393859_0.jpg 75 | 357/35775623@N00/4387679624_0.jpg 76 | 508/50824803@N04/5101695889_0.jpg 77 | 813/81369265@N00/5962625787_1.jpg 78 | 849/84987970@N00/6503971169_3.jpg 79 | 366/36692718@N00/7508265904_1.jpg 80 | 101/101878765@N05/9792203325_2.jpg 81 | 149/14994959@N04/3611150762_2.jpg 82 | 257/25777683@N04/3781609919_2.jpg 83 | 206/20625597@N07/3362909564_1.jpg 84 | 768/7686538@N06/1370874019_0.jpg 85 | 286/28675521@N05/2677267827_0.jpg 86 | 958/95818633@N00/317066357_0.jpg 87 | 114/11461909@N06/3569039014_2.jpg 88 | 114/11461909@N06/3926753297_2.jpg 89 | 827/8271358@N08/4748146669_0.jpg 90 | 341/34120602@N05/5979478480_5.jpg 91 | 114/11461909@N06/8281364025_1.jpg 92 | 467/46721620@N00/3849799981_0.jpg 93 | 
407/40719493@N06/4008300252_1.jpg 94 | 764/76491372@N00/540480752_1.jpg 95 | 330/33019425@N00/275100215_2.jpg 96 | 316/31677763@N00/273039584_0.jpg 97 | 213/21349468@N00/82691007_2.jpg 98 | 425/42568333@N05/4079636121_3.jpg 99 | 972/97205184@N04/10076409423_0.jpg 100 | 827/8279509@N08/2712311284_0.jpg 101 | 311/31150907@N03/4133932870_0.jpg 102 | 412/41259870@N06/5812928114_0.jpg 103 | 332/33213804@N04/7599995760_0.jpg 104 | 375/37552553@N03/8157072811_0.jpg 105 | 737/7376233@N08/8712184839_0.jpg 106 | 665/66598017@N08/9614369266_3.jpg 107 | 352/35237099299@N01/16090400_0.jpg 108 | 395/39545243@N00/1121668561_11.jpg 109 | 529/52968060@N00/4836487807_1.jpg 110 | 466/46648278@N00/68839900_0.jpg 111 | 768/76841197@N00/349452085_0.jpg 112 | 971/97193933@N00/7441014914_1.jpg 113 | 711/71164686@N00/2199159152_3.jpg 114 | 956/95633051@N00/2504698743_0.jpg 115 | 850/85094310@N00/2536647566_0.jpg 116 | 850/85094310@N00/2536647566_1.jpg 117 | 546/54673576@N00/3231373075_1.jpg 118 | 419/41942641@N02/3911619734_0.jpg 119 | 706/70654989@N00/435992217_0.jpg 120 | 665/66588862@N00/4990604401_0.jpg 121 | 665/66588862@N00/4990604401_1.jpg 122 | 175/17514301@N00/504360950_0.jpg 123 | 242/24256726@N00/2284033395_1.jpg 124 | 388/38847947@N03/4278156225_0.jpg 125 | 120/12057709@N00/4817523632_5.jpg 126 | 958/95876452@N07/8757383318_5.jpg 127 | 958/95876452@N07/9136979141_0.jpg 128 | 511/51135741@N00/1197314078_2.jpg 129 | 732/73257494@N00/254360075_0.jpg 130 | 217/21725313@N04/2735944275_4.jpg 131 | 859/85928975@N00/3054044988_0.jpg 132 | 803/8035175@N05/511570673_2.jpg 133 | 169/16922208@N08/5320713367_0.jpg 134 | 370/37016273@N02/8089047614_0.jpg 135 | 108/10807869@N03/9369308884_2.jpg 136 | 859/85928975@N00/3320166109_0.jpg 137 | 365/36521958172@N01/100498068_4.jpg 138 | 724/7243800@N05/2227611248_0.jpg 139 | 724/7243800@N05/2227611248_3.jpg 140 | 117/11738031@N00/2998834476_0.jpg 141 | 795/7959345@N03/5237906433_0.jpg 142 | 236/23634892@N07/8479172294_0.jpg 143 | 334/33484476@N00/1631862642_0.jpg 144 | 793/7934170@N07/4553285915_3.jpg 145 | 603/60370779@N07/12733436605_0.jpg 146 | 943/94366076@N00/3048525915_0.jpg 147 | 277/27721729@N07/3509070727_2.jpg 148 | 759/75933558@N00/3620382660_0.jpg 149 | 460/46041863@N08/4441052512_0.jpg 150 | 129/12989638@N00/6212397379_0.jpg 151 | 337/33757663@N00/7358306448_0.jpg 152 | 943/94306126@N05/8585087758_1.jpg 153 | 953/95388136@N08/8862410806_0.jpg 154 | 223/22320444@N08/5673545350_0.jpg 155 | 434/43411679@N00/262399750_0.jpg 156 | 635/63534101@N00/2646352652_1.jpg 157 | 225/22591084@N00/6196535614_0.jpg 158 | 635/63587775@N03/6196638593_25.jpg 159 | 353/35399223@N08/6016093673_0.jpg 160 | 410/41063459@N02/3864724318_0.jpg 161 | 822/8220615@N05/4552969733_3.jpg 162 | 549/54949957@N00/5233621265_1.jpg 163 | 701/70177760@N04/9477029774_4.jpg 164 | 105/10564470@N04/2648385175_0.jpg 165 | 199/19909714@N00/3830545099_2.jpg 166 | 758/75818171@N02/7146473317_0.jpg 167 | 233/23354880@N03/2452852823_0.jpg 168 | 399/39915396@N00/5193727577_1.jpg 169 | 575/57503924@N07/6296483205_0.jpg 170 | 430/43093552@N00/6705048843_0.jpg 171 | 417/41739894@N04/4830148462_0.jpg 172 | 417/41739894@N04/4830148462_1.jpg 173 | 101/101386609@N04/13523151125_0.jpg 174 | 381/38117284@N00/1590829110_0.jpg 175 | 827/8279509@N08/2712311284_1.jpg 176 | 379/37963250@N06/3588816064_0.jpg 177 | 510/51035607419@N01/3883605563_0.jpg 178 | 311/31150907@N03/4133932870_1.jpg 179 | 311/31150907@N03/4333226349_0.jpg 180 | 409/40989913@N03/5368897603_1.jpg 181 | 332/33213804@N04/7599995760_2.jpg 182 | 
519/51973188@N06/8553574872_0.jpg 183 | 665/66598017@N08/9614369266_1.jpg 184 | 131/13117300@N05/2323998041_0.jpg 185 | 489/48949906@N00/3917465141_1.jpg 186 | 489/48949906@N00/3917465141_2.jpg 187 | 251/25117187@N03/9118076515_0.jpg 188 | 225/22547477@N05/10576991153_1.jpg 189 | 169/16926338@N06/3551147251_1.jpg 190 | 120/12057709@N00/4817523632_1.jpg 191 | 327/32721085@N04/5095989423_2.jpg 192 | 404/40406266@N08/7499062102_1.jpg 193 | 227/22766186@N07/5625438347_0.jpg 194 | 227/22766186@N07/5626032050_2.jpg 195 | 302/30237597@N00/3761331400_0.jpg 196 | 412/41259870@N06/5811920381_0.jpg 197 | 369/36934384@N07/7153191901_0.jpg 198 | 369/36934384@N07/7153191901_1.jpg 199 | 332/33213804@N04/7599995760_6.jpg 200 | 526/52604186@N03/9982748374_2.jpg 201 | 125/12508217@N08/3458308016_0.jpg 202 | 793/79383703@N08/8188874944_2.jpg 203 | 328/32820037@N04/12550334735_0.jpg 204 | 554/55456932@N08/5239999140_0.jpg 205 | 193/19323934@N05/5236281857_0.jpg 206 | 300/30035643@N03/6318064786_1.jpg 207 | 260/26071009@N04/8133430835_0.jpg 208 | 395/39545243@N00/1121668561_23.jpg 209 | 927/92723020@N00/198034899_0.jpg 210 | 732/73257494@N00/254360075_1.jpg 211 | 768/76815233@N00/318365457_0.jpg 212 | 768/76815233@N00/318365457_1.jpg 213 | 758/75818171@N02/6811401849_0.jpg 214 | 960/96009072@N00/7213113090_1.jpg 215 | 440/44042276@N00/11078882754_1.jpg 216 | 563/56379629@N00/231943094_0.jpg 217 | 984/98411817@N00/2372250142_0.jpg 218 | 984/98411817@N00/2372250142_4.jpg 219 | 758/75815807@N00/3141877049_8.jpg 220 | 779/77991982@N00/3351774300_1.jpg 221 | 956/95668756@N00/9137041215_1.jpg 222 | 664/66487272@N00/3175332074_0.jpg 223 | 714/71484472@N08/7749429212_0.jpg 224 | 622/62292236@N03/12280954993_0.jpg 225 | 716/71635685@N00/2171921996_1.jpg 226 | 703/70335407@N00/2629913923_0.jpg 227 | 375/37526864@N03/3575384689_0.jpg 228 | 441/44124482892@N01/4076872185_0.jpg 229 | 760/7600622@N07/442516245_2.jpg 230 | 923/92399379@N00/3745627039_0.jpg 231 | 236/23634892@N07/8479172294_1.jpg 232 | 620/62092813@N00/12460977104_8.jpg 233 | 486/48600102544@N01/206574155_2.jpg 234 | 486/48600102544@N01/206574155_9.jpg 235 | 120/12057709@N00/4817523632_4.jpg 236 | 421/42197860@N05/7964010316_0.jpg 237 | 790/79045485@N00/8312851597_13.jpg 238 | 943/94366076@N00/3048525915_2.jpg 239 | 904/9047144@N06/4039658431_2.jpg 240 | 222/22292214@N00/4243141797_0.jpg 241 | 736/73694732@N00/4431487460_0.jpg 242 | 527/52706816@N04/4862401074_0.jpg 243 | 696/69654695@N04/8112732403_0.jpg 244 | 147/14754516@N00/6746387961_0.jpg 245 | 497/49703429@N00/487110767_7.jpg 246 | 145/14537247@N02/3736211228_8.jpg 247 | 286/28629203@N00/3064397634_3.jpg 248 | 332/33213624@N05/3106373748_0.jpg 249 | 334/33415234@N02/3114165513_1.jpg 250 | 314/31492856@N08/3175678730_1.jpg 251 | 333/33369864@N07/3244523696_4.jpg 252 | 309/30975003@N06/3322588992_1.jpg 253 | 262/26212498@N08/3372483710_1.jpg 254 | 373/37310116@N04/3434757683_0.jpg 255 | 246/24652718@N07/3504274334_0.jpg 256 | 311/31170710@N02/3541648434_0.jpg 257 | 868/8683186@N05/3611020792_0.jpg 258 | 868/8683186@N05/3611020792_6.jpg 259 | 145/14537247@N02/3736211228_1.jpg 260 | 145/14537247@N02/3736211228_10.jpg 261 | 145/14537247@N02/3736211228_3.jpg 262 | 145/14537247@N02/3736211228_7.jpg 263 | 403/40384578@N06/4101631794_0.jpg 264 | 495/49503010002@N01/4159540475_1.jpg 265 | 868/8683186@N05/4808516991_6.jpg 266 | 412/41275121@N06/4903601396_5.jpg 267 | 529/52947003@N08/5062807160_1.jpg 268 | 529/52947003@N08/5062807160_10.jpg 269 | 529/52947003@N08/5062807160_6.jpg 270 | 
556/55642376@N02/5907157097_2.jpg 271 | 556/55642376@N02/5907157097_3.jpg 272 | 294/29475291@N05/6520878321_0.jpg 273 | 788/78832981@N04/7439193650_0.jpg 274 | 861/86125876@N07/7890416460_0.jpg 275 | 495/49503124519@N01/8054014324_8.jpg 276 | 170/17040371@N08/8095753999_5.jpg 277 | 170/17040371@N08/8095788948_4.jpg 278 | 336/33672038@N05/8147806935_1.jpg 279 | 340/34085730@N06/8292774555_1.jpg 280 | 268/26881063@N08/13878266544_2.jpg 281 | 309/30932831@N00/2773069134_2.jpg 282 | 276/27674701@N00/306654282_1.jpg 283 | 402/40272616@N07/4800636882_1.jpg 284 | 980/98055082@N00/992566662_1.jpg 285 | 232/23206546@N04/8621794327_0.jpg 286 | 232/23206546@N04/8621794327_1.jpg 287 | 224/22491837@N08/3626048130_0.jpg 288 | 224/22491837@N08/3641245154_2.jpg 289 | 950/95026061@N00/2542285242_0.jpg 290 | 759/75923111@N02/8430338965_0.jpg 291 | 106/10647915@N04/6503063251_1.jpg 292 | 759/7598168@N03/5226476291_0.jpg 293 | 105/105042823@N05/10224905845_2.jpg 294 | 450/45019754@N02/7606879826_2.jpg 295 | 943/94324445@N02/8585299005_4.jpg 296 | 310/31083236@N06/2986649565_0.jpg 297 | 441/44124482892@N01/4076872185_4.jpg 298 | 292/29276771@N03/11641854816_0.jpg 299 | 363/36382552@N04/5425274524_3.jpg 300 | 767/76795962@N03/12297733704_0.jpg 301 | 121/121110687@N08/13323427194_2.jpg 302 | 229/22969849@N00/1349964647_17.jpg 303 | 371/37147296@N03/3720755210_3.jpg 304 | 366/36684003@N06/4437979063_0.jpg 305 | 533/53384351@N06/4978715463_0.jpg 306 | 125/12572929@N05/5221666460_1.jpg 307 | 641/64158255@N00/5489160164_0.jpg 308 | 299/29912007@N05/5921111759_2.jpg 309 | 388/38820321@N06/6281589012_0.jpg 310 | 266/26657772@N02/7323904516_0.jpg 311 | 812/81253095@N08/7563494124_0.jpg 312 | 812/81253095@N08/7563494124_7.jpg 313 | 131/13117300@N05/2323998041_1.jpg 314 | 239/23927487@N05/4499177940_1.jpg 315 | 624/62477948@N02/5691704999_3.jpg 316 | 458/45842803@N00/2702028061_3.jpg 317 | 213/21385420@N00/9351830159_2.jpg 318 | 111/11149280@N02/9354553803_0.jpg 319 | 297/29785808@N04/2869578452_0.jpg 320 | 581/58167807@N00/4216179051_0.jpg 321 | 309/30932831@N00/2773069134_0.jpg 322 | 113/113235388@N08/11701952496_5.jpg 323 | 431/43152922@N00/3293843245_0.jpg 324 | 270/27088911@N08/5601971419_0.jpg 325 | 291/29123483@N00/8613949068_1.jpg 326 | 619/61992454@N00/119165477_2.jpg 327 | 260/26071009@N04/8133430835_1.jpg 328 | 561/56198214@N00/11354524454_0.jpg 329 | 295/29599105@N00/5802624346_6.jpg 330 | 330/33085931@N08/9400799871_0.jpg 331 | 341/34128229@N06/3267947070_0.jpg 332 | 213/21385420@N00/9351830159_0.jpg 333 | 758/75815807@N00/3141877049_7.jpg 334 | 529/52909086@N08/4912165639_0.jpg 335 | 923/92319630@N00/2022738688_0.jpg 336 | 318/31826784@N06/3346271475_0.jpg 337 | 437/43791698@N07/4031782636_4.jpg 338 | 269/26966164@N08/2699439482_3.jpg 339 | 525/52545972@N00/7991923484_0.jpg 340 | 250/25030443@N03/2852010154_6.jpg 341 | 886/88664590@N00/702044241_0.jpg 342 | 658/65873073@N00/8667101819_1.jpg 343 | 274/27433266@N00/5972034863_0.jpg 344 | 292/29233640@N07/3691901778_5.jpg 345 | 122/12254431@N02/5261928850_0.jpg 346 | 765/76562640@N00/2561153799_0.jpg 347 | 664/66487272@N00/3175332074_3.jpg 348 | 349/34946027@N06/3899131678_3.jpg 349 | 914/91409706@N00/7296129156_3.jpg 350 | 914/91409706@N00/7296129156_5.jpg 351 | 329/32996060@N05/7496967290_1.jpg 352 | 241/24112094@N00/7545432624_0.jpg 353 | 588/58871905@N03/8212289784_0.jpg 354 | 284/28426408@N00/4985494194_0.jpg 355 | 315/31594932@N00/8242400525_1.jpg 356 | 315/31594932@N00/8243469114_0.jpg 357 | 500/50016899@N03/4605388504_0.jpg 358 | 
263/26357527@N05/7223540266_2.jpg 359 | 498/49814762@N00/8014123645_0.jpg 360 | 498/49814762@N00/8014123645_1.jpg 361 | 875/87504239@N08/10670731404_4.jpg 362 | 875/87504239@N08/10852751085_3.jpg 363 | 372/37244828@N04/13173856803_10.jpg 364 | 371/37147296@N03/3720755210_1.jpg 365 | 125/12572929@N05/5221666460_2.jpg 366 | 641/64158255@N00/5489160164_2.jpg 367 | 356/35658425@N08/5726282580_0.jpg 368 | 766/76681787@N00/5957052480_2.jpg 369 | 812/81253095@N08/7563494124_10.jpg 370 | 812/81253095@N08/7563494124_6.jpg 371 | 699/69958247@N05/13388922034_0.jpg 372 | 748/74896593@N07/6764119963_0.jpg 373 | 846/84655869@N08/7775105362_0.jpg 374 | 181/18155385@N00/239228291_7.jpg 375 | 421/42103613@N08/4040845679_1.jpg 376 | 313/31369133@N04/5050193084_4.jpg 377 | 785/78573292@N00/2256084892_0.jpg 378 | 167/16759096@N00/489481531_0.jpg 379 | 957/95723376@N06/8735477642_1.jpg 380 | 863/8630870@N02/5322350325_0.jpg 381 | 863/8630870@N02/5322350325_2.jpg 382 | 863/8630870@N02/5322350325_3.jpg 383 | 863/8630870@N02/5322350325_4.jpg 384 | 863/8630870@N02/5322350325_5.jpg 385 | 735/73531427@N00/218215108_0.jpg 386 | 309/30932831@N00/2773069134_3.jpg 387 | 727/72707136@N00/33992579_1.jpg 388 | 762/76236359@N00/6234006071_5.jpg 389 | 736/73626930@N00/9428192473_0.jpg 390 | 105/105042823@N05/10224905845_4.jpg 391 | 284/28476480@N04/3975041778_0.jpg 392 | 943/94324445@N02/8585299005_1.jpg 393 | 601/60125017@N00/344049639_2.jpg 394 | 876/87619178@N03/8590731823_0.jpg 395 | 277/27712137@N04/2623505924_0.jpg 396 | 701/70154022@N00/3925362192_1.jpg 397 | 459/45975847@N07/4691684655_2.jpg 398 | 459/45975847@N07/4692316090_0.jpg 399 | 514/51460103@N07/4823559383_0.jpg 400 | 220/22072051@N03/5011260955_0.jpg 401 | 535/53533856@N08/5361207739_0.jpg 402 | 535/53533856@N08/5361207739_1.jpg 403 | 535/53533856@N08/5361207739_4.jpg 404 | 528/52858230@N08/5564688606_0.jpg 405 | 574/57438188@N07/5627487722_0.jpg 406 | 176/17694278@N04/6508529521_0.jpg 407 | 362/36204621@N08/6698329979_0.jpg 408 | 697/69794372@N08/6785690647_2.jpg 409 | 100/100739634@N06/9717775343_0.jpg 410 | 267/26728047@N05/5524462661_5.jpg 411 | 156/15699085@N05/6408711257_0.jpg 412 | 327/32721085@N04/5095989423_0.jpg 413 | 669/66964839@N00/8741217920_1.jpg 414 | 608/60842486@N00/2702417524_0.jpg 415 | 206/20625597@N07/3362909564_5.jpg 416 | 554/55456932@N08/5239999140_1.jpg 417 | 616/61628022@N02/6999143216_0.jpg 418 | 685/68558611@N06/6390155109_1.jpg 419 | 770/77029195@N00/490108118_4.jpg 420 | 258/25827417@N00/2574466319_0.jpg 421 | 878/87855339@N00/3244971644_0.jpg 422 | 878/87855339@N00/3244971644_1.jpg 423 | 654/65497908@N00/6813913952_1.jpg 424 | 868/8683186@N05/3611020792_2.jpg 425 | 868/8683186@N05/4808516991_9.jpg 426 | 294/29475291@N05/6520878321_1.jpg 427 | 716/71678139@N05/6807828131_0.jpg 428 | 716/71678139@N05/6807828131_1.jpg 429 | 716/71678139@N05/6807828131_2.jpg 430 | 716/71678139@N05/6807828131_3.jpg 431 | 716/71678139@N05/6807828131_4.jpg 432 | 716/71678139@N05/6807828131_5.jpg 433 | 716/71678139@N05/6807828131_6.jpg 434 | 170/17040371@N08/8095753999_3.jpg 435 | 170/17040371@N08/8095788948_3.jpg 436 | 336/33672038@N05/8147806935_2.jpg 437 | 441/44124395142@N01/407637002_0.jpg 438 | 795/79543373@N00/2529893768_1.jpg 439 | 575/57545119@N04/8673142329_0.jpg 440 | 359/35909637@N06/3917967715_0.jpg 441 | 407/40719493@N06/4008300252_0.jpg 442 | 764/76491372@N00/540480752_0.jpg 443 | 221/22132798@N08/8018992056_1.jpg 444 | 402/40245280@N00/1936928371_2.jpg 445 | 114/114772050@N03/12008186175_0.jpg 446 | 636/63601558@N00/199623056_0.jpg 447 
| 185/18502090@N00/2524917681_47.jpg 448 | 793/79398354@N00/2622290309_0.jpg 449 | 898/8982863@N07/10257490644_5.jpg 450 | 898/8982863@N07/10257490644_8.jpg 451 | 458/45861060@N00/7900853588_1.jpg 452 | 102/102077743@N07/12190186456_0.jpg 453 | 228/22882274@N04/3632475832_2.jpg 454 | 233/23357263@N03/3953791578_5.jpg 455 | 317/31769130@N03/11011170923_0.jpg 456 | 371/37195519@N02/3424150218_0.jpg 457 | 795/79589933@N00/3352567863_0.jpg 458 | 623/62362697@N00/8577869641_3.jpg 459 | 365/36521958172@N01/100498068_1.jpg 460 | 898/8982863@N07/10257490644_10.jpg 461 | 102/102077743@N07/12190186456_2.jpg 462 | 354/35468148224@N01/12347079_0.jpg 463 | 905/90516586@N00/159075833_0.jpg 464 | 773/77364737@N00/2503183115_0.jpg 465 | 975/97584199@N00/2632241655_1.jpg 466 | 527/52772894@N00/90756428_0.jpg 467 | 527/52772894@N00/90756428_2.jpg 468 | 972/97205184@N04/10076409423_0.jpg 469 | 101/101386609@N04/13523144805_0.jpg 470 | 827/8279509@N08/2712311284_0.jpg 471 | 379/37963250@N06/3588816064_1.jpg 472 | 379/37963250@N06/3588816064_2.jpg 473 | 510/51035607419@N01/3883605563_1.jpg 474 | 311/31150907@N03/4133932870_0.jpg 475 | 409/40989913@N03/5368897603_0.jpg 476 | 412/41259870@N06/5812928114_0.jpg 477 | 332/33213804@N04/7599995760_0.jpg 478 | 332/33213804@N04/7599995760_1.jpg 479 | 375/37552553@N03/8157072811_0.jpg 480 | 665/66598017@N08/9614369266_3.jpg 481 | 241/24112094@N00/3994684107_2.jpg 482 | 388/38847947@N03/4351812234_5.jpg 483 | 510/51035620166@N01/237428859_2.jpg 484 | 450/45019754@N02/7606879826_0.jpg 485 | 664/66474562@N00/19195378_2.jpg 486 | 287/28771658@N03/3905862519_0.jpg 487 | 123/12360228@N04/7166710688_1.jpg 488 | 123/12360228@N04/7166710688_2.jpg 489 | 302/30245869@N07/2843650382_4.jpg 490 | 972/97205184@N04/10079707226_0.jpg 491 | 570/57023246@N00/4623144791_0.jpg 492 | 332/33213804@N04/7599995760_4.jpg 493 | 487/48782814@N07/13847705683_0.jpg 494 | 595/59533494@N04/5479759436_0.jpg 495 | 938/93841400@N00/8327059841_1.jpg 496 | 824/8240241@N06/4114864483_0.jpg 497 | 513/51313580@N08/6077728920_5.jpg 498 | 289/28917877@N00/6333929606_0.jpg 499 | 395/39545243@N00/1121668561_35.jpg 500 | 640/64073015@N00/2831021702_0.jpg 501 | 141/14195956@N04/3948300818_3.jpg 502 | 539/53906287@N00/5101060235_7.jpg 503 | 500/50034633@N05/4837007562_1.jpg 504 | 254/25414047@N00/145668608_3.jpg 505 | 489/48949906@N00/3917465141_0.jpg 506 | 808/80812769@N00/442495313_0.jpg 507 | 459/45964884@N08/5242276755_2.jpg 508 | 103/10394437@N03/4568233297_5.jpg 509 | 169/16989146@N06/7641494140_4.jpg 510 | 597/59751999@N00/3289666452_1.jpg 511 | 437/43709093@N07/4256796727_0.jpg 512 | 445/44521275@N00/506634574_2.jpg 513 | 716/71616417@N05/6509071195_2.jpg 514 | 716/71616417@N05/6509074321_2.jpg 515 | 524/52407821@N00/7984244356_0.jpg 516 | 366/36624962@N03/8288304168_3.jpg 517 | 697/69756126@N00/4662338037_0.jpg 518 | 631/63177605@N08/6792309851_2.jpg 519 | 616/61670399@N07/10102978284_1.jpg 520 | 105/105042823@N05/10225015423_0.jpg 521 | 273/27357514@N00/11343847725_0.jpg 522 | 463/46327697@N00/2124534031_4.jpg 523 | 984/98411817@N00/2372250142_6.jpg 524 | 257/25797798@N07/3211844955_1.jpg 525 | 111/11134149@N02/3317710642_0.jpg 526 | 388/38847947@N03/4178270889_3.jpg 527 | 822/8220615@N05/4552969733_5.jpg 528 | 611/61109191@N00/5543733500_2.jpg 529 | 806/80604586@N07/9070005912_4.jpg 530 | 241/24113869@N07/9379713988_1.jpg 531 | 748/74888386@N00/9692493485_0.jpg 532 | 695/69501650@N00/46407370_1.jpg 533 | 950/95082307@N00/8773335932_0.jpg 534 | 510/51035620166@N01/237428859_2.jpg 535 | 
434/43411679@N00/262399750_3.jpg 536 | 634/63465779@N07/6362029047_0.jpg 537 | 987/98736785@N00/4499296_3.jpg 538 | 715/7155702@N03/2302080410_0.jpg 539 | 715/7155702@N03/2302080410_1.jpg 540 | 715/7155702@N03/2302080410_3.jpg 541 | 715/7155702@N03/2302080410_4.jpg 542 | 715/7155702@N03/2302080410_5.jpg 543 | 715/7155702@N03/3631806363_2.jpg 544 | 874/87472210@N00/3105959620_1.jpg 545 | 246/24638567@N00/3321513653_0.jpg 546 | 945/94545836@N00/1443145007_1.jpg 547 | 362/36298222@N08/5935447071_0.jpg 548 | 222/22284790@N06/2176524544_0.jpg 549 | 747/7478277@N04/3155354570_0.jpg 550 | 747/7478277@N04/3155354570_1.jpg 551 | 375/37531198@N00/4174611825_2.jpg 552 | 375/37531198@N00/4174611825_4.jpg 553 | 768/76841197@N00/349452085_2.jpg 554 | 768/76841197@N00/349452085_3.jpg 555 | 468/46868174@N08/6511978997_0.jpg 556 | 100/10058188@N05/8295748234_0.jpg 557 | 108/108285828@N06/10779570026_1.jpg 558 | 108/108285828@N06/10779778303_0.jpg 559 | 168/16873194@N05/1801191486_0.jpg 560 | 857/8572970@N04/2079107678_1.jpg 561 | 472/47253165@N00/360990372_0.jpg 562 | 647/64767764@N00/450008059_0.jpg 563 | 729/72971773@N00/58588039_0.jpg 564 | 911/91148289@N00/7916666268_1.jpg 565 | 416/41608212@N00/8718283094_0.jpg 566 | 911/91113759@N00/2539574881_0.jpg 567 | 293/29322132@N08/3569343093_3.jpg 568 | 876/87671668@N00/9013803280_0.jpg 569 | 381/38176611@N04/8638753913_4.jpg 570 | 976/97644214@N00/6173832483_1.jpg 571 | 232/23206546@N04/8621794327_6.jpg 572 | 563/56379629@N00/231943094_3.jpg 573 | 864/86429182@N00/330808508_1.jpg 574 | 353/35336901@N00/4353383267_1.jpg 575 | 631/63122283@N06/7978644677_2.jpg 576 | 932/93211492@N06/8487757725_206.jpg 577 | 575/57545119@N04/8673142329_3.jpg 578 | 354/35479068@N06/3306075644_0.jpg 579 | 277/27721729@N07/3509070727_0.jpg 580 | 188/18899983@N00/4100299699_2.jpg 581 | 213/21349468@N00/82691007_1.jpg 582 | 110/11021333@N02/2957267195_2.jpg 583 | 925/9259500@N03/5710775169_0.jpg 584 | 486/48600102544@N01/206574155_11.jpg 585 | 486/48600102544@N01/206574155_12.jpg 586 | 486/48600102544@N01/206574155_7.jpg 587 | 339/33959433@N05/8396364213_0.jpg 588 | 997/9975353@N03/12630170935_1.jpg 589 | 247/24733288@N07/5846541456_0.jpg 590 | 872/8729914@N05/5099947628_0.jpg 591 | 889/88976184@N05/8244475370_1.jpg 592 | 889/88976184@N05/8244475370_3.jpg 593 | 630/63095109@N00/1079079769_0.jpg 594 | 664/66422694@N00/182828985_0.jpg 595 | 645/64503524@N00/2480285692_0.jpg 596 | 864/86429182@N00/330808508_0.jpg 597 | 447/44754496@N00/4287694240_1.jpg 598 | 363/36382552@N04/5425274524_6.jpg 599 | 210/21091679@N08/5811404199_2.jpg 600 | 562/56213435@N08/9008148399_0.jpg 601 | 695/69501650@N00/46407370_0.jpg 602 | 889/88976184@N05/8244475370_0.jpg 603 | 889/88976184@N05/8244475370_2.jpg 604 | 889/88976184@N05/8244475370_4.jpg 605 | 889/88976184@N05/8244475370_5.jpg 606 | 527/52706816@N04/4862401074_1.jpg 607 | 794/79428123@N06/7349516920_1.jpg 608 | 790/79024059@N04/7351250586_0.jpg 609 | 693/69318970@N00/9266107252_1.jpg 610 | 943/94324445@N02/8585410655_1.jpg 611 | 599/59947594@N00/5684370261_0.jpg 612 | 849/84986567@N00/177640275_0.jpg 613 | 345/34514184@N03/3562741086_0.jpg 614 | 158/15803691@N00/6914149962_2.jpg 615 | 817/81708918@N03/7855914580_5.jpg 616 | 309/30975003@N06/3322588992_8.jpg 617 | 900/90054085@N00/43987316_0.jpg 618 | 136/13675730@N06/2452368334_0.jpg 619 | 386/38675455@N00/6803844225_0.jpg 620 | 717/71763791@N00/5532357066_0.jpg 621 | 606/60604521@N07/5584865343_0.jpg 622 | 395/39545243@N00/1121668561_29.jpg 623 | 391/39196943@N05/3992501716_2.jpg 624 | 
207/20719052@N04/8646466859_0.jpg 625 | 207/20719052@N04/8646466859_1.jpg 626 | 758/75815807@N00/3141877049_0.jpg 627 | 471/47170787@N05/6477792561_2.jpg 628 | 317/31734244@N00/542792072_0.jpg 629 | 931/9319402@N03/3358320101_0.jpg 630 | 362/36223735@N03/3784170734_3.jpg 631 | 534/53400644@N05/5044502598_0.jpg 632 | 513/51313580@N08/6077728920_1.jpg 633 | 137/13774680@N04/12313323523_0.jpg 634 | 636/63608960@N08/13553141815_2.jpg 635 | 434/43463445@N07/5723825501_0.jpg 636 | 237/23740675@N05/6877915376_0.jpg 637 | 868/8683186@N05/3611020792_4.jpg 638 | 216/21653251@N04/5633592631_0.jpg 639 | 216/21653251@N04/5633592631_1.jpg 640 | 170/17040371@N08/8095753999_1.jpg 641 | 170/17040371@N08/8095788948_1.jpg 642 | 620/62092813@N00/12460977104_4.jpg 643 | 236/23686718@N08/10662589695_3.jpg 644 | 264/26451473@N00/1342290947_5.jpg 645 | 636/63608960@N08/13553141815_6.jpg 646 | 409/40926212@N05/13257463775_7.jpg 647 | 449/44915880@N03/4248497494_1.jpg 648 | 235/23588194@N00/4494098639_1.jpg 649 | 532/53201041@N07/4911217327_0.jpg 650 | 532/53201041@N07/4911259327_0.jpg 651 | 606/60625084@N04/6450812495_0.jpg 652 | 650/65013293@N02/7724906252_3.jpg 653 | 560/56021903@N02/8227297222_2.jpg 654 | 359/35904083@N08/4345934789_0.jpg 655 | 593/59319260@N00/8743143610_2.jpg 656 | 271/27125798@N05/2710711437_0.jpg 657 | 402/40210933@N02/6043303646_0.jpg 658 | 122/12280910@N06/6299940378_1.jpg 659 | 207/20775043@N07/3627508299_0.jpg 660 | 595/59592751@N08/9096179270_0.jpg 661 | 395/39545243@N00/1121668561_14.jpg 662 | 296/29625288@N00/12891226005_0.jpg 663 | 548/54858071@N00/205845735_1.jpg 664 | 751/75148156@N00/3485469298_1.jpg 665 | 407/40764492@N08/3797968770_0.jpg 666 | 333/33369864@N07/3244523696_0.jpg 667 | 312/31236383@N03/3259555928_1.jpg 668 | 385/38524878@N04/3563411595_0.jpg 669 | 259/25955358@N06/3921002900_2.jpg 670 | 788/78832981@N04/7447167480_1.jpg 671 | 696/69696568@N05/6872955321_1.jpg 672 | 757/75741170@N02/6829396274_0.jpg 673 | 724/72448202@N00/359801202_1.jpg 674 | 969/96934953@N00/4315376370_0.jpg 675 | 133/13361855@N00/989708629_1.jpg 676 | 664/66478195@N00/11649050243_3.jpg 677 | 122/12276055@N02/4214939371_0.jpg 678 | 463/46327697@N00/2124534031_3.jpg 679 | 534/53400644@N05/5044502598_1.jpg 680 | 669/66944684@N03/6306275408_0.jpg 681 | 710/71006820@N00/4037980088_0.jpg 682 | 766/76656964@N04/9363467181_3.jpg 683 | 112/11262617@N07/12873348265_0.jpg 684 | 409/40926212@N05/13257463775_4.jpg 685 | 409/40926212@N05/13835647984_0.jpg 686 | 528/52858230@N08/5636750038_0.jpg 687 | 528/52858230@N08/5636750038_1.jpg 688 | 203/20335591@N04/9752849313_1.jpg 689 | 350/35034345966@N01/3086429616_1.jpg 690 | 736/73694732@N00/4430689233_0.jpg 691 | 605/60517117@N00/10370054943_0.jpg 692 | 421/42173774@N03/3889203507_0.jpg 693 | 511/51135741@N00/1197314078_3.jpg 694 | 267/26753167@N04/5922076258_2.jpg 695 | 330/33062815@N00/8606375434_1.jpg 696 | 108/10807869@N03/9369308884_0.jpg 697 | 761/76151808@N00/6945480767_0.jpg 698 | 757/75741170@N02/6829396274_1.jpg 699 | 220/22057861@N07/2126995093_0.jpg 700 | 665/66572814@N00/8242014606_0.jpg 701 | 849/84986567@N00/177640275_1.jpg 702 | 444/44460493@N03/4085897885_0.jpg 703 | 330/33085931@N08/9400799871_1.jpg 704 | 533/53314395@N00/5174087013_11.jpg 705 | 864/86429182@N00/330808508_2.jpg 706 | 151/15133426@N08/3646529532_0.jpg 707 | 302/30223854@N00/6535512351_0.jpg 708 | 929/92903356@N04/8497572867_0.jpg 709 | 412/41232325@N03/9045866185_0.jpg 710 | 582/58246614@N00/2842715287_1.jpg 711 | 513/51304493@N05/4932744035_0.jpg 712 | 
301/30120696@N02/3068639840_0.jpg 713 | 407/40764492@N08/3797968770_0.jpg 714 | 231/23100621@N04/3986465605_0.jpg 715 | 288/28891066@N00/4210148052_2.jpg 716 | 940/94066910@N00/448278027_1.jpg 717 | 532/53282124@N00/3858373020_0.jpg 718 | 597/59751999@N00/3289666452_2.jpg 719 | 365/36521958172@N01/100498068_4.jpg 720 | 293/29322132@N08/3569343093_3.jpg 721 | -------------------------------------------------------------------------------- /megaface/results/cmc_facescrub_megaface_0_1000000_1.json: -------------------------------------------------------------------------------- 1 | 2 | { 3 | "cmc" : 4 | [ 5 | [ 0, 13, 966804 ], 6 | [ 0.980616033077240, 0.9906746745109558, 1.0 ] 7 | ], 8 | "roc" : 9 | [ 10 | 11 | [ 12 | 0.0, 13 | 1.034335816996190e-08, 14 | 4.137343267984761e-08, 15 | 5.171678907345267e-08, 16 | 7.240350896609016e-08, 17 | 1.137769416459378e-07, 18 | 1.965237999002056e-07, 19 | 1.841117750700505e-06, 20 | 0.001009718631394207, 21 | 1.0 22 | ], 23 | 24 | [ 25 | 0.8872767686843872, 26 | 0.9050645828247070, 27 | 0.92467862367630, 28 | 0.9406545758247375, 29 | 0.9531793594360352, 30 | 0.9637916684150696, 31 | 0.9753674268722534, 32 | 0.9853757619857788, 33 | 0.9953840970993042, 34 | 1.0 35 | ] 36 | ], 37 | "traditional_cmc" : 38 | [ 39 | [ 0, 12, 966883 ], 40 | [ 0.9822201728820801, 0.9923146963119507, 1.000002264976501 ] 41 | ] 42 | } 43 | -------------------------------------------------------------------------------- /megaface_eval.py: -------------------------------------------------------------------------------- 1 | import subprocess 2 | 3 | import torch 4 | 5 | from config import device 6 | from megaface_utils import gen_feature, remove_noise 7 | 8 | 9 | # from torch import nn 10 | 11 | 12 | def megaface_test(model): 13 | cmd = 'find megaface/FaceScrub_aligned -name "*.bin" -type f -delete' 14 | print(cmd) 15 | output = subprocess.check_output(cmd, shell=True).decode("utf-8") 16 | print(output) 17 | 18 | cmd = 'find megaface/MegaFace_aligned/FlickrFinal2 -name "*.bin" -type f -delete' 19 | print(cmd) 20 | output = subprocess.check_output(cmd, shell=True).decode("utf-8") 21 | print(output) 22 | 23 | gen_feature('megaface/FaceScrub_aligned', model) 24 | gen_feature('megaface/MegaFace_aligned/FlickrFinal2', model) 25 | remove_noise() 26 | 27 | cmd = 'python megaface/devkit/experiments/run_experiment.py -p megaface/devkit/templatelists/facescrub_uncropped_features_list.json megaface/MegaFace_aligned/FlickrFinal2 megaface/FaceScrub_aligned _0.bin results -s 1000000' 28 | # print(cmd) 29 | output = subprocess.check_output(cmd, shell=True).decode("utf-8") 30 | # print(output) 31 | 32 | lines = output.split('\n') 33 | line = [l for l in lines if l.startswith('Rank 1: ')][0] 34 | accuracy = float(line[8:]) 35 | 36 | print('Megaface accuracy: ' + str(accuracy)) 37 | 38 | return accuracy 39 | 40 | 41 | if __name__ == '__main__': 42 | import time 43 | from torch import nn 44 | 45 | # checkpoint = 'BEST_checkpoint.tar' 46 | # print('loading model: {}...'.format(checkpoint)) 47 | # checkpoint = torch.load(checkpoint) 48 | # model = checkpoint['model'].module.to(device) 49 | # model.eval() 50 | 51 | filename = 'insight-face-v3.pt' 52 | 53 | 54 | class HParams: 55 | def __init__(self): 56 | self.pretrained = False 57 | self.use_se = True 58 | 59 | 60 | config = HParams() 61 | 62 | print('loading {}...'.format(filename)) 63 | start = time.time() 64 | from models import resnet101 65 | 66 | model = resnet101(config) 67 | model.load_state_dict(torch.load(filename)) 68 | print('elapsed {} 
sec'.format(time.time() - start)) 69 | 70 | model = nn.DataParallel(model) 71 | model = model.to(device) 72 | model.eval() 73 | 74 | # scripted_model_file = 'mobilefacenet_scripted.pt' 75 | # print('loading {}...'.format(scripted_model_file)) 76 | # model = torch.jit.load(scripted_model_file) 77 | # # model = nn.DataParallel(model) 78 | # model = model.to(device) 79 | # model.eval() 80 | 81 | megaface_test(model) 82 | -------------------------------------------------------------------------------- /megaface_utils.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import json 3 | import os 4 | import struct 5 | 6 | import cv2 as cv 7 | import numpy as np 8 | import torch 9 | 10 | from PIL import Image, ImageOps 11 | from tqdm import tqdm 12 | 13 | from config import device 14 | from data_gen import data_transforms 15 | from utils import align_face, get_central_face_attributes 16 | 17 | 18 | def walkdir(folder, ext): 19 | # Walk through all files in a directory, yielding those with a matching extension 20 | for dirpath, dirs, files in os.walk(folder): 21 | for filename in [f for f in files if f.lower().endswith(ext)]: 22 | yield os.path.abspath(os.path.join(dirpath, filename)) 23 | 24 | 25 | def crop_one_image(filepath, oldkey, newkey): 26 | new_fn = filepath.replace(oldkey, newkey) 27 | tardir = os.path.dirname(new_fn) 28 | if not os.path.isdir(tardir): 29 | os.makedirs(tardir) 30 | 31 | if not os.path.exists(new_fn): 32 | is_valid, bounding_boxes, landmarks = get_central_face_attributes(filepath) 33 | if is_valid: 34 | img = align_face(filepath, landmarks) 35 | cv.imwrite(new_fn, img) 36 | 37 | 38 | def crop(path, oldkey, newkey): 39 | print('Counting images under {}...'.format(path)) 40 | # Precompute the total file count so tqdm can show progress 41 | filecounter = 0 42 | for filepath in walkdir(path, '.jpg'): 43 | filecounter += 1 44 | 45 | for filepath in tqdm(walkdir(path, '.jpg'), total=filecounter, unit="files"): 46 | crop_one_image(filepath, oldkey, newkey) 47 | 48 | print('{} images were processed.'.format(filecounter)) 49 | 50 | 51 | def get_image(transformer, filepath, flip=False): 52 | img = Image.open(filepath) 53 | if flip: 54 | img = ImageOps.mirror(img) 55 | img = transformer(img) 56 | return img.to(device) 57 | 58 | 59 | def gen_feature(path, model): 60 | model.eval() 61 | 62 | print('gen features {}...'.format(path)) 63 | # Precompute the total file count 64 | files = [] 65 | for filepath in walkdir(path, ('.jpg', '.png')): 66 | files.append(filepath) 67 | file_count = len(files) 68 | 69 | transformer = data_transforms['val'] 70 | 71 | batch_size = 128 72 | 73 | with torch.no_grad(): 74 | for start_idx in tqdm(range(0, file_count, batch_size)): 75 | end_idx = min(file_count, start_idx + batch_size) 76 | length = end_idx - start_idx 77 | 78 | imgs_0 = torch.zeros([length, 3, 112, 112], dtype=torch.float, device=device) 79 | for idx in range(0, length): 80 | i = start_idx + idx 81 | filepath = files[i] 82 | imgs_0[idx] = get_image(transformer, filepath, flip=False) 83 | 84 | features_0 = model(imgs_0.to(device)) 85 | features_0 = features_0.cpu().numpy() 86 | 87 | imgs_1 = torch.zeros([length, 3, 112, 112], dtype=torch.float, device=device) 88 | for idx in range(0, length): 89 | i = start_idx + idx 90 | filepath = files[i] 91 | imgs_1[idx] = get_image(transformer, filepath, flip=True) 92 | 93 | features_1 = model(imgs_1.to(device)) 94 | features_1 = features_1.cpu().numpy() 95 | 96 | for idx in range(0, length): 97 | i = start_idx + idx 98 | filepath = files[i] 99 | filepath = filepath.replace(' ', '_') 100 | tarfile = filepath + '_0.bin' 101 | feature = features_0[idx] + features_1[idx] 102 | write_feature(tarfile, feature / np.linalg.norm(feature)) 103 | 104 | 105 | def read_feature(filename): 106 | with open(filename, 'rb') as f: 107 | rows, cols, stride, type_ = struct.unpack('iiii', f.read(4 * 4)) 108 | mat = np.frombuffer(f.read(rows * 4), dtype=np.float32) 109 | return mat.reshape(rows, 1) 110 | 111 | 112 | def write_feature(filename, m): 113 | header = struct.pack('iiii', m.shape[0], 1, 4, 5) 114 | with open(filename, 'wb') as f: 115 | f.write(header) 116 | f.write(m.data) 117 | 118 | 119 | def remove_noise(): 120 | megaface_count = 0 121 | for line in open('megaface/megaface_noises.txt', 'r'): 122 | filename = 'megaface/MegaFace_aligned/FlickrFinal2/' + line.strip() + '_0.bin' 123 | if os.path.exists(filename): 124 | # print(filename) 125 | os.remove(filename) 126 | megaface_count += 1 127 | 128 | print('remove noise - megaface: ' + str(megaface_count)) 129 | 130 | facescrub_count = 0 131 | noise = set() 132 | for line in open('megaface/facescrub_noises.txt', 'r'): 133 | noise.add((line.strip().replace('.png', '.jpg') + '_0.bin')) 134 | 135 | for root, dirs, files in os.walk('megaface/FaceScrub_aligned'): 136 | for f in files: 137 | # print(f) 138 | if f in noise: 139 | filename = os.path.join(root, f) 140 | if os.path.exists(filename): 141 | # print(filename) 142 | os.remove(filename) 143 | facescrub_count += 1 144 | 145 | print('remove noise - facescrub: ' + str(facescrub_count)) 146 | 147 | 148 | def test(): 149 | root1 = '/root/lin/data/FaceScrub_aligned/Benicio Del Toro' 150 | root2 = '/root/lin/data/FaceScrub_aligned/Ben Kingsley' 151 | for f1 in os.listdir(root1): 152 | for f2 in os.listdir(root2): 153 | if f1.lower().endswith('.bin') and f2.lower().endswith('.bin'): 154 | filename1 = os.path.join(root1, f1) 155 | filename2 = os.path.join(root2, f2) 156 | fea1 = read_feature(filename1) 157 | fea2 = read_feature(filename2) 158 | print(((fea1 - fea2) ** 2).sum() ** 0.5) 159 | 160 | 161 | def match_result(): 162 | with open('matches_facescrub_megaface_0_1000000_1.json', 'r') as load_f: 163 | load_dict = json.load(load_f) 164 | print(load_dict) 165 | for i in range(len(load_dict)): 166 | print(load_dict[i]['probes']) 167 | 168 | 169 | def pngtojpg(path): 170 | for root, dirs, files in os.walk(path): 171 | for f in files: 172 | if os.path.splitext(f)[1] == '.png': 173 | img = cv.imread(os.path.join(root, f)) 174 | newfilename = f.replace(".png", ".jpg") 175 | cv.imwrite(os.path.join(root, newfilename), img) 176 | 177 | 178 | def parse_args(): 179 | parser = argparse.ArgumentParser(description='MegaFace data preparation utilities') 180 | # general 181 | parser.add_argument('--action', default='crop_megaface', help='one of: crop_megaface, crop_facescrub, gen_features, pngtojpg, remove_noise') 182 | args = parser.parse_args() 183 | return args 184 | 185 | 186 | if __name__ == '__main__': 187 | args = parse_args() 188 | if args.action == 'crop_megaface': 189 | crop('megaface/MegaFace/FlickrFinal2', 'MegaFace', 'MegaFace_aligned') 190 | elif args.action == 'crop_facescrub': 191 | crop('megaface/facescrub_images', 'facescrub', 'facescrub_aligned') 192 | elif args.action == 'gen_features': 193 | # gen_feature() also needs a trained model argument; see megaface_eval.py 194 | # for how one is constructed and loaded before these calls can run 195 | gen_feature('megaface/facescrub_images') 196 | gen_feature('megaface/MegaFace_aligned/FlickrFinal2') 197 | remove_noise() 198 | elif args.action == 'pngtojpg': 199 | pngtojpg('megaface/facescrub_images') 200 | elif args.action == 'remove_noise': 201 | remove_noise() 202 | 
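The `_0.bin` files written by `gen_feature` above are just a 16-byte header followed by raw floats: four native-order int32 fields (rows, cols, stride in bytes per element, and a type code; the 5 written by `write_feature` appears to correspond to OpenCV's CV_32F) and then `rows` float32 values. A minimal, self-contained round-trip sketch of that format — the `write_bin`/`read_bin` names and the /tmp path are illustrative, not part of the repo:

import struct
import numpy as np

def write_bin(filename, m):
    # header mirrors write_feature: rows, cols=1, stride=4 bytes, type code 5
    with open(filename, 'wb') as f:
        f.write(struct.pack('iiii', m.shape[0], 1, 4, 5))
        f.write(m.astype(np.float32).tobytes())

def read_bin(filename):
    with open(filename, 'rb') as f:
        rows, cols, stride, type_ = struct.unpack('iiii', f.read(16))
        return np.frombuffer(f.read(rows * 4), dtype=np.float32).reshape(rows, 1)

feat = np.random.randn(512).astype(np.float32)
feat /= np.linalg.norm(feat)  # unit length, matching what gen_feature stores
write_bin('/tmp/sample_0.bin', feat)
assert np.allclose(read_bin('/tmp/sample_0.bin').ravel(), feat)

Because gen_feature stores the sum of the normal and mirrored embeddings re-normalized to unit length, the Euclidean distances printed by test() above are directly comparable across identities.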
-------------------------------------------------------------------------------- /models.py: -------------------------------------------------------------------------------- 1 | import math 2 | 3 | import torch 4 | import torch.nn.functional as F 5 | import torch.utils.model_zoo as model_zoo 6 | from torch import nn 7 | from torch.nn import Parameter 8 | 9 | from config import device, num_classes 10 | 11 | __all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101', 12 | 'resnet152'] 13 | 14 | model_urls = { 15 | 'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth', 16 | 'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth', 17 | 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth', 18 | 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth', 19 | 'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth', 20 | } 21 | 22 | 23 | def conv3x3(in_planes, out_planes, stride=1): 24 | """3x3 convolution with padding""" 25 | return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, 26 | padding=1, bias=False) 27 | 28 | 29 | class BasicBlock(nn.Module): 30 | expansion = 1 31 | 32 | def __init__(self, inplanes, planes, stride=1, downsample=None): 33 | super(BasicBlock, self).__init__() 34 | self.conv1 = conv3x3(inplanes, planes, stride) 35 | self.bn1 = nn.BatchNorm2d(planes) 36 | self.relu = nn.ReLU(inplace=True) 37 | self.conv2 = conv3x3(planes, planes) 38 | self.bn2 = nn.BatchNorm2d(planes) 39 | self.downsample = downsample 40 | self.stride = stride 41 | 42 | def forward(self, x): 43 | residual = x 44 | 45 | out = self.conv1(x) 46 | out = self.bn1(out) 47 | out = self.relu(out) 48 | 49 | out = self.conv2(out) 50 | out = self.bn2(out) 51 | 52 | if self.downsample is not None: 53 | residual = self.downsample(x) 54 | 55 | out += residual 56 | out = self.relu(out) 57 | 58 | return out 59 | 60 | 61 | class Bottleneck(nn.Module): 62 | expansion = 4 63 | 64 | def __init__(self, inplanes, planes, stride=1, downsample=None): 65 | super(Bottleneck, self).__init__() 66 | self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False) 67 | self.bn1 = nn.BatchNorm2d(planes) 68 | self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, 69 | padding=1, bias=False) 70 | self.bn2 = nn.BatchNorm2d(planes) 71 | self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False) 72 | self.bn3 = nn.BatchNorm2d(planes * 4) 73 | self.relu = nn.ReLU(inplace=True) 74 | self.downsample = downsample 75 | self.stride = stride 76 | 77 | def forward(self, x): 78 | residual = x 79 | 80 | out = self.conv1(x) 81 | out = self.bn1(out) 82 | out = self.relu(out) 83 | 84 | out = self.conv2(out) 85 | out = self.bn2(out) 86 | out = self.relu(out) 87 | 88 | out = self.conv3(out) 89 | out = self.bn3(out) 90 | 91 | if self.downsample is not None: 92 | residual = self.downsample(x) 93 | 94 | out += residual 95 | out = self.relu(out) 96 | 97 | return out 98 | 99 | 100 | class SEBlock(nn.Module): 101 | def __init__(self, channel, reduction=16): 102 | super(SEBlock, self).__init__() 103 | self.avg_pool = nn.AdaptiveAvgPool2d(1) 104 | self.fc = nn.Sequential( 105 | nn.Linear(channel, channel // reduction), 106 | nn.PReLU(), 107 | nn.Linear(channel // reduction, channel), 108 | nn.Sigmoid() 109 | ) 110 | 111 | def forward(self, x): 112 | b, c, _, _ = x.size() 113 | y = self.avg_pool(x).view(b, c) 114 | y = self.fc(y).view(b, c, 1, 1) 115 | return x * y 116 | 117 | 118 | class IRBlock(nn.Module): 119 | expansion = 1 
120 | 121 | def __init__(self, inplanes, planes, stride=1, downsample=None, use_se=True): 122 | super(IRBlock, self).__init__() 123 | self.bn0 = nn.BatchNorm2d(inplanes) 124 | self.conv1 = conv3x3(inplanes, inplanes) 125 | self.bn1 = nn.BatchNorm2d(inplanes) 126 | self.prelu = nn.PReLU() 127 | self.conv2 = conv3x3(inplanes, planes, stride) 128 | self.bn2 = nn.BatchNorm2d(planes) 129 | self.downsample = downsample 130 | self.stride = stride 131 | self.use_se = use_se 132 | if self.use_se: 133 | self.se = SEBlock(planes) 134 | 135 | def forward(self, x): 136 | residual = x 137 | out = self.bn0(x) 138 | out = self.conv1(out) 139 | out = self.bn1(out) 140 | out = self.prelu(out) 141 | 142 | out = self.conv2(out) 143 | out = self.bn2(out) 144 | if self.use_se: 145 | out = self.se(out) 146 | 147 | if self.downsample is not None: 148 | residual = self.downsample(x) 149 | 150 | out += residual 151 | out = self.prelu(out) 152 | 153 | return out 154 | 155 | 156 | class ResNet(nn.Module): 157 | 158 | def __init__(self, block, layers, use_se=True, im_size=112): 159 | self.inplanes = 64 160 | self.use_se = use_se 161 | super(ResNet, self).__init__() 162 | self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, bias=False) 163 | self.bn1 = nn.BatchNorm2d(64) 164 | self.prelu = nn.PReLU() 165 | self.maxpool = nn.MaxPool2d(kernel_size=2, stride=2) 166 | self.layer1 = self._make_layer(block, 64, layers[0]) 167 | self.layer2 = self._make_layer(block, 128, layers[1], stride=2) 168 | self.layer3 = self._make_layer(block, 256, layers[2], stride=2) 169 | self.layer4 = self._make_layer(block, 512, layers[3], stride=2) 170 | self.bn2 = nn.BatchNorm2d(512) 171 | self.dropout = nn.Dropout() 172 | 173 | if im_size == 112: 174 | self.fc = nn.Linear(512 * 7 * 7, 512) 175 | else: # 224 176 | self.fc = nn.Linear(512 * 14 * 14, 512) 177 | self.bn3 = nn.BatchNorm1d(512) 178 | 179 | for m in self.modules(): 180 | if isinstance(m, nn.Conv2d): 181 | nn.init.xavier_normal_(m.weight) 182 | elif isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.BatchNorm1d): 183 | nn.init.constant_(m.weight, 1) 184 | nn.init.constant_(m.bias, 0) 185 | elif isinstance(m, nn.Linear): 186 | nn.init.xavier_normal_(m.weight) 187 | nn.init.constant_(m.bias, 0) 188 | 189 | def _make_layer(self, block, planes, blocks, stride=1): 190 | downsample = None 191 | if stride != 1 or self.inplanes != planes * block.expansion: 192 | downsample = nn.Sequential( 193 | nn.Conv2d(self.inplanes, planes * block.expansion, 194 | kernel_size=1, stride=stride, bias=False), 195 | nn.BatchNorm2d(planes * block.expansion), 196 | ) 197 | 198 | layers = [] 199 | layers.append(block(self.inplanes, planes, stride, downsample, use_se=self.use_se)) 200 | self.inplanes = planes 201 | for i in range(1, blocks): 202 | layers.append(block(self.inplanes, planes, use_se=self.use_se)) 203 | 204 | return nn.Sequential(*layers) 205 | 206 | def forward(self, x): 207 | x = self.conv1(x) 208 | x = self.bn1(x) 209 | x = self.prelu(x) 210 | x = self.maxpool(x) 211 | 212 | x = self.layer1(x) 213 | x = self.layer2(x) 214 | x = self.layer3(x) 215 | x = self.layer4(x) 216 | 217 | x = self.bn2(x) 218 | x = self.dropout(x) 219 | x = x.view(x.size(0), -1) 220 | x = self.fc(x) 221 | x = self.bn3(x) 222 | 223 | return x 224 | 225 | 226 | def resnet18(args, **kwargs): 227 | model = ResNet(IRBlock, [2, 2, 2, 2], use_se=args.use_se, **kwargs) 228 | if args.pretrained: 229 | model.load_state_dict(model_zoo.load_url(model_urls['resnet18'])) 230 | return model 231 | 232 | 233 | def resnet34(args, **kwargs): 
234 | model = ResNet(IRBlock, [3, 4, 6, 3], use_se=args.use_se, **kwargs) 235 | if args.pretrained: 236 | model.load_state_dict(model_zoo.load_url(model_urls['resnet34'])) 237 | return model 238 | 239 | 240 | def resnet50(args, **kwargs): 241 | model = ResNet(IRBlock, [3, 4, 6, 3], use_se=args.use_se, im_size=args.im_size, **kwargs) 242 | if args.pretrained: 243 | model.load_state_dict(model_zoo.load_url(model_urls['resnet50'])) 244 | return model 245 | 246 | 247 | def resnet101(args, **kwargs): 248 | model = ResNet(IRBlock, [3, 4, 23, 3], use_se=args.use_se, **kwargs) 249 | # if args.pretrained: 250 | # model.load_state_dict(model_zoo.load_url(model_urls['resnet101'])) 251 | return model 252 | 253 | 254 | def resnet152(args, **kwargs): 255 | model = ResNet(IRBlock, [3, 8, 36, 3], use_se=args.use_se, **kwargs) 256 | if args.pretrained: 257 | model.load_state_dict(model_zoo.load_url(model_urls['resnet152'])) 258 | return model 259 | 260 | 261 | class ArcMarginModel(nn.Module): 262 | def __init__(self, args): 263 | super(ArcMarginModel, self).__init__() 264 | 265 | self.weight = Parameter(torch.FloatTensor(num_classes, args.emb_size)) 266 | nn.init.xavier_uniform_(self.weight) 267 | 268 | self.easy_margin = args.easy_margin 269 | self.m = args.margin_m 270 | self.s = args.margin_s 271 | 272 | self.cos_m = math.cos(self.m) 273 | self.sin_m = math.sin(self.m) 274 | self.th = math.cos(math.pi - self.m) 275 | self.mm = math.sin(math.pi - self.m) * self.m 276 | 277 | def forward(self, input, label): 278 | x = F.normalize(input) 279 | W = F.normalize(self.weight) 280 | cosine = F.linear(x, W) 281 | sine = torch.sqrt(1.0 - torch.pow(cosine, 2)) 282 | phi = cosine * self.cos_m - sine * self.sin_m # cos(theta + m) 283 | if self.easy_margin: 284 | phi = torch.where(cosine > 0, phi, cosine) 285 | else: 286 | phi = torch.where(cosine > self.th, phi, cosine - self.mm) 287 | one_hot = torch.zeros(cosine.size(), device=device) 288 | one_hot.scatter_(1, label.view(-1, 1).long(), 1) 289 | output = (one_hot * phi) + ((1.0 - one_hot) * cosine) 290 | output *= self.s 291 | return output 292 | 293 | 294 | if __name__ == "__main__": 295 | from utils import parse_args 296 | from torchscope import scope 297 | 298 | args = parse_args() 299 | model = resnet101(args) 300 | scope(model, (3, 112, 112)) 301 | -------------------------------------------------------------------------------- /optimizer.py: -------------------------------------------------------------------------------- 1 | class InsightFaceOptimizer(object): 2 | """A simple wrapper class for learning rate scheduling""" 3 | 4 | def __init__(self, optimizer): 5 | self.optimizer = optimizer 6 | self.step_num = 0 7 | self.lr = 0.1 8 | 9 | def zero_grad(self): 10 | self.optimizer.zero_grad() 11 | 12 | def step(self): 13 | self._update_lr() 14 | self.optimizer.step() 15 | 16 | def _update_lr(self): 17 | self.step_num += 1 18 | # divide the learning rate at 100K,160K iterations 19 | if self.step_num in [100000, 160000]: 20 | self.lr = self.lr / 10 21 | for param_group in self.optimizer.param_groups: 22 | param_group['lr'] = self.lr 23 | 24 | def clip_gradient(self, grad_clip): 25 | for group in self.optimizer.param_groups: 26 | for param in group['params']: 27 | if param.grad is not None: 28 | param.grad.data.clamp_(-grad_clip, grad_clip) 29 | 30 | def adjust_learning_rate(self, new_lr): 31 | for param_group in self.optimizer.param_groups: 32 | param_group['lr'] = new_lr 33 | print("The new learning rate is %f\n" % (self.optimizer.param_groups[0]['lr'],)) 34 | 
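A minimal usage sketch for the wrapper above (assumed, not from this repo; the SGD momentum and weight decay are illustrative values). Note that InsightFaceOptimizer hard-codes its starting lr to 0.1, so the wrapped optimizer should be constructed with that same value, otherwise training runs at the wrapped optimizer's rate until the first decay at step 100000 snaps it to 0.01:

import torch
from torch import nn

from optimizer import InsightFaceOptimizer

model = nn.Linear(512, 10)  # stand-in for the real ResNet + ArcMarginModel
optimizer = InsightFaceOptimizer(
    torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9, weight_decay=5e-4))

for step in range(1000):  # the real schedule divides lr by 10 at 100K and 160K steps
    loss = model(torch.randn(4, 512)).pow(2).mean()
    optimizer.zero_grad()
    loss.backward()
    optimizer.clip_gradient(5.0)  # clamp every gradient entry to [-5, 5]
    optimizer.step()              # bumps step_num, applies the decay rule, then updates parameters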
-------------------------------------------------------------------------------- /pre_process.py: -------------------------------------------------------------------------------- 1 | import os 2 | import pickle 3 | 4 | import cv2 as cv 5 | import mxnet as mx 6 | from mxnet import recordio 7 | from tqdm import tqdm 8 | 9 | from config import path_imgidx, path_imgrec, IMG_DIR, pickle_file 10 | from utils import ensure_folder 11 | 12 | if __name__ == "__main__": 13 | ensure_folder(IMG_DIR) 14 | imgrec = recordio.MXIndexedRecordIO(path_imgidx, path_imgrec, 'r') 15 | # print(len(imgrec)) 16 | 17 | samples = [] 18 | class_ids = set() 19 | 20 | # # %% 1 ~ 5179510 21 | 22 | try: 23 | for i in tqdm(range(10000000)): 24 | # print(i) 25 | header, s = recordio.unpack(imgrec.read_idx(i + 1)) 26 | img = mx.image.imdecode(s).asnumpy() 27 | # print(img.shape) 28 | img = cv.cvtColor(img, cv.COLOR_RGB2BGR) 29 | # print(header.label) 30 | # print(type(header.label)) 31 | label = int(header.label) 32 | class_ids.add(label) 33 | filename = '{}.jpg'.format(i) 34 | samples.append({'img': filename, 'label': label}) 35 | filename = os.path.join(IMG_DIR, filename) 36 | cv.imwrite(filename, img) 37 | # except KeyboardInterrupt: 38 | # raise 39 | except Exception as err: 40 | print(err) 41 | 42 | with open(pickle_file, 'wb') as file: 43 | pickle.dump(samples, file) 44 | 45 | print('num_samples: ' + str(len(samples))) 46 | 47 | class_ids = list(class_ids) 48 | print(len(class_ids)) 49 | print(max(class_ids)) 50 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | matplotlib 2 | scipy 3 | tqdm 4 | opencv-python 5 | pillow 6 | torch 7 | torchvision 8 | numpy 9 | scikit-image 10 | 11 | 12 | -------------------------------------------------------------------------------- /retinaface/data/__init__.py: -------------------------------------------------------------------------------- 1 | from .wider_face import WiderFaceDetection, detection_collate 2 | from .data_augment import * 3 | from .config import * 4 | -------------------------------------------------------------------------------- /retinaface/data/config.py: -------------------------------------------------------------------------------- 1 | # config.py 2 | 3 | cfg_mnet = { 4 | 'name': 'mobilenet0.25', 5 | 'min_sizes': [[16, 32], [64, 128], [256, 512]], 6 | 'steps': [8, 16, 32], 7 | 'variance': [0.1, 0.2], 8 | 'clip': False, 9 | 'loc_weight': 2.0, 10 | 'gpu_train': True, 11 | 'batch_size': 32, 12 | 'ngpu': 1, 13 | 'epoch': 250, 14 | 'decay1': 190, 15 | 'decay2': 220, 16 | 'image_size': 640, 17 | 'pretrain': False, 18 | 'return_layers': {'stage1': 1, 'stage2': 2, 'stage3': 3}, 19 | 'in_channel': 32, 20 | 'out_channel': 64 21 | } 22 | 23 | cfg_re50 = { 24 | 'name': 'Resnet50', 25 | 'min_sizes': [[16, 32], [64, 128], [256, 512]], 26 | 'steps': [8, 16, 32], 27 | 'variance': [0.1, 0.2], 28 | 'clip': False, 29 | 'loc_weight': 2.0, 30 | 'gpu_train': True, 31 | 'batch_size': 24, 32 | 'ngpu': 4, 33 | 'epoch': 100, 34 | 'decay1': 70, 35 | 'decay2': 90, 36 | 'image_size': 840, 37 | 'pretrain': False, 38 | 'return_layers': {'layer2': 1, 'layer3': 2, 'layer4': 3}, 39 | 'in_channel': 256, 40 | 'out_channel': 256 41 | } 42 | 43 | -------------------------------------------------------------------------------- /retinaface/data/data_augment.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | import 
numpy as np 3 | import random 4 | from retinaface.utils.box_utils import matrix_iof 5 | 6 | 7 | def _crop(image, boxes, labels, landm, img_dim): 8 | height, width, _ = image.shape 9 | pad_image_flag = True 10 | 11 | for _ in range(250): 12 | """ 13 | if random.uniform(0, 1) <= 0.2: 14 | scale = 1.0 15 | else: 16 | scale = random.uniform(0.3, 1.0) 17 | """ 18 | PRE_SCALES = [0.3, 0.45, 0.6, 0.8, 1.0] 19 | scale = random.choice(PRE_SCALES) 20 | short_side = min(width, height) 21 | w = int(scale * short_side) 22 | h = w 23 | 24 | if width == w: 25 | l = 0 26 | else: 27 | l = random.randrange(width - w) 28 | if height == h: 29 | t = 0 30 | else: 31 | t = random.randrange(height - h) 32 | roi = np.array((l, t, l + w, t + h)) 33 | 34 | value = matrix_iof(boxes, roi[np.newaxis]) 35 | flag = (value >= 1) 36 | if not flag.any(): 37 | continue 38 | 39 | centers = (boxes[:, :2] + boxes[:, 2:]) / 2 40 | mask_a = np.logical_and(roi[:2] < centers, centers < roi[2:]).all(axis=1) 41 | boxes_t = boxes[mask_a].copy() 42 | labels_t = labels[mask_a].copy() 43 | landms_t = landm[mask_a].copy() 44 | landms_t = landms_t.reshape([-1, 5, 2]) 45 | 46 | if boxes_t.shape[0] == 0: 47 | continue 48 | 49 | image_t = image[roi[1]:roi[3], roi[0]:roi[2]] 50 | 51 | boxes_t[:, :2] = np.maximum(boxes_t[:, :2], roi[:2]) 52 | boxes_t[:, :2] -= roi[:2] 53 | boxes_t[:, 2:] = np.minimum(boxes_t[:, 2:], roi[2:]) 54 | boxes_t[:, 2:] -= roi[:2] 55 | 56 | # landm 57 | landms_t[:, :, :2] = landms_t[:, :, :2] - roi[:2] 58 | landms_t[:, :, :2] = np.maximum(landms_t[:, :, :2], np.array([0, 0])) 59 | landms_t[:, :, :2] = np.minimum(landms_t[:, :, :2], roi[2:] - roi[:2]) 60 | landms_t = landms_t.reshape([-1, 10]) 61 | 62 | 63 | # make sure that the cropped image contains at least one face > 16 pixel at training image scale 64 | b_w_t = (boxes_t[:, 2] - boxes_t[:, 0] + 1) / w * img_dim 65 | b_h_t = (boxes_t[:, 3] - boxes_t[:, 1] + 1) / h * img_dim 66 | mask_b = np.minimum(b_w_t, b_h_t) > 0.0 67 | boxes_t = boxes_t[mask_b] 68 | labels_t = labels_t[mask_b] 69 | landms_t = landms_t[mask_b] 70 | 71 | if boxes_t.shape[0] == 0: 72 | continue 73 | 74 | pad_image_flag = False 75 | 76 | return image_t, boxes_t, labels_t, landms_t, pad_image_flag 77 | return image, boxes, labels, landm, pad_image_flag 78 | 79 | 80 | def _distort(image): 81 | 82 | def _convert(image, alpha=1, beta=0): 83 | tmp = image.astype(float) * alpha + beta 84 | tmp[tmp < 0] = 0 85 | tmp[tmp > 255] = 255 86 | image[:] = tmp 87 | 88 | image = image.copy() 89 | 90 | if random.randrange(2): 91 | 92 | #brightness distortion 93 | if random.randrange(2): 94 | _convert(image, beta=random.uniform(-32, 32)) 95 | 96 | #contrast distortion 97 | if random.randrange(2): 98 | _convert(image, alpha=random.uniform(0.5, 1.5)) 99 | 100 | image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV) 101 | 102 | #saturation distortion 103 | if random.randrange(2): 104 | _convert(image[:, :, 1], alpha=random.uniform(0.5, 1.5)) 105 | 106 | #hue distortion 107 | if random.randrange(2): 108 | tmp = image[:, :, 0].astype(int) + random.randint(-18, 18) 109 | tmp %= 180 110 | image[:, :, 0] = tmp 111 | 112 | image = cv2.cvtColor(image, cv2.COLOR_HSV2BGR) 113 | 114 | else: 115 | 116 | #brightness distortion 117 | if random.randrange(2): 118 | _convert(image, beta=random.uniform(-32, 32)) 119 | 120 | image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV) 121 | 122 | #saturation distortion 123 | if random.randrange(2): 124 | _convert(image[:, :, 1], alpha=random.uniform(0.5, 1.5)) 125 | 126 | #hue distortion 127 | if 
random.randrange(2): 128 | tmp = image[:, :, 0].astype(int) + random.randint(-18, 18) 129 | tmp %= 180 130 | image[:, :, 0] = tmp 131 | 132 | image = cv2.cvtColor(image, cv2.COLOR_HSV2BGR) 133 | 134 | #contrast distortion 135 | if random.randrange(2): 136 | _convert(image, alpha=random.uniform(0.5, 1.5)) 137 | 138 | return image 139 | 140 | 141 | def _expand(image, boxes, fill, p): 142 | if random.randrange(2): 143 | return image, boxes 144 | 145 | height, width, depth = image.shape 146 | 147 | scale = random.uniform(1, p) 148 | w = int(scale * width) 149 | h = int(scale * height) 150 | 151 | left = random.randint(0, w - width) 152 | top = random.randint(0, h - height) 153 | 154 | boxes_t = boxes.copy() 155 | boxes_t[:, :2] += (left, top) 156 | boxes_t[:, 2:] += (left, top) 157 | expand_image = np.empty( 158 | (h, w, depth), 159 | dtype=image.dtype) 160 | expand_image[:, :] = fill 161 | expand_image[top:top + height, left:left + width] = image 162 | image = expand_image 163 | 164 | return image, boxes_t 165 | 166 | 167 | def _mirror(image, boxes, landms): 168 | _, width, _ = image.shape 169 | if random.randrange(2): 170 | image = image[:, ::-1] 171 | boxes = boxes.copy() 172 | boxes[:, 0::2] = width - boxes[:, 2::-2] 173 | 174 | # landm 175 | landms = landms.copy() 176 | landms = landms.reshape([-1, 5, 2]) 177 | landms[:, :, 0] = width - landms[:, :, 0] 178 | tmp = landms[:, 1, :].copy() 179 | landms[:, 1, :] = landms[:, 0, :] 180 | landms[:, 0, :] = tmp 181 | tmp1 = landms[:, 4, :].copy() 182 | landms[:, 4, :] = landms[:, 3, :] 183 | landms[:, 3, :] = tmp1 184 | landms = landms.reshape([-1, 10]) 185 | 186 | return image, boxes, landms 187 | 188 | 189 | def _pad_to_square(image, rgb_mean, pad_image_flag): 190 | if not pad_image_flag: 191 | return image 192 | height, width, _ = image.shape 193 | long_side = max(width, height) 194 | image_t = np.empty((long_side, long_side, 3), dtype=image.dtype) 195 | image_t[:, :] = rgb_mean 196 | image_t[0:0 + height, 0:0 + width] = image 197 | return image_t 198 | 199 | 200 | def _resize_subtract_mean(image, insize, rgb_mean): 201 | interp_methods = [cv2.INTER_LINEAR, cv2.INTER_CUBIC, cv2.INTER_AREA, cv2.INTER_NEAREST, cv2.INTER_LANCZOS4] 202 | interp_method = interp_methods[random.randrange(5)] 203 | image = cv2.resize(image, (insize, insize), interpolation=interp_method) 204 | image = image.astype(np.float32) 205 | image -= rgb_mean 206 | return image.transpose(2, 0, 1) 207 | 208 | 209 | class preproc(object): 210 | 211 | def __init__(self, img_dim, rgb_means): 212 | self.img_dim = img_dim 213 | self.rgb_means = rgb_means 214 | 215 | def __call__(self, image, targets): 216 | assert targets.shape[0] > 0, "this image does not have gt" 217 | 218 | boxes = targets[:, :4].copy() 219 | labels = targets[:, -1].copy() 220 | landm = targets[:, 4:-1].copy() 221 | 222 | image_t, boxes_t, labels_t, landm_t, pad_image_flag = _crop(image, boxes, labels, landm, self.img_dim) 223 | image_t = _distort(image_t) 224 | image_t = _pad_to_square(image_t,self.rgb_means, pad_image_flag) 225 | image_t, boxes_t, landm_t = _mirror(image_t, boxes_t, landm_t) 226 | height, width, _ = image_t.shape 227 | image_t = _resize_subtract_mean(image_t, self.img_dim, self.rgb_means) 228 | boxes_t[:, 0::2] /= width 229 | boxes_t[:, 1::2] /= height 230 | 231 | landm_t[:, 0::2] /= width 232 | landm_t[:, 1::2] /= height 233 | 234 | labels_t = np.expand_dims(labels_t, 1) 235 | targets_t = np.hstack((boxes_t, landm_t, labels_t)) 236 | 237 | return image_t, targets_t 238 | 
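To see what the preproc pipeline above produces end to end, here is a sketch with a synthetic image and one hand-made annotation row (all coordinates are made-up values; each row is laid out as [x1, y1, x2, y2, ten landmark coordinates, label], which is exactly how __call__ slices it apart):

import numpy as np

from retinaface.data.data_augment import preproc

img_dim = 640                # matches cfg_mnet['image_size']
rgb_means = (104, 117, 123)  # the BGR means also used in detector.py
image = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)
targets = np.array([[100, 120, 220, 260,            # bbox x1 y1 x2 y2
                     130, 160, 190, 160, 160, 190,  # eyes and nose tip
                     140, 220, 180, 220,            # mouth corners
                     1.0]])                         # 1.0 = landmarks are valid

image_t, targets_t = preproc(img_dim, rgb_means)(image, targets)
print(image_t.shape)    # (3, 640, 640): CHW float32, mean-subtracted
print(targets_t.shape)  # (N, 15), all coordinates normalized to [0, 1]

Because _crop, _distort, and _mirror are all randomized, repeated calls on the same input yield different tensors; that is the intended train-time augmentation.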
-------------------------------------------------------------------------------- /retinaface/data/wider_face.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | import numpy as np 3 | import torch 4 | import torch.utils.data as data 5 | 6 | class WiderFaceDetection(data.Dataset): 7 | def __init__(self, txt_path, preproc=None): 8 | self.preproc = preproc 9 | self.imgs_path = [] 10 | self.words = [] 11 | with open(txt_path, 'r') as f: 12 | lines = f.readlines() 13 | isFirst = True 14 | labels = [] 15 | for line in lines: 16 | line = line.rstrip() 17 | if line.startswith('#'): 18 | if isFirst is True: 19 | isFirst = False 20 | else: 21 | labels_copy = labels.copy() 22 | self.words.append(labels_copy) 23 | labels.clear() 24 | path = line[2:] 25 | path = txt_path.replace('label.txt', 'images/') + path 26 | self.imgs_path.append(path) 27 | else: 28 | line = line.split(' ') 29 | label = [float(x) for x in line] 30 | labels.append(label) 31 | 32 | self.words.append(labels) 33 | 34 | def __len__(self): 35 | return len(self.imgs_path) 36 | 37 | def __getitem__(self, index): 38 | img = cv2.imread(self.imgs_path[index]) 39 | height, width, _ = img.shape 40 | 41 | labels = self.words[index] 42 | annotations = np.zeros((0, 15)) 43 | if len(labels) == 0: 44 | return annotations 45 | for idx, label in enumerate(labels): 46 | annotation = np.zeros((1, 15)) 47 | # bbox 48 | annotation[0, 0] = label[0] # x1 49 | annotation[0, 1] = label[1] # y1 50 | annotation[0, 2] = label[0] + label[2] # x2 51 | annotation[0, 3] = label[1] + label[3] # y2 52 | 53 | # landmarks 54 | annotation[0, 4] = label[4] # l0_x 55 | annotation[0, 5] = label[5] # l0_y 56 | annotation[0, 6] = label[7] # l1_x 57 | annotation[0, 7] = label[8] # l1_y 58 | annotation[0, 8] = label[10] # l2_x 59 | annotation[0, 9] = label[11] # l2_y 60 | annotation[0, 10] = label[13] # l3_x 61 | annotation[0, 11] = label[14] # l3_y 62 | annotation[0, 12] = label[16] # l4_x 63 | annotation[0, 13] = label[17] # l4_y 64 | if (annotation[0, 4] < 0): 65 | annotation[0, 14] = -1 66 | else: 67 | annotation[0, 14] = 1 68 | 69 | annotations = np.append(annotations, annotation, axis=0) 70 | target = np.array(annotations) 71 | if self.preproc is not None: 72 | img, target = self.preproc(img, target) 73 | 74 | return torch.from_numpy(img), target 75 | 76 | 77 | def detection_collate(batch): 78 | """Custom collate fn for dealing with batches of images that have a different 79 | number of associated object annotations (bounding boxes). 
80 | 81 | Arguments: 82 | batch: (tuple) A tuple of tensor images and lists of annotations 83 | 84 | Returns: 85 | A tuple containing: 86 | 1) (tensor) batch of images stacked on their 0 dim 87 | 2) (list of tensors) annotations for a given image are stacked on 0 dim 88 | """ 89 | targets = [] 90 | imgs = [] 91 | for _, sample in enumerate(batch): 92 | for _, tup in enumerate(sample): 93 | if torch.is_tensor(tup): 94 | imgs.append(tup) 95 | elif isinstance(tup, type(np.empty(0))): 96 | annos = torch.from_numpy(tup).float() 97 | targets.append(annos) 98 | 99 | return (torch.stack(imgs, 0), targets) 100 | -------------------------------------------------------------------------------- /retinaface/detector.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function 2 | 3 | import numpy as np 4 | import torch 5 | import torch.backends.cudnn as cudnn 6 | 7 | from retinaface.data import cfg_mnet 8 | from retinaface.layers.functions.prior_box import PriorBox 9 | from retinaface.loader import load_model 10 | from retinaface.utils.box_utils import decode, decode_landm 11 | from retinaface.utils.nms.py_cpu_nms import py_cpu_nms 12 | 13 | 14 | class RetinafaceDetector: 15 | def __init__(self, net='mnet', type='cuda'): 16 | cudnn.benchmark = True 17 | self.net = net 18 | self.device = torch.device(type) 19 | self.model = load_model(net).to(self.device) 20 | self.model.eval() 21 | 22 | def detect_faces(self, img_raw, confidence_threshold=0.9, top_k=5000, nms_threshold=0.4, keep_top_k=750, resize=1): 23 | img = np.float32(img_raw) 24 | im_height, im_width = img.shape[:2] 25 | scale = torch.Tensor([img.shape[1], img.shape[0], img.shape[1], img.shape[0]]) 26 | img -= (104, 117, 123) 27 | img = img.transpose(2, 0, 1) 28 | img = torch.from_numpy(img).unsqueeze(0) 29 | img = img.to(self.device) 30 | scale = scale.to(self.device) 31 | 32 | # tic = time.time() 33 | with torch.no_grad(): 34 | loc, conf, landms = self.model(img) # forward pass 35 | # print('net forward time: {:.4f}'.format(time.time() - tic)) 36 | 37 | priorbox = PriorBox(cfg_mnet, image_size=(im_height, im_width)) 38 | priors = priorbox.forward() 39 | priors = priors.to(self.device) 40 | prior_data = priors.data 41 | boxes = decode(loc.data.squeeze(0), prior_data, cfg_mnet['variance']) 42 | boxes = boxes * scale / resize 43 | boxes = boxes.cpu().numpy() 44 | scores = conf.squeeze(0).data.cpu().numpy()[:, 1] 45 | landms = decode_landm(landms.data.squeeze(0), prior_data, cfg_mnet['variance']) 46 | scale1 = torch.Tensor([img.shape[3], img.shape[2], img.shape[3], img.shape[2], 47 | img.shape[3], img.shape[2], img.shape[3], img.shape[2], 48 | img.shape[3], img.shape[2]]) 49 | scale1 = scale1.to(self.device) 50 | landms = landms * scale1 / resize 51 | landms = landms.cpu().numpy() 52 | 53 | # ignore low scores 54 | inds = np.where(scores > confidence_threshold)[0] 55 | boxes = boxes[inds] 56 | landms = landms[inds] 57 | scores = scores[inds] 58 | 59 | # keep top-K before NMS 60 | order = scores.argsort()[::-1][:top_k] 61 | boxes = boxes[order] 62 | landms = landms[order] 63 | scores = scores[order] 64 | 65 | # do NMS 66 | dets = np.hstack((boxes, scores[:, np.newaxis])).astype(np.float32, copy=False) 67 | keep = py_cpu_nms(dets, nms_threshold) 68 | # keep = nms(dets, args.nms_threshold,force_cpu=args.cpu) 69 | dets = dets[keep, :] 70 | landms = landms[keep] 71 | 72 | # keep top-K after NMS 73 | dets = dets[:keep_top_k, :] 74 | landms = landms[:keep_top_k, :] 75 | # print(landms.shape) 
76 | landms = landms.reshape((-1, 5, 2)) 77 | # print(landms.shape) 78 | landms = landms.transpose((0, 2, 1)) 79 | # print(landms.shape) 80 | landms = landms.reshape(-1, 10, ) 81 | # print(landms.shape) 82 | 83 | return dets, landms 84 | 85 | 86 | detector = RetinafaceDetector(net='mnet') 87 | -------------------------------------------------------------------------------- /retinaface/layers/__init__.py: -------------------------------------------------------------------------------- 1 | from .functions import * 2 | from .modules import * 3 | -------------------------------------------------------------------------------- /retinaface/layers/functions/prior_box.py: -------------------------------------------------------------------------------- 1 | from itertools import product as product 2 | from math import ceil 3 | 4 | import torch 5 | 6 | 7 | class PriorBox(object): 8 | def __init__(self, cfg, image_size=None, phase='train'): 9 | super(PriorBox, self).__init__() 10 | self.min_sizes = cfg['min_sizes'] 11 | self.steps = cfg['steps'] 12 | self.clip = cfg['clip'] 13 | self.image_size = image_size 14 | self.feature_maps = [[ceil(self.image_size[0] / step), ceil(self.image_size[1] / step)] for step in self.steps] 15 | self.name = "s" 16 | 17 | def forward(self): 18 | anchors = [] 19 | for k, f in enumerate(self.feature_maps): 20 | min_sizes = self.min_sizes[k] 21 | for i, j in product(range(f[0]), range(f[1])): 22 | for min_size in min_sizes: 23 | s_kx = min_size / self.image_size[1] 24 | s_ky = min_size / self.image_size[0] 25 | dense_cx = [x * self.steps[k] / self.image_size[1] for x in [j + 0.5]] 26 | dense_cy = [y * self.steps[k] / self.image_size[0] for y in [i + 0.5]] 27 | for cy, cx in product(dense_cy, dense_cx): 28 | anchors += [cx, cy, s_kx, s_ky] 29 | 30 | # back to torch land 31 | output = torch.Tensor(anchors).view(-1, 4) 32 | if self.clip: 33 | output.clamp_(max=1, min=0) 34 | return output 35 | -------------------------------------------------------------------------------- /retinaface/layers/modules/__init__.py: -------------------------------------------------------------------------------- 1 | from .multibox_loss import MultiBoxLoss 2 | 3 | __all__ = ['MultiBoxLoss'] 4 | -------------------------------------------------------------------------------- /retinaface/layers/modules/multibox_loss.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import torch.nn.functional as F 4 | 5 | from retinaface.data import cfg_mnet 6 | from retinaface.utils.box_utils import match, log_sum_exp 7 | 8 | GPU = cfg_mnet['gpu_train'] 9 | 10 | 11 | class MultiBoxLoss(nn.Module): 12 | """SSD Weighted Loss Function 13 | Compute Targets: 14 | 1) Produce Confidence Target Indices by matching ground truth boxes 15 | with (default) 'priorboxes' that have jaccard index > threshold parameter 16 | (default threshold: 0.5). 17 | 2) Produce localization target by 'encoding' variance into offsets of ground 18 | truth boxes and their matched 'priorboxes'. 19 | 3) Hard negative mining to filter the excessive number of negative examples 20 | that comes with using a large number of default bounding boxes. 21 | (default negative:positive ratio 3:1) 22 | Objective Loss: 23 | L(x,c,l,g) = (Lconf(x, c) + αLloc(x,l,g)) / N 24 | Where, Lconf is the CrossEntropy Loss and Lloc is the SmoothL1 Loss 25 | weighted by α which is set to 1 by cross val. 
26 | Args: 27 | c: class confidences, 28 | l: predicted boxes, 29 | g: ground truth boxes 30 | N: number of matched default boxes 31 | See: https://arxiv.org/pdf/1512.02325.pdf for more details. 32 | """ 33 | 34 | def __init__(self, num_classes, overlap_thresh, prior_for_matching, bkg_label, neg_mining, neg_pos, neg_overlap, 35 | encode_target): 36 | super(MultiBoxLoss, self).__init__() 37 | self.num_classes = num_classes 38 | self.threshold = overlap_thresh 39 | self.background_label = bkg_label 40 | self.encode_target = encode_target 41 | self.use_prior_for_matching = prior_for_matching 42 | self.do_neg_mining = neg_mining 43 | self.negpos_ratio = neg_pos 44 | self.neg_overlap = neg_overlap 45 | self.variance = [0.1, 0.2] 46 | 47 | def forward(self, predictions, priors, targets): 48 | """Multibox Loss 49 | Args: 50 | predictions (tuple): A tuple containing loc preds, conf preds, 51 | and prior boxes from SSD net. 52 | conf shape: torch.size(batch_size,num_priors,num_classes) 53 | loc shape: torch.size(batch_size,num_priors,4) 54 | priors shape: torch.size(num_priors,4) 55 | 56 | ground_truth (tensor): Ground truth boxes and labels for a batch, 57 | shape: [batch_size,num_objs,5] (last idx is the label). 58 | """ 59 | 60 | loc_data, conf_data, landm_data = predictions 61 | priors = priors 62 | num = loc_data.size(0) 63 | num_priors = (priors.size(0)) 64 | 65 | # match priors (default boxes) and ground truth boxes 66 | loc_t = torch.Tensor(num, num_priors, 4) 67 | landm_t = torch.Tensor(num, num_priors, 10) 68 | conf_t = torch.LongTensor(num, num_priors) 69 | for idx in range(num): 70 | truths = targets[idx][:, :4].data 71 | labels = targets[idx][:, -1].data 72 | landms = targets[idx][:, 4:14].data 73 | defaults = priors.data 74 | match(self.threshold, truths, defaults, self.variance, labels, landms, loc_t, conf_t, landm_t, idx) 75 | if GPU: 76 | loc_t = loc_t.cuda() 77 | conf_t = conf_t.cuda() 78 | landm_t = landm_t.cuda() 79 | 80 | zeros = torch.tensor(0).cuda() 81 | # landm Loss (Smooth L1) 82 | # Shape: [batch,num_priors,10] 83 | pos1 = conf_t > zeros 84 | num_pos_landm = pos1.long().sum(1, keepdim=True) 85 | N1 = max(num_pos_landm.data.sum().float(), 1) 86 | pos_idx1 = pos1.unsqueeze(pos1.dim()).expand_as(landm_data) 87 | landm_p = landm_data[pos_idx1].view(-1, 10) 88 | landm_t = landm_t[pos_idx1].view(-1, 10) 89 | loss_landm = F.smooth_l1_loss(landm_p, landm_t, reduction='sum') 90 | 91 | pos = conf_t != zeros 92 | conf_t[pos] = 1 93 | 94 | # Localization Loss (Smooth L1) 95 | # Shape: [batch,num_priors,4] 96 | pos_idx = pos.unsqueeze(pos.dim()).expand_as(loc_data) 97 | loc_p = loc_data[pos_idx].view(-1, 4) 98 | loc_t = loc_t[pos_idx].view(-1, 4) 99 | loss_l = F.smooth_l1_loss(loc_p, loc_t, reduction='sum') 100 | 101 | # Compute max conf across batch for hard negative mining 102 | batch_conf = conf_data.view(-1, self.num_classes) 103 | loss_c = log_sum_exp(batch_conf) - batch_conf.gather(1, conf_t.view(-1, 1)) 104 | 105 | # Hard Negative Mining 106 | loss_c[pos.view(-1, 1)] = 0 # filter out pos boxes for now 107 | loss_c = loss_c.view(num, -1) 108 | _, loss_idx = loss_c.sort(1, descending=True) 109 | _, idx_rank = loss_idx.sort(1) 110 | num_pos = pos.long().sum(1, keepdim=True) 111 | num_neg = torch.clamp(self.negpos_ratio * num_pos, max=pos.size(1) - 1) 112 | neg = idx_rank < num_neg.expand_as(idx_rank) 113 | 114 | # Confidence Loss Including Positive and Negative Examples 115 | pos_idx = pos.unsqueeze(2).expand_as(conf_data) 116 | neg_idx = neg.unsqueeze(2).expand_as(conf_data) 117 
| conf_p = conf_data[(pos_idx + neg_idx).gt(0)].view(-1, self.num_classes) 118 | targets_weighted = conf_t[(pos + neg).gt(0)] 119 | loss_c = F.cross_entropy(conf_p, targets_weighted, reduction='sum') 120 | 121 | # Sum of losses: L(x,c,l,g) = (Lconf(x, c) + αLloc(x,l,g)) / N 122 | N = max(num_pos.data.sum().float(), 1) 123 | loss_l /= N 124 | loss_c /= N 125 | loss_landm /= N1 126 | 127 | return loss_l, loss_c, loss_landm 128 | -------------------------------------------------------------------------------- /retinaface/loader.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function 2 | 3 | import torch 4 | 5 | from retinaface.data import cfg_mnet, cfg_re50 6 | from retinaface.models.retinaface import RetinaFace 7 | 8 | 9 | def check_keys(model, pretrained_state_dict): 10 | ckpt_keys = set(pretrained_state_dict.keys()) 11 | model_keys = set(model.state_dict().keys()) 12 | used_pretrained_keys = model_keys & ckpt_keys 13 | unused_pretrained_keys = ckpt_keys - model_keys 14 | missing_keys = model_keys - ckpt_keys 15 | # print('Missing keys:{}'.format(len(missing_keys))) 16 | # print('Unused checkpoint keys:{}'.format(len(unused_pretrained_keys))) 17 | # print('Used keys:{}'.format(len(used_pretrained_keys))) 18 | assert len(used_pretrained_keys) > 0, 'load NONE from pretrained checkpoint' 19 | return True 20 | 21 | 22 | def remove_prefix(state_dict, prefix): 23 | ''' Old style model is stored with all names of parameters sharing common prefix 'module.' ''' 24 | # print('remove prefix \'{}\''.format(prefix)) 25 | f = lambda x: x.split(prefix, 1)[-1] if x.startswith(prefix) else x 26 | return {f(key): value for key, value in state_dict.items()} 27 | 28 | 29 | def load_model(net='mnet'): 30 | if net == 'mnet': 31 | pretrained_path = 'retinaface/weights/mobilenet0.25_Final.pth' 32 | # print('Loading pretrained model from {}'.format(pretrained_path)) 33 | model = RetinaFace(cfg=cfg_mnet, phase='test') 34 | else: 35 | pretrained_path = 'retinaface/weights/Resnet50_Final.pth' 36 | # print('Loading pretrained model from {}'.format(pretrained_path)) 37 | model = RetinaFace(cfg=cfg_re50, phase='test') 38 | 39 | device = torch.cuda.current_device() 40 | pretrained_dict = torch.load(pretrained_path, map_location=lambda storage, loc: storage.cuda(device)) 41 | if "state_dict" in pretrained_dict.keys(): 42 | pretrained_dict = remove_prefix(pretrained_dict['state_dict'], 'module.') 43 | else: 44 | pretrained_dict = remove_prefix(pretrained_dict, 'module.') 45 | check_keys(model, pretrained_dict) 46 | model.load_state_dict(pretrained_dict, strict=False) 47 | # print('Finished loading model!') 48 | return model 49 | -------------------------------------------------------------------------------- /retinaface/models/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/foamliu/InsightFace-PyTorch/5b6dba6078b4fb8a9b83a1ec83ff02e308e8e658/retinaface/models/__init__.py -------------------------------------------------------------------------------- /retinaface/models/net.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import torch.nn.functional as F 4 | 5 | 6 | def conv_bn(inp, oup, stride=1, leaky=0): 7 | return nn.Sequential( 8 | nn.Conv2d(inp, oup, 3, stride, 1, bias=False), 9 | nn.BatchNorm2d(oup), 10 | nn.LeakyReLU(negative_slope=leaky, inplace=True) 11 | ) 12 | 13 | 14 | def 
conv_bn_no_relu(inp, oup, stride): 15 | return nn.Sequential( 16 | nn.Conv2d(inp, oup, 3, stride, 1, bias=False), 17 | nn.BatchNorm2d(oup), 18 | ) 19 | 20 | 21 | def conv_bn1X1(inp, oup, stride, leaky=0): 22 | return nn.Sequential( 23 | nn.Conv2d(inp, oup, 1, stride, padding=0, bias=False), 24 | nn.BatchNorm2d(oup), 25 | nn.LeakyReLU(negative_slope=leaky, inplace=True) 26 | ) 27 | 28 | 29 | def conv_dw(inp, oup, stride, leaky=0.1): 30 | return nn.Sequential( 31 | nn.Conv2d(inp, inp, 3, stride, 1, groups=inp, bias=False), 32 | nn.BatchNorm2d(inp), 33 | nn.LeakyReLU(negative_slope=leaky, inplace=True), 34 | 35 | nn.Conv2d(inp, oup, 1, 1, 0, bias=False), 36 | nn.BatchNorm2d(oup), 37 | nn.LeakyReLU(negative_slope=leaky, inplace=True), 38 | ) 39 | 40 | 41 | class SSH(nn.Module): 42 | def __init__(self, in_channel, out_channel): 43 | super(SSH, self).__init__() 44 | assert out_channel % 4 == 0 45 | leaky = 0 46 | if (out_channel <= 64): 47 | leaky = 0.1 48 | self.conv3X3 = conv_bn_no_relu(in_channel, out_channel // 2, stride=1) 49 | 50 | self.conv5X5_1 = conv_bn(in_channel, out_channel // 4, stride=1, leaky=leaky) 51 | self.conv5X5_2 = conv_bn_no_relu(out_channel // 4, out_channel // 4, stride=1) 52 | 53 | self.conv7X7_2 = conv_bn(out_channel // 4, out_channel // 4, stride=1, leaky=leaky) 54 | self.conv7x7_3 = conv_bn_no_relu(out_channel // 4, out_channel // 4, stride=1) 55 | 56 | def forward(self, input): 57 | conv3X3 = self.conv3X3(input) 58 | 59 | conv5X5_1 = self.conv5X5_1(input) 60 | conv5X5 = self.conv5X5_2(conv5X5_1) 61 | 62 | conv7X7_2 = self.conv7X7_2(conv5X5_1) 63 | conv7X7 = self.conv7x7_3(conv7X7_2) 64 | 65 | out = torch.cat([conv3X3, conv5X5, conv7X7], dim=1) 66 | out = F.relu(out) 67 | return out 68 | 69 | 70 | class FPN(nn.Module): 71 | def __init__(self, in_channels_list, out_channels): 72 | super(FPN, self).__init__() 73 | leaky = 0 74 | if (out_channels <= 64): 75 | leaky = 0.1 76 | self.output1 = conv_bn1X1(in_channels_list[0], out_channels, stride=1, leaky=leaky) 77 | self.output2 = conv_bn1X1(in_channels_list[1], out_channels, stride=1, leaky=leaky) 78 | self.output3 = conv_bn1X1(in_channels_list[2], out_channels, stride=1, leaky=leaky) 79 | 80 | self.merge1 = conv_bn(out_channels, out_channels, leaky=leaky) 81 | self.merge2 = conv_bn(out_channels, out_channels, leaky=leaky) 82 | 83 | def forward(self, input): 84 | # names = list(input.keys()) 85 | input = list(input.values()) 86 | 87 | output1 = self.output1(input[0]) 88 | output2 = self.output2(input[1]) 89 | output3 = self.output3(input[2]) 90 | 91 | up3 = F.interpolate(output3, size=[output2.size(2), output2.size(3)], mode="nearest") 92 | output2 = output2 + up3 93 | output2 = self.merge2(output2) 94 | 95 | up2 = F.interpolate(output2, size=[output1.size(2), output1.size(3)], mode="nearest") 96 | output1 = output1 + up2 97 | output1 = self.merge1(output1) 98 | 99 | out = [output1, output2, output3] 100 | return out 101 | 102 | 103 | class MobileNetV1(nn.Module): 104 | def __init__(self): 105 | super(MobileNetV1, self).__init__() 106 | self.stage1 = nn.Sequential( 107 | conv_bn(3, 8, 2, leaky=0.1), # 3 108 | conv_dw(8, 16, 1), # 7 109 | conv_dw(16, 32, 2), # 11 110 | conv_dw(32, 32, 1), # 19 111 | conv_dw(32, 64, 2), # 27 112 | conv_dw(64, 64, 1), # 43 113 | ) 114 | self.stage2 = nn.Sequential( 115 | conv_dw(64, 128, 2), # 43 + 16 = 59 116 | conv_dw(128, 128, 1), # 59 + 32 = 91 117 | conv_dw(128, 128, 1), # 91 + 32 = 123 118 | conv_dw(128, 128, 1), # 123 + 32 = 155 119 | conv_dw(128, 128, 1), # 155 + 32 = 187 120 | 
conv_dw(128, 128, 1), # 187 + 32 = 219 121 | ) 122 | self.stage3 = nn.Sequential( 123 | conv_dw(128, 256, 2), # 219 +3 2 = 241 124 | conv_dw(256, 256, 1), # 241 + 64 = 301 125 | ) 126 | self.avg = nn.AdaptiveAvgPool2d((1, 1)) 127 | self.fc = nn.Linear(256, 1000) 128 | 129 | def forward(self, x): 130 | x = self.stage1(x) 131 | x = self.stage2(x) 132 | x = self.stage3(x) 133 | x = self.avg(x) 134 | # x = self.model(x) 135 | x = x.view(-1, 256) 136 | x = self.fc(x) 137 | return x 138 | -------------------------------------------------------------------------------- /retinaface/models/retinaface.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import torch.nn.functional as F 4 | import torchvision.models._utils as _utils 5 | 6 | from retinaface.models.net import FPN as FPN 7 | from retinaface.models.net import MobileNetV1 as MobileNetV1 8 | from retinaface.models.net import SSH as SSH 9 | 10 | 11 | class ClassHead(nn.Module): 12 | def __init__(self, inchannels=512, num_anchors=3): 13 | super(ClassHead, self).__init__() 14 | self.num_anchors = num_anchors 15 | self.conv1x1 = nn.Conv2d(inchannels, self.num_anchors * 2, kernel_size=(1, 1), stride=1, padding=0) 16 | 17 | def forward(self, x): 18 | out = self.conv1x1(x) 19 | out = out.permute(0, 2, 3, 1).contiguous() 20 | 21 | return out.view(out.shape[0], -1, 2) 22 | 23 | 24 | class BboxHead(nn.Module): 25 | def __init__(self, inchannels=512, num_anchors=3): 26 | super(BboxHead, self).__init__() 27 | self.conv1x1 = nn.Conv2d(inchannels, num_anchors * 4, kernel_size=(1, 1), stride=1, padding=0) 28 | 29 | def forward(self, x): 30 | out = self.conv1x1(x) 31 | out = out.permute(0, 2, 3, 1).contiguous() 32 | 33 | return out.view(out.shape[0], -1, 4) 34 | 35 | 36 | class LandmarkHead(nn.Module): 37 | def __init__(self, inchannels=512, num_anchors=3): 38 | super(LandmarkHead, self).__init__() 39 | self.conv1x1 = nn.Conv2d(inchannels, num_anchors * 10, kernel_size=(1, 1), stride=1, padding=0) 40 | 41 | def forward(self, x): 42 | out = self.conv1x1(x) 43 | out = out.permute(0, 2, 3, 1).contiguous() 44 | 45 | return out.view(out.shape[0], -1, 10) 46 | 47 | 48 | class RetinaFace(nn.Module): 49 | def __init__(self, cfg=None, phase='train'): 50 | """ 51 | :param cfg: Network related settings. 52 | :param phase: train or test. 53 | """ 54 | super(RetinaFace, self).__init__() 55 | self.phase = phase 56 | # backbone = MobileNetV1() 57 | if cfg['name'] == 'mobilenet0.25': 58 | backbone = MobileNetV1() 59 | if cfg['pretrain']: 60 | checkpoint = torch.load("./weights/mobilenetV1X0.25_pretrain.tar", map_location=torch.device('cpu')) 61 | from collections import OrderedDict 62 | new_state_dict = OrderedDict() 63 | for k, v in checkpoint['state_dict'].items(): 64 | name = k[7:] # remove module. 
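# e.g. 'module.stage1.0.0.weight' -> 'stage1.0.0.weight'; checkpoints trained
# under nn.DataParallel prefix every parameter name with 'module.', and
# len('module.') == 7, hence the k[7:] slice above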
65 | new_state_dict[name] = v 66 | # load params 67 | backbone.load_state_dict(new_state_dict) 68 | elif cfg['name'] == 'Resnet50': 69 | import torchvision.models as models 70 | backbone = models.resnet50(pretrained=cfg['pretrain']) 71 | 72 | self.body = _utils.IntermediateLayerGetter(backbone, cfg['return_layers']) 73 | in_channels_stage2 = cfg['in_channel'] 74 | in_channels_list = [ 75 | in_channels_stage2 * 2, 76 | in_channels_stage2 * 4, 77 | in_channels_stage2 * 8, 78 | ] 79 | out_channels = cfg['out_channel'] 80 | self.fpn = FPN(in_channels_list, out_channels) 81 | self.ssh1 = SSH(out_channels, out_channels) 82 | self.ssh2 = SSH(out_channels, out_channels) 83 | self.ssh3 = SSH(out_channels, out_channels) 84 | 85 | self.ClassHead = self._make_class_head(fpn_num=3, inchannels=cfg['out_channel']) 86 | self.BboxHead = self._make_bbox_head(fpn_num=3, inchannels=cfg['out_channel']) 87 | self.LandmarkHead = self._make_landmark_head(fpn_num=3, inchannels=cfg['out_channel']) 88 | 89 | def _make_class_head(self, fpn_num=3, inchannels=64, anchor_num=2): 90 | classhead = nn.ModuleList() 91 | for i in range(fpn_num): 92 | classhead.append(ClassHead(inchannels, anchor_num)) 93 | return classhead 94 | 95 | def _make_bbox_head(self, fpn_num=3, inchannels=64, anchor_num=2): 96 | bboxhead = nn.ModuleList() 97 | for i in range(fpn_num): 98 | bboxhead.append(BboxHead(inchannels, anchor_num)) 99 | return bboxhead 100 | 101 | def _make_landmark_head(self, fpn_num=3, inchannels=64, anchor_num=2): 102 | landmarkhead = nn.ModuleList() 103 | for i in range(fpn_num): 104 | landmarkhead.append(LandmarkHead(inchannels, anchor_num)) 105 | return landmarkhead 106 | 107 | def forward(self, inputs): 108 | out = self.body(inputs) 109 | 110 | # FPN 111 | fpn = self.fpn(out) 112 | 113 | # SSH 114 | feature1 = self.ssh1(fpn[0]) 115 | feature2 = self.ssh2(fpn[1]) 116 | feature3 = self.ssh3(fpn[2]) 117 | features = [feature1, feature2, feature3] 118 | 119 | bbox_regressions = torch.cat([self.BboxHead[i](feature) for i, feature in enumerate(features)], dim=1) 120 | classifications = torch.cat([self.ClassHead[i](feature) for i, feature in enumerate(features)], dim=1) 121 | ldm_regressions = torch.cat([self.LandmarkHead[i](feature) for i, feature in enumerate(features)], dim=1) 122 | 123 | if self.phase == 'train': 124 | output = (bbox_regressions, classifications, ldm_regressions) 125 | else: 126 | output = (bbox_regressions, F.softmax(classifications, dim=-1), ldm_regressions) 127 | return output 128 | -------------------------------------------------------------------------------- /retinaface/utils/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/foamliu/InsightFace-PyTorch/5b6dba6078b4fb8a9b83a1ec83ff02e308e8e658/retinaface/utils/__init__.py -------------------------------------------------------------------------------- /retinaface/utils/box_utils.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import numpy as np 3 | 4 | 5 | def point_form(boxes): 6 | """ Convert prior_boxes to (xmin, ymin, xmax, ymax) 7 | representation for comparison to point form ground truth data. 8 | Args: 9 | boxes: (tensor) center-size default boxes from priorbox layers. 10 | Return: 11 | boxes: (tensor) Converted xmin, ymin, xmax, ymax form of boxes. 
12 | """ 13 | return torch.cat((boxes[:, :2] - boxes[:, 2:]/2, # xmin, ymin 14 | boxes[:, :2] + boxes[:, 2:]/2), 1) # xmax, ymax 15 | 16 | 17 | def center_size(boxes): 18 | """ Convert prior_boxes to (cx, cy, w, h) 19 | representation for comparison to center-size form ground truth data. 20 | Args: 21 | boxes: (tensor) point_form boxes 22 | Return: 23 | boxes: (tensor) Converted xmin, ymin, xmax, ymax form of boxes. 24 | """ 25 | return torch.cat((boxes[:, 2:] + boxes[:, :2])/2, # cx, cy 26 | boxes[:, 2:] - boxes[:, :2], 1) # w, h 27 | 28 | 29 | def intersect(box_a, box_b): 30 | """ We resize both tensors to [A,B,2] without new malloc: 31 | [A,2] -> [A,1,2] -> [A,B,2] 32 | [B,2] -> [1,B,2] -> [A,B,2] 33 | Then we compute the area of intersect between box_a and box_b. 34 | Args: 35 | box_a: (tensor) bounding boxes, Shape: [A,4]. 36 | box_b: (tensor) bounding boxes, Shape: [B,4]. 37 | Return: 38 | (tensor) intersection area, Shape: [A,B]. 39 | """ 40 | A = box_a.size(0) 41 | B = box_b.size(0) 42 | max_xy = torch.min(box_a[:, 2:].unsqueeze(1).expand(A, B, 2), 43 | box_b[:, 2:].unsqueeze(0).expand(A, B, 2)) 44 | min_xy = torch.max(box_a[:, :2].unsqueeze(1).expand(A, B, 2), 45 | box_b[:, :2].unsqueeze(0).expand(A, B, 2)) 46 | inter = torch.clamp((max_xy - min_xy), min=0) 47 | return inter[:, :, 0] * inter[:, :, 1] 48 | 49 | 50 | def jaccard(box_a, box_b): 51 | """Compute the jaccard overlap of two sets of boxes. The jaccard overlap 52 | is simply the intersection over union of two boxes. Here we operate on 53 | ground truth boxes and default boxes. 54 | E.g.: 55 | A ∩ B / A ∪ B = A ∩ B / (area(A) + area(B) - A ∩ B) 56 | Args: 57 | box_a: (tensor) Ground truth bounding boxes, Shape: [num_objects,4] 58 | box_b: (tensor) Prior boxes from priorbox layers, Shape: [num_priors,4] 59 | Return: 60 | jaccard overlap: (tensor) Shape: [box_a.size(0), box_b.size(0)] 61 | """ 62 | inter = intersect(box_a, box_b) 63 | area_a = ((box_a[:, 2]-box_a[:, 0]) * 64 | (box_a[:, 3]-box_a[:, 1])).unsqueeze(1).expand_as(inter) # [A,B] 65 | area_b = ((box_b[:, 2]-box_b[:, 0]) * 66 | (box_b[:, 3]-box_b[:, 1])).unsqueeze(0).expand_as(inter) # [A,B] 67 | union = area_a + area_b - inter 68 | return inter / union # [A,B] 69 | 70 | 71 | def matrix_iou(a, b): 72 | """ 73 | return iou of a and b, numpy version for data augenmentation 74 | """ 75 | lt = np.maximum(a[:, np.newaxis, :2], b[:, :2]) 76 | rb = np.minimum(a[:, np.newaxis, 2:], b[:, 2:]) 77 | 78 | area_i = np.prod(rb - lt, axis=2) * (lt < rb).all(axis=2) 79 | area_a = np.prod(a[:, 2:] - a[:, :2], axis=1) 80 | area_b = np.prod(b[:, 2:] - b[:, :2], axis=1) 81 | return area_i / (area_a[:, np.newaxis] + area_b - area_i) 82 | 83 | 84 | def matrix_iof(a, b): 85 | """ 86 | return iof of a and b, numpy version for data augenmentation 87 | """ 88 | lt = np.maximum(a[:, np.newaxis, :2], b[:, :2]) 89 | rb = np.minimum(a[:, np.newaxis, 2:], b[:, 2:]) 90 | 91 | area_i = np.prod(rb - lt, axis=2) * (lt < rb).all(axis=2) 92 | area_a = np.prod(a[:, 2:] - a[:, :2], axis=1) 93 | return area_i / np.maximum(area_a[:, np.newaxis], 1) 94 | 95 | 96 | def match(threshold, truths, priors, variances, labels, landms, loc_t, conf_t, landm_t, idx): 97 | """Match each prior box with the ground truth box of the highest jaccard 98 | overlap, encode the bounding boxes, then return the matched indices 99 | corresponding to both confidence and location preds. 100 | Args: 101 | threshold: (float) The overlap threshold used when mathing boxes. 102 | truths: (tensor) Ground truth boxes, Shape: [num_obj, 4]. 
96 | def match(threshold, truths, priors, variances, labels, landms, loc_t, conf_t, landm_t, idx):
97 |     """Match each prior box with the ground truth box of the highest jaccard
98 |     overlap, encode the bounding boxes, then return the matched indices
99 |     corresponding to both confidence and location preds.
100 |     Args:
101 |         threshold: (float) The overlap threshold used when matching boxes.
102 |         truths: (tensor) Ground truth boxes, Shape: [num_obj, 4].
103 |         priors: (tensor) Prior boxes from priorbox layers, Shape: [n_priors,4].
104 |         variances: (tensor) Variances corresponding to each prior coord,
105 |             Shape: [num_priors, 4].
106 |         labels: (tensor) All the class labels for the image, Shape: [num_obj].
107 |         landms: (tensor) Ground truth landms, Shape [num_obj, 10].
108 |         loc_t: (tensor) Tensor to be filled w/ encoded location targets.
109 |         conf_t: (tensor) Tensor to be filled w/ matched indices for conf preds.
110 |         landm_t: (tensor) Tensor to be filled w/ encoded landm targets.
111 |         idx: (int) current batch index
112 |     Return:
113 |         The matched indices corresponding to 1)location 2)confidence 3)landm preds.
114 |     """
115 |     # jaccard index
116 |     overlaps = jaccard(
117 |         truths,
118 |         point_form(priors)
119 |     )
120 |     # (Bipartite Matching)
121 |     # [1,num_objects] best prior for each ground truth
122 |     best_prior_overlap, best_prior_idx = overlaps.max(1, keepdim=True)
123 | 
124 |     # ignore hard gt
125 |     valid_gt_idx = best_prior_overlap[:, 0] >= 0.2
126 |     best_prior_idx_filter = best_prior_idx[valid_gt_idx, :]
127 |     if best_prior_idx_filter.shape[0] <= 0:
128 |         loc_t[idx] = 0
129 |         conf_t[idx] = 0
130 |         return
131 | 
132 |     # [1,num_priors] best ground truth for each prior
133 |     best_truth_overlap, best_truth_idx = overlaps.max(0, keepdim=True)
134 |     best_truth_idx.squeeze_(0)
135 |     best_truth_overlap.squeeze_(0)
136 |     best_prior_idx.squeeze_(1)
137 |     best_prior_idx_filter.squeeze_(1)
138 |     best_prior_overlap.squeeze_(1)
139 |     best_truth_overlap.index_fill_(0, best_prior_idx_filter, 2)  # ensure best prior
140 |     # TODO refactor: index best_prior_idx with long tensor
141 |     # ensure every gt matches with its prior of max overlap
142 |     for j in range(best_prior_idx.size(0)):  # determine which gt box each of these anchors should predict
143 |         best_truth_idx[best_prior_idx[j]] = j
144 |     matches = truths[best_truth_idx]  # Shape: [num_priors,4] the gt bbox matched to each anchor
145 |     conf = labels[best_truth_idx]  # Shape: [num_priors] the gt label matched to each anchor
146 |     conf[best_truth_overlap < threshold] = 0  # label as background: anchors with overlap < threshold (0.35) all become negatives
147 |     loc = encode(matches, priors, variances)
148 | 
149 |     matches_landm = landms[best_truth_idx]
150 |     landm = encode_landm(matches_landm, priors, variances)
151 |     loc_t[idx] = loc  # [num_priors,4] encoded offsets to learn
152 |     conf_t[idx] = conf  # [num_priors] top class label for each prior
153 |     landm_t[idx] = landm
154 | 
155 | 
156 | def encode(matched, priors, variances):
157 |     """Encode the variances from the priorbox layers into the ground truth boxes
158 |     we have matched (based on jaccard overlap) with the prior boxes.
159 |     Args:
160 |         matched: (tensor) Coords of ground truth for each prior in point-form
161 |             Shape: [num_priors, 4].
162 |         priors: (tensor) Prior boxes in center-offset form
163 |             Shape: [num_priors,4].
164 | variances: (list[float]) Variances of priorboxes 165 | Return: 166 | encoded boxes (tensor), Shape: [num_priors, 4] 167 | """ 168 | 169 | # dist b/t match center and prior's center 170 | g_cxcy = (matched[:, :2] + matched[:, 2:])/2 - priors[:, :2] 171 | # encode variance 172 | g_cxcy /= (variances[0] * priors[:, 2:]) 173 | # match wh / prior wh 174 | g_wh = (matched[:, 2:] - matched[:, :2]) / priors[:, 2:] 175 | g_wh = torch.log(g_wh) / variances[1] 176 | # return target for smooth_l1_loss 177 | return torch.cat([g_cxcy, g_wh], 1) # [num_priors,4] 178 | 179 | def encode_landm(matched, priors, variances): 180 | """Encode the variances from the priorbox layers into the ground truth boxes 181 | we have matched (based on jaccard overlap) with the prior boxes. 182 | Args: 183 | matched: (tensor) Coords of ground truth for each prior in point-form 184 | Shape: [num_priors, 10]. 185 | priors: (tensor) Prior boxes in center-offset form 186 | Shape: [num_priors,4]. 187 | variances: (list[float]) Variances of priorboxes 188 | Return: 189 | encoded landm (tensor), Shape: [num_priors, 10] 190 | """ 191 | 192 | # dist b/t match center and prior's center 193 | matched = torch.reshape(matched, (matched.size(0), 5, 2)) 194 | priors_cx = priors[:, 0].unsqueeze(1).expand(matched.size(0), 5).unsqueeze(2) 195 | priors_cy = priors[:, 1].unsqueeze(1).expand(matched.size(0), 5).unsqueeze(2) 196 | priors_w = priors[:, 2].unsqueeze(1).expand(matched.size(0), 5).unsqueeze(2) 197 | priors_h = priors[:, 3].unsqueeze(1).expand(matched.size(0), 5).unsqueeze(2) 198 | priors = torch.cat([priors_cx, priors_cy, priors_w, priors_h], dim=2) 199 | g_cxcy = matched[:, :, :2] - priors[:, :, :2] 200 | # encode variance 201 | g_cxcy /= (variances[0] * priors[:, :, 2:]) 202 | # g_cxcy /= priors[:, :, 2:] 203 | g_cxcy = g_cxcy.reshape(g_cxcy.size(0), -1) 204 | # return target for smooth_l1_loss 205 | return g_cxcy 206 | 207 | 208 | # Adapted from https://github.com/Hakuyume/chainer-ssd 209 | def decode(loc, priors, variances): 210 | """Decode locations from predictions using priors to undo 211 | the encoding we did for offset regression at train time. 212 | Args: 213 | loc (tensor): location predictions for loc layers, 214 | Shape: [num_priors,4] 215 | priors (tensor): Prior boxes in center-offset form. 216 | Shape: [num_priors,4]. 217 | variances: (list[float]) Variances of priorboxes 218 | Return: 219 | decoded bounding box predictions 220 | """ 221 | 222 | boxes = torch.cat(( 223 | priors[:, :2] + loc[:, :2] * variances[0] * priors[:, 2:], 224 | priors[:, 2:] * torch.exp(loc[:, 2:] * variances[1])), 1) 225 | boxes[:, :2] -= boxes[:, 2:] / 2 226 | boxes[:, 2:] += boxes[:, :2] 227 | return boxes 228 | 229 | def decode_landm(pre, priors, variances): 230 | """Decode landm from predictions using priors to undo 231 | the encoding we did for offset regression at train time. 232 | Args: 233 | pre (tensor): landm predictions for loc layers, 234 | Shape: [num_priors,10] 235 | priors (tensor): Prior boxes in center-offset form. 236 | Shape: [num_priors,4]. 
237 |         variances: (list[float]) Variances of priorboxes
238 |     Return:
239 |         decoded landm predictions
240 |     """
241 |     landms = torch.cat((priors[:, :2] + pre[:, :2] * variances[0] * priors[:, 2:],
242 |                         priors[:, :2] + pre[:, 2:4] * variances[0] * priors[:, 2:],
243 |                         priors[:, :2] + pre[:, 4:6] * variances[0] * priors[:, 2:],
244 |                         priors[:, :2] + pre[:, 6:8] * variances[0] * priors[:, 2:],
245 |                         priors[:, :2] + pre[:, 8:10] * variances[0] * priors[:, 2:],
246 |                         ), dim=1)
247 |     return landms
248 | 
249 | 
250 | def log_sum_exp(x):
251 |     """Utility function for computing log_sum_exp in a numerically stable way.
252 |     This will be used to determine the unaveraged confidence loss across
253 |     all examples in a batch.
254 |     Args:
255 |         x (Variable(tensor)): conf_preds from conf layers
256 |     """
257 |     x_max = x.data.max()
258 |     return torch.log(torch.sum(torch.exp(x-x_max), 1, keepdim=True)) + x_max
259 | 
260 | 
261 | # Original author: Francisco Massa:
262 | # https://github.com/fmassa/object-detection.torch
263 | # Ported to PyTorch by Max deGroot (02/01/2017)
264 | def nms(boxes, scores, overlap=0.5, top_k=200):
265 |     """Apply non-maximum suppression at test time to avoid detecting too many
266 |     overlapping bounding boxes for a given object.
267 |     Args:
268 |         boxes: (tensor) The location preds for the img, Shape: [num_priors,4].
269 |         scores: (tensor) The class pred scores for the img, Shape: [num_priors].
270 |         overlap: (float) The overlap thresh for suppressing unnecessary boxes.
271 |         top_k: (int) The maximum number of box preds to consider.
272 |     Return:
273 |         The indices of the kept boxes with respect to num_priors.
274 |     """
275 | 
276 |     keep = torch.Tensor(scores.size(0)).fill_(0).long()
277 |     if boxes.numel() == 0:
278 |         return keep
279 |     x1 = boxes[:, 0]
280 |     y1 = boxes[:, 1]
281 |     x2 = boxes[:, 2]
282 |     y2 = boxes[:, 3]
283 |     area = torch.mul(x2 - x1, y2 - y1)
284 |     v, idx = scores.sort(0)  # sort in ascending order
285 |     # I = I[v >= 0.01]
286 |     idx = idx[-top_k:]  # indices of the top-k largest vals
287 |     xx1 = boxes.new()
288 |     yy1 = boxes.new()
289 |     xx2 = boxes.new()
290 |     yy2 = boxes.new()
291 |     w = boxes.new()
292 |     h = boxes.new()
293 | 
294 |     # keep = torch.Tensor()
295 |     count = 0
296 |     while idx.numel() > 0:
297 |         i = idx[-1]  # index of current largest val
298 |         # keep.append(i)
299 |         keep[count] = i
300 |         count += 1
301 |         if idx.size(0) == 1:
302 |             break
303 |         idx = idx[:-1]  # remove kept element from view
304 |         # load bboxes of next highest vals
305 |         torch.index_select(x1, 0, idx, out=xx1)
306 |         torch.index_select(y1, 0, idx, out=yy1)
307 |         torch.index_select(x2, 0, idx, out=xx2)
308 |         torch.index_select(y2, 0, idx, out=yy2)
309 |         # store element-wise max with next highest score
310 |         xx1 = torch.clamp(xx1, min=x1[i])
311 |         yy1 = torch.clamp(yy1, min=y1[i])
312 |         xx2 = torch.clamp(xx2, max=x2[i])
313 |         yy2 = torch.clamp(yy2, max=y2[i])
314 |         w.resize_as_(xx2)
315 |         h.resize_as_(yy2)
316 |         w = xx2 - xx1
317 |         h = yy2 - yy1
318 |         # check sizes of xx1 and xx2..
after each iteration 319 | w = torch.clamp(w, min=0.0) 320 | h = torch.clamp(h, min=0.0) 321 | inter = w*h 322 | # IoU = i / (area(a) + area(b) - i) 323 | rem_areas = torch.index_select(area, 0, idx) # load remaining areas) 324 | union = (rem_areas - inter) + area[i] 325 | IoU = inter/union # store result in iou 326 | # keep only elements with an IoU <= overlap 327 | idx = idx[IoU.le(overlap)] 328 | return keep, count 329 | 330 | 331 | -------------------------------------------------------------------------------- /retinaface/utils/nms/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/foamliu/InsightFace-PyTorch/5b6dba6078b4fb8a9b83a1ec83ff02e308e8e658/retinaface/utils/nms/__init__.py -------------------------------------------------------------------------------- /retinaface/utils/nms/py_cpu_nms.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------- 2 | # Fast R-CNN 3 | # Copyright (c) 2015 Microsoft 4 | # Licensed under The MIT License [see LICENSE for details] 5 | # Written by Ross Girshick 6 | # -------------------------------------------------------- 7 | 8 | import numpy as np 9 | 10 | def py_cpu_nms(dets, thresh): 11 | """Pure Python NMS baseline.""" 12 | x1 = dets[:, 0] 13 | y1 = dets[:, 1] 14 | x2 = dets[:, 2] 15 | y2 = dets[:, 3] 16 | scores = dets[:, 4] 17 | 18 | areas = (x2 - x1 + 1) * (y2 - y1 + 1) 19 | order = scores.argsort()[::-1] 20 | 21 | keep = [] 22 | while order.size > 0: 23 | i = order[0] 24 | keep.append(i) 25 | xx1 = np.maximum(x1[i], x1[order[1:]]) 26 | yy1 = np.maximum(y1[i], y1[order[1:]]) 27 | xx2 = np.minimum(x2[i], x2[order[1:]]) 28 | yy2 = np.minimum(y2[i], y2[order[1:]]) 29 | 30 | w = np.maximum(0.0, xx2 - xx1 + 1) 31 | h = np.maximum(0.0, yy2 - yy1 + 1) 32 | inter = w * h 33 | ovr = inter / (areas[i] + areas[order[1:]] - inter) 34 | 35 | inds = np.where(ovr <= thresh)[0] 36 | order = order[inds + 1] 37 | 38 | return keep 39 | -------------------------------------------------------------------------------- /retinaface/utils/timer.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------- 2 | # Fast R-CNN 3 | # Copyright (c) 2015 Microsoft 4 | # Licensed under The MIT License [see LICENSE for details] 5 | # Written by Ross Girshick 6 | # -------------------------------------------------------- 7 | 8 | import time 9 | 10 | 11 | class Timer(object): 12 | """A simple timer.""" 13 | def __init__(self): 14 | self.total_time = 0. 15 | self.calls = 0 16 | self.start_time = 0. 17 | self.diff = 0. 18 | self.average_time = 0. 19 | 20 | def tic(self): 21 | # using time.time instead of time.clock because time time.clock 22 | # does not normalize for multithreading 23 | self.start_time = time.time() 24 | 25 | def toc(self, average=True): 26 | self.diff = time.time() - self.start_time 27 | self.total_time += self.diff 28 | self.calls += 1 29 | self.average_time = self.total_time / self.calls 30 | if average: 31 | return self.average_time 32 | else: 33 | return self.diff 34 | 35 | def clear(self): 36 | self.total_time = 0. 37 | self.calls = 0 38 | self.start_time = 0. 39 | self.diff = 0. 40 | self.average_time = 0. 
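# Typical usage, as a minimal sketch:
#     timer = Timer()
#     for _ in range(10):
#         timer.tic()
#         run_detector()           # hypothetical timed work
#         timer.toc()              # accumulates one tic/toc interval
#     print(timer.average_time)    # mean seconds per call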
41 | 
--------------------------------------------------------------------------------
/retinaface/weights/mobilenet0.25_Final.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/InsightFace-PyTorch/5b6dba6078b4fb8a9b83a1ec83ff02e308e8e658/retinaface/weights/mobilenet0.25_Final.pth
--------------------------------------------------------------------------------
/silu.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from torch import nn
3 | 
4 | 
5 | # simply define a silu function
6 | def silu(input):
7 |     '''
8 |     Applies the Sigmoid Linear Unit (SiLU) function element-wise:
9 |     SiLU(x) = x * sigmoid(x)
10 |     '''
11 |     return input * torch.sigmoid(
12 |         input)  # use torch.sigmoid so we get the most efficient implementation based on builtin PyTorch functions
13 | 
14 | 
15 | # create a class wrapper from PyTorch nn.Module, so
16 | # the function can now be easily used in models
17 | class SiLU(nn.Module):
18 |     '''
19 |     Applies the Sigmoid Linear Unit (SiLU) function element-wise:
20 |     SiLU(x) = x * sigmoid(x)
21 |     Shape:
22 |         - Input: (N, *) where * means any number of additional
23 |           dimensions
24 |         - Output: (N, *), same shape as the input
25 |     References:
26 |         - Related paper:
27 |         https://arxiv.org/pdf/1606.08415.pdf
28 |     Examples:
29 |         >>> m = SiLU()
30 |         >>> input = torch.randn(2)
31 |         >>> output = m(input)
32 |     '''
33 | 
34 |     def __init__(self):
35 |         '''
36 |         Init method.
37 |         '''
38 |         super().__init__()  # init the base class
39 | 
40 |     def forward(self, input):
41 |         '''
42 |         Forward pass of the function.
43 |         '''
44 |         return silu(input)  # simply apply the already implemented silu function
45 | 
--------------------------------------------------------------------------------
/sponsor.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/InsightFace-PyTorch/5b6dba6078b4fb8a9b83a1ec83ff02e308e8e658/sponsor.jpg
--------------------------------------------------------------------------------
/test/Aaron Eckhart_1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/InsightFace-PyTorch/5b6dba6078b4fb8a9b83a1ec83ff02e308e8e658/test/Aaron Eckhart_1.jpg
--------------------------------------------------------------------------------
/test/Aaron Eckhart_10.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/InsightFace-PyTorch/5b6dba6078b4fb8a9b83a1ec83ff02e308e8e658/test/Aaron Eckhart_10.jpg
--------------------------------------------------------------------------------
/test/Aaron Eckhart_11.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/InsightFace-PyTorch/5b6dba6078b4fb8a9b83a1ec83ff02e308e8e658/test/Aaron Eckhart_11.jpg
--------------------------------------------------------------------------------
/test/Aaron Eckhart_12.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/InsightFace-PyTorch/5b6dba6078b4fb8a9b83a1ec83ff02e308e8e658/test/Aaron Eckhart_12.jpg
--------------------------------------------------------------------------------
/test/Aaron Eckhart_13.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/foamliu/InsightFace-PyTorch/5b6dba6078b4fb8a9b83a1ec83ff02e308e8e658/test/Aaron Eckhart_13.jpg -------------------------------------------------------------------------------- /test/Aaron Eckhart_14.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/foamliu/InsightFace-PyTorch/5b6dba6078b4fb8a9b83a1ec83ff02e308e8e658/test/Aaron Eckhart_14.jpg -------------------------------------------------------------------------------- /test/Aaron Eckhart_153.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/foamliu/InsightFace-PyTorch/5b6dba6078b4fb8a9b83a1ec83ff02e308e8e658/test/Aaron Eckhart_153.jpg -------------------------------------------------------------------------------- /test/Aaron Eckhart_2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/foamliu/InsightFace-PyTorch/5b6dba6078b4fb8a9b83a1ec83ff02e308e8e658/test/Aaron Eckhart_2.jpg -------------------------------------------------------------------------------- /test/Aaron Eckhart_3.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/foamliu/InsightFace-PyTorch/5b6dba6078b4fb8a9b83a1ec83ff02e308e8e658/test/Aaron Eckhart_3.jpg -------------------------------------------------------------------------------- /test/Aaron Eckhart_30.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/foamliu/InsightFace-PyTorch/5b6dba6078b4fb8a9b83a1ec83ff02e308e8e658/test/Aaron Eckhart_30.jpg -------------------------------------------------------------------------------- /test/Aaron Eckhart_4.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/foamliu/InsightFace-PyTorch/5b6dba6078b4fb8a9b83a1ec83ff02e308e8e658/test/Aaron Eckhart_4.jpg -------------------------------------------------------------------------------- /test/Aaron Eckhart_5.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/foamliu/InsightFace-PyTorch/5b6dba6078b4fb8a9b83a1ec83ff02e308e8e658/test/Aaron Eckhart_5.jpg -------------------------------------------------------------------------------- /test/Aaron Eckhart_6.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/foamliu/InsightFace-PyTorch/5b6dba6078b4fb8a9b83a1ec83ff02e308e8e658/test/Aaron Eckhart_6.jpg -------------------------------------------------------------------------------- /test/Aaron Eckhart_7.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/foamliu/InsightFace-PyTorch/5b6dba6078b4fb8a9b83a1ec83ff02e308e8e658/test/Aaron Eckhart_7.jpg -------------------------------------------------------------------------------- /test/Aaron Eckhart_8.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/foamliu/InsightFace-PyTorch/5b6dba6078b4fb8a9b83a1ec83ff02e308e8e658/test/Aaron Eckhart_8.jpg -------------------------------------------------------------------------------- /test/Aaron Eckhart_9.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/foamliu/InsightFace-PyTorch/5b6dba6078b4fb8a9b83a1ec83ff02e308e8e658/test/Aaron Eckhart_9.jpg -------------------------------------------------------------------------------- /test/Aaron_Eckhart_153.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/foamliu/InsightFace-PyTorch/5b6dba6078b4fb8a9b83a1ec83ff02e308e8e658/test/Aaron_Eckhart_153.jpg -------------------------------------------------------------------------------- /test/Jason Behr_27968.JPG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/foamliu/InsightFace-PyTorch/5b6dba6078b4fb8a9b83a1ec83ff02e308e8e658/test/Jason Behr_27968.JPG -------------------------------------------------------------------------------- /test/test_align.py: -------------------------------------------------------------------------------- 1 | import cv2 as cv 2 | from PIL import Image 3 | 4 | import mtcnn.detector as mtcnn 5 | import retinaface.detector as retinaface 6 | 7 | 8 | def show_bboxes(full_path, bboxes, landmarks): 9 | img_raw = cv.imread(full_path) 10 | num_faces = bboxes.shape[0] 11 | 12 | # show image 13 | for i in range(num_faces): 14 | b = bboxes[i] 15 | width = b[2] - b[0] 16 | height = b[3] - b[1] 17 | area = width * height 18 | print('width: ' + str(width)) 19 | print('height: ' + str(height)) 20 | print('area: ' + str(area)) 21 | scores = bboxes[:, 4] 22 | text = "{:.4f}".format(scores[i]) 23 | b = list(map(int, b)) 24 | cv.rectangle(img_raw, (b[0], b[1]), (b[2], b[3]), (0, 0, 255), 2) 25 | cx = b[0] 26 | cy = b[1] + 12 27 | cv.putText(img_raw, text, (cx, cy), 28 | cv.FONT_HERSHEY_DUPLEX, 0.5, (255, 255, 255)) 29 | 30 | # landms 31 | landms = landmarks[i] 32 | cv.circle(img_raw, (landms[0], landms[5]), 1, (0, 0, 255), 4) 33 | cv.circle(img_raw, (landms[1], landms[6]), 1, (0, 255, 255), 4) 34 | cv.circle(img_raw, (landms[2], landms[7]), 1, (255, 0, 255), 4) 35 | cv.circle(img_raw, (landms[3], landms[8]), 1, (0, 255, 0), 4) 36 | cv.circle(img_raw, (landms[4], landms[9]), 1, (255, 0, 0), 4) 37 | 38 | # save image 39 | 40 | cv.imwrite('images/result.jpg', img_raw) 41 | cv.imshow('image', img_raw) 42 | cv.waitKey(0) 43 | 44 | 45 | if __name__ == "__main__": 46 | full_path = 'test/Jason Behr_27968.JPG' 47 | img = Image.open(full_path).convert('RGB') 48 | bboxes, landmarks = mtcnn.detect_faces(img) 49 | print(bboxes) 50 | print(landmarks) 51 | show_bboxes(full_path, bboxes, landmarks) 52 | 53 | bboxes, landmarks = retinaface.detect_faces(img) 54 | print(bboxes) 55 | print(landmarks) 56 | show_bboxes(full_path, bboxes, landmarks) 57 | -------------------------------------------------------------------------------- /train.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch 3 | from torch import nn 4 | from torch.optim.lr_scheduler import MultiStepLR 5 | from torch.utils.tensorboard import SummaryWriter 6 | 7 | from config import device, grad_clip, print_freq, num_workers, logger 8 | from data_gen import ArcFaceDataset 9 | from focal_loss import FocalLoss 10 | from megaface_eval import megaface_test 11 | from models import resnet18, resnet34, resnet50, resnet101, resnet152, ArcMarginModel 12 | from utils import parse_args, save_checkpoint, AverageMeter, accuracy, clip_gradient 13 | 14 | 15 | def train_net(args): 16 | torch.manual_seed(7) 17 | np.random.seed(7) 18 | checkpoint = args.checkpoint 19 | start_epoch 
= 0 20 | best_acc = float('-inf') 21 | writer = SummaryWriter() 22 | epochs_since_improvement = 0 23 | 24 | # Initialize / load checkpoint 25 | if checkpoint is None: 26 | if args.network == 'r18': 27 | model = resnet18(args) 28 | elif args.network == 'r34': 29 | model = resnet34(args) 30 | elif args.network == 'r50': 31 | model = resnet50(args) 32 | elif args.network == 'r101': 33 | model = resnet101(args) 34 | elif args.network == 'r152': 35 | model = resnet152(args) 36 | else: 37 | raise TypeError('network {} is not supported.'.format(args.network)) 38 | 39 | if args.pretrained: 40 | model.load_state_dict(torch.load('insight-face-v3.pt')) 41 | 42 | model = nn.DataParallel(model) 43 | metric_fc = ArcMarginModel(args) 44 | metric_fc = nn.DataParallel(metric_fc) 45 | 46 | if args.optimizer == 'sgd': 47 | optimizer = torch.optim.SGD([{'params': model.parameters()}, {'params': metric_fc.parameters()}], 48 | lr=args.lr, momentum=args.mom, nesterov=True, weight_decay=args.weight_decay) 49 | else: 50 | optimizer = torch.optim.Adam([{'params': model.parameters()}, {'params': metric_fc.parameters()}], 51 | lr=args.lr, weight_decay=args.weight_decay) 52 | 53 | else: 54 | checkpoint = torch.load(checkpoint) 55 | start_epoch = checkpoint['epoch'] + 1 56 | epochs_since_improvement = checkpoint['epochs_since_improvement'] 57 | model = checkpoint['model'] 58 | metric_fc = checkpoint['metric_fc'] 59 | optimizer = checkpoint['optimizer'] 60 | 61 | # Move to GPU, if available 62 | model = model.to(device) 63 | metric_fc = metric_fc.to(device) 64 | 65 | # Loss function 66 | if args.focal_loss: 67 | criterion = FocalLoss(gamma=args.gamma) 68 | else: 69 | criterion = nn.CrossEntropyLoss() 70 | 71 | # Custom dataloaders 72 | train_dataset = ArcFaceDataset('train') 73 | train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, 74 | num_workers=num_workers) 75 | 76 | scheduler = MultiStepLR(optimizer, milestones=[8, 16, 24, 32], gamma=0.1) 77 | 78 | # Epochs 79 | for epoch in range(start_epoch, args.end_epoch): 80 | lr = optimizer.param_groups[0]['lr'] 81 | logger.info('\nCurrent effective learning rate: {}\n'.format(lr)) 82 | # print('Step num: {}\n'.format(optimizer.step_num)) 83 | writer.add_scalar('model/learning_rate', lr, epoch) 84 | 85 | # One epoch's training 86 | train_loss, train_top1_accs = train(train_loader=train_loader, 87 | model=model, 88 | metric_fc=metric_fc, 89 | criterion=criterion, 90 | optimizer=optimizer, 91 | epoch=epoch) 92 | 93 | writer.add_scalar('model/train_loss', train_loss, epoch) 94 | writer.add_scalar('model/train_accuracy', train_top1_accs, epoch) 95 | 96 | # One epoch's validation 97 | megaface_acc = megaface_test(model) 98 | writer.add_scalar('model/megaface_accuracy', megaface_acc, epoch) 99 | 100 | scheduler.step(epoch) 101 | 102 | # Check if there was an improvement 103 | is_best = megaface_acc > best_acc 104 | best_acc = max(megaface_acc, best_acc) 105 | if not is_best: 106 | epochs_since_improvement += 1 107 | logger.info("\nEpochs since last improvement: %d\n" % (epochs_since_improvement,)) 108 | else: 109 | epochs_since_improvement = 0 110 | 111 | # Save checkpoint 112 | save_checkpoint(epoch, epochs_since_improvement, model, metric_fc, optimizer, best_acc, is_best, scheduler) 113 | 114 | 115 | def train(train_loader, model, metric_fc, criterion, optimizer, epoch): 116 | model.train() # train mode (dropout and batchnorm is used) 117 | metric_fc.train() 118 | 119 | losses = AverageMeter() 120 | top1_accs = AverageMeter() 121 | 
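# Each iteration below maps a batch of aligned face crops to 512-D embeddings
# and lets the ArcFace head (ArcMarginModel, see models.py) turn embeddings
# plus labels into margin-adjusted logits; in the ArcFace formulation the
# target-class logit becomes s * cos(theta + m), with s and m taken from the
# --margin-s / --margin-m arguments in utils.parse_args.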
122 | # Batches 123 | for i, (img, label) in enumerate(train_loader): 124 | # Move to GPU, if available 125 | img = img.to(device) 126 | label = label.to(device) # [N, 1] 127 | 128 | # Forward prop. 129 | feature = model(img) # embedding => [N, 512] 130 | output = metric_fc(feature, label) # class_id_out => [N, 10575] 131 | 132 | # Calculate loss 133 | loss = criterion(output, label) 134 | 135 | # Back prop. 136 | optimizer.zero_grad() 137 | loss.backward() 138 | 139 | # Clip gradients 140 | clip_gradient(optimizer, grad_clip) 141 | 142 | # Update weights 143 | optimizer.step() 144 | 145 | # Keep track of metrics 146 | losses.update(loss.item()) 147 | top1_accuracy = accuracy(output, label, 1) 148 | top1_accs.update(top1_accuracy) 149 | 150 | # Print status 151 | if i % print_freq == 0: 152 | logger.info('Epoch: [{0}][{1}/{2}]\t' 153 | 'Loss {loss.val:.4f} ({loss.avg:.4f})\t' 154 | 'Top1 Accuracy {top1_accs.val:.3f} ({top1_accs.avg:.3f})'.format(epoch, i, len(train_loader), 155 | loss=losses, 156 | top1_accs=top1_accs)) 157 | 158 | return losses.avg, top1_accs.avg 159 | 160 | 161 | def main(): 162 | global args 163 | args = parse_args() 164 | train_net(args) 165 | 166 | 167 | if __name__ == '__main__': 168 | main() 169 | -------------------------------------------------------------------------------- /utils.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import os 3 | from shutil import copyfile 4 | 5 | import cv2 as cv 6 | import numpy as np 7 | import torch 8 | 9 | from align_faces import get_reference_facial_points, warp_and_crop_face 10 | from config import im_size 11 | from retinaface.detector import detector 12 | 13 | 14 | def clip_gradient(optimizer, grad_clip): 15 | """ 16 | Clips gradients computed during backpropagation to avoid explosion of gradients. 17 | :param optimizer: optimizer with the gradients to be clipped 18 | :param grad_clip: clip value 19 | """ 20 | for group in optimizer.param_groups: 21 | for param in group['params']: 22 | if param.grad is not None: 23 | param.grad.data.clamp_(-grad_clip, grad_clip) 24 | 25 | 26 | def save_checkpoint(epoch, epochs_since_improvement, model, metric_fc, optimizer, acc, is_best, scheduler): 27 | state = {'epoch': epoch, 28 | 'epochs_since_improvement': epochs_since_improvement, 29 | 'acc': acc, 30 | 'model': model, 31 | 'metric_fc': metric_fc, 32 | 'optimizer': optimizer, 33 | 'scheduler': scheduler} 34 | # filename = 'checkpoint_' + str(epoch) + '_' + str(loss) + '.tar' 35 | filename = 'checkpoint.tar' 36 | torch.save(state, filename) 37 | # If this checkpoint is the best so far, store a copy so it doesn't get overwritten by a worse checkpoint 38 | if is_best: 39 | torch.save(state, 'BEST_checkpoint.tar') 40 | 41 | 42 | class AverageMeter(object): 43 | """ 44 | Keeps track of most recent, average, sum, and count of a metric. 45 | """ 46 | 47 | def __init__(self): 48 | self.reset() 49 | 50 | def reset(self): 51 | self.val = 0 52 | self.avg = 0 53 | self.sum = 0 54 | self.count = 0 55 | 56 | def update(self, val, n=1): 57 | self.val = val 58 | self.sum += val * n 59 | self.count += n 60 | self.avg = self.sum / self.count 61 | 62 | 63 | def adjust_learning_rate(optimizer, shrink_factor): 64 | """ 65 | Shrinks learning rate by a specified factor. 66 | :param optimizer: optimizer whose learning rate must be shrunk. 67 | :param shrink_factor: factor in interval (0, 1) to multiply learning rate with. 
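    For example, an lr of 0.1 shrunk by a factor of 0.1 becomes 0.01.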
68 | """ 69 | 70 | print("\nDECAYING learning rate.") 71 | for param_group in optimizer.param_groups: 72 | param_group['lr'] = param_group['lr'] * shrink_factor 73 | print("The new learning rate is %f\n" % (optimizer.param_groups[0]['lr'],)) 74 | 75 | 76 | def get_learning_rate(optimizer): 77 | for param_group in optimizer.param_groups: 78 | return param_group['lr'] 79 | 80 | 81 | def accuracy(scores, targets, k=1): 82 | batch_size = targets.size(0) 83 | _, ind = scores.topk(k, 1, True, True) 84 | correct = ind.eq(targets.view(-1, 1).expand_as(ind)) 85 | correct_total = correct.view(-1).float().sum() # 0D tensor 86 | return correct_total.item() * (100.0 / batch_size) 87 | 88 | 89 | def align_face(raw, facial5points): 90 | # raw = cv.imread(img_fn, True) # BGR 91 | facial5points = np.reshape(facial5points, (2, 5)) 92 | 93 | crop_size = (im_size, im_size) 94 | 95 | default_square = True 96 | inner_padding_factor = 0.25 97 | outer_padding = (0, 0) 98 | output_size = (im_size, im_size) 99 | 100 | # get the reference 5 landmarks position in the crop settings 101 | reference_5pts = get_reference_facial_points( 102 | output_size, inner_padding_factor, outer_padding, default_square) 103 | 104 | # dst_img = warp_and_crop_face(raw, facial5points) 105 | dst_img = warp_and_crop_face(raw, facial5points, reference_pts=reference_5pts, crop_size=crop_size) 106 | return dst_img 107 | 108 | 109 | def get_face_attributes(full_path): 110 | try: 111 | img = cv.imread(full_path) 112 | bounding_boxes, landmarks = detector.detect_faces(img) 113 | 114 | if len(landmarks) > 0: 115 | landmarks = [int(round(x)) for x in landmarks[0]] 116 | return True, landmarks 117 | 118 | except KeyboardInterrupt: 119 | raise 120 | except: 121 | pass 122 | return False, None 123 | 124 | 125 | def select_significant_face(bboxes): 126 | best_index = -1 127 | best_rank = float('-inf') 128 | for i, b in enumerate(bboxes): 129 | bbox_w, bbox_h = b[2] - b[0], b[3] - b[1] 130 | area = bbox_w * bbox_h 131 | score = b[4] 132 | rank = score * area 133 | if rank > best_rank: 134 | best_rank = rank 135 | best_index = i 136 | 137 | return best_index 138 | 139 | 140 | def get_central_face_attributes(full_path): 141 | try: 142 | img = cv.imread(full_path) 143 | bboxes, landmarks = detector.detect_faces(img) 144 | 145 | if len(landmarks) > 0: 146 | i = select_significant_face(bboxes) 147 | return True, [bboxes[i]], [landmarks[i]] 148 | 149 | except KeyboardInterrupt: 150 | raise 151 | except ValueError: 152 | pass 153 | except IOError: 154 | pass 155 | return False, None, None 156 | 157 | 158 | def get_all_face_attributes(full_path): 159 | img = cv.imread(full_path) 160 | bounding_boxes, landmarks = detector.detect_faces(img) 161 | return bounding_boxes, landmarks 162 | 163 | 164 | def draw_bboxes(img, bounding_boxes, facial_landmarks=[]): 165 | for b in bounding_boxes: 166 | cv.rectangle(img, (int(b[0]), int(b[1])), (int(b[2]), int(b[3])), (255, 255, 255), 1) 167 | 168 | for p in facial_landmarks: 169 | for i in range(5): 170 | cv.circle(img, (int(p[i]), int(p[i + 5])), 1, (0, 255, 0), -1) 171 | 172 | break # only first 173 | 174 | return img 175 | 176 | 177 | def parse_args(): 178 | parser = argparse.ArgumentParser(description='Train face network') 179 | # general 180 | parser.add_argument('--pretrained', type=bool, default=False, help='pretrained model') 181 | parser.add_argument('--network', default='r101', help='specify network') 182 | parser.add_argument('--end-epoch', type=int, default=1000, help='training epoch size.') 183 | 
parser.add_argument('--lr', type=float, default=0.1, help='start learning rate') 184 | parser.add_argument('--lr-step', type=int, default=10, help='period of learning rate decay') 185 | parser.add_argument('--optimizer', default='sgd', help='optimizer') 186 | parser.add_argument('--weight-decay', type=float, default=5e-4, help='weight decay') 187 | parser.add_argument('--mom', type=float, default=0.9, help='momentum') 188 | parser.add_argument('--emb-size', type=int, default=512, help='embedding length') 189 | parser.add_argument('--batch-size', type=int, default=256, help='batch size in each context') 190 | parser.add_argument('--margin-m', type=float, default=0.5, help='angular margin m') 191 | parser.add_argument('--margin-s', type=float, default=64.0, help='feature scale s') 192 | parser.add_argument('--easy-margin', type=bool, default=False, help='easy margin') 193 | parser.add_argument('--focal-loss', type=bool, default=False, help='focal loss') 194 | parser.add_argument('--gamma', type=float, default=2.0, help='focusing parameter gamma') 195 | parser.add_argument('--use-se', type=bool, default=True, help='use SEBlock') 196 | parser.add_argument('--full-log', type=bool, default=False, help='full logging') 197 | parser.add_argument('--checkpoint', type=str, default=None, help='checkpoint') 198 | args = parser.parse_args() 199 | return args 200 | 201 | 202 | def ensure_folder(folder): 203 | import os 204 | if not os.path.isdir(folder): 205 | os.mkdir(folder) 206 | 207 | 208 | def full_log(epoch): 209 | full_log_dir = 'data/full_log' 210 | if not os.path.isdir(full_log_dir): 211 | os.mkdir(full_log_dir) 212 | filename = 'angles_{}.txt'.format(epoch) 213 | dst_file = os.path.join(full_log_dir, filename) 214 | src_file = 'data/angles.txt' 215 | copyfile(src_file, dst_file) 216 | --------------------------------------------------------------------------------
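# Putting the pieces together: a minimal end-to-end sketch (not a file in this
# repo). It assumes a 'BEST_checkpoint.tar' produced by train.py and reuses the
# repo's own detection and alignment helpers; the exact test-time transform
# lives in data_gen.py / pre_process.py, so the plain to-tensor conversion
# below is illustrative only.

import cv2 as cv
import torch

from config import device
from utils import align_face, get_central_face_attributes

checkpoint = torch.load('BEST_checkpoint.tar', map_location=device)
model = checkpoint['model'].module.to(device)  # unwrap nn.DataParallel
model.eval()

full_path = 'test/Aaron Eckhart_1.jpg'
found, bboxes, landmarks = get_central_face_attributes(full_path)
if found:
    raw = cv.imread(full_path)                 # BGR image
    img = align_face(raw, landmarks[0])        # im_size x im_size aligned crop
    x = torch.from_numpy(img).permute(2, 0, 1).float().unsqueeze(0).to(device)
    with torch.no_grad():
        embedding = model(x)[0]                # 512-D face embedding
    embedding = embedding / embedding.norm()   # unit-normalize for cosine scoring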