├── .gitignore
├── .gitmodules
├── Makefile
├── README.md
├── alignment
│   ├── RetinaFace_Mx
│   │   ├── 1.jpg
│   │   ├── 2.jpg
│   │   ├── Makefile
│   │   ├── README.md
│   │   ├── a.jpg
│   │   ├── align_trans.py
│   │   ├── matlab_cp2tform.py
│   │   ├── model
│   │   │   ├── config.py
│   │   │   ├── mnet.25-0000.params
│   │   │   ├── mnet.25-symbol.json
│   │   │   ├── mobilenet_0_25-0000.params
│   │   │   └── mobilenet_0_25-symbol.json
│   │   ├── rcnn
│   │   │   ├── PY_OP
│   │   │   │   ├── __init__.py
│   │   │   │   └── rpn_fpn_ohem3.py
│   │   │   ├── __init__.py
│   │   │   ├── core
│   │   │   │   ├── __init__.py
│   │   │   │   ├── callback.py
│   │   │   │   ├── loader.py
│   │   │   │   ├── metric.py
│   │   │   │   ├── module.py
│   │   │   │   ├── module_bak.py
│   │   │   │   └── tester.py
│   │   │   ├── cython
│   │   │   │   ├── .gitignore
│   │   │   │   ├── __init__.py
│   │   │   │   ├── anchors.pyx
│   │   │   │   ├── bbox.pyx
│   │   │   │   ├── cpu_nms.pyx
│   │   │   │   ├── gpu_nms.hpp
│   │   │   │   ├── gpu_nms.pyx
│   │   │   │   ├── nms_kernel.cu
│   │   │   │   └── setup.py
│   │   │   ├── dataset
│   │   │   │   ├── __init__.py
│   │   │   │   ├── ds_utils.py
│   │   │   │   ├── imdb.py
│   │   │   │   └── retinaface.py
│   │   │   ├── io
│   │   │   │   ├── __init__.py
│   │   │   │   ├── image.py
│   │   │   │   ├── rcnn.py
│   │   │   │   └── rpn.py
│   │   │   ├── logger.py
│   │   │   ├── processing
│   │   │   │   ├── __init__.py
│   │   │   │   ├── assign_levels.py
│   │   │   │   ├── bbox_regression.py
│   │   │   │   ├── bbox_transform.py
│   │   │   │   ├── generate_anchor.py
│   │   │   │   └── nms.py
│   │   │   ├── pycocotools
│   │   │   │   ├── UPSTREAM_REV
│   │   │   │   ├── __init__.py
│   │   │   │   ├── _mask.c
│   │   │   │   ├── _mask.pyx
│   │   │   │   ├── coco.py
│   │   │   │   ├── cocoeval.py
│   │   │   │   ├── mask.py
│   │   │   │   ├── maskApi.c
│   │   │   │   ├── maskApi.h
│   │   │   │   └── setup.py
│   │   │   ├── sample_config.py
│   │   │   ├── symbol
│   │   │   │   ├── __init__.py
│   │   │   │   ├── pyramidbox.py
│   │   │   │   ├── symbol_common.py
│   │   │   │   ├── symbol_common.py.bak
│   │   │   │   ├── symbol_mnet.py
│   │   │   │   ├── symbol_mnet.py.bak
│   │   │   │   ├── symbol_resnet.py
│   │   │   │   └── symbol_ssh.py
│   │   │   ├── tools
│   │   │   │   ├── __init__.py
│   │   │   │   ├── demo_images.py
│   │   │   │   ├── demo_single_image.py
│   │   │   │   ├── reeval.py
│   │   │   │   ├── test_rcnn.py
│   │   │   │   ├── test_rpn.py
│   │   │   │   ├── train_maskrcnn.py
│   │   │   │   ├── train_rcnn.py
│   │   │   │   └── train_rpn.py
│   │   │   └── utils
│   │   │       ├── __init__.py
│   │   │       ├── combine_model.py
│   │   │       ├── load_data.py
│   │   │       ├── load_model.py
│   │   │       └── save_model.py
│   │   ├── retinaface.py
│   │   ├── test.py
│   │   ├── test_widerface.py
│   │   └── train.py
│   ├── detector.py
│   ├── mtcnn.py
│   ├── mtcnn_pytorch
│   │   ├── .gitignore
│   │   ├── LICENSE
│   │   ├── README.md
│   │   ├── extract_weights_from_caffe_models.py
│   │   ├── get_aligned_face_from_mtcnn.ipynb
│   │   ├── refine_faces.ipynb
│   │   └── src
│   │       ├── __init__.py
│   │       ├── align_trans.py
│   │       ├── box_utils.py
│   │       ├── detector.py
│   │       ├── first_stage.py
│   │       ├── get_nets.py
│   │       ├── matlab_cp2tform.py
│   │       ├── visualization_utils.py
│   │       └── weights
│   │           ├── onet.npy
│   │           ├── pnet.npy
│   │           └── rnet.npy
│   ├── retinaface.py
│   ├── retinaface_pytorch
│   │   ├── README.md
│   │   ├── align_trans.py
│   │   ├── checkpoint.pth
│   │   ├── cython
│   │   │   ├── .gitignore
│   │   │   ├── __init__.py
│   │   │   ├── anchors.pyx
│   │   │   ├── bbox.pyx
│   │   │   ├── cpu_nms.pyx
│   │   │   ├── gpu_nms.hpp
│   │   │   ├── gpu_nms.pyx
│   │   │   ├── nms_kernel.cu
│   │   │   └── setup.py
│   │   ├── detector.py
│   │   ├── inference.py
│   │   ├── matlab_cp2tform.py
│   │   ├── mnet.25-0000.params
│   │   ├── mnet.25-symbol.json
│   │   ├── retinaface.py
│   │   ├── test_images
│   │   │   ├── t2.jpg
│   │   │   └── t4.jpg
│   │   ├── test_results
│   │   │   ├── t2.jpg
│   │   │   └── t4.jpg
│   │   └── utils.py
│   ├── t2.jpg
│   └── t6.jpg
├── api.py
├── app
│   ├── LICENSE
│   ├── README.md
│   ├── capture_dialog.py
│   ├── gen_data_ui.py
│   ├── icon
│   │   └── default.jpg
│   ├── libs
│   │   ├── canvas.py
│   │   ├── constants.py
│   │   ├── label_dialog.py
│   │   ├── setting
│   │   │   └── setting.pkl
│   │   ├── shape.py
│   │   ├── ustr.py
│   │   └── utils.py
│   ├── main.py
│   ├── recordings
│   │   ├── 04-Th04-2019:05-27-17.avi
│   │   └── 30-Mar-2019:11-05-13.avi
│   ├── requirements.txt
│   ├── res.jpg
│   ├── setting_ui.py
│   └── window_ui.py
├── demo
│   ├── face_recognition.py
│   ├── face_verify.py
│   ├── infer_on_video.py
│   ├── processer.py
│   └── untitled.py
├── requirements.txt
├── smoofing
│   ├── Smoofing.py
│   └── replay-attack_ycrcb_luv_extraTreesClassifier.pkl
├── src
│   ├── .gitignore
│   ├── Dockerfile
│   ├── LICENSE
│   ├── README.MD
│   ├── backbone
│   │   ├── __init__.py
│   │   ├── demo_postrequest.py
│   │   ├── mnas_net.py
│   │   ├── model.py
│   │   ├── model_proxyless_nas.py
│   │   ├── pnasnet_mobile.py
│   │   ├── proxyless_cpu.config
│   │   └── proxyless_nas
│   │       ├── __init__.py
│   │       ├── layers.py
│   │       ├── model_zoo.py
│   │       ├── nas_modules.py
│   │       └── utils.py
│   ├── config.py
│   ├── dataset
│   │   ├── VGG_FP.py
│   │   ├── __init__.py
│   │   ├── agedb.py
│   │   ├── casia_webface.py
│   │   ├── cfp.py
│   │   ├── lfw.py
│   │   └── megaface.py
│   ├── docker-compose.yml
│   ├── eval
│   │   ├── eval_agedb30.py
│   │   ├── eval_cfp.py
│   │   ├── eval_lfw.py
│   │   ├── eval_lfw_blufr.py
│   │   └── eval_megaface.py
│   ├── evaluate_model.ipynb
│   ├── margin
│   │   ├── ArcMarginProduct.py
│   │   ├── CosineMarginProduct.py
│   │   ├── InnerProduct.py
│   │   ├── SphereMarginProduct.py
│   │   └── __init__.py
│   ├── requirements.txt
│   ├── res.jpg
│   ├── train.py
│   ├── train.sh
│   └── utils
│       ├── __init__.py
│       ├── constants.py
│       ├── load_images_from_bin.py
│       ├── logging.py
│       ├── plot_theta.py
│       └── visualize.py
├── stream
│   ├── .gitignore
│   ├── client.py
│   ├── imagezmq
│   │   ├── __init__.py
│   │   ├── __version__.py
│   │   └── imagezmq.py
│   ├── server.py
│   ├── static
│   │   ├── base.css
│   │   ├── dialog-polyfill.css
│   │   ├── dialog-polyfill.js
│   │   ├── favicon.ico
│   │   ├── fetch.js
│   │   ├── script.js
│   │   ├── settings.js
│   │   ├── style.css
│   │   └── touchicon.png
│   └── templates
│       ├── base.html
│       ├── imagezmq
│       │   ├── __init__.py
│       │   ├── __version__.py
│       │   └── imagezmq.py
│       ├── index.html
│       ├── realtime.html
│       └── vms.html
├── utils
│   ├── config.py
│   ├── constants.py
│   └── utils.py
└── web
    ├── BaseCamera.py
    ├── app.py
    ├── camera
    │   ├── BaseCamera.py
    │   └── camera_opencv.py
    ├── camera_opencv.py
    ├── imagezmq
    │   ├── __init__.py
    │   ├── __version__.py
    │   └── imagezmq.py
    ├── server.py
    ├── static
    │   ├── favicon.ico
    │   ├── script.js
    │   └── style.css
    └── templates
        ├── _formhelpers.html
        ├── main.html
        ├── realtime.html
        ├── video.html
        └── vms.html
/.gitignore:
--------------------------------------------------------------------------------
1 | #pre-train
2 | backbone/proxyless_cpu.pth
3 |
4 | # Byte-compiled / optimized / DLL files
5 | __pycache__/
6 | *.py[cod]
7 | *$py.class
8 |
9 | # C extensions
10 | *.so
11 |
12 | # Distribution / packaging
13 | .Python
14 | public_test/
15 | src/weights/*
16 | /weights/
17 | *.mp4
18 | *.flv
19 | build/
20 | develop-eggs/
21 | dist/
22 | downloads/
23 | eggs/
24 | .eggs/
25 | lib/
26 | lib64/
27 | parts/
28 | sdist/
29 | var/
30 | wheels/
31 | *.egg-info/
32 | .installed.cfg
33 | *.egg
34 | MANIFEST
35 |
36 | # PyInstaller
37 | # Usually these files are written by a python script from a template
38 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
39 | *.manifest
40 | *.spec
41 |
42 | # Installer logs
43 | pip-log.txt
44 | pip-delete-this-directory.txt
45 |
46 | # Unit test / coverage reports
47 | htmlcov/
48 | .tox/
49 | .coverage
50 | .coverage.*
51 | .cache
52 | nosetests.xml
53 | coverage.xml
54 | *.cover
55 | .hypothesis/
56 | .pytest_cache/
57 |
58 | # Translations
59 | *.mo
60 | *.pot
61 |
62 | # Django stuff:
63 | *.log
64 | local_settings.py
65 | db.sqlite3
66 |
67 | # Flask stuff:
68 | instance/
69 | .webassets-cache
70 |
71 | # Scrapy stuff:
72 | .scrapy
73 |
74 | # Sphinx documentation
75 | docs/_build/
76 |
77 | # PyBuilder
78 | target/
79 |
80 | # Jupyter Notebook
81 | .ipynb_checkpoints
82 |
83 | # pyenv
84 | .python-version
85 |
86 | # celery beat schedule file
87 | celerybeat-schedule
88 |
89 | # SageMath parsed files
90 | *.sage.py
91 |
92 | # Environments
93 | .env
94 | .venv
95 | env/
96 | venv/
97 | ENV/
98 | env.bak/
99 | venv.bak/
100 |
101 | # Spyder project settings
102 | .spyderproject
103 | .spyproject
104 |
105 | # Rope project settings
106 | .ropeproject
107 |
108 | # mkdocs documentation
109 | /site
110 |
111 | # mypy
112 | .mypy_cache/
113 |
--------------------------------------------------------------------------------
/.gitmodules:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nvlong21/Face_Recognize/d01ecf25b5e29d28edf53af93219c7f1805ef308/.gitmodules
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | all:
2 | cd alignment/RetinaFace_Mx/rcnn/cython/; python setup.py build_ext --inplace; rm -rf build; cd ../../
3 | cd alignment/RetinaFace_Mx/rcnn/pycocotools/; python setup.py build_ext --inplace; rm -rf build; cd ../../
4 | clean:
5 | cd alignment/RetinaFace_Mx/rcnn/cython/; rm *.so *.c *.cpp; cd ../../
6 | cd alignment/RetinaFace_Mx/rcnn/pycocotools/; rm *.so; cd ../../
7 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Face_recognize_system
2 | ## Requirements
3 | Python 3.5+
4 |
5 | Command line:
6 | ```
7 | pip3 install -r requirements.txt
8 | ```
9 | ## Usage:
10 | ### Download
11 | ```
12 | git clone https://github.com/vanlong96tg/Face_recognize_pytorch face_recognize
13 | mkdir face_recognize/weights
14 | cd face_recognize/weights
15 | wget https://www.dropbox.com/s/akktsgxp0n8cwn2/model_mobilefacenet.pth?dl=0 -O model_mobilefacenet.pth
16 | wget https://www.dropbox.com/s/kzo52d9neybjxsb/model_ir_se50.pth?dl=0 -O model_ir_se50.pth
17 | wget https://www.dropbox.com/s/rxavczg9dlxy3a8/model_ir50.pth?dl=0 -O model_ir50.pth
18 | ```
19 | ### Python:
20 | Run demo:
21 | ```
22 | cd demo
23 | python infer_on_video.py
24 | ```
25 | Run web_demo:
26 | ```
27 | cd web
28 | python app.py
29 | ```
30 | Run the demo system manager for Raspberry Pi:
31 | ```
32 | cd stream
33 | python client.py -s ip_address
34 | python server.py
35 | ```
36 | ### Training:
37 | * Performance
38 |
39 | |[LFW](https://hal.inria.fr/file/index/docid/321923/filename/Huang_long_eccv2008-lfw.pdf)|[CFP_FF](http://www.cfpw.io/paper.pdf)|[AgeDB](http://openaccess.thecvf.com/content_cvpr_2017_workshops/w33/papers/Moschoglou_AgeDB_The_First_CVPR_2017_paper.pdf)|[Vggface2_FP](https://arxiv.org/pdf/1710.08092.pdf)|
40 | |:---:|:---:|:---:|:---:|
41 | |99.73|99.68|97.32|94.88|
42 |
43 | ### Pretrained models:
44 | * See the 'src' folder.
45 |
46 | ### Acknowledgement
47 | * This repo is inspired by [InsightFace.MXNet](https://github.com/deepinsight/insightface), [InsightFace.PyTorch](https://github.com/TreB1eN/InsightFace_Pytorch), [ArcFace.PyTorch](https://github.com/ronghuaiyang/arcface-pytorch), [MTCNN.MXNet](https://github.com/pangyupo/mxnet_mtcnn_face_detection) and [PretrainedModels.PyTorch](https://github.com/Cadene/pretrained-models.pytorch).
48 | * Training Datasets [Dataset-Zoo](https://github.com/deepinsight/insightface/wiki/Dataset-Zoo)
49 |
--------------------------------------------------------------------------------
/alignment/RetinaFace_Mx/1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nvlong21/Face_Recognize/d01ecf25b5e29d28edf53af93219c7f1805ef308/alignment/RetinaFace_Mx/1.jpg
--------------------------------------------------------------------------------
/alignment/RetinaFace_Mx/2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nvlong21/Face_Recognize/d01ecf25b5e29d28edf53af93219c7f1805ef308/alignment/RetinaFace_Mx/2.jpg
--------------------------------------------------------------------------------
/alignment/RetinaFace_Mx/Makefile:
--------------------------------------------------------------------------------
1 | all:
2 | cd rcnn/cython/; python setup.py build_ext --inplace; rm -rf build; cd ../../
3 | cd rcnn/pycocotools/; python setup.py build_ext --inplace; rm -rf build; cd ../../
4 | clean:
5 | cd rcnn/cython/; rm *.so *.c *.cpp; cd ../../
6 | cd rcnn/pycocotools/; rm *.so; cd ../../
7 |
--------------------------------------------------------------------------------
/alignment/RetinaFace_Mx/README.md:
--------------------------------------------------------------------------------
1 | # RetinaFace Face Detector
2 |
3 | ## Introduction
4 |
5 | RetinaFace is a practical single-stage [SOTA](http://shuoyang1213.me/WIDERFACE/WiderFace_Results.html) face detector, initially described in an [arXiv technical report](https://arxiv.org/abs/1905.00641).
6 |
7 | 
8 |
9 | 
10 |
11 | ## Data
12 |
13 | 1. Download our annotations (face bounding boxes & five facial landmarks) from [baidu cloud](https://pan.baidu.com/s/1Laby0EctfuJGgGMgRRgykA) or [dropbox](https://www.dropbox.com/s/7j70r3eeepe4r2g/retinaface_gt_v1.1.zip?dl=0)
14 |
15 | 2. Download the [WIDERFACE](http://shuoyang1213.me/WIDERFACE/WiderFace_Results.html) dataset.
16 |
17 | 3. Organise the dataset directory under ``insightface/RetinaFace/`` as follows:
18 |
19 | ```Shell
20 | data/retinaface/
21 | train/
22 | images/
23 | label.txt
24 | val/
25 | images/
26 | label.txt
27 | test/
28 | images/
29 | label.txt
30 | ```
31 |
32 | ## Install
33 |
34 | 1. Install MXNet with GPU support.
35 | 2. Install Deformable Convolution V2 operator from [Deformable-ConvNets](https://github.com/msracver/Deformable-ConvNets) if you use the DCN based backbone.
36 | 3. Type ``make`` to build cxx tools.
37 |
38 | ## Training
39 |
40 | Please check ``train.py`` for training.
41 |
42 | 1. Copy ``rcnn/sample_config.py`` to ``rcnn/config.py``
43 | 2. Download pretrained models and put them into ``model/``.
44 |
45 | ImageNet ResNet50 ([baidu cloud](https://pan.baidu.com/s/1WAkU9ZA_j-OmzO-sdk9whA) and [dropbox](https://www.dropbox.com/s/48b850vmnaaasfl/imagenet-resnet-50.zip?dl=0)).
46 |
47 | ImageNet ResNet152 ([baidu cloud](https://pan.baidu.com/s/1nzQ6CzmdKFzg8bM8ChZFQg) and [dropbox](https://www.dropbox.com/s/8ypcra4nqvm32v6/imagenet-resnet-152.zip?dl=0)).
48 |
49 | 3. Start training with ``CUDA_VISIBLE_DEVICES='0,1,2,3' python -u train.py --prefix ./model/retina --network resnet``.
50 | Before training, you can check the ``resnet`` network configuration (e.g. pretrained model path, anchor settings and learning rate policy) in ``rcnn/config.py``.
51 | 4. We have two predefined network settings named ``resnet``(for medium and large models) and ``mnet``(for lightweight models).
52 |
53 | ## Testing
54 |
55 | Please check ``test.py`` for testing.
56 |
57 | ## Models
58 |
59 | Pretrained Model: RetinaFace-R50 ([baidu cloud](https://pan.baidu.com/s/1C6nKq122gJxRhb37vK0_LQ) or [dropbox](https://www.dropbox.com/s/53ftnlarhyrpkg2/retinaface-R50.zip?dl=0)) is a medium size model with ResNet50 backbone.
60 | It can output face bounding boxes and five facial landmarks in a single forward pass.
61 |
62 | WiderFace validation mAP: Easy 96.5, Medium 95.6, Hard 90.4.
63 |
64 | To avoid conflict with the WiderFace Challenge (ICCV 2019), we postpone the release of our best model.
65 |
66 | ## Third-party Models
67 |
68 | [yangfly](https://github.com/yangfly): RetinaFace-MobileNet0.25 ([baidu cloud](https://pan.baidu.com/s/1P1ypO7VYUbNAezdvLm2m9w)).
69 |
70 | WiderFace validation mAP: Hard 82.5. (model size: 1.68Mb)
71 |
72 | ## References
73 |
74 | ```
75 | @inproceedings{yang2016wider,
76 | title = {WIDER FACE: A Face Detection Benchmark},
77 | author = {Yang, Shuo and Luo, Ping and Loy, Chen Change and Tang, Xiaoou},
78 | booktitle = {CVPR},
79 | year = {2016}
80 | }
81 |
82 | @inproceedings{deng2019retinaface,
83 | title={RetinaFace: Single-stage Dense Face Localisation in the Wild},
84 | author={Deng, Jiankang and Guo, Jia and Zhou, Yuxiang and Yu, Jinke and Kotsia, Irene and Zafeiriou, Stefanos},
85 | booktitle={arxiv},
86 | year={2019}
87 | }
88 | ```
89 |
90 |
91 |
--------------------------------------------------------------------------------
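The `RetinaFace` class above is the entry point at inference time. A minimal detection sketch, mirroring the constructor and `detect()` call used in `test.py` in this directory (the model path, threshold, and scales are illustrative):

```python
import cv2
from retinaface import RetinaFace

# prefix, epoch, ctx_id (-1 selects CPU, 0..n a GPU), network name,
# matching the call in test.py; run from alignment/RetinaFace_Mx/ after `make`.
detector = RetinaFace('./model/mnet.25', 0, -1, 'net3')

img = cv2.imread('1.jpg')
# faces: (N, 5) array of [x1, y1, x2, y2, score];
# landmarks: (N, 5, 2) array of five facial points per detection.
faces, landmarks = detector.detect(img, 0.8, scales=[1.0], do_flip=False)
print(faces.shape, landmarks.shape)
```

--------------------------------------------------------------------------------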
/alignment/RetinaFace_Mx/a.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nvlong21/Face_Recognize/d01ecf25b5e29d28edf53af93219c7f1805ef308/alignment/RetinaFace_Mx/a.jpg
--------------------------------------------------------------------------------
/alignment/RetinaFace_Mx/model/mnet.25-0000.params:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nvlong21/Face_Recognize/d01ecf25b5e29d28edf53af93219c7f1805ef308/alignment/RetinaFace_Mx/model/mnet.25-0000.params
--------------------------------------------------------------------------------
/alignment/RetinaFace_Mx/model/mobilenet_0_25-0000.params:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nvlong21/Face_Recognize/d01ecf25b5e29d28edf53af93219c7f1805ef308/alignment/RetinaFace_Mx/model/mobilenet_0_25-0000.params
--------------------------------------------------------------------------------
/alignment/RetinaFace_Mx/rcnn/PY_OP/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nvlong21/Face_Recognize/d01ecf25b5e29d28edf53af93219c7f1805ef308/alignment/RetinaFace_Mx/rcnn/PY_OP/__init__.py
--------------------------------------------------------------------------------
/alignment/RetinaFace_Mx/rcnn/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nvlong21/Face_Recognize/d01ecf25b5e29d28edf53af93219c7f1805ef308/alignment/RetinaFace_Mx/rcnn/__init__.py
--------------------------------------------------------------------------------
/alignment/RetinaFace_Mx/rcnn/core/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nvlong21/Face_Recognize/d01ecf25b5e29d28edf53af93219c7f1805ef308/alignment/RetinaFace_Mx/rcnn/core/__init__.py
--------------------------------------------------------------------------------
/alignment/RetinaFace_Mx/rcnn/core/callback.py:
--------------------------------------------------------------------------------
1 | import mxnet as mx
2 |
3 |
4 | def do_checkpoint(prefix, means, stds):
5 | def _callback(iter_no, sym, arg, aux):
6 | if 'bbox_pred_weight' in arg:
7 | arg['bbox_pred_weight_test'] = (arg['bbox_pred_weight'].T * mx.nd.array(stds)).T
8 | arg['bbox_pred_bias_test'] = arg['bbox_pred_bias'] * mx.nd.array(stds) + mx.nd.array(means)
9 | mx.model.save_checkpoint(prefix, iter_no + 1, sym, arg, aux)
10 | if 'bbox_pred_weight' in arg:
11 | arg.pop('bbox_pred_weight_test')
12 | arg.pop('bbox_pred_bias_test')
13 | return _callback
14 |
--------------------------------------------------------------------------------
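`do_checkpoint` above saves a denormalised copy of the bbox-regression parameters under a `_test` suffix (weights scaled by the target stds, bias rescaled and shifted by the means); `load_param(..., process=True)` in `rcnn/utils/load_model.py` later strips that suffix again, so test-time symbols predict in absolute units. A sketch of the round trip, with illustrative means/stds:

```python
import mxnet as mx

means, stds = [0.0, 0.0, 0.0, 0.0], [0.1, 0.1, 0.2, 0.2]  # illustrative values
arg = {'bbox_pred_weight': mx.nd.ones((4, 8)),
       'bbox_pred_bias': mx.nd.zeros((4,))}

# what do_checkpoint adds just before mx.model.save_checkpoint:
arg['bbox_pred_weight_test'] = (arg['bbox_pred_weight'].T * mx.nd.array(stds)).T
arg['bbox_pred_bias_test'] = arg['bbox_pred_bias'] * mx.nd.array(stds) + mx.nd.array(means)

# what load_param(process=True) undoes after loading:
for k in [k for k in arg if k.endswith('_test')]:
    arg[k.replace('_test', '')] = arg.pop(k)
```

--------------------------------------------------------------------------------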
/alignment/RetinaFace_Mx/rcnn/cython/.gitignore:
--------------------------------------------------------------------------------
1 | *.c
2 | *.cpp
3 | *.so
4 |
--------------------------------------------------------------------------------
/alignment/RetinaFace_Mx/rcnn/cython/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nvlong21/Face_Recognize/d01ecf25b5e29d28edf53af93219c7f1805ef308/alignment/RetinaFace_Mx/rcnn/cython/__init__.py
--------------------------------------------------------------------------------
/alignment/RetinaFace_Mx/rcnn/cython/anchors.pyx:
--------------------------------------------------------------------------------
1 | cimport cython
2 | import numpy as np
3 | cimport numpy as np
4 |
5 | DTYPE = np.float32
6 | ctypedef np.float32_t DTYPE_t
7 |
8 | def anchors_cython(int height, int width, int stride, np.ndarray[DTYPE_t, ndim=2] base_anchors):
9 | """
10 | Parameters
11 | ----------
12 | height: height of plane
13 | width: width of plane
14 | stride: stride of the original image
15 | base_anchors: (A, 4) a base set of anchors
16 | Returns
17 | -------
18 | all_anchors: (height, width, A, 4) ndarray of anchors spreading over the plane
19 | """
20 | cdef unsigned int A = base_anchors.shape[0]
21 | cdef np.ndarray[DTYPE_t, ndim=4] all_anchors = np.zeros((height, width, A, 4), dtype=DTYPE)
22 | cdef unsigned int iw, ih
23 | cdef unsigned int k
24 | cdef unsigned int sh
25 | cdef unsigned int sw
26 | for iw in range(width):
27 | sw = iw * stride
28 | for ih in range(height):
29 | sh = ih * stride
30 | for k in range(A):
31 | all_anchors[ih, iw, k, 0] = base_anchors[k, 0] + sw
32 | all_anchors[ih, iw, k, 1] = base_anchors[k, 1] + sh
33 | all_anchors[ih, iw, k, 2] = base_anchors[k, 2] + sw
34 | all_anchors[ih, iw, k, 3] = base_anchors[k, 3] + sh
35 | return all_anchors
--------------------------------------------------------------------------------
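The Cython kernel above shifts a set of (A, 4) base anchors across every feature-map cell. For sanity checking, or where the compiled extension is unavailable, the same grid can be produced with NumPy broadcasting; a sketch:

```python
import numpy as np

def anchors_plane_np(height, width, stride, base_anchors):
    """NumPy equivalent of anchors_cython: returns (height, width, A, 4)."""
    # (height, width) grids of x/y pixel offsets, one per feature-map cell
    sw, sh = np.meshgrid(np.arange(width) * stride,
                         np.arange(height) * stride)
    shifts = np.stack([sw, sh, sw, sh], axis=-1)   # (height, width, 4)
    # broadcast the A base anchors over every cell
    return (shifts[:, :, None, :] + base_anchors).astype(np.float32)
```

--------------------------------------------------------------------------------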
/alignment/RetinaFace_Mx/rcnn/cython/bbox.pyx:
--------------------------------------------------------------------------------
1 | # --------------------------------------------------------
2 | # Fast R-CNN
3 | # Copyright (c) 2015 Microsoft
4 | # Licensed under The MIT License [see LICENSE for details]
5 | # Written by Sergey Karayev
6 | # --------------------------------------------------------
7 |
8 | cimport cython
9 | import numpy as np
10 | cimport numpy as np
11 |
12 | DTYPE = np.float
13 | ctypedef np.float_t DTYPE_t
14 |
15 | def bbox_overlaps_cython(
16 | np.ndarray[DTYPE_t, ndim=2] boxes,
17 | np.ndarray[DTYPE_t, ndim=2] query_boxes):
18 | """
19 | Parameters
20 | ----------
21 | boxes: (N, 4) ndarray of float
22 | query_boxes: (K, 4) ndarray of float
23 | Returns
24 | -------
25 | overlaps: (N, K) ndarray of overlap between boxes and query_boxes
26 | """
27 | cdef unsigned int N = boxes.shape[0]
28 | cdef unsigned int K = query_boxes.shape[0]
29 | cdef np.ndarray[DTYPE_t, ndim=2] overlaps = np.zeros((N, K), dtype=DTYPE)
30 | cdef DTYPE_t iw, ih, box_area
31 | cdef DTYPE_t ua
32 | cdef unsigned int k, n
33 | for k in range(K):
34 | box_area = (
35 | (query_boxes[k, 2] - query_boxes[k, 0] + 1) *
36 | (query_boxes[k, 3] - query_boxes[k, 1] + 1)
37 | )
38 | for n in range(N):
39 | iw = (
40 | min(boxes[n, 2], query_boxes[k, 2]) -
41 | max(boxes[n, 0], query_boxes[k, 0]) + 1
42 | )
43 | if iw > 0:
44 | ih = (
45 | min(boxes[n, 3], query_boxes[k, 3]) -
46 | max(boxes[n, 1], query_boxes[k, 1]) + 1
47 | )
48 | if ih > 0:
49 | ua = float(
50 | (boxes[n, 2] - boxes[n, 0] + 1) *
51 | (boxes[n, 3] - boxes[n, 1] + 1) +
52 | box_area - iw * ih
53 | )
54 | overlaps[n, k] = iw * ih / ua
55 | return overlaps
56 |
--------------------------------------------------------------------------------
/alignment/RetinaFace_Mx/rcnn/cython/cpu_nms.pyx:
--------------------------------------------------------------------------------
1 | # --------------------------------------------------------
2 | # Fast R-CNN
3 | # Copyright (c) 2015 Microsoft
4 | # Licensed under The MIT License [see LICENSE for details]
5 | # Written by Ross Girshick
6 | # --------------------------------------------------------
7 |
8 | import numpy as np
9 | cimport numpy as np
10 |
11 | cdef inline np.float32_t max(np.float32_t a, np.float32_t b):
12 | return a if a >= b else b
13 |
14 | cdef inline np.float32_t min(np.float32_t a, np.float32_t b):
15 | return a if a <= b else b
16 |
17 | def cpu_nms(np.ndarray[np.float32_t, ndim=2] dets, np.float thresh):
18 | cdef np.ndarray[np.float32_t, ndim=1] x1 = dets[:, 0]
19 | cdef np.ndarray[np.float32_t, ndim=1] y1 = dets[:, 1]
20 | cdef np.ndarray[np.float32_t, ndim=1] x2 = dets[:, 2]
21 | cdef np.ndarray[np.float32_t, ndim=1] y2 = dets[:, 3]
22 | cdef np.ndarray[np.float32_t, ndim=1] scores = dets[:, 4]
23 |
24 | cdef np.ndarray[np.float32_t, ndim=1] areas = (x2 - x1 + 1) * (y2 - y1 + 1)
25 | cdef np.ndarray[np.int_t, ndim=1] order = scores.argsort()[::-1]
26 |
27 | cdef int ndets = dets.shape[0]
28 | cdef np.ndarray[np.int_t, ndim=1] suppressed = \
29 | np.zeros((ndets), dtype=np.int)
30 |
31 | # nominal indices
32 | cdef int _i, _j
33 | # sorted indices
34 | cdef int i, j
35 | # temp variables for box i's (the box currently under consideration)
36 | cdef np.float32_t ix1, iy1, ix2, iy2, iarea
37 | # variables for computing overlap with box j (lower scoring box)
38 | cdef np.float32_t xx1, yy1, xx2, yy2
39 | cdef np.float32_t w, h
40 | cdef np.float32_t inter, ovr
41 |
42 | keep = []
43 | for _i in range(ndets):
44 | i = order[_i]
45 | if suppressed[i] == 1:
46 | continue
47 | keep.append(i)
48 | ix1 = x1[i]
49 | iy1 = y1[i]
50 | ix2 = x2[i]
51 | iy2 = y2[i]
52 | iarea = areas[i]
53 | for _j in range(_i + 1, ndets):
54 | j = order[_j]
55 | if suppressed[j] == 1:
56 | continue
57 | xx1 = max(ix1, x1[j])
58 | yy1 = max(iy1, y1[j])
59 | xx2 = min(ix2, x2[j])
60 | yy2 = min(iy2, y2[j])
61 | w = max(0.0, xx2 - xx1 + 1)
62 | h = max(0.0, yy2 - yy1 + 1)
63 | inter = w * h
64 | ovr = inter / (iarea + areas[j] - inter)
65 | if ovr >= thresh:
66 | suppressed[j] = 1
67 |
68 | return keep
69 |
--------------------------------------------------------------------------------
/alignment/RetinaFace_Mx/rcnn/cython/gpu_nms.hpp:
--------------------------------------------------------------------------------
1 | void _nms(int* keep_out, int* num_out, const float* boxes_host, int boxes_num,
2 | int boxes_dim, float nms_overlap_thresh, int device_id);
3 |
--------------------------------------------------------------------------------
/alignment/RetinaFace_Mx/rcnn/cython/gpu_nms.pyx:
--------------------------------------------------------------------------------
1 | # --------------------------------------------------------
2 | # Faster R-CNN
3 | # Copyright (c) 2015 Microsoft
4 | # Licensed under The MIT License [see LICENSE for details]
5 | # Written by Ross Girshick
6 | # --------------------------------------------------------
7 |
8 | import numpy as np
9 | cimport numpy as np
10 |
11 | assert sizeof(int) == sizeof(np.int32_t)
12 |
13 | cdef extern from "gpu_nms.hpp":
14 | void _nms(np.int32_t*, int*, np.float32_t*, int, int, float, int)
15 |
16 | def gpu_nms(np.ndarray[np.float32_t, ndim=2] dets, np.float thresh,
17 | np.int32_t device_id=0):
18 | cdef int boxes_num = dets.shape[0]
19 | cdef int boxes_dim = dets.shape[1]
20 | cdef int num_out
21 | cdef np.ndarray[np.int32_t, ndim=1] \
22 | keep = np.zeros(boxes_num, dtype=np.int32)
23 | cdef np.ndarray[np.float32_t, ndim=1] \
24 | scores = dets[:, 4]
25 | cdef np.ndarray[np.int_t, ndim=1] \
26 | order = scores.argsort()[::-1]
27 | cdef np.ndarray[np.float32_t, ndim=2] \
28 | sorted_dets = dets[order, :]
29 | _nms(&keep[0], &num_out, &sorted_dets[0, 0], boxes_num, boxes_dim, thresh, device_id)
30 | keep = keep[:num_out]
31 | return list(order[keep])
32 |
--------------------------------------------------------------------------------
/alignment/RetinaFace_Mx/rcnn/dataset/__init__.py:
--------------------------------------------------------------------------------
1 | from .imdb import IMDB
2 | from .retinaface import retinaface
3 |
--------------------------------------------------------------------------------
/alignment/RetinaFace_Mx/rcnn/dataset/ds_utils.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 |
4 | def unique_boxes(boxes, scale=1.0):
5 | """ return indices of unique boxes """
6 | v = np.array([1, 1e3, 1e6, 1e9])
7 | hashes = np.round(boxes * scale).dot(v).astype(np.int)
8 | _, index = np.unique(hashes, return_index=True)
9 | return np.sort(index)
10 |
11 |
12 | def filter_small_boxes(boxes, min_size):
13 | w = boxes[:, 2] - boxes[:, 0]
14 | h = boxes[:, 3] - boxes[:, 1]
15 | keep = np.where((w >= min_size) & (h >= min_size))[0]
16 | return keep
17 |
--------------------------------------------------------------------------------
/alignment/RetinaFace_Mx/rcnn/io/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nvlong21/Face_Recognize/d01ecf25b5e29d28edf53af93219c7f1805ef308/alignment/RetinaFace_Mx/rcnn/io/__init__.py
--------------------------------------------------------------------------------
/alignment/RetinaFace_Mx/rcnn/logger.py:
--------------------------------------------------------------------------------
1 | import logging
2 |
3 | # set up logger
4 | logging.basicConfig()
5 | logger = logging.getLogger()
6 | logger.setLevel(logging.INFO)
7 |
--------------------------------------------------------------------------------
/alignment/RetinaFace_Mx/rcnn/processing/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nvlong21/Face_Recognize/d01ecf25b5e29d28edf53af93219c7f1805ef308/alignment/RetinaFace_Mx/rcnn/processing/__init__.py
--------------------------------------------------------------------------------
/alignment/RetinaFace_Mx/rcnn/processing/assign_levels.py:
--------------------------------------------------------------------------------
1 | from rcnn.config import config
2 | import numpy as np
3 |
4 |
5 | def compute_assign_targets(rois, threshold):
6 | rois_area = np.sqrt((rois[:, 2] - rois[:, 0] + 1) * (rois[:, 3] - rois[:, 1] + 1))
7 | num_rois = np.shape(rois)[0]
8 | assign_levels = np.zeros(num_rois, dtype=np.uint8)
9 | for i, stride in enumerate(config.RCNN_FEAT_STRIDE):
10 | thd = threshold[i]
11 | idx = np.logical_and(thd[1] <= rois_area, rois_area < thd[0])
12 | assign_levels[idx] = stride
13 |
14 | assert 0 not in assign_levels, "All rois should be assigned to specific levels."
15 | return assign_levels
16 |
17 |
18 | def add_assign_targets(roidb):
19 | """
20 | given roidb, add ['assign_levels']
21 | :param roidb: roidb to be processed. must have gone through imdb.prepare_roidb
22 | """
23 | print('add assign targets')
24 | assert len(roidb) > 0
25 | assert 'boxes' in roidb[0]
26 |
27 | area_threshold = [[np.inf, 448],
28 | [448, 224],
29 | [224, 112],
30 | [112, 0]]
31 |
32 | assert len(config.RCNN_FEAT_STRIDE) == len(area_threshold)
33 |
34 | num_images = len(roidb)
35 | for im_i in range(num_images):
36 | rois = roidb[im_i]['boxes']
37 | roidb[im_i]['assign_levels'] = compute_assign_targets(rois, area_threshold)
38 |
--------------------------------------------------------------------------------
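A worked example of the assignment rule above: `rois_area` is the square root of the box area, and each `[upper, lower)` band maps to one entry of `config.RCNN_FEAT_STRIDE` (assumed here to be `[32, 16, 8, 4]`; the real values live in `rcnn/config.py`):

```python
import numpy as np

# one 300x300-pixel RoI -> sqrt-area 300.0
roi = np.array([[0, 0, 299, 299]], dtype=np.float32)
area = np.sqrt((roi[:, 2] - roi[:, 0] + 1) * (roi[:, 3] - roi[:, 1] + 1))

area_threshold = [[np.inf, 448], [448, 224], [224, 112], [112, 0]]
feat_stride = [32, 16, 8, 4]  # assumed ordering, from rcnn/config.py in practice

for stride, (upper, lower) in zip(feat_stride, area_threshold):
    if lower <= area[0] < upper:
        print('assigned stride', stride)  # -> 16: the RoI lands in [224, 448)
```

--------------------------------------------------------------------------------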
/alignment/RetinaFace_Mx/rcnn/processing/generate_anchor.py:
--------------------------------------------------------------------------------
1 | """
2 | Generate base anchors on index 0
3 | """
4 | from __future__ import print_function
5 | import sys
6 | from builtins import range
7 | import numpy as np
8 | from ..cython.anchors import anchors_cython
9 | #from ..config import config
10 |
11 |
12 | def anchors_plane(feat_h, feat_w, stride, base_anchor):
13 | return anchors_cython(feat_h, feat_w, stride, base_anchor)
14 |
15 | def generate_anchors(base_size=16, ratios=[0.5, 1, 2],
16 | scales=2 ** np.arange(3, 6), stride=16, dense_anchor=False):
17 | """
18 | Generate anchor (reference) windows by enumerating aspect ratios X
19 | scales wrt a reference (0, 0, 15, 15) window.
20 | """
21 |
22 | base_anchor = np.array([1, 1, base_size, base_size]) - 1
23 | ratio_anchors = _ratio_enum(base_anchor, ratios)
24 | anchors = np.vstack([_scale_enum(ratio_anchors[i, :], scales)
25 | for i in range(ratio_anchors.shape[0])])
26 | if dense_anchor:
27 | assert stride%2==0
28 | anchors2 = anchors.copy()
29 | anchors2[:,:] += int(stride/2)
30 | anchors = np.vstack( (anchors, anchors2) )
31 | #print('GA',base_anchor.shape, ratio_anchors.shape, anchors.shape)
32 | return anchors
33 |
34 | #def generate_anchors_fpn(base_size=[64,32,16,8,4], ratios=[0.5, 1, 2], scales=8):
35 | # """
36 | # Generate anchor (reference) windows by enumerating aspect ratios X
37 | # scales wrt a reference (0, 0, 15, 15) window.
38 | # """
39 | # anchors = []
40 | # _ratios = ratios.reshape( (len(base_size), -1) )
41 | # _scales = scales.reshape( (len(base_size), -1) )
42 | # for i,bs in enumerate(base_size):
43 | # __ratios = _ratios[i]
44 | # __scales = _scales[i]
45 | # #print('anchors_fpn', bs, __ratios, __scales, file=sys.stderr)
46 | # r = generate_anchors(bs, __ratios, __scales)
47 | # #print('anchors_fpn', r.shape, file=sys.stderr)
48 | # anchors.append(r)
49 | # return anchors
50 |
51 | def generate_anchors_fpn(dense_anchor=False, cfg = None):
52 | #assert(False)
53 | """
54 | Generate anchor (reference) windows by enumerating aspect ratios X
55 | scales wrt a reference (0, 0, 15, 15) window.
56 | """
57 | if cfg is None:
58 | from ..config import config
59 | cfg = config.RPN_ANCHOR_CFG
60 | RPN_FEAT_STRIDE = []
61 | for k in cfg:
62 | RPN_FEAT_STRIDE.append( int(k) )
63 | RPN_FEAT_STRIDE = sorted(RPN_FEAT_STRIDE, reverse=True)
64 | anchors = []
65 | for k in RPN_FEAT_STRIDE:
66 | v = cfg[str(k)]
67 | bs = v['BASE_SIZE']
68 | __ratios = np.array(v['RATIOS'])
69 | __scales = np.array(v['SCALES'])
70 | stride = int(k)
71 | #print('anchors_fpn', bs, __ratios, __scales, file=sys.stderr)
72 | r = generate_anchors(bs, __ratios, __scales, stride, dense_anchor)
73 | #print('anchors_fpn', r.shape, file=sys.stderr)
74 | anchors.append(r)
75 |
76 | return anchors
77 |
78 | def _whctrs(anchor):
79 | """
80 | Return width, height, x center, and y center for an anchor (window).
81 | """
82 |
83 | w = anchor[2] - anchor[0] + 1
84 | h = anchor[3] - anchor[1] + 1
85 | x_ctr = anchor[0] + 0.5 * (w - 1)
86 | y_ctr = anchor[1] + 0.5 * (h - 1)
87 | return w, h, x_ctr, y_ctr
88 |
89 |
90 | def _mkanchors(ws, hs, x_ctr, y_ctr):
91 | """
92 | Given a vector of widths (ws) and heights (hs) around a center
93 | (x_ctr, y_ctr), output a set of anchors (windows).
94 | """
95 |
96 | ws = ws[:, np.newaxis]
97 | hs = hs[:, np.newaxis]
98 | anchors = np.hstack((x_ctr - 0.5 * (ws - 1),
99 | y_ctr - 0.5 * (hs - 1),
100 | x_ctr + 0.5 * (ws - 1),
101 | y_ctr + 0.5 * (hs - 1)))
102 | return anchors
103 |
104 |
105 | def _ratio_enum(anchor, ratios):
106 | """
107 | Enumerate a set of anchors for each aspect ratio wrt an anchor.
108 | """
109 |
110 | w, h, x_ctr, y_ctr = _whctrs(anchor)
111 | size = w * h
112 | size_ratios = size / ratios
113 | ws = np.round(np.sqrt(size_ratios))
114 | hs = np.round(ws * ratios)
115 | anchors = _mkanchors(ws, hs, x_ctr, y_ctr)
116 | return anchors
117 |
118 |
119 | def _scale_enum(anchor, scales):
120 | """
121 | Enumerate a set of anchors for each scale wrt an anchor.
122 | """
123 |
124 | w, h, x_ctr, y_ctr = _whctrs(anchor)
125 | ws = w * scales
126 | hs = h * scales
127 | anchors = _mkanchors(ws, hs, x_ctr, y_ctr)
128 | return anchors
129 |
--------------------------------------------------------------------------------
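With its defaults, `generate_anchors()` reproduces the nine classic Faster R-CNN reference windows (three aspect ratios x three scales, centred on the (0, 0, 15, 15) cell). A quick check, assuming the cython extensions have been built with `make` so the module imports cleanly:

```python
from rcnn.processing.generate_anchor import generate_anchors

anchors = generate_anchors()  # base_size=16, ratios [0.5, 1, 2], scales 2**(3..5)
print(anchors.shape)          # (9, 4): one [x1, y1, x2, y2] row per ratio/scale
print(anchors[0])             # [-84. -40.  99.  55.] -- ratio 0.5 at scale 8
```

--------------------------------------------------------------------------------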
/alignment/RetinaFace_Mx/rcnn/processing/nms.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from ..cython.cpu_nms import cpu_nms
3 | try:
4 | from ..cython.gpu_nms import gpu_nms
5 | except ImportError:
6 | gpu_nms = None
7 |
8 |
9 | def py_nms_wrapper(thresh):
10 | def _nms(dets):
11 | return nms(dets, thresh)
12 | return _nms
13 |
14 |
15 | def cpu_nms_wrapper(thresh):
16 | def _nms(dets):
17 | return cpu_nms(dets, thresh)
18 | return _nms
19 |
20 |
21 | def gpu_nms_wrapper(thresh, device_id):
22 | def _nms(dets):
23 | return gpu_nms(dets, thresh, device_id)
24 | if gpu_nms is not None:
25 | return _nms
26 | else:
27 | return cpu_nms_wrapper(thresh)
28 |
29 |
30 | def nms(dets, thresh):
31 | """
32 | greedily select boxes with high confidence and overlap with current maximum <= thresh
33 | rule out overlap >= thresh
34 | :param dets: [[x1, y1, x2, y2 score]]
35 | :param thresh: retain overlap < thresh
36 | :return: indexes to keep
37 | """
38 | x1 = dets[:, 0]
39 | y1 = dets[:, 1]
40 | x2 = dets[:, 2]
41 | y2 = dets[:, 3]
42 | scores = dets[:, 4]
43 |
44 | areas = (x2 - x1 + 1) * (y2 - y1 + 1)
45 | order = scores.argsort()[::-1]
46 |
47 | keep = []
48 | while order.size > 0:
49 | i = order[0]
50 | keep.append(i)
51 | xx1 = np.maximum(x1[i], x1[order[1:]])
52 | yy1 = np.maximum(y1[i], y1[order[1:]])
53 | xx2 = np.minimum(x2[i], x2[order[1:]])
54 | yy2 = np.minimum(y2[i], y2[order[1:]])
55 |
56 | w = np.maximum(0.0, xx2 - xx1 + 1)
57 | h = np.maximum(0.0, yy2 - yy1 + 1)
58 | inter = w * h
59 | ovr = inter / (areas[i] + areas[order[1:]] - inter)
60 |
61 | inds = np.where(ovr <= thresh)[0]
62 | order = order[inds + 1]
63 |
64 | return keep
65 |
--------------------------------------------------------------------------------
/alignment/RetinaFace_Mx/rcnn/pycocotools/UPSTREAM_REV:
--------------------------------------------------------------------------------
1 | https://github.com/pdollar/coco/commit/336d2a27c91e3c0663d2dcf0b13574674d30f88e
2 |
--------------------------------------------------------------------------------
/alignment/RetinaFace_Mx/rcnn/pycocotools/__init__.py:
--------------------------------------------------------------------------------
1 | __author__ = 'tylin'
2 |
--------------------------------------------------------------------------------
/alignment/RetinaFace_Mx/rcnn/pycocotools/mask.py:
--------------------------------------------------------------------------------
1 | __author__ = 'tsungyi'
2 |
3 | from rcnn.pycocotools import _mask
4 |
5 | # Interface for manipulating masks stored in RLE format.
6 | #
7 | # RLE is a simple yet efficient format for storing binary masks. RLE
8 | # first divides a vector (or vectorized image) into a series of piecewise
9 | # constant regions and then for each piece simply stores the length of
10 | # that piece. For example, given M=[0 0 1 1 1 0 1] the RLE counts would
11 | # be [2 3 1 1], or for M=[1 1 1 1 1 1 0] the counts would be [0 6 1]
12 | # (note that the odd counts are always the numbers of zeros). Instead of
13 | # storing the counts directly, additional compression is achieved with a
14 | # variable bitrate representation based on a common scheme called LEB128.
15 | #
16 | # Compression is greatest given large piecewise constant regions.
17 | # Specifically, the size of the RLE is proportional to the number of
18 | # *boundaries* in M (or for an image the number of boundaries in the y
19 | # direction). Assuming fairly simple shapes, the RLE representation is
20 | # O(sqrt(n)) where n is number of pixels in the object. Hence space usage
21 | # is substantially lower, especially for large simple objects (large n).
22 | #
23 | # Many common operations on masks can be computed directly using the RLE
24 | # (without need for decoding). This includes computations such as area,
25 | # union, intersection, etc. All of these operations are linear in the
26 | # size of the RLE, in other words they are O(sqrt(n)) where n is the area
27 | # of the object. Computing these operations on the original mask is O(n).
28 | # Thus, using the RLE can result in substantial computational savings.
29 | #
30 | # The following API functions are defined:
31 | # encode - Encode binary masks using RLE.
32 | # decode - Decode binary masks encoded via RLE.
33 | # merge - Compute union or intersection of encoded masks.
34 | # iou - Compute intersection over union between masks.
35 | # area - Compute area of encoded masks.
36 | # toBbox - Get bounding boxes surrounding encoded masks.
37 | # frPyObjects - Convert polygon, bbox, and uncompressed RLE to encoded RLE mask.
38 | #
39 | # Usage:
40 | # Rs = encode( masks )
41 | # masks = decode( Rs )
42 | # R = merge( Rs, intersect=false )
43 | # o = iou( dt, gt, iscrowd )
44 | # a = area( Rs )
45 | # bbs = toBbox( Rs )
46 | # Rs = frPyObjects( [pyObjects], h, w )
47 | #
48 | # In the API the following formats are used:
49 | # Rs - [dict] Run-length encoding of binary masks
50 | # R - dict Run-length encoding of binary mask
51 | # masks - [hxwxn] Binary mask(s) (must have type np.ndarray(dtype=uint8) in column-major order)
52 | # iscrowd - [nx1] list of np.ndarray. 1 indicates corresponding gt image has crowd region to ignore
53 | # bbs - [nx4] Bounding box(es) stored as [x y w h]
54 | # poly - Polygon stored as [[x1 y1 x2 y2...],[x1 y1 ...],...] (2D list)
55 | # dt,gt - May be either bounding boxes or encoded masks
56 | # Both poly and bbs are 0-indexed (bbox=[0 0 1 1] encloses first pixel).
57 | #
58 | # Finally, a note about the intersection over union (iou) computation.
59 | # The standard iou of a ground truth (gt) and detected (dt) object is
60 | # iou(gt,dt) = area(intersect(gt,dt)) / area(union(gt,dt))
61 | # For "crowd" regions, we use a modified criteria. If a gt object is
62 | # marked as "iscrowd", we allow a dt to match any subregion of the gt.
63 | # Choosing gt' in the crowd gt that best matches the dt can be done using
64 | # gt'=intersect(dt,gt). Since by definition union(gt',dt)=dt, computing
65 | # iou(gt,dt,iscrowd) = iou(gt',dt) = area(intersect(gt,dt)) / area(dt)
66 | # For crowd gt regions we use this modified criteria above for the iou.
67 | #
68 | # To compile run "python setup.py build_ext --inplace"
69 | # Please do not contact us for help with compiling.
70 | #
71 | # Microsoft COCO Toolbox. version 2.0
72 | # Data, paper, and tutorials available at: http://mscoco.org/
73 | # Code written by Piotr Dollar and Tsung-Yi Lin, 2015.
74 | # Licensed under the Simplified BSD License [see coco/license.txt]
75 |
76 | iou = _mask.iou
77 | merge = _mask.merge
78 | frPyObjects = _mask.frPyObjects
79 |
80 | def encode(bimask):
81 | if len(bimask.shape) == 3:
82 | return _mask.encode(bimask)
83 | elif len(bimask.shape) == 2:
84 | h, w = bimask.shape
85 | return _mask.encode(bimask.reshape((h, w, 1), order='F'))[0]
86 |
87 | def decode(rleObjs):
88 | if type(rleObjs) == list:
89 | return _mask.decode(rleObjs)
90 | else:
91 | return _mask.decode([rleObjs])[:,:,0]
92 |
93 | def area(rleObjs):
94 | if type(rleObjs) == list:
95 | return _mask.area(rleObjs)
96 | else:
97 | return _mask.area([rleObjs])[0]
98 |
99 | def toBbox(rleObjs):
100 | if type(rleObjs) == list:
101 | return _mask.toBbox(rleObjs)
102 | else:
103 | return _mask.toBbox([rleObjs])[0]
104 |
--------------------------------------------------------------------------------
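The RLE counts format documented above is easy to verify by hand. Below is a tiny pure-Python encoder for a binary vector, as an illustration of the counts convention only; the real `_mask` extension works on column-major images and LEB128-compressed strings:

```python
def rle_counts(bits):
    """Run lengths of a binary vector; the first count is the leading
    zero run, so a vector starting with 1 yields an initial 0."""
    counts, prev, run = [], 0, 0
    for b in bits:
        if b == prev:
            run += 1
        else:
            counts.append(run)
            prev, run = b, 1
    counts.append(run)
    return counts

print(rle_counts([0, 0, 1, 1, 1, 0, 1]))   # [2, 3, 1, 1]
print(rle_counts([1, 1, 1, 1, 1, 1, 0]))   # [0, 6, 1]
```

--------------------------------------------------------------------------------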
/alignment/RetinaFace_Mx/rcnn/pycocotools/maskApi.h:
--------------------------------------------------------------------------------
1 | /**************************************************************************
2 | * Microsoft COCO Toolbox. version 2.0
3 | * Data, paper, and tutorials available at: http://mscoco.org/
4 | * Code written by Piotr Dollar and Tsung-Yi Lin, 2015.
5 | * Licensed under the Simplified BSD License [see coco/license.txt]
6 | **************************************************************************/
7 | #pragma once
8 |
9 | typedef unsigned int uint;
10 | typedef unsigned long siz;
11 | typedef unsigned char byte;
12 | typedef double* BB;
13 | typedef struct { siz h, w, m; uint *cnts; } RLE;
14 |
15 | /* Initialize/destroy RLE. */
16 | void rleInit( RLE *R, siz h, siz w, siz m, uint *cnts );
17 | void rleFree( RLE *R );
18 |
19 | /* Initialize/destroy RLE array. */
20 | void rlesInit( RLE **R, siz n );
21 | void rlesFree( RLE **R, siz n );
22 |
23 | /* Encode binary masks using RLE. */
24 | void rleEncode( RLE *R, const byte *mask, siz h, siz w, siz n );
25 |
26 | /* Decode binary masks encoded via RLE. */
27 | void rleDecode( const RLE *R, byte *mask, siz n );
28 |
29 | /* Compute union or intersection of encoded masks. */
30 | void rleMerge( const RLE *R, RLE *M, siz n, int intersect );
31 |
32 | /* Compute area of encoded masks. */
33 | void rleArea( const RLE *R, siz n, uint *a );
34 |
35 | /* Compute intersection over union between masks. */
36 | void rleIou( RLE *dt, RLE *gt, siz m, siz n, byte *iscrowd, double *o );
37 |
38 | /* Compute non-maximum suppression between bounding masks */
39 | void rleNms( RLE *dt, siz n, uint *keep, double thr );
40 |
41 | /* Compute intersection over union between bounding boxes. */
42 | void bbIou( BB dt, BB gt, siz m, siz n, byte *iscrowd, double *o );
43 |
44 | /* Compute non-maximum suppression between bounding boxes */
45 | void bbNms( BB dt, siz n, uint *keep, double thr );
46 |
47 | /* Get bounding boxes surrounding encoded masks. */
48 | void rleToBbox( const RLE *R, BB bb, siz n );
49 |
50 | /* Convert bounding boxes to encoded masks. */
51 | void rleFrBbox( RLE *R, const BB bb, siz h, siz w, siz n );
52 |
53 | /* Convert polygon to encoded mask. */
54 | void rleFrPoly( RLE *R, const double *xy, siz k, siz h, siz w );
55 |
56 | /* Get compressed string representation of encoded mask. */
57 | char* rleToString( const RLE *R );
58 |
59 | /* Convert from compressed string representation of encoded mask. */
60 | void rleFrString( RLE *R, char *s, siz h, siz w );
61 |
--------------------------------------------------------------------------------
/alignment/RetinaFace_Mx/rcnn/pycocotools/setup.py:
--------------------------------------------------------------------------------
1 | from distutils.core import setup
2 | from Cython.Build import cythonize
3 | from distutils.extension import Extension
4 | import numpy as np
5 |
6 | # To compile and install locally run "python setup.py build_ext --inplace"
7 | # To install library to Python site-packages run "python setup.py build_ext install"
8 |
9 | ext_modules = [
10 | Extension(
11 | '_mask',
12 | sources=['maskApi.c', '_mask.pyx'],
13 | include_dirs=[np.get_include()],
14 | extra_compile_args=['-Wno-cpp', '-Wno-unused-function', '-std=c99'],
15 | )
16 | ]
17 |
18 | setup(name='pycocotools',
19 | ext_modules=cythonize(ext_modules)
20 | )
21 |
--------------------------------------------------------------------------------
/alignment/RetinaFace_Mx/rcnn/symbol/__init__.py:
--------------------------------------------------------------------------------
1 | from .symbol_ssh import *
2 | from .symbol_mnet import *
3 | from .symbol_resnet import *
4 |
--------------------------------------------------------------------------------
/alignment/RetinaFace_Mx/rcnn/tools/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nvlong21/Face_Recognize/d01ecf25b5e29d28edf53af93219c7f1805ef308/alignment/RetinaFace_Mx/rcnn/tools/__init__.py
--------------------------------------------------------------------------------
/alignment/RetinaFace_Mx/rcnn/tools/reeval.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | try:
3 | import cPickle as pickle
4 | except ImportError:
5 | import pickle
6 | import os
7 | import mxnet as mx
8 |
9 | from ..logger import logger
10 | from ..config import config, default, generate_config
11 | from ..dataset import *
12 |
13 |
14 | def reeval(args):
15 | # load imdb
16 | imdb = eval(args.dataset)(args.image_set, args.root_path, args.dataset_path)
17 |
18 | # load detection results
19 | cache_file = os.path.join(imdb.cache_path, imdb.name, 'detections.pkl')
20 | with open(cache_file, 'rb') as f:
21 | detections = pickle.load(f)
22 |
23 | # eval
24 | imdb.evaluate_detections(detections)
25 |
26 |
27 | def parse_args():
28 | parser = argparse.ArgumentParser(description='imdb test')
29 | # general
30 | parser.add_argument('--network', help='network name', default=default.network, type=str)
31 | parser.add_argument('--dataset', help='dataset name', default=default.dataset, type=str)
32 | args, rest = parser.parse_known_args()
33 | generate_config(args.network, args.dataset)
34 | parser.add_argument('--image_set', help='image_set name', default=default.image_set, type=str)
35 | parser.add_argument('--root_path', help='output data folder', default=default.root_path, type=str)
36 | parser.add_argument('--dataset_path', help='dataset path', default=default.dataset_path, type=str)
37 | # other
38 | parser.add_argument('--no_shuffle', help='disable random shuffle', action='store_true')
39 | args = parser.parse_args()
40 | return args
41 |
42 |
43 | def main():
44 | args = parse_args()
45 | logger.info('Called with argument: %s' % args)
46 | reeval(args)
47 |
48 |
49 | if __name__ == '__main__':
50 | main()
51 |
--------------------------------------------------------------------------------
/alignment/RetinaFace_Mx/rcnn/tools/test_rpn.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import pprint
3 | import mxnet as mx
4 |
5 | from ..logger import logger
6 | from ..config import config, default, generate_config
7 | from ..symbol import *
8 | from ..dataset import *
9 | from ..core.loader import TestLoader
10 | from ..core.tester import Predictor, generate_proposals, test_proposals
11 | from ..utils.load_model import load_param
12 |
13 |
14 | def test_rpn(network, dataset, image_set, root_path, dataset_path,
15 | ctx, prefix, epoch,
16 | vis, shuffle, thresh, test_output=False):
17 | # rpn generate proposal config
18 | config.TEST.HAS_RPN = True
19 |
20 | # print config
21 | logger.info(pprint.pformat(config))
22 |
23 | # load symbol
24 | sym = eval('get_' + network + '_rpn_test')()
25 |
26 | # load dataset and prepare imdb for training
27 | imdb = eval(dataset)(image_set, root_path, dataset_path)
28 | roidb = imdb.gt_roidb()
29 | test_data = TestLoader(roidb, batch_size=1, shuffle=shuffle, has_rpn=True, withlabel=True)
30 |
31 | # load model
32 | arg_params, aux_params = load_param(prefix, epoch, convert=True, ctx=ctx)
33 |
34 | # infer shape
35 | data_shape_dict = dict(test_data.provide_data)
36 | arg_shape, _, aux_shape = sym.infer_shape(**data_shape_dict)
37 | arg_shape_dict = dict(zip(sym.list_arguments(), arg_shape))
38 | aux_shape_dict = dict(zip(sym.list_auxiliary_states(), aux_shape))
39 |
40 | # check parameters
41 | for k in sym.list_arguments():
42 | if k in data_shape_dict or 'label' in k:
43 | continue
44 | assert k in arg_params, k + ' not initialized'
45 | assert arg_params[k].shape == arg_shape_dict[k], \
46 | 'shape inconsistent for ' + k + ' inferred ' + str(arg_shape_dict[k]) + ' provided ' + str(arg_params[k].shape)
47 | for k in sym.list_auxiliary_states():
48 | assert k in aux_params, k + ' not initialized'
49 | assert aux_params[k].shape == aux_shape_dict[k], \
50 | 'shape inconsistent for ' + k + ' inferred ' + str(aux_shape_dict[k]) + ' provided ' + str(aux_params[k].shape)
51 |
52 | # decide maximum shape
53 | data_names = [k[0] for k in test_data.provide_data]
54 | label_names = None if test_data.provide_label is None else [k[0] for k in test_data.provide_label]
55 | max_data_shape = [('data', (1, 3, max([v[1] for v in config.SCALES]), max([v[1] for v in config.SCALES])))]
56 |
57 | # create predictor
58 | predictor = Predictor(sym, data_names, label_names,
59 | context=ctx, max_data_shapes=max_data_shape,
60 | provide_data=test_data.provide_data, provide_label=test_data.provide_label,
61 | arg_params=arg_params, aux_params=aux_params)
62 |
63 | # start testing
64 | if not test_output:
65 | imdb_boxes = generate_proposals(predictor, test_data, imdb, vis=vis, thresh=thresh)
66 | imdb.evaluate_recall(roidb, candidate_boxes=imdb_boxes)
67 | else:
68 | test_proposals(predictor, test_data, imdb, roidb, vis=vis)
69 |
70 |
71 | def parse_args():
72 | parser = argparse.ArgumentParser(description='Test a Region Proposal Network')
73 | # general
74 | parser.add_argument('--network', help='network name', default=default.network, type=str)
75 | parser.add_argument('--dataset', help='dataset name', default=default.dataset, type=str)
76 | args, rest = parser.parse_known_args()
77 | generate_config(args.network, args.dataset)
78 | parser.add_argument('--image_set', help='image_set name', default=default.test_image_set, type=str)
79 | parser.add_argument('--root_path', help='output data folder', default=default.root_path, type=str)
80 | parser.add_argument('--dataset_path', help='dataset path', default=default.dataset_path, type=str)
81 | # testing
82 | parser.add_argument('--prefix', help='model to test with', default=default.rpn_prefix, type=str)
83 | parser.add_argument('--epoch', help='model to test with', default=default.rpn_epoch, type=int)
84 | # rpn
85 | parser.add_argument('--gpu', help='GPU device to test with', default=0, type=int)
86 | parser.add_argument('--vis', help='turn on visualization', action='store_true')
87 | parser.add_argument('--thresh', help='rpn proposal threshold', default=0, type=float)
88 | parser.add_argument('--shuffle', help='shuffle data on visualization', action='store_true')
89 | args = parser.parse_args()
90 | return args
91 |
92 |
93 | def main():
94 | args = parse_args()
95 | logger.info('Called with argument: %s' % args)
96 | ctx = mx.gpu(args.gpu)
97 | test_rpn(args.network, args.dataset, args.image_set, args.root_path, args.dataset_path,
98 | ctx, args.prefix, args.epoch,
99 | args.vis, args.shuffle, args.thresh)
100 |
101 | if __name__ == '__main__':
102 | main()
103 |
--------------------------------------------------------------------------------
/alignment/RetinaFace_Mx/rcnn/utils/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nvlong21/Face_Recognize/d01ecf25b5e29d28edf53af93219c7f1805ef308/alignment/RetinaFace_Mx/rcnn/utils/__init__.py
--------------------------------------------------------------------------------
/alignment/RetinaFace_Mx/rcnn/utils/combine_model.py:
--------------------------------------------------------------------------------
1 | from .load_model import load_checkpoint
2 | from .save_model import save_checkpoint
3 |
4 |
5 | def combine_model(prefix1, epoch1, prefix2, epoch2, prefix_out, epoch_out):
6 | args1, auxs1 = load_checkpoint(prefix1, epoch1)
7 | args2, auxs2 = load_checkpoint(prefix2, epoch2)
8 | arg_names = list(args1.keys()) + list(args2.keys())
9 | aux_names = list(auxs1.keys()) + list(auxs2.keys())
10 | args = dict()
11 | for arg in arg_names:
12 | if arg in args1:
13 | args[arg] = args1[arg]
14 | else:
15 | args[arg] = args2[arg]
16 | auxs = dict()
17 | for aux in aux_names:
18 | if aux in auxs1:
19 | auxs[aux] = auxs1[aux]
20 | else:
21 | auxs[aux] = auxs2[aux]
22 | save_checkpoint(prefix_out, epoch_out, args, auxs)
23 |
--------------------------------------------------------------------------------
/alignment/RetinaFace_Mx/rcnn/utils/load_data.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from ..logger import logger
3 | from ..config import config
4 | from ..dataset import *
5 |
6 |
7 | def load_gt_roidb(dataset_name, image_set_name, root_path, dataset_path,
8 | flip=False):
9 | """ load ground truth roidb """
10 | imdb = eval(dataset_name)(image_set_name, root_path, dataset_path)
11 | roidb = imdb.gt_roidb()
12 | print('roidb size', len(roidb))
13 | if flip:
14 | roidb = imdb.append_flipped_images(roidb)
15 | print('flipped roidb size', len(roidb))
16 | return roidb
17 |
18 |
19 | def load_proposal_roidb(dataset_name, image_set_name, root_path, dataset_path,
20 | proposal='rpn', append_gt=True, flip=False):
21 | """ load proposal roidb (append_gt when training) """
22 | imdb = eval(dataset_name)(image_set_name, root_path, dataset_path)
23 | gt_roidb = imdb.gt_roidb()
24 | roidb = eval('imdb.' + proposal + '_roidb')(gt_roidb, append_gt)
25 | if flip:
26 | roidb = imdb.append_flipped_images(roidb)
27 | return roidb
28 |
29 |
30 | def merge_roidb(roidbs):
31 | """ roidb are list, concat them together """
32 | roidb = roidbs[0]
33 | for r in roidbs[1:]:
34 | roidb.extend(r)
35 | return roidb
36 |
37 |
38 | def filter_roidb(roidb):
39 | """ remove roidb entries without usable rois """
40 |
41 | def is_valid(entry):
42 | """ valid images have at least 1 fg or bg roi """
43 | overlaps = entry['max_overlaps']
44 | fg_inds = np.where(overlaps >= config.TRAIN.FG_THRESH)[0]
45 | bg_inds = np.where((overlaps < config.TRAIN.BG_THRESH_HI) & (overlaps >= config.TRAIN.BG_THRESH_LO))[0]
46 | valid = len(fg_inds) > 0 or len(bg_inds) > 0
47 | #valid = len(fg_inds) > 0
48 | return valid
49 |
50 | num = len(roidb)
51 | filtered_roidb = [entry for entry in roidb if is_valid(entry)]
52 | num_after = len(filtered_roidb)
53 | logger.info('load data: filtered %d roidb entries: %d -> %d' % (num - num_after, num, num_after))
54 |
55 | return filtered_roidb
56 |
--------------------------------------------------------------------------------
/alignment/RetinaFace_Mx/rcnn/utils/load_model.py:
--------------------------------------------------------------------------------
1 | import mxnet as mx
2 |
3 |
4 | def load_checkpoint(prefix, epoch):
5 | """
6 | Load model checkpoint from file.
7 | :param prefix: Prefix of model name.
8 | :param epoch: Epoch number of model we would like to load.
9 | :return: (arg_params, aux_params)
10 | arg_params : dict of str to NDArray
11 | Model parameter, dict of name to NDArray of net's weights.
12 | aux_params : dict of str to NDArray
13 | Model parameter, dict of name to NDArray of net's auxiliary states.
14 | """
15 | save_dict = mx.nd.load('%s-%04d.params' % (prefix, epoch))
16 | arg_params = {}
17 | aux_params = {}
18 | for k, v in save_dict.items():
19 | tp, name = k.split(':', 1)
20 | if tp == 'arg':
21 | arg_params[name] = v
22 | if tp == 'aux':
23 | aux_params[name] = v
24 | return arg_params, aux_params
25 |
26 |
27 | def convert_context(params, ctx):
28 | """
29 | :param params: dict of str to NDArray
30 | :param ctx: the context to convert to
31 | :return: dict of str of NDArray with context ctx
32 | """
33 | new_params = dict()
34 | for k, v in params.items():
35 | new_params[k] = v.as_in_context(ctx)
36 | return new_params
37 |
38 |
39 | def load_param(prefix, epoch, convert=False, ctx=None, process=False):
40 | """
41 | wrapper around load_checkpoint
42 | :param prefix: Prefix of model name.
43 | :param epoch: Epoch number of model we would like to load.
44 | :param convert: if True, convert the loaded parameters to NDArrays on the given context
45 | :param ctx: target context when convert is True; defaults to mx.cpu()
46 | :param process: if True, rename parameters ending in '_test' by dropping the suffix
47 | :return: (arg_params, aux_params)
48 | """
49 | arg_params, aux_params = load_checkpoint(prefix, epoch)
50 | if convert:
51 | if ctx is None:
52 | ctx = mx.cpu()
53 | arg_params = convert_context(arg_params, ctx)
54 | aux_params = convert_context(aux_params, ctx)
55 | if process:
56 | tests = [k for k in arg_params.keys() if '_test' in k]
57 | for test in tests:
58 | arg_params[test.replace('_test', '')] = arg_params.pop(test)
59 | return arg_params, aux_params
60 |
--------------------------------------------------------------------------------
/alignment/RetinaFace_Mx/rcnn/utils/save_model.py:
--------------------------------------------------------------------------------
1 | import mxnet as mx
2 |
3 |
4 | def save_checkpoint(prefix, epoch, arg_params, aux_params):
5 | """Checkpoint the model data into file.
6 | :param prefix: Prefix of model name.
7 | :param epoch: The epoch number of the model.
8 | :param arg_params: dict of str to NDArray
9 | Model parameter, dict of name to NDArray of net's weights.
10 | :param aux_params: dict of str to NDArray
11 | Model parameter, dict of name to NDArray of net's auxiliary states.
12 | :return: None
13 | prefix-epoch.params will be saved for parameters.
14 | """
15 | save_dict = {('arg:%s' % k) : v for k, v in arg_params.items()}
16 | save_dict.update({('aux:%s' % k) : v for k, v in aux_params.items()})
17 | param_name = '%s-%04d.params' % (prefix, epoch)
18 | mx.nd.save(param_name, save_dict)
19 |
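A minimal round-trip sketch, not part of the repository, combining `load_param` and `save_checkpoint`; the `model` prefix and epoch are placeholders:

```python
import mxnet as mx
from rcnn.utils.load_model import load_param
from rcnn.utils.save_model import save_checkpoint

# load epoch 0 of a hypothetical 'model' prefix onto the CPU,
# folding any '*_test' parameters back into their training names
arg_params, aux_params = load_param('model', 0, convert=True, ctx=mx.cpu(), process=True)

# writes 'model-deploy-0000.params'
save_checkpoint('model-deploy', 0, arg_params, aux_params)
```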
--------------------------------------------------------------------------------
/alignment/RetinaFace_Mx/test.py:
--------------------------------------------------------------------------------
1 | import cv2
2 | import sys
3 | import numpy as np
4 | import datetime
5 | import os
6 | import glob
7 | from retinaface import RetinaFace
8 | import mxnet as mx
9 | import time
10 | thresh = 0.8
11 | scales = [1024, 1024]
12 |
13 | count = 1
14 |
15 | gpuid = -1  # -1 selects the CPU context
16 | context = mx.cpu()
17 | detector = RetinaFace('./model/mnet.25', 0, gpuid, 'net3')
18 | stream = cv2.VideoCapture('rtsp://admin:a1b2c3d4@@10.0.20.226:554/profile2/media.smp')
19 | stream.set(cv2.CAP_PROP_FRAME_WIDTH, 1024)
20 | stream.set(cv2.CAP_PROP_FRAME_HEIGHT, 1024)
21 | _, img = stream.read()
22 | im_shape = img.shape
23 | target_size = scales[0]
24 | max_size = scales[1]
25 | im_size_min = np.min(im_shape[0:2])
26 | im_size_max = np.max(im_shape[0:2])
27 | #im_scale = 1.0
28 | #if im_size_min>target_size or im_size_max>max_size:
29 | im_scale = float(target_size) / float(im_size_min)
30 | # prevent bigger axis from being more than max_size:
31 | if np.round(im_scale * im_size_max) > max_size:
32 | im_scale = float(max_size) / float(im_size_max)
33 |
34 | print('im_scale', im_scale)
35 |
36 | scales = [im_scale]
37 | while 1:
38 | ret, img = stream.read()
39 | img_base = img.copy()
40 | flip = False
41 | t = time.time()
42 |
43 | for c in range(count):
44 | faces, landmarks = detector.detect(img, thresh, scales=scales, do_flip=flip)
45 | print(c, faces.shape, landmarks.shape)
46 |
47 | if faces is not None:
48 | print('find', faces.shape[0], 'faces')
49 | for i in range(faces.shape[0]):
50 | #print('score', faces[i][4])
51 | box = faces[i].astype(np.int)
52 | #color = (255,0,0)
53 | color = (0,0,255)
54 | cv2.rectangle(img_base, (box[0], box[1]), (box[2], box[3]), color, 2)
55 | if landmarks is not None:
56 | landmark5 = landmarks[i].astype(np.int)
57 | #print(landmark.shape)
58 | for l in range(landmark5.shape[0]):
59 | color = (0,0,255)
60 | if l==0 or l==3:
61 | color = (0,255,0)
62 | cv2.circle(img_base, (landmark5[l][0], landmark5[l][1]), 1, color, 2)
63 | cv2.imshow("a", img_base)
64 | if cv2.waitKey(1) & 0xFF == ord('q'):
65 | break
66 | print(time.time() - t)
67 | stream.release()
68 |
69 |
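For intuition on the rescaling above, a worked example assuming a 1080x1920 frame and scales = [1024, 1024]:

```python
# the shorter side (1080) is scaled toward target_size = 1024
im_scale = 1024.0 / 1080.0             # ~0.948
# the longer side would become ~1820 px, past max_size = 1024,
# so the scale is clamped by the longer side instead
if round(im_scale * 1920) > 1024:
    im_scale = 1024.0 / 1920.0         # ~0.533
```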
--------------------------------------------------------------------------------
/alignment/detector.py:
--------------------------------------------------------------------------------
1 | import os
2 | import sys
3 | import numpy as np
4 | import cv2
5 | import torch
6 | from torch.autograd import Variable
7 | sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
8 | from PIL import Image
9 | from retinaface_pytorch.retinaface import load_retinaface_mbnet, RetinaFace_MobileNet
10 | from retinaface_pytorch.utils import RetinaFace_Utils
11 | from retinaface_pytorch.align_trans import get_reference_facial_points, warp_and_crop_face
12 | class Retinaface_Detector(object):
13 | def __init__(self, device = None, thresh = 0.6, scales = [320, 480]):
14 | self.target_size = scales[0]
15 | self.max_size = scales[1]
16 | self.threshold = thresh
17 | if device:
18 | # any torch.device supplied by the caller is accepted; defaults to CPU below
19 | self.device = device
20 | else:
21 | self.device = torch.device("cpu")
22 | self.model = RetinaFace_MobileNet()
23 | self.model = self.model.to(self.device)
24 | checkpoint = torch.load(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'retinaface_pytorch/checkpoint.pth'))
25 | self.model.load_state_dict(checkpoint['state_dict'])
26 | del checkpoint
27 | # self.model = load_retinaface_mbnet('retinaface_pytorch/mnet.25').to(device)
28 |
29 | self.model.eval()
30 | self.pixel_means = np.array([0.0, 0.0, 0.0], dtype=np.float32)
31 | self.pixel_stds = np.array([1.0, 1.0, 1.0], dtype=np.float32)
32 | self.pixel_scale = float(1.0)
33 | self.reference = get_reference_facial_points(default_square=True)
34 | self.utils = RetinaFace_Utils()
35 | def align(self, img, limit = None, min_face_size=None, thresholds = None, nms_thresholds=None):
36 | boxes, faces = self.align_multi(img, limit = 1)
37 | return (boxes[0], faces[0]) if len(boxes) else (None, None)
38 | def align_multi(self, img, limit = None, min_face_size=None, thresholds = None, nms_thresholds=None):
39 | img = np.array(img)
40 | # img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
41 |
42 | im, im_scale = self.img_process(img)
43 | im = torch.from_numpy(im)
44 | im_tensor = Variable(im).to(self.device)
45 | output = self.model(im_tensor)
46 | boxes, landmarks = self.utils.detect(im, output, self.threshold, im_scale)
47 | if len(boxes) ==0:
48 | return [], []
49 | if limit:
50 | boxes = boxes[:limit]
51 | landmarks = landmarks[:limit]
52 | faces = []
53 | for i, landmark in enumerate(landmarks):
54 | boxes[i] = boxes[i].astype(np.int)
55 | landmark = landmark.astype(np.int)
56 | warped_face = warp_and_crop_face(img, landmark, self.reference, crop_size=(112,112))
57 | faces.append(Image.fromarray(warped_face))
58 | return boxes, faces
59 |
60 | def img_process(self, img):
61 | im_shape = img.shape
62 | im_size_min = np.min(im_shape[0:2])
63 | im_size_max = np.max(im_shape[0:2])
64 | im_scale = float(self.target_size) / float(im_size_min)
65 | if np.round(im_scale * im_size_max) > self.max_size:
66 | im_scale = float(self.max_size) / float(im_size_max)
67 | im = cv2.resize(img, None, None, fx=im_scale, fy=im_scale, interpolation=cv2.INTER_LINEAR)
68 | # im = im.astype(np.float32)
69 |
70 | im_tensor = np.zeros((1, 3, im.shape[0], im.shape[1]), dtype=np.float32)
71 | for i in range(3):
72 | im_tensor[0, i, :, :] = (im[:, :, 2 - i] / self.pixel_scale - self.pixel_means[2 - i])/self.pixel_stds[2 - i]
73 | return im_tensor, im_scale
74 |
75 | import time
76 | if __name__ == '__main__':
77 | reti = Retinaface_Detector()
78 | img = cv2.imread("t2.jpg")
79 | t = time.time()
80 | for i in range(10):
81 | bboxs, faces = reti.align_multi(img)
82 | t2 = time.time()
83 | print(t2 -t)
84 | t = t2
85 | for i, face in enumerate(faces, start=1):
86 | face.save("a%d.jpg" % i)
--------------------------------------------------------------------------------
/alignment/mtcnn_pytorch/.gitignore:
--------------------------------------------------------------------------------
1 | .ipynb_checkpoints
2 | __pycache__
3 |
4 |
--------------------------------------------------------------------------------
/alignment/mtcnn_pytorch/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2017 Dan Antoshchenko
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/alignment/mtcnn_pytorch/README.md:
--------------------------------------------------------------------------------
1 | # MTCNN
2 |
3 | `pytorch` implementation of **inference stage** of face detection algorithm described in
4 | [Joint Face Detection and Alignment using Multi-task Cascaded Convolutional Networks](https://arxiv.org/abs/1604.02878).
5 |
6 | ## Example
7 | 
8 |
9 | ## How to use it
10 | Just download the repository and then do this
11 | ```python
12 | from src import detect_faces
13 | from PIL import Image
14 |
15 | image = Image.open('image.jpg')
16 | bounding_boxes, landmarks = detect_faces(image)
17 | ```
18 | For examples see `test_on_images.ipynb`.
19 |
20 | ## Requirements
21 | * pytorch 0.4 or newer (the code uses `torch.device` and `torch.no_grad`)
22 | * Pillow, numpy
23 |
24 | ## Credit
25 | This implementation is heavily inspired by:
26 | * [pangyupo/mxnet_mtcnn_face_detection](https://github.com/pangyupo/mxnet_mtcnn_face_detection)
27 |
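The bundled `show_bboxes` helper (see `src/visualization_utils.py`) can be chained with `detect_faces` to inspect results; a minimal sketch with a placeholder image path:

```python
from src import detect_faces, show_bboxes
from PIL import Image

image = Image.open('image.jpg')
bounding_boxes, landmarks = detect_faces(image)
# show_bboxes returns a copy of the image with boxes and landmarks drawn
show_bboxes(image, bounding_boxes, landmarks).save('result.jpg')
```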
--------------------------------------------------------------------------------
/alignment/mtcnn_pytorch/extract_weights_from_caffe_models.py:
--------------------------------------------------------------------------------
1 | import caffe
2 | import numpy as np
3 |
4 | """
5 | The purpose of this script is to convert pretrained weights taken from
6 | official implementation here:
7 | https://github.com/kpzhang93/MTCNN_face_detection_alignment/tree/master/code/codes/MTCNNv2
8 | to the required format.
9 |
10 | In a nutshell, it just renames and transposes some of the weights.
11 | You don't have to use this script because weights are already in `src/weights`.
12 | """
13 |
14 |
15 | def get_all_weights(net):
16 | all_weights = {}
17 | for p in net.params:
18 | if 'conv' in p:
19 | name = 'features.' + p
20 | if '-' in p:
21 | s = list(p)
22 | s[-2] = '_'
23 | s = ''.join(s)
24 | all_weights[s + '.weight'] = net.params[p][0].data
25 | all_weights[s + '.bias'] = net.params[p][1].data
26 | elif len(net.params[p][0].data.shape) == 4:
27 | all_weights[name + '.weight'] = net.params[p][0].data.transpose((0, 1, 3, 2))
28 | all_weights[name + '.bias'] = net.params[p][1].data
29 | else:
30 | all_weights[name + '.weight'] = net.params[p][0].data
31 | all_weights[name + '.bias'] = net.params[p][1].data
32 | elif 'prelu' in p.lower():
33 | all_weights['features.' + p.lower() + '.weight'] = net.params[p][0].data
34 | return all_weights
35 |
36 |
37 | # P-Net
38 | net = caffe.Net('caffe_models/det1.prototxt', 'caffe_models/det1.caffemodel', caffe.TEST)
39 | np.save('src/weights/pnet.npy', get_all_weights(net))
40 |
41 | # R-Net
42 | net = caffe.Net('caffe_models/det2.prototxt', 'caffe_models/det2.caffemodel', caffe.TEST)
43 | np.save('src/weights/rnet.npy', get_all_weights(net))
44 |
45 | # O-Net
46 | net = caffe.Net('caffe_models/det3.prototxt', 'caffe_models/det3.caffemodel', caffe.TEST)
47 | np.save('src/weights/onet.npy', get_all_weights(net))
48 |
--------------------------------------------------------------------------------
/alignment/mtcnn_pytorch/src/__init__.py:
--------------------------------------------------------------------------------
1 | from .visualization_utils import show_bboxes
2 | from .detector import detect_faces
3 |
--------------------------------------------------------------------------------
/alignment/mtcnn_pytorch/src/detector.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import torch
3 | from torch.autograd import Variable
4 | from .get_nets import PNet, RNet, ONet
5 | from .box_utils import nms, calibrate_box, get_image_boxes, convert_to_square
6 | from .first_stage import run_first_stage
7 |
8 | device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
9 | def detect_faces(image, min_face_size=20.0,
10 | thresholds=[0.6, 0.7, 0.8],
11 | nms_thresholds=[0.7, 0.7, 0.7]):
12 | """
13 | Arguments:
14 | image: an instance of PIL.Image.
15 | min_face_size: a float number.
16 | thresholds: a list of length 3.
17 | nms_thresholds: a list of length 3.
18 |
19 | Returns:
20 | two float numpy arrays of shapes [n_boxes, 4] and [n_boxes, 10],
21 | bounding boxes and facial landmarks.
22 | """
23 |
24 | # LOAD MODELS
25 | pnet = PNet().to(device)
26 | rnet = RNet()
27 | onet = ONet()
28 | onet.eval()
29 |
30 | # BUILD AN IMAGE PYRAMID
31 | width, height = image.size
32 | min_length = min(height, width)
33 |
34 | min_detection_size = 12
35 | factor = 0.707 # sqrt(0.5)
36 |
37 | # scales for scaling the image
38 | scales = []
39 |
40 | # scales the image so that
41 | # minimum size that we can detect equals to
42 | # minimum face size that we want to detect
43 | m = min_detection_size/min_face_size
44 | min_length *= m
45 |
46 | factor_count = 0
47 | while min_length > min_detection_size:
48 | scales.append(m*factor**factor_count)
49 | min_length *= factor
50 | factor_count += 1
51 |
52 | # STAGE 1
53 |
54 | # it will be returned
55 | bounding_boxes = []
56 |
57 | with torch.no_grad():
58 | # run P-Net on different scales
59 | for s in scales:
60 | boxes = run_first_stage(image, pnet, scale=s, threshold=thresholds[0])
61 | bounding_boxes.append(boxes)
62 |
63 | # collect boxes (and offsets, and scores) from different scales
64 | bounding_boxes = [i for i in bounding_boxes if i is not None]
65 | bounding_boxes = np.vstack(bounding_boxes)
66 |
67 | keep = nms(bounding_boxes[:, 0:5], nms_thresholds[0])
68 | bounding_boxes = bounding_boxes[keep]
69 |
70 | # use offsets predicted by pnet to transform bounding boxes
71 | bounding_boxes = calibrate_box(bounding_boxes[:, 0:5], bounding_boxes[:, 5:])
72 | # shape [n_boxes, 5]
73 |
74 | bounding_boxes = convert_to_square(bounding_boxes)
75 | bounding_boxes[:, 0:4] = np.round(bounding_boxes[:, 0:4])
76 |
77 | # STAGE 2
78 |
79 | img_boxes = get_image_boxes(bounding_boxes, image, size=24)
80 | img_boxes = torch.FloatTensor(img_boxes)
81 |
82 | output = rnet(img_boxes)
83 | offsets = output[0].data.numpy() # shape [n_boxes, 4]
84 | probs = output[1].data.numpy() # shape [n_boxes, 2]
85 |
86 | keep = np.where(probs[:, 1] > thresholds[1])[0]
87 | bounding_boxes = bounding_boxes[keep]
88 | bounding_boxes[:, 4] = probs[keep, 1].reshape((-1,))
89 | offsets = offsets[keep]
90 |
91 | keep = nms(bounding_boxes, nms_thresholds[1])
92 | bounding_boxes = bounding_boxes[keep]
93 | bounding_boxes = calibrate_box(bounding_boxes, offsets[keep])
94 | bounding_boxes = convert_to_square(bounding_boxes)
95 | bounding_boxes[:, 0:4] = np.round(bounding_boxes[:, 0:4])
96 |
97 | # STAGE 3
98 |
99 | img_boxes = get_image_boxes(bounding_boxes, image, size=48)
100 | if len(img_boxes) == 0:
101 | return [], []
102 | img_boxes = torch.FloatTensor(img_boxes)
103 | output = onet(img_boxes)
104 | landmarks = output[0].data.numpy() # shape [n_boxes, 10]
105 | offsets = output[1].data.numpy() # shape [n_boxes, 4]
106 | probs = output[2].data.numpy() # shape [n_boxes, 2]
107 |
108 | keep = np.where(probs[:, 1] > thresholds[2])[0]
109 | bounding_boxes = bounding_boxes[keep]
110 | bounding_boxes[:, 4] = probs[keep, 1].reshape((-1,))
111 | offsets = offsets[keep]
112 | landmarks = landmarks[keep]
113 |
114 | # compute landmark points
115 | width = bounding_boxes[:, 2] - bounding_boxes[:, 0] + 1.0
116 | height = bounding_boxes[:, 3] - bounding_boxes[:, 1] + 1.0
117 | xmin, ymin = bounding_boxes[:, 0], bounding_boxes[:, 1]
118 | landmarks[:, 0:5] = np.expand_dims(xmin, 1) + np.expand_dims(width, 1)*landmarks[:, 0:5]
119 | landmarks[:, 5:10] = np.expand_dims(ymin, 1) + np.expand_dims(height, 1)*landmarks[:, 5:10]
120 |
121 | bounding_boxes = calibrate_box(bounding_boxes, offsets)
122 | keep = nms(bounding_boxes, nms_thresholds[2], mode='min')
123 | bounding_boxes = bounding_boxes[keep]
124 | landmarks = landmarks[keep]
125 |
126 | return bounding_boxes, landmarks
127 |
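A worked example of the image-pyramid construction in `detect_faces`, assuming a 100x100 input and min_face_size = 20:

```python
# m = 12 / 20 = 0.6, so the effective min_length starts at 100 * 0.6 = 60
scales, min_length, factor, m, k = [], 60.0, 0.707, 0.6, 0
while min_length > 12:                 # min_detection_size
    scales.append(m * factor ** k)
    min_length *= factor
    k += 1
print(scales)  # five scales, roughly [0.6, 0.424, 0.300, 0.212, 0.150]
```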
--------------------------------------------------------------------------------
/alignment/mtcnn_pytorch/src/first_stage.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import cv2  # needed for the grayscale-to-RGB conversion below (Variable was unused)
3 | import math
4 | from PIL import Image
5 | import numpy as np
6 | from .box_utils import nms, _preprocess
7 | default_device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
8 |
9 |
10 | def run_first_stage(image, net, scale, threshold, device=default_device):
11 | """Run P-Net, generate bounding boxes, and do NMS.
12 |
13 | Arguments:
14 | image: an instance of PIL.Image.
15 | net: an instance of pytorch's nn.Module, P-Net.
16 | scale: a float number,
17 | scale width and height of the image by this number.
18 | threshold: a float number,
19 | threshold on the probability of a face when generating
20 | bounding boxes from predictions of the net.
21 |
22 | Returns:
23 | a float numpy array of shape [n_boxes, 9],
24 | bounding boxes with scores and offsets (4 + 1 + 4).
25 | """
26 |
27 | # scale the image and convert it to a float array
28 | width, height = image.size
29 | sw, sh = math.ceil(width*scale), math.ceil(height*scale)
30 | img = image.resize((sw, sh), Image.BILINEAR)
31 | img = np.asarray(img, 'float32')
32 | if len(img.shape) == 2:
33 | img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
34 |
35 | img = torch.FloatTensor(_preprocess(img)).to(device)
36 | with torch.no_grad():
37 | output = net(img)
38 | probs = output[1].cpu().data.numpy()[0, 1, :, :]
39 | offsets = output[0].cpu().data.numpy()
40 | # probs: probability of a face at each sliding window
41 | # offsets: transformations to true bounding boxes
42 |
43 | boxes = _generate_bboxes(probs, offsets, scale, threshold)
44 | if len(boxes) == 0:
45 | return None
46 |
47 | keep = nms(boxes[:, 0:5], overlap_threshold=0.5)
48 | return boxes[keep]
49 |
50 |
51 | def _generate_bboxes(probs, offsets, scale, threshold):
52 | """Generate bounding boxes at places
53 | where there is probably a face.
54 |
55 | Arguments:
56 | probs: a float numpy array of shape [n, m].
57 | offsets: a float numpy array of shape [1, 4, n, m].
58 | scale: a float number,
59 | width and height of the image were scaled by this number.
60 | threshold: a float number.
61 |
62 | Returns:
63 | a float numpy array of shape [n_boxes, 9]
64 | """
65 |
66 | # applying P-Net is equivalent, in some sense, to
67 | # moving 12x12 window with stride 2
68 | stride = 2
69 | cell_size = 12
70 |
71 | # indices of boxes where there is probably a face
72 | inds = np.where(probs > threshold)
73 |
74 | if inds[0].size == 0:
75 | return np.array([])
76 |
77 | # transformations of bounding boxes
78 | tx1, ty1, tx2, ty2 = [offsets[0, i, inds[0], inds[1]] for i in range(4)]
79 | # they are defined as:
80 | # w = x2 - x1 + 1
81 | # h = y2 - y1 + 1
82 | # x1_true = x1 + tx1*w
83 | # x2_true = x2 + tx2*w
84 | # y1_true = y1 + ty1*h
85 | # y2_true = y2 + ty2*h
86 |
87 | offsets = np.array([tx1, ty1, tx2, ty2])
88 | score = probs[inds[0], inds[1]]
89 |
90 | # P-Net is applied to scaled images
91 | # so we need to rescale bounding boxes back
92 | bounding_boxes = np.vstack([
93 | np.round((stride*inds[1] + 1.0)/scale),
94 | np.round((stride*inds[0] + 1.0)/scale),
95 | np.round((stride*inds[1] + 1.0 + cell_size)/scale),
96 | np.round((stride*inds[0] + 1.0 + cell_size)/scale),
97 | score, offsets
98 | ])
99 | # the "+ 1" mirrors the original MATLAB implementation's 1-based indexing
100 |
101 | return bounding_boxes.T
102 |
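A worked example of the index-to-box mapping in `_generate_bboxes`: with stride 2, cell size 12, and scale 0.5, a detection at probs position (row 3, col 4) maps back to the original image as follows:

```python
stride, cell_size, scale = 2, 12, 0.5
row, col = 3, 4
x1 = round((stride * col + 1.0) / scale)               # 18
y1 = round((stride * row + 1.0) / scale)               # 14
x2 = round((stride * col + 1.0 + cell_size) / scale)   # 42
y2 = round((stride * row + 1.0 + cell_size) / scale)   # 38
# a 12x12 P-Net window at scale 0.5 becomes a 24x24 region in the original image
```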
--------------------------------------------------------------------------------
/alignment/mtcnn_pytorch/src/visualization_utils.py:
--------------------------------------------------------------------------------
1 | from PIL import ImageDraw
2 |
3 |
4 | def show_bboxes(img, bounding_boxes, facial_landmarks=[]):
5 | """Draw bounding boxes and facial landmarks.
6 |
7 | Arguments:
8 | img: an instance of PIL.Image.
9 | bounding_boxes: a float numpy array of shape [n, 5].
10 | facial_landmarks: a float numpy array of shape [n, 10].
11 |
12 | Returns:
13 | an instance of PIL.Image.
14 | """
15 |
16 | img_copy = img.copy()
17 | draw = ImageDraw.Draw(img_copy)
18 |
19 | for b in bounding_boxes:
20 | draw.rectangle([
21 | (b[0], b[1]), (b[2], b[3])
22 | ], outline='white')
23 |
24 | for p in facial_landmarks:
25 | for i in range(5):
26 | draw.ellipse([
27 | (p[i] - 1.0, p[i + 5] - 1.0),
28 | (p[i] + 1.0, p[i + 5] + 1.0)
29 | ], outline='blue')
30 |
31 | return img_copy
32 |
--------------------------------------------------------------------------------
/alignment/mtcnn_pytorch/src/weights/onet.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nvlong21/Face_Recognize/d01ecf25b5e29d28edf53af93219c7f1805ef308/alignment/mtcnn_pytorch/src/weights/onet.npy
--------------------------------------------------------------------------------
/alignment/mtcnn_pytorch/src/weights/pnet.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nvlong21/Face_Recognize/d01ecf25b5e29d28edf53af93219c7f1805ef308/alignment/mtcnn_pytorch/src/weights/pnet.npy
--------------------------------------------------------------------------------
/alignment/mtcnn_pytorch/src/weights/rnet.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nvlong21/Face_Recognize/d01ecf25b5e29d28edf53af93219c7f1805ef308/alignment/mtcnn_pytorch/src/weights/rnet.npy
--------------------------------------------------------------------------------
/alignment/retinaface.py:
--------------------------------------------------------------------------------
1 | import sys, os
2 | sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
3 | import numpy as np
4 | import cv2
5 | import sys
6 | from PIL import Image
7 | from RetinaFace_Mx.retinaface import RetinaFace as FaceDetect
8 | import mxnet as mx
9 | from RetinaFace_Mx.align_trans import get_reference_facial_points, warp_and_crop_face
10 | class RetinaFace():
11 | def __init__(self, gpu_id = -1, thresh = 0.6, scales = [320, 480]):
12 |
13 | self.thresh = thresh
14 | self.scales = scales
15 | self.reference = get_reference_facial_points(default_square=True)
16 |
17 | self.detector = FaceDetect(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'RetinaFace_Mx/model/mnet.25'), 0, gpu_id, 'net3')
18 | def align(self, img, limit = None, min_face_size=None, thresholds = None, nms_thresholds=None):
19 | img = np.array(img)
20 | im_shape = img.shape
21 | target_size = self.scales[0]
22 | max_size = self.scales[1]
23 | im_size_min = np.min(im_shape[0:2])
24 | im_size_max = np.max(im_shape[0:2])
25 | im_scale = float(target_size) / float(im_size_min)
26 | # prevent bigger axis from being more than max_size:
27 | if np.round(im_scale * im_size_max) > max_size:
28 | im_scale = float(max_size) / float(im_size_max)
29 | scales_img = [im_scale]
30 | # img =cv2.cvtColor(img,cv2.COLOR_BGR2RGB)
31 |
32 | # single detection pass
33 | boxes, landmarks = self.detector.detect(img, self.thresh, scales=scales_img, do_flip=False)
34 |
35 | if len(boxes) == 0:
36 | return None
37 | if limit:
38 | boxes = boxes[:limit]
39 | landmarks = landmarks[:limit]
40 |
41 | # warp the first detected face to the 112x112 reference template
42 | warped_face = warp_and_crop_face(img, landmarks[0], self.reference, crop_size=(112,112))
43 | return Image.fromarray(warped_face)
44 |
45 |
46 | def align_multi(self, img, limit = None, min_face_size=None, thresholds = None, nms_thresholds=None):
47 |
48 | img = np.array(img)
49 | im_shape = img.shape
50 | target_size = self.scales[0]
51 | max_size = self.scales[1]
52 | im_size_min = np.min(im_shape[0:2])
53 | im_size_max = np.max(im_shape[0:2])
54 | im_scale = float(target_size) / float(im_size_min)
55 | # prevent bigger axis from being more than max_size:
56 | if np.round(im_scale * im_size_max) > max_size:
57 | im_scale = float(max_size) / float(im_size_max)
58 | scales_img = [im_scale]
59 | # img =cv2.cvtColor(img,cv2.COLOR_BGR2RGB)
60 |
61 | # single detection pass
62 | boxes, landmarks = self.detector.detect(img, self.thresh, scales=scales_img, do_flip=False)
63 | if len(boxes) == 0:
64 | return [], []
65 | if limit:
66 | boxes = boxes[:limit]
67 | landmarks = landmarks[:limit]
68 |
69 | faces = []
70 | for landmark in landmarks:
71 | # warp each detected face to the 112x112 reference template
72 | warped_face = warp_and_crop_face(img, landmark, self.reference, crop_size=(112,112))
73 | faces.append(Image.fromarray(warped_face))
74 |
75 |
76 |
77 | return boxes, faces
78 | import time
79 | if __name__ == '__main__':
80 | reti = RetinaFace()
81 | img = cv2.imread("t6.jpg")
82 |
83 | t = time.time()
84 | for i in range(10):
85 | bboxs, faces = reti.align_multi(img)
86 | t2 = time.time()
87 | print(t2 -t)
88 | t = t2
89 | i=0
90 | for face in faces:
91 | i+=1
92 | face.save("a%d.jpg"%i)
93 |
94 |
95 |
96 |
97 |
--------------------------------------------------------------------------------
/alignment/retinaface_pytorch/README.md:
--------------------------------------------------------------------------------
1 | # Inference Code for RetinaFace with MobileNet Backend in PyTorch
2 |
3 | ### Step 1:
4 | ```Shell
5 | cd cython
6 | python setup.py build_ext --inplace
7 | ```
8 |
9 | ### Step 2:
10 | ```Shell
11 | python inference.py
12 | ```
13 |
14 | ### Evaluation (WIDER FACE):
15 | Easy Val AP: 0.8872715908531869
16 |
17 | Medium Val AP: 0.8663337842229522
18 |
19 | Hard Val AP: 0.771796729363941
20 |
21 |
22 |
23 | ### Test Results:
24 |
25 |
26 |
27 | ### References:
28 | @inproceedings{deng2019retinaface,
29 |   title={RetinaFace: Single-stage Dense Face Localisation in the Wild},
30 |   author={Deng, Jiankang and Guo, Jia and Zhou, Yuxiang and Yu, Jinke and Kotsia, Irene and Zafeiriou, Stefanos},
31 |   booktitle={arXiv},
32 |   year={2019}
33 | }
34 |
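Beyond `inference.py`, the detector can also be driven directly; a minimal sketch based on `detector.py` in this directory, using one of the bundled test images:

```python
import cv2
from detector import Retinaface_Detector

detector = Retinaface_Detector()
img = cv2.imread('test_images/t2.jpg')
# detect() returns a list of [face, landmark] pairs as integer arrays
for face, landmark in detector.detect(img):
    cv2.rectangle(img, (face[0], face[1]), (face[2], face[3]), (0, 0, 255), 2)
cv2.imwrite('test_results/t2.jpg', img)
```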
--------------------------------------------------------------------------------
/alignment/retinaface_pytorch/checkpoint.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nvlong21/Face_Recognize/d01ecf25b5e29d28edf53af93219c7f1805ef308/alignment/retinaface_pytorch/checkpoint.pth
--------------------------------------------------------------------------------
/alignment/retinaface_pytorch/cython/.gitignore:
--------------------------------------------------------------------------------
1 | *.c
2 | *.cpp
3 | *.so
4 |
--------------------------------------------------------------------------------
/alignment/retinaface_pytorch/cython/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nvlong21/Face_Recognize/d01ecf25b5e29d28edf53af93219c7f1805ef308/alignment/retinaface_pytorch/cython/__init__.py
--------------------------------------------------------------------------------
/alignment/retinaface_pytorch/cython/anchors.pyx:
--------------------------------------------------------------------------------
1 | cimport cython
2 | import numpy as np
3 | cimport numpy as np
4 |
5 | DTYPE = np.float32
6 | ctypedef np.float32_t DTYPE_t
7 |
8 | def anchors_cython(int height, int width, int stride, np.ndarray[DTYPE_t, ndim=2] base_anchors):
9 | """
10 | Parameters
11 | ----------
12 | height: height of plane
13 | width: width of plane
14 | stride: stride of the original image
15 | base_anchors: (A, 4) a base set of anchors
16 | Returns
17 | -------
18 | all_anchors: (height, width, A, 4) ndarray of anchors spreading over the plane
19 | """
20 | cdef unsigned int A = base_anchors.shape[0]
21 | cdef np.ndarray[DTYPE_t, ndim=4] all_anchors = np.zeros((height, width, A, 4), dtype=DTYPE)
22 | cdef unsigned int iw, ih
23 | cdef unsigned int k
24 | cdef unsigned int sh
25 | cdef unsigned int sw
26 | for iw in range(width):
27 | sw = iw * stride
28 | for ih in range(height):
29 | sh = ih * stride
30 | for k in range(A):
31 | all_anchors[ih, iw, k, 0] = base_anchors[k, 0] + sw
32 | all_anchors[ih, iw, k, 1] = base_anchors[k, 1] + sh
33 | all_anchors[ih, iw, k, 2] = base_anchors[k, 2] + sw
34 | all_anchors[ih, iw, k, 3] = base_anchors[k, 3] + sh
35 | return all_anchors
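Once compiled with the accompanying `setup.py`, the function can be exercised as below; a minimal sketch assuming the built module is importable as `anchors`:

```python
import numpy as np
from anchors import anchors_cython  # built via: python setup.py build_ext --inplace

# one 16x16 base anchor, tiled over a 2x2 feature map with stride 16
base = np.array([[0, 0, 15, 15]], dtype=np.float32)
all_anchors = anchors_cython(2, 2, 16, base)
print(all_anchors.shape)     # (2, 2, 1, 4)
print(all_anchors[1, 1, 0])  # [16. 16. 31. 31.]: shifted by one stride in x and y
```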
--------------------------------------------------------------------------------
/alignment/retinaface_pytorch/cython/bbox.pyx:
--------------------------------------------------------------------------------
1 | # --------------------------------------------------------
2 | # Fast R-CNN
3 | # Copyright (c) 2015 Microsoft
4 | # Licensed under The MIT License [see LICENSE for details]
5 | # Written by Sergey Karayev
6 | # --------------------------------------------------------
7 |
8 | cimport cython
9 | import numpy as np
10 | cimport numpy as np
11 |
12 | DTYPE = np.float
13 | ctypedef np.float_t DTYPE_t
14 |
15 | def bbox_overlaps_cython(
16 | np.ndarray[DTYPE_t, ndim=2] boxes,
17 | np.ndarray[DTYPE_t, ndim=2] query_boxes):
18 | """
19 | Parameters
20 | ----------
21 | boxes: (N, 4) ndarray of float
22 | query_boxes: (K, 4) ndarray of float
23 | Returns
24 | -------
25 | overlaps: (N, K) ndarray of overlap between boxes and query_boxes
26 | """
27 | cdef unsigned int N = boxes.shape[0]
28 | cdef unsigned int K = query_boxes.shape[0]
29 | cdef np.ndarray[DTYPE_t, ndim=2] overlaps = np.zeros((N, K), dtype=DTYPE)
30 | cdef DTYPE_t iw, ih, box_area
31 | cdef DTYPE_t ua
32 | cdef unsigned int k, n
33 | for k in range(K):
34 | box_area = (
35 | (query_boxes[k, 2] - query_boxes[k, 0] + 1) *
36 | (query_boxes[k, 3] - query_boxes[k, 1] + 1)
37 | )
38 | for n in range(N):
39 | iw = (
40 | min(boxes[n, 2], query_boxes[k, 2]) -
41 | max(boxes[n, 0], query_boxes[k, 0]) + 1
42 | )
43 | if iw > 0:
44 | ih = (
45 | min(boxes[n, 3], query_boxes[k, 3]) -
46 | max(boxes[n, 1], query_boxes[k, 1]) + 1
47 | )
48 | if ih > 0:
49 | ua = float(
50 | (boxes[n, 2] - boxes[n, 0] + 1) *
51 | (boxes[n, 3] - boxes[n, 1] + 1) +
52 | box_area - iw * ih
53 | )
54 | overlaps[n, k] = iw * ih / ua
55 | return overlaps
56 |
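A quick sanity check of the overlap computation (note the pixel-inclusive "+ 1" convention); a sketch assuming the built module is importable as `bbox`:

```python
import numpy as np
from bbox import bbox_overlaps_cython

boxes = np.array([[0, 0, 9, 9]], dtype=np.float)      # a 10x10 box (inclusive coords)
queries = np.array([[5, 5, 14, 14]], dtype=np.float)  # a 10x10 box shifted by 5
# intersection is 5x5 = 25, union is 100 + 100 - 25 = 175
print(bbox_overlaps_cython(boxes, queries))  # [[0.142857...]]
```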
--------------------------------------------------------------------------------
/alignment/retinaface_pytorch/cython/cpu_nms.pyx:
--------------------------------------------------------------------------------
1 | # --------------------------------------------------------
2 | # Fast R-CNN
3 | # Copyright (c) 2015 Microsoft
4 | # Licensed under The MIT License [see LICENSE for details]
5 | # Written by Ross Girshick
6 | # --------------------------------------------------------
7 |
8 | import numpy as np
9 | cimport numpy as np
10 |
11 | cdef inline np.float32_t max(np.float32_t a, np.float32_t b):
12 | return a if a >= b else b
13 |
14 | cdef inline np.float32_t min(np.float32_t a, np.float32_t b):
15 | return a if a <= b else b
16 |
17 | def cpu_nms(np.ndarray[np.float32_t, ndim=2] dets, np.float thresh):
18 | cdef np.ndarray[np.float32_t, ndim=1] x1 = dets[:, 0]
19 | cdef np.ndarray[np.float32_t, ndim=1] y1 = dets[:, 1]
20 | cdef np.ndarray[np.float32_t, ndim=1] x2 = dets[:, 2]
21 | cdef np.ndarray[np.float32_t, ndim=1] y2 = dets[:, 3]
22 | cdef np.ndarray[np.float32_t, ndim=1] scores = dets[:, 4]
23 |
24 | cdef np.ndarray[np.float32_t, ndim=1] areas = (x2 - x1 + 1) * (y2 - y1 + 1)
25 | cdef np.ndarray[np.int_t, ndim=1] order = scores.argsort()[::-1]
26 |
27 | cdef int ndets = dets.shape[0]
28 | cdef np.ndarray[np.int_t, ndim=1] suppressed = \
29 | np.zeros((ndets), dtype=np.int)
30 |
31 | # nominal indices
32 | cdef int _i, _j
33 | # sorted indices
34 | cdef int i, j
35 | # temp variables for box i's (the box currently under consideration)
36 | cdef np.float32_t ix1, iy1, ix2, iy2, iarea
37 | # variables for computing overlap with box j (lower scoring box)
38 | cdef np.float32_t xx1, yy1, xx2, yy2
39 | cdef np.float32_t w, h
40 | cdef np.float32_t inter, ovr
41 |
42 | keep = []
43 | for _i in range(ndets):
44 | i = order[_i]
45 | if suppressed[i] == 1:
46 | continue
47 | keep.append(i)
48 | ix1 = x1[i]
49 | iy1 = y1[i]
50 | ix2 = x2[i]
51 | iy2 = y2[i]
52 | iarea = areas[i]
53 | for _j in range(_i + 1, ndets):
54 | j = order[_j]
55 | if suppressed[j] == 1:
56 | continue
57 | xx1 = max(ix1, x1[j])
58 | yy1 = max(iy1, y1[j])
59 | xx2 = min(ix2, x2[j])
60 | yy2 = min(iy2, y2[j])
61 | w = max(0.0, xx2 - xx1 + 1)
62 | h = max(0.0, yy2 - yy1 + 1)
63 | inter = w * h
64 | ovr = inter / (iarea + areas[j] - inter)
65 | if ovr >= thresh:
66 | suppressed[j] = 1
67 |
68 | return keep
69 |
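A minimal sketch of the suppression behavior, assuming the built module is importable as `cpu_nms`:

```python
import numpy as np
from cpu_nms import cpu_nms

# rows are [x1, y1, x2, y2, score]; boxes 0 and 1 overlap heavily (IoU ~0.70)
dets = np.array([[0, 0, 10, 10, 0.9],
                 [1, 1, 11, 11, 0.8],
                 [50, 50, 60, 60, 0.7]], dtype=np.float32)
print(cpu_nms(dets, 0.5))  # [0, 2]: the lower-scoring duplicate is suppressed
```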
--------------------------------------------------------------------------------
/alignment/retinaface_pytorch/cython/gpu_nms.hpp:
--------------------------------------------------------------------------------
1 | void _nms(int* keep_out, int* num_out, const float* boxes_host, int boxes_num,
2 | int boxes_dim, float nms_overlap_thresh, int device_id);
3 |
--------------------------------------------------------------------------------
/alignment/retinaface_pytorch/cython/gpu_nms.pyx:
--------------------------------------------------------------------------------
1 | # --------------------------------------------------------
2 | # Faster R-CNN
3 | # Copyright (c) 2015 Microsoft
4 | # Licensed under The MIT License [see LICENSE for details]
5 | # Written by Ross Girshick
6 | # --------------------------------------------------------
7 |
8 | import numpy as np
9 | cimport numpy as np
10 |
11 | assert sizeof(int) == sizeof(np.int32_t)
12 |
13 | cdef extern from "gpu_nms.hpp":
14 | void _nms(np.int32_t*, int*, np.float32_t*, int, int, float, int)
15 |
16 | def gpu_nms(np.ndarray[np.float32_t, ndim=2] dets, np.float thresh,
17 | np.int32_t device_id=0):
18 | cdef int boxes_num = dets.shape[0]
19 | cdef int boxes_dim = dets.shape[1]
20 | cdef int num_out
21 | cdef np.ndarray[np.int32_t, ndim=1] \
22 | keep = np.zeros(boxes_num, dtype=np.int32)
23 | cdef np.ndarray[np.float32_t, ndim=1] \
24 | scores = dets[:, 4]
25 | cdef np.ndarray[np.int_t, ndim=1] \
26 | order = scores.argsort()[::-1]
27 | cdef np.ndarray[np.float32_t, ndim=2] \
28 | sorted_dets = dets[order, :]
29 | _nms(&keep[0], &num_out, &sorted_dets[0, 0], boxes_num, boxes_dim, thresh, device_id)
30 | keep = keep[:num_out]
31 | return list(order[keep])
32 |
--------------------------------------------------------------------------------
/alignment/retinaface_pytorch/detector.py:
--------------------------------------------------------------------------------
1 | import os
2 | import numpy as np
3 | import cv2
4 | import torch
5 | from torch.autograd import Variable
6 | from retinaface import load_retinaface_mbnet
7 | from utils import RetinaFace_Utils
8 |
9 | class Retinaface_Detector(object):
10 | def __init__(self):
11 | self.threshold = 0.6
12 | self.model = load_retinaface_mbnet()
13 | self.pixel_means = np.array([0.0, 0.0, 0.0], dtype=np.float32)
14 | self.pixel_stds = np.array([1.0, 1.0, 1.0], dtype=np.float32)
15 | self.pixel_scale = float(1.0)
16 | self.utils = RetinaFace_Utils()
17 |
18 | def img_process(self, img):
19 | target_size = 320
20 | max_size = 640
21 | im_shape = img.shape
22 | im_size_min = np.min(im_shape[0:2])
23 | im_size_max = np.max(im_shape[0:2])
24 | im_scale = float(target_size) / float(im_size_min)
25 | if np.round(im_scale * im_size_max) > max_size:
26 | im_scale = float(max_size) / float(im_size_max)
27 | im = cv2.resize(img, None, None, fx=im_scale, fy=im_scale, interpolation=cv2.INTER_LINEAR)
28 | im = im.astype(np.float32)
29 |
30 | im_tensor = np.zeros((1, 3, im.shape[0], im.shape[1]), dtype=np.float32)
31 | for i in range(3):
32 | im_tensor[0, i, :, :] = (im[:, :, 2 - i] / self.pixel_scale - self.pixel_means[2 - i]) / \
33 | self.pixel_stds[2 - i]
34 | return im_tensor, im_scale
35 |
36 | def detect(self, img):
37 | results = []
38 | im, im_scale = self.img_process(img)
39 | im = torch.from_numpy(im)
40 | im_tensor = Variable(im)
41 | output = self.model(im_tensor)
42 | faces, landmarks = self.utils.detect(im, output, self.threshold, im_scale)
43 |
44 | if faces is None or landmarks is None:
45 | return results
46 |
47 | for face, landmark in zip(faces, landmarks):
48 | face = face.astype(np.int)
49 | landmark = landmark.astype(np.int)
50 | results.append([face, landmark])
51 |
52 | return results
53 |
54 |
--------------------------------------------------------------------------------
/alignment/retinaface_pytorch/inference.py:
--------------------------------------------------------------------------------
1 | import os
2 | import cv2
3 | from detector import Retinaface_Detector
4 | import time
5 | detector = Retinaface_Detector()
6 | test_images = os.listdir('./test_images')
7 |
8 | for image in test_images:
9 | imgpath = os.path.join('./test_images', image)
10 | print (imgpath)
11 | t = time.time()
12 | img = cv2.imread(imgpath)
13 |
14 | results = detector.detect(img)
15 |
16 | print (len(results), ' faces found.')
17 |
18 | if len(results) == 0:
19 | continue
20 |
21 | for result in results:
22 | face = result[0]
23 | landmark = result[1]
24 |
25 | color = (0, 0, 255)
26 | cv2.rectangle(img, (face[0], face[1]), (face[2], face[3]), color, 2)
27 |
28 | # draw the five facial landmarks; points 0 and 3 are highlighted in green
29 | for l in range(landmark.shape[0]):
30 | color = (0, 0, 255)
31 | if l == 0 or l == 3:
32 | color = (0, 255, 0)
33 | cv2.circle(img, (landmark[l][0], landmark[l][1]), 1, color, 2)
34 |
35 | cv2.imwrite('./test_results/' + image, img)
36 | print(time.time() - t)
37 |
--------------------------------------------------------------------------------
/alignment/retinaface_pytorch/mnet.25-0000.params:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nvlong21/Face_Recognize/d01ecf25b5e29d28edf53af93219c7f1805ef308/alignment/retinaface_pytorch/mnet.25-0000.params
--------------------------------------------------------------------------------
/alignment/retinaface_pytorch/test_images/t2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nvlong21/Face_Recognize/d01ecf25b5e29d28edf53af93219c7f1805ef308/alignment/retinaface_pytorch/test_images/t2.jpg
--------------------------------------------------------------------------------
/alignment/retinaface_pytorch/test_images/t4.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nvlong21/Face_Recognize/d01ecf25b5e29d28edf53af93219c7f1805ef308/alignment/retinaface_pytorch/test_images/t4.jpg
--------------------------------------------------------------------------------
/alignment/retinaface_pytorch/test_results/t2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nvlong21/Face_Recognize/d01ecf25b5e29d28edf53af93219c7f1805ef308/alignment/retinaface_pytorch/test_results/t2.jpg
--------------------------------------------------------------------------------
/alignment/retinaface_pytorch/test_results/t4.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nvlong21/Face_Recognize/d01ecf25b5e29d28edf53af93219c7f1805ef308/alignment/retinaface_pytorch/test_results/t4.jpg
--------------------------------------------------------------------------------
/alignment/t2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nvlong21/Face_Recognize/d01ecf25b5e29d28edf53af93219c7f1805ef308/alignment/t2.jpg
--------------------------------------------------------------------------------
/alignment/t6.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nvlong21/Face_Recognize/d01ecf25b5e29d28edf53af93219c7f1805ef308/alignment/t6.jpg
--------------------------------------------------------------------------------
/app/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2018 Md Danish
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/app/README.md:
--------------------------------------------------------------------------------
1 | # AUFR: Authenticate Using Face Recognition
2 |
3 | 
4 |
5 | ## Algorithms Implemented
6 | - Eigenfaces
7 | - Local Binary Pattern Histograms [LBPH]
8 | - Fisherfaces
9 |
10 | ## How to use?
11 | 1. Download miniconda/anaconda.
12 | 2. Create environment.
13 | 3. Installation.
14 | 4. Clone repository.
15 | 5. Execute.
16 |
17 | ### 1. Download
18 | - Download [Miniconda](https://conda.io/miniconda.html).
19 | - Download [Anaconda](https://www.anaconda.com/).
20 |
21 | ### 2. Create Environment
22 | - ```$ conda create -n cv python=3.*```
23 | - ```$ conda activate cv```
24 |
25 | ### 3. Package Installation
26 | - ```$ conda install pyqt=5.*```
27 | - ```$ conda install opencv=*.*```
28 | - ```$ conda install -c michael_wild opencv-contrib```
29 |
30 | ### 4. Clone Repository
31 | - Clone ```$ git clone https://github.com/indian-coder/authenticate-using-face-recognition.git aufr```
32 | - Cd into aufr ```$ cd aufr```
33 |
34 | ### 5. Execute Application
35 | - Execute ```$ python main.py```
36 |
37 | Note: Generate at least two datasets for recognition to work properly.
38 |
39 | 1. Enter a name and a unique key.
40 | 2. Select the radio button for the algorithm you want to train.
41 | 3. Click the recognize button.
42 | 4. Click the save button to save the currently displayed image.
43 | 5. Click the record button to save a video.
44 |
45 | ## Resources
46 | - [OpenCV face Recognition](https://docs.opencv.org/2.4/modules/contrib/doc/facerec/facerec_tutorial.html)
47 | - [PyQt5 Documentation](http://pyqt.sourceforge.net/Docs/PyQt5/)
48 |
--------------------------------------------------------------------------------
/app/capture_dialog.py:
--------------------------------------------------------------------------------
1 |
2 | from PyQt5 import QtCore, QtGui, QtWidgets
3 |
4 | import pickle
5 | from libs.canvas import Canvas
6 | from functools import partial
7 | from libs.utils import *
8 | from libs.label_dialog import LabelDialog
9 | class CaptureDialog(QDialog):
10 | def __init__(self, parent = None):
11 | QDialog.__init__(self, parent)
12 |
13 | self.parent = parent
14 | self.data = None
15 | self.init_ui()
16 |
17 | def init_ui(self):
18 | self.centralWidget = QtWidgets.QFrame(self)
19 | self.centralWidget.setObjectName("centralWidget")
20 | self.centralWidget.setGeometry(QtCore.QRect(0, 0, 600, 600))
21 | self.centralWidget.setFrameShape(QtWidgets.QFrame.Box)
22 |
23 | self.frame_2 = QtWidgets.QFrame(self.centralWidget)
24 | self.frame_2.setObjectName("frame_2")
25 | self.frame_2.setGeometry(QtCore.QRect(10, 10, 580, 580))
26 | self.frame_2.setFrameShape(QtWidgets.QFrame.Box)
27 | self.canvas= Canvas(self.frame_2)
28 |
29 | self.canvas.setGeometry(QtCore.QRect(0, 0, 580, 580))
30 | self.canvas.setEnabled(True)
31 | self.canvas.setFocus(True)
32 | self.canvas.setDrawingShapeToSquare(False)
33 | self.canvas.restoreCursor()
34 | self.canvas.mode = self.canvas.CREATE
35 | self.image = None
36 | self.canvas.show()
37 | self.canvas.newShape.connect(self.new_shape)
38 | def new_shape(self):
39 | """Pop-up and give focus to the label editor.
40 | position MUST be in global coordinates.
41 | """
42 | BB = QDialogButtonBox
43 |
44 | x1, y1, x2, y2 = int(self.canvas.line[0].x()), int(self.canvas.line[0].y()), int(self.canvas.line[1].x()), int(self.canvas.line[1].y())
45 | image = self.image[y1:y2, x1:x2]
46 | self.labelDialog = LabelDialog(parent=self, image = image)
47 |
48 | self.labelDialog.show()
49 |
50 |
--------------------------------------------------------------------------------
/app/gen_data_ui.py:
--------------------------------------------------------------------------------
1 | import cv2
2 | import os
3 | import sys
4 | import numpy as np
5 | from datetime import datetime
6 | from PyQt5 import QtGui, QtCore
7 | from PyQt5.QtWidgets import QDialog, QApplication, QMainWindow, QMessageBox
8 | from PyQt5 import QtCore, QtGui, QtWidgets
9 | from PyQt5.QtGui import *
10 | from PyQt5.QtWidgets import *
11 | from PyQt5 import QtCore
12 | from PyQt5.QtCore import Qt
13 | from PyQt5.QtCore import pyqtSlot
14 | from PyQt5.uic import loadUi
15 | class TakePic(QDialog):
16 | def __init__(self,parent = None):
17 | QDialog.__init__(self, parent)
18 | self.parent = parent
19 | self.id_cam = 0
20 | self.init_ui()
21 |
22 | def init_ui(self):
23 | self.centralWidget = QtWidgets.QFrame(self)
24 | self.centralWidget.setObjectName("centralWidget")
25 | self.centralWidget.setGeometry(QtCore.QRect(0, 0, 600, 700))
26 | # self.centralWidget.setFrameShape(QtWidgets.QFrame.Box)
27 |
28 | self.frame_2 = QtWidgets.QFrame(self.centralWidget)
29 | self.frame_2.setObjectName("frame_2")
30 | self.frame_2.setGeometry(QtCore.QRect(10, 10, 580, 580))
31 | self.frame_2.setFrameShape(QtWidgets.QFrame.Box)
32 | self.video_feed = QtWidgets.QLabel(self.frame_2)
33 | self.video_feed.setGeometry(QtCore.QRect(0, 0, 580, 580))
34 | self.video_feed.setObjectName("video_feed")
35 | self.video_feed.raise_()
36 |
37 | self.frame3 = QtWidgets.QFrame(self.centralWidget)
38 | self.frame3.setGeometry(QtCore.QRect(10, 590, 690, 110))
39 | self.frame3.setObjectName("frame3")
40 |
41 | self.label_3 = QLabel('Name: ', self.frame3)
42 | self.label_3.setGeometry(QtCore.QRect(10, 15, 71, 25))
43 |
44 | self.img_name = QTextEdit(self.frame3)
45 | self.img_name.setGeometry(QtCore.QRect(91, 15, 287, 25))
46 |
47 |
48 | self.btn_close = QtWidgets.QPushButton(self.frame3)
49 | self.btn_close.clicked.connect(self.stop_timer)  # Close stops the capture loop
50 | self.btn_close.setGeometry(QtCore.QRect(50, 55, 87, 31))
51 | self.btn_close.setObjectName("btn_close")
52 | self.btn_close.setText('Close')
53 |
54 | self.btn_ok = QtWidgets.QPushButton(self.frame3)
55 | self.btn_ok.setGeometry(QtCore.QRect(450, 55, 87, 31))
56 | self.btn_ok.setObjectName("btn_ok")
57 | self.btn_ok.setText('OK')
58 | self.btn_take_pic = QtWidgets.QPushButton(self.frame3)
59 | self.btn_take_pic.setGeometry(QtCore.QRect(250, 55, 87, 31))
60 | self.btn_take_pic.setObjectName("btn_take_pic")
61 | self.btn_take_pic.setText('Capture')
62 | self.allow_capture = True
63 | self.images = None
64 |
65 | def run_video_capture(self):
66 | self.capture = cv2.VideoCapture(self.id_cam)
67 | self.capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 600)
68 | self.capture.set(cv2.CAP_PROP_FRAME_WIDTH, 600)
69 | self.timer = QtCore.QTimer()
70 | self.timer.timeout.connect(self.start_timer)
71 | self.timer.start(50)
72 | def start_timer(self):
73 | self.ret, frame = self.capture.read()
74 | if self.ret and self.allow_capture:
75 | frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
76 | frame = cv2.resize(frame,(600,600))
77 | self.image = frame.copy()
78 | # frame = cv2.flip(frame, 1)
79 | image = QtGui.QImage(frame, frame.shape[1], frame.shape[0],
80 | frame.strides[0], QtGui.QImage.Format_RGB888)
81 | self.video_feed.setPixmap(QtGui.QPixmap.fromImage(image))
82 |
83 |
84 |
85 |
86 | def stop_timer(self): # stop timer or come out of the loop.
87 | self.timer.stop()
88 | self.ret = False
89 | self.capture.release()
90 |
91 | def openFileNameDialog(self):
92 | options = QFileDialog.Options()
93 | options |= QFileDialog.DontUseNativeDialog
94 | fileName, _ = QFileDialog.getOpenFileName(self,"QFileDialog.getOpenFileName()", "","All Files (*);;Python Files (*.py)", options=options)
95 | if fileName:
96 | print(fileName)
97 | return fileName
98 |
--------------------------------------------------------------------------------
/app/icon/default.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nvlong21/Face_Recognize/d01ecf25b5e29d28edf53af93219c7f1805ef308/app/icon/default.jpg
--------------------------------------------------------------------------------
/app/libs/constants.py:
--------------------------------------------------------------------------------
1 | import os
2 | WORK_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
3 | SETTING_FILENAME = 'filename'
4 | SETTING_RECENT_FILES = 'recentFiles'
5 | SETTING_WIN_SIZE = 'window/size'
6 | SETTING_WIN_POSE = 'window/position'
7 | SETTING_WIN_GEOMETRY = 'window/geometry'
8 | SETTING_LINE_COLOR = 'line/color'
9 | SETTING_FILL_COLOR = 'fill/color'
10 | SETTING_ADVANCE_MODE = 'advanced'
11 | SETTING_WIN_STATE = 'window/state'
12 | SETTING_SAVE_DIR = 'savedir'
13 | SETTING_PAINT_LABEL = 'paintlabel'
14 | SETTING_LAST_OPEN_DIR = 'lastOpenDir'
15 | SETTING_AUTO_SAVE = 'autosave'
16 | SETTING_DRAW_SQUARE = 'draw/square'
17 | DEFAULT_ENCODING = 'utf-8'
18 | FACE_BANK = '%s/Face_bank'%WORK_PATH
19 | MODEL_RECOGNIZE_PATH= 'src'
20 |
21 |
22 |
--------------------------------------------------------------------------------
/app/libs/label_dialog.py:
--------------------------------------------------------------------------------
1 | try:
2 | from PyQt5.QtGui import *
3 | from PyQt5.QtCore import *
4 | from PyQt5.QtWidgets import *
5 | except ImportError:
6 | from PyQt4.QtGui import *
7 | from PyQt4.QtCore import *
8 |
9 | from libs.utils import newIcon, labelValidator
10 |
11 | BB = QDialogButtonBox
12 | from libs.constants import FACE_BANK
13 | from datetime import datetime
14 | import os,cv2
15 | class LabelDialog(QDialog):
16 |
17 | def __init__(self, text="Enter name label", parent=None, image=None):
18 | super(LabelDialog, self).__init__(parent)
19 |
20 | self.edit = QLineEdit()
21 | self.edit.setText(text)
22 | self.edit.setValidator(labelValidator())
23 | self.edit.editingFinished.connect(self.postProcess)
24 | self.image = image
25 | layout = QVBoxLayout()
26 | layout.addWidget(self.edit)
27 | self.buttonBox = bb = BB(BB.Ok | BB.Cancel, Qt.Horizontal, self)
28 | bb.button(BB.Ok).setIcon(newIcon('done'))
29 | bb.button(BB.Cancel).setIcon(newIcon('undo'))
30 | bb.accepted.connect(self.handleOK)
31 | bb.rejected.connect(self.reject)
32 | layout.addWidget(bb)
33 |
34 | self.setLayout(layout)
35 | # def load(self):
36 | # self.setDefault()
37 | # try:
38 | # if os.path.exists(self.path):
39 | # with open(self.path, 'rb') as f:
40 | # self.data = pickle.load(f)
41 | # else:
42 | # self.setDefault()
43 | # return True
44 | # except:
45 | # print('Loading setting failed')
46 | # return False
47 |
48 | def handleOK(self):
49 | Name = self.edit.text()
50 | try:
51 | if self.edit.text().trimmed():
52 | self.accept()
53 | except AttributeError:
54 | # PyQt5: AttributeError: 'str' object has no attribute 'trimmed'
55 | if self.edit.text().strip():
56 | self.accept()
57 |
58 | qm = QMessageBox
59 | g_image_name = '%s/%s/%s'%(FACE_BANK, Name, Name) + '_' + datetime.now().strftime('%Y%m%d_%H:%M:%S')+ '.jpg'
60 | if os.path.exists('%s/%s'%(FACE_BANK, Name)):
61 | answer = qm.question(self, '', "%s already exists! Do you want to continue?" % Name, qm.Yes | qm.No)
62 | if answer == qm.Yes:
63 | cv2.imwrite(g_image_name, self.image)
64 | else:
65 | os.mkdir('%s/%s'%(FACE_BANK, Name))
66 | cv2.imwrite(g_image_name, self.image)
67 |
68 | def validate(self):
69 | try:
70 | if self.edit.text().trimmed():
71 | self.accept()
72 | except AttributeError:
73 | # PyQt5: AttributeError: 'str' object has no attribute 'trimmed'
74 | if self.edit.text().strip():
75 | self.accept()
76 |
77 | def postProcess(self):
78 | try:
79 | self.edit.setText(self.edit.text().trimmed())
80 | except AttributeError:
81 | # PyQt5: AttributeError: 'str' object has no attribute 'trimmed'
82 | self.edit.setText(self.edit.text())
83 |
84 | def popUp(self, text='', move=True):
85 | self.edit.setText(text)
86 | self.edit.setSelection(0, len(text))
87 | self.edit.setFocus(Qt.PopupFocusReason)
88 | if move:
89 | self.move(QCursor.pos())
90 | return self.edit.text() if self.exec_() else None
91 |
92 | def listItemClick(self, tQListWidgetItem):
93 | try:
94 | text = tQListWidgetItem.text().trimmed()
95 | except AttributeError:
96 | # PyQt5: AttributeError: 'str' object has no attribute 'trimmed'
97 | text = tQListWidgetItem.text().strip()
98 | self.edit.setText(text)
99 |
100 | def listItemDoubleClick(self, tQListWidgetItem):
101 | self.listItemClick(tQListWidgetItem)
102 | self.validate()
--------------------------------------------------------------------------------
/app/libs/setting/setting.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nvlong21/Face_Recognize/d01ecf25b5e29d28edf53af93219c7f1805ef308/app/libs/setting/setting.pkl
--------------------------------------------------------------------------------
/app/libs/ustr.py:
--------------------------------------------------------------------------------
1 | import sys
2 | from libs.constants import DEFAULT_ENCODING
3 |
4 | def ustr(x):
5 | '''py2/py3 unicode helper'''
6 |
7 | if sys.version_info < (3, 0, 0):
8 | from PyQt4.QtCore import QString
9 | if type(x) == str:
10 | return x.decode(DEFAULT_ENCODING)
11 | if type(x) == QString:
12 | #https://blog.csdn.net/friendan/article/details/51088476
13 | #https://blog.csdn.net/xxm524/article/details/74937308
14 | return unicode(x.toUtf8(), DEFAULT_ENCODING, 'ignore')
15 | return x
16 | else:
17 | return x
--------------------------------------------------------------------------------
/app/libs/utils.py:
--------------------------------------------------------------------------------
1 | from math import sqrt
2 | from libs.ustr import ustr
3 | import hashlib
4 | import re
5 | import sys
6 |
7 | try:
8 | from PyQt5.QtGui import *
9 | from PyQt5.QtCore import *
10 | from PyQt5.QtWidgets import *
11 | except ImportError:
12 | from PyQt4.QtGui import *
13 | from PyQt4.QtCore import *
14 |
15 |
16 | def newIcon(icon):
17 | return QIcon(':/' + icon)
18 |
19 |
20 | def newButton(text, icon=None, slot=None):
21 | b = QPushButton(text)
22 | if icon is not None:
23 | b.setIcon(newIcon(icon))
24 | if slot is not None:
25 | b.clicked.connect(slot)
26 | return b
27 |
28 |
29 | def newAction(parent, text, slot=None, shortcut=None, icon=None,
30 | tip=None, checkable=False, enabled=True):
31 | """Create a new action and assign callbacks, shortcuts, etc."""
32 | a = QAction(text, parent)
33 | if icon is not None:
34 | a.setIcon(newIcon(icon))
35 | if shortcut is not None:
36 | if isinstance(shortcut, (list, tuple)):
37 | a.setShortcuts(shortcut)
38 | else:
39 | a.setShortcut(shortcut)
40 | if tip is not None:
41 | a.setToolTip(tip)
42 | a.setStatusTip(tip)
43 | if slot is not None:
44 | a.triggered.connect(slot)
45 | if checkable:
46 | a.setCheckable(True)
47 | a.setEnabled(enabled)
48 | return a
49 |
50 |
51 | def addActions(widget, actions):
52 | for action in actions:
53 | if action is None:
54 | widget.addSeparator()
55 | elif isinstance(action, QMenu):
56 | widget.addMenu(action)
57 | else:
58 | widget.addAction(action)
59 |
60 |
61 | def labelValidator():
62 | return QRegExpValidator(QRegExp(r'^[^ \t].+'), None)
63 |
64 |
65 | class struct(object):
66 |
67 | def __init__(self, **kwargs):
68 | self.__dict__.update(kwargs)
69 |
70 |
71 | def distance(p):
72 | return sqrt(p.x() * p.x() + p.y() * p.y())
73 |
74 |
75 | def fmtShortcut(text):
76 | mod, key = text.split('+', 1)
77 | return '%s+%s' % (mod, key)
78 |
79 |
80 | def generateColorByText(text):
81 | s = ustr(text)
82 | hashCode = int(hashlib.sha256(s.encode('utf-8')).hexdigest(), 16)
83 | r = int((hashCode / 255) % 255)
84 | g = int((hashCode / 65025) % 255)
85 | b = int((hashCode / 16581375) % 255)
86 | return QColor(r, g, b, 100)
87 |
88 | def have_qstring():
89 |     '''py3/Qt5 dropped the QString wrapper, since py3 has a native unicode str type'''
90 | return not (sys.version_info.major >= 3 or QT_VERSION_STR.startswith('5.'))
91 |
92 | def util_qt_strlistclass():
93 | return QStringList if have_qstring() else list
94 |
95 | def natural_sort(list, key=lambda s:s):
96 | """
97 | Sort the list into natural alphanumeric order.
98 | """
99 | def get_alphanum_key_func(key):
100 | convert = lambda text: int(text) if text.isdigit() else text
101 | return lambda s: [convert(c) for c in re.split('([0-9]+)', key(s))]
102 | sort_key = get_alphanum_key_func(key)
103 | list.sort(key=sort_key)
104 |
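A quick sketch of how two of the helpers above behave (assumes this module is imported; `QColor` comes from the Qt imports at the top):

```python
items = ['img10.jpg', 'img2.jpg', 'img1.jpg']
natural_sort(items)                  # sorts in place, numerically aware
assert items == ['img1.jpg', 'img2.jpg', 'img10.jpg']

c = generateColorByText('person_A')  # deterministic QColor per label text
print(c.red(), c.green(), c.blue(), c.alpha())
```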
--------------------------------------------------------------------------------
/app/recordings/04-Th04-2019:05-27-17.avi:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nvlong21/Face_Recognize/d01ecf25b5e29d28edf53af93219c7f1805ef308/app/recordings/04-Th04-2019:05-27-17.avi
--------------------------------------------------------------------------------
/app/recordings/30-Mar-2019:11-05-13.avi:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nvlong21/Face_Recognize/d01ecf25b5e29d28edf53af93219c7f1805ef308/app/recordings/30-Mar-2019:11-05-13.avi
--------------------------------------------------------------------------------
/app/requirements.txt:
--------------------------------------------------------------------------------
1 | certifi==2018.8.24
2 | numpy==1.15.2
3 | opencv-contrib-python==3.4.3.18
4 | opencv-python==3.4.3.18
5 | PyQt5==5.11.3
6 | PyQt5-sip==4.19.13
7 |
--------------------------------------------------------------------------------
/app/res.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nvlong21/Face_Recognize/d01ecf25b5e29d28edf53af93219c7f1805ef308/app/res.jpg
--------------------------------------------------------------------------------
/demo/face_recognition.py:
--------------------------------------------------------------------------------
1 | from flask import Flask, jsonify, make_response, request, abort, redirect
2 | import logging
3 | from processer import process
4 | from processer import process_two_image
5 | app = Flask(__name__)
6 |
7 | @app.route('/')
8 | def index():
9 | return redirect("http://tradersupport.club", code=302)
10 |
11 | @app.route('/face_recognition', methods=['POST'])
12 | def face_recognition():
13 | try:
14 | data = request.json
15 | result = process(data)
16 | return make_response(result, 200)
17 | except Exception as err:
18 | logging.error('An error has occurred whilst processing the file: "{0}"'.format(err))
19 | abort(400)
20 |
21 | @app.route('/face_recognition_two_image', methods=['POST'])
22 | def face_recognition_two_image():
23 | try:
24 | data = request.json
25 | result = process_two_image(data)
26 | return make_response(result, 200)
27 | except Exception as err:
28 | logging.error('An error has occurred whilst processing the file: "{0}"'.format(err))
29 | abort(400)
30 |
31 | @app.errorhandler(400)
32 | def bad_request(error):
33 | return make_response(jsonify({'error': 'We cannot process the file sent in the request.'}), 400)
34 |
35 | @app.errorhandler(404)
36 | def not_found(error):
37 |     return make_response(jsonify({'error': 'Resource not found.'}), 404)
38 |
39 | if __name__ == '__main__':
40 | app.run(debug=True, host='0.0.0.0', port=8084)
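A minimal client sketch for the two endpoints above. The payload keys are taken from `processer.process` and `processer.process_two_image` (shown later in this dump), and the handlers read `request.json`, so the body must be sent as JSON; the URLs are illustrative:

```python
import requests

# zip-of-images job (process): image_url + file_zip_url
r = requests.post('http://localhost:8084/face_recognition',
                  json={'image_url': 'https://example.com/query.jpg',
                        'file_zip_url': 'https://example.com/images.zip'})
print(r.status_code, r.text)

# two-image comparison (process_two_image): image_url_origin + image_url_detection
r = requests.post('http://localhost:8084/face_recognition_two_image',
                  json={'image_url_origin': 'https://example.com/a.jpg',
                        'image_url_detection': 'https://example.com/b.jpg'})
print(r.status_code, r.text)
```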
--------------------------------------------------------------------------------
/demo/face_verify.py:
--------------------------------------------------------------------------------
1 | import cv2
2 | from PIL import Image
3 | from pathlib import Path
4 | import torch
5 | from config import get_config
6 | from api import face_recognize
7 | from utils.utils import draw_box_name
8 | import glob
9 | import argparse
10 | from tqdm import tqdm
11 | import pandas as pd
12 | import time
13 | import os
14 |
15 | if __name__ == '__main__':
16 | base_folder = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
17 | parser = argparse.ArgumentParser(description="face recognition")
18 |     parser.add_argument('-image', type=str, help="path to the reference image")
19 |     # parser.add_argument('-path', type=str, help="path to a folder of images")
20 |     parser.add_argument('-csv', type=str, help="path to annotation.csv", default='%s/dataset/annotation.csv'%base_folder)
21 |     parser.add_argument('-path', type=str, help="path to the image folder", default='%s/dataset/public_test'%base_folder)
22 |     parser.add_argument('-threshold', '--threshold', type=float, help="distance threshold for identical faces", default=1.2)
23 |     parser.add_argument('-use_mtcnn', '--use_mtcnn', type=int, help="use mtcnn for detection (1 = yes)", default=1)
24 | args = parser.parse_args()
25 |
26 | conf = get_config(net_size = 'large', net_mode = 'ir_se',threshold = args.threshold, use_mtcnn = args.use_mtcnn)
27 | face_recognize = face_recognize(conf)
28 | targets , names = face_recognize.load_single_face(args.image)
29 |
30 | submiter = [['image','x1','y1','x2','y2','result']]
31 | sample_df = pd.read_csv(args.csv)
32 | sample_list = list(sample_df.image)
33 |
34 | for img in tqdm(sample_list):
35 | temp = [img.split('/')[-1], 0, 0, 0, 0, 0]
36 | for tp in ['.jpg', '.png', '.jpeg','.img', '.JPG', '.PNG', '.IMG', '.JPEG']:
37 | img_path = '%s/%s%s'%(args.path, img, tp)
38 | if os.path.isfile(img_path):
39 | break
40 | image = Image.open(img_path)
41 | try:
42 | bboxes, faces = face_recognize.align_multi(image)
43 | except:
44 | bboxes = []
45 | faces = []
46 | if len(bboxes) > 0:
47 | bboxes = bboxes[:,:-1]
48 | bboxes = bboxes.astype(int)
49 | bboxes = bboxes + [-1,-1,1,1]
50 | results, score, _ = face_recognize.infer(faces, targets)
51 | for id,(re, sc) in enumerate(zip(results, score)):
52 | if re != -1:
53 | temp = [img.split('/')[-1].replace('.png', '.jpg'), bboxes[id][0], bboxes[id][1], bboxes[id][2], bboxes[id][3], 1]
54 | print(img_path, results)
55 | break
56 | submiter.append(temp)
57 | df = pd.DataFrame.from_records(submiter)
58 | headers = df.iloc[0]
59 | df = pd.DataFrame(df.values[1:], columns=headers)
60 |     df.to_csv("%s/output.csv" % base_folder, index=None)
61 |
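For clarity, the bbox post-processing used above, run on a toy array (a sketch; `align_multi` is assumed to return rows of x1, y1, x2, y2, score):

```python
import numpy as np

bboxes = np.array([[10.2, 20.7, 110.9, 140.1, 0.99]])  # x1, y1, x2, y2, score
bboxes = bboxes[:, :-1]             # drop the confidence column
bboxes = bboxes.astype(int)         # truncate to integer pixel coordinates
bboxes = bboxes + [-1, -1, 1, 1]    # enlarge each box by one pixel per side
# -> array([[  9,  19, 111, 141]])
```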
--------------------------------------------------------------------------------
/demo/infer_on_video.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import cv2
3 | import os
4 | from PIL import Image
5 | import argparse
6 | import torch
7 | import numpy as np
8 | import time
9 | from datetime import datetime
10 | sys.path.insert(0, "..")
11 | from api import face_recognize
12 | from utils.utils import draw_box_name
13 | from utils.config import get_config
14 |
15 |
16 | import time
17 | print(torch.cuda.is_available())
18 | if __name__ == '__main__':
19 | parser = argparse.ArgumentParser(description='for face verification')
20 | parser.add_argument("-f", "--file_name", help="video file name",default='video.mp4', type=str)
21 | parser.add_argument("-s", "--save_name", help="output file name",default='recording', type=str)
22 | parser.add_argument('-th','--threshold',help='threshold to decide identical faces',default=1.25, type=float)
23 | parser.add_argument("-u", "--update", help="whether perform update the facebank",action="store_true")
24 | parser.add_argument("-tta", "--tta", help="whether test time augmentation",action="store_true")
25 | parser.add_argument("-c", "--score", help="whether show the confidence score",action="store_true")
26 | parser.add_argument("-b", "--begin", help="from when to start detection(in seconds)", default=0, type=int)
27 | parser.add_argument("-d", "--duration", help="perform detection for how long(in seconds)", default=0, type=int)
28 | parser.add_argument("-save_unknow", "--save_unknow", help="save unknow person", default=1, type=int)
29 | link_cam = 'rtsp://admin:a1b2c3d4@@10.0.20.226:554/profile2/media.smp'
30 |
31 | args = parser.parse_args()
32 | conf = get_config(net_mode = 'ir_se', threshold = args.threshold, detect_id = 1)
33 | face_recognize = face_recognize(conf)
34 | if args.update:
35 | targets, names = face_recognize.update_facebank()
36 | print('facebank updated')
37 | else:
38 | targets, names = face_recognize.load_facebank()
39 | print('facebank loaded')
40 | if (not isinstance(targets, torch.Tensor)) and face_recognize.use_tensor:
41 | targets, names = face_recognize.update_facebank()
42 |
43 |     cap = cv2.VideoCapture(args.file_name)  # default 'video.mp4'; was hardcoded, which ignored -f
44 |     # cap = cv2.VideoCapture('video.mp4')
45 |
46 | cap.set(cv2.CAP_PROP_POS_MSEC, args.begin* 1000)
47 |
48 | fps = cap.get(cv2.CAP_PROP_FPS)
49 | isSuccess, frame = cap.read()
50 | # r = cv2.selectROI(frame)
51 | # Crop image
52 |
53 | if args.duration != 0:
54 | i = 0
55 | count = 0
56 | while cap.isOpened():
57 | t = time.time()
58 | isSuccess, frame = cap.read()
59 | if isSuccess:
60 | img_bg = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
61 | image = Image.fromarray(img_bg)
62 | # try:
63 | bboxes, faces = face_recognize.align_multi(image)
64 | # except:
65 | # bboxes = []
66 | # faces = []
67 | if len(bboxes) != 0:
68 |                 bboxes = bboxes[:,:-1]  # shape [N,4]: keep the highest-possibility faces, drop the score column
69 | bboxes = bboxes.astype(int)
70 | bboxes = bboxes + [-1,-1,1,1] # personal choice
71 | results, score, embs = face_recognize.infer(faces, targets)
72 |
73 | for idx, bbox in enumerate(bboxes):
74 | # faces[idx].save("%d.jpg"%count)
75 | # print(results[idx])
76 | count+=1
77 | if args.score:
78 | frame = draw_box_name(bbox, names[results[idx] + 1] + '_{:.2f}'.format(score[idx]), frame)
79 | else:
80 | frame = draw_box_name(bbox, names[results[idx] + 1], frame)
81 | # frame = cv2.resize(frame, (960, 760))
82 | # video_writer.write(frame)
83 | cv2.imshow("face_recognize", frame)
84 | if cv2.waitKey(1) & 0xFF == ord('q'):
85 | break
86 | else:
87 | break
88 | print(time.time() -t)
89 | if args.duration != 0:
90 | i += 1
91 | if i % 25 == 0:
92 | print('{} second'.format(i // 25))
93 | if i > 25 * args.duration:
94 | break
95 |
96 | cap.release()
97 | # video_writer.release()
98 |
--------------------------------------------------------------------------------
/demo/processer.py:
--------------------------------------------------------------------------------
1 | import cv2
2 | from PIL import Image
3 | import torch
4 | from config import get_config
5 | import glob
6 | from tqdm import tqdm
7 | import pandas as pd
8 | import uuid
9 | import subprocess
10 | import zipfile
11 | import os
12 | from api import face_recognize
13 | def download_file_by_url(url, folder_name):
14 | file_path = folder_name + '/' + url.split('/')[-1]
15 | command = 'wget %s -P %s'%(url, folder_name)
16 | subprocess.call(command, shell=True)
17 | return file_path
18 |
19 | def unzip_file(file_zip_path, folder_name):
20 | path_folder = folder_name + '/unzip'
21 | zip_ref = zipfile.ZipFile(file_zip_path, 'r')
22 | zip_ref.extractall(path_folder)
23 | zip_ref.close()
24 | return path_folder
25 |
26 | def process(data):
27 | folder_name = str(uuid.uuid1())
28 | command = 'mkdir %s'%folder_name
29 | subprocess.call(command, shell=True)
30 | image_path = download_file_by_url(data['image_url'], folder_name)
31 | file_zip_path = download_file_by_url(data['file_zip_url'], folder_name)
32 | path = unzip_file(file_zip_path, folder_name)
33 | results = process_images(image_path=image_path, path=path)
34 | command = 'rm -rf %s'%folder_name
35 | subprocess.call(command, shell=True)
36 | return results
37 |
38 | def process_images(image_path='', path=''):
39 |
40 | conf = get_config()
41 | face_recognize = face_recognize(conf)
42 | targets, names = face_recognize.load_single_face(image_path)
43 | submiter = [['image','x1','y1','x2','y2','result']]
44 | list_file = glob.glob(path + '/*')
45 |     if not os.path.isfile(list_file[0]):
46 | path = list_file[0]
47 | print(path)
48 | for img in tqdm(glob.glob(path + '/*')):
49 | temp = [img.split('/')[-1], 0,0,0,0,0]
50 | image = Image.open(img)
51 | try:
52 | bboxes, faces = face_recognize.align_multi(image)
53 | except:
54 | bboxes = []
55 | faces = []
56 | if len(bboxes) > 0:
57 | bboxes = bboxes[:,:-1]
58 | bboxes = bboxes.astype(int)
59 | bboxes = bboxes + [-1,-1,1,1]
60 | results, score = face_recognize.infer(faces, targets)
61 |
62 | for id,(re, sc) in enumerate(zip(results, score)):
63 | if re != -1:
64 | temp = [img.split('/')[-1], bboxes[id][0], bboxes[id][1], bboxes[id][2], bboxes[id][3], 1]
65 | submiter.append(temp)
66 | df = pd.DataFrame.from_records(submiter)
67 | headers = df.iloc[0]
68 | df = pd.DataFrame(df.values[1:], columns=headers)
69 | df = df.sort_values(by=['result'], ascending=False)
70 | results = df.to_json(orient='records')
71 | return results
72 |
73 | def process_two_image(data):
74 | folder_name = str(uuid.uuid1())
75 | command = 'mkdir %s'%folder_name
76 | subprocess.call(command, shell=True)
77 | image_path_origin = download_file_by_url(data['image_url_origin'], folder_name)
78 | image_path_detection = download_file_by_url(data['image_url_detection'], folder_name)
79 |
80 | from api import face_recognize
81 | conf = get_config()
82 | face_recognize = face_recognize(conf)
83 | face_recognize._raw_load_single_face(image_path_origin)
84 | targets = face_recognize.embeddings
85 | image = Image.open(image_path_detection)
86 | submiter = [['image_url','x1','y1','x2','y2','result']]
87 | try:
88 | bboxes, faces = face_recognize.align_multi(image)
89 | except:
90 | bboxes = []
91 | faces = []
92 | if len(bboxes) > 0:
93 | bboxes = bboxes[:,:-1]
94 | bboxes = bboxes.astype(int)
95 | bboxes = bboxes + [-1,-1,1,1]
96 | results, score = face_recognize.infer(faces, targets)
97 |
98 | for id,(re, sc) in enumerate(zip(results, score)):
99 | if re != -1:
100 | temp = {
101 | 'x1': bboxes[id][0],
102 | 'y1': bboxes[id][1],
103 | 'x2': bboxes[id][2],
104 | 'y2': bboxes[id][3],
105 | 'result':1
106 | }
107 | temp = [data['image_url_detection'], bboxes[id][0], bboxes[id][1], bboxes[id][2], bboxes[id][3], 1]
108 | submiter.append(temp)
109 | command = 'rm -rf %s'%folder_name
110 | subprocess.call(command, shell=True)
111 | df = pd.DataFrame.from_records(submiter)
112 | headers = df.iloc[0]
113 | df = pd.DataFrame(df.values[1:], columns=headers)
114 | df = df.sort_values(by=['result'], ascending=False)
115 | results = df.to_json(orient='records')
116 | return results
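The helpers above shell out to `mkdir`/`rm -rf` with string interpolation; a sketch of an equivalent temp-dir pattern using the standard library (an alternative under the same flow, not what the file currently does):

```python
import shutil
import tempfile

def process_safe(data):
    folder_name = tempfile.mkdtemp()    # replaces "mkdir <uuid>"
    try:
        image_path = download_file_by_url(data['image_url'], folder_name)
        file_zip_path = download_file_by_url(data['file_zip_url'], folder_name)
        path = unzip_file(file_zip_path, folder_name)
        return process_images(image_path=image_path, path=path)
    finally:
        shutil.rmtree(folder_name, ignore_errors=True)  # replaces "rm -rf <uuid>"
```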
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | certifi==2018.8.24
2 | opencv-contrib-python==3.4.3.18
3 | pandas
4 | torch>=0.4.1
5 | numpy>=1.14.5
6 | matplotlib==2.1.2
7 | tqdm==4.23.4
8 | scipy>=1.0.0
9 | bcolz==1.2.1
10 | Jinja2>=2.10.1
11 | requests>=2.20.0
12 | flask>=1.0.0
13 | easydict==1.7
14 | opencv_python>=3.4.0.12
15 | Pillow==5.2.0
16 | scikit_learn>=0.19.2
17 | tensorboardX==1.2
18 | torchvision==0.2.1
19 | torchsummary
20 | cython
21 | imutils
22 | zmq
23 |
--------------------------------------------------------------------------------
/smoofing/Smoofing.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import cv2
3 | from sklearn.externals import joblib
4 | import os
5 | class Smoofing():
6 | """docstring for Smoofing"""
7 | def __init__(self):
8 | super(Smoofing, self).__init__()
9 | current_path = os.path.dirname(os.path.abspath(__file__))
10 | self.clf = joblib.load(os.path.join(current_path,"replay-attack_ycrcb_luv_extraTreesClassifier.pkl"))
11 | # self.sample_number = 1
12 | # self.count = 0
13 | # self.measures = np.zeros(self.sample_number, dtype=np.float)
14 | def calc_hist(self, img):
15 | histogram = [0] * 3
16 | for j in range(3):
17 | histr = cv2.calcHist([img], [j], None, [256], [0, 256])
18 | histr *= 255.0 / histr.max()
19 | histogram[j] = histr
20 | return np.array(histogram)
21 | def predict(self, face):
22 | roi = np.array(face)
23 | img_ycrcb = cv2.cvtColor(roi, cv2.COLOR_BGR2YCR_CB)
24 | img_luv = cv2.cvtColor(roi, cv2.COLOR_BGR2LUV)
25 |
26 | ycrcb_hist = self.calc_hist(img_ycrcb)
27 | luv_hist = self.calc_hist(img_luv)
28 |
29 | feature_vector = np.append(ycrcb_hist.ravel(), luv_hist.ravel())
30 | feature_vector = feature_vector.reshape(1, len(feature_vector))
31 |
32 | prediction = self.clf.predict_proba(feature_vector)
33 | prob = prediction[0][1]
34 | if prob > 0.7:
35 | return True
36 |
37 | # self.measures[self.count % self.sample_number] = prob
38 | # point = (x, y-5)
39 | # if 0 not in self.measures:
40 | # text = "True"
41 | # if np.mean(self.measures) >= 0.7:
42 | # text = "False"
43 | # # font = cv2.FONT_HERSHEY_SIMPLEX
44 | # # cv2.putText(img=img_bgr, text=text, org=point, fontFace=font, fontScale=0.9, color=(0, 0, 255),
45 | # # thickness=2, lineType=cv2.LINE_AA)
46 | # else:
47 | # font = cv2.FONT_HERSHEY_SIMPLEX
48 | # # cv2.putText(img=img_bgr, text=text, org=point, fontFace=font, fontScale=0.9,
49 | # # color=(0, 255, 0), thickness=2, lineType=cv2.LINE_AA)
50 | # print(text, np.mean(self.measures))
51 | return False
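A hypothetical usage sketch (the path is illustrative; `predict` expects a BGR face crop and returns True when the classifier's class-1 probability exceeds 0.7 — the file does not document whether class 1 means a live face or an attack):

```python
import cv2

face = cv2.imread('face_crop.jpg')  # illustrative path; any BGR face crop works
detector = Smoofing()               # loads the bundled ExtraTrees classifier
print(detector.predict(face))       # True iff P(class 1) > 0.7
```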
--------------------------------------------------------------------------------
/smoofing/replay-attack_ycrcb_luv_extraTreesClassifier.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nvlong21/Face_Recognize/d01ecf25b5e29d28edf53af93219c7f1805ef308/smoofing/replay-attack_ycrcb_luv_extraTreesClassifier.pkl
--------------------------------------------------------------------------------
/src/.gitignore:
--------------------------------------------------------------------------------
1 | #pre-train
2 | backbone/proxyless_cpu.pth
3 |
4 | # Byte-compiled / optimized / DLL files
5 | __pycache__/
6 | *.py[cod]
7 | *$py.class
8 |
9 | # C extensions
10 | *.so
11 |
12 | # Distribution / packaging
13 | .Python
14 | public_test/
15 | /weights/
16 | *.mp4
17 | *.flv
18 | build/
19 | Face_bank/
20 | develop-eggs/
21 | dist/
22 | downloads/
23 | eggs/
24 | .eggs/
25 | lib/
26 | lib64/
27 | parts/
28 | sdist/
29 | var/
30 | wheels/
31 | *.egg-info/
32 | .installed.cfg
33 | *.egg
34 | MANIFEST
35 |
36 | # PyInstaller
37 | # Usually these files are written by a python script from a template
38 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
39 | *.manifest
40 | *.spec
41 |
42 | # Installer logs
43 | pip-log.txt
44 | pip-delete-this-directory.txt
45 |
46 | # Unit test / coverage reports
47 | htmlcov/
48 | .tox/
49 | .coverage
50 | .coverage.*
51 | .cache
52 | nosetests.xml
53 | coverage.xml
54 | *.cover
55 | .hypothesis/
56 | .pytest_cache/
57 |
58 | # Translations
59 | *.mo
60 | *.pot
61 |
62 | # Django stuff:
63 | *.log
64 | local_settings.py
65 | db.sqlite3
66 |
67 | # Flask stuff:
68 | instance/
69 | .webassets-cache
70 |
71 | # Scrapy stuff:
72 | .scrapy
73 |
74 | # Sphinx documentation
75 | docs/_build/
76 |
77 | # PyBuilder
78 | target/
79 |
80 | # Jupyter Notebook
81 | .ipynb_checkpoints
82 |
83 | # pyenv
84 | .python-version
85 |
86 | # celery beat schedule file
87 | celerybeat-schedule
88 |
89 | # SageMath parsed files
90 | *.sage.py
91 |
92 | # Environments
93 | .env
94 | .venv
95 | env/
96 | venv/
97 | ENV/
98 | env.bak/
99 | venv.bak/
100 |
101 | # Spyder project settings
102 | .spyderproject
103 | .spyproject
104 |
105 | # Rope project settings
106 | .ropeproject
107 |
108 | # mkdocs documentation
109 | /site
110 |
111 | # mypy
112 | .mypy_cache/
113 |
--------------------------------------------------------------------------------
/src/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM debian:latest
2 |
3 | RUN apt-get -y update && apt-get install -y git python3-pip python3-dev python3-tk procps curl
4 |
5 | RUN apt-get install -y libsm6 libxext6
6 |
7 | RUN pip3 install Flask pandas torch==0.4.0 numpy==1.14.5 matplotlib==2.1.2 tqdm==4.23.4 mxnet_cu90==1.2.1 scipy==1.0.0 bcolz==1.2.1 easydict==1.7 opencv_python==3.4.0.12 Pillow==5.2.0 mxnet==1.2.1.post1 scikit_learn==0.19.2 tensorboardX==1.2 torchvision==0.2.1
8 |
9 | ADD . /face_recognition
10 |
11 | WORKDIR face_recognition
12 |
13 | RUN apt-get install -y wget
14 |
15 | ENV PYTHONPATH=$PYTHONPATH:src
16 | ENV FACE_RECOGNITION_PORT=8084
17 | EXPOSE $FACE_RECOGNITION_PORT
18 |
19 | ENTRYPOINT ["python3"]
20 | CMD ["face_recognition.py"]
21 |
--------------------------------------------------------------------------------
/src/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2018 TreB1eN
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/src/README.MD:
--------------------------------------------------------------------------------
1 | # Face recognition
2 | ## Requirements
3 | Python 3.5+
4 |
5 | Command line:
6 | ```
7 | pip3 install -r requirements.txt
8 | ```
9 |
10 | ## Usage:
11 | ### Download
12 | ```
13 | git clone https://github.com/vanlong96tg/Face_recognize_pytorch
14 | mkdir Face_recognize_pytorch/weights
15 | cd Face_recognize_pytorch/weights
16 | wget https://www.dropbox.com/s/akktsgxp0n8cwn2/model_mobilefacenet.pth?dl=0 -O model_mobilefacenet.pth
17 | wget https://www.dropbox.com/s/kzo52d9neybjxsb/model_ir_se50.pth?dl=0 -O model_ir_se50.pth
18 | wget https://www.dropbox.com/s/rxavczg9dlxy3a8/model_ir50.pth?dl=0 -O model_ir50.pth
19 | ```
20 | ### Python:
21 | Run with default threshold=1.2:
22 | ```
23 | python3 face_verify.py -csv {path_sample submit_csv} -path {path_folder_image} -image {path_image}
24 | ```
25 | Use model ir_se50 (slower but more accurate):
26 | ```
27 | python3 face_verify.py -csv {path_sample submit_csv} -path {path_folder_image} -image {path_image}
28 | ```
29 | To use the MobileNet model, edit config.py:
30 | ```
31 | set net_size='mobi' in get_config
32 | ```
33 | Args get_config(mode = 'app', net_size = 'large', net_mode = 'ir_se', use_mtcnn = 1, threshold = 1.25) in config.py:
34 | ```
35 | mode: for demo
36 | net_size: 'large' = SE_IR50 model, 'mobi' = MobileNet model
37 | net_mode: for net_size='large', one of ['ir_se', 'ir']
38 | use_mtcnn: 1 to use MTCNN for detection (default, recommended)
39 | threshold: distance > threshold => unknown
40 |
41 | ```
42 | Use model mtcnn for face detection:
43 | ```
44 | python3 face_verify.py -csv {path_sample submit_csv} -path {path_folder_image} -image {path_image} -use_mtcnn 1
45 | ```
46 | Run on video:
47 | ```
48 | python3 infer_on_video.py
49 | ```
50 | Download the [Video](https://www.dropbox.com/s/7g26jvp1j4epo7n/video.mp4?dl=0) and [Face bank](https://www.dropbox.com/s/4pstxap2uozvukc/Face_bank.zip?dl=0), then extract them into the working directory.
51 |
52 | ### Docker:
53 | Install docker
54 | ```
55 | curl -fsSL https://get.docker.com -o get-docker.sh
56 | sudo sh get-docker.sh
57 | sudo groupadd docker
58 | sudo usermod -aG docker $USER
59 | ```
60 | Install docker-compose
61 | ```
62 | pip3 install docker-compose
63 | ```
64 | Run
65 | ```
66 | docker-compose up --build -d
67 | ```
68 | ## Test API with docker
69 | Requirements: the URLs must be accessible without authentication.
70 |
71 | Test with Postman
72 | ### Two image detection
73 | **URL:** http://localhost:8084/face_recognition_two_image
74 | ```
75 | {
76 | "image_url_origin":"https://www.dropbox.com/s/vm8fvi9xdmjrdmr/PQH_0000.png?dl=0",
77 | "image_url_detection":"https://www.dropbox.com/s/vm8fvi9xdmjrdmr/PQH_0000.png?dl=0"
78 | }
79 | ```
80 | ### Multiple image
81 | Please zip the images into a single file.
82 |
83 | **URL:** http://localhost:8084/face_recognition
84 | ```
85 | {
86 | "image_url":"https://www.dropbox.com/s/vm8fvi9xdmjrdmr/PQH_0000.png?dl=0",
87 | "file_zip_url":"https://www.dropbox.com/s/bf705wgk2n9vog6/test.zip?dl=0"
88 | }
89 | ```
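As a command-line sketch of the same request (illustrative URLs; the handler reads JSON, so the Content-Type header is required):
```
curl -X POST http://localhost:8084/face_recognition \
     -H "Content-Type: application/json" \
     -d '{"image_url": "https://example.com/query.jpg", "file_zip_url": "https://example.com/images.zip"}'
```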
90 | ### Training:
91 | * Performance
92 |
93 | |[LFW](https://hal.inria.fr/file/index/docid/321923/filename/Huang_long_eccv2008-lfw.pdf)|[CFP_FF](http://www.cfpw.io/paper.pdf)|[AgeDB](http://openaccess.thecvf.com/content_cvpr_2017_workshops/w33/papers/Moschoglou_AgeDB_The_First_CVPR_2017_paper.pdf)|[Vggface2_FP](https://arxiv.org/pdf/1710.08092.pdf)|
94 | |:---:|:---:|:---:|:---:|
95 | |99.73|99.68|97.32|94.88|
96 |
97 | ### Acknowledgement
98 | * This repo is inspired by [InsightFace.MXNet](https://github.com/deepinsight/insightface), [InsightFace.PyTorch](https://github.com/TreB1eN/InsightFace_Pytorch), [ArcFace.PyTorch](https://github.com/ronghuaiyang/arcface-pytorch), [MTCNN.MXNet](https://github.com/pangyupo/mxnet_mtcnn_face_detection) and [PretrainedModels.PyTorch](https://github.com/Cadene/pretrained-models.pytorch).
99 | * Training Datasets [Dataset-Zoo](https://github.com/deepinsight/insightface/wiki/Dataset-Zoo)
100 |
--------------------------------------------------------------------------------
/src/backbone/__init__.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # encoding: utf-8
3 | '''
4 | @author: wujiyang
5 | @contact: wujiyang@hust.edu.cn
6 | @file: __init__.py.py
7 | @time: 2018/12/21 15:30
8 | @desc:
9 | '''
--------------------------------------------------------------------------------
/src/backbone/demo_postrequest.py:
--------------------------------------------------------------------------------
1 | # importing the requests library
2 | import requests
3 |
4 | # defining the api-endpoint
5 | API_ENDPOINT = "http://35.187.243.134:8084/emotion/add_raw"
6 |
7 |
8 | data = {
9 |     "id_class": "CL485162",
10 |     "time_video": "0:0:30",
11 |     "time_class": "19:00",
12 |     "id_teacher": 234,
13 |     "name_teacher": "John Wick",
14 |     "angry": None,      # value left blank in the original
15 |     "happy": None,      # value left blank in the original
16 |     "surprise": None,   # value left blank in the original
17 |     "sad": None         # value left blank in the original
18 | }
19 |
20 |
21 | # sending post request and saving response as response object
22 | r = requests.post(url = API_ENDPOINT, data = data)
23 |
--------------------------------------------------------------------------------
/src/backbone/model_proxyless_nas.py:
--------------------------------------------------------------------------------
1 | from .proxyless_nas import model_zoo
2 | import torch
3 | import torch.nn as nn
4 | def proxyless_nas(num_feauture = 512):
5 | net = model_zoo.proxyless_base()
6 | net.classifier = nn.Linear(1432, num_feauture, bias = True)
7 | return net
8 |
--------------------------------------------------------------------------------
/src/backbone/proxyless_nas/__init__.py:
--------------------------------------------------------------------------------
1 | from .model_zoo import *
2 |
--------------------------------------------------------------------------------
/src/backbone/proxyless_nas/model_zoo.py:
--------------------------------------------------------------------------------
1 | import json
2 |
3 | import torch
4 |
5 | from .utils import download_url
6 | from .nas_modules import ProxylessNASNets
7 |
8 |
9 | def proxyless_base(pretrained=True, net_config="http://hanlab.mit.edu/files/proxylessNAS/proxyless_cpu.config",
10 | net_weight="http://hanlab.mit.edu/files/proxylessNAS/proxyless_cpu.pth"):
11 | assert net_config is not None, "Please input a network config"
12 | net_config_path = download_url(net_config)
13 | net_config_json = json.load(open(net_config_path, 'r'))
14 | net = ProxylessNASNets.build_from_config(net_config_json)
15 |
16 | if 'bn' in net_config_json:
17 | net.set_bn_param(bn_momentum=net_config_json['bn']['momentum'], bn_eps=net_config_json['bn']['eps'])
18 | else:
19 | net.set_bn_param(bn_momentum=0.1, bn_eps=1e-3)
20 |
21 | if pretrained:
22 | assert net_weight is not None, "Please specify network weights"
23 | init_path = download_url(net_weight)
24 | init = torch.load(init_path, map_location='cpu')
25 | net.load_state_dict(init['state_dict'])
26 |
27 | return net
28 | def proxyless_cpu_(pretrained=True, net_config_path=None, net_weight=None):
29 | assert net_config_path is not None, "Please input a network config"
30 | # net_config_path = download_url(net_config)
31 | net_config_json = json.load(open(net_config_path, 'r'))
32 | net = ProxylessNASNets.build_from_config(net_config_json)
33 |
34 | if 'bn' in net_config_json:
35 | net.set_bn_param(bn_momentum=net_config_json['bn']['momentum'], bn_eps=net_config_json['bn']['eps'])
36 | else:
37 | net.set_bn_param(bn_momentum=0.1, bn_eps=1e-3)
38 |
39 | if pretrained:
40 | assert net_weight is not None, "Please specify network weights"
41 | # init_path = download_url(net_weight)
42 | init = torch.load(net_weight, map_location='cpu')
43 | net.load_state_dict(init['state_dict'])
44 |
45 | return net
46 |
47 | from functools import partial
48 | # from torchsummary import summary
49 | proxyless_cpu = partial(proxyless_base,
50 | net_config="http://hanlab.mit.edu/files/proxylessNAS/proxyless_cpu.config",
51 | net_weight="http://hanlab.mit.edu/files/proxylessNAS/proxyless_cpu.pth")
52 |
53 | proxyless_gpu = partial(proxyless_base,
54 | net_config="http://hanlab.mit.edu/files/proxylessNAS/proxyless_gpu.config",
55 | net_weight="http://hanlab.mit.edu/files/proxylessNAS/proxyless_gpu.pth")
56 | # summary(proxyless_gpu, (3, 224, 224))
57 | proxyless_mobile = partial(proxyless_base,
58 | net_config="http://hanlab.mit.edu/files/proxylessNAS/proxyless_mobile.config",
59 | net_weight="http://hanlab.mit.edu/files/proxylessNAS/proxyless_mobile.pth")
60 |
61 | proxyless_mobile_14 = partial(proxyless_base,
62 | net_config="http://hanlab.mit.edu/files/proxylessNAS/proxyless_mobile_14.config",
63 | net_weight="http://hanlab.mit.edu/files/proxylessNAS/proxyless_mobile_14.pth")
64 |
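A small sketch of the zoo entry points above (the first call downloads the config and weights into `~/.torch/proxyless_nas` via `download_url`; input size is the usual ImageNet 224x224):

```python
import torch

net = proxyless_cpu(pretrained=True)         # build the net and load pretrained weights
net.eval()
with torch.no_grad():
    out = net(torch.randn(1, 3, 224, 224))   # dummy ImageNet-sized batch
print(out.shape)
```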
--------------------------------------------------------------------------------
/src/backbone/proxyless_nas/utils.py:
--------------------------------------------------------------------------------
1 | import os, sys
2 |
3 | try:
4 | from urllib import urlretrieve
5 | except ImportError:
6 | from urllib.request import urlretrieve
7 |
8 | import torch
9 | import torch.nn as nn
10 | import torch.optim
11 |
12 |
13 | def download_url(url, model_dir="~/.torch/proxyless_nas", overwrite=False):
14 | model_dir = os.path.expanduser(model_dir)
15 | filename = url.split('/')[-1]
16 | cached_file = os.path.join(model_dir, filename)
17 | if not os.path.exists(cached_file) or overwrite:
18 | os.makedirs(model_dir, exist_ok=True)
19 | sys.stderr.write('Downloading: "{}" to {}\n'.format(url, cached_file))
20 | urlretrieve(url, cached_file)
21 | return cached_file
22 |
23 |
24 | def load_url(url, model_dir='~/.torch/proxyless_nas', map_location=None):
25 | cached_file = download_url(url, model_dir)
26 |     if map_location is None and not torch.cuda.is_available(): map_location = "cpu"  # keep a caller-supplied map_location
27 | return torch.load(cached_file, map_location=map_location)
28 |
29 |
30 | def cross_entropy_with_label_smoothing(pred, target, label_smoothing=0.1):
31 |     logsoftmax = nn.LogSoftmax(dim=1)  # normalize over the class dimension
32 | n_classes = pred.size(1)
33 | # convert to one-hot
34 | target = torch.unsqueeze(target, 1)
35 | soft_target = torch.zeros_like(pred)
36 | soft_target.scatter_(1, target, 1)
37 | # label smoothing
38 | soft_target = soft_target * (1 - label_smoothing) + label_smoothing / n_classes
39 | return torch.mean(torch.sum(- soft_target * logsoftmax(pred), 1))
40 |
41 |
42 | def get_same_padding(kernel_size):
43 | if isinstance(kernel_size, tuple):
44 | assert len(kernel_size) == 2, 'invalid kernel size: %s' % kernel_size
45 | p1 = get_same_padding(kernel_size[0])
46 | p2 = get_same_padding(kernel_size[1])
47 | return p1, p2
48 | assert isinstance(kernel_size, int), 'kernel size should be either `int` or `tuple`'
49 | assert kernel_size % 2 > 0, 'kernel size should be odd number'
50 | return kernel_size // 2
51 |
52 |
53 | def shuffle_layer(x, groups):
54 | batchsize, num_channels, height, width = x.data.size()
55 | channels_per_group = num_channels // groups
56 | # reshape
57 | x = x.view(batchsize, groups, channels_per_group, height, width)
58 | # transpose
59 | x = torch.transpose(x, 1, 2).contiguous()
60 | # flatten
61 | x = x.view(batchsize, -1, height, width)
62 | return x
63 |
64 |
65 | def get_split_list(in_dim, child_num):
66 | in_dim_list = [in_dim // child_num] * child_num
67 | for _i in range(in_dim % child_num):
68 | in_dim_list[_i] += 1
69 | return in_dim_list
70 |
71 |
72 | def list_sum(x):
73 | if len(x) == 1:
74 | return x[0]
75 | else:
76 | return x[0] + list_sum(x[1:])
77 |
78 |
79 | def count_parameters(model):
80 | total_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
81 | return total_params
82 |
83 |
84 | def count_conv_flop(layer, x):
85 | out_h = int(x.size()[2] / layer.stride[0])
86 | out_w = int(x.size()[3] / layer.stride[1])
87 | delta_ops = layer.in_channels * layer.out_channels * layer.kernel_size[0] * layer.kernel_size[
88 | 1] * out_h * out_w / layer.groups
89 | return delta_ops
90 |
91 |
92 | def accuracy(output, target, topk=(1,)):
93 | """ Computes the precision@k for the specified values of k """
94 | maxk = max(topk)
95 | batch_size = target.size(0)
96 |
97 | _, pred = output.topk(maxk, 1, True, True)
98 | pred = pred.t()
99 | correct = pred.eq(target.view(1, -1).expand_as(pred))
100 |
101 | res = []
102 | for k in topk:
103 | correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
104 | res.append(correct_k.mul_(100.0 / batch_size))
105 | return res
106 |
107 |
108 | class AverageMeter(object):
109 | """
110 | Computes and stores the average and current value
111 | Copied from: https://github.com/pytorch/examples/blob/master/imagenet/main.py
112 | """
113 |
114 | def __init__(self):
115 | self.val = 0
116 | self.avg = 0
117 | self.sum = 0
118 | self.count = 0
119 |
120 | def reset(self):
121 | self.val = 0
122 | self.avg = 0
123 | self.sum = 0
124 | self.count = 0
125 |
126 | def update(self, val, n=1):
127 | self.val = val
128 | self.sum += val * n
129 | self.count += n
130 | self.avg = self.sum / self.count
131 |
132 |
133 | class BasicUnit(nn.Module):
134 |
135 | def forward(self, x):
136 | raise NotImplementedError
137 |
138 | @property
139 | def unit_str(self):
140 | raise NotImplementedError
141 |
142 | @property
143 | def config(self):
144 | raise NotImplementedError
145 |
146 | @staticmethod
147 | def build_from_config(config):
148 | raise NotImplementedError
149 |
150 | def get_flops(self, x):
151 | raise NotImplementedError
152 |
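A usage sketch for `cross_entropy_with_label_smoothing` above: the one-hot target is softened so the true class gets weight (1 - eps) + eps/n_classes and every other class gets eps/n_classes (shapes below are illustrative):

```python
import torch

pred = torch.randn(8, 100)               # logits: 8 samples, 100 classes
target = torch.randint(0, 100, (8,))     # integer class labels
loss = cross_entropy_with_label_smoothing(pred, target, label_smoothing=0.1)
# true class weight: (1 - 0.1) + 0.1/100; every other class: 0.1/100
print(loss.item())
```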
--------------------------------------------------------------------------------
/src/config.py:
--------------------------------------------------------------------------------
1 | from easydict import EasyDict as edict
2 | # from pathlib import Path
3 | import torch
4 | import os
5 | from torchvision import transforms as trans
6 | from utils.constants import *
7 | list_model = ['wget https://www.dropbox.com/s/akktsgxp0n8cwn2/model_mobilefacenet.pth?dl=0 -O model_mobilefacenet.pth',
8 | 'wget https://www.dropbox.com/s/kzo52d9neybjxsb/model_ir_se50.pth?dl=0 -O model_ir_se50.pth',
9 | 'wget https://www.dropbox.com/s/rxavczg9dlxy3a8/model_ir50.pth?dl=0 -O model_ir50.pth']
10 | def get_config():
11 | conf = edict()
12 | conf.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
13 | conf.lr = 1e-3
14 | conf.milestones = [18,30,42]
15 | conf.momentum = 0.9
16 | conf.pin_memory = True
17 | # conf.num_workers = 4 # when batchsize is 200
18 | conf.num_workers = 3
19 | conf.train_root = "/mnt/01D4A1D481139570/Dataset/Face/casia"
20 | conf.file_list = '/mnt/01D4A1D481139570/Dataset/Face/casia_train.txt'
21 | conf.batch_size = 4
22 | conf.lfw_root = '/mnt/01D4A1D481139570/Dataset/Face/data/LFW/lfw_align_112'
23 | conf.lfw_file_list = '/mnt/01D4A1D481139570/Dataset/Face/data/LFW/pairs.txt'
24 | conf.agedb_root = '/mnt/01D4A1D481139570/Dataset/Face/data/AgeDB-30/agedb30_align_112'
25 | conf.agedb_file_list = '/mnt/01D4A1D481139570/Dataset/Face/data/AgeDB-30/agedb_30_pair.txt'
26 | conf.cfp_root = '/mnt/01D4A1D481139570/Dataset/Face/data/CFP-FP/CFP_FP_aligned_112'
27 | conf.cfp_file_list = '/mnt/01D4A1D481139570/Dataset/Face/data/CFP-FP/cfp_fp_pair.txt'
28 | return conf
--------------------------------------------------------------------------------
/src/dataset/VGG_FP.py:
--------------------------------------------------------------------------------
1 | import torchvision.transforms as transforms
2 | import torch.utils.data as data
3 | import numpy as np
4 | import cv2
5 | import os
6 | import torch
7 | import PIL.Image as Image
8 | from config import get_config
9 | conf = get_config()  # note: get_config() in this src/config.py takes no arguments
10 | def img_loader(path):
11 | try:
12 | with open(path, 'rb') as f:
13 | img = cv2.imread(path)
14 | if len(img.shape) == 2:
15 | img = np.stack([img] * 3, 2)
16 |
17 | return img
18 | except IOError:
19 | print('Cannot load image ' + path)
20 |
21 | class VGG_FP(data.Dataset):
22 | def __init__(self, config, transform=None, loader=img_loader):
23 | self.root = config.train_root
24 | self.file_list = config.file_list
25 | self.transform = transform
26 | self.loader = loader
27 | image_list = []
28 | label_list = []
29 | with open(config.file_list) as f:
30 | img_label_list = f.read().splitlines()
31 | for info in img_label_list:
32 | image_path, label_name = info.split(' ')
33 | image_list.append(image_path)
34 | label_list.append(int(label_name))
35 | self.image_list = image_list
36 | self.label_list = label_list
37 | self.class_nums = len(np.unique(self.label_list))
38 | self.num_iter = len(self.image_list)// 64
39 | print("dataset size: ", len(self.image_list), '/', self.class_nums)
40 | def __getitem__(self, index):
41 | img_path = self.image_list[index]
42 | label = self.label_list[index]
43 | img = self.loader(os.path.join(self.root, img_path))
44 | # random flip with ratio of 0.5
45 | flip = np.random.choice(2) * 2 - 1
46 | img = img[:, ::flip, :]
47 | # img = (img - 127.5) / 128.0
48 | img = Image.fromarray(img.astype('uint8'), 'RGB')
49 | if self.transform is not None:
50 | img = self.transform(img)
51 | else:
52 |             img = torch.from_numpy(np.asarray(img).copy())  # img is a PIL Image at this point
53 | return img, label
54 |
55 | def __len__(self):
56 | return len(self.image_list)
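A loader sketch mirroring the commented `__main__` blocks in the sibling dataset files (runs only where the dataset paths in `conf` exist):

```python
transform = transforms.Compose([
    transforms.ToTensor(),                                   # [0, 255] -> [0.0, 1.0]
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),  # [0.0, 1.0] -> [-1.0, 1.0]
])
dataset = VGG_FP(conf, transform=transform)
loader = data.DataLoader(dataset, batch_size=64, shuffle=True, num_workers=2)
imgs, labels = next(iter(loader))
```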
--------------------------------------------------------------------------------
/src/dataset/__init__.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # encoding: utf-8
3 | '''
4 | @author: wujiyang
5 | @contact: wujiyang@hust.edu.cn
6 | @file: __init__.py.py
7 | @time: 2018/12/21 15:31
8 | @desc:
9 | '''
--------------------------------------------------------------------------------
/src/dataset/agedb.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # encoding: utf-8
3 | '''
4 | @author: wujiyang
5 | @contact: wujiyang@hust.edu.cn
6 | @file: agedb.py.py
7 | @time: 2018/12/25 18:43
8 | @desc: AgeDB-30 test data loader, agedb test protocol is the same with lfw
9 | '''
10 |
11 | import numpy as np
12 | import cv2
13 | import os
14 | import torch.utils.data as data
15 | import PIL.Image as Image
16 | import torch
17 | import torchvision.transforms as transforms
18 |
19 | def img_loader(path):
20 | try:
21 | with open(path, 'rb') as f:
22 | img = cv2.imread(path)
23 | if len(img.shape) == 2:
24 | img = np.stack([img] * 3, 2)
25 | return img
26 | except IOError:
27 | print('Cannot load image ' + path)
28 |
29 | class AgeDB30(data.Dataset):
30 | def __init__(self, config, transform=None, loader=img_loader):
31 |
32 | self.root = config.agedb_root
33 | self.file_list = config.agedb_file_list
34 | self.transform = transform
35 | self.loader = loader
36 | self.nameLs = []
37 | self.nameRs = []
38 | self.folds = []
39 | self.flags = []
40 |
41 | with open(config.agedb_file_list) as f:
42 | pairs = f.read().splitlines()
43 | for i, p in enumerate(pairs):
44 | p = p.split(' ')
45 | nameL = p[0]
46 | nameR = p[1]
47 | fold = i // 600
48 | flag = int(p[2])
49 |
50 | self.nameLs.append(nameL)
51 | self.nameRs.append(nameR)
52 | self.folds.append(fold)
53 | self.flags.append(flag)
54 |
55 | def __getitem__(self, index):
56 |
57 | img_l = self.loader(os.path.join(self.root, self.nameLs[index]))
58 | img_r = self.loader(os.path.join(self.root, self.nameRs[index]))
59 | imglist = [img_l, cv2.flip(img_l, 1), img_r, cv2.flip(img_r, 1)]
60 |
61 | if self.transform is not None:
62 | for i in range(len(imglist)):
63 | imglist[i] = Image.fromarray(imglist[i].astype('uint8'), 'RGB')
64 | imglist[i] = self.transform(imglist[i])
65 |
66 | imgs = imglist
67 | return imgs
68 | else:
69 | imgs = [torch.from_numpy(i) for i in imglist]
70 | return imgs
71 |
72 | def __len__(self):
73 | return len(self.nameLs)
74 |
75 |
76 | # if __name__ == '__main__':
77 | # root = '/media/sda/AgeDB-30/agedb30_align_112'
78 | # file_list = '/media/sda/AgeDB-30/agedb_30_pair.txt'
79 |
80 | # transform = transforms.Compose([
81 | # transforms.ToTensor(), # range [0, 255] -> [0.0,1.0]
82 | # transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)) # range [0.0, 1.0] -> [-1.0,1.0]
83 | # ])
84 |
85 | # dataset = AgeDB30(root, file_list, transform=transform)
86 | # trainloader = data.DataLoader(dataset, batch_size=64, shuffle=False, num_workers=2, drop_last=False)
87 | # for data in trainloader:
88 | # for d in data:
89 | # print(d[0].shape)
--------------------------------------------------------------------------------
/src/dataset/casia_webface.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # encoding: utf-8
3 | '''
4 | @author: wujiyang
5 | @contact: wujiyang@hust.edu.cn
6 | @file: casia_webface.py
7 | @time: 2018/12/21 19:09
8 | @desc: CASIA-WebFace dataset loader
9 | '''
10 |
11 | import torchvision.transforms as transforms
12 | import torch.utils.data as data
13 | import numpy as np
14 | import cv2
15 | import os
16 | import torch
17 | import PIL.Image as Image
18 |
19 | def img_loader(path):
20 | try:
21 | with open(path, 'rb') as f:
22 | img = cv2.imread(path)
23 | if len(img.shape) == 2:
24 | img = np.stack([img] * 3, 2)
25 |
26 | return img
27 | except IOError:
28 | print('Cannot load image ' + path)
29 |
30 |
31 | class CASIAWebFace(data.Dataset):
32 | def __init__(self, root= '/mnt/01D4A1D481139570/Dataset/Face/CASIA/CASIA/CASIA-WebFace-112X96', file_list = '/mnt/01D4A1D481139570/Dataset/Face/CASIA/CASIA/CASIA-WebFace-112X96.txt', transform=None, loader=img_loader):
33 |
34 | self.root = root
35 | self.transform = transform
36 | self.loader = loader
37 |
38 | image_list = []
39 | label_list = []
40 | with open(file_list) as f:
41 | img_label_list = f.read().splitlines()
42 | for info in img_label_list:
43 | image_path, label_name = info.split(' ')
44 | image_list.append(image_path)
45 | label_list.append(int(label_name))
46 |
47 | self.image_list = image_list
48 | self.label_list = label_list
49 | self.class_nums = len(np.unique(self.label_list))
50 | print("dataset size: ", len(self.image_list), '/', self.class_nums)
51 |
52 | def __getitem__(self, index):
53 | img_path = self.image_list[index]
54 | label = self.label_list[index]
55 |
56 | img = self.loader(os.path.join(self.root, img_path))
57 |
58 |
59 | # random flip with ratio of 0.5
60 | flip = np.random.choice(2) * 2 - 1
61 | if flip == 1:
62 | img = cv2.flip(img, 1)
63 | # for im in img:
64 | img = Image.fromarray(img.astype('uint8'), 'RGB')
65 | if self.transform is not None:
66 | img = self.transform(img)
67 | else:
68 |             img = torch.from_numpy(np.asarray(img).copy())  # img is a PIL Image at this point
69 |
70 | return img, label
71 |
72 | def __len__(self):
73 | return len(self.image_list)
74 |
75 |
76 | # if __name__ == '__main__':
77 | # root = 'D:/data/webface_align_112'
78 | # file_list = 'D:/data/webface_align_train.list'
79 |
80 | # transform = transforms.Compose([
81 | # transforms.ToTensor(), # range [0, 255] -> [0.0,1.0]
82 | # transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)) # range [0.0, 1.0] -> [-1.0,1.0]
83 | # ])
84 | # dataset = CASIAWebFace(root, file_list, transform=transform)
85 | # trainloader = data.DataLoader(dataset, batch_size=64, shuffle=True, num_workers=2, drop_last=False)
86 | # print(len(dataset))
87 | # for data in trainloader:
88 | # print(data[0].shape)
--------------------------------------------------------------------------------
/src/dataset/cfp.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # encoding: utf-8
3 | '''
4 | @author: wujiyang
5 | @contact: wujiyang@hust.edu.cn
6 | @file: cfp.py
7 | @time: 2018/12/26 16:19
8 | @desc: the CFP-FP test dataset loader; it's similar to lfw and agedb, except that it has 700 pairs per fold
9 | '''
10 |
11 |
12 | import numpy as np
13 | import cv2
14 | import os
15 | import torch.utils.data as data
16 | import PIL.Image as Image
17 | import torch
18 | import torchvision.transforms as transforms
19 |
20 | def img_loader(path):
21 | try:
22 | with open(path, 'rb') as f:
23 | img = cv2.imread(path)
24 | if len(img.shape) == 2:
25 | img = np.stack([img] * 3, 2)
26 | return img
27 | except IOError:
28 | print('Cannot load image ' + path)
29 |
30 | class CFP_FP(data.Dataset):
31 | def __init__(self, config, transform=None, loader=img_loader):
32 |
33 | self.root = config.cfp_root
34 | self.file_list = config.cfp_file_list
35 | self.transform = transform
36 | self.loader = loader
37 | self.nameLs = []
38 | self.nameRs = []
39 | self.folds = []
40 | self.flags = []
41 |
42 | with open(config.cfp_file_list) as f:
43 | pairs = f.read().splitlines()
44 | for i, p in enumerate(pairs):
45 | p = p.split(' ')
46 | nameL = p[0]
47 | nameR = p[1]
48 | fold = i // 700
49 | flag = int(p[2])
50 |
51 | self.nameLs.append(nameL)
52 | self.nameRs.append(nameR)
53 | self.folds.append(fold)
54 | self.flags.append(flag)
55 |
56 | def __getitem__(self, index):
57 |
58 | img_l = self.loader(os.path.join(self.root, self.nameLs[index]))
59 | img_r = self.loader(os.path.join(self.root, self.nameRs[index]))
60 | imglist = [img_l, cv2.flip(img_l, 1), img_r, cv2.flip(img_r, 1)]
61 |
62 | if self.transform is not None:
63 | for i in range(len(imglist)):
64 | imglist[i] = Image.fromarray(imglist[i].astype('uint8'), 'RGB')
65 | imglist[i] = self.transform(imglist[i])
66 |
67 | imgs = imglist
68 | return imgs
69 | else:
70 | imgs = [torch.from_numpy(i) for i in imglist]
71 | return imgs
72 |
73 | def __len__(self):
74 | return len(self.nameLs)
75 |
76 |
--------------------------------------------------------------------------------
/src/dataset/lfw.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # encoding: utf-8
3 | '''
4 | @author: wujiyang
5 | @contact: wujiyang@hust.edu.cn
6 | @file: lfw.py.py
7 | @time: 2018/12/22 10:00
8 | @desc: lfw dataset loader
9 | '''
10 |
11 | import numpy as np
12 | import cv2
13 | import os
14 | import torch.utils.data as data
15 | import PIL.Image as Image
16 | import torch
17 | import torchvision.transforms as transforms
18 |
19 | def img_loader(path):
20 | try:
21 | with open(path, 'rb') as f:
22 | img = cv2.imread(path)
23 | if len(img.shape) == 2:
24 | img = np.stack([img] * 3, 2)
25 | return img
26 | except IOError:
27 | print('Cannot load image ' + path)
28 |
29 | class LFW(data.Dataset):
30 | def __init__(self, config, transform=None, loader=img_loader):
31 |
32 | self.root = config.lfw_root
33 | self.file_list = config.lfw_file_list
34 | self.transform = transform
35 | self.loader = loader
36 | self.nameLs = []
37 | self.nameRs = []
38 | self.folds = []
39 | self.flags = []
40 |
41 | with open(config.lfw_file_list) as f:
42 | pairs = f.read().splitlines()[1:]
43 | for i, p in enumerate(pairs):
44 | p = p.split('\t')
45 | if len(p) == 3:
46 | nameL = p[0] + '/' + p[0] + '_' + '{:04}.jpg'.format(int(p[1]))
47 | nameR = p[0] + '/' + p[0] + '_' + '{:04}.jpg'.format(int(p[2]))
48 | fold = i // 600
49 | flag = 1
50 | elif len(p) == 4:
51 | nameL = p[0] + '/' + p[0] + '_' + '{:04}.jpg'.format(int(p[1]))
52 | nameR = p[2] + '/' + p[2] + '_' + '{:04}.jpg'.format(int(p[3]))
53 | fold = i // 600
54 | flag = -1
55 | self.nameLs.append(nameL)
56 | self.nameRs.append(nameR)
57 | self.folds.append(fold)
58 | self.flags.append(flag)
59 |
60 | def __getitem__(self, index):
61 |
62 | img_l = self.loader(os.path.join(self.root, self.nameLs[index]))
63 | img_r = self.loader(os.path.join(self.root, self.nameRs[index]))
64 | imglist = [img_l, cv2.flip(img_l, 1), img_r, cv2.flip(img_r, 1)]
65 |
66 | if self.transform is not None:
67 | for i in range(len(imglist)):
68 | imglist[i] = Image.fromarray(imglist[i].astype('uint8'), 'RGB')
69 | imglist[i] = self.transform(imglist[i])
70 |
71 | imgs = imglist
72 | return imgs
73 | else:
74 | imgs = [torch.from_numpy(i) for i in imglist]
75 | return imgs
76 |
77 | def __len__(self):
78 | return len(self.nameLs)
79 |
80 |
81 | # if __name__ == '__main__':
82 | # root = '/mnt/01D4A1D481139570/Dataset/Face/lfw_process/lfw-112X96'
83 | # file_list = '/mnt/01D4A1D481139570/Dataset/Face/lfw_process/pairs.txt'
84 |
85 | # transform = transforms.Compose([
86 | # transforms.ToTensor(), # range [0, 255] -> [0.0,1.0]
87 | # transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)) # range [0.0, 1.0] -> [-1.0,1.0]
88 | # ])
89 |
90 | # dataset = LFW(root, file_list, transform=transform)
91 | # #dataset = LFW(root, file_list)
92 | # trainloader = data.DataLoader(dataset, batch_size=64, shuffle=False, num_workers=2, drop_last=False)
93 | # print(len(dataset))
94 | # for data in trainloader:
95 | # for d in data:
96 | # print(d[0].shape)
--------------------------------------------------------------------------------
/src/dataset/megaface.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # encoding: utf-8
3 | '''
4 | @author: wujiyang
5 | @contact: wujiyang@hust.edu.cn
6 | @file: megaface.py
7 | @time: 2018/12/24 16:29
8 | @desc:
9 | '''
10 |
11 | import torchvision.transforms as transforms
12 | import torch.utils.data as data
13 | import numpy as np
14 | import cv2
15 | import os
16 | import torch
17 |
18 | def img_loader(path):
19 | try:
20 | with open(path, 'rb') as f:
21 | img = cv2.imread(path)
22 | if len(img.shape) == 2:
23 | img = np.stack([img] * 3, 2)
24 | return img
25 | except IOError:
26 | print('Cannot load image ' + path)
27 |
28 |
29 | class MegaFace(data.Dataset):
30 | def __init__(self, facescrub_dir, megaface_dir, transform=None, loader=img_loader):
31 |
32 | self.transform = transform
33 | self.loader = loader
34 |
35 | test_image_file_list = []
36 | print('Scanning files under facescrub and megaface...')
37 | for root, dirs, files in os.walk(facescrub_dir):
38 | for e in files:
39 | filename = os.path.join(root, e)
40 | ext = os.path.splitext(filename)[1].lower()
41 | if ext in ('.png', '.bmp', '.jpg', '.jpeg'):
42 | test_image_file_list.append(filename)
43 | for root, dirs, files in os.walk(megaface_dir):
44 | for e in files:
45 | filename = os.path.join(root, e)
46 | ext = os.path.splitext(filename)[1].lower()
47 | if ext in ('.png', '.bmp', '.jpg', '.jpeg'):
48 | test_image_file_list.append(filename)
49 |
50 | self.image_list = test_image_file_list
51 |
52 | def __getitem__(self, index):
53 | img_path = self.image_list[index]
54 | img = self.loader(img_path)
55 |
56 | if self.transform is not None:
57 | img = self.transform(img)
58 | else:
59 | img = torch.from_numpy(img)
60 |
61 | return img, img_path
62 |
63 | def __len__(self):
64 | return len(self.image_list)
65 |
66 |
67 | if __name__ == '__main__':
68 | facescrub = '/media/sda/megaface_test_kit/facescrub_align_112/'
69 | megaface = '/media/sda/megaface_test_kit/megaface_align_112/'
70 |
71 | transform = transforms.Compose([
72 | transforms.ToTensor(), # range [0, 255] -> [0.0,1.0]
73 | transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)) # range [0.0, 1.0] -> [-1.0,1.0]
74 | ])
75 | dataset = MegaFace(facescrub, megaface, transform=transform)
76 | trainloader = data.DataLoader(dataset, batch_size=64, shuffle=False, num_workers=2, drop_last=False)
77 | print(len(dataset))
78 | for data in trainloader:
79 | print(data.shape)
--------------------------------------------------------------------------------
/src/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: '2'
2 | services:
3 | face_recognition:
4 | build: .
5 | ports:
6 | - "8084:8084"
--------------------------------------------------------------------------------
/src/eval/eval_lfw_blufr.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # encoding: utf-8
3 | '''
4 | @author: wujiyang
5 | @contact: wujiyang@hust.edu.cn
6 | @file: eval_lfw_blufr.py
7 | @time: 2019/1/17 15:52
8 | @desc: test lfw accuracy on blufr protocol
9 | '''
10 | '''
11 | LFW BLUFR TEST PROTOCOL
12 |
13 | Official Website: http://www.cbsr.ia.ac.cn/users/scliao/projects/blufr/
14 |
15 | When I try to do this, I find that the blufr_lfw_config.mat file provided by above site is too old.
16 | Some image files listed in the mat have been removed in lfw pairs.txt
17 | So this work is suspended for now...
18 | '''
19 |
20 | import scipy.io as sio
21 | import argparse
22 |
23 | def readName(file='pairs.txt'):
24 | name_list = []
25 | f = open(file, 'r')
26 | lines = f.readlines()
27 |
28 | for line in lines[1:]:
29 | line_split = line.rstrip().split()
30 | if len(line_split) == 3:
31 | name_list.append(line_split[0])
32 | elif len(line_split) == 4:
33 | name_list.append(line_split[0])
34 | name_list.append(line_split[2])
35 | else:
36 | print('wrong file, please check again')
37 |
38 | return list(set(name_list))
39 |
40 |
41 | def main(args):
42 | blufr_info = sio.loadmat(args.lfw_blufr_file)
43 | #print(blufr_info)
44 | name_list = readName()
45 |
46 | image = blufr_info['imageList']
47 | missing_files = []
48 | for i in range(image.shape[0]):
49 | name = image[i][0][0]
50 | index = name.rfind('_')
51 | name = name[0:index]
52 | if name not in name_list:
53 | print(name)
54 | missing_files.append(name)
55 | print('lfw pairs.txt total persons: ', len(name_list))
56 | print('blufr_mat_missing persons: ', len(missing_files))
57 |
58 | '''
59 | Some of the missing file:
60 | Zdravko_Mucic
61 | Zelma_Novelo
62 | Zeng_Qinghong
63 | Zumrati_Juma
64 | lfw pairs.txt total persons: 4281
65 | blufr_mat_missing persons: 1549
66 |
67 | '''
68 |
69 | if __name__ == '__main__':
70 | parser = argparse.ArgumentParser(description='lfw blufr test')
71 |     parser.add_argument('--lfw_blufr_file', type=str, default='./blufr_lfw_config.mat', help='path to the BLUFR config .mat file')
72 |     parser.add_argument('--lfw_pairs.txt', type=str, default='./pairs.txt', help='path to lfw pairs.txt')
73 | parser.add_argument('--gpus', type=str, default='2,3', help='gpu list')
74 | args = parser.parse_args()
75 |
76 | main(args)
--------------------------------------------------------------------------------
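For reference, readName() above leans on the two record formats used in lfw pairs.txt: a genuine pair has 3 whitespace-separated fields (one name, two image indices), an impostor pair has 4 fields (two names, each followed by an image index). An illustration with made-up records:

# 3 fields -> one identity
'Abel_Pacheco 1 4'.split()             # ['Abel_Pacheco', '1', '4']
# 4 fields -> two identities, at positions 0 and 2
'Abel_Pacheco 1 Jane_Doe 2'.split()    # names are fields 0 and 2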
/src/margin/ArcMarginProduct.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # encoding: utf-8
3 | '''
4 | @author: wujiyang
5 | @contact: wujiyang@hust.edu.cn
6 | @file: ArcMarginProduct.py
7 | @time: 2018/12/25 9:13
8 | @desc: additive angular margin for arcface/insightface
9 | '''
10 |
11 | import math
12 | import torch
13 | from torch import nn
14 | from torch.nn import Parameter
15 | import torch.nn.functional as F
16 |
17 | class ArcMarginProduct(nn.Module):
18 | def __init__(self, in_feature=128, out_feature=10575, s=32.0, m=0.50, easy_margin=False):
19 | super(ArcMarginProduct, self).__init__()
20 | self.in_feature = in_feature
21 | self.out_feature = out_feature
22 | self.s = s
23 | self.m = m
24 | self.weight = Parameter(torch.Tensor(out_feature, in_feature))
25 | nn.init.xavier_uniform_(self.weight)
26 |
27 | self.easy_margin = easy_margin
28 | self.cos_m = math.cos(m)
29 | self.sin_m = math.sin(m)
30 |
31 |         # keep cos(theta + m) monotonically decreasing for theta in [0°, 180°]
32 | self.th = math.cos(math.pi - m)
33 | self.mm = math.sin(math.pi - m) * m
34 |
35 | def forward(self, x, label):
36 | # cos(theta)
37 | cosine = F.linear(F.normalize(x), F.normalize(self.weight))
38 | # cos(theta + m)
39 |         sine = torch.sqrt((1.0 - torch.pow(cosine, 2)).clamp(0, 1))  # clamp guards against tiny fp negatives
40 | phi = cosine * self.cos_m - sine * self.sin_m
41 |
42 | if self.easy_margin:
43 | phi = torch.where(cosine > 0, phi, cosine)
44 | else:
45 | phi = torch.where((cosine - self.th) > 0, phi, cosine - self.mm)
46 |
47 | #one_hot = torch.zeros(cosine.size(), device='cuda' if torch.cuda.is_available() else 'cpu')
48 | one_hot = torch.zeros_like(cosine)
49 | one_hot.scatter_(1, label.view(-1, 1), 1)
50 | output = (one_hot * phi) + ((1.0 - one_hot) * cosine)
51 | output = output * self.s
52 |
53 | return output
54 |
55 |
56 | if __name__ == '__main__':
57 | pass
--------------------------------------------------------------------------------
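Since ArcMarginProduct returns scaled class logits, it plugs straight into a standard cross-entropy loss during training. A minimal sketch of one step with the class above in scope; the batch size, feature dimension, and random tensors are made up for illustration:

import torch
import torch.nn as nn

margin = ArcMarginProduct(in_feature=512, out_feature=10575, s=32.0, m=0.5)
criterion = nn.CrossEntropyLoss()

embeddings = torch.randn(8, 512)        # stands in for the backbone output
labels = torch.randint(0, 10575, (8,))
logits = margin(embeddings, labels)     # cos(theta + m) at the true class, cos(theta) elsewhere
loss = criterion(logits, labels)
loss.backward()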
/src/margin/CosineMarginProduct.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # encoding: utf-8
3 | '''
4 | @author: wujiyang
5 | @contact: wujiyang@hust.edu.cn
6 | @file: CosineMarginProduct.py
7 | @time: 2018/12/25 9:13
8 | @desc: additive cosine margin for cosface
9 | '''
10 |
11 | import torch
12 | import torch.nn as nn
13 | import torch.nn.functional as F
14 | from torch.nn import Parameter
15 |
16 |
17 | class CosineMarginProduct(nn.Module):
18 | def __init__(self, in_feature=128, out_feature=10575, s=30.0, m=0.40):
19 | super(CosineMarginProduct, self).__init__()
20 | self.in_feature = in_feature
21 | self.out_feature = out_feature
22 | self.s = s
23 | self.m = m
24 | self.weight = Parameter(torch.Tensor(out_feature, in_feature))
25 | nn.init.xavier_uniform_(self.weight)
26 |
27 |
28 | def forward(self, input, label):
29 | cosine = F.linear(F.normalize(input), F.normalize(self.weight))
30 | # one_hot = torch.zeros(cosine.size(), device='cuda' if torch.cuda.is_available() else 'cpu')
31 | one_hot = torch.zeros_like(cosine)
32 | one_hot.scatter_(1, label.view(-1, 1), 1.0)
33 |
34 | output = self.s * (cosine - one_hot * self.m)
35 | return output
36 |
37 |
38 | if __name__ == '__main__':
39 | pass
--------------------------------------------------------------------------------
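The difference from ArcMarginProduct is where the penalty lands: ArcFace perturbs the angle, s * cos(theta + m), while CosFace subtracts from the cosine itself, s * (cos(theta) - m). A quick numeric comparison of the target-class logit at theta = 60 degrees, using each file's default hyperparameters:

import math

theta = math.radians(60)
print(32.0 * math.cos(theta + 0.50))    # ArcFace (s=32, m=0.50): ~0.76
print(30.0 * (math.cos(theta) - 0.40))  # CosFace (s=30, m=0.40): 3.0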
/src/margin/InnerProduct.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # encoding: utf-8
3 | '''
4 | @author: wujiyang
5 | @contact: wujiyang@hust.edu.cn
6 | @file: InnerProduct.py
7 | @time: 2019/1/4 16:54
8 | @desc: just a normal inner product, as a fully connected layer does.
9 | '''
10 | import torch
11 | import torch.nn as nn
12 | import torch.nn.functional as F
13 | from torch.nn import Parameter
14 |
15 | class InnerProduct(nn.Module):
16 | def __init__(self, in_feature=128, out_feature=10575):
17 | super(InnerProduct, self).__init__()
18 | self.in_feature = in_feature
19 | self.out_feature = out_feature
20 |
21 | self.weight = Parameter(torch.Tensor(out_feature, in_feature))
22 | nn.init.xavier_uniform_(self.weight)
23 |
24 |
25 | def forward(self, input):
26 | output = F.linear(input, self.weight)
27 | return output
28 |
29 |
30 | if __name__ == '__main__':
31 | pass
--------------------------------------------------------------------------------
/src/margin/SphereMarginProduct.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # encoding: utf-8
3 | '''
4 | @author: wujiyang
5 | @contact: wujiyang@hust.edu.cn
6 | @file: SphereMarginProduct.py
7 | @time: 2018/12/25 9:19
8 | @desc: multiplicative angular margin for sphereface
9 | '''
10 |
11 | import torch
12 | import torch.nn as nn
13 | import torch.nn.functional as F
14 | from torch.nn import Parameter
15 | import math
16 |
17 | class SphereMarginProduct(nn.Module):
18 |     def __init__(self, in_feature, out_feature, m=4, base=1000.0, gamma=0.0001, power=2, lambda_min=5.0, iter=0):
19 |         super(SphereMarginProduct, self).__init__()
20 |         assert m in [1, 2, 3, 4], 'margin should be 1, 2, 3 or 4'
21 |         self.in_feature = in_feature
22 |         self.out_feature = out_feature
23 |         self.m = m
24 |         self.base = base
25 |         self.gamma = gamma
26 |         self.power = power
27 |         self.lambda_min = lambda_min
28 |         self.iter = iter
29 |         self.weight = Parameter(torch.Tensor(out_feature, in_feature))
30 |         nn.init.xavier_uniform_(self.weight)
31 |
32 |         # duplication formulas: cos(m*theta) expressed as polynomials in cos(theta)
33 |         self.margin_formula = [
34 |             lambda x: x ** 0,
35 |             lambda x: x ** 1,
36 |             lambda x: 2 * x ** 2 - 1,
37 |             lambda x: 4 * x ** 3 - 3 * x,
38 |             lambda x: 8 * x ** 4 - 8 * x ** 2 + 1,
39 |             lambda x: 16 * x ** 5 - 20 * x ** 3 + 5 * x
40 |         ]
41 |
42 |     def forward(self, input, label):
43 |         self.iter += 1
44 |         self.cur_lambda = max(self.lambda_min, self.base * (1 + self.gamma * self.iter) ** (-1 * self.power))
45 |
46 |         cos_theta = F.linear(F.normalize(input), F.normalize(self.weight))
47 |         cos_theta = cos_theta.clamp(-1, 1)
48 |
49 |         cos_m_theta = self.margin_formula[self.m](cos_theta)
50 |         theta = cos_theta.data.acos()
51 |         k = ((self.m * theta) / math.pi).floor()
52 |         phi_theta = ((-1.0) ** k) * cos_m_theta - 2 * k
53 |         phi_theta_ = (self.cur_lambda * cos_theta + phi_theta) / (1 + self.cur_lambda)
54 |         norm_of_feature = torch.norm(input, 2, 1)
55 |
56 |         one_hot = torch.zeros_like(cos_theta)
57 |         one_hot.scatter_(1, label.view(-1, 1), 1)
58 |
59 |         output = one_hot * phi_theta_ + (1 - one_hot) * cos_theta
60 |         output *= norm_of_feature.view(-1, 1)
61 |
62 |         return output
63 |
64 |
65 | if __name__ == '__main__':
66 |     pass
--------------------------------------------------------------------------------
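The margin_formula table above encodes the multiple-angle identities cos(m*theta) as polynomials in cos(theta) (Chebyshev polynomials), so the layer can evaluate cos(m*theta) directly from the cosine similarity matrix. A quick sanity check of the m = 2..4 entries:

import math

theta = 0.7
x = math.cos(theta)
print(2 * x ** 2 - 1, math.cos(2 * theta))               # both ~0.1700
print(4 * x ** 3 - 3 * x, math.cos(3 * theta))           # both ~-0.5048
print(8 * x ** 4 - 8 * x ** 2 + 1, math.cos(4 * theta))  # both ~-0.9422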
/src/margin/__init__.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # encoding: utf-8
3 | '''
4 | @author: wujiyang
5 | @contact: wujiyang@hust.edu.cn
6 | @file: __init__.py
7 | @time: 2018/12/25 9:12
8 | @desc:
9 | '''
--------------------------------------------------------------------------------
/src/requirements.txt:
--------------------------------------------------------------------------------
1 | pandas
2 | torch==0.4.0
3 | numpy==1.14.5
4 | matplotlib==2.1.2
5 | tqdm==4.23.4
6 | mxnet_cu90==1.2.1
7 | scipy==1.0.0
8 | bcolz==1.2.1
9 | easydict==1.7
10 | opencv_python==3.4.0.12
11 | Pillow==5.2.0
12 | mxnet==1.2.1.post1
13 | scikit_learn==0.19.2
14 | tensorboardX==1.2
15 | torchvision==0.2.1
16 | torchsummary
17 |
--------------------------------------------------------------------------------
/src/res.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nvlong21/Face_Recognize/d01ecf25b5e29d28edf53af93219c7f1805ef308/src/res.jpg
--------------------------------------------------------------------------------
/src/train.sh:
--------------------------------------------------------------------------------
1 | python3 train.py --backbone ProxyNas --margin_type ArcFace
2 |
3 |
--------------------------------------------------------------------------------
/src/utils/__init__.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # encoding: utf-8
3 | '''
4 | @author: wujiyang
5 | @contact: wujiyang@hust.edu.cn
6 | @file: __init__.py
7 | @time: 2018/12/22 9:41
8 | @desc:
9 | '''
--------------------------------------------------------------------------------
/src/utils/constants.py:
--------------------------------------------------------------------------------
1 | import os
2 | WORK_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
3 | FACE_BANK = '%s/Face_bank'%WORK_PATH
4 | MODEL_PATH = '%s/weights'%WORK_PATH
5 | LOG_PATH = '%s/log'%WORK_PATH
6 | SAVE_PATH = '%s/save'%WORK_PATH
--------------------------------------------------------------------------------
/src/utils/load_images_from_bin.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # encoding: utf-8
3 | '''
4 | @author: wujiyang
5 | @contact: wujiyang@hust.edu.cn
6 | @file: load_images_from_bin.py
7 | @time: 2018/12/25 19:21
8 | @desc: For the AgeDB-30 and CFP-FP test datasets we use the mxnet binary files provided by insightface; this is the tool to restore
9 |        the aligned images from a mxnet binary file.
10 |        You should install a cpu build of mxnet first; just doing 'pip install mxnet' is ok.
11 | '''
12 |
13 | from PIL import Image
14 | import cv2
15 | import os
16 | import pickle
17 | import mxnet as mx
18 | from tqdm import tqdm
19 |
20 | '''
21 | For the training dataset, insightface provides a mxnet .rec file; a cpu build of mxnet is enough to extract the images.
22 | '''
23 |
24 | def load_mx_rec(rec_path):
25 | save_path = os.path.join(rec_path, 'images')
26 | if not os.path.exists(save_path):
27 | os.makedirs(save_path)
28 |
29 | imgrec = mx.recordio.MXIndexedRecordIO(os.path.join(rec_path, 'train.idx'), os.path.join(rec_path, 'train.rec'), 'r')
30 | img_info = imgrec.read_idx(0)
31 | header,_ = mx.recordio.unpack(img_info)
32 | max_idx = int(header.label[0])
33 | for idx in tqdm(range(1,max_idx)):
34 | img_info = imgrec.read_idx(idx)
35 | header, img = mx.recordio.unpack_img(img_info)
36 | label = int(header.label)
37 | img = Image.fromarray(img)
38 | label_path = os.path.join(save_path, str(label).zfill(5))
39 | if not os.path.exists(label_path):
40 | os.makedirs(label_path)
41 | img.save(os.path.join(label_path, str(idx).zfill(4) + '.jpg'), quality=95)
42 |
43 |
44 | def load_image_from_bin(bin_path, save_dir):
45 | if not os.path.exists(save_dir):
46 | os.makedirs(save_dir)
47 | file = open(os.path.join(save_dir, '../', 'agedb-30-pair.txt'), 'w')
48 | bins, issame_list = pickle.load(open(bin_path, 'rb'), encoding='bytes')
49 | for idx in tqdm(range(len(bins))):
50 | _bin = bins[idx]
51 | img = mx.image.imdecode(_bin).asnumpy()
52 | img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
53 | cv2.imwrite(os.path.join(save_dir, str(idx+1).zfill(5)+'.jpg'), img)
54 | if idx % 2 == 0:
55 |             label = 1 if issame_list[idx // 2] else -1
56 |             file.write(str(idx+1).zfill(5) + '.jpg ' + str(idx+2).zfill(5) + '.jpg ' + str(label) + '\n')
57 |     file.close()
58 |
59 | if __name__ == '__main__':
60 |     bin_path = ''
61 |     save_dir = ''
62 |     rec_path = ''
63 |     # fill in the paths above, then run the restore you need, e.g.:
64 |     # load_mx_rec(rec_path)
65 |     # load_image_from_bin(bin_path, save_dir)
--------------------------------------------------------------------------------
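The pair file that load_image_from_bin writes follows the issame flags in the pickle: consecutive images (1, 2), (3, 4), ... form a pair, labeled 1 for the same identity and -1 otherwise, so the output looks like (labels here are illustrative):

00001.jpg 00002.jpg 1
00003.jpg 00004.jpg -1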
/src/utils/logging.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # encoding: utf-8
3 | '''
4 | @author: wujiyang
5 | @contact: wujiyang@hust.edu.cn
6 | @file: logging.py
7 | @time: 2018/12/22 9:42
8 | @desc: logging tools
9 | '''
10 |
11 | from __future__ import print_function
12 | import os
13 | import logging
14 |
15 |
16 | def init_log(output_dir):
17 | logging.basicConfig(level=logging.DEBUG,
18 | format='%(asctime)s %(message)s',
19 | datefmt='%Y%m%d-%H:%M:%S',
20 | filename=os.path.join(output_dir, 'log.log'),
21 | filemode='w')
22 | console = logging.StreamHandler()
23 | console.setLevel(logging.INFO)
24 | logging.getLogger('').addHandler(console)
25 | return logging
26 |
27 |
28 | if __name__ == '__main__':
29 | pass
30 |
--------------------------------------------------------------------------------
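Typical usage is to call init_log once at startup and keep the returned module; a minimal sketch (the ./log directory is a made-up path and must already exist, since basicConfig does not create it):

import os

os.makedirs('./log', exist_ok=True)
logging = init_log('./log')
_print = logging.info
_print('training started')   # written to ./log/log.log and echoed to the console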
/src/utils/visualize.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # encoding: utf-8
3 | '''
4 | @author: wujiyang
5 | @contact: wujiyang@hust.edu.cn
6 | @file: visualize.py
7 | @time: 2019/1/7 16:07
8 | @desc: visualize tools
9 | '''
10 |
11 | import visdom
12 | import numpy as np
13 | import time
14 |
15 | class Visualizer():
16 | def __init__(self, env='default', **kwargs):
17 | self.vis = visdom.Visdom(env=env, **kwargs)
18 |         self.index = 0  # start at 0 so the first plot_curves call creates the window (update=None)
19 |
20 | def plot_curves(self, d, iters, title='loss', xlabel='iters', ylabel='accuracy'):
21 | name = list(d.keys())
22 | val = list(d.values())
23 | if len(val) == 1:
24 | y = np.array(val)
25 | else:
26 | y = np.array(val).reshape(-1, len(val))
27 | self.vis.line(Y=y,
28 | X=np.array([self.index]),
29 | win=title,
30 | opts=dict(legend=name, title = title, xlabel=xlabel, ylabel=ylabel),
31 | update=None if self.index == 0 else 'append')
32 | self.index = iters
33 |
34 |
35 | # if __name__ == '__main__':
36 | # vis = Visualizer(env='test')
37 | # for i in range(10):
38 | # x = i
39 | # y = 2 * i
40 | # z = 4 * i
41 | # vis.plot_curves({'train': x, 'test': y}, iters=i, title='train')
42 | # vis.plot_curves({'train': z, 'test': y, 'val': i}, iters=i, title='test')
43 | # time.sleep(1)
--------------------------------------------------------------------------------
/stream/.gitignore:
--------------------------------------------------------------------------------
1 | .env/*
2 | *.db
3 | *.pyc
4 | *.DS_Store
5 | media/*
6 | sftp-config.json
7 | .coverage
8 | cover/*
9 |
--------------------------------------------------------------------------------
/stream/client.py:
--------------------------------------------------------------------------------
1 | # USAGE
2 | # python client.py --server-ip SERVER_IP
3 |
4 | # import the necessary packages
5 | from imutils.video import VideoStream
6 | import imagezmq
7 | import argparse
8 | import socket
9 | import time
10 | import cv2
11 |
12 | # construct the argument parser and parse the arguments
13 | ap = argparse.ArgumentParser()
14 | ap.add_argument("-s", "--server-ip", required=True,
15 | help="ip address of the server to which the client will connect")
16 | args = vars(ap.parse_args())
17 |
18 | # initialize the ImageSender object with the socket address of the
19 | # server
20 | sender = imagezmq.ImageSender(connect_to="tcp://{}:5555".format(
21 | args["server_ip"]))
22 |
23 | # get the host name, initialize the video stream, and allow the
24 | # camera sensor to warmup
25 | rpiName = socket.gethostname()
26 | # vs = VideoStream(usePiCamera=False, src = 'video.mp4').start()
27 | vs = cv2.VideoCapture('video.mp4')
28 | vs.set(cv2.CAP_PROP_FPS, 1)
29 | #vs = VideoStream(src=0).start()
30 | rpiName = "video"
31 | while True:
32 | time.sleep(0.04)
33 | # read the frame from the camera and send it to the server
34 |     ret, frame = vs.read()
35 |     if not ret:
36 |         break  # end of the video file
37 |     sender.send_image(rpiName, frame)
36 |
--------------------------------------------------------------------------------
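The receiving side of this client is imagezmq's ImageHub; because ImageSender uses REQ/REP by default, every frame must be acknowledged or the client blocks. A minimal server sketch listening on the port used above:

import cv2
import imagezmq

image_hub = imagezmq.ImageHub()        # binds to tcp://*:5555 by default
while True:
    sender_name, frame = image_hub.recv_image()
    image_hub.send_reply(b'OK')        # unblocks the client's next send_image()
    cv2.imshow(sender_name, frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break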
/stream/imagezmq/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 | imagezmq: transport OpenCV images via ZMQ.
3 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
4 |
5 | A pair of Python classes that transport OpenCV images from one
6 | computer to another. For example, OpenCV images gathered by
7 | a Raspberry Pi camera could be sent to another computer
8 | for displaying the images using cv2.imshow() or for further image processing.
9 |
10 | Copyright (c) 2017 by Jeff Bass.
11 | License: MIT, see LICENSE for more details.
12 | """
13 | # populate fields for >>>help(imagezmq)
14 | from .__version__ import __title__, __description__, __url__, __version__
15 | from .__version__ import __author__, __author_email__, __license__
16 | from .__version__ import __copyright__
17 | from .imagezmq import ImageHub, ImageSender
--------------------------------------------------------------------------------
/stream/imagezmq/__version__.py:
--------------------------------------------------------------------------------
1 | # populates fields for >>>help(imagezmq)
2 | __title__ = 'imagezmq'
3 | __description__ = 'Transporting OpenCV images via ZMQ'
4 | __url__ = ''
5 | __version__ = '0.0.2'
6 | __author__ = 'Jeff Bass'
7 | __author_email__ = 'jeff@yin-yan-ranch.com'
8 | __license__ = 'MIT 1.0'
9 | __copyright__ = 'Copyright 2017 Jeff Bass'
10 |
--------------------------------------------------------------------------------
/stream/static/base.css:
--------------------------------------------------------------------------------
1 | /* ----- General ----- */
2 |
3 | html, body {
4 | margin: 0;
5 | padding: 0;
6 | font-family: -apple-system, sans-serif;
7 | color: #935347;
8 | }
9 |
10 | a {
11 | text-decoration: none;
12 | color: inherit;
13 | }
14 |
15 |
16 | /* ----- Nav ----- */
17 |
18 | nav {
19 | display: flex;
20 | flex-wrap: wrap;
21 | width: 100%;
22 | background-color: #64706c;
23 | color: #ede9ce;
24 | font-family: HelveticaNeue-Light, sans-serif;
25 | font-size: x-large;
26 | }
27 |
28 | nav a {
29 | flex-grow: 1;
30 | text-align: center;
31 | padding: .3em;
32 | }
33 |
34 | nav a:hover {
35 | background-color: #9fa8a3;
36 | }
37 |
38 |
39 | /* ----- Headings ----- */
40 |
41 | h1, h2, h3 {
42 | padding: .3em 1em;
43 | background-color: #ede9ce;
44 | color: #935347;
45 | }
46 |
47 | .season {
48 | background-color: #ecd9bd;
49 | }
50 |
51 |
52 | /* ----- Lists ----- */
53 |
54 | ul {
55 | display: flex;
56 | flex-wrap: wrap;
57 | list-style: none;
58 | font-size: large;
59 | margin: 0;
60 | padding: 1em;
61 | }
62 |
63 | li {
64 | padding: 1em;
65 | margin: .2em;
66 | text-align: center;
67 | background-color: #ede9ce;
68 | color: #935347;
69 | }
70 |
71 | ul a {
72 | flex-grow: 1;
73 | }
74 |
75 | li:hover {
76 | background-color: #935347;
77 | color: #ede9ce;
78 | }
79 |
80 | .movie-item, .show-item {
81 | margin: .3em;
82 | }
83 |
84 |
85 | /* ----- Item Pages ----- */
86 |
87 | video {
88 | width: 80vw;
89 | margin: 2vh 10vw;
90 | background-color: #ecd9bd;
91 | }
92 |
93 | .edit-metadata {
94 | text-align: center;
95 | padding-top: 2vh;
96 | margin: 2vh 5vw;
97 | color: #935347;
98 | }
99 |
100 | .edit-metadata a {
101 | background-color: #ecd9bd;
102 | padding: .5em;
103 | }
104 |
105 | .edit-metadata a:hover {
106 | background-color: #935347;
107 | color: #ecd9bd;
108 | }
109 |
110 |
111 | /* ----- Edit Metadata ----- */
112 |
113 | .metadata-form {
114 | margin: 5vh 5vw;
115 | }
116 |
117 | .metadata-form label, .uneditable-label {
118 | color: #935347;
119 | background-color: #ecd9bd;
120 | padding: .2em .5em;
121 | font-size: large;
122 | }
123 |
124 | .metadata-form input[type="text"], input[type="number"], .uneditable-content {
125 | width: 100%;
126 | font-size: medium;
127 | color: inherit;
128 | margin: 2vh 1vw 4vh;
129 | box-sizing: border-box;
130 | }
131 |
132 | .metadata-form input[type="submit"] {
133 | border: none;
134 | background-color: #935347;
135 | color: #ecd9bd;
136 | font-size: medium;
137 | }
138 |
139 | .metadata-error {
140 | margin: 0 5%;
141 | }
142 |
143 |
144 | /* ----- Settings ----- */
145 |
146 | .settings-buttons {
147 | display: flex;
148 | justify-content: center;
149 | padding: 1em 2em;
150 | }
151 |
152 | .settings-button {
153 | flex: 1 1 25%;
154 | padding: .8em 1em;
155 | margin: 0 5%;
156 | border: none;
157 | cursor: pointer;
158 | background-color: #935347;
159 | color: #fff;
160 | font-size: medium;
161 | }
162 |
163 | .settings-button:hover {
164 | background-color: #ede9ce;
165 | color: #935347;
166 | }
167 |
168 | .media-path {
169 | margin: 0 1.5em;
170 | word-wrap: break-word;
171 | font-family: monospace;
172 | }
173 |
174 | .dialog {
175 | border: 2px solid #64706c;
176 | padding: 0;
177 | width: 80vw;
178 | }
179 |
180 | .dialog-heading {
181 | text-align: center;
182 | }
183 |
184 | .form-fields {
185 | padding: 0 1em;
186 | }
187 |
188 | .form-fields label {
189 | background-color: #ecd9bd;
190 | margin: 1.5em 0 .5em;
191 | padding: .5em 0;
192 | text-align: center;
193 | display: block;
194 | width: 100%;
195 | }
196 |
197 | .form-fields > input, .form-fields > select {
198 | padding: 0;
199 | border: 1px solid #935347;
200 | border-radius: 0;
201 | display: block;
202 | width: 99%;
203 | margin-bottom: 2em;
204 | }
205 |
206 | input[type="submit"] {
207 | -webkit-appearance: none;
208 | }
209 |
210 | #error-message {
211 | margin: 1% 4%;
212 | }
213 |
214 | @media (min-width: 500px) {
215 |
216 | .dialog {
217 | width: 450px;
218 | }
219 |
220 | }
221 |
--------------------------------------------------------------------------------
/stream/static/dialog-polyfill.css:
--------------------------------------------------------------------------------
1 | dialog {
2 | position: absolute;
3 | left: 0; right: 0;
4 | width: -moz-fit-content;
5 | width: -webkit-fit-content;
6 | width: fit-content;
7 | height: -moz-fit-content;
8 | height: -webkit-fit-content;
9 | height: fit-content;
10 | margin: auto;
11 | border: solid;
12 | padding: 1em;
13 | background: white;
14 | color: black;
15 | display: none;
16 | }
17 |
18 | dialog[open] {
19 | display: block;
20 | }
21 |
22 | dialog + .backdrop {
23 | position: fixed;
24 | top: 0; right: 0; bottom: 0; left: 0;
25 | background: rgba(0,0,0,0.1);
26 | }
27 |
28 | /* for small devices, modal dialogs go full-screen */
29 | @media screen and (max-width: 540px) {
30 | dialog[_polyfill_modal] { /* TODO: implement */
31 | top: 0;
32 | width: auto;
33 | margin: 1em;
34 | }
35 | }
36 |
37 | ._dialog_overlay {
38 | position: fixed;
39 | top: 0; right: 0; bottom: 0; left: 0;
40 | }
--------------------------------------------------------------------------------
/stream/static/favicon.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nvlong21/Face_Recognize/d01ecf25b5e29d28edf53af93219c7f1805ef308/stream/static/favicon.ico
--------------------------------------------------------------------------------
/stream/static/script.js:
--------------------------------------------------------------------------------
1 | function openAct(evt, actName) {
2 | var i, tabcontent, tablinks;
3 | tabcontent = document.getElementsByClassName("tabcontent");
4 | for (i = 0; i < tabcontent.length; i++) {
5 | tabcontent[i].style.display = "none";
6 | }
7 | tablinks = document.getElementsByClassName("tablinks");
8 | for (i = 0; i < tablinks.length; i++) {
9 | tablinks[i].className = tablinks[i].className.replace(" active", "");
10 | }
11 | document.getElementById(actName).style.display = "block";
12 | evt.currentTarget.className += " active";
13 | }
14 |
15 | // Get the element with id="defaultOpen" and click on it
16 | defaultOpen = document.getElementById("defaultOpen");
17 | if (defaultOpen){
18 | defaultOpen.click();
19 | }
20 | function d_load() {
21 | document.getElementById("imgOpen").click();
22 | }
23 |
24 | function b_nav() {
25 | window.history.back();
26 | }
27 |
28 | function ChangePhoto(name, img) {
29 | img = typeof img !== 'undefined' ? img : "{{ result['original'] }}";
30 | target = document.getElementById("label");
31 | if (target){
32 | target.innerHTML = name;
33 | target = document.getElementById("photo");
34 | target.src = img;
35 | }
36 | }
37 |
38 | function WaitDisplay(upName) {
39 | target = document.getElementById("result");
40 | if (target){
41 | target.style.display = "none";
42 | }
43 | target = document.getElementById("loading");
44 | if (target){
45 | target.style.display = "";
46 | }
47 | setTimeout(function() {
48 | document.getElementById(upName).submit();
49 | }, 100);
50 | }
51 | function compare() {
52 | var img1 = document.getElementById("img_1").src
53 | var img2 = document.getElementById("img_2").src
54 | $.ajax({
55 | type: 'POST',
56 | url: "/compare_two_img",
57 | dataType: 'json',
58 | contentType: 'application/json; charset=utf-8',
59 | data: JSON.stringify({img_1: img1, img_2: img2}),
60 | // dataType: "text",
61 | success: function(data){
62 | console.log(data["results"])
63 | var html_temp = "
--------------------------------------------------------------------------------
4 | {{ field.label }}
5 | {{ field(**kwargs) | safe }}
6 | {% if field.errors %}
7 |